diff --git a/README.md b/README.md
index d53a100..013addd 100644
--- a/README.md
+++ b/README.md
@@ -1,26 +1 @@
-Squirrel is a secure and easy to use webserver for [munki](https://github.com/munki/munki).
-Squirrel is built on top of the [caddy](https://caddyserver.com/) webserver and adds munki specific features through plugins.
-
-Below is a list of features. Some are immediately usable, while others are in various stages of completion.
-
-# Features
-
-### Completed
-
-* [X] **Automatic HTTPS** - squirrel provides a built in Let's Encrypt Client(through caddy). You can also provide your own certs.
-* [X] **Built in [SCEP](https://tools.ietf.org/html/draft-nourse-scep-23) server** - The `scepclient` can request client certificates in a munki preflight script.
-* [X] **HTTP/2** - Automatically supported by the server and NSURLSession on OS X.
-* [X] **git/git-fat/lfs sync** - syncing a repo on a time interval. provided by the caddy [addon](https://caddyserver.com/docs/git)
-
-### In Progress
-* [ ] **API** - A REST API for managing a munki repo remotely. Mostly complete. Porting over from `https://github.com/groob/ape`
-* [ ] **apiimport** - A custom `munkiimport` tool which allows importing packages using the API instead of mounting the repo.
-* [ ] **Web UI** - A web interface for managing the munki repo.
-* [ ] **dynamic catalogs** - currently possible to run `makecatalogs` after a git pull, but the server will also support this feature natively.
-* [ ] **autopromotion/sharding** - part of having dynamic catalogs. The server will allow configuration of promotion between catalogs and [sharding](http://grahamgilbert.com/blog/2015/11/23/releasing-changes-with-sharding/) support.
-* [ ] **monitoring** - structured logging and prometheus metrics.
-
-### Future
-* [ ] DEP/MDM integration - as [micromdm](https://github.com/micromdm/micromdm) is developed, integrations will be added where they make sense. For example - ability to create manifests or validate SCEP requests based on DEP membership.
-* [ ] rsync - another way to sync a repo at an interval for those who don't use git.
-* [ ] [The Update Framework](https://theupdateframework.github.io/) - investigating TUF/[notary](https://github.com/docker/notary) as a way to validate catalogs and manifests.
+Squirrel is a simple HTTP server for munki diff --git a/cmd/squirrel/main.go b/cmd/squirrel/main.go deleted file mode 100644 index 1787bdc..0000000 --- a/cmd/squirrel/main.go +++ /dev/null @@ -1,165 +0,0 @@ -package main - -import ( - "flag" - "fmt" - "log" - "net/http" - "os" - "path/filepath" - - "golang.org/x/net/context" - - kitlog "github.com/go-kit/kit/log" - - "github.com/micromdm/squirrel/munki/datastore" - "github.com/micromdm/squirrel/munki/server" -) - -const usage = "usage: MUNKI_REPO_PATH= SQUIRREL_HTTP_LISTEN_PORT= ape -repo MUNKI_REPO_PATH -port SQUIRREL_HTTP_LISTEN_PORT" - -func main() { - var ( - flRepo = flag.String("repo", envString("SQUIRREL_MUNKI_REPO_PATH", ""), "path to munki repo") - flPort = flag.String("port", envString("SQUIRREL_HTTP_LISTEN_PORT", ""), "port to listen on") - flBasic = flag.Bool("basic", envBool("SQUIRREL_BASIC_AUTH"), "enable basic auth") - flJWT = flag.Bool("jwt", envBool("SQUIRREL_JWT_AUTH"), "enable jwt authentication for api calls") - flJWTSecret = flag.String("jwt-signing-key", envString("SQUIRREL_JWT_SIGNING_KEY", ""), "jwt signing key") - flTLS = flag.Bool("tls", envBool("SQUIRREL_USE_TLS"), "use https") - flTLSCert = flag.String("tls-cert", envString("SQUIRREL_TLS_CERT", ""), "path to TLS certificate") - flTLSKey = flag.String("tls-key", envString("SQUIRREL_TLS_KEY", ""), "path to TLS private key") - ) - *flTLS = true - flag.Parse() - if *flRepo == "" { - flag.Usage() - log.Fatal(usage) - } - - // create the folders if they don't yet exist - checkRepo(*flRepo) - - // validate port flag - if *flPort == "" { - port := defaultPort(*flTLS) - log.Printf("no port flag specified. Using %v by default", port) - *flPort = port - } - - if *flTLS { - checkTLSFlags(*flTLSKey, *flTLSCert) - } - - // validate JWT flags - if *flJWT { - checkJWTFlags(*flJWTSecret) - } - // validate basic auth - if *flBasic && !*flJWT { - log.Fatal("Basic Authentication is used to issue JWT Tokens. 
You must enable JWT as well") - } - - var repo datastore.Datastore - { - repo = &datastore.SimpleRepo{Path: *flRepo} - - } - - var err error - var svc munkiserver.Service - { - svc, err = munkiserver.NewService(repo) - if err != nil { - log.Fatal(err) - } - } - - var logger kitlog.Logger - { - logger = kitlog.NewLogfmtLogger(os.Stderr) - logger = kitlog.NewContext(logger).With("ts", kitlog.DefaultTimestampUTC) - logger = kitlog.NewContext(logger).With("caller", kitlog.DefaultCaller) - } - - ctx := context.Background() - var h http.Handler - { - h = munkiserver.ServiceHandler(ctx, svc, logger) - } - - mux := http.NewServeMux() - mux.Handle("/api/v1/", h) - mux.Handle("/repo/", http.StripPrefix("/repo/", http.FileServer(http.Dir(*flRepo)))) - - port := fmt.Sprintf(":%v", *flPort) - - if *flTLS { - log.Fatal(http.ListenAndServeTLS(port, *flTLSCert, *flTLSKey, mux)) - } else { - log.Fatal(http.ListenAndServe(port, mux)) - } -} - -func defaultPort(tls bool) string { - if tls { - return "443" - } - return "80" -} - -func checkJWTFlags(secret string) { - if secret == "" { - log.Fatal("You must provide a signing key to enable JWT authentication") - } -} - -func checkTLSFlags(keypath, certpath string) { - if keypath == "" || certpath == "" { - log.Fatal("You must provide a valid path to a TLS cert and key") - } -} - -func envString(key, def string) string { - if env := os.Getenv(key); env != "" { - return env - } - return def -} - -func envBool(key string) bool { - if env := os.Getenv(key); env == "true" { - return true - } - return false -} - -func createDir(path string) { - if !dirExists(path) { - dir := filepath.Dir(path) - if err := os.MkdirAll(dir, 0755); err != nil { - log.Fatalf("%v must exits", path) - } - } -} - -func dirExists(path string) bool { - _, err := os.Stat(path) - if os.IsNotExist(err) { - return false - } - return true -} - -func checkRepo(repoPath string) { - pkgsinfoPath := fmt.Sprintf("%v/pkgsinfo/", repoPath) - createDir(pkgsinfoPath) - - manifestPath := fmt.Sprintf("%v/manifests/", repoPath) - createDir(manifestPath) - - pkgsPath := fmt.Sprintf("%v/pkgs/", repoPath) - createDir(pkgsPath) - - catalogsPath := fmt.Sprintf("%v/catalogs/", repoPath) - createDir(catalogsPath) -} diff --git a/glide.lock b/glide.lock deleted file mode 100644 index 6b0e7cb..0000000 --- a/glide.lock +++ /dev/null @@ -1,137 +0,0 @@ -hash: 912c3e755581cb8bbabe09be289dd32f31d3cd40042d5cca0f88ee79de9aad13 -updated: 2016-06-16T10:45:40.430407388-04:00 -imports: -- name: git.schwanenlied.me/yawning/chacha20.git - version: c91e78db502ff629614837aacb7aa4efa61c651a -- name: git.schwanenlied.me/yawning/poly1305.git - version: dfc796fe731c2cfd3aa1e96115ab0aaa3cbb7849 -- name: github.com/abiosoft/caddy-git - version: bf8112054dfc2a0303412c102a17ba05732b345b - subpackages: - - gitos -- name: github.com/BurntSushi/toml - version: f0aeabca5a127c4078abb8c8d64298b147264b55 -- name: github.com/dgrijalva/jwt-go - version: c04502f106d7c5b3fae17c5da49a1bbdd3006b3c -- name: github.com/dustin/go-humanize - version: 499693e27ee0d14ffab67c31ad065fdb3d34ea75 -- name: github.com/flynn/go-shlex - version: 3f9db97f856818214da2e1057f8ad84803971cff -- name: github.com/go-kit/kit - version: 1078c87a1351dee509efd1ea7e7cb9f42840ac51 - subpackages: - - log - - endpoint - - transport/http -- name: github.com/go-logfmt/logfmt - version: d4327190ff838312623b09bfeb50d7c93c8d9c1d -- name: github.com/go-stack/stack - version: 100eb0c0a9c5b306ca2fb4f165df21d80ada4b82 -- name: github.com/gorilla/websocket - version: 
a68708917c6a4f06314ab4e52493cc61359c9d42 -- name: github.com/groob/plist - version: e9ca5cd129407b401d850bc3248338865a7490ae -- name: github.com/hashicorp/go-syslog - version: 42a2b573b664dbf281bd48c3cc12c086b17a39ba -- name: github.com/jimstudt/http-authentication - version: 3eca13d6893afd7ecabe15f4445f5d2872a1b012 - subpackages: - - basic -- name: github.com/julienschmidt/httprouter - version: 77366a47451a56bb3ba682481eed85b64fea14e8 -- name: github.com/kr/logfmt - version: b84e30acd515aadc4b783ad4ff83aff3299bdfe0 -- name: github.com/lucas-clemente/chacha20poly1305 - version: ecc9f245638b6ac6fec56f508a61afc99f353007 -- name: github.com/lucas-clemente/fnv128a - version: 393af48d391698c6ae4219566bfbdfef67269997 -- name: github.com/lucas-clemente/quic-go - version: c76d438808b909d6794a229f980021b5254356dc - subpackages: - - h2quic - - protocol - - utils - - ackhandler - - crypto - - flowcontrol - - frames - - handshake - - qerr - - congestion -- name: github.com/lucas-clemente/quic-go-certificates - version: 9bb36d3159787cca26dcfa15e23049615e307ef8 -- name: github.com/mholt/caddy - version: 6fe5c1a69fc3801dd2a20864777f66cc118ca19a - subpackages: - - caddyhttp - - caddytls - - caddyhttp/httpserver - - caddyfile - - caddyhttp/basicauth - - caddyhttp/bind - - caddyhttp/browse - - caddyhttp/errors - - caddyhttp/expvar - - caddyhttp/extensions - - caddyhttp/fastcgi - - caddyhttp/gzip - - caddyhttp/header - - caddyhttp/internalsrv - - caddyhttp/log - - caddyhttp/markdown - - caddyhttp/mime - - caddyhttp/pprof - - caddyhttp/proxy - - caddyhttp/redirect - - caddyhttp/rewrite - - caddyhttp/root - - caddyhttp/templates - - caddyhttp/websocket - - startupshutdown - - caddyhttp/staticfiles - - caddyhttp/markdown/metadata - - caddyhttp/markdown/summary -- name: github.com/micromdm/scep - version: b7516ce6fe521e14e2d9dbbf03004ad206135807 - subpackages: - - caddy - - server - - scep - - scep/internal/pkcs7 -- name: github.com/miekg/dns - version: 5d001d020961ae1c184f9f8152fdc73810481677 -- name: github.com/russross/blackfriday - version: 1d6b8e9301e720b08a8938b8c25c018285885438 -- name: github.com/shurcooL/sanitized_anchor_name - version: 10ef21a441db47d8b13ebcc5fd2310f636973c77 -- name: github.com/xenolf/lego - version: b2fad6198110326662e9e356a97199078a4a775c - subpackages: - - acme -- name: golang.org/x/crypto - version: f3241ce8505855877cc8a9717bd61a0f7c4ea83c - subpackages: - - bcrypt - - ocsp - - blowfish - - curve25519 - - hkdf -- name: golang.org/x/net - version: d7bf3545bb0dacf009c535b3d3fbf53ac0a339ab - subpackages: - - context - - publicsuffix - - http2 - - http2/hpack - - context/ctxhttp - - lex/httplex -- name: gopkg.in/natefinch/lumberjack.v2 - version: 514cbda263a734ae8caac038dadf05f8f3f9f738 -- name: gopkg.in/square/go-jose.v1 - version: e3f973b66b91445ec816dd7411ad1b6495a5a2fc - subpackages: - - cipher - - json -- name: gopkg.in/yaml.v2 - version: a83829b6f1293c91addabc89d0571c246397bbf4 -devImports: [] diff --git a/glide.yaml b/glide.yaml deleted file mode 100644 index c0f57e7..0000000 --- a/glide.yaml +++ /dev/null @@ -1,16 +0,0 @@ -package: github.com/micromdm/squirrel -import: -- package: github.com/abiosoft/caddy-git -- package: github.com/go-kit/kit - subpackages: - - log -- package: github.com/mholt/caddy - subpackages: - - caddyhttp - - caddytls -- package: github.com/micromdm/scep - subpackages: - - caddy -- package: github.com/xenolf/lego - subpackages: - - acme diff --git a/munki/datastore/catalogs.go b/munki/datastore/catalogs.go deleted file mode 100644 index 75091f8..0000000 
--- a/munki/datastore/catalogs.go +++ /dev/null @@ -1,64 +0,0 @@ -package datastore - -import ( - "fmt" - "log" - "os" - - "github.com/groob/plist" - "github.com/micromdm/squirrel/munki/munki" -) - -var makecatalogs = make(chan bool, 1) - -func (r *SimpleRepo) makeCatalogs(done chan bool) { - catalogs := map[string]*munki.Catalogs{} - pkgsinfos, err := r.AllPkgsinfos() - if err != nil { - log.Println(err) - } - allCatalogs := pkgsinfos.Catalog("all") - catalogs["all"] = allCatalogs - for _, info := range *allCatalogs { - for _, catalogName := range info.Catalogs { - catalogs[catalogName] = pkgsinfos.Catalog(catalogName) - } - } - - for k, v := range catalogs { - err = r.saveCatalog(k, v) - if err != nil { - log.Println(err) - } - } - done <- true -} - -func (r *SimpleRepo) saveCatalog(name string, catalogs *munki.Catalogs) error { - catalogsPath := fmt.Sprintf("%v/catalogs/%v", r.Path, name) - var file *os.File - var err error - if _, err := os.Stat(catalogsPath); err != nil { - file, err = os.Create(catalogsPath) - } else { - file, err = os.OpenFile(catalogsPath, os.O_TRUNC|os.O_WRONLY, 0755) - } - if err != nil { - return err - } - defer file.Close() - enc := plist.NewEncoder(file) - enc.Indent(" ") - return enc.Encode(catalogs) - -} - -//WatchCatalogs creates catalogs from pkgsinfos -func (r *SimpleRepo) WatchCatalogs() { - done := make(chan bool, 1) - for { - <-makecatalogs - go r.makeCatalogs(done) - <-done - } -} diff --git a/munki/datastore/datastore.go b/munki/datastore/datastore.go deleted file mode 100644 index 0973d66..0000000 --- a/munki/datastore/datastore.go +++ /dev/null @@ -1,57 +0,0 @@ -package datastore - -import ( - "errors" - "os" - "path/filepath" - - "github.com/micromdm/squirrel/munki/munki" -) - -var ( - // ErrExists file already exists - ErrExists = errors.New("resource already exists") - - // ErrNotFound = resource not found - ErrNotFound = errors.New("resource not found") -) - -// Datastore is an interface around munki storage -type Datastore interface { - munki.PkgsinfoStore - munki.ManifestStore -} - -// SimpleRepo is a filesystem based backend -type SimpleRepo struct { - Path string - indexManifests map[string]*munki.Manifest - indexPkgsinfo map[string]*munki.PkgsInfo -} - -func deleteFile(path string) error { - if err := os.Remove(path); err != nil { - return ErrNotFound - } - return nil -} - -func createFile(path string) error { - // check if exists - if _, err := os.Stat(path); err == nil { - return ErrExists - } - // create the relative directory if it doesn't exist - dir := filepath.Dir(path) - if err := os.MkdirAll(dir, 0755); err != nil { - return err - } - - // create the file - file, err := os.Create(path) - if err != nil { - return err - } - defer file.Close() - return nil -} diff --git a/munki/datastore/manifest.go b/munki/datastore/manifest.go deleted file mode 100644 index cc796f0..0000000 --- a/munki/datastore/manifest.go +++ /dev/null @@ -1,110 +0,0 @@ -package datastore - -import ( - "errors" - "fmt" - "log" - "os" - "path/filepath" - - "github.com/groob/plist" - "github.com/micromdm/squirrel/munki/munki" -) - -// AllManifests returns an array of manifests -func (r *SimpleRepo) AllManifests() (*munki.ManifestCollection, error) { - manifests := &munki.ManifestCollection{} - err := loadManifests(r.Path, manifests) - if err != nil { - return nil, err - } - r.updateManifestIndex(manifests) - return manifests, nil -} - -// Manifest returns a single manifest from repo -func (r *SimpleRepo) Manifest(name string) (*munki.Manifest, error) { - manifests 
:= &munki.ManifestCollection{} - err := loadManifests(r.Path, manifests) - if err != nil { - return nil, err - } - r.updateManifestIndex(manifests) - - manifest, ok := r.indexManifests[name] - if !ok { - return nil, ErrNotFound - } - return manifest, nil -} - -// NewManifest returns a single manifest from repo -func (r *SimpleRepo) NewManifest(name string) (*munki.Manifest, error) { - manifest := &munki.Manifest{} - manifestPath := fmt.Sprintf("%v/manifests/%v", r.Path, name) - err := createFile(manifestPath) - return manifest, err -} - -// SaveManifest saves a manifest to the datastore -func (r *SimpleRepo) SaveManifest(path string, manifest *munki.Manifest) error { - if path == "" { - return errors.New("must specify a manifest name") - } - manifestPath := fmt.Sprintf("%v/manifests/%v", r.Path, path) - file, err := os.OpenFile(manifestPath, os.O_WRONLY, 0755) - if err != nil { - return err - } - defer file.Close() - if err := plist.NewEncoder(file).Encode(manifest); err != nil { - return err - } - return nil -} - -// DeleteManifest removes a manifest file from the repository -func (r *SimpleRepo) DeleteManifest(name string) error { - manifestPath := fmt.Sprintf("%v/manifests/%v", r.Path, name) - return deleteFile(manifestPath) -} - -func (r *SimpleRepo) updateManifestIndex(manifests *munki.ManifestCollection) { - r.indexManifests = make(map[string]*munki.Manifest, len(*manifests)) - for _, manifest := range *manifests { - r.indexManifests[manifest.Filename] = manifest - } -} - -func walkManifests(manifests *munki.ManifestCollection) filepath.WalkFunc { - return func(path string, info os.FileInfo, err error) error { - if err != nil { - return err - } - file, err := os.Open(path) - if err != nil { - return err - } - defer file.Close() - if !info.IsDir() { - // Decode manifest - manifest := &munki.Manifest{} - err := plist.NewDecoder(file).Decode(manifest) - if err != nil { - log.Printf("git-repo: failed to decode %v, skipping \n", info.Name()) - return nil - } - // set filename to relative path + filename - manifest.Filename = info.Name() - // add to ManifestCollection - *manifests = append(*manifests, manifest) - return nil - } - return nil - } -} - -func loadManifests(path string, manifests *munki.ManifestCollection) error { - manifestPath := fmt.Sprintf("%v/manifests", path) - return filepath.Walk(manifestPath, walkManifests(manifests)) -} diff --git a/munki/datastore/pkgsinfo.go b/munki/datastore/pkgsinfo.go deleted file mode 100644 index ee9388c..0000000 --- a/munki/datastore/pkgsinfo.go +++ /dev/null @@ -1,123 +0,0 @@ -package datastore - -import ( - "errors" - "fmt" - "log" - "os" - "path/filepath" - - "github.com/groob/plist" - "github.com/micromdm/squirrel/munki/munki" -) - -// AllPkgsinfos returns a pkgsinfo collection -func (r *SimpleRepo) AllPkgsinfos() (*munki.PkgsInfoCollection, error) { - pkgsinfos := &munki.PkgsInfoCollection{} - if err := loadPkgsinfos(r.Path, pkgsinfos); err != nil { - return nil, err - } - r.updatePkgsinfoIndex(pkgsinfos) - return pkgsinfos, nil -} - -// Pkgsinfo returns a single pkgsinfo from repo -func (r *SimpleRepo) Pkgsinfo(name string) (*munki.PkgsInfo, error) { - pkgsinfos := &munki.PkgsInfoCollection{} - if err := loadPkgsinfos(r.Path, pkgsinfos); err != nil { - return nil, err - } - r.updatePkgsinfoIndex(pkgsinfos) - pkgsinfo, ok := r.indexPkgsinfo[name] - if !ok { - return nil, ErrNotFound - } - return pkgsinfo, nil -} - -// NewPkgsinfo returns a single manifest from repo -func (r *SimpleRepo) NewPkgsinfo(name string) (*munki.PkgsInfo, error) { 
- pkgsinfo := &munki.PkgsInfo{} - pkgsinfoPath := fmt.Sprintf("%v/pkgsinfo/%v", r.Path, name) - err := createFile(pkgsinfoPath) - return pkgsinfo, err -} - -// SavePkgsinfo saves a pkgsinfo file to the datastore -func (r *SimpleRepo) SavePkgsinfo(path string, pkgsinfo *munki.PkgsInfo) error { - if path == "" { - return errors.New("must specify a pkgsinfo path") - } - pkgsinfoPath := fmt.Sprintf("%v/pkgsinfo/%v", r.Path, path) - file, err := os.OpenFile(pkgsinfoPath, os.O_WRONLY, 0755) - if err != nil { - return err - } - defer file.Close() - if err := plist.NewEncoder(file).Encode(pkgsinfo); err != nil { - return err - } - go func() { - makecatalogs <- true - }() - return nil -} - -// DeletePkgsinfo deletes a pkgsinfo file from the datastore and triggers makecatalogs if succesful -func (r *SimpleRepo) DeletePkgsinfo(name string) error { - pkgsinfoPath := fmt.Sprintf("%v/pkgsinfo/%v", r.Path, name) - if err := deleteFile(pkgsinfoPath); err != nil { - return err - } - go func() { - makecatalogs <- true - }() - return nil -} - -func (r *SimpleRepo) updatePkgsinfoIndex(pkgsinfos *munki.PkgsInfoCollection) { - r.indexPkgsinfo = make(map[string]*munki.PkgsInfo, len(*pkgsinfos)) - for _, pkgsinfo := range *pkgsinfos { - r.indexPkgsinfo[pkgsinfo.Filename] = pkgsinfo - } -} - -func walkPkgsinfo(pkgsinfos *munki.PkgsInfoCollection, pkgsinfoPath string) filepath.WalkFunc { - return func(path string, info os.FileInfo, err error) error { - if err != nil { - return err - } - file, err := os.Open(path) - if err != nil { - return err - } - defer file.Close() - if !info.IsDir() { - // Decode pkgsinfo - pkgsinfo := &munki.PkgsInfo{} - err := plist.NewDecoder(file).Decode(pkgsinfo) - if err != nil { - log.Printf("simple-repo: failed to decode %v, skipping \n", info.Name()) - return nil - } - // set filename to relative path - relpath, err := filepath.Rel(pkgsinfoPath, path) - if err != nil { - log.Printf("simple-repo: failed to get relative path %v, skipping \n", info.Name()) - return err - } - // use the relative path as the filename - pkgsinfo.Filename = relpath - // add to ManifestCollection - *pkgsinfos = append(*pkgsinfos, pkgsinfo) - return nil - } - return nil - } -} - -// load the pkgsinfos -func loadPkgsinfos(path string, pkgsinfos *munki.PkgsInfoCollection) error { - pkgsinfoPath := fmt.Sprintf("%v/pkgsinfo", path) - return filepath.Walk(pkgsinfoPath, walkPkgsinfo(pkgsinfos, pkgsinfoPath)) -} diff --git a/munki/munki/catalog.go b/munki/munki/catalog.go deleted file mode 100644 index 5db2c51..0000000 --- a/munki/munki/catalog.go +++ /dev/null @@ -1,9 +0,0 @@ -package munki - -// Catalog is a munki catalog -type Catalog struct { - pkgsinfo -} - -// Catalogs is an array of catalogs -type Catalogs []*Catalog diff --git a/munki/munki/manifest.go b/munki/munki/manifest.go deleted file mode 100644 index 92f70aa..0000000 --- a/munki/munki/manifest.go +++ /dev/null @@ -1,81 +0,0 @@ -package munki - -// ManifestStore is the interface for accessing manifests in a database or filesystem -type ManifestStore interface { - AllManifests() (*ManifestCollection, error) - Manifest(name string) (*Manifest, error) - NewManifest(name string) (*Manifest, error) - SaveManifest(path string, manifest *Manifest) error - DeleteManifest(name string) error -} - -// Manifest represents the structure of a munki manifest -// This is what would be serialized in a datastore -type Manifest struct { - Filename string `plist:"-" json:"-"` - Catalogs []string `plist:"catalogs,omitempty" json:"catalogs,omitempty"` - DisplayName string 
`plist:"display_name,omitempty" json:"display_name,omitempty"` - IncludedManifests []string `plist:"included_manifests,omitempty" json:"included_manifests,omitempty"` - Notes string `plist:"notes,omitempty" json:"notes,omitempty"` - User string `plist:"user,omitempty" json:"user,omitempty"` - ConditionalItems []condition `plist:"conditional_items,omitempty" json:"conditional_items,omitempty"` - manifestItems -} - -type manifestItems struct { - OptionalInstalls []string `plist:"optional_installs,omitempty" json:"optional_installs,omitempty"` - ManagedInstalls []string `plist:"managed_installs,omitempty" json:"managed_installs,omitempty"` - ManagedUninstalls []string `plist:"managed_uninstalls,omitempty" json:"managed_uninstalls,omitempty"` - ManagedUpdates []string `plist:"managed_updates,omitempty" json:"managed_updates,omitempty"` -} - -type condition struct { - Condition string `plist:"condition" json:"condition"` - manifestItems -} - -// ManifestCollection represents a list of manifests -type ManifestCollection []*Manifest - -// UpdateFromPayload updates a manifest from a ManifestPayload -func (m *Manifest) UpdateFromPayload(payload *ManifestPayload) { - if payload.Catalogs != nil { - m.Catalogs = *payload.Catalogs - } - - if payload.DisplayName != nil { - m.DisplayName = *payload.DisplayName - } - - if payload.IncludedManifests != nil { - m.IncludedManifests = *payload.IncludedManifests - } - - if payload.OptionalInstalls != nil { - m.OptionalInstalls = *payload.OptionalInstalls - } - - if payload.ManagedInstalls != nil { - m.ManagedInstalls = *payload.ManagedInstalls - } - - if payload.ManagedUninstalls != nil { - m.ManagedUninstalls = *payload.ManagedUninstalls - } - - if payload.ManagedUpdates != nil { - m.ManagedUpdates = *payload.ManagedUpdates - } - - if payload.Notes != nil { - m.Notes = *payload.Notes - } - - if payload.User != nil { - m.User = *payload.User - } - - if payload.ConditionalItems != nil { - m.ConditionalItems = *payload.ConditionalItems - } -} diff --git a/munki/munki/manifest_payload.go b/munki/munki/manifest_payload.go deleted file mode 100644 index eededa9..0000000 --- a/munki/munki/manifest_payload.go +++ /dev/null @@ -1,21 +0,0 @@ -package munki - -// ManifestPayload represents a payload type -// The payload type is what the client would send over the wire -type ManifestPayload struct { - Filename *string `plist:"filename,omitempty" json:"filename,omitempty"` - Catalogs *[]string `plist:"catalogs,omitempty" json:"catalogs,omitempty"` - DisplayName *string `plist:"display_name,omitempty" json:"display_name,omitempty"` - IncludedManifests *[]string `plist:"included_manifests,omitempty" json:"included_manifests,omitempty"` - Notes *string `plist:"notes,omitempty" json:"notes,omitempty"` - User *string `plist:"user,omitempty" json:"user,omitempty"` - ConditionalItems *[]condition `plist:"conditional_items,omitempty" json:"conditional_items,omitempty"` - manifestItemsPayload -} - -type manifestItemsPayload struct { - OptionalInstalls *[]string `plist:"optional_installs,omitempty" json:"optional_installs,omitempty"` - ManagedInstalls *[]string `plist:"managed_installs,omitempty" json:"managed_installs,omitempty"` - ManagedUninstalls *[]string `plist:"managed_uninstalls,omitempty" json:"managed_uninstalls,omitempty"` - ManagedUpdates *[]string `plist:"managed_updates,omitempty" json:"managed_updates,omitempty"` -} diff --git a/munki/munki/pkgsinfo.go b/munki/munki/pkgsinfo.go deleted file mode 100644 index 4359c4c..0000000 --- a/munki/munki/pkgsinfo.go +++ /dev/null 
@@ -1,203 +0,0 @@ -package munki - -import "time" - -// PkgsinfoStore is an interface for interacting with Pkgsinfo types -type PkgsinfoStore interface { - AllPkgsinfos() (*PkgsInfoCollection, error) - Pkgsinfo(name string) (*PkgsInfo, error) - NewPkgsinfo(name string) (*PkgsInfo, error) - SavePkgsinfo(path string, pkgsinfo *PkgsInfo) error - DeletePkgsinfo(name string) error -} - -// PkgsInfo represents the structure of a pkgsinfo file -type PkgsInfo struct { - pkgsinfo - Filename string `plist:"-" json:"-"` - Metadata metadata `plist:"_metadata,omitempty" json:"_metadata,omitempty"` - adobeRelatedItems -} - -type pkgsinfo struct { - Autoremove bool `plist:"autoremove,omitempty" json:"autoremove,omitempty"` - Catalogs []string `plist:"catalogs,omitempty" json:"catalogs,omitempty"` - Category string `plist:"category,omitempty" json:"category,omitempty"` - CopyLocal bool `plist:"copy_local,omitempty" json:"copy_local,omitempty"` - Description string `plist:"description,omitempty" json:"description,omitempty"` - Developer string `plist:"developer,omitempty" json:"developer,omitempty"` - DisplayName string `plist:"display_name,omitempty" json:"display_name,omitempty"` - ForceInstallAfterDate time.Time `plist:"force_install_after_date,omitempty" json:"force_install_after_date,omitempty"` - IconName string `plist:"icon_name,omitempty" json:"icon_name,omitempty"` - InstallableCondition string `plist:"installable_condition,omitempty" json:"installable_condition,omitempty"` - InstalledSize int `plist:"installed_size,omitempty" json:"installed_size,omitempty"` - InstallerItemHash string `plist:"installer_item_hash,omitempty" json:"installer_item_hash,omitempty"` - InstallerItemLocation string `plist:"installer_item_location,omitempty" json:"installer_item_location,omitempty"` - InstallerItemSize int `plist:"installer_item_size,omitempty" json:"installer_item_size,omitempty"` - InstallerType string `plist:"installer_type,omitempty" json:"installer_item_type,omitempty"` - Installs []install `plist:"installs,omitempty" json:"installs,omitempty"` - Receipts []receipt `plist:"receipts,omitempty" json:"receipts,omitempty"` - ItemsToCopy []itemsToCopy `plist:"items_to_copy,omitempty" json:"items_to_copy,omitempty"` - MinimumMunkiVersion string `plist:"minimum_munki_version,omitempty" json:"minimum_munki_version,omitempty"` - MinimumOSVersion string `plist:"minimum_os_version,omitempty" json:"minimum_os_version,omitempty"` - MaximumOSVersion string `plist:"maximum_os_version,omitempty" json:"maximum_os_version,omitempty"` - Name string `plist:"name,omitempty" json:"name,omitempty"` - Notes string `plist:"notes,omitempty" json:"notes,omitempty"` - PackageCompleteURL string `plist:"PackageCompleteURL,omitempty" json:"PackageCompleteURL,omitempty"` - PackageURL string `plist:"PackageURL,omitempty" json:"PackageURL,omitempty"` - PackagePath string `plist:"package_path,omitempty" json:"package_path,omitempty"` - InstallCheckScript string `plist:"installcheck_script,omitempty" json:"installcheck_script,omitempty"` - UninstallCheckScript string `plist:"uninstallcheck_script,omitempty" json:"uninstallcheck_script,omitempty"` - OnDemand bool `plist:"OnDemand,omitempty" json:"OnDemand,omitempty"` - PostInstallScript string `plist:"postinstall_script,omitempty" json:"postinstall_script,omitempty"` - PreInstallScript string `plist:"preinstall_script,omitempty" json:"preinstall_script,omitempty"` - PostUninstallScript string `plist:"postuninstall_script,omitempty" json:"postuninstall_script,omitempty"` - 
SuppressBundleRelocation bool `plist:"suppress_bundle_relocation,omitempty" json:"suppress_bundle_relocation,omitempty"` - UnattendedInstall bool `plist:"unattended_install,omitempty" json:"unattended_install,omitempty"` - UnattendedUninstall bool `plist:"unattended_uninstall,omitempty" json:"unattended_uninstall,omitempty"` - Requires []string `plist:"requires,omitempty" json:"requires,omitempty"` - RestartAction string `plist:"RestartAction,omitempty" json:"RestartAction,omitempty"` - Uninstallmethod string `plist:"uninstall_method,omitempty" json:"uninstall_method,omitempty"` - UninstallScript string `plist:"uninstall_script,omitempty" json:"uninstall_script,omitempty"` - UninstallerItemLocation string `plist:"uninstaller_item_location,omitempty" json:"uninstaller_item_location,omitempty"` - AppleItem bool `plist:"apple_item,omitempty" json:"apple_item,omitempty"` - Uninstallable bool `plist:"uninstallable,omitempty" json:"uninstallable,omitempty"` - BlockingApplications []string `plist:"blocking_applications,omitempty" json:"blocking_applications,omitempty"` - SupportedArchitectures []string `plist:"supported_architectures,omitempty" json:"supported_architectures,omitempty"` - UpdateFor []string `plist:"update_for,omitempty" json:"update_for,omitempty"` - Version string `plist:"version,omitempty" json:"version,omitempty"` - InstallerChoicesXML []installerChoicesXML `plist:"installer_choices_xml,omitempty" json:"installer_choices_xml,omitempty"` - InstallerEnvironment map[string]string `plist:"installer_environment,omitempty" json:"installer_environment,omitempty"` -} - -type metadata struct { - CreatedBy string `plist:"created_by,omitempty" json:"created_by,omitempty"` - CreatedDate time.Time `plist:"creation_date,omitempty" json:"created_date,omitempty"` - MunkiVersion string `plist:"munki_version,omitempty" json:"munki_version,omitempty"` - OSVersion string `plist:"os_version,omitempty" json:"os_version,omitempty"` -} - -type install struct { - CFBundleIdentifier string `plist:"CFBundleIdentifier,omitempty" json:"CFBundleIdentifier,omitempty"` - CFBundleName string `plist:"CFBundleName,omitempty" json:"CFBundleName,omitempty"` - CFBundleShortVersionString string `plist:"CFBundleShortVersionString,omitempty" json:"CFBundleShortVersionString,omitempty"` - CFBundleVersion string `plist:"CFBundleVersion,omitempty" json:"CFBundleVersion,omitempty"` - MD5Checksum string `plist:"md5checksum,omitempty" json:"md5checksum,omitempty"` - MinOSVersion string `plist:"minosversion,omitempty" json:"min_os_version,omitempty"` - Path string `plist:"path,omitempty" json:"path,omitempty"` - Type string `plist:"type,omitempty" json:"type,omitempty"` - VersionComparisonKey string `plist:"version_comparison_key,omitempty" json:"version_comparision_key,omitempty"` -} - -type receipt struct { - Filename string `plist:"filename,omitempty" json:"filename,omitempty"` - InstalledSize int `plist:"installed_size,omitempty" json:"installed_size,omitempty"` - Name string `plist:"name,omitempty" json:"name,omitempty"` - PackageID string `plist:"packageid,omitempty" json:"packageid,omitempty"` - Version string `plist:"version,omitempty" json:"version,omitempty"` - Optional bool `plist:"optional,omitempty" json:"optional,omitempty"` -} - -type itemsToCopy struct { - DestinationPath string `plist:"destination_path" json:"destination_path"` - Group string `plist:"group,omitempty" json:"group,omitempty"` - Mode string `plist:"mode,omitempty" json:"mode,omitempty"` - SourceItem string `plist:"source_item" 
json:"source_item"` - User string `plist:"user,omitempty" json:"user,omitempty"` -} - -type installerChoicesXML struct { - AttributeSetting int `plist:"attributeSetting,omitempty" json:"attributeSetting,omitempty"` - ChoiceAttribute string `plist:"choiceAttribute,omitempty" json:"choiceAttribute,omitempty"` - ChoiceIdentifier string `plist:"choiceIdentifier,omitempty" json:"choiceIdentifier,omitempty"` -} - -type adobeRelatedItems struct { - AdobeSetupType string `plist:"AdobeSetupType,omitempty" json:"AdobeSetupType,omitempty"` - Payloads []map[string]interface{} `plist:"payloads,omitempty" json:"payloads,omitempty"` - // Only available in CS, not CC - AdobeInstallInfo adobeInstallInfo `plist:"adobe_install_info,omitempty" json:"adobe_install_info,omitempty"` -} - -type adobeInstallInfo struct { - SerialNumber string `plist:"serialnumber,omitempty" json:"serialnumber,omitempty"` - InstallXML string `plist:"installxml,omitempty" json:"installxml,omitempty"` - UninstallXML string `plist:"uninstallxml,omitempty" json:"uninstallxml,omitempty"` - MediaSignature string `plist:"media_signature,omitempty" json:"media_signature,omitempty"` - MediaDigest string `plist:"media_digest,omitempty" json:"media_signature,omitempty"` - PayloadCount int `plist:"payload_count,omitempty" json:"payload_count,omitempty"` - SuppressRegistration bool `plist:"suppress_registration,omitempty" json:"suppress_registration,omitempty"` - SuppressUpdates bool `plist:"suppress_updates,omitempty" json:"suppress_updates,omitempty"` -} - -// Catalog converts a pkgsinfo to a catalog -func (p *PkgsInfo) catalog() *Catalog { - catalog := &Catalog{ - p.pkgsinfo, - } - return catalog -} - -type pkgsinfoView struct { - Filename string `plist:"filename,omitempty" json:"filename,omitempty"` - *PkgsInfo -} - -// PkgsInfoCollection is a collection of pkgsinfos -type PkgsInfoCollection []*PkgsInfo - -// Catalog return a specific catalog -func (p *PkgsInfoCollection) Catalog(name string) *Catalogs { - catalogs := Catalogs{} - var pkgsinfos *PkgsInfoCollection - if name != "all" { - filtered := p.ByCatalog(name) - pkgsinfos = filtered - } else { - pkgsinfos = p - } - for _, info := range *pkgsinfos { - catalog := info.catalog() - catalogs = append(catalogs, catalog) - } - return &catalogs -} - -// ByCatalog returns an array of items in catalog -func (p *PkgsInfoCollection) ByCatalog(catalogs ...string) *PkgsInfoCollection { - byCatalogIndex := map[string]*PkgsInfo{} - byCatalog := PkgsInfoCollection{} - for _, item := range *p { - for _, catalog := range catalogs { - if containsString(item.Catalogs, catalog) { - byCatalogIndex[item.Filename] = item - } - } - } - - for _, v := range byCatalogIndex { - byCatalog = append(byCatalog, v) - } - - return &byCatalog -} - -// ByName returns a list of pkgsinfos filtered by name -func (p *PkgsInfoCollection) ByName(name string) *PkgsInfoCollection { - byName := PkgsInfoCollection{} - for _, item := range *p { - if item.Name == name { - byName = append(byName, item) - } - } - return &byName -} - -func containsString(s []string, e string) bool { - for _, a := range s { - if a == e { - return true - } - } - return false -} diff --git a/munki/server/endpoint_manifest.go b/munki/server/endpoint_manifest.go deleted file mode 100644 index 63a5854..0000000 --- a/munki/server/endpoint_manifest.go +++ /dev/null @@ -1,125 +0,0 @@ -package munkiserver - -import ( - "github.com/go-kit/kit/endpoint" - "github.com/micromdm/squirrel/munki/munki" - "golang.org/x/net/context" -) - -type listManifestsRequest struct 
{ -} - -type listManifestsResponse struct { - manifests *munki.ManifestCollection - Err error `json:"error,omitempty" plist:"error,omitempty"` -} - -func (r listManifestsResponse) subset() interface{} { - return r.manifests -} - -func (r listManifestsResponse) error() error { return r.Err } - -func makeListManifestsEndpoint(svc Service) endpoint.Endpoint { - return func(ctx context.Context, request interface{}) (interface{}, error) { - manifests, err := svc.ListManifests(ctx) - return listManifestsResponse{manifests: manifests, Err: err}, nil - } -} - -type showManifestRequest struct { - Path string -} - -type showManifestResponse struct { - *munki.Manifest - Err error `json:"error,omitempty" plist:"error,omitempty"` -} - -func (r showManifestResponse) error() error { return r.Err } - -func makeShowManifestEndpoint(svc Service) endpoint.Endpoint { - return func(ctx context.Context, request interface{}) (interface{}, error) { - req := request.(showManifestRequest) - manifest, err := svc.ShowManifest(ctx, req.Path) - return showManifestResponse{Manifest: manifest, Err: err}, nil - } -} - -type createManifestRequest struct { - Filename string `plist:"filename" json:"filename"` - *munki.Manifest -} - -type createManifestResponse struct { - *munki.Manifest - Err error `json:"error,omitempty" plist:"error,omitempty"` -} - -func (r createManifestResponse) error() error { return r.Err } - -func makeCreateManifestEndpoint(svc Service) endpoint.Endpoint { - return func(ctx context.Context, request interface{}) (interface{}, error) { - req := request.(createManifestRequest) - manifest, err := svc.CreateManifest(ctx, req.Filename, req.Manifest) - return showManifestResponse{Manifest: manifest, Err: err}, nil - } -} - -type deleteManifestRequest struct { - Path string `plist:"filename" json:"filename"` -} - -type deleteManifestResponse struct { - Err error `json:"error,omitempty" plist:"error,omitempty"` -} - -func (r deleteManifestResponse) error() error { return r.Err } - -func makeDeleteManifestEndpoint(svc Service) endpoint.Endpoint { - return func(ctx context.Context, request interface{}) (interface{}, error) { - req := request.(deleteManifestRequest) - err := svc.DeleteManifest(ctx, req.Path) - return deleteManifestResponse{Err: err}, nil - } -} - -type replaceManifestRequest struct { - Path string `plist:"filename" json:"filename"` - *munki.Manifest -} - -type replaceManifestResponse struct { - *munki.Manifest - Err error `json:"error,omitempty" plist:"error,omitempty"` -} - -func (r replaceManifestResponse) error() error { return r.Err } - -func makeReplaceManifestEndpoint(svc Service) endpoint.Endpoint { - return func(ctx context.Context, request interface{}) (interface{}, error) { - req := request.(replaceManifestRequest) - manifest, err := svc.ReplaceManifest(ctx, req.Path, req.Manifest) - return replaceManifestResponse{Manifest: manifest, Err: err}, nil - } -} - -type updateManifestRequest struct { - Path string `plist:"filename" json:"filename"` - *munki.ManifestPayload -} - -type updateManifestResponse struct { - *munki.Manifest - Err error `json:"error,omitempty" plist:"error,omitempty"` -} - -func (r updateManifestResponse) error() error { return r.Err } - -func makeUpdateManifestEndpoint(svc Service) endpoint.Endpoint { - return func(ctx context.Context, request interface{}) (interface{}, error) { - req := request.(updateManifestRequest) - manifest, err := svc.UpdateManifest(ctx, req.Path, req.ManifestPayload) - return updateManifestResponse{Manifest: manifest, Err: err}, nil - } -} 
diff --git a/munki/server/endpoint_pkgsinfo.go b/munki/server/endpoint_pkgsinfo.go deleted file mode 100644 index c0367ac..0000000 --- a/munki/server/endpoint_pkgsinfo.go +++ /dev/null @@ -1,48 +0,0 @@ -package munkiserver - -import ( - "github.com/go-kit/kit/endpoint" - "github.com/micromdm/squirrel/munki/munki" - "golang.org/x/net/context" -) - -type listPkgsinfosRequest struct { -} - -type listPkgsinfosResponse struct { - pkgsinfos *munki.PkgsInfoCollection - Err error `json:"error,omitempty" plist:"error,omitempty"` -} - -func (r listPkgsinfosResponse) subset() interface{} { - return r.pkgsinfos -} - -func (r listPkgsinfosResponse) error() error { return r.Err } - -func makeListPkgsinfosEndpoint(svc Service) endpoint.Endpoint { - return func(ctx context.Context, request interface{}) (interface{}, error) { - pkgsinfos, err := svc.ListPkgsinfos(ctx) - return listPkgsinfosResponse{pkgsinfos: pkgsinfos, Err: err}, nil - } -} - -type createPkgsinfoRequest struct { - Filename string `plist:"filename" json:"filename"` - *munki.PkgsInfo -} - -type createPkgsinfoResponse struct { - *munki.PkgsInfo - Err error `json:"error,omitempty" plist:"error,omitempty"` -} - -func (r createPkgsinfoResponse) error() error { return r.Err } - -func makeCreatePkgsinfoEndpoint(svc Service) endpoint.Endpoint { - return func(ctx context.Context, request interface{}) (interface{}, error) { - req := request.(createPkgsinfoRequest) - pkgsinfo, err := svc.CreatePkgsinfo(ctx, req.Filename, req.PkgsInfo) - return createPkgsinfoResponse{PkgsInfo: pkgsinfo, Err: err}, nil - } -} diff --git a/munki/server/service.go b/munki/server/service.go deleted file mode 100644 index b2b8ec0..0000000 --- a/munki/server/service.go +++ /dev/null @@ -1,20 +0,0 @@ -package munkiserver - -import "github.com/micromdm/squirrel/munki/datastore" - -// Service describes the actions of a munki server -type Service interface { - ManifestService - PkgsinfoService -} - -type service struct { - repo datastore.Datastore -} - -// NewService creates a new munki api service -func NewService(repo datastore.Datastore) (Service, error) { - return &service{ - repo: repo, - }, nil -} diff --git a/munki/server/service_manifest.go b/munki/server/service_manifest.go deleted file mode 100644 index 9a29319..0000000 --- a/munki/server/service_manifest.go +++ /dev/null @@ -1,58 +0,0 @@ -package munkiserver - -import ( - "github.com/micromdm/squirrel/munki/munki" - "golang.org/x/net/context" -) - -// ManifestService describes the actions of a munki server -type ManifestService interface { - ListManifests(ctx context.Context) (*munki.ManifestCollection, error) - ShowManifest(ctx context.Context, name string) (*munki.Manifest, error) - CreateManifest(ctx context.Context, name string, manifest *munki.Manifest) (*munki.Manifest, error) - ReplaceManifest(ctx context.Context, name string, manifest *munki.Manifest) (*munki.Manifest, error) - DeleteManifest(ctx context.Context, name string) error - UpdateManifest(ctx context.Context, name string, payload *munki.ManifestPayload) (*munki.Manifest, error) -} - -func (svc service) ListManifests(ctx context.Context) (*munki.ManifestCollection, error) { - return svc.repo.AllManifests() -} - -func (svc service) ShowManifest(ctx context.Context, name string) (*munki.Manifest, error) { - return svc.repo.Manifest(name) -} - -func (svc service) CreateManifest(ctx context.Context, name string, manifest *munki.Manifest) (*munki.Manifest, error) { - _, err := svc.repo.NewManifest(name) - if err != nil { - return nil, err - } - if err := 
svc.repo.SaveManifest(name, manifest); err != nil { - return nil, err - } - return manifest, nil -} - -func (svc service) DeleteManifest(ctx context.Context, name string) error { - return svc.repo.DeleteManifest(name) -} - -func (svc service) ReplaceManifest(ctx context.Context, name string, manifest *munki.Manifest) (*munki.Manifest, error) { - if err := svc.repo.DeleteManifest(name); err != nil { - return nil, err - } - return svc.CreateManifest(ctx, name, manifest) -} - -func (svc service) UpdateManifest(ctx context.Context, name string, payload *munki.ManifestPayload) (*munki.Manifest, error) { - manifest, err := svc.repo.Manifest(name) - if err != nil { - return nil, err - } - manifest.UpdateFromPayload(payload) - if err := svc.repo.SaveManifest(name, manifest); err != nil { - return nil, err - } - return manifest, nil -} diff --git a/munki/server/service_pkgsinfo.go b/munki/server/service_pkgsinfo.go deleted file mode 100644 index e2bbbce..0000000 --- a/munki/server/service_pkgsinfo.go +++ /dev/null @@ -1,27 +0,0 @@ -package munkiserver - -import ( - "github.com/micromdm/squirrel/munki/munki" - "golang.org/x/net/context" -) - -// PkgsinfoService describes the methods for managing Pkgsinfo files in a munki repository -type PkgsinfoService interface { - ListPkgsinfos(ctx context.Context) (*munki.PkgsInfoCollection, error) - CreatePkgsinfo(ctx context.Context, name string, pkgsinfo *munki.PkgsInfo) (*munki.PkgsInfo, error) -} - -func (svc service) ListPkgsinfos(ctx context.Context) (*munki.PkgsInfoCollection, error) { - return svc.repo.AllPkgsinfos() -} - -func (svc service) CreatePkgsinfo(ctx context.Context, name string, pkgsinfo *munki.PkgsInfo) (*munki.PkgsInfo, error) { - _, err := svc.repo.NewPkgsinfo(name) - if err != nil { - return nil, err - } - if err := svc.repo.SavePkgsinfo(name, pkgsinfo); err != nil { - return nil, err - } - return pkgsinfo, nil -} diff --git a/munki/server/testdata/testrepo/manifests/site_default b/munki/server/testdata/testrepo/manifests/site_default deleted file mode 100644 index 45a615b..0000000 --- a/munki/server/testdata/testrepo/manifests/site_default +++ /dev/null @@ -1,22 +0,0 @@ - - - - - catalogs - - production - - included_manifests - - managed_installs - - munkitools - munkitools_core - munkitools_launchd - - managed_uninstalls - - optional_installs - - - diff --git a/munki/server/transport.go b/munki/server/transport.go deleted file mode 100644 index 666ad11..0000000 --- a/munki/server/transport.go +++ /dev/null @@ -1,215 +0,0 @@ -package munkiserver - -import ( - "encoding/json" - "errors" - "net/http" - - kitlog "github.com/go-kit/kit/log" - httptransport "github.com/go-kit/kit/transport/http" - kithttp "github.com/go-kit/kit/transport/http" - "github.com/gorilla/mux" - "github.com/groob/plist" - "github.com/micromdm/squirrel/munki/datastore" - - "golang.org/x/net/context" -) - -var ( - // ErrEmptyRequest is returned if the request body is empty - errEmptyRequest = errors.New("request must contain all required fields") - errBadRouting = errors.New("inconsistent mapping between route and handler (programmer error)") -) - -// ServiceHandler creates an HTTP handler for the munki Service -func ServiceHandler(ctx context.Context, svc Service, logger kitlog.Logger) http.Handler { - opts := []kithttp.ServerOption{ - kithttp.ServerErrorLogger(logger), - kithttp.ServerErrorEncoder(encodeError), - kithttp.ServerBefore(updateContext), - } - listManifestsHandler := kithttp.NewServer( - ctx, - makeListManifestsEndpoint(svc), - 
decodeListManifestsRequest, - encodeResponse, - opts..., - ) - showManifestHandler := kithttp.NewServer( - ctx, - makeShowManifestEndpoint(svc), - decodeShowManifestRequest, - encodeResponse, - opts..., - ) - createManifestHandler := kithttp.NewServer( - ctx, - makeCreateManifestEndpoint(svc), - decodeCreateManifestRequest, - encodeResponse, - opts..., - ) - deleteManifestHandler := kithttp.NewServer( - ctx, - makeDeleteManifestEndpoint(svc), - decodeDeleteManifestRequest, - encodeResponse, - opts..., - ) - replaceManifestHandler := kithttp.NewServer( - ctx, - makeReplaceManifestEndpoint(svc), - decodeReplaceManifestRequest, - encodeResponse, - opts..., - ) - updateManifestHandler := kithttp.NewServer( - ctx, - makeUpdateManifestEndpoint(svc), - decodeUpdateManifestRequest, - encodeResponse, - opts..., - ) - listPkgsinfosHandler := kithttp.NewServer( - ctx, - makeListPkgsinfosEndpoint(svc), - decodeListPkgsinfosRequest, - encodeResponse, - opts..., - ) - createPkgsinfoHandler := kithttp.NewServer( - ctx, - makeCreatePkgsinfoEndpoint(svc), - decodeCreatePkgsinfoRequest, - encodeResponse, - opts..., - ) - - r := mux.NewRouter() - - r.Handle("/api/v1/manifests/{path}", showManifestHandler).Methods("GET") - r.Handle("/api/v1/manifests", listManifestsHandler).Methods("GET") - r.Handle("/api/v1/manifests", createManifestHandler).Methods("POST") - r.Handle("/api/v1/manifests/{path}", deleteManifestHandler).Methods("DELETE") - r.Handle("/api/v1/manifests/{path}", replaceManifestHandler).Methods("PUT") - r.Handle("/api/v1/manifests/{path}", updateManifestHandler).Methods("PATCH") - - r.Handle("/api/v1/pkgsinfos", listPkgsinfosHandler).Methods("GET") - r.Handle("/api/v1/pkgsinfos", createPkgsinfoHandler).Methods("POST") - return r -} - -func updateContext(ctx context.Context, r *http.Request) context.Context { - return context.WithValue(ctx, "mediaType", acceptHeader(r)) -} - -// if header is not set to json or xml, return json header -func acceptHeader(r *http.Request) string { - accept := r.Header.Get("Accept") - switch accept { - case "application/xml", "application/xml; charset=utf-8": - return "application/xml" - default: - return "application/json" - } -} - -// set the Content-Type header -func setContentType(rw http.ResponseWriter, accept string) { - switch accept { - case "application/xml": - rw.Header().Set("Content-Type", "application/xml; charset=utf-8") - return - default: - rw.Header().Set("Content-Type", "application/json; charset=utf-8") - return - } -} - -type errorer interface { - error() error -} - -type statuser interface { - status() int -} - -type subsetEncoder interface { - subset() interface{} -} - -func encodeJSON(w http.ResponseWriter, from interface{}) error { - data, err := json.MarshalIndent(from, "", " ") - if err != nil { - return err - } - w.Write(data) - return nil -} - -func encodePLIST(w http.ResponseWriter, from interface{}) error { - enc := plist.NewEncoder(w) - enc.Indent(" ") - return enc.Encode(from) -} - -func encodeResponse(ctx context.Context, w http.ResponseWriter, response interface{}) error { - if e, ok := response.(errorer); ok && e.error() != nil { - encodeError(ctx, e.error(), w) - return nil - } - mediaType := ctx.Value("mediaType").(string) - setContentType(w, mediaType) - // for success responses - if e, ok := response.(statuser); ok { - w.WriteHeader(e.status()) - if e.status() == http.StatusNoContent { - return nil - } - } - - // check if this is a collection - if e, ok := response.(subsetEncoder); ok { - response = e.subset() - } - if mediaType 
== "application/xml" { - return encodePLIST(w, response) - } - return encodeJSON(w, response) -} - -func encodeError(ctx context.Context, err error, w http.ResponseWriter) { - if err == nil { - panic("encodeError with nil error") - } - mediaType := ctx.Value("mediaType").(string) - setContentType(w, mediaType) - w.WriteHeader(codeFrom(err)) - errData := map[string]interface{}{ - "error": err.Error(), - } - if mediaType == "application/xml" { - encodePLIST(w, errData) - return - } - encodeJSON(w, errData) -} - -func codeFrom(err error) int { - switch err { - case datastore.ErrNotFound: - return http.StatusNotFound - default: - if e, ok := err.(httptransport.Error); ok { - switch e.Domain { - case httptransport.DomainDecode: - return http.StatusBadRequest - case httptransport.DomainDo: - return http.StatusServiceUnavailable - default: - return http.StatusInternalServerError - } - } - return http.StatusInternalServerError - } -} diff --git a/munki/server/transport_manifest.go b/munki/server/transport_manifest.go deleted file mode 100644 index 3e709c4..0000000 --- a/munki/server/transport_manifest.go +++ /dev/null @@ -1,67 +0,0 @@ -package munkiserver - -import ( - "encoding/json" - "net/http" - - "github.com/gorilla/mux" - "golang.org/x/net/context" -) - -func decodeListManifestsRequest(_ context.Context, r *http.Request) (interface{}, error) { - return listManifestsRequest{}, nil -} - -func decodeShowManifestRequest(_ context.Context, r *http.Request) (interface{}, error) { - vars := mux.Vars(r) - path, ok := vars["path"] - if !ok { - return nil, errBadRouting - } - return showManifestRequest{Path: path}, nil -} - -func decodeCreateManifestRequest(_ context.Context, r *http.Request) (interface{}, error) { - var request createManifestRequest - if err := json.NewDecoder(r.Body).Decode(&request); err != nil { - return nil, err - } - return request, nil -} - -func decodeDeleteManifestRequest(_ context.Context, r *http.Request) (interface{}, error) { - vars := mux.Vars(r) - path, ok := vars["path"] - if !ok { - return nil, errBadRouting - } - return deleteManifestRequest{Path: path}, nil -} - -func decodeReplaceManifestRequest(_ context.Context, r *http.Request) (interface{}, error) { - var request replaceManifestRequest - if err := json.NewDecoder(r.Body).Decode(&request.Manifest); err != nil { - return nil, err - } - vars := mux.Vars(r) - path, ok := vars["path"] - if !ok { - return nil, errBadRouting - } - request.Path = path - return request, nil -} - -func decodeUpdateManifestRequest(_ context.Context, r *http.Request) (interface{}, error) { - var request updateManifestRequest - if err := json.NewDecoder(r.Body).Decode(&request.ManifestPayload); err != nil { - return nil, err - } - vars := mux.Vars(r) - path, ok := vars["path"] - if !ok { - return nil, errBadRouting - } - request.Path = path - return request, nil -} diff --git a/munki/server/transport_manifest_test.go b/munki/server/transport_manifest_test.go deleted file mode 100644 index 0e636d4..0000000 --- a/munki/server/transport_manifest_test.go +++ /dev/null @@ -1,221 +0,0 @@ -package munkiserver_test - -import ( - "bytes" - "encoding/json" - "fmt" - "io" - "io/ioutil" - "net/http" - "net/http/httptest" - "os" - "testing" - - "github.com/micromdm/squirrel/munki/munki" -) - -func TestListManifests(t *testing.T) { - server, _ := newServer(t) - defer server.Close() - testListManifestsHTTP(t, server, http.StatusOK) -} - -func TestShowManifests(t *testing.T) { - server, _ := newServer(t) - defer server.Close() - testShowManifestHTTP(t, 
server, "site_default", http.StatusOK) - testShowManifestHTTP(t, server, "site_none", http.StatusNotFound) -} - -func TestUpdateManifest(t *testing.T) { - server, _ := newServer(t) - defer server.Close() - manifests := []*munki.Manifest{ - &munki.Manifest{ - Filename: "update-manifest", - Catalogs: []string{"production", "testing"}, - }, - } - - for _, m := range manifests { - os.Remove("testdata/testrepo/manifests/" + m.Filename) - testCreateManifestHTTP(t, server, m.Filename, m, http.StatusOK) - m1 := &munki.ManifestPayload{ - Catalogs: &[]string{"foo"}, - } - testUpdateManifestHTTP(t, server, m.Filename, m1, http.StatusOK) - os.Remove("testdata/testrepo/manifests/" + m.Filename) - } -} - -func TestReplaceManifest(t *testing.T) { - server, _ := newServer(t) - defer server.Close() - manifests := []*munki.Manifest{ - &munki.Manifest{ - Filename: "replace-manifest", - Catalogs: []string{"production", "testing"}, - }, - } - - for _, m := range manifests { - os.Remove("testdata/testrepo/manifests/" + m.Filename) - testCreateManifestHTTP(t, server, m.Filename, m, http.StatusOK) - testReplaceManifestHTTP(t, server, m.Filename, m, http.StatusOK) - os.Remove("testdata/testrepo/manifests/" + m.Filename) - } -} - -func TestDeleteManifest(t *testing.T) { - server, _ := newServer(t) - defer server.Close() - manifests := []*munki.Manifest{ - &munki.Manifest{ - Filename: "del-manifest", - Catalogs: []string{"production", "testing"}, - }, - } - for _, m := range manifests { - os.Remove("testdata/testrepo/manifests/" + m.Filename) - testCreateManifestHTTP(t, server, m.Filename, m, http.StatusOK) - testDeleteManifestHTTP(t, server, m.Filename, http.StatusOK) - } -} - -func TestCreateManifest(t *testing.T) { - server, _ := newServer(t) - defer server.Close() - manifests := []*munki.Manifest{ - &munki.Manifest{ - Filename: "foo-manifest", - Catalogs: []string{"production", "testing"}, - }, - } - - for _, m := range manifests { - testCreateManifestHTTP(t, server, m.Filename, m, http.StatusOK) - os.Remove("testdata/testrepo/manifests/" + m.Filename) - } -} - -type createManifestRequest struct { - Filename string `plist:"filename" json:"filename"` - *munki.Manifest -} - -func testCreateManifestHTTP(t *testing.T, server *httptest.Server, filename string, manifest *munki.Manifest, expectedStatus int) *munki.Manifest { - client := http.DefaultClient - theURL := server.URL + "/api/v1/manifests" - var req = &createManifestRequest{ - Filename: filename, - Manifest: manifest, - } - data, err := json.Marshal(req) - if err != nil { - t.Fatal(err) - } - body := ioutil.NopCloser(bytes.NewBuffer(data)) - resp, err := client.Post(theURL, "application/json", body) - if err != nil { - t.Fatal(err) - } - if resp.StatusCode != expectedStatus { - io.Copy(os.Stdout, resp.Body) - t.Fatal("expected", expectedStatus, "got", resp.StatusCode) - } - - return nil -} - -func testReplaceManifestHTTP(t *testing.T, server *httptest.Server, path string, m *munki.Manifest, expectedStatus int) { - client := http.DefaultClient - theURL := server.URL + "/api/v1/manifests/" + path - data, err := json.Marshal(m) - if err != nil { - t.Fatal(err) - } - body := ioutil.NopCloser(bytes.NewBuffer(data)) - req, err := http.NewRequest("PUT", theURL, body) - if err != nil { - t.Fatal(err) - } - resp, err := client.Do(req) - if err != nil { - t.Fatal(err) - } - if resp.StatusCode != expectedStatus { - fmt.Println(theURL) - io.Copy(os.Stdout, resp.Body) - t.Fatal("expected", expectedStatus, "got", resp.StatusCode) - } -} - -func testUpdateManifestHTTP(t 
*testing.T, server *httptest.Server, path string, m *munki.ManifestPayload, expectedStatus int) { - client := http.DefaultClient - theURL := server.URL + "/api/v1/manifests/" + path - data, err := json.Marshal(m) - if err != nil { - t.Fatal(err) - } - body := ioutil.NopCloser(bytes.NewBuffer(data)) - req, err := http.NewRequest("PATCH", theURL, body) - if err != nil { - t.Fatal(err) - } - resp, err := client.Do(req) - if err != nil { - t.Fatal(err) - } - if resp.StatusCode != expectedStatus { - fmt.Println(theURL) - io.Copy(os.Stdout, resp.Body) - t.Fatal("expected", expectedStatus, "got", resp.StatusCode) - } -} - -func testDeleteManifestHTTP(t *testing.T, server *httptest.Server, path string, expectedStatus int) { - client := http.DefaultClient - theURL := server.URL + "/api/v1/manifests/" + path - req, err := http.NewRequest("DELETE", theURL, nil) - if err != nil { - t.Fatal(err) - } - resp, err := client.Do(req) - if err != nil { - t.Fatal(err) - } - if resp.StatusCode != expectedStatus { - fmt.Println(theURL) - io.Copy(os.Stdout, resp.Body) - t.Fatal("expected", expectedStatus, "got", resp.StatusCode) - } -} - -func testShowManifestHTTP(t *testing.T, server *httptest.Server, path string, expectedStatus int) *munki.Manifest { - client := http.DefaultClient - theURL := server.URL + "/api/v1/manifests/" + path - resp, err := client.Get(theURL) - if err != nil { - t.Fatal(err) - } - if resp.StatusCode != expectedStatus { - fmt.Println(theURL) - io.Copy(os.Stdout, resp.Body) - t.Fatal("expected", expectedStatus, "got", resp.StatusCode) - } - return nil -} - -func testListManifestsHTTP(t *testing.T, server *httptest.Server, expectedStatus int) *munki.ManifestCollection { - client := http.DefaultClient - theURL := server.URL + "/api/v1/manifests" - resp, err := client.Get(theURL) - if err != nil { - t.Fatal(err) - } - if resp.StatusCode != expectedStatus { - io.Copy(os.Stdout, resp.Body) - t.Fatal("expected", expectedStatus, "got", resp.StatusCode) - } - return nil -} diff --git a/munki/server/transport_pkgsinfo.go b/munki/server/transport_pkgsinfo.go deleted file mode 100644 index 68a2b16..0000000 --- a/munki/server/transport_pkgsinfo.go +++ /dev/null @@ -1,20 +0,0 @@ -package munkiserver - -import ( - "encoding/json" - "net/http" - - "golang.org/x/net/context" -) - -func decodeListPkgsinfosRequest(_ context.Context, r *http.Request) (interface{}, error) { - return listPkgsinfosRequest{}, nil -} - -func decodeCreatePkgsinfoRequest(_ context.Context, r *http.Request) (interface{}, error) { - var request createPkgsinfoRequest - if err := json.NewDecoder(r.Body).Decode(&request); err != nil { - return nil, err - } - return request, nil -} diff --git a/munki/server/transport_pkgsinfo_test.go b/munki/server/transport_pkgsinfo_test.go deleted file mode 100644 index b9ba650..0000000 --- a/munki/server/transport_pkgsinfo_test.go +++ /dev/null @@ -1,79 +0,0 @@ -package munkiserver_test - -import ( - "bytes" - "encoding/json" - "io" - "io/ioutil" - "net/http" - "net/http/httptest" - "os" - "testing" - - "github.com/micromdm/squirrel/munki/munki" -) - -func TestListPkgsinfos(t *testing.T) { - server, _ := newServer(t) - defer server.Close() - - testListPkgsinfosHTTP(t, server, http.StatusOK) -} - -func testListPkgsinfosHTTP(t *testing.T, server *httptest.Server, expectedStatus int) *munki.PkgsInfoCollection { - client := http.DefaultClient - theURL := server.URL + "/api/v1/pkgsinfos" - resp, err := client.Get(theURL) - if err != nil { - t.Fatal(err) - } - if resp.StatusCode != expectedStatus { - 
io.Copy(os.Stdout, resp.Body) - t.Fatal("expected", expectedStatus, "got", resp.StatusCode) - } - return nil -} - -func TestCreatePkgsinfo(t *testing.T) { - server, _ := newServer(t) - defer server.Close() - pkgsinfos := []*munki.PkgsInfo{ - &munki.PkgsInfo{ - Filename: "foo-pkgsinfo", - }, - } - - for _, p := range pkgsinfos { - testCreatePkgsinfoHTTP(t, server, p.Filename, p, http.StatusOK) - os.Remove("testdata/testrepo/pkgsinfo/" + p.Filename) - } -} - -type createPkgsinfoRequest struct { - Filename string `plist:"filename" json:"filename"` - *munki.PkgsInfo -} - -func testCreatePkgsinfoHTTP(t *testing.T, server *httptest.Server, filename string, pkgsinfo *munki.PkgsInfo, expectedStatus int) *munki.PkgsInfo { - client := http.DefaultClient - theURL := server.URL + "/api/v1/pkgsinfos" - var req = &createPkgsinfoRequest{ - Filename: filename, - PkgsInfo: pkgsinfo, - } - data, err := json.Marshal(req) - if err != nil { - t.Fatal(err) - } - body := ioutil.NopCloser(bytes.NewBuffer(data)) - resp, err := client.Post(theURL, "application/json", body) - if err != nil { - t.Fatal(err) - } - if resp.StatusCode != expectedStatus { - io.Copy(os.Stdout, resp.Body) - t.Fatal("expected", expectedStatus, "got", resp.StatusCode) - } - - return nil -} diff --git a/munki/server/transport_test.go b/munki/server/transport_test.go deleted file mode 100644 index bf296b1..0000000 --- a/munki/server/transport_test.go +++ /dev/null @@ -1,27 +0,0 @@ -package munkiserver_test - -import ( - "net/http/httptest" - "os" - "testing" - - "github.com/go-kit/kit/log" - "github.com/micromdm/squirrel/munki/datastore" - "github.com/micromdm/squirrel/munki/server" - "golang.org/x/net/context" -) - -func newServer(t *testing.T) (*httptest.Server, munkiserver.Service) { - ctx := context.Background() - l := log.NewLogfmtLogger(os.Stderr) - logger := log.NewContext(l).With("source", "testing") - path := "testdata/testrepo" - repo := &datastore.SimpleRepo{Path: path} - svc, err := munkiserver.NewService(repo) - if err != nil { - t.Fatal(err) - } - handler := munkiserver.ServiceHandler(ctx, svc, logger) - server := httptest.NewServer(handler) - return server, svc -} diff --git a/vendor/git.schwanenlied.me/yawning/chacha20.git/LICENSE b/vendor/git.schwanenlied.me/yawning/chacha20.git/LICENSE deleted file mode 100644 index 6ca207e..0000000 --- a/vendor/git.schwanenlied.me/yawning/chacha20.git/LICENSE +++ /dev/null @@ -1,122 +0,0 @@ -Creative Commons Legal Code - -CC0 1.0 Universal - - CREATIVE COMMONS CORPORATION IS NOT A LAW FIRM AND DOES NOT PROVIDE - LEGAL SERVICES. DISTRIBUTION OF THIS DOCUMENT DOES NOT CREATE AN - ATTORNEY-CLIENT RELATIONSHIP. CREATIVE COMMONS PROVIDES THIS - INFORMATION ON AN "AS-IS" BASIS. CREATIVE COMMONS MAKES NO WARRANTIES - REGARDING THE USE OF THIS DOCUMENT OR THE INFORMATION OR WORKS - PROVIDED HEREUNDER, AND DISCLAIMS LIABILITY FOR DAMAGES RESULTING FROM - THE USE OF THIS DOCUMENT OR THE INFORMATION OR WORKS PROVIDED - HEREUNDER. - -Statement of Purpose - -The laws of most jurisdictions throughout the world automatically confer -exclusive Copyright and Related Rights (defined below) upon the creator -and subsequent owner(s) (each and all, an "owner") of an original work of -authorship and/or a database (each, a "Work"). 
- -Certain owners wish to permanently relinquish those rights to a Work for -the purpose of contributing to a commons of creative, cultural and -scientific works ("Commons") that the public can reliably and without fear -of later claims of infringement build upon, modify, incorporate in other -works, reuse and redistribute as freely as possible in any form whatsoever -and for any purposes, including without limitation commercial purposes. -These owners may contribute to the Commons to promote the ideal of a free -culture and the further production of creative, cultural and scientific -works, or to gain reputation or greater distribution for their Work in -part through the use and efforts of others. - -For these and/or other purposes and motivations, and without any -expectation of additional consideration or compensation, the person -associating CC0 with a Work (the "Affirmer"), to the extent that he or she -is an owner of Copyright and Related Rights in the Work, voluntarily -elects to apply CC0 to the Work and publicly distribute the Work under its -terms, with knowledge of his or her Copyright and Related Rights in the -Work and the meaning and intended legal effect of CC0 on those rights. - -1. Copyright and Related Rights. A Work made available under CC0 may be -protected by copyright and related or neighboring rights ("Copyright and -Related Rights"). Copyright and Related Rights include, but are not -limited to, the following: - - i. the right to reproduce, adapt, distribute, perform, display, - communicate, and translate a Work; - ii. moral rights retained by the original author(s) and/or performer(s); -iii. publicity and privacy rights pertaining to a person's image or - likeness depicted in a Work; - iv. rights protecting against unfair competition in regards to a Work, - subject to the limitations in paragraph 4(a), below; - v. rights protecting the extraction, dissemination, use and reuse of data - in a Work; - vi. database rights (such as those arising under Directive 96/9/EC of the - European Parliament and of the Council of 11 March 1996 on the legal - protection of databases, and under any national implementation - thereof, including any amended or successor version of such - directive); and -vii. other similar, equivalent or corresponding rights throughout the - world based on applicable law or treaty, and any national - implementations thereof. - -2. Waiver. To the greatest extent permitted by, but not in contravention -of, applicable law, Affirmer hereby overtly, fully, permanently, -irrevocably and unconditionally waives, abandons, and surrenders all of -Affirmer's Copyright and Related Rights and associated claims and causes -of action, whether now known or unknown (including existing as well as -future claims and causes of action), in the Work (i) in all territories -worldwide, (ii) for the maximum duration provided by applicable law or -treaty (including future time extensions), (iii) in any current or future -medium and for any number of copies, and (iv) for any purpose whatsoever, -including without limitation commercial, advertising or promotional -purposes (the "Waiver"). 
Affirmer makes the Waiver for the benefit of each -member of the public at large and to the detriment of Affirmer's heirs and -successors, fully intending that such Waiver shall not be subject to -revocation, rescission, cancellation, termination, or any other legal or -equitable action to disrupt the quiet enjoyment of the Work by the public -as contemplated by Affirmer's express Statement of Purpose. - -3. Public License Fallback. Should any part of the Waiver for any reason -be judged legally invalid or ineffective under applicable law, then the -Waiver shall be preserved to the maximum extent permitted taking into -account Affirmer's express Statement of Purpose. In addition, to the -extent the Waiver is so judged Affirmer hereby grants to each affected -person a royalty-free, non transferable, non sublicensable, non exclusive, -irrevocable and unconditional license to exercise Affirmer's Copyright and -Related Rights in the Work (i) in all territories worldwide, (ii) for the -maximum duration provided by applicable law or treaty (including future -time extensions), (iii) in any current or future medium and for any number -of copies, and (iv) for any purpose whatsoever, including without -limitation commercial, advertising or promotional purposes (the -"License"). The License shall be deemed effective as of the date CC0 was -applied by Affirmer to the Work. Should any part of the License for any -reason be judged legally invalid or ineffective under applicable law, such -partial invalidity or ineffectiveness shall not invalidate the remainder -of the License, and in such case Affirmer hereby affirms that he or she -will not (i) exercise any of his or her remaining Copyright and Related -Rights in the Work or (ii) assert any associated claims and causes of -action with respect to the Work, in either case contrary to Affirmer's -express Statement of Purpose. - -4. Limitations and Disclaimers. - - a. No trademark or patent rights held by Affirmer are waived, abandoned, - surrendered, licensed or otherwise affected by this document. - b. Affirmer offers the Work as-is and makes no representations or - warranties of any kind concerning the Work, express, implied, - statutory or otherwise, including without limitation warranties of - title, merchantability, fitness for a particular purpose, non - infringement, or the absence of latent or other defects, accuracy, or - the present or absence of errors, whether or not discoverable, all to - the greatest extent permissible under applicable law. - c. Affirmer disclaims responsibility for clearing rights of other persons - that may apply to the Work or any use thereof, including without - limitation any person's Copyright and Related Rights in the Work. - Further, Affirmer disclaims responsibility for obtaining any necessary - consents, permissions or other rights required for any use of the - Work. - d. Affirmer understands and acknowledges that Creative Commons is not a - party to this document and has no duty or obligation with respect to - this CC0 or use of the Work. - diff --git a/vendor/git.schwanenlied.me/yawning/chacha20.git/README.md b/vendor/git.schwanenlied.me/yawning/chacha20.git/README.md deleted file mode 100644 index 9080a84..0000000 --- a/vendor/git.schwanenlied.me/yawning/chacha20.git/README.md +++ /dev/null @@ -1,14 +0,0 @@ -### chacha20 - ChaCha20 -#### Yawning Angel (yawning at schwanenlied dot me) - -Yet another Go ChaCha20 implementation. 
Everything else I found was slow, -didn't support all the variants I need to use, or relied on cgo to go fast. - -Features: - - * 20 round, 256 bit key only. Everything else is pointless and stupid. - * IETF 96 bit nonce variant. - * XChaCha 24 byte nonce variant. - * SSE2 and AVX2 support on amd64 targets. - * Incremental encrypt/decrypt support, unlike golang.org/x/crypto/salsa20. - diff --git a/vendor/git.schwanenlied.me/yawning/chacha20.git/chacha20.go b/vendor/git.schwanenlied.me/yawning/chacha20.git/chacha20.go deleted file mode 100644 index 07d5e4b..0000000 --- a/vendor/git.schwanenlied.me/yawning/chacha20.git/chacha20.go +++ /dev/null @@ -1,273 +0,0 @@ -// chacha20.go - A ChaCha stream cipher implementation. -// -// To the extent possible under law, Yawning Angel has waived all copyright -// and related or neighboring rights to chacha20, using the Creative -// Commons "CC0" public domain dedication. See LICENSE or -// for full details. - -package chacha20 - -import ( - "crypto/cipher" - "encoding/binary" - "errors" - "math" - "runtime" -) - -const ( - // KeySize is the ChaCha20 key size in bytes. - KeySize = 32 - - // NonceSize is the ChaCha20 nonce size in bytes. - NonceSize = 8 - - // INonceSize is the IETF ChaCha20 nonce size in bytes. - INonceSize = 12 - - // XNonceSize is the XChaCha20 nonce size in bytes. - XNonceSize = 24 - - // HNonceSize is the HChaCha20 nonce size in bytes. - HNonceSize = 16 - - // BlockSize is the ChaCha20 block size in bytes. - BlockSize = 64 - - stateSize = 16 - chachaRounds = 20 - - // The constant "expand 32-byte k" as little endian uint32s. - sigma0 = uint32(0x61707865) - sigma1 = uint32(0x3320646e) - sigma2 = uint32(0x79622d32) - sigma3 = uint32(0x6b206574) -) - -var ( - // ErrInvalidKey is the error returned when the key is invalid. - ErrInvalidKey = errors.New("key length must be KeySize bytes") - - // ErrInvalidNonce is the error returned when the nonce is invalid. - ErrInvalidNonce = errors.New("nonce length must be NonceSize/INonceSize/XNonceSize bytes") - - // ErrInvalidCounter is the error returned when the counter is invalid. - ErrInvalidCounter = errors.New("block counter is invalid (out of range)") - - useUnsafe = false - usingVectors = false - blocksFn = blocksRef -) - -// A Cipher is an instance of ChaCha20/XChaCha20 using a particular key and -// nonce. -type Cipher struct { - state [stateSize]uint32 - - buf [BlockSize]byte - off int - ietf bool -} - -// Reset zeros the key data so that it will no longer appear in the process's -// memory. -func (c *Cipher) Reset() { - for i := range c.state { - c.state[i] = 0 - } - for i := range c.buf { - c.buf[i] = 0 - } -} - -// XORKeyStream sets dst to the result of XORing src with the key stream. Dst -// and src may be the same slice but otherwise should not overlap. -func (c *Cipher) XORKeyStream(dst, src []byte) { - if len(dst) < len(src) { - src = src[:len(dst)] - } - - for remaining := len(src); remaining > 0; { - // Process multiple blocks at once. - if c.off == BlockSize { - nrBlocks := remaining / BlockSize - directBytes := nrBlocks * BlockSize - if nrBlocks > 0 { - blocksFn(&c.state, src, dst, nrBlocks, c.ietf) - remaining -= directBytes - if remaining == 0 { - return - } - dst = dst[directBytes:] - src = src[directBytes:] - } - - // If there's a partial block, generate 1 block of keystream into - // the internal buffer. - blocksFn(&c.state, nil, c.buf[:], 1, c.ietf) - c.off = 0 - } - - // Process partial blocks from the buffered keystream. 
- toXor := BlockSize - c.off - if remaining < toXor { - toXor = remaining - } - if toXor > 0 { - for i, v := range src[:toXor] { - dst[i] = v ^ c.buf[c.off+i] - } - dst = dst[toXor:] - src = src[toXor:] - - remaining -= toXor - c.off += toXor - } - } -} - -// KeyStream sets dst to the raw keystream. -func (c *Cipher) KeyStream(dst []byte) { - for remaining := len(dst); remaining > 0; { - // Process multiple blocks at once. - if c.off == BlockSize { - nrBlocks := remaining / BlockSize - directBytes := nrBlocks * BlockSize - if nrBlocks > 0 { - blocksFn(&c.state, nil, dst, nrBlocks, c.ietf) - remaining -= directBytes - if remaining == 0 { - return - } - dst = dst[directBytes:] - } - - // If there's a partial block, generate 1 block of keystream into - // the internal buffer. - blocksFn(&c.state, nil, c.buf[:], 1, c.ietf) - c.off = 0 - } - - // Process partial blocks from the buffered keystream. - toCopy := BlockSize - c.off - if remaining < toCopy { - toCopy = remaining - } - if toCopy > 0 { - copy(dst[:toCopy], c.buf[c.off:c.off+toCopy]) - dst = dst[toCopy:] - remaining -= toCopy - c.off += toCopy - } - } -} - -// ReKey reinitializes the ChaCha20/XChaCha20 instance with the provided key -// and nonce. -func (c *Cipher) ReKey(key, nonce []byte) error { - if len(key) != KeySize { - return ErrInvalidKey - } - - switch len(nonce) { - case NonceSize: - case INonceSize: - case XNonceSize: - var subkey [KeySize]byte - var subnonce [HNonceSize]byte - copy(subnonce[:], nonce[0:16]) - HChaCha(key, &subnonce, &subkey) - key = subkey[:] - nonce = nonce[16:24] - defer func() { - for i := range subkey { - subkey[i] = 0 - } - }() - default: - return ErrInvalidNonce - } - - c.Reset() - c.state[0] = sigma0 - c.state[1] = sigma1 - c.state[2] = sigma2 - c.state[3] = sigma3 - c.state[4] = binary.LittleEndian.Uint32(key[0:4]) - c.state[5] = binary.LittleEndian.Uint32(key[4:8]) - c.state[6] = binary.LittleEndian.Uint32(key[8:12]) - c.state[7] = binary.LittleEndian.Uint32(key[12:16]) - c.state[8] = binary.LittleEndian.Uint32(key[16:20]) - c.state[9] = binary.LittleEndian.Uint32(key[20:24]) - c.state[10] = binary.LittleEndian.Uint32(key[24:28]) - c.state[11] = binary.LittleEndian.Uint32(key[28:32]) - c.state[12] = 0 - if len(nonce) == INonceSize { - c.state[13] = binary.LittleEndian.Uint32(nonce[0:4]) - c.state[14] = binary.LittleEndian.Uint32(nonce[4:8]) - c.state[15] = binary.LittleEndian.Uint32(nonce[8:12]) - c.ietf = true - } else { - c.state[13] = 0 - c.state[14] = binary.LittleEndian.Uint32(nonce[0:4]) - c.state[15] = binary.LittleEndian.Uint32(nonce[4:8]) - c.ietf = false - } - c.off = BlockSize - return nil - -} - -// Seek sets the block counter to a given offset. -func (c *Cipher) Seek(blockCounter uint64) error { - if c.ietf { - if blockCounter > math.MaxUint32 { - return ErrInvalidCounter - } - c.state[12] = uint32(blockCounter) - } else { - c.state[12] = uint32(blockCounter) - c.state[13] = uint32(blockCounter >> 32) - } - c.off = BlockSize - return nil -} - -// NewCipher returns a new ChaCha20/XChaCha20 instance. -func NewCipher(key, nonce []byte) (*Cipher, error) { - c := new(Cipher) - if err := c.ReKey(key, nonce); err != nil { - return nil, err - } - return c, nil -} - -// HChaCha is the HChaCha20 hash function used to make XChaCha. -func HChaCha(key []byte, nonce *[HNonceSize]byte, out *[32]byte) { - var x [stateSize]uint32 // Last 4 slots unused, sigma hardcoded. 
- x[0] = binary.LittleEndian.Uint32(key[0:4]) - x[1] = binary.LittleEndian.Uint32(key[4:8]) - x[2] = binary.LittleEndian.Uint32(key[8:12]) - x[3] = binary.LittleEndian.Uint32(key[12:16]) - x[4] = binary.LittleEndian.Uint32(key[16:20]) - x[5] = binary.LittleEndian.Uint32(key[20:24]) - x[6] = binary.LittleEndian.Uint32(key[24:28]) - x[7] = binary.LittleEndian.Uint32(key[28:32]) - x[8] = binary.LittleEndian.Uint32(nonce[0:4]) - x[9] = binary.LittleEndian.Uint32(nonce[4:8]) - x[10] = binary.LittleEndian.Uint32(nonce[8:12]) - x[11] = binary.LittleEndian.Uint32(nonce[12:16]) - hChaChaRef(&x, out) -} - -func init() { - switch runtime.GOARCH { - case "386", "amd64": - // Abuse unsafe to skip calling binary.LittleEndian.PutUint32 - // in the critical path. This is a big boost on systems that are - // little endian and not overly picky about alignment. - useUnsafe = true - } -} - -var _ cipher.Stream = (*Cipher)(nil) diff --git a/vendor/git.schwanenlied.me/yawning/chacha20.git/chacha20_amd64.go b/vendor/git.schwanenlied.me/yawning/chacha20.git/chacha20_amd64.go deleted file mode 100644 index b2c8623..0000000 --- a/vendor/git.schwanenlied.me/yawning/chacha20.git/chacha20_amd64.go +++ /dev/null @@ -1,95 +0,0 @@ -// chacha20_amd64.go - AMD64 optimized chacha20. -// -// To the extent possible under law, Yawning Angel has waived all copyright -// and related or neighboring rights to chacha20, using the Creative -// Commons "CC0" public domain dedication. See LICENSE or -// for full details. - -// +build amd64,!gccgo,!appengine - -package chacha20 - -import ( - "math" -) - -var usingAVX2 = false - -func blocksAmd64SSE2(x *uint32, inp, outp *byte, nrBlocks uint) - -func blocksAmd64AVX2(x *uint32, inp, outp *byte, nrBlocks uint) - -func cpuidAmd64(cpuidParams *uint32) - -func xgetbv0Amd64(xcrVec *uint32) - -func blocksAmd64(x *[stateSize]uint32, in []byte, out []byte, nrBlocks int, isIetf bool) { - // Probably unneeded, but stating this explicitly simplifies the assembly. - if nrBlocks == 0 { - return - } - - if isIetf { - var totalBlocks uint64 - totalBlocks = uint64(x[8]) + uint64(nrBlocks) - if totalBlocks > math.MaxUint32 { - panic("chacha20: Exceeded keystream per nonce limit") - } - } - - if in == nil { - for i := range out { - out[i] = 0 - } - in = out - } - - // Pointless to call the AVX2 code for just a single block, since half of - // the output gets discarded... - if usingAVX2 && nrBlocks > 1 { - blocksAmd64AVX2(&x[0], &in[0], &out[0], uint(nrBlocks)) - } else { - blocksAmd64SSE2(&x[0], &in[0], &out[0], uint(nrBlocks)) - } -} - -func supportsAVX2() bool { - // https://software.intel.com/en-us/articles/how-to-detect-new-instruction-support-in-the-4th-generation-intel-core-processor-family - const ( - osXsaveBit = 1 << 27 - avx2Bit = 1 << 5 - ) - - // Check to see if CPUID actually supports the leaf that indicates AVX2. - // CPUID.(EAX=0H, ECX=0H) >= 7 - regs := [4]uint32{0x00} - cpuidAmd64(®s[0]) - if regs[0] < 7 { - return false - } - - // Check to see if the OS knows how to save/restore XMM/YMM state. - // CPUID.(EAX=01H, ECX=0H):ECX.OSXSAVE[bit 27]==1 - regs = [4]uint32{0x01} - cpuidAmd64(®s[0]) - if regs[2]&osXsaveBit == 0 { - return false - } - xcrRegs := [2]uint32{} - xgetbv0Amd64(&xcrRegs[0]) - if xcrRegs[0]&6 != 6 { - return false - } - - // Check for AVX2 support. 
- // CPUID.(EAX=07H, ECX=0H):EBX.AVX2[bit 5]==1 - regs = [4]uint32{0x07} - cpuidAmd64(®s[0]) - return regs[1]&avx2Bit != 0 -} - -func init() { - blocksFn = blocksAmd64 - usingVectors = true - usingAVX2 = supportsAVX2() -} diff --git a/vendor/git.schwanenlied.me/yawning/chacha20.git/chacha20_amd64.py b/vendor/git.schwanenlied.me/yawning/chacha20.git/chacha20_amd64.py deleted file mode 100644 index 5b689d1..0000000 --- a/vendor/git.schwanenlied.me/yawning/chacha20.git/chacha20_amd64.py +++ /dev/null @@ -1,1303 +0,0 @@ -#!/usr/bin/env python3 -# -# To the extent possible under law, Yawning Angel has waived all copyright -# and related or neighboring rights to chacha20, using the Creative -# Commons "CC0" public domain dedication. See LICENSE or -# for full details. - -# -# cgo sucks. Plan 9 assembly sucks. Real languages have SIMD intrinsics. -# The least terrible/retarded option is to use a Python code generator, so -# that's what I did. -# -# Code based on Ted Krovetz's vec128 C implementation, with corrections -# to use a 64 bit counter instead of 32 bit, and to allow unaligned input and -# output pointers. -# -# Dependencies: https://github.com/Maratyszcza/PeachPy -# -# python3 -m peachpy.x86_64 -mabi=goasm -S -o chacha20_amd64.s chacha20_amd64.py -# - -from peachpy import * -from peachpy.x86_64 import * - -x = Argument(ptr(uint32_t)) -inp = Argument(ptr(const_uint8_t)) -outp = Argument(ptr(uint8_t)) -nrBlocks = Argument(ptr(size_t)) - -# -# SSE2 helper functions. A temporary register is explicitly passed in because -# the main fast loop uses every single register (and even spills) so manual -# control is needed. -# -# This used to also have a DQROUNDS helper that did 2 rounds of ChaCha like -# in the C code, but the C code has the luxury of an optimizer reordering -# everything, while this does not. -# - -def ROTW16_sse2(tmp, d): - MOVDQA(tmp, d) - PSLLD(tmp, 16) - PSRLD(d, 16) - PXOR(d, tmp) - -def ROTW12_sse2(tmp, b): - MOVDQA(tmp, b) - PSLLD(tmp, 12) - PSRLD(b, 20) - PXOR(b, tmp) - -def ROTW8_sse2(tmp, d): - MOVDQA(tmp, d) - PSLLD(tmp, 8) - PSRLD(d, 24) - PXOR(d, tmp) - -def ROTW7_sse2(tmp, b): - MOVDQA(tmp, b) - PSLLD(tmp, 7) - PSRLD(b, 25) - PXOR(b, tmp) - -def WriteXor_sse2(tmp, inp, outp, d, v0, v1, v2, v3): - MOVDQU(tmp, [inp+d]) - PXOR(tmp, v0) - MOVDQU([outp+d], tmp) - MOVDQU(tmp, [inp+d+16]) - PXOR(tmp, v1) - MOVDQU([outp+d+16], tmp) - MOVDQU(tmp, [inp+d+32]) - PXOR(tmp, v2) - MOVDQU([outp+d+32], tmp) - MOVDQU(tmp, [inp+d+48]) - PXOR(tmp, v3) - MOVDQU([outp+d+48], tmp) - -# SSE2 ChaCha20 (aka vec128). Does not handle partial blocks, and will -# process 4/2/1 blocks at a time. x (the ChaCha20 state) must be 16 byte -# aligned. -with Function("blocksAmd64SSE2", (x, inp, outp, nrBlocks)): - reg_x = GeneralPurposeRegister64() - reg_inp = GeneralPurposeRegister64() - reg_outp = GeneralPurposeRegister64() - reg_blocks = GeneralPurposeRegister64() - reg_sp_save = GeneralPurposeRegister64() - - LOAD.ARGUMENT(reg_x, x) - LOAD.ARGUMENT(reg_inp, inp) - LOAD.ARGUMENT(reg_outp, outp) - LOAD.ARGUMENT(reg_blocks, nrBlocks) - - # Align the stack to a 32 byte boundary. 
- reg_align = GeneralPurposeRegister64() - MOV(reg_sp_save, registers.rsp) - MOV(reg_align, 0x1f) - NOT(reg_align) - AND(registers.rsp, reg_align) - SUB(registers.rsp, 0x20) - - # Build the counter increment vector on the stack, and allocate the scratch - # space - xmm_v0 = XMMRegister() - PXOR(xmm_v0, xmm_v0) - SUB(registers.rsp, 16+16) - MOVDQA([registers.rsp], xmm_v0) - reg_tmp = GeneralPurposeRegister32() - MOV(reg_tmp, 0x00000001) - MOV([registers.rsp], reg_tmp) - mem_one = [registers.rsp] # (Stack) Counter increment vector - mem_tmp0 = [registers.rsp+16] # (Stack) Scratch space. - - mem_s0 = [reg_x] # (Memory) Cipher state [0..3] - mem_s1 = [reg_x+16] # (Memory) Cipher state [4..7] - mem_s2 = [reg_x+32] # (Memory) Cipher state [8..11] - mem_s3 = [reg_x+48] # (Memory) Cipher state [12..15] - - # xmm_v0 allocated above... - xmm_v1 = XMMRegister() - xmm_v2 = XMMRegister() - xmm_v3 = XMMRegister() - - xmm_v4 = XMMRegister() - xmm_v5 = XMMRegister() - xmm_v6 = XMMRegister() - xmm_v7 = XMMRegister() - - xmm_v8 = XMMRegister() - xmm_v9 = XMMRegister() - xmm_v10 = XMMRegister() - xmm_v11 = XMMRegister() - - xmm_v12 = XMMRegister() - xmm_v13 = XMMRegister() - xmm_v14 = XMMRegister() - xmm_v15 = XMMRegister() - - xmm_tmp = xmm_v12 - - # - # 4 blocks at a time. - # - - vector_loop4 = Loop() - SUB(reg_blocks, 4) - JB(vector_loop4.end) - with vector_loop4: - MOVDQA(xmm_v0, mem_s0) - MOVDQA(xmm_v1, mem_s1) - MOVDQA(xmm_v2, mem_s2) - MOVDQA(xmm_v3, mem_s3) - - MOVDQA(xmm_v4, xmm_v0) - MOVDQA(xmm_v5, xmm_v1) - MOVDQA(xmm_v6, xmm_v2) - MOVDQA(xmm_v7, xmm_v3) - PADDQ(xmm_v7, mem_one) - - MOVDQA(xmm_v8, xmm_v0) - MOVDQA(xmm_v9, xmm_v1) - MOVDQA(xmm_v10, xmm_v2) - MOVDQA(xmm_v11, xmm_v7) - PADDQ(xmm_v11, mem_one) - - MOVDQA(xmm_v12, xmm_v0) - MOVDQA(xmm_v13, xmm_v1) - MOVDQA(xmm_v14, xmm_v2) - MOVDQA(xmm_v15, xmm_v11) - PADDQ(xmm_v15, mem_one) - - reg_rounds = GeneralPurposeRegister64() - MOV(reg_rounds, 20) - rounds_loop4 = Loop() - with rounds_loop4: - # a += b; d ^= a; d = ROTW16(d); - PADDD(xmm_v0, xmm_v1) - PADDD(xmm_v4, xmm_v5) - PADDD(xmm_v8, xmm_v9) - PADDD(xmm_v12, xmm_v13) - PXOR(xmm_v3, xmm_v0) - PXOR(xmm_v7, xmm_v4) - PXOR(xmm_v11, xmm_v8) - PXOR(xmm_v15, xmm_v12) - - MOVDQA(mem_tmp0, xmm_tmp) # Save - - ROTW16_sse2(xmm_tmp, xmm_v3) - ROTW16_sse2(xmm_tmp, xmm_v7) - ROTW16_sse2(xmm_tmp, xmm_v11) - ROTW16_sse2(xmm_tmp, xmm_v15) - - # c += d; b ^= c; b = ROTW12(b); - PADDD(xmm_v2, xmm_v3) - PADDD(xmm_v6, xmm_v7) - PADDD(xmm_v10, xmm_v11) - PADDD(xmm_v14, xmm_v15) - PXOR(xmm_v1, xmm_v2) - PXOR(xmm_v5, xmm_v6) - PXOR(xmm_v9, xmm_v10) - PXOR(xmm_v13, xmm_v14) - ROTW12_sse2(xmm_tmp, xmm_v1) - ROTW12_sse2(xmm_tmp, xmm_v5) - ROTW12_sse2(xmm_tmp, xmm_v9) - ROTW12_sse2(xmm_tmp, xmm_v13) - - # a += b; d ^= a; d = ROTW8(d); - MOVDQA(xmm_tmp, mem_tmp0) # Restore - - PADDD(xmm_v0, xmm_v1) - PADDD(xmm_v4, xmm_v5) - PADDD(xmm_v8, xmm_v9) - PADDD(xmm_v12, xmm_v13) - PXOR(xmm_v3, xmm_v0) - PXOR(xmm_v7, xmm_v4) - PXOR(xmm_v11, xmm_v8) - PXOR(xmm_v15, xmm_v12) - - MOVDQA(mem_tmp0, xmm_tmp) # Save - - ROTW8_sse2(xmm_tmp, xmm_v3) - ROTW8_sse2(xmm_tmp, xmm_v7) - ROTW8_sse2(xmm_tmp, xmm_v11) - ROTW8_sse2(xmm_tmp, xmm_v15) - - # c += d; b ^= c; b = ROTW7(b) - PADDD(xmm_v2, xmm_v3) - PADDD(xmm_v6, xmm_v7) - PADDD(xmm_v10, xmm_v11) - PADDD(xmm_v14, xmm_v15) - PXOR(xmm_v1, xmm_v2) - PXOR(xmm_v5, xmm_v6) - PXOR(xmm_v9, xmm_v10) - PXOR(xmm_v13, xmm_v14) - ROTW7_sse2(xmm_tmp, xmm_v1) - ROTW7_sse2(xmm_tmp, xmm_v5) - ROTW7_sse2(xmm_tmp, xmm_v9) - ROTW7_sse2(xmm_tmp, xmm_v13) - - # b = ROTV1(b); c = ROTV2(c); d = ROTV3(d); - 
PSHUFD(xmm_v1, xmm_v1, 0x39) - PSHUFD(xmm_v5, xmm_v5, 0x39) - PSHUFD(xmm_v9, xmm_v9, 0x39) - PSHUFD(xmm_v13, xmm_v13, 0x39) - PSHUFD(xmm_v2, xmm_v2, 0x4e) - PSHUFD(xmm_v6, xmm_v6, 0x4e) - PSHUFD(xmm_v10, xmm_v10, 0x4e) - PSHUFD(xmm_v14, xmm_v14, 0x4e) - PSHUFD(xmm_v3, xmm_v3, 0x93) - PSHUFD(xmm_v7, xmm_v7, 0x93) - PSHUFD(xmm_v11, xmm_v11, 0x93) - PSHUFD(xmm_v15, xmm_v15, 0x93) - - MOVDQA(xmm_tmp, mem_tmp0) # Restore - - # a += b; d ^= a; d = ROTW16(d); - PADDD(xmm_v0, xmm_v1) - PADDD(xmm_v4, xmm_v5) - PADDD(xmm_v8, xmm_v9) - PADDD(xmm_v12, xmm_v13) - PXOR(xmm_v3, xmm_v0) - PXOR(xmm_v7, xmm_v4) - PXOR(xmm_v11, xmm_v8) - PXOR(xmm_v15, xmm_v12) - - MOVDQA(mem_tmp0, xmm_tmp) # Save - - ROTW16_sse2(xmm_tmp, xmm_v3) - ROTW16_sse2(xmm_tmp, xmm_v7) - ROTW16_sse2(xmm_tmp, xmm_v11) - ROTW16_sse2(xmm_tmp, xmm_v15) - - # c += d; b ^= c; b = ROTW12(b); - PADDD(xmm_v2, xmm_v3) - PADDD(xmm_v6, xmm_v7) - PADDD(xmm_v10, xmm_v11) - PADDD(xmm_v14, xmm_v15) - PXOR(xmm_v1, xmm_v2) - PXOR(xmm_v5, xmm_v6) - PXOR(xmm_v9, xmm_v10) - PXOR(xmm_v13, xmm_v14) - ROTW12_sse2(xmm_tmp, xmm_v1) - ROTW12_sse2(xmm_tmp, xmm_v5) - ROTW12_sse2(xmm_tmp, xmm_v9) - ROTW12_sse2(xmm_tmp, xmm_v13) - - # a += b; d ^= a; d = ROTW8(d); - MOVDQA(xmm_tmp, mem_tmp0) # Restore - - PADDD(xmm_v0, xmm_v1) - PADDD(xmm_v4, xmm_v5) - PADDD(xmm_v8, xmm_v9) - PADDD(xmm_v12, xmm_v13) - PXOR(xmm_v3, xmm_v0) - PXOR(xmm_v7, xmm_v4) - PXOR(xmm_v11, xmm_v8) - PXOR(xmm_v15, xmm_v12) - - MOVDQA(mem_tmp0, xmm_tmp) # Save - - ROTW8_sse2(xmm_tmp, xmm_v3) - ROTW8_sse2(xmm_tmp, xmm_v7) - ROTW8_sse2(xmm_tmp, xmm_v11) - ROTW8_sse2(xmm_tmp, xmm_v15) - - # c += d; b ^= c; b = ROTW7(b) - PADDD(xmm_v2, xmm_v3) - PADDD(xmm_v6, xmm_v7) - PADDD(xmm_v10, xmm_v11) - PADDD(xmm_v14, xmm_v15) - PXOR(xmm_v1, xmm_v2) - PXOR(xmm_v5, xmm_v6) - PXOR(xmm_v9, xmm_v10) - PXOR(xmm_v13, xmm_v14) - ROTW7_sse2(xmm_tmp, xmm_v1) - ROTW7_sse2(xmm_tmp, xmm_v5) - ROTW7_sse2(xmm_tmp, xmm_v9) - ROTW7_sse2(xmm_tmp, xmm_v13) - - # b = ROTV1(b); c = ROTV2(c); d = ROTV3(d); - PSHUFD(xmm_v1, xmm_v1, 0x93) - PSHUFD(xmm_v5, xmm_v5, 0x93) - PSHUFD(xmm_v9, xmm_v9, 0x93) - PSHUFD(xmm_v13, xmm_v13, 0x93) - PSHUFD(xmm_v2, xmm_v2, 0x4e) - PSHUFD(xmm_v6, xmm_v6, 0x4e) - PSHUFD(xmm_v10, xmm_v10, 0x4e) - PSHUFD(xmm_v14, xmm_v14, 0x4e) - PSHUFD(xmm_v3, xmm_v3, 0x39) - PSHUFD(xmm_v7, xmm_v7, 0x39) - PSHUFD(xmm_v11, xmm_v11, 0x39) - PSHUFD(xmm_v15, xmm_v15, 0x39) - - MOVDQA(xmm_tmp, mem_tmp0) # Restore - - SUB(reg_rounds, 2) - JNZ(rounds_loop4.begin) - - MOVDQA(mem_tmp0, xmm_tmp) - - PADDD(xmm_v0, mem_s0) - PADDD(xmm_v1, mem_s1) - PADDD(xmm_v2, mem_s2) - PADDD(xmm_v3, mem_s3) - WriteXor_sse2(xmm_tmp, reg_inp, reg_outp, 0, xmm_v0, xmm_v1, xmm_v2, xmm_v3) - MOVDQA(xmm_v3, mem_s3) - PADDQ(xmm_v3, mem_one) - - PADDD(xmm_v4, mem_s0) - PADDD(xmm_v5, mem_s1) - PADDD(xmm_v6, mem_s2) - PADDD(xmm_v7, xmm_v3) - WriteXor_sse2(xmm_tmp, reg_inp, reg_outp, 64, xmm_v4, xmm_v5, xmm_v6, xmm_v7) - PADDQ(xmm_v3, mem_one) - - PADDD(xmm_v8, mem_s0) - PADDD(xmm_v9, mem_s1) - PADDD(xmm_v10, mem_s2) - PADDD(xmm_v11, xmm_v3) - WriteXor_sse2(xmm_tmp, reg_inp, reg_outp, 128, xmm_v8, xmm_v9, xmm_v10, xmm_v11) - PADDQ(xmm_v3, mem_one) - - MOVDQA(xmm_tmp, mem_tmp0) - - PADDD(xmm_v12, mem_s0) - PADDD(xmm_v13, mem_s1) - PADDD(xmm_v14, mem_s2) - PADDD(xmm_v15, xmm_v3) - WriteXor_sse2(xmm_v0, reg_inp, reg_outp, 192, xmm_v12, xmm_v13, xmm_v14, xmm_v15) - PADDQ(xmm_v3, mem_one) - - MOVDQA(mem_s3, xmm_v3) - - ADD(reg_inp, 4 * 64) - ADD(reg_outp, 4 * 64) - - SUB(reg_blocks, 4) - JAE(vector_loop4.begin) - - ADD(reg_blocks, 4) - out = Label() - JZ(out) 
- - # Past this point, we no longer need to use every single register to hold - # the in progress state. - - xmm_s0 = xmm_v8 - xmm_s1 = xmm_v9 - xmm_s2 = xmm_v10 - xmm_s3 = xmm_v11 - xmm_one = xmm_v13 - MOVDQA(xmm_s0, mem_s0) - MOVDQA(xmm_s1, mem_s1) - MOVDQA(xmm_s2, mem_s2) - MOVDQA(xmm_s3, mem_s3) - MOVDQA(xmm_one, mem_one) - - # - # 2 blocks at a time. - # - - SUB(reg_blocks, 2) - vector_loop2 = Loop() - JB(vector_loop2.end) - with vector_loop2: - MOVDQA(xmm_v0, xmm_s0) - MOVDQA(xmm_v1, xmm_s1) - MOVDQA(xmm_v2, xmm_s2) - MOVDQA(xmm_v3, xmm_s3) - - MOVDQA(xmm_v4, xmm_v0) - MOVDQA(xmm_v5, xmm_v1) - MOVDQA(xmm_v6, xmm_v2) - MOVDQA(xmm_v7, xmm_v3) - PADDQ(xmm_v7, xmm_one) - - reg_rounds = GeneralPurposeRegister64() - MOV(reg_rounds, 20) - rounds_loop2 = Loop() - with rounds_loop2: - # a += b; d ^= a; d = ROTW16(d); - PADDD(xmm_v0, xmm_v1) - PADDD(xmm_v4, xmm_v5) - PXOR(xmm_v3, xmm_v0) - PXOR(xmm_v7, xmm_v4) - ROTW16_sse2(xmm_tmp, xmm_v3) - ROTW16_sse2(xmm_tmp, xmm_v7) - - # c += d; b ^= c; b = ROTW12(b); - PADDD(xmm_v2, xmm_v3) - PADDD(xmm_v6, xmm_v7) - PXOR(xmm_v1, xmm_v2) - PXOR(xmm_v5, xmm_v6) - ROTW12_sse2(xmm_tmp, xmm_v1) - ROTW12_sse2(xmm_tmp, xmm_v5) - - # a += b; d ^= a; d = ROTW8(d); - PADDD(xmm_v0, xmm_v1) - PADDD(xmm_v4, xmm_v5) - PXOR(xmm_v3, xmm_v0) - PXOR(xmm_v7, xmm_v4) - ROTW8_sse2(xmm_tmp, xmm_v3) - ROTW8_sse2(xmm_tmp, xmm_v7) - - # c += d; b ^= c; b = ROTW7(b) - PADDD(xmm_v2, xmm_v3) - PADDD(xmm_v6, xmm_v7) - PXOR(xmm_v1, xmm_v2) - PXOR(xmm_v5, xmm_v6) - ROTW7_sse2(xmm_tmp, xmm_v1) - ROTW7_sse2(xmm_tmp, xmm_v5) - - # b = ROTV1(b); c = ROTV2(c); d = ROTV3(d); - PSHUFD(xmm_v1, xmm_v1, 0x39) - PSHUFD(xmm_v5, xmm_v5, 0x39) - PSHUFD(xmm_v2, xmm_v2, 0x4e) - PSHUFD(xmm_v6, xmm_v6, 0x4e) - PSHUFD(xmm_v3, xmm_v3, 0x93) - PSHUFD(xmm_v7, xmm_v7, 0x93) - - # a += b; d ^= a; d = ROTW16(d); - PADDD(xmm_v0, xmm_v1) - PADDD(xmm_v4, xmm_v5) - PXOR(xmm_v3, xmm_v0) - PXOR(xmm_v7, xmm_v4) - ROTW16_sse2(xmm_tmp, xmm_v3) - ROTW16_sse2(xmm_tmp, xmm_v7) - - # c += d; b ^= c; b = ROTW12(b); - PADDD(xmm_v2, xmm_v3) - PADDD(xmm_v6, xmm_v7) - PXOR(xmm_v1, xmm_v2) - PXOR(xmm_v5, xmm_v6) - ROTW12_sse2(xmm_tmp, xmm_v1) - ROTW12_sse2(xmm_tmp, xmm_v5) - - # a += b; d ^= a; d = ROTW8(d); - PADDD(xmm_v0, xmm_v1) - PADDD(xmm_v4, xmm_v5) - PXOR(xmm_v3, xmm_v0) - PXOR(xmm_v7, xmm_v4) - ROTW8_sse2(xmm_tmp, xmm_v3) - ROTW8_sse2(xmm_tmp, xmm_v7) - - # c += d; b ^= c; b = ROTW7(b) - PADDD(xmm_v2, xmm_v3) - PADDD(xmm_v6, xmm_v7) - PXOR(xmm_v1, xmm_v2) - PXOR(xmm_v5, xmm_v6) - ROTW7_sse2(xmm_tmp, xmm_v1) - ROTW7_sse2(xmm_tmp, xmm_v5) - - # b = ROTV1(b); c = ROTV2(c); d = ROTV3(d); - PSHUFD(xmm_v1, xmm_v1, 0x93) - PSHUFD(xmm_v5, xmm_v5, 0x93) - PSHUFD(xmm_v2, xmm_v2, 0x4e) - PSHUFD(xmm_v6, xmm_v6, 0x4e) - PSHUFD(xmm_v3, xmm_v3, 0x39) - PSHUFD(xmm_v7, xmm_v7, 0x39) - - SUB(reg_rounds, 2) - JNZ(rounds_loop2.begin) - - PADDD(xmm_v0, xmm_s0) - PADDD(xmm_v1, xmm_s1) - PADDD(xmm_v2, xmm_s2) - PADDD(xmm_v3, xmm_s3) - WriteXor_sse2(xmm_tmp, reg_inp, reg_outp, 0, xmm_v0, xmm_v1, xmm_v2, xmm_v3) - PADDQ(xmm_s3, xmm_one) - - PADDD(xmm_v4, xmm_s0) - PADDD(xmm_v5, xmm_s1) - PADDD(xmm_v6, xmm_s2) - PADDD(xmm_v7, xmm_s3) - WriteXor_sse2(xmm_tmp, reg_inp, reg_outp, 64, xmm_v4, xmm_v5, xmm_v6, xmm_v7) - PADDQ(xmm_s3, xmm_one) - - ADD(reg_inp, 2 * 64) - ADD(reg_outp, 2 * 64) - - SUB(reg_blocks, 2) - JAE(vector_loop2.begin) - - ADD(reg_blocks, 2) - out_serial = Label() - JZ(out_serial) - - # - # 1 block at a time. Only executed once, because if there was > 1, - # the parallel code would have processed it already. 
- # - - MOVDQA(xmm_v0, xmm_s0) - MOVDQA(xmm_v1, xmm_s1) - MOVDQA(xmm_v2, xmm_s2) - MOVDQA(xmm_v3, xmm_s3) - - reg_rounds = GeneralPurposeRegister64() - MOV(reg_rounds, 20) - rounds_loop1 = Loop() - with rounds_loop1: - # a += b; d ^= a; d = ROTW16(d); - PADDD(xmm_v0, xmm_v1) - PXOR(xmm_v3, xmm_v0) - ROTW16_sse2(xmm_tmp, xmm_v3) - - # c += d; b ^= c; b = ROTW12(b); - PADDD(xmm_v2, xmm_v3) - PXOR(xmm_v1, xmm_v2) - ROTW12_sse2(xmm_tmp, xmm_v1) - - # a += b; d ^= a; d = ROTW8(d); - PADDD(xmm_v0, xmm_v1) - PXOR(xmm_v3, xmm_v0) - ROTW8_sse2(xmm_tmp, xmm_v3) - - # c += d; b ^= c; b = ROTW7(b) - PADDD(xmm_v2, xmm_v3) - PXOR(xmm_v1, xmm_v2) - ROTW7_sse2(xmm_tmp, xmm_v1) - - # b = ROTV1(b); c = ROTV2(c); d = ROTV3(d); - PSHUFD(xmm_v1, xmm_v1, 0x39) - PSHUFD(xmm_v2, xmm_v2, 0x4e) - PSHUFD(xmm_v3, xmm_v3, 0x93) - - # a += b; d ^= a; d = ROTW16(d); - PADDD(xmm_v0, xmm_v1) - PXOR(xmm_v3, xmm_v0) - ROTW16_sse2(xmm_tmp, xmm_v3) - - # c += d; b ^= c; b = ROTW12(b); - PADDD(xmm_v2, xmm_v3) - PXOR(xmm_v1, xmm_v2) - ROTW12_sse2(xmm_tmp, xmm_v1) - - # a += b; d ^= a; d = ROTW8(d); - PADDD(xmm_v0, xmm_v1) - PXOR(xmm_v3, xmm_v0) - ROTW8_sse2(xmm_tmp, xmm_v3) - - # c += d; b ^= c; b = ROTW7(b) - PADDD(xmm_v2, xmm_v3) - PXOR(xmm_v1, xmm_v2) - ROTW7_sse2(xmm_tmp, xmm_v1) - - # b = ROTV1(b); c = ROTV2(c); d = ROTV3(d); - PSHUFD(xmm_v1, xmm_v1, 0x93) - PSHUFD(xmm_v2, xmm_v2, 0x4e) - PSHUFD(xmm_v3, xmm_v3, 0x39) - - SUB(reg_rounds, 2) - JNZ(rounds_loop1.begin) - - PADDD(xmm_v0, xmm_s0) - PADDD(xmm_v1, xmm_s1) - PADDD(xmm_v2, xmm_s2) - PADDD(xmm_v3, xmm_s3) - WriteXor_sse2(xmm_tmp, reg_inp, reg_outp, 0, xmm_v0, xmm_v1, xmm_v2, xmm_v3) - PADDQ(xmm_s3, xmm_one) - - LABEL(out_serial) - - # Write back the updated counter. Stoping at 2^70 bytes is the user's - # problem, not mine. (Skipped if there's exactly a multiple of 4 blocks - # because the counter is incremented in memory while looping.) - MOVDQA(mem_s3, xmm_s3) - - LABEL(out) - - # Paranoia, cleanse the scratch space. - PXOR(xmm_v0, xmm_v0) - MOVDQA(mem_tmp0, xmm_v0) - - # Remove our stack allocation. - MOV(registers.rsp, reg_sp_save) - - RETURN() - -# -# AVX2 helpers. Like the SSE2 equivalents, the scratch register is explicit, -# and more helpers are used to increase readability for destructive operations. -# -# XXX/Performance: ROTW16_avx2/ROTW8_avx2 both can use VPSHUFFB. -# - -def ADD_avx2(dst, src): - VPADDD(dst, dst, src) - -def XOR_avx2(dst, src): - VPXOR(dst, dst, src) - -def ROTW16_avx2(tmp, d): - VPSLLD(tmp, d, 16) - VPSRLD(d, d, 16) - XOR_avx2(d, tmp) - -def ROTW12_avx2(tmp, b): - VPSLLD(tmp, b, 12) - VPSRLD(b, b, 20) - XOR_avx2(b, tmp) - -def ROTW8_avx2(tmp, d): - VPSLLD(tmp, d, 8) - VPSRLD(d, d, 24) - XOR_avx2(d, tmp) - -def ROTW7_avx2(tmp, b): - VPSLLD(tmp, b, 7) - VPSRLD(b, b, 25) - XOR_avx2(b, tmp) - -def WriteXor_avx2(tmp, inp, outp, d, v0, v1, v2, v3): - # XOR_WRITE(out+ 0, in+ 0, _mm256_permute2x128_si256(v0,v1,0x20)); - VPERM2I128(tmp, v0, v1, 0x20) - VPXOR(tmp, tmp, [inp+d]) - VMOVDQU([outp+d], tmp) - - # XOR_WRITE(out+32, in+32, _mm256_permute2x128_si256(v2,v3,0x20)); - VPERM2I128(tmp, v2, v3, 0x20) - VPXOR(tmp, tmp, [inp+d+32]) - VMOVDQU([outp+d+32], tmp) - - # XOR_WRITE(out+64, in+64, _mm256_permute2x128_si256(v0,v1,0x31)); - VPERM2I128(tmp, v0, v1, 0x31) - VPXOR(tmp, tmp, [inp+d+64]) - VMOVDQU([outp+d+64], tmp) - - # XOR_WRITE(out+96, in+96, _mm256_permute2x128_si256(v2,v3,0x31)); - VPERM2I128(tmp, v2, v3, 0x31) - VPXOR(tmp, tmp, [inp+d+96]) - VMOVDQU([outp+d+96], tmp) - -# AVX2 ChaCha20 (aka avx2). 
Does not handle partial blocks, will process -# 8/4/2 blocks at a time. Alignment blah blah blah fuck you. -with Function("blocksAmd64AVX2", (x, inp, outp, nrBlocks), target=uarch.broadwell): - reg_x = GeneralPurposeRegister64() - reg_inp = GeneralPurposeRegister64() - reg_outp = GeneralPurposeRegister64() - reg_blocks = GeneralPurposeRegister64() - reg_sp_save = GeneralPurposeRegister64() - - LOAD.ARGUMENT(reg_x, x) - LOAD.ARGUMENT(reg_inp, inp) - LOAD.ARGUMENT(reg_outp, outp) - LOAD.ARGUMENT(reg_blocks, nrBlocks) - - # Align the stack to a 32 byte boundary. - reg_align = GeneralPurposeRegister64() - MOV(reg_sp_save, registers.rsp) - MOV(reg_align, 0x1f) - NOT(reg_align) - AND(registers.rsp, reg_align) - SUB(registers.rsp, 0x20) - - x_s0 = [reg_x] # (Memory) Cipher state [0..3] - x_s1 = [reg_x+16] # (Memory) Cipher state [4..7] - x_s2 = [reg_x+32] # (Memory) Cipher state [8..11] - x_s3 = [reg_x+48] # (Memory) Cipher state [12..15] - - ymm_v0 = YMMRegister() - ymm_v1 = YMMRegister() - ymm_v2 = YMMRegister() - ymm_v3 = YMMRegister() - - ymm_v4 = YMMRegister() - ymm_v5 = YMMRegister() - ymm_v6 = YMMRegister() - ymm_v7 = YMMRegister() - - ymm_v8 = YMMRegister() - ymm_v9 = YMMRegister() - ymm_v10 = YMMRegister() - ymm_v11 = YMMRegister() - - ymm_v12 = YMMRegister() - ymm_v13 = YMMRegister() - ymm_v14 = YMMRegister() - ymm_v15 = YMMRegister() - - ymm_tmp0 = ymm_v12 - - # Allocate the neccecary stack space for the counter vector and two ymm - # registers that we will spill. - SUB(registers.rsp, 96) - mem_tmp0 = [registers.rsp+64] # (Stack) Scratch space. - mem_s3 = [registers.rsp+32] # (Stack) Working copy of s3. (8x) - mem_inc = [registers.rsp] # (Stack) Counter increment vector. - - # Increment the counter for one side of the state vector. - VPXOR(ymm_tmp0, ymm_tmp0, ymm_tmp0) - VMOVDQU(mem_inc, ymm_tmp0) - reg_tmp = GeneralPurposeRegister32() - MOV(reg_tmp, 0x00000001) - MOV([registers.rsp+16], reg_tmp) - VBROADCASTI128(ymm_v3, x_s3) - VPADDQ(ymm_v3, ymm_v3, [registers.rsp]) - VMOVDQA(mem_s3, ymm_v3) - - # As we process 2xN blocks at a time, so the counter increment for both - # sides of the state vector is 2. - MOV(reg_tmp, 0x00000002) - MOV([registers.rsp], reg_tmp) - MOV([registers.rsp+16], reg_tmp) - - out_write_even = Label() - out_write_odd = Label() - - # - # 8 blocks at a time. Ted Krovetz's avx2 code does not do this, but it's - # a decent gain despite all the pain... 
- # - - vector_loop8 = Loop() - SUB(reg_blocks, 8) - JB(vector_loop8.end) - with vector_loop8: - VBROADCASTI128(ymm_v0, x_s0) - VBROADCASTI128(ymm_v1, x_s1) - VBROADCASTI128(ymm_v2, x_s2) - VMOVDQA(ymm_v3, mem_s3) - - VMOVDQA(ymm_v4, ymm_v0) - VMOVDQA(ymm_v5, ymm_v1) - VMOVDQA(ymm_v6, ymm_v2) - VPADDQ(ymm_v7, ymm_v3, mem_inc) - - VMOVDQA(ymm_v8, ymm_v0) - VMOVDQA(ymm_v9, ymm_v1) - VMOVDQA(ymm_v10, ymm_v2) - VPADDQ(ymm_v11, ymm_v7, mem_inc) - - VMOVDQA(ymm_v12, ymm_v0) - VMOVDQA(ymm_v13, ymm_v1) - VMOVDQA(ymm_v14, ymm_v2) - VPADDQ(ymm_v15, ymm_v11, mem_inc) - - reg_rounds = GeneralPurposeRegister64() - MOV(reg_rounds, 20) - rounds_loop8 = Loop() - with rounds_loop8: - # a += b; d ^= a; d = ROTW16(d); - ADD_avx2(ymm_v0, ymm_v1) - ADD_avx2(ymm_v4, ymm_v5) - ADD_avx2(ymm_v8, ymm_v9) - ADD_avx2(ymm_v12, ymm_v13) - XOR_avx2(ymm_v3, ymm_v0) - XOR_avx2(ymm_v7, ymm_v4) - XOR_avx2(ymm_v11, ymm_v8) - XOR_avx2(ymm_v15, ymm_v12) - - VMOVDQA(mem_tmp0, ymm_tmp0) # Save - - ROTW16_avx2(ymm_tmp0, ymm_v3) - ROTW16_avx2(ymm_tmp0, ymm_v7) - ROTW16_avx2(ymm_tmp0, ymm_v11) - ROTW16_avx2(ymm_tmp0, ymm_v15) - - # c += d; b ^= c; b = ROTW12(b); - ADD_avx2(ymm_v2, ymm_v3) - ADD_avx2(ymm_v6, ymm_v7) - ADD_avx2(ymm_v10, ymm_v11) - ADD_avx2(ymm_v14, ymm_v15) - XOR_avx2(ymm_v1, ymm_v2) - XOR_avx2(ymm_v5, ymm_v6) - XOR_avx2(ymm_v9, ymm_v10) - XOR_avx2(ymm_v13, ymm_v14) - ROTW12_avx2(ymm_tmp0, ymm_v1) - ROTW12_avx2(ymm_tmp0, ymm_v5) - ROTW12_avx2(ymm_tmp0, ymm_v9) - ROTW12_avx2(ymm_tmp0, ymm_v13) - - # a += b; d ^= a; d = ROTW8(d); - VMOVDQA(ymm_tmp0, mem_tmp0) # Restore - - ADD_avx2(ymm_v0, ymm_v1) - ADD_avx2(ymm_v4, ymm_v5) - ADD_avx2(ymm_v8, ymm_v9) - ADD_avx2(ymm_v12, ymm_v13) - XOR_avx2(ymm_v3, ymm_v0) - XOR_avx2(ymm_v7, ymm_v4) - XOR_avx2(ymm_v11, ymm_v8) - XOR_avx2(ymm_v15, ymm_v12) - - VMOVDQA(mem_tmp0, ymm_tmp0) # Save - - ROTW8_avx2(ymm_tmp0, ymm_v3) - ROTW8_avx2(ymm_tmp0, ymm_v7) - ROTW8_avx2(ymm_tmp0, ymm_v11) - ROTW8_avx2(ymm_tmp0, ymm_v15) - - # c += d; b ^= c; b = ROTW7(b) - ADD_avx2(ymm_v2, ymm_v3) - ADD_avx2(ymm_v6, ymm_v7) - ADD_avx2(ymm_v10, ymm_v11) - ADD_avx2(ymm_v14, ymm_v15) - XOR_avx2(ymm_v1, ymm_v2) - XOR_avx2(ymm_v5, ymm_v6) - XOR_avx2(ymm_v9, ymm_v10) - XOR_avx2(ymm_v13, ymm_v14) - ROTW7_avx2(ymm_tmp0, ymm_v1) - ROTW7_avx2(ymm_tmp0, ymm_v5) - ROTW7_avx2(ymm_tmp0, ymm_v9) - ROTW7_avx2(ymm_tmp0, ymm_v13) - - # b = ROTV1(b); c = ROTV2(c); d = ROTV3(d); - VPSHUFD(ymm_v1, ymm_v1, 0x39) - VPSHUFD(ymm_v5, ymm_v5, 0x39) - VPSHUFD(ymm_v9, ymm_v9, 0x39) - VPSHUFD(ymm_v13, ymm_v13, 0x39) - VPSHUFD(ymm_v2, ymm_v2, 0x4e) - VPSHUFD(ymm_v6, ymm_v6, 0x4e) - VPSHUFD(ymm_v10, ymm_v10, 0x4e) - VPSHUFD(ymm_v14, ymm_v14, 0x4e) - VPSHUFD(ymm_v3, ymm_v3, 0x93) - VPSHUFD(ymm_v7, ymm_v7, 0x93) - VPSHUFD(ymm_v11, ymm_v11, 0x93) - VPSHUFD(ymm_v15, ymm_v15, 0x93) - - # a += b; d ^= a; d = ROTW16(d); - VMOVDQA(ymm_tmp0, mem_tmp0) # Restore - - ADD_avx2(ymm_v0, ymm_v1) - ADD_avx2(ymm_v4, ymm_v5) - ADD_avx2(ymm_v8, ymm_v9) - ADD_avx2(ymm_v12, ymm_v13) - XOR_avx2(ymm_v3, ymm_v0) - XOR_avx2(ymm_v7, ymm_v4) - XOR_avx2(ymm_v11, ymm_v8) - XOR_avx2(ymm_v15, ymm_v12) - - VMOVDQA(mem_tmp0, ymm_tmp0) # Save - - ROTW16_avx2(ymm_tmp0, ymm_v3) - ROTW16_avx2(ymm_tmp0, ymm_v7) - ROTW16_avx2(ymm_tmp0, ymm_v11) - ROTW16_avx2(ymm_tmp0, ymm_v15) - - # c += d; b ^= c; b = ROTW12(b); - ADD_avx2(ymm_v2, ymm_v3) - ADD_avx2(ymm_v6, ymm_v7) - ADD_avx2(ymm_v10, ymm_v11) - ADD_avx2(ymm_v14, ymm_v15) - XOR_avx2(ymm_v1, ymm_v2) - XOR_avx2(ymm_v5, ymm_v6) - XOR_avx2(ymm_v9, ymm_v10) - XOR_avx2(ymm_v13, ymm_v14) - ROTW12_avx2(ymm_tmp0, ymm_v1) - 
ROTW12_avx2(ymm_tmp0, ymm_v5) - ROTW12_avx2(ymm_tmp0, ymm_v9) - ROTW12_avx2(ymm_tmp0, ymm_v13) - - # a += b; d ^= a; d = ROTW8(d); - VMOVDQA(ymm_tmp0, mem_tmp0) # Restore - - ADD_avx2(ymm_v0, ymm_v1) - ADD_avx2(ymm_v4, ymm_v5) - ADD_avx2(ymm_v8, ymm_v9) - ADD_avx2(ymm_v12, ymm_v13) - XOR_avx2(ymm_v3, ymm_v0) - XOR_avx2(ymm_v7, ymm_v4) - XOR_avx2(ymm_v11, ymm_v8) - XOR_avx2(ymm_v15, ymm_v12) - - VMOVDQA(mem_tmp0, ymm_tmp0) # Save - - ROTW8_avx2(ymm_tmp0, ymm_v3) - ROTW8_avx2(ymm_tmp0, ymm_v7) - ROTW8_avx2(ymm_tmp0, ymm_v11) - ROTW8_avx2(ymm_tmp0, ymm_v15) - - # c += d; b ^= c; b = ROTW7(b) - ADD_avx2(ymm_v2, ymm_v3) - ADD_avx2(ymm_v6, ymm_v7) - ADD_avx2(ymm_v10, ymm_v11) - ADD_avx2(ymm_v14, ymm_v15) - XOR_avx2(ymm_v1, ymm_v2) - XOR_avx2(ymm_v5, ymm_v6) - XOR_avx2(ymm_v9, ymm_v10) - XOR_avx2(ymm_v13, ymm_v14) - ROTW7_avx2(ymm_tmp0, ymm_v1) - ROTW7_avx2(ymm_tmp0, ymm_v5) - ROTW7_avx2(ymm_tmp0, ymm_v9) - ROTW7_avx2(ymm_tmp0, ymm_v13) - - # b = ROTV1(b); c = ROTV2(c); d = ROTV3(d); - VPSHUFD(ymm_v1, ymm_v1, 0x93) - VPSHUFD(ymm_v5, ymm_v5, 0x93) - VPSHUFD(ymm_v9, ymm_v9, 0x93) - VPSHUFD(ymm_v13, ymm_v13, 0x93) - VPSHUFD(ymm_v2, ymm_v2, 0x4e) - VPSHUFD(ymm_v6, ymm_v6, 0x4e) - VPSHUFD(ymm_v10, ymm_v10, 0x4e) - VPSHUFD(ymm_v14, ymm_v14, 0x4e) - VPSHUFD(ymm_v3, ymm_v3, 0x39) - VPSHUFD(ymm_v7, ymm_v7, 0x39) - VPSHUFD(ymm_v11, ymm_v11, 0x39) - VPSHUFD(ymm_v15, ymm_v15, 0x39) - - VMOVDQA(ymm_tmp0, mem_tmp0) # Restore - - SUB(reg_rounds, 2) - JNZ(rounds_loop8.begin) - - # ymm_v12 is in mem_tmp0 and is current.... - - # XXX: I assume VBROADCASTI128 is about as fast as VMOVDQA.... - VBROADCASTI128(ymm_tmp0, x_s0) - ADD_avx2(ymm_v0, ymm_tmp0) - ADD_avx2(ymm_v4, ymm_tmp0) - ADD_avx2(ymm_v8, ymm_tmp0) - ADD_avx2(ymm_tmp0, mem_tmp0) - VMOVDQA(mem_tmp0, ymm_tmp0) - - VBROADCASTI128(ymm_tmp0, x_s1) - ADD_avx2(ymm_v1, ymm_tmp0) - ADD_avx2(ymm_v5, ymm_tmp0) - ADD_avx2(ymm_v9, ymm_tmp0) - ADD_avx2(ymm_v13, ymm_tmp0) - - VBROADCASTI128(ymm_tmp0, x_s2) - ADD_avx2(ymm_v2, ymm_tmp0) - ADD_avx2(ymm_v6, ymm_tmp0) - ADD_avx2(ymm_v10, ymm_tmp0) - ADD_avx2(ymm_v14, ymm_tmp0) - - ADD_avx2(ymm_v3, mem_s3) - WriteXor_avx2(ymm_tmp0, reg_inp, reg_outp, 0, ymm_v0, ymm_v1, ymm_v2, ymm_v3) - VMOVDQA(ymm_v3, mem_s3) - ADD_avx2(ymm_v3, mem_inc) - - ADD_avx2(ymm_v7, ymm_v3) - WriteXor_avx2(ymm_tmp0, reg_inp, reg_outp, 128, ymm_v4, ymm_v5, ymm_v6, ymm_v7) - ADD_avx2(ymm_v3, mem_inc) - - ADD_avx2(ymm_v11, ymm_v3) - WriteXor_avx2(ymm_tmp0, reg_inp, reg_outp, 256, ymm_v8, ymm_v9, ymm_v10, ymm_v11) - ADD_avx2(ymm_v3, mem_inc) - - VMOVDQA(ymm_v12, mem_tmp0) - ADD_avx2(ymm_v15, ymm_v3) - WriteXor_avx2(ymm_v0, reg_inp, reg_outp, 384, ymm_v12, ymm_v13, ymm_v14, ymm_v15) - ADD_avx2(ymm_v3, mem_inc) - - VMOVDQA(mem_s3, ymm_v3) - - ADD(reg_inp, 8 * 64) - ADD(reg_outp, 8 * 64) - - SUB(reg_blocks, 8) - JAE(vector_loop8.begin) - - # ymm_v3 contains a current copy of mem_s3 either from when it was built, - # or because the loop updates it. Copy this before we mess with the block - # counter in case we need to write it back and return. - ymm_s3 = ymm_v11 - VMOVDQA(ymm_s3, ymm_v3) - - ADD(reg_blocks, 8) - JZ(out_write_even) - - # We now actually can do everything in registers. - ymm_s0 = ymm_v8 - VBROADCASTI128(ymm_s0, x_s0) - ymm_s1 = ymm_v9 - VBROADCASTI128(ymm_s1, x_s1) - ymm_s2 = ymm_v10 - VBROADCASTI128(ymm_s2, x_s2) - ymm_inc = ymm_v14 - VMOVDQA(ymm_inc, mem_inc) - - # - # 4 blocks at a time. 
- # - - SUB(reg_blocks, 4) - vector_loop4 = Loop() - JB(vector_loop4.end) - with vector_loop4: - VMOVDQA(ymm_v0, ymm_s0) - VMOVDQA(ymm_v1, ymm_s1) - VMOVDQA(ymm_v2, ymm_s2) - VMOVDQA(ymm_v3, ymm_s3) - - VMOVDQA(ymm_v4, ymm_v0) - VMOVDQA(ymm_v5, ymm_v1) - VMOVDQA(ymm_v6, ymm_v2) - VPADDQ(ymm_v7, ymm_v3, ymm_inc) - - reg_rounds = GeneralPurposeRegister64() - MOV(reg_rounds, 20) - rounds_loop4 = Loop() - with rounds_loop4: - # a += b; d ^= a; d = ROTW16(d); - ADD_avx2(ymm_v0, ymm_v1) - ADD_avx2(ymm_v4, ymm_v5) - XOR_avx2(ymm_v3, ymm_v0) - XOR_avx2(ymm_v7, ymm_v4) - ROTW16_avx2(ymm_tmp0, ymm_v3) - ROTW16_avx2(ymm_tmp0, ymm_v7) - - # c += d; b ^= c; b = ROTW12(b); - ADD_avx2(ymm_v2, ymm_v3) - ADD_avx2(ymm_v6, ymm_v7) - XOR_avx2(ymm_v1, ymm_v2) - XOR_avx2(ymm_v5, ymm_v6) - ROTW12_avx2(ymm_tmp0, ymm_v1) - ROTW12_avx2(ymm_tmp0, ymm_v5) - - # a += b; d ^= a; d = ROTW8(d); - ADD_avx2(ymm_v0, ymm_v1) - ADD_avx2(ymm_v4, ymm_v5) - XOR_avx2(ymm_v3, ymm_v0) - XOR_avx2(ymm_v7, ymm_v4) - ROTW8_avx2(ymm_tmp0, ymm_v3) - ROTW8_avx2(ymm_tmp0, ymm_v7) - - # c += d; b ^= c; b = ROTW7(b) - ADD_avx2(ymm_v2, ymm_v3) - ADD_avx2(ymm_v6, ymm_v7) - XOR_avx2(ymm_v1, ymm_v2) - XOR_avx2(ymm_v5, ymm_v6) - ROTW7_avx2(ymm_tmp0, ymm_v1) - ROTW7_avx2(ymm_tmp0, ymm_v5) - - # b = ROTV1(b); c = ROTV2(c); d = ROTV3(d); - VPSHUFD(ymm_v1, ymm_v1, 0x39) - VPSHUFD(ymm_v5, ymm_v5, 0x39) - VPSHUFD(ymm_v2, ymm_v2, 0x4e) - VPSHUFD(ymm_v6, ymm_v6, 0x4e) - VPSHUFD(ymm_v3, ymm_v3, 0x93) - VPSHUFD(ymm_v7, ymm_v7, 0x93) - - # a += b; d ^= a; d = ROTW16(d); - ADD_avx2(ymm_v0, ymm_v1) - ADD_avx2(ymm_v4, ymm_v5) - XOR_avx2(ymm_v3, ymm_v0) - XOR_avx2(ymm_v7, ymm_v4) - ROTW16_avx2(ymm_tmp0, ymm_v3) - ROTW16_avx2(ymm_tmp0, ymm_v7) - - # c += d; b ^= c; b = ROTW12(b); - ADD_avx2(ymm_v2, ymm_v3) - ADD_avx2(ymm_v6, ymm_v7) - XOR_avx2(ymm_v1, ymm_v2) - XOR_avx2(ymm_v5, ymm_v6) - ROTW12_avx2(ymm_tmp0, ymm_v1) - ROTW12_avx2(ymm_tmp0, ymm_v5) - - # a += b; d ^= a; d = ROTW8(d); - ADD_avx2(ymm_v0, ymm_v1) - ADD_avx2(ymm_v4, ymm_v5) - XOR_avx2(ymm_v3, ymm_v0) - XOR_avx2(ymm_v7, ymm_v4) - ROTW8_avx2(ymm_tmp0, ymm_v3) - ROTW8_avx2(ymm_tmp0, ymm_v7) - - # c += d; b ^= c; b = ROTW7(b) - ADD_avx2(ymm_v2, ymm_v3) - ADD_avx2(ymm_v6, ymm_v7) - XOR_avx2(ymm_v1, ymm_v2) - XOR_avx2(ymm_v5, ymm_v6) - ROTW7_avx2(ymm_tmp0, ymm_v1) - ROTW7_avx2(ymm_tmp0, ymm_v5) - - # b = ROTV1(b); c = ROTV2(c); d = ROTV3(d); - VPSHUFD(ymm_v1, ymm_v1, 0x93) - VPSHUFD(ymm_v5, ymm_v5, 0x93) - VPSHUFD(ymm_v2, ymm_v2, 0x4e) - VPSHUFD(ymm_v6, ymm_v6, 0x4e) - VPSHUFD(ymm_v3, ymm_v3, 0x39) - VPSHUFD(ymm_v7, ymm_v7, 0x39) - - SUB(reg_rounds, 2) - JNZ(rounds_loop4.begin) - - ADD_avx2(ymm_v0, ymm_s0) - ADD_avx2(ymm_v1, ymm_s1) - ADD_avx2(ymm_v2, ymm_s2) - ADD_avx2(ymm_v3, ymm_s3) - WriteXor_avx2(ymm_tmp0, reg_inp, reg_outp, 0, ymm_v0, ymm_v1, ymm_v2, ymm_v3) - ADD_avx2(ymm_s3, ymm_inc) - - ADD_avx2(ymm_v4, ymm_s0) - ADD_avx2(ymm_v5, ymm_s1) - ADD_avx2(ymm_v6, ymm_s2) - ADD_avx2(ymm_v7, ymm_s3) - WriteXor_avx2(ymm_tmp0, reg_inp, reg_outp, 128, ymm_v4, ymm_v5, ymm_v6, ymm_v7) - ADD_avx2(ymm_s3, ymm_inc) - - ADD(reg_inp, 4 * 64) - ADD(reg_outp, 4 * 64) - - SUB(reg_blocks, 4) - JAE(vector_loop4.begin) - - ADD(reg_blocks, 4) - JZ(out_write_even) - - # - # 2/1 blocks at a time. The two codepaths are unified because - # with AVX2 we do 2 blocks at a time anyway, and this only gets called - # if 3/2/1 blocks are remaining, so the extra branches don't hurt that - # much. 
- # - - vector_loop2 = Loop() - with vector_loop2: - VMOVDQA(ymm_v0, ymm_s0) - VMOVDQA(ymm_v1, ymm_s1) - VMOVDQA(ymm_v2, ymm_s2) - VMOVDQA(ymm_v3, ymm_s3) - - reg_rounds = GeneralPurposeRegister64() - MOV(reg_rounds, 20) - rounds_loop2 = Loop() - with rounds_loop2: - # a += b; d ^= a; d = ROTW16(d); - ADD_avx2(ymm_v0, ymm_v1) - XOR_avx2(ymm_v3, ymm_v0) - ROTW16_avx2(ymm_tmp0, ymm_v3) - - # c += d; b ^= c; b = ROTW12(b); - ADD_avx2(ymm_v2, ymm_v3) - XOR_avx2(ymm_v1, ymm_v2) - ROTW12_avx2(ymm_tmp0, ymm_v1) - - # a += b; d ^= a; d = ROTW8(d); - ADD_avx2(ymm_v0, ymm_v1) - XOR_avx2(ymm_v3, ymm_v0) - ROTW8_avx2(ymm_tmp0, ymm_v3) - - # c += d; b ^= c; b = ROTW7(b) - ADD_avx2(ymm_v2, ymm_v3) - XOR_avx2(ymm_v1, ymm_v2) - ROTW7_avx2(ymm_tmp0, ymm_v1) - - # b = ROTV1(b); c = ROTV2(c); d = ROTV3(d); - VPSHUFD(ymm_v1, ymm_v1, 0x39) - VPSHUFD(ymm_v2, ymm_v2, 0x4e) - VPSHUFD(ymm_v3, ymm_v3, 0x93) - - # a += b; d ^= a; d = ROTW16(d); - ADD_avx2(ymm_v0, ymm_v1) - XOR_avx2(ymm_v3, ymm_v0) - ROTW16_avx2(ymm_tmp0, ymm_v3) - - # c += d; b ^= c; b = ROTW12(b); - ADD_avx2(ymm_v2, ymm_v3) - XOR_avx2(ymm_v1, ymm_v2) - ROTW12_avx2(ymm_tmp0, ymm_v1) - - # a += b; d ^= a; d = ROTW8(d); - ADD_avx2(ymm_v0, ymm_v1) - XOR_avx2(ymm_v3, ymm_v0) - ROTW8_avx2(ymm_tmp0, ymm_v3) - - # c += d; b ^= c; b = ROTW7(b) - ADD_avx2(ymm_v2, ymm_v3) - XOR_avx2(ymm_v1, ymm_v2) - ROTW7_avx2(ymm_tmp0, ymm_v1) - - # b = ROTV1(b); c = ROTV2(c); d = ROTV3(d); - VPSHUFD(ymm_v1, ymm_v1, 0x93) - VPSHUFD(ymm_v2, ymm_v2, 0x4e) - VPSHUFD(ymm_v3, ymm_v3, 0x39) - - SUB(reg_rounds, 2) - JNZ(rounds_loop2.begin) - - ADD_avx2(ymm_v0, ymm_s0) - ADD_avx2(ymm_v1, ymm_s1) - ADD_avx2(ymm_v2, ymm_s2) - ADD_avx2(ymm_v3, ymm_s3) - - # XOR_WRITE(out+ 0, in+ 0, _mm256_permute2x128_si256(v0,v1,0x20)); - VPERM2I128(ymm_tmp0, ymm_v0, ymm_v1, 0x20) - VPXOR(ymm_tmp0, ymm_tmp0, [reg_inp]) - VMOVDQU([reg_outp], ymm_tmp0) - - # XOR_WRITE(out+32, in+32, _mm256_permute2x128_si256(v2,v3,0x20)); - VPERM2I128(ymm_tmp0, ymm_v2, ymm_v3, 0x20) - VPXOR(ymm_tmp0, ymm_tmp0, [reg_inp+32]) - VMOVDQU([reg_outp+32], ymm_tmp0) - - SUB(reg_blocks, 1) - JZ(out_write_odd) - - ADD_avx2(ymm_s3, ymm_inc) - - # XOR_WRITE(out+64, in+64, _mm256_permute2x128_si256(v0,v1,0x31)); - VPERM2I128(ymm_tmp0, ymm_v0, ymm_v1, 0x31) - VPXOR(ymm_tmp0, ymm_tmp0, [reg_inp+64]) - VMOVDQU([reg_outp+64], ymm_tmp0) - - # XOR_WRITE(out+96, in+96, _mm256_permute2x128_si256(v2,v3,0x31)); - VPERM2I128(ymm_tmp0, ymm_v2, ymm_v3, 0x31) - VPXOR(ymm_tmp0, ymm_tmp0, [reg_inp+96]) - VMOVDQU([reg_outp+96], ymm_tmp0) - - SUB(reg_blocks, 1) - JZ(out_write_even) - - ADD(reg_inp, 2 * 64) - ADD(reg_outp, 2 * 64) - JMP(vector_loop2.begin) - - LABEL(out_write_odd) - VPERM2I128(ymm_s3, ymm_s3, ymm_s3, 0x01) # Odd number of blocks. - - LABEL(out_write_even) - VMOVDQA(x_s3, ymm_s3.as_xmm) # Write back ymm_s3 to x_v3 - - # Paranoia, cleanse the scratch space. - VPXOR(ymm_v0, ymm_v0, ymm_v0) - VMOVDQA(mem_tmp0, ymm_v0) - VMOVDQA(mem_s3, ymm_v0) - - # Remove our stack allocation. 
- MOV(registers.rsp, reg_sp_save) - - RETURN() - -# -# CPUID -# - -cpuidParams = Argument(ptr(uint32_t)) - -with Function("cpuidAmd64", (cpuidParams,)): - reg_params = registers.r15 - LOAD.ARGUMENT(reg_params, cpuidParams) - - MOV(registers.eax, [reg_params]) - MOV(registers.ecx, [reg_params+4]) - - CPUID() - - MOV([reg_params], registers.eax) - MOV([reg_params+4], registers.ebx) - MOV([reg_params+8], registers.ecx) - MOV([reg_params+12], registers.edx) - - RETURN() - -# -# XGETBV (ECX = 0) -# - -xcrVec = Argument(ptr(uint32_t)) - -with Function("xgetbv0Amd64", (xcrVec,)): - reg_vec = GeneralPurposeRegister64() - - LOAD.ARGUMENT(reg_vec, xcrVec) - - XOR(registers.ecx, registers.ecx) - - XGETBV() - - MOV([reg_vec], registers.eax) - MOV([reg_vec+4], registers.edx) - - RETURN() diff --git a/vendor/git.schwanenlied.me/yawning/chacha20.git/chacha20_amd64.s b/vendor/git.schwanenlied.me/yawning/chacha20.git/chacha20_amd64.s deleted file mode 100644 index 4970397..0000000 --- a/vendor/git.schwanenlied.me/yawning/chacha20.git/chacha20_amd64.s +++ /dev/null @@ -1,1187 +0,0 @@ -// Generated by PeachPy 0.2.0 from chacha20_amd64.py - - -// func blocksAmd64SSE2(x *uint32, inp *uint8, outp *uint8, nrBlocks *uint) -TEXT ·blocksAmd64SSE2(SB),4,$0-32 - MOVQ x+0(FP), AX - MOVQ inp+8(FP), BX - MOVQ outp+16(FP), CX - MOVQ nrBlocks+24(FP), DX - MOVQ SP, DI - MOVQ $31, SI - NOTQ SI - ANDQ SI, SP - SUBQ $32, SP - PXOR X0, X0 - SUBQ $32, SP - MOVO X0, 0(SP) - MOVL $1, SI - MOVL SI, 0(SP) - SUBQ $4, DX - JCS vector_loop4_end -vector_loop4_begin: - MOVO 0(AX), X0 - MOVO 16(AX), X1 - MOVO 32(AX), X2 - MOVO 48(AX), X3 - MOVO X0, X4 - MOVO X1, X5 - MOVO X2, X6 - MOVO X3, X7 - PADDQ 0(SP), X7 - MOVO X0, X8 - MOVO X1, X9 - MOVO X2, X10 - MOVO X7, X11 - PADDQ 0(SP), X11 - MOVO X0, X12 - MOVO X1, X13 - MOVO X2, X14 - MOVO X11, X15 - PADDQ 0(SP), X15 - MOVQ $20, SI -rounds_loop4_begin: - PADDL X1, X0 - PADDL X5, X4 - PADDL X9, X8 - PADDL X13, X12 - PXOR X0, X3 - PXOR X4, X7 - PXOR X8, X11 - PXOR X12, X15 - MOVO X12, 16(SP) - MOVO X3, X12 - PSLLL $16, X12 - PSRLL $16, X3 - PXOR X12, X3 - MOVO X7, X12 - PSLLL $16, X12 - PSRLL $16, X7 - PXOR X12, X7 - MOVO X11, X12 - PSLLL $16, X12 - PSRLL $16, X11 - PXOR X12, X11 - MOVO X15, X12 - PSLLL $16, X12 - PSRLL $16, X15 - PXOR X12, X15 - PADDL X3, X2 - PADDL X7, X6 - PADDL X11, X10 - PADDL X15, X14 - PXOR X2, X1 - PXOR X6, X5 - PXOR X10, X9 - PXOR X14, X13 - MOVO X1, X12 - PSLLL $12, X12 - PSRLL $20, X1 - PXOR X12, X1 - MOVO X5, X12 - PSLLL $12, X12 - PSRLL $20, X5 - PXOR X12, X5 - MOVO X9, X12 - PSLLL $12, X12 - PSRLL $20, X9 - PXOR X12, X9 - MOVO X13, X12 - PSLLL $12, X12 - PSRLL $20, X13 - PXOR X12, X13 - MOVO 16(SP), X12 - PADDL X1, X0 - PADDL X5, X4 - PADDL X9, X8 - PADDL X13, X12 - PXOR X0, X3 - PXOR X4, X7 - PXOR X8, X11 - PXOR X12, X15 - MOVO X12, 16(SP) - MOVO X3, X12 - PSLLL $8, X12 - PSRLL $24, X3 - PXOR X12, X3 - MOVO X7, X12 - PSLLL $8, X12 - PSRLL $24, X7 - PXOR X12, X7 - MOVO X11, X12 - PSLLL $8, X12 - PSRLL $24, X11 - PXOR X12, X11 - MOVO X15, X12 - PSLLL $8, X12 - PSRLL $24, X15 - PXOR X12, X15 - PADDL X3, X2 - PADDL X7, X6 - PADDL X11, X10 - PADDL X15, X14 - PXOR X2, X1 - PXOR X6, X5 - PXOR X10, X9 - PXOR X14, X13 - MOVO X1, X12 - PSLLL $7, X12 - PSRLL $25, X1 - PXOR X12, X1 - MOVO X5, X12 - PSLLL $7, X12 - PSRLL $25, X5 - PXOR X12, X5 - MOVO X9, X12 - PSLLL $7, X12 - PSRLL $25, X9 - PXOR X12, X9 - MOVO X13, X12 - PSLLL $7, X12 - PSRLL $25, X13 - PXOR X12, X13 - PSHUFL $57, X1, X1 - PSHUFL $57, X5, X5 - PSHUFL $57, X9, X9 - PSHUFL $57, X13, X13 - PSHUFL $78, X2, X2 
- PSHUFL $78, X6, X6 - PSHUFL $78, X10, X10 - PSHUFL $78, X14, X14 - PSHUFL $147, X3, X3 - PSHUFL $147, X7, X7 - PSHUFL $147, X11, X11 - PSHUFL $147, X15, X15 - MOVO 16(SP), X12 - PADDL X1, X0 - PADDL X5, X4 - PADDL X9, X8 - PADDL X13, X12 - PXOR X0, X3 - PXOR X4, X7 - PXOR X8, X11 - PXOR X12, X15 - MOVO X12, 16(SP) - MOVO X3, X12 - PSLLL $16, X12 - PSRLL $16, X3 - PXOR X12, X3 - MOVO X7, X12 - PSLLL $16, X12 - PSRLL $16, X7 - PXOR X12, X7 - MOVO X11, X12 - PSLLL $16, X12 - PSRLL $16, X11 - PXOR X12, X11 - MOVO X15, X12 - PSLLL $16, X12 - PSRLL $16, X15 - PXOR X12, X15 - PADDL X3, X2 - PADDL X7, X6 - PADDL X11, X10 - PADDL X15, X14 - PXOR X2, X1 - PXOR X6, X5 - PXOR X10, X9 - PXOR X14, X13 - MOVO X1, X12 - PSLLL $12, X12 - PSRLL $20, X1 - PXOR X12, X1 - MOVO X5, X12 - PSLLL $12, X12 - PSRLL $20, X5 - PXOR X12, X5 - MOVO X9, X12 - PSLLL $12, X12 - PSRLL $20, X9 - PXOR X12, X9 - MOVO X13, X12 - PSLLL $12, X12 - PSRLL $20, X13 - PXOR X12, X13 - MOVO 16(SP), X12 - PADDL X1, X0 - PADDL X5, X4 - PADDL X9, X8 - PADDL X13, X12 - PXOR X0, X3 - PXOR X4, X7 - PXOR X8, X11 - PXOR X12, X15 - MOVO X12, 16(SP) - MOVO X3, X12 - PSLLL $8, X12 - PSRLL $24, X3 - PXOR X12, X3 - MOVO X7, X12 - PSLLL $8, X12 - PSRLL $24, X7 - PXOR X12, X7 - MOVO X11, X12 - PSLLL $8, X12 - PSRLL $24, X11 - PXOR X12, X11 - MOVO X15, X12 - PSLLL $8, X12 - PSRLL $24, X15 - PXOR X12, X15 - PADDL X3, X2 - PADDL X7, X6 - PADDL X11, X10 - PADDL X15, X14 - PXOR X2, X1 - PXOR X6, X5 - PXOR X10, X9 - PXOR X14, X13 - MOVO X1, X12 - PSLLL $7, X12 - PSRLL $25, X1 - PXOR X12, X1 - MOVO X5, X12 - PSLLL $7, X12 - PSRLL $25, X5 - PXOR X12, X5 - MOVO X9, X12 - PSLLL $7, X12 - PSRLL $25, X9 - PXOR X12, X9 - MOVO X13, X12 - PSLLL $7, X12 - PSRLL $25, X13 - PXOR X12, X13 - PSHUFL $147, X1, X1 - PSHUFL $147, X5, X5 - PSHUFL $147, X9, X9 - PSHUFL $147, X13, X13 - PSHUFL $78, X2, X2 - PSHUFL $78, X6, X6 - PSHUFL $78, X10, X10 - PSHUFL $78, X14, X14 - PSHUFL $57, X3, X3 - PSHUFL $57, X7, X7 - PSHUFL $57, X11, X11 - PSHUFL $57, X15, X15 - MOVO 16(SP), X12 - SUBQ $2, SI - JNE rounds_loop4_begin - MOVO X12, 16(SP) - PADDL 0(AX), X0 - PADDL 16(AX), X1 - PADDL 32(AX), X2 - PADDL 48(AX), X3 - MOVOU 0(BX), X12 - PXOR X0, X12 - MOVOU X12, 0(CX) - MOVOU 16(BX), X12 - PXOR X1, X12 - MOVOU X12, 16(CX) - MOVOU 32(BX), X12 - PXOR X2, X12 - MOVOU X12, 32(CX) - MOVOU 48(BX), X12 - PXOR X3, X12 - MOVOU X12, 48(CX) - MOVO 48(AX), X3 - PADDQ 0(SP), X3 - PADDL 0(AX), X4 - PADDL 16(AX), X5 - PADDL 32(AX), X6 - PADDL X3, X7 - MOVOU 64(BX), X12 - PXOR X4, X12 - MOVOU X12, 64(CX) - MOVOU 80(BX), X12 - PXOR X5, X12 - MOVOU X12, 80(CX) - MOVOU 96(BX), X12 - PXOR X6, X12 - MOVOU X12, 96(CX) - MOVOU 112(BX), X12 - PXOR X7, X12 - MOVOU X12, 112(CX) - PADDQ 0(SP), X3 - PADDL 0(AX), X8 - PADDL 16(AX), X9 - PADDL 32(AX), X10 - PADDL X3, X11 - MOVOU 128(BX), X12 - PXOR X8, X12 - MOVOU X12, 128(CX) - MOVOU 144(BX), X12 - PXOR X9, X12 - MOVOU X12, 144(CX) - MOVOU 160(BX), X12 - PXOR X10, X12 - MOVOU X12, 160(CX) - MOVOU 176(BX), X12 - PXOR X11, X12 - MOVOU X12, 176(CX) - PADDQ 0(SP), X3 - MOVO 16(SP), X12 - PADDL 0(AX), X12 - PADDL 16(AX), X13 - PADDL 32(AX), X14 - PADDL X3, X15 - MOVOU 192(BX), X0 - PXOR X12, X0 - MOVOU X0, 192(CX) - MOVOU 208(BX), X0 - PXOR X13, X0 - MOVOU X0, 208(CX) - MOVOU 224(BX), X0 - PXOR X14, X0 - MOVOU X0, 224(CX) - MOVOU 240(BX), X0 - PXOR X15, X0 - MOVOU X0, 240(CX) - PADDQ 0(SP), X3 - MOVO X3, 48(AX) - ADDQ $256, BX - ADDQ $256, CX - SUBQ $4, DX - JCC vector_loop4_begin -vector_loop4_end: - ADDQ $4, DX - JEQ out - MOVO 0(AX), X8 - MOVO 16(AX), X9 - MOVO 
32(AX), X10 - MOVO 48(AX), X11 - MOVO 0(SP), X13 - SUBQ $2, DX - JCS vector_loop2_end -vector_loop2_begin: - MOVO X8, X0 - MOVO X9, X1 - MOVO X10, X2 - MOVO X11, X3 - MOVO X0, X4 - MOVO X1, X5 - MOVO X2, X6 - MOVO X3, X7 - PADDQ X13, X7 - MOVQ $20, SI -rounds_loop2_begin: - PADDL X1, X0 - PADDL X5, X4 - PXOR X0, X3 - PXOR X4, X7 - MOVO X3, X12 - PSLLL $16, X12 - PSRLL $16, X3 - PXOR X12, X3 - MOVO X7, X12 - PSLLL $16, X12 - PSRLL $16, X7 - PXOR X12, X7 - PADDL X3, X2 - PADDL X7, X6 - PXOR X2, X1 - PXOR X6, X5 - MOVO X1, X12 - PSLLL $12, X12 - PSRLL $20, X1 - PXOR X12, X1 - MOVO X5, X12 - PSLLL $12, X12 - PSRLL $20, X5 - PXOR X12, X5 - PADDL X1, X0 - PADDL X5, X4 - PXOR X0, X3 - PXOR X4, X7 - MOVO X3, X12 - PSLLL $8, X12 - PSRLL $24, X3 - PXOR X12, X3 - MOVO X7, X12 - PSLLL $8, X12 - PSRLL $24, X7 - PXOR X12, X7 - PADDL X3, X2 - PADDL X7, X6 - PXOR X2, X1 - PXOR X6, X5 - MOVO X1, X12 - PSLLL $7, X12 - PSRLL $25, X1 - PXOR X12, X1 - MOVO X5, X12 - PSLLL $7, X12 - PSRLL $25, X5 - PXOR X12, X5 - PSHUFL $57, X1, X1 - PSHUFL $57, X5, X5 - PSHUFL $78, X2, X2 - PSHUFL $78, X6, X6 - PSHUFL $147, X3, X3 - PSHUFL $147, X7, X7 - PADDL X1, X0 - PADDL X5, X4 - PXOR X0, X3 - PXOR X4, X7 - MOVO X3, X12 - PSLLL $16, X12 - PSRLL $16, X3 - PXOR X12, X3 - MOVO X7, X12 - PSLLL $16, X12 - PSRLL $16, X7 - PXOR X12, X7 - PADDL X3, X2 - PADDL X7, X6 - PXOR X2, X1 - PXOR X6, X5 - MOVO X1, X12 - PSLLL $12, X12 - PSRLL $20, X1 - PXOR X12, X1 - MOVO X5, X12 - PSLLL $12, X12 - PSRLL $20, X5 - PXOR X12, X5 - PADDL X1, X0 - PADDL X5, X4 - PXOR X0, X3 - PXOR X4, X7 - MOVO X3, X12 - PSLLL $8, X12 - PSRLL $24, X3 - PXOR X12, X3 - MOVO X7, X12 - PSLLL $8, X12 - PSRLL $24, X7 - PXOR X12, X7 - PADDL X3, X2 - PADDL X7, X6 - PXOR X2, X1 - PXOR X6, X5 - MOVO X1, X12 - PSLLL $7, X12 - PSRLL $25, X1 - PXOR X12, X1 - MOVO X5, X12 - PSLLL $7, X12 - PSRLL $25, X5 - PXOR X12, X5 - PSHUFL $147, X1, X1 - PSHUFL $147, X5, X5 - PSHUFL $78, X2, X2 - PSHUFL $78, X6, X6 - PSHUFL $57, X3, X3 - PSHUFL $57, X7, X7 - SUBQ $2, SI - JNE rounds_loop2_begin - PADDL X8, X0 - PADDL X9, X1 - PADDL X10, X2 - PADDL X11, X3 - MOVOU 0(BX), X12 - PXOR X0, X12 - MOVOU X12, 0(CX) - MOVOU 16(BX), X12 - PXOR X1, X12 - MOVOU X12, 16(CX) - MOVOU 32(BX), X12 - PXOR X2, X12 - MOVOU X12, 32(CX) - MOVOU 48(BX), X12 - PXOR X3, X12 - MOVOU X12, 48(CX) - PADDQ X13, X11 - PADDL X8, X4 - PADDL X9, X5 - PADDL X10, X6 - PADDL X11, X7 - MOVOU 64(BX), X12 - PXOR X4, X12 - MOVOU X12, 64(CX) - MOVOU 80(BX), X12 - PXOR X5, X12 - MOVOU X12, 80(CX) - MOVOU 96(BX), X12 - PXOR X6, X12 - MOVOU X12, 96(CX) - MOVOU 112(BX), X12 - PXOR X7, X12 - MOVOU X12, 112(CX) - PADDQ X13, X11 - ADDQ $128, BX - ADDQ $128, CX - SUBQ $2, DX - JCC vector_loop2_begin -vector_loop2_end: - ADDQ $2, DX - JEQ out_serial - MOVO X8, X0 - MOVO X9, X1 - MOVO X10, X2 - MOVO X11, X3 - MOVQ $20, DX -rounds_loop1_begin: - PADDL X1, X0 - PXOR X0, X3 - MOVO X3, X12 - PSLLL $16, X12 - PSRLL $16, X3 - PXOR X12, X3 - PADDL X3, X2 - PXOR X2, X1 - MOVO X1, X12 - PSLLL $12, X12 - PSRLL $20, X1 - PXOR X12, X1 - PADDL X1, X0 - PXOR X0, X3 - MOVO X3, X12 - PSLLL $8, X12 - PSRLL $24, X3 - PXOR X12, X3 - PADDL X3, X2 - PXOR X2, X1 - MOVO X1, X12 - PSLLL $7, X12 - PSRLL $25, X1 - PXOR X12, X1 - PSHUFL $57, X1, X1 - PSHUFL $78, X2, X2 - PSHUFL $147, X3, X3 - PADDL X1, X0 - PXOR X0, X3 - MOVO X3, X12 - PSLLL $16, X12 - PSRLL $16, X3 - PXOR X12, X3 - PADDL X3, X2 - PXOR X2, X1 - MOVO X1, X12 - PSLLL $12, X12 - PSRLL $20, X1 - PXOR X12, X1 - PADDL X1, X0 - PXOR X0, X3 - MOVO X3, X12 - PSLLL $8, X12 - PSRLL $24, X3 - PXOR X12, X3 - 
PADDL X3, X2 - PXOR X2, X1 - MOVO X1, X12 - PSLLL $7, X12 - PSRLL $25, X1 - PXOR X12, X1 - PSHUFL $147, X1, X1 - PSHUFL $78, X2, X2 - PSHUFL $57, X3, X3 - SUBQ $2, DX - JNE rounds_loop1_begin - PADDL X8, X0 - PADDL X9, X1 - PADDL X10, X2 - PADDL X11, X3 - MOVOU 0(BX), X12 - PXOR X0, X12 - MOVOU X12, 0(CX) - MOVOU 16(BX), X12 - PXOR X1, X12 - MOVOU X12, 16(CX) - MOVOU 32(BX), X12 - PXOR X2, X12 - MOVOU X12, 32(CX) - MOVOU 48(BX), X12 - PXOR X3, X12 - MOVOU X12, 48(CX) - PADDQ X13, X11 -out_serial: - MOVO X11, 48(AX) -out: - PXOR X0, X0 - MOVO X0, 16(SP) - MOVQ DI, SP - RET - -// func blocksAmd64AVX2(x *uint32, inp *uint8, outp *uint8, nrBlocks *uint) -TEXT ·blocksAmd64AVX2(SB),4,$0-32 - MOVQ x+0(FP), AX - MOVQ inp+8(FP), BX - MOVQ outp+16(FP), CX - MOVQ nrBlocks+24(FP), DX - MOVQ SP, DI - MOVQ $31, SI - NOTQ SI - ANDQ SI, SP - SUBQ $32, SP - SUBQ $96, SP - BYTE $0xC4; BYTE $0x41; BYTE $0x1D; BYTE $0xEF; BYTE $0xE4 // VPXOR ymm12, ymm12, ymm12 - BYTE $0xC5; BYTE $0x7E; BYTE $0x7F; BYTE $0x24; BYTE $0x24 // VMOVDQU [rsp], ymm12 - MOVL $1, SI - MOVL SI, 16(SP) - BYTE $0xC4; BYTE $0xE2; BYTE $0x7D; BYTE $0x5A; BYTE $0x58; BYTE $0x30 // VBROADCASTI128 ymm3, [rax + 48] - BYTE $0xC5; BYTE $0xE5; BYTE $0xD4; BYTE $0x1C; BYTE $0x24 // VPADDQ ymm3, ymm3, [rsp] - BYTE $0xC5; BYTE $0xFD; BYTE $0x7F; BYTE $0x5C; BYTE $0x24; BYTE $0x20 // VMOVDQA [rsp + 32], ymm3 - MOVL $2, SI - MOVL SI, 0(SP) - MOVL SI, 16(SP) - SUBQ $8, DX - JCS vector_loop8_end -vector_loop8_begin: - BYTE $0xC4; BYTE $0xE2; BYTE $0x7D; BYTE $0x5A; BYTE $0x00 // VBROADCASTI128 ymm0, [rax] - BYTE $0xC4; BYTE $0xE2; BYTE $0x7D; BYTE $0x5A; BYTE $0x48; BYTE $0x10 // VBROADCASTI128 ymm1, [rax + 16] - BYTE $0xC4; BYTE $0xE2; BYTE $0x7D; BYTE $0x5A; BYTE $0x50; BYTE $0x20 // VBROADCASTI128 ymm2, [rax + 32] - BYTE $0xC5; BYTE $0xFD; BYTE $0x6F; BYTE $0x5C; BYTE $0x24; BYTE $0x20 // VMOVDQA ymm3, [rsp + 32] - BYTE $0xC5; BYTE $0xFD; BYTE $0x6F; BYTE $0xE0 // VMOVDQA ymm4, ymm0 - BYTE $0xC5; BYTE $0xFD; BYTE $0x6F; BYTE $0xE9 // VMOVDQA ymm5, ymm1 - BYTE $0xC5; BYTE $0xFD; BYTE $0x6F; BYTE $0xF2 // VMOVDQA ymm6, ymm2 - BYTE $0xC5; BYTE $0xE5; BYTE $0xD4; BYTE $0x3C; BYTE $0x24 // VPADDQ ymm7, ymm3, [rsp] - BYTE $0xC5; BYTE $0x7D; BYTE $0x6F; BYTE $0xC0 // VMOVDQA ymm8, ymm0 - BYTE $0xC5; BYTE $0x7D; BYTE $0x6F; BYTE $0xC9 // VMOVDQA ymm9, ymm1 - BYTE $0xC5; BYTE $0x7D; BYTE $0x6F; BYTE $0xD2 // VMOVDQA ymm10, ymm2 - BYTE $0xC5; BYTE $0x45; BYTE $0xD4; BYTE $0x1C; BYTE $0x24 // VPADDQ ymm11, ymm7, [rsp] - BYTE $0xC5; BYTE $0x7D; BYTE $0x6F; BYTE $0xE0 // VMOVDQA ymm12, ymm0 - BYTE $0xC5; BYTE $0x7D; BYTE $0x6F; BYTE $0xE9 // VMOVDQA ymm13, ymm1 - BYTE $0xC5; BYTE $0x7D; BYTE $0x6F; BYTE $0xF2 // VMOVDQA ymm14, ymm2 - BYTE $0xC5; BYTE $0x25; BYTE $0xD4; BYTE $0x3C; BYTE $0x24 // VPADDQ ymm15, ymm11, [rsp] - MOVQ $20, SI -rounds_loop8_begin: - BYTE $0xC5; BYTE $0xFD; BYTE $0xFE; BYTE $0xC1 // VPADDD ymm0, ymm0, ymm1 - BYTE $0xC5; BYTE $0xDD; BYTE $0xFE; BYTE $0xE5 // VPADDD ymm4, ymm4, ymm5 - BYTE $0xC4; BYTE $0x41; BYTE $0x3D; BYTE $0xFE; BYTE $0xC1 // VPADDD ymm8, ymm8, ymm9 - BYTE $0xC4; BYTE $0x41; BYTE $0x1D; BYTE $0xFE; BYTE $0xE5 // VPADDD ymm12, ymm12, ymm13 - BYTE $0xC5; BYTE $0xE5; BYTE $0xEF; BYTE $0xD8 // VPXOR ymm3, ymm3, ymm0 - BYTE $0xC5; BYTE $0xC5; BYTE $0xEF; BYTE $0xFC // VPXOR ymm7, ymm7, ymm4 - BYTE $0xC4; BYTE $0x41; BYTE $0x25; BYTE $0xEF; BYTE $0xD8 // VPXOR ymm11, ymm11, ymm8 - BYTE $0xC4; BYTE $0x41; BYTE $0x05; BYTE $0xEF; BYTE $0xFC // VPXOR ymm15, ymm15, ymm12 - BYTE $0xC5; BYTE $0x7D; BYTE $0x7F; BYTE $0x64; BYTE 
$0x24; BYTE $0x40 // VMOVDQA [rsp + 64], ymm12 - BYTE $0xC5; BYTE $0x9D; BYTE $0x72; BYTE $0xF3; BYTE $0x10 // VPSLLD ymm12, ymm3, 16 - BYTE $0xC5; BYTE $0xE5; BYTE $0x72; BYTE $0xD3; BYTE $0x10 // VPSRLD ymm3, ymm3, 16 - BYTE $0xC4; BYTE $0xC1; BYTE $0x65; BYTE $0xEF; BYTE $0xDC // VPXOR ymm3, ymm3, ymm12 - BYTE $0xC5; BYTE $0x9D; BYTE $0x72; BYTE $0xF7; BYTE $0x10 // VPSLLD ymm12, ymm7, 16 - BYTE $0xC5; BYTE $0xC5; BYTE $0x72; BYTE $0xD7; BYTE $0x10 // VPSRLD ymm7, ymm7, 16 - BYTE $0xC4; BYTE $0xC1; BYTE $0x45; BYTE $0xEF; BYTE $0xFC // VPXOR ymm7, ymm7, ymm12 - BYTE $0xC4; BYTE $0xC1; BYTE $0x1D; BYTE $0x72; BYTE $0xF3; BYTE $0x10 // VPSLLD ymm12, ymm11, 16 - BYTE $0xC4; BYTE $0xC1; BYTE $0x25; BYTE $0x72; BYTE $0xD3; BYTE $0x10 // VPSRLD ymm11, ymm11, 16 - BYTE $0xC4; BYTE $0x41; BYTE $0x25; BYTE $0xEF; BYTE $0xDC // VPXOR ymm11, ymm11, ymm12 - BYTE $0xC4; BYTE $0xC1; BYTE $0x1D; BYTE $0x72; BYTE $0xF7; BYTE $0x10 // VPSLLD ymm12, ymm15, 16 - BYTE $0xC4; BYTE $0xC1; BYTE $0x05; BYTE $0x72; BYTE $0xD7; BYTE $0x10 // VPSRLD ymm15, ymm15, 16 - BYTE $0xC4; BYTE $0x41; BYTE $0x05; BYTE $0xEF; BYTE $0xFC // VPXOR ymm15, ymm15, ymm12 - BYTE $0xC5; BYTE $0xED; BYTE $0xFE; BYTE $0xD3 // VPADDD ymm2, ymm2, ymm3 - BYTE $0xC5; BYTE $0xCD; BYTE $0xFE; BYTE $0xF7 // VPADDD ymm6, ymm6, ymm7 - BYTE $0xC4; BYTE $0x41; BYTE $0x2D; BYTE $0xFE; BYTE $0xD3 // VPADDD ymm10, ymm10, ymm11 - BYTE $0xC4; BYTE $0x41; BYTE $0x0D; BYTE $0xFE; BYTE $0xF7 // VPADDD ymm14, ymm14, ymm15 - BYTE $0xC5; BYTE $0xF5; BYTE $0xEF; BYTE $0xCA // VPXOR ymm1, ymm1, ymm2 - BYTE $0xC5; BYTE $0xD5; BYTE $0xEF; BYTE $0xEE // VPXOR ymm5, ymm5, ymm6 - BYTE $0xC4; BYTE $0x41; BYTE $0x35; BYTE $0xEF; BYTE $0xCA // VPXOR ymm9, ymm9, ymm10 - BYTE $0xC4; BYTE $0x41; BYTE $0x15; BYTE $0xEF; BYTE $0xEE // VPXOR ymm13, ymm13, ymm14 - BYTE $0xC5; BYTE $0x9D; BYTE $0x72; BYTE $0xF1; BYTE $0x0C // VPSLLD ymm12, ymm1, 12 - BYTE $0xC5; BYTE $0xF5; BYTE $0x72; BYTE $0xD1; BYTE $0x14 // VPSRLD ymm1, ymm1, 20 - BYTE $0xC4; BYTE $0xC1; BYTE $0x75; BYTE $0xEF; BYTE $0xCC // VPXOR ymm1, ymm1, ymm12 - BYTE $0xC5; BYTE $0x9D; BYTE $0x72; BYTE $0xF5; BYTE $0x0C // VPSLLD ymm12, ymm5, 12 - BYTE $0xC5; BYTE $0xD5; BYTE $0x72; BYTE $0xD5; BYTE $0x14 // VPSRLD ymm5, ymm5, 20 - BYTE $0xC4; BYTE $0xC1; BYTE $0x55; BYTE $0xEF; BYTE $0xEC // VPXOR ymm5, ymm5, ymm12 - BYTE $0xC4; BYTE $0xC1; BYTE $0x1D; BYTE $0x72; BYTE $0xF1; BYTE $0x0C // VPSLLD ymm12, ymm9, 12 - BYTE $0xC4; BYTE $0xC1; BYTE $0x35; BYTE $0x72; BYTE $0xD1; BYTE $0x14 // VPSRLD ymm9, ymm9, 20 - BYTE $0xC4; BYTE $0x41; BYTE $0x35; BYTE $0xEF; BYTE $0xCC // VPXOR ymm9, ymm9, ymm12 - BYTE $0xC4; BYTE $0xC1; BYTE $0x1D; BYTE $0x72; BYTE $0xF5; BYTE $0x0C // VPSLLD ymm12, ymm13, 12 - BYTE $0xC4; BYTE $0xC1; BYTE $0x15; BYTE $0x72; BYTE $0xD5; BYTE $0x14 // VPSRLD ymm13, ymm13, 20 - BYTE $0xC4; BYTE $0x41; BYTE $0x15; BYTE $0xEF; BYTE $0xEC // VPXOR ymm13, ymm13, ymm12 - BYTE $0xC5; BYTE $0x7D; BYTE $0x6F; BYTE $0x64; BYTE $0x24; BYTE $0x40 // VMOVDQA ymm12, [rsp + 64] - BYTE $0xC5; BYTE $0xFD; BYTE $0xFE; BYTE $0xC1 // VPADDD ymm0, ymm0, ymm1 - BYTE $0xC5; BYTE $0xDD; BYTE $0xFE; BYTE $0xE5 // VPADDD ymm4, ymm4, ymm5 - BYTE $0xC4; BYTE $0x41; BYTE $0x3D; BYTE $0xFE; BYTE $0xC1 // VPADDD ymm8, ymm8, ymm9 - BYTE $0xC4; BYTE $0x41; BYTE $0x1D; BYTE $0xFE; BYTE $0xE5 // VPADDD ymm12, ymm12, ymm13 - BYTE $0xC5; BYTE $0xE5; BYTE $0xEF; BYTE $0xD8 // VPXOR ymm3, ymm3, ymm0 - BYTE $0xC5; BYTE $0xC5; BYTE $0xEF; BYTE $0xFC // VPXOR ymm7, ymm7, ymm4 - BYTE $0xC4; BYTE $0x41; BYTE $0x25; BYTE $0xEF; BYTE $0xD8 // 
VPXOR ymm11, ymm11, ymm8 - BYTE $0xC4; BYTE $0x41; BYTE $0x05; BYTE $0xEF; BYTE $0xFC // VPXOR ymm15, ymm15, ymm12 - BYTE $0xC5; BYTE $0x7D; BYTE $0x7F; BYTE $0x64; BYTE $0x24; BYTE $0x40 // VMOVDQA [rsp + 64], ymm12 - BYTE $0xC5; BYTE $0x9D; BYTE $0x72; BYTE $0xF3; BYTE $0x08 // VPSLLD ymm12, ymm3, 8 - BYTE $0xC5; BYTE $0xE5; BYTE $0x72; BYTE $0xD3; BYTE $0x18 // VPSRLD ymm3, ymm3, 24 - BYTE $0xC4; BYTE $0xC1; BYTE $0x65; BYTE $0xEF; BYTE $0xDC // VPXOR ymm3, ymm3, ymm12 - BYTE $0xC5; BYTE $0x9D; BYTE $0x72; BYTE $0xF7; BYTE $0x08 // VPSLLD ymm12, ymm7, 8 - BYTE $0xC5; BYTE $0xC5; BYTE $0x72; BYTE $0xD7; BYTE $0x18 // VPSRLD ymm7, ymm7, 24 - BYTE $0xC4; BYTE $0xC1; BYTE $0x45; BYTE $0xEF; BYTE $0xFC // VPXOR ymm7, ymm7, ymm12 - BYTE $0xC4; BYTE $0xC1; BYTE $0x1D; BYTE $0x72; BYTE $0xF3; BYTE $0x08 // VPSLLD ymm12, ymm11, 8 - BYTE $0xC4; BYTE $0xC1; BYTE $0x25; BYTE $0x72; BYTE $0xD3; BYTE $0x18 // VPSRLD ymm11, ymm11, 24 - BYTE $0xC4; BYTE $0x41; BYTE $0x25; BYTE $0xEF; BYTE $0xDC // VPXOR ymm11, ymm11, ymm12 - BYTE $0xC4; BYTE $0xC1; BYTE $0x1D; BYTE $0x72; BYTE $0xF7; BYTE $0x08 // VPSLLD ymm12, ymm15, 8 - BYTE $0xC4; BYTE $0xC1; BYTE $0x05; BYTE $0x72; BYTE $0xD7; BYTE $0x18 // VPSRLD ymm15, ymm15, 24 - BYTE $0xC4; BYTE $0x41; BYTE $0x05; BYTE $0xEF; BYTE $0xFC // VPXOR ymm15, ymm15, ymm12 - BYTE $0xC5; BYTE $0xED; BYTE $0xFE; BYTE $0xD3 // VPADDD ymm2, ymm2, ymm3 - BYTE $0xC5; BYTE $0xCD; BYTE $0xFE; BYTE $0xF7 // VPADDD ymm6, ymm6, ymm7 - BYTE $0xC4; BYTE $0x41; BYTE $0x2D; BYTE $0xFE; BYTE $0xD3 // VPADDD ymm10, ymm10, ymm11 - BYTE $0xC4; BYTE $0x41; BYTE $0x0D; BYTE $0xFE; BYTE $0xF7 // VPADDD ymm14, ymm14, ymm15 - BYTE $0xC5; BYTE $0xF5; BYTE $0xEF; BYTE $0xCA // VPXOR ymm1, ymm1, ymm2 - BYTE $0xC5; BYTE $0xD5; BYTE $0xEF; BYTE $0xEE // VPXOR ymm5, ymm5, ymm6 - BYTE $0xC4; BYTE $0x41; BYTE $0x35; BYTE $0xEF; BYTE $0xCA // VPXOR ymm9, ymm9, ymm10 - BYTE $0xC4; BYTE $0x41; BYTE $0x15; BYTE $0xEF; BYTE $0xEE // VPXOR ymm13, ymm13, ymm14 - BYTE $0xC5; BYTE $0x9D; BYTE $0x72; BYTE $0xF1; BYTE $0x07 // VPSLLD ymm12, ymm1, 7 - BYTE $0xC5; BYTE $0xF5; BYTE $0x72; BYTE $0xD1; BYTE $0x19 // VPSRLD ymm1, ymm1, 25 - BYTE $0xC4; BYTE $0xC1; BYTE $0x75; BYTE $0xEF; BYTE $0xCC // VPXOR ymm1, ymm1, ymm12 - BYTE $0xC5; BYTE $0x9D; BYTE $0x72; BYTE $0xF5; BYTE $0x07 // VPSLLD ymm12, ymm5, 7 - BYTE $0xC5; BYTE $0xD5; BYTE $0x72; BYTE $0xD5; BYTE $0x19 // VPSRLD ymm5, ymm5, 25 - BYTE $0xC4; BYTE $0xC1; BYTE $0x55; BYTE $0xEF; BYTE $0xEC // VPXOR ymm5, ymm5, ymm12 - BYTE $0xC4; BYTE $0xC1; BYTE $0x1D; BYTE $0x72; BYTE $0xF1; BYTE $0x07 // VPSLLD ymm12, ymm9, 7 - BYTE $0xC4; BYTE $0xC1; BYTE $0x35; BYTE $0x72; BYTE $0xD1; BYTE $0x19 // VPSRLD ymm9, ymm9, 25 - BYTE $0xC4; BYTE $0x41; BYTE $0x35; BYTE $0xEF; BYTE $0xCC // VPXOR ymm9, ymm9, ymm12 - BYTE $0xC4; BYTE $0xC1; BYTE $0x1D; BYTE $0x72; BYTE $0xF5; BYTE $0x07 // VPSLLD ymm12, ymm13, 7 - BYTE $0xC4; BYTE $0xC1; BYTE $0x15; BYTE $0x72; BYTE $0xD5; BYTE $0x19 // VPSRLD ymm13, ymm13, 25 - BYTE $0xC4; BYTE $0x41; BYTE $0x15; BYTE $0xEF; BYTE $0xEC // VPXOR ymm13, ymm13, ymm12 - BYTE $0xC5; BYTE $0xFD; BYTE $0x70; BYTE $0xC9; BYTE $0x39 // VPSHUFD ymm1, ymm1, 57 - BYTE $0xC5; BYTE $0xFD; BYTE $0x70; BYTE $0xED; BYTE $0x39 // VPSHUFD ymm5, ymm5, 57 - BYTE $0xC4; BYTE $0x41; BYTE $0x7D; BYTE $0x70; BYTE $0xC9; BYTE $0x39 // VPSHUFD ymm9, ymm9, 57 - BYTE $0xC4; BYTE $0x41; BYTE $0x7D; BYTE $0x70; BYTE $0xED; BYTE $0x39 // VPSHUFD ymm13, ymm13, 57 - BYTE $0xC5; BYTE $0xFD; BYTE $0x70; BYTE $0xD2; BYTE $0x4E // VPSHUFD ymm2, ymm2, 78 - BYTE $0xC5; BYTE 
$0xFD; BYTE $0x70; BYTE $0xF6; BYTE $0x4E // VPSHUFD ymm6, ymm6, 78 - BYTE $0xC4; BYTE $0x41; BYTE $0x7D; BYTE $0x70; BYTE $0xD2; BYTE $0x4E // VPSHUFD ymm10, ymm10, 78 - BYTE $0xC4; BYTE $0x41; BYTE $0x7D; BYTE $0x70; BYTE $0xF6; BYTE $0x4E // VPSHUFD ymm14, ymm14, 78 - BYTE $0xC5; BYTE $0xFD; BYTE $0x70; BYTE $0xDB; BYTE $0x93 // VPSHUFD ymm3, ymm3, 147 - BYTE $0xC5; BYTE $0xFD; BYTE $0x70; BYTE $0xFF; BYTE $0x93 // VPSHUFD ymm7, ymm7, 147 - BYTE $0xC4; BYTE $0x41; BYTE $0x7D; BYTE $0x70; BYTE $0xDB; BYTE $0x93 // VPSHUFD ymm11, ymm11, 147 - BYTE $0xC4; BYTE $0x41; BYTE $0x7D; BYTE $0x70; BYTE $0xFF; BYTE $0x93 // VPSHUFD ymm15, ymm15, 147 - BYTE $0xC5; BYTE $0x7D; BYTE $0x6F; BYTE $0x64; BYTE $0x24; BYTE $0x40 // VMOVDQA ymm12, [rsp + 64] - BYTE $0xC5; BYTE $0xFD; BYTE $0xFE; BYTE $0xC1 // VPADDD ymm0, ymm0, ymm1 - BYTE $0xC5; BYTE $0xDD; BYTE $0xFE; BYTE $0xE5 // VPADDD ymm4, ymm4, ymm5 - BYTE $0xC4; BYTE $0x41; BYTE $0x3D; BYTE $0xFE; BYTE $0xC1 // VPADDD ymm8, ymm8, ymm9 - BYTE $0xC4; BYTE $0x41; BYTE $0x1D; BYTE $0xFE; BYTE $0xE5 // VPADDD ymm12, ymm12, ymm13 - BYTE $0xC5; BYTE $0xE5; BYTE $0xEF; BYTE $0xD8 // VPXOR ymm3, ymm3, ymm0 - BYTE $0xC5; BYTE $0xC5; BYTE $0xEF; BYTE $0xFC // VPXOR ymm7, ymm7, ymm4 - BYTE $0xC4; BYTE $0x41; BYTE $0x25; BYTE $0xEF; BYTE $0xD8 // VPXOR ymm11, ymm11, ymm8 - BYTE $0xC4; BYTE $0x41; BYTE $0x05; BYTE $0xEF; BYTE $0xFC // VPXOR ymm15, ymm15, ymm12 - BYTE $0xC5; BYTE $0x7D; BYTE $0x7F; BYTE $0x64; BYTE $0x24; BYTE $0x40 // VMOVDQA [rsp + 64], ymm12 - BYTE $0xC5; BYTE $0x9D; BYTE $0x72; BYTE $0xF3; BYTE $0x10 // VPSLLD ymm12, ymm3, 16 - BYTE $0xC5; BYTE $0xE5; BYTE $0x72; BYTE $0xD3; BYTE $0x10 // VPSRLD ymm3, ymm3, 16 - BYTE $0xC4; BYTE $0xC1; BYTE $0x65; BYTE $0xEF; BYTE $0xDC // VPXOR ymm3, ymm3, ymm12 - BYTE $0xC5; BYTE $0x9D; BYTE $0x72; BYTE $0xF7; BYTE $0x10 // VPSLLD ymm12, ymm7, 16 - BYTE $0xC5; BYTE $0xC5; BYTE $0x72; BYTE $0xD7; BYTE $0x10 // VPSRLD ymm7, ymm7, 16 - BYTE $0xC4; BYTE $0xC1; BYTE $0x45; BYTE $0xEF; BYTE $0xFC // VPXOR ymm7, ymm7, ymm12 - BYTE $0xC4; BYTE $0xC1; BYTE $0x1D; BYTE $0x72; BYTE $0xF3; BYTE $0x10 // VPSLLD ymm12, ymm11, 16 - BYTE $0xC4; BYTE $0xC1; BYTE $0x25; BYTE $0x72; BYTE $0xD3; BYTE $0x10 // VPSRLD ymm11, ymm11, 16 - BYTE $0xC4; BYTE $0x41; BYTE $0x25; BYTE $0xEF; BYTE $0xDC // VPXOR ymm11, ymm11, ymm12 - BYTE $0xC4; BYTE $0xC1; BYTE $0x1D; BYTE $0x72; BYTE $0xF7; BYTE $0x10 // VPSLLD ymm12, ymm15, 16 - BYTE $0xC4; BYTE $0xC1; BYTE $0x05; BYTE $0x72; BYTE $0xD7; BYTE $0x10 // VPSRLD ymm15, ymm15, 16 - BYTE $0xC4; BYTE $0x41; BYTE $0x05; BYTE $0xEF; BYTE $0xFC // VPXOR ymm15, ymm15, ymm12 - BYTE $0xC5; BYTE $0xED; BYTE $0xFE; BYTE $0xD3 // VPADDD ymm2, ymm2, ymm3 - BYTE $0xC5; BYTE $0xCD; BYTE $0xFE; BYTE $0xF7 // VPADDD ymm6, ymm6, ymm7 - BYTE $0xC4; BYTE $0x41; BYTE $0x2D; BYTE $0xFE; BYTE $0xD3 // VPADDD ymm10, ymm10, ymm11 - BYTE $0xC4; BYTE $0x41; BYTE $0x0D; BYTE $0xFE; BYTE $0xF7 // VPADDD ymm14, ymm14, ymm15 - BYTE $0xC5; BYTE $0xF5; BYTE $0xEF; BYTE $0xCA // VPXOR ymm1, ymm1, ymm2 - BYTE $0xC5; BYTE $0xD5; BYTE $0xEF; BYTE $0xEE // VPXOR ymm5, ymm5, ymm6 - BYTE $0xC4; BYTE $0x41; BYTE $0x35; BYTE $0xEF; BYTE $0xCA // VPXOR ymm9, ymm9, ymm10 - BYTE $0xC4; BYTE $0x41; BYTE $0x15; BYTE $0xEF; BYTE $0xEE // VPXOR ymm13, ymm13, ymm14 - BYTE $0xC5; BYTE $0x9D; BYTE $0x72; BYTE $0xF1; BYTE $0x0C // VPSLLD ymm12, ymm1, 12 - BYTE $0xC5; BYTE $0xF5; BYTE $0x72; BYTE $0xD1; BYTE $0x14 // VPSRLD ymm1, ymm1, 20 - BYTE $0xC4; BYTE $0xC1; BYTE $0x75; BYTE $0xEF; BYTE $0xCC // VPXOR ymm1, ymm1, ymm12 - BYTE $0xC5; 
BYTE $0x9D; BYTE $0x72; BYTE $0xF5; BYTE $0x0C // VPSLLD ymm12, ymm5, 12 - BYTE $0xC5; BYTE $0xD5; BYTE $0x72; BYTE $0xD5; BYTE $0x14 // VPSRLD ymm5, ymm5, 20 - BYTE $0xC4; BYTE $0xC1; BYTE $0x55; BYTE $0xEF; BYTE $0xEC // VPXOR ymm5, ymm5, ymm12 - BYTE $0xC4; BYTE $0xC1; BYTE $0x1D; BYTE $0x72; BYTE $0xF1; BYTE $0x0C // VPSLLD ymm12, ymm9, 12 - BYTE $0xC4; BYTE $0xC1; BYTE $0x35; BYTE $0x72; BYTE $0xD1; BYTE $0x14 // VPSRLD ymm9, ymm9, 20 - BYTE $0xC4; BYTE $0x41; BYTE $0x35; BYTE $0xEF; BYTE $0xCC // VPXOR ymm9, ymm9, ymm12 - BYTE $0xC4; BYTE $0xC1; BYTE $0x1D; BYTE $0x72; BYTE $0xF5; BYTE $0x0C // VPSLLD ymm12, ymm13, 12 - BYTE $0xC4; BYTE $0xC1; BYTE $0x15; BYTE $0x72; BYTE $0xD5; BYTE $0x14 // VPSRLD ymm13, ymm13, 20 - BYTE $0xC4; BYTE $0x41; BYTE $0x15; BYTE $0xEF; BYTE $0xEC // VPXOR ymm13, ymm13, ymm12 - BYTE $0xC5; BYTE $0x7D; BYTE $0x6F; BYTE $0x64; BYTE $0x24; BYTE $0x40 // VMOVDQA ymm12, [rsp + 64] - BYTE $0xC5; BYTE $0xFD; BYTE $0xFE; BYTE $0xC1 // VPADDD ymm0, ymm0, ymm1 - BYTE $0xC5; BYTE $0xDD; BYTE $0xFE; BYTE $0xE5 // VPADDD ymm4, ymm4, ymm5 - BYTE $0xC4; BYTE $0x41; BYTE $0x3D; BYTE $0xFE; BYTE $0xC1 // VPADDD ymm8, ymm8, ymm9 - BYTE $0xC4; BYTE $0x41; BYTE $0x1D; BYTE $0xFE; BYTE $0xE5 // VPADDD ymm12, ymm12, ymm13 - BYTE $0xC5; BYTE $0xE5; BYTE $0xEF; BYTE $0xD8 // VPXOR ymm3, ymm3, ymm0 - BYTE $0xC5; BYTE $0xC5; BYTE $0xEF; BYTE $0xFC // VPXOR ymm7, ymm7, ymm4 - BYTE $0xC4; BYTE $0x41; BYTE $0x25; BYTE $0xEF; BYTE $0xD8 // VPXOR ymm11, ymm11, ymm8 - BYTE $0xC4; BYTE $0x41; BYTE $0x05; BYTE $0xEF; BYTE $0xFC // VPXOR ymm15, ymm15, ymm12 - BYTE $0xC5; BYTE $0x7D; BYTE $0x7F; BYTE $0x64; BYTE $0x24; BYTE $0x40 // VMOVDQA [rsp + 64], ymm12 - BYTE $0xC5; BYTE $0x9D; BYTE $0x72; BYTE $0xF3; BYTE $0x08 // VPSLLD ymm12, ymm3, 8 - BYTE $0xC5; BYTE $0xE5; BYTE $0x72; BYTE $0xD3; BYTE $0x18 // VPSRLD ymm3, ymm3, 24 - BYTE $0xC4; BYTE $0xC1; BYTE $0x65; BYTE $0xEF; BYTE $0xDC // VPXOR ymm3, ymm3, ymm12 - BYTE $0xC5; BYTE $0x9D; BYTE $0x72; BYTE $0xF7; BYTE $0x08 // VPSLLD ymm12, ymm7, 8 - BYTE $0xC5; BYTE $0xC5; BYTE $0x72; BYTE $0xD7; BYTE $0x18 // VPSRLD ymm7, ymm7, 24 - BYTE $0xC4; BYTE $0xC1; BYTE $0x45; BYTE $0xEF; BYTE $0xFC // VPXOR ymm7, ymm7, ymm12 - BYTE $0xC4; BYTE $0xC1; BYTE $0x1D; BYTE $0x72; BYTE $0xF3; BYTE $0x08 // VPSLLD ymm12, ymm11, 8 - BYTE $0xC4; BYTE $0xC1; BYTE $0x25; BYTE $0x72; BYTE $0xD3; BYTE $0x18 // VPSRLD ymm11, ymm11, 24 - BYTE $0xC4; BYTE $0x41; BYTE $0x25; BYTE $0xEF; BYTE $0xDC // VPXOR ymm11, ymm11, ymm12 - BYTE $0xC4; BYTE $0xC1; BYTE $0x1D; BYTE $0x72; BYTE $0xF7; BYTE $0x08 // VPSLLD ymm12, ymm15, 8 - BYTE $0xC4; BYTE $0xC1; BYTE $0x05; BYTE $0x72; BYTE $0xD7; BYTE $0x18 // VPSRLD ymm15, ymm15, 24 - BYTE $0xC4; BYTE $0x41; BYTE $0x05; BYTE $0xEF; BYTE $0xFC // VPXOR ymm15, ymm15, ymm12 - BYTE $0xC5; BYTE $0xED; BYTE $0xFE; BYTE $0xD3 // VPADDD ymm2, ymm2, ymm3 - BYTE $0xC5; BYTE $0xCD; BYTE $0xFE; BYTE $0xF7 // VPADDD ymm6, ymm6, ymm7 - BYTE $0xC4; BYTE $0x41; BYTE $0x2D; BYTE $0xFE; BYTE $0xD3 // VPADDD ymm10, ymm10, ymm11 - BYTE $0xC4; BYTE $0x41; BYTE $0x0D; BYTE $0xFE; BYTE $0xF7 // VPADDD ymm14, ymm14, ymm15 - BYTE $0xC5; BYTE $0xF5; BYTE $0xEF; BYTE $0xCA // VPXOR ymm1, ymm1, ymm2 - BYTE $0xC5; BYTE $0xD5; BYTE $0xEF; BYTE $0xEE // VPXOR ymm5, ymm5, ymm6 - BYTE $0xC4; BYTE $0x41; BYTE $0x35; BYTE $0xEF; BYTE $0xCA // VPXOR ymm9, ymm9, ymm10 - BYTE $0xC4; BYTE $0x41; BYTE $0x15; BYTE $0xEF; BYTE $0xEE // VPXOR ymm13, ymm13, ymm14 - BYTE $0xC5; BYTE $0x9D; BYTE $0x72; BYTE $0xF1; BYTE $0x07 // VPSLLD ymm12, ymm1, 7 - BYTE $0xC5; BYTE 
$0xF5; BYTE $0x72; BYTE $0xD1; BYTE $0x19 // VPSRLD ymm1, ymm1, 25 - BYTE $0xC4; BYTE $0xC1; BYTE $0x75; BYTE $0xEF; BYTE $0xCC // VPXOR ymm1, ymm1, ymm12 - BYTE $0xC5; BYTE $0x9D; BYTE $0x72; BYTE $0xF5; BYTE $0x07 // VPSLLD ymm12, ymm5, 7 - BYTE $0xC5; BYTE $0xD5; BYTE $0x72; BYTE $0xD5; BYTE $0x19 // VPSRLD ymm5, ymm5, 25 - BYTE $0xC4; BYTE $0xC1; BYTE $0x55; BYTE $0xEF; BYTE $0xEC // VPXOR ymm5, ymm5, ymm12 - BYTE $0xC4; BYTE $0xC1; BYTE $0x1D; BYTE $0x72; BYTE $0xF1; BYTE $0x07 // VPSLLD ymm12, ymm9, 7 - BYTE $0xC4; BYTE $0xC1; BYTE $0x35; BYTE $0x72; BYTE $0xD1; BYTE $0x19 // VPSRLD ymm9, ymm9, 25 - BYTE $0xC4; BYTE $0x41; BYTE $0x35; BYTE $0xEF; BYTE $0xCC // VPXOR ymm9, ymm9, ymm12 - BYTE $0xC4; BYTE $0xC1; BYTE $0x1D; BYTE $0x72; BYTE $0xF5; BYTE $0x07 // VPSLLD ymm12, ymm13, 7 - BYTE $0xC4; BYTE $0xC1; BYTE $0x15; BYTE $0x72; BYTE $0xD5; BYTE $0x19 // VPSRLD ymm13, ymm13, 25 - BYTE $0xC4; BYTE $0x41; BYTE $0x15; BYTE $0xEF; BYTE $0xEC // VPXOR ymm13, ymm13, ymm12 - BYTE $0xC5; BYTE $0xFD; BYTE $0x70; BYTE $0xC9; BYTE $0x93 // VPSHUFD ymm1, ymm1, 147 - BYTE $0xC5; BYTE $0xFD; BYTE $0x70; BYTE $0xED; BYTE $0x93 // VPSHUFD ymm5, ymm5, 147 - BYTE $0xC4; BYTE $0x41; BYTE $0x7D; BYTE $0x70; BYTE $0xC9; BYTE $0x93 // VPSHUFD ymm9, ymm9, 147 - BYTE $0xC4; BYTE $0x41; BYTE $0x7D; BYTE $0x70; BYTE $0xED; BYTE $0x93 // VPSHUFD ymm13, ymm13, 147 - BYTE $0xC5; BYTE $0xFD; BYTE $0x70; BYTE $0xD2; BYTE $0x4E // VPSHUFD ymm2, ymm2, 78 - BYTE $0xC5; BYTE $0xFD; BYTE $0x70; BYTE $0xF6; BYTE $0x4E // VPSHUFD ymm6, ymm6, 78 - BYTE $0xC4; BYTE $0x41; BYTE $0x7D; BYTE $0x70; BYTE $0xD2; BYTE $0x4E // VPSHUFD ymm10, ymm10, 78 - BYTE $0xC4; BYTE $0x41; BYTE $0x7D; BYTE $0x70; BYTE $0xF6; BYTE $0x4E // VPSHUFD ymm14, ymm14, 78 - BYTE $0xC5; BYTE $0xFD; BYTE $0x70; BYTE $0xDB; BYTE $0x39 // VPSHUFD ymm3, ymm3, 57 - BYTE $0xC5; BYTE $0xFD; BYTE $0x70; BYTE $0xFF; BYTE $0x39 // VPSHUFD ymm7, ymm7, 57 - BYTE $0xC4; BYTE $0x41; BYTE $0x7D; BYTE $0x70; BYTE $0xDB; BYTE $0x39 // VPSHUFD ymm11, ymm11, 57 - BYTE $0xC4; BYTE $0x41; BYTE $0x7D; BYTE $0x70; BYTE $0xFF; BYTE $0x39 // VPSHUFD ymm15, ymm15, 57 - BYTE $0xC5; BYTE $0x7D; BYTE $0x6F; BYTE $0x64; BYTE $0x24; BYTE $0x40 // VMOVDQA ymm12, [rsp + 64] - SUBQ $2, SI - JNE rounds_loop8_begin - BYTE $0xC4; BYTE $0x62; BYTE $0x7D; BYTE $0x5A; BYTE $0x20 // VBROADCASTI128 ymm12, [rax] - BYTE $0xC4; BYTE $0xC1; BYTE $0x7D; BYTE $0xFE; BYTE $0xC4 // VPADDD ymm0, ymm0, ymm12 - BYTE $0xC4; BYTE $0xC1; BYTE $0x5D; BYTE $0xFE; BYTE $0xE4 // VPADDD ymm4, ymm4, ymm12 - BYTE $0xC4; BYTE $0x41; BYTE $0x3D; BYTE $0xFE; BYTE $0xC4 // VPADDD ymm8, ymm8, ymm12 - BYTE $0xC5; BYTE $0x1D; BYTE $0xFE; BYTE $0x64; BYTE $0x24; BYTE $0x40 // VPADDD ymm12, ymm12, [rsp + 64] - BYTE $0xC5; BYTE $0x7D; BYTE $0x7F; BYTE $0x64; BYTE $0x24; BYTE $0x40 // VMOVDQA [rsp + 64], ymm12 - BYTE $0xC4; BYTE $0x62; BYTE $0x7D; BYTE $0x5A; BYTE $0x60; BYTE $0x10 // VBROADCASTI128 ymm12, [rax + 16] - BYTE $0xC4; BYTE $0xC1; BYTE $0x75; BYTE $0xFE; BYTE $0xCC // VPADDD ymm1, ymm1, ymm12 - BYTE $0xC4; BYTE $0xC1; BYTE $0x55; BYTE $0xFE; BYTE $0xEC // VPADDD ymm5, ymm5, ymm12 - BYTE $0xC4; BYTE $0x41; BYTE $0x35; BYTE $0xFE; BYTE $0xCC // VPADDD ymm9, ymm9, ymm12 - BYTE $0xC4; BYTE $0x41; BYTE $0x15; BYTE $0xFE; BYTE $0xEC // VPADDD ymm13, ymm13, ymm12 - BYTE $0xC4; BYTE $0x62; BYTE $0x7D; BYTE $0x5A; BYTE $0x60; BYTE $0x20 // VBROADCASTI128 ymm12, [rax + 32] - BYTE $0xC4; BYTE $0xC1; BYTE $0x6D; BYTE $0xFE; BYTE $0xD4 // VPADDD ymm2, ymm2, ymm12 - BYTE $0xC4; BYTE $0xC1; BYTE $0x4D; BYTE $0xFE; BYTE $0xF4 
// VPADDD ymm6, ymm6, ymm12 - BYTE $0xC4; BYTE $0x41; BYTE $0x2D; BYTE $0xFE; BYTE $0xD4 // VPADDD ymm10, ymm10, ymm12 - BYTE $0xC4; BYTE $0x41; BYTE $0x0D; BYTE $0xFE; BYTE $0xF4 // VPADDD ymm14, ymm14, ymm12 - BYTE $0xC5; BYTE $0xE5; BYTE $0xFE; BYTE $0x5C; BYTE $0x24; BYTE $0x20 // VPADDD ymm3, ymm3, [rsp + 32] - BYTE $0xC4; BYTE $0x63; BYTE $0x7D; BYTE $0x46; BYTE $0xE1; BYTE $0x20 // VPERM2I128 ymm12, ymm0, ymm1, 32 - BYTE $0xC5; BYTE $0x1D; BYTE $0xEF; BYTE $0x23 // VPXOR ymm12, ymm12, [rbx] - BYTE $0xC5; BYTE $0x7E; BYTE $0x7F; BYTE $0x21 // VMOVDQU [rcx], ymm12 - BYTE $0xC4; BYTE $0x63; BYTE $0x6D; BYTE $0x46; BYTE $0xE3; BYTE $0x20 // VPERM2I128 ymm12, ymm2, ymm3, 32 - BYTE $0xC5; BYTE $0x1D; BYTE $0xEF; BYTE $0x63; BYTE $0x20 // VPXOR ymm12, ymm12, [rbx + 32] - BYTE $0xC5; BYTE $0x7E; BYTE $0x7F; BYTE $0x61; BYTE $0x20 // VMOVDQU [rcx + 32], ymm12 - BYTE $0xC4; BYTE $0x63; BYTE $0x7D; BYTE $0x46; BYTE $0xE1; BYTE $0x31 // VPERM2I128 ymm12, ymm0, ymm1, 49 - BYTE $0xC5; BYTE $0x1D; BYTE $0xEF; BYTE $0x63; BYTE $0x40 // VPXOR ymm12, ymm12, [rbx + 64] - BYTE $0xC5; BYTE $0x7E; BYTE $0x7F; BYTE $0x61; BYTE $0x40 // VMOVDQU [rcx + 64], ymm12 - BYTE $0xC4; BYTE $0x63; BYTE $0x6D; BYTE $0x46; BYTE $0xE3; BYTE $0x31 // VPERM2I128 ymm12, ymm2, ymm3, 49 - BYTE $0xC5; BYTE $0x1D; BYTE $0xEF; BYTE $0x63; BYTE $0x60 // VPXOR ymm12, ymm12, [rbx + 96] - BYTE $0xC5; BYTE $0x7E; BYTE $0x7F; BYTE $0x61; BYTE $0x60 // VMOVDQU [rcx + 96], ymm12 - BYTE $0xC5; BYTE $0xFD; BYTE $0x6F; BYTE $0x5C; BYTE $0x24; BYTE $0x20 // VMOVDQA ymm3, [rsp + 32] - BYTE $0xC5; BYTE $0xE5; BYTE $0xFE; BYTE $0x1C; BYTE $0x24 // VPADDD ymm3, ymm3, [rsp] - BYTE $0xC5; BYTE $0xC5; BYTE $0xFE; BYTE $0xFB // VPADDD ymm7, ymm7, ymm3 - BYTE $0xC4; BYTE $0x63; BYTE $0x5D; BYTE $0x46; BYTE $0xE5; BYTE $0x20 // VPERM2I128 ymm12, ymm4, ymm5, 32 - BYTE $0xC5; BYTE $0x1D; BYTE $0xEF; BYTE $0xA3; BYTE $0x80; BYTE $0x00; BYTE $0x00; BYTE $0x00 // VPXOR ymm12, ymm12, [rbx + 128] - BYTE $0xC5; BYTE $0x7E; BYTE $0x7F; BYTE $0xA1; BYTE $0x80; BYTE $0x00; BYTE $0x00; BYTE $0x00 // VMOVDQU [rcx + 128], ymm12 - BYTE $0xC4; BYTE $0x63; BYTE $0x4D; BYTE $0x46; BYTE $0xE7; BYTE $0x20 // VPERM2I128 ymm12, ymm6, ymm7, 32 - BYTE $0xC5; BYTE $0x1D; BYTE $0xEF; BYTE $0xA3; BYTE $0xA0; BYTE $0x00; BYTE $0x00; BYTE $0x00 // VPXOR ymm12, ymm12, [rbx + 160] - BYTE $0xC5; BYTE $0x7E; BYTE $0x7F; BYTE $0xA1; BYTE $0xA0; BYTE $0x00; BYTE $0x00; BYTE $0x00 // VMOVDQU [rcx + 160], ymm12 - BYTE $0xC4; BYTE $0x63; BYTE $0x5D; BYTE $0x46; BYTE $0xE5; BYTE $0x31 // VPERM2I128 ymm12, ymm4, ymm5, 49 - BYTE $0xC5; BYTE $0x1D; BYTE $0xEF; BYTE $0xA3; BYTE $0xC0; BYTE $0x00; BYTE $0x00; BYTE $0x00 // VPXOR ymm12, ymm12, [rbx + 192] - BYTE $0xC5; BYTE $0x7E; BYTE $0x7F; BYTE $0xA1; BYTE $0xC0; BYTE $0x00; BYTE $0x00; BYTE $0x00 // VMOVDQU [rcx + 192], ymm12 - BYTE $0xC4; BYTE $0x63; BYTE $0x4D; BYTE $0x46; BYTE $0xE7; BYTE $0x31 // VPERM2I128 ymm12, ymm6, ymm7, 49 - BYTE $0xC5; BYTE $0x1D; BYTE $0xEF; BYTE $0xA3; BYTE $0xE0; BYTE $0x00; BYTE $0x00; BYTE $0x00 // VPXOR ymm12, ymm12, [rbx + 224] - BYTE $0xC5; BYTE $0x7E; BYTE $0x7F; BYTE $0xA1; BYTE $0xE0; BYTE $0x00; BYTE $0x00; BYTE $0x00 // VMOVDQU [rcx + 224], ymm12 - BYTE $0xC5; BYTE $0xE5; BYTE $0xFE; BYTE $0x1C; BYTE $0x24 // VPADDD ymm3, ymm3, [rsp] - BYTE $0xC5; BYTE $0x25; BYTE $0xFE; BYTE $0xDB // VPADDD ymm11, ymm11, ymm3 - BYTE $0xC4; BYTE $0x43; BYTE $0x3D; BYTE $0x46; BYTE $0xE1; BYTE $0x20 // VPERM2I128 ymm12, ymm8, ymm9, 32 - BYTE $0xC5; BYTE $0x1D; BYTE $0xEF; BYTE $0xA3; BYTE $0x00; BYTE $0x01; BYTE 
$0x00; BYTE $0x00 // VPXOR ymm12, ymm12, [rbx + 256] - BYTE $0xC5; BYTE $0x7E; BYTE $0x7F; BYTE $0xA1; BYTE $0x00; BYTE $0x01; BYTE $0x00; BYTE $0x00 // VMOVDQU [rcx + 256], ymm12 - BYTE $0xC4; BYTE $0x43; BYTE $0x2D; BYTE $0x46; BYTE $0xE3; BYTE $0x20 // VPERM2I128 ymm12, ymm10, ymm11, 32 - BYTE $0xC5; BYTE $0x1D; BYTE $0xEF; BYTE $0xA3; BYTE $0x20; BYTE $0x01; BYTE $0x00; BYTE $0x00 // VPXOR ymm12, ymm12, [rbx + 288] - BYTE $0xC5; BYTE $0x7E; BYTE $0x7F; BYTE $0xA1; BYTE $0x20; BYTE $0x01; BYTE $0x00; BYTE $0x00 // VMOVDQU [rcx + 288], ymm12 - BYTE $0xC4; BYTE $0x43; BYTE $0x3D; BYTE $0x46; BYTE $0xE1; BYTE $0x31 // VPERM2I128 ymm12, ymm8, ymm9, 49 - BYTE $0xC5; BYTE $0x1D; BYTE $0xEF; BYTE $0xA3; BYTE $0x40; BYTE $0x01; BYTE $0x00; BYTE $0x00 // VPXOR ymm12, ymm12, [rbx + 320] - BYTE $0xC5; BYTE $0x7E; BYTE $0x7F; BYTE $0xA1; BYTE $0x40; BYTE $0x01; BYTE $0x00; BYTE $0x00 // VMOVDQU [rcx + 320], ymm12 - BYTE $0xC4; BYTE $0x43; BYTE $0x2D; BYTE $0x46; BYTE $0xE3; BYTE $0x31 // VPERM2I128 ymm12, ymm10, ymm11, 49 - BYTE $0xC5; BYTE $0x1D; BYTE $0xEF; BYTE $0xA3; BYTE $0x60; BYTE $0x01; BYTE $0x00; BYTE $0x00 // VPXOR ymm12, ymm12, [rbx + 352] - BYTE $0xC5; BYTE $0x7E; BYTE $0x7F; BYTE $0xA1; BYTE $0x60; BYTE $0x01; BYTE $0x00; BYTE $0x00 // VMOVDQU [rcx + 352], ymm12 - BYTE $0xC5; BYTE $0xE5; BYTE $0xFE; BYTE $0x1C; BYTE $0x24 // VPADDD ymm3, ymm3, [rsp] - BYTE $0xC5; BYTE $0x7D; BYTE $0x6F; BYTE $0x64; BYTE $0x24; BYTE $0x40 // VMOVDQA ymm12, [rsp + 64] - BYTE $0xC5; BYTE $0x05; BYTE $0xFE; BYTE $0xFB // VPADDD ymm15, ymm15, ymm3 - BYTE $0xC4; BYTE $0xC3; BYTE $0x1D; BYTE $0x46; BYTE $0xC5; BYTE $0x20 // VPERM2I128 ymm0, ymm12, ymm13, 32 - BYTE $0xC5; BYTE $0xFD; BYTE $0xEF; BYTE $0x83; BYTE $0x80; BYTE $0x01; BYTE $0x00; BYTE $0x00 // VPXOR ymm0, ymm0, [rbx + 384] - BYTE $0xC5; BYTE $0xFE; BYTE $0x7F; BYTE $0x81; BYTE $0x80; BYTE $0x01; BYTE $0x00; BYTE $0x00 // VMOVDQU [rcx + 384], ymm0 - BYTE $0xC4; BYTE $0xC3; BYTE $0x0D; BYTE $0x46; BYTE $0xC7; BYTE $0x20 // VPERM2I128 ymm0, ymm14, ymm15, 32 - BYTE $0xC5; BYTE $0xFD; BYTE $0xEF; BYTE $0x83; BYTE $0xA0; BYTE $0x01; BYTE $0x00; BYTE $0x00 // VPXOR ymm0, ymm0, [rbx + 416] - BYTE $0xC5; BYTE $0xFE; BYTE $0x7F; BYTE $0x81; BYTE $0xA0; BYTE $0x01; BYTE $0x00; BYTE $0x00 // VMOVDQU [rcx + 416], ymm0 - BYTE $0xC4; BYTE $0xC3; BYTE $0x1D; BYTE $0x46; BYTE $0xC5; BYTE $0x31 // VPERM2I128 ymm0, ymm12, ymm13, 49 - BYTE $0xC5; BYTE $0xFD; BYTE $0xEF; BYTE $0x83; BYTE $0xC0; BYTE $0x01; BYTE $0x00; BYTE $0x00 // VPXOR ymm0, ymm0, [rbx + 448] - BYTE $0xC5; BYTE $0xFE; BYTE $0x7F; BYTE $0x81; BYTE $0xC0; BYTE $0x01; BYTE $0x00; BYTE $0x00 // VMOVDQU [rcx + 448], ymm0 - BYTE $0xC4; BYTE $0xC3; BYTE $0x0D; BYTE $0x46; BYTE $0xC7; BYTE $0x31 // VPERM2I128 ymm0, ymm14, ymm15, 49 - BYTE $0xC5; BYTE $0xFD; BYTE $0xEF; BYTE $0x83; BYTE $0xE0; BYTE $0x01; BYTE $0x00; BYTE $0x00 // VPXOR ymm0, ymm0, [rbx + 480] - BYTE $0xC5; BYTE $0xFE; BYTE $0x7F; BYTE $0x81; BYTE $0xE0; BYTE $0x01; BYTE $0x00; BYTE $0x00 // VMOVDQU [rcx + 480], ymm0 - BYTE $0xC5; BYTE $0xE5; BYTE $0xFE; BYTE $0x1C; BYTE $0x24 // VPADDD ymm3, ymm3, [rsp] - BYTE $0xC5; BYTE $0xFD; BYTE $0x7F; BYTE $0x5C; BYTE $0x24; BYTE $0x20 // VMOVDQA [rsp + 32], ymm3 - ADDQ $512, BX - ADDQ $512, CX - SUBQ $8, DX - JCC vector_loop8_begin -vector_loop8_end: - BYTE $0xC5; BYTE $0x7D; BYTE $0x6F; BYTE $0xDB // VMOVDQA ymm11, ymm3 - ADDQ $8, DX - JEQ out_write_even - BYTE $0xC4; BYTE $0x62; BYTE $0x7D; BYTE $0x5A; BYTE $0x00 // VBROADCASTI128 ymm8, [rax] - BYTE $0xC4; BYTE $0x62; BYTE $0x7D; BYTE $0x5A; BYTE 
$0x48; BYTE $0x10 // VBROADCASTI128 ymm9, [rax + 16] - BYTE $0xC4; BYTE $0x62; BYTE $0x7D; BYTE $0x5A; BYTE $0x50; BYTE $0x20 // VBROADCASTI128 ymm10, [rax + 32] - BYTE $0xC5; BYTE $0x7D; BYTE $0x6F; BYTE $0x34; BYTE $0x24 // VMOVDQA ymm14, [rsp] - SUBQ $4, DX - JCS vector_loop4_end -vector_loop4_begin: - BYTE $0xC5; BYTE $0x7D; BYTE $0x7F; BYTE $0xC0 // VMOVDQA ymm0, ymm8 - BYTE $0xC5; BYTE $0x7D; BYTE $0x7F; BYTE $0xC9 // VMOVDQA ymm1, ymm9 - BYTE $0xC5; BYTE $0x7D; BYTE $0x7F; BYTE $0xD2 // VMOVDQA ymm2, ymm10 - BYTE $0xC5; BYTE $0x7D; BYTE $0x7F; BYTE $0xDB // VMOVDQA ymm3, ymm11 - BYTE $0xC5; BYTE $0xFD; BYTE $0x6F; BYTE $0xE0 // VMOVDQA ymm4, ymm0 - BYTE $0xC5; BYTE $0xFD; BYTE $0x6F; BYTE $0xE9 // VMOVDQA ymm5, ymm1 - BYTE $0xC5; BYTE $0xFD; BYTE $0x6F; BYTE $0xF2 // VMOVDQA ymm6, ymm2 - BYTE $0xC4; BYTE $0xC1; BYTE $0x65; BYTE $0xD4; BYTE $0xFE // VPADDQ ymm7, ymm3, ymm14 - MOVQ $20, SI -rounds_loop4_begin: - BYTE $0xC5; BYTE $0xFD; BYTE $0xFE; BYTE $0xC1 // VPADDD ymm0, ymm0, ymm1 - BYTE $0xC5; BYTE $0xDD; BYTE $0xFE; BYTE $0xE5 // VPADDD ymm4, ymm4, ymm5 - BYTE $0xC5; BYTE $0xE5; BYTE $0xEF; BYTE $0xD8 // VPXOR ymm3, ymm3, ymm0 - BYTE $0xC5; BYTE $0xC5; BYTE $0xEF; BYTE $0xFC // VPXOR ymm7, ymm7, ymm4 - BYTE $0xC5; BYTE $0x9D; BYTE $0x72; BYTE $0xF3; BYTE $0x10 // VPSLLD ymm12, ymm3, 16 - BYTE $0xC5; BYTE $0xE5; BYTE $0x72; BYTE $0xD3; BYTE $0x10 // VPSRLD ymm3, ymm3, 16 - BYTE $0xC4; BYTE $0xC1; BYTE $0x65; BYTE $0xEF; BYTE $0xDC // VPXOR ymm3, ymm3, ymm12 - BYTE $0xC5; BYTE $0x9D; BYTE $0x72; BYTE $0xF7; BYTE $0x10 // VPSLLD ymm12, ymm7, 16 - BYTE $0xC5; BYTE $0xC5; BYTE $0x72; BYTE $0xD7; BYTE $0x10 // VPSRLD ymm7, ymm7, 16 - BYTE $0xC4; BYTE $0xC1; BYTE $0x45; BYTE $0xEF; BYTE $0xFC // VPXOR ymm7, ymm7, ymm12 - BYTE $0xC5; BYTE $0xED; BYTE $0xFE; BYTE $0xD3 // VPADDD ymm2, ymm2, ymm3 - BYTE $0xC5; BYTE $0xCD; BYTE $0xFE; BYTE $0xF7 // VPADDD ymm6, ymm6, ymm7 - BYTE $0xC5; BYTE $0xF5; BYTE $0xEF; BYTE $0xCA // VPXOR ymm1, ymm1, ymm2 - BYTE $0xC5; BYTE $0xD5; BYTE $0xEF; BYTE $0xEE // VPXOR ymm5, ymm5, ymm6 - BYTE $0xC5; BYTE $0x9D; BYTE $0x72; BYTE $0xF1; BYTE $0x0C // VPSLLD ymm12, ymm1, 12 - BYTE $0xC5; BYTE $0xF5; BYTE $0x72; BYTE $0xD1; BYTE $0x14 // VPSRLD ymm1, ymm1, 20 - BYTE $0xC4; BYTE $0xC1; BYTE $0x75; BYTE $0xEF; BYTE $0xCC // VPXOR ymm1, ymm1, ymm12 - BYTE $0xC5; BYTE $0x9D; BYTE $0x72; BYTE $0xF5; BYTE $0x0C // VPSLLD ymm12, ymm5, 12 - BYTE $0xC5; BYTE $0xD5; BYTE $0x72; BYTE $0xD5; BYTE $0x14 // VPSRLD ymm5, ymm5, 20 - BYTE $0xC4; BYTE $0xC1; BYTE $0x55; BYTE $0xEF; BYTE $0xEC // VPXOR ymm5, ymm5, ymm12 - BYTE $0xC5; BYTE $0xFD; BYTE $0xFE; BYTE $0xC1 // VPADDD ymm0, ymm0, ymm1 - BYTE $0xC5; BYTE $0xDD; BYTE $0xFE; BYTE $0xE5 // VPADDD ymm4, ymm4, ymm5 - BYTE $0xC5; BYTE $0xE5; BYTE $0xEF; BYTE $0xD8 // VPXOR ymm3, ymm3, ymm0 - BYTE $0xC5; BYTE $0xC5; BYTE $0xEF; BYTE $0xFC // VPXOR ymm7, ymm7, ymm4 - BYTE $0xC5; BYTE $0x9D; BYTE $0x72; BYTE $0xF3; BYTE $0x08 // VPSLLD ymm12, ymm3, 8 - BYTE $0xC5; BYTE $0xE5; BYTE $0x72; BYTE $0xD3; BYTE $0x18 // VPSRLD ymm3, ymm3, 24 - BYTE $0xC4; BYTE $0xC1; BYTE $0x65; BYTE $0xEF; BYTE $0xDC // VPXOR ymm3, ymm3, ymm12 - BYTE $0xC5; BYTE $0x9D; BYTE $0x72; BYTE $0xF7; BYTE $0x08 // VPSLLD ymm12, ymm7, 8 - BYTE $0xC5; BYTE $0xC5; BYTE $0x72; BYTE $0xD7; BYTE $0x18 // VPSRLD ymm7, ymm7, 24 - BYTE $0xC4; BYTE $0xC1; BYTE $0x45; BYTE $0xEF; BYTE $0xFC // VPXOR ymm7, ymm7, ymm12 - BYTE $0xC5; BYTE $0xED; BYTE $0xFE; BYTE $0xD3 // VPADDD ymm2, ymm2, ymm3 - BYTE $0xC5; BYTE $0xCD; BYTE $0xFE; BYTE $0xF7 // VPADDD ymm6, ymm6, ymm7 - 
BYTE $0xC5; BYTE $0xF5; BYTE $0xEF; BYTE $0xCA // VPXOR ymm1, ymm1, ymm2 - BYTE $0xC5; BYTE $0xD5; BYTE $0xEF; BYTE $0xEE // VPXOR ymm5, ymm5, ymm6 - BYTE $0xC5; BYTE $0x9D; BYTE $0x72; BYTE $0xF1; BYTE $0x07 // VPSLLD ymm12, ymm1, 7 - BYTE $0xC5; BYTE $0xF5; BYTE $0x72; BYTE $0xD1; BYTE $0x19 // VPSRLD ymm1, ymm1, 25 - BYTE $0xC4; BYTE $0xC1; BYTE $0x75; BYTE $0xEF; BYTE $0xCC // VPXOR ymm1, ymm1, ymm12 - BYTE $0xC5; BYTE $0x9D; BYTE $0x72; BYTE $0xF5; BYTE $0x07 // VPSLLD ymm12, ymm5, 7 - BYTE $0xC5; BYTE $0xD5; BYTE $0x72; BYTE $0xD5; BYTE $0x19 // VPSRLD ymm5, ymm5, 25 - BYTE $0xC4; BYTE $0xC1; BYTE $0x55; BYTE $0xEF; BYTE $0xEC // VPXOR ymm5, ymm5, ymm12 - BYTE $0xC5; BYTE $0xFD; BYTE $0x70; BYTE $0xC9; BYTE $0x39 // VPSHUFD ymm1, ymm1, 57 - BYTE $0xC5; BYTE $0xFD; BYTE $0x70; BYTE $0xED; BYTE $0x39 // VPSHUFD ymm5, ymm5, 57 - BYTE $0xC5; BYTE $0xFD; BYTE $0x70; BYTE $0xD2; BYTE $0x4E // VPSHUFD ymm2, ymm2, 78 - BYTE $0xC5; BYTE $0xFD; BYTE $0x70; BYTE $0xF6; BYTE $0x4E // VPSHUFD ymm6, ymm6, 78 - BYTE $0xC5; BYTE $0xFD; BYTE $0x70; BYTE $0xDB; BYTE $0x93 // VPSHUFD ymm3, ymm3, 147 - BYTE $0xC5; BYTE $0xFD; BYTE $0x70; BYTE $0xFF; BYTE $0x93 // VPSHUFD ymm7, ymm7, 147 - BYTE $0xC5; BYTE $0xFD; BYTE $0xFE; BYTE $0xC1 // VPADDD ymm0, ymm0, ymm1 - BYTE $0xC5; BYTE $0xDD; BYTE $0xFE; BYTE $0xE5 // VPADDD ymm4, ymm4, ymm5 - BYTE $0xC5; BYTE $0xE5; BYTE $0xEF; BYTE $0xD8 // VPXOR ymm3, ymm3, ymm0 - BYTE $0xC5; BYTE $0xC5; BYTE $0xEF; BYTE $0xFC // VPXOR ymm7, ymm7, ymm4 - BYTE $0xC5; BYTE $0x9D; BYTE $0x72; BYTE $0xF3; BYTE $0x10 // VPSLLD ymm12, ymm3, 16 - BYTE $0xC5; BYTE $0xE5; BYTE $0x72; BYTE $0xD3; BYTE $0x10 // VPSRLD ymm3, ymm3, 16 - BYTE $0xC4; BYTE $0xC1; BYTE $0x65; BYTE $0xEF; BYTE $0xDC // VPXOR ymm3, ymm3, ymm12 - BYTE $0xC5; BYTE $0x9D; BYTE $0x72; BYTE $0xF7; BYTE $0x10 // VPSLLD ymm12, ymm7, 16 - BYTE $0xC5; BYTE $0xC5; BYTE $0x72; BYTE $0xD7; BYTE $0x10 // VPSRLD ymm7, ymm7, 16 - BYTE $0xC4; BYTE $0xC1; BYTE $0x45; BYTE $0xEF; BYTE $0xFC // VPXOR ymm7, ymm7, ymm12 - BYTE $0xC5; BYTE $0xED; BYTE $0xFE; BYTE $0xD3 // VPADDD ymm2, ymm2, ymm3 - BYTE $0xC5; BYTE $0xCD; BYTE $0xFE; BYTE $0xF7 // VPADDD ymm6, ymm6, ymm7 - BYTE $0xC5; BYTE $0xF5; BYTE $0xEF; BYTE $0xCA // VPXOR ymm1, ymm1, ymm2 - BYTE $0xC5; BYTE $0xD5; BYTE $0xEF; BYTE $0xEE // VPXOR ymm5, ymm5, ymm6 - BYTE $0xC5; BYTE $0x9D; BYTE $0x72; BYTE $0xF1; BYTE $0x0C // VPSLLD ymm12, ymm1, 12 - BYTE $0xC5; BYTE $0xF5; BYTE $0x72; BYTE $0xD1; BYTE $0x14 // VPSRLD ymm1, ymm1, 20 - BYTE $0xC4; BYTE $0xC1; BYTE $0x75; BYTE $0xEF; BYTE $0xCC // VPXOR ymm1, ymm1, ymm12 - BYTE $0xC5; BYTE $0x9D; BYTE $0x72; BYTE $0xF5; BYTE $0x0C // VPSLLD ymm12, ymm5, 12 - BYTE $0xC5; BYTE $0xD5; BYTE $0x72; BYTE $0xD5; BYTE $0x14 // VPSRLD ymm5, ymm5, 20 - BYTE $0xC4; BYTE $0xC1; BYTE $0x55; BYTE $0xEF; BYTE $0xEC // VPXOR ymm5, ymm5, ymm12 - BYTE $0xC5; BYTE $0xFD; BYTE $0xFE; BYTE $0xC1 // VPADDD ymm0, ymm0, ymm1 - BYTE $0xC5; BYTE $0xDD; BYTE $0xFE; BYTE $0xE5 // VPADDD ymm4, ymm4, ymm5 - BYTE $0xC5; BYTE $0xE5; BYTE $0xEF; BYTE $0xD8 // VPXOR ymm3, ymm3, ymm0 - BYTE $0xC5; BYTE $0xC5; BYTE $0xEF; BYTE $0xFC // VPXOR ymm7, ymm7, ymm4 - BYTE $0xC5; BYTE $0x9D; BYTE $0x72; BYTE $0xF3; BYTE $0x08 // VPSLLD ymm12, ymm3, 8 - BYTE $0xC5; BYTE $0xE5; BYTE $0x72; BYTE $0xD3; BYTE $0x18 // VPSRLD ymm3, ymm3, 24 - BYTE $0xC4; BYTE $0xC1; BYTE $0x65; BYTE $0xEF; BYTE $0xDC // VPXOR ymm3, ymm3, ymm12 - BYTE $0xC5; BYTE $0x9D; BYTE $0x72; BYTE $0xF7; BYTE $0x08 // VPSLLD ymm12, ymm7, 8 - BYTE $0xC5; BYTE $0xC5; BYTE $0x72; BYTE $0xD7; BYTE $0x18 // 
VPSRLD ymm7, ymm7, 24 - BYTE $0xC4; BYTE $0xC1; BYTE $0x45; BYTE $0xEF; BYTE $0xFC // VPXOR ymm7, ymm7, ymm12 - BYTE $0xC5; BYTE $0xED; BYTE $0xFE; BYTE $0xD3 // VPADDD ymm2, ymm2, ymm3 - BYTE $0xC5; BYTE $0xCD; BYTE $0xFE; BYTE $0xF7 // VPADDD ymm6, ymm6, ymm7 - BYTE $0xC5; BYTE $0xF5; BYTE $0xEF; BYTE $0xCA // VPXOR ymm1, ymm1, ymm2 - BYTE $0xC5; BYTE $0xD5; BYTE $0xEF; BYTE $0xEE // VPXOR ymm5, ymm5, ymm6 - BYTE $0xC5; BYTE $0x9D; BYTE $0x72; BYTE $0xF1; BYTE $0x07 // VPSLLD ymm12, ymm1, 7 - BYTE $0xC5; BYTE $0xF5; BYTE $0x72; BYTE $0xD1; BYTE $0x19 // VPSRLD ymm1, ymm1, 25 - BYTE $0xC4; BYTE $0xC1; BYTE $0x75; BYTE $0xEF; BYTE $0xCC // VPXOR ymm1, ymm1, ymm12 - BYTE $0xC5; BYTE $0x9D; BYTE $0x72; BYTE $0xF5; BYTE $0x07 // VPSLLD ymm12, ymm5, 7 - BYTE $0xC5; BYTE $0xD5; BYTE $0x72; BYTE $0xD5; BYTE $0x19 // VPSRLD ymm5, ymm5, 25 - BYTE $0xC4; BYTE $0xC1; BYTE $0x55; BYTE $0xEF; BYTE $0xEC // VPXOR ymm5, ymm5, ymm12 - BYTE $0xC5; BYTE $0xFD; BYTE $0x70; BYTE $0xC9; BYTE $0x93 // VPSHUFD ymm1, ymm1, 147 - BYTE $0xC5; BYTE $0xFD; BYTE $0x70; BYTE $0xED; BYTE $0x93 // VPSHUFD ymm5, ymm5, 147 - BYTE $0xC5; BYTE $0xFD; BYTE $0x70; BYTE $0xD2; BYTE $0x4E // VPSHUFD ymm2, ymm2, 78 - BYTE $0xC5; BYTE $0xFD; BYTE $0x70; BYTE $0xF6; BYTE $0x4E // VPSHUFD ymm6, ymm6, 78 - BYTE $0xC5; BYTE $0xFD; BYTE $0x70; BYTE $0xDB; BYTE $0x39 // VPSHUFD ymm3, ymm3, 57 - BYTE $0xC5; BYTE $0xFD; BYTE $0x70; BYTE $0xFF; BYTE $0x39 // VPSHUFD ymm7, ymm7, 57 - SUBQ $2, SI - JNE rounds_loop4_begin - BYTE $0xC4; BYTE $0xC1; BYTE $0x7D; BYTE $0xFE; BYTE $0xC0 // VPADDD ymm0, ymm0, ymm8 - BYTE $0xC4; BYTE $0xC1; BYTE $0x75; BYTE $0xFE; BYTE $0xC9 // VPADDD ymm1, ymm1, ymm9 - BYTE $0xC4; BYTE $0xC1; BYTE $0x6D; BYTE $0xFE; BYTE $0xD2 // VPADDD ymm2, ymm2, ymm10 - BYTE $0xC4; BYTE $0xC1; BYTE $0x65; BYTE $0xFE; BYTE $0xDB // VPADDD ymm3, ymm3, ymm11 - BYTE $0xC4; BYTE $0x63; BYTE $0x7D; BYTE $0x46; BYTE $0xE1; BYTE $0x20 // VPERM2I128 ymm12, ymm0, ymm1, 32 - BYTE $0xC5; BYTE $0x1D; BYTE $0xEF; BYTE $0x23 // VPXOR ymm12, ymm12, [rbx] - BYTE $0xC5; BYTE $0x7E; BYTE $0x7F; BYTE $0x21 // VMOVDQU [rcx], ymm12 - BYTE $0xC4; BYTE $0x63; BYTE $0x6D; BYTE $0x46; BYTE $0xE3; BYTE $0x20 // VPERM2I128 ymm12, ymm2, ymm3, 32 - BYTE $0xC5; BYTE $0x1D; BYTE $0xEF; BYTE $0x63; BYTE $0x20 // VPXOR ymm12, ymm12, [rbx + 32] - BYTE $0xC5; BYTE $0x7E; BYTE $0x7F; BYTE $0x61; BYTE $0x20 // VMOVDQU [rcx + 32], ymm12 - BYTE $0xC4; BYTE $0x63; BYTE $0x7D; BYTE $0x46; BYTE $0xE1; BYTE $0x31 // VPERM2I128 ymm12, ymm0, ymm1, 49 - BYTE $0xC5; BYTE $0x1D; BYTE $0xEF; BYTE $0x63; BYTE $0x40 // VPXOR ymm12, ymm12, [rbx + 64] - BYTE $0xC5; BYTE $0x7E; BYTE $0x7F; BYTE $0x61; BYTE $0x40 // VMOVDQU [rcx + 64], ymm12 - BYTE $0xC4; BYTE $0x63; BYTE $0x6D; BYTE $0x46; BYTE $0xE3; BYTE $0x31 // VPERM2I128 ymm12, ymm2, ymm3, 49 - BYTE $0xC5; BYTE $0x1D; BYTE $0xEF; BYTE $0x63; BYTE $0x60 // VPXOR ymm12, ymm12, [rbx + 96] - BYTE $0xC5; BYTE $0x7E; BYTE $0x7F; BYTE $0x61; BYTE $0x60 // VMOVDQU [rcx + 96], ymm12 - BYTE $0xC4; BYTE $0x41; BYTE $0x25; BYTE $0xFE; BYTE $0xDE // VPADDD ymm11, ymm11, ymm14 - BYTE $0xC4; BYTE $0xC1; BYTE $0x5D; BYTE $0xFE; BYTE $0xE0 // VPADDD ymm4, ymm4, ymm8 - BYTE $0xC4; BYTE $0xC1; BYTE $0x55; BYTE $0xFE; BYTE $0xE9 // VPADDD ymm5, ymm5, ymm9 - BYTE $0xC4; BYTE $0xC1; BYTE $0x4D; BYTE $0xFE; BYTE $0xF2 // VPADDD ymm6, ymm6, ymm10 - BYTE $0xC4; BYTE $0xC1; BYTE $0x45; BYTE $0xFE; BYTE $0xFB // VPADDD ymm7, ymm7, ymm11 - BYTE $0xC4; BYTE $0x63; BYTE $0x5D; BYTE $0x46; BYTE $0xE5; BYTE $0x20 // VPERM2I128 ymm12, ymm4, ymm5, 32 - BYTE 
$0xC5; BYTE $0x1D; BYTE $0xEF; BYTE $0xA3; BYTE $0x80; BYTE $0x00; BYTE $0x00; BYTE $0x00 // VPXOR ymm12, ymm12, [rbx + 128] - BYTE $0xC5; BYTE $0x7E; BYTE $0x7F; BYTE $0xA1; BYTE $0x80; BYTE $0x00; BYTE $0x00; BYTE $0x00 // VMOVDQU [rcx + 128], ymm12 - BYTE $0xC4; BYTE $0x63; BYTE $0x4D; BYTE $0x46; BYTE $0xE7; BYTE $0x20 // VPERM2I128 ymm12, ymm6, ymm7, 32 - BYTE $0xC5; BYTE $0x1D; BYTE $0xEF; BYTE $0xA3; BYTE $0xA0; BYTE $0x00; BYTE $0x00; BYTE $0x00 // VPXOR ymm12, ymm12, [rbx + 160] - BYTE $0xC5; BYTE $0x7E; BYTE $0x7F; BYTE $0xA1; BYTE $0xA0; BYTE $0x00; BYTE $0x00; BYTE $0x00 // VMOVDQU [rcx + 160], ymm12 - BYTE $0xC4; BYTE $0x63; BYTE $0x5D; BYTE $0x46; BYTE $0xE5; BYTE $0x31 // VPERM2I128 ymm12, ymm4, ymm5, 49 - BYTE $0xC5; BYTE $0x1D; BYTE $0xEF; BYTE $0xA3; BYTE $0xC0; BYTE $0x00; BYTE $0x00; BYTE $0x00 // VPXOR ymm12, ymm12, [rbx + 192] - BYTE $0xC5; BYTE $0x7E; BYTE $0x7F; BYTE $0xA1; BYTE $0xC0; BYTE $0x00; BYTE $0x00; BYTE $0x00 // VMOVDQU [rcx + 192], ymm12 - BYTE $0xC4; BYTE $0x63; BYTE $0x4D; BYTE $0x46; BYTE $0xE7; BYTE $0x31 // VPERM2I128 ymm12, ymm6, ymm7, 49 - BYTE $0xC5; BYTE $0x1D; BYTE $0xEF; BYTE $0xA3; BYTE $0xE0; BYTE $0x00; BYTE $0x00; BYTE $0x00 // VPXOR ymm12, ymm12, [rbx + 224] - BYTE $0xC5; BYTE $0x7E; BYTE $0x7F; BYTE $0xA1; BYTE $0xE0; BYTE $0x00; BYTE $0x00; BYTE $0x00 // VMOVDQU [rcx + 224], ymm12 - BYTE $0xC4; BYTE $0x41; BYTE $0x25; BYTE $0xFE; BYTE $0xDE // VPADDD ymm11, ymm11, ymm14 - ADDQ $256, BX - ADDQ $256, CX - SUBQ $4, DX - JCC vector_loop4_begin -vector_loop4_end: - ADDQ $4, DX - JEQ out_write_even -vector_loop2_begin: - BYTE $0xC5; BYTE $0x7D; BYTE $0x7F; BYTE $0xC0 // VMOVDQA ymm0, ymm8 - BYTE $0xC5; BYTE $0x7D; BYTE $0x7F; BYTE $0xC9 // VMOVDQA ymm1, ymm9 - BYTE $0xC5; BYTE $0x7D; BYTE $0x7F; BYTE $0xD2 // VMOVDQA ymm2, ymm10 - BYTE $0xC5; BYTE $0x7D; BYTE $0x7F; BYTE $0xDB // VMOVDQA ymm3, ymm11 - MOVQ $20, SI -rounds_loop2_begin: - BYTE $0xC5; BYTE $0xFD; BYTE $0xFE; BYTE $0xC1 // VPADDD ymm0, ymm0, ymm1 - BYTE $0xC5; BYTE $0xE5; BYTE $0xEF; BYTE $0xD8 // VPXOR ymm3, ymm3, ymm0 - BYTE $0xC5; BYTE $0x9D; BYTE $0x72; BYTE $0xF3; BYTE $0x10 // VPSLLD ymm12, ymm3, 16 - BYTE $0xC5; BYTE $0xE5; BYTE $0x72; BYTE $0xD3; BYTE $0x10 // VPSRLD ymm3, ymm3, 16 - BYTE $0xC4; BYTE $0xC1; BYTE $0x65; BYTE $0xEF; BYTE $0xDC // VPXOR ymm3, ymm3, ymm12 - BYTE $0xC5; BYTE $0xED; BYTE $0xFE; BYTE $0xD3 // VPADDD ymm2, ymm2, ymm3 - BYTE $0xC5; BYTE $0xF5; BYTE $0xEF; BYTE $0xCA // VPXOR ymm1, ymm1, ymm2 - BYTE $0xC5; BYTE $0x9D; BYTE $0x72; BYTE $0xF1; BYTE $0x0C // VPSLLD ymm12, ymm1, 12 - BYTE $0xC5; BYTE $0xF5; BYTE $0x72; BYTE $0xD1; BYTE $0x14 // VPSRLD ymm1, ymm1, 20 - BYTE $0xC4; BYTE $0xC1; BYTE $0x75; BYTE $0xEF; BYTE $0xCC // VPXOR ymm1, ymm1, ymm12 - BYTE $0xC5; BYTE $0xFD; BYTE $0xFE; BYTE $0xC1 // VPADDD ymm0, ymm0, ymm1 - BYTE $0xC5; BYTE $0xE5; BYTE $0xEF; BYTE $0xD8 // VPXOR ymm3, ymm3, ymm0 - BYTE $0xC5; BYTE $0x9D; BYTE $0x72; BYTE $0xF3; BYTE $0x08 // VPSLLD ymm12, ymm3, 8 - BYTE $0xC5; BYTE $0xE5; BYTE $0x72; BYTE $0xD3; BYTE $0x18 // VPSRLD ymm3, ymm3, 24 - BYTE $0xC4; BYTE $0xC1; BYTE $0x65; BYTE $0xEF; BYTE $0xDC // VPXOR ymm3, ymm3, ymm12 - BYTE $0xC5; BYTE $0xED; BYTE $0xFE; BYTE $0xD3 // VPADDD ymm2, ymm2, ymm3 - BYTE $0xC5; BYTE $0xF5; BYTE $0xEF; BYTE $0xCA // VPXOR ymm1, ymm1, ymm2 - BYTE $0xC5; BYTE $0x9D; BYTE $0x72; BYTE $0xF1; BYTE $0x07 // VPSLLD ymm12, ymm1, 7 - BYTE $0xC5; BYTE $0xF5; BYTE $0x72; BYTE $0xD1; BYTE $0x19 // VPSRLD ymm1, ymm1, 25 - BYTE $0xC4; BYTE $0xC1; BYTE $0x75; BYTE $0xEF; BYTE $0xCC // VPXOR ymm1, ymm1, 
ymm12 - BYTE $0xC5; BYTE $0xFD; BYTE $0x70; BYTE $0xC9; BYTE $0x39 // VPSHUFD ymm1, ymm1, 57 - BYTE $0xC5; BYTE $0xFD; BYTE $0x70; BYTE $0xD2; BYTE $0x4E // VPSHUFD ymm2, ymm2, 78 - BYTE $0xC5; BYTE $0xFD; BYTE $0x70; BYTE $0xDB; BYTE $0x93 // VPSHUFD ymm3, ymm3, 147 - BYTE $0xC5; BYTE $0xFD; BYTE $0xFE; BYTE $0xC1 // VPADDD ymm0, ymm0, ymm1 - BYTE $0xC5; BYTE $0xE5; BYTE $0xEF; BYTE $0xD8 // VPXOR ymm3, ymm3, ymm0 - BYTE $0xC5; BYTE $0x9D; BYTE $0x72; BYTE $0xF3; BYTE $0x10 // VPSLLD ymm12, ymm3, 16 - BYTE $0xC5; BYTE $0xE5; BYTE $0x72; BYTE $0xD3; BYTE $0x10 // VPSRLD ymm3, ymm3, 16 - BYTE $0xC4; BYTE $0xC1; BYTE $0x65; BYTE $0xEF; BYTE $0xDC // VPXOR ymm3, ymm3, ymm12 - BYTE $0xC5; BYTE $0xED; BYTE $0xFE; BYTE $0xD3 // VPADDD ymm2, ymm2, ymm3 - BYTE $0xC5; BYTE $0xF5; BYTE $0xEF; BYTE $0xCA // VPXOR ymm1, ymm1, ymm2 - BYTE $0xC5; BYTE $0x9D; BYTE $0x72; BYTE $0xF1; BYTE $0x0C // VPSLLD ymm12, ymm1, 12 - BYTE $0xC5; BYTE $0xF5; BYTE $0x72; BYTE $0xD1; BYTE $0x14 // VPSRLD ymm1, ymm1, 20 - BYTE $0xC4; BYTE $0xC1; BYTE $0x75; BYTE $0xEF; BYTE $0xCC // VPXOR ymm1, ymm1, ymm12 - BYTE $0xC5; BYTE $0xFD; BYTE $0xFE; BYTE $0xC1 // VPADDD ymm0, ymm0, ymm1 - BYTE $0xC5; BYTE $0xE5; BYTE $0xEF; BYTE $0xD8 // VPXOR ymm3, ymm3, ymm0 - BYTE $0xC5; BYTE $0x9D; BYTE $0x72; BYTE $0xF3; BYTE $0x08 // VPSLLD ymm12, ymm3, 8 - BYTE $0xC5; BYTE $0xE5; BYTE $0x72; BYTE $0xD3; BYTE $0x18 // VPSRLD ymm3, ymm3, 24 - BYTE $0xC4; BYTE $0xC1; BYTE $0x65; BYTE $0xEF; BYTE $0xDC // VPXOR ymm3, ymm3, ymm12 - BYTE $0xC5; BYTE $0xED; BYTE $0xFE; BYTE $0xD3 // VPADDD ymm2, ymm2, ymm3 - BYTE $0xC5; BYTE $0xF5; BYTE $0xEF; BYTE $0xCA // VPXOR ymm1, ymm1, ymm2 - BYTE $0xC5; BYTE $0x9D; BYTE $0x72; BYTE $0xF1; BYTE $0x07 // VPSLLD ymm12, ymm1, 7 - BYTE $0xC5; BYTE $0xF5; BYTE $0x72; BYTE $0xD1; BYTE $0x19 // VPSRLD ymm1, ymm1, 25 - BYTE $0xC4; BYTE $0xC1; BYTE $0x75; BYTE $0xEF; BYTE $0xCC // VPXOR ymm1, ymm1, ymm12 - BYTE $0xC5; BYTE $0xFD; BYTE $0x70; BYTE $0xC9; BYTE $0x93 // VPSHUFD ymm1, ymm1, 147 - BYTE $0xC5; BYTE $0xFD; BYTE $0x70; BYTE $0xD2; BYTE $0x4E // VPSHUFD ymm2, ymm2, 78 - BYTE $0xC5; BYTE $0xFD; BYTE $0x70; BYTE $0xDB; BYTE $0x39 // VPSHUFD ymm3, ymm3, 57 - SUBQ $2, SI - JNE rounds_loop2_begin - BYTE $0xC4; BYTE $0xC1; BYTE $0x7D; BYTE $0xFE; BYTE $0xC0 // VPADDD ymm0, ymm0, ymm8 - BYTE $0xC4; BYTE $0xC1; BYTE $0x75; BYTE $0xFE; BYTE $0xC9 // VPADDD ymm1, ymm1, ymm9 - BYTE $0xC4; BYTE $0xC1; BYTE $0x6D; BYTE $0xFE; BYTE $0xD2 // VPADDD ymm2, ymm2, ymm10 - BYTE $0xC4; BYTE $0xC1; BYTE $0x65; BYTE $0xFE; BYTE $0xDB // VPADDD ymm3, ymm3, ymm11 - BYTE $0xC4; BYTE $0x63; BYTE $0x7D; BYTE $0x46; BYTE $0xE1; BYTE $0x20 // VPERM2I128 ymm12, ymm0, ymm1, 32 - BYTE $0xC5; BYTE $0x1D; BYTE $0xEF; BYTE $0x23 // VPXOR ymm12, ymm12, [rbx] - BYTE $0xC5; BYTE $0x7E; BYTE $0x7F; BYTE $0x21 // VMOVDQU [rcx], ymm12 - BYTE $0xC4; BYTE $0x63; BYTE $0x6D; BYTE $0x46; BYTE $0xE3; BYTE $0x20 // VPERM2I128 ymm12, ymm2, ymm3, 32 - BYTE $0xC5; BYTE $0x1D; BYTE $0xEF; BYTE $0x63; BYTE $0x20 // VPXOR ymm12, ymm12, [rbx + 32] - BYTE $0xC5; BYTE $0x7E; BYTE $0x7F; BYTE $0x61; BYTE $0x20 // VMOVDQU [rcx + 32], ymm12 - SUBQ $1, DX - JEQ out_write_odd - BYTE $0xC4; BYTE $0x41; BYTE $0x25; BYTE $0xFE; BYTE $0xDE // VPADDD ymm11, ymm11, ymm14 - BYTE $0xC4; BYTE $0x63; BYTE $0x7D; BYTE $0x46; BYTE $0xE1; BYTE $0x31 // VPERM2I128 ymm12, ymm0, ymm1, 49 - BYTE $0xC5; BYTE $0x1D; BYTE $0xEF; BYTE $0x63; BYTE $0x40 // VPXOR ymm12, ymm12, [rbx + 64] - BYTE $0xC5; BYTE $0x7E; BYTE $0x7F; BYTE $0x61; BYTE $0x40 // VMOVDQU [rcx + 64], ymm12 - BYTE 
$0xC4; BYTE $0x63; BYTE $0x6D; BYTE $0x46; BYTE $0xE3; BYTE $0x31 // VPERM2I128 ymm12, ymm2, ymm3, 49 - BYTE $0xC5; BYTE $0x1D; BYTE $0xEF; BYTE $0x63; BYTE $0x60 // VPXOR ymm12, ymm12, [rbx + 96] - BYTE $0xC5; BYTE $0x7E; BYTE $0x7F; BYTE $0x61; BYTE $0x60 // VMOVDQU [rcx + 96], ymm12 - SUBQ $1, DX - JEQ out_write_even - ADDQ $128, BX - ADDQ $128, CX - JMP vector_loop2_begin -out_write_odd: - BYTE $0xC4; BYTE $0x43; BYTE $0x25; BYTE $0x46; BYTE $0xDB; BYTE $0x01 // VPERM2I128 ymm11, ymm11, ymm11, 1 -out_write_even: - BYTE $0xC5; BYTE $0x79; BYTE $0x7F; BYTE $0x58; BYTE $0x30 // VMOVDQA [rax + 48], xmm11 - BYTE $0xC5; BYTE $0xFD; BYTE $0xEF; BYTE $0xC0 // VPXOR ymm0, ymm0, ymm0 - BYTE $0xC5; BYTE $0xFD; BYTE $0x7F; BYTE $0x44; BYTE $0x24; BYTE $0x40 // VMOVDQA [rsp + 64], ymm0 - BYTE $0xC5; BYTE $0xFD; BYTE $0x7F; BYTE $0x44; BYTE $0x24; BYTE $0x20 // VMOVDQA [rsp + 32], ymm0 - MOVQ DI, SP - BYTE $0xC5; BYTE $0xF8; BYTE $0x77 // VZEROUPPER - RET - -// func cpuidAmd64(cpuidParams *uint32) -TEXT ·cpuidAmd64(SB),4,$0-8 - MOVQ cpuidParams+0(FP), R15 - MOVL 0(R15), AX - MOVL 4(R15), CX - CPUID - MOVL AX, 0(R15) - MOVL BX, 4(R15) - MOVL CX, 8(R15) - MOVL DX, 12(R15) - RET - -// func xgetbv0Amd64(xcrVec *uint32) -TEXT ·xgetbv0Amd64(SB),4,$0-8 - MOVQ xcrVec+0(FP), BX - XORL CX, CX - BYTE $0x0F; BYTE $0x01; BYTE $0xD0 // XGETBV - MOVL AX, 0(BX) - MOVL DX, 4(BX) - RET diff --git a/vendor/git.schwanenlied.me/yawning/chacha20.git/chacha20_ref.go b/vendor/git.schwanenlied.me/yawning/chacha20.git/chacha20_ref.go deleted file mode 100644 index 694c937..0000000 --- a/vendor/git.schwanenlied.me/yawning/chacha20.git/chacha20_ref.go +++ /dev/null @@ -1,392 +0,0 @@ -// chacha20_ref.go - Reference ChaCha20. -// -// To the extent possible under law, Yawning Angel has waived all copyright -// and related or neighboring rights to chacha20, using the Creative -// Commons "CC0" public domain dedication. See LICENSE or -// for full details. - -package chacha20 - -import ( - "encoding/binary" - "math" - "unsafe" -) - -func blocksRef(x *[stateSize]uint32, in []byte, out []byte, nrBlocks int, isIetf bool) { - if isIetf { - var totalBlocks uint64 - totalBlocks = uint64(x[8]) + uint64(nrBlocks) - if totalBlocks > math.MaxUint32 { - panic("chacha20: Exceeded keystream per nonce limit") - } - } - - // This routine ignores x[0]...x[4] in favor the const values since it's - // ever so slightly faster. 
- - for n := 0; n < nrBlocks; n++ { - x0, x1, x2, x3 := sigma0, sigma1, sigma2, sigma3 - x4, x5, x6, x7, x8, x9, x10, x11, x12, x13, x14, x15 := x[4], x[5], x[6], x[7], x[8], x[9], x[10], x[11], x[12], x[13], x[14], x[15] - - for i := chachaRounds; i > 0; i -= 2 { - // quarterround(x, 0, 4, 8, 12) - x0 += x4 - x12 ^= x0 - x12 = (x12 << 16) | (x12 >> 16) - x8 += x12 - x4 ^= x8 - x4 = (x4 << 12) | (x4 >> 20) - x0 += x4 - x12 ^= x0 - x12 = (x12 << 8) | (x12 >> 24) - x8 += x12 - x4 ^= x8 - x4 = (x4 << 7) | (x4 >> 25) - - // quarterround(x, 1, 5, 9, 13) - x1 += x5 - x13 ^= x1 - x13 = (x13 << 16) | (x13 >> 16) - x9 += x13 - x5 ^= x9 - x5 = (x5 << 12) | (x5 >> 20) - x1 += x5 - x13 ^= x1 - x13 = (x13 << 8) | (x13 >> 24) - x9 += x13 - x5 ^= x9 - x5 = (x5 << 7) | (x5 >> 25) - - // quarterround(x, 2, 6, 10, 14) - x2 += x6 - x14 ^= x2 - x14 = (x14 << 16) | (x14 >> 16) - x10 += x14 - x6 ^= x10 - x6 = (x6 << 12) | (x6 >> 20) - x2 += x6 - x14 ^= x2 - x14 = (x14 << 8) | (x14 >> 24) - x10 += x14 - x6 ^= x10 - x6 = (x6 << 7) | (x6 >> 25) - - // quarterround(x, 3, 7, 11, 15) - x3 += x7 - x15 ^= x3 - x15 = (x15 << 16) | (x15 >> 16) - x11 += x15 - x7 ^= x11 - x7 = (x7 << 12) | (x7 >> 20) - x3 += x7 - x15 ^= x3 - x15 = (x15 << 8) | (x15 >> 24) - x11 += x15 - x7 ^= x11 - x7 = (x7 << 7) | (x7 >> 25) - - // quarterround(x, 0, 5, 10, 15) - x0 += x5 - x15 ^= x0 - x15 = (x15 << 16) | (x15 >> 16) - x10 += x15 - x5 ^= x10 - x5 = (x5 << 12) | (x5 >> 20) - x0 += x5 - x15 ^= x0 - x15 = (x15 << 8) | (x15 >> 24) - x10 += x15 - x5 ^= x10 - x5 = (x5 << 7) | (x5 >> 25) - - // quarterround(x, 1, 6, 11, 12) - x1 += x6 - x12 ^= x1 - x12 = (x12 << 16) | (x12 >> 16) - x11 += x12 - x6 ^= x11 - x6 = (x6 << 12) | (x6 >> 20) - x1 += x6 - x12 ^= x1 - x12 = (x12 << 8) | (x12 >> 24) - x11 += x12 - x6 ^= x11 - x6 = (x6 << 7) | (x6 >> 25) - - // quarterround(x, 2, 7, 8, 13) - x2 += x7 - x13 ^= x2 - x13 = (x13 << 16) | (x13 >> 16) - x8 += x13 - x7 ^= x8 - x7 = (x7 << 12) | (x7 >> 20) - x2 += x7 - x13 ^= x2 - x13 = (x13 << 8) | (x13 >> 24) - x8 += x13 - x7 ^= x8 - x7 = (x7 << 7) | (x7 >> 25) - - // quarterround(x, 3, 4, 9, 14) - x3 += x4 - x14 ^= x3 - x14 = (x14 << 16) | (x14 >> 16) - x9 += x14 - x4 ^= x9 - x4 = (x4 << 12) | (x4 >> 20) - x3 += x4 - x14 ^= x3 - x14 = (x14 << 8) | (x14 >> 24) - x9 += x14 - x4 ^= x9 - x4 = (x4 << 7) | (x4 >> 25) - } - - // On amd64 at least, this is a rather big boost. 
- if useUnsafe { - if in != nil { - inArr := (*[16]uint32)(unsafe.Pointer(&in[n*BlockSize])) - outArr := (*[16]uint32)(unsafe.Pointer(&out[n*BlockSize])) - outArr[0] = inArr[0] ^ (x0 + sigma0) - outArr[1] = inArr[1] ^ (x1 + sigma1) - outArr[2] = inArr[2] ^ (x2 + sigma2) - outArr[3] = inArr[3] ^ (x3 + sigma3) - outArr[4] = inArr[4] ^ (x4 + x[4]) - outArr[5] = inArr[5] ^ (x5 + x[5]) - outArr[6] = inArr[6] ^ (x6 + x[6]) - outArr[7] = inArr[7] ^ (x7 + x[7]) - outArr[8] = inArr[8] ^ (x8 + x[8]) - outArr[9] = inArr[9] ^ (x9 + x[9]) - outArr[10] = inArr[10] ^ (x10 + x[10]) - outArr[11] = inArr[11] ^ (x11 + x[11]) - outArr[12] = inArr[12] ^ (x12 + x[12]) - outArr[13] = inArr[13] ^ (x13 + x[13]) - outArr[14] = inArr[14] ^ (x14 + x[14]) - outArr[15] = inArr[15] ^ (x15 + x[15]) - } else { - outArr := (*[16]uint32)(unsafe.Pointer(&out[n*BlockSize])) - outArr[0] = x0 + sigma0 - outArr[1] = x1 + sigma1 - outArr[2] = x2 + sigma2 - outArr[3] = x3 + sigma3 - outArr[4] = x4 + x[4] - outArr[5] = x5 + x[5] - outArr[6] = x6 + x[6] - outArr[7] = x7 + x[7] - outArr[8] = x8 + x[8] - outArr[9] = x9 + x[9] - outArr[10] = x10 + x[10] - outArr[11] = x11 + x[11] - outArr[12] = x12 + x[12] - outArr[13] = x13 + x[13] - outArr[14] = x14 + x[14] - outArr[15] = x15 + x[15] - } - } else { - // Slow path, either the architecture cares about alignment, or is not little endian. - x0 += sigma0 - x1 += sigma1 - x2 += sigma2 - x3 += sigma3 - x4 += x[4] - x5 += x[5] - x6 += x[6] - x7 += x[7] - x8 += x[8] - x9 += x[9] - x10 += x[10] - x11 += x[11] - x12 += x[12] - x13 += x[13] - x14 += x[14] - x15 += x[15] - if in != nil { - binary.LittleEndian.PutUint32(out[0:4], binary.LittleEndian.Uint32(in[0:4])^x0) - binary.LittleEndian.PutUint32(out[4:8], binary.LittleEndian.Uint32(in[4:8])^x1) - binary.LittleEndian.PutUint32(out[8:12], binary.LittleEndian.Uint32(in[8:12])^x2) - binary.LittleEndian.PutUint32(out[12:16], binary.LittleEndian.Uint32(in[12:16])^x3) - binary.LittleEndian.PutUint32(out[16:20], binary.LittleEndian.Uint32(in[16:20])^x4) - binary.LittleEndian.PutUint32(out[20:24], binary.LittleEndian.Uint32(in[20:24])^x5) - binary.LittleEndian.PutUint32(out[24:28], binary.LittleEndian.Uint32(in[24:28])^x6) - binary.LittleEndian.PutUint32(out[28:32], binary.LittleEndian.Uint32(in[28:32])^x7) - binary.LittleEndian.PutUint32(out[32:36], binary.LittleEndian.Uint32(in[32:36])^x8) - binary.LittleEndian.PutUint32(out[36:40], binary.LittleEndian.Uint32(in[36:40])^x9) - binary.LittleEndian.PutUint32(out[40:44], binary.LittleEndian.Uint32(in[40:44])^x10) - binary.LittleEndian.PutUint32(out[44:48], binary.LittleEndian.Uint32(in[44:48])^x11) - binary.LittleEndian.PutUint32(out[48:52], binary.LittleEndian.Uint32(in[48:52])^x12) - binary.LittleEndian.PutUint32(out[52:56], binary.LittleEndian.Uint32(in[52:56])^x13) - binary.LittleEndian.PutUint32(out[56:60], binary.LittleEndian.Uint32(in[56:60])^x14) - binary.LittleEndian.PutUint32(out[60:64], binary.LittleEndian.Uint32(in[60:64])^x15) - in = in[BlockSize:] - } else { - binary.LittleEndian.PutUint32(out[0:4], x0) - binary.LittleEndian.PutUint32(out[4:8], x1) - binary.LittleEndian.PutUint32(out[8:12], x2) - binary.LittleEndian.PutUint32(out[12:16], x3) - binary.LittleEndian.PutUint32(out[16:20], x4) - binary.LittleEndian.PutUint32(out[20:24], x5) - binary.LittleEndian.PutUint32(out[24:28], x6) - binary.LittleEndian.PutUint32(out[28:32], x7) - binary.LittleEndian.PutUint32(out[32:36], x8) - binary.LittleEndian.PutUint32(out[36:40], x9) - binary.LittleEndian.PutUint32(out[40:44], x10) - 
binary.LittleEndian.PutUint32(out[44:48], x11) - binary.LittleEndian.PutUint32(out[48:52], x12) - binary.LittleEndian.PutUint32(out[52:56], x13) - binary.LittleEndian.PutUint32(out[56:60], x14) - binary.LittleEndian.PutUint32(out[60:64], x15) - } - out = out[BlockSize:] - } - - // Stoping at 2^70 bytes per nonce is the user's responsibility. - ctr := uint64(x[13])<<32 | uint64(x[12]) - ctr++ - x[12] = uint32(ctr) - x[13] = uint32(ctr >> 32) - } -} - -func hChaChaRef(x *[stateSize]uint32, out *[32]byte) { - x0, x1, x2, x3 := sigma0, sigma1, sigma2, sigma3 - x4, x5, x6, x7, x8, x9, x10, x11, x12, x13, x14, x15 := x[0], x[1], x[2], x[3], x[4], x[5], x[6], x[7], x[8], x[9], x[10], x[11] - - for i := chachaRounds; i > 0; i -= 2 { - // quarterround(x, 0, 4, 8, 12) - x0 += x4 - x12 ^= x0 - x12 = (x12 << 16) | (x12 >> 16) - x8 += x12 - x4 ^= x8 - x4 = (x4 << 12) | (x4 >> 20) - x0 += x4 - x12 ^= x0 - x12 = (x12 << 8) | (x12 >> 24) - x8 += x12 - x4 ^= x8 - x4 = (x4 << 7) | (x4 >> 25) - - // quarterround(x, 1, 5, 9, 13) - x1 += x5 - x13 ^= x1 - x13 = (x13 << 16) | (x13 >> 16) - x9 += x13 - x5 ^= x9 - x5 = (x5 << 12) | (x5 >> 20) - x1 += x5 - x13 ^= x1 - x13 = (x13 << 8) | (x13 >> 24) - x9 += x13 - x5 ^= x9 - x5 = (x5 << 7) | (x5 >> 25) - - // quarterround(x, 2, 6, 10, 14) - x2 += x6 - x14 ^= x2 - x14 = (x14 << 16) | (x14 >> 16) - x10 += x14 - x6 ^= x10 - x6 = (x6 << 12) | (x6 >> 20) - x2 += x6 - x14 ^= x2 - x14 = (x14 << 8) | (x14 >> 24) - x10 += x14 - x6 ^= x10 - x6 = (x6 << 7) | (x6 >> 25) - - // quarterround(x, 3, 7, 11, 15) - x3 += x7 - x15 ^= x3 - x15 = (x15 << 16) | (x15 >> 16) - x11 += x15 - x7 ^= x11 - x7 = (x7 << 12) | (x7 >> 20) - x3 += x7 - x15 ^= x3 - x15 = (x15 << 8) | (x15 >> 24) - x11 += x15 - x7 ^= x11 - x7 = (x7 << 7) | (x7 >> 25) - - // quarterround(x, 0, 5, 10, 15) - x0 += x5 - x15 ^= x0 - x15 = (x15 << 16) | (x15 >> 16) - x10 += x15 - x5 ^= x10 - x5 = (x5 << 12) | (x5 >> 20) - x0 += x5 - x15 ^= x0 - x15 = (x15 << 8) | (x15 >> 24) - x10 += x15 - x5 ^= x10 - x5 = (x5 << 7) | (x5 >> 25) - - // quarterround(x, 1, 6, 11, 12) - x1 += x6 - x12 ^= x1 - x12 = (x12 << 16) | (x12 >> 16) - x11 += x12 - x6 ^= x11 - x6 = (x6 << 12) | (x6 >> 20) - x1 += x6 - x12 ^= x1 - x12 = (x12 << 8) | (x12 >> 24) - x11 += x12 - x6 ^= x11 - x6 = (x6 << 7) | (x6 >> 25) - - // quarterround(x, 2, 7, 8, 13) - x2 += x7 - x13 ^= x2 - x13 = (x13 << 16) | (x13 >> 16) - x8 += x13 - x7 ^= x8 - x7 = (x7 << 12) | (x7 >> 20) - x2 += x7 - x13 ^= x2 - x13 = (x13 << 8) | (x13 >> 24) - x8 += x13 - x7 ^= x8 - x7 = (x7 << 7) | (x7 >> 25) - - // quarterround(x, 3, 4, 9, 14) - x3 += x4 - x14 ^= x3 - x14 = (x14 << 16) | (x14 >> 16) - x9 += x14 - x4 ^= x9 - x4 = (x4 << 12) | (x4 >> 20) - x3 += x4 - x14 ^= x3 - x14 = (x14 << 8) | (x14 >> 24) - x9 += x14 - x4 ^= x9 - x4 = (x4 << 7) | (x4 >> 25) - } - - // HChaCha returns x0...x3 | x12...x15, which corresponds to the - // indexes of the ChaCha constant and the indexes of the IV. 
- if useUnsafe { - outArr := (*[16]uint32)(unsafe.Pointer(&out[0])) - outArr[0] = x0 - outArr[1] = x1 - outArr[2] = x2 - outArr[3] = x3 - outArr[4] = x12 - outArr[5] = x13 - outArr[6] = x14 - outArr[7] = x15 - } else { - binary.LittleEndian.PutUint32(out[0:4], x0) - binary.LittleEndian.PutUint32(out[4:8], x1) - binary.LittleEndian.PutUint32(out[8:12], x2) - binary.LittleEndian.PutUint32(out[12:16], x3) - binary.LittleEndian.PutUint32(out[16:20], x12) - binary.LittleEndian.PutUint32(out[20:24], x13) - binary.LittleEndian.PutUint32(out[24:28], x14) - binary.LittleEndian.PutUint32(out[28:32], x15) - } - return -} diff --git a/vendor/git.schwanenlied.me/yawning/chacha20.git/chacha20_test.go b/vendor/git.schwanenlied.me/yawning/chacha20.git/chacha20_test.go deleted file mode 100644 index 3ba9b11..0000000 --- a/vendor/git.schwanenlied.me/yawning/chacha20.git/chacha20_test.go +++ /dev/null @@ -1,523 +0,0 @@ -// chacha20_test.go - ChaCha stream cipher implementation tests. -// -// To the extent possible under law, Yawning Angel waived all copyright -// and related or neighboring rights to chacha20, using the Creative -// Commons "CC0" public domain dedication. See LICENSE or -// for full details. - -package chacha20 - -import ( - "bytes" - "crypto/aes" - "crypto/cipher" - "crypto/rand" - "encoding/hex" - "testing" -) - -// Test vectors taken from: -// https://tools.ietf.org/html/draft-strombergson-chacha-test-vectors-01 -var draftTestVectors = []struct { - name string - key []byte - iv []byte - stream []byte - seekOffset uint64 -}{ - { - name: "IETF Draft: TC1: All zero key and IV.", - key: []byte{ - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - }, - iv: []byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, - stream: []byte{ - 0x76, 0xb8, 0xe0, 0xad, 0xa0, 0xf1, 0x3d, 0x90, - 0x40, 0x5d, 0x6a, 0xe5, 0x53, 0x86, 0xbd, 0x28, - 0xbd, 0xd2, 0x19, 0xb8, 0xa0, 0x8d, 0xed, 0x1a, - 0xa8, 0x36, 0xef, 0xcc, 0x8b, 0x77, 0x0d, 0xc7, - 0xda, 0x41, 0x59, 0x7c, 0x51, 0x57, 0x48, 0x8d, - 0x77, 0x24, 0xe0, 0x3f, 0xb8, 0xd8, 0x4a, 0x37, - 0x6a, 0x43, 0xb8, 0xf4, 0x15, 0x18, 0xa1, 0x1c, - 0xc3, 0x87, 0xb6, 0x69, 0xb2, 0xee, 0x65, 0x86, - 0x9f, 0x07, 0xe7, 0xbe, 0x55, 0x51, 0x38, 0x7a, - 0x98, 0xba, 0x97, 0x7c, 0x73, 0x2d, 0x08, 0x0d, - 0xcb, 0x0f, 0x29, 0xa0, 0x48, 0xe3, 0x65, 0x69, - 0x12, 0xc6, 0x53, 0x3e, 0x32, 0xee, 0x7a, 0xed, - 0x29, 0xb7, 0x21, 0x76, 0x9c, 0xe6, 0x4e, 0x43, - 0xd5, 0x71, 0x33, 0xb0, 0x74, 0xd8, 0x39, 0xd5, - 0x31, 0xed, 0x1f, 0x28, 0x51, 0x0a, 0xfb, 0x45, - 0xac, 0xe1, 0x0a, 0x1f, 0x4b, 0x79, 0x4d, 0x6f, - }, - }, - { - name: "IETF Draft: TC2: Single bit in key set. 
All zero IV.", - key: []byte{ - 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - }, - iv: []byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, - stream: []byte{ - 0xc5, 0xd3, 0x0a, 0x7c, 0xe1, 0xec, 0x11, 0x93, - 0x78, 0xc8, 0x4f, 0x48, 0x7d, 0x77, 0x5a, 0x85, - 0x42, 0xf1, 0x3e, 0xce, 0x23, 0x8a, 0x94, 0x55, - 0xe8, 0x22, 0x9e, 0x88, 0x8d, 0xe8, 0x5b, 0xbd, - 0x29, 0xeb, 0x63, 0xd0, 0xa1, 0x7a, 0x5b, 0x99, - 0x9b, 0x52, 0xda, 0x22, 0xbe, 0x40, 0x23, 0xeb, - 0x07, 0x62, 0x0a, 0x54, 0xf6, 0xfa, 0x6a, 0xd8, - 0x73, 0x7b, 0x71, 0xeb, 0x04, 0x64, 0xda, 0xc0, - 0x10, 0xf6, 0x56, 0xe6, 0xd1, 0xfd, 0x55, 0x05, - 0x3e, 0x50, 0xc4, 0x87, 0x5c, 0x99, 0x30, 0xa3, - 0x3f, 0x6d, 0x02, 0x63, 0xbd, 0x14, 0xdf, 0xd6, - 0xab, 0x8c, 0x70, 0x52, 0x1c, 0x19, 0x33, 0x8b, - 0x23, 0x08, 0xb9, 0x5c, 0xf8, 0xd0, 0xbb, 0x7d, - 0x20, 0x2d, 0x21, 0x02, 0x78, 0x0e, 0xa3, 0x52, - 0x8f, 0x1c, 0xb4, 0x85, 0x60, 0xf7, 0x6b, 0x20, - 0xf3, 0x82, 0xb9, 0x42, 0x50, 0x0f, 0xce, 0xac, - }, - }, - { - name: "IETF Draft: TC3: Single bit in IV set. All zero key.", - key: []byte{ - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - }, - iv: []byte{0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, - stream: []byte{ - 0xef, 0x3f, 0xdf, 0xd6, 0xc6, 0x15, 0x78, 0xfb, - 0xf5, 0xcf, 0x35, 0xbd, 0x3d, 0xd3, 0x3b, 0x80, - 0x09, 0x63, 0x16, 0x34, 0xd2, 0x1e, 0x42, 0xac, - 0x33, 0x96, 0x0b, 0xd1, 0x38, 0xe5, 0x0d, 0x32, - 0x11, 0x1e, 0x4c, 0xaf, 0x23, 0x7e, 0xe5, 0x3c, - 0xa8, 0xad, 0x64, 0x26, 0x19, 0x4a, 0x88, 0x54, - 0x5d, 0xdc, 0x49, 0x7a, 0x0b, 0x46, 0x6e, 0x7d, - 0x6b, 0xbd, 0xb0, 0x04, 0x1b, 0x2f, 0x58, 0x6b, - 0x53, 0x05, 0xe5, 0xe4, 0x4a, 0xff, 0x19, 0xb2, - 0x35, 0x93, 0x61, 0x44, 0x67, 0x5e, 0xfb, 0xe4, - 0x40, 0x9e, 0xb7, 0xe8, 0xe5, 0xf1, 0x43, 0x0f, - 0x5f, 0x58, 0x36, 0xae, 0xb4, 0x9b, 0xb5, 0x32, - 0x8b, 0x01, 0x7c, 0x4b, 0x9d, 0xc1, 0x1f, 0x8a, - 0x03, 0x86, 0x3f, 0xa8, 0x03, 0xdc, 0x71, 0xd5, - 0x72, 0x6b, 0x2b, 0x6b, 0x31, 0xaa, 0x32, 0x70, - 0x8a, 0xfe, 0x5a, 0xf1, 0xd6, 0xb6, 0x90, 0x58, - }, - }, - { - name: "IETF Draft: TC4: All bits in key and IV are set.", - key: []byte{ - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - }, - iv: []byte{0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff}, - stream: []byte{ - 0xd9, 0xbf, 0x3f, 0x6b, 0xce, 0x6e, 0xd0, 0xb5, - 0x42, 0x54, 0x55, 0x77, 0x67, 0xfb, 0x57, 0x44, - 0x3d, 0xd4, 0x77, 0x89, 0x11, 0xb6, 0x06, 0x05, - 0x5c, 0x39, 0xcc, 0x25, 0xe6, 0x74, 0xb8, 0x36, - 0x3f, 0xea, 0xbc, 0x57, 0xfd, 0xe5, 0x4f, 0x79, - 0x0c, 0x52, 0xc8, 0xae, 0x43, 0x24, 0x0b, 0x79, - 0xd4, 0x90, 0x42, 0xb7, 0x77, 0xbf, 0xd6, 0xcb, - 0x80, 0xe9, 0x31, 0x27, 0x0b, 0x7f, 0x50, 0xeb, - 0x5b, 0xac, 0x2a, 0xcd, 0x86, 0xa8, 0x36, 0xc5, - 0xdc, 0x98, 0xc1, 0x16, 0xc1, 0x21, 0x7e, 0xc3, - 0x1d, 0x3a, 0x63, 0xa9, 0x45, 0x13, 0x19, 0xf0, - 0x97, 0xf3, 0xb4, 0xd6, 0xda, 0xb0, 0x77, 0x87, - 0x19, 0x47, 0x7d, 0x24, 0xd2, 0x4b, 0x40, 0x3a, - 0x12, 0x24, 0x1d, 0x7c, 0xca, 0x06, 0x4f, 0x79, - 0x0f, 0x1d, 0x51, 0xcc, 0xaf, 0xf6, 0xb1, 0x66, - 0x7d, 0x4b, 0xbc, 0xa1, 0x95, 0x8c, 0x43, 0x06, - }, - }, - { - name: "IETF Draft: TC5: Every even bit set in key and IV.", - key: 
[]byte{ - 0x55, 0x55, 0x55, 0x55, 0x55, 0x55, 0x55, 0x55, - 0x55, 0x55, 0x55, 0x55, 0x55, 0x55, 0x55, 0x55, - 0x55, 0x55, 0x55, 0x55, 0x55, 0x55, 0x55, 0x55, - 0x55, 0x55, 0x55, 0x55, 0x55, 0x55, 0x55, 0x55, - }, - iv: []byte{0x55, 0x55, 0x55, 0x55, 0x55, 0x55, 0x55, 0x55}, - stream: []byte{ - 0xbe, 0xa9, 0x41, 0x1a, 0xa4, 0x53, 0xc5, 0x43, - 0x4a, 0x5a, 0xe8, 0xc9, 0x28, 0x62, 0xf5, 0x64, - 0x39, 0x68, 0x55, 0xa9, 0xea, 0x6e, 0x22, 0xd6, - 0xd3, 0xb5, 0x0a, 0xe1, 0xb3, 0x66, 0x33, 0x11, - 0xa4, 0xa3, 0x60, 0x6c, 0x67, 0x1d, 0x60, 0x5c, - 0xe1, 0x6c, 0x3a, 0xec, 0xe8, 0xe6, 0x1e, 0xa1, - 0x45, 0xc5, 0x97, 0x75, 0x01, 0x7b, 0xee, 0x2f, - 0xa6, 0xf8, 0x8a, 0xfc, 0x75, 0x80, 0x69, 0xf7, - 0xe0, 0xb8, 0xf6, 0x76, 0xe6, 0x44, 0x21, 0x6f, - 0x4d, 0x2a, 0x34, 0x22, 0xd7, 0xfa, 0x36, 0xc6, - 0xc4, 0x93, 0x1a, 0xca, 0x95, 0x0e, 0x9d, 0xa4, - 0x27, 0x88, 0xe6, 0xd0, 0xb6, 0xd1, 0xcd, 0x83, - 0x8e, 0xf6, 0x52, 0xe9, 0x7b, 0x14, 0x5b, 0x14, - 0x87, 0x1e, 0xae, 0x6c, 0x68, 0x04, 0xc7, 0x00, - 0x4d, 0xb5, 0xac, 0x2f, 0xce, 0x4c, 0x68, 0xc7, - 0x26, 0xd0, 0x04, 0xb1, 0x0f, 0xca, 0xba, 0x86, - }, - }, - { - name: "IETF Draft: TC6: Every odd bit set in key and IV.", - key: []byte{ - 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, - 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, - 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, - 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, - }, - iv: []byte{0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa}, - stream: []byte{ - 0x9a, 0xa2, 0xa9, 0xf6, 0x56, 0xef, 0xde, 0x5a, - 0xa7, 0x59, 0x1c, 0x5f, 0xed, 0x4b, 0x35, 0xae, - 0xa2, 0x89, 0x5d, 0xec, 0x7c, 0xb4, 0x54, 0x3b, - 0x9e, 0x9f, 0x21, 0xf5, 0xe7, 0xbc, 0xbc, 0xf3, - 0xc4, 0x3c, 0x74, 0x8a, 0x97, 0x08, 0x88, 0xf8, - 0x24, 0x83, 0x93, 0xa0, 0x9d, 0x43, 0xe0, 0xb7, - 0xe1, 0x64, 0xbc, 0x4d, 0x0b, 0x0f, 0xb2, 0x40, - 0xa2, 0xd7, 0x21, 0x15, 0xc4, 0x80, 0x89, 0x06, - 0x72, 0x18, 0x44, 0x89, 0x44, 0x05, 0x45, 0xd0, - 0x21, 0xd9, 0x7e, 0xf6, 0xb6, 0x93, 0xdf, 0xe5, - 0xb2, 0xc1, 0x32, 0xd4, 0x7e, 0x6f, 0x04, 0x1c, - 0x90, 0x63, 0x65, 0x1f, 0x96, 0xb6, 0x23, 0xe6, - 0x2a, 0x11, 0x99, 0x9a, 0x23, 0xb6, 0xf7, 0xc4, - 0x61, 0xb2, 0x15, 0x30, 0x26, 0xad, 0x5e, 0x86, - 0x6a, 0x2e, 0x59, 0x7e, 0xd0, 0x7b, 0x84, 0x01, - 0xde, 0xc6, 0x3a, 0x09, 0x34, 0xc6, 0xb2, 0xa9, - }, - }, - { - name: "IETF Draft: TC7: Sequence patterns in key and IV.", - key: []byte{ - 0x00, 0x11, 0x22, 0x33, 0x44, 0x55, 0x66, 0x77, - 0x88, 0x99, 0xaa, 0xbb, 0xcc, 0xdd, 0xee, 0xff, - 0xff, 0xee, 0xdd, 0xcc, 0xbb, 0xaa, 0x99, 0x88, - 0x77, 0x66, 0x55, 0x44, 0x33, 0x22, 0x11, 0x00, - }, - iv: []byte{0x0f, 0x1e, 0x2d, 0x3c, 0x4b, 0x5a, 0x69, 0x78}, - stream: []byte{ - 0x9f, 0xad, 0xf4, 0x09, 0xc0, 0x08, 0x11, 0xd0, - 0x04, 0x31, 0xd6, 0x7e, 0xfb, 0xd8, 0x8f, 0xba, - 0x59, 0x21, 0x8d, 0x5d, 0x67, 0x08, 0xb1, 0xd6, - 0x85, 0x86, 0x3f, 0xab, 0xbb, 0x0e, 0x96, 0x1e, - 0xea, 0x48, 0x0f, 0xd6, 0xfb, 0x53, 0x2b, 0xfd, - 0x49, 0x4b, 0x21, 0x51, 0x01, 0x50, 0x57, 0x42, - 0x3a, 0xb6, 0x0a, 0x63, 0xfe, 0x4f, 0x55, 0xf7, - 0xa2, 0x12, 0xe2, 0x16, 0x7c, 0xca, 0xb9, 0x31, - 0xfb, 0xfd, 0x29, 0xcf, 0x7b, 0xc1, 0xd2, 0x79, - 0xed, 0xdf, 0x25, 0xdd, 0x31, 0x6b, 0xb8, 0x84, - 0x3d, 0x6e, 0xde, 0xe0, 0xbd, 0x1e, 0xf1, 0x21, - 0xd1, 0x2f, 0xa1, 0x7c, 0xbc, 0x2c, 0x57, 0x4c, - 0xcc, 0xab, 0x5e, 0x27, 0x51, 0x67, 0xb0, 0x8b, - 0xd6, 0x86, 0xf8, 0xa0, 0x9d, 0xf8, 0x7e, 0xc3, - 0xff, 0xb3, 0x53, 0x61, 0xb9, 0x4e, 0xbf, 0xa1, - 0x3f, 0xec, 0x0e, 0x48, 0x89, 0xd1, 0x8d, 0xa5, - }, - }, - { - name: "IETF Draft: TC8: key: 'All your base are belong to us!, IV: 'IETF2013'", - key: []byte{ 
- 0xc4, 0x6e, 0xc1, 0xb1, 0x8c, 0xe8, 0xa8, 0x78, - 0x72, 0x5a, 0x37, 0xe7, 0x80, 0xdf, 0xb7, 0x35, - 0x1f, 0x68, 0xed, 0x2e, 0x19, 0x4c, 0x79, 0xfb, - 0xc6, 0xae, 0xbe, 0xe1, 0xa6, 0x67, 0x97, 0x5d, - }, - iv: []byte{0x1a, 0xda, 0x31, 0xd5, 0xcf, 0x68, 0x82, 0x21}, - stream: []byte{ - 0xf6, 0x3a, 0x89, 0xb7, 0x5c, 0x22, 0x71, 0xf9, - 0x36, 0x88, 0x16, 0x54, 0x2b, 0xa5, 0x2f, 0x06, - 0xed, 0x49, 0x24, 0x17, 0x92, 0x30, 0x2b, 0x00, - 0xb5, 0xe8, 0xf8, 0x0a, 0xe9, 0xa4, 0x73, 0xaf, - 0xc2, 0x5b, 0x21, 0x8f, 0x51, 0x9a, 0xf0, 0xfd, - 0xd4, 0x06, 0x36, 0x2e, 0x8d, 0x69, 0xde, 0x7f, - 0x54, 0xc6, 0x04, 0xa6, 0xe0, 0x0f, 0x35, 0x3f, - 0x11, 0x0f, 0x77, 0x1b, 0xdc, 0xa8, 0xab, 0x92, - 0xe5, 0xfb, 0xc3, 0x4e, 0x60, 0xa1, 0xd9, 0xa9, - 0xdb, 0x17, 0x34, 0x5b, 0x0a, 0x40, 0x27, 0x36, - 0x85, 0x3b, 0xf9, 0x10, 0xb0, 0x60, 0xbd, 0xf1, - 0xf8, 0x97, 0xb6, 0x29, 0x0f, 0x01, 0xd1, 0x38, - 0xae, 0x2c, 0x4c, 0x90, 0x22, 0x5b, 0xa9, 0xea, - 0x14, 0xd5, 0x18, 0xf5, 0x59, 0x29, 0xde, 0xa0, - 0x98, 0xca, 0x7a, 0x6c, 0xcf, 0xe6, 0x12, 0x27, - 0x05, 0x3c, 0x84, 0xe4, 0x9a, 0x4a, 0x33, 0x32, - }, - }, - { - name: "XChaCha20 Test", - key: []byte{ - 0x1b, 0x27, 0x55, 0x64, 0x73, 0xe9, 0x85, 0xd4, - 0x62, 0xcd, 0x51, 0x19, 0x7a, 0x9a, 0x46, 0xc7, - 0x60, 0x09, 0x54, 0x9e, 0xac, 0x64, 0x74, 0xf2, - 0x06, 0xc4, 0xee, 0x08, 0x44, 0xf6, 0x83, 0x89, - }, - iv: []byte{ - 0x69, 0x69, 0x6e, 0xe9, 0x55, 0xb6, 0x2b, 0x73, - 0xcd, 0x62, 0xbd, 0xa8, 0x75, 0xfc, 0x73, 0xd6, - 0x82, 0x19, 0xe0, 0x03, 0x6b, 0x7a, 0x0b, 0x37, - }, - stream: []byte{ - 0x4f, 0xeb, 0xf2, 0xfe, 0x4b, 0x35, 0x9c, 0x50, - 0x8d, 0xc5, 0xe8, 0xb5, 0x98, 0x0c, 0x88, 0xe3, - 0x89, 0x46, 0xd8, 0xf1, 0x8f, 0x31, 0x34, 0x65, - 0xc8, 0x62, 0xa0, 0x87, 0x82, 0x64, 0x82, 0x48, - 0x01, 0x8d, 0xac, 0xdc, 0xb9, 0x04, 0x17, 0x88, - 0x53, 0xa4, 0x6d, 0xca, 0x3a, 0x0e, 0xaa, 0xee, - 0x74, 0x7c, 0xba, 0x97, 0x43, 0x4e, 0xaf, 0xfa, - 0xd5, 0x8f, 0xea, 0x82, 0x22, 0x04, 0x7e, 0x0d, - 0xe6, 0xc3, 0xa6, 0x77, 0x51, 0x06, 0xe0, 0x33, - 0x1a, 0xd7, 0x14, 0xd2, 0xf2, 0x7a, 0x55, 0x64, - 0x13, 0x40, 0xa1, 0xf1, 0xdd, 0x9f, 0x94, 0x53, - 0x2e, 0x68, 0xcb, 0x24, 0x1c, 0xbd, 0xd1, 0x50, - 0x97, 0x0d, 0x14, 0xe0, 0x5c, 0x5b, 0x17, 0x31, - 0x93, 0xfb, 0x14, 0xf5, 0x1c, 0x41, 0xf3, 0x93, - 0x83, 0x5b, 0xf7, 0xf4, 0x16, 0xa7, 0xe0, 0xbb, - 0xa8, 0x1f, 0xfb, 0x8b, 0x13, 0xaf, 0x0e, 0x21, - 0x69, 0x1d, 0x7e, 0xce, 0xc9, 0x3b, 0x75, 0xe6, - 0xe4, 0x18, 0x3a, - }, - }, - { - name: "RFC 7539 Test Vector (96 bit nonce)", - key: []byte{ - 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, - 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, - 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, - 0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, - }, - iv: []byte{ - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x4a, - 0x00, 0x00, 0x00, 0x00, - }, - stream: []byte{ - 0x22, 0x4f, 0x51, 0xf3, 0x40, 0x1b, 0xd9, 0xe1, - 0x2f, 0xde, 0x27, 0x6f, 0xb8, 0x63, 0x1d, 0xed, - 0x8c, 0x13, 0x1f, 0x82, 0x3d, 0x2c, 0x06, 0xe2, - 0x7e, 0x4f, 0xca, 0xec, 0x9e, 0xf3, 0xcf, 0x78, - 0x8a, 0x3b, 0x0a, 0xa3, 0x72, 0x60, 0x0a, 0x92, - 0xb5, 0x79, 0x74, 0xcd, 0xed, 0x2b, 0x93, 0x34, - 0x79, 0x4c, 0xba, 0x40, 0xc6, 0x3e, 0x34, 0xcd, - 0xea, 0x21, 0x2c, 0x4c, 0xf0, 0x7d, 0x41, 0xb7, - 0x69, 0xa6, 0x74, 0x9f, 0x3f, 0x63, 0x0f, 0x41, - 0x22, 0xca, 0xfe, 0x28, 0xec, 0x4d, 0xc4, 0x7e, - 0x26, 0xd4, 0x34, 0x6d, 0x70, 0xb9, 0x8c, 0x73, - 0xf3, 0xe9, 0xc5, 0x3a, 0xc4, 0x0c, 0x59, 0x45, - 0x39, 0x8b, 0x6e, 0xda, 0x1a, 0x83, 0x2c, 0x89, - 0xc1, 0x67, 0xea, 0xcd, 0x90, 0x1d, 0x7e, 0x2b, - 0xf3, 0x63, - }, - seekOffset: 1, - }, -} - -func 
TestChaCha20(t *testing.T) { - for _, v := range draftTestVectors { - c, err := NewCipher(v.key, v.iv) - if err != nil { - t.Errorf("[%s]: New(k, iv) returned: %s", v.name, err) - continue - } - if v.seekOffset != 0 { - if err = c.Seek(v.seekOffset); err != nil { - t.Errorf("[%s]: Seek(seekOffset) returned: %s", v.name, err) - continue - } - } - out := make([]byte, len(v.stream)) - c.XORKeyStream(out, out) - if !bytes.Equal(out, v.stream) { - t.Errorf("[%s]: out != stream (%x != %x)", v.name, out, v.stream) - } - } -} - -func TestChaCha20Vectorized(t *testing.T) { - if !usingVectors { - t.Skip("vectorized ChaCha20 support not enabled") - } - - // Save the batch blocks processing routine so we can mess with it, and - // restore it when we're done. - oldBlocksFn := blocksFn - defer func() { - blocksFn = oldBlocksFn - }() - - const testSz = 1024 * 16 - - // Generate a random key, nonce and input. - var key [KeySize]byte - var nonce [NonceSize]byte - var input [testSz]byte - var vecOut [testSz]byte - var refOut [testSz]byte - rand.Read(key[:]) - rand.Read(nonce[:]) - rand.Read(input[:]) - - for i := 0; i < testSz; i++ { - // Encrypt with the vectorized implementation. - c, err := NewCipher(key[:], nonce[:]) - if err != nil { - t.Fatal(err) - } - c.XORKeyStream(vecOut[:], input[:i]) - - c, err = NewCipher(key[:], nonce[:]) - if err != nil { - t.Fatal(err) - } - blocksFn = blocksRef - c.XORKeyStream(refOut[:], input[:i]) - if !bytes.Equal(refOut[:i], vecOut[:i]) { - for j, v := range refOut { - if vecOut[j] != v { - t.Errorf("[%d] mismatch at offset: %d %x != %x", i, j, vecOut[j], v) - break - } - } - t.Errorf("ref: %s", hex.Dump(refOut[:i])) - t.Errorf("vec: %s", hex.Dump(vecOut[:i])) - t.Errorf("refOut != vecOut") - break - } - blocksFn = oldBlocksFn - } -} - -func TestChaCha20VectorizedIncremental(t *testing.T) { - if !usingVectors { - t.Skip("vectorized ChaCha20 support not enabled") - } - - // Save the batch blocks processing routine so we can mess with it, and - // restore it when we're done. - oldBlocksFn := blocksFn - defer func() { - blocksFn = oldBlocksFn - }() - - const ( - maxBlocks = 256 - testSz = (maxBlocks * (maxBlocks + 1) / 2) * BlockSize - ) - - // Generate a random key, nonce and input. - var key [KeySize]byte - var nonce [NonceSize]byte - var input [testSz]byte - var vecOut [testSz]byte - var refOut [testSz]byte - rand.Read(key[:]) - rand.Read(nonce[:]) - rand.Read(input[:]) - - // Using the vectorized version, encrypt an ever increasing number of - // blocks at a time. - c, err := NewCipher(key[:], nonce[:]) - if err != nil { - t.Fatal(err) - } - off := 0 - for nrBlocks := 0; nrBlocks <= maxBlocks; nrBlocks++ { - cnt := nrBlocks * BlockSize - c.XORKeyStream(vecOut[off:off+cnt], input[off:off+cnt]) - off += cnt - } - - // Encrypt an equivalent amount of data with a one shot call to the - // reference implementation. - c, err = NewCipher(key[:], nonce[:]) - if err != nil { - t.Fatal(err) - } - blocksFn = blocksRef - c.XORKeyStream(refOut[:], input[:]) - - // And compare the output. 
- if !bytes.Equal(refOut[:], vecOut[:]) { - for j, v := range refOut { - if vecOut[j] != v { - t.Errorf("incremental mismatch at offset: %d %x != %x", j, vecOut[j], v) - break - } - } - // t.Errorf("ref: %s", hex.Dump(refOut[:])) - // t.Errorf("vec: %s", hex.Dump(vecOut[:])) - t.Errorf("refOut != vecOut") - } -} - -func doBenchN(b *testing.B, n int) { - var key [KeySize]byte - var nonce [NonceSize]byte - s := make([]byte, n) - c, err := NewCipher(key[:], nonce[:]) - if err != nil { - b.Fatal(err) - } - b.SetBytes(int64(n)) - b.ResetTimer() - for i := 0; i < b.N; i++ { - c.XORKeyStream(s, s) - } -} - -func BenchmarkChaCha20_16(b *testing.B) { - doBenchN(b, 16) -} - -func BenchmarkChaCha20_64(b *testing.B) { - doBenchN(b, 64) -} - -func BenchmarkChaCha20_128(b *testing.B) { - doBenchN(b, 128) -} - -func BenchmarkChaCha20_192(b *testing.B) { - doBenchN(b, 192) -} - -func BenchmarkChaCha20_256(b *testing.B) { - doBenchN(b, 256) -} - -func BenchmarkChaCha20_384(b *testing.B) { - doBenchN(b, 384) -} - -func BenchmarkChaCha20_512(b *testing.B) { - doBenchN(b, 512) -} - -func BenchmarkChaCha20_1k(b *testing.B) { - doBenchN(b, 1024) -} - -func BenchmarkChaCha20_64k(b *testing.B) { - doBenchN(b, 65536) -} - -func BenchmarkCTRAES256_64k(b *testing.B) { - const sz = 64 * 1024 - var key [32]byte - var iv [16]byte - s := make([]byte, sz) - blk, err := aes.NewCipher(key[:]) - if err != nil { - b.Fatal(err) - } - c := cipher.NewCTR(blk, iv[:]) - b.SetBytes(sz) - b.ResetTimer() - for i := 0; i < b.N; i++ { - c.XORKeyStream(s, s) - } -} diff --git a/vendor/git.schwanenlied.me/yawning/poly1305.git/.gitignore b/vendor/git.schwanenlied.me/yawning/poly1305.git/.gitignore deleted file mode 100644 index d38c149..0000000 --- a/vendor/git.schwanenlied.me/yawning/poly1305.git/.gitignore +++ /dev/null @@ -1,2 +0,0 @@ -*.swp -*~ diff --git a/vendor/git.schwanenlied.me/yawning/poly1305.git/README.md b/vendor/git.schwanenlied.me/yawning/poly1305.git/README.md deleted file mode 100644 index 7b6de64..0000000 --- a/vendor/git.schwanenlied.me/yawning/poly1305.git/README.md +++ /dev/null @@ -1,17 +0,0 @@ -### poly1305: Go Poly1305 -#### Yawning Angel (yawning at schwanenlied dot me) - -Poly1305 implements the Poly1305 MAC algorithm, exposing a saner interface than -the one provided by golang.org/x/crypto/poly1305. In particular it exposes a -object that implements a hash.Hash interface. - -The implementation is based on the Public Domain poly1305-donna by Andrew -Moon. - -| Implementation | 64 byte | 1024 byte | -| -------------------- | ------------ | ----------- | -| go.crypto (ref) | 94.51 MB/s | 187.67 MB/s | -| go.crypto (amd64) | 540.68 MB/s | 909.97 MB/s | -| go poly1305-donna-32 | 425.40 MB/s | 715.23 MB/s | - -Note: All numbers on a i5-4250U, and to be taken with a huge grain of salt. diff --git a/vendor/git.schwanenlied.me/yawning/poly1305.git/poly1305.go b/vendor/git.schwanenlied.me/yawning/poly1305.git/poly1305.go deleted file mode 100644 index 21486ca..0000000 --- a/vendor/git.schwanenlied.me/yawning/poly1305.git/poly1305.go +++ /dev/null @@ -1,207 +0,0 @@ -// -// poly1305.go: Poly1305 MAC. -// -// To the extent possible under law, Yawning Angel waived all copyright -// and related or neighboring rights to poly1305, using the creative -// commons "CC0" public domain dedication. See LICENSE or -// for full details. - -// Package poly1305 is a Poly1305 MAC implementation. It is different from the -// golang.org/x/crypto implementation in that it exports a hash.Hash interface -// to support incremental updates. 
-// -// The implementation is based on Andrew Moon's poly1305-donna. -package poly1305 - -import ( - "crypto/subtle" - "errors" - "hash" - "runtime" - "unsafe" -) - -const ( - // KeySize is the Poly1305 key size in bytes. - KeySize = 32 - - // Size is the Poly1305 MAC size in bytes. - Size = 16 - - // BlockSize is the Poly1305 block size in bytes. - BlockSize = 16 -) - -var ( - // ErrInvalidKeySize is the error returned when an invalid sized key is - // encountered. - ErrInvalidKeySize = errors.New("poly1305: invalid key size") - - // ErrInvalidMacSize is the error returned when an invalid sized MAC is - // encountered. - ErrInvalidMacSize = errors.New("poly1305: invalid mac size") - - isLittleEndian = false -) - -type implInterface interface { - init(key []byte) - clear() - blocks(m []byte, bytes int, isFinal bool) - finish(mac *[Size]byte) -} - -// Poly1305 is an instance of the Poly1305 MAC algorithm. -type Poly1305 struct { - impl implState - leftover int - buffer [BlockSize]byte -} - -// Write adds more data to the running hash. It never returns an error. -func (st *Poly1305) Write(p []byte) (n int, err error) { - // - // poly1305-donna.c:poly1305_update() - // - - m := p - bytes := len(m) - - // handle leftover - if st.leftover > 0 { - want := BlockSize - st.leftover - if want > bytes { - want = bytes - } - for i := 0; i < want; i++ { - st.buffer[st.leftover+i] = m[i] - } - bytes -= want - m = m[want:] - st.leftover += want - if st.leftover < BlockSize { - return len(p), nil - } - st.impl.blocks(st.buffer[:], BlockSize, false) - st.leftover = 0 - } - - // process full blocks - if bytes >= BlockSize { - want := bytes & (^(BlockSize - 1)) - st.impl.blocks(m, want, false) - m = m[want:] - bytes -= want - } - - // store leftover - if bytes > 0 { - for i := 0; i < bytes; i++ { - st.buffer[st.leftover+i] = m[i] - } - st.leftover += bytes - } - - return len(p), nil -} - -// Sum appends the current hash to b and returns the resulting slice. It does -// not change the underlying hash state. -func (st *Poly1305) Sum(b []byte) []byte { - var mac [Size]byte - tmp := *st - tmp.finish(&mac) - return append(b, mac[:]...) -} - -// Reset clears the internal hash state and panic()s, because calling this is a -// sign that the user is doing something unadvisable. -func (st *Poly1305) Reset() { - st.Clear() // Obliterate the state before panic(). - - // Poly1305 keys are one time use only. - panic("poly1305: Reset() is not supported") -} - -// Size returns the number of bytes Sum will return. -func (st *Poly1305) Size() int { - return Size -} - -// BlockSize returns the hash's underlying block size. -func (st *Poly1305) BlockSize() int { - return BlockSize -} - -// Init (re-)initializes the hash instance with a given key. -func (st *Poly1305) Init(key []byte) { - if len(key) != KeySize { - panic(ErrInvalidKeySize) - } - - st.impl.init(key) - st.leftover = 0 -} - -// Clear purges the sensitive material in hash's internal state. -func (st *Poly1305) Clear() { - st.impl.clear() -} - -func (st *Poly1305) finish(mac *[Size]byte) { - // process the remaining block - if st.leftover > 0 { - st.buffer[st.leftover] = 1 - for i := st.leftover + 1; i < BlockSize; i++ { - st.buffer[i] = 0 - } - st.impl.blocks(st.buffer[:], BlockSize, true) - } - - st.impl.finish(mac) - st.impl.clear() -} - -// New returns a new Poly1305 instance keyed with the supplied key. 
-func New(key []byte) (*Poly1305, error) { - if len(key) != KeySize { - return nil, ErrInvalidKeySize - } - - h := &Poly1305{} - h.Init(key) - return h, nil -} - -// Sum does exactly what golang.org/x/crypto/poly1305.Sum() does. -func Sum(mac *[Size]byte, m []byte, key *[KeySize]byte) { - var h Poly1305 - h.Init(key[:]) - h.Write(m) - h.finish(mac) -} - -// Verify does exactly what golang.org/x/crypto/poly1305.Verify does. -func Verify(mac *[Size]byte, m []byte, key *[KeySize]byte) bool { - var m2 [Size]byte - Sum(&m2, m, key) - return subtle.ConstantTimeCompare(mac[:], m2[:]) == 1 -} - -func init() { - // Use the UTF-32 (UCS-4) Byte Order Mark to detect host byte order, - // which enables the further use of 'unsafe' for added performance. - const bomLE = 0x0000feff - bom := [4]byte{0xff, 0xfe, 0x00, 0x00} - - // ARM doesn't get the spiffy fast code since it's picky wrt alignment - // and I doubt Go does the right thing. - if runtime.GOARCH != "arm" { - bomHost := *(*uint32)(unsafe.Pointer(&bom[0])) - if bomHost == 0x0000feff { // Little endian, use unsafe. - isLittleEndian = true - } - } -} - -var _ hash.Hash = (*Poly1305)(nil) diff --git a/vendor/git.schwanenlied.me/yawning/poly1305.git/poly1305_32.go b/vendor/git.schwanenlied.me/yawning/poly1305.git/poly1305_32.go deleted file mode 100644 index 857de1f..0000000 --- a/vendor/git.schwanenlied.me/yawning/poly1305.git/poly1305_32.go +++ /dev/null @@ -1,236 +0,0 @@ -// -// poly1305_32.go: 32->64 bit multiplies, 64 bit additions -// -// To the extent possible under law, Yawning Angel waived all copyright -// and related or neighboring rights to poly1305, using the creative -// commons "CC0" public domain dedication. See LICENSE or -// for full details. - -package poly1305 - -import ( - "encoding/binary" - "unsafe" -) - -type implState struct { - r [5]uint32 - h [5]uint32 - pad [4]uint32 -} - -func (impl *implState) init(key []byte) { - // - // poly1305-donna-32.h:poly1305_init() - // - - // r &= 0xffffffc0ffffffc0ffffffc0fffffff - if isLittleEndian { - impl.r[0] = *(*uint32)(unsafe.Pointer(&key[0])) & 0x3ffffff - impl.r[1] = (*(*uint32)(unsafe.Pointer(&key[3])) >> 2) & 0x3ffff03 - impl.r[2] = (*(*uint32)(unsafe.Pointer(&key[6])) >> 4) & 0x3ffc0ff - impl.r[3] = (*(*uint32)(unsafe.Pointer(&key[9])) >> 6) & 0x3f03fff - impl.r[4] = (*(*uint32)(unsafe.Pointer(&key[12])) >> 8) & 0x00fffff - } else { - impl.r[0] = binary.LittleEndian.Uint32(key[0:]) & 0x3ffffff - impl.r[1] = (binary.LittleEndian.Uint32(key[3:]) >> 2) & 0x3ffff03 - impl.r[2] = (binary.LittleEndian.Uint32(key[6:]) >> 4) & 0x3ffc0ff - impl.r[3] = (binary.LittleEndian.Uint32(key[9:]) >> 6) & 0x3f03fff - impl.r[4] = (binary.LittleEndian.Uint32(key[12:]) >> 8) & 0x00fffff - } - - // h = 0 - for i := range impl.h { - impl.h[i] = 0 - } - - // save pad for later - impl.pad[0] = binary.LittleEndian.Uint32(key[16:]) - impl.pad[1] = binary.LittleEndian.Uint32(key[20:]) - impl.pad[2] = binary.LittleEndian.Uint32(key[24:]) - impl.pad[3] = binary.LittleEndian.Uint32(key[28:]) -} - -func (impl *implState) clear() { - for i := range impl.h { - impl.h[i] = 0 - } - for i := range impl.r { - impl.r[i] = 0 - } - for i := range impl.pad { - impl.pad[i] = 0 - } -} - -func (impl *implState) blocks(m []byte, bytes int, isFinal bool) { - // - // poly1305-donna-32.h:poly1305_blocks() - // - - var hibit uint32 - var d0, d1, d2, d3, d4 uint64 - var c uint32 - if !isFinal { - hibit = 1 << 24 // 1 << 128 - } - r0, r1, r2, r3, r4 := impl.r[0], impl.r[1], impl.r[2], impl.r[3], impl.r[4] - s1, s2, s3, s4 := r1*5, 
r2*5, r3*5, r4*5 - h0, h1, h2, h3, h4 := impl.h[0], impl.h[1], impl.h[2], impl.h[3], impl.h[4] - - for bytes >= BlockSize { - // h += m[i] - if isLittleEndian { - h0 += *(*uint32)(unsafe.Pointer(&m[0])) & 0x3ffffff - h1 += (*(*uint32)(unsafe.Pointer(&m[3])) >> 2) & 0x3ffffff - h2 += (*(*uint32)(unsafe.Pointer(&m[6])) >> 4) & 0x3ffffff - h3 += (*(*uint32)(unsafe.Pointer(&m[9])) >> 6) & 0x3ffffff - h4 += (*(*uint32)(unsafe.Pointer(&m[12])) >> 8) | hibit - } else { - h0 += binary.LittleEndian.Uint32(m[0:]) & 0x3ffffff - h1 += (binary.LittleEndian.Uint32(m[3:]) >> 2) & 0x3ffffff - h2 += (binary.LittleEndian.Uint32(m[6:]) >> 4) & 0x3ffffff - h3 += (binary.LittleEndian.Uint32(m[9:]) >> 6) & 0x3ffffff - h4 += (binary.LittleEndian.Uint32(m[12:]) >> 8) | hibit - } - - // h *= r - d0 = (uint64(h0) * uint64(r0)) + (uint64(h1) * uint64(s4)) + (uint64(h2) * uint64(s3)) + (uint64(h3) * uint64(s2)) + (uint64(h4) * uint64(s1)) - d1 = (uint64(h0) * uint64(r1)) + (uint64(h1) * uint64(r0)) + (uint64(h2) * uint64(s4)) + (uint64(h3) * uint64(s3)) + (uint64(h4) * uint64(s2)) - d2 = (uint64(h0) * uint64(r2)) + (uint64(h1) * uint64(r1)) + (uint64(h2) * uint64(r0)) + (uint64(h3) * uint64(s4)) + (uint64(h4) * uint64(s3)) - d3 = (uint64(h0) * uint64(r3)) + (uint64(h1) * uint64(r2)) + (uint64(h2) * uint64(r1)) + (uint64(h3) * uint64(r0)) + (uint64(h4) * uint64(s4)) - d4 = (uint64(h0) * uint64(r4)) + (uint64(h1) * uint64(r3)) + (uint64(h2) * uint64(r2)) + (uint64(h3) * uint64(r1)) + (uint64(h4) * uint64(r0)) - - // (partial) h %= p - c = uint32(d0 >> 26) - h0 = uint32(d0) & 0x3ffffff - - d1 += uint64(c) - c = uint32(d1 >> 26) - h1 = uint32(d1) & 0x3ffffff - - d2 += uint64(c) - c = uint32(d2 >> 26) - h2 = uint32(d2) & 0x3ffffff - - d3 += uint64(c) - c = uint32(d3 >> 26) - h3 = uint32(d3) & 0x3ffffff - - d4 += uint64(c) - c = uint32(d4 >> 26) - h4 = uint32(d4) & 0x3ffffff - - h0 += c * 5 - c = h0 >> 26 - h0 = h0 & 0x3ffffff - - h1 += c - - m = m[BlockSize:] - bytes -= BlockSize - } - - impl.h[0], impl.h[1], impl.h[2], impl.h[3], impl.h[4] = h0, h1, h2, h3, h4 -} - -func (impl *implState) finish(mac *[Size]byte) { - // - // poly1305-donna-32.h:poly1305_finish() - // - - var c uint32 - var g0, g1, g2, g3, g4 uint32 - var f uint64 - var mask uint32 - - // fully carry h - h0, h1, h2, h3, h4 := impl.h[0], impl.h[1], impl.h[2], impl.h[3], impl.h[4] - c = h1 >> 26 - h1 &= 0x3ffffff - - h2 += c - c = h2 >> 26 - h2 &= 0x3ffffff - - h3 += c - c = h3 >> 26 - h3 &= 0x3ffffff - - h4 += c - c = h4 >> 26 - h4 &= 0x3ffffff - - h0 += c * 5 - c = h0 >> 26 - h0 &= 0x3ffffff - - h1 += c - - // compute h + -p - g0 = h0 + 5 - c = g0 >> 26 - g0 &= 0x3ffffff - - g1 = h1 + c - c = g1 >> 26 - g1 &= 0x3ffffff - - g2 = h2 + c - c = g2 >> 26 - g2 &= 0x3ffffff - - g3 = h3 + c - c = g3 >> 26 - g3 &= 0x3ffffff - - g4 = h4 + c - (1 << 26) - - // select h if h < p, or h + -p if h >= p - mask = (g4 >> ((4 * 8) - 1)) - 1 - g0 &= mask - g1 &= mask - g2 &= mask - g3 &= mask - g4 &= mask - mask = ^mask - h0 = (h0 & mask) | g0 - h1 = (h1 & mask) | g1 - h2 = (h2 & mask) | g2 - h3 = (h3 & mask) | g3 - h4 = (h4 & mask) | g4 - - // h = h % (2^128) - h0 = ((h0) | (h1 << 26)) & 0xffffffff - h1 = ((h1 >> 6) | (h2 << 20)) & 0xffffffff - h2 = ((h2 >> 12) | (h3 << 14)) & 0xffffffff - h3 = ((h3 >> 18) | (h4 << 8)) & 0xffffffff - - // mac = (h + pad) % (2^128) - f = uint64(h0) + uint64(impl.pad[0]) - h0 = uint32(f) - - f = uint64(h1) + uint64(impl.pad[1]) + (f >> 32) - h1 = uint32(f) - - f = uint64(h2) + uint64(impl.pad[2]) + (f >> 32) - h2 = uint32(f) - - f = uint64(h3) 
+ uint64(impl.pad[3]) + (f >> 32) - h3 = uint32(f) - - if isLittleEndian { - macArr := (*[4]uint32)(unsafe.Pointer(&mac[0])) - macArr[0] = h0 - macArr[1] = h1 - macArr[2] = h2 - macArr[3] = h3 - } else { - binary.LittleEndian.PutUint32(mac[0:], h0) - binary.LittleEndian.PutUint32(mac[4:], h1) - binary.LittleEndian.PutUint32(mac[8:], h2) - binary.LittleEndian.PutUint32(mac[12:], h3) - } -} - -var _ implInterface = (*implState)(nil) diff --git a/vendor/git.schwanenlied.me/yawning/poly1305.git/poly1305_test.go b/vendor/git.schwanenlied.me/yawning/poly1305.git/poly1305_test.go deleted file mode 100644 index bed3da7..0000000 --- a/vendor/git.schwanenlied.me/yawning/poly1305.git/poly1305_test.go +++ /dev/null @@ -1,585 +0,0 @@ -// -// poly1305.go: Poly1305 MAC known answer tests. -// -// To the extent possible under law, Yawning Angel waived all copyright -// and related or neighboring rights to poly1305, using the creative -// commons "CC0" public domain dedication. See LICENSE or -// for full details. - -package poly1305 - -import ( - "bytes" - "testing" -) - -// Shamelessly stolen from poly1305-donna.c:poly1305_power_on_self_test() - -func TestNaCl(t *testing.T) { - var naclKey = []byte{ - 0xee, 0xa6, 0xa7, 0x25, 0x1c, 0x1e, 0x72, 0x91, - 0x6d, 0x11, 0xc2, 0xcb, 0x21, 0x4d, 0x3c, 0x25, - 0x25, 0x39, 0x12, 0x1d, 0x8e, 0x23, 0x4e, 0x65, - 0x2d, 0x65, 0x1f, 0xa4, 0xc8, 0xcf, 0xf8, 0x80, - } - - var naclMsg = []byte{ - 0x8e, 0x99, 0x3b, 0x9f, 0x48, 0x68, 0x12, 0x73, - 0xc2, 0x96, 0x50, 0xba, 0x32, 0xfc, 0x76, 0xce, - 0x48, 0x33, 0x2e, 0xa7, 0x16, 0x4d, 0x96, 0xa4, - 0x47, 0x6f, 0xb8, 0xc5, 0x31, 0xa1, 0x18, 0x6a, - 0xc0, 0xdf, 0xc1, 0x7c, 0x98, 0xdc, 0xe8, 0x7b, - 0x4d, 0xa7, 0xf0, 0x11, 0xec, 0x48, 0xc9, 0x72, - 0x71, 0xd2, 0xc2, 0x0f, 0x9b, 0x92, 0x8f, 0xe2, - 0x27, 0x0d, 0x6f, 0xb8, 0x63, 0xd5, 0x17, 0x38, - 0xb4, 0x8e, 0xee, 0xe3, 0x14, 0xa7, 0xcc, 0x8a, - 0xb9, 0x32, 0x16, 0x45, 0x48, 0xe5, 0x26, 0xae, - 0x90, 0x22, 0x43, 0x68, 0x51, 0x7a, 0xcf, 0xea, - 0xbd, 0x6b, 0xb3, 0x73, 0x2b, 0xc0, 0xe9, 0xda, - 0x99, 0x83, 0x2b, 0x61, 0xca, 0x01, 0xb6, 0xde, - 0x56, 0x24, 0x4a, 0x9e, 0x88, 0xd5, 0xf9, 0xb3, - 0x79, 0x73, 0xf6, 0x22, 0xa4, 0x3d, 0x14, 0xa6, - 0x59, 0x9b, 0x1f, 0x65, 0x4c, 0xb4, 0x5a, 0x74, - 0xe3, 0x55, 0xa5, - } - - var naclMac = []byte{ - 0xf3, 0xff, 0xc7, 0x70, 0x3f, 0x94, 0x00, 0xe5, - 0x2a, 0x7d, 0xfb, 0x4b, 0x3d, 0x33, 0x05, 0xd9, - } - - // Oneshot - h, err := New(naclKey[:]) - if err != nil { - t.Fatal(err) - } - - n, err := h.Write(naclMsg[:]) - if err != nil { - t.Fatal(err) - } else if n != len(naclMsg) { - t.Fatalf("h.Write() returned unexpected length: %d", n) - } - - mac := h.Sum(nil) - if !bytes.Equal(mac, naclMac[:]) { - t.Fatalf("mac != naclMac") - } - - // Incremental - h, err = New(naclKey[:]) - if err != nil { - t.Fatal(err) - } - - for i, s := range []struct{ off, sz int }{ - {0, 32}, - {32, 64}, - {96, 16}, - {112, 8}, - {120, 4}, - {124, 2}, - {126, 1}, - {127, 1}, - {128, 1}, - {129, 1}, - {130, 1}, - } { - n, err := h.Write(naclMsg[s.off : s.off+s.sz]) - if err != nil { - t.Fatalf("[%d]: h.Write(): %s", i, err) - } else if n != s.sz { - t.Fatalf("[%d]: h.Write(): %d (expected: %d)", i, n, s.sz) - } - } - - mac = h.Sum(nil) - if !bytes.Equal(mac, naclMac[:]) { - t.Fatalf("mac != naclMac") - } -} - -func TestWrap(t *testing.T) { - // generates a final value of (2^130 - 2) == 3 - wrapKey := [KeySize]byte{ - 0x02, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - } - - wrapMsg := []byte{ - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - } - - wrapMac := [Size]byte{ - 0x03, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - } - - var mac [Size]byte - Sum(&mac, wrapMsg, &wrapKey) - if !bytes.Equal(mac[:], wrapMac[:]) { - t.Fatalf("mac != wrapMac") - } -} - -func TestTotal(t *testing.T) { - // mac of the macs of messages of length 0 to 256, where the key and messages - // have all their values set to the length - totalKey := []byte{ - 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, - 0xff, 0xfe, 0xfd, 0xfc, 0xfb, 0xfa, 0xf9, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0x00, 0x00, 0x00, 0x00, - } - - totalMac := []byte{ - 0x64, 0xaf, 0xe2, 0xe8, 0xd6, 0xad, 0x7b, 0xbd, - 0xd2, 0x87, 0xf9, 0x7c, 0x44, 0x62, 0x3d, 0x39, - } - - var allKey [KeySize]byte - allMsg := make([]byte, 256) - - totalCtx, err := New(totalKey[:]) - if err != nil { - t.Fatal(err) - } - - for i := 0; i < 256; i++ { - // set key and message to 'i,i,i..' - for j := range allKey { - allKey[j] = byte(i) - } - for j := 0; j < i; j++ { - allMsg[j] = byte(i) - } - - var mac [Size]byte - Sum(&mac, allMsg[:i], &allKey) - n, err := totalCtx.Write(mac[:]) - if err != nil { - t.Fatalf("[%d]: h.Write(): %s", i, err) - } else if n != len(mac) { - t.Fatalf("[%d]: h.Write(): %d (expected: %d)", i, n, len(mac)) - } - } - mac := totalCtx.Sum(nil) - if !bytes.Equal(mac, totalMac[:]) { - t.Fatalf("mac != totalMac") - } -} - -func TestIETFDraft(t *testing.T) { - // Test vectors taken from: - // https://www.ietf.org/id/draft-irtf-cfrg-chacha20-poly1305-07.txt - - vectors := []struct { - key [KeySize]byte - m []byte - tag [Size]byte - }{ - // Test Vector #1 - { - [KeySize]byte{}, - []byte{ - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - }, - [Size]byte{}, - }, - - // Test Vector #2 - { - [KeySize]byte{ - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x36, 0xe5, 0xf6, 0xb5, 0xc5, 0xe0, 0x60, 0x70, - 0xf0, 0xef, 0xca, 0x96, 0x22, 0x7a, 0x86, 0x3e, - }, - []byte{ - 0x41, 0x6e, 0x79, 0x20, 0x73, 0x75, 0x62, 0x6d, - 0x69, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x20, 0x74, - 0x6f, 0x20, 0x74, 0x68, 0x65, 0x20, 0x49, 0x45, - 0x54, 0x46, 0x20, 0x69, 0x6e, 0x74, 0x65, 0x6e, - 0x64, 0x65, 0x64, 0x20, 0x62, 0x79, 0x20, 0x74, - 0x68, 0x65, 0x20, 0x43, 0x6f, 0x6e, 0x74, 0x72, - 0x69, 0x62, 0x75, 0x74, 0x6f, 0x72, 0x20, 0x66, - 0x6f, 0x72, 0x20, 0x70, 0x75, 0x62, 0x6c, 0x69, - 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x20, 0x61, - 0x73, 0x20, 0x61, 0x6c, 0x6c, 0x20, 0x6f, 0x72, - 0x20, 0x70, 0x61, 0x72, 0x74, 0x20, 0x6f, 0x66, - 0x20, 0x61, 0x6e, 0x20, 0x49, 0x45, 0x54, 0x46, - 0x20, 0x49, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x65, - 0x74, 0x2d, 0x44, 0x72, 0x61, 0x66, 0x74, 0x20, - 0x6f, 0x72, 0x20, 0x52, 0x46, 0x43, 0x20, 0x61, - 0x6e, 0x64, 0x20, 0x61, 0x6e, 0x79, 0x20, 0x73, - 0x74, 0x61, 0x74, 0x65, 0x6d, 0x65, 0x6e, 0x74, - 0x20, 0x6d, 0x61, 0x64, 0x65, 0x20, 0x77, 0x69, - 0x74, 0x68, 0x69, 0x6e, 0x20, 0x74, 0x68, 0x65, - 0x20, 0x63, 0x6f, 0x6e, 0x74, 0x65, 
0x78, 0x74, - 0x20, 0x6f, 0x66, 0x20, 0x61, 0x6e, 0x20, 0x49, - 0x45, 0x54, 0x46, 0x20, 0x61, 0x63, 0x74, 0x69, - 0x76, 0x69, 0x74, 0x79, 0x20, 0x69, 0x73, 0x20, - 0x63, 0x6f, 0x6e, 0x73, 0x69, 0x64, 0x65, 0x72, - 0x65, 0x64, 0x20, 0x61, 0x6e, 0x20, 0x22, 0x49, - 0x45, 0x54, 0x46, 0x20, 0x43, 0x6f, 0x6e, 0x74, - 0x72, 0x69, 0x62, 0x75, 0x74, 0x69, 0x6f, 0x6e, - 0x22, 0x2e, 0x20, 0x53, 0x75, 0x63, 0x68, 0x20, - 0x73, 0x74, 0x61, 0x74, 0x65, 0x6d, 0x65, 0x6e, - 0x74, 0x73, 0x20, 0x69, 0x6e, 0x63, 0x6c, 0x75, - 0x64, 0x65, 0x20, 0x6f, 0x72, 0x61, 0x6c, 0x20, - 0x73, 0x74, 0x61, 0x74, 0x65, 0x6d, 0x65, 0x6e, - 0x74, 0x73, 0x20, 0x69, 0x6e, 0x20, 0x49, 0x45, - 0x54, 0x46, 0x20, 0x73, 0x65, 0x73, 0x73, 0x69, - 0x6f, 0x6e, 0x73, 0x2c, 0x20, 0x61, 0x73, 0x20, - 0x77, 0x65, 0x6c, 0x6c, 0x20, 0x61, 0x73, 0x20, - 0x77, 0x72, 0x69, 0x74, 0x74, 0x65, 0x6e, 0x20, - 0x61, 0x6e, 0x64, 0x20, 0x65, 0x6c, 0x65, 0x63, - 0x74, 0x72, 0x6f, 0x6e, 0x69, 0x63, 0x20, 0x63, - 0x6f, 0x6d, 0x6d, 0x75, 0x6e, 0x69, 0x63, 0x61, - 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x20, 0x6d, 0x61, - 0x64, 0x65, 0x20, 0x61, 0x74, 0x20, 0x61, 0x6e, - 0x79, 0x20, 0x74, 0x69, 0x6d, 0x65, 0x20, 0x6f, - 0x72, 0x20, 0x70, 0x6c, 0x61, 0x63, 0x65, 0x2c, - 0x20, 0x77, 0x68, 0x69, 0x63, 0x68, 0x20, 0x61, - 0x72, 0x65, 0x20, 0x61, 0x64, 0x64, 0x72, 0x65, - 0x73, 0x73, 0x65, 0x64, 0x20, 0x74, 0x6f, - }, - [Size]byte{ - 0x36, 0xe5, 0xf6, 0xb5, 0xc5, 0xe0, 0x60, 0x70, - 0xf0, 0xef, 0xca, 0x96, 0x22, 0x7a, 0x86, 0x3e, - }, - }, - - // Test Vector #3 - { - [KeySize]byte{ - 0x36, 0xe5, 0xf6, 0xb5, 0xc5, 0xe0, 0x60, 0x70, - 0xf0, 0xef, 0xca, 0x96, 0x22, 0x7a, 0x86, 0x3e, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - }, - []byte{ - 0x41, 0x6e, 0x79, 0x20, 0x73, 0x75, 0x62, 0x6d, - 0x69, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x20, 0x74, - 0x6f, 0x20, 0x74, 0x68, 0x65, 0x20, 0x49, 0x45, - 0x54, 0x46, 0x20, 0x69, 0x6e, 0x74, 0x65, 0x6e, - 0x64, 0x65, 0x64, 0x20, 0x62, 0x79, 0x20, 0x74, - 0x68, 0x65, 0x20, 0x43, 0x6f, 0x6e, 0x74, 0x72, - 0x69, 0x62, 0x75, 0x74, 0x6f, 0x72, 0x20, 0x66, - 0x6f, 0x72, 0x20, 0x70, 0x75, 0x62, 0x6c, 0x69, - 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x20, 0x61, - 0x73, 0x20, 0x61, 0x6c, 0x6c, 0x20, 0x6f, 0x72, - 0x20, 0x70, 0x61, 0x72, 0x74, 0x20, 0x6f, 0x66, - 0x20, 0x61, 0x6e, 0x20, 0x49, 0x45, 0x54, 0x46, - 0x20, 0x49, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x65, - 0x74, 0x2d, 0x44, 0x72, 0x61, 0x66, 0x74, 0x20, - 0x6f, 0x72, 0x20, 0x52, 0x46, 0x43, 0x20, 0x61, - 0x6e, 0x64, 0x20, 0x61, 0x6e, 0x79, 0x20, 0x73, - 0x74, 0x61, 0x74, 0x65, 0x6d, 0x65, 0x6e, 0x74, - 0x20, 0x6d, 0x61, 0x64, 0x65, 0x20, 0x77, 0x69, - 0x74, 0x68, 0x69, 0x6e, 0x20, 0x74, 0x68, 0x65, - 0x20, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, - 0x20, 0x6f, 0x66, 0x20, 0x61, 0x6e, 0x20, 0x49, - 0x45, 0x54, 0x46, 0x20, 0x61, 0x63, 0x74, 0x69, - 0x76, 0x69, 0x74, 0x79, 0x20, 0x69, 0x73, 0x20, - 0x63, 0x6f, 0x6e, 0x73, 0x69, 0x64, 0x65, 0x72, - 0x65, 0x64, 0x20, 0x61, 0x6e, 0x20, 0x22, 0x49, - 0x45, 0x54, 0x46, 0x20, 0x43, 0x6f, 0x6e, 0x74, - 0x72, 0x69, 0x62, 0x75, 0x74, 0x69, 0x6f, 0x6e, - 0x22, 0x2e, 0x20, 0x53, 0x75, 0x63, 0x68, 0x20, - 0x73, 0x74, 0x61, 0x74, 0x65, 0x6d, 0x65, 0x6e, - 0x74, 0x73, 0x20, 0x69, 0x6e, 0x63, 0x6c, 0x75, - 0x64, 0x65, 0x20, 0x6f, 0x72, 0x61, 0x6c, 0x20, - 0x73, 0x74, 0x61, 0x74, 0x65, 0x6d, 0x65, 0x6e, - 0x74, 0x73, 0x20, 0x69, 0x6e, 0x20, 0x49, 0x45, - 0x54, 0x46, 0x20, 0x73, 0x65, 0x73, 0x73, 0x69, - 0x6f, 0x6e, 0x73, 0x2c, 0x20, 0x61, 0x73, 0x20, - 0x77, 0x65, 0x6c, 0x6c, 0x20, 0x61, 0x73, 0x20, - 0x77, 
0x72, 0x69, 0x74, 0x74, 0x65, 0x6e, 0x20, - 0x61, 0x6e, 0x64, 0x20, 0x65, 0x6c, 0x65, 0x63, - 0x74, 0x72, 0x6f, 0x6e, 0x69, 0x63, 0x20, 0x63, - 0x6f, 0x6d, 0x6d, 0x75, 0x6e, 0x69, 0x63, 0x61, - 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x20, 0x6d, 0x61, - 0x64, 0x65, 0x20, 0x61, 0x74, 0x20, 0x61, 0x6e, - 0x79, 0x20, 0x74, 0x69, 0x6d, 0x65, 0x20, 0x6f, - 0x72, 0x20, 0x70, 0x6c, 0x61, 0x63, 0x65, 0x2c, - 0x20, 0x77, 0x68, 0x69, 0x63, 0x68, 0x20, 0x61, - 0x72, 0x65, 0x20, 0x61, 0x64, 0x64, 0x72, 0x65, - 0x73, 0x73, 0x65, 0x64, 0x20, 0x74, 0x6f, - }, - [Size]byte{ - 0xf3, 0x47, 0x7e, 0x7c, 0xd9, 0x54, 0x17, 0xaf, - 0x89, 0xa6, 0xb8, 0x79, 0x4c, 0x31, 0x0c, 0xf0, - }, - }, - - // Test Vector #4 - { - [KeySize]byte{ - 0x1c, 0x92, 0x40, 0xa5, 0xeb, 0x55, 0xd3, 0x8a, - 0xf3, 0x33, 0x88, 0x86, 0x04, 0xf6, 0xb5, 0xf0, - 0x47, 0x39, 0x17, 0xc1, 0x40, 0x2b, 0x80, 0x09, - 0x9d, 0xca, 0x5c, 0xbc, 0x20, 0x70, 0x75, 0xc0, - }, - []byte{ - 0x27, 0x54, 0x77, 0x61, 0x73, 0x20, 0x62, 0x72, - 0x69, 0x6c, 0x6c, 0x69, 0x67, 0x2c, 0x20, 0x61, - 0x6e, 0x64, 0x20, 0x74, 0x68, 0x65, 0x20, 0x73, - 0x6c, 0x69, 0x74, 0x68, 0x79, 0x20, 0x74, 0x6f, - 0x76, 0x65, 0x73, 0x0a, 0x44, 0x69, 0x64, 0x20, - 0x67, 0x79, 0x72, 0x65, 0x20, 0x61, 0x6e, 0x64, - 0x20, 0x67, 0x69, 0x6d, 0x62, 0x6c, 0x65, 0x20, - 0x69, 0x6e, 0x20, 0x74, 0x68, 0x65, 0x20, 0x77, - 0x61, 0x62, 0x65, 0x3a, 0x0a, 0x41, 0x6c, 0x6c, - 0x20, 0x6d, 0x69, 0x6d, 0x73, 0x79, 0x20, 0x77, - 0x65, 0x72, 0x65, 0x20, 0x74, 0x68, 0x65, 0x20, - 0x62, 0x6f, 0x72, 0x6f, 0x67, 0x6f, 0x76, 0x65, - 0x73, 0x2c, 0x0a, 0x41, 0x6e, 0x64, 0x20, 0x74, - 0x68, 0x65, 0x20, 0x6d, 0x6f, 0x6d, 0x65, 0x20, - 0x72, 0x61, 0x74, 0x68, 0x73, 0x20, 0x6f, 0x75, - 0x74, 0x67, 0x72, 0x61, 0x62, 0x65, 0x2e, - }, - [Size]byte{ - 0x45, 0x41, 0x66, 0x9a, 0x7e, 0xaa, 0xee, 0x61, - 0xe7, 0x08, 0xdc, 0x7c, 0xbc, 0xc5, 0xeb, 0x62, - }, - }, - - // Test Vector #5 - // - // If one uses 130-bit partial reduction, does the code handle the case - // where partially reduced final result is not fully reduced? - { - [KeySize]byte{ - // R - 0x02, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - // S - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - }, - []byte{ - 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, - 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, - }, - [Size]byte{ - 0x03, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - }, - }, - - // Test Vector #6 - // - // What happens if addition of s overflows modulo 2^128? - { - [KeySize]byte{ - // R - 0x02, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - // S - 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, - 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, - }, - []byte{ - 0x02, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - }, - [Size]byte{ - 0x03, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - }, - }, - - // Test Vector #7 - // - // What happens if data limb is all ones and there is carry from lower - // limb? 
- { - [KeySize]byte{ - // R - 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - // S - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - }, - []byte{ - 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, - 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, - 0xF0, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, - 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, - 0x11, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - }, - [Size]byte{ - 0x05, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - }, - }, - - // Test Vector #8 - // - // What happens if final result from polynomial part is exactly - // 2^130-5? - { - [KeySize]byte{ - // R - 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - // S - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - }, - []byte{ - 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, - 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, - 0xFB, 0xFE, 0xFE, 0xFE, 0xFE, 0xFE, 0xFE, 0xFE, - 0xFE, 0xFE, 0xFE, 0xFE, 0xFE, 0xFE, 0xFE, 0xFE, - 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, - 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, - }, - [Size]byte{ - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - }, - }, - - // Test Vector #9 - // - // What happens if final result from polynomial part is exactly - // 2^130-6? - { - [KeySize]byte{ - // R - 0x02, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - // S - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - }, - []byte{ - 0xFD, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, - 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, - }, - [Size]byte{ - 0xFA, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, - 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, - }, - }, - - // Test Vector #10 - // - // What happens if 5*H+L-type reduction produces 131-bit intermediate - // result? - { - [KeySize]byte{ - // R - 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x04, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - // S - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - }, - []byte{ - 0xE3, 0x35, 0x94, 0xD7, 0x50, 0x5E, 0x43, 0xB9, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x33, 0x94, 0xD7, 0x50, 0x5E, 0x43, 0x79, 0xCD, - 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - }, - [Size]byte{ - 0x14, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x55, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - }, - }, - - // Test Vector #11 - // - // What happens if 5*H+L-type reduction produces 131-bit final result? 
- { - [KeySize]byte{ - // R - 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x04, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - // S - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - }, - []byte{ - 0xE3, 0x35, 0x94, 0xD7, 0x50, 0x5E, 0x43, 0xB9, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x33, 0x94, 0xD7, 0x50, 0x5E, 0x43, 0x79, 0xCD, - 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - }, - [Size]byte{ - 0x13, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - }, - }, - } - - for i, vec := range vectors { - var mac [Size]byte - Sum(&mac, vec.m, &vec.key) - if !bytes.Equal(mac[:], vec.tag[:]) { - t.Errorf("[%d]: mac != vec.tag", i) - } - if !Verify(&vec.tag, vec.m, &vec.key) { - t.Errorf("[%d]: Verify(tag, m, key) returned false", i) - } - } -} - -func TestIETFDraftForceByteswap(t *testing.T) { - if !isLittleEndian { - t.Skipf("not little endian, slow path already taken") - } else { - isLittleEndian = false - TestIETFDraft(t) - isLittleEndian = true - } -} - -// Swiped from golang.org/x/crypto/poly1305/poly1305_test.go. - -func Benchmark64(b *testing.B) { - b.StopTimer() - var mac [Size]byte - var key [KeySize]byte - m := make([]byte, 64) - b.SetBytes(int64(len(m))) - b.StartTimer() - - for i := 0; i < b.N; i++ { - Sum(&mac, m, &key) - } -} - -func Benchmark1k(b *testing.B) { - b.StopTimer() - var mac [Size]byte - var key [KeySize]byte - m := make([]byte, 1024) - b.SetBytes(int64(len(m))) - b.StartTimer() - - for i := 0; i < b.N; i++ { - Sum(&mac, m, &key) - } -} diff --git a/vendor/github.com/BurntSushi/toml/.gitignore b/vendor/github.com/BurntSushi/toml/.gitignore deleted file mode 100644 index 0cd3800..0000000 --- a/vendor/github.com/BurntSushi/toml/.gitignore +++ /dev/null @@ -1,5 +0,0 @@ -TAGS -tags -.*.swp -tomlcheck/tomlcheck -toml.test diff --git a/vendor/github.com/BurntSushi/toml/.travis.yml b/vendor/github.com/BurntSushi/toml/.travis.yml deleted file mode 100644 index 43caf6d..0000000 --- a/vendor/github.com/BurntSushi/toml/.travis.yml +++ /dev/null @@ -1,12 +0,0 @@ -language: go -go: - - 1.1 - - 1.2 - - tip -install: - - go install ./... - - go get github.com/BurntSushi/toml-test -script: - - export PATH="$PATH:$HOME/gopath/bin" - - make test - diff --git a/vendor/github.com/BurntSushi/toml/COMPATIBLE b/vendor/github.com/BurntSushi/toml/COMPATIBLE deleted file mode 100644 index 21e0938..0000000 --- a/vendor/github.com/BurntSushi/toml/COMPATIBLE +++ /dev/null @@ -1,3 +0,0 @@ -Compatible with TOML version -[v0.2.0](https://github.com/mojombo/toml/blob/master/versions/toml-v0.2.0.md) - diff --git a/vendor/github.com/BurntSushi/toml/COPYING b/vendor/github.com/BurntSushi/toml/COPYING deleted file mode 100644 index 5a8e332..0000000 --- a/vendor/github.com/BurntSushi/toml/COPYING +++ /dev/null @@ -1,14 +0,0 @@ - DO WHAT THE FUCK YOU WANT TO PUBLIC LICENSE - Version 2, December 2004 - - Copyright (C) 2004 Sam Hocevar - - Everyone is permitted to copy and distribute verbatim or modified - copies of this license document, and changing it is allowed as long - as the name is changed. - - DO WHAT THE FUCK YOU WANT TO PUBLIC LICENSE - TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION - - 0. You just DO WHAT THE FUCK YOU WANT TO. 
- diff --git a/vendor/github.com/BurntSushi/toml/Makefile b/vendor/github.com/BurntSushi/toml/Makefile deleted file mode 100644 index 3600848..0000000 --- a/vendor/github.com/BurntSushi/toml/Makefile +++ /dev/null @@ -1,19 +0,0 @@ -install: - go install ./... - -test: install - go test -v - toml-test toml-test-decoder - toml-test -encoder toml-test-encoder - -fmt: - gofmt -w *.go */*.go - colcheck *.go */*.go - -tags: - find ./ -name '*.go' -print0 | xargs -0 gotags > TAGS - -push: - git push origin master - git push github master - diff --git a/vendor/github.com/BurntSushi/toml/README.md b/vendor/github.com/BurntSushi/toml/README.md deleted file mode 100644 index 5a5df63..0000000 --- a/vendor/github.com/BurntSushi/toml/README.md +++ /dev/null @@ -1,220 +0,0 @@ -## TOML parser and encoder for Go with reflection - -TOML stands for Tom's Obvious, Minimal Language. This Go package provides a -reflection interface similar to Go's standard library `json` and `xml` -packages. This package also supports the `encoding.TextUnmarshaler` and -`encoding.TextMarshaler` interfaces so that you can define custom data -representations. (There is an example of this below.) - -Spec: https://github.com/mojombo/toml - -Compatible with TOML version -[v0.2.0](https://github.com/toml-lang/toml/blob/master/versions/en/toml-v0.2.0.md) - -Documentation: http://godoc.org/github.com/BurntSushi/toml - -Installation: - -```bash -go get github.com/BurntSushi/toml -``` - -Try the toml validator: - -```bash -go get github.com/BurntSushi/toml/cmd/tomlv -tomlv some-toml-file.toml -``` - -[![Build status](https://api.travis-ci.org/BurntSushi/toml.png)](https://travis-ci.org/BurntSushi/toml) - - -### Testing - -This package passes all tests in -[toml-test](https://github.com/BurntSushi/toml-test) for both the decoder -and the encoder. - -### Examples - -This package works similarly to how the Go standard library handles `XML` -and `JSON`. Namely, data is loaded into Go values via reflection. 
- -For the simplest example, consider some TOML file as just a list of keys -and values: - -```toml -Age = 25 -Cats = [ "Cauchy", "Plato" ] -Pi = 3.14 -Perfection = [ 6, 28, 496, 8128 ] -DOB = 1987-07-05T05:45:00Z -``` - -Which could be defined in Go as: - -```go -type Config struct { - Age int - Cats []string - Pi float64 - Perfection []int - DOB time.Time // requires `import time` -} -``` - -And then decoded with: - -```go -var conf Config -if _, err := toml.Decode(tomlData, &conf); err != nil { - // handle error -} -``` - -You can also use struct tags if your struct field name doesn't map to a TOML -key value directly: - -```toml -some_key_NAME = "wat" -``` - -```go -type TOML struct { - ObscureKey string `toml:"some_key_NAME"` -} -``` - -### Using the `encoding.TextUnmarshaler` interface - -Here's an example that automatically parses duration strings into -`time.Duration` values: - -```toml -[[song]] -name = "Thunder Road" -duration = "4m49s" - -[[song]] -name = "Stairway to Heaven" -duration = "8m03s" -``` - -Which can be decoded with: - -```go -type song struct { - Name string - Duration duration -} -type songs struct { - Song []song -} -var favorites songs -if _, err := toml.Decode(blob, &favorites); err != nil { - log.Fatal(err) -} - -for _, s := range favorites.Song { - fmt.Printf("%s (%s)\n", s.Name, s.Duration) -} -``` - -And you'll also need a `duration` type that satisfies the -`encoding.TextUnmarshaler` interface: - -```go -type duration struct { - time.Duration -} - -func (d *duration) UnmarshalText(text []byte) error { - var err error - d.Duration, err = time.ParseDuration(string(text)) - return err -} -``` - -### More complex usage - -Here's an example of how to load the example from the official spec page: - -```toml -# This is a TOML document. Boom. - -title = "TOML Example" - -[owner] -name = "Tom Preston-Werner" -organization = "GitHub" -bio = "GitHub Cofounder & CEO\nLikes tater tots and beer." -dob = 1979-05-27T07:32:00Z # First class dates? Why not? - -[database] -server = "192.168.1.1" -ports = [ 8001, 8001, 8002 ] -connection_max = 5000 -enabled = true - -[servers] - - # You can indent as you please. Tabs or spaces. TOML don't care. - [servers.alpha] - ip = "10.0.0.1" - dc = "eqdc10" - - [servers.beta] - ip = "10.0.0.2" - dc = "eqdc10" - -[clients] -data = [ ["gamma", "delta"], [1, 2] ] # just an update to make sure parsers support it - -# Line breaks are OK when inside arrays -hosts = [ - "alpha", - "omega" -] -``` - -And the corresponding Go types are: - -```go -type tomlConfig struct { - Title string - Owner ownerInfo - DB database `toml:"database"` - Servers map[string]server - Clients clients -} - -type ownerInfo struct { - Name string - Org string `toml:"organization"` - Bio string - DOB time.Time -} - -type database struct { - Server string - Ports []int - ConnMax int `toml:"connection_max"` - Enabled bool -} - -type server struct { - IP string - DC string -} - -type clients struct { - Data [][]interface{} - Hosts []string -} -``` - -Note that a case insensitive match will be tried if an exact match can't be -found. - -A working example of the above can be found in `_examples/example.{go,toml}`. 
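For quick reference, here is a minimal, self-contained sketch combining the plain key/value and struct-tag decoding that the README above describes; the `Config` type and the sample blob are illustrative, while `toml.Decode` and `MetaData.Undecoded` are the APIs defined in the vendored package itself:

```go
package main

import (
	"fmt"
	"log"

	"github.com/BurntSushi/toml"
)

// Config mirrors the TOML blob below; the `toml` tag maps a key whose
// spelling does not match the Go field name.
type Config struct {
	Age        int
	Cats       []string
	ObscureKey string `toml:"some_key_NAME"`
}

func main() {
	const blob = `
Age = 25
Cats = [ "Cauchy", "Plato" ]
some_key_NAME = "wat"
Extra = "not mapped to any field"
`
	var conf Config
	// Decode returns MetaData; Undecoded lists keys that matched no field.
	md, err := toml.Decode(blob, &conf)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("%+v\n", conf)
	fmt.Printf("undecoded keys: %q\n", md.Undecoded())
}
```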
- diff --git a/vendor/github.com/BurntSushi/toml/_examples/example.go b/vendor/github.com/BurntSushi/toml/_examples/example.go deleted file mode 100644 index 79f31f2..0000000 --- a/vendor/github.com/BurntSushi/toml/_examples/example.go +++ /dev/null @@ -1,61 +0,0 @@ -package main - -import ( - "fmt" - "time" - - "github.com/BurntSushi/toml" -) - -type tomlConfig struct { - Title string - Owner ownerInfo - DB database `toml:"database"` - Servers map[string]server - Clients clients -} - -type ownerInfo struct { - Name string - Org string `toml:"organization"` - Bio string - DOB time.Time -} - -type database struct { - Server string - Ports []int - ConnMax int `toml:"connection_max"` - Enabled bool -} - -type server struct { - IP string - DC string -} - -type clients struct { - Data [][]interface{} - Hosts []string -} - -func main() { - var config tomlConfig - if _, err := toml.DecodeFile("example.toml", &config); err != nil { - fmt.Println(err) - return - } - - fmt.Printf("Title: %s\n", config.Title) - fmt.Printf("Owner: %s (%s, %s), Born: %s\n", - config.Owner.Name, config.Owner.Org, config.Owner.Bio, - config.Owner.DOB) - fmt.Printf("Database: %s %v (Max conn. %d), Enabled? %v\n", - config.DB.Server, config.DB.Ports, config.DB.ConnMax, - config.DB.Enabled) - for serverName, server := range config.Servers { - fmt.Printf("Server: %s (%s, %s)\n", serverName, server.IP, server.DC) - } - fmt.Printf("Client data: %v\n", config.Clients.Data) - fmt.Printf("Client hosts: %v\n", config.Clients.Hosts) -} diff --git a/vendor/github.com/BurntSushi/toml/_examples/example.toml b/vendor/github.com/BurntSushi/toml/_examples/example.toml deleted file mode 100644 index 32c7a4f..0000000 --- a/vendor/github.com/BurntSushi/toml/_examples/example.toml +++ /dev/null @@ -1,35 +0,0 @@ -# This is a TOML document. Boom. - -title = "TOML Example" - -[owner] -name = "Tom Preston-Werner" -organization = "GitHub" -bio = "GitHub Cofounder & CEO\nLikes tater tots and beer." -dob = 1979-05-27T07:32:00Z # First class dates? Why not? - -[database] -server = "192.168.1.1" -ports = [ 8001, 8001, 8002 ] -connection_max = 5000 -enabled = true - -[servers] - - # You can indent as you please. Tabs or spaces. TOML don't care. - [servers.alpha] - ip = "10.0.0.1" - dc = "eqdc10" - - [servers.beta] - ip = "10.0.0.2" - dc = "eqdc10" - -[clients] -data = [ ["gamma", "delta"], [1, 2] ] # just an update to make sure parsers support it - -# Line breaks are OK when inside arrays -hosts = [ - "alpha", - "omega" -] diff --git a/vendor/github.com/BurntSushi/toml/_examples/hard.toml b/vendor/github.com/BurntSushi/toml/_examples/hard.toml deleted file mode 100644 index 26145d2..0000000 --- a/vendor/github.com/BurntSushi/toml/_examples/hard.toml +++ /dev/null @@ -1,22 +0,0 @@ -# Test file for TOML -# Only this one tries to emulate a TOML file written by a user of the kind of parser writers probably hate -# This part you'll really hate - -[the] -test_string = "You'll hate me after this - #" # " Annoying, isn't it? - - [the.hard] - test_array = [ "] ", " # "] # ] There you go, parse this! - test_array2 = [ "Test #11 ]proved that", "Experiment #9 was a success" ] - # You didn't think it'd as easy as chucking out the last #, did you? - another_test_string = " Same thing, but with a string #" - harder_test_string = " And when \"'s are in the string, along with # \"" # "and comments are there too" - # Things will get harder - - [the.hard.bit#] - what? = "You don't think some user won't do that?" 
- multi_line_array = [ - "]", - # ] Oh yes I did - ] - diff --git a/vendor/github.com/BurntSushi/toml/_examples/implicit.toml b/vendor/github.com/BurntSushi/toml/_examples/implicit.toml deleted file mode 100644 index 1dea5ce..0000000 --- a/vendor/github.com/BurntSushi/toml/_examples/implicit.toml +++ /dev/null @@ -1,4 +0,0 @@ -# [x] you -# [x.y] don't -# [x.y.z] need these -[x.y.z.w] # for this to work diff --git a/vendor/github.com/BurntSushi/toml/_examples/invalid-apples.toml b/vendor/github.com/BurntSushi/toml/_examples/invalid-apples.toml deleted file mode 100644 index 74e9e33..0000000 --- a/vendor/github.com/BurntSushi/toml/_examples/invalid-apples.toml +++ /dev/null @@ -1,6 +0,0 @@ -# DO NOT WANT -[fruit] -type = "apple" - -[fruit.type] -apple = "yes" diff --git a/vendor/github.com/BurntSushi/toml/_examples/invalid.toml b/vendor/github.com/BurntSushi/toml/_examples/invalid.toml deleted file mode 100644 index beb1dba..0000000 --- a/vendor/github.com/BurntSushi/toml/_examples/invalid.toml +++ /dev/null @@ -1,35 +0,0 @@ -# This is an INVALID TOML document. Boom. -# Can you spot the error without help? - -title = "TOML Example" - -[owner] -name = "Tom Preston-Werner" -organization = "GitHub" -bio = "GitHub Cofounder & CEO\nLikes tater tots and beer." -dob = 1979-05-27T7:32:00Z # First class dates? Why not? - -[database] -server = "192.168.1.1" -ports = [ 8001, 8001, 8002 ] -connection_max = 5000 -enabled = true - -[servers] - # You can indent as you please. Tabs or spaces. TOML don't care. - [servers.alpha] - ip = "10.0.0.1" - dc = "eqdc10" - - [servers.beta] - ip = "10.0.0.2" - dc = "eqdc10" - -[clients] -data = [ ["gamma", "delta"], [1, 2] ] # just an update to make sure parsers support it - -# Line breaks are OK when inside arrays -hosts = [ - "alpha", - "omega" -] diff --git a/vendor/github.com/BurntSushi/toml/_examples/readme1.toml b/vendor/github.com/BurntSushi/toml/_examples/readme1.toml deleted file mode 100644 index 3e1261d..0000000 --- a/vendor/github.com/BurntSushi/toml/_examples/readme1.toml +++ /dev/null @@ -1,5 +0,0 @@ -Age = 25 -Cats = [ "Cauchy", "Plato" ] -Pi = 3.14 -Perfection = [ 6, 28, 496, 8128 ] -DOB = 1987-07-05T05:45:00Z diff --git a/vendor/github.com/BurntSushi/toml/_examples/readme2.toml b/vendor/github.com/BurntSushi/toml/_examples/readme2.toml deleted file mode 100644 index b51cd93..0000000 --- a/vendor/github.com/BurntSushi/toml/_examples/readme2.toml +++ /dev/null @@ -1 +0,0 @@ -some_key_NAME = "wat" diff --git a/vendor/github.com/BurntSushi/toml/cmd/toml-test-decoder/COPYING b/vendor/github.com/BurntSushi/toml/cmd/toml-test-decoder/COPYING deleted file mode 100644 index 5a8e332..0000000 --- a/vendor/github.com/BurntSushi/toml/cmd/toml-test-decoder/COPYING +++ /dev/null @@ -1,14 +0,0 @@ - DO WHAT THE FUCK YOU WANT TO PUBLIC LICENSE - Version 2, December 2004 - - Copyright (C) 2004 Sam Hocevar - - Everyone is permitted to copy and distribute verbatim or modified - copies of this license document, and changing it is allowed as long - as the name is changed. - - DO WHAT THE FUCK YOU WANT TO PUBLIC LICENSE - TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION - - 0. You just DO WHAT THE FUCK YOU WANT TO. 
- diff --git a/vendor/github.com/BurntSushi/toml/cmd/toml-test-decoder/README.md b/vendor/github.com/BurntSushi/toml/cmd/toml-test-decoder/README.md deleted file mode 100644 index 24421eb..0000000 --- a/vendor/github.com/BurntSushi/toml/cmd/toml-test-decoder/README.md +++ /dev/null @@ -1,14 +0,0 @@ -# Implements the TOML test suite interface - -This is an implementation of the interface expected by -[toml-test](https://github.com/BurntSushi/toml-test) for my -[toml parser written in Go](https://github.com/BurntSushi/toml). -In particular, it maps TOML data on `stdin` to a JSON format on `stdout`. - - -Compatible with TOML version -[v0.2.0](https://github.com/mojombo/toml/blob/master/versions/toml-v0.2.0.md) - -Compatible with `toml-test` version -[v0.2.0](https://github.com/BurntSushi/toml-test/tree/v0.2.0) - diff --git a/vendor/github.com/BurntSushi/toml/cmd/toml-test-decoder/main.go b/vendor/github.com/BurntSushi/toml/cmd/toml-test-decoder/main.go deleted file mode 100644 index 14e7557..0000000 --- a/vendor/github.com/BurntSushi/toml/cmd/toml-test-decoder/main.go +++ /dev/null @@ -1,90 +0,0 @@ -// Command toml-test-decoder satisfies the toml-test interface for testing -// TOML decoders. Namely, it accepts TOML on stdin and outputs JSON on stdout. -package main - -import ( - "encoding/json" - "flag" - "fmt" - "log" - "os" - "path" - "time" - - "github.com/BurntSushi/toml" -) - -func init() { - log.SetFlags(0) - - flag.Usage = usage - flag.Parse() -} - -func usage() { - log.Printf("Usage: %s < toml-file\n", path.Base(os.Args[0])) - flag.PrintDefaults() - - os.Exit(1) -} - -func main() { - if flag.NArg() != 0 { - flag.Usage() - } - - var tmp interface{} - if _, err := toml.DecodeReader(os.Stdin, &tmp); err != nil { - log.Fatalf("Error decoding TOML: %s", err) - } - - typedTmp := translate(tmp) - if err := json.NewEncoder(os.Stdout).Encode(typedTmp); err != nil { - log.Fatalf("Error encoding JSON: %s", err) - } -} - -func translate(tomlData interface{}) interface{} { - switch orig := tomlData.(type) { - case map[string]interface{}: - typed := make(map[string]interface{}, len(orig)) - for k, v := range orig { - typed[k] = translate(v) - } - return typed - case []map[string]interface{}: - typed := make([]map[string]interface{}, len(orig)) - for i, v := range orig { - typed[i] = translate(v).(map[string]interface{}) - } - return typed - case []interface{}: - typed := make([]interface{}, len(orig)) - for i, v := range orig { - typed[i] = translate(v) - } - - // We don't really need to tag arrays, but let's be future proof. - // (If TOML ever supports tuples, we'll need this.) 
- return tag("array", typed) - case time.Time: - return tag("datetime", orig.Format("2006-01-02T15:04:05Z")) - case bool: - return tag("bool", fmt.Sprintf("%v", orig)) - case int64: - return tag("integer", fmt.Sprintf("%d", orig)) - case float64: - return tag("float", fmt.Sprintf("%v", orig)) - case string: - return tag("string", orig) - } - - panic(fmt.Sprintf("Unknown type: %T", tomlData)) -} - -func tag(typeName string, data interface{}) map[string]interface{} { - return map[string]interface{}{ - "type": typeName, - "value": data, - } -} diff --git a/vendor/github.com/BurntSushi/toml/cmd/toml-test-encoder/COPYING b/vendor/github.com/BurntSushi/toml/cmd/toml-test-encoder/COPYING deleted file mode 100644 index 5a8e332..0000000 --- a/vendor/github.com/BurntSushi/toml/cmd/toml-test-encoder/COPYING +++ /dev/null @@ -1,14 +0,0 @@ - DO WHAT THE FUCK YOU WANT TO PUBLIC LICENSE - Version 2, December 2004 - - Copyright (C) 2004 Sam Hocevar - - Everyone is permitted to copy and distribute verbatim or modified - copies of this license document, and changing it is allowed as long - as the name is changed. - - DO WHAT THE FUCK YOU WANT TO PUBLIC LICENSE - TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION - - 0. You just DO WHAT THE FUCK YOU WANT TO. - diff --git a/vendor/github.com/BurntSushi/toml/cmd/toml-test-encoder/README.md b/vendor/github.com/BurntSushi/toml/cmd/toml-test-encoder/README.md deleted file mode 100644 index 45a603f..0000000 --- a/vendor/github.com/BurntSushi/toml/cmd/toml-test-encoder/README.md +++ /dev/null @@ -1,14 +0,0 @@ -# Implements the TOML test suite interface for TOML encoders - -This is an implementation of the interface expected by -[toml-test](https://github.com/BurntSushi/toml-test) for the -[TOML encoder](https://github.com/BurntSushi/toml). -In particular, it maps JSON data on `stdin` to a TOML format on `stdout`. - - -Compatible with TOML version -[v0.2.0](https://github.com/mojombo/toml/blob/master/versions/toml-v0.2.0.md) - -Compatible with `toml-test` version -[v0.2.0](https://github.com/BurntSushi/toml-test/tree/v0.2.0) - diff --git a/vendor/github.com/BurntSushi/toml/cmd/toml-test-encoder/main.go b/vendor/github.com/BurntSushi/toml/cmd/toml-test-encoder/main.go deleted file mode 100644 index 092cc68..0000000 --- a/vendor/github.com/BurntSushi/toml/cmd/toml-test-encoder/main.go +++ /dev/null @@ -1,131 +0,0 @@ -// Command toml-test-encoder satisfies the toml-test interface for testing -// TOML encoders. Namely, it accepts JSON on stdin and outputs TOML on stdout. 
-package main - -import ( - "encoding/json" - "flag" - "log" - "os" - "path" - "strconv" - "time" - - "github.com/BurntSushi/toml" -) - -func init() { - log.SetFlags(0) - - flag.Usage = usage - flag.Parse() -} - -func usage() { - log.Printf("Usage: %s < json-file\n", path.Base(os.Args[0])) - flag.PrintDefaults() - - os.Exit(1) -} - -func main() { - if flag.NArg() != 0 { - flag.Usage() - } - - var tmp interface{} - if err := json.NewDecoder(os.Stdin).Decode(&tmp); err != nil { - log.Fatalf("Error decoding JSON: %s", err) - } - - tomlData := translate(tmp) - if err := toml.NewEncoder(os.Stdout).Encode(tomlData); err != nil { - log.Fatalf("Error encoding TOML: %s", err) - } -} - -func translate(typedJson interface{}) interface{} { - switch v := typedJson.(type) { - case map[string]interface{}: - if len(v) == 2 && in("type", v) && in("value", v) { - return untag(v) - } - m := make(map[string]interface{}, len(v)) - for k, v2 := range v { - m[k] = translate(v2) - } - return m - case []interface{}: - tabArray := make([]map[string]interface{}, len(v)) - for i := range v { - if m, ok := translate(v[i]).(map[string]interface{}); ok { - tabArray[i] = m - } else { - log.Fatalf("JSON arrays may only contain objects. This " + - "corresponds to only tables being allowed in " + - "TOML table arrays.") - } - } - return tabArray - } - log.Fatalf("Unrecognized JSON format '%T'.", typedJson) - panic("unreachable") -} - -func untag(typed map[string]interface{}) interface{} { - t := typed["type"].(string) - v := typed["value"] - switch t { - case "string": - return v.(string) - case "integer": - v := v.(string) - n, err := strconv.Atoi(v) - if err != nil { - log.Fatalf("Could not parse '%s' as integer: %s", v, err) - } - return n - case "float": - v := v.(string) - f, err := strconv.ParseFloat(v, 64) - if err != nil { - log.Fatalf("Could not parse '%s' as float64: %s", v, err) - } - return f - case "datetime": - v := v.(string) - t, err := time.Parse("2006-01-02T15:04:05Z", v) - if err != nil { - log.Fatalf("Could not parse '%s' as a datetime: %s", v, err) - } - return t - case "bool": - v := v.(string) - switch v { - case "true": - return true - case "false": - return false - } - log.Fatalf("Could not parse '%s' as a boolean.", v) - case "array": - v := v.([]interface{}) - array := make([]interface{}, len(v)) - for i := range v { - if m, ok := v[i].(map[string]interface{}); ok { - array[i] = untag(m) - } else { - log.Fatalf("Arrays may only contain other arrays or "+ - "primitive values, but found a '%T'.", m) - } - } - return array - } - log.Fatalf("Unrecognized tag type '%s'.", t) - panic("unreachable") -} - -func in(key string, m map[string]interface{}) bool { - _, ok := m[key] - return ok -} diff --git a/vendor/github.com/BurntSushi/toml/cmd/tomlv/COPYING b/vendor/github.com/BurntSushi/toml/cmd/tomlv/COPYING deleted file mode 100644 index 5a8e332..0000000 --- a/vendor/github.com/BurntSushi/toml/cmd/tomlv/COPYING +++ /dev/null @@ -1,14 +0,0 @@ - DO WHAT THE FUCK YOU WANT TO PUBLIC LICENSE - Version 2, December 2004 - - Copyright (C) 2004 Sam Hocevar - - Everyone is permitted to copy and distribute verbatim or modified - copies of this license document, and changing it is allowed as long - as the name is changed. - - DO WHAT THE FUCK YOU WANT TO PUBLIC LICENSE - TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION - - 0. You just DO WHAT THE FUCK YOU WANT TO. 
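The two harness commands above exchange data with toml-test through a small tagged JSON form: each TOML primitive is wrapped as an object with `type` and `value` fields, which is what the deleted `tag`/`untag` helpers build and unpack. A rough sketch of that shape, assuming the same wrapper map used by `toml-test-decoder`:

```go
package main

import (
	"encoding/json"
	"fmt"
	"log"
)

// tag mirrors the helper in the deleted toml-test-decoder: primitives are
// carried as {"type": ..., "value": ...} with the value rendered as a string.
func tag(typeName string, value interface{}) map[string]interface{} {
	return map[string]interface{}{"type": typeName, "value": value}
}

func main() {
	// Roughly what `Pi = 3.14` and `Age = 25` become on stdout.
	doc := map[string]interface{}{
		"Pi":  tag("float", "3.14"),
		"Age": tag("integer", "25"),
	}
	out, err := json.Marshal(doc)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(string(out))
}
```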
- diff --git a/vendor/github.com/BurntSushi/toml/cmd/tomlv/README.md b/vendor/github.com/BurntSushi/toml/cmd/tomlv/README.md deleted file mode 100644 index 5df0dc3..0000000 --- a/vendor/github.com/BurntSushi/toml/cmd/tomlv/README.md +++ /dev/null @@ -1,22 +0,0 @@ -# TOML Validator - -If Go is installed, it's simple to try it out: - -```bash -go get github.com/BurntSushi/toml/cmd/tomlv -tomlv some-toml-file.toml -``` - -You can see the types of every key in a TOML file with: - -```bash -tomlv -types some-toml-file.toml -``` - -At the moment, only one error message is reported at a time. Error messages -include line numbers. No output means that the files given are valid TOML, or -there is a bug in `tomlv`. - -Compatible with TOML version -[v0.1.0](https://github.com/mojombo/toml/blob/master/versions/toml-v0.1.0.md) - diff --git a/vendor/github.com/BurntSushi/toml/cmd/tomlv/main.go b/vendor/github.com/BurntSushi/toml/cmd/tomlv/main.go deleted file mode 100644 index c7d689a..0000000 --- a/vendor/github.com/BurntSushi/toml/cmd/tomlv/main.go +++ /dev/null @@ -1,61 +0,0 @@ -// Command tomlv validates TOML documents and prints each key's type. -package main - -import ( - "flag" - "fmt" - "log" - "os" - "path" - "strings" - "text/tabwriter" - - "github.com/BurntSushi/toml" -) - -var ( - flagTypes = false -) - -func init() { - log.SetFlags(0) - - flag.BoolVar(&flagTypes, "types", flagTypes, - "When set, the types of every defined key will be shown.") - - flag.Usage = usage - flag.Parse() -} - -func usage() { - log.Printf("Usage: %s toml-file [ toml-file ... ]\n", - path.Base(os.Args[0])) - flag.PrintDefaults() - - os.Exit(1) -} - -func main() { - if flag.NArg() < 1 { - flag.Usage() - } - for _, f := range flag.Args() { - var tmp interface{} - md, err := toml.DecodeFile(f, &tmp) - if err != nil { - log.Fatalf("Error in '%s': %s", f, err) - } - if flagTypes { - printTypes(md) - } - } -} - -func printTypes(md toml.MetaData) { - tabw := tabwriter.NewWriter(os.Stdout, 0, 0, 2, ' ', 0) - for _, key := range md.Keys() { - fmt.Fprintf(tabw, "%s%s\t%s\n", - strings.Repeat(" ", len(key)-1), key, md.Type(key...)) - } - tabw.Flush() -} diff --git a/vendor/github.com/BurntSushi/toml/decode.go b/vendor/github.com/BurntSushi/toml/decode.go deleted file mode 100644 index 98c8aa6..0000000 --- a/vendor/github.com/BurntSushi/toml/decode.go +++ /dev/null @@ -1,512 +0,0 @@ -package toml - -import ( - "fmt" - "io" - "io/ioutil" - "math" - "reflect" - "strings" - "time" -) - -var e = fmt.Errorf - -// Unmarshaler is the interface implemented by objects that can unmarshal a -// TOML description of themselves. -type Unmarshaler interface { - UnmarshalTOML(interface{}) error -} - -// Unmarshal decodes the contents of `p` in TOML format into a pointer `v`. -func Unmarshal(p []byte, v interface{}) error { - _, err := Decode(string(p), v) - return err -} - -// Primitive is a TOML value that hasn't been decoded into a Go value. -// When using the various `Decode*` functions, the type `Primitive` may -// be given to any value, and its decoding will be delayed. -// -// A `Primitive` value can be decoded using the `PrimitiveDecode` function. -// -// The underlying representation of a `Primitive` value is subject to change. -// Do not rely on it. -// -// N.B. Primitive values are still parsed, so using them will only avoid -// the overhead of reflection. They can be useful when you don't know the -// exact type of TOML data until run time. -type Primitive struct { - undecoded interface{} - context Key -} - -// DEPRECATED! 
-// -// Use MetaData.PrimitiveDecode instead. -func PrimitiveDecode(primValue Primitive, v interface{}) error { - md := MetaData{decoded: make(map[string]bool)} - return md.unify(primValue.undecoded, rvalue(v)) -} - -// PrimitiveDecode is just like the other `Decode*` functions, except it -// decodes a TOML value that has already been parsed. Valid primitive values -// can *only* be obtained from values filled by the decoder functions, -// including this method. (i.e., `v` may contain more `Primitive` -// values.) -// -// Meta data for primitive values is included in the meta data returned by -// the `Decode*` functions with one exception: keys returned by the Undecoded -// method will only reflect keys that were decoded. Namely, any keys hidden -// behind a Primitive will be considered undecoded. Executing this method will -// update the undecoded keys in the meta data. (See the example.) -func (md *MetaData) PrimitiveDecode(primValue Primitive, v interface{}) error { - md.context = primValue.context - defer func() { md.context = nil }() - return md.unify(primValue.undecoded, rvalue(v)) -} - -// Decode will decode the contents of `data` in TOML format into a pointer -// `v`. -// -// TOML hashes correspond to Go structs or maps. (Dealer's choice. They can be -// used interchangeably.) -// -// TOML arrays of tables correspond to either a slice of structs or a slice -// of maps. -// -// TOML datetimes correspond to Go `time.Time` values. -// -// All other TOML types (float, string, int, bool and array) correspond -// to the obvious Go types. -// -// An exception to the above rules is if a type implements the -// encoding.TextUnmarshaler interface. In this case, any primitive TOML value -// (floats, strings, integers, booleans and datetimes) will be converted to -// a byte string and given to the value's UnmarshalText method. See the -// Unmarshaler example for a demonstration with time duration strings. -// -// Key mapping -// -// TOML keys can map to either keys in a Go map or field names in a Go -// struct. The special `toml` struct tag may be used to map TOML keys to -// struct fields that don't match the key name exactly. (See the example.) -// A case insensitive match to struct names will be tried if an exact match -// can't be found. -// -// The mapping between TOML values and Go values is loose. That is, there -// may exist TOML values that cannot be placed into your representation, and -// there may be parts of your representation that do not correspond to -// TOML values. This loose mapping can be made stricter by using the IsDefined -// and/or Undecoded methods on the MetaData returned. -// -// This decoder will not handle cyclic types. If a cyclic type is passed, -// `Decode` will not terminate. -func Decode(data string, v interface{}) (MetaData, error) { - rv := reflect.ValueOf(v) - if rv.Kind() != reflect.Ptr { - return MetaData{}, e("Decode of non-pointer type %s", reflect.TypeOf(v)) - } - if rv.IsNil() { - return MetaData{}, e("Decode of nil %s", reflect.TypeOf(v)) - } - p, err := parse(data) - if err != nil { - return MetaData{}, err - } - md := MetaData{ - p.mapping, p.types, p.ordered, - make(map[string]bool, len(p.ordered)), nil, - } - return md, md.unify(p.mapping, indirect(rv)) -} - -// DecodeFile is just like Decode, except it will automatically read the -// contents of the file at `fpath` and decode it for you. 
-func DecodeFile(fpath string, v interface{}) (MetaData, error) { - bs, err := ioutil.ReadFile(fpath) - if err != nil { - return MetaData{}, err - } - return Decode(string(bs), v) -} - -// DecodeReader is just like Decode, except it will consume all bytes -// from the reader and decode it for you. -func DecodeReader(r io.Reader, v interface{}) (MetaData, error) { - bs, err := ioutil.ReadAll(r) - if err != nil { - return MetaData{}, err - } - return Decode(string(bs), v) -} - -// unify performs a sort of type unification based on the structure of `rv`, -// which is the client representation. -// -// Any type mismatch produces an error. Finding a type that we don't know -// how to handle produces an unsupported type error. -func (md *MetaData) unify(data interface{}, rv reflect.Value) error { - - // Special case. Look for a `Primitive` value. - if rv.Type() == reflect.TypeOf((*Primitive)(nil)).Elem() { - // Save the undecoded data and the key context into the primitive - // value. - context := make(Key, len(md.context)) - copy(context, md.context) - rv.Set(reflect.ValueOf(Primitive{ - undecoded: data, - context: context, - })) - return nil - } - - // Special case. Unmarshaler Interface support. - if rv.CanAddr() { - if v, ok := rv.Addr().Interface().(Unmarshaler); ok { - return v.UnmarshalTOML(data) - } - } - - // Special case. Handle time.Time values specifically. - // TODO: Remove this code when we decide to drop support for Go 1.1. - // This isn't necessary in Go 1.2 because time.Time satisfies the encoding - // interfaces. - if rv.Type().AssignableTo(rvalue(time.Time{}).Type()) { - return md.unifyDatetime(data, rv) - } - - // Special case. Look for a value satisfying the TextUnmarshaler interface. - if v, ok := rv.Interface().(TextUnmarshaler); ok { - return md.unifyText(data, v) - } - // BUG(burntsushi) - // The behavior here is incorrect whenever a Go type satisfies the - // encoding.TextUnmarshaler interface but also corresponds to a TOML - // hash or array. In particular, the unmarshaler should only be applied - // to primitive TOML values. But at this point, it will be applied to - // all kinds of values and produce an incorrect error whenever those values - // are hashes or arrays (including arrays of tables). - - k := rv.Kind() - - // laziness - if k >= reflect.Int && k <= reflect.Uint64 { - return md.unifyInt(data, rv) - } - switch k { - case reflect.Ptr: - elem := reflect.New(rv.Type().Elem()) - err := md.unify(data, reflect.Indirect(elem)) - if err != nil { - return err - } - rv.Set(elem) - return nil - case reflect.Struct: - return md.unifyStruct(data, rv) - case reflect.Map: - return md.unifyMap(data, rv) - case reflect.Array: - return md.unifyArray(data, rv) - case reflect.Slice: - return md.unifySlice(data, rv) - case reflect.String: - return md.unifyString(data, rv) - case reflect.Bool: - return md.unifyBool(data, rv) - case reflect.Interface: - // we only support empty interfaces. 
- if rv.NumMethod() > 0 { - return e("Unsupported type '%s'.", rv.Kind()) - } - return md.unifyAnything(data, rv) - case reflect.Float32: - fallthrough - case reflect.Float64: - return md.unifyFloat64(data, rv) - } - return e("Unsupported type '%s'.", rv.Kind()) -} - -func (md *MetaData) unifyStruct(mapping interface{}, rv reflect.Value) error { - tmap, ok := mapping.(map[string]interface{}) - if !ok { - if mapping == nil { - return nil - } - return mismatch(rv, "map", mapping) - } - - for key, datum := range tmap { - var f *field - fields := cachedTypeFields(rv.Type()) - for i := range fields { - ff := &fields[i] - if ff.name == key { - f = ff - break - } - if f == nil && strings.EqualFold(ff.name, key) { - f = ff - } - } - if f != nil { - subv := rv - for _, i := range f.index { - subv = indirect(subv.Field(i)) - } - if isUnifiable(subv) { - md.decoded[md.context.add(key).String()] = true - md.context = append(md.context, key) - if err := md.unify(datum, subv); err != nil { - return e("Type mismatch for '%s.%s': %s", - rv.Type().String(), f.name, err) - } - md.context = md.context[0 : len(md.context)-1] - } else if f.name != "" { - // Bad user! No soup for you! - return e("Field '%s.%s' is unexported, and therefore cannot "+ - "be loaded with reflection.", rv.Type().String(), f.name) - } - } - } - return nil -} - -func (md *MetaData) unifyMap(mapping interface{}, rv reflect.Value) error { - tmap, ok := mapping.(map[string]interface{}) - if !ok { - if tmap == nil { - return nil - } - return badtype("map", mapping) - } - if rv.IsNil() { - rv.Set(reflect.MakeMap(rv.Type())) - } - for k, v := range tmap { - md.decoded[md.context.add(k).String()] = true - md.context = append(md.context, k) - - rvkey := indirect(reflect.New(rv.Type().Key())) - rvval := reflect.Indirect(reflect.New(rv.Type().Elem())) - if err := md.unify(v, rvval); err != nil { - return err - } - md.context = md.context[0 : len(md.context)-1] - - rvkey.SetString(k) - rv.SetMapIndex(rvkey, rvval) - } - return nil -} - -func (md *MetaData) unifyArray(data interface{}, rv reflect.Value) error { - datav := reflect.ValueOf(data) - if datav.Kind() != reflect.Slice { - if !datav.IsValid() { - return nil - } - return badtype("slice", data) - } - sliceLen := datav.Len() - if sliceLen != rv.Len() { - return e("expected array length %d; got TOML array of length %d", - rv.Len(), sliceLen) - } - return md.unifySliceArray(datav, rv) -} - -func (md *MetaData) unifySlice(data interface{}, rv reflect.Value) error { - datav := reflect.ValueOf(data) - if datav.Kind() != reflect.Slice { - if !datav.IsValid() { - return nil - } - return badtype("slice", data) - } - n := datav.Len() - if rv.IsNil() || rv.Cap() < n { - rv.Set(reflect.MakeSlice(rv.Type(), n, n)) - } - rv.SetLen(n) - return md.unifySliceArray(datav, rv) -} - -func (md *MetaData) unifySliceArray(data, rv reflect.Value) error { - sliceLen := data.Len() - for i := 0; i < sliceLen; i++ { - v := data.Index(i).Interface() - sliceval := indirect(rv.Index(i)) - if err := md.unify(v, sliceval); err != nil { - return err - } - } - return nil -} - -func (md *MetaData) unifyDatetime(data interface{}, rv reflect.Value) error { - if _, ok := data.(time.Time); ok { - rv.Set(reflect.ValueOf(data)) - return nil - } - return badtype("time.Time", data) -} - -func (md *MetaData) unifyString(data interface{}, rv reflect.Value) error { - if s, ok := data.(string); ok { - rv.SetString(s) - return nil - } - return badtype("string", data) -} - -func (md *MetaData) unifyFloat64(data interface{}, rv 
reflect.Value) error { - if num, ok := data.(float64); ok { - switch rv.Kind() { - case reflect.Float32: - fallthrough - case reflect.Float64: - rv.SetFloat(num) - default: - panic("bug") - } - return nil - } - return badtype("float", data) -} - -func (md *MetaData) unifyInt(data interface{}, rv reflect.Value) error { - if num, ok := data.(int64); ok { - if rv.Kind() >= reflect.Int && rv.Kind() <= reflect.Int64 { - switch rv.Kind() { - case reflect.Int, reflect.Int64: - // No bounds checking necessary. - case reflect.Int8: - if num < math.MinInt8 || num > math.MaxInt8 { - return e("Value '%d' is out of range for int8.", num) - } - case reflect.Int16: - if num < math.MinInt16 || num > math.MaxInt16 { - return e("Value '%d' is out of range for int16.", num) - } - case reflect.Int32: - if num < math.MinInt32 || num > math.MaxInt32 { - return e("Value '%d' is out of range for int32.", num) - } - } - rv.SetInt(num) - } else if rv.Kind() >= reflect.Uint && rv.Kind() <= reflect.Uint64 { - unum := uint64(num) - switch rv.Kind() { - case reflect.Uint, reflect.Uint64: - // No bounds checking necessary. - case reflect.Uint8: - if num < 0 || unum > math.MaxUint8 { - return e("Value '%d' is out of range for uint8.", num) - } - case reflect.Uint16: - if num < 0 || unum > math.MaxUint16 { - return e("Value '%d' is out of range for uint16.", num) - } - case reflect.Uint32: - if num < 0 || unum > math.MaxUint32 { - return e("Value '%d' is out of range for uint32.", num) - } - } - rv.SetUint(unum) - } else { - panic("unreachable") - } - return nil - } - return badtype("integer", data) -} - -func (md *MetaData) unifyBool(data interface{}, rv reflect.Value) error { - if b, ok := data.(bool); ok { - rv.SetBool(b) - return nil - } - return badtype("boolean", data) -} - -func (md *MetaData) unifyAnything(data interface{}, rv reflect.Value) error { - rv.Set(reflect.ValueOf(data)) - return nil -} - -func (md *MetaData) unifyText(data interface{}, v TextUnmarshaler) error { - var s string - switch sdata := data.(type) { - case TextMarshaler: - text, err := sdata.MarshalText() - if err != nil { - return err - } - s = string(text) - case fmt.Stringer: - s = sdata.String() - case string: - s = sdata - case bool: - s = fmt.Sprintf("%v", sdata) - case int64: - s = fmt.Sprintf("%d", sdata) - case float64: - s = fmt.Sprintf("%f", sdata) - default: - return badtype("primitive (string-like)", data) - } - if err := v.UnmarshalText([]byte(s)); err != nil { - return err - } - return nil -} - -// rvalue returns a reflect.Value of `v`. All pointers are resolved. -func rvalue(v interface{}) reflect.Value { - return indirect(reflect.ValueOf(v)) -} - -// indirect returns the value pointed to by a pointer. -// Pointers are followed until the value is not a pointer. -// New values are allocated for each nil pointer. -// -// An exception to this rule is if the value satisfies an interface of -// interest to us (like encoding.TextUnmarshaler). 
-func indirect(v reflect.Value) reflect.Value { - if v.Kind() != reflect.Ptr { - if v.CanAddr() { - pv := v.Addr() - if _, ok := pv.Interface().(TextUnmarshaler); ok { - return pv - } - } - return v - } - if v.IsNil() { - v.Set(reflect.New(v.Type().Elem())) - } - return indirect(reflect.Indirect(v)) -} - -func isUnifiable(rv reflect.Value) bool { - if rv.CanSet() { - return true - } - if _, ok := rv.Interface().(TextUnmarshaler); ok { - return true - } - return false -} - -func badtype(expected string, data interface{}) error { - return e("Expected %s but found '%T'.", expected, data) -} - -func mismatch(user reflect.Value, expected string, data interface{}) error { - return e("Type mismatch for %s. Expected %s but found '%T'.", - user.Type().String(), expected, data) -} diff --git a/vendor/github.com/BurntSushi/toml/decode_meta.go b/vendor/github.com/BurntSushi/toml/decode_meta.go deleted file mode 100644 index ef6f545..0000000 --- a/vendor/github.com/BurntSushi/toml/decode_meta.go +++ /dev/null @@ -1,122 +0,0 @@ -package toml - -import "strings" - -// MetaData allows access to meta information about TOML data that may not -// be inferrable via reflection. In particular, whether a key has been defined -// and the TOML type of a key. -type MetaData struct { - mapping map[string]interface{} - types map[string]tomlType - keys []Key - decoded map[string]bool - context Key // Used only during decoding. -} - -// IsDefined returns true if the key given exists in the TOML data. The key -// should be specified hierarchially. e.g., -// -// // access the TOML key 'a.b.c' -// IsDefined("a", "b", "c") -// -// IsDefined will return false if an empty key given. Keys are case sensitive. -func (md *MetaData) IsDefined(key ...string) bool { - if len(key) == 0 { - return false - } - - var hash map[string]interface{} - var ok bool - var hashOrVal interface{} = md.mapping - for _, k := range key { - if hash, ok = hashOrVal.(map[string]interface{}); !ok { - return false - } - if hashOrVal, ok = hash[k]; !ok { - return false - } - } - return true -} - -// Type returns a string representation of the type of the key specified. -// -// Type will return the empty string if given an empty key or a key that -// does not exist. Keys are case sensitive. -func (md *MetaData) Type(key ...string) string { - fullkey := strings.Join(key, ".") - if typ, ok := md.types[fullkey]; ok { - return typ.typeString() - } - return "" -} - -// Key is the type of any TOML key, including key groups. Use (MetaData).Keys -// to get values of this type. -type Key []string - -func (k Key) String() string { - return strings.Join(k, ".") -} - -func (k Key) maybeQuotedAll() string { - var ss []string - for i := range k { - ss = append(ss, k.maybeQuoted(i)) - } - return strings.Join(ss, ".") -} - -func (k Key) maybeQuoted(i int) string { - quote := false - for _, c := range k[i] { - if !isBareKeyChar(c) { - quote = true - break - } - } - if quote { - return "\"" + strings.Replace(k[i], "\"", "\\\"", -1) + "\"" - } else { - return k[i] - } -} - -func (k Key) add(piece string) Key { - newKey := make(Key, len(k)+1) - copy(newKey, k) - newKey[len(k)] = piece - return newKey -} - -// Keys returns a slice of every key in the TOML data, including key groups. -// Each key is itself a slice, where the first element is the top of the -// hierarchy and the last is the most specific. -// -// The list will have the same order as the keys appeared in the TOML data. -// -// All keys returned are non-empty. 
-func (md *MetaData) Keys() []Key { - return md.keys -} - -// Undecoded returns all keys that have not been decoded in the order in which -// they appear in the original TOML document. -// -// This includes keys that haven't been decoded because of a Primitive value. -// Once the Primitive value is decoded, the keys will be considered decoded. -// -// Also note that decoding into an empty interface will result in no decoding, -// and so no keys will be considered decoded. -// -// In this sense, the Undecoded keys correspond to keys in the TOML document -// that do not have a concrete type in your representation. -func (md *MetaData) Undecoded() []Key { - undecoded := make([]Key, 0, len(md.keys)) - for _, key := range md.keys { - if !md.decoded[key.String()] { - undecoded = append(undecoded, key) - } - } - return undecoded -} diff --git a/vendor/github.com/BurntSushi/toml/decode_test.go b/vendor/github.com/BurntSushi/toml/decode_test.go deleted file mode 100644 index d746527..0000000 --- a/vendor/github.com/BurntSushi/toml/decode_test.go +++ /dev/null @@ -1,1278 +0,0 @@ -package toml - -import ( - "fmt" - "log" - "math" - "reflect" - "strings" - "testing" - "time" -) - -func TestDecodeSimple(t *testing.T) { - var testSimple = ` -age = 250 -andrew = "gallant" -kait = "brady" -now = 1987-07-05T05:45:00Z -yesOrNo = true -pi = 3.14 -colors = [ - ["red", "green", "blue"], - ["cyan", "magenta", "yellow", "black"], -] - -[My.Cats] -plato = "cat 1" -cauchy = "cat 2" -` - - type cats struct { - Plato string - Cauchy string - } - type simple struct { - Age int - Colors [][]string - Pi float64 - YesOrNo bool - Now time.Time - Andrew string - Kait string - My map[string]cats - } - - var val simple - _, err := Decode(testSimple, &val) - if err != nil { - t.Fatal(err) - } - - now, err := time.Parse("2006-01-02T15:04:05", "1987-07-05T05:45:00") - if err != nil { - panic(err) - } - var answer = simple{ - Age: 250, - Andrew: "gallant", - Kait: "brady", - Now: now, - YesOrNo: true, - Pi: 3.14, - Colors: [][]string{ - {"red", "green", "blue"}, - {"cyan", "magenta", "yellow", "black"}, - }, - My: map[string]cats{ - "Cats": {Plato: "cat 1", Cauchy: "cat 2"}, - }, - } - if !reflect.DeepEqual(val, answer) { - t.Fatalf("Expected\n-----\n%#v\n-----\nbut got\n-----\n%#v\n", - answer, val) - } -} - -func TestDecodeEmbedded(t *testing.T) { - type Dog struct{ Name string } - type Age int - - tests := map[string]struct { - input string - decodeInto interface{} - wantDecoded interface{} - }{ - "embedded struct": { - input: `Name = "milton"`, - decodeInto: &struct{ Dog }{}, - wantDecoded: &struct{ Dog }{Dog{"milton"}}, - }, - "embedded non-nil pointer to struct": { - input: `Name = "milton"`, - decodeInto: &struct{ *Dog }{}, - wantDecoded: &struct{ *Dog }{&Dog{"milton"}}, - }, - "embedded nil pointer to struct": { - input: ``, - decodeInto: &struct{ *Dog }{}, - wantDecoded: &struct{ *Dog }{nil}, - }, - "embedded int": { - input: `Age = -5`, - decodeInto: &struct{ Age }{}, - wantDecoded: &struct{ Age }{-5}, - }, - } - - for label, test := range tests { - _, err := Decode(test.input, test.decodeInto) - if err != nil { - t.Fatal(err) - } - if !reflect.DeepEqual(test.wantDecoded, test.decodeInto) { - t.Errorf("%s: want decoded == %+v, got %+v", - label, test.wantDecoded, test.decodeInto) - } - } -} - -func TestDecodeIgnoredFields(t *testing.T) { - type simple struct { - Number int `toml:"-"` - } - const input = ` -Number = 123 -- = 234 -` - var s simple - if _, err := Decode(input, &s); err != nil { - t.Fatal(err) - } - if 
s.Number != 0 { - t.Errorf("got: %d; want 0", s.Number) - } -} - -func TestTableArrays(t *testing.T) { - var tomlTableArrays = ` -[[albums]] -name = "Born to Run" - - [[albums.songs]] - name = "Jungleland" - - [[albums.songs]] - name = "Meeting Across the River" - -[[albums]] -name = "Born in the USA" - - [[albums.songs]] - name = "Glory Days" - - [[albums.songs]] - name = "Dancing in the Dark" -` - - type Song struct { - Name string - } - - type Album struct { - Name string - Songs []Song - } - - type Music struct { - Albums []Album - } - - expected := Music{[]Album{ - {"Born to Run", []Song{{"Jungleland"}, {"Meeting Across the River"}}}, - {"Born in the USA", []Song{{"Glory Days"}, {"Dancing in the Dark"}}}, - }} - var got Music - if _, err := Decode(tomlTableArrays, &got); err != nil { - t.Fatal(err) - } - if !reflect.DeepEqual(expected, got) { - t.Fatalf("\n%#v\n!=\n%#v\n", expected, got) - } -} - -func TestTableNesting(t *testing.T) { - for _, tt := range []struct { - t string - want []string - }{ - {"[a.b.c]", []string{"a", "b", "c"}}, - {`[a."b.c"]`, []string{"a", "b.c"}}, - {`[a.'b.c']`, []string{"a", "b.c"}}, - {`[a.' b ']`, []string{"a", " b "}}, - {"[ d.e.f ]", []string{"d", "e", "f"}}, - {"[ g . h . i ]", []string{"g", "h", "i"}}, - {`[ j . "Êž" . 'l' ]`, []string{"j", "Êž", "l"}}, - } { - var m map[string]interface{} - if _, err := Decode(tt.t, &m); err != nil { - t.Errorf("Decode(%q): got error: %s", tt.t, err) - continue - } - if keys := extractNestedKeys(m); !reflect.DeepEqual(keys, tt.want) { - t.Errorf("Decode(%q): got nested keys %#v; want %#v", - tt.t, keys, tt.want) - } - } -} - -func extractNestedKeys(v map[string]interface{}) []string { - var result []string - for { - if len(v) != 1 { - return result - } - for k, m := range v { - result = append(result, k) - var ok bool - v, ok = m.(map[string]interface{}) - if !ok { - return result - } - } - - } -} - -// Case insensitive matching tests. -// A bit more comprehensive than needed given the current implementation, -// but implementations change. -// Probably still missing demonstrations of some ugly corner cases regarding -// case insensitive matching and multiple fields. 
-func TestCase(t *testing.T) { - var caseToml = ` -tOpString = "string" -tOpInt = 1 -tOpFloat = 1.1 -tOpBool = true -tOpdate = 2006-01-02T15:04:05Z -tOparray = [ "array" ] -Match = "i should be in Match only" -MatcH = "i should be in MatcH only" -once = "just once" -[nEst.eD] -nEstedString = "another string" -` - - type InsensitiveEd struct { - NestedString string - } - - type InsensitiveNest struct { - Ed InsensitiveEd - } - - type Insensitive struct { - TopString string - TopInt int - TopFloat float64 - TopBool bool - TopDate time.Time - TopArray []string - Match string - MatcH string - Once string - OncE string - Nest InsensitiveNest - } - - tme, err := time.Parse(time.RFC3339, time.RFC3339[:len(time.RFC3339)-5]) - if err != nil { - panic(err) - } - expected := Insensitive{ - TopString: "string", - TopInt: 1, - TopFloat: 1.1, - TopBool: true, - TopDate: tme, - TopArray: []string{"array"}, - MatcH: "i should be in MatcH only", - Match: "i should be in Match only", - Once: "just once", - OncE: "", - Nest: InsensitiveNest{ - Ed: InsensitiveEd{NestedString: "another string"}, - }, - } - var got Insensitive - if _, err := Decode(caseToml, &got); err != nil { - t.Fatal(err) - } - if !reflect.DeepEqual(expected, got) { - t.Fatalf("\n%#v\n!=\n%#v\n", expected, got) - } -} - -func TestPointers(t *testing.T) { - type Object struct { - Type string - Description string - } - - type Dict struct { - NamedObject map[string]*Object - BaseObject *Object - Strptr *string - Strptrs []*string - } - s1, s2, s3 := "blah", "abc", "def" - expected := &Dict{ - Strptr: &s1, - Strptrs: []*string{&s2, &s3}, - NamedObject: map[string]*Object{ - "foo": {"FOO", "fooooo!!!"}, - "bar": {"BAR", "ba-ba-ba-ba-barrrr!!!"}, - }, - BaseObject: &Object{"BASE", "da base"}, - } - - ex1 := ` -Strptr = "blah" -Strptrs = ["abc", "def"] - -[NamedObject.foo] -Type = "FOO" -Description = "fooooo!!!" - -[NamedObject.bar] -Type = "BAR" -Description = "ba-ba-ba-ba-barrrr!!!" 
- -[BaseObject] -Type = "BASE" -Description = "da base" -` - dict := new(Dict) - _, err := Decode(ex1, dict) - if err != nil { - t.Errorf("Decode error: %v", err) - } - if !reflect.DeepEqual(expected, dict) { - t.Fatalf("\n%#v\n!=\n%#v\n", expected, dict) - } -} - -func TestDecodeDatetime(t *testing.T) { - const noTimestamp = "2006-01-02T15:04:05" - for _, tt := range []struct { - s string - t string - format string - }{ - {"1979-05-27T07:32:00Z", "1979-05-27T07:32:00Z", time.RFC3339}, - {"1979-05-27T00:32:00-07:00", "1979-05-27T00:32:00-07:00", time.RFC3339}, - { - "1979-05-27T00:32:00.999999-07:00", - "1979-05-27T00:32:00.999999-07:00", - time.RFC3339, - }, - {"1979-05-27T07:32:00", "1979-05-27T07:32:00", noTimestamp}, - { - "1979-05-27T00:32:00.999999", - "1979-05-27T00:32:00.999999", - noTimestamp, - }, - {"1979-05-27", "1979-05-27T00:00:00", noTimestamp}, - } { - var x struct{ D time.Time } - input := "d = " + tt.s - if _, err := Decode(input, &x); err != nil { - t.Errorf("Decode(%q): got error: %s", input, err) - continue - } - want, err := time.ParseInLocation(tt.format, tt.t, time.Local) - if err != nil { - panic(err) - } - if !x.D.Equal(want) { - t.Errorf("Decode(%q): got %s; want %s", input, x.D, want) - } - } -} - -func TestDecodeBadDatetime(t *testing.T) { - var x struct{ T time.Time } - for _, s := range []string{ - "123", - "2006-01-50T00:00:00Z", - "2006-01-30T00:00", - "2006-01-30T", - } { - input := "T = " + s - if _, err := Decode(input, &x); err == nil { - t.Errorf("Expected invalid DateTime error for %q", s) - } - } -} - -func TestDecodeMultilineStrings(t *testing.T) { - var x struct { - S string - } - const s0 = `s = """ -a b \n c -d e f -"""` - if _, err := Decode(s0, &x); err != nil { - t.Fatal(err) - } - if want := "a b \n c\nd e f\n"; x.S != want { - t.Errorf("got: %q; want: %q", x.S, want) - } - const s1 = `s = """a b c\ -"""` - if _, err := Decode(s1, &x); err != nil { - t.Fatal(err) - } - if want := "a b c"; x.S != want { - t.Errorf("got: %q; want: %q", x.S, want) - } -} - -type sphere struct { - Center [3]float64 - Radius float64 -} - -func TestDecodeSimpleArray(t *testing.T) { - var s1 sphere - if _, err := Decode(`center = [0.0, 1.5, 0.0]`, &s1); err != nil { - t.Fatal(err) - } -} - -func TestDecodeArrayWrongSize(t *testing.T) { - var s1 sphere - if _, err := Decode(`center = [0.1, 2.3]`, &s1); err == nil { - t.Fatal("Expected array type mismatch error") - } -} - -func TestDecodeLargeIntoSmallInt(t *testing.T) { - type table struct { - Value int8 - } - var tab table - if _, err := Decode(`value = 500`, &tab); err == nil { - t.Fatal("Expected integer out-of-bounds error.") - } -} - -func TestDecodeSizedInts(t *testing.T) { - type table struct { - U8 uint8 - U16 uint16 - U32 uint32 - U64 uint64 - U uint - I8 int8 - I16 int16 - I32 int32 - I64 int64 - I int - } - answer := table{1, 1, 1, 1, 1, -1, -1, -1, -1, -1} - toml := ` - u8 = 1 - u16 = 1 - u32 = 1 - u64 = 1 - u = 1 - i8 = -1 - i16 = -1 - i32 = -1 - i64 = -1 - i = -1 - ` - var tab table - if _, err := Decode(toml, &tab); err != nil { - t.Fatal(err.Error()) - } - if answer != tab { - t.Fatalf("Expected %#v but got %#v", answer, tab) - } -} - -func TestDecodeInts(t *testing.T) { - for _, tt := range []struct { - s string - want int64 - }{ - {"0", 0}, - {"+99", 99}, - {"-10", -10}, - {"1_234_567", 1234567}, - {"1_2_3_4", 1234}, - {"-9_223_372_036_854_775_808", math.MinInt64}, - {"9_223_372_036_854_775_807", math.MaxInt64}, - } { - var x struct{ N int64 } - input := "n = " + tt.s - if _, err := Decode(input, 
&x); err != nil { - t.Errorf("Decode(%q): got error: %s", input, err) - continue - } - if x.N != tt.want { - t.Errorf("Decode(%q): got %d; want %d", input, x.N, tt.want) - } - } -} - -func TestDecodeFloats(t *testing.T) { - for _, tt := range []struct { - s string - want float64 - }{ - {"+1.0", 1}, - {"3.1415", 3.1415}, - {"-0.01", -0.01}, - {"5e+22", 5e22}, - {"1e6", 1e6}, - {"-2E-2", -2e-2}, - {"6.626e-34", 6.626e-34}, - {"9_224_617.445_991_228_313", 9224617.445991228313}, - {"9_876.54_32e1_0", 9876.5432e10}, - } { - var x struct{ N float64 } - input := "n = " + tt.s - if _, err := Decode(input, &x); err != nil { - t.Errorf("Decode(%q): got error: %s", input, err) - continue - } - if x.N != tt.want { - t.Errorf("Decode(%q): got %d; want %d", input, x.N, tt.want) - } - } -} - -func TestDecodeMalformedNumbers(t *testing.T) { - for _, tt := range []struct { - s string - want string - }{ - {"++99", "Expected a digit"}, - {"0..1", "must be followed by one or more digits"}, - {"0.1.2", "Invalid float value"}, - {"1e2.3", "Invalid float value"}, - {"1e2e3", "Invalid float value"}, - {"_123", "Expected value"}, - {"123_", "surrounded by digits"}, - {"1._23", "surrounded by digits"}, - {"1e__23", "surrounded by digits"}, - {"123.", "must be followed by one or more digits"}, - {"1.e2", "must be followed by one or more digits"}, - } { - var x struct{ N interface{} } - input := "n = " + tt.s - _, err := Decode(input, &x) - if err == nil { - t.Errorf("Decode(%q): got nil, want error containing %q", - input, tt.want) - continue - } - if !strings.Contains(err.Error(), tt.want) { - t.Errorf("Decode(%q): got %q, want error containing %q", - input, err, tt.want) - } - } -} - -func TestDecodeBadValues(t *testing.T) { - for _, tt := range []struct { - v interface{} - want string - }{ - {3, "non-pointer type"}, - {(*int)(nil), "nil"}, - } { - _, err := Decode(`x = 3`, tt.v) - if err == nil { - t.Errorf("Decode(%v): got nil; want error containing %q", - tt.v, tt.want) - continue - } - if !strings.Contains(err.Error(), tt.want) { - t.Errorf("Decode(%v): got %q; want error containing %q", - tt.v, err, tt.want) - } - } -} - -func TestUnmarshaler(t *testing.T) { - - var tomlBlob = ` -[dishes.hamboogie] -name = "Hamboogie with fries" -price = 10.99 - -[[dishes.hamboogie.ingredients]] -name = "Bread Bun" - -[[dishes.hamboogie.ingredients]] -name = "Lettuce" - -[[dishes.hamboogie.ingredients]] -name = "Real Beef Patty" - -[[dishes.hamboogie.ingredients]] -name = "Tomato" - -[dishes.eggsalad] -name = "Egg Salad with rice" -price = 3.99 - -[[dishes.eggsalad.ingredients]] -name = "Egg" - -[[dishes.eggsalad.ingredients]] -name = "Mayo" - -[[dishes.eggsalad.ingredients]] -name = "Rice" -` - m := &menu{} - if _, err := Decode(tomlBlob, m); err != nil { - t.Fatal(err) - } - - if len(m.Dishes) != 2 { - t.Log("two dishes should be loaded with UnmarshalTOML()") - t.Errorf("expected %d but got %d", 2, len(m.Dishes)) - } - - eggSalad := m.Dishes["eggsalad"] - if _, ok := interface{}(eggSalad).(dish); !ok { - t.Errorf("expected a dish") - } - - if eggSalad.Name != "Egg Salad with rice" { - t.Errorf("expected the dish to be named 'Egg Salad with rice'") - } - - if len(eggSalad.Ingredients) != 3 { - t.Log("dish should be loaded with UnmarshalTOML()") - t.Errorf("expected %d but got %d", 3, len(eggSalad.Ingredients)) - } - - found := false - for _, i := range eggSalad.Ingredients { - if i.Name == "Rice" { - found = true - break - } - } - if !found { - t.Error("Rice was not loaded in UnmarshalTOML()") - } - - // test on a value - 
must be passed as * - o := menu{} - if _, err := Decode(tomlBlob, &o); err != nil { - t.Fatal(err) - } - -} - -type menu struct { - Dishes map[string]dish -} - -func (m *menu) UnmarshalTOML(p interface{}) error { - m.Dishes = make(map[string]dish) - data, _ := p.(map[string]interface{}) - dishes := data["dishes"].(map[string]interface{}) - for n, v := range dishes { - if d, ok := v.(map[string]interface{}); ok { - nd := dish{} - nd.UnmarshalTOML(d) - m.Dishes[n] = nd - } else { - return fmt.Errorf("not a dish") - } - } - return nil -} - -type dish struct { - Name string - Price float32 - Ingredients []ingredient -} - -func (d *dish) UnmarshalTOML(p interface{}) error { - data, _ := p.(map[string]interface{}) - d.Name, _ = data["name"].(string) - d.Price, _ = data["price"].(float32) - ingredients, _ := data["ingredients"].([]map[string]interface{}) - for _, e := range ingredients { - n, _ := interface{}(e).(map[string]interface{}) - name, _ := n["name"].(string) - i := ingredient{name} - d.Ingredients = append(d.Ingredients, i) - } - return nil -} - -type ingredient struct { - Name string -} - -func TestDecodeSlices(t *testing.T) { - type T struct { - S []string - } - for i, tt := range []struct { - v T - input string - want T - }{ - {T{}, "", T{}}, - {T{[]string{}}, "", T{[]string{}}}, - {T{[]string{"a", "b"}}, "", T{[]string{"a", "b"}}}, - {T{}, "S = []", T{[]string{}}}, - {T{[]string{}}, "S = []", T{[]string{}}}, - {T{[]string{"a", "b"}}, "S = []", T{[]string{}}}, - {T{}, `S = ["x"]`, T{[]string{"x"}}}, - {T{[]string{}}, `S = ["x"]`, T{[]string{"x"}}}, - {T{[]string{"a", "b"}}, `S = ["x"]`, T{[]string{"x"}}}, - } { - if _, err := Decode(tt.input, &tt.v); err != nil { - t.Errorf("[%d] %s", i, err) - continue - } - if !reflect.DeepEqual(tt.v, tt.want) { - t.Errorf("[%d] got %#v; want %#v", i, tt.v, tt.want) - } - } -} - -func TestDecodePrimitive(t *testing.T) { - type S struct { - P Primitive - } - type T struct { - S []int - } - slicep := func(s []int) *[]int { return &s } - arrayp := func(a [2]int) *[2]int { return &a } - mapp := func(m map[string]int) *map[string]int { return &m } - for i, tt := range []struct { - v interface{} - input string - want interface{} - }{ - // slices - {slicep(nil), "", slicep(nil)}, - {slicep([]int{}), "", slicep([]int{})}, - {slicep([]int{1, 2, 3}), "", slicep([]int{1, 2, 3})}, - {slicep(nil), "P = [1,2]", slicep([]int{1, 2})}, - {slicep([]int{}), "P = [1,2]", slicep([]int{1, 2})}, - {slicep([]int{1, 2, 3}), "P = [1,2]", slicep([]int{1, 2})}, - - // arrays - {arrayp([2]int{2, 3}), "", arrayp([2]int{2, 3})}, - {arrayp([2]int{2, 3}), "P = [3,4]", arrayp([2]int{3, 4})}, - - // maps - {mapp(nil), "", mapp(nil)}, - {mapp(map[string]int{}), "", mapp(map[string]int{})}, - {mapp(map[string]int{"a": 1}), "", mapp(map[string]int{"a": 1})}, - {mapp(nil), "[P]\na = 2", mapp(map[string]int{"a": 2})}, - {mapp(map[string]int{}), "[P]\na = 2", mapp(map[string]int{"a": 2})}, - {mapp(map[string]int{"a": 1, "b": 3}), "[P]\na = 2", mapp(map[string]int{"a": 2, "b": 3})}, - - // structs - {&T{nil}, "[P]", &T{nil}}, - {&T{[]int{}}, "[P]", &T{[]int{}}}, - {&T{[]int{1, 2, 3}}, "[P]", &T{[]int{1, 2, 3}}}, - {&T{nil}, "[P]\nS = [1,2]", &T{[]int{1, 2}}}, - {&T{[]int{}}, "[P]\nS = [1,2]", &T{[]int{1, 2}}}, - {&T{[]int{1, 2, 3}}, "[P]\nS = [1,2]", &T{[]int{1, 2}}}, - } { - var s S - md, err := Decode(tt.input, &s) - if err != nil { - t.Errorf("[%d] Decode error: %s", i, err) - continue - } - if err := md.PrimitiveDecode(s.P, tt.v); err != nil { - t.Errorf("[%d] PrimitiveDecode error: 
%s", i, err) - continue - } - if !reflect.DeepEqual(tt.v, tt.want) { - t.Errorf("[%d] got %#v; want %#v", i, tt.v, tt.want) - } - } -} - -func ExampleMetaData_PrimitiveDecode() { - var md MetaData - var err error - - var tomlBlob = ` -ranking = ["Springsteen", "J Geils"] - -[bands.Springsteen] -started = 1973 -albums = ["Greetings", "WIESS", "Born to Run", "Darkness"] - -[bands."J Geils"] -started = 1970 -albums = ["The J. Geils Band", "Full House", "Blow Your Face Out"] -` - - type band struct { - Started int - Albums []string - } - type classics struct { - Ranking []string - Bands map[string]Primitive - } - - // Do the initial decode. Reflection is delayed on Primitive values. - var music classics - if md, err = Decode(tomlBlob, &music); err != nil { - log.Fatal(err) - } - - // MetaData still includes information on Primitive values. - fmt.Printf("Is `bands.Springsteen` defined? %v\n", - md.IsDefined("bands", "Springsteen")) - - // Decode primitive data into Go values. - for _, artist := range music.Ranking { - // A band is a primitive value, so we need to decode it to get a - // real `band` value. - primValue := music.Bands[artist] - - var aBand band - if err = md.PrimitiveDecode(primValue, &aBand); err != nil { - log.Fatal(err) - } - fmt.Printf("%s started in %d.\n", artist, aBand.Started) - } - // Check to see if there were any fields left undecoded. - // Note that this won't be empty before decoding the Primitive value! - fmt.Printf("Undecoded: %q\n", md.Undecoded()) - - // Output: - // Is `bands.Springsteen` defined? true - // Springsteen started in 1973. - // J Geils started in 1970. - // Undecoded: [] -} - -func ExampleDecode() { - var tomlBlob = ` -# Some comments. -[alpha] -ip = "10.0.0.1" - - [alpha.config] - Ports = [ 8001, 8002 ] - Location = "Toronto" - Created = 1987-07-05T05:45:00Z - -[beta] -ip = "10.0.0.2" - - [beta.config] - Ports = [ 9001, 9002 ] - Location = "New Jersey" - Created = 1887-01-05T05:55:00Z -` - - type serverConfig struct { - Ports []int - Location string - Created time.Time - } - - type server struct { - IP string `toml:"ip,omitempty"` - Config serverConfig `toml:"config"` - } - - type servers map[string]server - - var config servers - if _, err := Decode(tomlBlob, &config); err != nil { - log.Fatal(err) - } - - for _, name := range []string{"alpha", "beta"} { - s := config[name] - fmt.Printf("Server: %s (ip: %s) in %s created on %s\n", - name, s.IP, s.Config.Location, - s.Config.Created.Format("2006-01-02")) - fmt.Printf("Ports: %v\n", s.Config.Ports) - } - - // Output: - // Server: alpha (ip: 10.0.0.1) in Toronto created on 1987-07-05 - // Ports: [8001 8002] - // Server: beta (ip: 10.0.0.2) in New Jersey created on 1887-01-05 - // Ports: [9001 9002] -} - -type duration struct { - time.Duration -} - -func (d *duration) UnmarshalText(text []byte) error { - var err error - d.Duration, err = time.ParseDuration(string(text)) - return err -} - -// Example Unmarshaler shows how to decode TOML strings into your own -// custom data type. 
-func Example_unmarshaler() { - blob := ` -[[song]] -name = "Thunder Road" -duration = "4m49s" - -[[song]] -name = "Stairway to Heaven" -duration = "8m03s" -` - type song struct { - Name string - Duration duration - } - type songs struct { - Song []song - } - var favorites songs - if _, err := Decode(blob, &favorites); err != nil { - log.Fatal(err) - } - - // Code to implement the TextUnmarshaler interface for `duration`: - // - // type duration struct { - // time.Duration - // } - // - // func (d *duration) UnmarshalText(text []byte) error { - // var err error - // d.Duration, err = time.ParseDuration(string(text)) - // return err - // } - - for _, s := range favorites.Song { - fmt.Printf("%s (%s)\n", s.Name, s.Duration) - } - // Output: - // Thunder Road (4m49s) - // Stairway to Heaven (8m3s) -} - -// Example StrictDecoding shows how to detect whether there are keys in the -// TOML document that weren't decoded into the value given. This is useful -// for returning an error to the user if they've included extraneous fields -// in their configuration. -func Example_strictDecoding() { - var blob = ` -key1 = "value1" -key2 = "value2" -key3 = "value3" -` - type config struct { - Key1 string - Key3 string - } - - var conf config - md, err := Decode(blob, &conf) - if err != nil { - log.Fatal(err) - } - fmt.Printf("Undecoded keys: %q\n", md.Undecoded()) - // Output: - // Undecoded keys: ["key2"] -} - -// Example UnmarshalTOML shows how to implement a struct type that knows how to -// unmarshal itself. The struct must take full responsibility for mapping the -// values passed into the struct. The method may be used with interfaces in a -// struct in cases where the actual type is not known until the data is -// examined. -func Example_unmarshalTOML() { - - var blob = ` -[[parts]] -type = "valve" -id = "valve-1" -size = 1.2 -rating = 4 - -[[parts]] -type = "valve" -id = "valve-2" -size = 2.1 -rating = 5 - -[[parts]] -type = "pipe" -id = "pipe-1" -length = 2.1 -diameter = 12 - -[[parts]] -type = "cable" -id = "cable-1" -length = 12 -rating = 3.1 -` - o := &order{} - err := Unmarshal([]byte(blob), o) - if err != nil { - log.Fatal(err) - } - - fmt.Println(len(o.parts)) - - for _, part := range o.parts { - fmt.Println(part.Name()) - } - - // Code to implement UmarshalJSON. - - // type order struct { - // // NOTE `order.parts` is a private slice of type `part` which is an - // // interface and may only be loaded from toml using the - // // UnmarshalTOML() method of the Umarshaler interface. - // parts parts - // } - - // func (o *order) UnmarshalTOML(data interface{}) error { - - // // NOTE the example below contains detailed type casting to show how - // // the 'data' is retrieved. In operational use, a type cast wrapper - // // may be prefered e.g. 
- // // - // // func AsMap(v interface{}) (map[string]interface{}, error) { - // // return v.(map[string]interface{}) - // // } - // // - // // resulting in: - // // d, _ := AsMap(data) - // // - - // d, _ := data.(map[string]interface{}) - // parts, _ := d["parts"].([]map[string]interface{}) - - // for _, p := range parts { - - // typ, _ := p["type"].(string) - // id, _ := p["id"].(string) - - // // detect the type of part and handle each case - // switch p["type"] { - // case "valve": - - // size := float32(p["size"].(float64)) - // rating := int(p["rating"].(int64)) - - // valve := &valve{ - // Type: typ, - // ID: id, - // Size: size, - // Rating: rating, - // } - - // o.parts = append(o.parts, valve) - - // case "pipe": - - // length := float32(p["length"].(float64)) - // diameter := int(p["diameter"].(int64)) - - // pipe := &pipe{ - // Type: typ, - // ID: id, - // Length: length, - // Diameter: diameter, - // } - - // o.parts = append(o.parts, pipe) - - // case "cable": - - // length := int(p["length"].(int64)) - // rating := float32(p["rating"].(float64)) - - // cable := &cable{ - // Type: typ, - // ID: id, - // Length: length, - // Rating: rating, - // } - - // o.parts = append(o.parts, cable) - - // } - // } - - // return nil - // } - - // type parts []part - - // type part interface { - // Name() string - // } - - // type valve struct { - // Type string - // ID string - // Size float32 - // Rating int - // } - - // func (v *valve) Name() string { - // return fmt.Sprintf("VALVE: %s", v.ID) - // } - - // type pipe struct { - // Type string - // ID string - // Length float32 - // Diameter int - // } - - // func (p *pipe) Name() string { - // return fmt.Sprintf("PIPE: %s", p.ID) - // } - - // type cable struct { - // Type string - // ID string - // Length int - // Rating float32 - // } - - // func (c *cable) Name() string { - // return fmt.Sprintf("CABLE: %s", c.ID) - // } - - // Output: - // 4 - // VALVE: valve-1 - // VALVE: valve-2 - // PIPE: pipe-1 - // CABLE: cable-1 - -} - -type order struct { - // NOTE `order.parts` is a private slice of type `part` which is an - // interface and may only be loaded from toml using the UnmarshalTOML() - // method of the Umarshaler interface. - parts parts -} - -func (o *order) UnmarshalTOML(data interface{}) error { - - // NOTE the example below contains detailed type casting to show how - // the 'data' is retrieved. In operational use, a type cast wrapper - // may be prefered e.g. 
- // - // func AsMap(v interface{}) (map[string]interface{}, error) { - // return v.(map[string]interface{}) - // } - // - // resulting in: - // d, _ := AsMap(data) - // - - d, _ := data.(map[string]interface{}) - parts, _ := d["parts"].([]map[string]interface{}) - - for _, p := range parts { - - typ, _ := p["type"].(string) - id, _ := p["id"].(string) - - // detect the type of part and handle each case - switch p["type"] { - case "valve": - - size := float32(p["size"].(float64)) - rating := int(p["rating"].(int64)) - - valve := &valve{ - Type: typ, - ID: id, - Size: size, - Rating: rating, - } - - o.parts = append(o.parts, valve) - - case "pipe": - - length := float32(p["length"].(float64)) - diameter := int(p["diameter"].(int64)) - - pipe := &pipe{ - Type: typ, - ID: id, - Length: length, - Diameter: diameter, - } - - o.parts = append(o.parts, pipe) - - case "cable": - - length := int(p["length"].(int64)) - rating := float32(p["rating"].(float64)) - - cable := &cable{ - Type: typ, - ID: id, - Length: length, - Rating: rating, - } - - o.parts = append(o.parts, cable) - - } - } - - return nil -} - -type parts []part - -type part interface { - Name() string -} - -type valve struct { - Type string - ID string - Size float32 - Rating int -} - -func (v *valve) Name() string { - return fmt.Sprintf("VALVE: %s", v.ID) -} - -type pipe struct { - Type string - ID string - Length float32 - Diameter int -} - -func (p *pipe) Name() string { - return fmt.Sprintf("PIPE: %s", p.ID) -} - -type cable struct { - Type string - ID string - Length int - Rating float32 -} - -func (c *cable) Name() string { - return fmt.Sprintf("CABLE: %s", c.ID) -} diff --git a/vendor/github.com/BurntSushi/toml/doc.go b/vendor/github.com/BurntSushi/toml/doc.go deleted file mode 100644 index fe26800..0000000 --- a/vendor/github.com/BurntSushi/toml/doc.go +++ /dev/null @@ -1,27 +0,0 @@ -/* -Package toml provides facilities for decoding and encoding TOML configuration -files via reflection. There is also support for delaying decoding with -the Primitive type, and querying the set of keys in a TOML document with the -MetaData type. - -The specification implemented: https://github.com/mojombo/toml - -The sub-command github.com/BurntSushi/toml/cmd/tomlv can be used to verify -whether a file is a valid TOML document. It can also be used to print the -type of each key in a TOML document. - -Testing - -There are two important types of tests used for this package. The first is -contained inside '*_test.go' files and uses the standard Go unit testing -framework. These tests are primarily devoted to holistically testing the -decoder and encoder. - -The second type of testing is used to verify the implementation's adherence -to the TOML specification. These tests have been factored into their own -project: https://github.com/BurntSushi/toml-test - -The reason the tests are in a separate project is so that they can be used by -any implementation of TOML. Namely, it is language agnostic. 
-*/ -package toml diff --git a/vendor/github.com/BurntSushi/toml/encode.go b/vendor/github.com/BurntSushi/toml/encode.go deleted file mode 100644 index f538261..0000000 --- a/vendor/github.com/BurntSushi/toml/encode.go +++ /dev/null @@ -1,569 +0,0 @@ -package toml - -import ( - "bufio" - "errors" - "fmt" - "io" - "reflect" - "sort" - "strconv" - "strings" - "time" -) - -type tomlEncodeError struct{ error } - -var ( - errArrayMixedElementTypes = errors.New( - "can't encode array with mixed element types") - errArrayNilElement = errors.New( - "can't encode array with nil element") - errNonString = errors.New( - "can't encode a map with non-string key type") - errAnonNonStruct = errors.New( - "can't encode an anonymous field that is not a struct") - errArrayNoTable = errors.New( - "TOML array element can't contain a table") - errNoKey = errors.New( - "top-level values must be a Go map or struct") - errAnything = errors.New("") // used in testing -) - -var quotedReplacer = strings.NewReplacer( - "\t", "\\t", - "\n", "\\n", - "\r", "\\r", - "\"", "\\\"", - "\\", "\\\\", -) - -// Encoder controls the encoding of Go values to a TOML document to some -// io.Writer. -// -// The indentation level can be controlled with the Indent field. -type Encoder struct { - // A single indentation level. By default it is two spaces. - Indent string - - // hasWritten is whether we have written any output to w yet. - hasWritten bool - w *bufio.Writer -} - -// NewEncoder returns a TOML encoder that encodes Go values to the io.Writer -// given. By default, a single indentation level is 2 spaces. -func NewEncoder(w io.Writer) *Encoder { - return &Encoder{ - w: bufio.NewWriter(w), - Indent: " ", - } -} - -// Encode writes a TOML representation of the Go value to the underlying -// io.Writer. If the value given cannot be encoded to a valid TOML document, -// then an error is returned. -// -// The mapping between Go values and TOML values should be precisely the same -// as for the Decode* functions. Similarly, the TextMarshaler interface is -// supported by encoding the resulting bytes as strings. (If you want to write -// arbitrary binary data then you will need to use something like base64 since -// TOML does not have any binary types.) -// -// When encoding TOML hashes (i.e., Go maps or structs), keys without any -// sub-hashes are encoded first. -// -// If a Go map is encoded, then its keys are sorted alphabetically for -// deterministic output. More control over this behavior may be provided if -// there is demand for it. -// -// Encoding Go values without a corresponding TOML representation---like map -// types with non-string keys---will cause an error to be returned. Similarly -// for mixed arrays/slices, arrays/slices with nil elements, embedded -// non-struct types and nested slices containing maps or structs. -// (e.g., [][]map[string]string is not allowed but []map[string]string is OK -// and so is []map[string][]string.) -func (enc *Encoder) Encode(v interface{}) error { - rv := eindirect(reflect.ValueOf(v)) - if err := enc.safeEncode(Key([]string{}), rv); err != nil { - return err - } - return enc.w.Flush() -} - -func (enc *Encoder) safeEncode(key Key, rv reflect.Value) (err error) { - defer func() { - if r := recover(); r != nil { - if terr, ok := r.(tomlEncodeError); ok { - err = terr.error - return - } - panic(r) - } - }() - enc.encode(key, rv) - return nil -} - -func (enc *Encoder) encode(key Key, rv reflect.Value) { - // Special case. Time needs to be in ISO8601 format. - // Special case. 
If we can marshal the type to text, then we used that. - // Basically, this prevents the encoder for handling these types as - // generic structs (or whatever the underlying type of a TextMarshaler is). - switch rv.Interface().(type) { - case time.Time, TextMarshaler: - enc.keyEqElement(key, rv) - return - } - - k := rv.Kind() - switch k { - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, - reflect.Int64, - reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, - reflect.Uint64, - reflect.Float32, reflect.Float64, reflect.String, reflect.Bool: - enc.keyEqElement(key, rv) - case reflect.Array, reflect.Slice: - if typeEqual(tomlArrayHash, tomlTypeOfGo(rv)) { - enc.eArrayOfTables(key, rv) - } else { - enc.keyEqElement(key, rv) - } - case reflect.Interface: - if rv.IsNil() { - return - } - enc.encode(key, rv.Elem()) - case reflect.Map: - if rv.IsNil() { - return - } - enc.eTable(key, rv) - case reflect.Ptr: - if rv.IsNil() { - return - } - enc.encode(key, rv.Elem()) - case reflect.Struct: - enc.eTable(key, rv) - default: - panic(e("Unsupported type for key '%s': %s", key, k)) - } -} - -// eElement encodes any value that can be an array element (primitives and -// arrays). -func (enc *Encoder) eElement(rv reflect.Value) { - switch v := rv.Interface().(type) { - case time.Time: - // Special case time.Time as a primitive. Has to come before - // TextMarshaler below because time.Time implements - // encoding.TextMarshaler, but we need to always use UTC. - enc.wf(v.In(time.FixedZone("UTC", 0)).Format("2006-01-02T15:04:05Z")) - return - case TextMarshaler: - // Special case. Use text marshaler if it's available for this value. - if s, err := v.MarshalText(); err != nil { - encPanic(err) - } else { - enc.writeQuoted(string(s)) - } - return - } - switch rv.Kind() { - case reflect.Bool: - enc.wf(strconv.FormatBool(rv.Bool())) - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, - reflect.Int64: - enc.wf(strconv.FormatInt(rv.Int(), 10)) - case reflect.Uint, reflect.Uint8, reflect.Uint16, - reflect.Uint32, reflect.Uint64: - enc.wf(strconv.FormatUint(rv.Uint(), 10)) - case reflect.Float32: - enc.wf(floatAddDecimal(strconv.FormatFloat(rv.Float(), 'f', -1, 32))) - case reflect.Float64: - enc.wf(floatAddDecimal(strconv.FormatFloat(rv.Float(), 'f', -1, 64))) - case reflect.Array, reflect.Slice: - enc.eArrayOrSliceElement(rv) - case reflect.Interface: - enc.eElement(rv.Elem()) - case reflect.String: - enc.writeQuoted(rv.String()) - default: - panic(e("Unexpected primitive type: %s", rv.Kind())) - } -} - -// By the TOML spec, all floats must have a decimal with at least one -// number on either side. 
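-// floatAddDecimal therefore appends ".0" when strconv.FormatFloat produced no decimal point, so a Go float value of 3 is written as 3.0 while 2.5 is left unchanged.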
-func floatAddDecimal(fstr string) string { - if !strings.Contains(fstr, ".") { - return fstr + ".0" - } - return fstr -} - -func (enc *Encoder) writeQuoted(s string) { - enc.wf("\"%s\"", quotedReplacer.Replace(s)) -} - -func (enc *Encoder) eArrayOrSliceElement(rv reflect.Value) { - length := rv.Len() - enc.wf("[") - for i := 0; i < length; i++ { - elem := rv.Index(i) - enc.eElement(elem) - if i != length-1 { - enc.wf(", ") - } - } - enc.wf("]") -} - -func (enc *Encoder) eArrayOfTables(key Key, rv reflect.Value) { - if len(key) == 0 { - encPanic(errNoKey) - } - for i := 0; i < rv.Len(); i++ { - trv := rv.Index(i) - if isNil(trv) { - continue - } - panicIfInvalidKey(key) - enc.newline() - enc.wf("%s[[%s]]", enc.indentStr(key), key.maybeQuotedAll()) - enc.newline() - enc.eMapOrStruct(key, trv) - } -} - -func (enc *Encoder) eTable(key Key, rv reflect.Value) { - panicIfInvalidKey(key) - if len(key) == 1 { - // Output an extra new line between top-level tables. - // (The newline isn't written if nothing else has been written though.) - enc.newline() - } - if len(key) > 0 { - enc.wf("%s[%s]", enc.indentStr(key), key.maybeQuotedAll()) - enc.newline() - } - enc.eMapOrStruct(key, rv) -} - -func (enc *Encoder) eMapOrStruct(key Key, rv reflect.Value) { - switch rv := eindirect(rv); rv.Kind() { - case reflect.Map: - enc.eMap(key, rv) - case reflect.Struct: - enc.eStruct(key, rv) - default: - panic("eTable: unhandled reflect.Value Kind: " + rv.Kind().String()) - } -} - -func (enc *Encoder) eMap(key Key, rv reflect.Value) { - rt := rv.Type() - if rt.Key().Kind() != reflect.String { - encPanic(errNonString) - } - - // Sort keys so that we have deterministic output. And write keys directly - // underneath this key first, before writing sub-structs or sub-maps. - var mapKeysDirect, mapKeysSub []string - for _, mapKey := range rv.MapKeys() { - k := mapKey.String() - if typeIsHash(tomlTypeOfGo(rv.MapIndex(mapKey))) { - mapKeysSub = append(mapKeysSub, k) - } else { - mapKeysDirect = append(mapKeysDirect, k) - } - } - - var writeMapKeys = func(mapKeys []string) { - sort.Strings(mapKeys) - for _, mapKey := range mapKeys { - mrv := rv.MapIndex(reflect.ValueOf(mapKey)) - if isNil(mrv) { - // Don't write anything for nil fields. - continue - } - enc.encode(key.add(mapKey), mrv) - } - } - writeMapKeys(mapKeysDirect) - writeMapKeys(mapKeysSub) -} - -func (enc *Encoder) eStruct(key Key, rv reflect.Value) { - // Write keys for fields directly under this key first, because if we write - // a field that creates a new table, then all keys under it will be in that - // table (not the one we're writing here). - rt := rv.Type() - var fieldsDirect, fieldsSub [][]int - var addFields func(rt reflect.Type, rv reflect.Value, start []int) - addFields = func(rt reflect.Type, rv reflect.Value, start []int) { - for i := 0; i < rt.NumField(); i++ { - f := rt.Field(i) - // skip unexported fields - if f.PkgPath != "" && !f.Anonymous { - continue - } - frv := rv.Field(i) - if f.Anonymous { - t := f.Type - switch t.Kind() { - case reflect.Struct: - // Treat anonymous struct fields with - // tag names as though they are not - // anonymous, like encoding/json does. - if getOptions(f.Tag).name == "" { - addFields(t, frv, f.Index) - continue - } - case reflect.Ptr: - if t.Elem().Kind() == reflect.Struct && - getOptions(f.Tag).name == "" { - if !frv.IsNil() { - addFields(t.Elem(), frv.Elem(), f.Index) - } - continue - } - // Fall through to the normal field encoding logic below - // for non-struct anonymous fields. 
- } - } - - if typeIsHash(tomlTypeOfGo(frv)) { - fieldsSub = append(fieldsSub, append(start, f.Index...)) - } else { - fieldsDirect = append(fieldsDirect, append(start, f.Index...)) - } - } - } - addFields(rt, rv, nil) - - var writeFields = func(fields [][]int) { - for _, fieldIndex := range fields { - sft := rt.FieldByIndex(fieldIndex) - sf := rv.FieldByIndex(fieldIndex) - if isNil(sf) { - // Don't write anything for nil fields. - continue - } - - opts := getOptions(sft.Tag) - if opts.skip { - continue - } - keyName := sft.Name - if opts.name != "" { - keyName = opts.name - } - if opts.omitempty && isEmpty(sf) { - continue - } - if opts.omitzero && isZero(sf) { - continue - } - - enc.encode(key.add(keyName), sf) - } - } - writeFields(fieldsDirect) - writeFields(fieldsSub) -} - -// tomlTypeName returns the TOML type name of the Go value's type. It is -// used to determine whether the types of array elements are mixed (which is -// forbidden). If the Go value is nil, then it is illegal for it to be an array -// element, and valueIsNil is returned as true. - -// Returns the TOML type of a Go value. The type may be `nil`, which means -// no concrete TOML type could be found. -func tomlTypeOfGo(rv reflect.Value) tomlType { - if isNil(rv) || !rv.IsValid() { - return nil - } - switch rv.Kind() { - case reflect.Bool: - return tomlBool - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, - reflect.Int64, - reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, - reflect.Uint64: - return tomlInteger - case reflect.Float32, reflect.Float64: - return tomlFloat - case reflect.Array, reflect.Slice: - if typeEqual(tomlHash, tomlArrayType(rv)) { - return tomlArrayHash - } else { - return tomlArray - } - case reflect.Ptr, reflect.Interface: - return tomlTypeOfGo(rv.Elem()) - case reflect.String: - return tomlString - case reflect.Map: - return tomlHash - case reflect.Struct: - switch rv.Interface().(type) { - case time.Time: - return tomlDatetime - case TextMarshaler: - return tomlString - default: - return tomlHash - } - default: - panic("unexpected reflect.Kind: " + rv.Kind().String()) - } -} - -// tomlArrayType returns the element type of a TOML array. The type returned -// may be nil if it cannot be determined (e.g., a nil slice or a zero length -// slize). This function may also panic if it finds a type that cannot be -// expressed in TOML (such as nil elements, heterogeneous arrays or directly -// nested arrays of tables). -func tomlArrayType(rv reflect.Value) tomlType { - if isNil(rv) || !rv.IsValid() || rv.Len() == 0 { - return nil - } - firstType := tomlTypeOfGo(rv.Index(0)) - if firstType == nil { - encPanic(errArrayNilElement) - } - - rvlen := rv.Len() - for i := 1; i < rvlen; i++ { - elem := rv.Index(i) - switch elemType := tomlTypeOfGo(elem); { - case elemType == nil: - encPanic(errArrayNilElement) - case !typeEqual(firstType, elemType): - encPanic(errArrayMixedElementTypes) - } - } - // If we have a nested array, then we must make sure that the nested - // array contains ONLY primitives. - // This checks arbitrarily nested arrays. 
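-// For example, a Go value of type []map[string]string encodes fine, but [][]map[string]string trips errArrayNoTable below.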
- if typeEqual(firstType, tomlArray) || typeEqual(firstType, tomlArrayHash) { - nest := tomlArrayType(eindirect(rv.Index(0))) - if typeEqual(nest, tomlHash) || typeEqual(nest, tomlArrayHash) { - encPanic(errArrayNoTable) - } - } - return firstType -} - -type tagOptions struct { - skip bool // "-" - name string - omitempty bool - omitzero bool -} - -func getOptions(tag reflect.StructTag) tagOptions { - t := tag.Get("toml") - if t == "-" { - return tagOptions{skip: true} - } - var opts tagOptions - parts := strings.Split(t, ",") - opts.name = parts[0] - for _, s := range parts[1:] { - switch s { - case "omitempty": - opts.omitempty = true - case "omitzero": - opts.omitzero = true - } - } - return opts -} - -func isZero(rv reflect.Value) bool { - switch rv.Kind() { - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: - return rv.Int() == 0 - case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: - return rv.Uint() == 0 - case reflect.Float32, reflect.Float64: - return rv.Float() == 0.0 - } - return false -} - -func isEmpty(rv reflect.Value) bool { - switch rv.Kind() { - case reflect.Array, reflect.Slice, reflect.Map, reflect.String: - return rv.Len() == 0 - case reflect.Bool: - return !rv.Bool() - } - return false -} - -func (enc *Encoder) newline() { - if enc.hasWritten { - enc.wf("\n") - } -} - -func (enc *Encoder) keyEqElement(key Key, val reflect.Value) { - if len(key) == 0 { - encPanic(errNoKey) - } - panicIfInvalidKey(key) - enc.wf("%s%s = ", enc.indentStr(key), key.maybeQuoted(len(key)-1)) - enc.eElement(val) - enc.newline() -} - -func (enc *Encoder) wf(format string, v ...interface{}) { - if _, err := fmt.Fprintf(enc.w, format, v...); err != nil { - encPanic(err) - } - enc.hasWritten = true -} - -func (enc *Encoder) indentStr(key Key) string { - return strings.Repeat(enc.Indent, len(key)-1) -} - -func encPanic(err error) { - panic(tomlEncodeError{err}) -} - -func eindirect(v reflect.Value) reflect.Value { - switch v.Kind() { - case reflect.Ptr, reflect.Interface: - return eindirect(v.Elem()) - default: - return v - } -} - -func isNil(rv reflect.Value) bool { - switch rv.Kind() { - case reflect.Interface, reflect.Map, reflect.Ptr, reflect.Slice: - return rv.IsNil() - default: - return false - } -} - -func panicIfInvalidKey(key Key) { - for _, k := range key { - if len(k) == 0 { - encPanic(e("Key '%s' is not a valid table name. 
Key names "+ - "cannot be empty.", key.maybeQuotedAll())) - } - } -} - -func isValidKeyName(s string) bool { - return len(s) != 0 -} diff --git a/vendor/github.com/BurntSushi/toml/encode_test.go b/vendor/github.com/BurntSushi/toml/encode_test.go deleted file mode 100644 index 673b7b0..0000000 --- a/vendor/github.com/BurntSushi/toml/encode_test.go +++ /dev/null @@ -1,615 +0,0 @@ -package toml - -import ( - "bytes" - "fmt" - "log" - "net" - "testing" - "time" -) - -func TestEncodeRoundTrip(t *testing.T) { - type Config struct { - Age int - Cats []string - Pi float64 - Perfection []int - DOB time.Time - Ipaddress net.IP - } - - var inputs = Config{ - 13, - []string{"one", "two", "three"}, - 3.145, - []int{11, 2, 3, 4}, - time.Now(), - net.ParseIP("192.168.59.254"), - } - - var firstBuffer bytes.Buffer - e := NewEncoder(&firstBuffer) - err := e.Encode(inputs) - if err != nil { - t.Fatal(err) - } - var outputs Config - if _, err := Decode(firstBuffer.String(), &outputs); err != nil { - t.Logf("Could not decode:\n-----\n%s\n-----\n", - firstBuffer.String()) - t.Fatal(err) - } - - // could test each value individually, but I'm lazy - var secondBuffer bytes.Buffer - e2 := NewEncoder(&secondBuffer) - err = e2.Encode(outputs) - if err != nil { - t.Fatal(err) - } - if firstBuffer.String() != secondBuffer.String() { - t.Error( - firstBuffer.String(), - "\n\n is not identical to\n\n", - secondBuffer.String()) - } -} - -// XXX(burntsushi) -// I think these tests probably should be removed. They are good, but they -// ought to be obsolete by toml-test. -func TestEncode(t *testing.T) { - type Embedded struct { - Int int `toml:"_int"` - } - type NonStruct int - - date := time.Date(2014, 5, 11, 20, 30, 40, 0, time.FixedZone("IST", 3600)) - dateStr := "2014-05-11T19:30:40Z" - - tests := map[string]struct { - input interface{} - wantOutput string - wantError error - }{ - "bool field": { - input: struct { - BoolTrue bool - BoolFalse bool - }{true, false}, - wantOutput: "BoolTrue = true\nBoolFalse = false\n", - }, - "int fields": { - input: struct { - Int int - Int8 int8 - Int16 int16 - Int32 int32 - Int64 int64 - }{1, 2, 3, 4, 5}, - wantOutput: "Int = 1\nInt8 = 2\nInt16 = 3\nInt32 = 4\nInt64 = 5\n", - }, - "uint fields": { - input: struct { - Uint uint - Uint8 uint8 - Uint16 uint16 - Uint32 uint32 - Uint64 uint64 - }{1, 2, 3, 4, 5}, - wantOutput: "Uint = 1\nUint8 = 2\nUint16 = 3\nUint32 = 4" + - "\nUint64 = 5\n", - }, - "float fields": { - input: struct { - Float32 float32 - Float64 float64 - }{1.5, 2.5}, - wantOutput: "Float32 = 1.5\nFloat64 = 2.5\n", - }, - "string field": { - input: struct{ String string }{"foo"}, - wantOutput: "String = \"foo\"\n", - }, - "string field and unexported field": { - input: struct { - String string - unexported int - }{"foo", 0}, - wantOutput: "String = \"foo\"\n", - }, - "datetime field in UTC": { - input: struct{ Date time.Time }{date}, - wantOutput: fmt.Sprintf("Date = %s\n", dateStr), - }, - "datetime field as primitive": { - // Using a map here to fail if isStructOrMap() returns true for - // time.Time. 
- input: map[string]interface{}{ - "Date": date, - "Int": 1, - }, - wantOutput: fmt.Sprintf("Date = %s\nInt = 1\n", dateStr), - }, - "array fields": { - input: struct { - IntArray0 [0]int - IntArray3 [3]int - }{[0]int{}, [3]int{1, 2, 3}}, - wantOutput: "IntArray0 = []\nIntArray3 = [1, 2, 3]\n", - }, - "slice fields": { - input: struct{ IntSliceNil, IntSlice0, IntSlice3 []int }{ - nil, []int{}, []int{1, 2, 3}, - }, - wantOutput: "IntSlice0 = []\nIntSlice3 = [1, 2, 3]\n", - }, - "datetime slices": { - input: struct{ DatetimeSlice []time.Time }{ - []time.Time{date, date}, - }, - wantOutput: fmt.Sprintf("DatetimeSlice = [%s, %s]\n", - dateStr, dateStr), - }, - "nested arrays and slices": { - input: struct { - SliceOfArrays [][2]int - ArrayOfSlices [2][]int - SliceOfArraysOfSlices [][2][]int - ArrayOfSlicesOfArrays [2][][2]int - SliceOfMixedArrays [][2]interface{} - ArrayOfMixedSlices [2][]interface{} - }{ - [][2]int{{1, 2}, {3, 4}}, - [2][]int{{1, 2}, {3, 4}}, - [][2][]int{ - { - {1, 2}, {3, 4}, - }, - { - {5, 6}, {7, 8}, - }, - }, - [2][][2]int{ - { - {1, 2}, {3, 4}, - }, - { - {5, 6}, {7, 8}, - }, - }, - [][2]interface{}{ - {1, 2}, {"a", "b"}, - }, - [2][]interface{}{ - {1, 2}, {"a", "b"}, - }, - }, - wantOutput: `SliceOfArrays = [[1, 2], [3, 4]] -ArrayOfSlices = [[1, 2], [3, 4]] -SliceOfArraysOfSlices = [[[1, 2], [3, 4]], [[5, 6], [7, 8]]] -ArrayOfSlicesOfArrays = [[[1, 2], [3, 4]], [[5, 6], [7, 8]]] -SliceOfMixedArrays = [[1, 2], ["a", "b"]] -ArrayOfMixedSlices = [[1, 2], ["a", "b"]] -`, - }, - "empty slice": { - input: struct{ Empty []interface{} }{[]interface{}{}}, - wantOutput: "Empty = []\n", - }, - "(error) slice with element type mismatch (string and integer)": { - input: struct{ Mixed []interface{} }{[]interface{}{1, "a"}}, - wantError: errArrayMixedElementTypes, - }, - "(error) slice with element type mismatch (integer and float)": { - input: struct{ Mixed []interface{} }{[]interface{}{1, 2.5}}, - wantError: errArrayMixedElementTypes, - }, - "slice with elems of differing Go types, same TOML types": { - input: struct { - MixedInts []interface{} - MixedFloats []interface{} - }{ - []interface{}{ - int(1), int8(2), int16(3), int32(4), int64(5), - uint(1), uint8(2), uint16(3), uint32(4), uint64(5), - }, - []interface{}{float32(1.5), float64(2.5)}, - }, - wantOutput: "MixedInts = [1, 2, 3, 4, 5, 1, 2, 3, 4, 5]\n" + - "MixedFloats = [1.5, 2.5]\n", - }, - "(error) slice w/ element type mismatch (one is nested array)": { - input: struct{ Mixed []interface{} }{ - []interface{}{1, []interface{}{2}}, - }, - wantError: errArrayMixedElementTypes, - }, - "(error) slice with 1 nil element": { - input: struct{ NilElement1 []interface{} }{[]interface{}{nil}}, - wantError: errArrayNilElement, - }, - "(error) slice with 1 nil element (and other non-nil elements)": { - input: struct{ NilElement []interface{} }{ - []interface{}{1, nil}, - }, - wantError: errArrayNilElement, - }, - "simple map": { - input: map[string]int{"a": 1, "b": 2}, - wantOutput: "a = 1\nb = 2\n", - }, - "map with interface{} value type": { - input: map[string]interface{}{"a": 1, "b": "c"}, - wantOutput: "a = 1\nb = \"c\"\n", - }, - "map with interface{} value type, some of which are structs": { - input: map[string]interface{}{ - "a": struct{ Int int }{2}, - "b": 1, - }, - wantOutput: "b = 1\n\n[a]\n Int = 2\n", - }, - "nested map": { - input: map[string]map[string]int{ - "a": {"b": 1}, - "c": {"d": 2}, - }, - wantOutput: "[a]\n b = 1\n\n[c]\n d = 2\n", - }, - "nested struct": { - input: struct{ Struct struct{ Int int } }{ - 
struct{ Int int }{1}, - }, - wantOutput: "[Struct]\n Int = 1\n", - }, - "nested struct and non-struct field": { - input: struct { - Struct struct{ Int int } - Bool bool - }{struct{ Int int }{1}, true}, - wantOutput: "Bool = true\n\n[Struct]\n Int = 1\n", - }, - "2 nested structs": { - input: struct{ Struct1, Struct2 struct{ Int int } }{ - struct{ Int int }{1}, struct{ Int int }{2}, - }, - wantOutput: "[Struct1]\n Int = 1\n\n[Struct2]\n Int = 2\n", - }, - "deeply nested structs": { - input: struct { - Struct1, Struct2 struct{ Struct3 *struct{ Int int } } - }{ - struct{ Struct3 *struct{ Int int } }{&struct{ Int int }{1}}, - struct{ Struct3 *struct{ Int int } }{nil}, - }, - wantOutput: "[Struct1]\n [Struct1.Struct3]\n Int = 1" + - "\n\n[Struct2]\n", - }, - "nested struct with nil struct elem": { - input: struct { - Struct struct{ Inner *struct{ Int int } } - }{ - struct{ Inner *struct{ Int int } }{nil}, - }, - wantOutput: "[Struct]\n", - }, - "nested struct with no fields": { - input: struct { - Struct struct{ Inner struct{} } - }{ - struct{ Inner struct{} }{struct{}{}}, - }, - wantOutput: "[Struct]\n [Struct.Inner]\n", - }, - "struct with tags": { - input: struct { - Struct struct { - Int int `toml:"_int"` - } `toml:"_struct"` - Bool bool `toml:"_bool"` - }{ - struct { - Int int `toml:"_int"` - }{1}, true, - }, - wantOutput: "_bool = true\n\n[_struct]\n _int = 1\n", - }, - "embedded struct": { - input: struct{ Embedded }{Embedded{1}}, - wantOutput: "_int = 1\n", - }, - "embedded *struct": { - input: struct{ *Embedded }{&Embedded{1}}, - wantOutput: "_int = 1\n", - }, - "nested embedded struct": { - input: struct { - Struct struct{ Embedded } `toml:"_struct"` - }{struct{ Embedded }{Embedded{1}}}, - wantOutput: "[_struct]\n _int = 1\n", - }, - "nested embedded *struct": { - input: struct { - Struct struct{ *Embedded } `toml:"_struct"` - }{struct{ *Embedded }{&Embedded{1}}}, - wantOutput: "[_struct]\n _int = 1\n", - }, - "embedded non-struct": { - input: struct{ NonStruct }{5}, - wantOutput: "NonStruct = 5\n", - }, - "array of tables": { - input: struct { - Structs []*struct{ Int int } `toml:"struct"` - }{ - []*struct{ Int int }{{1}, {3}}, - }, - wantOutput: "[[struct]]\n Int = 1\n\n[[struct]]\n Int = 3\n", - }, - "array of tables order": { - input: map[string]interface{}{ - "map": map[string]interface{}{ - "zero": 5, - "arr": []map[string]int{ - { - "friend": 5, - }, - }, - }, - }, - wantOutput: "[map]\n zero = 5\n\n [[map.arr]]\n friend = 5\n", - }, - "(error) top-level slice": { - input: []struct{ Int int }{{1}, {2}, {3}}, - wantError: errNoKey, - }, - "(error) slice of slice": { - input: struct { - Slices [][]struct{ Int int } - }{ - [][]struct{ Int int }{{{1}}, {{2}}, {{3}}}, - }, - wantError: errArrayNoTable, - }, - "(error) map no string key": { - input: map[int]string{1: ""}, - wantError: errNonString, - }, - "(error) empty key name": { - input: map[string]int{"": 1}, - wantError: errAnything, - }, - "(error) empty map name": { - input: map[string]interface{}{ - "": map[string]int{"v": 1}, - }, - wantError: errAnything, - }, - } - for label, test := range tests { - encodeExpected(t, label, test.input, test.wantOutput, test.wantError) - } -} - -func TestEncodeNestedTableArrays(t *testing.T) { - type song struct { - Name string `toml:"name"` - } - type album struct { - Name string `toml:"name"` - Songs []song `toml:"songs"` - } - type springsteen struct { - Albums []album `toml:"albums"` - } - value := springsteen{ - []album{ - {"Born to Run", - []song{{"Jungleland"}, {"Meeting Across the 
River"}}}, - {"Born in the USA", - []song{{"Glory Days"}, {"Dancing in the Dark"}}}, - }, - } - expected := `[[albums]] - name = "Born to Run" - - [[albums.songs]] - name = "Jungleland" - - [[albums.songs]] - name = "Meeting Across the River" - -[[albums]] - name = "Born in the USA" - - [[albums.songs]] - name = "Glory Days" - - [[albums.songs]] - name = "Dancing in the Dark" -` - encodeExpected(t, "nested table arrays", value, expected, nil) -} - -func TestEncodeArrayHashWithNormalHashOrder(t *testing.T) { - type Alpha struct { - V int - } - type Beta struct { - V int - } - type Conf struct { - V int - A Alpha - B []Beta - } - - val := Conf{ - V: 1, - A: Alpha{2}, - B: []Beta{{3}}, - } - expected := "V = 1\n\n[A]\n V = 2\n\n[[B]]\n V = 3\n" - encodeExpected(t, "array hash with normal hash order", val, expected, nil) -} - -func TestEncodeWithOmitEmpty(t *testing.T) { - type simple struct { - Bool bool `toml:"bool,omitempty"` - String string `toml:"string,omitempty"` - Array [0]byte `toml:"array,omitempty"` - Slice []int `toml:"slice,omitempty"` - Map map[string]string `toml:"map,omitempty"` - } - - var v simple - encodeExpected(t, "fields with omitempty are omitted when empty", v, "", nil) - v = simple{ - Bool: true, - String: " ", - Slice: []int{2, 3, 4}, - Map: map[string]string{"foo": "bar"}, - } - expected := `bool = true -string = " " -slice = [2, 3, 4] - -[map] - foo = "bar" -` - encodeExpected(t, "fields with omitempty are not omitted when non-empty", - v, expected, nil) -} - -func TestEncodeWithOmitZero(t *testing.T) { - type simple struct { - Number int `toml:"number,omitzero"` - Real float64 `toml:"real,omitzero"` - Unsigned uint `toml:"unsigned,omitzero"` - } - - value := simple{0, 0.0, uint(0)} - expected := "" - - encodeExpected(t, "simple with omitzero, all zero", value, expected, nil) - - value.Number = 10 - value.Real = 20 - value.Unsigned = 5 - expected = `number = 10 -real = 20.0 -unsigned = 5 -` - encodeExpected(t, "simple with omitzero, non-zero", value, expected, nil) -} - -func TestEncodeOmitemptyWithEmptyName(t *testing.T) { - type simple struct { - S []int `toml:",omitempty"` - } - v := simple{[]int{1, 2, 3}} - expected := "S = [1, 2, 3]\n" - encodeExpected(t, "simple with omitempty, no name, non-empty field", - v, expected, nil) -} - -func TestEncodeAnonymousStruct(t *testing.T) { - type Inner struct{ N int } - type Outer0 struct{ Inner } - type Outer1 struct { - Inner `toml:"inner"` - } - - v0 := Outer0{Inner{3}} - expected := "N = 3\n" - encodeExpected(t, "embedded anonymous untagged struct", v0, expected, nil) - - v1 := Outer1{Inner{3}} - expected = "[inner]\n N = 3\n" - encodeExpected(t, "embedded anonymous tagged struct", v1, expected, nil) -} - -func TestEncodeAnonymousStructPointerField(t *testing.T) { - type Inner struct{ N int } - type Outer0 struct{ *Inner } - type Outer1 struct { - *Inner `toml:"inner"` - } - - v0 := Outer0{} - expected := "" - encodeExpected(t, "nil anonymous untagged struct pointer field", v0, expected, nil) - - v0 = Outer0{&Inner{3}} - expected = "N = 3\n" - encodeExpected(t, "non-nil anonymous untagged struct pointer field", v0, expected, nil) - - v1 := Outer1{} - expected = "" - encodeExpected(t, "nil anonymous tagged struct pointer field", v1, expected, nil) - - v1 = Outer1{&Inner{3}} - expected = "[inner]\n N = 3\n" - encodeExpected(t, "non-nil anonymous tagged struct pointer field", v1, expected, nil) -} - -func TestEncodeIgnoredFields(t *testing.T) { - type simple struct { - Number int `toml:"-"` - } - value := simple{} - 
expected := "" - encodeExpected(t, "ignored field", value, expected, nil) -} - -func encodeExpected( - t *testing.T, label string, val interface{}, wantStr string, wantErr error, -) { - var buf bytes.Buffer - enc := NewEncoder(&buf) - err := enc.Encode(val) - if err != wantErr { - if wantErr != nil { - if wantErr == errAnything && err != nil { - return - } - t.Errorf("%s: want Encode error %v, got %v", label, wantErr, err) - } else { - t.Errorf("%s: Encode failed: %s", label, err) - } - } - if err != nil { - return - } - if got := buf.String(); wantStr != got { - t.Errorf("%s: want\n-----\n%q\n-----\nbut got\n-----\n%q\n-----\n", - label, wantStr, got) - } -} - -func ExampleEncoder_Encode() { - date, _ := time.Parse(time.RFC822, "14 Mar 10 18:00 UTC") - var config = map[string]interface{}{ - "date": date, - "counts": []int{1, 1, 2, 3, 5, 8}, - "hash": map[string]string{ - "key1": "val1", - "key2": "val2", - }, - } - buf := new(bytes.Buffer) - if err := NewEncoder(buf).Encode(config); err != nil { - log.Fatal(err) - } - fmt.Println(buf.String()) - - // Output: - // counts = [1, 1, 2, 3, 5, 8] - // date = 2010-03-14T18:00:00Z - // - // [hash] - // key1 = "val1" - // key2 = "val2" -} diff --git a/vendor/github.com/BurntSushi/toml/encoding_types.go b/vendor/github.com/BurntSushi/toml/encoding_types.go deleted file mode 100644 index d36e1dd..0000000 --- a/vendor/github.com/BurntSushi/toml/encoding_types.go +++ /dev/null @@ -1,19 +0,0 @@ -// +build go1.2 - -package toml - -// In order to support Go 1.1, we define our own TextMarshaler and -// TextUnmarshaler types. For Go 1.2+, we just alias them with the -// standard library interfaces. - -import ( - "encoding" -) - -// TextMarshaler is a synonym for encoding.TextMarshaler. It is defined here -// so that Go 1.1 can be supported. -type TextMarshaler encoding.TextMarshaler - -// TextUnmarshaler is a synonym for encoding.TextUnmarshaler. It is defined -// here so that Go 1.1 can be supported. -type TextUnmarshaler encoding.TextUnmarshaler diff --git a/vendor/github.com/BurntSushi/toml/encoding_types_1.1.go b/vendor/github.com/BurntSushi/toml/encoding_types_1.1.go deleted file mode 100644 index e8d503d..0000000 --- a/vendor/github.com/BurntSushi/toml/encoding_types_1.1.go +++ /dev/null @@ -1,18 +0,0 @@ -// +build !go1.2 - -package toml - -// These interfaces were introduced in Go 1.2, so we add them manually when -// compiling for Go 1.1. - -// TextMarshaler is a synonym for encoding.TextMarshaler. It is defined here -// so that Go 1.1 can be supported. -type TextMarshaler interface { - MarshalText() (text []byte, err error) -} - -// TextUnmarshaler is a synonym for encoding.TextUnmarshaler. It is defined -// here so that Go 1.1 can be supported. 
-type TextUnmarshaler interface { - UnmarshalText(text []byte) error -} diff --git a/vendor/github.com/BurntSushi/toml/lex.go b/vendor/github.com/BurntSushi/toml/lex.go deleted file mode 100644 index a016dc2..0000000 --- a/vendor/github.com/BurntSushi/toml/lex.go +++ /dev/null @@ -1,866 +0,0 @@ -package toml - -import ( - "fmt" - "strings" - "unicode" - "unicode/utf8" -) - -type itemType int - -const ( - itemError itemType = iota - itemNIL // used in the parser to indicate no type - itemEOF - itemText - itemString - itemRawString - itemMultilineString - itemRawMultilineString - itemBool - itemInteger - itemFloat - itemDatetime - itemArray // the start of an array - itemArrayEnd - itemTableStart - itemTableEnd - itemArrayTableStart - itemArrayTableEnd - itemKeyStart - itemCommentStart -) - -const ( - eof = 0 - tableStart = '[' - tableEnd = ']' - arrayTableStart = '[' - arrayTableEnd = ']' - tableSep = '.' - keySep = '=' - arrayStart = '[' - arrayEnd = ']' - arrayValTerm = ',' - commentStart = '#' - stringStart = '"' - stringEnd = '"' - rawStringStart = '\'' - rawStringEnd = '\'' -) - -type stateFn func(lx *lexer) stateFn - -type lexer struct { - input string - start int - pos int - width int - line int - state stateFn - items chan item - - // A stack of state functions used to maintain context. - // The idea is to reuse parts of the state machine in various places. - // For example, values can appear at the top level or within arbitrarily - // nested arrays. The last state on the stack is used after a value has - // been lexed. Similarly for comments. - stack []stateFn -} - -type item struct { - typ itemType - val string - line int -} - -func (lx *lexer) nextItem() item { - for { - select { - case item := <-lx.items: - return item - default: - lx.state = lx.state(lx) - } - } -} - -func lex(input string) *lexer { - lx := &lexer{ - input: input + "\n", - state: lexTop, - line: 1, - items: make(chan item, 10), - stack: make([]stateFn, 0, 10), - } - return lx -} - -func (lx *lexer) push(state stateFn) { - lx.stack = append(lx.stack, state) -} - -func (lx *lexer) pop() stateFn { - if len(lx.stack) == 0 { - return lx.errorf("BUG in lexer: no states to pop.") - } - last := lx.stack[len(lx.stack)-1] - lx.stack = lx.stack[0 : len(lx.stack)-1] - return last -} - -func (lx *lexer) current() string { - return lx.input[lx.start:lx.pos] -} - -func (lx *lexer) emit(typ itemType) { - lx.items <- item{typ, lx.current(), lx.line} - lx.start = lx.pos -} - -func (lx *lexer) emitTrim(typ itemType) { - lx.items <- item{typ, strings.TrimSpace(lx.current()), lx.line} - lx.start = lx.pos -} - -func (lx *lexer) next() (r rune) { - if lx.pos >= len(lx.input) { - lx.width = 0 - return eof - } - - if lx.input[lx.pos] == '\n' { - lx.line++ - } - r, lx.width = utf8.DecodeRuneInString(lx.input[lx.pos:]) - lx.pos += lx.width - return r -} - -// ignore skips over the pending input before this point. -func (lx *lexer) ignore() { - lx.start = lx.pos -} - -// backup steps back one rune. Can be called only once per call of next. -func (lx *lexer) backup() { - lx.pos -= lx.width - if lx.pos < len(lx.input) && lx.input[lx.pos] == '\n' { - lx.line-- - } -} - -// accept consumes the next rune if it's equal to `valid`. -func (lx *lexer) accept(valid rune) bool { - if lx.next() == valid { - return true - } - lx.backup() - return false -} - -// peek returns but does not consume the next rune in the input. 
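-// It is implemented as a next() followed immediately by a backup(), so the lexer position is left unchanged.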
-func (lx *lexer) peek() rune { - r := lx.next() - lx.backup() - return r -} - -// skip ignores all input that matches the given predicate. -func (lx *lexer) skip(pred func(rune) bool) { - for { - r := lx.next() - if pred(r) { - continue - } - lx.backup() - lx.ignore() - return - } -} - -// errorf stops all lexing by emitting an error and returning `nil`. -// Note that any value that is a character is escaped if it's a special -// character (new lines, tabs, etc.). -func (lx *lexer) errorf(format string, values ...interface{}) stateFn { - lx.items <- item{ - itemError, - fmt.Sprintf(format, values...), - lx.line, - } - return nil -} - -// lexTop consumes elements at the top level of TOML data. -func lexTop(lx *lexer) stateFn { - r := lx.next() - if isWhitespace(r) || isNL(r) { - return lexSkip(lx, lexTop) - } - - switch r { - case commentStart: - lx.push(lexTop) - return lexCommentStart - case tableStart: - return lexTableStart - case eof: - if lx.pos > lx.start { - return lx.errorf("Unexpected EOF.") - } - lx.emit(itemEOF) - return nil - } - - // At this point, the only valid item can be a key, so we back up - // and let the key lexer do the rest. - lx.backup() - lx.push(lexTopEnd) - return lexKeyStart -} - -// lexTopEnd is entered whenever a top-level item has been consumed. (A value -// or a table.) It must see only whitespace, and will turn back to lexTop -// upon a new line. If it sees EOF, it will quit the lexer successfully. -func lexTopEnd(lx *lexer) stateFn { - r := lx.next() - switch { - case r == commentStart: - // a comment will read to a new line for us. - lx.push(lexTop) - return lexCommentStart - case isWhitespace(r): - return lexTopEnd - case isNL(r): - lx.ignore() - return lexTop - case r == eof: - lx.ignore() - return lexTop - } - return lx.errorf("Expected a top-level item to end with a new line, "+ - "comment or EOF, but got %q instead.", r) -} - -// lexTable lexes the beginning of a table. Namely, it makes sure that -// it starts with a character other than '.' and ']'. -// It assumes that '[' has already been consumed. -// It also handles the case that this is an item in an array of tables. -// e.g., '[[name]]'. -func lexTableStart(lx *lexer) stateFn { - if lx.peek() == arrayTableStart { - lx.next() - lx.emit(itemArrayTableStart) - lx.push(lexArrayTableEnd) - } else { - lx.emit(itemTableStart) - lx.push(lexTableEnd) - } - return lexTableNameStart -} - -func lexTableEnd(lx *lexer) stateFn { - lx.emit(itemTableEnd) - return lexTopEnd -} - -func lexArrayTableEnd(lx *lexer) stateFn { - if r := lx.next(); r != arrayTableEnd { - return lx.errorf("Expected end of table array name delimiter %q, "+ - "but got %q instead.", arrayTableEnd, r) - } - lx.emit(itemArrayTableEnd) - return lexTopEnd -} - -func lexTableNameStart(lx *lexer) stateFn { - lx.skip(isWhitespace) - switch r := lx.peek(); { - case r == tableEnd || r == eof: - return lx.errorf("Unexpected end of table name. (Table names cannot " + - "be empty.)") - case r == tableSep: - return lx.errorf("Unexpected table separator. (Table names cannot " + - "be empty.)") - case r == stringStart || r == rawStringStart: - lx.ignore() - lx.push(lexTableNameEnd) - return lexValue // reuse string lexing - default: - return lexBareTableName - } -} - -// lexBareTableName lexes the name of a table. It assumes that at least one -// valid character for the table has already been read. 
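-// It keeps reading while isBareKeyChar reports true, then emits itemText and hands control to lexTableNameEnd.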
-func lexBareTableName(lx *lexer) stateFn { - r := lx.next() - if isBareKeyChar(r) { - return lexBareTableName - } - lx.backup() - lx.emit(itemText) - return lexTableNameEnd -} - -// lexTableNameEnd reads the end of a piece of a table name, optionally -// consuming whitespace. -func lexTableNameEnd(lx *lexer) stateFn { - lx.skip(isWhitespace) - switch r := lx.next(); { - case isWhitespace(r): - return lexTableNameEnd - case r == tableSep: - lx.ignore() - return lexTableNameStart - case r == tableEnd: - return lx.pop() - default: - return lx.errorf("Expected '.' or ']' to end table name, but got %q "+ - "instead.", r) - } -} - -// lexKeyStart consumes a key name up until the first non-whitespace character. -// lexKeyStart will ignore whitespace. -func lexKeyStart(lx *lexer) stateFn { - r := lx.peek() - switch { - case r == keySep: - return lx.errorf("Unexpected key separator %q.", keySep) - case isWhitespace(r) || isNL(r): - lx.next() - return lexSkip(lx, lexKeyStart) - case r == stringStart || r == rawStringStart: - lx.ignore() - lx.emit(itemKeyStart) - lx.push(lexKeyEnd) - return lexValue // reuse string lexing - default: - lx.ignore() - lx.emit(itemKeyStart) - return lexBareKey - } -} - -// lexBareKey consumes the text of a bare key. Assumes that the first character -// (which is not whitespace) has not yet been consumed. -func lexBareKey(lx *lexer) stateFn { - switch r := lx.next(); { - case isBareKeyChar(r): - return lexBareKey - case isWhitespace(r): - lx.backup() - lx.emit(itemText) - return lexKeyEnd - case r == keySep: - lx.backup() - lx.emit(itemText) - return lexKeyEnd - default: - return lx.errorf("Bare keys cannot contain %q.", r) - } -} - -// lexKeyEnd consumes the end of a key and trims whitespace (up to the key -// separator). -func lexKeyEnd(lx *lexer) stateFn { - switch r := lx.next(); { - case r == keySep: - return lexSkip(lx, lexValue) - case isWhitespace(r): - return lexSkip(lx, lexKeyEnd) - default: - return lx.errorf("Expected key separator %q, but got %q instead.", - keySep, r) - } -} - -// lexValue starts the consumption of a value anywhere a value is expected. -// lexValue will ignore whitespace. -// After a value is lexed, the last state on the next is popped and returned. -func lexValue(lx *lexer) stateFn { - // We allow whitespace to precede a value, but NOT new lines. - // In array syntax, the array states are responsible for ignoring new - // lines. - r := lx.next() - switch { - case isWhitespace(r): - return lexSkip(lx, lexValue) - case isDigit(r): - lx.backup() // avoid an extra state and use the same as above - return lexNumberOrDateStart - } - switch r { - case arrayStart: - lx.ignore() - lx.emit(itemArray) - return lexArrayValue - case stringStart: - if lx.accept(stringStart) { - if lx.accept(stringStart) { - lx.ignore() // Ignore """ - return lexMultilineString - } - lx.backup() - } - lx.ignore() // ignore the '"' - return lexString - case rawStringStart: - if lx.accept(rawStringStart) { - if lx.accept(rawStringStart) { - lx.ignore() // Ignore """ - return lexMultilineRawString - } - lx.backup() - } - lx.ignore() // ignore the "'" - return lexRawString - case '+', '-': - return lexNumberStart - case '.': // special error case, be kind to users - return lx.errorf("Floats must start with a digit, not '.'.") - } - if unicode.IsLetter(r) { - // Be permissive here; lexBool will give a nice error if the - // user wrote something like - // x = foo - // (i.e. not 'true' or 'false' but is something else word-like.) 
- lx.backup() - return lexBool - } - return lx.errorf("Expected value but found %q instead.", r) -} - -// lexArrayValue consumes one value in an array. It assumes that '[' or ',' -// have already been consumed. All whitespace and new lines are ignored. -func lexArrayValue(lx *lexer) stateFn { - r := lx.next() - switch { - case isWhitespace(r) || isNL(r): - return lexSkip(lx, lexArrayValue) - case r == commentStart: - lx.push(lexArrayValue) - return lexCommentStart - case r == arrayValTerm: - return lx.errorf("Unexpected array value terminator %q.", - arrayValTerm) - case r == arrayEnd: - return lexArrayEnd - } - - lx.backup() - lx.push(lexArrayValueEnd) - return lexValue -} - -// lexArrayValueEnd consumes the cruft between values of an array. Namely, -// it ignores whitespace and expects either a ',' or a ']'. -func lexArrayValueEnd(lx *lexer) stateFn { - r := lx.next() - switch { - case isWhitespace(r) || isNL(r): - return lexSkip(lx, lexArrayValueEnd) - case r == commentStart: - lx.push(lexArrayValueEnd) - return lexCommentStart - case r == arrayValTerm: - lx.ignore() - return lexArrayValue // move on to the next value - case r == arrayEnd: - return lexArrayEnd - } - return lx.errorf("Expected an array value terminator %q or an array "+ - "terminator %q, but got %q instead.", arrayValTerm, arrayEnd, r) -} - -// lexArrayEnd finishes the lexing of an array. It assumes that a ']' has -// just been consumed. -func lexArrayEnd(lx *lexer) stateFn { - lx.ignore() - lx.emit(itemArrayEnd) - return lx.pop() -} - -// lexString consumes the inner contents of a string. It assumes that the -// beginning '"' has already been consumed and ignored. -func lexString(lx *lexer) stateFn { - r := lx.next() - switch { - case isNL(r): - return lx.errorf("Strings cannot contain new lines.") - case r == '\\': - lx.push(lexString) - return lexStringEscape - case r == stringEnd: - lx.backup() - lx.emit(itemString) - lx.next() - lx.ignore() - return lx.pop() - } - return lexString -} - -// lexMultilineString consumes the inner contents of a string. It assumes that -// the beginning '"""' has already been consumed and ignored. -func lexMultilineString(lx *lexer) stateFn { - r := lx.next() - switch { - case r == '\\': - return lexMultilineStringEscape - case r == stringEnd: - if lx.accept(stringEnd) { - if lx.accept(stringEnd) { - lx.backup() - lx.backup() - lx.backup() - lx.emit(itemMultilineString) - lx.next() - lx.next() - lx.next() - lx.ignore() - return lx.pop() - } - lx.backup() - } - } - return lexMultilineString -} - -// lexRawString consumes a raw string. Nothing can be escaped in such a string. -// It assumes that the beginning "'" has already been consumed and ignored. -func lexRawString(lx *lexer) stateFn { - r := lx.next() - switch { - case isNL(r): - return lx.errorf("Strings cannot contain new lines.") - case r == rawStringEnd: - lx.backup() - lx.emit(itemRawString) - lx.next() - lx.ignore() - return lx.pop() - } - return lexRawString -} - -// lexMultilineRawString consumes a raw string. Nothing can be escaped in such -// a string. It assumes that the beginning "'" has already been consumed and -// ignored. 
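-// (For the multiline form, lexValue has in fact already consumed the full ''' opening delimiter.)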
-func lexMultilineRawString(lx *lexer) stateFn { - r := lx.next() - switch { - case r == rawStringEnd: - if lx.accept(rawStringEnd) { - if lx.accept(rawStringEnd) { - lx.backup() - lx.backup() - lx.backup() - lx.emit(itemRawMultilineString) - lx.next() - lx.next() - lx.next() - lx.ignore() - return lx.pop() - } - lx.backup() - } - } - return lexMultilineRawString -} - -// lexMultilineStringEscape consumes an escaped character. It assumes that the -// preceding '\\' has already been consumed. -func lexMultilineStringEscape(lx *lexer) stateFn { - // Handle the special case first: - if isNL(lx.next()) { - return lexMultilineString - } else { - lx.backup() - lx.push(lexMultilineString) - return lexStringEscape(lx) - } -} - -func lexStringEscape(lx *lexer) stateFn { - r := lx.next() - switch r { - case 'b': - fallthrough - case 't': - fallthrough - case 'n': - fallthrough - case 'f': - fallthrough - case 'r': - fallthrough - case '"': - fallthrough - case '\\': - return lx.pop() - case 'u': - return lexShortUnicodeEscape - case 'U': - return lexLongUnicodeEscape - } - return lx.errorf("Invalid escape character %q. Only the following "+ - "escape characters are allowed: "+ - "\\b, \\t, \\n, \\f, \\r, \\\", \\/, \\\\, "+ - "\\uXXXX and \\UXXXXXXXX.", r) -} - -func lexShortUnicodeEscape(lx *lexer) stateFn { - var r rune - for i := 0; i < 4; i++ { - r = lx.next() - if !isHexadecimal(r) { - return lx.errorf("Expected four hexadecimal digits after '\\u', "+ - "but got '%s' instead.", lx.current()) - } - } - return lx.pop() -} - -func lexLongUnicodeEscape(lx *lexer) stateFn { - var r rune - for i := 0; i < 8; i++ { - r = lx.next() - if !isHexadecimal(r) { - return lx.errorf("Expected eight hexadecimal digits after '\\U', "+ - "but got '%s' instead.", lx.current()) - } - } - return lx.pop() -} - -// lexNumberOrDateStart consumes either an integer, a float, or datetime. -func lexNumberOrDateStart(lx *lexer) stateFn { - r := lx.next() - if isDigit(r) { - return lexNumberOrDate - } - switch r { - case '_': - return lexNumber - case 'e', 'E': - return lexFloat - case '.': - return lx.errorf("Floats must start with a digit, not '.'.") - } - return lx.errorf("Expected a digit but got %q.", r) -} - -// lexNumberOrDate consumes either an integer, float or datetime. -func lexNumberOrDate(lx *lexer) stateFn { - r := lx.next() - if isDigit(r) { - return lexNumberOrDate - } - switch r { - case '-': - return lexDatetime - case '_': - return lexNumber - case '.', 'e', 'E': - return lexFloat - } - - lx.backup() - lx.emit(itemInteger) - return lx.pop() -} - -// lexDatetime consumes a Datetime, to a first approximation. -// The parser validates that it matches one of the accepted formats. -func lexDatetime(lx *lexer) stateFn { - r := lx.next() - if isDigit(r) { - return lexDatetime - } - switch r { - case '-', 'T', ':', '.', 'Z': - return lexDatetime - } - - lx.backup() - lx.emit(itemDatetime) - return lx.pop() -} - -// lexNumberStart consumes either an integer or a float. It assumes that a sign -// has already been read, but that *no* digits have been consumed. -// lexNumberStart will move to the appropriate integer or float states. -func lexNumberStart(lx *lexer) stateFn { - // We MUST see a digit. Even floats have to start with a digit. - r := lx.next() - if !isDigit(r) { - if r == '.' 
{ - return lx.errorf("Floats must start with a digit, not '.'.") - } else { - return lx.errorf("Expected a digit but got %q.", r) - } - } - return lexNumber -} - -// lexNumber consumes an integer or a float after seeing the first digit. -func lexNumber(lx *lexer) stateFn { - r := lx.next() - if isDigit(r) { - return lexNumber - } - switch r { - case '_': - return lexNumber - case '.', 'e', 'E': - return lexFloat - } - - lx.backup() - lx.emit(itemInteger) - return lx.pop() -} - -// lexFloat consumes the elements of a float. It allows any sequence of -// float-like characters, so floats emitted by the lexer are only a first -// approximation and must be validated by the parser. -func lexFloat(lx *lexer) stateFn { - r := lx.next() - if isDigit(r) { - return lexFloat - } - switch r { - case '_', '.', '-', '+', 'e', 'E': - return lexFloat - } - - lx.backup() - lx.emit(itemFloat) - return lx.pop() -} - -// lexBool consumes a bool string: 'true' or 'false. -func lexBool(lx *lexer) stateFn { - var rs []rune - for { - r := lx.next() - if r == eof || isWhitespace(r) || isNL(r) { - lx.backup() - break - } - rs = append(rs, r) - } - s := string(rs) - switch s { - case "true", "false": - lx.emit(itemBool) - return lx.pop() - } - return lx.errorf("Expected value but found %q instead.", s) -} - -// lexCommentStart begins the lexing of a comment. It will emit -// itemCommentStart and consume no characters, passing control to lexComment. -func lexCommentStart(lx *lexer) stateFn { - lx.ignore() - lx.emit(itemCommentStart) - return lexComment -} - -// lexComment lexes an entire comment. It assumes that '#' has been consumed. -// It will consume *up to* the first new line character, and pass control -// back to the last state on the stack. -func lexComment(lx *lexer) stateFn { - r := lx.peek() - if isNL(r) || r == eof { - lx.emit(itemText) - return lx.pop() - } - lx.next() - return lexComment -} - -// lexSkip ignores all slurped input and moves on to the next state. -func lexSkip(lx *lexer, nextState stateFn) stateFn { - return func(lx *lexer) stateFn { - lx.ignore() - return nextState - } -} - -// isWhitespace returns true if `r` is a whitespace character according -// to the spec. 
-func isWhitespace(r rune) bool { - return r == '\t' || r == ' ' -} - -func isNL(r rune) bool { - return r == '\n' || r == '\r' -} - -func isDigit(r rune) bool { - return r >= '0' && r <= '9' -} - -func isHexadecimal(r rune) bool { - return (r >= '0' && r <= '9') || - (r >= 'a' && r <= 'f') || - (r >= 'A' && r <= 'F') -} - -func isBareKeyChar(r rune) bool { - return (r >= 'A' && r <= 'Z') || - (r >= 'a' && r <= 'z') || - (r >= '0' && r <= '9') || - r == '_' || - r == '-' -} - -func (itype itemType) String() string { - switch itype { - case itemError: - return "Error" - case itemNIL: - return "NIL" - case itemEOF: - return "EOF" - case itemText: - return "Text" - case itemString: - return "String" - case itemRawString: - return "String" - case itemMultilineString: - return "String" - case itemRawMultilineString: - return "String" - case itemBool: - return "Bool" - case itemInteger: - return "Integer" - case itemFloat: - return "Float" - case itemDatetime: - return "DateTime" - case itemTableStart: - return "TableStart" - case itemTableEnd: - return "TableEnd" - case itemKeyStart: - return "KeyStart" - case itemArray: - return "Array" - case itemArrayEnd: - return "ArrayEnd" - case itemCommentStart: - return "CommentStart" - } - panic(fmt.Sprintf("BUG: Unknown type '%d'.", int(itype))) -} - -func (item item) String() string { - return fmt.Sprintf("(%s, %s)", item.typ.String(), item.val) -} diff --git a/vendor/github.com/BurntSushi/toml/parse.go b/vendor/github.com/BurntSushi/toml/parse.go deleted file mode 100644 index a562555..0000000 --- a/vendor/github.com/BurntSushi/toml/parse.go +++ /dev/null @@ -1,557 +0,0 @@ -package toml - -import ( - "fmt" - "strconv" - "strings" - "time" - "unicode" - "unicode/utf8" -) - -type parser struct { - mapping map[string]interface{} - types map[string]tomlType - lx *lexer - - // A list of keys in the order that they appear in the TOML data. - ordered []Key - - // the full key for the current hash in scope - context Key - - // the base key name for everything except hashes - currentKey string - - // rough approximation of line number - approxLine int - - // A map of 'key.group.names' to whether they were created implicitly. 
- implicits map[string]bool -} - -type parseError string - -func (pe parseError) Error() string { - return string(pe) -} - -func parse(data string) (p *parser, err error) { - defer func() { - if r := recover(); r != nil { - var ok bool - if err, ok = r.(parseError); ok { - return - } - panic(r) - } - }() - - p = &parser{ - mapping: make(map[string]interface{}), - types: make(map[string]tomlType), - lx: lex(data), - ordered: make([]Key, 0), - implicits: make(map[string]bool), - } - for { - item := p.next() - if item.typ == itemEOF { - break - } - p.topLevel(item) - } - - return p, nil -} - -func (p *parser) panicf(format string, v ...interface{}) { - msg := fmt.Sprintf("Near line %d (last key parsed '%s'): %s", - p.approxLine, p.current(), fmt.Sprintf(format, v...)) - panic(parseError(msg)) -} - -func (p *parser) next() item { - it := p.lx.nextItem() - if it.typ == itemError { - p.panicf("%s", it.val) - } - return it -} - -func (p *parser) bug(format string, v ...interface{}) { - panic(fmt.Sprintf("BUG: "+format+"\n\n", v...)) -} - -func (p *parser) expect(typ itemType) item { - it := p.next() - p.assertEqual(typ, it.typ) - return it -} - -func (p *parser) assertEqual(expected, got itemType) { - if expected != got { - p.bug("Expected '%s' but got '%s'.", expected, got) - } -} - -func (p *parser) topLevel(item item) { - switch item.typ { - case itemCommentStart: - p.approxLine = item.line - p.expect(itemText) - case itemTableStart: - kg := p.next() - p.approxLine = kg.line - - var key Key - for ; kg.typ != itemTableEnd && kg.typ != itemEOF; kg = p.next() { - key = append(key, p.keyString(kg)) - } - p.assertEqual(itemTableEnd, kg.typ) - - p.establishContext(key, false) - p.setType("", tomlHash) - p.ordered = append(p.ordered, key) - case itemArrayTableStart: - kg := p.next() - p.approxLine = kg.line - - var key Key - for ; kg.typ != itemArrayTableEnd && kg.typ != itemEOF; kg = p.next() { - key = append(key, p.keyString(kg)) - } - p.assertEqual(itemArrayTableEnd, kg.typ) - - p.establishContext(key, true) - p.setType("", tomlArrayHash) - p.ordered = append(p.ordered, key) - case itemKeyStart: - kname := p.next() - p.approxLine = kname.line - p.currentKey = p.keyString(kname) - - val, typ := p.value(p.next()) - p.setValue(p.currentKey, val) - p.setType(p.currentKey, typ) - p.ordered = append(p.ordered, p.context.add(p.currentKey)) - p.currentKey = "" - default: - p.bug("Unexpected type at top level: %s", item.typ) - } -} - -// Gets a string for a key (or part of a key in a table name). -func (p *parser) keyString(it item) string { - switch it.typ { - case itemText: - return it.val - case itemString, itemMultilineString, - itemRawString, itemRawMultilineString: - s, _ := p.value(it) - return s.(string) - default: - p.bug("Unexpected key type: %s", it.typ) - panic("unreachable") - } -} - -// value translates an expected value from the lexer into a Go value wrapped -// as an empty interface. 
-func (p *parser) value(it item) (interface{}, tomlType) { - switch it.typ { - case itemString: - return p.replaceEscapes(it.val), p.typeOfPrimitive(it) - case itemMultilineString: - trimmed := stripFirstNewline(stripEscapedWhitespace(it.val)) - return p.replaceEscapes(trimmed), p.typeOfPrimitive(it) - case itemRawString: - return it.val, p.typeOfPrimitive(it) - case itemRawMultilineString: - return stripFirstNewline(it.val), p.typeOfPrimitive(it) - case itemBool: - switch it.val { - case "true": - return true, p.typeOfPrimitive(it) - case "false": - return false, p.typeOfPrimitive(it) - } - p.bug("Expected boolean value, but got '%s'.", it.val) - case itemInteger: - if !numUnderscoresOK(it.val) { - p.panicf("Invalid integer %q: underscores must be surrounded by digits", - it.val) - } - val := strings.Replace(it.val, "_", "", -1) - num, err := strconv.ParseInt(val, 10, 64) - if err != nil { - // Distinguish integer values. Normally, it'd be a bug if the lexer - // provides an invalid integer, but it's possible that the number is - // out of range of valid values (which the lexer cannot determine). - // So mark the former as a bug but the latter as a legitimate user - // error. - if e, ok := err.(*strconv.NumError); ok && - e.Err == strconv.ErrRange { - - p.panicf("Integer '%s' is out of the range of 64-bit "+ - "signed integers.", it.val) - } else { - p.bug("Expected integer value, but got '%s'.", it.val) - } - } - return num, p.typeOfPrimitive(it) - case itemFloat: - parts := strings.FieldsFunc(it.val, func(r rune) bool { - switch r { - case '.', 'e', 'E': - return true - } - return false - }) - for _, part := range parts { - if !numUnderscoresOK(part) { - p.panicf("Invalid float %q: underscores must be "+ - "surrounded by digits", it.val) - } - } - if !numPeriodsOK(it.val) { - // As a special case, numbers like '123.' or '1.e2', - // which are valid as far as Go/strconv are concerned, - // must be rejected because TOML says that a fractional - // part consists of '.' followed by 1+ digits. - p.panicf("Invalid float %q: '.' must be followed "+ - "by one or more digits", it.val) - } - val := strings.Replace(it.val, "_", "", -1) - num, err := strconv.ParseFloat(val, 64) - if err != nil { - if e, ok := err.(*strconv.NumError); ok && - e.Err == strconv.ErrRange { - - p.panicf("Float '%s' is out of the range of 64-bit "+ - "IEEE-754 floating-point numbers.", it.val) - } else { - p.panicf("Invalid float value: %q", it.val) - } - } - return num, p.typeOfPrimitive(it) - case itemDatetime: - var t time.Time - var ok bool - var err error - for _, format := range []string{ - "2006-01-02T15:04:05Z07:00", - "2006-01-02T15:04:05", - "2006-01-02", - } { - t, err = time.ParseInLocation(format, it.val, time.Local) - if err == nil { - ok = true - break - } - } - if !ok { - p.panicf("Invalid TOML Datetime: %q.", it.val) - } - return t, p.typeOfPrimitive(it) - case itemArray: - array := make([]interface{}, 0) - types := make([]tomlType, 0) - - for it = p.next(); it.typ != itemArrayEnd; it = p.next() { - if it.typ == itemCommentStart { - p.expect(itemText) - continue - } - - val, typ := p.value(it) - array = append(array, val) - types = append(types, typ) - } - return array, p.typeOfArray(types) - } - p.bug("Unexpected value type: %s", it.typ) - panic("unreachable") -} - -// numUnderscoresOK checks whether each underscore in s is surrounded by -// characters that are not underscores. 
-func numUnderscoresOK(s string) bool { - accept := false - for _, r := range s { - if r == '_' { - if !accept { - return false - } - accept = false - continue - } - accept = true - } - return accept -} - -// numPeriodsOK checks whether every period in s is followed by a digit. -func numPeriodsOK(s string) bool { - period := false - for _, r := range s { - if period && !isDigit(r) { - return false - } - period = r == '.' - } - return !period -} - -// establishContext sets the current context of the parser, -// where the context is either a hash or an array of hashes. Which one is -// set depends on the value of the `array` parameter. -// -// Establishing the context also makes sure that the key isn't a duplicate, and -// will create implicit hashes automatically. -func (p *parser) establishContext(key Key, array bool) { - var ok bool - - // Always start at the top level and drill down for our context. - hashContext := p.mapping - keyContext := make(Key, 0) - - // We only need implicit hashes for key[0:-1] - for _, k := range key[0 : len(key)-1] { - _, ok = hashContext[k] - keyContext = append(keyContext, k) - - // No key? Make an implicit hash and move on. - if !ok { - p.addImplicit(keyContext) - hashContext[k] = make(map[string]interface{}) - } - - // If the hash context is actually an array of tables, then set - // the hash context to the last element in that array. - // - // Otherwise, it better be a table, since this MUST be a key group (by - // virtue of it not being the last element in a key). - switch t := hashContext[k].(type) { - case []map[string]interface{}: - hashContext = t[len(t)-1] - case map[string]interface{}: - hashContext = t - default: - p.panicf("Key '%s' was already created as a hash.", keyContext) - } - } - - p.context = keyContext - if array { - // If this is the first element for this array, then allocate a new - // list of tables for it. - k := key[len(key)-1] - if _, ok := hashContext[k]; !ok { - hashContext[k] = make([]map[string]interface{}, 0, 5) - } - - // Add a new table. But make sure the key hasn't already been used - // for something else. - if hash, ok := hashContext[k].([]map[string]interface{}); ok { - hashContext[k] = append(hash, make(map[string]interface{})) - } else { - p.panicf("Key '%s' was already created and cannot be used as "+ - "an array.", keyContext) - } - } else { - p.setValue(key[len(key)-1], make(map[string]interface{})) - } - p.context = append(p.context, key[len(key)-1]) -} - -// setValue sets the given key to the given value in the current context. -// It will make sure that the key hasn't already been defined, account for -// implicit key groups. -func (p *parser) setValue(key string, value interface{}) { - var tmpHash interface{} - var ok bool - - hash := p.mapping - keyContext := make(Key, 0) - for _, k := range p.context { - keyContext = append(keyContext, k) - if tmpHash, ok = hash[k]; !ok { - p.bug("Context for key '%s' has not been established.", keyContext) - } - switch t := tmpHash.(type) { - case []map[string]interface{}: - // The context is a table of hashes. Pick the most recent table - // defined as the current hash. - hash = t[len(t)-1] - case map[string]interface{}: - hash = t - default: - p.bug("Expected hash to have type 'map[string]interface{}', but "+ - "it has '%T' instead.", tmpHash) - } - } - keyContext = append(keyContext, key) - - if _, ok := hash[key]; ok { - // Typically, if the given key has already been set, then we have - // to raise an error since duplicate keys are disallowed. 
However, - // it's possible that a key was previously defined implicitly. In this - // case, it is allowed to be redefined concretely. (See the - // `tests/valid/implicit-and-explicit-after.toml` test in `toml-test`.) - // - // But we have to make sure to stop marking it as an implicit. (So that - // another redefinition provokes an error.) - // - // Note that since it has already been defined (as a hash), we don't - // want to overwrite it. So our business is done. - if p.isImplicit(keyContext) { - p.removeImplicit(keyContext) - return - } - - // Otherwise, we have a concrete key trying to override a previous - // key, which is *always* wrong. - p.panicf("Key '%s' has already been defined.", keyContext) - } - hash[key] = value -} - -// setType sets the type of a particular value at a given key. -// It should be called immediately AFTER setValue. -// -// Note that if `key` is empty, then the type given will be applied to the -// current context (which is either a table or an array of tables). -func (p *parser) setType(key string, typ tomlType) { - keyContext := make(Key, 0, len(p.context)+1) - for _, k := range p.context { - keyContext = append(keyContext, k) - } - if len(key) > 0 { // allow type setting for hashes - keyContext = append(keyContext, key) - } - p.types[keyContext.String()] = typ -} - -// addImplicit sets the given Key as having been created implicitly. -func (p *parser) addImplicit(key Key) { - p.implicits[key.String()] = true -} - -// removeImplicit stops tagging the given key as having been implicitly -// created. -func (p *parser) removeImplicit(key Key) { - p.implicits[key.String()] = false -} - -// isImplicit returns true if the key group pointed to by the key was created -// implicitly. -func (p *parser) isImplicit(key Key) bool { - return p.implicits[key.String()] -} - -// current returns the full key name of the current context. -func (p *parser) current() string { - if len(p.currentKey) == 0 { - return p.context.String() - } - if len(p.context) == 0 { - return p.currentKey - } - return fmt.Sprintf("%s.%s", p.context, p.currentKey) -} - -func stripFirstNewline(s string) string { - if len(s) == 0 || s[0] != '\n' { - return s - } - return s[1:] -} - -func stripEscapedWhitespace(s string) string { - esc := strings.Split(s, "\\\n") - if len(esc) > 1 { - for i := 1; i < len(esc); i++ { - esc[i] = strings.TrimLeftFunc(esc[i], unicode.IsSpace) - } - } - return strings.Join(esc, "") -} - -func (p *parser) replaceEscapes(str string) string { - var replaced []rune - s := []byte(str) - r := 0 - for r < len(s) { - if s[r] != '\\' { - c, size := utf8.DecodeRune(s[r:]) - r += size - replaced = append(replaced, c) - continue - } - r += 1 - if r >= len(s) { - p.bug("Escape sequence at end of string.") - return "" - } - switch s[r] { - default: - p.bug("Expected valid escape code after \\, but got %q.", s[r]) - return "" - case 'b': - replaced = append(replaced, rune(0x0008)) - r += 1 - case 't': - replaced = append(replaced, rune(0x0009)) - r += 1 - case 'n': - replaced = append(replaced, rune(0x000A)) - r += 1 - case 'f': - replaced = append(replaced, rune(0x000C)) - r += 1 - case 'r': - replaced = append(replaced, rune(0x000D)) - r += 1 - case '"': - replaced = append(replaced, rune(0x0022)) - r += 1 - case '\\': - replaced = append(replaced, rune(0x005C)) - r += 1 - case 'u': - // At this point, we know we have a Unicode escape of the form - // `uXXXX` at [r, r+5). (Because the lexer guarantees this - // for us.) 
- escaped := p.asciiEscapeToUnicode(s[r+1 : r+5]) - replaced = append(replaced, escaped) - r += 5 - case 'U': - // At this point, we know we have a Unicode escape of the form - // `uXXXX` at [r, r+9). (Because the lexer guarantees this - // for us.) - escaped := p.asciiEscapeToUnicode(s[r+1 : r+9]) - replaced = append(replaced, escaped) - r += 9 - } - } - return string(replaced) -} - -func (p *parser) asciiEscapeToUnicode(bs []byte) rune { - s := string(bs) - hex, err := strconv.ParseUint(strings.ToLower(s), 16, 32) - if err != nil { - p.bug("Could not parse '%s' as a hexadecimal number, but the "+ - "lexer claims it's OK: %s", s, err) - } - if !utf8.ValidRune(rune(hex)) { - p.panicf("Escaped character '\\u%s' is not valid UTF-8.", s) - } - return rune(hex) -} - -func isStringType(ty itemType) bool { - return ty == itemString || ty == itemMultilineString || - ty == itemRawString || ty == itemRawMultilineString -} diff --git a/vendor/github.com/BurntSushi/toml/session.vim b/vendor/github.com/BurntSushi/toml/session.vim deleted file mode 100644 index 562164b..0000000 --- a/vendor/github.com/BurntSushi/toml/session.vim +++ /dev/null @@ -1 +0,0 @@ -au BufWritePost *.go silent!make tags > /dev/null 2>&1 diff --git a/vendor/github.com/BurntSushi/toml/type_check.go b/vendor/github.com/BurntSushi/toml/type_check.go deleted file mode 100644 index c73f8af..0000000 --- a/vendor/github.com/BurntSushi/toml/type_check.go +++ /dev/null @@ -1,91 +0,0 @@ -package toml - -// tomlType represents any Go type that corresponds to a TOML type. -// While the first draft of the TOML spec has a simplistic type system that -// probably doesn't need this level of sophistication, we seem to be militating -// toward adding real composite types. -type tomlType interface { - typeString() string -} - -// typeEqual accepts any two types and returns true if they are equal. -func typeEqual(t1, t2 tomlType) bool { - if t1 == nil || t2 == nil { - return false - } - return t1.typeString() == t2.typeString() -} - -func typeIsHash(t tomlType) bool { - return typeEqual(t, tomlHash) || typeEqual(t, tomlArrayHash) -} - -type tomlBaseType string - -func (btype tomlBaseType) typeString() string { - return string(btype) -} - -func (btype tomlBaseType) String() string { - return btype.typeString() -} - -var ( - tomlInteger tomlBaseType = "Integer" - tomlFloat tomlBaseType = "Float" - tomlDatetime tomlBaseType = "Datetime" - tomlString tomlBaseType = "String" - tomlBool tomlBaseType = "Bool" - tomlArray tomlBaseType = "Array" - tomlHash tomlBaseType = "Hash" - tomlArrayHash tomlBaseType = "ArrayHash" -) - -// typeOfPrimitive returns a tomlType of any primitive value in TOML. -// Primitive values are: Integer, Float, Datetime, String and Bool. -// -// Passing a lexer item other than the following will cause a BUG message -// to occur: itemString, itemBool, itemInteger, itemFloat, itemDatetime. -func (p *parser) typeOfPrimitive(lexItem item) tomlType { - switch lexItem.typ { - case itemInteger: - return tomlInteger - case itemFloat: - return tomlFloat - case itemDatetime: - return tomlDatetime - case itemString: - return tomlString - case itemMultilineString: - return tomlString - case itemRawString: - return tomlString - case itemRawMultilineString: - return tomlString - case itemBool: - return tomlBool - } - p.bug("Cannot infer primitive type of lex item '%s'.", lexItem) - panic("unreachable") -} - -// typeOfArray returns a tomlType for an array given a list of types of its -// values. 
-// -// In the current spec, if an array is homogeneous, then its type is always -// "Array". If the array is not homogeneous, an error is generated. -func (p *parser) typeOfArray(types []tomlType) tomlType { - // Empty arrays are cool. - if len(types) == 0 { - return tomlArray - } - - theType := types[0] - for _, t := range types[1:] { - if !typeEqual(theType, t) { - p.panicf("Array contains values of type '%s' and '%s', but "+ - "arrays must be homogeneous.", theType, t) - } - } - return tomlArray -} diff --git a/vendor/github.com/BurntSushi/toml/type_fields.go b/vendor/github.com/BurntSushi/toml/type_fields.go deleted file mode 100644 index 608997c..0000000 --- a/vendor/github.com/BurntSushi/toml/type_fields.go +++ /dev/null @@ -1,242 +0,0 @@ -package toml - -// Struct field handling is adapted from code in encoding/json: -// -// Copyright 2010 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the Go distribution. - -import ( - "reflect" - "sort" - "sync" -) - -// A field represents a single field found in a struct. -type field struct { - name string // the name of the field (`toml` tag included) - tag bool // whether field has a `toml` tag - index []int // represents the depth of an anonymous field - typ reflect.Type // the type of the field -} - -// byName sorts field by name, breaking ties with depth, -// then breaking ties with "name came from toml tag", then -// breaking ties with index sequence. -type byName []field - -func (x byName) Len() int { return len(x) } - -func (x byName) Swap(i, j int) { x[i], x[j] = x[j], x[i] } - -func (x byName) Less(i, j int) bool { - if x[i].name != x[j].name { - return x[i].name < x[j].name - } - if len(x[i].index) != len(x[j].index) { - return len(x[i].index) < len(x[j].index) - } - if x[i].tag != x[j].tag { - return x[i].tag - } - return byIndex(x).Less(i, j) -} - -// byIndex sorts field by index sequence. -type byIndex []field - -func (x byIndex) Len() int { return len(x) } - -func (x byIndex) Swap(i, j int) { x[i], x[j] = x[j], x[i] } - -func (x byIndex) Less(i, j int) bool { - for k, xik := range x[i].index { - if k >= len(x[j].index) { - return false - } - if xik != x[j].index[k] { - return xik < x[j].index[k] - } - } - return len(x[i].index) < len(x[j].index) -} - -// typeFields returns a list of fields that TOML should recognize for the given -// type. The algorithm is breadth-first search over the set of structs to -// include - the top struct and then any reachable anonymous structs. -func typeFields(t reflect.Type) []field { - // Anonymous fields to explore at the current level and the next. - current := []field{} - next := []field{{typ: t}} - - // Count of queued names for current level and the next. - count := map[reflect.Type]int{} - nextCount := map[reflect.Type]int{} - - // Types already visited at an earlier level. - visited := map[reflect.Type]bool{} - - // Fields found. - var fields []field - - for len(next) > 0 { - current, next = next, current[:0] - count, nextCount = nextCount, map[reflect.Type]int{} - - for _, f := range current { - if visited[f.typ] { - continue - } - visited[f.typ] = true - - // Scan f.typ for fields to include. 
- for i := 0; i < f.typ.NumField(); i++ { - sf := f.typ.Field(i) - if sf.PkgPath != "" && !sf.Anonymous { // unexported - continue - } - opts := getOptions(sf.Tag) - if opts.skip { - continue - } - index := make([]int, len(f.index)+1) - copy(index, f.index) - index[len(f.index)] = i - - ft := sf.Type - if ft.Name() == "" && ft.Kind() == reflect.Ptr { - // Follow pointer. - ft = ft.Elem() - } - - // Record found field and index sequence. - if opts.name != "" || !sf.Anonymous || ft.Kind() != reflect.Struct { - tagged := opts.name != "" - name := opts.name - if name == "" { - name = sf.Name - } - fields = append(fields, field{name, tagged, index, ft}) - if count[f.typ] > 1 { - // If there were multiple instances, add a second, - // so that the annihilation code will see a duplicate. - // It only cares about the distinction between 1 or 2, - // so don't bother generating any more copies. - fields = append(fields, fields[len(fields)-1]) - } - continue - } - - // Record new anonymous struct to explore in next round. - nextCount[ft]++ - if nextCount[ft] == 1 { - f := field{name: ft.Name(), index: index, typ: ft} - next = append(next, f) - } - } - } - } - - sort.Sort(byName(fields)) - - // Delete all fields that are hidden by the Go rules for embedded fields, - // except that fields with TOML tags are promoted. - - // The fields are sorted in primary order of name, secondary order - // of field index length. Loop over names; for each name, delete - // hidden fields by choosing the one dominant field that survives. - out := fields[:0] - for advance, i := 0, 0; i < len(fields); i += advance { - // One iteration per name. - // Find the sequence of fields with the name of this first field. - fi := fields[i] - name := fi.name - for advance = 1; i+advance < len(fields); advance++ { - fj := fields[i+advance] - if fj.name != name { - break - } - } - if advance == 1 { // Only one field with this name - out = append(out, fi) - continue - } - dominant, ok := dominantField(fields[i : i+advance]) - if ok { - out = append(out, dominant) - } - } - - fields = out - sort.Sort(byIndex(fields)) - - return fields -} - -// dominantField looks through the fields, all of which are known to -// have the same name, to find the single field that dominates the -// others using Go's embedding rules, modified by the presence of -// TOML tags. If there are multiple top-level fields, the boolean -// will be false: This condition is an error in Go and we skip all -// the fields. -func dominantField(fields []field) (field, bool) { - // The fields are sorted in increasing index-length order. The winner - // must therefore be one with the shortest index length. Drop all - // longer entries, which is easy: just truncate the slice. - length := len(fields[0].index) - tagged := -1 // Index of first tagged field. - for i, f := range fields { - if len(f.index) > length { - fields = fields[:i] - break - } - if f.tag { - if tagged >= 0 { - // Multiple tagged fields at the same level: conflict. - // Return no field. - return field{}, false - } - tagged = i - } - } - if tagged >= 0 { - return fields[tagged], true - } - // All remaining fields have the same length. If there's more than one, - // we have a conflict (two fields named "X" at the same level) and we - // return no field. - if len(fields) > 1 { - return field{}, false - } - return fields[0], true -} - -var fieldCache struct { - sync.RWMutex - m map[reflect.Type][]field -} - -// cachedTypeFields is like typeFields but uses a cache to avoid repeated work. 
-func cachedTypeFields(t reflect.Type) []field { - fieldCache.RLock() - f := fieldCache.m[t] - fieldCache.RUnlock() - if f != nil { - return f - } - - // Compute fields without lock. - // Might duplicate effort but won't hold other computations back. - f = typeFields(t) - if f == nil { - f = []field{} - } - - fieldCache.Lock() - if fieldCache.m == nil { - fieldCache.m = map[reflect.Type][]field{} - } - fieldCache.m[t] = f - fieldCache.Unlock() - return f -} diff --git a/vendor/github.com/abiosoft/caddy-git/.gitignore b/vendor/github.com/abiosoft/caddy-git/.gitignore deleted file mode 100644 index af449fe..0000000 --- a/vendor/github.com/abiosoft/caddy-git/.gitignore +++ /dev/null @@ -1,4 +0,0 @@ -# IntelliJ project files -.idea/ -*.iml -Caddyfile diff --git a/vendor/github.com/abiosoft/caddy-git/.travis.yml b/vendor/github.com/abiosoft/caddy-git/.travis.yml deleted file mode 100644 index d31fde7..0000000 --- a/vendor/github.com/abiosoft/caddy-git/.travis.yml +++ /dev/null @@ -1,7 +0,0 @@ -language: go - -go: - - 1.6 - -script: go test - diff --git a/vendor/github.com/abiosoft/caddy-git/BUILDING.md b/vendor/github.com/abiosoft/caddy-git/BUILDING.md deleted file mode 100644 index fb65b78..0000000 --- a/vendor/github.com/abiosoft/caddy-git/BUILDING.md +++ /dev/null @@ -1,43 +0,0 @@ -# Building from Source - -Follow the instructions below to build from source. [Go](http://golang.org/doc/install) must be installed. - -### 1. Install Caddydev - -``` -$ go get github.com/caddyserver/caddydev -``` - -### 2. Pull Git Add-on -``` -$ go get github.com/abiosoft/caddy-git -``` - -### 3. Execute -``` -$ cd $GOPATH/src/github.com/abiosoft/caddy-git -$ caddydev -``` -## Other options - -### Execute from another directory -Copy the bundled caddydev config over to the directory. -``` -$ cp $GOPATH/src/github.com/abiosoft/caddy-git/config.json config.json -$ caddydev -``` -Or pass path to `config.json` as flag to caddydev. -``` -$ caddydev --conf $GOPATH/src/github.com/abiosoft/caddy-git/config.json -``` - -### Generate Binary -Generate a Caddy binary that includes Git add-on. -``` -$ cd $GOPATH/src/github.com/abiosoft/caddy-git -$ caddydev -o caddy -$ ./caddy -``` - -### Note -Caddydev is more suited to development purpose. To add other add-ons to Caddy alongside Git, download from [Caddy's download page](https://caddyserver.com/download) or use [Caddyext](https://github.com/caddyserver/caddyext). \ No newline at end of file diff --git a/vendor/github.com/abiosoft/caddy-git/LICENSE b/vendor/github.com/abiosoft/caddy-git/LICENSE deleted file mode 100644 index 95678ba..0000000 --- a/vendor/github.com/abiosoft/caddy-git/LICENSE +++ /dev/null @@ -1,21 +0,0 @@ -The MIT License (MIT) - -Copyright (c) 2015 Abiola Ibrahim - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. \ No newline at end of file diff --git a/vendor/github.com/abiosoft/caddy-git/README.md b/vendor/github.com/abiosoft/caddy-git/README.md deleted file mode 100644 index b507985..0000000 --- a/vendor/github.com/abiosoft/caddy-git/README.md +++ /dev/null @@ -1,116 +0,0 @@ -# git - -Middleware for [Caddy](https://caddyserver.com). - -[![Build Status](https://travis-ci.org/abiosoft/caddy-git.svg?branch=master)](https://travis-ci.org/abiosoft/caddy-git) - -git clones a git repository into the site. This makes it possible to deploy your site with a simple git push. - -The git directive does not chain in a handler. Instead, it starts a service routine that runs during the lifetime of the server. When the server starts, it clones the repository. While the server is still up, it pulls the latest every so often. In regular git fashion, a download only includes changes so it is very efficient. - -If a pull fails, the service will retry up to three times. If the pull was not successful by then, it won't try again until the next interval. - -**Requirements**: This directive requires git to be installed. Also, private repositories may only be accessed from Linux or Mac systems. (Contributions are welcome that make private repositories work on Windows.) - -### Syntax - -``` -git repo [path] -``` -* **repo** is the URL to the repository; SSH and HTTPS URLs are supported -* **path** is the path, relative to site root, to clone the repository into; default is site root - -This simplified syntax pulls from master every 3600 seconds (1 hour) and only works for public repositories. - -For more control or to use a private repository, use the following syntax: - -``` -git [repo path] { - repo repo - path path - branch branch - key key - interval interval - hook path secret - hook_type type - then command [args...] - then_long command [args...] -} -``` -* **repo** is the URL to the repository; SSH and HTTPS URLs are supported. -* **path** is the path, relative to site root, to clone the repository into; default is site root. -* **branch** is the branch or tag to pull; default is master branch. **`{latest}`** is a placeholder for latest tag which ensures the most recent tag is always pulled. -* **key** is the path to the SSH private key; only required for private repositories. -* **interval** is the number of seconds between pulls; default is 3600 (1 hour), minimum 5. -* **path** and **secret** are used to create a webhook which pulls the latest right after a push. This is limited to the [supported webhooks](#supported-webhooks). **secret** is currently supported for GitHub and Travis hooks only. -* **type** is webhook type to use. The webhook type is auto detected by default but it can be explicitly set to one of the [supported webhooks](#supported-webhooks). This is a requirement for generic webhook. -* **command** is a command to execute after successful pull; followed by **args** which are any arguments to pass to the command. You can have multiple lines of this for multiple commands. **then_long** is for long executing commands that should run in background. - -Each property in the block is optional. The path and repo may be specified on the first line, as in the first syntax, or they may be specified in the block with other values. 
- -#### Supported Webhooks -* [github](https://github.com) -* [gitlab](https://gitlab.com) -* [bitbucket](https://bitbucket.org) -* [travis](https://travis-ci.org) -* [gogs](https://gogs.io) -* generic - -### Examples - -Public repository pulled into site root every hour: -``` -git github.com/user/myproject -``` - -Public repository pulled into the "subfolder" directory in the site root: -``` -git github.com/user/myproject /subfolder -``` - -Private repository pulled into the "subfolder" directory with tag v1.0 once per day: -``` -git { - repo git@github.com:user/myproject - branch v1.0 - key /home/user/.ssh/id_rsa - path subfolder - interval 86400 -} -``` - -Generate a static site with [Hugo](http://gohugo.io) after each pull: -``` -git github.com/user/site { - path ../ - then hugo --destination=/home/user/hugosite/public -} -``` - -Part of a Caddyfile for a PHP site that gets changes from a private repo: -``` -git git@github.com:user/myphpsite { - key /home/user/.ssh/id_rsa -} -fastcgi / 127.0.0.1:9000 php -``` - -Specifying a webhook: -``` -git git@github.com:user/site { - hook /webhook secret-password -} -``` - -You might need quotes `"secret-password"` around your secret if it contains any special characters, or you get an error. - - -Generic webhook payload: `` is branch name e.g. `master`. -``` -{ - "ref" : "refs/heads/" -} -``` -### Build from source -Check instructions for building from source here [BUILDING.md](https://github.com/abiosoft/caddy-git/blob/master/BUILDING.md) - diff --git a/vendor/github.com/abiosoft/caddy-git/bitbucket_hook.go b/vendor/github.com/abiosoft/caddy-git/bitbucket_hook.go deleted file mode 100644 index 4209a89..0000000 --- a/vendor/github.com/abiosoft/caddy-git/bitbucket_hook.go +++ /dev/null @@ -1,121 +0,0 @@ -package git - -import ( - "encoding/json" - "errors" - "fmt" - "io/ioutil" - "net" - "net/http" - "strings" -) - -// See: https://confluence.atlassian.com/bitbucket/manage-webhooks-735643732.html -var bitbucketIPBlocks = []string{ - "131.103.20.160/27", - "165.254.145.0/26", - "104.192.143.0/24", -} - -type BitbucketHook struct{} - -type bbPush struct { - Push struct { - Changes []struct { - New struct { - Name string `json:"name,omitempty"` - } `json:"new,omitempty"` - } `json:"changes,omitempty"` - } `json:"push,omitempty"` -} - -func (b BitbucketHook) DoesHandle(h http.Header) bool { - event := h.Get("X-Event-Key") - - // for Gitlab you can only use X-Gitlab-Event header to test if you could handle the request - if event != "" { - return true - } - return false -} - -func (b BitbucketHook) Handle(w http.ResponseWriter, r *http.Request, repo *Repo) (int, error) { - if !b.verifyBitbucketIP(r.RemoteAddr) { - return http.StatusForbidden, errors.New("the request doesn't come from a valid IP") - } - - if r.Method != "POST" { - return http.StatusMethodNotAllowed, errors.New("the request had an invalid method.") - } - - body, err := ioutil.ReadAll(r.Body) - if err != nil { - return http.StatusRequestTimeout, errors.New("could not read body from request") - } - - event := r.Header.Get("X-Event-Key") - if event == "" { - return http.StatusBadRequest, errors.New("the 'X-Event-Key' header is required but was missing.") - } - - switch event { - case "repo:push": - err = b.handlePush(body, repo) - if !hookIgnored(err) && err != nil { - return http.StatusBadRequest, err - } - default: - // return 400 if we do not handle the event type. 
- return http.StatusBadRequest, nil - } - - return http.StatusOK, err -} - -func (b BitbucketHook) handlePush(body []byte, repo *Repo) error { - var push bbPush - - err := json.Unmarshal(body, &push) - if err != nil { - return err - } - - if len(push.Push.Changes) == 0 { - return errors.New("the push was incomplete, missing change list") - } - - change := push.Push.Changes[0] - if len(change.New.Name) == 0 { - return errors.New("the push didn't contain a valid branch name") - } - - branch := change.New.Name - if branch != repo.Branch { - return hookIgnoredError{hookType: hookName(b), err: fmt.Errorf("found different branch %v", branch)} - } - Logger().Print("Received pull notification for the tracking branch, updating...\n") - repo.Pull() - - return nil -} - -func cleanRemoteIP(remoteIP string) string { - // *httpRequest.RemoteAddr comes in format IP:PORT, remove the port - return strings.Split(remoteIP, ":")[0] -} - -func (b BitbucketHook) verifyBitbucketIP(remoteIP string) bool { - ipAddress := net.ParseIP(cleanRemoteIP(remoteIP)) - for _, cidr := range bitbucketIPBlocks { - _, cidrnet, err := net.ParseCIDR(cidr) - if err != nil { - Logger().Printf("Error parsing CIDR block [%s]. Skipping...\n", cidr) - continue - } - - if cidrnet.Contains(ipAddress) { - return true - } - } - return false -} diff --git a/vendor/github.com/abiosoft/caddy-git/bitbucket_hook_test.go b/vendor/github.com/abiosoft/caddy-git/bitbucket_hook_test.go deleted file mode 100644 index 72ed03e..0000000 --- a/vendor/github.com/abiosoft/caddy-git/bitbucket_hook_test.go +++ /dev/null @@ -1,98 +0,0 @@ -package git - -import ( - "bytes" - "net/http" - "net/http/httptest" - "testing" -) - -func TestBitbucketDeployPush(t *testing.T) { - repo := &Repo{Branch: "master", Hook: HookConfig{Url: "/bitbucket_deploy"}} - bbHook := BitbucketHook{} - - for i, test := range []struct { - ip string - body string - event string - responseBody string - code int - }{ - {"131.103.20.192", "", "", "", 403}, - {"131.103.20.160", "", "", "", 400}, - {"131.103.20.160", "", "repo:push", "", 400}, - {"131.103.20.160", pushBBBodyValid, "repo:push", "", 200}, - {"131.103.20.165", pushBBBodyValid, "repo:push", "", 200}, - {"131.103.20.160", pushBBBodyEmptyBranch, "repo:push", "", 400}, - {"131.103.20.160", pushBBBodyDeleteBranch, "repo:push", "", 400}, - } { - - req, err := http.NewRequest("POST", "/bitbucket_deploy", bytes.NewBuffer([]byte(test.body))) - if err != nil { - t.Fatalf("Test %v: Could not create HTTP request: %v", i, err) - } - req.RemoteAddr = test.ip - - if test.event != "" { - req.Header.Add("X-Event-Key", test.event) - } - - rec := httptest.NewRecorder() - - code, err := bbHook.Handle(rec, req, repo) - - if code != test.code { - t.Errorf("Test %d: Expected response code to be %d but was %d", i, test.code, code) - } - - if rec.Body.String() != test.responseBody { - t.Errorf("Test %d: Expected response body to be '%v' but was '%v'", i, test.responseBody, rec.Body.String()) - } - } - -} - -var pushBBBodyEmptyBranch = ` -{ - "push": { - "changes": [ - { - "new": { - "type": "branch", - "name": "", - "target": { - "hash": "709d658dc5b6d6afcd46049c2f332ee3f515a67d" - } - } - } - ] - } -} -` - -var pushBBBodyValid = ` -{ - "push": { - "changes": [ - { - "new": { - "type": "branch", - "name": "master", - "target": { - "hash": "709d658dc5b6d6afcd46049c2f332ee3f515a67d" - } - } - } - ] - } -} -` - -var pushBBBodyDeleteBranch = ` -{ - "push": { - "changes": [ - ] - } -} -` diff --git a/vendor/github.com/abiosoft/caddy-git/commands.go 
b/vendor/github.com/abiosoft/caddy-git/commands.go deleted file mode 100644 index e73b6ee..0000000 --- a/vendor/github.com/abiosoft/caddy-git/commands.go +++ /dev/null @@ -1,212 +0,0 @@ -package git - -import ( - "bytes" - "os" - "strings" - "sync" - "time" -) - -// Then is the command executed after successful pull. -type Then interface { - Command() string - Exec(string) error -} - -// NewThen creates a new Then command. -func NewThen(command string, args ...string) Then { - return &gitCmd{command: command, args: args} -} - -// NewLongThen creates a new long running Then comand. -func NewLongThen(command string, args ...string) Then { - return &gitCmd{command: command, args: args, background: true, haltChan: make(chan struct{})} -} - -type gitCmd struct { - command string - args []string - dir string - background bool - process *os.Process - - haltChan chan struct{} - monitoring bool - sync.RWMutex -} - -// Command returns the full command as configured in Caddyfile -func (g *gitCmd) Command() string { - return g.command + " " + strings.Join(g.args, " ") -} - -// Exec executes the command initiated in GitCmd -func (g *gitCmd) Exec(dir string) error { - g.Lock() - g.dir = dir - g.Unlock() - - if g.background { - return g.execBackground(dir) - } - return g.exec(dir) -} - -func (g *gitCmd) restart() error { - err := g.Exec(g.dir) - if err == nil { - Logger().Printf("Restart successful for '%v'.\n", g.Command()) - } else { - Logger().Printf("Restart failed for '%v'.\n", g.Command()) - } - return err -} - -func (g *gitCmd) exec(dir string) error { - return runCmd(g.command, g.args, dir) -} - -func (g *gitCmd) execBackground(dir string) error { - // if existing process is running, kill it. - g.RLock() - if g.process != nil { - g.haltProcess() - } - g.RUnlock() - - process, err := runCmdBackground(g.command, g.args, dir) - if err == nil { - g.Lock() - g.process = process - g.Unlock() - g.monitorProcess() - } - return err -} - -func (g *gitCmd) monitorProcess() { - g.RLock() - if g.process == nil { - g.RUnlock() - return - } - monitoring := g.monitoring - g.RUnlock() - - if monitoring { - return - } - - type resp struct { - state *os.ProcessState - err error - } - - respChan := make(chan resp) - - go func() { - p, err := g.process.Wait() - respChan <- resp{p, err} - }() - - go func() { - g.Lock() - g.monitoring = true - g.Unlock() - - select { - case <-g.haltChan: - g.killProcess() - case r := <-respChan: - if r.err != nil || !r.state.Success() { - Logger().Printf("Command '%v' terminated with error", g.Command()) - - g.Lock() - g.process = nil - g.monitoring = false - g.Unlock() - - for i := 0; ; i++ { - if i >= numRetries { - Logger().Printf("Restart failed after 3 attempts for '%v'. 
Ignoring...\n", g.Command()) - break - } - Logger().Printf("Attempting restart %v of %v for '%v'\n", i+1, numRetries, g.Command()) - if g.restart() == nil { - break - } - time.Sleep(time.Second * 5) - } - } else { - g.Lock() - g.process = nil - g.monitoring = false - g.Unlock() - } - } - }() - -} - -func (g *gitCmd) killProcess() { - g.Lock() - if err := g.process.Kill(); err != nil { - Logger().Printf("Could not terminate running command '%v'\n", g.command) - } else { - Logger().Printf("Command '%v' terminated from within.\n", g.command) - } - g.process = nil - g.monitoring = false - g.Unlock() -} - -// haltProcess halts the running process -func (g *gitCmd) haltProcess() { - g.RLock() - monitoring := g.monitoring - g.RUnlock() - - if monitoring { - g.haltChan <- struct{}{} - } -} - -// runCmd is a helper function to run commands. -// It runs command with args from directory at dir. -// The executed process outputs to os.Stderr -func runCmd(command string, args []string, dir string) error { - cmd := gos.Command(command, args...) - cmd.Stdout(os.Stderr) - cmd.Stderr(os.Stderr) - cmd.Dir(dir) - if err := cmd.Start(); err != nil { - return err - } - return cmd.Wait() -} - -// runCmdBackground is a helper function to run commands in the background. -// It returns the resulting process and an error that occurs during while -// starting the process (if any). -func runCmdBackground(command string, args []string, dir string) (*os.Process, error) { - cmd := gos.Command(command, args...) - cmd.Dir(dir) - cmd.Stdout(os.Stderr) - cmd.Stderr(os.Stderr) - err := cmd.Start() - return cmd.Process(), err -} - -// runCmdOutput is a helper function to run commands and return output. -// It runs command with args from directory at dir. -// If successful, returns output and nil error -func runCmdOutput(command string, args []string, dir string) (string, error) { - cmd := gos.Command(command, args...) 
- cmd.Dir(dir) - var err error - if output, err := cmd.Output(); err == nil { - return string(bytes.TrimSpace(output)), nil - } - return "", err -} diff --git a/vendor/github.com/abiosoft/caddy-git/config.json b/vendor/github.com/abiosoft/caddy-git/config.json deleted file mode 100644 index af58b69..0000000 --- a/vendor/github.com/abiosoft/caddy-git/config.json +++ /dev/null @@ -1,5 +0,0 @@ -{ - "directive": "git", - "after": "shutdown", - "source": "github.com/abiosoft/caddy-git" -} diff --git a/vendor/github.com/abiosoft/caddy-git/generic_hook.go b/vendor/github.com/abiosoft/caddy-git/generic_hook.go deleted file mode 100644 index 14b85a9..0000000 --- a/vendor/github.com/abiosoft/caddy-git/generic_hook.go +++ /dev/null @@ -1,61 +0,0 @@ -package git - -import ( - "encoding/json" - "errors" - "io/ioutil" - "net/http" - "strings" -) - -type GenericHook struct{} - -type gPush struct { - Ref string `json:"ref"` -} - -func (g GenericHook) DoesHandle(h http.Header) bool { - return true -} - -func (g GenericHook) Handle(w http.ResponseWriter, r *http.Request, repo *Repo) (int, error) { - if r.Method != "POST" { - return http.StatusMethodNotAllowed, errors.New("the request had an invalid method.") - } - - body, err := ioutil.ReadAll(r.Body) - if err != nil { - return http.StatusRequestTimeout, errors.New("could not read body from request") - } - - err = g.handlePush(body, repo) - if err != nil { - return http.StatusBadRequest, err - } - - return http.StatusOK, nil -} - -func (g GenericHook) handlePush(body []byte, repo *Repo) error { - var push gPush - - err := json.Unmarshal(body, &push) - if err != nil { - return err - } - - // extract the branch being pushed from the ref string - // and if it matches with our locally tracked one, pull. - refSlice := strings.Split(push.Ref, "/") - if len(refSlice) != 3 { - return errors.New("the push request contained an invalid reference string.") - } - - branch := refSlice[2] - if branch == repo.Branch { - Logger().Print("Received pull notification for the tracking branch, updating...\n") - repo.Pull() - } - - return nil -} diff --git a/vendor/github.com/abiosoft/caddy-git/generic_hook_test.go b/vendor/github.com/abiosoft/caddy-git/generic_hook_test.go deleted file mode 100644 index 7977eb4..0000000 --- a/vendor/github.com/abiosoft/caddy-git/generic_hook_test.go +++ /dev/null @@ -1,56 +0,0 @@ -package git - -import ( - "bytes" - "net/http" - "net/http/httptest" - "testing" -) - -func TestGenericDeployPush(t *testing.T) { - repo := &Repo{Branch: "master", Hook: HookConfig{Url: "/generic_deploy"}} - gHook := GenericHook{} - - for i, test := range []struct { - body string - event string - responseBody string - code int - }{ - {"", "", "", 400}, - {pushGBodyOther, "", "", 200}, - {pushGBodyPartial, "", "", 400}, - {"", "Some Event", "", 400}, - } { - - req, err := http.NewRequest("POST", "/generic_deploy", bytes.NewBuffer([]byte(test.body))) - if err != nil { - t.Fatalf("Test %v: Could not create HTTP request: %v", i, err) - } - - rec := httptest.NewRecorder() - - code, err := gHook.Handle(rec, req, repo) - - if code != test.code { - t.Errorf("Test %d: Expected response code to be %d but was %d", i, test.code, code) - } - - if rec.Body.String() != test.responseBody { - t.Errorf("Test %d: Expected response body to be '%v' but was '%v'", i, test.responseBody, rec.Body.String()) - } - } - -} - -var pushGBodyPartial = ` -{ - "ref": "" -} -` - -var pushGBodyOther = ` -{ - "ref": "refs/heads/some-other-branch" -} -` diff --git 
a/vendor/github.com/abiosoft/caddy-git/git.go b/vendor/github.com/abiosoft/caddy-git/git.go deleted file mode 100644 index 58b6fba..0000000 --- a/vendor/github.com/abiosoft/caddy-git/git.go +++ /dev/null @@ -1,317 +0,0 @@ -package git - -import ( - "fmt" - "os" - "strings" - "sync" - "time" - - "github.com/abiosoft/caddy-git/gitos" - "github.com/mholt/caddy" -) - -const ( - // Number of retries if git pull fails - numRetries = 3 - - // variable for latest tag - latestTag = "{latest}" -) - -// Git represent multiple repositories. -type Git []*Repo - -// Repo retrieves repository at i or nil if not found. -func (g Git) Repo(i int) *Repo { - if i < len(g) { - return g[i] - } - return nil -} - -// Repo is the structure that holds required information -// of a git repository. -type Repo struct { - URL string // Repository URL - Path string // Directory to pull to - Host string // Git domain host e.g. github.com - Branch string // Git branch - KeyPath string // Path to private ssh key - Interval time.Duration // Interval between pulls - Then []Then // Commands to execute after successful git pull - pulled bool // true if there was a successful pull - lastPull time.Time // time of the last successful pull - lastCommit string // hash for the most recent commit - sync.Mutex - latestTag string // latest tag name - Hook HookConfig // Webhook configuration - -} - -// Pull attempts a git pull. -// It retries at most numRetries times if error occurs -func (r *Repo) Pull() error { - r.Lock() - defer r.Unlock() - - // prevent a pull if the last one was less than 5 seconds ago - if gos.TimeSince(r.lastPull) < 5*time.Second { - return nil - } - - // keep last commit hash for comparison later - lastCommit := r.lastCommit - - var err error - // Attempt to pull at most numRetries times - for i := 0; i < numRetries; i++ { - if err = r.pull(); err == nil { - break - } - Logger().Println(err) - } - - if err != nil { - return err - } - - // check if there are new changes, - // then execute post pull command - if r.lastCommit == lastCommit { - Logger().Println("No new changes.") - return nil - } - return r.execThen() -} - -// pull performs git pull, or git clone if repository does not exist. -func (r *Repo) pull() error { - - // if not pulled, perform clone - if !r.pulled { - return r.clone() - } - - // if latest tag config is set - if r.Branch == latestTag { - return r.checkoutLatestTag() - } - - params := []string{"pull", "origin", r.Branch} - var err error - if err = r.gitCmd(params, r.Path); err == nil { - r.pulled = true - r.lastPull = time.Now() - Logger().Printf("%v pulled.\n", r.URL) - r.lastCommit, err = r.mostRecentCommit() - } - return err -} - -// clone performs git clone. -func (r *Repo) clone() error { - params := []string{"clone", "-b", r.Branch, r.URL, r.Path} - - tagMode := r.Branch == latestTag - if tagMode { - params = []string{"clone", r.URL, r.Path} - } - - var err error - if err = r.gitCmd(params, ""); err == nil { - r.pulled = true - r.lastPull = time.Now() - Logger().Printf("%v pulled.\n", r.URL) - r.lastCommit, err = r.mostRecentCommit() - - // if latest tag config is set. - if tagMode { - return r.checkoutLatestTag() - } - } - - return err -} - -// checkoutLatestTag checks out the latest tag of the repository. 
-func (r *Repo) checkoutLatestTag() error { - tag, err := r.fetchLatestTag() - if err != nil { - Logger().Println("Error retrieving latest tag.") - return err - } - if tag == "" { - Logger().Println("No tags found for Repo: ", r.URL) - return fmt.Errorf("No tags found for Repo: %v.", r.URL) - } else if tag == r.latestTag { - Logger().Println("No new tags.") - return nil - } - - params := []string{"checkout", "tags/" + tag} - if err = r.gitCmd(params, r.Path); err == nil { - r.latestTag = tag - r.lastCommit, err = r.mostRecentCommit() - Logger().Printf("Tag %v checkout done.\n", tag) - } - return err -} - -// checkoutCommit checks out the specified commitHash. -func (r *Repo) checkoutCommit(commitHash string) error { - var err error - params := []string{"checkout", commitHash} - if err = r.gitCmd(params, r.Path); err == nil { - Logger().Printf("Commit %v checkout done.\n", commitHash) - } - return err -} - -// gitCmd performs a git command. -func (r *Repo) gitCmd(params []string, dir string) error { - // if key is specified, use ssh key - if r.KeyPath != "" { - return r.gitCmdWithKey(params, dir) - } - return runCmd(gitBinary, params, dir) -} - -// gitCmdWithKey is used for private repositories and requires an ssh key. -// Note: currently only limited to Linux and OSX. -func (r *Repo) gitCmdWithKey(params []string, dir string) error { - var gitSSH, script gitos.File - // ensure temporary files deleted after usage - defer func() { - if gitSSH != nil { - gos.Remove(gitSSH.Name()) - } - if script != nil { - gos.Remove(script.Name()) - } - }() - - var err error - // write git.sh script to temp file - gitSSH, err = writeScriptFile(gitWrapperScript()) - if err != nil { - return err - } - - // write git bash script to file - script, err = writeScriptFile(bashScript(gitSSH.Name(), r, params)) - if err != nil { - return err - } - - return runCmd(script.Name(), nil, dir) -} - -// Prepare prepares for a git pull -// and validates the configured directory -func (r *Repo) Prepare() error { - // check if directory exists or is empty - // if not, create directory - fs, err := gos.ReadDir(r.Path) - if err != nil || len(fs) == 0 { - return gos.MkdirAll(r.Path, os.FileMode(0755)) - } - - // validate git repo - isGit := false - for _, f := range fs { - if f.IsDir() && f.Name() == ".git" { - isGit = true - break - } - } - - if isGit { - // check if same repository - var repoURL string - if repoURL, err = r.originURL(); err == nil { - // add .git suffix if missing for adequate comparison. - if !strings.HasSuffix(repoURL, ".git") { - repoURL += ".git" - } - if repoURL == r.URL { - r.pulled = true - return nil - } - } - if err != nil { - return fmt.Errorf("cannot retrieve repo url for %v Error: %v", r.Path, err) - } - return fmt.Errorf("another git repo '%v' exists at %v", repoURL, r.Path) - } - return fmt.Errorf("cannot git clone into %v, directory not empty", r.Path) -} - -// getMostRecentCommit gets the hash of the most recent commit to the -// repository. Useful for checking if changes occur. -func (r *Repo) mostRecentCommit() (string, error) { - command := gitBinary + ` --no-pager log -n 1 --pretty=format:"%H"` - c, args, err := caddy.SplitCommandAndArgs(command) - if err != nil { - return "", err - } - return runCmdOutput(c, args, r.Path) -} - -// getLatestTag retrieves the most recent tag in the repository. 
-func (r *Repo) fetchLatestTag() (string, error) { - // fetch updates to get latest tag - params := []string{"fetch", "origin", "--tags"} - err := r.gitCmd(params, r.Path) - if err != nil { - return "", err - } - // retrieve latest tag - command := gitBinary + ` describe origin --abbrev=0 --tags` - c, args, err := caddy.SplitCommandAndArgs(command) - if err != nil { - return "", err - } - return runCmdOutput(c, args, r.Path) -} - -// getRepoURL retrieves remote origin url for the git repository at path -func (r *Repo) originURL() (string, error) { - _, err := gos.Stat(r.Path) - if err != nil { - return "", err - } - args := []string{"config", "--get", "remote.origin.url"} - return runCmdOutput(gitBinary, args, r.Path) -} - -// execThen executes r.Then. -// It is trigged after successful git pull -func (r *Repo) execThen() error { - var errs error - for _, command := range r.Then { - err := command.Exec(r.Path) - if err == nil { - Logger().Printf("Command '%v' successful.\n", command.Command()) - } - errs = mergeErrors(errs, err) - } - return errs -} - -func mergeErrors(errs ...error) error { - if len(errs) == 0 { - return nil - } - var err error - for _, e := range errs { - if err == nil { - err = e - continue - } - if e != nil { - err = fmt.Errorf("%v\n%v", err.Error(), e.Error()) - } - } - return err -} diff --git a/vendor/github.com/abiosoft/caddy-git/git_test.go b/vendor/github.com/abiosoft/caddy-git/git_test.go deleted file mode 100644 index 1bcc1ff..0000000 --- a/vendor/github.com/abiosoft/caddy-git/git_test.go +++ /dev/null @@ -1,252 +0,0 @@ -package git - -import ( - "io/ioutil" - "log" - "testing" - "time" - - "github.com/abiosoft/caddy-git/gittest" -) - -// init sets the OS used to fakeOS. -func init() { - SetOS(gittest.FakeOS) -} - -func check(t *testing.T, err error) { - if err != nil { - t.Errorf("Error not expected but found %v", err) - } -} - -func TestInit(t *testing.T) { - err := Init() - check(t, err) -} - -func TestHelpers(t *testing.T) { - f, err := writeScriptFile([]byte("script")) - check(t, err) - var b [6]byte - _, err = f.Read(b[:]) - check(t, err) - if string(b[:]) != "script" { - t.Errorf("Expected script found %v", string(b[:])) - } - - out, err := runCmdOutput(gitBinary, []string{"-version"}, "") - check(t, err) - if out != gittest.CmdOutput { - t.Errorf("Expected %v found %v", gittest.CmdOutput, out) - } - - err = runCmd(gitBinary, []string{"-version"}, "") - check(t, err) - - wScript := gitWrapperScript() - if string(wScript) != expectedWrapperScript { - t.Errorf("Expected %v found %v", expectedWrapperScript, string(wScript)) - } - - f, err = writeScriptFile(wScript) - check(t, err) - - repo := &Repo{Host: "github.com", KeyPath: "~/.key"} - script := string(bashScript(f.Name(), repo, []string{"clone", "git@github.com/repo/user"})) - if script != expectedBashScript { - t.Errorf("Expected %v found %v", expectedBashScript, script) - } -} - -func TestGit(t *testing.T) { - // prepare - repos := []*Repo{ - nil, - &Repo{Path: "gitdir", URL: "success.git"}, - } - for _, r := range repos { - repo := createRepo(r) - err := repo.Prepare() - check(t, err) - } - - // pull with success - logFile := gittest.Open("file") - SetLogger(log.New(logFile, "", 0)) - tests := []struct { - repo *Repo - output string - }{ - { - &Repo{Path: "gitdir", URL: "git@github.com:user/repo.git", KeyPath: "~/.key", Then: []Then{NewThen("echo", "Hello")}}, - `git@github.com:user/repo.git pulled. -Command 'echo Hello' successful. 
-`, - }, - { - &Repo{Path: "gitdir", URL: "https://github.com/user/repo.git", Then: []Then{NewThen("echo", "Hello")}}, - `https://github.com/user/repo.git pulled. -Command 'echo Hello' successful. -`, - }, - { - &Repo{URL: "git@github.com:user/repo"}, - `git@github.com:user/repo pulled. -`, - }, - } - - for i, test := range tests { - gittest.CmdOutput = test.repo.URL - - test.repo = createRepo(test.repo) - - err := test.repo.Prepare() - check(t, err) - - err = test.repo.Pull() - check(t, err) - - out, err := ioutil.ReadAll(logFile) - check(t, err) - if test.output != string(out) { - t.Errorf("Pull with Success %v: Expected %v found %v", i, test.output, string(out)) - } - } - - // pull with error - repos = []*Repo{ - &Repo{Path: "gitdir", URL: "http://github.com:u/repo.git"}, - &Repo{Path: "gitdir", URL: "https://github.com/user/repo.git", Then: []Then{NewThen("echo", "Hello")}}, - &Repo{Path: "gitdir"}, - &Repo{Path: "gitdir", KeyPath: ".key"}, - } - - gittest.CmdOutput = "git@github.com:u1/repo.git" - for i, repo := range repos { - repo = createRepo(repo) - - err := repo.Prepare() - if err == nil { - t.Errorf("Pull with Error %v: Error expected but not found %v", i, err) - continue - } - - expected := "another git repo 'git@github.com:u1/repo.git' exists at gitdir" - if expected != err.Error() { - t.Errorf("Pull with Error %v: Expected %v found %v", i, expected, err.Error()) - } - } - - // timeout checks - timeoutTests := []struct { - repo *Repo - shouldPull bool - }{ - {&Repo{Interval: time.Millisecond * 4900}, false}, - {&Repo{Interval: time.Millisecond * 1}, false}, - {&Repo{Interval: time.Second * 5}, true}, - {&Repo{Interval: time.Second * 10}, true}, - } - - for i, r := range timeoutTests { - r.repo = createRepo(r.repo) - - err := r.repo.Prepare() - check(t, err) - err = r.repo.Pull() - check(t, err) - - before := r.repo.lastPull - - gittest.Sleep(r.repo.Interval) - - err = r.repo.Pull() - after := r.repo.lastPull - check(t, err) - - expected := after.After(before) - if expected != r.shouldPull { - t.Errorf("Pull with Error %v: Expected %v found %v", i, expected, r.shouldPull) - } - } - -} - -func createRepo(r *Repo) *Repo { - repo := &Repo{ - URL: "git@github.com/user/test", - Path: ".", - Host: "github.com", - Branch: "master", - Interval: time.Second * 60, - } - if r == nil { - return repo - } - if r.Branch != "" { - repo.Branch = r.Branch - } - if r.Host != "" { - repo.Branch = r.Branch - } - if r.Interval != 0 { - repo.Interval = r.Interval - } - if r.KeyPath != "" { - repo.KeyPath = r.KeyPath - } - if r.Path != "" { - repo.Path = r.Path - } - if r.Then != nil { - repo.Then = r.Then - } - if r.URL != "" { - repo.URL = r.URL - } - - return repo -} - -var expectedBashScript = `#!/bin/bash - -mkdir -p ~/.ssh; -touch ~/.ssh/known_hosts; -ssh-keyscan -t rsa,dsa github.com 2>&1 | sort -u - ~/.ssh/known_hosts > ~/.ssh/tmp_hosts; -cat ~/.ssh/tmp_hosts >> ~/.ssh/known_hosts; -` + gittest.TempFileName + ` -i ~/.key clone git@github.com/repo/user; -` - -var expectedWrapperScript = `#!/bin/bash - -# The MIT License (MIT) -# Copyright (c) 2013 Alvin Abad - -if [ $# -eq 0 ]; then - echo "Git wrapper script that can specify an ssh-key file -Usage: - git.sh -i ssh-key-file git-command - " - exit 1 -fi - -# remove temporary file on exit -trap 'rm -f /tmp/.git_ssh.$$' 0 - -if [ "$1" = "-i" ]; then - SSH_KEY=$2; shift; shift - echo -e "#!/bin/bash \n \ - ssh -i $SSH_KEY \$@" > /tmp/.git_ssh.$$ - chmod +x /tmp/.git_ssh.$$ - export GIT_SSH=/tmp/.git_ssh.$$ -fi - -# in case the git command is 
repeated -[ "$1" = "git" ] && shift - -# Run the git command -/usr/bin/git "$@" - -` diff --git a/vendor/github.com/abiosoft/caddy-git/github_hook.go b/vendor/github.com/abiosoft/caddy-git/github_hook.go deleted file mode 100644 index cee2370..0000000 --- a/vendor/github.com/abiosoft/caddy-git/github_hook.go +++ /dev/null @@ -1,147 +0,0 @@ -package git - -import ( - "crypto/hmac" - "crypto/sha1" - "encoding/hex" - "encoding/json" - "errors" - "fmt" - "io/ioutil" - "net/http" - "strings" -) - -type GithubHook struct{} - -type ghRelease struct { - Action string `json:"action"` - Release struct { - TagName string `json:"tag_name"` - Name interface{} `json:"name"` - } `json:"release"` -} - -type ghPush struct { - Ref string `json:"ref"` -} - -func (g GithubHook) DoesHandle(h http.Header) bool { - userAgent := h.Get("User-Agent") - - // GitHub always uses a user-agent like "GitHub-Hookshot/" - if userAgent != "" && strings.HasPrefix(userAgent, "GitHub-Hookshot") { - return true - } - return false -} - -func (g GithubHook) Handle(w http.ResponseWriter, r *http.Request, repo *Repo) (int, error) { - if r.Method != "POST" { - return http.StatusMethodNotAllowed, errors.New("the request had an invalid method.") - } - - // read full body - required for signature - body, err := ioutil.ReadAll(r.Body) - - err = g.handleSignature(r, body, repo.Hook.Secret) - if err != nil { - return http.StatusBadRequest, err - } - - event := r.Header.Get("X-Github-Event") - if event == "" { - return http.StatusBadRequest, errors.New("the 'X-Github-Event' header is required but was missing.") - } - - switch event { - case "ping": - w.Write([]byte("pong")) - case "push": - err = g.handlePush(body, repo) - if !hookIgnored(err) && err != nil { - return http.StatusBadRequest, err - } - case "release": - err = g.handleRelease(body, repo) - if err != nil { - return http.StatusBadRequest, err - } - - // return 400 if we do not handle the event type. - // This is to visually show the user a configuration error in the GH ui. - default: - return http.StatusBadRequest, nil - } - - return http.StatusOK, err -} - -// Check for an optional signature in the request -// if it is signed, verify the signature. -func (g GithubHook) handleSignature(r *http.Request, body []byte, secret string) error { - signature := r.Header.Get("X-Hub-Signature") - if signature != "" { - if secret == "" { - Logger().Print("Unable to verify request signature. Secret not set in caddyfile!\n") - } else { - mac := hmac.New(sha1.New, []byte(secret)) - mac.Write(body) - expectedMac := hex.EncodeToString(mac.Sum(nil)) - - if signature[5:] != expectedMac { - return errors.New("could not verify request signature. The signature is invalid!") - } - } - } - - return nil -} - -func (g GithubHook) handlePush(body []byte, repo *Repo) error { - var push ghPush - - err := json.Unmarshal(body, &push) - if err != nil { - return err - } - - // extract the branch being pushed from the ref string - // and if it matches with our locally tracked one, pull. 
- refSlice := strings.Split(push.Ref, "/") - if len(refSlice) != 3 { - return errors.New("the push request contained an invalid reference string.") - } - - branch := refSlice[2] - - if branch != repo.Branch { - return hookIgnoredError{hookType: hookName(g), err: fmt.Errorf("found different branch %v", branch)} - } - - Logger().Println("Received pull notification for the tracking branch, updating...") - repo.Pull() - return nil -} - -func (g GithubHook) handleRelease(body []byte, repo *Repo) error { - var release ghRelease - - err := json.Unmarshal(body, &release) - if err != nil { - return err - } - - if release.Release.TagName == "" { - return errors.New("the release request contained an invalid TagName.") - } - - Logger().Printf("Received new release '%s'. -> Updating local repository to this release.\n", release.Release.Name) - - // Update the local branch to the release tag name - // this will pull the release tag. - repo.Branch = release.Release.TagName - repo.Pull() - - return nil -} diff --git a/vendor/github.com/abiosoft/caddy-git/github_hook_test.go b/vendor/github.com/abiosoft/caddy-git/github_hook_test.go deleted file mode 100644 index 96813c3..0000000 --- a/vendor/github.com/abiosoft/caddy-git/github_hook_test.go +++ /dev/null @@ -1,62 +0,0 @@ -package git - -import ( - "bytes" - "net/http" - "net/http/httptest" - "testing" -) - -func TestGithubDeployPush(t *testing.T) { - repo := &Repo{Branch: "master", Hook: HookConfig{ Url: "/github_deploy", Secret: "supersecret"} } - ghHook := GithubHook{} - - for i, test := range []struct { - body string - event string - responseBody string - code int - }{ - {"", "", "", 400}, - {"", "push", "", 400}, - {pushBodyOther, "push", "", 200}, - {pushBodyPartial, "push", "", 400}, - {"", "release", "", 400}, - {"", "ping", "pong", 200}, - } { - - req, err := http.NewRequest("POST", "/github_deploy", bytes.NewBuffer([]byte(test.body))) - if err != nil { - t.Fatalf("Test %v: Could not create HTTP request: %v", i, err) - } - - if test.event != "" { - req.Header.Add("X-Github-Event", test.event) - } - - rec := httptest.NewRecorder() - - code, err := ghHook.Handle(rec, req, repo) - - if code != test.code { - t.Errorf("Test %d: Expected response code to be %d but was %d", i, test.code, code) - } - - if rec.Body.String() != test.responseBody { - t.Errorf("Test %d: Expected response body to be '%v' but was '%v'", i, test.responseBody, rec.Body.String()) - } - } - -} - -var pushBodyPartial = ` -{ - "ref": "" -} -` - -var pushBodyOther = ` -{ - "ref": "refs/heads/some-other-branch" -} -` diff --git a/vendor/github.com/abiosoft/caddy-git/gitlab_hook.go b/vendor/github.com/abiosoft/caddy-git/gitlab_hook.go deleted file mode 100644 index 5b8ba33..0000000 --- a/vendor/github.com/abiosoft/caddy-git/gitlab_hook.go +++ /dev/null @@ -1,82 +0,0 @@ -package git - -import ( - "encoding/json" - "errors" - "fmt" - "io/ioutil" - "net/http" - "strings" -) - -type GitlabHook struct{} - -type glPush struct { - Ref string `json:"ref"` -} - -func (g GitlabHook) DoesHandle(h http.Header) bool { - event := h.Get("X-Gitlab-Event") - - // for Gitlab you can only use X-Gitlab-Event header to test if you could handle the request - if event != "" { - return true - } - return false -} - -func (g GitlabHook) Handle(w http.ResponseWriter, r *http.Request, repo *Repo) (int, error) { - if r.Method != "POST" { - return http.StatusMethodNotAllowed, errors.New("the request had an invalid method.") - } - - body, err := ioutil.ReadAll(r.Body) - if err != nil { - return 
http.StatusRequestTimeout, errors.New("could not read body from request") - } - - event := r.Header.Get("X-Gitlab-Event") - if event == "" { - return http.StatusBadRequest, errors.New("the 'X-Gitlab-Event' header is required but was missing.") - } - - switch event { - case "Push Hook": - err = g.handlePush(body, repo) - if !hookIgnored(err) && err != nil { - return http.StatusBadRequest, err - } - - // return 400 if we do not handle the event type. - default: - return http.StatusBadRequest, nil - } - - return http.StatusOK, err -} - -func (g GitlabHook) handlePush(body []byte, repo *Repo) error { - var push glPush - - err := json.Unmarshal(body, &push) - if err != nil { - return err - } - - // extract the branch being pushed from the ref string - // and if it matches with our locally tracked one, pull. - refSlice := strings.Split(push.Ref, "/") - if len(refSlice) != 3 { - return errors.New("the push request contained an invalid reference string.") - } - - branch := refSlice[2] - if branch != repo.Branch { - return hookIgnoredError{hookType: hookName(g), err: fmt.Errorf("found different branch %v", branch)} - } - - Logger().Print("Received pull notification for the tracking branch, updating...\n") - repo.Pull() - - return nil -} diff --git a/vendor/github.com/abiosoft/caddy-git/gitlab_hook_test.go b/vendor/github.com/abiosoft/caddy-git/gitlab_hook_test.go deleted file mode 100644 index 5401829..0000000 --- a/vendor/github.com/abiosoft/caddy-git/gitlab_hook_test.go +++ /dev/null @@ -1,61 +0,0 @@ -package git - -import ( - "bytes" - "net/http" - "net/http/httptest" - "testing" -) - -func TestGitlabDeployPush(t *testing.T) { - repo := &Repo{Branch: "master", Hook: HookConfig{Url: "/gitlab_deploy"}} - glHook := GitlabHook{} - - for i, test := range []struct { - body string - event string - responseBody string - code int - }{ - {"", "", "", 400}, - {"", "Push Hook", "", 400}, - {pushGLBodyOther, "Push Hook", "", 200}, - {pushGLBodyPartial, "Push Hook", "", 400}, - {"", "Some other Event", "", 400}, - } { - - req, err := http.NewRequest("POST", "/gitlab_deploy", bytes.NewBuffer([]byte(test.body))) - if err != nil { - t.Fatalf("Test %v: Could not create HTTP request: %v", i, err) - } - - if test.event != "" { - req.Header.Add("X-Gitlab-Event", test.event) - } - - rec := httptest.NewRecorder() - - code, err := glHook.Handle(rec, req, repo) - - if code != test.code { - t.Errorf("Test %d: Expected response code to be %d but was %d", i, test.code, code) - } - - if rec.Body.String() != test.responseBody { - t.Errorf("Test %d: Expected response body to be '%v' but was '%v'", i, test.responseBody, rec.Body.String()) - } - } - -} - -var pushGLBodyPartial = ` -{ - "ref": "" -} -` - -var pushGLBodyOther = ` -{ - "ref": "refs/heads/some-other-branch" -} -` diff --git a/vendor/github.com/abiosoft/caddy-git/gitos/gitos.go b/vendor/github.com/abiosoft/caddy-git/gitos/gitos.go deleted file mode 100644 index 158aff5..0000000 --- a/vendor/github.com/abiosoft/caddy-git/gitos/gitos.go +++ /dev/null @@ -1,210 +0,0 @@ -package gitos - -import ( - "io" - "io/ioutil" - "os" - "os/exec" - "time" -) - -// File is an abstraction for file (os.File). -type File interface { - // Name returns the name of the file - Name() string - - // Stat returns the FileInfo structure describing file. - Stat() (os.FileInfo, error) - - // Close closes the File, rendering it unusable for I/O. - Close() error - - // Chmod changes the mode of the file. - Chmod(os.FileMode) error - - // Read reads up to len(b) bytes from the File. 
It returns the number of - // bytes read and an error, if any. - Read([]byte) (int, error) - - // Write writes len(b) bytes to the File. It returns the number of bytes - // written and an error, if any. - Write([]byte) (int, error) -} - -// Cmd is an abstraction for external commands (os.Cmd). -type Cmd interface { - // Run starts the specified command and waits for it to complete. - Run() error - - // Start starts the specified command but does not wait for it to complete. - Start() error - - // Wait waits for the command to exit. It must have been started by Start. - Wait() error - - // Output runs the command and returns its standard output. - Output() ([]byte, error) - - // Dir sets the working directory of the command. - Dir(string) - - // Stdin sets the process's standard input. - Stdin(io.Reader) - - // Stdout sets the process's standard output. - Stdout(io.Writer) - - // Stderr sets the process's standard output. - Stderr(io.Writer) - - // Process is the underlying process, once started. - Process() *os.Process -} - -// gitCmd represents external commands executed by git. -type gitCmd struct { - *exec.Cmd -} - -// Dir sets the working directory of the command. -func (g *gitCmd) Dir(dir string) { - g.Cmd.Dir = dir -} - -// Stdin sets the process's standard input. -func (g *gitCmd) Stdin(stdin io.Reader) { - g.Cmd.Stdin = stdin -} - -// Stdout sets the process's standard output. -func (g *gitCmd) Stdout(stdout io.Writer) { - g.Cmd.Stdout = stdout -} - -// Stderr sets the process's standard output. -func (g *gitCmd) Stderr(stderr io.Writer) { - g.Cmd.Stderr = stderr -} - -func (g *gitCmd) Process() *os.Process { - return g.Cmd.Process -} - -// OS is an abstraction for required OS level functions. -type OS interface { - // Command returns the Cmd to execute the named program with the - // given arguments. - Command(string, ...string) Cmd - - // Mkdir creates a new directory with the specified name and permission - // bits. - Mkdir(string, os.FileMode) error - - // MkdirAll creates a directory named path, along with any necessary - // parents. - MkdirAll(string, os.FileMode) error - - // Stat returns a FileInfo describing the named file. - Stat(string) (os.FileInfo, error) - - // Remove removes the named file or directory. - Remove(string) error - - // ReadDir reads the directory named by dirname and returns a list of - // directory entries. - ReadDir(string) ([]os.FileInfo, error) - - // LookPath searches for an executable binary named file in the directories - // named by the PATH environment variable. - LookPath(string) (string, error) - - // TempFile creates a new temporary file in the directory dir with a name - // beginning with prefix, opens the file for reading and writing, and - // returns the resulting File. - TempFile(string, string) (File, error) - - // Sleep pauses the current goroutine for at least the duration d. A - // negative or zero duration causes Sleep to return immediately. - Sleep(time.Duration) - - // NewTicker returns a new Ticker containing a channel that will send the - // time with a period specified by the argument. - NewTicker(time.Duration) Ticker - - // TimeSince returns the time elapsed since the argument. - TimeSince(time.Time) time.Duration -} - -// Ticker is an abstraction for Ticker (time.Ticker) -type Ticker interface { - C() <-chan time.Time - Stop() -} - -// GitTicker is the implementation of Ticker for git. 
-type GitTicker struct { - *time.Ticker -} - -// C returns the channel on which the ticks are delivered.s -func (g *GitTicker) C() <-chan time.Time { - return g.Ticker.C -} - -// GitOS is the implementation of OS for git. -type GitOS struct{} - -// Mkdir calls os.Mkdir. -func (g GitOS) Mkdir(name string, perm os.FileMode) error { - return os.Mkdir(name, perm) -} - -// MkdirAll calls os.MkdirAll. -func (g GitOS) MkdirAll(path string, perm os.FileMode) error { - return os.MkdirAll(path, perm) -} - -// Stat calls os.Stat. -func (g GitOS) Stat(name string) (os.FileInfo, error) { - return os.Stat(name) -} - -// Remove calls os.Remove. -func (g GitOS) Remove(name string) error { - return os.Remove(name) -} - -// LookPath calls exec.LookPath. -func (g GitOS) LookPath(file string) (string, error) { - return exec.LookPath(file) -} - -// TempFile calls ioutil.TempFile. -func (g GitOS) TempFile(dir, prefix string) (File, error) { - return ioutil.TempFile(dir, prefix) -} - -// ReadDir calls ioutil.ReadDir. -func (g GitOS) ReadDir(dirname string) ([]os.FileInfo, error) { - return ioutil.ReadDir(dirname) -} - -// Command calls exec.Command. -func (g GitOS) Command(name string, args ...string) Cmd { - return &gitCmd{exec.Command(name, args...)} -} - -// Sleep calls time.Sleep. -func (g GitOS) Sleep(d time.Duration) { - time.Sleep(d) -} - -// New Ticker calls time.NewTicker. -func (g GitOS) NewTicker(d time.Duration) Ticker { - return &GitTicker{time.NewTicker(d)} -} - -// TimeSince calls time.Since -func (g GitOS) TimeSince(t time.Time) time.Duration { - return time.Since(t) -} diff --git a/vendor/github.com/abiosoft/caddy-git/gittest/gittest.go b/vendor/github.com/abiosoft/caddy-git/gittest/gittest.go deleted file mode 100644 index 9073c41..0000000 --- a/vendor/github.com/abiosoft/caddy-git/gittest/gittest.go +++ /dev/null @@ -1,210 +0,0 @@ -// Package gittest is a test package for the git middleware. -// It implements a mock gitos.OS, gitos.Cmd and gitos.File. -package gittest - -import ( - "io" - "log" - "os" - "sync" - "time" - - "github.com/abiosoft/caddy-git/gitos" -) - -// FakeOS implements a mock gitos.OS, gitos.Cmd and gitos.File. -var FakeOS = fakeOS{} - -// CmdOutput is the output of any call to the mocked gitos.Cmd's Output(). -var CmdOutput = "success" - -// TempFileName is the name of any file returned by mocked gitos.OS's TempFile(). -var TempFileName = "tempfile" - -// TimeSpeed is how faster the mocked gitos.Ticker and gitos.Sleep should run. -var TimeSpeed = 5 - -// dirs mocks a fake git dir if filename is "gitdir". -var dirs = map[string][]os.FileInfo{ - "gitdir": { - fakeInfo{name: ".git", dir: true}, - }, -} - -// Open creates a new mock gitos.File. -func Open(name string) gitos.File { - return &fakeFile{name: name} -} - -// Sleep calls fake time.Sleep -func Sleep(d time.Duration) { - FakeOS.Sleep(d) -} - -// NewLogger creates a logger that logs to f -func NewLogger(f gitos.File) *log.Logger { - return log.New(f, "", 0) -} - -// fakeFile is a mock gitos.File. 
-type fakeFile struct { - name string - dir bool - content []byte - info fakeInfo - sync.Mutex -} - -func (f fakeFile) Name() string { - return f.name -} - -func (f fakeFile) Stat() (os.FileInfo, error) { - return fakeInfo{name: f.name}, nil -} - -func (f fakeFile) Close() error { - return nil -} - -func (f fakeFile) Chmod(mode os.FileMode) error { - f.info.mode = mode - return nil -} - -func (f *fakeFile) Read(b []byte) (int, error) { - f.Lock() - defer f.Unlock() - if len(f.content) == 0 { - return 0, io.EOF - } - n := copy(b, f.content) - f.content = f.content[n:] - return n, nil -} - -func (f *fakeFile) Write(b []byte) (int, error) { - f.Lock() - defer f.Unlock() - f.content = append(f.content, b...) - return len(b), nil -} - -// fakeCmd is a mock gitos.Cmd. -type fakeCmd struct{} - -func (f fakeCmd) Run() error { - return nil -} - -func (f fakeCmd) Start() error { - return nil -} - -func (f fakeCmd) Wait() error { - return nil -} - -func (f fakeCmd) Output() ([]byte, error) { - return []byte(CmdOutput), nil -} - -func (f fakeCmd) Dir(dir string) {} - -func (f fakeCmd) Stdin(stdin io.Reader) {} - -func (f fakeCmd) Stdout(stdout io.Writer) {} - -func (f fakeCmd) Stderr(stderr io.Writer) {} - -func (f fakeCmd) Process() *os.Process { return nil } - -// fakeInfo is a mock os.FileInfo. -type fakeInfo struct { - name string - dir bool - mode os.FileMode -} - -func (f fakeInfo) Name() string { - return f.name -} - -func (f fakeInfo) Size() int64 { - return 1024 -} - -func (f fakeInfo) Mode() os.FileMode { - return f.mode -} - -func (f fakeInfo) ModTime() time.Time { - return time.Now().Truncate(time.Hour) -} - -func (f fakeInfo) IsDir() bool { - return f.dir -} - -func (f fakeInfo) Sys() interface{} { - return nil -} - -// fakeTicker is a mock gitos.Ticker -type fakeTicker struct { - *time.Ticker -} - -func (f fakeTicker) C() <-chan time.Time { - return f.Ticker.C -} - -// fakeOS is a mock gitos.OS. 
-type fakeOS struct{} - -func (f fakeOS) Mkdir(name string, perm os.FileMode) error { - return nil -} - -func (f fakeOS) MkdirAll(path string, perm os.FileMode) error { - return nil -} - -func (f fakeOS) Stat(name string) (os.FileInfo, error) { - return fakeInfo{name: name}, nil -} - -func (f fakeOS) Remove(name string) error { - return nil -} - -func (f fakeOS) LookPath(file string) (string, error) { - return "/usr/bin/" + file, nil -} - -func (f fakeOS) TempFile(dir, prefix string) (gitos.File, error) { - return &fakeFile{name: TempFileName, info: fakeInfo{name: TempFileName}}, nil -} - -func (f fakeOS) ReadDir(dirname string) ([]os.FileInfo, error) { - if f, ok := dirs[dirname]; ok { - return f, nil - } - return nil, nil -} - -func (f fakeOS) Command(name string, args ...string) gitos.Cmd { - return fakeCmd{} -} - -func (f fakeOS) Sleep(d time.Duration) { - time.Sleep(d / time.Duration(TimeSpeed)) -} - -func (f fakeOS) NewTicker(d time.Duration) gitos.Ticker { - return &fakeTicker{time.NewTicker(d / time.Duration(TimeSpeed))} -} - -func (f fakeOS) TimeSince(t time.Time) time.Duration { - return time.Since(t) * time.Duration(TimeSpeed) -} diff --git a/vendor/github.com/abiosoft/caddy-git/gogs_hook.go b/vendor/github.com/abiosoft/caddy-git/gogs_hook.go deleted file mode 100644 index 0d3452e..0000000 --- a/vendor/github.com/abiosoft/caddy-git/gogs_hook.go +++ /dev/null @@ -1,87 +0,0 @@ -package git - -import ( - "encoding/json" - "errors" - "fmt" - "io/ioutil" - "net/http" - "strings" -) - -type GogsHook struct{} - -type gsPush struct { - Ref string `json:"ref"` -} - -func (g GogsHook) DoesHandle(h http.Header) bool { - event := h.Get("X-Gogs-Event") - - // for Gogs you can only use X-Gogs-Event header to test if you could handle the request - if event != "" { - return true - } - return false -} - -func (g GogsHook) Handle(w http.ResponseWriter, r *http.Request, repo *Repo) (int, error) { - if r.Method != "POST" { - return http.StatusMethodNotAllowed, errors.New("the request had an invalid method.") - } - - // read full body - required for signature - body, err := ioutil.ReadAll(r.Body) - - if err != nil { - return http.StatusBadRequest, err - } - - event := r.Header.Get("X-Gogs-Event") - if event == "" { - return http.StatusBadRequest, errors.New("the 'X-Gogs-Event' header is required but was missing.") - } - - switch event { - case "ping": - w.Write([]byte("pong")) - case "push": - err = g.handlePush(body, repo) - if !hookIgnored(err) && err != nil { - return http.StatusBadRequest, err - } - - // return 400 if we do not handle the event type. - // This is to visually show the user a configuration error in the Gogs ui. - default: - return http.StatusBadRequest, nil - } - - return http.StatusOK, err -} - -func (g GogsHook) handlePush(body []byte, repo *Repo) error { - var push gsPush - - err := json.Unmarshal(body, &push) - if err != nil { - return err - } - - // extract the branch being pushed from the ref string - // and if it matches with our locally tracked one, pull. 
- refSlice := strings.Split(push.Ref, "/") - if len(refSlice) != 3 { - return errors.New("the push request contained an invalid reference string.") - } - - branch := refSlice[2] - if branch != repo.Branch { - return hookIgnoredError{hookType: hookName(g), err: fmt.Errorf("found different branch %v", branch)} - } - - Logger().Print("Received pull notification for the tracking branch, updating...\n") - repo.Pull() - - return nil -} diff --git a/vendor/github.com/abiosoft/caddy-git/gogs_hook_test.go b/vendor/github.com/abiosoft/caddy-git/gogs_hook_test.go deleted file mode 100644 index 8c8c528..0000000 --- a/vendor/github.com/abiosoft/caddy-git/gogs_hook_test.go +++ /dev/null @@ -1,61 +0,0 @@ -package git - -import ( - "bytes" - "net/http" - "net/http/httptest" - "testing" -) - -func TestGogsDeployPush(t *testing.T) { - repo := &Repo{Branch: "master", Hook: HookConfig{Url: "/gogs_deploy"}} - gsHook := GogsHook{} - - for i, test := range []struct { - body string - event string - responseBody string - code int - }{ - {"", "", "", 400}, - {"", "push", "", 400}, - {pushGSBodyOther, "push", "", 200}, - {pushGSBodyPartial, "push", "", 400}, - {"", "ping", "pong", 200}, - } { - - req, err := http.NewRequest("POST", "/gogs_deploy", bytes.NewBuffer([]byte(test.body))) - if err != nil { - t.Fatalf("Test %v: Could not create HTTP request: %v", i, err) - } - - if test.event != "" { - req.Header.Add("X-Gogs-Event", test.event) - } - - rec := httptest.NewRecorder() - - code, err := gsHook.Handle(rec, req, repo) - - if code != test.code { - t.Errorf("Test %d: Expected response code to be %d but was %d", i, test.code, code) - } - - if rec.Body.String() != test.responseBody { - t.Errorf("Test %d: Expected response body to be '%v' but was '%v'", i, test.responseBody, rec.Body.String()) - } - } - -} - -var pushGSBodyPartial = ` -{ - "ref": "" -} -` - -var pushGSBodyOther = ` -{ - "ref": "refs/heads/some-other-branch" -} -` diff --git a/vendor/github.com/abiosoft/caddy-git/logger.go b/vendor/github.com/abiosoft/caddy-git/logger.go deleted file mode 100644 index 2500239..0000000 --- a/vendor/github.com/abiosoft/caddy-git/logger.go +++ /dev/null @@ -1,38 +0,0 @@ -package git - -import ( - "log" - "os" - "sync" -) - -// logger is used to log errors -var logger = &gitLogger{l: log.New(os.Stderr, "", log.LstdFlags)} - -// gitLogger wraps log.Logger with mutex for thread safety. -type gitLogger struct { - l *log.Logger - sync.RWMutex -} - -func (g *gitLogger) logger() *log.Logger { - g.RLock() - defer g.RUnlock() - return g.l -} - -func (g *gitLogger) setLogger(l *log.Logger) { - g.Lock() - g.l = l - g.Unlock() -} - -// Logger gets the currently available logger -func Logger() *log.Logger { - return logger.logger() -} - -// SetLogger sets the current logger to l -func SetLogger(l *log.Logger) { - logger.setLogger(l) -} diff --git a/vendor/github.com/abiosoft/caddy-git/os.go b/vendor/github.com/abiosoft/caddy-git/os.go deleted file mode 100644 index 3b42dc7..0000000 --- a/vendor/github.com/abiosoft/caddy-git/os.go +++ /dev/null @@ -1,12 +0,0 @@ -package git - -import "github.com/abiosoft/caddy-git/gitos" - -// gos is the OS used by git. -var gos gitos.OS = gitos.GitOS{} - -// SetOS sets the OS to be used. Intended to be used for tests -// to abstract OS level git actions. 
-func SetOS(os gitos.OS) { - gos = os -} diff --git a/vendor/github.com/abiosoft/caddy-git/scripts.go b/vendor/github.com/abiosoft/caddy-git/scripts.go deleted file mode 100644 index 0016283..0000000 --- a/vendor/github.com/abiosoft/caddy-git/scripts.go +++ /dev/null @@ -1,116 +0,0 @@ -package git - -import ( - "fmt" - "os" - "strings" - "sync" - - "github.com/abiosoft/caddy-git/gitos" -) - -var ( - // gitBinary holds the absolute path to git executable - gitBinary string - - // shell holds the shell to be used. Either sh or bash. - shell string - - // initMutex prevents parallel attempt to validate - // git requirements. - initMutex = sync.Mutex{} -) - -// Init validates git installation, locates the git executable -// binary in PATH and check for available shell to use. -func Init() error { - // prevent concurrent call - initMutex.Lock() - defer initMutex.Unlock() - - // if validation has been done before and binary located in - // PATH, return. - if gitBinary != "" { - return nil - } - - // locate git binary in path - var err error - if gitBinary, err = gos.LookPath("git"); err != nil { - return fmt.Errorf("git middleware requires git installed. Cannot find git binary in PATH") - } - - // locate bash in PATH. If not found, fallback to sh. - // If neither is found, return error. - shell = "bash" - if _, err = gos.LookPath("bash"); err != nil { - shell = "sh" - if _, err = gos.LookPath("sh"); err != nil { - return fmt.Errorf("git middleware requires either bash or sh.") - } - } - return nil -} - -// writeScriptFile writes content to a temporary file. -// It changes the temporary file mode to executable and -// closes it to prepare it for execution. -func writeScriptFile(content []byte) (file gitos.File, err error) { - if file, err = gos.TempFile("", "caddy"); err != nil { - return nil, err - } - if _, err = file.Write(content); err != nil { - return nil, err - } - if err = file.Chmod(os.FileMode(0755)); err != nil { - return nil, err - } - return file, file.Close() -} - -// gitWrapperScript forms content for git.sh script -func gitWrapperScript() []byte { - return []byte(fmt.Sprintf(`#!/bin/%v - -# The MIT License (MIT) -# Copyright (c) 2013 Alvin Abad - -if [ $# -eq 0 ]; then - echo "Git wrapper script that can specify an ssh-key file -Usage: - git.sh -i ssh-key-file git-command - " - exit 1 -fi - -# remove temporary file on exit -trap 'rm -f /tmp/.git_ssh.$$' 0 - -if [ "$1" = "-i" ]; then - SSH_KEY=$2; shift; shift - echo -e "#!/bin/%v \n \ - ssh -i $SSH_KEY \$@" > /tmp/.git_ssh.$$ - chmod +x /tmp/.git_ssh.$$ - export GIT_SSH=/tmp/.git_ssh.$$ -fi - -# in case the git command is repeated -[ "$1" = "git" ] && shift - -# Run the git command -%v "$@" - -`, shell, shell, gitBinary)) -} - -// bashScript forms content of bash script to clone or update a repo using ssh -func bashScript(gitShPath string, repo *Repo, params []string) []byte { - return []byte(fmt.Sprintf(`#!/bin/%v - -mkdir -p ~/.ssh; -touch ~/.ssh/known_hosts; -ssh-keyscan -t rsa,dsa %v 2>&1 | sort -u - ~/.ssh/known_hosts > ~/.ssh/tmp_hosts; -cat ~/.ssh/tmp_hosts >> ~/.ssh/known_hosts; -%v -i %v %v; -`, shell, repo.Host, gitShPath, repo.KeyPath, strings.Join(params, " "))) -} diff --git a/vendor/github.com/abiosoft/caddy-git/service.go b/vendor/github.com/abiosoft/caddy-git/service.go deleted file mode 100644 index dd7b439..0000000 --- a/vendor/github.com/abiosoft/caddy-git/service.go +++ /dev/null @@ -1,90 +0,0 @@ -package git - -import ( - "sync" - - "github.com/abiosoft/caddy-git/gitos" -) - -var ( - // Services holds all git 
pulling services and provides the function to - // stop them. - Services = &services{} -) - -// repoService is the service that runs in background and periodically -// pull from the repository. -type repoService struct { - repo *Repo - ticker gitos.Ticker // ticker to tick at intervals - halt chan struct{} // channel to notify service to halt and stop pulling. -} - -// Start starts a new background service to pull periodically. -func Start(repo *Repo) { - service := &repoService{ - repo, - gos.NewTicker(repo.Interval), - make(chan struct{}), - } - go func(s *repoService) { - for { - select { - case <-s.ticker.C(): - err := repo.Pull() - if err != nil { - Logger().Println(err) - } - case <-s.halt: - s.ticker.Stop() - return - } - } - }(service) - - // add to services to make it stoppable - Services.add(service) -} - -// services stores all repoServices -type services struct { - services []*repoService - sync.Mutex -} - -// add adds a new service to list of services. -func (s *services) add(r *repoService) { - s.Lock() - defer s.Unlock() - - s.services = append(s.services, r) -} - -// Stop stops at most `limit` running services pulling from git repo at -// repoURL. It waits until the service is terminated before returning. -// If limit is less than zero, it is ignored. -// TODO find better ways to identify repos -func (s *services) Stop(repoURL string, limit int) { - s.Lock() - defer s.Unlock() - - // locate repos - for i, j := 0, 0; i < len(s.services) && ((limit >= 0 && j < limit) || limit < 0); i++ { - service := s.services[i] - if service.repo.URL == repoURL { - // send halt signal - service.halt <- struct{}{} - s.services[i] = nil - j++ - } - } - - // remove them from repos list - services := s.services[:0] - for _, s := range s.services { - if s != nil { - services = append(services, s) - } - } - s.services = services -} diff --git a/vendor/github.com/abiosoft/caddy-git/service_test.go b/vendor/github.com/abiosoft/caddy-git/service_test.go deleted file mode 100644 index 7b6f520..0000000 --- a/vendor/github.com/abiosoft/caddy-git/service_test.go +++ /dev/null @@ -1,67 +0,0 @@ -package git - -import ( - "fmt" - "testing" - "time" - - "github.com/abiosoft/caddy-git/gittest" -) - -func init() { - SetOS(gittest.FakeOS) -} - -func TestServices(t *testing.T) { - repo := &Repo{URL: "git@github.com", Interval: time.Second} - - Start(repo) - if len(Services.services) != 1 { - t.Errorf("Expected 1 service, found %v", len(Services.services)) - } - - Services.Stop(repo.URL, 1) - if len(Services.services) != 0 { - t.Errorf("Expected 1 service, found %v", len(Services.services)) - } - - repos := make([]*Repo, 5) - for i := 0; i < 5; i++ { - repos[i] = &Repo{URL: fmt.Sprintf("test%v", i), Interval: time.Second * 2} - Start(repos[i]) - if len(Services.services) != i+1 { - t.Errorf("Expected %v service(s), found %v", i+1, len(Services.services)) - } - } - - gos.Sleep(time.Second * 5) - Services.Stop(repos[0].URL, 1) - if len(Services.services) != 4 { - t.Errorf("Expected %v service(s), found %v", 4, len(Services.services)) - } - - repo = &Repo{URL: "git@github.com", Interval: time.Second} - Start(repo) - if len(Services.services) != 5 { - t.Errorf("Expected %v service(s), found %v", 5, len(Services.services)) - } - - repo = &Repo{URL: "git@github.com", Interval: time.Second * 2} - Start(repo) - if len(Services.services) != 6 { - t.Errorf("Expected %v service(s), found %v", 6, len(Services.services)) - } - - gos.Sleep(time.Second * 5) - Services.Stop(repo.URL, -1) - if len(Services.services) != 4 { - 
t.Errorf("Expected %v service(s), found %v", 4, len(Services.services)) - } - - for _, repo := range repos { - Services.Stop(repo.URL, -1) - } - if len(Services.services) != 0 { - t.Errorf("Expected %v service(s), found %v", 0, len(Services.services)) - } -} diff --git a/vendor/github.com/abiosoft/caddy-git/setup.go b/vendor/github.com/abiosoft/caddy-git/setup.go deleted file mode 100644 index f01a829..0000000 --- a/vendor/github.com/abiosoft/caddy-git/setup.go +++ /dev/null @@ -1,323 +0,0 @@ -package git - -import ( - "fmt" - "net/url" - "path" - "path/filepath" - "runtime" - "strconv" - "strings" - "time" - - "github.com/mholt/caddy" - "github.com/mholt/caddy/caddyhttp/httpserver" -) - -const ( - // DefaultInterval is the minimum interval to delay before - // requesting another git pull - DefaultInterval time.Duration = time.Hour * 1 -) - -func init() { - caddy.RegisterPlugin("git", caddy.Plugin{ - ServerType: "http", - Action: setup, - }) -} - -// setup configures a new Git service routine. -func setup(c *caddy.Controller) error { - git, err := parse(c) - if err != nil { - return err - } - - // repos configured with webhooks - var hookRepos []*Repo - - // functions to execute at startup - var startupFuncs []func() error - - // loop through all repos and and start monitoring - for i := range git { - repo := git.Repo(i) - - // If a HookUrl is set, we switch to event based pulling. - // Install the url handler - if repo.Hook.Url != "" { - - hookRepos = append(hookRepos, repo) - - startupFuncs = append(startupFuncs, func() error { - return repo.Pull() - }) - - } else { - startupFuncs = append(startupFuncs, func() error { - - // Start service routine in background - Start(repo) - - // Do a pull right away to return error - return repo.Pull() - }) - } - } - - // ensure the functions are executed once per server block - // for cases like server1.com, server2.com { ... 
} - c.OncePerServerBlock(func() error { - for i := range startupFuncs { - c.OnStartup(startupFuncs[i]) - } - return nil - }) - - // if there are repo(s) with webhook - // return handler - if len(hookRepos) > 0 { - webhook := &WebHook{Repos: hookRepos} - httpserver.GetConfig(c.Key).AddMiddleware(func(next httpserver.Handler) httpserver.Handler { - webhook.Next = next - return webhook - }) - } - - return nil -} - -func parse(c *caddy.Controller) (Git, error) { - var git Git - - config := httpserver.GetConfig(c.Key) - for c.Next() { - repo := &Repo{Branch: "master", Interval: DefaultInterval, Path: config.Root} - - args := c.RemainingArgs() - - switch len(args) { - case 2: - repo.Path = filepath.Clean(config.Root + string(filepath.Separator) + args[1]) - fallthrough - case 1: - u, err := validateURL(args[0]) - if err != nil { - return nil, err - } - repo.URL = u - } - - for c.NextBlock() { - switch c.Val() { - case "repo": - if !c.NextArg() { - return nil, c.ArgErr() - } - u, err := validateURL(c.Val()) - if err != nil { - return nil, err - } - repo.URL = u - case "path": - if !c.NextArg() { - return nil, c.ArgErr() - } - repo.Path = filepath.Clean(config.Root + string(filepath.Separator) + c.Val()) - case "branch": - if !c.NextArg() { - return nil, c.ArgErr() - } - repo.Branch = c.Val() - case "key": - if !c.NextArg() { - return nil, c.ArgErr() - } - repo.KeyPath = c.Val() - case "interval": - if !c.NextArg() { - return nil, c.ArgErr() - } - t, _ := strconv.Atoi(c.Val()) - if t > 0 { - repo.Interval = time.Duration(t) * time.Second - } - case "hook": - if !c.NextArg() { - return nil, c.ArgErr() - } - repo.Hook.Url = c.Val() - - // optional secret for validation - if c.NextArg() { - repo.Hook.Secret = c.Val() - } - case "hook_type": - if !c.NextArg() { - return nil, c.ArgErr() - } - t := c.Val() - if _, ok := handlers[t]; !ok { - return nil, c.Errf("invalid hook type %v", t) - } - repo.Hook.Type = t - case "then": - if !c.NextArg() { - return nil, c.ArgErr() - } - command := c.Val() - args := c.RemainingArgs() - repo.Then = append(repo.Then, NewThen(command, args...)) - case "then_long": - if !c.NextArg() { - return nil, c.ArgErr() - } - command := c.Val() - args := c.RemainingArgs() - repo.Then = append(repo.Then, NewLongThen(command, args...)) - default: - return nil, c.ArgErr() - } - } - - // if repo is not specified, return error - if repo.URL == "" { - return nil, c.ArgErr() - } - - // if private key is not specified, convert repository URL to https - // to avoid ssh authentication - // else validate git URL - // Note: private key support not yet available on Windows - var err error - if repo.KeyPath == "" { - repo.URL, repo.Host, err = sanitizeHTTP(repo.URL) - } else { - repo.URL, repo.Host, err = sanitizeSSH(repo.URL) - // TODO add Windows support for private repos - if runtime.GOOS == "windows" { - return nil, fmt.Errorf("private repository not yet supported on Windows") - } - } - - if err != nil { - return nil, err - } - - // validate git requirements - if err = Init(); err != nil { - return nil, err - } - - // prepare repo for use - if err = repo.Prepare(); err != nil { - return nil, err - } - - git = append(git, repo) - - } - - return git, nil -} - -// validateURL validates repoUrl is a valid git url and appends -// with .git if missing. 
-func validateURL(repoURL string) (string, error) { - u, err := url.Parse(repoURL) - if err != nil { - return "", err - } - if u.Scheme == "" { - u.Scheme = "https" - } - - switch u.Scheme { - case "https", "http", "ssh": - default: - return "", fmt.Errorf("Invalid url scheme %s. If url contains port, scheme is required.", u.Scheme) - } - - if !strings.HasSuffix(u.String(), ".git") { - return u.String() + ".git", nil - } - return u.String(), nil -} - -// sanitizeHTTP cleans up repository URL and converts to https format -// if currently in ssh format. -// Returns sanitized url, hostName (e.g. github.com, bitbucket.com) -// and possible error -func sanitizeHTTP(repoURL string) (string, string, error) { - u, err := url.Parse(repoURL) - if err != nil { - return "", "", err - } - - // ensure the url is not ssh - if u.Scheme == "ssh" { - u.Scheme = "https" - } - - // convert to http format if in ssh format - if strings.Contains(u.Host, ":") { - s := strings.SplitN(u.Host, ":", 2) - // alter path and host if we're sure its not a port - if _, err := strconv.Atoi(s[1]); err != nil { - u.Host = s[0] - u.Path = path.Join(s[1], u.Path) - } - } - - // Bitbucket require the user to be set into the HTTP URL - if u.Host == "bitbucket.org" && u.User == nil { - segments := strings.Split(u.Path, "/") - u.User = url.User(segments[1]) - } - - return u.String(), u.Host, nil -} - -// sanitizeSSH cleans up repository url and converts to ssh format for private -// repositories if required. -// Returns sanitized url, hostName (e.g. github.com, bitbucket.com) -// and possible error -func sanitizeSSH(repoURL string) (string, string, error) { - u, err := url.Parse(repoURL) - if err != nil { - return "", "", err - } - - u.Scheme = "" - host := u.Host - // convert to ssh format if not in ssh format - if !strings.Contains(u.Host, ":") { - if u.Path[0] == '/' { - u.Path = ":" + u.Path[1:] - } else if u.Path[0] != ':' { - u.Path = ":" + u.Path - } - } else { - s := strings.SplitN(u.Host, ":", 2) - host = s[0] - // if port is set, ssh scheme is required - if _, err := strconv.Atoi(s[1]); err == nil { - u.Scheme = "ssh" - } - } - - // ensure user is set - if u.User == nil { - u.User = url.User("git") - } - - // remove unintended `/` added by url.String and `//` if scheme is not ssh. 
- // TODO find a cleaner way - replacer := strings.NewReplacer("/:", ":", "//", "") - if u.Scheme == "ssh" { - replacer = strings.NewReplacer("/:", ":") - } - repoURL = replacer.Replace(u.String()) - return repoURL, host, nil -} diff --git a/vendor/github.com/abiosoft/caddy-git/setup_test.go b/vendor/github.com/abiosoft/caddy-git/setup_test.go deleted file mode 100644 index bffed73..0000000 --- a/vendor/github.com/abiosoft/caddy-git/setup_test.go +++ /dev/null @@ -1,285 +0,0 @@ -package git - -import ( - "fmt" - "io/ioutil" - "strings" - "testing" - "time" - - "github.com/abiosoft/caddy-git/gittest" - "github.com/mholt/caddy" -) - -// init sets the OS used to fakeOS -func init() { - SetOS(gittest.FakeOS) -} - -func TestGitSetup(t *testing.T) { - c := caddy.NewTestController(`git git@github.com:mholt/caddy.git`) - err := setup(c) - check(t, err) -} - -func TestGitParse(t *testing.T) { - tests := []struct { - input string - shouldErr bool - expected *Repo - }{ - {`git git@github.com:user/repo`, false, &Repo{ - URL: "https://git@github.com/user/repo.git", - }}, - {`git github.com/user/repo`, false, &Repo{ - URL: "https://github.com/user/repo.git", - }}, - {`git git@github.com/user/repo`, false, &Repo{ - URL: "https://git@github.com/user/repo.git", - }}, - {`git http://github.com/user/repo`, false, &Repo{ - URL: "http://github.com/user/repo.git", - }}, - {`git http://github.com:8888/user/repo`, false, &Repo{ - URL: "http://github.com:8888/user/repo.git", - }}, - {`git https://github.com/user/repo`, false, &Repo{ - URL: "https://github.com/user/repo.git", - }}, - {`git http://github.com/user/repo { - key ~/.key - }`, false, &Repo{ - KeyPath: "~/.key", - URL: "git@github.com:user/repo.git", - }}, - {`git git@github.com:user/repo { - key ~/.key - }`, false, &Repo{ - KeyPath: "~/.key", - URL: "git@github.com:user/repo.git", - }}, - {`git `, true, nil}, - {`git { - }`, true, nil}, - {`git { - repo git@github.com:user/repo.git`, true, nil}, - {`git { - repo git@github.com:user/repo - key ~/.key - }`, false, &Repo{ - KeyPath: "~/.key", - URL: "git@github.com:user/repo.git", - }}, - {`git { - repo git@github.com:user/repo - key ~/.key - interval 600 - }`, false, &Repo{ - KeyPath: "~/.key", - URL: "git@github.com:user/repo.git", - Interval: time.Second * 600, - }}, - {`git { - repo git@github.com:user/repo - branch dev - }`, false, &Repo{ - Branch: "dev", - URL: "https://git@github.com/user/repo.git", - }}, - {`git { - key ~/.key - }`, true, nil}, - {`git { - repo git@github.com:user/repo - key ~/.key - then echo hello world - }`, false, &Repo{ - KeyPath: "~/.key", - URL: "git@github.com:user/repo.git", - Then: []Then{NewThen("echo", "hello world")}, - }}, - {`git https://user@bitbucket.org/user/repo.git`, false, &Repo{ - URL: "https://user@bitbucket.org/user/repo.git", - }}, - {`git https://user@bitbucket.org/user/repo.git { - key ~/.key - }`, false, &Repo{ - KeyPath: "~/.key", - URL: "user@bitbucket.org:user/repo.git", - }}, - {`git git@bitbucket.org:user/repo.git { - key ~/.key - }`, false, &Repo{ - KeyPath: "~/.key", - URL: "git@bitbucket.org:user/repo.git", - }}, - {`git ssh://git@bitbucket.org:user/repo.git { - key ~/.key - }`, false, &Repo{ - KeyPath: "~/.key", - URL: "git@bitbucket.org:user/repo.git", - }}, - {`git git@bitbucket.org:2222/user/repo.git { - key ~/.key - }`, false, &Repo{ - KeyPath: "~/.key", - URL: "ssh://git@bitbucket.org:2222/user/repo.git", - }}, - {`git git@bitbucket.org:2222/user/repo.git { - key ~/.key - hook_type gogs - }`, false, &Repo{ - KeyPath: "~/.key", - URL: 
"ssh://git@bitbucket.org:2222/user/repo.git", - Hook: HookConfig{ - Type: "gogs", - }, - }}, - {`git git@bitbucket.org:2222/user/repo.git { - key ~/.key - hook /webhook - hook_type gogs - }`, false, &Repo{ - KeyPath: "~/.key", - URL: "ssh://git@bitbucket.org:2222/user/repo.git", - Hook: HookConfig{ - Url: "/webhook", - Type: "gogs", - }, - }}, - {`git git@bitbucket.org:2222/user/repo.git { - key ~/.key - hook /webhook some-secrets - hook_type gogs - }`, false, &Repo{ - KeyPath: "~/.key", - URL: "ssh://git@bitbucket.org:2222/user/repo.git", - Hook: HookConfig{ - Url: "/webhook", - Secret: "some-secrets", - Type: "gogs", - }, - }}, - } - - for i, test := range tests { - c := caddy.NewTestController(test.input) - git, err := parse(c) - if !test.shouldErr && err != nil { - t.Errorf("Test %v should not error but found %v", i, err) - continue - } - if test.shouldErr && err == nil { - t.Errorf("Test %v should error but found nil", i) - continue - } - repo := git.Repo(0) - if !reposEqual(test.expected, repo) { - t.Errorf("Test %v expects %v but found %v", i, test.expected, repo) - } - } -} - -func TestIntervals(t *testing.T) { - tests := []string{ - `git git@github.com:user/repo { interval 10 }`, - `git git@github.com:user/repo { interval 5 }`, - `git git@github.com:user/repo { interval 2 }`, - `git git@github.com:user/repo { interval 1 }`, - `git git@github.com:user/repo { interval 6 }`, - } - - for i, test := range tests { - SetLogger(gittest.NewLogger(gittest.Open("file"))) - c1 := caddy.NewTestController(test) - git, err := parse(c1) - check(t, err) - repo := git.Repo(0) - - c2 := caddy.NewTestController(test) - err = setup(c2) - check(t, err) - - // start startup services - err = func() error { - // Start service routine in background - Start(repo) - // Do a pull right away to return error - return repo.Pull() - }() - check(t, err) - - // wait for first background pull - gittest.Sleep(time.Millisecond * 100) - - // switch logger to test file - logFile := gittest.Open("file") - SetLogger(gittest.NewLogger(logFile)) - - // sleep for the interval - gittest.Sleep(repo.Interval) - - // get log output - out, err := ioutil.ReadAll(logFile) - check(t, err) - - // if greater than minimum interval - if repo.Interval >= time.Second*5 { - expected := `https://git@github.com/user/repo.git pulled. 
-No new changes.` - - // ensure pull is done by tracing the output - if expected != strings.TrimSpace(string(out)) { - t.Errorf("Test %v: Expected %v found %v", i, expected, string(out)) - } - } else { - // ensure pull is ignored by confirming no output - if string(out) != "" { - t.Errorf("Test %v: Expected no output but found %v", i, string(out)) - } - } - - // stop background thread monitor - Services.Stop(repo.URL, 1) - - } - -} - -func reposEqual(expected, repo *Repo) bool { - thenStr := func(then []Then) string { - var str []string - for _, t := range then { - str = append(str, t.Command()) - } - return fmt.Sprint(str) - } - if expected == nil { - return repo == nil - } - if expected.Branch != "" && expected.Branch != repo.Branch { - return false - } - if expected.Host != "" && expected.Host != repo.Host { - return false - } - if expected.Interval != 0 && expected.Interval != repo.Interval { - return false - } - if expected.KeyPath != "" && expected.KeyPath != repo.KeyPath { - return false - } - if expected.Path != "" && expected.Path != repo.Path { - return false - } - if expected.Then != nil && thenStr(expected.Then) != thenStr(repo.Then) { - return false - } - if expected.URL != "" && expected.URL != repo.URL { - return false - } - if fmt.Sprint(expected.Hook) != fmt.Sprint(repo.Hook) { - return false - } - return true -} diff --git a/vendor/github.com/abiosoft/caddy-git/travis_hook.go b/vendor/github.com/abiosoft/caddy-git/travis_hook.go deleted file mode 100644 index 66adeda..0000000 --- a/vendor/github.com/abiosoft/caddy-git/travis_hook.go +++ /dev/null @@ -1,95 +0,0 @@ -package git - -import ( - "crypto/sha256" - "encoding/hex" - "encoding/json" - "errors" - "fmt" - "net/http" - "time" -) - -type TravisHook struct{} - -func (t TravisHook) DoesHandle(h http.Header) bool { - return h.Get("Travis-Repo-Slug") != "" -} - -func (t TravisHook) Handle(w http.ResponseWriter, r *http.Request, repo *Repo) (int, error) { - if r.Method != "POST" { - return http.StatusMethodNotAllowed, errors.New("the request had an invalid method") - } - if err := t.handleSignature(r, repo.Hook.Secret); err != nil { - return http.StatusBadRequest, err - } - if err := r.ParseForm(); err != nil { - return http.StatusBadRequest, err - } - payload := r.FormValue("payload") - if payload == "" { - return http.StatusBadRequest, fmt.Errorf("Payload required") - } - data := &travisPayload{} - if err := json.Unmarshal([]byte(payload), data); err != nil { - return http.StatusBadRequest, err - } - - // ignored webhooks - err := hookIgnoredError{hookType: hookName(t)} - if data.Type != "push" || data.StatusMessage != "Passed" { - err.err = fmt.Errorf("Ignoring payload with wrong status or type.") - return 200, err - } - if repo.Branch != "" && data.Branch != repo.Branch { - err.err = fmt.Errorf("Ignoring push for branch %s", data.Branch) - return 200, err - } - - // attempt pull - if err := repo.Pull(); err != nil { - return http.StatusInternalServerError, err - } - if err := repo.checkoutCommit(data.Commit); err != nil { - return http.StatusInternalServerError, err - } - return 200, nil -} - -type travisPayload struct { - ID int `json:"id"` - Number string `json:"number"` - Status int `json:"status"` - Result int `json:"result"` - StatusMessage string `json:"status_message"` - ResultMessage string `json:"result_message"` - StartedAt time.Time `json:"started_at"` - FinishedAt time.Time `json:"finished_at"` - Duration int `json:"duration"` - BuildURL string `json:"build_url"` - Branch string `json:"branch"` - Type string 
`json:"type"` - State string `json:"state"` - Commit string `json:"commit"` -} - -// Check for an authorization signature in the request. Reject if not present. If validation required, check the sha -func (t TravisHook) handleSignature(r *http.Request, secret string) error { - signature := r.Header.Get("Authorization") - if signature == "" { - return errors.New("request sent no authorization signature") - } - if secret == "" { - Logger().Print("Unable to verify request signature. Secret not set in caddyfile!\n") - return nil - } - - content := r.Header.Get("Travis-Repo-Slug") + secret - hash := sha256.Sum256([]byte(content)) - expectedMac := hex.EncodeToString(hash[:]) - if signature != expectedMac { - fmt.Println(signature, expectedMac) - return errors.New("Invalid authorization header") - } - return nil -} diff --git a/vendor/github.com/abiosoft/caddy-git/webhook.go b/vendor/github.com/abiosoft/caddy-git/webhook.go deleted file mode 100644 index a1ac78e..0000000 --- a/vendor/github.com/abiosoft/caddy-git/webhook.go +++ /dev/null @@ -1,124 +0,0 @@ -package git - -import ( - "errors" - "fmt" - "net/http" - - "github.com/mholt/caddy/caddyhttp/httpserver" -) - -// WebHook is middleware for handling web hooks of git providers -type WebHook struct { - Repos []*Repo - Next httpserver.Handler -} - -// HookConfig is a webhook handler configuration. -type HookConfig struct { - Url string // url to listen on for webhooks - Secret string // secret to validate hooks - Type string // type of Webhook -} - -// hookIgnoredError is returned when a webhook is ignored by the -// webhook handler. -type hookIgnoredError struct { - hookType string - err error -} - -// Error satisfies error interface -func (h hookIgnoredError) Error() string { - return fmt.Sprintf("%s webhook ignored. Error: %v", h.hookType, h.err) -} - -// hookIgnored checks if err is of type hookIgnoredError. -func hookIgnored(err error) bool { - _, ok := err.(hookIgnoredError) - return ok -} - -// hookName returns the name of the hookHanlder h. -func hookName(h hookHandler) string { - for name, handler := range handlers { - if handler == h { - return name - } - } - return "" -} - -// hookHandler is interface for specific providers to implement. -type hookHandler interface { - DoesHandle(http.Header) bool - Handle(w http.ResponseWriter, r *http.Request, repo *Repo) (int, error) -} - -// handlers stores all registered hookHandlers. -// map key corresponds to expected config name. -// -// register hook handlers here. -var handlers = map[string]hookHandler{ - "github": GithubHook{}, - "gitlab": GitlabHook{}, - "bitbucket": BitbucketHook{}, - "generic": GenericHook{}, - "travis": TravisHook{}, - "gogs": GogsHook{}, -} - -// defaultHandlers is the list of handlers to choose from -// if handler type is not specified in config. -var defaultHandlers = []string{ - "github", - "gitlab", - "bitbucket", - "travis", - "gogs", -} - -// ServeHTTP implements the middlware.Handler interface. -func (h WebHook) ServeHTTP(w http.ResponseWriter, r *http.Request) (int, error) { - - for _, repo := range h.Repos { - - if r.URL.Path == repo.Hook.Url { - - // if handler type is specified. - if handler, ok := handlers[repo.Hook.Type]; ok { - if !handler.DoesHandle(r.Header) { - return http.StatusBadRequest, errors.New(http.StatusText(http.StatusBadRequest)) - } - status, err := handler.Handle(w, r, repo) - // if the webhook is ignored, log it and allow request to continue. 
- if hookIgnored(err) { - Logger().Println(err) - err = nil - } - return status, err - } - - // auto detect handler - for _, h := range defaultHandlers { - // if a handler indicates it does handle the request, - // we do not try other handlers. Only one handler ever - // handles a specific request. - if handlers[h].DoesHandle(r.Header) { - status, err := handlers[h].Handle(w, r, repo) - // if the webhook is ignored, log it and allow request to continue. - if hookIgnored(err) { - Logger().Println(err) - err = nil - } - return status, err - } - } - - // no compatible handler - Logger().Println("No compatible handler found. Consider enabling generic handler with 'hook_type generic'.") - } - } - - return h.Next.ServeHTTP(w, r) -} diff --git a/vendor/github.com/dgrijalva/jwt-go/.gitignore b/vendor/github.com/dgrijalva/jwt-go/.gitignore deleted file mode 100644 index 80bed65..0000000 --- a/vendor/github.com/dgrijalva/jwt-go/.gitignore +++ /dev/null @@ -1,4 +0,0 @@ -.DS_Store -bin - - diff --git a/vendor/github.com/dgrijalva/jwt-go/.travis.yml b/vendor/github.com/dgrijalva/jwt-go/.travis.yml deleted file mode 100644 index bde823d..0000000 --- a/vendor/github.com/dgrijalva/jwt-go/.travis.yml +++ /dev/null @@ -1,8 +0,0 @@ -language: go - -go: - - 1.3 - - 1.4 - - 1.5 - - 1.6 - - tip diff --git a/vendor/github.com/dgrijalva/jwt-go/LICENSE b/vendor/github.com/dgrijalva/jwt-go/LICENSE deleted file mode 100644 index df83a9c..0000000 --- a/vendor/github.com/dgrijalva/jwt-go/LICENSE +++ /dev/null @@ -1,8 +0,0 @@ -Copyright (c) 2012 Dave Grijalva - -Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. - diff --git a/vendor/github.com/dgrijalva/jwt-go/README.md b/vendor/github.com/dgrijalva/jwt-go/README.md deleted file mode 100644 index 88448eb..0000000 --- a/vendor/github.com/dgrijalva/jwt-go/README.md +++ /dev/null @@ -1,100 +0,0 @@ -A [go](http://www.golang.org) (or 'golang' for search engine friendliness) implementation of [JSON Web Tokens](http://self-issued.info/docs/draft-jones-json-web-token.html) - -[![Build Status](https://travis-ci.org/dgrijalva/jwt-go.svg?branch=master)](https://travis-ci.org/dgrijalva/jwt-go) - -**BREAKING CHANGES COMING:*** Version 3.0.0 is almost complete. It will include _a lot_ of changes including a few that break the API. We've tried to break as few things as possible, so there should just be a few type signature changes. A full list of breaking changes will be available before 3.0.0 lands. If you would like to have any input befor 3.0.0 is locked, now's the time to review and provide feedback. 
- -**NOTICE:** A vulnerability in JWT was [recently published](https://auth0.com/blog/2015/03/31/critical-vulnerabilities-in-json-web-token-libraries/). As this library doesn't force users to validate the `alg` is what they expected, it's possible your usage is effected. There will be an update soon to remedy this, and it will likey require backwards-incompatible changes to the API. In the short term, please make sure your implementation verifies the `alg` is what you expect. - -## What the heck is a JWT? - -In short, it's a signed JSON object that does something useful (for example, authentication). It's commonly used for `Bearer` tokens in Oauth 2. A token is made of three parts, separated by `.`'s. The first two parts are JSON objects, that have been [base64url](http://tools.ietf.org/html/rfc4648) encoded. The last part is the signature, encoded the same way. - -The first part is called the header. It contains the necessary information for verifying the last part, the signature. For example, which encryption method was used for signing and what key was used. - -The part in the middle is the interesting bit. It's called the Claims and contains the actual stuff you care about. Refer to [the RFC](http://self-issued.info/docs/draft-jones-json-web-token.html) for information about reserved keys and the proper way to add your own. - -## What's in the box? - -This library supports the parsing and verification as well as the generation and signing of JWTs. Current supported signing algorithms are HMAC SHA, RSA, RSA-PSS, and ECDSA, though hooks are present for adding your own. - -## Parse and Verify - -Parsing and verifying tokens is pretty straight forward. You pass in the token and a function for looking up the key. This is done as a callback since you may need to parse the token to find out what signing method and key was used. - -```go - token, err := jwt.Parse(myToken, func(token *jwt.Token) (interface{}, error) { - // Don't forget to validate the alg is what you expect: - if _, ok := token.Method.(*jwt.SigningMethodRSA); !ok { - return nil, fmt.Errorf("Unexpected signing method: %v", token.Header["alg"]) - } - return myLookupKey(token.Header["kid"]), nil - }) - - if err == nil && token.Valid { - deliverGoodness("!") - } else { - deliverUtterRejection(":(") - } -``` - -## Create a token - -```go - // Create the token - token := jwt.New(jwt.SigningMethodHS256) - // Set some claims - token.Claims["foo"] = "bar" - token.Claims["exp"] = time.Now().Add(time.Hour * 72).Unix() - // Sign and get the complete encoded token as a string - tokenString, err := token.SignedString(mySigningKey) -``` - -## Extensions - -This library publishes all the necessary components for adding your own signing methods. Simply implement the `SigningMethod` interface and register a factory method using `RegisterSigningMethod`. - -Here's an example of an extension that integrates with the Google App Engine signing tools: https://github.com/someone1/gcp-jwt-go - -## Project Status & Versioning - -This library is considered production ready. Feedback and feature requests are appreciated. The API should be considered stable. There should be very few backwards-incompatible changes outside of major version updates (and only with good reason). - -This project uses [Semantic Versioning 2.0.0](http://semver.org). Accepted pull requests will land on `master`. Periodically, versions will be tagged from `master`. You can find all the releases on [the project releases page](https://github.com/dgrijalva/jwt-go/releases). 
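The Extensions section above says new algorithms are added by implementing the `SigningMethod` interface and registering a factory with `RegisterSigningMethod`. A minimal sketch against the vendored v2 API; the method below simply delegates to the built-in HMAC-SHA256 implementation under a custom `alg` name and exists only to show the shape of the interface:

```go
package main

import (
	"fmt"

	jwt "github.com/dgrijalva/jwt-go"
)

// exampleMethod is a hypothetical signing method that reuses the HS256
// primitives but advertises its own alg identifier in the token header.
type exampleMethod struct{}

func (exampleMethod) Alg() string { return "EXHS256" }

func (exampleMethod) Sign(signingString string, key interface{}) (string, error) {
	return jwt.SigningMethodHS256.Sign(signingString, key)
}

func (exampleMethod) Verify(signingString, signature string, key interface{}) error {
	return jwt.SigningMethodHS256.Verify(signingString, signature, key)
}

func main() {
	// Register a factory so jwt.GetSigningMethod("EXHS256") can find it.
	jwt.RegisterSigningMethod(exampleMethod{}.Alg(), func() jwt.SigningMethod {
		return exampleMethod{}
	})

	token := jwt.New(exampleMethod{})
	token.Claims["foo"] = "bar"
	signed, err := token.SignedString([]byte("shared-secret")) // placeholder secret
	fmt.Println(signed, err)
}
```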
- -While we try to make it obvious when we make breaking changes, there isn't a great mechanism for pushing announcements out to users. You may want to use this alternative package include: `gopkg.in/dgrijalva/jwt-go.v2`. It will do the right thing WRT semantic versioning. - -## Usage Tips - -### Signing vs Encryption - -A token is simply a JSON object that is signed by its author. this tells you exactly two things about the data: - -* The author of the token was in the possession of the signing secret -* The data has not been modified since it was signed - -It's important to know that JWT does not provide encryption, which means anyone who has access to the token can read its contents. If you need to protect (encrypt) the data, there is a companion spec, `JWE`, that provides this functionality. JWE is currently outside the scope of this library. - -### Choosing a Signing Method - -There are several signing methods available, and you should probably take the time to learn about the various options before choosing one. The principal design decision is most likely going to be symmetric vs asymmetric. - -Symmetric signing methods, such as HSA, use only a single secret. This is probably the simplest signing method to use since any `[]byte` can be used as a valid secret. They are also slightly computationally faster to use, though this rarely is enough to matter. Symmetric signing methods work the best when both producers and consumers of tokens are trusted, or even the same system. Since the same secret is used to both sign and validate tokens, you can't easily distribute the key for validation. - -Asymmetric signing methods, such as RSA, use different keys for signing and verifying tokens. This makes it possible to produce tokens with a private key, and allow any consumer to access the public key for verification. - -### JWT and OAuth - -It's worth mentioning that OAuth and JWT are not the same thing. A JWT token is simply a signed JSON object. It can be used anywhere such a thing is useful. There is some confusion, though, as JWT is the most common type of bearer token used in OAuth2 authentication. - -Without going too far down the rabbit hole, here's a description of the interaction of these technologies: - -* OAuth is a protocol for allowing an identity provider to be separate from the service a user is logging in to. For example, whenever you use Facebook to log into a different service (Yelp, Spotify, etc), you are using OAuth. -* OAuth defines several options for passing around authentication data. One popular method is called a "bearer token". A bearer token is simply a string that _should_ only be held by an authenticated user. Thus, simply presenting this token proves your identity. You can probably derive from here why a JWT might make a good bearer token. -* Because bearer tokens are used for authentication, it's important they're kept secret. This is why transactions that use bearer tokens typically happen over SSL. - -## More - -Documentation can be found [on godoc.org](http://godoc.org/github.com/dgrijalva/jwt-go). - -The command line utility included in this project (cmd/jwt) provides a straightforward example of token creation and parsing as well as a useful tool for debugging your own integration. For a more http centric example, see [this gist](https://gist.github.com/cryptix/45c33ecf0ae54828e63b). 
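The Usage Tips above contrast symmetric signing (HMAC, one shared `[]byte` secret) with asymmetric signing (RSA, private key signs, public key verifies). A short sketch against the vendored v2 API showing the key types each side needs; the secret is a placeholder and the RSA key is generated on the fly purely for illustration:

```go
package main

import (
	"crypto/rand"
	"crypto/rsa"
	"fmt"
	"time"

	jwt "github.com/dgrijalva/jwt-go"
)

func main() {
	// Symmetric (HS256): the same []byte secret signs and verifies.
	secret := []byte("shared-secret") // placeholder
	ht := jwt.New(jwt.SigningMethodHS256)
	ht.Claims["exp"] = time.Now().Add(time.Hour).Unix()
	hs, err := ht.SignedString(secret)
	if err != nil {
		panic(err)
	}
	hParsed, err := jwt.Parse(hs, func(*jwt.Token) (interface{}, error) { return secret, nil })
	fmt.Println("HS256 valid:", hParsed.Valid, err)

	// Asymmetric (RS256): the private key signs, the public key verifies,
	// so only the public half needs to be distributed to token consumers.
	priv, err := rsa.GenerateKey(rand.Reader, 2048)
	if err != nil {
		panic(err)
	}
	rt := jwt.New(jwt.SigningMethodRS256)
	rt.Claims["exp"] = time.Now().Add(time.Hour).Unix()
	rs, err := rt.SignedString(priv)
	if err != nil {
		panic(err)
	}
	rParsed, err := jwt.Parse(rs, func(*jwt.Token) (interface{}, error) { return &priv.PublicKey, nil })
	fmt.Println("RS256 valid:", rParsed.Valid, err)
}
```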
diff --git a/vendor/github.com/dgrijalva/jwt-go/VERSION_HISTORY.md b/vendor/github.com/dgrijalva/jwt-go/VERSION_HISTORY.md deleted file mode 100644 index 3918734..0000000 --- a/vendor/github.com/dgrijalva/jwt-go/VERSION_HISTORY.md +++ /dev/null @@ -1,81 +0,0 @@ -## `jwt-go` Version History - -#### 2.6.0 - -This will likely be the last backwards compatible release before 3.0.0. - -* Exposed inner error within ValidationError -* Fixed validation errors when using UseJSONNumber flag -* Added several unit tests - -#### 2.5.0 - -* Added support for signing method none. You shouldn't use this. The API tries to make this clear. -* Updated/fixed some documentation -* Added more helpful error message when trying to parse tokens that begin with `BEARER ` - -#### 2.4.0 - -* Added new type, Parser, to allow for configuration of various parsing parameters - * You can now specify a list of valid signing methods. Anything outside this set will be rejected. - * You can now opt to use the `json.Number` type instead of `float64` when parsing token JSON -* Added support for [Travis CI](https://travis-ci.org/dgrijalva/jwt-go) -* Fixed some bugs with ECDSA parsing - -#### 2.3.0 - -* Added support for ECDSA signing methods -* Added support for RSA PSS signing methods (requires go v1.4) - -#### 2.2.0 - -* Gracefully handle a `nil` `Keyfunc` being passed to `Parse`. Result will now be the parsed token and an error, instead of a panic. - -#### 2.1.0 - -Backwards compatible API change that was missed in 2.0.0. - -* The `SignedString` method on `Token` now takes `interface{}` instead of `[]byte` - -#### 2.0.0 - -There were two major reasons for breaking backwards compatibility with this update. The first was a refactor required to expand the width of the RSA and HMAC-SHA signing implementations. There will likely be no required code changes to support this change. - -The second update, while unfortunately requiring a small change in integration, is required to open up this library to other signing methods. Not all keys used for all signing methods have a single standard on-disk representation. Requiring `[]byte` as the type for all keys proved too limiting. Additionally, this implementation allows for pre-parsed tokens to be reused, which might matter in an application that parses a high volume of tokens with a small set of keys. Backwards compatibilty has been maintained for passing `[]byte` to the RSA signing methods, but they will also accept `*rsa.PublicKey` and `*rsa.PrivateKey`. - -It is likely the only integration change required here will be to change `func(t *jwt.Token) ([]byte, error)` to `func(t *jwt.Token) (interface{}, error)` when calling `Parse`. - -* **Compatibility Breaking Changes** - * `SigningMethodHS256` is now `*SigningMethodHMAC` instead of `type struct` - * `SigningMethodRS256` is now `*SigningMethodRSA` instead of `type struct` - * `KeyFunc` now returns `interface{}` instead of `[]byte` - * `SigningMethod.Sign` now takes `interface{}` instead of `[]byte` for the key - * `SigningMethod.Verify` now takes `interface{}` instead of `[]byte` for the key -* Renamed type `SigningMethodHS256` to `SigningMethodHMAC`. Specific sizes are now just instances of this type. - * Added public package global `SigningMethodHS256` - * Added public package global `SigningMethodHS384` - * Added public package global `SigningMethodHS512` -* Renamed type `SigningMethodRS256` to `SigningMethodRSA`. Specific sizes are now just instances of this type. 
- * Added public package global `SigningMethodRS256` - * Added public package global `SigningMethodRS384` - * Added public package global `SigningMethodRS512` -* Moved sample private key for HMAC tests from an inline value to a file on disk. Value is unchanged. -* Refactored the RSA implementation to be easier to read -* Exposed helper methods `ParseRSAPrivateKeyFromPEM` and `ParseRSAPublicKeyFromPEM` - -#### 1.0.2 - -* Fixed bug in parsing public keys from certificates -* Added more tests around the parsing of keys for RS256 -* Code refactoring in RS256 implementation. No functional changes - -#### 1.0.1 - -* Fixed panic if RS256 signing method was passed an invalid key - -#### 1.0.0 - -* First versioned release -* API stabilized -* Supports creating, signing, parsing, and validating JWT tokens -* Supports RS256 and HS256 signing methods \ No newline at end of file diff --git a/vendor/github.com/dgrijalva/jwt-go/cmd/jwt/README.md b/vendor/github.com/dgrijalva/jwt-go/cmd/jwt/README.md deleted file mode 100644 index 4a68ba4..0000000 --- a/vendor/github.com/dgrijalva/jwt-go/cmd/jwt/README.md +++ /dev/null @@ -1,13 +0,0 @@ -`jwt` command-line tool -======================= - -This is a simple tool to sign, verify and show JSON Web Tokens from -the command line. - -The following will create and sign a token, then verify it and output the original claims: - - echo {\"foo\":\"bar\"} | bin/jwt -key test/sample_key -alg RS256 -sign - | bin/jwt -key test/sample_key.pub -verify - - -To simply display a token, use: - - echo $JWT | jwt -show - diff --git a/vendor/github.com/dgrijalva/jwt-go/cmd/jwt/app.go b/vendor/github.com/dgrijalva/jwt-go/cmd/jwt/app.go deleted file mode 100644 index e8bc336..0000000 --- a/vendor/github.com/dgrijalva/jwt-go/cmd/jwt/app.go +++ /dev/null @@ -1,246 +0,0 @@ -// A useful example app. You can use this to debug your tokens on the command line. -// This is also a great place to look at how you might use this library. -// -// Example usage: -// The following will create and sign a token, then verify it and output the original claims. -// echo {\"foo\":\"bar\"} | bin/jwt -key test/sample_key -alg RS256 -sign - | bin/jwt -key test/sample_key.pub -verify - -package main - -import ( - "encoding/json" - "flag" - "fmt" - "io" - "io/ioutil" - "os" - "regexp" - "strings" - - jwt "github.com/dgrijalva/jwt-go" -) - -var ( - // Options - flagAlg = flag.String("alg", "", "signing algorithm identifier") - flagKey = flag.String("key", "", "path to key file or '-' to read from stdin") - flagCompact = flag.Bool("compact", false, "output compact JSON") - flagDebug = flag.Bool("debug", false, "print out all kinds of debug data") - - // Modes - exactly one of these is required - flagSign = flag.String("sign", "", "path to claims object to sign or '-' to read from stdin") - flagVerify = flag.String("verify", "", "path to JWT token to verify or '-' to read from stdin") - flagShow = flag.String("show", "", "path to JWT file or '-' to read from stdin") -) - -func main() { - // Usage message if you ask for -help or if you mess up inputs. - flag.Usage = func() { - fmt.Fprintf(os.Stderr, "Usage of %s:\n", os.Args[0]) - fmt.Fprintf(os.Stderr, " One of the following flags is required: sign, verify\n") - flag.PrintDefaults() - } - - // Parse command line options - flag.Parse() - - // Do the thing. 
If something goes wrong, print error to stderr - // and exit with a non-zero status code - if err := start(); err != nil { - fmt.Fprintf(os.Stderr, "Error: %v\n", err) - os.Exit(1) - } -} - -// Figure out which thing to do and then do that -func start() error { - if *flagSign != "" { - return signToken() - } else if *flagVerify != "" { - return verifyToken() - } else if *flagShow != "" { - return showToken() - } else { - flag.Usage() - return fmt.Errorf("None of the required flags are present. What do you want me to do?") - } -} - -// Helper func: Read input from specified file or stdin -func loadData(p string) ([]byte, error) { - if p == "" { - return nil, fmt.Errorf("No path specified") - } - - var rdr io.Reader - if p == "-" { - rdr = os.Stdin - } else { - if f, err := os.Open(p); err == nil { - rdr = f - defer f.Close() - } else { - return nil, err - } - } - return ioutil.ReadAll(rdr) -} - -// Print a json object in accordance with the prophecy (or the command line options) -func printJSON(j interface{}) error { - var out []byte - var err error - - if *flagCompact == false { - out, err = json.MarshalIndent(j, "", " ") - } else { - out, err = json.Marshal(j) - } - - if err == nil { - fmt.Println(string(out)) - } - - return err -} - -// Verify a token and output the claims. This is a great example -// of how to verify and view a token. -func verifyToken() error { - // get the token - tokData, err := loadData(*flagVerify) - if err != nil { - return fmt.Errorf("Couldn't read token: %v", err) - } - - // trim possible whitespace from token - tokData = regexp.MustCompile(`\s*$`).ReplaceAll(tokData, []byte{}) - if *flagDebug { - fmt.Fprintf(os.Stderr, "Token len: %v bytes\n", len(tokData)) - } - - // Parse the token. Load the key from command line option - token, err := jwt.Parse(string(tokData), func(t *jwt.Token) (interface{}, error) { - data, err := loadData(*flagKey) - if err != nil { - return nil, err - } - if isEs() { - return jwt.ParseECPublicKeyFromPEM(data) - } - return data, nil - }) - - // Print some debug data - if *flagDebug && token != nil { - fmt.Fprintf(os.Stderr, "Header:\n%v\n", token.Header) - fmt.Fprintf(os.Stderr, "Claims:\n%v\n", token.Claims) - } - - // Print an error if we can't parse for some reason - if err != nil { - return fmt.Errorf("Couldn't parse token: %v", err) - } - - // Is token invalid? - if !token.Valid { - return fmt.Errorf("Token is invalid") - } - - // Print the token details - if err := printJSON(token.Claims); err != nil { - return fmt.Errorf("Failed to output claims: %v", err) - } - - return nil -} - -// Create, sign, and output a token. This is a great, simple example of -// how to use this library to create and sign a token. 
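The `verifyToken` helper above loads raw key bytes and, for ES* algorithms, runs them through `jwt.ParseECPublicKeyFromPEM` before verifying. Outside the CLI, the same dispatch can live inside the keyfunc itself and key off the token's `alg` header rather than a flag. A hedged sketch against the vendored v2 API; the key path and token string are placeholders:

```go
package main

import (
	"fmt"
	"io/ioutil"
	"strings"

	jwt "github.com/dgrijalva/jwt-go"
)

// keyForToken mirrors the dispatch in verifyToken, but inspects the token's
// own alg header instead of a command-line flag.
func keyForToken(t *jwt.Token) (interface{}, error) {
	raw, err := ioutil.ReadFile("test/sample_key.pub") // placeholder path
	if err != nil {
		return nil, err
	}
	alg, _ := t.Header["alg"].(string)
	switch {
	case strings.HasPrefix(alg, "ES"):
		return jwt.ParseECPublicKeyFromPEM(raw)
	case strings.HasPrefix(alg, "RS"):
		return jwt.ParseRSAPublicKeyFromPEM(raw)
	default:
		// HMAC methods take the raw secret bytes directly.
		return raw, nil
	}
}

func main() {
	token, err := jwt.Parse("placeholder.token.value", keyForToken)
	fmt.Println(token != nil, err)
}
```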
-func signToken() error { - // get the token data from command line arguments - tokData, err := loadData(*flagSign) - if err != nil { - return fmt.Errorf("Couldn't read token: %v", err) - } else if *flagDebug { - fmt.Fprintf(os.Stderr, "Token: %v bytes", len(tokData)) - } - - // parse the JSON of the claims - var claims map[string]interface{} - if err := json.Unmarshal(tokData, &claims); err != nil { - return fmt.Errorf("Couldn't parse claims JSON: %v", err) - } - - // get the key - var key interface{} - key, err = loadData(*flagKey) - if err != nil { - return fmt.Errorf("Couldn't read key: %v", err) - } - - // get the signing alg - alg := jwt.GetSigningMethod(*flagAlg) - if alg == nil { - return fmt.Errorf("Couldn't find signing method: %v", *flagAlg) - } - - // create a new token - token := jwt.New(alg) - token.Claims = claims - - if isEs() { - if k, ok := key.([]byte); !ok { - return fmt.Errorf("Couldn't convert key data to key") - } else { - key, err = jwt.ParseECPrivateKeyFromPEM(k) - if err != nil { - return err - } - } - } - - if out, err := token.SignedString(key); err == nil { - fmt.Println(out) - } else { - return fmt.Errorf("Error signing token: %v", err) - } - - return nil -} - -// showToken pretty-prints the token on the command line. -func showToken() error { - // get the token - tokData, err := loadData(*flagShow) - if err != nil { - return fmt.Errorf("Couldn't read token: %v", err) - } - - // trim possible whitespace from token - tokData = regexp.MustCompile(`\s*$`).ReplaceAll(tokData, []byte{}) - if *flagDebug { - fmt.Fprintf(os.Stderr, "Token len: %v bytes\n", len(tokData)) - } - - token, err := jwt.Parse(string(tokData), nil) - if token == nil { - return fmt.Errorf("malformed token: %v", err) - } - - // Print the token details - fmt.Println("Header:") - if err := printJSON(token.Header); err != nil { - return fmt.Errorf("Failed to output header: %v", err) - } - - fmt.Println("Claims:") - if err := printJSON(token.Claims); err != nil { - return fmt.Errorf("Failed to output claims: %v", err) - } - - return nil -} - -func isEs() bool { - return strings.HasPrefix(*flagAlg, "ES") -} diff --git a/vendor/github.com/dgrijalva/jwt-go/doc.go b/vendor/github.com/dgrijalva/jwt-go/doc.go deleted file mode 100644 index a86dc1a..0000000 --- a/vendor/github.com/dgrijalva/jwt-go/doc.go +++ /dev/null @@ -1,4 +0,0 @@ -// Package jwt is a Go implementation of JSON Web Tokens: http://self-issued.info/docs/draft-jones-json-web-token.html -// -// See README.md for more info. 
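The `showToken` helper above passes a nil keyfunc to `jwt.Parse` so the header and claims can be displayed even though verification necessarily fails. A compact sketch of that pattern; the token string is a placeholder with a junk signature:

```go
package main

import (
	"fmt"

	jwt "github.com/dgrijalva/jwt-go"
)

func main() {
	raw := "eyJ0eXAiOiJKV1QiLCJhbGciOiJIUzI1NiJ9.eyJmb28iOiJiYXIifQ.x" // placeholder token

	// With a nil keyfunc the parser still returns the decoded token alongside
	// a validation error; never treat a token inspected this way as trusted.
	token, err := jwt.Parse(raw, nil)
	if token == nil {
		fmt.Println("malformed token:", err)
		return
	}
	fmt.Println("header:", token.Header)
	fmt.Println("claims:", token.Claims)
}
```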
-package jwt diff --git a/vendor/github.com/dgrijalva/jwt-go/ecdsa.go b/vendor/github.com/dgrijalva/jwt-go/ecdsa.go deleted file mode 100644 index 0518ed1..0000000 --- a/vendor/github.com/dgrijalva/jwt-go/ecdsa.go +++ /dev/null @@ -1,147 +0,0 @@ -package jwt - -import ( - "crypto" - "crypto/ecdsa" - "crypto/rand" - "errors" - "math/big" -) - -var ( - // Sadly this is missing from crypto/ecdsa compared to crypto/rsa - ErrECDSAVerification = errors.New("crypto/ecdsa: verification error") -) - -// Implements the ECDSA family of signing methods signing methods -type SigningMethodECDSA struct { - Name string - Hash crypto.Hash - KeySize int - CurveBits int -} - -// Specific instances for EC256 and company -var ( - SigningMethodES256 *SigningMethodECDSA - SigningMethodES384 *SigningMethodECDSA - SigningMethodES512 *SigningMethodECDSA -) - -func init() { - // ES256 - SigningMethodES256 = &SigningMethodECDSA{"ES256", crypto.SHA256, 32, 256} - RegisterSigningMethod(SigningMethodES256.Alg(), func() SigningMethod { - return SigningMethodES256 - }) - - // ES384 - SigningMethodES384 = &SigningMethodECDSA{"ES384", crypto.SHA384, 48, 384} - RegisterSigningMethod(SigningMethodES384.Alg(), func() SigningMethod { - return SigningMethodES384 - }) - - // ES512 - SigningMethodES512 = &SigningMethodECDSA{"ES512", crypto.SHA512, 66, 521} - RegisterSigningMethod(SigningMethodES512.Alg(), func() SigningMethod { - return SigningMethodES512 - }) -} - -func (m *SigningMethodECDSA) Alg() string { - return m.Name -} - -// Implements the Verify method from SigningMethod -// For this verify method, key must be an ecdsa.PublicKey struct -func (m *SigningMethodECDSA) Verify(signingString, signature string, key interface{}) error { - var err error - - // Decode the signature - var sig []byte - if sig, err = DecodeSegment(signature); err != nil { - return err - } - - // Get the key - var ecdsaKey *ecdsa.PublicKey - switch k := key.(type) { - case *ecdsa.PublicKey: - ecdsaKey = k - default: - return ErrInvalidKey - } - - if len(sig) != 2*m.KeySize { - return ErrECDSAVerification - } - - r := big.NewInt(0).SetBytes(sig[:m.KeySize]) - s := big.NewInt(0).SetBytes(sig[m.KeySize:]) - - // Create hasher - if !m.Hash.Available() { - return ErrHashUnavailable - } - hasher := m.Hash.New() - hasher.Write([]byte(signingString)) - - // Verify the signature - if verifystatus := ecdsa.Verify(ecdsaKey, hasher.Sum(nil), r, s); verifystatus == true { - return nil - } else { - return ErrECDSAVerification - } -} - -// Implements the Sign method from SigningMethod -// For this signing method, key must be an ecdsa.PrivateKey struct -func (m *SigningMethodECDSA) Sign(signingString string, key interface{}) (string, error) { - // Get the key - var ecdsaKey *ecdsa.PrivateKey - switch k := key.(type) { - case *ecdsa.PrivateKey: - ecdsaKey = k - default: - return "", ErrInvalidKey - } - - // Create the hasher - if !m.Hash.Available() { - return "", ErrHashUnavailable - } - - hasher := m.Hash.New() - hasher.Write([]byte(signingString)) - - // Sign the string and return r, s - if r, s, err := ecdsa.Sign(rand.Reader, ecdsaKey, hasher.Sum(nil)); err == nil { - curveBits := ecdsaKey.Curve.Params().BitSize - - if m.CurveBits != curveBits { - return "", ErrInvalidKey - } - - keyBytes := curveBits / 8 - if curveBits%8 > 0 { - keyBytes += 1 - } - - // We serialize the outpus (r and s) into big-endian byte arrays and pad - // them with zeros on the left to make sure the sizes work out. 
Both arrays - // must be keyBytes long, and the output must be 2*keyBytes long. - rBytes := r.Bytes() - rBytesPadded := make([]byte, keyBytes) - copy(rBytesPadded[keyBytes-len(rBytes):], rBytes) - - sBytes := s.Bytes() - sBytesPadded := make([]byte, keyBytes) - copy(sBytesPadded[keyBytes-len(sBytes):], sBytes) - - out := append(rBytesPadded, sBytesPadded...) - - return EncodeSegment(out), nil - } else { - return "", err - } -} diff --git a/vendor/github.com/dgrijalva/jwt-go/ecdsa_test.go b/vendor/github.com/dgrijalva/jwt-go/ecdsa_test.go deleted file mode 100644 index 753047b..0000000 --- a/vendor/github.com/dgrijalva/jwt-go/ecdsa_test.go +++ /dev/null @@ -1,100 +0,0 @@ -package jwt_test - -import ( - "crypto/ecdsa" - "io/ioutil" - "strings" - "testing" - - "github.com/dgrijalva/jwt-go" -) - -var ecdsaTestData = []struct { - name string - keys map[string]string - tokenString string - alg string - claims map[string]interface{} - valid bool -}{ - { - "Basic ES256", - map[string]string{"private": "test/ec256-private.pem", "public": "test/ec256-public.pem"}, - "eyJ0eXAiOiJKV1QiLCJhbGciOiJFUzI1NiJ9.eyJmb28iOiJiYXIifQ.feG39E-bn8HXAKhzDZq7yEAPWYDhZlwTn3sePJnU9VrGMmwdXAIEyoOnrjreYlVM_Z4N13eK9-TmMTWyfKJtHQ", - "ES256", - map[string]interface{}{"foo": "bar"}, - true, - }, - { - "Basic ES384", - map[string]string{"private": "test/ec384-private.pem", "public": "test/ec384-public.pem"}, - "eyJ0eXAiOiJKV1QiLCJhbGciOiJFUzM4NCJ9.eyJmb28iOiJiYXIifQ.ngAfKMbJUh0WWubSIYe5GMsA-aHNKwFbJk_wq3lq23aPp8H2anb1rRILIzVR0gUf4a8WzDtrzmiikuPWyCS6CN4-PwdgTk-5nehC7JXqlaBZU05p3toM3nWCwm_LXcld", - "ES384", - map[string]interface{}{"foo": "bar"}, - true, - }, - { - "Basic ES512", - map[string]string{"private": "test/ec512-private.pem", "public": "test/ec512-public.pem"}, - "eyJ0eXAiOiJKV1QiLCJhbGciOiJFUzUxMiJ9.eyJmb28iOiJiYXIifQ.AAU0TvGQOcdg2OvrwY73NHKgfk26UDekh9Prz-L_iWuTBIBqOFCWwwLsRiHB1JOddfKAls5do1W0jR_F30JpVd-6AJeTjGKA4C1A1H6gIKwRY0o_tFDIydZCl_lMBMeG5VNFAjO86-WCSKwc3hqaGkq1MugPRq_qrF9AVbuEB4JPLyL5", - "ES512", - map[string]interface{}{"foo": "bar"}, - true, - }, - { - "basic ES256 invalid: foo => bar", - map[string]string{"private": "test/ec256-private.pem", "public": "test/ec256-public.pem"}, - "eyJhbGciOiJFUzI1NiIsInR5cCI6IkpXVCJ9.eyJmb28iOiJiYXIifQ.MEQCIHoSJnmGlPaVQDqacx_2XlXEhhqtWceVopjomc2PJLtdAiAUTeGPoNYxZw0z8mgOnnIcjoxRuNDVZvybRZF3wR1l8W", - "ES256", - map[string]interface{}{"foo": "bar"}, - false, - }, -} - -func TestECDSAVerify(t *testing.T) { - for _, data := range ecdsaTestData { - var err error - - key, _ := ioutil.ReadFile(data.keys["public"]) - - var ecdsaKey *ecdsa.PublicKey - if ecdsaKey, err = jwt.ParseECPublicKeyFromPEM(key); err != nil { - t.Errorf("Unable to parse ECDSA public key: %v", err) - } - - parts := strings.Split(data.tokenString, ".") - - method := jwt.GetSigningMethod(data.alg) - err = method.Verify(strings.Join(parts[0:2], "."), parts[2], ecdsaKey) - if data.valid && err != nil { - t.Errorf("[%v] Error while verifying key: %v", data.name, err) - } - if !data.valid && err == nil { - t.Errorf("[%v] Invalid key passed validation", data.name) - } - } -} - -func TestECDSASign(t *testing.T) { - for _, data := range ecdsaTestData { - var err error - key, _ := ioutil.ReadFile(data.keys["private"]) - - var ecdsaKey *ecdsa.PrivateKey - if ecdsaKey, err = jwt.ParseECPrivateKeyFromPEM(key); err != nil { - t.Errorf("Unable to parse ECDSA private key: %v", err) - } - - if data.valid { - parts := strings.Split(data.tokenString, ".") - method := jwt.GetSigningMethod(data.alg) - sig, err := 
method.Sign(strings.Join(parts[0:2], "."), ecdsaKey) - if err != nil { - t.Errorf("[%v] Error signing token: %v", data.name, err) - } - if sig == parts[2] { - t.Errorf("[%v] Identical signatures\nbefore:\n%v\nafter:\n%v", data.name, parts[2], sig) - } - } - } -} diff --git a/vendor/github.com/dgrijalva/jwt-go/ecdsa_utils.go b/vendor/github.com/dgrijalva/jwt-go/ecdsa_utils.go deleted file mode 100644 index d19624b..0000000 --- a/vendor/github.com/dgrijalva/jwt-go/ecdsa_utils.go +++ /dev/null @@ -1,67 +0,0 @@ -package jwt - -import ( - "crypto/ecdsa" - "crypto/x509" - "encoding/pem" - "errors" -) - -var ( - ErrNotECPublicKey = errors.New("Key is not a valid ECDSA public key") - ErrNotECPrivateKey = errors.New("Key is not a valid ECDSA private key") -) - -// Parse PEM encoded Elliptic Curve Private Key Structure -func ParseECPrivateKeyFromPEM(key []byte) (*ecdsa.PrivateKey, error) { - var err error - - // Parse PEM block - var block *pem.Block - if block, _ = pem.Decode(key); block == nil { - return nil, ErrKeyMustBePEMEncoded - } - - // Parse the key - var parsedKey interface{} - if parsedKey, err = x509.ParseECPrivateKey(block.Bytes); err != nil { - return nil, err - } - - var pkey *ecdsa.PrivateKey - var ok bool - if pkey, ok = parsedKey.(*ecdsa.PrivateKey); !ok { - return nil, ErrNotECPrivateKey - } - - return pkey, nil -} - -// Parse PEM encoded PKCS1 or PKCS8 public key -func ParseECPublicKeyFromPEM(key []byte) (*ecdsa.PublicKey, error) { - var err error - - // Parse PEM block - var block *pem.Block - if block, _ = pem.Decode(key); block == nil { - return nil, ErrKeyMustBePEMEncoded - } - - // Parse the key - var parsedKey interface{} - if parsedKey, err = x509.ParsePKIXPublicKey(block.Bytes); err != nil { - if cert, err := x509.ParseCertificate(block.Bytes); err == nil { - parsedKey = cert.PublicKey - } else { - return nil, err - } - } - - var pkey *ecdsa.PublicKey - var ok bool - if pkey, ok = parsedKey.(*ecdsa.PublicKey); !ok { - return nil, ErrNotECPublicKey - } - - return pkey, nil -} diff --git a/vendor/github.com/dgrijalva/jwt-go/errors.go b/vendor/github.com/dgrijalva/jwt-go/errors.go deleted file mode 100644 index a6b60a3..0000000 --- a/vendor/github.com/dgrijalva/jwt-go/errors.go +++ /dev/null @@ -1,51 +0,0 @@ -package jwt - -import ( - "errors" -) - -// Error constants -var ( - ErrInvalidKey = errors.New("key is invalid or of invalid type") - ErrHashUnavailable = errors.New("the requested hash function is unavailable") - ErrNoTokenInRequest = errors.New("no token present in request") -) - -// The errors that might occur when parsing and validating a token -const ( - ValidationErrorMalformed uint32 = 1 << iota // Token is malformed - ValidationErrorUnverifiable // Token could not be verified because of signing problems - ValidationErrorSignatureInvalid // Signature validation failed - ValidationErrorExpired // Exp validation failed - ValidationErrorNotValidYet // NBF validation failed -) - -// Helper for constructing a ValidationError with a string error message -func NewValidationError(errorText string, errorFlags uint32) *ValidationError { - return &ValidationError{ - Inner: errors.New(errorText), - Errors: errorFlags, - } -} - -// The error from Parse if token is not valid -type ValidationError struct { - Inner error // stores the error returned by external dependencies, i.e.: KeyFunc - Errors uint32 // bitfield. see ValidationError... 
constants -} - -// Validation error is an error type -func (e ValidationError) Error() string { - if e.Inner == nil { - return "token is invalid" - } - return e.Inner.Error() -} - -// No errors -func (e *ValidationError) valid() bool { - if e.Errors > 0 { - return false - } - return true -} diff --git a/vendor/github.com/dgrijalva/jwt-go/example_test.go b/vendor/github.com/dgrijalva/jwt-go/example_test.go deleted file mode 100644 index edb48e4..0000000 --- a/vendor/github.com/dgrijalva/jwt-go/example_test.go +++ /dev/null @@ -1,52 +0,0 @@ -package jwt_test - -import ( - "fmt" - "github.com/dgrijalva/jwt-go" - "time" -) - -func ExampleParse(myToken string, myLookupKey func(interface{}) (interface{}, error)) { - token, err := jwt.Parse(myToken, func(token *jwt.Token) (interface{}, error) { - return myLookupKey(token.Header["kid"]) - }) - - if err == nil && token.Valid { - fmt.Println("Your token is valid. I like your style.") - } else { - fmt.Println("This token is terrible! I cannot accept this.") - } -} - -func ExampleNew(mySigningKey []byte) (string, error) { - // Create the token - token := jwt.New(jwt.SigningMethodHS256) - // Set some claims - token.Claims["foo"] = "bar" - token.Claims["exp"] = time.Now().Add(time.Hour * 72).Unix() - // Sign and get the complete encoded token as a string - tokenString, err := token.SignedString(mySigningKey) - return tokenString, err -} - -func ExampleParse_errorChecking(myToken string, myLookupKey func(interface{}) (interface{}, error)) { - token, err := jwt.Parse(myToken, func(token *jwt.Token) (interface{}, error) { - return myLookupKey(token.Header["kid"]) - }) - - if token.Valid { - fmt.Println("You look nice today") - } else if ve, ok := err.(*jwt.ValidationError); ok { - if ve.Errors&jwt.ValidationErrorMalformed != 0 { - fmt.Println("That's not even a token") - } else if ve.Errors&(jwt.ValidationErrorExpired|jwt.ValidationErrorNotValidYet) != 0 { - // Token is either expired or not active yet - fmt.Println("Timing is everything") - } else { - fmt.Println("Couldn't handle this token:", err) - } - } else { - fmt.Println("Couldn't handle this token:", err) - } - -} diff --git a/vendor/github.com/dgrijalva/jwt-go/hmac.go b/vendor/github.com/dgrijalva/jwt-go/hmac.go deleted file mode 100644 index 192e625..0000000 --- a/vendor/github.com/dgrijalva/jwt-go/hmac.go +++ /dev/null @@ -1,94 +0,0 @@ -package jwt - -import ( - "crypto" - "crypto/hmac" - "errors" -) - -// Implements the HMAC-SHA family of signing methods signing methods -type SigningMethodHMAC struct { - Name string - Hash crypto.Hash -} - -// Specific instances for HS256 and company -var ( - SigningMethodHS256 *SigningMethodHMAC - SigningMethodHS384 *SigningMethodHMAC - SigningMethodHS512 *SigningMethodHMAC - ErrSignatureInvalid = errors.New("signature is invalid") -) - -func init() { - // HS256 - SigningMethodHS256 = &SigningMethodHMAC{"HS256", crypto.SHA256} - RegisterSigningMethod(SigningMethodHS256.Alg(), func() SigningMethod { - return SigningMethodHS256 - }) - - // HS384 - SigningMethodHS384 = &SigningMethodHMAC{"HS384", crypto.SHA384} - RegisterSigningMethod(SigningMethodHS384.Alg(), func() SigningMethod { - return SigningMethodHS384 - }) - - // HS512 - SigningMethodHS512 = &SigningMethodHMAC{"HS512", crypto.SHA512} - RegisterSigningMethod(SigningMethodHS512.Alg(), func() SigningMethod { - return SigningMethodHS512 - }) -} - -func (m *SigningMethodHMAC) Alg() string { - return m.Name -} - -// Verify the signature of HSXXX tokens. Returns nil if the signature is valid. 
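The HMAC implementation below signs and verifies the `<header>.<claims>` signing string directly, which is also how the package's own tests exercise it. A small sketch of driving a `SigningMethod` without going through a `Token`; the signing string and secret are placeholders:

```go
package main

import (
	"fmt"

	jwt "github.com/dgrijalva/jwt-go"
)

func main() {
	secret := []byte("shared-secret")                                          // placeholder key
	signingString := "eyJ0eXAiOiJKV1QiLCJhbGciOiJIUzI1NiJ9.eyJmb28iOiJiYXIifQ" // "<header>.<claims>"

	method := jwt.GetSigningMethod("HS256")

	sig, err := method.Sign(signingString, secret)
	if err != nil {
		panic(err)
	}

	// Verify recomputes the HMAC over the signing string and compares it
	// against the supplied signature in constant time; nil means valid.
	fmt.Println(method.Verify(signingString, sig, secret))
}
```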
-func (m *SigningMethodHMAC) Verify(signingString, signature string, key interface{}) error { - // Verify the key is the right type - keyBytes, ok := key.([]byte) - if !ok { - return ErrInvalidKey - } - - // Decode signature, for comparison - sig, err := DecodeSegment(signature) - if err != nil { - return err - } - - // Can we use the specified hashing method? - if !m.Hash.Available() { - return ErrHashUnavailable - } - - // This signing method is symmetric, so we validate the signature - // by reproducing the signature from the signing string and key, then - // comparing that against the provided signature. - hasher := hmac.New(m.Hash.New, keyBytes) - hasher.Write([]byte(signingString)) - if !hmac.Equal(sig, hasher.Sum(nil)) { - return ErrSignatureInvalid - } - - // No validation errors. Signature is good. - return nil -} - -// Implements the Sign method from SigningMethod for this signing method. -// Key must be []byte -func (m *SigningMethodHMAC) Sign(signingString string, key interface{}) (string, error) { - if keyBytes, ok := key.([]byte); ok { - if !m.Hash.Available() { - return "", ErrHashUnavailable - } - - hasher := hmac.New(m.Hash.New, keyBytes) - hasher.Write([]byte(signingString)) - - return EncodeSegment(hasher.Sum(nil)), nil - } - - return "", ErrInvalidKey -} diff --git a/vendor/github.com/dgrijalva/jwt-go/hmac_test.go b/vendor/github.com/dgrijalva/jwt-go/hmac_test.go deleted file mode 100644 index c7e114f..0000000 --- a/vendor/github.com/dgrijalva/jwt-go/hmac_test.go +++ /dev/null @@ -1,91 +0,0 @@ -package jwt_test - -import ( - "github.com/dgrijalva/jwt-go" - "io/ioutil" - "strings" - "testing" -) - -var hmacTestData = []struct { - name string - tokenString string - alg string - claims map[string]interface{} - valid bool -}{ - { - "web sample", - "eyJ0eXAiOiJKV1QiLA0KICJhbGciOiJIUzI1NiJ9.eyJpc3MiOiJqb2UiLA0KICJleHAiOjEzMDA4MTkzODAsDQogImh0dHA6Ly9leGFtcGxlLmNvbS9pc19yb290Ijp0cnVlfQ.dBjftJeZ4CVP-mB92K27uhbUJU1p1r_wW1gFWFOEjXk", - "HS256", - map[string]interface{}{"iss": "joe", "exp": 1300819380, "http://example.com/is_root": true}, - true, - }, - { - "HS384", - "eyJhbGciOiJIUzM4NCIsInR5cCI6IkpXVCJ9.eyJleHAiOjEuMzAwODE5MzhlKzA5LCJodHRwOi8vZXhhbXBsZS5jb20vaXNfcm9vdCI6dHJ1ZSwiaXNzIjoiam9lIn0.KWZEuOD5lbBxZ34g7F-SlVLAQ_r5KApWNWlZIIMyQVz5Zs58a7XdNzj5_0EcNoOy", - "HS384", - map[string]interface{}{"iss": "joe", "exp": 1300819380, "http://example.com/is_root": true}, - true, - }, - { - "HS512", - "eyJhbGciOiJIUzUxMiIsInR5cCI6IkpXVCJ9.eyJleHAiOjEuMzAwODE5MzhlKzA5LCJodHRwOi8vZXhhbXBsZS5jb20vaXNfcm9vdCI6dHJ1ZSwiaXNzIjoiam9lIn0.CN7YijRX6Aw1n2jyI2Id1w90ja-DEMYiWixhYCyHnrZ1VfJRaFQz1bEbjjA5Fn4CLYaUG432dEYmSbS4Saokmw", - "HS512", - map[string]interface{}{"iss": "joe", "exp": 1300819380, "http://example.com/is_root": true}, - true, - }, - { - "web sample: invalid", - "eyJ0eXAiOiJKV1QiLA0KICJhbGciOiJIUzI1NiJ9.eyJpc3MiOiJqb2UiLA0KICJleHAiOjEzMDA4MTkzODAsDQogImh0dHA6Ly9leGFtcGxlLmNvbS9pc19yb290Ijp0cnVlfQ.dBjftJeZ4CVP-mB92K27uhbUJU1p1r_wW1gFWFOEjXo", - "HS256", - map[string]interface{}{"iss": "joe", "exp": 1300819380, "http://example.com/is_root": true}, - false, - }, -} - -// Sample data from http://tools.ietf.org/html/draft-jones-json-web-signature-04#appendix-A.1 -var hmacTestKey, _ = ioutil.ReadFile("test/hmacTestKey") - -func TestHMACVerify(t *testing.T) { - for _, data := range hmacTestData { - parts := strings.Split(data.tokenString, ".") - - method := jwt.GetSigningMethod(data.alg) - err := method.Verify(strings.Join(parts[0:2], "."), parts[2], hmacTestKey) - if data.valid && err != nil { 
- t.Errorf("[%v] Error while verifying key: %v", data.name, err) - } - if !data.valid && err == nil { - t.Errorf("[%v] Invalid key passed validation", data.name) - } - } -} - -func TestHMACSign(t *testing.T) { - for _, data := range hmacTestData { - if data.valid { - parts := strings.Split(data.tokenString, ".") - method := jwt.GetSigningMethod(data.alg) - sig, err := method.Sign(strings.Join(parts[0:2], "."), hmacTestKey) - if err != nil { - t.Errorf("[%v] Error signing token: %v", data.name, err) - } - if sig != parts[2] { - t.Errorf("[%v] Incorrect signature.\nwas:\n%v\nexpecting:\n%v", data.name, sig, parts[2]) - } - } - } -} - -func BenchmarkHS256Signing(b *testing.B) { - benchmarkSigning(b, jwt.SigningMethodHS256, hmacTestKey) -} - -func BenchmarkHS384Signing(b *testing.B) { - benchmarkSigning(b, jwt.SigningMethodHS384, hmacTestKey) -} - -func BenchmarkHS512Signing(b *testing.B) { - benchmarkSigning(b, jwt.SigningMethodHS512, hmacTestKey) -} diff --git a/vendor/github.com/dgrijalva/jwt-go/none.go b/vendor/github.com/dgrijalva/jwt-go/none.go deleted file mode 100644 index f04d189..0000000 --- a/vendor/github.com/dgrijalva/jwt-go/none.go +++ /dev/null @@ -1,52 +0,0 @@ -package jwt - -// Implements the none signing method. This is required by the spec -// but you probably should never use it. -var SigningMethodNone *signingMethodNone - -const UnsafeAllowNoneSignatureType unsafeNoneMagicConstant = "none signing method allowed" - -var NoneSignatureTypeDisallowedError error - -type signingMethodNone struct{} -type unsafeNoneMagicConstant string - -func init() { - SigningMethodNone = &signingMethodNone{} - NoneSignatureTypeDisallowedError = NewValidationError("'none' signature type is not allowed", ValidationErrorSignatureInvalid) - - RegisterSigningMethod(SigningMethodNone.Alg(), func() SigningMethod { - return SigningMethodNone - }) -} - -func (m *signingMethodNone) Alg() string { - return "none" -} - -// Only allow 'none' alg type if UnsafeAllowNoneSignatureType is specified as the key -func (m *signingMethodNone) Verify(signingString, signature string, key interface{}) (err error) { - // Key must be UnsafeAllowNoneSignatureType to prevent accidentally - // accepting 'none' signing method - if _, ok := key.(unsafeNoneMagicConstant); !ok { - return NoneSignatureTypeDisallowedError - } - // If signing method is none, signature must be an empty string - if signature != "" { - return NewValidationError( - "'none' signing method with non-empty signature", - ValidationErrorSignatureInvalid, - ) - } - - // Accept 'none' signing method. 
- return nil -} - -// Only allow 'none' signing if UnsafeAllowNoneSignatureType is specified as the key -func (m *signingMethodNone) Sign(signingString string, key interface{}) (string, error) { - if _, ok := key.(unsafeNoneMagicConstant); ok { - return "", nil - } - return "", NoneSignatureTypeDisallowedError -} diff --git a/vendor/github.com/dgrijalva/jwt-go/none_test.go b/vendor/github.com/dgrijalva/jwt-go/none_test.go deleted file mode 100644 index 29a69ef..0000000 --- a/vendor/github.com/dgrijalva/jwt-go/none_test.go +++ /dev/null @@ -1,72 +0,0 @@ -package jwt_test - -import ( - "github.com/dgrijalva/jwt-go" - "strings" - "testing" -) - -var noneTestData = []struct { - name string - tokenString string - alg string - key interface{} - claims map[string]interface{} - valid bool -}{ - { - "Basic", - "eyJ0eXAiOiJKV1QiLCJhbGciOiJSUzI1NiJ9.eyJmb28iOiJiYXIifQ.", - "none", - jwt.UnsafeAllowNoneSignatureType, - map[string]interface{}{"foo": "bar"}, - true, - }, - { - "Basic - no key", - "eyJ0eXAiOiJKV1QiLCJhbGciOiJSUzI1NiJ9.eyJmb28iOiJiYXIifQ.", - "none", - nil, - map[string]interface{}{"foo": "bar"}, - false, - }, - { - "Signed", - "eyJhbGciOiJSUzM4NCIsInR5cCI6IkpXVCJ9.eyJmb28iOiJiYXIifQ.W-jEzRfBigtCWsinvVVuldiuilzVdU5ty0MvpLaSaqK9PlAWWlDQ1VIQ_qSKzwL5IXaZkvZFJXT3yL3n7OUVu7zCNJzdwznbC8Z-b0z2lYvcklJYi2VOFRcGbJtXUqgjk2oGsiqUMUMOLP70TTefkpsgqDxbRh9CDUfpOJgW-dU7cmgaoswe3wjUAUi6B6G2YEaiuXC0XScQYSYVKIzgKXJV8Zw-7AN_DBUI4GkTpsvQ9fVVjZM9csQiEXhYekyrKu1nu_POpQonGd8yqkIyXPECNmmqH5jH4sFiF67XhD7_JpkvLziBpI-uh86evBUadmHhb9Otqw3uV3NTaXLzJw", - "none", - jwt.UnsafeAllowNoneSignatureType, - map[string]interface{}{"foo": "bar"}, - false, - }, -} - -func TestNoneVerify(t *testing.T) { - for _, data := range noneTestData { - parts := strings.Split(data.tokenString, ".") - - method := jwt.GetSigningMethod(data.alg) - err := method.Verify(strings.Join(parts[0:2], "."), parts[2], data.key) - if data.valid && err != nil { - t.Errorf("[%v] Error while verifying key: %v", data.name, err) - } - if !data.valid && err == nil { - t.Errorf("[%v] Invalid key passed validation", data.name) - } - } -} - -func TestNoneSign(t *testing.T) { - for _, data := range noneTestData { - if data.valid { - parts := strings.Split(data.tokenString, ".") - method := jwt.GetSigningMethod(data.alg) - sig, err := method.Sign(strings.Join(parts[0:2], "."), data.key) - if err != nil { - t.Errorf("[%v] Error signing token: %v", data.name, err) - } - if sig != parts[2] { - t.Errorf("[%v] Incorrect signature.\nwas:\n%v\nexpecting:\n%v", data.name, sig, parts[2]) - } - } - } -} diff --git a/vendor/github.com/dgrijalva/jwt-go/parser.go b/vendor/github.com/dgrijalva/jwt-go/parser.go deleted file mode 100644 index fbde9cb..0000000 --- a/vendor/github.com/dgrijalva/jwt-go/parser.go +++ /dev/null @@ -1,140 +0,0 @@ -package jwt - -import ( - "bytes" - "encoding/json" - "fmt" - "strings" - "time" -) - -type Parser struct { - ValidMethods []string // If populated, only these methods will be considered valid - UseJSONNumber bool // Use JSON Number format in JSON decoder -} - -// Parse, validate, and return a token. -// keyFunc will receive the parsed token and should return the key for validating. 
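The `Parser` type above exposes `ValidMethods` so a caller can whitelist the algorithms it is willing to accept, which is the structured counterpart of the README's "validate the alg" advice and keeps `none` out unless it is explicitly allowed. A hedged sketch; the secret is a placeholder:

```go
package main

import (
	"fmt"

	jwt "github.com/dgrijalva/jwt-go"
)

func main() {
	secret := []byte("shared-secret") // placeholder

	t := jwt.New(jwt.SigningMethodHS256)
	t.Claims["foo"] = "bar"
	tokenString, err := t.SignedString(secret)
	if err != nil {
		panic(err)
	}

	// Only HS256 is acceptable; a token signed with any other method
	// fails verification with ValidationErrorSignatureInvalid.
	parser := &jwt.Parser{ValidMethods: []string{"HS256"}}
	token, err := parser.Parse(tokenString, func(*jwt.Token) (interface{}, error) {
		return secret, nil
	})
	fmt.Println(token.Valid, err)
}
```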
-// If everything is kosher, err will be nil -func (p *Parser) Parse(tokenString string, keyFunc Keyfunc) (*Token, error) { - parts := strings.Split(tokenString, ".") - if len(parts) != 3 { - return nil, NewValidationError("token contains an invalid number of segments", ValidationErrorMalformed) - } - - var err error - token := &Token{Raw: tokenString} - // parse Header - var headerBytes []byte - if headerBytes, err = DecodeSegment(parts[0]); err != nil { - if strings.HasPrefix(strings.ToLower(tokenString), "bearer ") { - return token, NewValidationError("tokenstring should not contain 'bearer '", ValidationErrorMalformed) - } - return token, &ValidationError{Inner: err, Errors: ValidationErrorMalformed} - } - if err = json.Unmarshal(headerBytes, &token.Header); err != nil { - return token, &ValidationError{Inner: err, Errors: ValidationErrorMalformed} - } - - // parse Claims - var claimBytes []byte - if claimBytes, err = DecodeSegment(parts[1]); err != nil { - return token, &ValidationError{Inner: err, Errors: ValidationErrorMalformed} - } - dec := json.NewDecoder(bytes.NewBuffer(claimBytes)) - if p.UseJSONNumber { - dec.UseNumber() - } - if err = dec.Decode(&token.Claims); err != nil { - return token, &ValidationError{Inner: err, Errors: ValidationErrorMalformed} - } - - // Lookup signature method - if method, ok := token.Header["alg"].(string); ok { - if token.Method = GetSigningMethod(method); token.Method == nil { - return token, NewValidationError("signing method (alg) is unavailable.", ValidationErrorUnverifiable) - } - } else { - return token, NewValidationError("signing method (alg) is unspecified.", ValidationErrorUnverifiable) - } - - // Verify signing method is in the required set - if p.ValidMethods != nil { - var signingMethodValid = false - var alg = token.Method.Alg() - for _, m := range p.ValidMethods { - if m == alg { - signingMethodValid = true - break - } - } - if !signingMethodValid { - // signing method is not in the listed set - return token, NewValidationError(fmt.Sprintf("signing method %v is invalid", alg), ValidationErrorSignatureInvalid) - } - } - - // Lookup key - var key interface{} - if keyFunc == nil { - // keyFunc was not provided. 
short circuiting validation - return token, NewValidationError("no Keyfunc was provided.", ValidationErrorUnverifiable) - } - if key, err = keyFunc(token); err != nil { - // keyFunc returned an error - return token, &ValidationError{Inner: err, Errors: ValidationErrorUnverifiable} - } - - // Check expiration times - vErr := &ValidationError{} - now := TimeFunc().Unix() - var exp, nbf int64 - var vexp, vnbf bool - - // Parse 'exp' claim - switch num := token.Claims["exp"].(type) { - case json.Number: - if exp, err = num.Int64(); err == nil { - vexp = true - } - case float64: - vexp = true - exp = int64(num) - } - - // Parse 'nbf' claim - switch num := token.Claims["nbf"].(type) { - case json.Number: - if nbf, err = num.Int64(); err == nil { - vnbf = true - } - case float64: - vnbf = true - nbf = int64(num) - } - - if vexp && now > exp { - delta := time.Unix(now, 0).Sub(time.Unix(exp, 0)) - vErr.Inner = fmt.Errorf("token is expired by %v", delta) - vErr.Errors |= ValidationErrorExpired - } - - if vnbf && now < nbf { - vErr.Inner = fmt.Errorf("token is not valid yet") - vErr.Errors |= ValidationErrorNotValidYet - } - - // Perform validation - token.Signature = parts[2] - if err = token.Method.Verify(strings.Join(parts[0:2], "."), token.Signature, key); err != nil { - vErr.Inner = err - vErr.Errors |= ValidationErrorSignatureInvalid - } - - if vErr.valid() { - token.Valid = true - return token, nil - } - - return token, vErr -} diff --git a/vendor/github.com/dgrijalva/jwt-go/parser_test.go b/vendor/github.com/dgrijalva/jwt-go/parser_test.go deleted file mode 100644 index f1b7d2a..0000000 --- a/vendor/github.com/dgrijalva/jwt-go/parser_test.go +++ /dev/null @@ -1,275 +0,0 @@ -package jwt_test - -import ( - "encoding/json" - "fmt" - "io/ioutil" - "net/http" - "reflect" - "testing" - "time" - - "github.com/dgrijalva/jwt-go" -) - -var keyFuncError error = fmt.Errorf("error loading key") - -var ( - jwtTestDefaultKey []byte - defaultKeyFunc jwt.Keyfunc = func(t *jwt.Token) (interface{}, error) { return jwtTestDefaultKey, nil } - emptyKeyFunc jwt.Keyfunc = func(t *jwt.Token) (interface{}, error) { return nil, nil } - errorKeyFunc jwt.Keyfunc = func(t *jwt.Token) (interface{}, error) { return nil, keyFuncError } - nilKeyFunc jwt.Keyfunc = nil -) - -var jwtTestData = []struct { - name string - tokenString string - keyfunc jwt.Keyfunc - claims map[string]interface{} - valid bool - errors uint32 - parser *jwt.Parser -}{ - { - "basic", - "eyJ0eXAiOiJKV1QiLCJhbGciOiJSUzI1NiJ9.eyJmb28iOiJiYXIifQ.FhkiHkoESI_cG3NPigFrxEk9Z60_oXrOT2vGm9Pn6RDgYNovYORQmmA0zs1AoAOf09ly2Nx2YAg6ABqAYga1AcMFkJljwxTT5fYphTuqpWdy4BELeSYJx5Ty2gmr8e7RonuUztrdD5WfPqLKMm1Ozp_T6zALpRmwTIW0QPnaBXaQD90FplAg46Iy1UlDKr-Eupy0i5SLch5Q-p2ZpaL_5fnTIUDlxC3pWhJTyx_71qDI-mAA_5lE_VdroOeflG56sSmDxopPEG3bFlSu1eowyBfxtu0_CuVd-M42RU75Zc4Gsj6uV77MBtbMrf4_7M_NUTSgoIF3fRqxrj0NzihIBg", - defaultKeyFunc, - map[string]interface{}{"foo": "bar"}, - true, - 0, - nil, - }, - { - "basic expired", - "", // autogen - defaultKeyFunc, - map[string]interface{}{"foo": "bar", "exp": float64(time.Now().Unix() - 100)}, - false, - jwt.ValidationErrorExpired, - nil, - }, - { - "basic nbf", - "", // autogen - defaultKeyFunc, - map[string]interface{}{"foo": "bar", "nbf": float64(time.Now().Unix() + 100)}, - false, - jwt.ValidationErrorNotValidYet, - nil, - }, - { - "expired and nbf", - "", // autogen - defaultKeyFunc, - map[string]interface{}{"foo": "bar", "nbf": float64(time.Now().Unix() + 100), "exp": float64(time.Now().Unix() - 100)}, - false, - jwt.ValidationErrorNotValidYet 
| jwt.ValidationErrorExpired, - nil, - }, - { - "basic invalid", - "eyJ0eXAiOiJKV1QiLCJhbGciOiJSUzI1NiJ9.eyJmb28iOiJiYXIifQ.EhkiHkoESI_cG3NPigFrxEk9Z60_oXrOT2vGm9Pn6RDgYNovYORQmmA0zs1AoAOf09ly2Nx2YAg6ABqAYga1AcMFkJljwxTT5fYphTuqpWdy4BELeSYJx5Ty2gmr8e7RonuUztrdD5WfPqLKMm1Ozp_T6zALpRmwTIW0QPnaBXaQD90FplAg46Iy1UlDKr-Eupy0i5SLch5Q-p2ZpaL_5fnTIUDlxC3pWhJTyx_71qDI-mAA_5lE_VdroOeflG56sSmDxopPEG3bFlSu1eowyBfxtu0_CuVd-M42RU75Zc4Gsj6uV77MBtbMrf4_7M_NUTSgoIF3fRqxrj0NzihIBg", - defaultKeyFunc, - map[string]interface{}{"foo": "bar"}, - false, - jwt.ValidationErrorSignatureInvalid, - nil, - }, - { - "basic nokeyfunc", - "eyJ0eXAiOiJKV1QiLCJhbGciOiJSUzI1NiJ9.eyJmb28iOiJiYXIifQ.FhkiHkoESI_cG3NPigFrxEk9Z60_oXrOT2vGm9Pn6RDgYNovYORQmmA0zs1AoAOf09ly2Nx2YAg6ABqAYga1AcMFkJljwxTT5fYphTuqpWdy4BELeSYJx5Ty2gmr8e7RonuUztrdD5WfPqLKMm1Ozp_T6zALpRmwTIW0QPnaBXaQD90FplAg46Iy1UlDKr-Eupy0i5SLch5Q-p2ZpaL_5fnTIUDlxC3pWhJTyx_71qDI-mAA_5lE_VdroOeflG56sSmDxopPEG3bFlSu1eowyBfxtu0_CuVd-M42RU75Zc4Gsj6uV77MBtbMrf4_7M_NUTSgoIF3fRqxrj0NzihIBg", - nilKeyFunc, - map[string]interface{}{"foo": "bar"}, - false, - jwt.ValidationErrorUnverifiable, - nil, - }, - { - "basic nokey", - "eyJ0eXAiOiJKV1QiLCJhbGciOiJSUzI1NiJ9.eyJmb28iOiJiYXIifQ.FhkiHkoESI_cG3NPigFrxEk9Z60_oXrOT2vGm9Pn6RDgYNovYORQmmA0zs1AoAOf09ly2Nx2YAg6ABqAYga1AcMFkJljwxTT5fYphTuqpWdy4BELeSYJx5Ty2gmr8e7RonuUztrdD5WfPqLKMm1Ozp_T6zALpRmwTIW0QPnaBXaQD90FplAg46Iy1UlDKr-Eupy0i5SLch5Q-p2ZpaL_5fnTIUDlxC3pWhJTyx_71qDI-mAA_5lE_VdroOeflG56sSmDxopPEG3bFlSu1eowyBfxtu0_CuVd-M42RU75Zc4Gsj6uV77MBtbMrf4_7M_NUTSgoIF3fRqxrj0NzihIBg", - emptyKeyFunc, - map[string]interface{}{"foo": "bar"}, - false, - jwt.ValidationErrorSignatureInvalid, - nil, - }, - { - "basic errorkey", - "eyJ0eXAiOiJKV1QiLCJhbGciOiJSUzI1NiJ9.eyJmb28iOiJiYXIifQ.FhkiHkoESI_cG3NPigFrxEk9Z60_oXrOT2vGm9Pn6RDgYNovYORQmmA0zs1AoAOf09ly2Nx2YAg6ABqAYga1AcMFkJljwxTT5fYphTuqpWdy4BELeSYJx5Ty2gmr8e7RonuUztrdD5WfPqLKMm1Ozp_T6zALpRmwTIW0QPnaBXaQD90FplAg46Iy1UlDKr-Eupy0i5SLch5Q-p2ZpaL_5fnTIUDlxC3pWhJTyx_71qDI-mAA_5lE_VdroOeflG56sSmDxopPEG3bFlSu1eowyBfxtu0_CuVd-M42RU75Zc4Gsj6uV77MBtbMrf4_7M_NUTSgoIF3fRqxrj0NzihIBg", - errorKeyFunc, - map[string]interface{}{"foo": "bar"}, - false, - jwt.ValidationErrorUnverifiable, - nil, - }, - { - "invalid signing method", - "", - defaultKeyFunc, - map[string]interface{}{"foo": "bar"}, - false, - jwt.ValidationErrorSignatureInvalid, - &jwt.Parser{ValidMethods: []string{"HS256"}}, - }, - { - "valid signing method", - "", - defaultKeyFunc, - map[string]interface{}{"foo": "bar"}, - true, - 0, - &jwt.Parser{ValidMethods: []string{"RS256", "HS256"}}, - }, - { - "JSON Number", - "", - defaultKeyFunc, - map[string]interface{}{"foo": json.Number("123.4")}, - true, - 0, - &jwt.Parser{UseJSONNumber: true}, - }, - { - "JSON Number - basic expired", - "", // autogen - defaultKeyFunc, - map[string]interface{}{"foo": "bar", "exp": json.Number(fmt.Sprintf("%v", time.Now().Unix()-100))}, - false, - jwt.ValidationErrorExpired, - &jwt.Parser{UseJSONNumber: true}, - }, - { - "JSON Number - basic nbf", - "", // autogen - defaultKeyFunc, - map[string]interface{}{"foo": "bar", "nbf": json.Number(fmt.Sprintf("%v", time.Now().Unix()+100))}, - false, - jwt.ValidationErrorNotValidYet, - &jwt.Parser{UseJSONNumber: true}, - }, - { - "JSON Number - expired and nbf", - "", // autogen - defaultKeyFunc, - map[string]interface{}{"foo": "bar", "nbf": json.Number(fmt.Sprintf("%v", time.Now().Unix()+100)), "exp": json.Number(fmt.Sprintf("%v", time.Now().Unix()-100))}, - false, - jwt.ValidationErrorNotValidYet | jwt.ValidationErrorExpired, - 
&jwt.Parser{UseJSONNumber: true}, - }, -} - -func init() { - var e error - if jwtTestDefaultKey, e = ioutil.ReadFile("test/sample_key.pub"); e != nil { - panic(e) - } -} - -func makeSample(c map[string]interface{}) string { - key, e := ioutil.ReadFile("test/sample_key") - if e != nil { - panic(e.Error()) - } - - token := jwt.New(jwt.SigningMethodRS256) - token.Claims = c - s, e := token.SignedString(key) - - if e != nil { - panic(e.Error()) - } - - return s -} - -func TestParser_Parse(t *testing.T) { - for _, data := range jwtTestData { - if data.tokenString == "" { - data.tokenString = makeSample(data.claims) - } - - var token *jwt.Token - var err error - if data.parser != nil { - token, err = data.parser.Parse(data.tokenString, data.keyfunc) - } else { - token, err = jwt.Parse(data.tokenString, data.keyfunc) - } - - if !reflect.DeepEqual(data.claims, token.Claims) { - t.Errorf("[%v] Claims mismatch. Expecting: %v Got: %v", data.name, data.claims, token.Claims) - } - if data.valid && err != nil { - t.Errorf("[%v] Error while verifying token: %T:%v", data.name, err, err) - } - if !data.valid && err == nil { - t.Errorf("[%v] Invalid token passed validation", data.name) - } - if data.errors != 0 { - if err == nil { - t.Errorf("[%v] Expecting error. Didn't get one.", data.name) - } else { - - ve := err.(*jwt.ValidationError) - // compare the bitfield part of the error - if e := ve.Errors; e != data.errors { - t.Errorf("[%v] Errors don't match expectation. %v != %v", data.name, e, data.errors) - } - - if err.Error() == keyFuncError.Error() && ve.Inner != keyFuncError { - t.Errorf("[%v] Inner error does not match expectation. %v != %v", data.name, ve.Inner, keyFuncError) - } - } - } - if data.valid && token.Signature == "" { - t.Errorf("[%v] Signature is left unpopulated after parsing", data.name) - } - } -} - -func TestParseRequest(t *testing.T) { - // Bearer token request - for _, data := range jwtTestData { - // FIXME: custom parsers are not supported by this helper. skip tests that require them - if data.parser != nil { - t.Logf("Skipping [%v]. Custom parsers are not supported by ParseRequest", data.name) - continue - } - - if data.tokenString == "" { - data.tokenString = makeSample(data.claims) - } - - r, _ := http.NewRequest("GET", "/", nil) - r.Header.Set("Authorization", fmt.Sprintf("Bearer %v", data.tokenString)) - token, err := jwt.ParseFromRequest(r, data.keyfunc) - - if token == nil { - t.Errorf("[%v] Token was not found: %v", data.name, err) - continue - } - if !reflect.DeepEqual(data.claims, token.Claims) { - t.Errorf("[%v] Claims mismatch. 
Expecting: %v Got: %v", data.name, data.claims, token.Claims) - } - if data.valid && err != nil { - t.Errorf("[%v] Error while verifying token: %v", data.name, err) - } - if !data.valid && err == nil { - t.Errorf("[%v] Invalid token passed validation", data.name) - } - } -} - -// Helper method for benchmarking various methods -func benchmarkSigning(b *testing.B, method jwt.SigningMethod, key interface{}) { - t := jwt.New(method) - b.RunParallel(func(pb *testing.PB) { - for pb.Next() { - if _, err := t.SignedString(key); err != nil { - b.Fatal(err) - } - } - }) - -} diff --git a/vendor/github.com/dgrijalva/jwt-go/rsa.go b/vendor/github.com/dgrijalva/jwt-go/rsa.go deleted file mode 100644 index cddffce..0000000 --- a/vendor/github.com/dgrijalva/jwt-go/rsa.go +++ /dev/null @@ -1,114 +0,0 @@ -package jwt - -import ( - "crypto" - "crypto/rand" - "crypto/rsa" -) - -// Implements the RSA family of signing methods signing methods -type SigningMethodRSA struct { - Name string - Hash crypto.Hash -} - -// Specific instances for RS256 and company -var ( - SigningMethodRS256 *SigningMethodRSA - SigningMethodRS384 *SigningMethodRSA - SigningMethodRS512 *SigningMethodRSA -) - -func init() { - // RS256 - SigningMethodRS256 = &SigningMethodRSA{"RS256", crypto.SHA256} - RegisterSigningMethod(SigningMethodRS256.Alg(), func() SigningMethod { - return SigningMethodRS256 - }) - - // RS384 - SigningMethodRS384 = &SigningMethodRSA{"RS384", crypto.SHA384} - RegisterSigningMethod(SigningMethodRS384.Alg(), func() SigningMethod { - return SigningMethodRS384 - }) - - // RS512 - SigningMethodRS512 = &SigningMethodRSA{"RS512", crypto.SHA512} - RegisterSigningMethod(SigningMethodRS512.Alg(), func() SigningMethod { - return SigningMethodRS512 - }) -} - -func (m *SigningMethodRSA) Alg() string { - return m.Name -} - -// Implements the Verify method from SigningMethod -// For this signing method, must be either a PEM encoded PKCS1 or PKCS8 RSA public key as -// []byte, or an rsa.PublicKey structure. -func (m *SigningMethodRSA) Verify(signingString, signature string, key interface{}) error { - var err error - - // Decode the signature - var sig []byte - if sig, err = DecodeSegment(signature); err != nil { - return err - } - - var rsaKey *rsa.PublicKey - - switch k := key.(type) { - case []byte: - if rsaKey, err = ParseRSAPublicKeyFromPEM(k); err != nil { - return err - } - case *rsa.PublicKey: - rsaKey = k - default: - return ErrInvalidKey - } - - // Create hasher - if !m.Hash.Available() { - return ErrHashUnavailable - } - hasher := m.Hash.New() - hasher.Write([]byte(signingString)) - - // Verify the signature - return rsa.VerifyPKCS1v15(rsaKey, m.Hash, hasher.Sum(nil), sig) -} - -// Implements the Sign method from SigningMethod -// For this signing method, must be either a PEM encoded PKCS1 or PKCS8 RSA private key as -// []byte, or an rsa.PrivateKey structure. 
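The doc comments on `SigningMethodRSA` above note that `Sign` and `Verify` accept either PEM-encoded key bytes or an already-parsed `*rsa.PrivateKey`/`*rsa.PublicKey`. A brief sketch of both forms against the vendored v2 API; the PEM path is a placeholder:

```go
package main

import (
	"fmt"
	"io/ioutil"

	jwt "github.com/dgrijalva/jwt-go"
)

func main() {
	pemBytes, err := ioutil.ReadFile("test/sample_key") // placeholder path
	if err != nil {
		panic(err)
	}

	t := jwt.New(jwt.SigningMethodRS256)
	t.Claims["foo"] = "bar"

	// Form 1: hand the PEM bytes straight to the library and let it parse them.
	signedFromPEM, _ := t.SignedString(pemBytes)

	// Form 2: parse once up front and reuse the *rsa.PrivateKey, avoiding a
	// PEM parse on every SignedString call.
	priv, err := jwt.ParseRSAPrivateKeyFromPEM(pemBytes)
	if err != nil {
		panic(err)
	}
	signedFromKey, _ := t.SignedString(priv)

	fmt.Println(len(signedFromPEM) > 0, len(signedFromKey) > 0)
}
```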
-func (m *SigningMethodRSA) Sign(signingString string, key interface{}) (string, error) { - var err error - var rsaKey *rsa.PrivateKey - - switch k := key.(type) { - case []byte: - if rsaKey, err = ParseRSAPrivateKeyFromPEM(k); err != nil { - return "", err - } - case *rsa.PrivateKey: - rsaKey = k - default: - return "", ErrInvalidKey - } - - // Create the hasher - if !m.Hash.Available() { - return "", ErrHashUnavailable - } - - hasher := m.Hash.New() - hasher.Write([]byte(signingString)) - - // Sign the string and return the encoded bytes - if sigBytes, err := rsa.SignPKCS1v15(rand.Reader, rsaKey, m.Hash, hasher.Sum(nil)); err == nil { - return EncodeSegment(sigBytes), nil - } else { - return "", err - } -} diff --git a/vendor/github.com/dgrijalva/jwt-go/rsa_pss.go b/vendor/github.com/dgrijalva/jwt-go/rsa_pss.go deleted file mode 100644 index b5b7073..0000000 --- a/vendor/github.com/dgrijalva/jwt-go/rsa_pss.go +++ /dev/null @@ -1,126 +0,0 @@ -// +build go1.4 - -package jwt - -import ( - "crypto" - "crypto/rand" - "crypto/rsa" -) - -// Implements the RSAPSS family of signing methods signing methods -type SigningMethodRSAPSS struct { - *SigningMethodRSA - Options *rsa.PSSOptions -} - -// Specific instances for RS/PS and company -var ( - SigningMethodPS256 *SigningMethodRSAPSS - SigningMethodPS384 *SigningMethodRSAPSS - SigningMethodPS512 *SigningMethodRSAPSS -) - -func init() { - // PS256 - SigningMethodPS256 = &SigningMethodRSAPSS{ - &SigningMethodRSA{ - Name: "PS256", - Hash: crypto.SHA256, - }, - &rsa.PSSOptions{ - SaltLength: rsa.PSSSaltLengthAuto, - Hash: crypto.SHA256, - }, - } - RegisterSigningMethod(SigningMethodPS256.Alg(), func() SigningMethod { - return SigningMethodPS256 - }) - - // PS384 - SigningMethodPS384 = &SigningMethodRSAPSS{ - &SigningMethodRSA{ - Name: "PS384", - Hash: crypto.SHA384, - }, - &rsa.PSSOptions{ - SaltLength: rsa.PSSSaltLengthAuto, - Hash: crypto.SHA384, - }, - } - RegisterSigningMethod(SigningMethodPS384.Alg(), func() SigningMethod { - return SigningMethodPS384 - }) - - // PS512 - SigningMethodPS512 = &SigningMethodRSAPSS{ - &SigningMethodRSA{ - Name: "PS512", - Hash: crypto.SHA512, - }, - &rsa.PSSOptions{ - SaltLength: rsa.PSSSaltLengthAuto, - Hash: crypto.SHA512, - }, - } - RegisterSigningMethod(SigningMethodPS512.Alg(), func() SigningMethod { - return SigningMethodPS512 - }) -} - -// Implements the Verify method from SigningMethod -// For this verify method, key must be an rsa.PublicKey struct -func (m *SigningMethodRSAPSS) Verify(signingString, signature string, key interface{}) error { - var err error - - // Decode the signature - var sig []byte - if sig, err = DecodeSegment(signature); err != nil { - return err - } - - var rsaKey *rsa.PublicKey - switch k := key.(type) { - case *rsa.PublicKey: - rsaKey = k - default: - return ErrInvalidKey - } - - // Create hasher - if !m.Hash.Available() { - return ErrHashUnavailable - } - hasher := m.Hash.New() - hasher.Write([]byte(signingString)) - - return rsa.VerifyPSS(rsaKey, m.Hash, hasher.Sum(nil), sig, m.Options) -} - -// Implements the Sign method from SigningMethod -// For this signing method, key must be an rsa.PrivateKey struct -func (m *SigningMethodRSAPSS) Sign(signingString string, key interface{}) (string, error) { - var rsaKey *rsa.PrivateKey - - switch k := key.(type) { - case *rsa.PrivateKey: - rsaKey = k - default: - return "", ErrInvalidKey - } - - // Create the hasher - if !m.Hash.Available() { - return "", ErrHashUnavailable - } - - hasher := m.Hash.New() - hasher.Write([]byte(signingString)) 
- - // Sign the string and return the encoded bytes - if sigBytes, err := rsa.SignPSS(rand.Reader, rsaKey, m.Hash, hasher.Sum(nil), m.Options); err == nil { - return EncodeSegment(sigBytes), nil - } else { - return "", err - } -} diff --git a/vendor/github.com/dgrijalva/jwt-go/rsa_pss_test.go b/vendor/github.com/dgrijalva/jwt-go/rsa_pss_test.go deleted file mode 100644 index 9045aaf..0000000 --- a/vendor/github.com/dgrijalva/jwt-go/rsa_pss_test.go +++ /dev/null @@ -1,96 +0,0 @@ -// +build go1.4 - -package jwt_test - -import ( - "crypto/rsa" - "io/ioutil" - "strings" - "testing" - - "github.com/dgrijalva/jwt-go" -) - -var rsaPSSTestData = []struct { - name string - tokenString string - alg string - claims map[string]interface{} - valid bool -}{ - { - "Basic PS256", - "eyJhbGciOiJQUzI1NiIsInR5cCI6IkpXVCJ9.eyJmb28iOiJiYXIifQ.PPG4xyDVY8ffp4CcxofNmsTDXsrVG2npdQuibLhJbv4ClyPTUtR5giNSvuxo03kB6I8VXVr0Y9X7UxhJVEoJOmULAwRWaUsDnIewQa101cVhMa6iR8X37kfFoiZ6NkS-c7henVkkQWu2HtotkEtQvN5hFlk8IevXXPmvZlhQhwzB1sGzGYnoi1zOfuL98d3BIjUjtlwii5w6gYG2AEEzp7HnHCsb3jIwUPdq86Oe6hIFjtBwduIK90ca4UqzARpcfwxHwVLMpatKask00AgGVI0ysdk0BLMjmLutquD03XbThHScC2C2_Pp4cHWgMzvbgLU2RYYZcZRKr46QeNgz9w", - "PS256", - map[string]interface{}{"foo": "bar"}, - true, - }, - { - "Basic PS384", - "eyJhbGciOiJQUzM4NCIsInR5cCI6IkpXVCJ9.eyJmb28iOiJiYXIifQ.w7-qqgj97gK4fJsq_DCqdYQiylJjzWONvD0qWWWhqEOFk2P1eDULPnqHRnjgTXoO4HAw4YIWCsZPet7nR3Xxq4ZhMqvKW8b7KlfRTb9cH8zqFvzMmybQ4jv2hKc3bXYqVow3AoR7hN_CWXI3Dv6Kd2X5xhtxRHI6IL39oTVDUQ74LACe-9t4c3QRPuj6Pq1H4FAT2E2kW_0KOc6EQhCLWEhm2Z2__OZskDC8AiPpP8Kv4k2vB7l0IKQu8Pr4RcNBlqJdq8dA5D3hk5TLxP8V5nG1Ib80MOMMqoS3FQvSLyolFX-R_jZ3-zfq6Ebsqr0yEb0AH2CfsECF7935Pa0FKQ", - "PS384", - map[string]interface{}{"foo": "bar"}, - true, - }, - { - "Basic PS512", - "eyJhbGciOiJQUzUxMiIsInR5cCI6IkpXVCJ9.eyJmb28iOiJiYXIifQ.GX1HWGzFaJevuSLavqqFYaW8_TpvcjQ8KfC5fXiSDzSiT9UD9nB_ikSmDNyDILNdtjZLSvVKfXxZJqCfefxAtiozEDDdJthZ-F0uO4SPFHlGiXszvKeodh7BuTWRI2wL9-ZO4mFa8nq3GMeQAfo9cx11i7nfN8n2YNQ9SHGovG7_T_AvaMZB_jT6jkDHpwGR9mz7x1sycckEo6teLdHRnH_ZdlHlxqknmyTu8Odr5Xh0sJFOL8BepWbbvIIn-P161rRHHiDWFv6nhlHwZnVzjx7HQrWSGb6-s2cdLie9QL_8XaMcUpjLkfOMKkDOfHo6AvpL7Jbwi83Z2ZTHjJWB-A", - "PS512", - map[string]interface{}{"foo": "bar"}, - true, - }, - { - "basic PS256 invalid: foo => bar", - "eyJhbGciOiJQUzI1NiIsInR5cCI6IkpXVCJ9.eyJmb28iOiJiYXIifQ.PPG4xyDVY8ffp4CcxofNmsTDXsrVG2npdQuibLhJbv4ClyPTUtR5giNSvuxo03kB6I8VXVr0Y9X7UxhJVEoJOmULAwRWaUsDnIewQa101cVhMa6iR8X37kfFoiZ6NkS-c7henVkkQWu2HtotkEtQvN5hFlk8IevXXPmvZlhQhwzB1sGzGYnoi1zOfuL98d3BIjUjtlwii5w6gYG2AEEzp7HnHCsb3jIwUPdq86Oe6hIFjtBwduIK90ca4UqzARpcfwxHwVLMpatKask00AgGVI0ysdk0BLMjmLutquD03XbThHScC2C2_Pp4cHWgMzvbgLU2RYYZcZRKr46QeNgz9W", - "PS256", - map[string]interface{}{"foo": "bar"}, - false, - }, -} - -func TestRSAPSSVerify(t *testing.T) { - var err error - - key, _ := ioutil.ReadFile("test/sample_key.pub") - var rsaPSSKey *rsa.PublicKey - if rsaPSSKey, err = jwt.ParseRSAPublicKeyFromPEM(key); err != nil { - t.Errorf("Unable to parse RSA public key: %v", err) - } - - for _, data := range rsaPSSTestData { - parts := strings.Split(data.tokenString, ".") - - method := jwt.GetSigningMethod(data.alg) - err := method.Verify(strings.Join(parts[0:2], "."), parts[2], rsaPSSKey) - if data.valid && err != nil { - t.Errorf("[%v] Error while verifying key: %v", data.name, err) - } - if !data.valid && err == nil { - t.Errorf("[%v] Invalid key passed validation", data.name) - } - } -} - -func TestRSAPSSSign(t *testing.T) { - var err error - - key, _ := ioutil.ReadFile("test/sample_key") - var rsaPSSKey *rsa.PrivateKey 
- if rsaPSSKey, err = jwt.ParseRSAPrivateKeyFromPEM(key); err != nil { - t.Errorf("Unable to parse RSA private key: %v", err) - } - - for _, data := range rsaPSSTestData { - if data.valid { - parts := strings.Split(data.tokenString, ".") - method := jwt.GetSigningMethod(data.alg) - sig, err := method.Sign(strings.Join(parts[0:2], "."), rsaPSSKey) - if err != nil { - t.Errorf("[%v] Error signing token: %v", data.name, err) - } - if sig == parts[2] { - t.Errorf("[%v] Signatures shouldn't match\nnew:\n%v\noriginal:\n%v", data.name, sig, parts[2]) - } - } - } -} diff --git a/vendor/github.com/dgrijalva/jwt-go/rsa_test.go b/vendor/github.com/dgrijalva/jwt-go/rsa_test.go deleted file mode 100644 index 13ba1fc..0000000 --- a/vendor/github.com/dgrijalva/jwt-go/rsa_test.go +++ /dev/null @@ -1,174 +0,0 @@ -package jwt_test - -import ( - "github.com/dgrijalva/jwt-go" - "io/ioutil" - "strings" - "testing" -) - -var rsaTestData = []struct { - name string - tokenString string - alg string - claims map[string]interface{} - valid bool -}{ - { - "Basic RS256", - "eyJ0eXAiOiJKV1QiLCJhbGciOiJSUzI1NiJ9.eyJmb28iOiJiYXIifQ.FhkiHkoESI_cG3NPigFrxEk9Z60_oXrOT2vGm9Pn6RDgYNovYORQmmA0zs1AoAOf09ly2Nx2YAg6ABqAYga1AcMFkJljwxTT5fYphTuqpWdy4BELeSYJx5Ty2gmr8e7RonuUztrdD5WfPqLKMm1Ozp_T6zALpRmwTIW0QPnaBXaQD90FplAg46Iy1UlDKr-Eupy0i5SLch5Q-p2ZpaL_5fnTIUDlxC3pWhJTyx_71qDI-mAA_5lE_VdroOeflG56sSmDxopPEG3bFlSu1eowyBfxtu0_CuVd-M42RU75Zc4Gsj6uV77MBtbMrf4_7M_NUTSgoIF3fRqxrj0NzihIBg", - "RS256", - map[string]interface{}{"foo": "bar"}, - true, - }, - { - "Basic RS384", - "eyJhbGciOiJSUzM4NCIsInR5cCI6IkpXVCJ9.eyJmb28iOiJiYXIifQ.W-jEzRfBigtCWsinvVVuldiuilzVdU5ty0MvpLaSaqK9PlAWWlDQ1VIQ_qSKzwL5IXaZkvZFJXT3yL3n7OUVu7zCNJzdwznbC8Z-b0z2lYvcklJYi2VOFRcGbJtXUqgjk2oGsiqUMUMOLP70TTefkpsgqDxbRh9CDUfpOJgW-dU7cmgaoswe3wjUAUi6B6G2YEaiuXC0XScQYSYVKIzgKXJV8Zw-7AN_DBUI4GkTpsvQ9fVVjZM9csQiEXhYekyrKu1nu_POpQonGd8yqkIyXPECNmmqH5jH4sFiF67XhD7_JpkvLziBpI-uh86evBUadmHhb9Otqw3uV3NTaXLzJw", - "RS384", - map[string]interface{}{"foo": "bar"}, - true, - }, - { - "Basic RS512", - "eyJhbGciOiJSUzUxMiIsInR5cCI6IkpXVCJ9.eyJmb28iOiJiYXIifQ.zBlLlmRrUxx4SJPUbV37Q1joRcI9EW13grnKduK3wtYKmDXbgDpF1cZ6B-2Jsm5RB8REmMiLpGms-EjXhgnyh2TSHE-9W2gA_jvshegLWtwRVDX40ODSkTb7OVuaWgiy9y7llvcknFBTIg-FnVPVpXMmeV_pvwQyhaz1SSwSPrDyxEmksz1hq7YONXhXPpGaNbMMeDTNP_1oj8DZaqTIL9TwV8_1wb2Odt_Fy58Ke2RVFijsOLdnyEAjt2n9Mxihu9i3PhNBkkxa2GbnXBfq3kzvZ_xxGGopLdHhJjcGWXO-NiwI9_tiu14NRv4L2xC0ItD9Yz68v2ZIZEp_DuzwRQ", - "RS512", - map[string]interface{}{"foo": "bar"}, - true, - }, - { - "basic invalid: foo => bar", - "eyJ0eXAiOiJKV1QiLCJhbGciOiJSUzI1NiJ9.eyJmb28iOiJiYXIifQ.EhkiHkoESI_cG3NPigFrxEk9Z60_oXrOT2vGm9Pn6RDgYNovYORQmmA0zs1AoAOf09ly2Nx2YAg6ABqAYga1AcMFkJljwxTT5fYphTuqpWdy4BELeSYJx5Ty2gmr8e7RonuUztrdD5WfPqLKMm1Ozp_T6zALpRmwTIW0QPnaBXaQD90FplAg46Iy1UlDKr-Eupy0i5SLch5Q-p2ZpaL_5fnTIUDlxC3pWhJTyx_71qDI-mAA_5lE_VdroOeflG56sSmDxopPEG3bFlSu1eowyBfxtu0_CuVd-M42RU75Zc4Gsj6uV77MBtbMrf4_7M_NUTSgoIF3fRqxrj0NzihIBg", - "RS256", - map[string]interface{}{"foo": "bar"}, - false, - }, -} - -func TestRSAVerify(t *testing.T) { - key, _ := ioutil.ReadFile("test/sample_key.pub") - - for _, data := range rsaTestData { - parts := strings.Split(data.tokenString, ".") - - method := jwt.GetSigningMethod(data.alg) - err := method.Verify(strings.Join(parts[0:2], "."), parts[2], key) - if data.valid && err != nil { - t.Errorf("[%v] Error while verifying key: %v", data.name, err) - } - if !data.valid && err == nil { - t.Errorf("[%v] Invalid key passed validation", data.name) - } - } -} - -func TestRSASign(t *testing.T) { - key, _ 
:= ioutil.ReadFile("test/sample_key") - - for _, data := range rsaTestData { - if data.valid { - parts := strings.Split(data.tokenString, ".") - method := jwt.GetSigningMethod(data.alg) - sig, err := method.Sign(strings.Join(parts[0:2], "."), key) - if err != nil { - t.Errorf("[%v] Error signing token: %v", data.name, err) - } - if sig != parts[2] { - t.Errorf("[%v] Incorrect signature.\nwas:\n%v\nexpecting:\n%v", data.name, sig, parts[2]) - } - } - } -} - -func TestRSAVerifyWithPreParsedPrivateKey(t *testing.T) { - key, _ := ioutil.ReadFile("test/sample_key.pub") - parsedKey, err := jwt.ParseRSAPublicKeyFromPEM(key) - if err != nil { - t.Fatal(err) - } - testData := rsaTestData[0] - parts := strings.Split(testData.tokenString, ".") - err = jwt.SigningMethodRS256.Verify(strings.Join(parts[0:2], "."), parts[2], parsedKey) - if err != nil { - t.Errorf("[%v] Error while verifying key: %v", testData.name, err) - } -} - -func TestRSAWithPreParsedPrivateKey(t *testing.T) { - key, _ := ioutil.ReadFile("test/sample_key") - parsedKey, err := jwt.ParseRSAPrivateKeyFromPEM(key) - if err != nil { - t.Fatal(err) - } - testData := rsaTestData[0] - parts := strings.Split(testData.tokenString, ".") - sig, err := jwt.SigningMethodRS256.Sign(strings.Join(parts[0:2], "."), parsedKey) - if err != nil { - t.Errorf("[%v] Error signing token: %v", testData.name, err) - } - if sig != parts[2] { - t.Errorf("[%v] Incorrect signature.\nwas:\n%v\nexpecting:\n%v", testData.name, sig, parts[2]) - } -} - -func TestRSAKeyParsing(t *testing.T) { - key, _ := ioutil.ReadFile("test/sample_key") - pubKey, _ := ioutil.ReadFile("test/sample_key.pub") - badKey := []byte("All your base are belong to key") - - // Test parsePrivateKey - if _, e := jwt.ParseRSAPrivateKeyFromPEM(key); e != nil { - t.Errorf("Failed to parse valid private key: %v", e) - } - - if k, e := jwt.ParseRSAPrivateKeyFromPEM(pubKey); e == nil { - t.Errorf("Parsed public key as valid private key: %v", k) - } - - if k, e := jwt.ParseRSAPrivateKeyFromPEM(badKey); e == nil { - t.Errorf("Parsed invalid key as valid private key: %v", k) - } - - // Test parsePublicKey - if _, e := jwt.ParseRSAPublicKeyFromPEM(pubKey); e != nil { - t.Errorf("Failed to parse valid public key: %v", e) - } - - if k, e := jwt.ParseRSAPublicKeyFromPEM(key); e == nil { - t.Errorf("Parsed private key as valid public key: %v", k) - } - - if k, e := jwt.ParseRSAPublicKeyFromPEM(badKey); e == nil { - t.Errorf("Parsed invalid key as valid private key: %v", k) - } - -} - -func BenchmarkRS256Signing(b *testing.B) { - key, _ := ioutil.ReadFile("test/sample_key") - parsedKey, err := jwt.ParseRSAPrivateKeyFromPEM(key) - if err != nil { - b.Fatal(err) - } - - benchmarkSigning(b, jwt.SigningMethodRS256, parsedKey) -} - -func BenchmarkRS384Signing(b *testing.B) { - key, _ := ioutil.ReadFile("test/sample_key") - parsedKey, err := jwt.ParseRSAPrivateKeyFromPEM(key) - if err != nil { - b.Fatal(err) - } - - benchmarkSigning(b, jwt.SigningMethodRS384, parsedKey) -} - -func BenchmarkRS512Signing(b *testing.B) { - key, _ := ioutil.ReadFile("test/sample_key") - parsedKey, err := jwt.ParseRSAPrivateKeyFromPEM(key) - if err != nil { - b.Fatal(err) - } - - benchmarkSigning(b, jwt.SigningMethodRS512, parsedKey) -} diff --git a/vendor/github.com/dgrijalva/jwt-go/rsa_utils.go b/vendor/github.com/dgrijalva/jwt-go/rsa_utils.go deleted file mode 100644 index 213a90d..0000000 --- a/vendor/github.com/dgrijalva/jwt-go/rsa_utils.go +++ /dev/null @@ -1,69 +0,0 @@ -package jwt - -import ( - "crypto/rsa" - "crypto/x509" - 
"encoding/pem" - "errors" -) - -var ( - ErrKeyMustBePEMEncoded = errors.New("Invalid Key: Key must be PEM encoded PKCS1 or PKCS8 private key") - ErrNotRSAPrivateKey = errors.New("Key is not a valid RSA private key") - ErrNotRSAPublicKey = errors.New("Key is not a valid RSA public key") -) - -// Parse PEM encoded PKCS1 or PKCS8 private key -func ParseRSAPrivateKeyFromPEM(key []byte) (*rsa.PrivateKey, error) { - var err error - - // Parse PEM block - var block *pem.Block - if block, _ = pem.Decode(key); block == nil { - return nil, ErrKeyMustBePEMEncoded - } - - var parsedKey interface{} - if parsedKey, err = x509.ParsePKCS1PrivateKey(block.Bytes); err != nil { - if parsedKey, err = x509.ParsePKCS8PrivateKey(block.Bytes); err != nil { - return nil, err - } - } - - var pkey *rsa.PrivateKey - var ok bool - if pkey, ok = parsedKey.(*rsa.PrivateKey); !ok { - return nil, ErrNotRSAPrivateKey - } - - return pkey, nil -} - -// Parse PEM encoded PKCS1 or PKCS8 public key -func ParseRSAPublicKeyFromPEM(key []byte) (*rsa.PublicKey, error) { - var err error - - // Parse PEM block - var block *pem.Block - if block, _ = pem.Decode(key); block == nil { - return nil, ErrKeyMustBePEMEncoded - } - - // Parse the key - var parsedKey interface{} - if parsedKey, err = x509.ParsePKIXPublicKey(block.Bytes); err != nil { - if cert, err := x509.ParseCertificate(block.Bytes); err == nil { - parsedKey = cert.PublicKey - } else { - return nil, err - } - } - - var pkey *rsa.PublicKey - var ok bool - if pkey, ok = parsedKey.(*rsa.PublicKey); !ok { - return nil, ErrNotRSAPublicKey - } - - return pkey, nil -} diff --git a/vendor/github.com/dgrijalva/jwt-go/signing_method.go b/vendor/github.com/dgrijalva/jwt-go/signing_method.go deleted file mode 100644 index 12cf0f3..0000000 --- a/vendor/github.com/dgrijalva/jwt-go/signing_method.go +++ /dev/null @@ -1,24 +0,0 @@ -package jwt - -var signingMethods = map[string]func() SigningMethod{} - -// Implement SigningMethod to add new methods for signing or verifying tokens. -type SigningMethod interface { - Verify(signingString, signature string, key interface{}) error // Returns nil if signature is valid - Sign(signingString string, key interface{}) (string, error) // Returns encoded signature or error - Alg() string // returns the alg identifier for this method (example: 'HS256') -} - -// Register the "alg" name and a factory function for signing method. 
-// This is typically done during init() in the method's implementation -func RegisterSigningMethod(alg string, f func() SigningMethod) { - signingMethods[alg] = f -} - -// Get a signing method from an "alg" string -func GetSigningMethod(alg string) (method SigningMethod) { - if methodF, ok := signingMethods[alg]; ok { - method = methodF() - } - return -} diff --git a/vendor/github.com/dgrijalva/jwt-go/test/ec256-private.pem b/vendor/github.com/dgrijalva/jwt-go/test/ec256-private.pem deleted file mode 100644 index a6882b3..0000000 --- a/vendor/github.com/dgrijalva/jwt-go/test/ec256-private.pem +++ /dev/null @@ -1,5 +0,0 @@ ------BEGIN EC PRIVATE KEY----- -MHcCAQEEIAh5qA3rmqQQuu0vbKV/+zouz/y/Iy2pLpIcWUSyImSwoAoGCCqGSM49 -AwEHoUQDQgAEYD54V/vp+54P9DXarYqx4MPcm+HKRIQzNasYSoRQHQ/6S6Ps8tpM -cT+KvIIC8W/e9k0W7Cm72M1P9jU7SLf/vg== ------END EC PRIVATE KEY----- diff --git a/vendor/github.com/dgrijalva/jwt-go/test/ec256-public.pem b/vendor/github.com/dgrijalva/jwt-go/test/ec256-public.pem deleted file mode 100644 index 7191361..0000000 --- a/vendor/github.com/dgrijalva/jwt-go/test/ec256-public.pem +++ /dev/null @@ -1,4 +0,0 @@ ------BEGIN PUBLIC KEY----- -MFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEYD54V/vp+54P9DXarYqx4MPcm+HK -RIQzNasYSoRQHQ/6S6Ps8tpMcT+KvIIC8W/e9k0W7Cm72M1P9jU7SLf/vg== ------END PUBLIC KEY----- diff --git a/vendor/github.com/dgrijalva/jwt-go/test/ec384-private.pem b/vendor/github.com/dgrijalva/jwt-go/test/ec384-private.pem deleted file mode 100644 index a86c823..0000000 --- a/vendor/github.com/dgrijalva/jwt-go/test/ec384-private.pem +++ /dev/null @@ -1,6 +0,0 @@ ------BEGIN EC PRIVATE KEY----- -MIGkAgEBBDCaCvMHKhcG/qT7xsNLYnDT7sE/D+TtWIol1ROdaK1a564vx5pHbsRy -SEKcIxISi1igBwYFK4EEACKhZANiAATYa7rJaU7feLMqrAx6adZFNQOpaUH/Uylb -ZLriOLON5YFVwtVUpO1FfEXZUIQpptRPtc5ixIPY658yhBSb6irfIJUSP9aYTflJ -GKk/mDkK4t8mWBzhiD5B6jg9cEGhGgA= ------END EC PRIVATE KEY----- diff --git a/vendor/github.com/dgrijalva/jwt-go/test/ec384-public.pem b/vendor/github.com/dgrijalva/jwt-go/test/ec384-public.pem deleted file mode 100644 index e80d005..0000000 --- a/vendor/github.com/dgrijalva/jwt-go/test/ec384-public.pem +++ /dev/null @@ -1,5 +0,0 @@ ------BEGIN PUBLIC KEY----- -MHYwEAYHKoZIzj0CAQYFK4EEACIDYgAE2Gu6yWlO33izKqwMemnWRTUDqWlB/1Mp -W2S64jizjeWBVcLVVKTtRXxF2VCEKabUT7XOYsSD2OufMoQUm+oq3yCVEj/WmE35 -SRipP5g5CuLfJlgc4Yg+Qeo4PXBBoRoA ------END PUBLIC KEY----- diff --git a/vendor/github.com/dgrijalva/jwt-go/test/ec512-private.pem b/vendor/github.com/dgrijalva/jwt-go/test/ec512-private.pem deleted file mode 100644 index 213afaf..0000000 --- a/vendor/github.com/dgrijalva/jwt-go/test/ec512-private.pem +++ /dev/null @@ -1,7 +0,0 @@ ------BEGIN EC PRIVATE KEY----- -MIHcAgEBBEIB0pE4uFaWRx7t03BsYlYvF1YvKaBGyvoakxnodm9ou0R9wC+sJAjH -QZZJikOg4SwNqgQ/hyrOuDK2oAVHhgVGcYmgBwYFK4EEACOhgYkDgYYABAAJXIuw -12MUzpHggia9POBFYXSxaOGKGbMjIyDI+6q7wi7LMw3HgbaOmgIqFG72o8JBQwYN -4IbXHf+f86CRY1AA2wHzbHvt6IhkCXTNxBEffa1yMUgu8n9cKKF2iLgyQKcKqW33 -8fGOw/n3Rm2Yd/EB56u2rnD29qS+nOM9eGS+gy39OQ== ------END EC PRIVATE KEY----- diff --git a/vendor/github.com/dgrijalva/jwt-go/test/ec512-public.pem b/vendor/github.com/dgrijalva/jwt-go/test/ec512-public.pem deleted file mode 100644 index 02ea022..0000000 --- a/vendor/github.com/dgrijalva/jwt-go/test/ec512-public.pem +++ /dev/null @@ -1,6 +0,0 @@ ------BEGIN PUBLIC KEY----- -MIGbMBAGByqGSM49AgEGBSuBBAAjA4GGAAQACVyLsNdjFM6R4IImvTzgRWF0sWjh -ihmzIyMgyPuqu8IuyzMNx4G2jpoCKhRu9qPCQUMGDeCG1x3/n/OgkWNQANsB82x7 -7eiIZAl0zcQRH32tcjFILvJ/XCihdoi4MkCnCqlt9/HxjsP590ZtmHfxAeertq5w -9vakvpzjPXhkvoMt/Tk= ------END PUBLIC 
KEY----- diff --git a/vendor/github.com/dgrijalva/jwt-go/test/hmacTestKey b/vendor/github.com/dgrijalva/jwt-go/test/hmacTestKey deleted file mode 100644 index 435b8dd..0000000 --- a/vendor/github.com/dgrijalva/jwt-go/test/hmacTestKey +++ /dev/null @@ -1 +0,0 @@ -#5K+¥¼ƒ~ew{¦Z³(æðTÉ(©„²ÒP.¿ÓûZ’ÒGï–Š´Ãwb="=.!r.OÀÍšõgЀ£ \ No newline at end of file diff --git a/vendor/github.com/dgrijalva/jwt-go/test/sample_key b/vendor/github.com/dgrijalva/jwt-go/test/sample_key deleted file mode 100644 index abdbade..0000000 --- a/vendor/github.com/dgrijalva/jwt-go/test/sample_key +++ /dev/null @@ -1,27 +0,0 @@ ------BEGIN RSA PRIVATE KEY----- -MIIEowIBAAKCAQEA4f5wg5l2hKsTeNem/V41fGnJm6gOdrj8ym3rFkEU/wT8RDtn -SgFEZOQpHEgQ7JL38xUfU0Y3g6aYw9QT0hJ7mCpz9Er5qLaMXJwZxzHzAahlfA0i -cqabvJOMvQtzD6uQv6wPEyZtDTWiQi9AXwBpHssPnpYGIn20ZZuNlX2BrClciHhC -PUIIZOQn/MmqTD31jSyjoQoV7MhhMTATKJx2XrHhR+1DcKJzQBSTAGnpYVaqpsAR -ap+nwRipr3nUTuxyGohBTSmjJ2usSeQXHI3bODIRe1AuTyHceAbewn8b462yEWKA -Rdpd9AjQW5SIVPfdsz5B6GlYQ5LdYKtznTuy7wIDAQABAoIBAQCwia1k7+2oZ2d3 -n6agCAbqIE1QXfCmh41ZqJHbOY3oRQG3X1wpcGH4Gk+O+zDVTV2JszdcOt7E5dAy -MaomETAhRxB7hlIOnEN7WKm+dGNrKRvV0wDU5ReFMRHg31/Lnu8c+5BvGjZX+ky9 -POIhFFYJqwCRlopGSUIxmVj5rSgtzk3iWOQXr+ah1bjEXvlxDOWkHN6YfpV5ThdE -KdBIPGEVqa63r9n2h+qazKrtiRqJqGnOrHzOECYbRFYhexsNFz7YT02xdfSHn7gM -IvabDDP/Qp0PjE1jdouiMaFHYnLBbgvlnZW9yuVf/rpXTUq/njxIXMmvmEyyvSDn -FcFikB8pAoGBAPF77hK4m3/rdGT7X8a/gwvZ2R121aBcdPwEaUhvj/36dx596zvY -mEOjrWfZhF083/nYWE2kVquj2wjs+otCLfifEEgXcVPTnEOPO9Zg3uNSL0nNQghj -FuD3iGLTUBCtM66oTe0jLSslHe8gLGEQqyMzHOzYxNqibxcOZIe8Qt0NAoGBAO+U -I5+XWjWEgDmvyC3TrOSf/KCGjtu0TSv30ipv27bDLMrpvPmD/5lpptTFwcxvVhCs -2b+chCjlghFSWFbBULBrfci2FtliClOVMYrlNBdUSJhf3aYSG2Doe6Bgt1n2CpNn -/iu37Y3NfemZBJA7hNl4dYe+f+uzM87cdQ214+jrAoGAXA0XxX8ll2+ToOLJsaNT -OvNB9h9Uc5qK5X5w+7G7O998BN2PC/MWp8H+2fVqpXgNENpNXttkRm1hk1dych86 -EunfdPuqsX+as44oCyJGFHVBnWpm33eWQw9YqANRI+pCJzP08I5WK3osnPiwshd+ -hR54yjgfYhBFNI7B95PmEQkCgYBzFSz7h1+s34Ycr8SvxsOBWxymG5zaCsUbPsL0 -4aCgLScCHb9J+E86aVbbVFdglYa5Id7DPTL61ixhl7WZjujspeXZGSbmq0Kcnckb -mDgqkLECiOJW2NHP/j0McAkDLL4tysF8TLDO8gvuvzNC+WQ6drO2ThrypLVZQ+ry -eBIPmwKBgEZxhqa0gVvHQG/7Od69KWj4eJP28kq13RhKay8JOoN0vPmspXJo1HY3 -CKuHRG+AP579dncdUnOMvfXOtkdM4vk0+hWASBQzM9xzVcztCa+koAugjVaLS9A+ -9uQoqEeVNTckxx0S2bYevRy7hGQmUJTyQm3j1zEUR5jpdbL83Fbq ------END RSA PRIVATE KEY----- diff --git a/vendor/github.com/dgrijalva/jwt-go/test/sample_key.pub b/vendor/github.com/dgrijalva/jwt-go/test/sample_key.pub deleted file mode 100644 index 03dc982..0000000 --- a/vendor/github.com/dgrijalva/jwt-go/test/sample_key.pub +++ /dev/null @@ -1,9 +0,0 @@ ------BEGIN PUBLIC KEY----- -MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA4f5wg5l2hKsTeNem/V41 -fGnJm6gOdrj8ym3rFkEU/wT8RDtnSgFEZOQpHEgQ7JL38xUfU0Y3g6aYw9QT0hJ7 -mCpz9Er5qLaMXJwZxzHzAahlfA0icqabvJOMvQtzD6uQv6wPEyZtDTWiQi9AXwBp -HssPnpYGIn20ZZuNlX2BrClciHhCPUIIZOQn/MmqTD31jSyjoQoV7MhhMTATKJx2 -XrHhR+1DcKJzQBSTAGnpYVaqpsARap+nwRipr3nUTuxyGohBTSmjJ2usSeQXHI3b -ODIRe1AuTyHceAbewn8b462yEWKARdpd9AjQW5SIVPfdsz5B6GlYQ5LdYKtznTuy -7wIDAQAB ------END PUBLIC KEY----- diff --git a/vendor/github.com/dgrijalva/jwt-go/token.go b/vendor/github.com/dgrijalva/jwt-go/token.go deleted file mode 100644 index c275333..0000000 --- a/vendor/github.com/dgrijalva/jwt-go/token.go +++ /dev/null @@ -1,126 +0,0 @@ -package jwt - -import ( - "encoding/base64" - "encoding/json" - "net/http" - "strings" - "time" -) - -// TimeFunc provides the current time when parsing token to validate "exp" claim (expiration time). -// You can override it to use another time value. 
This is useful for testing or if your -// server uses a different time zone than your tokens. -var TimeFunc = time.Now - -// Parse methods use this callback function to supply -// the key for verification. The function receives the parsed, -// but unverified Token. This allows you to use properties in the -// Header of the token (such as `kid`) to identify which key to use. -type Keyfunc func(*Token) (interface{}, error) - -// A JWT Token. Different fields will be used depending on whether you're -// creating or parsing/verifying a token. -type Token struct { - Raw string // The raw token. Populated when you Parse a token - Method SigningMethod // The signing method used or to be used - Header map[string]interface{} // The first segment of the token - Claims map[string]interface{} // The second segment of the token - Signature string // The third segment of the token. Populated when you Parse a token - Valid bool // Is the token valid? Populated when you Parse/Verify a token -} - -// Create a new Token. Takes a signing method -func New(method SigningMethod) *Token { - return &Token{ - Header: map[string]interface{}{ - "typ": "JWT", - "alg": method.Alg(), - }, - Claims: make(map[string]interface{}), - Method: method, - } -} - -// Get the complete, signed token -func (t *Token) SignedString(key interface{}) (string, error) { - var sig, sstr string - var err error - if sstr, err = t.SigningString(); err != nil { - return "", err - } - if sig, err = t.Method.Sign(sstr, key); err != nil { - return "", err - } - return strings.Join([]string{sstr, sig}, "."), nil -} - -// Generate the signing string. This is the -// most expensive part of the whole deal. Unless you -// need this for something special, just go straight for -// the SignedString. -func (t *Token) SigningString() (string, error) { - var err error - parts := make([]string, 2) - for i, _ := range parts { - var source map[string]interface{} - if i == 0 { - source = t.Header - } else { - source = t.Claims - } - - var jsonValue []byte - if jsonValue, err = json.Marshal(source); err != nil { - return "", err - } - - parts[i] = EncodeSegment(jsonValue) - } - return strings.Join(parts, "."), nil -} - -// Parse, validate, and return a token. -// keyFunc will receive the parsed token and should return the key for validating. -// If everything is kosher, err will be nil -func Parse(tokenString string, keyFunc Keyfunc) (*Token, error) { - return new(Parser).Parse(tokenString, keyFunc) -} - -// Try to find the token in an http.Request. -// This method will call ParseMultipartForm if there's no token in the header. -// Currently, it looks in the Authorization header as well as -// looking for an 'access_token' request parameter in req.Form. 
-func ParseFromRequest(req *http.Request, keyFunc Keyfunc) (token *Token, err error) { - - // Look for an Authorization header - if ah := req.Header.Get("Authorization"); ah != "" { - // Should be a bearer token - if len(ah) > 6 && strings.ToUpper(ah[0:7]) == "BEARER " { - return Parse(ah[7:], keyFunc) - } - } - - // Look for "access_token" parameter - req.ParseMultipartForm(10e6) - if tokStr := req.Form.Get("access_token"); tokStr != "" { - return Parse(tokStr, keyFunc) - } - - return nil, ErrNoTokenInRequest - -} - -// Encode JWT specific base64url encoding with padding stripped -func EncodeSegment(seg []byte) string { - return strings.TrimRight(base64.URLEncoding.EncodeToString(seg), "=") -} - -// Decode JWT specific base64url encoding with padding stripped -func DecodeSegment(seg string) ([]byte, error) { - if l := len(seg) % 4; l > 0 { - seg += strings.Repeat("=", 4-l) - } - - return base64.URLEncoding.DecodeString(seg) -} diff --git a/vendor/github.com/dustin/go-humanize/.gitignore b/vendor/github.com/dustin/go-humanize/.gitignore deleted file mode 100644 index 05b4051..0000000 --- a/vendor/github.com/dustin/go-humanize/.gitignore +++ /dev/null @@ -1,6 +0,0 @@ -#* -*.[568] -*.a -*~ -[568].out -_* diff --git a/vendor/github.com/dustin/go-humanize/.travis.yml b/vendor/github.com/dustin/go-humanize/.travis.yml deleted file mode 100644 index ffa8740..0000000 --- a/vendor/github.com/dustin/go-humanize/.travis.yml +++ /dev/null @@ -1,18 +0,0 @@ -sudo: false -language: go -go: - - 1.3.3 - - 1.5.4 - - 1.6.2 - - tip -matrix: - allow_failures: - - go: tip - fast_finish: true -install: - - # Do nothing. This is needed to prevent default install action "go get -t -v ./..." from happening here (we want it to happen inside script step). -script: - - go get -t -v ./... - - diff -u <(echo -n) <(gofmt -d -s .) - - go tool vet . - - go test -v -race ./... diff --git a/vendor/github.com/dustin/go-humanize/LICENSE b/vendor/github.com/dustin/go-humanize/LICENSE deleted file mode 100644 index 8d9a94a..0000000 --- a/vendor/github.com/dustin/go-humanize/LICENSE +++ /dev/null @@ -1,21 +0,0 @@ -Copyright (c) 2005-2008 Dustin Sallings - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in -all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. 
- - diff --git a/vendor/github.com/dustin/go-humanize/README.markdown b/vendor/github.com/dustin/go-humanize/README.markdown deleted file mode 100644 index 23dfee0..0000000 --- a/vendor/github.com/dustin/go-humanize/README.markdown +++ /dev/null @@ -1,92 +0,0 @@ -# Humane Units [![Build Status](https://travis-ci.org/dustin/go-humanize.svg?branch=master)](https://travis-ci.org/dustin/go-humanize) [![GoDoc](https://godoc.org/github.com/dustin/go-humanize?status.svg)](https://godoc.org/github.com/dustin/go-humanize) - -Just a few functions for helping humanize times and sizes. - -`go get` it as `github.com/dustin/go-humanize`, import it as -`"github.com/dustin/go-humanize"`, use it as `humanize` - -See [godoc](https://godoc.org/github.com/dustin/go-humanize) for -complete documentation. - -## Sizes - -This lets you take numbers like `82854982` and convert them to useful -strings like, `83MB` or `79MiB` (whichever you prefer). - -Example: - -```go -fmt.Printf("That file is %s.", humanize.Bytes(82854982)) -``` - -## Times - -This lets you take a `time.Time` and spit it out in relative terms. -For example, `12 seconds ago` or `3 days from now`. - -Example: - -```go -fmt.Printf("This was touched %s", humanize.Time(someTimeInstance)) -``` - -Thanks to Kyle Lemons for the time implementation from an IRC -conversation one day. It's pretty neat. - -## Ordinals - -From a [mailing list discussion][odisc] where a user wanted to be able -to label ordinals. - - 0 -> 0th - 1 -> 1st - 2 -> 2nd - 3 -> 3rd - 4 -> 4th - [...] - -Example: - -```go -fmt.Printf("You're my %s best friend.", humanize.Ordinal(193)) -``` - -## Commas - -Want to shove commas into numbers? Be my guest. - - 0 -> 0 - 100 -> 100 - 1000 -> 1,000 - 1000000000 -> 1,000,000,000 - -100000 -> -100,000 - -Example: - -```go -fmt.Printf("You owe $%s.\n", humanize.Comma(6582491)) -``` - -## Ftoa - -Nicer float64 formatter that removes trailing zeros. - -```go -fmt.Printf("%f", 2.24) // 2.240000 -fmt.Printf("%s", humanize.Ftoa(2.24)) // 2.24 -fmt.Printf("%f", 2.0) // 2.000000 -fmt.Printf("%s", humanize.Ftoa(2.0)) // 2 -``` - -## SI notation - -Format numbers with [SI notation][sinotation]. 
- -Example: - -```go -humanize.SI(0.00000000223, "M") // 2.23nM -``` - -[odisc]: https://groups.google.com/d/topic/golang-nuts/l8NhI74jl-4/discussion -[sinotation]: http://en.wikipedia.org/wiki/Metric_prefix diff --git a/vendor/github.com/dustin/go-humanize/big.go b/vendor/github.com/dustin/go-humanize/big.go deleted file mode 100644 index f49dc33..0000000 --- a/vendor/github.com/dustin/go-humanize/big.go +++ /dev/null @@ -1,31 +0,0 @@ -package humanize - -import ( - "math/big" -) - -// order of magnitude (to a max order) -func oomm(n, b *big.Int, maxmag int) (float64, int) { - mag := 0 - m := &big.Int{} - for n.Cmp(b) >= 0 { - n.DivMod(n, b, m) - mag++ - if mag == maxmag && maxmag >= 0 { - break - } - } - return float64(n.Int64()) + (float64(m.Int64()) / float64(b.Int64())), mag -} - -// total order of magnitude -// (same as above, but with no upper limit) -func oom(n, b *big.Int) (float64, int) { - mag := 0 - m := &big.Int{} - for n.Cmp(b) >= 0 { - n.DivMod(n, b, m) - mag++ - } - return float64(n.Int64()) + (float64(m.Int64()) / float64(b.Int64())), mag -} diff --git a/vendor/github.com/dustin/go-humanize/bigbytes.go b/vendor/github.com/dustin/go-humanize/bigbytes.go deleted file mode 100644 index 67ea5c8..0000000 --- a/vendor/github.com/dustin/go-humanize/bigbytes.go +++ /dev/null @@ -1,164 +0,0 @@ -package humanize - -import ( - "fmt" - "math/big" - "strings" - "unicode" -) - -var ( - bigIECExp = big.NewInt(1024) - - // BigByte is one byte in bit.Ints - BigByte = big.NewInt(1) - // BigKiByte is 1,024 bytes in bit.Ints - BigKiByte = (&big.Int{}).Mul(BigByte, bigIECExp) - // BigMiByte is 1,024 k bytes in bit.Ints - BigMiByte = (&big.Int{}).Mul(BigKiByte, bigIECExp) - // BigGiByte is 1,024 m bytes in bit.Ints - BigGiByte = (&big.Int{}).Mul(BigMiByte, bigIECExp) - // BigTiByte is 1,024 g bytes in bit.Ints - BigTiByte = (&big.Int{}).Mul(BigGiByte, bigIECExp) - // BigPiByte is 1,024 t bytes in bit.Ints - BigPiByte = (&big.Int{}).Mul(BigTiByte, bigIECExp) - // BigEiByte is 1,024 p bytes in bit.Ints - BigEiByte = (&big.Int{}).Mul(BigPiByte, bigIECExp) - // BigZiByte is 1,024 e bytes in bit.Ints - BigZiByte = (&big.Int{}).Mul(BigEiByte, bigIECExp) - // BigYiByte is 1,024 z bytes in bit.Ints - BigYiByte = (&big.Int{}).Mul(BigZiByte, bigIECExp) -) - -var ( - bigSIExp = big.NewInt(1000) - - // BigSIByte is one SI byte in big.Ints - BigSIByte = big.NewInt(1) - // BigKByte is 1,000 SI bytes in big.Ints - BigKByte = (&big.Int{}).Mul(BigSIByte, bigSIExp) - // BigMByte is 1,000 SI k bytes in big.Ints - BigMByte = (&big.Int{}).Mul(BigKByte, bigSIExp) - // BigGByte is 1,000 SI m bytes in big.Ints - BigGByte = (&big.Int{}).Mul(BigMByte, bigSIExp) - // BigTByte is 1,000 SI g bytes in big.Ints - BigTByte = (&big.Int{}).Mul(BigGByte, bigSIExp) - // BigPByte is 1,000 SI t bytes in big.Ints - BigPByte = (&big.Int{}).Mul(BigTByte, bigSIExp) - // BigEByte is 1,000 SI p bytes in big.Ints - BigEByte = (&big.Int{}).Mul(BigPByte, bigSIExp) - // BigZByte is 1,000 SI e bytes in big.Ints - BigZByte = (&big.Int{}).Mul(BigEByte, bigSIExp) - // BigYByte is 1,000 SI z bytes in big.Ints - BigYByte = (&big.Int{}).Mul(BigZByte, bigSIExp) -) - -var bigBytesSizeTable = map[string]*big.Int{ - "b": BigByte, - "kib": BigKiByte, - "kb": BigKByte, - "mib": BigMiByte, - "mb": BigMByte, - "gib": BigGiByte, - "gb": BigGByte, - "tib": BigTiByte, - "tb": BigTByte, - "pib": BigPiByte, - "pb": BigPByte, - "eib": BigEiByte, - "eb": BigEByte, - "zib": BigZiByte, - "zb": BigZByte, - "yib": BigYiByte, - "yb": BigYByte, - // Without suffix - 
"": BigByte, - "ki": BigKiByte, - "k": BigKByte, - "mi": BigMiByte, - "m": BigMByte, - "gi": BigGiByte, - "g": BigGByte, - "ti": BigTiByte, - "t": BigTByte, - "pi": BigPiByte, - "p": BigPByte, - "ei": BigEiByte, - "e": BigEByte, - "z": BigZByte, - "zi": BigZiByte, - "y": BigYByte, - "yi": BigYiByte, -} - -var ten = big.NewInt(10) - -func humanateBigBytes(s, base *big.Int, sizes []string) string { - if s.Cmp(ten) < 0 { - return fmt.Sprintf("%d B", s) - } - c := (&big.Int{}).Set(s) - val, mag := oomm(c, base, len(sizes)-1) - suffix := sizes[mag] - f := "%.0f %s" - if val < 10 { - f = "%.1f %s" - } - - return fmt.Sprintf(f, val, suffix) - -} - -// BigBytes produces a human readable representation of an SI size. -// -// See also: ParseBigBytes. -// -// BigBytes(82854982) -> 83MB -func BigBytes(s *big.Int) string { - sizes := []string{"B", "kB", "MB", "GB", "TB", "PB", "EB", "ZB", "YB"} - return humanateBigBytes(s, bigSIExp, sizes) -} - -// BigIBytes produces a human readable representation of an IEC size. -// -// See also: ParseBigBytes. -// -// BigIBytes(82854982) -> 79MiB -func BigIBytes(s *big.Int) string { - sizes := []string{"B", "KiB", "MiB", "GiB", "TiB", "PiB", "EiB", "ZiB", "YiB"} - return humanateBigBytes(s, bigIECExp, sizes) -} - -// ParseBigBytes parses a string representation of bytes into the number -// of bytes it represents. -// -// See also: BigBytes, BigIBytes. -// -// ParseBigBytes("42MB") -> 42000000, nil -// ParseBigBytes("42mib") -> 44040192, nil -func ParseBigBytes(s string) (*big.Int, error) { - lastDigit := 0 - for _, r := range s { - if !(unicode.IsDigit(r) || r == '.') { - break - } - lastDigit++ - } - - val := &big.Rat{} - _, err := fmt.Sscanf(s[:lastDigit], "%f", val) - if err != nil { - return nil, err - } - - extra := strings.ToLower(strings.TrimSpace(s[lastDigit:])) - if m, ok := bigBytesSizeTable[extra]; ok { - mv := (&big.Rat{}).SetInt(m) - val.Mul(val, mv) - rv := &big.Int{} - rv.Div(val.Num(), val.Denom()) - return rv, nil - } - - return nil, fmt.Errorf("unhandled size name: %v", extra) -} diff --git a/vendor/github.com/dustin/go-humanize/bigbytes_test.go b/vendor/github.com/dustin/go-humanize/bigbytes_test.go deleted file mode 100644 index 88eed45..0000000 --- a/vendor/github.com/dustin/go-humanize/bigbytes_test.go +++ /dev/null @@ -1,219 +0,0 @@ -package humanize - -import ( - "math/big" - "testing" -) - -func TestBigByteParsing(t *testing.T) { - tests := []struct { - in string - exp uint64 - }{ - {"42", 42}, - {"42MB", 42000000}, - {"42MiB", 44040192}, - {"42mb", 42000000}, - {"42mib", 44040192}, - {"42MIB", 44040192}, - {"42 MB", 42000000}, - {"42 MiB", 44040192}, - {"42 mb", 42000000}, - {"42 mib", 44040192}, - {"42 MIB", 44040192}, - {"42.5MB", 42500000}, - {"42.5MiB", 44564480}, - {"42.5 MB", 42500000}, - {"42.5 MiB", 44564480}, - // No need to say B - {"42M", 42000000}, - {"42Mi", 44040192}, - {"42m", 42000000}, - {"42mi", 44040192}, - {"42MI", 44040192}, - {"42 M", 42000000}, - {"42 Mi", 44040192}, - {"42 m", 42000000}, - {"42 mi", 44040192}, - {"42 MI", 44040192}, - {"42.5M", 42500000}, - {"42.5Mi", 44564480}, - {"42.5 M", 42500000}, - {"42.5 Mi", 44564480}, - // Large testing, breaks when too much larger than - // this. 
- {"12.5 EB", uint64(12.5 * float64(EByte))}, - {"12.5 E", uint64(12.5 * float64(EByte))}, - {"12.5 EiB", uint64(12.5 * float64(EiByte))}, - } - - for _, p := range tests { - got, err := ParseBigBytes(p.in) - if err != nil { - t.Errorf("Couldn't parse %v: %v", p.in, err) - } else { - if got.Uint64() != p.exp { - t.Errorf("Expected %v for %v, got %v", - p.exp, p.in, got) - } - } - } -} - -func TestBigByteErrors(t *testing.T) { - got, err := ParseBigBytes("84 JB") - if err == nil { - t.Errorf("Expected error, got %v", got) - } - got, err = ParseBigBytes("") - if err == nil { - t.Errorf("Expected error parsing nothing") - } -} - -func bbyte(in uint64) string { - return BigBytes((&big.Int{}).SetUint64(in)) -} - -func bibyte(in uint64) string { - return BigIBytes((&big.Int{}).SetUint64(in)) -} - -func TestBigBytes(t *testing.T) { - testList{ - {"bytes(0)", bbyte(0), "0 B"}, - {"bytes(1)", bbyte(1), "1 B"}, - {"bytes(803)", bbyte(803), "803 B"}, - {"bytes(999)", bbyte(999), "999 B"}, - - {"bytes(1024)", bbyte(1024), "1.0 kB"}, - {"bytes(1MB - 1)", bbyte(MByte - Byte), "1000 kB"}, - - {"bytes(1MB)", bbyte(1024 * 1024), "1.0 MB"}, - {"bytes(1GB - 1K)", bbyte(GByte - KByte), "1000 MB"}, - - {"bytes(1GB)", bbyte(GByte), "1.0 GB"}, - {"bytes(1TB - 1M)", bbyte(TByte - MByte), "1000 GB"}, - - {"bytes(1TB)", bbyte(TByte), "1.0 TB"}, - {"bytes(1PB - 1T)", bbyte(PByte - TByte), "999 TB"}, - - {"bytes(1PB)", bbyte(PByte), "1.0 PB"}, - {"bytes(1PB - 1T)", bbyte(EByte - PByte), "999 PB"}, - - {"bytes(1EB)", bbyte(EByte), "1.0 EB"}, - // Overflows. - // {"bytes(1EB - 1P)", Bytes((KByte*EByte)-PByte), "1023EB"}, - - {"bytes(0)", bibyte(0), "0 B"}, - {"bytes(1)", bibyte(1), "1 B"}, - {"bytes(803)", bibyte(803), "803 B"}, - {"bytes(1023)", bibyte(1023), "1023 B"}, - - {"bytes(1024)", bibyte(1024), "1.0 KiB"}, - {"bytes(1MB - 1)", bibyte(MiByte - IByte), "1024 KiB"}, - - {"bytes(1MB)", bibyte(1024 * 1024), "1.0 MiB"}, - {"bytes(1GB - 1K)", bibyte(GiByte - KiByte), "1024 MiB"}, - - {"bytes(1GB)", bibyte(GiByte), "1.0 GiB"}, - {"bytes(1TB - 1M)", bibyte(TiByte - MiByte), "1024 GiB"}, - - {"bytes(1TB)", bibyte(TiByte), "1.0 TiB"}, - {"bytes(1PB - 1T)", bibyte(PiByte - TiByte), "1023 TiB"}, - - {"bytes(1PB)", bibyte(PiByte), "1.0 PiB"}, - {"bytes(1PB - 1T)", bibyte(EiByte - PiByte), "1023 PiB"}, - - {"bytes(1EiB)", bibyte(EiByte), "1.0 EiB"}, - // Overflows. 
- // {"bytes(1EB - 1P)", bibyte((KIByte*EIByte)-PiByte), "1023EB"}, - - {"bytes(5.5GiB)", bibyte(5.5 * GiByte), "5.5 GiB"}, - - {"bytes(5.5GB)", bbyte(5.5 * GByte), "5.5 GB"}, - }.validate(t) -} - -func TestVeryBigBytes(t *testing.T) { - b, _ := (&big.Int{}).SetString("15347691069326346944512", 10) - s := BigBytes(b) - if s != "15 ZB" { - t.Errorf("Expected 15 ZB, got %v", s) - } - s = BigIBytes(b) - if s != "13 ZiB" { - t.Errorf("Expected 13 ZiB, got %v", s) - } - - b, _ = (&big.Int{}).SetString("15716035654990179271180288", 10) - s = BigBytes(b) - if s != "16 YB" { - t.Errorf("Expected 16 YB, got %v", s) - } - s = BigIBytes(b) - if s != "13 YiB" { - t.Errorf("Expected 13 YiB, got %v", s) - } -} - -func TestVeryVeryBigBytes(t *testing.T) { - b, _ := (&big.Int{}).SetString("16093220510709943573688614912", 10) - s := BigBytes(b) - if s != "16093 YB" { - t.Errorf("Expected 16093 YB, got %v", s) - } - s = BigIBytes(b) - if s != "13312 YiB" { - t.Errorf("Expected 13312 YiB, got %v", s) - } -} - -func TestParseVeryBig(t *testing.T) { - tests := []struct { - in string - out string - }{ - {"16 ZB", "16000000000000000000000"}, - {"16 ZiB", "18889465931478580854784"}, - {"16.5 ZB", "16500000000000000000000"}, - {"16.5 ZiB", "19479761741837286506496"}, - {"16 Z", "16000000000000000000000"}, - {"16 Zi", "18889465931478580854784"}, - {"16.5 Z", "16500000000000000000000"}, - {"16.5 Zi", "19479761741837286506496"}, - - {"16 YB", "16000000000000000000000000"}, - {"16 YiB", "19342813113834066795298816"}, - {"16.5 YB", "16500000000000000000000000"}, - {"16.5 YiB", "19947276023641381382651904"}, - {"16 Y", "16000000000000000000000000"}, - {"16 Yi", "19342813113834066795298816"}, - {"16.5 Y", "16500000000000000000000000"}, - {"16.5 Yi", "19947276023641381382651904"}, - } - - for _, test := range tests { - x, err := ParseBigBytes(test.in) - if err != nil { - t.Errorf("Error parsing %q: %v", test.in, err) - continue - } - - if x.String() != test.out { - t.Errorf("Expected %q for %q, got %v", test.out, test.in, x) - } - } -} - -func BenchmarkParseBigBytes(b *testing.B) { - for i := 0; i < b.N; i++ { - ParseBigBytes("16.5 Z") - } -} - -func BenchmarkBigBytes(b *testing.B) { - for i := 0; i < b.N; i++ { - bibyte(16.5 * GByte) - } -} diff --git a/vendor/github.com/dustin/go-humanize/bytes.go b/vendor/github.com/dustin/go-humanize/bytes.go deleted file mode 100644 index dacbb9c..0000000 --- a/vendor/github.com/dustin/go-humanize/bytes.go +++ /dev/null @@ -1,134 +0,0 @@ -package humanize - -import ( - "fmt" - "math" - "strconv" - "strings" - "unicode" -) - -// IEC Sizes. -// kibis of bits -const ( - Byte = 1 << (iota * 10) - KiByte - MiByte - GiByte - TiByte - PiByte - EiByte -) - -// SI Sizes. 
-const ( - IByte = 1 - KByte = IByte * 1000 - MByte = KByte * 1000 - GByte = MByte * 1000 - TByte = GByte * 1000 - PByte = TByte * 1000 - EByte = PByte * 1000 -) - -var bytesSizeTable = map[string]uint64{ - "b": Byte, - "kib": KiByte, - "kb": KByte, - "mib": MiByte, - "mb": MByte, - "gib": GiByte, - "gb": GByte, - "tib": TiByte, - "tb": TByte, - "pib": PiByte, - "pb": PByte, - "eib": EiByte, - "eb": EByte, - // Without suffix - "": Byte, - "ki": KiByte, - "k": KByte, - "mi": MiByte, - "m": MByte, - "gi": GiByte, - "g": GByte, - "ti": TiByte, - "t": TByte, - "pi": PiByte, - "p": PByte, - "ei": EiByte, - "e": EByte, -} - -func logn(n, b float64) float64 { - return math.Log(n) / math.Log(b) -} - -func humanateBytes(s uint64, base float64, sizes []string) string { - if s < 10 { - return fmt.Sprintf("%d B", s) - } - e := math.Floor(logn(float64(s), base)) - suffix := sizes[int(e)] - val := math.Floor(float64(s)/math.Pow(base, e)*10+0.5) / 10 - f := "%.0f %s" - if val < 10 { - f = "%.1f %s" - } - - return fmt.Sprintf(f, val, suffix) -} - -// Bytes produces a human readable representation of an SI size. -// -// See also: ParseBytes. -// -// Bytes(82854982) -> 83MB -func Bytes(s uint64) string { - sizes := []string{"B", "kB", "MB", "GB", "TB", "PB", "EB"} - return humanateBytes(s, 1000, sizes) -} - -// IBytes produces a human readable representation of an IEC size. -// -// See also: ParseBytes. -// -// IBytes(82854982) -> 79MiB -func IBytes(s uint64) string { - sizes := []string{"B", "KiB", "MiB", "GiB", "TiB", "PiB", "EiB"} - return humanateBytes(s, 1024, sizes) -} - -// ParseBytes parses a string representation of bytes into the number -// of bytes it represents. -// -// See Also: Bytes, IBytes. -// -// ParseBytes("42MB") -> 42000000, nil -// ParseBytes("42mib") -> 44040192, nil -func ParseBytes(s string) (uint64, error) { - lastDigit := 0 - for _, r := range s { - if !(unicode.IsDigit(r) || r == '.') { - break - } - lastDigit++ - } - - f, err := strconv.ParseFloat(s[:lastDigit], 64) - if err != nil { - return 0, err - } - - extra := strings.ToLower(strings.TrimSpace(s[lastDigit:])) - if m, ok := bytesSizeTable[extra]; ok { - f *= float64(m) - if f >= math.MaxUint64 { - return 0, fmt.Errorf("too large: %v", s) - } - return uint64(f), nil - } - - return 0, fmt.Errorf("unhandled size name: %v", extra) -} diff --git a/vendor/github.com/dustin/go-humanize/bytes_test.go b/vendor/github.com/dustin/go-humanize/bytes_test.go deleted file mode 100644 index 99cad92..0000000 --- a/vendor/github.com/dustin/go-humanize/bytes_test.go +++ /dev/null @@ -1,144 +0,0 @@ -package humanize - -import ( - "testing" -) - -func TestByteParsing(t *testing.T) { - tests := []struct { - in string - exp uint64 - }{ - {"42", 42}, - {"42MB", 42000000}, - {"42MiB", 44040192}, - {"42mb", 42000000}, - {"42mib", 44040192}, - {"42MIB", 44040192}, - {"42 MB", 42000000}, - {"42 MiB", 44040192}, - {"42 mb", 42000000}, - {"42 mib", 44040192}, - {"42 MIB", 44040192}, - {"42.5MB", 42500000}, - {"42.5MiB", 44564480}, - {"42.5 MB", 42500000}, - {"42.5 MiB", 44564480}, - // No need to say B - {"42M", 42000000}, - {"42Mi", 44040192}, - {"42m", 42000000}, - {"42mi", 44040192}, - {"42MI", 44040192}, - {"42 M", 42000000}, - {"42 Mi", 44040192}, - {"42 m", 42000000}, - {"42 mi", 44040192}, - {"42 MI", 44040192}, - {"42.5M", 42500000}, - {"42.5Mi", 44564480}, - {"42.5 M", 42500000}, - {"42.5 Mi", 44564480}, - // Large testing, breaks when too much larger than - // this. 
- {"12.5 EB", uint64(12.5 * float64(EByte))}, - {"12.5 E", uint64(12.5 * float64(EByte))}, - {"12.5 EiB", uint64(12.5 * float64(EiByte))}, - } - - for _, p := range tests { - got, err := ParseBytes(p.in) - if err != nil { - t.Errorf("Couldn't parse %v: %v", p.in, err) - } - if got != p.exp { - t.Errorf("Expected %v for %v, got %v", - p.exp, p.in, got) - } - } -} - -func TestByteErrors(t *testing.T) { - got, err := ParseBytes("84 JB") - if err == nil { - t.Errorf("Expected error, got %v", got) - } - got, err = ParseBytes("") - if err == nil { - t.Errorf("Expected error parsing nothing") - } - got, err = ParseBytes("16 EiB") - if err == nil { - t.Errorf("Expected error, got %v", got) - } -} - -func TestBytes(t *testing.T) { - testList{ - {"bytes(0)", Bytes(0), "0 B"}, - {"bytes(1)", Bytes(1), "1 B"}, - {"bytes(803)", Bytes(803), "803 B"}, - {"bytes(999)", Bytes(999), "999 B"}, - - {"bytes(1024)", Bytes(1024), "1.0 kB"}, - {"bytes(9999)", Bytes(9999), "10 kB"}, - {"bytes(1MB - 1)", Bytes(MByte - Byte), "1000 kB"}, - - {"bytes(1MB)", Bytes(1024 * 1024), "1.0 MB"}, - {"bytes(1GB - 1K)", Bytes(GByte - KByte), "1000 MB"}, - - {"bytes(1GB)", Bytes(GByte), "1.0 GB"}, - {"bytes(1TB - 1M)", Bytes(TByte - MByte), "1000 GB"}, - {"bytes(10MB)", Bytes(9999 * 1000), "10 MB"}, - - {"bytes(1TB)", Bytes(TByte), "1.0 TB"}, - {"bytes(1PB - 1T)", Bytes(PByte - TByte), "999 TB"}, - - {"bytes(1PB)", Bytes(PByte), "1.0 PB"}, - {"bytes(1PB - 1T)", Bytes(EByte - PByte), "999 PB"}, - - {"bytes(1EB)", Bytes(EByte), "1.0 EB"}, - // Overflows. - // {"bytes(1EB - 1P)", Bytes((KByte*EByte)-PByte), "1023EB"}, - - {"bytes(0)", IBytes(0), "0 B"}, - {"bytes(1)", IBytes(1), "1 B"}, - {"bytes(803)", IBytes(803), "803 B"}, - {"bytes(1023)", IBytes(1023), "1023 B"}, - - {"bytes(1024)", IBytes(1024), "1.0 KiB"}, - {"bytes(1MB - 1)", IBytes(MiByte - IByte), "1024 KiB"}, - - {"bytes(1MB)", IBytes(1024 * 1024), "1.0 MiB"}, - {"bytes(1GB - 1K)", IBytes(GiByte - KiByte), "1024 MiB"}, - - {"bytes(1GB)", IBytes(GiByte), "1.0 GiB"}, - {"bytes(1TB - 1M)", IBytes(TiByte - MiByte), "1024 GiB"}, - - {"bytes(1TB)", IBytes(TiByte), "1.0 TiB"}, - {"bytes(1PB - 1T)", IBytes(PiByte - TiByte), "1023 TiB"}, - - {"bytes(1PB)", IBytes(PiByte), "1.0 PiB"}, - {"bytes(1PB - 1T)", IBytes(EiByte - PiByte), "1023 PiB"}, - - {"bytes(1EiB)", IBytes(EiByte), "1.0 EiB"}, - // Overflows. - // {"bytes(1EB - 1P)", IBytes((KIByte*EIByte)-PiByte), "1023EB"}, - - {"bytes(5.5GiB)", IBytes(5.5 * GiByte), "5.5 GiB"}, - - {"bytes(5.5GB)", Bytes(5.5 * GByte), "5.5 GB"}, - }.validate(t) -} - -func BenchmarkParseBytes(b *testing.B) { - for i := 0; i < b.N; i++ { - ParseBytes("16.5 GB") - } -} - -func BenchmarkBytes(b *testing.B) { - for i := 0; i < b.N; i++ { - Bytes(16.5 * GByte) - } -} diff --git a/vendor/github.com/dustin/go-humanize/comma.go b/vendor/github.com/dustin/go-humanize/comma.go deleted file mode 100644 index b65ea6f..0000000 --- a/vendor/github.com/dustin/go-humanize/comma.go +++ /dev/null @@ -1,101 +0,0 @@ -package humanize - -import ( - "bytes" - "math/big" - "strconv" - "strings" -) - -// Comma produces a string form of the given number in base 10 with -// commas after every three orders of magnitude. -// -// e.g. 
Comma(834142) -> 834,142 -func Comma(v int64) string { - sign := "" - if v < 0 { - sign = "-" - v = 0 - v - } - - parts := []string{"", "", "", "", "", "", ""} - j := len(parts) - 1 - - for v > 999 { - parts[j] = strconv.FormatInt(v%1000, 10) - switch len(parts[j]) { - case 2: - parts[j] = "0" + parts[j] - case 1: - parts[j] = "00" + parts[j] - } - v = v / 1000 - j-- - } - parts[j] = strconv.Itoa(int(v)) - return sign + strings.Join(parts[j:], ",") -} - -// Commaf produces a string form of the given number in base 10 with -// commas after every three orders of magnitude. -// -// e.g. Comma(834142.32) -> 834,142.32 -func Commaf(v float64) string { - buf := &bytes.Buffer{} - if v < 0 { - buf.Write([]byte{'-'}) - v = 0 - v - } - - comma := []byte{','} - - parts := strings.Split(strconv.FormatFloat(v, 'f', -1, 64), ".") - pos := 0 - if len(parts[0])%3 != 0 { - pos += len(parts[0]) % 3 - buf.WriteString(parts[0][:pos]) - buf.Write(comma) - } - for ; pos < len(parts[0]); pos += 3 { - buf.WriteString(parts[0][pos : pos+3]) - buf.Write(comma) - } - buf.Truncate(buf.Len() - 1) - - if len(parts) > 1 { - buf.Write([]byte{'.'}) - buf.WriteString(parts[1]) - } - return buf.String() -} - -// BigComma produces a string form of the given big.Int in base 10 -// with commas after every three orders of magnitude. -func BigComma(b *big.Int) string { - sign := "" - if b.Sign() < 0 { - sign = "-" - b.Abs(b) - } - - athousand := big.NewInt(1000) - c := (&big.Int{}).Set(b) - _, m := oom(c, athousand) - parts := make([]string, m+1) - j := len(parts) - 1 - - mod := &big.Int{} - for b.Cmp(athousand) >= 0 { - b.DivMod(b, athousand, mod) - parts[j] = strconv.FormatInt(mod.Int64(), 10) - switch len(parts[j]) { - case 2: - parts[j] = "0" + parts[j] - case 1: - parts[j] = "00" + parts[j] - } - j-- - } - parts[j] = strconv.Itoa(int(b.Int64())) - return sign + strings.Join(parts[j:], ",") -} diff --git a/vendor/github.com/dustin/go-humanize/comma_test.go b/vendor/github.com/dustin/go-humanize/comma_test.go deleted file mode 100644 index 49040fb..0000000 --- a/vendor/github.com/dustin/go-humanize/comma_test.go +++ /dev/null @@ -1,134 +0,0 @@ -package humanize - -import ( - "math" - "math/big" - "testing" -) - -func TestCommas(t *testing.T) { - testList{ - {"0", Comma(0), "0"}, - {"10", Comma(10), "10"}, - {"100", Comma(100), "100"}, - {"1,000", Comma(1000), "1,000"}, - {"10,000", Comma(10000), "10,000"}, - {"100,000", Comma(100000), "100,000"}, - {"10,000,000", Comma(10000000), "10,000,000"}, - {"10,100,000", Comma(10100000), "10,100,000"}, - {"10,010,000", Comma(10010000), "10,010,000"}, - {"10,001,000", Comma(10001000), "10,001,000"}, - {"123,456,789", Comma(123456789), "123,456,789"}, - {"maxint", Comma(9.223372e+18), "9,223,372,000,000,000,000"}, - {"minint", Comma(-9.223372e+18), "-9,223,372,000,000,000,000"}, - {"-123,456,789", Comma(-123456789), "-123,456,789"}, - {"-10,100,000", Comma(-10100000), "-10,100,000"}, - {"-10,010,000", Comma(-10010000), "-10,010,000"}, - {"-10,001,000", Comma(-10001000), "-10,001,000"}, - {"-10,000,000", Comma(-10000000), "-10,000,000"}, - {"-100,000", Comma(-100000), "-100,000"}, - {"-10,000", Comma(-10000), "-10,000"}, - {"-1,000", Comma(-1000), "-1,000"}, - {"-100", Comma(-100), "-100"}, - {"-10", Comma(-10), "-10"}, - }.validate(t) -} - -func TestCommafs(t *testing.T) { - testList{ - {"0", Commaf(0), "0"}, - {"10.11", Commaf(10.11), "10.11"}, - {"100", Commaf(100), "100"}, - {"1,000", Commaf(1000), "1,000"}, - {"10,000", Commaf(10000), "10,000"}, - {"100,000", Commaf(100000), 
"100,000"}, - {"834,142.32", Commaf(834142.32), "834,142.32"}, - {"10,000,000", Commaf(10000000), "10,000,000"}, - {"10,100,000", Commaf(10100000), "10,100,000"}, - {"10,010,000", Commaf(10010000), "10,010,000"}, - {"10,001,000", Commaf(10001000), "10,001,000"}, - {"123,456,789", Commaf(123456789), "123,456,789"}, - {"maxf64", Commaf(math.MaxFloat64), "179,769,313,486,231,570,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000"}, - {"minf64", Commaf(math.SmallestNonzeroFloat64), "0.000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000005"}, - {"-123,456,789", Commaf(-123456789), "-123,456,789"}, - {"-10,100,000", Commaf(-10100000), "-10,100,000"}, - {"-10,010,000", Commaf(-10010000), "-10,010,000"}, - {"-10,001,000", Commaf(-10001000), "-10,001,000"}, - {"-10,000,000", Commaf(-10000000), "-10,000,000"}, - {"-100,000", Commaf(-100000), "-100,000"}, - {"-10,000", Commaf(-10000), "-10,000"}, - {"-1,000", Commaf(-1000), "-1,000"}, - {"-100.11", Commaf(-100.11), "-100.11"}, - {"-10", Commaf(-10), "-10"}, - }.validate(t) -} - -func BenchmarkCommas(b *testing.B) { - for i := 0; i < b.N; i++ { - Comma(1234567890) - } -} - -func BenchmarkCommaf(b *testing.B) { - for i := 0; i < b.N; i++ { - Commaf(1234567890.83584) - } -} - -func BenchmarkBigCommas(b *testing.B) { - for i := 0; i < b.N; i++ { - BigComma(big.NewInt(1234567890)) - } -} - -func bigComma(i int64) string { - return BigComma(big.NewInt(i)) -} - -func TestBigCommas(t *testing.T) { - testList{ - {"0", bigComma(0), "0"}, - {"10", bigComma(10), "10"}, - {"100", bigComma(100), "100"}, - {"1,000", bigComma(1000), "1,000"}, - {"10,000", bigComma(10000), "10,000"}, - {"100,000", bigComma(100000), "100,000"}, - {"10,000,000", bigComma(10000000), "10,000,000"}, - {"10,100,000", bigComma(10100000), "10,100,000"}, - {"10,010,000", bigComma(10010000), "10,010,000"}, - {"10,001,000", bigComma(10001000), "10,001,000"}, - {"123,456,789", bigComma(123456789), "123,456,789"}, - {"maxint", bigComma(9.223372e+18), "9,223,372,000,000,000,000"}, - {"minint", bigComma(-9.223372e+18), "-9,223,372,000,000,000,000"}, - {"-123,456,789", bigComma(-123456789), "-123,456,789"}, - {"-10,100,000", bigComma(-10100000), "-10,100,000"}, - {"-10,010,000", bigComma(-10010000), "-10,010,000"}, - {"-10,001,000", bigComma(-10001000), "-10,001,000"}, - {"-10,000,000", bigComma(-10000000), "-10,000,000"}, - {"-100,000", bigComma(-100000), "-100,000"}, - {"-10,000", bigComma(-10000), "-10,000"}, - {"-1,000", bigComma(-1000), "-1,000"}, - {"-100", bigComma(-100), "-100"}, - {"-10", bigComma(-10), "-10"}, - }.validate(t) -} - -func TestVeryBigCommas(t *testing.T) { - tests := []struct{ in, exp string }{ - { - "84889279597249724975972597249849757294578485", - "84,889,279,597,249,724,975,972,597,249,849,757,294,578,485", - }, - { - "-84889279597249724975972597249849757294578485", - "-84,889,279,597,249,724,975,972,597,249,849,757,294,578,485", - }, - } - for _, test := range tests { - n, _ 
:= (&big.Int{}).SetString(test.in, 10) - got := BigComma(n) - if test.exp != got { - t.Errorf("Expected %q, got %q", test.exp, got) - } - } -} diff --git a/vendor/github.com/dustin/go-humanize/commaf.go b/vendor/github.com/dustin/go-humanize/commaf.go deleted file mode 100644 index 620690d..0000000 --- a/vendor/github.com/dustin/go-humanize/commaf.go +++ /dev/null @@ -1,40 +0,0 @@ -// +build go1.6 - -package humanize - -import ( - "bytes" - "math/big" - "strings" -) - -// BigCommaf produces a string form of the given big.Float in base 10 -// with commas after every three orders of magnitude. -func BigCommaf(v *big.Float) string { - buf := &bytes.Buffer{} - if v.Sign() < 0 { - buf.Write([]byte{'-'}) - v.Abs(v) - } - - comma := []byte{','} - - parts := strings.Split(v.Text('f', -1), ".") - pos := 0 - if len(parts[0])%3 != 0 { - pos += len(parts[0]) % 3 - buf.WriteString(parts[0][:pos]) - buf.Write(comma) - } - for ; pos < len(parts[0]); pos += 3 { - buf.WriteString(parts[0][pos : pos+3]) - buf.Write(comma) - } - buf.Truncate(buf.Len() - 1) - - if len(parts) > 1 { - buf.Write([]byte{'.'}) - buf.WriteString(parts[1]) - } - return buf.String() -} diff --git a/vendor/github.com/dustin/go-humanize/commaf_test.go b/vendor/github.com/dustin/go-humanize/commaf_test.go deleted file mode 100644 index 21f7f9e..0000000 --- a/vendor/github.com/dustin/go-humanize/commaf_test.go +++ /dev/null @@ -1,44 +0,0 @@ -// +build go1.6 - -package humanize - -import ( - "math" - "math/big" - "testing" -) - -func BenchmarkBigCommaf(b *testing.B) { - for i := 0; i < b.N; i++ { - Commaf(1234567890.83584) - } -} - -func TestBigCommafs(t *testing.T) { - testList{ - {"0", BigCommaf(big.NewFloat(0)), "0"}, - {"10.11", BigCommaf(big.NewFloat(10.11)), "10.11"}, - {"100", BigCommaf(big.NewFloat(100)), "100"}, - {"1,000", BigCommaf(big.NewFloat(1000)), "1,000"}, - {"10,000", BigCommaf(big.NewFloat(10000)), "10,000"}, - {"100,000", BigCommaf(big.NewFloat(100000)), "100,000"}, - {"834,142.32", BigCommaf(big.NewFloat(834142.32)), "834,142.32"}, - {"10,000,000", BigCommaf(big.NewFloat(10000000)), "10,000,000"}, - {"10,100,000", BigCommaf(big.NewFloat(10100000)), "10,100,000"}, - {"10,010,000", BigCommaf(big.NewFloat(10010000)), "10,010,000"}, - {"10,001,000", BigCommaf(big.NewFloat(10001000)), "10,001,000"}, - {"123,456,789", BigCommaf(big.NewFloat(123456789)), "123,456,789"}, - {"maxf64", BigCommaf(big.NewFloat(math.MaxFloat64)), "179,769,313,486,231,570,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000"}, - {"minf64", BigCommaf(big.NewFloat(math.SmallestNonzeroFloat64)), "0.000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000004940656458412465"}, - {"-123,456,789", BigCommaf(big.NewFloat(-123456789)), "-123,456,789"}, - {"-10,100,000", BigCommaf(big.NewFloat(-10100000)), "-10,100,000"}, - {"-10,010,000", BigCommaf(big.NewFloat(-10010000)), "-10,010,000"}, - {"-10,001,000", BigCommaf(big.NewFloat(-10001000)), "-10,001,000"}, - 
{"-10,000,000", BigCommaf(big.NewFloat(-10000000)), "-10,000,000"}, - {"-100,000", BigCommaf(big.NewFloat(-100000)), "-100,000"}, - {"-10,000", BigCommaf(big.NewFloat(-10000)), "-10,000"}, - {"-1,000", BigCommaf(big.NewFloat(-1000)), "-1,000"}, - {"-100.11", BigCommaf(big.NewFloat(-100.11)), "-100.11"}, - {"-10", BigCommaf(big.NewFloat(-10)), "-10"}, - }.validate(t) -} diff --git a/vendor/github.com/dustin/go-humanize/common_test.go b/vendor/github.com/dustin/go-humanize/common_test.go deleted file mode 100644 index fc7db15..0000000 --- a/vendor/github.com/dustin/go-humanize/common_test.go +++ /dev/null @@ -1,18 +0,0 @@ -package humanize - -import ( - "testing" -) - -type testList []struct { - name, got, exp string -} - -func (tl testList) validate(t *testing.T) { - for _, test := range tl { - if test.got != test.exp { - t.Errorf("On %v, expected '%v', but got '%v'", - test.name, test.exp, test.got) - } - } -} diff --git a/vendor/github.com/dustin/go-humanize/ftoa.go b/vendor/github.com/dustin/go-humanize/ftoa.go deleted file mode 100644 index c76190b..0000000 --- a/vendor/github.com/dustin/go-humanize/ftoa.go +++ /dev/null @@ -1,23 +0,0 @@ -package humanize - -import "strconv" - -func stripTrailingZeros(s string) string { - offset := len(s) - 1 - for offset > 0 { - if s[offset] == '.' { - offset-- - break - } - if s[offset] != '0' { - break - } - offset-- - } - return s[:offset+1] -} - -// Ftoa converts a float to a string with no trailing zeros. -func Ftoa(num float64) string { - return stripTrailingZeros(strconv.FormatFloat(num, 'f', 6, 64)) -} diff --git a/vendor/github.com/dustin/go-humanize/ftoa_test.go b/vendor/github.com/dustin/go-humanize/ftoa_test.go deleted file mode 100644 index 276d411..0000000 --- a/vendor/github.com/dustin/go-humanize/ftoa_test.go +++ /dev/null @@ -1,55 +0,0 @@ -package humanize - -import ( - "fmt" - "regexp" - "strconv" - "testing" -) - -func TestFtoa(t *testing.T) { - testList{ - {"200", Ftoa(200), "200"}, - {"2", Ftoa(2), "2"}, - {"2.2", Ftoa(2.2), "2.2"}, - {"2.02", Ftoa(2.02), "2.02"}, - {"200.02", Ftoa(200.02), "200.02"}, - }.validate(t) -} - -func BenchmarkFtoaRegexTrailing(b *testing.B) { - trailingZerosRegex := regexp.MustCompile(`\.?0+$`) - - b.ResetTimer() - for i := 0; i < b.N; i++ { - trailingZerosRegex.ReplaceAllString("2.00000", "") - trailingZerosRegex.ReplaceAllString("2.0000", "") - trailingZerosRegex.ReplaceAllString("2.000", "") - trailingZerosRegex.ReplaceAllString("2.00", "") - trailingZerosRegex.ReplaceAllString("2.0", "") - trailingZerosRegex.ReplaceAllString("2", "") - } -} - -func BenchmarkFtoaFunc(b *testing.B) { - for i := 0; i < b.N; i++ { - stripTrailingZeros("2.00000") - stripTrailingZeros("2.0000") - stripTrailingZeros("2.000") - stripTrailingZeros("2.00") - stripTrailingZeros("2.0") - stripTrailingZeros("2") - } -} - -func BenchmarkFmtF(b *testing.B) { - for i := 0; i < b.N; i++ { - _ = fmt.Sprintf("%f", 2.03584) - } -} - -func BenchmarkStrconvF(b *testing.B) { - for i := 0; i < b.N; i++ { - strconv.FormatFloat(2.03584, 'f', 6, 64) - } -} diff --git a/vendor/github.com/dustin/go-humanize/humanize.go b/vendor/github.com/dustin/go-humanize/humanize.go deleted file mode 100644 index a69540a..0000000 --- a/vendor/github.com/dustin/go-humanize/humanize.go +++ /dev/null @@ -1,8 +0,0 @@ -/* -Package humanize converts boring ugly numbers to human-friendly strings and back. 
- -Durations can be turned into strings such as "3 days ago", numbers -representing sizes like 82854982 into useful strings like, "83MB" or -"79MiB" (whichever you prefer). -*/ -package humanize diff --git a/vendor/github.com/dustin/go-humanize/number.go b/vendor/github.com/dustin/go-humanize/number.go deleted file mode 100644 index 3214134..0000000 --- a/vendor/github.com/dustin/go-humanize/number.go +++ /dev/null @@ -1,192 +0,0 @@ -package humanize - -/* -Slightly adapted from the source to fit go-humanize. - -Author: https://github.com/gorhill -Source: https://gist.github.com/gorhill/5285193 - -*/ - -import ( - "math" - "strconv" -) - -var ( - renderFloatPrecisionMultipliers = [...]float64{ - 1, - 10, - 100, - 1000, - 10000, - 100000, - 1000000, - 10000000, - 100000000, - 1000000000, - } - - renderFloatPrecisionRounders = [...]float64{ - 0.5, - 0.05, - 0.005, - 0.0005, - 0.00005, - 0.000005, - 0.0000005, - 0.00000005, - 0.000000005, - 0.0000000005, - } -) - -// FormatFloat produces a formatted number as string based on the following user-specified criteria: -// * thousands separator -// * decimal separator -// * decimal precision -// -// Usage: s := RenderFloat(format, n) -// The format parameter tells how to render the number n. -// -// See examples: http://play.golang.org/p/LXc1Ddm1lJ -// -// Examples of format strings, given n = 12345.6789: -// "#,###.##" => "12,345.67" -// "#,###." => "12,345" -// "#,###" => "12345,678" -// "#\u202F###,##" => "12 345,68" -// "#.###,###### => 12.345,678900 -// "" (aka default format) => 12,345.67 -// -// The highest precision allowed is 9 digits after the decimal symbol. -// There is also a version for integer number, FormatInteger(), -// which is convenient for calls within template. -func FormatFloat(format string, n float64) string { - // Special cases: - // NaN = "NaN" - // +Inf = "+Infinity" - // -Inf = "-Infinity" - if math.IsNaN(n) { - return "NaN" - } - if n > math.MaxFloat64 { - return "Infinity" - } - if n < -math.MaxFloat64 { - return "-Infinity" - } - - // default format - precision := 2 - decimalStr := "." 
- thousandStr := "," - positiveStr := "" - negativeStr := "-" - - if len(format) > 0 { - format := []rune(format) - - // If there is an explicit format directive, - // then default values are these: - precision = 9 - thousandStr = "" - - // collect indices of meaningful formatting directives - formatIndx := []int{} - for i, char := range format { - if char != '#' && char != '0' { - formatIndx = append(formatIndx, i) - } - } - - if len(formatIndx) > 0 { - // Directive at index 0: - // Must be a '+' - // Raise an error if not the case - // index: 0123456789 - // +0.000,000 - // +000,000.0 - // +0000.00 - // +0000 - if formatIndx[0] == 0 { - if format[formatIndx[0]] != '+' { - panic("RenderFloat(): invalid positive sign directive") - } - positiveStr = "+" - formatIndx = formatIndx[1:] - } - - // Two directives: - // First is thousands separator - // Raise an error if not followed by 3-digit - // 0123456789 - // 0.000,000 - // 000,000.00 - if len(formatIndx) == 2 { - if (formatIndx[1] - formatIndx[0]) != 4 { - panic("RenderFloat(): thousands separator directive must be followed by 3 digit-specifiers") - } - thousandStr = string(format[formatIndx[0]]) - formatIndx = formatIndx[1:] - } - - // One directive: - // Directive is decimal separator - // The number of digit-specifier following the separator indicates wanted precision - // 0123456789 - // 0.00 - // 000,0000 - if len(formatIndx) == 1 { - decimalStr = string(format[formatIndx[0]]) - precision = len(format) - formatIndx[0] - 1 - } - } - } - - // generate sign part - var signStr string - if n >= 0.000000001 { - signStr = positiveStr - } else if n <= -0.000000001 { - signStr = negativeStr - n = -n - } else { - signStr = "" - n = 0.0 - } - - // split number into integer and fractional parts - intf, fracf := math.Modf(n + renderFloatPrecisionRounders[precision]) - - // generate integer part string - intStr := strconv.Itoa(int(intf)) - - // add thousand separator if required - if len(thousandStr) > 0 { - for i := len(intStr); i > 3; { - i -= 3 - intStr = intStr[:i] + thousandStr + intStr[i:] - } - } - - // no fractional part, we can leave now - if precision == 0 { - return signStr + intStr - } - - // generate fractional part - fracStr := strconv.Itoa(int(fracf * renderFloatPrecisionMultipliers[precision])) - // may need padding - if len(fracStr) < precision { - fracStr = "000000000000000"[:precision-len(fracStr)] + fracStr - } - - return signStr + intStr + decimalStr + fracStr -} - -// FormatInteger produces a formatted number as string. -// See FormatFloat. 
-func FormatInteger(format string, n int) string { - return FormatFloat(format, float64(n)) -} diff --git a/vendor/github.com/dustin/go-humanize/number_test.go b/vendor/github.com/dustin/go-humanize/number_test.go deleted file mode 100644 index dd38a5b..0000000 --- a/vendor/github.com/dustin/go-humanize/number_test.go +++ /dev/null @@ -1,78 +0,0 @@ -package humanize - -import ( - "math" - "testing" -) - -type TestStruct struct { - name string - format string - num float64 - formatted string -} - -func TestFormatFloat(t *testing.T) { - tests := []TestStruct{ - {"default", "", 12345.6789, "12,345.68"}, - {"#", "#", 12345.6789, "12345.678900000"}, - {"#.", "#.", 12345.6789, "12346"}, - {"#,#", "#,#", 12345.6789, "12345,7"}, - {"#,##", "#,##", 12345.6789, "12345,68"}, - {"#,###", "#,###", 12345.6789, "12345,679"}, - {"#,###.", "#,###.", 12345.6789, "12,346"}, - {"#,###.##", "#,###.##", 12345.6789, "12,345.68"}, - {"#,###.###", "#,###.###", 12345.6789, "12,345.679"}, - {"#,###.####", "#,###.####", 12345.6789, "12,345.6789"}, - {"#.###,######", "#.###,######", 12345.6789, "12.345,678900"}, - {"#\u202f###,##", "#\u202f###,##", 12345.6789, "12 345,68"}, - - // special cases - {"NaN", "#", math.NaN(), "NaN"}, - {"+Inf", "#", math.Inf(1), "Infinity"}, - {"-Inf", "#", math.Inf(-1), "-Infinity"}, - {"signStr <= -0.000000001", "", -0.000000002, "-0.00"}, - {"signStr = 0", "", 0, "0.00"}, - {"Format directive must start with +", "+000", 12345.6789, "+12345.678900000"}, - } - - for _, test := range tests { - got := FormatFloat(test.format, test.num) - if got != test.formatted { - t.Errorf("On %v (%v, %v), got %v, wanted %v", - test.name, test.format, test.num, got, test.formatted) - } - } - // Test a single integer - got := FormatInteger("#", 12345) - if got != "12345.000000000" { - t.Errorf("On %v (%v, %v), got %v, wanted %v", - "integerTest", "#", 12345, got, "12345.000000000") - } - // Test the things that could panic - panictests := []TestStruct{ - {"RenderFloat(): invalid positive sign directive", "-", 12345.6789, "12,345.68"}, - {"RenderFloat(): thousands separator directive must be followed by 3 digit-specifiers", "0.01", 12345.6789, "12,345.68"}, - } - for _, test := range panictests { - didPanic := false - var message interface{} - func() { - - defer func() { - if message = recover(); message != nil { - didPanic = true - } - }() - - // call the target function - _ = FormatFloat(test.format, test.num) - - }() - if didPanic != true { - t.Errorf("On %v, should have panic and did not.", - test.name) - } - } - -} diff --git a/vendor/github.com/dustin/go-humanize/ordinals.go b/vendor/github.com/dustin/go-humanize/ordinals.go deleted file mode 100644 index 43d88a8..0000000 --- a/vendor/github.com/dustin/go-humanize/ordinals.go +++ /dev/null @@ -1,25 +0,0 @@ -package humanize - -import "strconv" - -// Ordinal gives you the input number in a rank/ordinal format. 
-// -// Ordinal(3) -> 3rd -func Ordinal(x int) string { - suffix := "th" - switch x % 10 { - case 1: - if x%100 != 11 { - suffix = "st" - } - case 2: - if x%100 != 12 { - suffix = "nd" - } - case 3: - if x%100 != 13 { - suffix = "rd" - } - } - return strconv.Itoa(x) + suffix -} diff --git a/vendor/github.com/dustin/go-humanize/ordinals_test.go b/vendor/github.com/dustin/go-humanize/ordinals_test.go deleted file mode 100644 index 51d85ee..0000000 --- a/vendor/github.com/dustin/go-humanize/ordinals_test.go +++ /dev/null @@ -1,22 +0,0 @@ -package humanize - -import ( - "testing" -) - -func TestOrdinals(t *testing.T) { - testList{ - {"0", Ordinal(0), "0th"}, - {"1", Ordinal(1), "1st"}, - {"2", Ordinal(2), "2nd"}, - {"3", Ordinal(3), "3rd"}, - {"4", Ordinal(4), "4th"}, - {"10", Ordinal(10), "10th"}, - {"11", Ordinal(11), "11th"}, - {"12", Ordinal(12), "12th"}, - {"13", Ordinal(13), "13th"}, - {"101", Ordinal(101), "101st"}, - {"102", Ordinal(102), "102nd"}, - {"103", Ordinal(103), "103rd"}, - }.validate(t) -} diff --git a/vendor/github.com/dustin/go-humanize/si.go b/vendor/github.com/dustin/go-humanize/si.go deleted file mode 100644 index 9cce4e8..0000000 --- a/vendor/github.com/dustin/go-humanize/si.go +++ /dev/null @@ -1,113 +0,0 @@ -package humanize - -import ( - "errors" - "math" - "regexp" - "strconv" -) - -var siPrefixTable = map[float64]string{ - -24: "y", // yocto - -21: "z", // zepto - -18: "a", // atto - -15: "f", // femto - -12: "p", // pico - -9: "n", // nano - -6: "µ", // micro - -3: "m", // milli - 0: "", - 3: "k", // kilo - 6: "M", // mega - 9: "G", // giga - 12: "T", // tera - 15: "P", // peta - 18: "E", // exa - 21: "Z", // zetta - 24: "Y", // yotta -} - -var revSIPrefixTable = revfmap(siPrefixTable) - -// revfmap reverses the map and precomputes the power multiplier -func revfmap(in map[float64]string) map[string]float64 { - rv := map[string]float64{} - for k, v := range in { - rv[v] = math.Pow(10, k) - } - return rv -} - -var riParseRegex *regexp.Regexp - -func init() { - ri := `^([\-0-9.]+)\s?([` - for _, v := range siPrefixTable { - ri += v - } - ri += `]?)(.*)` - - riParseRegex = regexp.MustCompile(ri) -} - -// ComputeSI finds the most appropriate SI prefix for the given number -// and returns the prefix along with the value adjusted to be within -// that prefix. -// -// See also: SI, ParseSI. -// -// e.g. ComputeSI(2.2345e-12) -> (2.2345, "p") -func ComputeSI(input float64) (float64, string) { - if input == 0 { - return 0, "" - } - mag := math.Abs(input) - exponent := math.Floor(logn(mag, 10)) - exponent = math.Floor(exponent/3) * 3 - - value := mag / math.Pow(10, exponent) - - // Handle special case where value is exactly 1000.0 - // Should return 1M instead of 1000k - if value == 1000.0 { - exponent += 3 - value = mag / math.Pow(10, exponent) - } - - value = math.Copysign(value, input) - - prefix := siPrefixTable[exponent] - return value, prefix -} - -// SI returns a string with default formatting. -// -// SI uses Ftoa to format float value, removing trailing zeros. -// -// See also: ComputeSI, ParseSI. -// -// e.g. SI(1000000, B) -> 1MB -// e.g. SI(2.2345e-12, "F") -> 2.2345pF -func SI(input float64, unit string) string { - value, prefix := ComputeSI(input) - return Ftoa(value) + " " + prefix + unit -} - -var errInvalid = errors.New("invalid input") - -// ParseSI parses an SI string back into the number and unit. -// -// See also: SI, ComputeSI. -// -// e.g. 
ParseSI(2.2345pF) -> (2.2345e-12, "F", nil) -func ParseSI(input string) (float64, string, error) { - found := riParseRegex.FindStringSubmatch(input) - if len(found) != 4 { - return 0, "", errInvalid - } - mag := revSIPrefixTable[found[2]] - unit := found[3] - - base, err := strconv.ParseFloat(found[1], 64) - return base * mag, unit, err -} diff --git a/vendor/github.com/dustin/go-humanize/si_test.go b/vendor/github.com/dustin/go-humanize/si_test.go deleted file mode 100644 index bc5bac6..0000000 --- a/vendor/github.com/dustin/go-humanize/si_test.go +++ /dev/null @@ -1,101 +0,0 @@ -package humanize - -import ( - "math" - "testing" -) - -func TestSI(t *testing.T) { - tests := []struct { - name string - num float64 - formatted string - }{ - {"e-24", 1e-24, "1 yF"}, - {"e-21", 1e-21, "1 zF"}, - {"e-18", 1e-18, "1 aF"}, - {"e-15", 1e-15, "1 fF"}, - {"e-12", 1e-12, "1 pF"}, - {"e-12", 2.2345e-12, "2.2345 pF"}, - {"e-12", 2.23e-12, "2.23 pF"}, - {"e-11", 2.23e-11, "22.3 pF"}, - {"e-10", 2.2e-10, "220 pF"}, - {"e-9", 2.2e-9, "2.2 nF"}, - {"e-8", 2.2e-8, "22 nF"}, - {"e-7", 2.2e-7, "220 nF"}, - {"e-6", 2.2e-6, "2.2 µF"}, - {"e-6", 1e-6, "1 µF"}, - {"e-5", 2.2e-5, "22 µF"}, - {"e-4", 2.2e-4, "220 µF"}, - {"e-3", 2.2e-3, "2.2 mF"}, - {"e-2", 2.2e-2, "22 mF"}, - {"e-1", 2.2e-1, "220 mF"}, - {"e+0", 2.2e-0, "2.2 F"}, - {"e+0", 2.2, "2.2 F"}, - {"e+1", 2.2e+1, "22 F"}, - {"0", 0, "0 F"}, - {"e+1", 22, "22 F"}, - {"e+2", 2.2e+2, "220 F"}, - {"e+2", 220, "220 F"}, - {"e+3", 2.2e+3, "2.2 kF"}, - {"e+3", 2200, "2.2 kF"}, - {"e+4", 2.2e+4, "22 kF"}, - {"e+4", 22000, "22 kF"}, - {"e+5", 2.2e+5, "220 kF"}, - {"e+6", 2.2e+6, "2.2 MF"}, - {"e+6", 1e+6, "1 MF"}, - {"e+7", 2.2e+7, "22 MF"}, - {"e+8", 2.2e+8, "220 MF"}, - {"e+9", 2.2e+9, "2.2 GF"}, - {"e+10", 2.2e+10, "22 GF"}, - {"e+11", 2.2e+11, "220 GF"}, - {"e+12", 2.2e+12, "2.2 TF"}, - {"e+15", 2.2e+15, "2.2 PF"}, - {"e+18", 2.2e+18, "2.2 EF"}, - {"e+21", 2.2e+21, "2.2 ZF"}, - {"e+24", 2.2e+24, "2.2 YF"}, - - // special case - {"1F", 1000 * 1000, "1 MF"}, - {"1F", 1e6, "1 MF"}, - - // negative number - {"-100 F", -100, "-100 F"}, - } - - for _, test := range tests { - got := SI(test.num, "F") - if got != test.formatted { - t.Errorf("On %v (%v), got %v, wanted %v", - test.name, test.num, got, test.formatted) - } - - gotf, gotu, err := ParseSI(test.formatted) - if err != nil { - t.Errorf("Error parsing %v (%v): %v", test.name, test.formatted, err) - continue - } - - if math.Abs(1-(gotf/test.num)) > 0.01 { - t.Errorf("On %v (%v), got %v, wanted %v (±%v)", - test.name, test.formatted, gotf, test.num, - math.Abs(1-(gotf/test.num))) - } - if gotu != "F" { - t.Errorf("On %v (%v), expected unit F, got %v", - test.name, test.formatted, gotu) - } - } - - // Parse error - gotf, gotu, err := ParseSI("x1.21JW") // 1.21 jigga whats - if err == nil { - t.Errorf("Expected error on x1.21JW, got %v %v", gotf, gotu) - } -} - -func BenchmarkParseSI(b *testing.B) { - for i := 0; i < b.N; i++ { - ParseSI("2.2346ZB") - } -} diff --git a/vendor/github.com/dustin/go-humanize/times.go b/vendor/github.com/dustin/go-humanize/times.go deleted file mode 100644 index 49906b3..0000000 --- a/vendor/github.com/dustin/go-humanize/times.go +++ /dev/null @@ -1,90 +0,0 @@ -package humanize - -import ( - "fmt" - "math" - "sort" - "time" -) - -// Seconds-based time units -const ( - Minute = 60 - Hour = 60 * Minute - Day = 24 * Hour - Week = 7 * Day - Month = 30 * Day - Year = 12 * Month - LongTime = 37 * Year -) - -// Time formats a time into a relative string. 
-// -// Time(someT) -> "3 weeks ago" -func Time(then time.Time) string { - return RelTime(then, time.Now(), "ago", "from now") -} - -var magnitudes = []struct { - d int64 - format string - divby int64 -}{ - {1, "now", 1}, - {2, "1 second %s", 1}, - {Minute, "%d seconds %s", 1}, - {2 * Minute, "1 minute %s", 1}, - {Hour, "%d minutes %s", Minute}, - {2 * Hour, "1 hour %s", 1}, - {Day, "%d hours %s", Hour}, - {2 * Day, "1 day %s", 1}, - {Week, "%d days %s", Day}, - {2 * Week, "1 week %s", 1}, - {Month, "%d weeks %s", Week}, - {2 * Month, "1 month %s", 1}, - {Year, "%d months %s", Month}, - {18 * Month, "1 year %s", 1}, - {2 * Year, "2 years %s", 1}, - {LongTime, "%d years %s", Year}, - {math.MaxInt64, "a long while %s", 1}, -} - -// RelTime formats a time into a relative string. -// -// It takes two times and two labels. In addition to the generic time -// delta string (e.g. 5 minutes), the labels are used applied so that -// the label corresponding to the smaller time is applied. -// -// RelTime(timeInPast, timeInFuture, "earlier", "later") -> "3 weeks earlier" -func RelTime(a, b time.Time, albl, blbl string) string { - lbl := albl - diff := b.Unix() - a.Unix() - - after := a.After(b) - if after { - lbl = blbl - diff = a.Unix() - b.Unix() - } - - n := sort.Search(len(magnitudes), func(i int) bool { - return magnitudes[i].d > diff - }) - - mag := magnitudes[n] - args := []interface{}{} - escaped := false - for _, ch := range mag.format { - if escaped { - switch ch { - case 's': - args = append(args, lbl) - case 'd': - args = append(args, diff/mag.divby) - } - escaped = false - } else { - escaped = ch == '%' - } - } - return fmt.Sprintf(mag.format, args...) -} diff --git a/vendor/github.com/dustin/go-humanize/times_test.go b/vendor/github.com/dustin/go-humanize/times_test.go deleted file mode 100644 index 528daa4..0000000 --- a/vendor/github.com/dustin/go-humanize/times_test.go +++ /dev/null @@ -1,71 +0,0 @@ -package humanize - -import ( - "math" - "testing" - "time" -) - -func TestPast(t *testing.T) { - now := time.Now().Unix() - testList{ - {"now", Time(time.Unix(now, 0)), "now"}, - {"1 second ago", Time(time.Unix(now-1, 0)), "1 second ago"}, - {"12 seconds ago", Time(time.Unix(now-12, 0)), "12 seconds ago"}, - {"30 seconds ago", Time(time.Unix(now-30, 0)), "30 seconds ago"}, - {"45 seconds ago", Time(time.Unix(now-45, 0)), "45 seconds ago"}, - {"1 minute ago", Time(time.Unix(now-63, 0)), "1 minute ago"}, - {"15 minutes ago", Time(time.Unix(now-15*Minute, 0)), "15 minutes ago"}, - {"1 hour ago", Time(time.Unix(now-63*Minute, 0)), "1 hour ago"}, - {"2 hours ago", Time(time.Unix(now-2*Hour, 0)), "2 hours ago"}, - {"21 hours ago", Time(time.Unix(now-21*Hour, 0)), "21 hours ago"}, - {"1 day ago", Time(time.Unix(now-26*Hour, 0)), "1 day ago"}, - {"2 days ago", Time(time.Unix(now-49*Hour, 0)), "2 days ago"}, - {"3 days ago", Time(time.Unix(now-3*Day, 0)), "3 days ago"}, - {"1 week ago (1)", Time(time.Unix(now-7*Day, 0)), "1 week ago"}, - {"1 week ago (2)", Time(time.Unix(now-12*Day, 0)), "1 week ago"}, - {"2 weeks ago", Time(time.Unix(now-15*Day, 0)), "2 weeks ago"}, - {"1 month ago", Time(time.Unix(now-39*Day, 0)), "1 month ago"}, - {"3 months ago", Time(time.Unix(now-99*Day, 0)), "3 months ago"}, - {"1 year ago (1)", Time(time.Unix(now-365*Day, 0)), "1 year ago"}, - {"1 year ago (1)", Time(time.Unix(now-400*Day, 0)), "1 year ago"}, - {"2 years ago (1)", Time(time.Unix(now-548*Day, 0)), "2 years ago"}, - {"2 years ago (2)", Time(time.Unix(now-725*Day, 0)), "2 years ago"}, - {"2 years ago (3)", 
Time(time.Unix(now-800*Day, 0)), "2 years ago"}, - {"3 years ago", Time(time.Unix(now-3*Year, 0)), "3 years ago"}, - {"long ago", Time(time.Unix(now-LongTime, 0)), "a long while ago"}, - }.validate(t) -} - -func TestFuture(t *testing.T) { - now := time.Now().Unix() - testList{ - {"now", Time(time.Unix(now, 0)), "now"}, - {"1 second from now", Time(time.Unix(now+1, 0)), "1 second from now"}, - {"12 seconds from now", Time(time.Unix(now+12, 0)), "12 seconds from now"}, - {"30 seconds from now", Time(time.Unix(now+30, 0)), "30 seconds from now"}, - {"45 seconds from now", Time(time.Unix(now+45, 0)), "45 seconds from now"}, - {"15 minutes from now", Time(time.Unix(now+15*Minute, 0)), "15 minutes from now"}, - {"2 hours from now", Time(time.Unix(now+2*Hour, 0)), "2 hours from now"}, - {"21 hours from now", Time(time.Unix(now+21*Hour, 0)), "21 hours from now"}, - {"1 day from now", Time(time.Unix(now+26*Hour, 0)), "1 day from now"}, - {"2 days from now", Time(time.Unix(now+49*Hour, 0)), "2 days from now"}, - {"3 days from now", Time(time.Unix(now+3*Day, 0)), "3 days from now"}, - {"1 week from now (1)", Time(time.Unix(now+7*Day, 0)), "1 week from now"}, - {"1 week from now (2)", Time(time.Unix(now+12*Day, 0)), "1 week from now"}, - {"2 weeks from now", Time(time.Unix(now+15*Day, 0)), "2 weeks from now"}, - {"1 month from now", Time(time.Unix(now+30*Day, 0)), "1 month from now"}, - {"1 year from now", Time(time.Unix(now+365*Day, 0)), "1 year from now"}, - {"2 years from now", Time(time.Unix(now+2*Year, 0)), "2 years from now"}, - {"a while from now", Time(time.Unix(now+LongTime, 0)), "a long while from now"}, - }.validate(t) -} - -func TestRange(t *testing.T) { - start := time.Time{} - end := time.Unix(math.MaxInt64, math.MaxInt64) - x := RelTime(start, end, "ago", "from now") - if x != "a long while from now" { - t.Errorf("Expected a long while from now, got %q", x) - } -} diff --git a/vendor/github.com/flynn/go-shlex/COPYING b/vendor/github.com/flynn/go-shlex/COPYING deleted file mode 100644 index d645695..0000000 --- a/vendor/github.com/flynn/go-shlex/COPYING +++ /dev/null @@ -1,202 +0,0 @@ - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. 
- - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. 
You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. 
In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. diff --git a/vendor/github.com/flynn/go-shlex/Makefile b/vendor/github.com/flynn/go-shlex/Makefile deleted file mode 100644 index 038d9a4..0000000 --- a/vendor/github.com/flynn/go-shlex/Makefile +++ /dev/null @@ -1,21 +0,0 @@ -# Copyright 2011 Google Inc. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -include $(GOROOT)/src/Make.inc - -TARG=shlex -GOFILES=\ - shlex.go\ - -include $(GOROOT)/src/Make.pkg diff --git a/vendor/github.com/flynn/go-shlex/README.md b/vendor/github.com/flynn/go-shlex/README.md deleted file mode 100644 index c86bcc0..0000000 --- a/vendor/github.com/flynn/go-shlex/README.md +++ /dev/null @@ -1,2 +0,0 @@ -go-shlex is a simple lexer for go that supports shell-style quoting, -commenting, and escaping. diff --git a/vendor/github.com/flynn/go-shlex/shlex.go b/vendor/github.com/flynn/go-shlex/shlex.go deleted file mode 100644 index 7aeace8..0000000 --- a/vendor/github.com/flynn/go-shlex/shlex.go +++ /dev/null @@ -1,457 +0,0 @@ -/* -Copyright 2012 Google Inc. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package shlex - -/* -Package shlex implements a simple lexer which splits input in to tokens using -shell-style rules for quoting and commenting. -*/ -import ( - "bufio" - "errors" - "fmt" - "io" - "strings" -) - -/* -A TokenType is a top-level token; a word, space, comment, unknown. -*/ -type TokenType int - -/* -A RuneTokenType is the type of a UTF-8 character; a character, quote, space, escape. -*/ -type RuneTokenType int - -type lexerState int - -type Token struct { - tokenType TokenType - value string -} - -/* -Two tokens are equal if both their types and values are equal. A nil token can -never equal another token. -*/ -func (a *Token) Equal(b *Token) bool { - if a == nil || b == nil { - return false - } - if a.tokenType != b.tokenType { - return false - } - return a.value == b.value -} - -const ( - RUNE_CHAR string = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789._-,/@$*()+=><:;&^%~|!?[]{}" - RUNE_SPACE string = " \t\r\n" - RUNE_ESCAPING_QUOTE string = "\"" - RUNE_NONESCAPING_QUOTE string = "'" - RUNE_ESCAPE = "\\" - RUNE_COMMENT = "#" - - RUNETOKEN_UNKNOWN RuneTokenType = 0 - RUNETOKEN_CHAR RuneTokenType = 1 - RUNETOKEN_SPACE RuneTokenType = 2 - RUNETOKEN_ESCAPING_QUOTE RuneTokenType = 3 - RUNETOKEN_NONESCAPING_QUOTE RuneTokenType = 4 - RUNETOKEN_ESCAPE RuneTokenType = 5 - RUNETOKEN_COMMENT RuneTokenType = 6 - RUNETOKEN_EOF RuneTokenType = 7 - - TOKEN_UNKNOWN TokenType = 0 - TOKEN_WORD TokenType = 1 - TOKEN_SPACE TokenType = 2 - TOKEN_COMMENT TokenType = 3 - - STATE_START lexerState = 0 - STATE_INWORD lexerState = 1 - STATE_ESCAPING lexerState = 2 - STATE_ESCAPING_QUOTED lexerState = 3 - STATE_QUOTED_ESCAPING lexerState = 4 - STATE_QUOTED lexerState = 5 - STATE_COMMENT lexerState = 6 - - INITIAL_TOKEN_CAPACITY int = 100 -) - -/* -A type for classifying characters. This allows for different sorts of -classifiers - those accepting extended non-ascii chars, or strict posix -compatibility, for example. -*/ -type TokenClassifier struct { - typeMap map[int32]RuneTokenType -} - -func addRuneClass(typeMap *map[int32]RuneTokenType, runes string, tokenType RuneTokenType) { - for _, rune := range runes { - (*typeMap)[int32(rune)] = tokenType - } -} - -/* -Create a new classifier for basic ASCII characters. 
-*/ -func NewDefaultClassifier() *TokenClassifier { - typeMap := map[int32]RuneTokenType{} - addRuneClass(&typeMap, RUNE_CHAR, RUNETOKEN_CHAR) - addRuneClass(&typeMap, RUNE_SPACE, RUNETOKEN_SPACE) - addRuneClass(&typeMap, RUNE_ESCAPING_QUOTE, RUNETOKEN_ESCAPING_QUOTE) - addRuneClass(&typeMap, RUNE_NONESCAPING_QUOTE, RUNETOKEN_NONESCAPING_QUOTE) - addRuneClass(&typeMap, RUNE_ESCAPE, RUNETOKEN_ESCAPE) - addRuneClass(&typeMap, RUNE_COMMENT, RUNETOKEN_COMMENT) - return &TokenClassifier{ - typeMap: typeMap} -} - -func (classifier *TokenClassifier) ClassifyRune(rune int32) RuneTokenType { - return classifier.typeMap[rune] -} - -/* -A type for turning an input stream in to a sequence of strings. Whitespace and -comments are skipped. -*/ -type Lexer struct { - tokenizer *Tokenizer -} - -/* -Create a new lexer. -*/ -func NewLexer(r io.Reader) (*Lexer, error) { - - tokenizer, err := NewTokenizer(r) - if err != nil { - return nil, err - } - lexer := &Lexer{tokenizer: tokenizer} - return lexer, nil -} - -/* -Return the next word, and an error value. If there are no more words, the error -will be io.EOF. -*/ -func (l *Lexer) NextWord() (string, error) { - var token *Token - var err error - for { - token, err = l.tokenizer.NextToken() - if err != nil { - return "", err - } - switch token.tokenType { - case TOKEN_WORD: - { - return token.value, nil - } - case TOKEN_COMMENT: - { - // skip comments - } - default: - { - panic(fmt.Sprintf("Unknown token type: %v", token.tokenType)) - } - } - } - return "", io.EOF -} - -/* -A type for turning an input stream in to a sequence of typed tokens. -*/ -type Tokenizer struct { - input *bufio.Reader - classifier *TokenClassifier -} - -/* -Create a new tokenizer. -*/ -func NewTokenizer(r io.Reader) (*Tokenizer, error) { - input := bufio.NewReader(r) - classifier := NewDefaultClassifier() - tokenizer := &Tokenizer{ - input: input, - classifier: classifier} - return tokenizer, nil -} - -/* -Scan the stream for the next token. - -This uses an internal state machine. It will panic if it encounters a character -which it does not know how to handle. 
-*/ -func (t *Tokenizer) scanStream() (*Token, error) { - state := STATE_START - var tokenType TokenType - value := make([]int32, 0, INITIAL_TOKEN_CAPACITY) - var ( - nextRune int32 - nextRuneType RuneTokenType - err error - ) -SCAN: - for { - nextRune, _, err = t.input.ReadRune() - nextRuneType = t.classifier.ClassifyRune(nextRune) - if err != nil { - if err == io.EOF { - nextRuneType = RUNETOKEN_EOF - err = nil - } else { - return nil, err - } - } - switch state { - case STATE_START: // no runes read yet - { - switch nextRuneType { - case RUNETOKEN_EOF: - { - return nil, io.EOF - } - case RUNETOKEN_CHAR: - { - tokenType = TOKEN_WORD - value = append(value, nextRune) - state = STATE_INWORD - } - case RUNETOKEN_SPACE: - { - } - case RUNETOKEN_ESCAPING_QUOTE: - { - tokenType = TOKEN_WORD - state = STATE_QUOTED_ESCAPING - } - case RUNETOKEN_NONESCAPING_QUOTE: - { - tokenType = TOKEN_WORD - state = STATE_QUOTED - } - case RUNETOKEN_ESCAPE: - { - tokenType = TOKEN_WORD - state = STATE_ESCAPING - } - case RUNETOKEN_COMMENT: - { - tokenType = TOKEN_COMMENT - state = STATE_COMMENT - } - default: - { - return nil, errors.New(fmt.Sprintf("Unknown rune: %v", nextRune)) - } - } - } - case STATE_INWORD: // in a regular word - { - switch nextRuneType { - case RUNETOKEN_EOF: - { - break SCAN - } - case RUNETOKEN_CHAR, RUNETOKEN_COMMENT: - { - value = append(value, nextRune) - } - case RUNETOKEN_SPACE: - { - t.input.UnreadRune() - break SCAN - } - case RUNETOKEN_ESCAPING_QUOTE: - { - state = STATE_QUOTED_ESCAPING - } - case RUNETOKEN_NONESCAPING_QUOTE: - { - state = STATE_QUOTED - } - case RUNETOKEN_ESCAPE: - { - state = STATE_ESCAPING - } - default: - { - return nil, errors.New(fmt.Sprintf("Uknown rune: %v", nextRune)) - } - } - } - case STATE_ESCAPING: // the next rune after an escape character - { - switch nextRuneType { - case RUNETOKEN_EOF: - { - err = errors.New("EOF found after escape character") - break SCAN - } - case RUNETOKEN_CHAR, RUNETOKEN_SPACE, RUNETOKEN_ESCAPING_QUOTE, RUNETOKEN_NONESCAPING_QUOTE, RUNETOKEN_ESCAPE, RUNETOKEN_COMMENT: - { - state = STATE_INWORD - value = append(value, nextRune) - } - default: - { - return nil, errors.New(fmt.Sprintf("Uknown rune: %v", nextRune)) - } - } - } - case STATE_ESCAPING_QUOTED: // the next rune after an escape character, in double quotes - { - switch nextRuneType { - case RUNETOKEN_EOF: - { - err = errors.New("EOF found after escape character") - break SCAN - } - case RUNETOKEN_CHAR, RUNETOKEN_SPACE, RUNETOKEN_ESCAPING_QUOTE, RUNETOKEN_NONESCAPING_QUOTE, RUNETOKEN_ESCAPE, RUNETOKEN_COMMENT: - { - state = STATE_QUOTED_ESCAPING - value = append(value, nextRune) - } - default: - { - return nil, errors.New(fmt.Sprintf("Uknown rune: %v", nextRune)) - } - } - } - case STATE_QUOTED_ESCAPING: // in escaping double quotes - { - switch nextRuneType { - case RUNETOKEN_EOF: - { - err = errors.New("EOF found when expecting closing quote.") - break SCAN - } - case RUNETOKEN_CHAR, RUNETOKEN_UNKNOWN, RUNETOKEN_SPACE, RUNETOKEN_NONESCAPING_QUOTE, RUNETOKEN_COMMENT: - { - value = append(value, nextRune) - } - case RUNETOKEN_ESCAPING_QUOTE: - { - state = STATE_INWORD - } - case RUNETOKEN_ESCAPE: - { - state = STATE_ESCAPING_QUOTED - } - default: - { - return nil, errors.New(fmt.Sprintf("Uknown rune: %v", nextRune)) - } - } - } - case STATE_QUOTED: // in non-escaping single quotes - { - switch nextRuneType { - case RUNETOKEN_EOF: - { - err = errors.New("EOF found when expecting closing quote.") - break SCAN - } - case RUNETOKEN_CHAR, RUNETOKEN_UNKNOWN, 
RUNETOKEN_SPACE, RUNETOKEN_ESCAPING_QUOTE, RUNETOKEN_ESCAPE, RUNETOKEN_COMMENT: - { - value = append(value, nextRune) - } - case RUNETOKEN_NONESCAPING_QUOTE: - { - state = STATE_INWORD - } - default: - { - return nil, errors.New(fmt.Sprintf("Uknown rune: %v", nextRune)) - } - } - } - case STATE_COMMENT: - { - switch nextRuneType { - case RUNETOKEN_EOF: - { - break SCAN - } - case RUNETOKEN_CHAR, RUNETOKEN_UNKNOWN, RUNETOKEN_ESCAPING_QUOTE, RUNETOKEN_ESCAPE, RUNETOKEN_COMMENT, RUNETOKEN_NONESCAPING_QUOTE: - { - value = append(value, nextRune) - } - case RUNETOKEN_SPACE: - { - if nextRune == '\n' { - state = STATE_START - break SCAN - } else { - value = append(value, nextRune) - } - } - default: - { - return nil, errors.New(fmt.Sprintf("Uknown rune: %v", nextRune)) - } - } - } - default: - { - panic(fmt.Sprintf("Unexpected state: %v", state)) - } - } - } - token := &Token{ - tokenType: tokenType, - value: string(value)} - return token, err -} - -/* -Return the next token in the stream, and an error value. If there are no more -tokens available, the error value will be io.EOF. -*/ -func (t *Tokenizer) NextToken() (*Token, error) { - return t.scanStream() -} - -/* -Split a string in to a slice of strings, based upon shell-style rules for -quoting, escaping, and spaces. -*/ -func Split(s string) ([]string, error) { - l, err := NewLexer(strings.NewReader(s)) - if err != nil { - return nil, err - } - subStrings := []string{} - for { - word, err := l.NextWord() - if err != nil { - if err == io.EOF { - return subStrings, nil - } - return subStrings, err - } - subStrings = append(subStrings, word) - } - return subStrings, nil -} diff --git a/vendor/github.com/flynn/go-shlex/shlex_test.go b/vendor/github.com/flynn/go-shlex/shlex_test.go deleted file mode 100644 index 7551f7c..0000000 --- a/vendor/github.com/flynn/go-shlex/shlex_test.go +++ /dev/null @@ -1,162 +0,0 @@ -/* -Copyright 2012 Google Inc. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package shlex - -import ( - "strings" - "testing" -) - -func checkError(err error, t *testing.T) { - if err != nil { - t.Error(err) - } -} - -func TestClassifier(t *testing.T) { - classifier := NewDefaultClassifier() - runeTests := map[int32]RuneTokenType{ - 'a': RUNETOKEN_CHAR, - ' ': RUNETOKEN_SPACE, - '"': RUNETOKEN_ESCAPING_QUOTE, - '\'': RUNETOKEN_NONESCAPING_QUOTE, - '#': RUNETOKEN_COMMENT} - for rune, expectedType := range runeTests { - foundType := classifier.ClassifyRune(rune) - if foundType != expectedType { - t.Logf("Expected type: %v for rune '%c'(%v). 
Found type: %v.", expectedType, rune, rune, foundType) - t.Fail() - } - } -} - -func TestTokenizer(t *testing.T) { - testInput := strings.NewReader("one two \"three four\" \"five \\\"six\\\"\" seven#eight # nine # ten\n eleven") - expectedTokens := []*Token{ - &Token{ - tokenType: TOKEN_WORD, - value: "one"}, - &Token{ - tokenType: TOKEN_WORD, - value: "two"}, - &Token{ - tokenType: TOKEN_WORD, - value: "three four"}, - &Token{ - tokenType: TOKEN_WORD, - value: "five \"six\""}, - &Token{ - tokenType: TOKEN_WORD, - value: "seven#eight"}, - &Token{ - tokenType: TOKEN_COMMENT, - value: " nine # ten"}, - &Token{ - tokenType: TOKEN_WORD, - value: "eleven"}} - - tokenizer, err := NewTokenizer(testInput) - checkError(err, t) - for _, expectedToken := range expectedTokens { - foundToken, err := tokenizer.NextToken() - checkError(err, t) - if !foundToken.Equal(expectedToken) { - t.Error("Expected token:", expectedToken, ". Found:", foundToken) - } - } -} - -func TestLexer(t *testing.T) { - testInput := strings.NewReader("one") - expectedWord := "one" - lexer, err := NewLexer(testInput) - checkError(err, t) - foundWord, err := lexer.NextWord() - checkError(err, t) - if expectedWord != foundWord { - t.Error("Expected word:", expectedWord, ". Found:", foundWord) - } -} - -func TestSplitSimple(t *testing.T) { - testInput := "one two three" - expectedOutput := []string{"one", "two", "three"} - foundOutput, err := Split(testInput) - if err != nil { - t.Error("Split returned error:", err) - } - if len(expectedOutput) != len(foundOutput) { - t.Error("Split expected:", len(expectedOutput), "results. Found:", len(foundOutput), "results") - } - for i := range foundOutput { - if foundOutput[i] != expectedOutput[i] { - t.Error("Item:", i, "(", foundOutput[i], ") differs from the expected value:", expectedOutput[i]) - } - } -} - -func TestSplitEscapingQuotes(t *testing.T) { - testInput := "one \"два ${three}\" four" - expectedOutput := []string{"one", "два ${three}", "four"} - foundOutput, err := Split(testInput) - if err != nil { - t.Error("Split returned error:", err) - } - if len(expectedOutput) != len(foundOutput) { - t.Error("Split expected:", len(expectedOutput), "results. Found:", len(foundOutput), "results") - } - for i := range foundOutput { - if foundOutput[i] != expectedOutput[i] { - t.Error("Item:", i, "(", foundOutput[i], ") differs from the expected value:", expectedOutput[i]) - } - } -} - -func TestGlobbingExpressions(t *testing.T) { - testInput := "onefile *file one?ile onefil[de]" - expectedOutput := []string{"onefile", "*file", "one?ile", "onefil[de]"} - foundOutput, err := Split(testInput) - if err != nil { - t.Error("Split returned error", err) - } - if len(expectedOutput) != len(foundOutput) { - t.Error("Split expected:", len(expectedOutput), "results. Found:", len(foundOutput), "results") - } - for i := range foundOutput { - if foundOutput[i] != expectedOutput[i] { - t.Error("Item:", i, "(", foundOutput[i], ") differs from the expected value:", expectedOutput[i]) - } - } - -} - -func TestSplitNonEscapingQuotes(t *testing.T) { - testInput := "one 'два ${three}' four" - expectedOutput := []string{"one", "два ${three}", "four"} - foundOutput, err := Split(testInput) - if err != nil { - t.Error("Split returned error:", err) - } - if len(expectedOutput) != len(foundOutput) { - t.Error("Split expected:", len(expectedOutput), "results. 
Found:", len(foundOutput), "results") - } - for i := range foundOutput { - if foundOutput[i] != expectedOutput[i] { - t.Error("Item:", i, "(", foundOutput[i], ") differs from the expected value:", expectedOutput[i]) - } - } -} diff --git a/vendor/github.com/go-kit/kit/.gitignore b/vendor/github.com/go-kit/kit/.gitignore deleted file mode 100644 index 716bfff..0000000 --- a/vendor/github.com/go-kit/kit/.gitignore +++ /dev/null @@ -1,45 +0,0 @@ -examples/addsvc/addsvc -examples/addsvc/client/client -examples/apigateway/apigateway -examples/profilesvc/profilesvc -examples/stringsvc1/stringsvc1 -examples/stringsvc2/stringsvc2 -examples/stringsvc3/stringsvc3 -gover.coverprofile - -# Compiled Object files, Static and Dynamic libs (Shared Objects) -*.o -*.a -*.so - -# Folders -_obj -_test -_old* - -# Architecture specific extensions/prefixes -*.[568vq] -[568vq].out - -*.cgo1.go -*.cgo2.c -_cgo_defun.c -_cgo_gotypes.go -_cgo_export.* - -_testmain.go - -*.exe - -# https://github.com/github/gitignore/blob/master/Global/Vim.gitignore -# swap -[._]*.s[a-w][a-z] -[._]s[a-w][a-z] -# session -Session.vim -# temporary -.netrwhist -*~ -# auto-generated tag files -tags - diff --git a/vendor/github.com/go-kit/kit/.travis.yml b/vendor/github.com/go-kit/kit/.travis.yml deleted file mode 100644 index 413f1db..0000000 --- a/vendor/github.com/go-kit/kit/.travis.yml +++ /dev/null @@ -1,8 +0,0 @@ -language: go - -script: go test -race -v ./... - -go: - - 1.5.3 - - 1.6 - #- tip diff --git a/vendor/github.com/go-kit/kit/CONTRIBUTING.md b/vendor/github.com/go-kit/kit/CONTRIBUTING.md deleted file mode 100644 index 5cb85a3..0000000 --- a/vendor/github.com/go-kit/kit/CONTRIBUTING.md +++ /dev/null @@ -1,22 +0,0 @@ -# Contributing - -First, thank you for contributing! We love and encourage pull requests from everyone. - -At this stage, we're still developing the initial drafts of all of the packages, using an -[RFC workflow](https://github.com/go-kit/kit/tree/master/rfc). - -Before submitting major changes, here are a few guidelines to follow: - -1. Check the [open issues][issues] and [pull requests][prs] for existing discussions. -1. Open an [issue][issues] to discuss a new feature. -1. Write tests. -1. Make sure the entire test suite passes locally and on Travis CI. -1. Open a Pull Request. -1. [Squash your commits][squash] after receiving feedback and add a [great commit message][message]. -1. Have fun! - -[issues]: https://github.com/go-kit/kit/issues -[prs]: https://github.com/go-kit/kit/pulls -[squash]: http://gitready.com/advanced/2009/02/10/squashing-commits-with-rebase.html -[message]: http://tbaggery.com/2008/04/19/a-note-about-git-commit-messages.html - diff --git a/vendor/github.com/go-kit/kit/LICENSE b/vendor/github.com/go-kit/kit/LICENSE deleted file mode 100644 index 9d83342..0000000 --- a/vendor/github.com/go-kit/kit/LICENSE +++ /dev/null @@ -1,22 +0,0 @@ -The MIT License (MIT) - -Copyright (c) 2015 Peter Bourgon - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. 
- -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. - diff --git a/vendor/github.com/go-kit/kit/README.md b/vendor/github.com/go-kit/kit/README.md deleted file mode 100644 index daf208d..0000000 --- a/vendor/github.com/go-kit/kit/README.md +++ /dev/null @@ -1,222 +0,0 @@ -# Go kit [![Circle CI](https://circleci.com/gh/go-kit/kit.svg?style=svg)](https://circleci.com/gh/go-kit/kit) [![Drone.io](https://drone.io/github.com/go-kit/kit/status.png)](https://drone.io/github.com/go-kit/kit/latest) [![Travis CI](https://travis-ci.org/go-kit/kit.svg?branch=master)](https://travis-ci.org/go-kit/kit) [![GoDoc](https://godoc.org/github.com/go-kit/kit?status.svg)](https://godoc.org/github.com/go-kit/kit) [![Coverage Status](https://coveralls.io/repos/go-kit/kit/badge.svg?branch=master&service=github)](https://coveralls.io/github/go-kit/kit?branch=master) [![Go Report Card](https://goreportcard.com/badge/go-kit/kit)](https://goreportcard.com/report/go-kit/kit) - -**Go kit** is a **distributed programming toolkit** for building microservices -in large organizations. We solve common problems in distributed systems, so -you can focus on your business logic. - -- Mailing list: [go-kit](https://groups.google.com/forum/#!forum/go-kit) -- Slack: [gophers.slack.com](https://gophers.slack.com) **#go-kit** ([invite](https://gophersinvite.herokuapp.com/)) - -## Documentation - -### Examples - -Perhaps the best way to understand Go kit is to follow along as we build an -[example service][examples] from first principles. This can serve as a -blueprint for your own new service, or demonstrate how to adapt your existing -service to use Go kit components. - -[examples]: https://github.com/go-kit/kit/tree/master/examples - -### Endpoint - -Go kit primarily deals in the RPC messaging pattern. We use an abstraction -called an **[endpoint][]** to model individual RPCs. An endpoint can be -implemented by a server, and called by a client. It's the fundamental building -block of many Go kit components. - -[endpoint]: https://github.com/go-kit/kit/tree/master/endpoint/endpoint.go - -#### Circuit breaker - -The [circuitbreaker package][circuitbreaker] provides endpoint adapters to -several popular circuit breaker libraries. Circuit breakers prevent thundering -herds, and improve resiliency against intermittent errors. Every client-side -endpoint should be wrapped in a circuit breaker. - -[circuitbreaker]: https://github.com/go-kit/kit/tree/master/circuitbreaker - -#### Rate limiter - -The [ratelimit package][ratelimit] provides endpoint adapters to rate limiting -packages. Rate limiters are equally applicable to both server- and client-side -endpoints. Use rate limiters to enforce upper thresholds on incoming or -outgoing request throughput. - -[ratelimit]: https://github.com/go-kit/kit/tree/master/ratelimit - -### Transport - -The [transport package][transport] provides helpers to bind endpoints to -specific serialization mechanisms. At the moment, Go kit just provides helpers -for simple JSON over HTTP. 
If your organization uses a fully-featured -transport, bindings are typically provided by the Go library for the -transport, and there's not much for Go kit to do. In those cases, see the -examples to understand how to write adapters for your endpoints. For now, see -the [addsvc][addsvc] to understand how transport bindings work. We have -specific examples for Thrift, gRPC, net/rpc, and JSON over HTTP. JSON/RPC and -Swagger support is planned. - -[transport]: https://github.com/go-kit/kit/tree/master/transport -[addsvc]: https://github.com/go-kit/kit/tree/master/examples/addsvc - -### Logging - -Services produce logs to be consumed later, either by humans or machines. -Humans might be interested in debugging errors, or tracing specific requests. -Machines might be interested in counting interesting events, or aggregating -information for offline processing. In both cases, it's important that the log -messages be structured and actionable. Go kit's [log package][log] is designed -to encourage both of these best practices. - -[log]: https://github.com/go-kit/kit/tree/master/log - -### Metrics (Instrumentation) - -Services can't be considered production-ready until they're thoroughly -instrumented with metrics that track counts, latency, health, and other -periodic or per-request information. Go kit's [metrics package][metrics] -provides a robust common set of interfaces for instrumenting your service. -Bindings exist for common backends, from [expvar][] to [statsd][] to -[Prometheus][]. - -[metrics]: https://github.com/go-kit/kit/tree/master/metrics -[expvar]: https://golang.org/pkg/expvar/ -[statsd]: https://github.com/etsy/statsd -[Prometheus]: http://prometheus.io - -### Request tracing - -As your infrastructure grows, it becomes important to be able to trace a -request, as it travels through multiple services and back to the user. Go -kit's [tracing package][tracing] provides enhancements for your endpoints and -transport bindings to capture information about requests and emit them to -request tracing systems. (Currently, [Zipkin][] is supported; [Appdash][] -support is planned.) - -[tracing]: https://github.com/go-kit/kit/tree/master/tracing -[Zipkin]: https://github.com/openzipkin/zipkin -[Appdash]: https://github.com/sourcegraph/appdash - -### Service discovery and load balancing - -If your service calls another service, it needs to know how to find it, and -should intelligently spread its load among those discovered instances. Go -kit's [loadbalancer package][loadbalancer] provides client-side endpoint -middleware to solve that problem, whether your organization uses static hosts -or IPs, [DNS SRV records][dnssrv], Consul, etcd, or Zookeeper. And if you use -a custom system, it's very easy to write your own [Publisher][] and use Go -kit's load balancing strategies. (Currently, static hosts, DNS SRV, etcd, Consul -and ZooKeeper are supported) - -[loadbalancer]: https://github.com/go-kit/kit/tree/master/loadbalancer -[dnssrv]: https://github.com/go-kit/kit/tree/master/loadbalancer/dnssrv -[Publisher]: https://github.com/go-kit/kit/tree/master/loadbalancer/publisher.go - -## Motivation - -Go has emerged as the language of the server, but it remains underrepresented -in large, consumer-focused tech companies like Facebook, Twitter, Netflix, and -SoundCloud. These organizations have largely adopted JVM-based stacks for -their business logic, owing in large part to libraries and ecosystems that -directly support their microservice architectures. 
- -To reach its next level of success, Go needs more than simple primitives and -idioms. It needs a comprehensive toolkit, for coherent distributed programming -in the large. Go kit is a set of packages and best practices, leveraging years -of production experience, and providing a comprehensive, robust, and trustable -platform for organizations of any size. - -In short, Go kit makes Go a viable choice for business-domain microservices. - -For more details, see - [the motivating blog post](http://peter.bourgon.org/go-kit/) and - [the video of the talk](https://www.youtube.com/watch?v=iFR_7AKkJFU). -See also the - [Go kit talk at GopherCon 2015](https://www.youtube.com/watch?v=1AjaZi4QuGo). - -## Goals - -- Operate in a heterogeneous SOA — expect to interact with mostly non-Go-kit services -- RPC as the primary messaging pattern -- Pluggable serialization and transport — not just JSON over HTTP -- Operate within existing infrastructures — no mandates for specific tools or technologies - -## Non-goals - -- Supporting messaging patterns other than RPC (for now) — e.g. MPI, pub/sub, CQRS, etc. -- Re-implementing functionality that can be provided by adapting existing software -- Having opinions on operational concerns: deployment, configuration, process supervision, orchestration, etc. - -## Contributing - -Please see [CONTRIBUTING.md][]. Thank you, [contributors][]! - -[CONTRIBUTING.md]: /CONTRIBUTING.md -[contributors]: https://github.com/go-kit/kit/graphs/contributors - -## Dependency management - -Go kit is a library, designed to be imported into a binary package. -Vendoring is currently the best way for binary package authors to ensure reliable, reproducible builds. -Therefore, we strongly recommend our users use vendoring for all of their dependencies, including Go kit. -To avoid compatibility and availability issues, Go kit doesn't vendor its own dependencies, and doesn't recommend use of third-party import proxies. - -There are several tools which make vendoring easier, including [gb][], [glide][], [gvt][], [govendor][], and [vendetta][]. -In addition, Go kit uses a variety of continuous integration providers to find and fix compatibility problems as soon as they occur. - -[gb]: http://getgb.io -[glide]: https://github.com/Masterminds/glide -[gvt]: https://github.com/FiloSottile/gvt -[govendor]: https://github.com/kardianos/govendor -[vendetta]: https://github.com/dpw/vendetta - -## Related projects - -Projects with a ★ have had particular influence on Go kit's design (or vice-versa). 
- -### Service frameworks - -- [gizmo](https://github.com/nytimes/gizmo), a microservice toolkit from The New York Times ★ -- [go-micro](https://github.com/myodc/go-micro), a microservices client/server library ★ -- [gocircuit](https://github.com/gocircuit/circuit), dynamic cloud orchestration -- [gotalk](https://github.com/rsms/gotalk), async peer communication protocol & library -- [h2](https://github.com/hailocab/h2), a microservices framework ★ -- [Kite](https://github.com/koding/kite), a micro-service framework - -### Individual components - -- [afex/hystrix-go](https://github.com/afex/hystrix-go), client-side latency and fault tolerance library -- [armon/go-metrics](https://github.com/armon/go-metrics), library for exporting performance and runtime metrics to external metrics systems -- [codahale/lunk](https://github.com/codahale/lunk), structured logging in the style of Google's Dapper or Twitter's Zipkin -- [eapache/go-resiliency](https://github.com/eapache/go-resiliency), resiliency patterns -- [sasbury/logging](https://github.com/sasbury/logging), a tagged style of logging -- [grpc/grpc-go](https://github.com/grpc/grpc-go), HTTP/2 based RPC -- [inconshreveable/log15](https://github.com/inconshreveable/log15), simple, powerful logging for Go ★ -- [mailgun/vulcand](https://github.com/vulcand/vulcand), programmatic load balancer backed by etcd -- [mattheath/phosphor](https://github.com/mondough/phosphor), distributed system tracing -- [pivotal-golang/lager](https://github.com/pivotal-golang/lager), an opinionated logging library -- [rubyist/circuitbreaker](https://github.com/rubyist/circuitbreaker), circuit breaker library -- [Sirupsen/logrus](https://github.com/Sirupsen/logrus), structured, pluggable logging for Go ★ -- [sourcegraph/appdash](https://github.com/sourcegraph/appdash), application tracing system based on Google's Dapper -- [spacemonkeygo/monitor](https://github.com/spacemonkeygo/monitor), data collection, monitoring, instrumentation, and Zipkin client library -- [streadway/handy](https://github.com/streadway/handy), net/http handler filters -- [vitess/rpcplus](https://godoc.org/github.com/youtube/vitess/go/rpcplus), package rpc + context.Context -- [gdamore/mangos](https://github.com/gdamore/mangos), nanomsg implementation in pure Go - -### Web frameworks - -- [Beego](http://beego.me/) -- [Gin](https://gin-gonic.github.io/gin/) -- [Goji](https://github.com/zenazn/goji) -- [Gorilla](http://www.gorillatoolkit.org) -- [Martini](https://github.com/go-martini/martini) -- [Negroni](https://github.com/codegangsta/negroni) -- [Revel](https://revel.github.io/) (considered harmful) - -## Additional reading - -- [Architecting for the Cloud](http://fr.slideshare.net/stonse/architecting-for-the-cloud-using-netflixoss-codemash-workshop-29852233) — Netflix -- [Dapper, a Large-Scale Distributed Systems Tracing Infrastructure](http://research.google.com/pubs/pub36356.html) — Google -- [Your Server as a Function](http://monkey.org/~marius/funsrv.pdf) (PDF) — Twitter - diff --git a/vendor/github.com/go-kit/kit/circuitbreaker/gobreaker.go b/vendor/github.com/go-kit/kit/circuitbreaker/gobreaker.go deleted file mode 100644 index b00de95..0000000 --- a/vendor/github.com/go-kit/kit/circuitbreaker/gobreaker.go +++ /dev/null @@ -1,21 +0,0 @@ -package circuitbreaker - -import ( - "github.com/sony/gobreaker" - "golang.org/x/net/context" - - "github.com/go-kit/kit/endpoint" -) - -// Gobreaker returns an endpoint.Middleware that implements the circuit -// breaker pattern using the sony/gobreaker 
package. Only errors returned by -// the wrapped endpoint count against the circuit breaker's error count. -// -// See http://godoc.org/github.com/sony/gobreaker for more information. -func Gobreaker(cb *gobreaker.CircuitBreaker) endpoint.Middleware { - return func(next endpoint.Endpoint) endpoint.Endpoint { - return func(ctx context.Context, request interface{}) (interface{}, error) { - return cb.Execute(func() (interface{}, error) { return next(ctx, request) }) - } - } -} diff --git a/vendor/github.com/go-kit/kit/circuitbreaker/gobreaker_test.go b/vendor/github.com/go-kit/kit/circuitbreaker/gobreaker_test.go deleted file mode 100644 index b581cfe..0000000 --- a/vendor/github.com/go-kit/kit/circuitbreaker/gobreaker_test.go +++ /dev/null @@ -1,19 +0,0 @@ -package circuitbreaker_test - -import ( - "testing" - - "github.com/sony/gobreaker" - - "github.com/go-kit/kit/circuitbreaker" -) - -func TestGobreaker(t *testing.T) { - var ( - breaker = circuitbreaker.Gobreaker(gobreaker.NewCircuitBreaker(gobreaker.Settings{})) - primeWith = 100 - shouldPass = func(n int) bool { return n <= 5 } // https://github.com/sony/gobreaker/blob/bfa846d/gobreaker.go#L76 - circuitOpenError = "circuit breaker is open" - ) - testFailingEndpoint(t, breaker, primeWith, shouldPass, 0, circuitOpenError) -} diff --git a/vendor/github.com/go-kit/kit/circuitbreaker/handy_breaker.go b/vendor/github.com/go-kit/kit/circuitbreaker/handy_breaker.go deleted file mode 100644 index 5875d4f..0000000 --- a/vendor/github.com/go-kit/kit/circuitbreaker/handy_breaker.go +++ /dev/null @@ -1,38 +0,0 @@ -package circuitbreaker - -import ( - "time" - - "github.com/streadway/handy/breaker" - "golang.org/x/net/context" - - "github.com/go-kit/kit/endpoint" -) - -// HandyBreaker returns an endpoint.Middleware that implements the circuit -// breaker pattern using the streadway/handy/breaker package. Only errors -// returned by the wrapped endpoint count against the circuit breaker's error -// count. -// -// See http://godoc.org/github.com/streadway/handy/breaker for more -// information. 
-func HandyBreaker(cb breaker.Breaker) endpoint.Middleware { - return func(next endpoint.Endpoint) endpoint.Endpoint { - return func(ctx context.Context, request interface{}) (response interface{}, err error) { - if !cb.Allow() { - return nil, breaker.ErrCircuitOpen - } - - defer func(begin time.Time) { - if err == nil { - cb.Success(time.Since(begin)) - } else { - cb.Failure(time.Since(begin)) - } - }(time.Now()) - - response, err = next(ctx, request) - return - } - } -} diff --git a/vendor/github.com/go-kit/kit/circuitbreaker/handy_breaker_test.go b/vendor/github.com/go-kit/kit/circuitbreaker/handy_breaker_test.go deleted file mode 100644 index f3642a1..0000000 --- a/vendor/github.com/go-kit/kit/circuitbreaker/handy_breaker_test.go +++ /dev/null @@ -1,20 +0,0 @@ -package circuitbreaker_test - -import ( - "testing" - - handybreaker "github.com/streadway/handy/breaker" - - "github.com/go-kit/kit/circuitbreaker" -) - -func TestHandyBreaker(t *testing.T) { - var ( - failureRatio = 0.05 - breaker = circuitbreaker.HandyBreaker(handybreaker.NewBreaker(failureRatio)) - primeWith = handybreaker.DefaultMinObservations * 10 - shouldPass = func(n int) bool { return (float64(n) / float64(primeWith+n)) <= failureRatio } - openCircuitError = handybreaker.ErrCircuitOpen.Error() - ) - testFailingEndpoint(t, breaker, primeWith, shouldPass, 0, openCircuitError) -} diff --git a/vendor/github.com/go-kit/kit/circuitbreaker/hystrix.go b/vendor/github.com/go-kit/kit/circuitbreaker/hystrix.go deleted file mode 100644 index 5e7b144..0000000 --- a/vendor/github.com/go-kit/kit/circuitbreaker/hystrix.go +++ /dev/null @@ -1,30 +0,0 @@ -package circuitbreaker - -import ( - "github.com/afex/hystrix-go/hystrix" - "golang.org/x/net/context" - - "github.com/go-kit/kit/endpoint" -) - -// Hystrix returns an endpoint.Middleware that implements the circuit -// breaker pattern using the afex/hystrix-go package. -// -// When using this circuit breaker, please configure your commands separately. -// -// See https://godoc.org/github.com/afex/hystrix-go/hystrix for more -// information. 
-func Hystrix(commandName string) endpoint.Middleware { - return func(next endpoint.Endpoint) endpoint.Endpoint { - return func(ctx context.Context, request interface{}) (response interface{}, err error) { - var resp interface{} - if err := hystrix.Do(commandName, func() (err error) { - resp, err = next(ctx, request) - return err - }, nil); err != nil { - return nil, err - } - return resp, nil - } - } -} diff --git a/vendor/github.com/go-kit/kit/circuitbreaker/hystrix_test.go b/vendor/github.com/go-kit/kit/circuitbreaker/hystrix_test.go deleted file mode 100644 index da52757..0000000 --- a/vendor/github.com/go-kit/kit/circuitbreaker/hystrix_test.go +++ /dev/null @@ -1,40 +0,0 @@ -package circuitbreaker_test - -import ( - "io/ioutil" - stdlog "log" - "testing" - "time" - - "github.com/afex/hystrix-go/hystrix" - - "github.com/go-kit/kit/circuitbreaker" -) - -func TestHystrix(t *testing.T) { - stdlog.SetOutput(ioutil.Discard) - - const ( - commandName = "my-endpoint" - errorPercent = 5 - maxConcurrent = 1000 - ) - hystrix.ConfigureCommand(commandName, hystrix.CommandConfig{ - ErrorPercentThreshold: errorPercent, - MaxConcurrentRequests: maxConcurrent, - }) - - var ( - breaker = circuitbreaker.Hystrix(commandName) - primeWith = hystrix.DefaultVolumeThreshold * 2 - shouldPass = func(n int) bool { return (float64(n) / float64(primeWith+n)) <= (float64(errorPercent-1) / 100.0) } - openCircuitError = hystrix.ErrCircuitOpen.Error() - ) - - // hystrix-go uses buffered channels to receive reports on request success/failure, - // and so is basically impossible to test deterministically. We have to make sure - // the report buffer is emptied, by injecting a sleep between each invocation. - requestDelay := 5 * time.Millisecond - - testFailingEndpoint(t, breaker, primeWith, shouldPass, requestDelay, openCircuitError) -} diff --git a/vendor/github.com/go-kit/kit/circuitbreaker/util_test.go b/vendor/github.com/go-kit/kit/circuitbreaker/util_test.go deleted file mode 100644 index 0039b6d..0000000 --- a/vendor/github.com/go-kit/kit/circuitbreaker/util_test.go +++ /dev/null @@ -1,76 +0,0 @@ -package circuitbreaker_test - -import ( - "errors" - "fmt" - "path/filepath" - "runtime" - "testing" - "time" - - "golang.org/x/net/context" - - "github.com/go-kit/kit/endpoint" -) - -func testFailingEndpoint( - t *testing.T, - breaker endpoint.Middleware, - primeWith int, - shouldPass func(int) bool, - requestDelay time.Duration, - openCircuitError string, -) { - _, file, line, _ := runtime.Caller(1) - caller := fmt.Sprintf("%s:%d", filepath.Base(file), line) - - // Create a mock endpoint and wrap it with the breaker. - m := mock{} - var e endpoint.Endpoint - e = m.endpoint - e = breaker(e) - - // Prime the endpoint with successful requests. - for i := 0; i < primeWith; i++ { - if _, err := e(context.Background(), struct{}{}); err != nil { - t.Fatalf("%s: during priming, got error: %v", caller, err) - } - time.Sleep(requestDelay) - } - - // Switch the endpoint to start throwing errors. - m.err = errors.New("tragedy+disaster") - m.thru = 0 - - // The first several should be allowed through and yield our error. - for i := 0; shouldPass(i); i++ { - if _, err := e(context.Background(), struct{}{}); err != m.err { - t.Fatalf("%s: want %v, have %v", caller, m.err, err) - } - time.Sleep(requestDelay) - } - thru := m.thru - - // But the rest should be blocked by an open circuit. 
- for i := 0; i < 10; i++ { - if _, err := e(context.Background(), struct{}{}); err.Error() != openCircuitError { - t.Fatalf("%s: want %q, have %q", caller, openCircuitError, err.Error()) - } - time.Sleep(requestDelay) - } - - // Make sure none of those got through. - if want, have := thru, m.thru; want != have { - t.Errorf("%s: want %d, have %d", caller, want, have) - } -} - -type mock struct { - thru int - err error -} - -func (m *mock) endpoint(context.Context, interface{}) (interface{}, error) { - m.thru++ - return struct{}{}, m.err -} diff --git a/vendor/github.com/go-kit/kit/coverage.bash b/vendor/github.com/go-kit/kit/coverage.bash deleted file mode 100755 index f4b0524..0000000 --- a/vendor/github.com/go-kit/kit/coverage.bash +++ /dev/null @@ -1,47 +0,0 @@ -#!/usr/bin/env bash - -# This script runs the cover tool on all packages with test files. If you set a -# WEB environment variable, it will additionally open the web-based coverage -# visualizer for each package. - -set -e - -function go_files { find . -name '*_test.go' ; } -function filter { grep -v '/_' ; } -function remove_relative_prefix { sed -e 's/^\.\///g' ; } - -function directories { - go_files | filter | remove_relative_prefix | while read f - do - dirname $f - done -} - -function unique_directories { directories | sort | uniq ; } - -PATHS=${1:-$(unique_directories)} - -function report { - for path in $PATHS - do - go test -coverprofile=$path/cover.coverprofile ./$path - done -} - -function combine { - gover -} - -function clean { - find . -name cover.coverprofile | xargs rm -} - -report -combine -clean - -if [ -n "${WEB+x}" ] -then - go tool cover -html=gover.coverprofile -fi - diff --git a/vendor/github.com/go-kit/kit/endpoint/endpoint.go b/vendor/github.com/go-kit/kit/endpoint/endpoint.go deleted file mode 100644 index 2702ef2..0000000 --- a/vendor/github.com/go-kit/kit/endpoint/endpoint.go +++ /dev/null @@ -1,37 +0,0 @@ -package endpoint - -import ( - "errors" - - "golang.org/x/net/context" -) - -// Endpoint is the fundamental building block of servers and clients. -// It represents a single RPC method. -type Endpoint func(ctx context.Context, request interface{}) (response interface{}, err error) - -// Nop is an endpoint that does nothing and returns a nil error. -// Useful for tests. -func Nop(context.Context, interface{}) (interface{}, error) { return struct{}{}, nil } - -// Middleware is a chainable behavior modifier for endpoints. -type Middleware func(Endpoint) Endpoint - -// ErrBadCast indicates an unexpected concrete request or response struct was -// received from an endpoint. -var ErrBadCast = errors.New("bad cast") - -// ErrContextCanceled indicates the request context was canceled. -var ErrContextCanceled = errors.New("context canceled") - -// Chain is a helper function for composing middlewares. Requests will -// traverse them in the order they're declared. That is, the first middleware -// is treated as the outermost middleware. 
-func Chain(outer Middleware, others ...Middleware) Middleware { - return func(next Endpoint) Endpoint { - for i := len(others) - 1; i >= 0; i-- { // reverse - next = others[i](next) - } - return outer(next) - } -} diff --git a/vendor/github.com/go-kit/kit/endpoint/endpoint_example_test.go b/vendor/github.com/go-kit/kit/endpoint/endpoint_example_test.go deleted file mode 100644 index dd25ec7..0000000 --- a/vendor/github.com/go-kit/kit/endpoint/endpoint_example_test.go +++ /dev/null @@ -1,50 +0,0 @@ -package endpoint_test - -import ( - "fmt" - - "golang.org/x/net/context" - - "github.com/go-kit/kit/endpoint" -) - -func ExampleChain() { - e := endpoint.Chain( - annotate("first"), - annotate("second"), - annotate("third"), - )(myEndpoint) - - if _, err := e(ctx, req); err != nil { - panic(err) - } - - // Output: - // first pre - // second pre - // third pre - // my endpoint! - // third post - // second post - // first post -} - -var ( - ctx = context.Background() - req = struct{}{} -) - -func annotate(s string) endpoint.Middleware { - return func(next endpoint.Endpoint) endpoint.Endpoint { - return func(ctx context.Context, request interface{}) (interface{}, error) { - fmt.Println(s, "pre") - defer fmt.Println(s, "post") - return next(ctx, request) - } - } -} - -func myEndpoint(context.Context, interface{}) (interface{}, error) { - fmt.Println("my endpoint!") - return struct{}{}, nil -} diff --git a/vendor/github.com/go-kit/kit/examples/README.md b/vendor/github.com/go-kit/kit/examples/README.md deleted file mode 100644 index 0b11c21..0000000 --- a/vendor/github.com/go-kit/kit/examples/README.md +++ /dev/null @@ -1,721 +0,0 @@ -# Examples - -1. [A minimal example](#a-minimal-example) - 1. [Your business logic](#your-business-logic) - 1. [Requests and responses](#requests-and-responses) - 1. [Endpoints](#endpoints) - 1. [Transports](#transports) - 1. [stringsvc1](#stringsvc1) -1. [Logging and instrumentation](#logging-and-instrumentation) - 1. [Transport logging](#transport-logging) - 1. [Application logging](#application-logging) - 1. [Instrumentation](#instrumentation) - 1. [stringsvc2](#stringsvc2) -1. [Calling other services](#calling-other-services) - 1. [Client-side endpoints](#client-side-endpoints) - 1. [Service discovery and load balancing](#service-discovery-and-load-balancing) - 1. [stringsvc3](#stringsvc3) -1. [Advanced topics](#advanced-topics) - 1. [Creating a client package](#creating-a-client-package) - 1. [Request tracing](#request-tracing) - 1. [Threading a context](#threading-a-context) -1. [Other examples](#other-examples) - 1. [addsvc](#addsvc) - 1. [profilesvc](#profilesvc) - 1. [apigateway](#apigateway) - 1. [shipping](#shipping) - -## A minimal example - -Let's create a minimal Go kit service. - -### Your business logic - -Your service starts with your business logic. -In Go kit, we model a service as an **interface**. - -```go -// StringService provides operations on strings. -type StringService interface { - Uppercase(string) (string, error) - Count(string) int -} -``` - -That interface will have an implementation. - -```go -type stringService struct{} - -func (stringService) Uppercase(s string) (string, error) { - if s == "" { - return "", ErrEmpty - } - return strings.ToUpper(s), nil -} - -func (stringService) Count(s string) int { - return len(s) -} - -// ErrEmpty is returned when input string is empty -var ErrEmpty = errors.New("Empty string") -``` - -### Requests and responses - -In Go kit, the primary messaging pattern is RPC. 
-So, every method in our interface will be modeled as a remote procedure call. -For each method, we define **request and response** structs, - capturing all of the input and output parameters respectively. - -```go -type uppercaseRequest struct { - S string `json:"s"` -} - -type uppercaseResponse struct { - V string `json:"v"` - Err string `json:"err,omitempty"` // errors don't JSON-marshal, so we use a string -} - -type countRequest struct { - S string `json:"s"` -} - -type countResponse struct { - V int `json:"v"` -} -``` - -### Endpoints - -Go kit provides much of its functionality through an abstraction called an **endpoint**. - -```go -type Endpoint func(ctx context.Context, request interface{}) (response interface{}, err error) -``` - -An endpoint represents a single RPC. -That is, a single method in our service interface. -We'll write simple adapters to convert each of our service's methods into an endpoint. -Each adapter takes a StringService, and returns an endpoint that corresponds to one of the methods. - -```go -import ( - "golang.org/x/net/context" - "github.com/go-kit/kit/endpoint" -) - -func makeUppercaseEndpoint(svc StringService) endpoint.Endpoint { - return func(ctx context.Context, request interface{}) (interface{}, error) { - req := request.(uppercaseRequest) - v, err := svc.Uppercase(req.S) - if err != nil { - return uppercaseResponse{v, err.Error()}, nil - } - return uppercaseResponse{v, ""}, nil - } -} - -func makeCountEndpoint(svc StringService) endpoint.Endpoint { - return func(ctx context.Context, request interface{}) (interface{}, error) { - req := request.(countRequest) - v := svc.Count(req.S) - return countResponse{v}, nil - } -} -``` - -### Transports - -Now we need to expose your service to the outside world, so it can be called. -Your organization probably already has opinions about how services should talk to each other. -Maybe you use Thrift, or custom JSON over HTTP. -Go kit supports many **transports** out of the box. -(Adding support for new ones is easy—just [file an issue](https://github.com/go-kit/kit/issues).) - -For this minimal example service, let's use JSON over HTTP. -Go kit provides a helper struct, in package transport/http. - -```go -import ( - "encoding/json" - "log" - "net/http" - - "golang.org/x/net/context" - - httptransport "github.com/go-kit/kit/transport/http" -) - -func main() { - ctx := context.Background() - svc := stringService{} - - uppercaseHandler := httptransport.NewServer( - ctx, - makeUppercaseEndpoint(svc), - decodeUppercaseRequest, - encodeResponse, - ) - - countHandler := httptransport.NewServer( - ctx, - makeCountEndpoint(svc), - decodeCountRequest, - encodeResponse, - ) - - http.Handle("/uppercase", uppercaseHandler) - http.Handle("/count", countHandler) - log.Fatal(http.ListenAndServe(":8080", nil)) -} - -func decodeUppercaseRequest(_ context.Context, r *http.Request) (interface{}, error) { - var request uppercaseRequest - if err := json.NewDecoder(r.Body).Decode(&request); err != nil { - return nil, err - } - return request, nil -} - -func decodeCountRequest(_ context.Context, r *http.Request) (interface{}, error) { - var request countRequest - if err := json.NewDecoder(r.Body).Decode(&request); err != nil { - return nil, err - } - return request, nil -} - -func encodeResponse(_ context.Context, w http.ResponseWriter, response interface{}) error { - return json.NewEncoder(w).Encode(response) -} -``` - -### stringsvc1 - -The complete service so far is [stringsvc1][]. 
- -[stringsvc1]: https://github.com/go-kit/kit/blob/master/examples/stringsvc1 - -``` -$ go get github.com/go-kit/kit/examples/stringsvc1 -$ stringsvc1 -``` - -``` -$ curl -XPOST -d'{"s":"hello, world"}' localhost:8080/uppercase -{"v":"HELLO, WORLD","err":null} -$ curl -XPOST -d'{"s":"hello, world"}' localhost:8080/count -{"v":12} -``` - -## Logging and instrumentation - -No service can be considered production-ready without thorough logging and instrumentation. - -### Transport logging - -Any component that needs to log should treat the logger like a dependency, same as a database connection. -So, we construct our logger in our `func main`, and pass it to components that need it. -We never use a globally-scoped logger. - -We could pass a logger directly into our stringService implementation, but there's a better way. -Let's use a **middleware**, also known as a decorator. -A middleware is a function that takes an endpoint and returns an endpoint. - -```go -type Middleware func(Endpoint) Endpoint -``` - -In between, it can do anything. -Let's create a basic logging middleware. - -```go -func loggingMiddleware(logger log.Logger) Middleware { - return func(next endpoint.Endpoint) endpoint.Endpoint { - return func(ctx context.Context, request interface{}) (interface{}, error) { - logger.Log("msg", "calling endpoint") - defer logger.Log("msg", "called endpoint") - return next(ctx, request) - } - } -} -``` - -And wire it into each of our handlers. - -```go -logger := log.NewLogfmtLogger(os.Stderr) - -svc := stringService{} - -var uppercase endpoint.Endpoint -uppercase = makeUppercaseEndpoint(svc) -uppercase = loggingMiddleware(log.NewContext(logger).With("method", "uppercase"))(uppercase) - -var count endpoint.Endpoint -count = makeCountEndpoint(svc) -count = loggingMiddleware(log.NewContext(logger).With("method", "count"))(count) - -uppercaseHandler := httptransport.Server( - // ... - uppercase, - // ... -) - -countHandler := httptransport.Server( - // ... - count, - // ... -) -``` - -It turns out that this technique is useful for a lot more than just logging. -Many Go kit components are implemented as endpoint middlewares. - -### Application logging - -But what if we want to log in our application domain, like the parameters that are passed in? -It turns out that we can define a middleware for our service, and get the same nice and composable effects. -Since our StringService is defined as an interface, we just need to make a new type - which wraps an existing StringService, and performs the extra logging duties. - -```go -type loggingMiddleware struct { - logger log.Logger - next StringService -} - -func (mw loggingMiddleware) Uppercase(s string) (output string, err error) { - defer func(begin time.Time) { - mw.logger.Log( - "method", "uppercase", - "input", s, - "output", output, - "err", err, - "took", time.Since(begin), - ) - }(time.Now()) - - output, err = mw.next.Uppercase(s) - return -} - -func (mw loggingMiddleware) Count(s string) (n int) { - defer func(begin time.Time) { - mw.logger.Log( - "method", "count", - "input", s, - "n", n, - "took", time.Since(begin), - ) - }(time.Now()) - - n = mw.next.Count(s) - return -} -``` - -And wire it in. - -```go -import ( - "os" - - "github.com/go-kit/kit/log" - httptransport "github.com/go-kit/kit/transport/http" -) - -func main() { - logger := log.NewLogfmtLogger(os.Stderr) - - var svc StringService - svc = stringsvc{} - svc = loggingMiddleware{logger, svc} - - // ... - - uppercaseHandler := httptransport.NewServer( - // ... 
- makeUppercaseEndpoint(svc), - // ... - ) - - countHandler := httptransport.NewServer( - // ... - makeCountEndpoint(svc), - // ... - ) -} -``` - -Use endpoint middlewares for transport-domain concerns, like circuit breaking and rate limiting. -Use service middlewares for business-domain concerns, like logging and instrumentation. -Speaking of instrumentation... - -### Instrumentation - -In Go kit, instrumentation means using **package metrics** to record statistics about your service's runtime behavior. -Counting the number of jobs processed, - recording the duration of requests after they've finished, - and tracking the number of in-flight operations would all be considered instrumentation. - -We can use the same middleware pattern that we used for logging. - -```go -type instrumentingMiddleware struct { - requestCount metrics.Counter - requestLatency metrics.TimeHistogram - countResult metrics.Histogram - next StringService -} - -func (mw instrumentingMiddleware) Uppercase(s string) (output string, err error) { - defer func(begin time.Time) { - methodField := metrics.Field{Key: "method", Value: "uppercase"} - errorField := metrics.Field{Key: "error", Value: fmt.Sprintf("%v", err)} - mw.requestCount.With(methodField).With(errorField).Add(1) - mw.requestLatency.With(methodField).With(errorField).Observe(time.Since(begin)) - }(time.Now()) - - output, err = mw.next.Uppercase(s) - return -} - -func (mw instrumentingMiddleware) Count(s string) (n int) { - defer func(begin time.Time) { - methodField := metrics.Field{Key: "method", Value: "count"} - errorField := metrics.Field{Key: "error", Value: fmt.Sprintf("%v", error(nil))} - mw.requestCount.With(methodField).With(errorField).Add(1) - mw.requestLatency.With(methodField).With(errorField).Observe(time.Since(begin)) - mw.countResult.Observe(int64(n)) - }(time.Now()) - - n = mw.next.Count(s) - return -} -``` - -And wire it into our service. - -```go -import ( - stdprometheus "github.com/prometheus/client_golang/prometheus" - kitprometheus "github.com/go-kit/kit/metrics/prometheus" - "github.com/go-kit/kit/metrics" -) - -func main() { - logger := log.NewLogfmtLogger(os.Stderr) - - fieldKeys := []string{"method", "error"} - requestCount := kitprometheus.NewCounter(stdprometheus.CounterOpts{ - // ... - }, fieldKeys) - requestLatency := metrics.NewTimeHistogram(time.Microsecond, kitprometheus.NewSummary(stdprometheus.SummaryOpts{ - // ... - }, fieldKeys)) - countResult := kitprometheus.NewSummary(stdprometheus.SummaryOpts{ - // ... - }, []string{})) - - var svc StringService - svc = stringService{} - svc = loggingMiddleware{logger, svc} - svc = instrumentingMiddleware{requestCount, requestLatency, countResult, svc} - - // ... - - http.Handle("/metrics", stdprometheus.Handler()) -} -``` - -### stringsvc2 - -The complete service so far is [stringsvc2][]. - -[stringsvc2]: https://github.com/go-kit/kit/blob/master/examples/stringsvc2 - -``` -$ go get github.com/go-kit/kit/examples/stringsvc2 -$ stringsvc2 -msg=HTTP addr=:8080 -``` - -``` -$ curl -XPOST -d'{"s":"hello, world"}' localhost:8080/uppercase -{"v":"HELLO, WORLD","err":null} -$ curl -XPOST -d'{"s":"hello, world"}' localhost:8080/count -{"v":12} -``` - -``` -method=uppercase input="hello, world" output="HELLO, WORLD" err=null took=2.455µs -method=count input="hello, world" n=12 took=743ns -``` - -## Calling other services - -It's rare that a service exists in a vacuum. -Often, you need to call other services. -**This is where Go kit shines**. 
-We provide transport middlewares to solve many of the problems that come up. - -Let's say that we want to have our string service call out to a _different_ string service - to satisfy the Uppercase method. -In effect, proxying the request to another service. -Let's implement the proxying middleware as a ServiceMiddleware, same as a logging or instrumenting middleware. - -```go -// proxymw implements StringService, forwarding Uppercase requests to the -// provided endpoint, and serving all other (i.e. Count) requests via the -// next StringService. -type proxymw struct { - ctx context.Context - next StringService // Serve most requests via this service... - uppercase endpoint.Endpoint // ...except Uppercase, which gets served by this endpoint -} -``` - -### Client-side endpoints - -We've got exactly the same endpoint we already know about, but we'll use it to invoke, rather than serve, a request. -When used this way, we call it a _client_ endpoint. -And to invoke the client endpoint, we just do some simple conversions. - -```go -func (mw proxymw) Uppercase(s string) (string, error) { - response, err := mw.uppercase(mw.Context, uppercaseRequest{S: s}) - if err != nil { - return "", err - } - resp := response.(uppercaseResponse) - if resp.Err != "" { - return resp.V, errors.New(resp.Err) - } - return resp.V, nil -} -``` - -Now, to construct one of these proxying middlewares, we convert a proxy URL string to an endpoint. -If we assume JSON over HTTP, we can use a helper in the transport/http package. - -```go -import ( - httptransport "github.com/go-kit/kit/transport/http" -) - -func proxyingMiddleware(proxyURL string, ctx context.Context) ServiceMiddleware { - return func(next StringService) StringService { - return proxymw{ctx, next, makeUppercaseEndpoint(ctx, proxyURL)} - } -} - -func makeUppercaseEndpoint(ctx context.Context, proxyURL string) endpoint.Endpoint { - return httptransport.NewClient( - "GET", - mustParseURL(proxyURL), - encodeUppercaseRequest, - decodeUppercaseResponse, - ).Endpoint() -} -``` - -### Service discovery and load balancing - -That's fine if we only have a single remote service. -But in reality, we'll probably have many service instances available to us. -We want to discover them through some service discovery mechanism, and spread our load across all of them. -And if any of those instances start to behave badly, we want to deal with that, without affecting our own service's reliability. - -Go kit offers adapters to different service discovery systems, to get up-to-date sets of instances, exposed as individual endpoints. -Those adapters are called subscribers. - -```go -type Subscriber interface { - Endpoints() ([]endpoint.Endpoint, error) -} -``` - -Internally, subscribers use a provided factory function to convert each discovered instance string (typically host:port) to a usable endpoint. - -```go -type Factory func(instance string) (endpoint.Endpoint, error) -``` - -So far, our factory function, makeUppercaseEndpoint, just calls the URL directly. -But it's important to put some safety middleware, like circuit breakers and rate limiters, into your factory, too. - -```go -var e endpoint.Endpoint -e = makeUppercaseProxy(ctx, instance) -e = circuitbreaker.Gobreaker(gobreaker.NewCircuitBreaker(gobreaker.Settings{}))(e) -e = kitratelimit.NewTokenBucketLimiter(jujuratelimit.NewBucketWithRate(float64(maxQPS), int64(maxQPS)))(e) -} -``` - -Now that we've got a set of endpoints, we need to choose one. -Load balancers wrap subscribers, and select one endpoint from many. 
-Go kit provides a couple of basic load balancers, and it's easy to write your own if you want more advanced heuristics. - -```go -type Balancer interface { - Endpoint() (endpoint.Endpoint, error) -} -``` - -Now we have the ability to choose endpoints according to some heuristic. -We can use that to provide a single, logical, robust endpoint to consumers. -A retry strategy wraps a load balancer, and returns a usable endpoint. -The retry strategy will retry failed requests until either the max attempts or timeout has been reached. - -```go -func Retry(max int, timeout time.Duration, lb Balancer) endpoint.Endpoint -``` - -Let's wire up our final proxying middleware. -For simplicity, we'll assume the user will specify multiple comma-separate instance endpoints with a flag. - -```go -func proxyingMiddleware(instances string, ctx context.Context, logger log.Logger) ServiceMiddleware { - // If instances is empty, don't proxy. - if instances == "" { - logger.Log("proxy_to", "none") - return func(next StringService) StringService { return next } - } - - // Set some parameters for our client. - var ( - qps = 100 // beyond which we will return an error - maxAttempts = 3 // per request, before giving up - maxTime = 250 * time.Millisecond // wallclock time, before giving up - ) - - // Otherwise, construct an endpoint for each instance in the list, and add - // it to a fixed set of endpoints. In a real service, rather than doing this - // by hand, you'd probably use package sd's support for your service - // discovery system. - var ( - instanceList = split(instances) - subscriber sd.FixedSubscriber - ) - logger.Log("proxy_to", fmt.Sprint(instanceList)) - for _, instance := range instanceList { - var e endpoint.Endpoint - e = makeUppercaseProxy(ctx, instance) - e = circuitbreaker.Gobreaker(gobreaker.NewCircuitBreaker(gobreaker.Settings{}))(e) - e = kitratelimit.NewTokenBucketLimiter(jujuratelimit.NewBucketWithRate(float64(qps), int64(qps)))(e) - subscriber = append(subscriber, e) - } - - // Now, build a single, retrying, load-balancing endpoint out of all of - // those individual endpoints. - balancer := lb.NewRoundRobin(subscriber) - retry := lb.Retry(maxAttempts, maxTime, balancer) - - // And finally, return the ServiceMiddleware, implemented by proxymw. - return func(next StringService) StringService { - return proxymw{ctx, next, retry} - } -} -``` - -### stringsvc3 - -The complete service so far is [stringsvc3][]. 
- -[stringsvc3]: https://github.com/go-kit/kit/blob/master/examples/stringsvc3 - -``` -$ go get github.com/go-kit/kit/examples/stringsvc3 -$ stringsvc3 -listen=:8001 & -listen=:8001 caller=proxying.go:25 proxy_to=none -listen=:8001 caller=main.go:72 msg=HTTP addr=:8001 -$ stringsvc3 -listen=:8002 & -listen=:8002 caller=proxying.go:25 proxy_to=none -listen=:8002 caller=main.go:72 msg=HTTP addr=:8002 -$ stringsvc3 -listen=:8003 & -listen=:8003 caller=proxying.go:25 proxy_to=none -listen=:8003 caller=main.go:72 msg=HTTP addr=:8003 -$ stringsvc3 -listen=:8080 -proxy=localhost:8001,localhost:8002,localhost:8003 -listen=:8080 caller=proxying.go:29 proxy_to="[localhost:8001 localhost:8002 localhost:8003]" -listen=:8080 caller=main.go:72 msg=HTTP addr=:8080 -``` - -``` -$ for s in foo bar baz ; do curl -d"{\"s\":\"$s\"}" localhost:8080/uppercase ; done -{"v":"FOO","err":null} -{"v":"BAR","err":null} -{"v":"BAZ","err":null} -``` - -``` -listen=:8001 caller=logging.go:28 method=uppercase input=foo output=FOO err=null took=5.168µs -listen=:8080 caller=logging.go:28 method=uppercase input=foo output=FOO err=null took=4.39012ms -listen=:8002 caller=logging.go:28 method=uppercase input=bar output=BAR err=null took=5.445µs -listen=:8080 caller=logging.go:28 method=uppercase input=bar output=BAR err=null took=2.04831ms -listen=:8003 caller=logging.go:28 method=uppercase input=baz output=BAZ err=null took=3.285µs -listen=:8080 caller=logging.go:28 method=uppercase input=baz output=BAZ err=null took=1.388155ms -``` - -## Advanced topics - -### Threading a context - -The context object is used to carry information across conceptual boundaries in the scope of a single request. -In our example, we haven't yet threaded the context through our business logic. -But that's almost always a good idea. -It allows you to pass request-scoped information between business logic and middlewares, - and is necessary for more sophisticated tasks like granular distributed tracing annotations. - -Concretely, this means your business logic interfaces will look like - -```go -type MyService interface { - Foo(context.Context, string, int) (string, error) - Bar(context.Context, string) error - Baz(context.Context) (int, error) -} -``` - -### Request tracing - -Once your infrastructure grows beyond a certain size, it becomes important to trace requests through multiple services, so you can identify and troubleshoot hotspots. -See [package tracing](https://github.com/go-kit/kit/blob/master/tracing) for more information. - -### Creating a client package - -It's possible to use Go kit to create a client package to your service, to make consuming your service easier from other Go programs. -Effectively, your client package will provide an implementation of your service interface, which invokes a remote service instance using a specific transport. -See [package addsvc/client](https://github.com/go-kit/kit/tree/master/examples/addsvc/client) - or [package profilesvc/client](https://github.com/go-kit/kit/tree/master/examples/profilesvc/client) - for examples. - -## Other examples - -### addsvc - -[addsvc](https://github.com/go-kit/kit/blob/master/examples/addsvc) is the original example service. -It exposes a set of operations over **all supported transports**. -It's fully logged, instrumented, and uses Zipkin request tracing. -It also demonstrates how to create and use client packages. -It's a good example of a fully-featured Go kit service. 
- -### profilesvc - -[profilesvc](https://github.com/go-kit/kit/blob/master/examples/profilesvc) - demonstrates how to use Go kit to build a REST-ish microservice. - -### apigateway - -[apigateway](https://github.com/go-kit/kit/blob/master/examples/apigateway/main.go) - demonstrates how to implement the API gateway pattern, - backed by a Consul service discovery system. - -### shipping - -[shipping](https://github.com/go-kit/kit/tree/master/examples/shipping) - is a complete, "real-world" application composed of multiple microservices, - based on Domain Driven Design principles. diff --git a/vendor/github.com/go-kit/kit/examples/addsvc/client/grpc/client.go b/vendor/github.com/go-kit/kit/examples/addsvc/client/grpc/client.go deleted file mode 100644 index f21a9f5..0000000 --- a/vendor/github.com/go-kit/kit/examples/addsvc/client/grpc/client.go +++ /dev/null @@ -1,75 +0,0 @@ -// Package grpc provides a gRPC client for the add service. -package grpc - -import ( - "time" - - jujuratelimit "github.com/juju/ratelimit" - stdopentracing "github.com/opentracing/opentracing-go" - "github.com/sony/gobreaker" - "google.golang.org/grpc" - - "github.com/go-kit/kit/circuitbreaker" - "github.com/go-kit/kit/endpoint" - "github.com/go-kit/kit/examples/addsvc" - "github.com/go-kit/kit/examples/addsvc/pb" - "github.com/go-kit/kit/log" - "github.com/go-kit/kit/ratelimit" - "github.com/go-kit/kit/tracing/opentracing" - grpctransport "github.com/go-kit/kit/transport/grpc" -) - -// New returns an AddService backed by a gRPC client connection. It is the -// responsibility of the caller to dial, and later close, the connection. -func New(conn *grpc.ClientConn, tracer stdopentracing.Tracer, logger log.Logger) addsvc.Service { - // We construct a single ratelimiter middleware, to limit the total outgoing - // QPS from this client to all methods on the remote instance. We also - // construct per-endpoint circuitbreaker middlewares to demonstrate how - // that's done, although they could easily be combined into a single breaker - // for the entire remote instance, too. 
- - limiter := ratelimit.NewTokenBucketLimiter(jujuratelimit.NewBucketWithRate(100, 100)) - - var sumEndpoint endpoint.Endpoint - { - sumEndpoint = grpctransport.NewClient( - conn, - "Add", - "Sum", - addsvc.EncodeGRPCSumRequest, - addsvc.DecodeGRPCSumResponse, - pb.SumReply{}, - grpctransport.ClientBefore(opentracing.FromGRPCRequest(tracer, "Sum", logger)), - ).Endpoint() - sumEndpoint = opentracing.TraceClient(tracer, "Sum")(sumEndpoint) - sumEndpoint = limiter(sumEndpoint) - sumEndpoint = circuitbreaker.Gobreaker(gobreaker.NewCircuitBreaker(gobreaker.Settings{ - Name: "Sum", - Timeout: 30 * time.Second, - }))(sumEndpoint) - } - - var concatEndpoint endpoint.Endpoint - { - concatEndpoint = grpctransport.NewClient( - conn, - "Add", - "Concat", - addsvc.EncodeGRPCConcatRequest, - addsvc.DecodeGRPCConcatResponse, - pb.ConcatReply{}, - grpctransport.ClientBefore(opentracing.FromGRPCRequest(tracer, "Concat", logger)), - ).Endpoint() - concatEndpoint = opentracing.TraceClient(tracer, "Concat")(concatEndpoint) - concatEndpoint = limiter(concatEndpoint) - sumEndpoint = circuitbreaker.Gobreaker(gobreaker.NewCircuitBreaker(gobreaker.Settings{ - Name: "Concat", - Timeout: 30 * time.Second, - }))(sumEndpoint) - } - - return addsvc.Endpoints{ - SumEndpoint: sumEndpoint, - ConcatEndpoint: concatEndpoint, - } -} diff --git a/vendor/github.com/go-kit/kit/examples/addsvc/client/http/client.go b/vendor/github.com/go-kit/kit/examples/addsvc/client/http/client.go deleted file mode 100644 index cb349fb..0000000 --- a/vendor/github.com/go-kit/kit/examples/addsvc/client/http/client.go +++ /dev/null @@ -1,86 +0,0 @@ -// Package http provides an HTTP client for the add service. -package http - -import ( - "net/url" - "strings" - "time" - - jujuratelimit "github.com/juju/ratelimit" - stdopentracing "github.com/opentracing/opentracing-go" - "github.com/sony/gobreaker" - - "github.com/go-kit/kit/circuitbreaker" - "github.com/go-kit/kit/endpoint" - "github.com/go-kit/kit/examples/addsvc" - "github.com/go-kit/kit/log" - "github.com/go-kit/kit/ratelimit" - "github.com/go-kit/kit/tracing/opentracing" - httptransport "github.com/go-kit/kit/transport/http" -) - -// New returns an AddService backed by an HTTP server living at the remote -// instance. We expect instance to come from a service discovery system, so -// likely of the form "host:port". -func New(instance string, tracer stdopentracing.Tracer, logger log.Logger) (addsvc.Service, error) { - if !strings.HasPrefix(instance, "http") { - instance = "http://" + instance - } - u, err := url.Parse(instance) - if err != nil { - return nil, err - } - - // We construct a single ratelimiter middleware, to limit the total outgoing - // QPS from this client to all methods on the remote instance. We also - // construct per-endpoint circuitbreaker middlewares to demonstrate how - // that's done, although they could easily be combined into a single breaker - // for the entire remote instance, too. 
- - limiter := ratelimit.NewTokenBucketLimiter(jujuratelimit.NewBucketWithRate(100, 100)) - - var sumEndpoint endpoint.Endpoint - { - sumEndpoint = httptransport.NewClient( - "POST", - copyURL(u, "/sum"), - addsvc.EncodeHTTPGenericRequest, - addsvc.DecodeHTTPSumResponse, - httptransport.ClientBefore(opentracing.FromHTTPRequest(tracer, "Sum", logger)), - ).Endpoint() - sumEndpoint = opentracing.TraceClient(tracer, "Sum")(sumEndpoint) - sumEndpoint = limiter(sumEndpoint) - sumEndpoint = circuitbreaker.Gobreaker(gobreaker.NewCircuitBreaker(gobreaker.Settings{ - Name: "Sum", - Timeout: 30 * time.Second, - }))(sumEndpoint) - } - - var concatEndpoint endpoint.Endpoint - { - concatEndpoint = httptransport.NewClient( - "POST", - copyURL(u, "/concat"), - addsvc.EncodeHTTPGenericRequest, - addsvc.DecodeHTTPConcatResponse, - httptransport.ClientBefore(opentracing.FromHTTPRequest(tracer, "Concat", logger)), - ).Endpoint() - concatEndpoint = opentracing.TraceClient(tracer, "Concat")(concatEndpoint) - concatEndpoint = limiter(concatEndpoint) - sumEndpoint = circuitbreaker.Gobreaker(gobreaker.NewCircuitBreaker(gobreaker.Settings{ - Name: "Concat", - Timeout: 30 * time.Second, - }))(sumEndpoint) - } - - return addsvc.Endpoints{ - SumEndpoint: sumEndpoint, - ConcatEndpoint: concatEndpoint, - }, nil -} - -func copyURL(base *url.URL, path string) *url.URL { - next := *base - next.Path = path - return &next -} diff --git a/vendor/github.com/go-kit/kit/examples/addsvc/client/thrift/client.go b/vendor/github.com/go-kit/kit/examples/addsvc/client/thrift/client.go deleted file mode 100644 index a943c7b..0000000 --- a/vendor/github.com/go-kit/kit/examples/addsvc/client/thrift/client.go +++ /dev/null @@ -1,55 +0,0 @@ -// Package thrift provides a Thrift client for the add service. -package thrift - -import ( - "time" - - jujuratelimit "github.com/juju/ratelimit" - "github.com/sony/gobreaker" - - "github.com/go-kit/kit/circuitbreaker" - "github.com/go-kit/kit/endpoint" - "github.com/go-kit/kit/examples/addsvc" - thriftadd "github.com/go-kit/kit/examples/addsvc/thrift/gen-go/addsvc" - "github.com/go-kit/kit/ratelimit" -) - -// New returns an AddService backed by a Thrift server described by the provided -// client. The caller is responsible for constructing the client, and eventually -// closing the underlying transport. -func New(client *thriftadd.AddServiceClient) addsvc.Service { - // We construct a single ratelimiter middleware, to limit the total outgoing - // QPS from this client to all methods on the remote instance. We also - // construct per-endpoint circuitbreaker middlewares to demonstrate how - // that's done, although they could easily be combined into a single breaker - // for the entire remote instance, too. - - limiter := ratelimit.NewTokenBucketLimiter(jujuratelimit.NewBucketWithRate(100, 100)) - - // Thrift does not currently have tracer bindings, so we skip tracing. 
- - var sumEndpoint endpoint.Endpoint - { - sumEndpoint = addsvc.MakeThriftSumEndpoint(client) - sumEndpoint = limiter(sumEndpoint) - sumEndpoint = circuitbreaker.Gobreaker(gobreaker.NewCircuitBreaker(gobreaker.Settings{ - Name: "Sum", - Timeout: 30 * time.Second, - }))(sumEndpoint) - } - - var concatEndpoint endpoint.Endpoint - { - concatEndpoint = addsvc.MakeThriftConcatEndpoint(client) - concatEndpoint = limiter(concatEndpoint) - sumEndpoint = circuitbreaker.Gobreaker(gobreaker.NewCircuitBreaker(gobreaker.Settings{ - Name: "Concat", - Timeout: 30 * time.Second, - }))(sumEndpoint) - } - - return addsvc.Endpoints{ - SumEndpoint: addsvc.MakeThriftSumEndpoint(client), - ConcatEndpoint: addsvc.MakeThriftConcatEndpoint(client), - } -} diff --git a/vendor/github.com/go-kit/kit/examples/addsvc/cmd/addcli/main.go b/vendor/github.com/go-kit/kit/examples/addsvc/cmd/addcli/main.go deleted file mode 100644 index 870bfa8..0000000 --- a/vendor/github.com/go-kit/kit/examples/addsvc/cmd/addcli/main.go +++ /dev/null @@ -1,178 +0,0 @@ -package main - -import ( - "flag" - "fmt" - "os" - "strconv" - "strings" - "time" - - "github.com/apache/thrift/lib/go/thrift" - "github.com/lightstep/lightstep-tracer-go" - stdopentracing "github.com/opentracing/opentracing-go" - zipkin "github.com/openzipkin/zipkin-go-opentracing" - appdashot "github.com/sourcegraph/appdash/opentracing" - "golang.org/x/net/context" - "google.golang.org/grpc" - "sourcegraph.com/sourcegraph/appdash" - - "github.com/go-kit/kit/examples/addsvc" - grpcclient "github.com/go-kit/kit/examples/addsvc/client/grpc" - httpclient "github.com/go-kit/kit/examples/addsvc/client/http" - thriftclient "github.com/go-kit/kit/examples/addsvc/client/thrift" - thriftadd "github.com/go-kit/kit/examples/addsvc/thrift/gen-go/addsvc" - "github.com/go-kit/kit/log" -) - -func main() { - // The addcli presumes no service discovery system, and expects users to - // provide the direct address of an addsvc. This presumption is reflected in - // the addcli binary and the the client packages: the -transport.addr flags - // and various client constructors both expect host:port strings. For an - // example service with a client built on top of a service discovery system, - // see profilesvc. - - var ( - httpAddr = flag.String("http.addr", "", "HTTP address of addsvc") - grpcAddr = flag.String("grpc.addr", "", "gRPC (HTTP) address of addsvc") - thriftAddr = flag.String("thrift.addr", "", "Thrift address of addsvc") - thriftProtocol = flag.String("thrift.protocol", "binary", "binary, compact, json, simplejson") - thriftBufferSize = flag.Int("thrift.buffer.size", 0, "0 for unbuffered") - thriftFramed = flag.Bool("thrift.framed", false, "true to enable framing") - zipkinAddr = flag.String("zipkin.addr", "", "Enable Zipkin tracing via a Kafka Collector host:port") - appdashAddr = flag.String("appdash.addr", "", "Enable Appdash tracing via an Appdash server host:port") - lightstepToken = flag.String("lightstep.token", "", "Enable LightStep tracing via a LightStep access token") - method = flag.String("method", "sum", "sum, concat") - ) - flag.Parse() - - if len(flag.Args()) != 2 { - fmt.Fprintf(os.Stderr, "usage: addcli [flags] \n") - os.Exit(1) - } - - // This is a demonstration client, which supports multiple tracers. - // Your clients will probably just use one tracer. 
- var tracer stdopentracing.Tracer - { - if *zipkinAddr != "" { - collector, err := zipkin.NewKafkaCollector( - strings.Split(*zipkinAddr, ","), - zipkin.KafkaLogger(log.NewNopLogger()), - ) - if err != nil { - fmt.Fprintf(os.Stderr, "%v\n", err) - os.Exit(1) - } - tracer, err = zipkin.NewTracer( - zipkin.NewRecorder(collector, false, "localhost:8000", "addcli"), - ) - if err != nil { - fmt.Fprintf(os.Stderr, "%v\n", err) - os.Exit(1) - } - } else if *appdashAddr != "" { - tracer = appdashot.NewTracer(appdash.NewRemoteCollector(*appdashAddr)) - } else if *lightstepToken != "" { - tracer = lightstep.NewTracer(lightstep.Options{ - AccessToken: *lightstepToken, - }) - defer lightstep.FlushLightStepTracer(tracer) - } else { - tracer = stdopentracing.GlobalTracer() // no-op - } - } - - // This is a demonstration client, which supports multiple transports. - // Your clients will probably just define and stick with 1 transport. - - var ( - service addsvc.Service - err error - ) - if *httpAddr != "" { - service, err = httpclient.New(*httpAddr, tracer, log.NewNopLogger()) - } else if *grpcAddr != "" { - conn, err := grpc.Dial(*grpcAddr, grpc.WithInsecure(), grpc.WithTimeout(time.Second)) - if err != nil { - fmt.Fprintf(os.Stderr, "error: %v", err) - os.Exit(1) - } - defer conn.Close() - service = grpcclient.New(conn, tracer, log.NewNopLogger()) - } else if *thriftAddr != "" { - // It's necessary to do all of this construction in the func main, - // because (among other reasons) we need to control the lifecycle of the - // Thrift transport, i.e. close it eventually. - var protocolFactory thrift.TProtocolFactory - switch *thriftProtocol { - case "compact": - protocolFactory = thrift.NewTCompactProtocolFactory() - case "simplejson": - protocolFactory = thrift.NewTSimpleJSONProtocolFactory() - case "json": - protocolFactory = thrift.NewTJSONProtocolFactory() - case "binary", "": - protocolFactory = thrift.NewTBinaryProtocolFactoryDefault() - default: - fmt.Fprintf(os.Stderr, "error: invalid protocol %q\n", *thriftProtocol) - os.Exit(1) - } - var transportFactory thrift.TTransportFactory - if *thriftBufferSize > 0 { - transportFactory = thrift.NewTBufferedTransportFactory(*thriftBufferSize) - } else { - transportFactory = thrift.NewTTransportFactory() - } - if *thriftFramed { - transportFactory = thrift.NewTFramedTransportFactory(transportFactory) - } - transportSocket, err := thrift.NewTSocket(*thriftAddr) - if err != nil { - fmt.Fprintf(os.Stderr, "error: %v\n", err) - os.Exit(1) - } - transport := transportFactory.GetTransport(transportSocket) - if err := transport.Open(); err != nil { - fmt.Fprintf(os.Stderr, "error: %v\n", err) - os.Exit(1) - } - defer transport.Close() - client := thriftadd.NewAddServiceClientFactory(transport, protocolFactory) - service = thriftclient.New(client) - } else { - fmt.Fprintf(os.Stderr, "error: no remote address specified\n") - os.Exit(1) - } - if err != nil { - fmt.Fprintf(os.Stderr, "error: %v\n", err) - os.Exit(1) - } - - switch *method { - case "sum": - a, _ := strconv.ParseInt(flag.Args()[0], 10, 64) - b, _ := strconv.ParseInt(flag.Args()[1], 10, 64) - v, err := service.Sum(context.Background(), int(a), int(b)) - if err != nil { - fmt.Fprintf(os.Stderr, "error: %v\n", err) - os.Exit(1) - } - fmt.Fprintf(os.Stdout, "%d + %d = %d\n", a, b, v) - - case "concat": - a := flag.Args()[0] - b := flag.Args()[1] - v, err := service.Concat(context.Background(), a, b) - if err != nil { - fmt.Fprintf(os.Stderr, "error: %v\n", err) - os.Exit(1) - } - fmt.Fprintf(os.Stdout, "%q 
+ %q = %q\n", a, b, v) - - default: - fmt.Fprintf(os.Stderr, "error: invalid method %q\n", method) - os.Exit(1) - } -} diff --git a/vendor/github.com/go-kit/kit/examples/addsvc/cmd/addsvc/main.go b/vendor/github.com/go-kit/kit/examples/addsvc/cmd/addsvc/main.go deleted file mode 100644 index 2273a4c..0000000 --- a/vendor/github.com/go-kit/kit/examples/addsvc/cmd/addsvc/main.go +++ /dev/null @@ -1,257 +0,0 @@ -package main - -import ( - "flag" - "fmt" - "net" - "net/http" - "net/http/pprof" - "os" - "os/signal" - "strings" - "syscall" - "time" - - "github.com/apache/thrift/lib/go/thrift" - lightstep "github.com/lightstep/lightstep-tracer-go" - stdopentracing "github.com/opentracing/opentracing-go" - zipkin "github.com/openzipkin/zipkin-go-opentracing" - stdprometheus "github.com/prometheus/client_golang/prometheus" - appdashot "github.com/sourcegraph/appdash/opentracing" - "golang.org/x/net/context" - "google.golang.org/grpc" - "sourcegraph.com/sourcegraph/appdash" - - "github.com/go-kit/kit/endpoint" - "github.com/go-kit/kit/examples/addsvc" - "github.com/go-kit/kit/examples/addsvc/pb" - thriftadd "github.com/go-kit/kit/examples/addsvc/thrift/gen-go/addsvc" - "github.com/go-kit/kit/log" - "github.com/go-kit/kit/metrics" - "github.com/go-kit/kit/metrics/prometheus" - "github.com/go-kit/kit/tracing/opentracing" -) - -func main() { - var ( - debugAddr = flag.String("debug.addr", ":8080", "Debug and metrics listen address") - httpAddr = flag.String("http.addr", ":8081", "HTTP listen address") - grpcAddr = flag.String("grpc.addr", ":8082", "gRPC (HTTP) listen address") - thriftAddr = flag.String("thrift.addr", ":8083", "Thrift listen address") - thriftProtocol = flag.String("thrift.protocol", "binary", "binary, compact, json, simplejson") - thriftBufferSize = flag.Int("thrift.buffer.size", 0, "0 for unbuffered") - thriftFramed = flag.Bool("thrift.framed", false, "true to enable framing") - zipkinAddr = flag.String("zipkin.addr", "", "Enable Zipkin tracing via a Kafka server host:port") - appdashAddr = flag.String("appdash.addr", "", "Enable Appdash tracing via an Appdash server host:port") - lightstepToken = flag.String("lightstep.token", "", "Enable LightStep tracing via a LightStep access token") - ) - flag.Parse() - - // Logging domain. - var logger log.Logger - { - logger = log.NewLogfmtLogger(os.Stdout) - logger = log.NewContext(logger).With("ts", log.DefaultTimestampUTC) - logger = log.NewContext(logger).With("caller", log.DefaultCaller) - } - logger.Log("msg", "hello") - defer logger.Log("msg", "goodbye") - - // Metrics domain. - var ints, chars metrics.Counter - { - // Business level metrics. - ints = prometheus.NewCounter(stdprometheus.CounterOpts{ - Namespace: "addsvc", - Name: "integers_summed", - Help: "Total count of integers summed via the Sum method.", - }, []string{}) - chars = prometheus.NewCounter(stdprometheus.CounterOpts{ - Namespace: "addsvc", - Name: "characters_concatenated", - Help: "Total count of characters concatenated via the Concat method.", - }, []string{}) - } - var duration metrics.TimeHistogram - { - // Transport level metrics. - duration = metrics.NewTimeHistogram(time.Nanosecond, prometheus.NewSummary(stdprometheus.SummaryOpts{ - Namespace: "addsvc", - Name: "request_duration_ns", - Help: "Request duration in nanoseconds.", - }, []string{"method", "success"})) - } - - // Tracing domain. 
- var tracer stdopentracing.Tracer - { - if *zipkinAddr != "" { - logger := log.NewContext(logger).With("tracer", "Zipkin") - logger.Log("addr", *zipkinAddr) - collector, err := zipkin.NewKafkaCollector( - strings.Split(*zipkinAddr, ","), - zipkin.KafkaLogger(logger), - ) - if err != nil { - logger.Log("err", err) - os.Exit(1) - } - tracer, err = zipkin.NewTracer( - zipkin.NewRecorder(collector, false, "localhost:80", "addsvc"), - ) - if err != nil { - logger.Log("err", err) - os.Exit(1) - } - } else if *appdashAddr != "" { - logger := log.NewContext(logger).With("tracer", "Appdash") - logger.Log("addr", *appdashAddr) - tracer = appdashot.NewTracer(appdash.NewRemoteCollector(*appdashAddr)) - } else if *lightstepToken != "" { - logger := log.NewContext(logger).With("tracer", "LightStep") - logger.Log() // probably don't want to print out the token :) - tracer = lightstep.NewTracer(lightstep.Options{ - AccessToken: *lightstepToken, - }) - defer lightstep.FlushLightStepTracer(tracer) - } else { - logger := log.NewContext(logger).With("tracer", "none") - logger.Log() - tracer = stdopentracing.GlobalTracer() // no-op - } - } - - // Business domain. - var service addsvc.Service - { - service = addsvc.NewBasicService() - service = addsvc.ServiceLoggingMiddleware(logger)(service) - service = addsvc.ServiceInstrumentingMiddleware(ints, chars)(service) - } - - // Endpoint domain. - var sumEndpoint endpoint.Endpoint - { - sumDuration := duration.With(metrics.Field{Key: "method", Value: "Sum"}) - sumLogger := log.NewContext(logger).With("method", "Sum") - - sumEndpoint = addsvc.MakeSumEndpoint(service) - sumEndpoint = opentracing.TraceServer(tracer, "Sum")(sumEndpoint) - sumEndpoint = addsvc.EndpointInstrumentingMiddleware(sumDuration)(sumEndpoint) - sumEndpoint = addsvc.EndpointLoggingMiddleware(sumLogger)(sumEndpoint) - } - var concatEndpoint endpoint.Endpoint - { - concatDuration := duration.With(metrics.Field{Key: "method", Value: "Concat"}) - concatLogger := log.NewContext(logger).With("method", "Concat") - - concatEndpoint = addsvc.MakeConcatEndpoint(service) - concatEndpoint = opentracing.TraceServer(tracer, "Concat")(concatEndpoint) - concatEndpoint = addsvc.EndpointInstrumentingMiddleware(concatDuration)(concatEndpoint) - concatEndpoint = addsvc.EndpointLoggingMiddleware(concatLogger)(concatEndpoint) - } - endpoints := addsvc.Endpoints{ - SumEndpoint: sumEndpoint, - ConcatEndpoint: concatEndpoint, - } - - // Mechanical domain. - errc := make(chan error) - ctx := context.Background() - - // Interrupt handler. - go func() { - c := make(chan os.Signal, 1) - signal.Notify(c, syscall.SIGINT, syscall.SIGTERM) - errc <- fmt.Errorf("%s", <-c) - }() - - // Debug listener. - go func() { - logger := log.NewContext(logger).With("transport", "debug") - - m := http.NewServeMux() - m.Handle("/debug/pprof/", http.HandlerFunc(pprof.Index)) - m.Handle("/debug/pprof/cmdline", http.HandlerFunc(pprof.Cmdline)) - m.Handle("/debug/pprof/profile", http.HandlerFunc(pprof.Profile)) - m.Handle("/debug/pprof/symbol", http.HandlerFunc(pprof.Symbol)) - m.Handle("/debug/pprof/trace", http.HandlerFunc(pprof.Trace)) - m.Handle("/metrics", stdprometheus.Handler()) - - logger.Log("addr", *debugAddr) - errc <- http.ListenAndServe(*debugAddr, m) - }() - - // HTTP transport. - go func() { - logger := log.NewContext(logger).With("transport", "HTTP") - h := addsvc.MakeHTTPHandler(ctx, endpoints, tracer, logger) - logger.Log("addr", *httpAddr) - errc <- http.ListenAndServe(*httpAddr, h) - }() - - // gRPC transport. 
- go func() { - logger := log.NewContext(logger).With("transport", "gRPC") - - ln, err := net.Listen("tcp", *grpcAddr) - if err != nil { - errc <- err - return - } - - srv := addsvc.MakeGRPCServer(ctx, endpoints, tracer, logger) - s := grpc.NewServer() - pb.RegisterAddServer(s, srv) - - logger.Log("addr", *grpcAddr) - errc <- s.Serve(ln) - }() - - // Thrift transport. - go func() { - logger := log.NewContext(logger).With("transport", "Thrift") - - var protocolFactory thrift.TProtocolFactory - switch *thriftProtocol { - case "binary": - protocolFactory = thrift.NewTBinaryProtocolFactoryDefault() - case "compact": - protocolFactory = thrift.NewTCompactProtocolFactory() - case "json": - protocolFactory = thrift.NewTJSONProtocolFactory() - case "simplejson": - protocolFactory = thrift.NewTSimpleJSONProtocolFactory() - default: - errc <- fmt.Errorf("invalid Thrift protocol %q", *thriftProtocol) - return - } - - var transportFactory thrift.TTransportFactory - if *thriftBufferSize > 0 { - transportFactory = thrift.NewTBufferedTransportFactory(*thriftBufferSize) - } else { - transportFactory = thrift.NewTTransportFactory() - } - if *thriftFramed { - transportFactory = thrift.NewTFramedTransportFactory(transportFactory) - } - - transport, err := thrift.NewTServerSocket(*thriftAddr) - if err != nil { - errc <- err - return - } - - logger.Log("addr", *thriftAddr) - errc <- thrift.NewTSimpleServer4( - thriftadd.NewAddServiceProcessor(addsvc.MakeThriftHandler(ctx, endpoints)), - transport, - transportFactory, - protocolFactory, - ).Serve() - }() - - // Run! - logger.Log("exit", <-errc) -} diff --git a/vendor/github.com/go-kit/kit/examples/addsvc/doc.go b/vendor/github.com/go-kit/kit/examples/addsvc/doc.go deleted file mode 100644 index 8865046..0000000 --- a/vendor/github.com/go-kit/kit/examples/addsvc/doc.go +++ /dev/null @@ -1,6 +0,0 @@ -// Package addsvc implements the business and transport logic for an example -// service that can sum integers and concatenate strings. -// -// A client library is available in the client subdirectory. A server binary is -// available in cmd/addsrv. An example client binary is available in cmd/addcli. -package addsvc diff --git a/vendor/github.com/go-kit/kit/examples/addsvc/endpoints.go b/vendor/github.com/go-kit/kit/examples/addsvc/endpoints.go deleted file mode 100644 index 8fb7cbd..0000000 --- a/vendor/github.com/go-kit/kit/examples/addsvc/endpoints.go +++ /dev/null @@ -1,136 +0,0 @@ -package addsvc - -// This file contains methods to make individual endpoints from services, -// request and response types to serve those endpoints, as well as encoders and -// decoders for those types, for all of our supported transport serialization -// formats. It also includes endpoint middlewares. - -import ( - "fmt" - "time" - - "golang.org/x/net/context" - - "github.com/go-kit/kit/endpoint" - "github.com/go-kit/kit/log" - "github.com/go-kit/kit/metrics" -) - -// Endpoints collects all of the endpoints that compose an add service. It's -// meant to be used as a helper struct, to collect all of the endpoints into a -// single parameter. -// -// In a server, it's useful for functions that need to operate on a per-endpoint -// basis. For example, you might pass an Endpoints to a function that produces -// an http.Handler, with each method (endpoint) wired up to a specific path. (It -// is probably a mistake in design to invoke the Service methods on the -// Endpoints struct in a server.) 
-// -// In a client, it's useful to collect individually constructed endpoints into a -// single type that implements the Service interface. For example, you might -// construct individual endpoints using transport/http.NewClient, combine them -// into an Endpoints, and return it to the caller as a Service. -type Endpoints struct { - SumEndpoint endpoint.Endpoint - ConcatEndpoint endpoint.Endpoint -} - -// Sum implements Service. Primarily useful in a client. -func (e Endpoints) Sum(ctx context.Context, a, b int) (int, error) { - request := sumRequest{A: a, B: b} - response, err := e.SumEndpoint(ctx, request) - if err != nil { - return 0, err - } - return response.(sumResponse).V, response.(sumResponse).Err -} - -// Concat implements Service. Primarily useful in a client. -func (e Endpoints) Concat(ctx context.Context, a, b string) (string, error) { - request := concatRequest{A: a, B: b} - response, err := e.ConcatEndpoint(ctx, request) - if err != nil { - return "", err - } - return response.(concatResponse).V, response.(concatResponse).Err -} - -// MakeSumEndpoint returns an endpoint that invokes Sum on the service. -// Primarily useful in a server. -func MakeSumEndpoint(s Service) endpoint.Endpoint { - return func(ctx context.Context, request interface{}) (response interface{}, err error) { - sumReq := request.(sumRequest) - v, err := s.Sum(ctx, sumReq.A, sumReq.B) - if err == ErrIntOverflow { - return nil, err // special case; see comment on ErrIntOverflow - } - return sumResponse{ - V: v, - Err: err, - }, nil - } -} - -// MakeConcatEndpoint returns an endpoint that invokes Concat on the service. -// Primarily useful in a server. -func MakeConcatEndpoint(s Service) endpoint.Endpoint { - return func(ctx context.Context, request interface{}) (response interface{}, err error) { - concatReq := request.(concatRequest) - v, err := s.Concat(ctx, concatReq.A, concatReq.B) - return concatResponse{ - V: v, - Err: err, - }, nil - } -} - -// EndpointInstrumentingMiddleware returns an endpoint middleware that records -// the duration of each invocation to the passed histogram. The middleware adds -// a single field: "success", which is "true" if no error is returned, and -// "false" otherwise. -func EndpointInstrumentingMiddleware(duration metrics.TimeHistogram) endpoint.Middleware { - return func(next endpoint.Endpoint) endpoint.Endpoint { - return func(ctx context.Context, request interface{}) (response interface{}, err error) { - - defer func(begin time.Time) { - f := metrics.Field{Key: "success", Value: fmt.Sprint(err == nil)} - duration.With(f).Observe(time.Since(begin)) - }(time.Now()) - return next(ctx, request) - - } - } -} - -// EndpointLoggingMiddleware returns an endpoint middleware that logs the -// duration of each invocation, and the resulting error, if any. -func EndpointLoggingMiddleware(logger log.Logger) endpoint.Middleware { - return func(next endpoint.Endpoint) endpoint.Endpoint { - return func(ctx context.Context, request interface{}) (response interface{}, err error) { - - defer func(begin time.Time) { - logger.Log("error", err, "took", time.Since(begin)) - }(time.Now()) - return next(ctx, request) - - } - } -} - -// These types are unexported because they only exist to serve the endpoint -// domain, which is totally encapsulated in this package. They are otherwise -// opaque to all callers. 
- -type sumRequest struct{ A, B int } - -type sumResponse struct { - V int - Err error -} - -type concatRequest struct{ A, B string } - -type concatResponse struct { - V string - Err error -} diff --git a/vendor/github.com/go-kit/kit/examples/addsvc/pb/addsvc.pb.go b/vendor/github.com/go-kit/kit/examples/addsvc/pb/addsvc.pb.go deleted file mode 100644 index 0e8cff5..0000000 --- a/vendor/github.com/go-kit/kit/examples/addsvc/pb/addsvc.pb.go +++ /dev/null @@ -1,215 +0,0 @@ -// Code generated by protoc-gen-go. -// source: addsvc.proto -// DO NOT EDIT! - -/* -Package pb is a generated protocol buffer package. - -It is generated from these files: - addsvc.proto - -It has these top-level messages: - SumRequest - SumReply - ConcatRequest - ConcatReply -*/ -package pb - -import proto "github.com/golang/protobuf/proto" -import fmt "fmt" -import math "math" - -import ( - context "golang.org/x/net/context" - grpc "google.golang.org/grpc" -) - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. -const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package - -// The sum request contains two parameters. -type SumRequest struct { - A int64 `protobuf:"varint,1,opt,name=a" json:"a,omitempty"` - B int64 `protobuf:"varint,2,opt,name=b" json:"b,omitempty"` -} - -func (m *SumRequest) Reset() { *m = SumRequest{} } -func (m *SumRequest) String() string { return proto.CompactTextString(m) } -func (*SumRequest) ProtoMessage() {} -func (*SumRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} } - -// The sum response contains the result of the calculation. -type SumReply struct { - V int64 `protobuf:"varint,1,opt,name=v" json:"v,omitempty"` - Err string `protobuf:"bytes,2,opt,name=err" json:"err,omitempty"` -} - -func (m *SumReply) Reset() { *m = SumReply{} } -func (m *SumReply) String() string { return proto.CompactTextString(m) } -func (*SumReply) ProtoMessage() {} -func (*SumReply) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{1} } - -// The Concat request contains two parameters. -type ConcatRequest struct { - A string `protobuf:"bytes,1,opt,name=a" json:"a,omitempty"` - B string `protobuf:"bytes,2,opt,name=b" json:"b,omitempty"` -} - -func (m *ConcatRequest) Reset() { *m = ConcatRequest{} } -func (m *ConcatRequest) String() string { return proto.CompactTextString(m) } -func (*ConcatRequest) ProtoMessage() {} -func (*ConcatRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{2} } - -// The Concat response contains the result of the concatenation. 
-type ConcatReply struct { - V string `protobuf:"bytes,1,opt,name=v" json:"v,omitempty"` - Err string `protobuf:"bytes,2,opt,name=err" json:"err,omitempty"` -} - -func (m *ConcatReply) Reset() { *m = ConcatReply{} } -func (m *ConcatReply) String() string { return proto.CompactTextString(m) } -func (*ConcatReply) ProtoMessage() {} -func (*ConcatReply) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{3} } - -func init() { - proto.RegisterType((*SumRequest)(nil), "pb.SumRequest") - proto.RegisterType((*SumReply)(nil), "pb.SumReply") - proto.RegisterType((*ConcatRequest)(nil), "pb.ConcatRequest") - proto.RegisterType((*ConcatReply)(nil), "pb.ConcatReply") -} - -// Reference imports to suppress errors if they are not otherwise used. -var _ context.Context -var _ grpc.ClientConn - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the grpc package it is being compiled against. -const _ = grpc.SupportPackageIsVersion3 - -// Client API for Add service - -type AddClient interface { - // Sums two integers. - Sum(ctx context.Context, in *SumRequest, opts ...grpc.CallOption) (*SumReply, error) - // Concatenates two strings - Concat(ctx context.Context, in *ConcatRequest, opts ...grpc.CallOption) (*ConcatReply, error) -} - -type addClient struct { - cc *grpc.ClientConn -} - -func NewAddClient(cc *grpc.ClientConn) AddClient { - return &addClient{cc} -} - -func (c *addClient) Sum(ctx context.Context, in *SumRequest, opts ...grpc.CallOption) (*SumReply, error) { - out := new(SumReply) - err := grpc.Invoke(ctx, "/pb.Add/Sum", in, out, c.cc, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *addClient) Concat(ctx context.Context, in *ConcatRequest, opts ...grpc.CallOption) (*ConcatReply, error) { - out := new(ConcatReply) - err := grpc.Invoke(ctx, "/pb.Add/Concat", in, out, c.cc, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -// Server API for Add service - -type AddServer interface { - // Sums two integers. 
- Sum(context.Context, *SumRequest) (*SumReply, error) - // Concatenates two strings - Concat(context.Context, *ConcatRequest) (*ConcatReply, error) -} - -func RegisterAddServer(s *grpc.Server, srv AddServer) { - s.RegisterService(&_Add_serviceDesc, srv) -} - -func _Add_Sum_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(SumRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(AddServer).Sum(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/pb.Add/Sum", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(AddServer).Sum(ctx, req.(*SumRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _Add_Concat_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(ConcatRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(AddServer).Concat(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/pb.Add/Concat", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(AddServer).Concat(ctx, req.(*ConcatRequest)) - } - return interceptor(ctx, in, info, handler) -} - -var _Add_serviceDesc = grpc.ServiceDesc{ - ServiceName: "pb.Add", - HandlerType: (*AddServer)(nil), - Methods: []grpc.MethodDesc{ - { - MethodName: "Sum", - Handler: _Add_Sum_Handler, - }, - { - MethodName: "Concat", - Handler: _Add_Concat_Handler, - }, - }, - Streams: []grpc.StreamDesc{}, - Metadata: fileDescriptor0, -} - -func init() { proto.RegisterFile("addsvc.proto", fileDescriptor0) } - -var fileDescriptor0 = []byte{ - // 188 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xe2, 0xe2, 0x49, 0x4c, 0x49, 0x29, - 0x2e, 0x4b, 0xd6, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0x62, 0x2a, 0x48, 0x52, 0xd2, 0xe0, 0xe2, - 0x0a, 0x2e, 0xcd, 0x0d, 0x4a, 0x2d, 0x2c, 0x4d, 0x2d, 0x2e, 0x11, 0xe2, 0xe1, 0x62, 0x4c, 0x94, - 0x60, 0x54, 0x60, 0xd4, 0x60, 0x0e, 0x62, 0x4c, 0x04, 0xf1, 0x92, 0x24, 0x98, 0x20, 0xbc, 0x24, - 0x25, 0x2d, 0x2e, 0x0e, 0xb0, 0xca, 0x82, 0x9c, 0x4a, 0x90, 0x4c, 0x19, 0x4c, 0x5d, 0x99, 0x90, - 0x00, 0x17, 0x73, 0x6a, 0x51, 0x11, 0x58, 0x25, 0x67, 0x10, 0x88, 0xa9, 0xa4, 0xcd, 0xc5, 0xeb, - 0x9c, 0x9f, 0x97, 0x9c, 0x58, 0x82, 0x61, 0x30, 0x27, 0x8a, 0xc1, 0x9c, 0x20, 0x83, 0x75, 0xb9, - 0xb8, 0x61, 0x8a, 0x51, 0xcc, 0xe6, 0xc4, 0x6a, 0xb6, 0x51, 0x0c, 0x17, 0xb3, 0x63, 0x4a, 0x8a, - 0x90, 0x2a, 0x17, 0x33, 0xd0, 0x39, 0x42, 0x7c, 0x7a, 0x05, 0x49, 0x7a, 0x08, 0x1f, 0x48, 0xf1, - 0xc0, 0xf9, 0x40, 0xb3, 0x94, 0x18, 0x84, 0xf4, 0xb8, 0xd8, 0x20, 0x86, 0x0b, 0x09, 0x82, 0x64, - 0x50, 0x5c, 0x25, 0xc5, 0x8f, 0x2c, 0x04, 0x56, 0x9f, 0xc4, 0x06, 0x0e, 0x1a, 0x63, 0x40, 0x00, - 0x00, 0x00, 0xff, 0xff, 0xdc, 0x37, 0x81, 0x99, 0x2a, 0x01, 0x00, 0x00, -} diff --git a/vendor/github.com/go-kit/kit/examples/addsvc/pb/addsvc.proto b/vendor/github.com/go-kit/kit/examples/addsvc/pb/addsvc.proto deleted file mode 100644 index cf61532..0000000 --- a/vendor/github.com/go-kit/kit/examples/addsvc/pb/addsvc.proto +++ /dev/null @@ -1,36 +0,0 @@ -syntax = "proto3"; - -package pb; - -// The Add service definition. -service Add { - // Sums two integers. 
- rpc Sum (SumRequest) returns (SumReply) {} - - // Concatenates two strings - rpc Concat (ConcatRequest) returns (ConcatReply) {} -} - -// The sum request contains two parameters. -message SumRequest { - int64 a = 1; - int64 b = 2; -} - -// The sum response contains the result of the calculation. -message SumReply { - int64 v = 1; - string err = 2; -} - -// The Concat request contains two parameters. -message ConcatRequest { - string a = 1; - string b = 2; -} - -// The Concat response contains the result of the concatenation. -message ConcatReply { - string v = 1; - string err = 2; -} diff --git a/vendor/github.com/go-kit/kit/examples/addsvc/pb/compile.sh b/vendor/github.com/go-kit/kit/examples/addsvc/pb/compile.sh deleted file mode 100755 index c026844..0000000 --- a/vendor/github.com/go-kit/kit/examples/addsvc/pb/compile.sh +++ /dev/null @@ -1,14 +0,0 @@ -#!/usr/bin/env sh - -# Install proto3 from source -# brew install autoconf automake libtool -# git clone https://github.com/google/protobuf -# ./autogen.sh ; ./configure ; make ; make install -# -# Update protoc Go bindings via -# go get -u github.com/golang/protobuf/{proto,protoc-gen-go} -# -# See also -# https://github.com/grpc/grpc-go/tree/master/examples - -protoc addsvc.proto --go_out=plugins=grpc:. diff --git a/vendor/github.com/go-kit/kit/examples/addsvc/service.go b/vendor/github.com/go-kit/kit/examples/addsvc/service.go deleted file mode 100644 index c60b676..0000000 --- a/vendor/github.com/go-kit/kit/examples/addsvc/service.go +++ /dev/null @@ -1,164 +0,0 @@ -package addsvc - -// This file contains the Service definition, and a basic service -// implementation. It also includes service middlewares. - -import ( - "errors" - "time" - - "golang.org/x/net/context" - - "github.com/go-kit/kit/log" - "github.com/go-kit/kit/metrics" -) - -// Service describes a service that adds things together. -type Service interface { - Sum(ctx context.Context, a, b int) (int, error) - Concat(ctx context.Context, a, b string) (string, error) -} - -// Business-domain errors like these may be served in two ways: returned -// directly by endpoints, or bundled into the response struct. Both methods can -// be made to work, but errors returned directly by endpoints are counted by -// middlewares that check errors, like circuit breakers. -// -// If you don't want that behavior -- and you probably don't -- then it's better -// to bundle errors into the response struct. - -var ( - // ErrTwoZeroes is an arbitrary business rule for the Add method. - ErrTwoZeroes = errors.New("can't sum two zeroes") - - // ErrIntOverflow protects the Add method. We've decided that this error - // indicates a misbehaving service and should count against e.g. circuit - // breakers. So, we return it directly in endpoints, to illustrate the - // difference. In a real service, this probably wouldn't be the case. - ErrIntOverflow = errors.New("integer overflow") - - // ErrMaxSizeExceeded protects the Concat method. - ErrMaxSizeExceeded = errors.New("result exceeds maximum size") -) - -// These annoying helper functions are required to translate Go error types to -// and from strings, which is the type we use in our IDLs to represent errors. -// There is special casing to treat empty strings as nil errors. - -func str2err(s string) error { - if s == "" { - return nil - } - return errors.New(s) -} - -func err2str(err error) string { - if err == nil { - return "" - } - return err.Error() -} - -// NewBasicService returns a naïve, stateless implementation of Service. 
-func NewBasicService() Service { - return basicService{} -} - -type basicService struct{} - -const ( - intMax = 1<<31 - 1 - intMin = -(intMax + 1) - maxLen = 102400 -) - -// Sum implements Service. -func (s basicService) Sum(_ context.Context, a, b int) (int, error) { - if a == 0 && b == 0 { - return 0, ErrTwoZeroes - } - if (b > 0 && a > (intMax-b)) || (b < 0 && a < (intMin-b)) { - return 0, ErrIntOverflow - } - return a + b, nil -} - -// Concat implements Service. -func (s basicService) Concat(_ context.Context, a, b string) (string, error) { - if len(a)+len(b) > maxLen { - return "", ErrMaxSizeExceeded - } - return a + b, nil -} - -// Middleware describes a service (as opposed to endpoint) middleware. -type Middleware func(Service) Service - -// ServiceLoggingMiddleware returns a service middleware that logs the -// parameters and result of each method invocation. -func ServiceLoggingMiddleware(logger log.Logger) Middleware { - return func(next Service) Service { - return serviceLoggingMiddleware{ - logger: logger, - next: next, - } - } -} - -type serviceLoggingMiddleware struct { - logger log.Logger - next Service -} - -func (mw serviceLoggingMiddleware) Sum(ctx context.Context, a, b int) (v int, err error) { - defer func(begin time.Time) { - mw.logger.Log( - "method", "Sum", - "a", a, "b", b, "result", v, "error", err, - "took", time.Since(begin), - ) - }(time.Now()) - return mw.next.Sum(ctx, a, b) -} - -func (mw serviceLoggingMiddleware) Concat(ctx context.Context, a, b string) (v string, err error) { - defer func(begin time.Time) { - mw.logger.Log( - "method", "Concat", - "a", a, "b", b, "result", v, "error", err, - "took", time.Since(begin), - ) - }(time.Now()) - return mw.next.Concat(ctx, a, b) -} - -// ServiceInstrumentingMiddleware returns a service middleware that instruments -// the number of integers summed and characters concatenated over the lifetime of -// the service. 
-func ServiceInstrumentingMiddleware(ints, chars metrics.Counter) Middleware { - return func(next Service) Service { - return serviceInstrumentingMiddleware{ - ints: ints, - chars: chars, - next: next, - } - } -} - -type serviceInstrumentingMiddleware struct { - ints metrics.Counter - chars metrics.Counter - next Service -} - -func (mw serviceInstrumentingMiddleware) Sum(ctx context.Context, a, b int) (int, error) { - v, err := mw.next.Sum(ctx, a, b) - mw.ints.Add(uint64(v)) - return v, err -} - -func (mw serviceInstrumentingMiddleware) Concat(ctx context.Context, a, b string) (string, error) { - v, err := mw.next.Concat(ctx, a, b) - mw.chars.Add(uint64(len(v))) - return v, err -} diff --git a/vendor/github.com/go-kit/kit/examples/addsvc/thrift/addsvc.thrift b/vendor/github.com/go-kit/kit/examples/addsvc/thrift/addsvc.thrift deleted file mode 100644 index e67ce1b..0000000 --- a/vendor/github.com/go-kit/kit/examples/addsvc/thrift/addsvc.thrift +++ /dev/null @@ -1,14 +0,0 @@ -struct SumReply { - 1: i64 value - 2: string err -} - -struct ConcatReply { - 1: string value - 2: string err -} - -service AddService { - SumReply Sum(1: i64 a, 2: i64 b) - ConcatReply Concat(1: string a, 2: string b) -} diff --git a/vendor/github.com/go-kit/kit/examples/addsvc/thrift/compile.sh b/vendor/github.com/go-kit/kit/examples/addsvc/thrift/compile.sh deleted file mode 100755 index 2ecce5b..0000000 --- a/vendor/github.com/go-kit/kit/examples/addsvc/thrift/compile.sh +++ /dev/null @@ -1,5 +0,0 @@ -#!/usr/bin/env sh - -# See also https://thrift.apache.org/tutorial/go - -thrift -r --gen "go:package_prefix=github.com/go-kit/kit/examples/addsvc/thrift/gen-go/,thrift_import=github.com/apache/thrift/lib/go/thrift" addsvc.thrift diff --git a/vendor/github.com/go-kit/kit/examples/addsvc/thrift/gen-go/addsvc/add_service-remote/add_service-remote.go b/vendor/github.com/go-kit/kit/examples/addsvc/thrift/gen-go/addsvc/add_service-remote/add_service-remote.go deleted file mode 100755 index b8ce67c..0000000 --- a/vendor/github.com/go-kit/kit/examples/addsvc/thrift/gen-go/addsvc/add_service-remote/add_service-remote.go +++ /dev/null @@ -1,157 +0,0 @@ -// Autogenerated by Thrift Compiler (0.9.3) -// DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING - -package main - -import ( - "flag" - "fmt" - "github.com/apache/thrift/lib/go/thrift" - "github.com/go-kit/kit/examples/addsvc/thrift/gen-go/addsvc" - "math" - "net" - "net/url" - "os" - "strconv" - "strings" -) - -func Usage() { - fmt.Fprintln(os.Stderr, "Usage of ", os.Args[0], " [-h host:port] [-u url] [-f[ramed]] function [arg1 [arg2...]]:") - flag.PrintDefaults() - fmt.Fprintln(os.Stderr, "\nFunctions:") - fmt.Fprintln(os.Stderr, " SumReply Sum(i64 a, i64 b)") - fmt.Fprintln(os.Stderr, " ConcatReply Concat(string a, string b)") - fmt.Fprintln(os.Stderr) - os.Exit(0) -} - -func main() { - flag.Usage = Usage - var host string - var port int - var protocol string - var urlString string - var framed bool - var useHttp bool - var parsedUrl url.URL - var trans thrift.TTransport - _ = strconv.Atoi - _ = math.Abs - flag.Usage = Usage - flag.StringVar(&host, "h", "localhost", "Specify host and port") - flag.IntVar(&port, "p", 9090, "Specify port") - flag.StringVar(&protocol, "P", "binary", "Specify the protocol (binary, compact, simplejson, json)") - flag.StringVar(&urlString, "u", "", "Specify the url") - flag.BoolVar(&framed, "framed", false, "Use framed transport") - flag.BoolVar(&useHttp, "http", false, "Use http") - flag.Parse() - - if len(urlString) > 0 { - 
parsedUrl, err := url.Parse(urlString) - if err != nil { - fmt.Fprintln(os.Stderr, "Error parsing URL: ", err) - flag.Usage() - } - host = parsedUrl.Host - useHttp = len(parsedUrl.Scheme) <= 0 || parsedUrl.Scheme == "http" - } else if useHttp { - _, err := url.Parse(fmt.Sprint("http://", host, ":", port)) - if err != nil { - fmt.Fprintln(os.Stderr, "Error parsing URL: ", err) - flag.Usage() - } - } - - cmd := flag.Arg(0) - var err error - if useHttp { - trans, err = thrift.NewTHttpClient(parsedUrl.String()) - } else { - portStr := fmt.Sprint(port) - if strings.Contains(host, ":") { - host, portStr, err = net.SplitHostPort(host) - if err != nil { - fmt.Fprintln(os.Stderr, "error with host:", err) - os.Exit(1) - } - } - trans, err = thrift.NewTSocket(net.JoinHostPort(host, portStr)) - if err != nil { - fmt.Fprintln(os.Stderr, "error resolving address:", err) - os.Exit(1) - } - if framed { - trans = thrift.NewTFramedTransport(trans) - } - } - if err != nil { - fmt.Fprintln(os.Stderr, "Error creating transport", err) - os.Exit(1) - } - defer trans.Close() - var protocolFactory thrift.TProtocolFactory - switch protocol { - case "compact": - protocolFactory = thrift.NewTCompactProtocolFactory() - break - case "simplejson": - protocolFactory = thrift.NewTSimpleJSONProtocolFactory() - break - case "json": - protocolFactory = thrift.NewTJSONProtocolFactory() - break - case "binary", "": - protocolFactory = thrift.NewTBinaryProtocolFactoryDefault() - break - default: - fmt.Fprintln(os.Stderr, "Invalid protocol specified: ", protocol) - Usage() - os.Exit(1) - } - client := addsvc.NewAddServiceClientFactory(trans, protocolFactory) - if err := trans.Open(); err != nil { - fmt.Fprintln(os.Stderr, "Error opening socket to ", host, ":", port, " ", err) - os.Exit(1) - } - - switch cmd { - case "Sum": - if flag.NArg()-1 != 2 { - fmt.Fprintln(os.Stderr, "Sum requires 2 args") - flag.Usage() - } - argvalue0, err6 := (strconv.ParseInt(flag.Arg(1), 10, 64)) - if err6 != nil { - Usage() - return - } - value0 := argvalue0 - argvalue1, err7 := (strconv.ParseInt(flag.Arg(2), 10, 64)) - if err7 != nil { - Usage() - return - } - value1 := argvalue1 - fmt.Print(client.Sum(value0, value1)) - fmt.Print("\n") - break - case "Concat": - if flag.NArg()-1 != 2 { - fmt.Fprintln(os.Stderr, "Concat requires 2 args") - flag.Usage() - } - argvalue0 := flag.Arg(1) - value0 := argvalue0 - argvalue1 := flag.Arg(2) - value1 := argvalue1 - fmt.Print(client.Concat(value0, value1)) - fmt.Print("\n") - break - case "": - Usage() - break - default: - fmt.Fprintln(os.Stderr, "Invalid function ", cmd) - } -} diff --git a/vendor/github.com/go-kit/kit/examples/addsvc/thrift/gen-go/addsvc/addservice.go b/vendor/github.com/go-kit/kit/examples/addsvc/thrift/gen-go/addsvc/addservice.go deleted file mode 100644 index 3f3aeeb..0000000 --- a/vendor/github.com/go-kit/kit/examples/addsvc/thrift/gen-go/addsvc/addservice.go +++ /dev/null @@ -1,807 +0,0 @@ -// Autogenerated by Thrift Compiler (0.9.3) -// DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING - -package addsvc - -import ( - "bytes" - "fmt" - "github.com/apache/thrift/lib/go/thrift" -) - -// (needed to ensure safety because of naive import list construction.) 
-var _ = thrift.ZERO -var _ = fmt.Printf -var _ = bytes.Equal - -type AddService interface { - // Parameters: - // - A - // - B - Sum(a int64, b int64) (r *SumReply, err error) - // Parameters: - // - A - // - B - Concat(a string, b string) (r *ConcatReply, err error) -} - -type AddServiceClient struct { - Transport thrift.TTransport - ProtocolFactory thrift.TProtocolFactory - InputProtocol thrift.TProtocol - OutputProtocol thrift.TProtocol - SeqId int32 -} - -func NewAddServiceClientFactory(t thrift.TTransport, f thrift.TProtocolFactory) *AddServiceClient { - return &AddServiceClient{Transport: t, - ProtocolFactory: f, - InputProtocol: f.GetProtocol(t), - OutputProtocol: f.GetProtocol(t), - SeqId: 0, - } -} - -func NewAddServiceClientProtocol(t thrift.TTransport, iprot thrift.TProtocol, oprot thrift.TProtocol) *AddServiceClient { - return &AddServiceClient{Transport: t, - ProtocolFactory: nil, - InputProtocol: iprot, - OutputProtocol: oprot, - SeqId: 0, - } -} - -// Parameters: -// - A -// - B -func (p *AddServiceClient) Sum(a int64, b int64) (r *SumReply, err error) { - if err = p.sendSum(a, b); err != nil { - return - } - return p.recvSum() -} - -func (p *AddServiceClient) sendSum(a int64, b int64) (err error) { - oprot := p.OutputProtocol - if oprot == nil { - oprot = p.ProtocolFactory.GetProtocol(p.Transport) - p.OutputProtocol = oprot - } - p.SeqId++ - if err = oprot.WriteMessageBegin("Sum", thrift.CALL, p.SeqId); err != nil { - return - } - args := AddServiceSumArgs{ - A: a, - B: b, - } - if err = args.Write(oprot); err != nil { - return - } - if err = oprot.WriteMessageEnd(); err != nil { - return - } - return oprot.Flush() -} - -func (p *AddServiceClient) recvSum() (value *SumReply, err error) { - iprot := p.InputProtocol - if iprot == nil { - iprot = p.ProtocolFactory.GetProtocol(p.Transport) - p.InputProtocol = iprot - } - method, mTypeId, seqId, err := iprot.ReadMessageBegin() - if err != nil { - return - } - if method != "Sum" { - err = thrift.NewTApplicationException(thrift.WRONG_METHOD_NAME, "Sum failed: wrong method name") - return - } - if p.SeqId != seqId { - err = thrift.NewTApplicationException(thrift.BAD_SEQUENCE_ID, "Sum failed: out of sequence response") - return - } - if mTypeId == thrift.EXCEPTION { - error0 := thrift.NewTApplicationException(thrift.UNKNOWN_APPLICATION_EXCEPTION, "Unknown Exception") - var error1 error - error1, err = error0.Read(iprot) - if err != nil { - return - } - if err = iprot.ReadMessageEnd(); err != nil { - return - } - err = error1 - return - } - if mTypeId != thrift.REPLY { - err = thrift.NewTApplicationException(thrift.INVALID_MESSAGE_TYPE_EXCEPTION, "Sum failed: invalid message type") - return - } - result := AddServiceSumResult{} - if err = result.Read(iprot); err != nil { - return - } - if err = iprot.ReadMessageEnd(); err != nil { - return - } - value = result.GetSuccess() - return -} - -// Parameters: -// - A -// - B -func (p *AddServiceClient) Concat(a string, b string) (r *ConcatReply, err error) { - if err = p.sendConcat(a, b); err != nil { - return - } - return p.recvConcat() -} - -func (p *AddServiceClient) sendConcat(a string, b string) (err error) { - oprot := p.OutputProtocol - if oprot == nil { - oprot = p.ProtocolFactory.GetProtocol(p.Transport) - p.OutputProtocol = oprot - } - p.SeqId++ - if err = oprot.WriteMessageBegin("Concat", thrift.CALL, p.SeqId); err != nil { - return - } - args := AddServiceConcatArgs{ - A: a, - B: b, - } - if err = args.Write(oprot); err != nil { - return - } - if err = oprot.WriteMessageEnd(); 
err != nil { - return - } - return oprot.Flush() -} - -func (p *AddServiceClient) recvConcat() (value *ConcatReply, err error) { - iprot := p.InputProtocol - if iprot == nil { - iprot = p.ProtocolFactory.GetProtocol(p.Transport) - p.InputProtocol = iprot - } - method, mTypeId, seqId, err := iprot.ReadMessageBegin() - if err != nil { - return - } - if method != "Concat" { - err = thrift.NewTApplicationException(thrift.WRONG_METHOD_NAME, "Concat failed: wrong method name") - return - } - if p.SeqId != seqId { - err = thrift.NewTApplicationException(thrift.BAD_SEQUENCE_ID, "Concat failed: out of sequence response") - return - } - if mTypeId == thrift.EXCEPTION { - error2 := thrift.NewTApplicationException(thrift.UNKNOWN_APPLICATION_EXCEPTION, "Unknown Exception") - var error3 error - error3, err = error2.Read(iprot) - if err != nil { - return - } - if err = iprot.ReadMessageEnd(); err != nil { - return - } - err = error3 - return - } - if mTypeId != thrift.REPLY { - err = thrift.NewTApplicationException(thrift.INVALID_MESSAGE_TYPE_EXCEPTION, "Concat failed: invalid message type") - return - } - result := AddServiceConcatResult{} - if err = result.Read(iprot); err != nil { - return - } - if err = iprot.ReadMessageEnd(); err != nil { - return - } - value = result.GetSuccess() - return -} - -type AddServiceProcessor struct { - processorMap map[string]thrift.TProcessorFunction - handler AddService -} - -func (p *AddServiceProcessor) AddToProcessorMap(key string, processor thrift.TProcessorFunction) { - p.processorMap[key] = processor -} - -func (p *AddServiceProcessor) GetProcessorFunction(key string) (processor thrift.TProcessorFunction, ok bool) { - processor, ok = p.processorMap[key] - return processor, ok -} - -func (p *AddServiceProcessor) ProcessorMap() map[string]thrift.TProcessorFunction { - return p.processorMap -} - -func NewAddServiceProcessor(handler AddService) *AddServiceProcessor { - - self4 := &AddServiceProcessor{handler: handler, processorMap: make(map[string]thrift.TProcessorFunction)} - self4.processorMap["Sum"] = &addServiceProcessorSum{handler: handler} - self4.processorMap["Concat"] = &addServiceProcessorConcat{handler: handler} - return self4 -} - -func (p *AddServiceProcessor) Process(iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) { - name, _, seqId, err := iprot.ReadMessageBegin() - if err != nil { - return false, err - } - if processor, ok := p.GetProcessorFunction(name); ok { - return processor.Process(seqId, iprot, oprot) - } - iprot.Skip(thrift.STRUCT) - iprot.ReadMessageEnd() - x5 := thrift.NewTApplicationException(thrift.UNKNOWN_METHOD, "Unknown function "+name) - oprot.WriteMessageBegin(name, thrift.EXCEPTION, seqId) - x5.Write(oprot) - oprot.WriteMessageEnd() - oprot.Flush() - return false, x5 - -} - -type addServiceProcessorSum struct { - handler AddService -} - -func (p *addServiceProcessorSum) Process(seqId int32, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) { - args := AddServiceSumArgs{} - if err = args.Read(iprot); err != nil { - iprot.ReadMessageEnd() - x := thrift.NewTApplicationException(thrift.PROTOCOL_ERROR, err.Error()) - oprot.WriteMessageBegin("Sum", thrift.EXCEPTION, seqId) - x.Write(oprot) - oprot.WriteMessageEnd() - oprot.Flush() - return false, err - } - - iprot.ReadMessageEnd() - result := AddServiceSumResult{} - var retval *SumReply - var err2 error - if retval, err2 = p.handler.Sum(args.A, args.B); err2 != nil { - x := thrift.NewTApplicationException(thrift.INTERNAL_ERROR, "Internal error 
processing Sum: "+err2.Error()) - oprot.WriteMessageBegin("Sum", thrift.EXCEPTION, seqId) - x.Write(oprot) - oprot.WriteMessageEnd() - oprot.Flush() - return true, err2 - } else { - result.Success = retval - } - if err2 = oprot.WriteMessageBegin("Sum", thrift.REPLY, seqId); err2 != nil { - err = err2 - } - if err2 = result.Write(oprot); err == nil && err2 != nil { - err = err2 - } - if err2 = oprot.WriteMessageEnd(); err == nil && err2 != nil { - err = err2 - } - if err2 = oprot.Flush(); err == nil && err2 != nil { - err = err2 - } - if err != nil { - return - } - return true, err -} - -type addServiceProcessorConcat struct { - handler AddService -} - -func (p *addServiceProcessorConcat) Process(seqId int32, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) { - args := AddServiceConcatArgs{} - if err = args.Read(iprot); err != nil { - iprot.ReadMessageEnd() - x := thrift.NewTApplicationException(thrift.PROTOCOL_ERROR, err.Error()) - oprot.WriteMessageBegin("Concat", thrift.EXCEPTION, seqId) - x.Write(oprot) - oprot.WriteMessageEnd() - oprot.Flush() - return false, err - } - - iprot.ReadMessageEnd() - result := AddServiceConcatResult{} - var retval *ConcatReply - var err2 error - if retval, err2 = p.handler.Concat(args.A, args.B); err2 != nil { - x := thrift.NewTApplicationException(thrift.INTERNAL_ERROR, "Internal error processing Concat: "+err2.Error()) - oprot.WriteMessageBegin("Concat", thrift.EXCEPTION, seqId) - x.Write(oprot) - oprot.WriteMessageEnd() - oprot.Flush() - return true, err2 - } else { - result.Success = retval - } - if err2 = oprot.WriteMessageBegin("Concat", thrift.REPLY, seqId); err2 != nil { - err = err2 - } - if err2 = result.Write(oprot); err == nil && err2 != nil { - err = err2 - } - if err2 = oprot.WriteMessageEnd(); err == nil && err2 != nil { - err = err2 - } - if err2 = oprot.Flush(); err == nil && err2 != nil { - err = err2 - } - if err != nil { - return - } - return true, err -} - -// HELPER FUNCTIONS AND STRUCTURES - -// Attributes: -// - A -// - B -type AddServiceSumArgs struct { - A int64 `thrift:"a,1" json:"a"` - B int64 `thrift:"b,2" json:"b"` -} - -func NewAddServiceSumArgs() *AddServiceSumArgs { - return &AddServiceSumArgs{} -} - -func (p *AddServiceSumArgs) GetA() int64 { - return p.A -} - -func (p *AddServiceSumArgs) GetB() int64 { - return p.B -} -func (p *AddServiceSumArgs) Read(iprot thrift.TProtocol) error { - if _, err := iprot.ReadStructBegin(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) - } - - for { - _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() - if err != nil { - return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) - } - if fieldTypeId == thrift.STOP { - break - } - switch fieldId { - case 1: - if err := p.readField1(iprot); err != nil { - return err - } - case 2: - if err := p.readField2(iprot); err != nil { - return err - } - default: - if err := iprot.Skip(fieldTypeId); err != nil { - return err - } - } - if err := iprot.ReadFieldEnd(); err != nil { - return err - } - } - if err := iprot.ReadStructEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) - } - return nil -} - -func (p *AddServiceSumArgs) readField1(iprot thrift.TProtocol) error { - if v, err := iprot.ReadI64(); err != nil { - return thrift.PrependError("error reading field 1: ", err) - } else { - p.A = v - } - return nil -} - -func (p *AddServiceSumArgs) readField2(iprot thrift.TProtocol) error { - if v, err := iprot.ReadI64(); err 
!= nil { - return thrift.PrependError("error reading field 2: ", err) - } else { - p.B = v - } - return nil -} - -func (p *AddServiceSumArgs) Write(oprot thrift.TProtocol) error { - if err := oprot.WriteStructBegin("Sum_args"); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) - } - if err := p.writeField1(oprot); err != nil { - return err - } - if err := p.writeField2(oprot); err != nil { - return err - } - if err := oprot.WriteFieldStop(); err != nil { - return thrift.PrependError("write field stop error: ", err) - } - if err := oprot.WriteStructEnd(); err != nil { - return thrift.PrependError("write struct stop error: ", err) - } - return nil -} - -func (p *AddServiceSumArgs) writeField1(oprot thrift.TProtocol) (err error) { - if err := oprot.WriteFieldBegin("a", thrift.I64, 1); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:a: ", p), err) - } - if err := oprot.WriteI64(int64(p.A)); err != nil { - return thrift.PrependError(fmt.Sprintf("%T.a (1) field write error: ", p), err) - } - if err := oprot.WriteFieldEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 1:a: ", p), err) - } - return err -} - -func (p *AddServiceSumArgs) writeField2(oprot thrift.TProtocol) (err error) { - if err := oprot.WriteFieldBegin("b", thrift.I64, 2); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:b: ", p), err) - } - if err := oprot.WriteI64(int64(p.B)); err != nil { - return thrift.PrependError(fmt.Sprintf("%T.b (2) field write error: ", p), err) - } - if err := oprot.WriteFieldEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 2:b: ", p), err) - } - return err -} - -func (p *AddServiceSumArgs) String() string { - if p == nil { - return "" - } - return fmt.Sprintf("AddServiceSumArgs(%+v)", *p) -} - -// Attributes: -// - Success -type AddServiceSumResult struct { - Success *SumReply `thrift:"success,0" json:"success,omitempty"` -} - -func NewAddServiceSumResult() *AddServiceSumResult { - return &AddServiceSumResult{} -} - -var AddServiceSumResult_Success_DEFAULT *SumReply - -func (p *AddServiceSumResult) GetSuccess() *SumReply { - if !p.IsSetSuccess() { - return AddServiceSumResult_Success_DEFAULT - } - return p.Success -} -func (p *AddServiceSumResult) IsSetSuccess() bool { - return p.Success != nil -} - -func (p *AddServiceSumResult) Read(iprot thrift.TProtocol) error { - if _, err := iprot.ReadStructBegin(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) - } - - for { - _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() - if err != nil { - return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) - } - if fieldTypeId == thrift.STOP { - break - } - switch fieldId { - case 0: - if err := p.readField0(iprot); err != nil { - return err - } - default: - if err := iprot.Skip(fieldTypeId); err != nil { - return err - } - } - if err := iprot.ReadFieldEnd(); err != nil { - return err - } - } - if err := iprot.ReadStructEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) - } - return nil -} - -func (p *AddServiceSumResult) readField0(iprot thrift.TProtocol) error { - p.Success = &SumReply{} - if err := p.Success.Read(iprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Success), err) - } - return nil -} - -func (p *AddServiceSumResult) Write(oprot thrift.TProtocol) error { - if 
err := oprot.WriteStructBegin("Sum_result"); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) - } - if err := p.writeField0(oprot); err != nil { - return err - } - if err := oprot.WriteFieldStop(); err != nil { - return thrift.PrependError("write field stop error: ", err) - } - if err := oprot.WriteStructEnd(); err != nil { - return thrift.PrependError("write struct stop error: ", err) - } - return nil -} - -func (p *AddServiceSumResult) writeField0(oprot thrift.TProtocol) (err error) { - if p.IsSetSuccess() { - if err := oprot.WriteFieldBegin("success", thrift.STRUCT, 0); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 0:success: ", p), err) - } - if err := p.Success.Write(oprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Success), err) - } - if err := oprot.WriteFieldEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 0:success: ", p), err) - } - } - return err -} - -func (p *AddServiceSumResult) String() string { - if p == nil { - return "" - } - return fmt.Sprintf("AddServiceSumResult(%+v)", *p) -} - -// Attributes: -// - A -// - B -type AddServiceConcatArgs struct { - A string `thrift:"a,1" json:"a"` - B string `thrift:"b,2" json:"b"` -} - -func NewAddServiceConcatArgs() *AddServiceConcatArgs { - return &AddServiceConcatArgs{} -} - -func (p *AddServiceConcatArgs) GetA() string { - return p.A -} - -func (p *AddServiceConcatArgs) GetB() string { - return p.B -} -func (p *AddServiceConcatArgs) Read(iprot thrift.TProtocol) error { - if _, err := iprot.ReadStructBegin(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) - } - - for { - _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() - if err != nil { - return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) - } - if fieldTypeId == thrift.STOP { - break - } - switch fieldId { - case 1: - if err := p.readField1(iprot); err != nil { - return err - } - case 2: - if err := p.readField2(iprot); err != nil { - return err - } - default: - if err := iprot.Skip(fieldTypeId); err != nil { - return err - } - } - if err := iprot.ReadFieldEnd(); err != nil { - return err - } - } - if err := iprot.ReadStructEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) - } - return nil -} - -func (p *AddServiceConcatArgs) readField1(iprot thrift.TProtocol) error { - if v, err := iprot.ReadString(); err != nil { - return thrift.PrependError("error reading field 1: ", err) - } else { - p.A = v - } - return nil -} - -func (p *AddServiceConcatArgs) readField2(iprot thrift.TProtocol) error { - if v, err := iprot.ReadString(); err != nil { - return thrift.PrependError("error reading field 2: ", err) - } else { - p.B = v - } - return nil -} - -func (p *AddServiceConcatArgs) Write(oprot thrift.TProtocol) error { - if err := oprot.WriteStructBegin("Concat_args"); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) - } - if err := p.writeField1(oprot); err != nil { - return err - } - if err := p.writeField2(oprot); err != nil { - return err - } - if err := oprot.WriteFieldStop(); err != nil { - return thrift.PrependError("write field stop error: ", err) - } - if err := oprot.WriteStructEnd(); err != nil { - return thrift.PrependError("write struct stop error: ", err) - } - return nil -} - -func (p *AddServiceConcatArgs) writeField1(oprot thrift.TProtocol) (err 
error) { - if err := oprot.WriteFieldBegin("a", thrift.STRING, 1); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:a: ", p), err) - } - if err := oprot.WriteString(string(p.A)); err != nil { - return thrift.PrependError(fmt.Sprintf("%T.a (1) field write error: ", p), err) - } - if err := oprot.WriteFieldEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 1:a: ", p), err) - } - return err -} - -func (p *AddServiceConcatArgs) writeField2(oprot thrift.TProtocol) (err error) { - if err := oprot.WriteFieldBegin("b", thrift.STRING, 2); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:b: ", p), err) - } - if err := oprot.WriteString(string(p.B)); err != nil { - return thrift.PrependError(fmt.Sprintf("%T.b (2) field write error: ", p), err) - } - if err := oprot.WriteFieldEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 2:b: ", p), err) - } - return err -} - -func (p *AddServiceConcatArgs) String() string { - if p == nil { - return "" - } - return fmt.Sprintf("AddServiceConcatArgs(%+v)", *p) -} - -// Attributes: -// - Success -type AddServiceConcatResult struct { - Success *ConcatReply `thrift:"success,0" json:"success,omitempty"` -} - -func NewAddServiceConcatResult() *AddServiceConcatResult { - return &AddServiceConcatResult{} -} - -var AddServiceConcatResult_Success_DEFAULT *ConcatReply - -func (p *AddServiceConcatResult) GetSuccess() *ConcatReply { - if !p.IsSetSuccess() { - return AddServiceConcatResult_Success_DEFAULT - } - return p.Success -} -func (p *AddServiceConcatResult) IsSetSuccess() bool { - return p.Success != nil -} - -func (p *AddServiceConcatResult) Read(iprot thrift.TProtocol) error { - if _, err := iprot.ReadStructBegin(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) - } - - for { - _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() - if err != nil { - return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) - } - if fieldTypeId == thrift.STOP { - break - } - switch fieldId { - case 0: - if err := p.readField0(iprot); err != nil { - return err - } - default: - if err := iprot.Skip(fieldTypeId); err != nil { - return err - } - } - if err := iprot.ReadFieldEnd(); err != nil { - return err - } - } - if err := iprot.ReadStructEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) - } - return nil -} - -func (p *AddServiceConcatResult) readField0(iprot thrift.TProtocol) error { - p.Success = &ConcatReply{} - if err := p.Success.Read(iprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Success), err) - } - return nil -} - -func (p *AddServiceConcatResult) Write(oprot thrift.TProtocol) error { - if err := oprot.WriteStructBegin("Concat_result"); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) - } - if err := p.writeField0(oprot); err != nil { - return err - } - if err := oprot.WriteFieldStop(); err != nil { - return thrift.PrependError("write field stop error: ", err) - } - if err := oprot.WriteStructEnd(); err != nil { - return thrift.PrependError("write struct stop error: ", err) - } - return nil -} - -func (p *AddServiceConcatResult) writeField0(oprot thrift.TProtocol) (err error) { - if p.IsSetSuccess() { - if err := oprot.WriteFieldBegin("success", thrift.STRUCT, 0); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field 
begin error 0:success: ", p), err) - } - if err := p.Success.Write(oprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Success), err) - } - if err := oprot.WriteFieldEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 0:success: ", p), err) - } - } - return err -} - -func (p *AddServiceConcatResult) String() string { - if p == nil { - return "" - } - return fmt.Sprintf("AddServiceConcatResult(%+v)", *p) -} diff --git a/vendor/github.com/go-kit/kit/examples/addsvc/thrift/gen-go/addsvc/constants.go b/vendor/github.com/go-kit/kit/examples/addsvc/thrift/gen-go/addsvc/constants.go deleted file mode 100644 index 2f0079a..0000000 --- a/vendor/github.com/go-kit/kit/examples/addsvc/thrift/gen-go/addsvc/constants.go +++ /dev/null @@ -1,18 +0,0 @@ -// Autogenerated by Thrift Compiler (0.9.3) -// DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING - -package addsvc - -import ( - "bytes" - "fmt" - "github.com/apache/thrift/lib/go/thrift" -) - -// (needed to ensure safety because of naive import list construction.) -var _ = thrift.ZERO -var _ = fmt.Printf -var _ = bytes.Equal - -func init() { -} diff --git a/vendor/github.com/go-kit/kit/examples/addsvc/thrift/gen-go/addsvc/ttypes.go b/vendor/github.com/go-kit/kit/examples/addsvc/thrift/gen-go/addsvc/ttypes.go deleted file mode 100644 index 2fcbd55..0000000 --- a/vendor/github.com/go-kit/kit/examples/addsvc/thrift/gen-go/addsvc/ttypes.go +++ /dev/null @@ -1,269 +0,0 @@ -// Autogenerated by Thrift Compiler (0.9.3) -// DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING - -package addsvc - -import ( - "bytes" - "fmt" - "github.com/apache/thrift/lib/go/thrift" -) - -// (needed to ensure safety because of naive import list construction.) 
-var _ = thrift.ZERO -var _ = fmt.Printf -var _ = bytes.Equal - -var GoUnusedProtection__ int - -// Attributes: -// - Value -// - Err -type SumReply struct { - Value int64 `thrift:"value,1" json:"value"` - Err string `thrift:"err,2" json:"err"` -} - -func NewSumReply() *SumReply { - return &SumReply{} -} - -func (p *SumReply) GetValue() int64 { - return p.Value -} - -func (p *SumReply) GetErr() string { - return p.Err -} -func (p *SumReply) Read(iprot thrift.TProtocol) error { - if _, err := iprot.ReadStructBegin(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) - } - - for { - _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() - if err != nil { - return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) - } - if fieldTypeId == thrift.STOP { - break - } - switch fieldId { - case 1: - if err := p.readField1(iprot); err != nil { - return err - } - case 2: - if err := p.readField2(iprot); err != nil { - return err - } - default: - if err := iprot.Skip(fieldTypeId); err != nil { - return err - } - } - if err := iprot.ReadFieldEnd(); err != nil { - return err - } - } - if err := iprot.ReadStructEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) - } - return nil -} - -func (p *SumReply) readField1(iprot thrift.TProtocol) error { - if v, err := iprot.ReadI64(); err != nil { - return thrift.PrependError("error reading field 1: ", err) - } else { - p.Value = v - } - return nil -} - -func (p *SumReply) readField2(iprot thrift.TProtocol) error { - if v, err := iprot.ReadString(); err != nil { - return thrift.PrependError("error reading field 2: ", err) - } else { - p.Err = v - } - return nil -} - -func (p *SumReply) Write(oprot thrift.TProtocol) error { - if err := oprot.WriteStructBegin("SumReply"); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) - } - if err := p.writeField1(oprot); err != nil { - return err - } - if err := p.writeField2(oprot); err != nil { - return err - } - if err := oprot.WriteFieldStop(); err != nil { - return thrift.PrependError("write field stop error: ", err) - } - if err := oprot.WriteStructEnd(); err != nil { - return thrift.PrependError("write struct stop error: ", err) - } - return nil -} - -func (p *SumReply) writeField1(oprot thrift.TProtocol) (err error) { - if err := oprot.WriteFieldBegin("value", thrift.I64, 1); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:value: ", p), err) - } - if err := oprot.WriteI64(int64(p.Value)); err != nil { - return thrift.PrependError(fmt.Sprintf("%T.value (1) field write error: ", p), err) - } - if err := oprot.WriteFieldEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 1:value: ", p), err) - } - return err -} - -func (p *SumReply) writeField2(oprot thrift.TProtocol) (err error) { - if err := oprot.WriteFieldBegin("err", thrift.STRING, 2); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:err: ", p), err) - } - if err := oprot.WriteString(string(p.Err)); err != nil { - return thrift.PrependError(fmt.Sprintf("%T.err (2) field write error: ", p), err) - } - if err := oprot.WriteFieldEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 2:err: ", p), err) - } - return err -} - -func (p *SumReply) String() string { - if p == nil { - return "" - } - return fmt.Sprintf("SumReply(%+v)", *p) -} - -// Attributes: -// - Value -// - Err -type 
ConcatReply struct { - Value string `thrift:"value,1" json:"value"` - Err string `thrift:"err,2" json:"err"` -} - -func NewConcatReply() *ConcatReply { - return &ConcatReply{} -} - -func (p *ConcatReply) GetValue() string { - return p.Value -} - -func (p *ConcatReply) GetErr() string { - return p.Err -} -func (p *ConcatReply) Read(iprot thrift.TProtocol) error { - if _, err := iprot.ReadStructBegin(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) - } - - for { - _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() - if err != nil { - return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) - } - if fieldTypeId == thrift.STOP { - break - } - switch fieldId { - case 1: - if err := p.readField1(iprot); err != nil { - return err - } - case 2: - if err := p.readField2(iprot); err != nil { - return err - } - default: - if err := iprot.Skip(fieldTypeId); err != nil { - return err - } - } - if err := iprot.ReadFieldEnd(); err != nil { - return err - } - } - if err := iprot.ReadStructEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) - } - return nil -} - -func (p *ConcatReply) readField1(iprot thrift.TProtocol) error { - if v, err := iprot.ReadString(); err != nil { - return thrift.PrependError("error reading field 1: ", err) - } else { - p.Value = v - } - return nil -} - -func (p *ConcatReply) readField2(iprot thrift.TProtocol) error { - if v, err := iprot.ReadString(); err != nil { - return thrift.PrependError("error reading field 2: ", err) - } else { - p.Err = v - } - return nil -} - -func (p *ConcatReply) Write(oprot thrift.TProtocol) error { - if err := oprot.WriteStructBegin("ConcatReply"); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) - } - if err := p.writeField1(oprot); err != nil { - return err - } - if err := p.writeField2(oprot); err != nil { - return err - } - if err := oprot.WriteFieldStop(); err != nil { - return thrift.PrependError("write field stop error: ", err) - } - if err := oprot.WriteStructEnd(); err != nil { - return thrift.PrependError("write struct stop error: ", err) - } - return nil -} - -func (p *ConcatReply) writeField1(oprot thrift.TProtocol) (err error) { - if err := oprot.WriteFieldBegin("value", thrift.STRING, 1); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:value: ", p), err) - } - if err := oprot.WriteString(string(p.Value)); err != nil { - return thrift.PrependError(fmt.Sprintf("%T.value (1) field write error: ", p), err) - } - if err := oprot.WriteFieldEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 1:value: ", p), err) - } - return err -} - -func (p *ConcatReply) writeField2(oprot thrift.TProtocol) (err error) { - if err := oprot.WriteFieldBegin("err", thrift.STRING, 2); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:err: ", p), err) - } - if err := oprot.WriteString(string(p.Err)); err != nil { - return thrift.PrependError(fmt.Sprintf("%T.err (2) field write error: ", p), err) - } - if err := oprot.WriteFieldEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 2:err: ", p), err) - } - return err -} - -func (p *ConcatReply) String() string { - if p == nil { - return "" - } - return fmt.Sprintf("ConcatReply(%+v)", *p) -} diff --git a/vendor/github.com/go-kit/kit/examples/addsvc/transport_grpc.go 
b/vendor/github.com/go-kit/kit/examples/addsvc/transport_grpc.go deleted file mode 100644 index 6ad30cb..0000000 --- a/vendor/github.com/go-kit/kit/examples/addsvc/transport_grpc.go +++ /dev/null @@ -1,118 +0,0 @@ -package addsvc - -// This file provides server-side bindings for the gRPC transport. -// It utilizes the transport/grpc.Server. - -import ( - stdopentracing "github.com/opentracing/opentracing-go" - "golang.org/x/net/context" - - "github.com/go-kit/kit/examples/addsvc/pb" - "github.com/go-kit/kit/log" - "github.com/go-kit/kit/tracing/opentracing" - grpctransport "github.com/go-kit/kit/transport/grpc" -) - -// MakeGRPCServer makes a set of endpoints available as a gRPC AddServer. -func MakeGRPCServer(ctx context.Context, endpoints Endpoints, tracer stdopentracing.Tracer, logger log.Logger) pb.AddServer { - options := []grpctransport.ServerOption{ - grpctransport.ServerErrorLogger(logger), - } - return &grpcServer{ - sum: grpctransport.NewServer( - ctx, - endpoints.SumEndpoint, - DecodeGRPCSumRequest, - EncodeGRPCSumResponse, - append(options, grpctransport.ServerBefore(opentracing.FromGRPCRequest(tracer, "Sum", logger)))..., - ), - concat: grpctransport.NewServer( - ctx, - endpoints.ConcatEndpoint, - DecodeGRPCConcatRequest, - EncodeGRPCConcatResponse, - append(options, grpctransport.ServerBefore(opentracing.FromGRPCRequest(tracer, "Concat", logger)))..., - ), - } -} - -type grpcServer struct { - sum grpctransport.Handler - concat grpctransport.Handler -} - -func (s *grpcServer) Sum(ctx context.Context, req *pb.SumRequest) (*pb.SumReply, error) { - _, rep, err := s.sum.ServeGRPC(ctx, req) - if err != nil { - return nil, err - } - return rep.(*pb.SumReply), nil -} - -func (s *grpcServer) Concat(ctx context.Context, req *pb.ConcatRequest) (*pb.ConcatReply, error) { - _, rep, err := s.concat.ServeGRPC(ctx, req) - if err != nil { - return nil, err - } - return rep.(*pb.ConcatReply), nil -} - -// DecodeGRPCSumRequest is a transport/grpc.DecodeRequestFunc that converts a -// gRPC sum request to a user-domain sum request. Primarily useful in a server. -func DecodeGRPCSumRequest(_ context.Context, grpcReq interface{}) (interface{}, error) { - req := grpcReq.(*pb.SumRequest) - return sumRequest{A: int(req.A), B: int(req.B)}, nil -} - -// DecodeGRPCConcatRequest is a transport/grpc.DecodeRequestFunc that converts a -// gRPC concat request to a user-domain concat request. Primarily useful in a -// server. -func DecodeGRPCConcatRequest(_ context.Context, grpcReq interface{}) (interface{}, error) { - req := grpcReq.(*pb.ConcatRequest) - return concatRequest{A: req.A, B: req.B}, nil -} - -// DecodeGRPCSumResponse is a transport/grpc.DecodeResponseFunc that converts a -// gRPC sum reply to a user-domain sum response. Primarily useful in a client. -func DecodeGRPCSumResponse(_ context.Context, grpcReply interface{}) (interface{}, error) { - reply := grpcReply.(*pb.SumReply) - return sumResponse{V: int(reply.V), Err: str2err(reply.Err)}, nil -} - -// DecodeGRPCConcatResponse is a transport/grpc.DecodeResponseFunc that converts -// a gRPC concat reply to a user-domain concat response. Primarily useful in a -// client. -func DecodeGRPCConcatResponse(_ context.Context, grpcReply interface{}) (interface{}, error) { - reply := grpcReply.(*pb.ConcatReply) - return concatResponse{V: reply.V, Err: str2err(reply.Err)}, nil -} - -// EncodeGRPCSumResponse is a transport/grpc.EncodeResponseFunc that converts a -// user-domain sum response to a gRPC sum reply. Primarily useful in a server. 
-func EncodeGRPCSumResponse(_ context.Context, response interface{}) (interface{}, error) { - resp := response.(sumResponse) - return &pb.SumReply{V: int64(resp.V), Err: err2str(resp.Err)}, nil -} - -// EncodeGRPCConcatResponse is a transport/grpc.EncodeResponseFunc that converts -// a user-domain concat response to a gRPC concat reply. Primarily useful in a -// server. -func EncodeGRPCConcatResponse(_ context.Context, response interface{}) (interface{}, error) { - resp := response.(concatResponse) - return &pb.ConcatReply{V: resp.V, Err: err2str(resp.Err)}, nil -} - -// EncodeGRPCSumRequest is a transport/grpc.EncodeRequestFunc that converts a -// user-domain sum request to a gRPC sum request. Primarily useful in a client. -func EncodeGRPCSumRequest(_ context.Context, request interface{}) (interface{}, error) { - req := request.(sumRequest) - return &pb.SumRequest{A: int64(req.A), B: int64(req.B)}, nil -} - -// EncodeGRPCConcatRequest is a transport/grpc.EncodeRequestFunc that converts a -// user-domain concat request to a gRPC concat request. Primarily useful in a -// client. -func EncodeGRPCConcatRequest(_ context.Context, request interface{}) (interface{}, error) { - req := request.(concatRequest) - return &pb.ConcatRequest{A: req.A, B: req.B}, nil -} diff --git a/vendor/github.com/go-kit/kit/examples/addsvc/transport_http.go b/vendor/github.com/go-kit/kit/examples/addsvc/transport_http.go deleted file mode 100644 index e2d8f6d..0000000 --- a/vendor/github.com/go-kit/kit/examples/addsvc/transport_http.go +++ /dev/null @@ -1,141 +0,0 @@ -package addsvc - -// This file provides server-side bindings for the HTTP transport. -// It utilizes the transport/http.Server. - -import ( - "bytes" - "encoding/json" - "errors" - "io/ioutil" - "net/http" - - stdopentracing "github.com/opentracing/opentracing-go" - "golang.org/x/net/context" - - "github.com/go-kit/kit/log" - "github.com/go-kit/kit/tracing/opentracing" - httptransport "github.com/go-kit/kit/transport/http" -) - -// MakeHTTPHandler returns a handler that makes a set of endpoints available -// on predefined paths. 
-func MakeHTTPHandler(ctx context.Context, endpoints Endpoints, tracer stdopentracing.Tracer, logger log.Logger) http.Handler { - options := []httptransport.ServerOption{ - httptransport.ServerErrorEncoder(errorEncoder), - httptransport.ServerErrorLogger(logger), - } - m := http.NewServeMux() - m.Handle("/sum", httptransport.NewServer( - ctx, - endpoints.SumEndpoint, - DecodeHTTPSumRequest, - EncodeHTTPGenericResponse, - append(options, httptransport.ServerBefore(opentracing.FromHTTPRequest(tracer, "Sum", logger)))..., - )) - m.Handle("/concat", httptransport.NewServer( - ctx, - endpoints.ConcatEndpoint, - DecodeHTTPConcatRequest, - EncodeHTTPGenericResponse, - append(options, httptransport.ServerBefore(opentracing.FromHTTPRequest(tracer, "Concat", logger)))..., - )) - return m -} - -func errorEncoder(_ context.Context, err error, w http.ResponseWriter) { - code := http.StatusInternalServerError - msg := err.Error() - - if e, ok := err.(httptransport.Error); ok { - msg = e.Err.Error() - switch e.Domain { - case httptransport.DomainDecode: - code = http.StatusBadRequest - - case httptransport.DomainDo: - switch e.Err { - case ErrTwoZeroes, ErrMaxSizeExceeded, ErrIntOverflow: - code = http.StatusBadRequest - } - } - } - - w.WriteHeader(code) - json.NewEncoder(w).Encode(errorWrapper{Error: msg}) -} - -func errorDecoder(r *http.Response) error { - var w errorWrapper - if err := json.NewDecoder(r.Body).Decode(&w); err != nil { - return err - } - return errors.New(w.Error) -} - -type errorWrapper struct { - Error string `json:"error"` -} - -// DecodeHTTPSumRequest is a transport/http.DecodeRequestFunc that decodes a -// JSON-encoded sum request from the HTTP request body. Primarily useful in a -// server. -func DecodeHTTPSumRequest(_ context.Context, r *http.Request) (interface{}, error) { - var req sumRequest - err := json.NewDecoder(r.Body).Decode(&req) - return req, err -} - -// DecodeHTTPConcatRequest is a transport/http.DecodeRequestFunc that decodes a -// JSON-encoded concat request from the HTTP request body. Primarily useful in a -// server. -func DecodeHTTPConcatRequest(_ context.Context, r *http.Request) (interface{}, error) { - var req concatRequest - err := json.NewDecoder(r.Body).Decode(&req) - return req, err -} - -// DecodeHTTPSumResponse is a transport/http.DecodeResponseFunc that decodes a -// JSON-encoded sum response from the HTTP response body. If the response has a -// non-200 status code, we will interpret that as an error and attempt to decode -// the specific error message from the response body. Primarily useful in a -// client. -func DecodeHTTPSumResponse(_ context.Context, r *http.Response) (interface{}, error) { - if r.StatusCode != http.StatusOK { - return nil, errorDecoder(r) - } - var resp sumResponse - err := json.NewDecoder(r.Body).Decode(&resp) - return resp, err -} - -// DecodeHTTPConcatResponse is a transport/http.DecodeResponseFunc that decodes -// a JSON-encoded concat response from the HTTP response body. If the response -// has a non-200 status code, we will interpret that as an error and attempt to -// decode the specific error message from the response body. Primarily useful in -// a client. 
-func DecodeHTTPConcatResponse(_ context.Context, r *http.Response) (interface{}, error) { - if r.StatusCode != http.StatusOK { - return nil, errorDecoder(r) - } - var resp concatResponse - err := json.NewDecoder(r.Body).Decode(&resp) - return resp, err -} - -// EncodeHTTPGenericRequest is a transport/http.EncodeRequestFunc that -// JSON-encodes any request to the request body. Primarily useful in a client. -func EncodeHTTPGenericRequest(_ context.Context, r *http.Request, request interface{}) error { - var buf bytes.Buffer - if err := json.NewEncoder(&buf).Encode(request); err != nil { - return err - } - r.Body = ioutil.NopCloser(&buf) - return nil -} - -// EncodeHTTPGenericResponse is a transport/http.EncodeResponseFunc that encodes -// the response as JSON to the response writer. Primarily useful in a server. -func EncodeHTTPGenericResponse(_ context.Context, w http.ResponseWriter, response interface{}) error { - return json.NewEncoder(w).Encode(response) -} diff --git a/vendor/github.com/go-kit/kit/examples/addsvc/transport_thrift.go b/vendor/github.com/go-kit/kit/examples/addsvc/transport_thrift.go deleted file mode 100644 index 23b1f1c..0000000 --- a/vendor/github.com/go-kit/kit/examples/addsvc/transport_thrift.go +++ /dev/null @@ -1,73 +0,0 @@ -package addsvc - -// This file provides server-side bindings for the Thrift transport. -// -// This file also provides endpoint constructors that utilize a Thrift client, -// for use in client packages, because package transport/thrift doesn't exist -// yet. See https://github.com/go-kit/kit/issues/184. - -import ( - "golang.org/x/net/context" - - "github.com/go-kit/kit/endpoint" - thriftadd "github.com/go-kit/kit/examples/addsvc/thrift/gen-go/addsvc" -) - -// MakeThriftHandler makes a set of endpoints available as a Thrift service. -func MakeThriftHandler(ctx context.Context, e Endpoints) thriftadd.AddService { - return &thriftServer{ - ctx: ctx, - sum: e.SumEndpoint, - concat: e.ConcatEndpoint, - } -} - -type thriftServer struct { - ctx context.Context - sum endpoint.Endpoint - concat endpoint.Endpoint -} - -func (s *thriftServer) Sum(a int64, b int64) (*thriftadd.SumReply, error) { - request := sumRequest{A: int(a), B: int(b)} - response, err := s.sum(s.ctx, request) - if err != nil { - return nil, err - } - resp := response.(sumResponse) - return &thriftadd.SumReply{Value: int64(resp.V), Err: err2str(resp.Err)}, nil -} - -func (s *thriftServer) Concat(a string, b string) (*thriftadd.ConcatReply, error) { - request := concatRequest{A: a, B: b} - response, err := s.concat(s.ctx, request) - if err != nil { - return nil, err - } - resp := response.(concatResponse) - return &thriftadd.ConcatReply{Value: resp.V, Err: err2str(resp.Err)}, nil -} - -// MakeThriftSumEndpoint returns an endpoint that invokes the passed Thrift client. -// Useful only in clients, and only until a proper transport/thrift.Client exists. -func MakeThriftSumEndpoint(client *thriftadd.AddServiceClient) endpoint.Endpoint { - return func(ctx context.Context, request interface{}) (interface{}, error) { - req := request.(sumRequest) - reply, err := client.Sum(int64(req.A), int64(req.B)) - if err == ErrIntOverflow { - return nil, err // special case; see comment on ErrIntOverflow - } - return sumResponse{V: int(reply.Value), Err: err}, nil - } -} - -// MakeThriftConcatEndpoint returns an endpoint that invokes the passed Thrift -// client. Useful only in clients, and only until a proper -// transport/thrift.Client exists. 
-func MakeThriftConcatEndpoint(client *thriftadd.AddServiceClient) endpoint.Endpoint { - return func(ctx context.Context, request interface{}) (interface{}, error) { - req := request.(concatRequest) - reply, err := client.Concat(req.A, req.B) - return concatResponse{V: reply.Value, Err: err}, nil - } -} diff --git a/vendor/github.com/go-kit/kit/examples/apigateway/main.go b/vendor/github.com/go-kit/kit/examples/apigateway/main.go deleted file mode 100644 index 01367ec..0000000 --- a/vendor/github.com/go-kit/kit/examples/apigateway/main.go +++ /dev/null @@ -1,282 +0,0 @@ -package main - -import ( - "bytes" - "encoding/json" - "flag" - "fmt" - "io" - "io/ioutil" - "net/http" - "net/url" - "os" - "os/signal" - "strings" - "syscall" - "time" - - "github.com/gorilla/mux" - "github.com/hashicorp/consul/api" - stdopentracing "github.com/opentracing/opentracing-go" - "golang.org/x/net/context" - - "github.com/go-kit/kit/endpoint" - "github.com/go-kit/kit/examples/addsvc" - addsvcgrpcclient "github.com/go-kit/kit/examples/addsvc/client/grpc" - "github.com/go-kit/kit/log" - "github.com/go-kit/kit/sd" - consulsd "github.com/go-kit/kit/sd/consul" - "github.com/go-kit/kit/sd/lb" - httptransport "github.com/go-kit/kit/transport/http" - "google.golang.org/grpc" -) - -func main() { - var ( - httpAddr = flag.String("http.addr", ":8000", "Address for HTTP (JSON) server") - consulAddr = flag.String("consul.addr", "", "Consul agent address") - retryMax = flag.Int("retry.max", 3, "per-request retries to different instances") - retryTimeout = flag.Duration("retry.timeout", 500*time.Millisecond, "per-request timeout, including retries") - ) - flag.Parse() - - // Logging domain. - var logger log.Logger - { - logger = log.NewLogfmtLogger(os.Stderr) - logger = log.NewContext(logger).With("ts", log.DefaultTimestampUTC) - logger = log.NewContext(logger).With("caller", log.DefaultCaller) - } - - // Service discovery domain. In this example we use Consul. - var client consulsd.Client - { - consulConfig := api.DefaultConfig() - if len(*consulAddr) > 0 { - consulConfig.Address = *consulAddr - } - consulClient, err := api.NewClient(consulConfig) - if err != nil { - logger.Log("err", err) - os.Exit(1) - } - client = consulsd.NewClient(consulClient) - } - - // Transport domain. - tracer := stdopentracing.GlobalTracer() // no-op - ctx := context.Background() - r := mux.NewRouter() - - // Now we begin installing the routes. Each route corresponds to a single - // method: sum, concat, uppercase, and count. - - // addsvc routes. - { - // Each method gets constructed with a factory. Factories take an - // instance string, and return a specific endpoint. In the factory we - // dial the instance string we get from Consul, and then leverage an - // addsvc client package to construct a complete service. We can then - // leverage the addsvc.Make{Sum,Concat}Endpoint constructors to convert - // the complete service to specific endpoint. 
- - var ( - tags = []string{} - passingOnly = true - endpoints = addsvc.Endpoints{} - ) - { - factory := addsvcFactory(addsvc.MakeSumEndpoint, tracer, logger) - subscriber := consulsd.NewSubscriber(client, factory, logger, "addsvc", tags, passingOnly) - balancer := lb.NewRoundRobin(subscriber) - retry := lb.Retry(*retryMax, *retryTimeout, balancer) - endpoints.SumEndpoint = retry - } - { - factory := addsvcFactory(addsvc.MakeConcatEndpoint, tracer, logger) - subscriber := consulsd.NewSubscriber(client, factory, logger, "addsvc", tags, passingOnly) - balancer := lb.NewRoundRobin(subscriber) - retry := lb.Retry(*retryMax, *retryTimeout, balancer) - endpoints.ConcatEndpoint = retry - } - - // Here we leverage the fact that addsvc comes with a constructor for an - // HTTP handler, and just install it under a particular path prefix in - // our router. - - r.PathPrefix("addsvc/").Handler(addsvc.MakeHTTPHandler(ctx, endpoints, tracer, logger)) - } - - // stringsvc routes. - { - // addsvc had lots of nice importable Go packages we could leverage. - // With stringsvc we are not so fortunate, it just has some endpoints - // that we assume will exist. So we have to write that logic here. This - // is by design, so you can see two totally different methods of - // proxying to a remote service. - - var ( - tags = []string{} - passingOnly = true - uppercase endpoint.Endpoint - count endpoint.Endpoint - ) - { - factory := stringsvcFactory(ctx, "GET", "/uppercase") - subscriber := consulsd.NewSubscriber(client, factory, logger, "stringsvc", tags, passingOnly) - balancer := lb.NewRoundRobin(subscriber) - retry := lb.Retry(*retryMax, *retryTimeout, balancer) - uppercase = retry - } - { - factory := stringsvcFactory(ctx, "GET", "/count") - subscriber := consulsd.NewSubscriber(client, factory, logger, "stringsvc", tags, passingOnly) - balancer := lb.NewRoundRobin(subscriber) - retry := lb.Retry(*retryMax, *retryTimeout, balancer) - count = retry - } - - // We can use the transport/http.Server to act as our handler, all we - // have to do provide it with the encode and decode functions for our - // stringsvc methods. - - r.Handle("/stringsvc/uppercase", httptransport.NewServer(ctx, uppercase, decodeUppercaseRequest, encodeJSONResponse)) - r.Handle("/stringsvc/count", httptransport.NewServer(ctx, count, decodeCountRequest, encodeJSONResponse)) - } - - // Interrupt handler. - errc := make(chan error) - go func() { - c := make(chan os.Signal) - signal.Notify(c, syscall.SIGINT, syscall.SIGTERM) - errc <- fmt.Errorf("%s", <-c) - }() - - // HTTP transport. - go func() { - logger.Log("transport", "HTTP", "addr", *httpAddr) - errc <- http.ListenAndServe(*httpAddr, r) - }() - - // Run! - logger.Log("exit", <-errc) -} - -func addsvcFactory(makeEndpoint func(addsvc.Service) endpoint.Endpoint, tracer stdopentracing.Tracer, logger log.Logger) sd.Factory { - return func(instance string) (endpoint.Endpoint, io.Closer, error) { - // We could just as easily use the HTTP or Thrift client package to make - // the connection to addsvc. We've chosen gRPC arbitrarily. Note that - // the transport is an implementation detail: it doesn't leak out of - // this function. Nice! - - conn, err := grpc.Dial(instance, grpc.WithInsecure()) - if err != nil { - return nil, nil, err - } - service := addsvcgrpcclient.New(conn, tracer, logger) - endpoint := makeEndpoint(service) - - // Notice that the addsvc gRPC client converts the connection to a - // complete addsvc, and we just throw away everything except the method - // we're interested in. 
A smarter factory would mux multiple methods - // over the same connection. But that would require more work to manage - // the returned io.Closer, e.g. reference counting. Since this is for - // the purposes of demonstration, we'll just keep it simple. - - return endpoint, conn, nil - } -} - -func stringsvcFactory(ctx context.Context, method, path string) sd.Factory { - return func(instance string) (endpoint.Endpoint, io.Closer, error) { - if !strings.HasPrefix(instance, "http") { - instance = "http://" + instance - } - tgt, err := url.Parse(instance) - if err != nil { - return nil, nil, err - } - tgt.Path = path - - // Since stringsvc doesn't have any kind of package we can import, or - // any formal spec, we are forced to just assert where the endpoints - // live, and write our own code to encode and decode requests and - // responses. Ideally, if you write the service, you will want to - // provide stronger guarantees to your clients. - - var ( - enc httptransport.EncodeRequestFunc - dec httptransport.DecodeResponseFunc - ) - switch path { - case "/uppercase": - enc, dec = encodeJSONRequest, decodeUppercaseResponse - case "/count": - enc, dec = encodeJSONRequest, decodeCountResponse - default: - return nil, nil, fmt.Errorf("unknown stringsvc path %q", path) - } - - return httptransport.NewClient(method, tgt, enc, dec).Endpoint(), nil, nil - } -} - -func encodeJSONRequest(_ context.Context, req *http.Request, request interface{}) error { - // Both uppercase and count requests are encoded in the same way: - // simple JSON serialization to the request body. - var buf bytes.Buffer - if err := json.NewEncoder(&buf).Encode(request); err != nil { - return err - } - req.Body = ioutil.NopCloser(&buf) - return nil -} - -func encodeJSONResponse(_ context.Context, w http.ResponseWriter, response interface{}) error { - w.Header().Set("Content-Type", "application/json; charset=utf-8") - return json.NewEncoder(w).Encode(response) -} - -// I've just copied these functions from stringsvc3/transport.go, inlining the -// struct definitions. - -func decodeUppercaseResponse(ctx context.Context, resp *http.Response) (interface{}, error) { - var response struct { - V string `json:"v"` - Err string `json:"err,omitempty"` - } - if err := json.NewDecoder(resp.Body).Decode(&response); err != nil { - return nil, err - } - return response, nil -} - -func decodeCountResponse(ctx context.Context, resp *http.Response) (interface{}, error) { - var response struct { - V int `json:"v"` - } - if err := json.NewDecoder(resp.Body).Decode(&response); err != nil { - return nil, err - } - return response, nil -} - -func decodeUppercaseRequest(ctx context.Context, req *http.Request) (interface{}, error) { - var request struct { - S string `json:"s"` - } - if err := json.NewDecoder(req.Body).Decode(&request); err != nil { - return nil, err - } - return request, nil -} - -func decodeCountRequest(ctx context.Context, req *http.Request) (interface{}, error) { - var request struct { - S string `json:"s"` - } - if err := json.NewDecoder(req.Body).Decode(&request); err != nil { - return nil, err - } - return request, nil -} diff --git a/vendor/github.com/go-kit/kit/examples/profilesvc/README.md b/vendor/github.com/go-kit/kit/examples/profilesvc/README.md deleted file mode 100644 index 68c4125..0000000 --- a/vendor/github.com/go-kit/kit/examples/profilesvc/README.md +++ /dev/null @@ -1,4 +0,0 @@ -# profilesvc - -This example demonstrates how to use Go kit to implement a REST-y HTTP service. 
-It leverages the excellent [gorilla mux package](https://github.com/gorilla/mux) for routing. diff --git a/vendor/github.com/go-kit/kit/examples/profilesvc/client/client.go b/vendor/github.com/go-kit/kit/examples/profilesvc/client/client.go deleted file mode 100644 index 6b1dff0..0000000 --- a/vendor/github.com/go-kit/kit/examples/profilesvc/client/client.go +++ /dev/null @@ -1,120 +0,0 @@ -// Package client provides a profilesvc client based on a predefined Consul -// service name and relevant tags. Users must only provide the address of a -// Consul server. -package client - -import ( - "io" - "time" - - consulapi "github.com/hashicorp/consul/api" - - "github.com/go-kit/kit/endpoint" - "github.com/go-kit/kit/examples/profilesvc" - "github.com/go-kit/kit/log" - "github.com/go-kit/kit/sd" - "github.com/go-kit/kit/sd/consul" - "github.com/go-kit/kit/sd/lb" -) - -// New returns a service that's load-balanced over instances of profilesvc found -// in the provided Consul server. The mechanism of looking up profilesvc -// instances in Consul is hard-coded into the client. -func New(consulAddr string, logger log.Logger) (profilesvc.Service, error) { - apiclient, err := consulapi.NewClient(&consulapi.Config{ - Address: consulAddr, - }) - if err != nil { - return nil, err - } - - // As the implementer of profilesvc, we declare and enforce these - // parameters for all of the profilesvc consumers. - var ( - consulService = "profilesvc" - consulTags = []string{"prod"} - passingOnly = true - retryMax = 3 - retryTimeout = 500 * time.Millisecond - ) - - var ( - sdclient = consul.NewClient(apiclient) - endpoints profilesvc.Endpoints - ) - { - factory := factoryFor(profilesvc.MakePostProfileEndpoint) - subscriber := consul.NewSubscriber(sdclient, factory, logger, consulService, consulTags, passingOnly) - balancer := lb.NewRoundRobin(subscriber) - retry := lb.Retry(retryMax, retryTimeout, balancer) - endpoints.PostProfileEndpoint = retry - } - { - factory := factoryFor(profilesvc.MakeGetProfileEndpoint) - subscriber := consul.NewSubscriber(sdclient, factory, logger, consulService, consulTags, passingOnly) - balancer := lb.NewRoundRobin(subscriber) - retry := lb.Retry(retryMax, retryTimeout, balancer) - endpoints.GetProfileEndpoint = retry - } - { - factory := factoryFor(profilesvc.MakePutProfileEndpoint) - subscriber := consul.NewSubscriber(sdclient, factory, logger, consulService, consulTags, passingOnly) - balancer := lb.NewRoundRobin(subscriber) - retry := lb.Retry(retryMax, retryTimeout, balancer) - endpoints.PutProfileEndpoint = retry - } - { - factory := factoryFor(profilesvc.MakePatchProfileEndpoint) - subscriber := consul.NewSubscriber(sdclient, factory, logger, consulService, consulTags, passingOnly) - balancer := lb.NewRoundRobin(subscriber) - retry := lb.Retry(retryMax, retryTimeout, balancer) - endpoints.PatchProfileEndpoint = retry - } - { - factory := factoryFor(profilesvc.MakeDeleteProfileEndpoint) - subscriber := consul.NewSubscriber(sdclient, factory, logger, consulService, consulTags, passingOnly) - balancer := lb.NewRoundRobin(subscriber) - retry := lb.Retry(retryMax, retryTimeout, balancer) - endpoints.DeleteProfileEndpoint = retry - } - { - factory := factoryFor(profilesvc.MakeGetAddressesEndpoint) - subscriber := consul.NewSubscriber(sdclient, factory, logger, consulService, consulTags, passingOnly) - balancer := lb.NewRoundRobin(subscriber) - retry := lb.Retry(retryMax, retryTimeout, balancer) - endpoints.GetAddressesEndpoint = retry - } - { - factory := 
factoryFor(profilesvc.MakeGetAddressEndpoint) - subscriber := consul.NewSubscriber(sdclient, factory, logger, consulService, consulTags, passingOnly) - balancer := lb.NewRoundRobin(subscriber) - retry := lb.Retry(retryMax, retryTimeout, balancer) - endpoints.GetAddressEndpoint = retry - } - { - factory := factoryFor(profilesvc.MakePostAddressEndpoint) - subscriber := consul.NewSubscriber(sdclient, factory, logger, consulService, consulTags, passingOnly) - balancer := lb.NewRoundRobin(subscriber) - retry := lb.Retry(retryMax, retryTimeout, balancer) - endpoints.PostAddressEndpoint = retry - } - { - factory := factoryFor(profilesvc.MakeDeleteAddressEndpoint) - subscriber := consul.NewSubscriber(sdclient, factory, logger, consulService, consulTags, passingOnly) - balancer := lb.NewRoundRobin(subscriber) - retry := lb.Retry(retryMax, retryTimeout, balancer) - endpoints.DeleteAddressEndpoint = retry - } - - return endpoints, nil -} - -func factoryFor(makeEndpoint func(profilesvc.Service) endpoint.Endpoint) sd.Factory { - return func(instance string) (endpoint.Endpoint, io.Closer, error) { - service, err := profilesvc.MakeClientEndpoints(instance) - if err != nil { - return nil, nil, err - } - return makeEndpoint(service), nil, nil - } -} diff --git a/vendor/github.com/go-kit/kit/examples/profilesvc/cmd/profilesvc/main.go b/vendor/github.com/go-kit/kit/examples/profilesvc/cmd/profilesvc/main.go deleted file mode 100644 index a340e69..0000000 --- a/vendor/github.com/go-kit/kit/examples/profilesvc/cmd/profilesvc/main.go +++ /dev/null @@ -1,59 +0,0 @@ -package main - -import ( - "flag" - "fmt" - "net/http" - "os" - "os/signal" - "syscall" - - "golang.org/x/net/context" - - "github.com/go-kit/kit/examples/profilesvc" - "github.com/go-kit/kit/log" -) - -func main() { - var ( - httpAddr = flag.String("http.addr", ":8080", "HTTP listen address") - ) - flag.Parse() - - var logger log.Logger - { - logger = log.NewLogfmtLogger(os.Stderr) - logger = log.NewContext(logger).With("ts", log.DefaultTimestampUTC) - logger = log.NewContext(logger).With("caller", log.DefaultCaller) - } - - var ctx context.Context - { - ctx = context.Background() - } - - var s profilesvc.Service - { - s = profilesvc.NewInmemService() - s = profilesvc.LoggingMiddleware(logger)(s) - } - - var h http.Handler - { - h = profilesvc.MakeHTTPHandler(ctx, s, log.NewContext(logger).With("component", "HTTP")) - } - - errs := make(chan error) - go func() { - c := make(chan os.Signal) - signal.Notify(c, syscall.SIGINT, syscall.SIGTERM) - errs <- fmt.Errorf("%s", <-c) - }() - - go func() { - logger.Log("transport", "HTTP", "addr", *httpAddr) - errs <- http.ListenAndServe(*httpAddr, h) - }() - - logger.Log("exit", <-errs) -} diff --git a/vendor/github.com/go-kit/kit/examples/profilesvc/endpoints.go b/vendor/github.com/go-kit/kit/examples/profilesvc/endpoints.go deleted file mode 100644 index 6dd129f..0000000 --- a/vendor/github.com/go-kit/kit/examples/profilesvc/endpoints.go +++ /dev/null @@ -1,388 +0,0 @@ -package profilesvc - -import ( - "net/url" - "strings" - - "golang.org/x/net/context" - - "github.com/go-kit/kit/endpoint" - httptransport "github.com/go-kit/kit/transport/http" -) - -// Endpoints collects all of the endpoints that compose a profile service. It's -// meant to be used as a helper struct, to collect all of the endpoints into a -// single parameter. -// -// In a server, it's useful for functions that need to operate on a per-endpoint -// basis. 
For example, you might pass an Endpoints to a function that produces -// an http.Handler, with each method (endpoint) wired up to a specific path. (It -// is probably a mistake in design to invoke the Service methods on the -// Endpoints struct in a server.) -// -// In a client, it's useful to collect individually constructed endpoints into a -// single type that implements the Service interface. For example, you might -// construct individual endpoints using transport/http.NewClient, combine them -// into an Endpoints, and return it to the caller as a Service. -type Endpoints struct { - PostProfileEndpoint endpoint.Endpoint - GetProfileEndpoint endpoint.Endpoint - PutProfileEndpoint endpoint.Endpoint - PatchProfileEndpoint endpoint.Endpoint - DeleteProfileEndpoint endpoint.Endpoint - GetAddressesEndpoint endpoint.Endpoint - GetAddressEndpoint endpoint.Endpoint - PostAddressEndpoint endpoint.Endpoint - DeleteAddressEndpoint endpoint.Endpoint -} - -// MakeServerEndpoints returns an Endpoints struct where each endpoint invokes -// the corresponding method on the provided service. Useful in a profilesvc -// server. -func MakeServerEndpoints(s Service) Endpoints { - return Endpoints{ - PostProfileEndpoint: MakePostProfileEndpoint(s), - GetProfileEndpoint: MakeGetProfileEndpoint(s), - PutProfileEndpoint: MakePutProfileEndpoint(s), - PatchProfileEndpoint: MakePatchProfileEndpoint(s), - DeleteProfileEndpoint: MakeDeleteProfileEndpoint(s), - GetAddressesEndpoint: MakeGetAddressesEndpoint(s), - GetAddressEndpoint: MakeGetAddressEndpoint(s), - PostAddressEndpoint: MakePostAddressEndpoint(s), - DeleteAddressEndpoint: MakeDeleteAddressEndpoint(s), - } -} - -// MakeClientEndpoints returns an Endpoints struct where each endpoint invokes -// the corresponding method on the remote instance, via a transport/http.Client. -// Useful in a profilesvc client. -func MakeClientEndpoints(instance string) (Endpoints, error) { - if !strings.HasPrefix(instance, "http") { - instance = "http://" + instance - } - tgt, err := url.Parse(instance) - if err != nil { - return Endpoints{}, err - } - tgt.Path = "" - - options := []httptransport.ClientOption{} - - // Note that the request encoders need to modify the request URL, changing - // the path and method. That's fine: we simply need to provide specific - // encoders for each endpoint. 
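The deleted comment above notes that the client-side request encoders rewrite the outgoing URL path and method per endpoint. The bodies of those encoders are not visible in this hunk, so the following is only a sketch of the pattern being described; the helper names, the redeclared request struct, and the use of `url.QueryEscape` are assumptions, not the removed code.

```go
package profilesvc

import (
	"net/http"
	"net/url"

	"golang.org/x/net/context"
)

// getProfileRequestSketch mirrors the getProfileRequest struct defined in the
// deleted endpoints.go; it is redeclared here only so the sketch stands alone.
type getProfileRequestSketch struct {
	ID string
}

// encodeGetProfileRequestSketch is the kind of per-endpoint request encoder the
// comment describes: it fixes the HTTP method and rewrites the URL path before
// the request is sent. A GET carries no body, so nothing else needs encoding.
func encodeGetProfileRequestSketch(_ context.Context, req *http.Request, request interface{}) error {
	r := request.(getProfileRequestSketch)
	req.Method = "GET"
	req.URL.Path = "/profiles/" + url.QueryEscape(r.ID)
	return nil
}
```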
- - return Endpoints{ - PostProfileEndpoint: httptransport.NewClient("POST", tgt, encodePostProfileRequest, decodePostProfileResponse, options...).Endpoint(), - GetProfileEndpoint: httptransport.NewClient("GET", tgt, encodeGetProfileRequest, decodeGetProfileResponse, options...).Endpoint(), - PutProfileEndpoint: httptransport.NewClient("PUT", tgt, encodePutProfileRequest, decodePutProfileResponse, options...).Endpoint(), - PatchProfileEndpoint: httptransport.NewClient("PATCH", tgt, encodePatchProfileRequest, decodePatchProfileResponse, options...).Endpoint(), - DeleteProfileEndpoint: httptransport.NewClient("DELETE", tgt, encodeDeleteProfileRequest, decodeDeleteProfileResponse, options...).Endpoint(), - GetAddressesEndpoint: httptransport.NewClient("GET", tgt, encodeGetAddressesRequest, decodeGetAddressesResponse, options...).Endpoint(), - GetAddressEndpoint: httptransport.NewClient("GET", tgt, encodeGetAddressRequest, decodeGetAddressResponse, options...).Endpoint(), - PostAddressEndpoint: httptransport.NewClient("POST", tgt, encodePostAddressRequest, decodePostAddressResponse, options...).Endpoint(), - DeleteAddressEndpoint: httptransport.NewClient("DELETE", tgt, encodeDeleteAddressRequest, decodeDeleteAddressResponse, options...).Endpoint(), - }, nil -} - -// PostProfile implements Service. Primarily useful in a client. -func (e Endpoints) PostProfile(ctx context.Context, p Profile) error { - request := postProfileRequest{Profile: p} - response, err := e.PostProfileEndpoint(ctx, request) - if err != nil { - return err - } - resp := response.(postProfileResponse) - return resp.Err -} - -// GetProfile implements Service. Primarily useful in a client. -func (e Endpoints) GetProfile(ctx context.Context, id string) (Profile, error) { - request := getProfileRequest{ID: id} - response, err := e.GetProfileEndpoint(ctx, request) - if err != nil { - return Profile{}, err - } - resp := response.(getProfileResponse) - return resp.Profile, resp.Err -} - -// PutProfile implements Service. Primarily useful in a client. -func (e Endpoints) PutProfile(ctx context.Context, id string, p Profile) error { - request := putProfileRequest{ID: id, Profile: p} - response, err := e.PutProfileEndpoint(ctx, request) - if err != nil { - return err - } - resp := response.(putProfileResponse) - return resp.Err -} - -// PatchProfile implements Service. Primarily useful in a client. -func (e Endpoints) PatchProfile(ctx context.Context, id string, p Profile) error { - request := patchProfileRequest{ID: id, Profile: p} - response, err := e.PatchProfileEndpoint(ctx, request) - if err != nil { - return err - } - resp := response.(patchProfileResponse) - return resp.Err -} - -// DeleteProfile implements Service. Primarily useful in a client. -func (e Endpoints) DeleteProfile(ctx context.Context, id string) error { - request := deleteProfileRequest{ID: id} - response, err := e.DeleteProfileEndpoint(ctx, request) - if err != nil { - return err - } - resp := response.(deleteProfileResponse) - return resp.Err -} - -// GetAddresses implements Service. Primarily useful in a client. -func (e Endpoints) GetAddresses(ctx context.Context, profileID string) ([]Address, error) { - request := getAddressesRequest{ProfileID: profileID} - response, err := e.GetAddressesEndpoint(ctx, request) - if err != nil { - return nil, err - } - resp := response.(getAddressesResponse) - return resp.Addresses, resp.Err -} - -// GetAddress implements Service. Primarily useful in a client. 
-func (e Endpoints) GetAddress(ctx context.Context, profileID string, addressID string) (Address, error) { - request := getAddressRequest{ProfileID: profileID, AddressID: addressID} - response, err := e.GetAddressEndpoint(ctx, request) - if err != nil { - return Address{}, err - } - resp := response.(getAddressResponse) - return resp.Address, resp.Err -} - -// PostAddress implements Service. Primarily useful in a client. -func (e Endpoints) PostAddress(ctx context.Context, profileID string, a Address) error { - request := postAddressRequest{ProfileID: profileID, Address: a} - response, err := e.PostAddressEndpoint(ctx, request) - if err != nil { - return err - } - resp := response.(postAddressResponse) - return resp.Err -} - -// DeleteAddress implements Service. Primarily useful in a client. -func (e Endpoints) DeleteAddress(ctx context.Context, profileID string, addressID string) error { - request := deleteAddressRequest{ProfileID: profileID, AddressID: addressID} - response, err := e.DeleteAddressEndpoint(ctx, request) - if err != nil { - return err - } - resp := response.(deleteAddressResponse) - return resp.Err -} - -// MakePostProfileEndpoint returns an endpoint via the passed service. -// Primarily useful in a server. -func MakePostProfileEndpoint(s Service) endpoint.Endpoint { - return func(ctx context.Context, request interface{}) (response interface{}, err error) { - req := request.(postProfileRequest) - e := s.PostProfile(ctx, req.Profile) - return postProfileResponse{Err: e}, nil - } -} - -// MakeGetProfileEndpoint returns an endpoint via the passed service. -// Primarily useful in a server. -func MakeGetProfileEndpoint(s Service) endpoint.Endpoint { - return func(ctx context.Context, request interface{}) (response interface{}, err error) { - req := request.(getProfileRequest) - p, e := s.GetProfile(ctx, req.ID) - return getProfileResponse{Profile: p, Err: e}, nil - } -} - -// MakePutProfileEndpoint returns an endpoint via the passed service. -// Primarily useful in a server. -func MakePutProfileEndpoint(s Service) endpoint.Endpoint { - return func(ctx context.Context, request interface{}) (response interface{}, err error) { - req := request.(putProfileRequest) - e := s.PutProfile(ctx, req.ID, req.Profile) - return putProfileResponse{Err: e}, nil - } -} - -// MakePatchProfileEndpoint returns an endpoint via the passed service. -// Primarily useful in a server. -func MakePatchProfileEndpoint(s Service) endpoint.Endpoint { - return func(ctx context.Context, request interface{}) (response interface{}, err error) { - req := request.(patchProfileRequest) - e := s.PatchProfile(ctx, req.ID, req.Profile) - return patchProfileResponse{Err: e}, nil - } -} - -// MakeDeleteProfileEndpoint returns an endpoint via the passed service. -// Primarily useful in a server. -func MakeDeleteProfileEndpoint(s Service) endpoint.Endpoint { - return func(ctx context.Context, request interface{}) (response interface{}, err error) { - req := request.(deleteProfileRequest) - e := s.DeleteProfile(ctx, req.ID) - return deleteProfileResponse{Err: e}, nil - } -} - -// MakeGetAddressesEndpoint returns an endpoint via the passed service. -// Primarily useful in a server. 
-func MakeGetAddressesEndpoint(s Service) endpoint.Endpoint { - return func(ctx context.Context, request interface{}) (response interface{}, err error) { - req := request.(getAddressesRequest) - a, e := s.GetAddresses(ctx, req.ProfileID) - return getAddressesResponse{Addresses: a, Err: e}, nil - } -} - -// MakeGetAddressEndpoint returns an endpoint via the passed service. -// Primarily useful in a server. -func MakeGetAddressEndpoint(s Service) endpoint.Endpoint { - return func(ctx context.Context, request interface{}) (response interface{}, err error) { - req := request.(getAddressRequest) - a, e := s.GetAddress(ctx, req.ProfileID, req.AddressID) - return getAddressResponse{Address: a, Err: e}, nil - } -} - -// MakePostAddressEndpoint returns an endpoint via the passed service. -// Primarily useful in a server. -func MakePostAddressEndpoint(s Service) endpoint.Endpoint { - return func(ctx context.Context, request interface{}) (response interface{}, err error) { - req := request.(postAddressRequest) - e := s.PostAddress(ctx, req.ProfileID, req.Address) - return postAddressResponse{Err: e}, nil - } -} - -// MakeDeleteAddressEndpoint returns an endpoint via the passed service. -// Primarily useful in a server. -func MakeDeleteAddressEndpoint(s Service) endpoint.Endpoint { - return func(ctx context.Context, request interface{}) (response interface{}, err error) { - req := request.(deleteAddressRequest) - e := s.DeleteAddress(ctx, req.ProfileID, req.AddressID) - return deleteAddressResponse{Err: e}, nil - } -} - -// We have two options to return errors from the business logic. -// -// We could return the error via the endpoint itself. That makes certain things -// a little bit easier, like providing non-200 HTTP responses to the client. But -// Go kit assumes that endpoint errors are (or may be treated as) -// transport-domain errors. For example, an endpoint error will count against a -// circuit breaker error count. -// -// Therefore, it's often better to return service (business logic) errors in the -// response object. This means we have to do a bit more work in the HTTP -// response encoder to detect e.g. a not-found error and provide a proper HTTP -// status code. That work is done with the errorer interface, in transport.go. -// Response types that may contain business-logic errors implement that -// interface. 
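The comment block above refers to an errorer interface in transport.go, which is removed further down but only partially visible here. The sketch below shows one plausible shape of that pattern, reusing the ErrNotFound, ErrAlreadyExists, and ErrInconsistentIDs errors declared in the deleted service.go; the encoder names and the status-code mapping are assumptions rather than the removed implementation.

```go
package profilesvc

import (
	"encoding/json"
	"net/http"

	"golang.org/x/net/context"
)

// errorer is implemented by response structs that carry a business-logic error.
type errorer interface {
	error() error
}

// encodeResponseSketch writes a JSON 200 for successful responses and routes
// business-logic errors to a status-code-aware error encoder, so they never
// surface as endpoint (transport-domain) errors.
func encodeResponseSketch(ctx context.Context, w http.ResponseWriter, response interface{}) error {
	if e, ok := response.(errorer); ok && e.error() != nil {
		errorEncoderSketch(ctx, e.error(), w)
		return nil
	}
	w.Header().Set("Content-Type", "application/json; charset=utf-8")
	return json.NewEncoder(w).Encode(response)
}

// errorEncoderSketch maps the service errors declared in the deleted
// service.go to HTTP status codes; the mapping itself is an assumption.
func errorEncoderSketch(_ context.Context, err error, w http.ResponseWriter) {
	code := http.StatusInternalServerError
	switch err {
	case ErrNotFound:
		code = http.StatusNotFound
	case ErrAlreadyExists, ErrInconsistentIDs:
		code = http.StatusBadRequest
	}
	w.Header().Set("Content-Type", "application/json; charset=utf-8")
	w.WriteHeader(code)
	json.NewEncoder(w).Encode(map[string]string{"error": err.Error()})
}
```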
- -type postProfileRequest struct { - Profile Profile -} - -type postProfileResponse struct { - Err error `json:"err,omitempty"` -} - -func (r postProfileResponse) error() error { return r.Err } - -type getProfileRequest struct { - ID string -} - -type getProfileResponse struct { - Profile Profile `json:"profile,omitempty"` - Err error `json:"err,omitempty"` -} - -func (r getProfileResponse) error() error { return r.Err } - -type putProfileRequest struct { - ID string - Profile Profile -} - -type putProfileResponse struct { - Err error `json:"err,omitempty"` -} - -func (r putProfileResponse) error() error { return nil } - -type patchProfileRequest struct { - ID string - Profile Profile -} - -type patchProfileResponse struct { - Err error `json:"err,omitempty"` -} - -func (r patchProfileResponse) error() error { return r.Err } - -type deleteProfileRequest struct { - ID string -} - -type deleteProfileResponse struct { - Err error `json:"err,omitempty"` -} - -func (r deleteProfileResponse) error() error { return r.Err } - -type getAddressesRequest struct { - ProfileID string -} - -type getAddressesResponse struct { - Addresses []Address `json:"addresses,omitempty"` - Err error `json:"err,omitempty"` -} - -func (r getAddressesResponse) error() error { return r.Err } - -type getAddressRequest struct { - ProfileID string - AddressID string -} - -type getAddressResponse struct { - Address Address `json:"address,omitempty"` - Err error `json:"err,omitempty"` -} - -func (r getAddressResponse) error() error { return r.Err } - -type postAddressRequest struct { - ProfileID string - Address Address -} - -type postAddressResponse struct { - Err error `json:"err,omitempty"` -} - -func (r postAddressResponse) error() error { return r.Err } - -type deleteAddressRequest struct { - ProfileID string - AddressID string -} - -type deleteAddressResponse struct { - Err error `json:"err,omitempty"` -} - -func (r deleteAddressResponse) error() error { return r.Err } diff --git a/vendor/github.com/go-kit/kit/examples/profilesvc/middlewares.go b/vendor/github.com/go-kit/kit/examples/profilesvc/middlewares.go deleted file mode 100644 index 76708e5..0000000 --- a/vendor/github.com/go-kit/kit/examples/profilesvc/middlewares.go +++ /dev/null @@ -1,89 +0,0 @@ -package profilesvc - -import ( - "time" - - "golang.org/x/net/context" - - "github.com/go-kit/kit/log" -) - -// Middleware describes a service (as opposed to endpoint) middleware. 
-type Middleware func(Service) Service - -func LoggingMiddleware(logger log.Logger) Middleware { - return func(next Service) Service { - return &loggingMiddleware{ - next: next, - logger: logger, - } - } -} - -type loggingMiddleware struct { - next Service - logger log.Logger -} - -func (mw loggingMiddleware) PostProfile(ctx context.Context, p Profile) (err error) { - defer func(begin time.Time) { - mw.logger.Log("method", "PostProfile", "id", p.ID, "took", time.Since(begin), "err", err) - }(time.Now()) - return mw.next.PostProfile(ctx, p) -} - -func (mw loggingMiddleware) GetProfile(ctx context.Context, id string) (p Profile, err error) { - defer func(begin time.Time) { - mw.logger.Log("method", "GetProfile", "id", id, "took", time.Since(begin), "err", err) - }(time.Now()) - return mw.next.GetProfile(ctx, id) -} - -func (mw loggingMiddleware) PutProfile(ctx context.Context, id string, p Profile) (err error) { - defer func(begin time.Time) { - mw.logger.Log("method", "PutProfile", "id", id, "took", time.Since(begin), "err", err) - }(time.Now()) - return mw.next.PutProfile(ctx, id, p) -} - -func (mw loggingMiddleware) PatchProfile(ctx context.Context, id string, p Profile) (err error) { - defer func(begin time.Time) { - mw.logger.Log("method", "PatchProfile", "id", id, "took", time.Since(begin), "err", err) - }(time.Now()) - return mw.next.PatchProfile(ctx, id, p) -} - -func (mw loggingMiddleware) DeleteProfile(ctx context.Context, id string) (err error) { - defer func(begin time.Time) { - mw.logger.Log("method", "DeleteProfile", "id", id, "took", time.Since(begin), "err", err) - }(time.Now()) - return mw.next.DeleteProfile(ctx, id) -} - -func (mw loggingMiddleware) GetAddresses(ctx context.Context, profileID string) (addresses []Address, err error) { - defer func(begin time.Time) { - mw.logger.Log("method", "GetAddresses", "profileID", profileID, "took", time.Since(begin), "err", err) - }(time.Now()) - return mw.next.GetAddresses(ctx, profileID) -} - -func (mw loggingMiddleware) GetAddress(ctx context.Context, profileID string, addressID string) (a Address, err error) { - defer func(begin time.Time) { - mw.logger.Log("method", "GetAddress", "profileID", profileID, "addressID", addressID, "took", time.Since(begin), "err", err) - }(time.Now()) - return mw.next.GetAddress(ctx, profileID, addressID) -} - -func (mw loggingMiddleware) PostAddress(ctx context.Context, profileID string, a Address) (err error) { - defer func(begin time.Time) { - mw.logger.Log("method", "PostAddress", "profileID", profileID, "took", time.Since(begin), "err", err) - }(time.Now()) - return mw.next.PostAddress(ctx, profileID, a) -} - -func (mw loggingMiddleware) DeleteAddress(ctx context.Context, profileID string, addressID string) (err error) { - defer func(begin time.Time) { - mw.logger.Log("method", "DeleteAddress", "profileID", profileID, "addressID", addressID, "took", time.Since(begin), "err", err) - }(time.Now()) - return mw.next.DeleteAddress(ctx, profileID, addressID) -} diff --git a/vendor/github.com/go-kit/kit/examples/profilesvc/service.go b/vendor/github.com/go-kit/kit/examples/profilesvc/service.go deleted file mode 100644 index 4ae6756..0000000 --- a/vendor/github.com/go-kit/kit/examples/profilesvc/service.go +++ /dev/null @@ -1,186 +0,0 @@ -package profilesvc - -import ( - "errors" - "sync" - - "golang.org/x/net/context" -) - -// Service is a simple CRUD interface for user profiles. 
-type Service interface { - PostProfile(ctx context.Context, p Profile) error - GetProfile(ctx context.Context, id string) (Profile, error) - PutProfile(ctx context.Context, id string, p Profile) error - PatchProfile(ctx context.Context, id string, p Profile) error - DeleteProfile(ctx context.Context, id string) error - GetAddresses(ctx context.Context, profileID string) ([]Address, error) - GetAddress(ctx context.Context, profileID string, addressID string) (Address, error) - PostAddress(ctx context.Context, profileID string, a Address) error - DeleteAddress(ctx context.Context, profileID string, addressID string) error -} - -// Profile represents a single user profile. -// ID should be globally unique. -type Profile struct { - ID string `json:"id"` - Name string `json:"name,omitempty"` - Addresses []Address `json:"addresses,omitempty"` -} - -// Address is a field of a user profile. -// ID should be unique within the profile (at a minimum). -type Address struct { - ID string `json:"id"` - Location string `json:"location,omitempty"` -} - -var ( - ErrInconsistentIDs = errors.New("inconsistent IDs") - ErrAlreadyExists = errors.New("already exists") - ErrNotFound = errors.New("not found") -) - -type inmemService struct { - mtx sync.RWMutex - m map[string]Profile -} - -func NewInmemService() Service { - return &inmemService{ - m: map[string]Profile{}, - } -} - -func (s *inmemService) PostProfile(ctx context.Context, p Profile) error { - s.mtx.Lock() - defer s.mtx.Unlock() - if _, ok := s.m[p.ID]; ok { - return ErrAlreadyExists // POST = create, don't overwrite - } - s.m[p.ID] = p - return nil -} - -func (s *inmemService) GetProfile(ctx context.Context, id string) (Profile, error) { - s.mtx.RLock() - defer s.mtx.RUnlock() - p, ok := s.m[id] - if !ok { - return Profile{}, ErrNotFound - } - return p, nil -} - -func (s *inmemService) PutProfile(ctx context.Context, id string, p Profile) error { - if id != p.ID { - return ErrInconsistentIDs - } - s.mtx.Lock() - defer s.mtx.Unlock() - s.m[id] = p // PUT = create or update - return nil -} - -func (s *inmemService) PatchProfile(ctx context.Context, id string, p Profile) error { - if p.ID != "" && id != p.ID { - return ErrInconsistentIDs - } - - s.mtx.Lock() - defer s.mtx.Unlock() - - existing, ok := s.m[id] - if !ok { - return ErrNotFound // PATCH = update existing, don't create - } - - // We assume that it's not possible to PATCH the ID, and that it's not - // possible to PATCH any field to its zero value. That is, the zero value - // means not specified. The way around this is to use e.g. Name *string in - // the Profile definition. But since this is just a demonstrative example, - // I'm leaving that out. 
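The comment above mentions, without showing, the pointer-field alternative that lets PATCH distinguish "not specified" from "set to the zero value". A minimal sketch of that alternative follows; profilePatch and applyPatch are hypothetical names and rely on the Profile and Address types defined in the removed service.go.

```go
package profilesvc

// profilePatch mirrors Profile but uses a pointer for Name, so an explicit
// empty string survives JSON decoding as a non-nil pointer instead of being
// indistinguishable from an omitted field.
type profilePatch struct {
	Name      *string   `json:"name,omitempty"`
	Addresses []Address `json:"addresses,omitempty"`
}

// applyPatch copies only the fields the caller actually supplied.
func applyPatch(existing Profile, p profilePatch) Profile {
	if p.Name != nil {
		existing.Name = *p.Name // "" is now a legitimate, intentional value
	}
	if p.Addresses != nil {
		existing.Addresses = p.Addresses
	}
	return existing
}
```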
- - if p.Name != "" { - existing.Name = p.Name - } - if len(p.Addresses) > 0 { - existing.Addresses = p.Addresses - } - s.m[id] = existing - return nil -} - -func (s *inmemService) DeleteProfile(ctx context.Context, id string) error { - s.mtx.Lock() - defer s.mtx.Unlock() - if _, ok := s.m[id]; !ok { - return ErrNotFound - } - delete(s.m, id) - return nil -} - -func (s *inmemService) GetAddresses(ctx context.Context, profileID string) ([]Address, error) { - s.mtx.RLock() - defer s.mtx.RUnlock() - p, ok := s.m[profileID] - if !ok { - return []Address{}, ErrNotFound - } - return p.Addresses, nil -} - -func (s *inmemService) GetAddress(ctx context.Context, profileID string, addressID string) (Address, error) { - s.mtx.RLock() - defer s.mtx.RUnlock() - p, ok := s.m[profileID] - if !ok { - return Address{}, ErrNotFound - } - for _, address := range p.Addresses { - if address.ID == addressID { - return address, nil - } - } - return Address{}, ErrNotFound -} - -func (s *inmemService) PostAddress(ctx context.Context, profileID string, a Address) error { - s.mtx.Lock() - defer s.mtx.Unlock() - p, ok := s.m[profileID] - if !ok { - return ErrNotFound - } - for _, address := range p.Addresses { - if address.ID == a.ID { - return ErrAlreadyExists - } - } - p.Addresses = append(p.Addresses, a) - s.m[profileID] = p - return nil -} - -func (s *inmemService) DeleteAddress(ctx context.Context, profileID string, addressID string) error { - s.mtx.Lock() - defer s.mtx.Unlock() - p, ok := s.m[profileID] - if !ok { - return ErrNotFound - } - newAddresses := make([]Address, 0, len(p.Addresses)) - for _, address := range p.Addresses { - if address.ID == addressID { - continue // delete - } - newAddresses = append(newAddresses, address) - } - if len(newAddresses) == len(p.Addresses) { - return ErrNotFound - } - p.Addresses = newAddresses - s.m[profileID] = p - return nil -} diff --git a/vendor/github.com/go-kit/kit/examples/profilesvc/transport.go b/vendor/github.com/go-kit/kit/examples/profilesvc/transport.go deleted file mode 100644 index 02d807c..0000000 --- a/vendor/github.com/go-kit/kit/examples/profilesvc/transport.go +++ /dev/null @@ -1,410 +0,0 @@ -package profilesvc - -// The profilesvc is just over HTTP, so we just have a single transport.go. - -import ( - "bytes" - "encoding/json" - "errors" - "io/ioutil" - "net/http" - - "github.com/gorilla/mux" - "golang.org/x/net/context" - - "net/url" - - "github.com/go-kit/kit/log" - httptransport "github.com/go-kit/kit/transport/http" -) - -var ( - // ErrBadRouting is returned when an expected path variable is missing. - // It always indicates programmer error. - ErrBadRouting = errors.New("inconsistent mapping between route and handler (programmer error)") -) - -// MakeHTTPHandler mounts all of the service endpoints into an http.Handler. -// Useful in a profilesvc server. 
-func MakeHTTPHandler(ctx context.Context, s Service, logger log.Logger) http.Handler { - r := mux.NewRouter() - e := MakeServerEndpoints(s) - options := []httptransport.ServerOption{ - httptransport.ServerErrorLogger(logger), - httptransport.ServerErrorEncoder(encodeError), - } - - // POST /profiles adds another profile - // GET /profiles/:id retrieves the given profile by id - // PUT /profiles/:id post updated profile information about the profile - // PATCH /profiles/:id partial updated profile information - // DELETE /profiles/:id remove the given profile - // GET /profiles/:id/addresses retrieve addresses associated with the profile - // GET /profiles/:id/addresses/:addressID retrieve a particular profile address - // POST /profiles/:id/addresses add a new address - // DELETE /profiles/:id/addresses/:addressID remove an address - - r.Methods("POST").Path("/profiles/").Handler(httptransport.NewServer( - ctx, - e.PostProfileEndpoint, - decodePostProfileRequest, - encodeResponse, - options..., - )) - r.Methods("GET").Path("/profiles/{id}").Handler(httptransport.NewServer( - ctx, - e.GetProfileEndpoint, - decodeGetProfileRequest, - encodeResponse, - options..., - )) - r.Methods("PUT").Path("/profiles/{id}").Handler(httptransport.NewServer( - ctx, - e.PutProfileEndpoint, - decodePutProfileRequest, - encodeResponse, - options..., - )) - r.Methods("PATCH").Path("/profiles/{id}").Handler(httptransport.NewServer( - ctx, - e.PatchProfileEndpoint, - decodePatchProfileRequest, - encodeResponse, - options..., - )) - r.Methods("DELETE").Path("/profiles/{id}").Handler(httptransport.NewServer( - ctx, - e.DeleteProfileEndpoint, - decodeDeleteProfileRequest, - encodeResponse, - options..., - )) - r.Methods("GET").Path("/profiles/{id}/addresses/").Handler(httptransport.NewServer( - ctx, - e.GetAddressesEndpoint, - decodeGetAddressesRequest, - encodeResponse, - options..., - )) - r.Methods("GET").Path("/profiles/{id}/addresses/{addressID}").Handler(httptransport.NewServer( - ctx, - e.GetAddressEndpoint, - decodeGetAddressRequest, - encodeResponse, - options..., - )) - r.Methods("POST").Path("/profiles/{id}/addresses/").Handler(httptransport.NewServer( - ctx, - e.PostAddressEndpoint, - decodePostAddressRequest, - encodeResponse, - options..., - )) - r.Methods("DELETE").Path("/profiles/{id}/addresses/{addressID}").Handler(httptransport.NewServer( - ctx, - e.DeleteAddressEndpoint, - decodeDeleteAddressRequest, - encodeResponse, - options..., - )) - return r -} - -func decodePostProfileRequest(_ context.Context, r *http.Request) (request interface{}, err error) { - var req postProfileRequest - if e := json.NewDecoder(r.Body).Decode(&req.Profile); e != nil { - return nil, e - } - return req, nil -} - -func decodeGetProfileRequest(_ context.Context, r *http.Request) (request interface{}, err error) { - vars := mux.Vars(r) - id, ok := vars["id"] - if !ok { - return nil, ErrBadRouting - } - return getProfileRequest{ID: id}, nil -} - -func decodePutProfileRequest(_ context.Context, r *http.Request) (request interface{}, err error) { - vars := mux.Vars(r) - id, ok := vars["id"] - if !ok { - return nil, ErrBadRouting - } - var profile Profile - if err := json.NewDecoder(r.Body).Decode(&profile); err != nil { - return nil, err - } - return putProfileRequest{ - ID: id, - Profile: profile, - }, nil -} - -func decodePatchProfileRequest(_ context.Context, r *http.Request) (request interface{}, err error) { - vars := mux.Vars(r) - id, ok := vars["id"] - if !ok { - return nil, ErrBadRouting - } - var profile Profile - if 
err := json.NewDecoder(r.Body).Decode(&profile); err != nil { - return nil, err - } - return patchProfileRequest{ - ID: id, - Profile: profile, - }, nil -} - -func decodeDeleteProfileRequest(_ context.Context, r *http.Request) (request interface{}, err error) { - vars := mux.Vars(r) - id, ok := vars["id"] - if !ok { - return nil, ErrBadRouting - } - return deleteProfileRequest{ID: id}, nil -} - -func decodeGetAddressesRequest(_ context.Context, r *http.Request) (request interface{}, err error) { - vars := mux.Vars(r) - id, ok := vars["id"] - if !ok { - return nil, ErrBadRouting - } - return getAddressesRequest{ProfileID: id}, nil -} - -func decodeGetAddressRequest(_ context.Context, r *http.Request) (request interface{}, err error) { - vars := mux.Vars(r) - id, ok := vars["id"] - if !ok { - return nil, ErrBadRouting - } - addressID, ok := vars["addressID"] - if !ok { - return nil, ErrBadRouting - } - return getAddressRequest{ - ProfileID: id, - AddressID: addressID, - }, nil -} - -func decodePostAddressRequest(_ context.Context, r *http.Request) (request interface{}, err error) { - vars := mux.Vars(r) - id, ok := vars["id"] - if !ok { - return nil, ErrBadRouting - } - var address Address - if err := json.NewDecoder(r.Body).Decode(&address); err != nil { - return nil, err - } - return postAddressRequest{ - ProfileID: id, - Address: address, - }, nil -} - -func decodeDeleteAddressRequest(_ context.Context, r *http.Request) (request interface{}, err error) { - vars := mux.Vars(r) - id, ok := vars["id"] - if !ok { - return nil, ErrBadRouting - } - addressID, ok := vars["addressID"] - if !ok { - return nil, ErrBadRouting - } - return deleteAddressRequest{ - ProfileID: id, - AddressID: addressID, - }, nil -} - -func encodePostProfileRequest(ctx context.Context, req *http.Request, request interface{}) error { - // r.Methods("POST").Path("/profiles/") - req.Method, req.URL.Path = "POST", url.QueryEscape("/profiles/") - return encodeRequest(ctx, req, request) -} - -func encodeGetProfileRequest(ctx context.Context, req *http.Request, request interface{}) error { - // r.Methods("GET").Path("/profiles/{id}") - r := request.(getProfileRequest) - req.Method, req.URL.Path = "GET", url.QueryEscape("/profiles/"+r.ID) - return encodeRequest(ctx, req, request) -} - -func encodePutProfileRequest(ctx context.Context, req *http.Request, request interface{}) error { - // r.Methods("PUT").Path("/profiles/{id}") - r := request.(putProfileRequest) - req.Method, req.URL.Path = "PUT", url.QueryEscape("/profiles/"+r.ID) - return encodeRequest(ctx, req, request) -} - -func encodePatchProfileRequest(ctx context.Context, req *http.Request, request interface{}) error { - // r.Methods("PATCH").Path("/profiles/{id}") - r := request.(patchProfileRequest) - req.Method, req.URL.Path = "PATCH", url.QueryEscape("/profiles/"+r.ID) - return encodeRequest(ctx, req, request) -} - -func encodeDeleteProfileRequest(ctx context.Context, req *http.Request, request interface{}) error { - // r.Methods("DELETE").Path("/profiles/{id}") - r := request.(deleteProfileRequest) - req.Method, req.URL.Path = "DELETE", url.QueryEscape("/profiles/"+r.ID) - return encodeRequest(ctx, req, request) -} - -func encodeGetAddressesRequest(ctx context.Context, req *http.Request, request interface{}) error { - // r.Methods("GET").Path("/profiles/{id}/addresses/") - r := request.(getAddressesRequest) - req.Method, req.URL.Path = "GET", url.QueryEscape("/profiles/"+r.ProfileID+"/addresses/") - return encodeRequest(ctx, req, request) -} - -func 
encodeGetAddressRequest(ctx context.Context, req *http.Request, request interface{}) error { - // r.Methods("GET").Path("/profiles/{id}/addresses/{addressID}") - r := request.(getAddressRequest) - req.Method, req.URL.Path = "GET", url.QueryEscape("/profiles/"+r.ProfileID+"/addresses/"+r.AddressID) - return encodeRequest(ctx, req, request) -} - -func encodePostAddressRequest(ctx context.Context, req *http.Request, request interface{}) error { - // r.Methods("POST").Path("/profiles/{id}/addresses/") - r := request.(postAddressRequest) - req.Method, req.URL.Path = "POST", url.QueryEscape("/profiles/"+r.ProfileID+"/addresses/") - return encodeRequest(ctx, req, request) -} - -func encodeDeleteAddressRequest(ctx context.Context, req *http.Request, request interface{}) error { - // r.Methods("DELETE").Path("/profiles/{id}/addresses/{addressID}") - r := request.(deleteAddressRequest) - req.Method, req.URL.Path = "DELETE", url.QueryEscape("/profiles/"+r.ProfileID+"/addresses/"+r.AddressID) - return encodeRequest(ctx, req, request) -} - -func decodePostProfileResponse(_ context.Context, resp *http.Response) (interface{}, error) { - var response postProfileResponse - err := json.NewDecoder(resp.Body).Decode(&response) - return response, err -} - -func decodeGetProfileResponse(_ context.Context, resp *http.Response) (interface{}, error) { - var response getProfileResponse - err := json.NewDecoder(resp.Body).Decode(&response) - return response, err -} - -func decodePutProfileResponse(_ context.Context, resp *http.Response) (interface{}, error) { - var response putProfileResponse - err := json.NewDecoder(resp.Body).Decode(&response) - return response, err -} - -func decodePatchProfileResponse(_ context.Context, resp *http.Response) (interface{}, error) { - var response patchProfileResponse - err := json.NewDecoder(resp.Body).Decode(&response) - return response, err -} - -func decodeDeleteProfileResponse(_ context.Context, resp *http.Response) (interface{}, error) { - var response deleteProfileResponse - err := json.NewDecoder(resp.Body).Decode(&response) - return response, err -} - -func decodeGetAddressesResponse(_ context.Context, resp *http.Response) (interface{}, error) { - var response getAddressesResponse - err := json.NewDecoder(resp.Body).Decode(&response) - return response, err -} - -func decodeGetAddressResponse(_ context.Context, resp *http.Response) (interface{}, error) { - var response getAddressResponse - err := json.NewDecoder(resp.Body).Decode(&response) - return response, err -} - -func decodePostAddressResponse(_ context.Context, resp *http.Response) (interface{}, error) { - var response postAddressResponse - err := json.NewDecoder(resp.Body).Decode(&response) - return response, err -} - -func decodeDeleteAddressResponse(_ context.Context, resp *http.Response) (interface{}, error) { - var response deleteAddressResponse - err := json.NewDecoder(resp.Body).Decode(&response) - return response, err -} - -// errorer is implemented by all concrete response types that may contain -// errors. It allows us to change the HTTP response code without needing to -// trigger an endpoint (transport-level) error. For more information, read the -// big comment in endpoints.go. -type errorer interface { - error() error -} - -// encodeResponse is the common method to encode all response types to the -// client. I chose to do it this way because, since we're using JSON, there's no -// reason to provide anything more specific. It's certainly possible to -// specialize on a per-response (per-method) basis. 
-func encodeResponse(ctx context.Context, w http.ResponseWriter, response interface{}) error { - if e, ok := response.(errorer); ok && e.error() != nil { - // Not a Go kit transport error, but a business-logic error. - // Provide those as HTTP errors. - encodeError(ctx, e.error(), w) - return nil - } - w.Header().Set("Content-Type", "application/json; charset=utf-8") - return json.NewEncoder(w).Encode(response) -} - -// encodeRequest likewise JSON-encodes the request to the HTTP request body. -// Don't use it directly as a transport/http.Client EncodeRequestFunc: -// profilesvc endpoints require mutating the HTTP method and request path. -func encodeRequest(_ context.Context, req *http.Request, request interface{}) error { - var buf bytes.Buffer - err := json.NewEncoder(&buf).Encode(request) - if err != nil { - return err - } - req.Body = ioutil.NopCloser(&buf) - return nil -} - -func encodeError(_ context.Context, err error, w http.ResponseWriter) { - if err == nil { - panic("encodeError with nil error") - } - w.Header().Set("Content-Type", "application/json; charset=utf-8") - w.WriteHeader(codeFrom(err)) - json.NewEncoder(w).Encode(map[string]interface{}{ - "error": err.Error(), - }) -} - -func codeFrom(err error) int { - switch err { - case ErrNotFound: - return http.StatusNotFound - case ErrAlreadyExists, ErrInconsistentIDs: - return http.StatusBadRequest - default: - if e, ok := err.(httptransport.Error); ok { - switch e.Domain { - case httptransport.DomainDecode: - return http.StatusBadRequest - case httptransport.DomainDo: - return http.StatusServiceUnavailable - default: - return http.StatusInternalServerError - } - } - return http.StatusInternalServerError - } -} diff --git a/vendor/github.com/go-kit/kit/examples/shipping/README.md b/vendor/github.com/go-kit/kit/examples/shipping/README.md deleted file mode 100644 index cbcc4df..0000000 --- a/vendor/github.com/go-kit/kit/examples/shipping/README.md +++ /dev/null @@ -1,25 +0,0 @@ -# shipping - -This example demonstrates a more real-world application consisting of multiple services. - -## Description - -The implementation is based on the container shipping domain from the [Domain Driven Design](http://www.amazon.com/Domain-Driven-Design-Tackling-Complexity-Software/dp/0321125215) book by Eric Evans, which was [originally](http://dddsample.sourceforge.net/) implemented in Java but has since been ported to Go. This example is a somewhat stripped down version to demonstrate the use of Go kit. The [original Go application](https://github.com/marcusolsson/goddd) is maintained separately and accompanied by an [AngularJS application](https://github.com/marcusolsson/dddelivery-angularjs) as well as a mock [routing service](https://github.com/marcusolsson/pathfinder). - -### Organization - -The application consists of three application services, `booking`, `handling` and `tracking`. Each of these is an individual Go kit service as seen in previous examples. - -- __booking__ - used by the shipping company to book and route cargos. -- __handling__ - used by our staff around the world to register whenever the cargo has been received, loaded etc. -- __tracking__ - used by the customer to track the cargo along the route - -There are also a few pure domain packages that contain some intricate business-logic. They provide domain objects and services that are used by each application service to provide interesting use-cases for the user. - -`repository` contains in-memory implementations for the repositories found in the domain packages. 
- -The `routing` package provides a _domain service_ that is used to query an external application for possible routes. - -## Contributing - -As with all Go kit examples you are more than welcome to contribute. If you do however, please consider contributing back to the original project as well. diff --git a/vendor/github.com/go-kit/kit/examples/shipping/booking/endpoint.go b/vendor/github.com/go-kit/kit/examples/shipping/booking/endpoint.go deleted file mode 100644 index b9864d2..0000000 --- a/vendor/github.com/go-kit/kit/examples/shipping/booking/endpoint.go +++ /dev/null @@ -1,139 +0,0 @@ -package booking - -import ( - "time" - - "golang.org/x/net/context" - - "github.com/go-kit/kit/endpoint" - "github.com/go-kit/kit/examples/shipping/cargo" - "github.com/go-kit/kit/examples/shipping/location" -) - -type bookCargoRequest struct { - Origin location.UNLocode - Destination location.UNLocode - ArrivalDeadline time.Time -} - -type bookCargoResponse struct { - ID cargo.TrackingID `json:"tracking_id,omitempty"` - Err error `json:"error,omitempty"` -} - -func (r bookCargoResponse) error() error { return r.Err } - -func makeBookCargoEndpoint(s Service) endpoint.Endpoint { - return func(ctx context.Context, request interface{}) (interface{}, error) { - req := request.(bookCargoRequest) - id, err := s.BookNewCargo(req.Origin, req.Destination, req.ArrivalDeadline) - return bookCargoResponse{ID: id, Err: err}, nil - } -} - -type loadCargoRequest struct { - ID cargo.TrackingID -} - -type loadCargoResponse struct { - Cargo *Cargo `json:"cargo,omitempty"` - Err error `json:"error,omitempty"` -} - -func (r loadCargoResponse) error() error { return r.Err } - -func makeLoadCargoEndpoint(bs Service) endpoint.Endpoint { - return func(ctx context.Context, request interface{}) (interface{}, error) { - req := request.(loadCargoRequest) - c, err := bs.LoadCargo(req.ID) - return loadCargoResponse{Cargo: &c, Err: err}, nil - } -} - -type requestRoutesRequest struct { - ID cargo.TrackingID -} - -type requestRoutesResponse struct { - Routes []cargo.Itinerary `json:"routes,omitempty"` - Err error `json:"error,omitempty"` -} - -func (r requestRoutesResponse) error() error { return r.Err } - -func makeRequestRoutesEndpoint(s Service) endpoint.Endpoint { - return func(ctx context.Context, request interface{}) (interface{}, error) { - req := request.(requestRoutesRequest) - itin := s.RequestPossibleRoutesForCargo(req.ID) - return requestRoutesResponse{Routes: itin, Err: nil}, nil - } -} - -type assignToRouteRequest struct { - ID cargo.TrackingID - Itinerary cargo.Itinerary -} - -type assignToRouteResponse struct { - Err error `json:"error,omitempty"` -} - -func (r assignToRouteResponse) error() error { return r.Err } - -func makeAssignToRouteEndpoint(s Service) endpoint.Endpoint { - return func(ctx context.Context, request interface{}) (interface{}, error) { - req := request.(assignToRouteRequest) - err := s.AssignCargoToRoute(req.ID, req.Itinerary) - return assignToRouteResponse{Err: err}, nil - } -} - -type changeDestinationRequest struct { - ID cargo.TrackingID - Destination location.UNLocode -} - -type changeDestinationResponse struct { - Err error `json:"error,omitempty"` -} - -func (r changeDestinationResponse) error() error { return r.Err } - -func makeChangeDestinationEndpoint(s Service) endpoint.Endpoint { - return func(ctx context.Context, request interface{}) (interface{}, error) { - req := request.(changeDestinationRequest) - err := s.ChangeDestination(req.ID, req.Destination) - return 
changeDestinationResponse{Err: err}, nil - } -} - -type listCargosRequest struct{} - -type listCargosResponse struct { - Cargos []Cargo `json:"cargos,omitempty"` - Err error `json:"error,omitempty"` -} - -func (r listCargosResponse) error() error { return r.Err } - -func makeListCargosEndpoint(s Service) endpoint.Endpoint { - return func(ctx context.Context, request interface{}) (interface{}, error) { - _ = request.(listCargosRequest) - return listCargosResponse{Cargos: s.Cargos(), Err: nil}, nil - } -} - -type listLocationsRequest struct { -} - -type listLocationsResponse struct { - Locations []Location `json:"locations,omitempty"` - Err error `json:"error,omitempty"` -} - -func makeListLocationsEndpoint(s Service) endpoint.Endpoint { - return func(ctx context.Context, request interface{}) (interface{}, error) { - _ = request.(listLocationsRequest) - return listLocationsResponse{Locations: s.Locations(), Err: nil}, nil - } -} diff --git a/vendor/github.com/go-kit/kit/examples/shipping/booking/instrumenting.go b/vendor/github.com/go-kit/kit/examples/shipping/booking/instrumenting.go deleted file mode 100644 index 71feb59..0000000 --- a/vendor/github.com/go-kit/kit/examples/shipping/booking/instrumenting.go +++ /dev/null @@ -1,95 +0,0 @@ -package booking - -import ( - "time" - - "github.com/go-kit/kit/metrics" - - "github.com/go-kit/kit/examples/shipping/cargo" - "github.com/go-kit/kit/examples/shipping/location" -) - -type instrumentingService struct { - requestCount metrics.Counter - requestLatency metrics.TimeHistogram - Service -} - -// NewInstrumentingService returns an instance of an instrumenting Service. -func NewInstrumentingService(requestCount metrics.Counter, requestLatency metrics.TimeHistogram, s Service) Service { - return &instrumentingService{ - requestCount: requestCount, - requestLatency: requestLatency, - Service: s, - } -} - -func (s *instrumentingService) BookNewCargo(origin, destination location.UNLocode, arrivalDeadline time.Time) (cargo.TrackingID, error) { - defer func(begin time.Time) { - methodField := metrics.Field{Key: "method", Value: "book"} - s.requestCount.With(methodField).Add(1) - s.requestLatency.With(methodField).Observe(time.Since(begin)) - }(time.Now()) - - return s.Service.BookNewCargo(origin, destination, arrivalDeadline) -} - -func (s *instrumentingService) LoadCargo(id cargo.TrackingID) (c Cargo, err error) { - defer func(begin time.Time) { - methodField := metrics.Field{Key: "method", Value: "load"} - s.requestCount.With(methodField).Add(1) - s.requestLatency.With(methodField).Observe(time.Since(begin)) - }(time.Now()) - - return s.Service.LoadCargo(id) -} - -func (s *instrumentingService) RequestPossibleRoutesForCargo(id cargo.TrackingID) []cargo.Itinerary { - defer func(begin time.Time) { - methodField := metrics.Field{Key: "method", Value: "request_routes"} - s.requestCount.With(methodField).Add(1) - s.requestLatency.With(methodField).Observe(time.Since(begin)) - }(time.Now()) - - return s.Service.RequestPossibleRoutesForCargo(id) -} - -func (s *instrumentingService) AssignCargoToRoute(id cargo.TrackingID, itinerary cargo.Itinerary) (err error) { - defer func(begin time.Time) { - methodField := metrics.Field{Key: "method", Value: "assign_to_route"} - s.requestCount.With(methodField).Add(1) - s.requestLatency.With(methodField).Observe(time.Since(begin)) - }(time.Now()) - - return s.Service.AssignCargoToRoute(id, itinerary) -} - -func (s *instrumentingService) ChangeDestination(id cargo.TrackingID, l location.UNLocode) (err error) { - defer 
func(begin time.Time) { - methodField := metrics.Field{Key: "method", Value: "change_destination"} - s.requestCount.With(methodField).Add(1) - s.requestLatency.With(methodField).Observe(time.Since(begin)) - }(time.Now()) - - return s.Service.ChangeDestination(id, l) -} - -func (s *instrumentingService) Cargos() []Cargo { - defer func(begin time.Time) { - methodField := metrics.Field{Key: "method", Value: "list_cargos"} - s.requestCount.With(methodField).Add(1) - s.requestLatency.With(methodField).Observe(time.Since(begin)) - }(time.Now()) - - return s.Service.Cargos() -} - -func (s *instrumentingService) Locations() []Location { - defer func(begin time.Time) { - methodField := metrics.Field{Key: "method", Value: "list_locations"} - s.requestCount.With(methodField).Add(1) - s.requestLatency.With(methodField).Observe(time.Since(begin)) - }(time.Now()) - - return s.Service.Locations() -} diff --git a/vendor/github.com/go-kit/kit/examples/shipping/booking/logging.go b/vendor/github.com/go-kit/kit/examples/shipping/booking/logging.go deleted file mode 100644 index 3a04576..0000000 --- a/vendor/github.com/go-kit/kit/examples/shipping/booking/logging.go +++ /dev/null @@ -1,101 +0,0 @@ -package booking - -import ( - "time" - - "github.com/go-kit/kit/examples/shipping/cargo" - "github.com/go-kit/kit/examples/shipping/location" - "github.com/go-kit/kit/log" -) - -type loggingService struct { - logger log.Logger - Service -} - -// NewLoggingService returns a new instance of a logging Service. -func NewLoggingService(logger log.Logger, s Service) Service { - return &loggingService{logger, s} -} - -func (s *loggingService) BookNewCargo(origin location.UNLocode, destination location.UNLocode, arrivalDeadline time.Time) (id cargo.TrackingID, err error) { - defer func(begin time.Time) { - s.logger.Log( - "method", "book", - "origin", origin, - "destination", destination, - "arrival_deadline", arrivalDeadline, - "took", time.Since(begin), - "err", err, - ) - }(time.Now()) - return s.Service.BookNewCargo(origin, destination, arrivalDeadline) -} - -func (s *loggingService) LoadCargo(id cargo.TrackingID) (c Cargo, err error) { - defer func(begin time.Time) { - s.logger.Log( - "method", "load", - "tracking_id", id, - "took", time.Since(begin), - "err", err, - ) - }(time.Now()) - return s.Service.LoadCargo(id) -} - -func (s *loggingService) RequestPossibleRoutesForCargo(id cargo.TrackingID) []cargo.Itinerary { - defer func(begin time.Time) { - s.logger.Log( - "method", "request_routes", - "tracking_id", id, - "took", time.Since(begin), - ) - }(time.Now()) - return s.Service.RequestPossibleRoutesForCargo(id) -} - -func (s *loggingService) AssignCargoToRoute(id cargo.TrackingID, itinerary cargo.Itinerary) (err error) { - defer func(begin time.Time) { - s.logger.Log( - "method", "assign_to_route", - "tracking_id", id, - "took", time.Since(begin), - "err", err, - ) - }(time.Now()) - return s.Service.AssignCargoToRoute(id, itinerary) -} - -func (s *loggingService) ChangeDestination(id cargo.TrackingID, l location.UNLocode) (err error) { - defer func(begin time.Time) { - s.logger.Log( - "method", "change_destination", - "tracking_id", id, - "destination", l, - "took", time.Since(begin), - "err", err, - ) - }(time.Now()) - return s.Service.ChangeDestination(id, l) -} - -func (s *loggingService) Cargos() []Cargo { - defer func(begin time.Time) { - s.logger.Log( - "method", "list_cargos", - "took", time.Since(begin), - ) - }(time.Now()) - return s.Service.Cargos() -} - -func (s *loggingService) Locations() []Location { 
- defer func(begin time.Time) { - s.logger.Log( - "method", "list_locations", - "took", time.Since(begin), - ) - }(time.Now()) - return s.Service.Locations() -} diff --git a/vendor/github.com/go-kit/kit/examples/shipping/booking/service.go b/vendor/github.com/go-kit/kit/examples/shipping/booking/service.go deleted file mode 100644 index 47605f8..0000000 --- a/vendor/github.com/go-kit/kit/examples/shipping/booking/service.go +++ /dev/null @@ -1,201 +0,0 @@ -// Package booking provides the use-case of booking a cargo. Used by views -// facing an administrator. -package booking - -import ( - "errors" - "time" - - "github.com/go-kit/kit/examples/shipping/cargo" - "github.com/go-kit/kit/examples/shipping/location" - "github.com/go-kit/kit/examples/shipping/routing" -) - -// ErrInvalidArgument is returned when one or more arguments are invalid. -var ErrInvalidArgument = errors.New("invalid argument") - -// Service is the interface that provides booking methods. -type Service interface { - // BookNewCargo registers a new cargo in the tracking system, not yet - // routed. - BookNewCargo(origin location.UNLocode, destination location.UNLocode, arrivalDeadline time.Time) (cargo.TrackingID, error) - - // LoadCargo returns a read model of a cargo. - LoadCargo(trackingID cargo.TrackingID) (Cargo, error) - - // RequestPossibleRoutesForCargo requests a list of itineraries describing - // possible routes for this cargo. - RequestPossibleRoutesForCargo(trackingID cargo.TrackingID) []cargo.Itinerary - - // AssignCargoToRoute assigns a cargo to the route specified by the - // itinerary. - AssignCargoToRoute(trackingID cargo.TrackingID, itinerary cargo.Itinerary) error - - // ChangeDestination changes the destination of a cargo. - ChangeDestination(trackingID cargo.TrackingID, unLocode location.UNLocode) error - - // Cargos returns a list of all cargos that have been booked. - Cargos() []Cargo - - // Locations returns a list of registered locations. 
- Locations() []Location -} - -type service struct { - cargoRepository cargo.Repository - locationRepository location.Repository - routingService routing.Service - handlingEventRepository cargo.HandlingEventRepository -} - -func (s *service) AssignCargoToRoute(id cargo.TrackingID, itinerary cargo.Itinerary) error { - if id == "" || len(itinerary.Legs) == 0 { - return ErrInvalidArgument - } - - c, err := s.cargoRepository.Find(id) - if err != nil { - return err - } - - c.AssignToRoute(itinerary) - - if err := s.cargoRepository.Store(c); err != nil { - return err - } - - return nil -} - -func (s *service) BookNewCargo(origin, destination location.UNLocode, arrivalDeadline time.Time) (cargo.TrackingID, error) { - if origin == "" || destination == "" || arrivalDeadline.IsZero() { - return "", ErrInvalidArgument - } - - id := cargo.NextTrackingID() - rs := cargo.RouteSpecification{ - Origin: origin, - Destination: destination, - ArrivalDeadline: arrivalDeadline, - } - - c := cargo.New(id, rs) - - if err := s.cargoRepository.Store(c); err != nil { - return "", err - } - - return c.TrackingID, nil -} - -func (s *service) LoadCargo(trackingID cargo.TrackingID) (Cargo, error) { - if trackingID == "" { - return Cargo{}, ErrInvalidArgument - } - - c, err := s.cargoRepository.Find(trackingID) - if err != nil { - return Cargo{}, err - } - - return assemble(c, s.handlingEventRepository), nil -} - -func (s *service) ChangeDestination(id cargo.TrackingID, destination location.UNLocode) error { - if id == "" || destination == "" { - return ErrInvalidArgument - } - - c, err := s.cargoRepository.Find(id) - if err != nil { - return err - } - - l, err := s.locationRepository.Find(destination) - if err != nil { - return err - } - - c.SpecifyNewRoute(cargo.RouteSpecification{ - Origin: c.Origin, - Destination: l.UNLocode, - ArrivalDeadline: c.RouteSpecification.ArrivalDeadline, - }) - - if err := s.cargoRepository.Store(c); err != nil { - return err - } - - return nil -} - -func (s *service) RequestPossibleRoutesForCargo(id cargo.TrackingID) []cargo.Itinerary { - if id == "" { - return nil - } - - c, err := s.cargoRepository.Find(id) - if err != nil { - return []cargo.Itinerary{} - } - - return s.routingService.FetchRoutesForSpecification(c.RouteSpecification) -} - -func (s *service) Cargos() []Cargo { - var result []Cargo - for _, c := range s.cargoRepository.FindAll() { - result = append(result, assemble(c, s.handlingEventRepository)) - } - return result -} - -func (s *service) Locations() []Location { - var result []Location - for _, v := range s.locationRepository.FindAll() { - result = append(result, Location{ - UNLocode: string(v.UNLocode), - Name: v.Name, - }) - } - return result -} - -// NewService creates a booking service with necessary dependencies. -func NewService(cr cargo.Repository, lr location.Repository, her cargo.HandlingEventRepository, rs routing.Service) Service { - return &service{ - cargoRepository: cr, - locationRepository: lr, - handlingEventRepository: her, - routingService: rs, - } -} - -// Location is a read model for booking views. -type Location struct { - UNLocode string `json:"locode"` - Name string `json:"name"` -} - -// Cargo is a read model for booking views. 
-type Cargo struct { - ArrivalDeadline time.Time `json:"arrival_deadline"` - Destination string `json:"destination"` - Legs []cargo.Leg `json:"legs,omitempty"` - Misrouted bool `json:"misrouted"` - Origin string `json:"origin"` - Routed bool `json:"routed"` - TrackingID string `json:"tracking_id"` -} - -func assemble(c *cargo.Cargo, her cargo.HandlingEventRepository) Cargo { - return Cargo{ - TrackingID: string(c.TrackingID), - Origin: string(c.Origin), - Destination: string(c.RouteSpecification.Destination), - Misrouted: c.Delivery.RoutingStatus == cargo.Misrouted, - Routed: !c.Itinerary.IsEmpty(), - ArrivalDeadline: c.RouteSpecification.ArrivalDeadline, - Legs: c.Itinerary.Legs, - } -} diff --git a/vendor/github.com/go-kit/kit/examples/shipping/booking/transport.go b/vendor/github.com/go-kit/kit/examples/shipping/booking/transport.go deleted file mode 100644 index 7cf5994..0000000 --- a/vendor/github.com/go-kit/kit/examples/shipping/booking/transport.go +++ /dev/null @@ -1,201 +0,0 @@ -package booking - -import ( - "encoding/json" - "errors" - "net/http" - "time" - - "github.com/gorilla/mux" - "golang.org/x/net/context" - - "github.com/go-kit/kit/examples/shipping/cargo" - "github.com/go-kit/kit/examples/shipping/location" - kitlog "github.com/go-kit/kit/log" - kithttp "github.com/go-kit/kit/transport/http" -) - -// MakeHandler returns a handler for the booking service. -func MakeHandler(ctx context.Context, bs Service, logger kitlog.Logger) http.Handler { - opts := []kithttp.ServerOption{ - kithttp.ServerErrorLogger(logger), - kithttp.ServerErrorEncoder(encodeError), - } - - bookCargoHandler := kithttp.NewServer( - ctx, - makeBookCargoEndpoint(bs), - decodeBookCargoRequest, - encodeResponse, - opts..., - ) - loadCargoHandler := kithttp.NewServer( - ctx, - makeLoadCargoEndpoint(bs), - decodeLoadCargoRequest, - encodeResponse, - opts..., - ) - requestRoutesHandler := kithttp.NewServer( - ctx, - makeRequestRoutesEndpoint(bs), - decodeRequestRoutesRequest, - encodeResponse, - opts..., - ) - assignToRouteHandler := kithttp.NewServer( - ctx, - makeAssignToRouteEndpoint(bs), - decodeAssignToRouteRequest, - encodeResponse, - opts..., - ) - changeDestinationHandler := kithttp.NewServer( - ctx, - makeChangeDestinationEndpoint(bs), - decodeChangeDestinationRequest, - encodeResponse, - opts..., - ) - listCargosHandler := kithttp.NewServer( - ctx, - makeListCargosEndpoint(bs), - decodeListCargosRequest, - encodeResponse, - opts..., - ) - listLocationsHandler := kithttp.NewServer( - ctx, - makeListLocationsEndpoint(bs), - decodeListLocationsRequest, - encodeResponse, - opts..., - ) - - r := mux.NewRouter() - - r.Handle("/booking/v1/cargos", bookCargoHandler).Methods("POST") - r.Handle("/booking/v1/cargos", listCargosHandler).Methods("GET") - r.Handle("/booking/v1/cargos/{id}", loadCargoHandler).Methods("GET") - r.Handle("/booking/v1/cargos/{id}/request_routes", requestRoutesHandler).Methods("GET") - r.Handle("/booking/v1/cargos/{id}/assign_to_route", assignToRouteHandler).Methods("POST") - r.Handle("/booking/v1/cargos/{id}/change_destination", changeDestinationHandler).Methods("POST") - r.Handle("/booking/v1/locations", listLocationsHandler).Methods("GET") - r.Handle("/booking/v1/docs", http.StripPrefix("/booking/v1/docs", http.FileServer(http.Dir("booking/docs")))) - - return r -} - -var errBadRoute = errors.New("bad route") - -func decodeBookCargoRequest(_ context.Context, r *http.Request) (interface{}, error) { - var body struct { - Origin string `json:"origin"` - Destination string 
`json:"destination"` - ArrivalDeadline time.Time `json:"arrival_deadline"` - } - - if err := json.NewDecoder(r.Body).Decode(&body); err != nil { - return nil, err - } - - return bookCargoRequest{ - Origin: location.UNLocode(body.Origin), - Destination: location.UNLocode(body.Destination), - ArrivalDeadline: body.ArrivalDeadline, - }, nil -} - -func decodeLoadCargoRequest(_ context.Context, r *http.Request) (interface{}, error) { - vars := mux.Vars(r) - id, ok := vars["id"] - if !ok { - return nil, errBadRoute - } - return loadCargoRequest{ID: cargo.TrackingID(id)}, nil -} - -func decodeRequestRoutesRequest(_ context.Context, r *http.Request) (interface{}, error) { - vars := mux.Vars(r) - id, ok := vars["id"] - if !ok { - return nil, errBadRoute - } - return requestRoutesRequest{ID: cargo.TrackingID(id)}, nil -} - -func decodeAssignToRouteRequest(_ context.Context, r *http.Request) (interface{}, error) { - vars := mux.Vars(r) - id, ok := vars["id"] - if !ok { - return nil, errBadRoute - } - - var itinerary cargo.Itinerary - if err := json.NewDecoder(r.Body).Decode(&itinerary); err != nil { - return nil, err - } - - return assignToRouteRequest{ - ID: cargo.TrackingID(id), - Itinerary: itinerary, - }, nil -} - -func decodeChangeDestinationRequest(_ context.Context, r *http.Request) (interface{}, error) { - vars := mux.Vars(r) - id, ok := vars["id"] - if !ok { - return nil, errBadRoute - } - - var body struct { - Destination string `json:"destination"` - } - - if err := json.NewDecoder(r.Body).Decode(&body); err != nil { - return nil, err - } - - return changeDestinationRequest{ - ID: cargo.TrackingID(id), - Destination: location.UNLocode(body.Destination), - }, nil -} - -func decodeListCargosRequest(_ context.Context, r *http.Request) (interface{}, error) { - return listCargosRequest{}, nil -} - -func decodeListLocationsRequest(_ context.Context, r *http.Request) (interface{}, error) { - return listLocationsRequest{}, nil -} - -func encodeResponse(ctx context.Context, w http.ResponseWriter, response interface{}) error { - if e, ok := response.(errorer); ok && e.error() != nil { - encodeError(ctx, e.error(), w) - return nil - } - w.Header().Set("Content-Type", "application/json; charset=utf-8") - return json.NewEncoder(w).Encode(response) -} - -type errorer interface { - error() error -} - -// encode errors from business-logic -func encodeError(_ context.Context, err error, w http.ResponseWriter) { - switch err { - case cargo.ErrUnknown: - w.WriteHeader(http.StatusNotFound) - case ErrInvalidArgument: - w.WriteHeader(http.StatusBadRequest) - default: - w.WriteHeader(http.StatusInternalServerError) - } - w.Header().Set("Content-Type", "application/json; charset=utf-8") - json.NewEncoder(w).Encode(map[string]interface{}{ - "error": err.Error(), - }) -} diff --git a/vendor/github.com/go-kit/kit/examples/shipping/cargo/cargo.go b/vendor/github.com/go-kit/kit/examples/shipping/cargo/cargo.go deleted file mode 100644 index d4bb5f4..0000000 --- a/vendor/github.com/go-kit/kit/examples/shipping/cargo/cargo.go +++ /dev/null @@ -1,137 +0,0 @@ -// Package cargo contains the heart of the domain model. -package cargo - -import ( - "errors" - "strings" - "time" - - "github.com/pborman/uuid" - - "github.com/go-kit/kit/examples/shipping/location" -) - -// TrackingID uniquely identifies a particular cargo. -type TrackingID string - -// Cargo is the central class in the domain model. 
-type Cargo struct { - TrackingID TrackingID - Origin location.UNLocode - RouteSpecification RouteSpecification - Itinerary Itinerary - Delivery Delivery -} - -// SpecifyNewRoute specifies a new route for this cargo. -func (c *Cargo) SpecifyNewRoute(rs RouteSpecification) { - c.RouteSpecification = rs - c.Delivery = c.Delivery.UpdateOnRouting(c.RouteSpecification, c.Itinerary) -} - -// AssignToRoute attaches a new itinerary to this cargo. -func (c *Cargo) AssignToRoute(itinerary Itinerary) { - c.Itinerary = itinerary - c.Delivery = c.Delivery.UpdateOnRouting(c.RouteSpecification, c.Itinerary) -} - -// DeriveDeliveryProgress updates all aspects of the cargo aggregate status -// based on the current route specification, itinerary and handling of the cargo. -func (c *Cargo) DeriveDeliveryProgress(history HandlingHistory) { - c.Delivery = DeriveDeliveryFrom(c.RouteSpecification, c.Itinerary, history) -} - -// New creates a new, unrouted cargo. -func New(id TrackingID, rs RouteSpecification) *Cargo { - itinerary := Itinerary{} - history := HandlingHistory{make([]HandlingEvent, 0)} - - return &Cargo{ - TrackingID: id, - Origin: rs.Origin, - RouteSpecification: rs, - Delivery: DeriveDeliveryFrom(rs, itinerary, history), - } -} - -// Repository provides access a cargo store. -type Repository interface { - Store(cargo *Cargo) error - Find(trackingID TrackingID) (*Cargo, error) - FindAll() []*Cargo -} - -// ErrUnknown is used when a cargo could not be found. -var ErrUnknown = errors.New("unknown cargo") - -// NextTrackingID generates a new tracking ID. -// TODO: Move to infrastructure(?) -func NextTrackingID() TrackingID { - return TrackingID(strings.Split(strings.ToUpper(uuid.New()), "-")[0]) -} - -// RouteSpecification Contains information about a route: its origin, -// destination and arrival deadline. -type RouteSpecification struct { - Origin location.UNLocode - Destination location.UNLocode - ArrivalDeadline time.Time -} - -// IsSatisfiedBy checks whether provided itinerary satisfies this -// specification. -func (s RouteSpecification) IsSatisfiedBy(itinerary Itinerary) bool { - return itinerary.Legs != nil && - s.Origin == itinerary.InitialDepartureLocation() && - s.Destination == itinerary.FinalArrivalLocation() -} - -// RoutingStatus describes status of cargo routing. -type RoutingStatus int - -// Valid routing statuses. -const ( - NotRouted RoutingStatus = iota - Misrouted - Routed -) - -func (s RoutingStatus) String() string { - switch s { - case NotRouted: - return "Not routed" - case Misrouted: - return "Misrouted" - case Routed: - return "Routed" - } - return "" -} - -// TransportStatus describes status of cargo transportation. -type TransportStatus int - -// Valid transport statuses. 
-const ( - NotReceived TransportStatus = iota - InPort - OnboardCarrier - Claimed - Unknown -) - -func (s TransportStatus) String() string { - switch s { - case NotReceived: - return "Not received" - case InPort: - return "In port" - case OnboardCarrier: - return "Onboard carrier" - case Claimed: - return "Claimed" - case Unknown: - return "Unknown" - } - return "" -} diff --git a/vendor/github.com/go-kit/kit/examples/shipping/cargo/delivery.go b/vendor/github.com/go-kit/kit/examples/shipping/cargo/delivery.go deleted file mode 100644 index 34f079d..0000000 --- a/vendor/github.com/go-kit/kit/examples/shipping/cargo/delivery.go +++ /dev/null @@ -1,174 +0,0 @@ -package cargo - -import ( - "time" - - "github.com/go-kit/kit/examples/shipping/location" - "github.com/go-kit/kit/examples/shipping/voyage" -) - -// Delivery is the actual transportation of the cargo, as opposed to the -// customer requirement (RouteSpecification) and the plan (Itinerary). -type Delivery struct { - Itinerary Itinerary - RouteSpecification RouteSpecification - RoutingStatus RoutingStatus - TransportStatus TransportStatus - NextExpectedActivity HandlingActivity - LastEvent HandlingEvent - LastKnownLocation location.UNLocode - CurrentVoyage voyage.Number - ETA time.Time - IsMisdirected bool - IsUnloadedAtDestination bool -} - -// UpdateOnRouting creates a new delivery snapshot to reflect changes in -// routing, i.e. when the route specification or the itinerary has changed but -// no additional handling of the cargo has been performed. -func (d Delivery) UpdateOnRouting(rs RouteSpecification, itinerary Itinerary) Delivery { - return newDelivery(d.LastEvent, itinerary, rs) -} - -// IsOnTrack checks if the delivery is on track. -func (d Delivery) IsOnTrack() bool { - return d.RoutingStatus == Routed && !d.IsMisdirected -} - -// DeriveDeliveryFrom creates a new delivery snapshot based on the complete -// handling history of a cargo, as well as its route specification and -// itinerary. -func DeriveDeliveryFrom(rs RouteSpecification, itinerary Itinerary, history HandlingHistory) Delivery { - lastEvent, _ := history.MostRecentlyCompletedEvent() - return newDelivery(lastEvent, itinerary, rs) -} - -// newDelivery creates a up-to-date delivery based on an handling event, -// itinerary and a route specification. -func newDelivery(lastEvent HandlingEvent, itinerary Itinerary, rs RouteSpecification) Delivery { - var ( - routingStatus = calculateRoutingStatus(itinerary, rs) - transportStatus = calculateTransportStatus(lastEvent) - lastKnownLocation = calculateLastKnownLocation(lastEvent) - isMisdirected = calculateMisdirectedStatus(lastEvent, itinerary) - isUnloadedAtDestination = calculateUnloadedAtDestination(lastEvent, rs) - currentVoyage = calculateCurrentVoyage(transportStatus, lastEvent) - ) - - d := Delivery{ - LastEvent: lastEvent, - Itinerary: itinerary, - RouteSpecification: rs, - RoutingStatus: routingStatus, - TransportStatus: transportStatus, - LastKnownLocation: lastKnownLocation, - IsMisdirected: isMisdirected, - IsUnloadedAtDestination: isUnloadedAtDestination, - CurrentVoyage: currentVoyage, - } - - d.NextExpectedActivity = calculateNextExpectedActivity(d) - d.ETA = calculateETA(d) - - return d -} - -// Below are internal functions used when creating a new delivery. 
- -func calculateRoutingStatus(itinerary Itinerary, rs RouteSpecification) RoutingStatus { - if itinerary.Legs == nil { - return NotRouted - } - - if rs.IsSatisfiedBy(itinerary) { - return Routed - } - - return Misrouted -} - -func calculateMisdirectedStatus(event HandlingEvent, itinerary Itinerary) bool { - if event.Activity.Type == NotHandled { - return false - } - - return !itinerary.IsExpected(event) -} - -func calculateUnloadedAtDestination(event HandlingEvent, rs RouteSpecification) bool { - if event.Activity.Type == NotHandled { - return false - } - - return event.Activity.Type == Unload && rs.Destination == event.Activity.Location -} - -func calculateTransportStatus(event HandlingEvent) TransportStatus { - switch event.Activity.Type { - case NotHandled: - return NotReceived - case Load: - return OnboardCarrier - case Unload: - return InPort - case Receive: - return InPort - case Customs: - return InPort - case Claim: - return Claimed - } - return Unknown -} - -func calculateLastKnownLocation(event HandlingEvent) location.UNLocode { - return event.Activity.Location -} - -func calculateNextExpectedActivity(d Delivery) HandlingActivity { - if !d.IsOnTrack() { - return HandlingActivity{} - } - - switch d.LastEvent.Activity.Type { - case NotHandled: - return HandlingActivity{Type: Receive, Location: d.RouteSpecification.Origin} - case Receive: - l := d.Itinerary.Legs[0] - return HandlingActivity{Type: Load, Location: l.LoadLocation, VoyageNumber: l.VoyageNumber} - case Load: - for _, l := range d.Itinerary.Legs { - if l.LoadLocation == d.LastEvent.Activity.Location { - return HandlingActivity{Type: Unload, Location: l.UnloadLocation, VoyageNumber: l.VoyageNumber} - } - } - case Unload: - for i, l := range d.Itinerary.Legs { - if l.UnloadLocation == d.LastEvent.Activity.Location { - if i < len(d.Itinerary.Legs)-1 { - return HandlingActivity{Type: Load, Location: d.Itinerary.Legs[i+1].LoadLocation, VoyageNumber: d.Itinerary.Legs[i+1].VoyageNumber} - } - - return HandlingActivity{Type: Claim, Location: l.UnloadLocation} - } - } - } - - return HandlingActivity{} -} - -func calculateCurrentVoyage(transportStatus TransportStatus, event HandlingEvent) voyage.Number { - if transportStatus == OnboardCarrier && event.Activity.Type != NotHandled { - return event.Activity.VoyageNumber - } - - return voyage.Number("") -} - -func calculateETA(d Delivery) time.Time { - if !d.IsOnTrack() { - return time.Time{} - } - - return d.Itinerary.FinalArrivalTime() -} diff --git a/vendor/github.com/go-kit/kit/examples/shipping/cargo/handling.go b/vendor/github.com/go-kit/kit/examples/shipping/cargo/handling.go deleted file mode 100644 index 5f77bc4..0000000 --- a/vendor/github.com/go-kit/kit/examples/shipping/cargo/handling.go +++ /dev/null @@ -1,121 +0,0 @@ -package cargo - -// TODO: It would make sense to have this in its own package. Unfortunately, -// then there would be a circular dependency between the cargo and handling -// packages since cargo.Delivery would use handling.HandlingEvent and -// handling.HandlingEvent would use cargo.TrackingID. Also, -// HandlingEventFactory depends on the cargo repository. -// -// It would make sense not having the cargo package depend on handling. - -import ( - "errors" - "time" - - "github.com/go-kit/kit/examples/shipping/location" - "github.com/go-kit/kit/examples/shipping/voyage" -) - -// HandlingActivity represents how and where a cargo can be handled, and can -// be used to express predictions about what is expected to happen to a cargo -// in the future. 
-type HandlingActivity struct { - Type HandlingEventType - Location location.UNLocode - VoyageNumber voyage.Number -} - -// HandlingEvent is used to register the event when, for instance, a cargo is -// unloaded from a carrier at a some location at a given time. -type HandlingEvent struct { - TrackingID TrackingID - Activity HandlingActivity -} - -// HandlingEventType describes type of a handling event. -type HandlingEventType int - -// Valid handling event types. -const ( - NotHandled HandlingEventType = iota - Load - Unload - Receive - Claim - Customs -) - -func (t HandlingEventType) String() string { - switch t { - case NotHandled: - return "Not Handled" - case Load: - return "Load" - case Unload: - return "Unload" - case Receive: - return "Receive" - case Claim: - return "Claim" - case Customs: - return "Customs" - } - - return "" -} - -// HandlingHistory is the handling history of a cargo. -type HandlingHistory struct { - HandlingEvents []HandlingEvent -} - -// MostRecentlyCompletedEvent returns most recently completed handling event. -func (h HandlingHistory) MostRecentlyCompletedEvent() (HandlingEvent, error) { - if len(h.HandlingEvents) == 0 { - return HandlingEvent{}, errors.New("delivery history is empty") - } - - return h.HandlingEvents[len(h.HandlingEvents)-1], nil -} - -// HandlingEventRepository provides access a handling event store. -type HandlingEventRepository interface { - Store(e HandlingEvent) - QueryHandlingHistory(TrackingID) HandlingHistory -} - -// HandlingEventFactory creates handling events. -type HandlingEventFactory struct { - CargoRepository Repository - VoyageRepository voyage.Repository - LocationRepository location.Repository -} - -// CreateHandlingEvent creates a validated handling event. -func (f *HandlingEventFactory) CreateHandlingEvent(registrationTime time.Time, completionTime time.Time, trackingID TrackingID, - voyageNumber voyage.Number, unLocode location.UNLocode, eventType HandlingEventType) (HandlingEvent, error) { - - if _, err := f.CargoRepository.Find(trackingID); err != nil { - return HandlingEvent{}, err - } - - if _, err := f.VoyageRepository.Find(voyageNumber); err != nil { - // TODO: This is pretty ugly, but when creating a Receive event, the voyage number is not known. - if len(voyageNumber) > 0 { - return HandlingEvent{}, err - } - } - - if _, err := f.LocationRepository.Find(unLocode); err != nil { - return HandlingEvent{}, err - } - - return HandlingEvent{ - TrackingID: trackingID, - Activity: HandlingActivity{ - Type: eventType, - Location: unLocode, - VoyageNumber: voyageNumber, - }, - }, nil -} diff --git a/vendor/github.com/go-kit/kit/examples/shipping/cargo/itinerary.go b/vendor/github.com/go-kit/kit/examples/shipping/cargo/itinerary.go deleted file mode 100644 index 6b5088e..0000000 --- a/vendor/github.com/go-kit/kit/examples/shipping/cargo/itinerary.go +++ /dev/null @@ -1,91 +0,0 @@ -package cargo - -import ( - "time" - - "github.com/go-kit/kit/examples/shipping/location" - "github.com/go-kit/kit/examples/shipping/voyage" -) - -// Leg describes the transportation between two locations on a voyage. -type Leg struct { - VoyageNumber voyage.Number `json:"voyage_number"` - LoadLocation location.UNLocode `json:"from"` - UnloadLocation location.UNLocode `json:"to"` - LoadTime time.Time `json:"load_time"` - UnloadTime time.Time `json:"unload_time"` -} - -// NewLeg creates a new itinerary leg. 
-func NewLeg(voyageNumber voyage.Number, loadLocation, unloadLocation location.UNLocode, loadTime, unloadTime time.Time) Leg { - return Leg{ - VoyageNumber: voyageNumber, - LoadLocation: loadLocation, - UnloadLocation: unloadLocation, - LoadTime: loadTime, - UnloadTime: unloadTime, - } -} - -// Itinerary specifies steps required to transport a cargo from its origin to -// destination. -type Itinerary struct { - Legs []Leg `json:"legs"` -} - -// InitialDepartureLocation returns the start of the itinerary. -func (i Itinerary) InitialDepartureLocation() location.UNLocode { - if i.IsEmpty() { - return location.UNLocode("") - } - return i.Legs[0].LoadLocation -} - -// FinalArrivalLocation returns the end of the itinerary. -func (i Itinerary) FinalArrivalLocation() location.UNLocode { - if i.IsEmpty() { - return location.UNLocode("") - } - return i.Legs[len(i.Legs)-1].UnloadLocation -} - -// FinalArrivalTime returns the expected arrival time at final destination. -func (i Itinerary) FinalArrivalTime() time.Time { - return i.Legs[len(i.Legs)-1].UnloadTime -} - -// IsEmpty checks if the itinerary contains at least one leg. -func (i Itinerary) IsEmpty() bool { - return i.Legs == nil || len(i.Legs) == 0 -} - -// IsExpected checks if the given handling event is expected when executing -// this itinerary. -func (i Itinerary) IsExpected(event HandlingEvent) bool { - if i.IsEmpty() { - return true - } - - switch event.Activity.Type { - case Receive: - return i.InitialDepartureLocation() == event.Activity.Location - case Load: - for _, l := range i.Legs { - if l.LoadLocation == event.Activity.Location && l.VoyageNumber == event.Activity.VoyageNumber { - return true - } - } - return false - case Unload: - for _, l := range i.Legs { - if l.UnloadLocation == event.Activity.Location && l.VoyageNumber == event.Activity.VoyageNumber { - return true - } - } - return false - case Claim: - return i.FinalArrivalLocation() == event.Activity.Location - } - - return true -} diff --git a/vendor/github.com/go-kit/kit/examples/shipping/handling/endpoint.go b/vendor/github.com/go-kit/kit/examples/shipping/handling/endpoint.go deleted file mode 100644 index e10bdda..0000000 --- a/vendor/github.com/go-kit/kit/examples/shipping/handling/endpoint.go +++ /dev/null @@ -1,34 +0,0 @@ -package handling - -import ( - "time" - - "golang.org/x/net/context" - - "github.com/go-kit/kit/endpoint" - "github.com/go-kit/kit/examples/shipping/cargo" - "github.com/go-kit/kit/examples/shipping/location" - "github.com/go-kit/kit/examples/shipping/voyage" -) - -type registerIncidentRequest struct { - ID cargo.TrackingID - Location location.UNLocode - Voyage voyage.Number - EventType cargo.HandlingEventType - CompletionTime time.Time -} - -type registerIncidentResponse struct { - Err error `json:"error,omitempty"` -} - -func (r registerIncidentResponse) error() error { return r.Err } - -func makeRegisterIncidentEndpoint(hs Service) endpoint.Endpoint { - return func(ctx context.Context, request interface{}) (interface{}, error) { - req := request.(registerIncidentRequest) - err := hs.RegisterHandlingEvent(req.CompletionTime, req.ID, req.Voyage, req.Location, req.EventType) - return registerIncidentResponse{Err: err}, nil - } -} diff --git a/vendor/github.com/go-kit/kit/examples/shipping/handling/instrumenting.go b/vendor/github.com/go-kit/kit/examples/shipping/handling/instrumenting.go deleted file mode 100644 index 1d1d6da..0000000 --- a/vendor/github.com/go-kit/kit/examples/shipping/handling/instrumenting.go +++ /dev/null @@ -1,38 +0,0 @@ 
-package handling - -import ( - "time" - - "github.com/go-kit/kit/metrics" - - "github.com/go-kit/kit/examples/shipping/cargo" - "github.com/go-kit/kit/examples/shipping/location" - "github.com/go-kit/kit/examples/shipping/voyage" -) - -type instrumentingService struct { - requestCount metrics.Counter - requestLatency metrics.TimeHistogram - Service -} - -// NewInstrumentingService returns an instance of an instrumenting Service. -func NewInstrumentingService(requestCount metrics.Counter, requestLatency metrics.TimeHistogram, s Service) Service { - return &instrumentingService{ - requestCount: requestCount, - requestLatency: requestLatency, - Service: s, - } -} - -func (s *instrumentingService) RegisterHandlingEvent(completionTime time.Time, trackingID cargo.TrackingID, voyage voyage.Number, - loc location.UNLocode, eventType cargo.HandlingEventType) error { - - defer func(begin time.Time) { - methodField := metrics.Field{Key: "method", Value: "register_incident"} - s.requestCount.With(methodField).Add(1) - s.requestLatency.With(methodField).Observe(time.Since(begin)) - }(time.Now()) - - return s.Service.RegisterHandlingEvent(completionTime, trackingID, voyage, loc, eventType) -} diff --git a/vendor/github.com/go-kit/kit/examples/shipping/handling/logging.go b/vendor/github.com/go-kit/kit/examples/shipping/handling/logging.go deleted file mode 100644 index 26457ac..0000000 --- a/vendor/github.com/go-kit/kit/examples/shipping/handling/logging.go +++ /dev/null @@ -1,37 +0,0 @@ -package handling - -import ( - "time" - - "github.com/go-kit/kit/examples/shipping/cargo" - "github.com/go-kit/kit/examples/shipping/location" - "github.com/go-kit/kit/examples/shipping/voyage" - "github.com/go-kit/kit/log" -) - -type loggingService struct { - logger log.Logger - Service -} - -// NewLoggingService returns a new instance of a logging Service. -func NewLoggingService(logger log.Logger, s Service) Service { - return &loggingService{logger, s} -} - -func (s *loggingService) RegisterHandlingEvent(completionTime time.Time, trackingID cargo.TrackingID, voyageNumber voyage.Number, - unLocode location.UNLocode, eventType cargo.HandlingEventType) (err error) { - defer func(begin time.Time) { - s.logger.Log( - "method", "register_incident", - "tracking_id", trackingID, - "location", unLocode, - "voyage", voyageNumber, - "event_type", eventType, - "completion_time", completionTime, - "took", time.Since(begin), - "err", err, - ) - }(time.Now()) - return s.Service.RegisterHandlingEvent(completionTime, trackingID, voyageNumber, unLocode, eventType) -} diff --git a/vendor/github.com/go-kit/kit/examples/shipping/handling/service.go b/vendor/github.com/go-kit/kit/examples/shipping/handling/service.go deleted file mode 100644 index f548f4c..0000000 --- a/vendor/github.com/go-kit/kit/examples/shipping/handling/service.go +++ /dev/null @@ -1,76 +0,0 @@ -// Package handling provides the use-case for registering incidents. Used by -// views facing the people handling the cargo along its route. -package handling - -import ( - "errors" - "time" - - "github.com/go-kit/kit/examples/shipping/cargo" - "github.com/go-kit/kit/examples/shipping/inspection" - "github.com/go-kit/kit/examples/shipping/location" - "github.com/go-kit/kit/examples/shipping/voyage" -) - -// ErrInvalidArgument is returned when one or more arguments are invalid. -var ErrInvalidArgument = errors.New("invalid argument") - -// EventHandler provides a means of subscribing to registered handling events. 
-type EventHandler interface { - CargoWasHandled(cargo.HandlingEvent) -} - -// Service provides handling operations. -type Service interface { - // RegisterHandlingEvent registers a handling event in the system, and - // notifies interested parties that a cargo has been handled. - RegisterHandlingEvent(completionTime time.Time, trackingID cargo.TrackingID, voyageNumber voyage.Number, - unLocode location.UNLocode, eventType cargo.HandlingEventType) error -} - -type service struct { - handlingEventRepository cargo.HandlingEventRepository - handlingEventFactory cargo.HandlingEventFactory - handlingEventHandler EventHandler -} - -func (s *service) RegisterHandlingEvent(completionTime time.Time, trackingID cargo.TrackingID, voyage voyage.Number, - loc location.UNLocode, eventType cargo.HandlingEventType) error { - if completionTime.IsZero() || trackingID == "" || loc == "" || eventType == cargo.NotHandled { - return ErrInvalidArgument - } - - e, err := s.handlingEventFactory.CreateHandlingEvent(time.Now(), completionTime, trackingID, voyage, loc, eventType) - if err != nil { - return err - } - - s.handlingEventRepository.Store(e) - s.handlingEventHandler.CargoWasHandled(e) - - return nil -} - -// NewService creates a handling event service with necessary dependencies. -func NewService(r cargo.HandlingEventRepository, f cargo.HandlingEventFactory, h EventHandler) Service { - return &service{ - handlingEventRepository: r, - handlingEventFactory: f, - handlingEventHandler: h, - } -} - -type handlingEventHandler struct { - InspectionService inspection.Service -} - -func (h *handlingEventHandler) CargoWasHandled(event cargo.HandlingEvent) { - h.InspectionService.InspectCargo(event.TrackingID) -} - -// NewEventHandler returns a new instance of a EventHandler. -func NewEventHandler(s inspection.Service) EventHandler { - return &handlingEventHandler{ - InspectionService: s, - } -} diff --git a/vendor/github.com/go-kit/kit/examples/shipping/handling/transport.go b/vendor/github.com/go-kit/kit/examples/shipping/handling/transport.go deleted file mode 100644 index 1777ad6..0000000 --- a/vendor/github.com/go-kit/kit/examples/shipping/handling/transport.go +++ /dev/null @@ -1,100 +0,0 @@ -package handling - -import ( - "encoding/json" - "net/http" - "time" - - "github.com/gorilla/mux" - "golang.org/x/net/context" - - "github.com/go-kit/kit/examples/shipping/cargo" - "github.com/go-kit/kit/examples/shipping/location" - "github.com/go-kit/kit/examples/shipping/voyage" - kitlog "github.com/go-kit/kit/log" - kithttp "github.com/go-kit/kit/transport/http" -) - -// MakeHandler returns a handler for the handling service. 
-func MakeHandler(ctx context.Context, hs Service, logger kitlog.Logger) http.Handler { - r := mux.NewRouter() - - opts := []kithttp.ServerOption{ - kithttp.ServerErrorLogger(logger), - kithttp.ServerErrorEncoder(encodeError), - } - - registerIncidentHandler := kithttp.NewServer( - ctx, - makeRegisterIncidentEndpoint(hs), - decodeRegisterIncidentRequest, - encodeResponse, - opts..., - ) - - r.Handle("/handling/v1/incidents", registerIncidentHandler).Methods("POST") - - return r -} - -func decodeRegisterIncidentRequest(_ context.Context, r *http.Request) (interface{}, error) { - var body struct { - CompletionTime time.Time `json:"completion_time"` - TrackingID string `json:"tracking_id"` - VoyageNumber string `json:"voyage"` - Location string `json:"location"` - EventType string `json:"event_type"` - } - - if err := json.NewDecoder(r.Body).Decode(&body); err != nil { - return nil, err - } - - return registerIncidentRequest{ - CompletionTime: body.CompletionTime, - ID: cargo.TrackingID(body.TrackingID), - Voyage: voyage.Number(body.VoyageNumber), - Location: location.UNLocode(body.Location), - EventType: stringToEventType(body.EventType), - }, nil -} - -func stringToEventType(s string) cargo.HandlingEventType { - types := map[string]cargo.HandlingEventType{ - cargo.Receive.String(): cargo.Receive, - cargo.Load.String(): cargo.Load, - cargo.Unload.String(): cargo.Unload, - cargo.Customs.String(): cargo.Customs, - cargo.Claim.String(): cargo.Claim, - } - return types[s] -} - -func encodeResponse(ctx context.Context, w http.ResponseWriter, response interface{}) error { - if e, ok := response.(errorer); ok && e.error() != nil { - encodeError(ctx, e.error(), w) - return nil - } - w.Header().Set("Content-Type", "application/json; charset=utf-8") - return json.NewEncoder(w).Encode(response) -} - -type errorer interface { - error() error -} - -// encode errors from business-logic -func encodeError(_ context.Context, err error, w http.ResponseWriter) { - switch err { - case cargo.ErrUnknown: - w.WriteHeader(http.StatusNotFound) - case ErrInvalidArgument: - w.WriteHeader(http.StatusBadRequest) - default: - w.WriteHeader(http.StatusInternalServerError) - } - w.Header().Set("Content-Type", "application/json; charset=utf-8") - json.NewEncoder(w).Encode(map[string]interface{}{ - "error": err.Error(), - }) -} diff --git a/vendor/github.com/go-kit/kit/examples/shipping/inspection/inspection.go b/vendor/github.com/go-kit/kit/examples/shipping/inspection/inspection.go deleted file mode 100644 index a3f7147..0000000 --- a/vendor/github.com/go-kit/kit/examples/shipping/inspection/inspection.go +++ /dev/null @@ -1,51 +0,0 @@ -// Package inspection provides means to inspect cargos. -package inspection - -import "github.com/go-kit/kit/examples/shipping/cargo" - -// EventHandler provides means of subscribing to inspection events. -type EventHandler interface { - CargoWasMisdirected(*cargo.Cargo) - CargoHasArrived(*cargo.Cargo) -} - -// Service provides cargo inspection operations. -type Service interface { - // InspectCargo inspects cargo and send relevant notifications to - // interested parties, for example if a cargo has been misdirected, or - // unloaded at the final destination. 
- InspectCargo(trackingID cargo.TrackingID) -} - -type service struct { - cargoRepository cargo.Repository - handlingEventRepository cargo.HandlingEventRepository - cargoEventHandler EventHandler -} - -// TODO: Should be transactional -func (s *service) InspectCargo(trackingID cargo.TrackingID) { - c, err := s.cargoRepository.Find(trackingID) - if err != nil { - return - } - - h := s.handlingEventRepository.QueryHandlingHistory(trackingID) - - c.DeriveDeliveryProgress(h) - - if c.Delivery.IsMisdirected { - s.cargoEventHandler.CargoWasMisdirected(c) - } - - if c.Delivery.IsUnloadedAtDestination { - s.cargoEventHandler.CargoHasArrived(c) - } - - s.cargoRepository.Store(c) -} - -// NewService creates a inspection service with necessary dependencies. -func NewService(cargoRepository cargo.Repository, handlingEventRepository cargo.HandlingEventRepository, eventHandler EventHandler) Service { - return &service{cargoRepository, handlingEventRepository, eventHandler} -} diff --git a/vendor/github.com/go-kit/kit/examples/shipping/location/location.go b/vendor/github.com/go-kit/kit/examples/shipping/location/location.go deleted file mode 100644 index 5129380..0000000 --- a/vendor/github.com/go-kit/kit/examples/shipping/location/location.go +++ /dev/null @@ -1,27 +0,0 @@ -// Package location provides the Location aggregate. -package location - -import "errors" - -// UNLocode is the United Nations location code that uniquely identifies a -// particular location. -// -// http://www.unece.org/cefact/locode/ -// http://www.unece.org/cefact/locode/DocColumnDescription.htm#LOCODE -type UNLocode string - -// Location is a location is our model is stops on a journey, such as cargo -// origin or destination, or carrier movement endpoints. -type Location struct { - UNLocode UNLocode - Name string -} - -// ErrUnknown is used when a location could not be found. -var ErrUnknown = errors.New("unknown location") - -// Repository provides access a location store. -type Repository interface { - Find(locode UNLocode) (Location, error) - FindAll() []Location -} diff --git a/vendor/github.com/go-kit/kit/examples/shipping/location/sample_locations.go b/vendor/github.com/go-kit/kit/examples/shipping/location/sample_locations.go deleted file mode 100644 index de0d4c1..0000000 --- a/vendor/github.com/go-kit/kit/examples/shipping/location/sample_locations.go +++ /dev/null @@ -1,27 +0,0 @@ -package location - -// Sample UN locodes. -var ( - SESTO UNLocode = "SESTO" - AUMEL UNLocode = "AUMEL" - CNHKG UNLocode = "CNHKG" - USNYC UNLocode = "USNYC" - USCHI UNLocode = "USCHI" - JNTKO UNLocode = "JNTKO" - DEHAM UNLocode = "DEHAM" - NLRTM UNLocode = "NLRTM" - FIHEL UNLocode = "FIHEL" -) - -// Sample locations. 
-var ( - Stockholm = Location{SESTO, "Stockholm"} - Melbourne = Location{AUMEL, "Melbourne"} - Hongkong = Location{CNHKG, "Hongkong"} - NewYork = Location{USNYC, "New York"} - Chicago = Location{USCHI, "Chicago"} - Tokyo = Location{JNTKO, "Tokyo"} - Hamburg = Location{DEHAM, "Hamburg"} - Rotterdam = Location{NLRTM, "Rotterdam"} - Helsinki = Location{FIHEL, "Helsinki"} -) diff --git a/vendor/github.com/go-kit/kit/examples/shipping/main.go b/vendor/github.com/go-kit/kit/examples/shipping/main.go deleted file mode 100644 index 98a081c..0000000 --- a/vendor/github.com/go-kit/kit/examples/shipping/main.go +++ /dev/null @@ -1,203 +0,0 @@ -package main - -import ( - "flag" - "fmt" - "net/http" - "os" - "os/signal" - "sync" - "syscall" - "time" - - stdprometheus "github.com/prometheus/client_golang/prometheus" - "golang.org/x/net/context" - - "github.com/go-kit/kit/log" - "github.com/go-kit/kit/metrics" - kitprometheus "github.com/go-kit/kit/metrics/prometheus" - - "github.com/go-kit/kit/examples/shipping/booking" - "github.com/go-kit/kit/examples/shipping/cargo" - "github.com/go-kit/kit/examples/shipping/handling" - "github.com/go-kit/kit/examples/shipping/inspection" - "github.com/go-kit/kit/examples/shipping/location" - "github.com/go-kit/kit/examples/shipping/repository" - "github.com/go-kit/kit/examples/shipping/routing" - "github.com/go-kit/kit/examples/shipping/tracking" -) - -const ( - defaultPort = "8080" - defaultRoutingServiceURL = "http://localhost:7878" -) - -func main() { - var ( - addr = envString("PORT", defaultPort) - rsurl = envString("ROUTINGSERVICE_URL", defaultRoutingServiceURL) - - httpAddr = flag.String("http.addr", ":"+addr, "HTTP listen address") - routingServiceURL = flag.String("service.routing", rsurl, "routing service URL") - - ctx = context.Background() - ) - - flag.Parse() - - var logger log.Logger - logger = log.NewLogfmtLogger(os.Stderr) - logger = &serializedLogger{Logger: logger} - logger = log.NewContext(logger).With("ts", log.DefaultTimestampUTC) - - var ( - cargos = repository.NewCargo() - locations = repository.NewLocation() - voyages = repository.NewVoyage() - handlingEvents = repository.NewHandlingEvent() - ) - - // Configure some questionable dependencies. - var ( - handlingEventFactory = cargo.HandlingEventFactory{ - CargoRepository: cargos, - VoyageRepository: voyages, - LocationRepository: locations, - } - handlingEventHandler = handling.NewEventHandler( - inspection.NewService(cargos, handlingEvents, nil), - ) - ) - - // Facilitate testing by adding some cargos. 
- storeTestData(cargos) - - fieldKeys := []string{"method"} - - var rs routing.Service - rs = routing.NewProxyingMiddleware(*routingServiceURL, ctx)(rs) - - var bs booking.Service - bs = booking.NewService(cargos, locations, handlingEvents, rs) - bs = booking.NewLoggingService(log.NewContext(logger).With("component", "booking"), bs) - bs = booking.NewInstrumentingService( - kitprometheus.NewCounter(stdprometheus.CounterOpts{ - Namespace: "api", - Subsystem: "booking_service", - Name: "request_count", - Help: "Number of requests received.", - }, fieldKeys), - metrics.NewTimeHistogram(time.Microsecond, kitprometheus.NewSummary(stdprometheus.SummaryOpts{ - Namespace: "api", - Subsystem: "booking_service", - Name: "request_latency_microseconds", - Help: "Total duration of requests in microseconds.", - }, fieldKeys)), bs) - - var ts tracking.Service - ts = tracking.NewService(cargos, handlingEvents) - ts = tracking.NewLoggingService(log.NewContext(logger).With("component", "tracking"), ts) - ts = tracking.NewInstrumentingService( - kitprometheus.NewCounter(stdprometheus.CounterOpts{ - Namespace: "api", - Subsystem: "tracking_service", - Name: "request_count", - Help: "Number of requests received.", - }, fieldKeys), - metrics.NewTimeHistogram(time.Microsecond, kitprometheus.NewSummary(stdprometheus.SummaryOpts{ - Namespace: "api", - Subsystem: "tracking_service", - Name: "request_latency_microseconds", - Help: "Total duration of requests in microseconds.", - }, fieldKeys)), ts) - - var hs handling.Service - hs = handling.NewService(handlingEvents, handlingEventFactory, handlingEventHandler) - hs = handling.NewLoggingService(log.NewContext(logger).With("component", "handling"), hs) - hs = handling.NewInstrumentingService( - kitprometheus.NewCounter(stdprometheus.CounterOpts{ - Namespace: "api", - Subsystem: "handling_service", - Name: "request_count", - Help: "Number of requests received.", - }, fieldKeys), - metrics.NewTimeHistogram(time.Microsecond, kitprometheus.NewSummary(stdprometheus.SummaryOpts{ - Namespace: "api", - Subsystem: "handling_service", - Name: "request_latency_microseconds", - Help: "Total duration of requests in microseconds.", - }, fieldKeys)), hs) - - httpLogger := log.NewContext(logger).With("component", "http") - - mux := http.NewServeMux() - - mux.Handle("/booking/v1/", booking.MakeHandler(ctx, bs, httpLogger)) - mux.Handle("/tracking/v1/", tracking.MakeHandler(ctx, ts, httpLogger)) - mux.Handle("/handling/v1/", handling.MakeHandler(ctx, hs, httpLogger)) - - http.Handle("/", accessControl(mux)) - http.Handle("/metrics", stdprometheus.Handler()) - - errs := make(chan error, 2) - go func() { - logger.Log("transport", "http", "address", *httpAddr, "msg", "listening") - errs <- http.ListenAndServe(*httpAddr, nil) - }() - go func() { - c := make(chan os.Signal) - signal.Notify(c, syscall.SIGINT) - errs <- fmt.Errorf("%s", <-c) - }() - - logger.Log("terminated", <-errs) -} - -func accessControl(h http.Handler) http.Handler { - return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - w.Header().Set("Access-Control-Allow-Origin", "*") - w.Header().Set("Access-Control-Allow-Methods", "GET, POST, OPTIONS") - w.Header().Set("Access-Control-Allow-Headers", "Origin, Content-Type") - - if r.Method == "OPTIONS" { - return - } - - h.ServeHTTP(w, r) - }) -} - -func envString(env, fallback string) string { - e := os.Getenv(env) - if e == "" { - return fallback - } - return e -} - -func storeTestData(r cargo.Repository) { - test1 := cargo.New("FTL456", 
cargo.RouteSpecification{ - Origin: location.AUMEL, - Destination: location.SESTO, - ArrivalDeadline: time.Now().AddDate(0, 0, 7), - }) - _ = r.Store(test1) - - test2 := cargo.New("ABC123", cargo.RouteSpecification{ - Origin: location.SESTO, - Destination: location.CNHKG, - ArrivalDeadline: time.Now().AddDate(0, 0, 14), - }) - _ = r.Store(test2) -} - -type serializedLogger struct { - mtx sync.Mutex - log.Logger -} - -func (l *serializedLogger) Log(keyvals ...interface{}) error { - l.mtx.Lock() - defer l.mtx.Unlock() - return l.Logger.Log(keyvals...) -} diff --git a/vendor/github.com/go-kit/kit/examples/shipping/repository/repositories.go b/vendor/github.com/go-kit/kit/examples/shipping/repository/repositories.go deleted file mode 100644 index 714d0a8..0000000 --- a/vendor/github.com/go-kit/kit/examples/shipping/repository/repositories.go +++ /dev/null @@ -1,142 +0,0 @@ -// Package repository provides implementations of all the domain repositories. -package repository - -import ( - "sync" - - "github.com/go-kit/kit/examples/shipping/cargo" - "github.com/go-kit/kit/examples/shipping/location" - "github.com/go-kit/kit/examples/shipping/voyage" -) - -type cargoRepository struct { - mtx sync.RWMutex - cargos map[cargo.TrackingID]*cargo.Cargo -} - -func (r *cargoRepository) Store(c *cargo.Cargo) error { - r.mtx.Lock() - defer r.mtx.Unlock() - r.cargos[c.TrackingID] = c - return nil -} - -func (r *cargoRepository) Find(trackingID cargo.TrackingID) (*cargo.Cargo, error) { - r.mtx.RLock() - defer r.mtx.RUnlock() - if val, ok := r.cargos[trackingID]; ok { - return val, nil - } - return nil, cargo.ErrUnknown -} - -func (r *cargoRepository) FindAll() []*cargo.Cargo { - r.mtx.RLock() - defer r.mtx.RUnlock() - c := make([]*cargo.Cargo, 0, len(r.cargos)) - for _, val := range r.cargos { - c = append(c, val) - } - return c -} - -// NewCargo returns a new instance of a in-memory cargo repository. -func NewCargo() cargo.Repository { - return &cargoRepository{ - cargos: make(map[cargo.TrackingID]*cargo.Cargo), - } -} - -type locationRepository struct { - locations map[location.UNLocode]location.Location -} - -func (r *locationRepository) Find(locode location.UNLocode) (location.Location, error) { - if l, ok := r.locations[locode]; ok { - return l, nil - } - return location.Location{}, location.ErrUnknown -} - -func (r *locationRepository) FindAll() []location.Location { - l := make([]location.Location, 0, len(r.locations)) - for _, val := range r.locations { - l = append(l, val) - } - return l -} - -// NewLocation returns a new instance of a in-memory location repository. -func NewLocation() location.Repository { - r := &locationRepository{ - locations: make(map[location.UNLocode]location.Location), - } - - r.locations[location.SESTO] = location.Stockholm - r.locations[location.AUMEL] = location.Melbourne - r.locations[location.CNHKG] = location.Hongkong - r.locations[location.JNTKO] = location.Tokyo - r.locations[location.NLRTM] = location.Rotterdam - r.locations[location.DEHAM] = location.Hamburg - - return r -} - -type voyageRepository struct { - voyages map[voyage.Number]*voyage.Voyage -} - -func (r *voyageRepository) Find(voyageNumber voyage.Number) (*voyage.Voyage, error) { - if v, ok := r.voyages[voyageNumber]; ok { - return v, nil - } - - return nil, voyage.ErrUnknown -} - -// NewVoyage returns a new instance of a in-memory voyage repository. 
-func NewVoyage() voyage.Repository { - r := &voyageRepository{ - voyages: make(map[voyage.Number]*voyage.Voyage), - } - - r.voyages[voyage.V100.Number] = voyage.V100 - r.voyages[voyage.V300.Number] = voyage.V300 - r.voyages[voyage.V400.Number] = voyage.V400 - - r.voyages[voyage.V0100S.Number] = voyage.V0100S - r.voyages[voyage.V0200T.Number] = voyage.V0200T - r.voyages[voyage.V0300A.Number] = voyage.V0300A - r.voyages[voyage.V0301S.Number] = voyage.V0301S - r.voyages[voyage.V0400S.Number] = voyage.V0400S - - return r -} - -type handlingEventRepository struct { - mtx sync.RWMutex - events map[cargo.TrackingID][]cargo.HandlingEvent -} - -func (r *handlingEventRepository) Store(e cargo.HandlingEvent) { - r.mtx.Lock() - defer r.mtx.Unlock() - // Make array if it's the first event with this tracking ID. - if _, ok := r.events[e.TrackingID]; !ok { - r.events[e.TrackingID] = make([]cargo.HandlingEvent, 0) - } - r.events[e.TrackingID] = append(r.events[e.TrackingID], e) -} - -func (r *handlingEventRepository) QueryHandlingHistory(trackingID cargo.TrackingID) cargo.HandlingHistory { - r.mtx.RLock() - defer r.mtx.RUnlock() - return cargo.HandlingHistory{HandlingEvents: r.events[trackingID]} -} - -// NewHandlingEvent returns a new instance of a in-memory handling event repository. -func NewHandlingEvent() cargo.HandlingEventRepository { - return &handlingEventRepository{ - events: make(map[cargo.TrackingID][]cargo.HandlingEvent), - } -} diff --git a/vendor/github.com/go-kit/kit/examples/shipping/routing/proxying.go b/vendor/github.com/go-kit/kit/examples/shipping/routing/proxying.go deleted file mode 100644 index 3051caf..0000000 --- a/vendor/github.com/go-kit/kit/examples/shipping/routing/proxying.go +++ /dev/null @@ -1,117 +0,0 @@ -package routing - -import ( - "encoding/json" - "net/http" - "net/url" - "time" - - "golang.org/x/net/context" - - "github.com/go-kit/kit/circuitbreaker" - "github.com/go-kit/kit/endpoint" - "github.com/go-kit/kit/examples/shipping/cargo" - "github.com/go-kit/kit/examples/shipping/location" - "github.com/go-kit/kit/examples/shipping/voyage" - kithttp "github.com/go-kit/kit/transport/http" -) - -type proxyService struct { - context.Context - FetchRoutesEndpoint endpoint.Endpoint - Service -} - -func (s proxyService) FetchRoutesForSpecification(rs cargo.RouteSpecification) []cargo.Itinerary { - response, err := s.FetchRoutesEndpoint(s.Context, fetchRoutesRequest{ - From: string(rs.Origin), - To: string(rs.Destination), - }) - if err != nil { - return []cargo.Itinerary{} - } - - resp := response.(fetchRoutesResponse) - - var itineraries []cargo.Itinerary - for _, r := range resp.Paths { - var legs []cargo.Leg - for _, e := range r.Edges { - legs = append(legs, cargo.Leg{ - VoyageNumber: voyage.Number(e.Voyage), - LoadLocation: location.UNLocode(e.Origin), - UnloadLocation: location.UNLocode(e.Destination), - LoadTime: e.Departure, - UnloadTime: e.Arrival, - }) - } - - itineraries = append(itineraries, cargo.Itinerary{Legs: legs}) - } - - return itineraries -} - -// ServiceMiddleware defines a middleware for a routing service. -type ServiceMiddleware func(Service) Service - -// NewProxyingMiddleware returns a new instance of a proxying middleware. 
-func NewProxyingMiddleware(proxyURL string, ctx context.Context) ServiceMiddleware { - return func(next Service) Service { - var e endpoint.Endpoint - e = makeFetchRoutesEndpoint(ctx, proxyURL) - e = circuitbreaker.Hystrix("fetch-routes")(e) - return proxyService{ctx, e, next} - } -} - -type fetchRoutesRequest struct { - From string - To string -} - -type fetchRoutesResponse struct { - Paths []struct { - Edges []struct { - Origin string `json:"origin"` - Destination string `json:"destination"` - Voyage string `json:"voyage"` - Departure time.Time `json:"departure"` - Arrival time.Time `json:"arrival"` - } `json:"edges"` - } `json:"paths"` -} - -func makeFetchRoutesEndpoint(ctx context.Context, instance string) endpoint.Endpoint { - u, err := url.Parse(instance) - if err != nil { - panic(err) - } - if u.Path == "" { - u.Path = "/paths" - } - return kithttp.NewClient( - "GET", u, - encodeFetchRoutesRequest, - decodeFetchRoutesResponse, - ).Endpoint() -} - -func decodeFetchRoutesResponse(_ context.Context, resp *http.Response) (interface{}, error) { - var response fetchRoutesResponse - if err := json.NewDecoder(resp.Body).Decode(&response); err != nil { - return nil, err - } - return response, nil -} - -func encodeFetchRoutesRequest(_ context.Context, r *http.Request, request interface{}) error { - req := request.(fetchRoutesRequest) - - vals := r.URL.Query() - vals.Add("from", req.From) - vals.Add("to", req.To) - r.URL.RawQuery = vals.Encode() - - return nil -} diff --git a/vendor/github.com/go-kit/kit/examples/shipping/routing/routing.go b/vendor/github.com/go-kit/kit/examples/shipping/routing/routing.go deleted file mode 100644 index dac3690..0000000 --- a/vendor/github.com/go-kit/kit/examples/shipping/routing/routing.go +++ /dev/null @@ -1,13 +0,0 @@ -// Package routing provides the routing domain service. It does not actually -// implement the routing service but merely acts as a proxy for a separate -// bounded context. -package routing - -import "github.com/go-kit/kit/examples/shipping/cargo" - -// Service provides access to an external routing service. -type Service interface { - // FetchRoutesForSpecification finds all possible routes that satisfy a - // given specification. 
- FetchRoutesForSpecification(rs cargo.RouteSpecification) []cargo.Itinerary -} diff --git a/vendor/github.com/go-kit/kit/examples/shipping/tracking/endpoint.go b/vendor/github.com/go-kit/kit/examples/shipping/tracking/endpoint.go deleted file mode 100644 index ea105d5..0000000 --- a/vendor/github.com/go-kit/kit/examples/shipping/tracking/endpoint.go +++ /dev/null @@ -1,26 +0,0 @@ -package tracking - -import ( - "golang.org/x/net/context" - - "github.com/go-kit/kit/endpoint" -) - -type trackCargoRequest struct { - ID string -} - -type trackCargoResponse struct { - Cargo *Cargo `json:"cargo,omitempty"` - Err error `json:"error,omitempty"` -} - -func (r trackCargoResponse) error() error { return r.Err } - -func makeTrackCargoEndpoint(ts Service) endpoint.Endpoint { - return func(ctx context.Context, request interface{}) (interface{}, error) { - req := request.(trackCargoRequest) - c, err := ts.Track(req.ID) - return trackCargoResponse{Cargo: &c, Err: err}, nil - } -} diff --git a/vendor/github.com/go-kit/kit/examples/shipping/tracking/instrumenting.go b/vendor/github.com/go-kit/kit/examples/shipping/tracking/instrumenting.go deleted file mode 100644 index 629ab27..0000000 --- a/vendor/github.com/go-kit/kit/examples/shipping/tracking/instrumenting.go +++ /dev/null @@ -1,32 +0,0 @@ -package tracking - -import ( - "time" - - "github.com/go-kit/kit/metrics" -) - -type instrumentingService struct { - requestCount metrics.Counter - requestLatency metrics.TimeHistogram - Service -} - -// NewInstrumentingService returns an instance of an instrumenting Service. -func NewInstrumentingService(requestCount metrics.Counter, requestLatency metrics.TimeHistogram, s Service) Service { - return &instrumentingService{ - requestCount: requestCount, - requestLatency: requestLatency, - Service: s, - } -} - -func (s *instrumentingService) Track(id string) (Cargo, error) { - defer func(begin time.Time) { - methodField := metrics.Field{Key: "method", Value: "track"} - s.requestCount.With(methodField).Add(1) - s.requestLatency.With(methodField).Observe(time.Since(begin)) - }(time.Now()) - - return s.Service.Track(id) -} diff --git a/vendor/github.com/go-kit/kit/examples/shipping/tracking/logging.go b/vendor/github.com/go-kit/kit/examples/shipping/tracking/logging.go deleted file mode 100644 index 584aeaa..0000000 --- a/vendor/github.com/go-kit/kit/examples/shipping/tracking/logging.go +++ /dev/null @@ -1,24 +0,0 @@ -package tracking - -import ( - "time" - - "github.com/go-kit/kit/log" -) - -type loggingService struct { - logger log.Logger - Service -} - -// NewLoggingService returns a new instance of a logging Service. -func NewLoggingService(logger log.Logger, s Service) Service { - return &loggingService{logger, s} -} - -func (s *loggingService) Track(id string) (c Cargo, err error) { - defer func(begin time.Time) { - s.logger.Log("method", "track", "tracking_id", id, "took", time.Since(begin), "err", err) - }(time.Now()) - return s.Service.Track(id) -} diff --git a/vendor/github.com/go-kit/kit/examples/shipping/tracking/service.go b/vendor/github.com/go-kit/kit/examples/shipping/tracking/service.go deleted file mode 100644 index d5b9273..0000000 --- a/vendor/github.com/go-kit/kit/examples/shipping/tracking/service.go +++ /dev/null @@ -1,163 +0,0 @@ -// Package tracking provides the use-case of tracking a cargo. Used by views -// facing the end-user. 
-package tracking - -import ( - "errors" - "fmt" - "strings" - "time" - - "github.com/go-kit/kit/examples/shipping/cargo" -) - -// ErrInvalidArgument is returned when one or more arguments are invalid. -var ErrInvalidArgument = errors.New("invalid argument") - -// Service is the interface that provides the basic Track method. -type Service interface { - // Track returns a cargo matching a tracking ID. - Track(id string) (Cargo, error) -} - -type service struct { - cargos cargo.Repository - handlingEvents cargo.HandlingEventRepository -} - -func (s *service) Track(id string) (Cargo, error) { - if id == "" { - return Cargo{}, ErrInvalidArgument - } - c, err := s.cargos.Find(cargo.TrackingID(id)) - if err != nil { - return Cargo{}, err - } - return assemble(c, s.handlingEvents), nil -} - -// NewService returns a new instance of the default Service. -func NewService(cargos cargo.Repository, handlingEvents cargo.HandlingEventRepository) Service { - return &service{ - cargos: cargos, - handlingEvents: handlingEvents, - } -} - -// Cargo is a read model for tracking views. -type Cargo struct { - TrackingID string `json:"tracking_id"` - StatusText string `json:"status_text"` - Origin string `json:"origin"` - Destination string `json:"destination"` - ETA time.Time `json:"eta"` - NextExpectedActivity string `json:"next_expected_activity"` - ArrivalDeadline time.Time `json:"arrival_deadline"` - Events []Event `json:"events"` -} - -// Leg is a read model for booking views. -type Leg struct { - VoyageNumber string `json:"voyage_number"` - From string `json:"from"` - To string `json:"to"` - LoadTime time.Time `json:"load_time"` - UnloadTime time.Time `json:"unload_time"` -} - -// Event is a read model for tracking views. -type Event struct { - Description string `json:"description"` - Expected bool `json:"expected"` -} - -func assemble(c *cargo.Cargo, her cargo.HandlingEventRepository) Cargo { - return Cargo{ - TrackingID: string(c.TrackingID), - Origin: string(c.Origin), - Destination: string(c.RouteSpecification.Destination), - ETA: c.Delivery.ETA, - NextExpectedActivity: nextExpectedActivity(c), - ArrivalDeadline: c.RouteSpecification.ArrivalDeadline, - StatusText: assembleStatusText(c), - Events: assembleEvents(c, her), - } -} - -func assembleLegs(c cargo.Cargo) []Leg { - var legs []Leg - for _, l := range c.Itinerary.Legs { - legs = append(legs, Leg{ - VoyageNumber: string(l.VoyageNumber), - From: string(l.LoadLocation), - To: string(l.UnloadLocation), - LoadTime: l.LoadTime, - UnloadTime: l.UnloadTime, - }) - } - return legs -} - -func nextExpectedActivity(c *cargo.Cargo) string { - a := c.Delivery.NextExpectedActivity - prefix := "Next expected activity is to" - - switch a.Type { - case cargo.Load: - return fmt.Sprintf("%s %s cargo onto voyage %s in %s.", prefix, strings.ToLower(a.Type.String()), a.VoyageNumber, a.Location) - case cargo.Unload: - return fmt.Sprintf("%s %s cargo off of voyage %s in %s.", prefix, strings.ToLower(a.Type.String()), a.VoyageNumber, a.Location) - case cargo.NotHandled: - return "There are currently no expected activities for this cargo." 
- } - - return fmt.Sprintf("%s %s cargo in %s.", prefix, strings.ToLower(a.Type.String()), a.Location) -} - -func assembleStatusText(c *cargo.Cargo) string { - switch c.Delivery.TransportStatus { - case cargo.NotReceived: - return "Not received" - case cargo.InPort: - return fmt.Sprintf("In port %s", c.Delivery.LastKnownLocation) - case cargo.OnboardCarrier: - return fmt.Sprintf("Onboard voyage %s", c.Delivery.CurrentVoyage) - case cargo.Claimed: - return "Claimed" - default: - return "Unknown" - } -} - -func assembleEvents(c *cargo.Cargo, r cargo.HandlingEventRepository) []Event { - h := r.QueryHandlingHistory(c.TrackingID) - - var events []Event - for _, e := range h.HandlingEvents { - var description string - - switch e.Activity.Type { - case cargo.NotHandled: - description = "Cargo has not yet been received." - case cargo.Receive: - description = fmt.Sprintf("Received in %s, at %s", e.Activity.Location, time.Now().Format(time.RFC3339)) - case cargo.Load: - description = fmt.Sprintf("Loaded onto voyage %s in %s, at %s.", e.Activity.VoyageNumber, e.Activity.Location, time.Now().Format(time.RFC3339)) - case cargo.Unload: - description = fmt.Sprintf("Unloaded off voyage %s in %s, at %s.", e.Activity.VoyageNumber, e.Activity.Location, time.Now().Format(time.RFC3339)) - case cargo.Claim: - description = fmt.Sprintf("Claimed in %s, at %s.", e.Activity.Location, time.Now().Format(time.RFC3339)) - case cargo.Customs: - description = fmt.Sprintf("Cleared customs in %s, at %s.", e.Activity.Location, time.Now().Format(time.RFC3339)) - default: - description = "[Unknown status]" - } - - events = append(events, Event{ - Description: description, - Expected: c.Itinerary.IsExpected(e), - }) - } - - return events -} diff --git a/vendor/github.com/go-kit/kit/examples/shipping/tracking/transport.go b/vendor/github.com/go-kit/kit/examples/shipping/tracking/transport.go deleted file mode 100644 index 9cac1ec..0000000 --- a/vendor/github.com/go-kit/kit/examples/shipping/tracking/transport.go +++ /dev/null @@ -1,74 +0,0 @@ -package tracking - -import ( - "encoding/json" - "errors" - "net/http" - - "github.com/gorilla/mux" - "golang.org/x/net/context" - - "github.com/go-kit/kit/examples/shipping/cargo" - kitlog "github.com/go-kit/kit/log" - kithttp "github.com/go-kit/kit/transport/http" -) - -// MakeHandler returns a handler for the tracking service. 
-func MakeHandler(ctx context.Context, ts Service, logger kitlog.Logger) http.Handler { - r := mux.NewRouter() - - opts := []kithttp.ServerOption{ - kithttp.ServerErrorLogger(logger), - kithttp.ServerErrorEncoder(encodeError), - } - - trackCargoHandler := kithttp.NewServer( - ctx, - makeTrackCargoEndpoint(ts), - decodeTrackCargoRequest, - encodeResponse, - opts..., - ) - - r.Handle("/tracking/v1/cargos/{id}", trackCargoHandler).Methods("GET") - - return r -} - -func decodeTrackCargoRequest(_ context.Context, r *http.Request) (interface{}, error) { - vars := mux.Vars(r) - id, ok := vars["id"] - if !ok { - return nil, errors.New("bad route") - } - return trackCargoRequest{ID: id}, nil -} - -func encodeResponse(ctx context.Context, w http.ResponseWriter, response interface{}) error { - if e, ok := response.(errorer); ok && e.error() != nil { - encodeError(ctx, e.error(), w) - return nil - } - w.Header().Set("Content-Type", "application/json; charset=utf-8") - return json.NewEncoder(w).Encode(response) -} - -type errorer interface { - error() error -} - -// encode errors from business-logic -func encodeError(_ context.Context, err error, w http.ResponseWriter) { - switch err { - case cargo.ErrUnknown: - w.WriteHeader(http.StatusNotFound) - case ErrInvalidArgument: - w.WriteHeader(http.StatusBadRequest) - default: - w.WriteHeader(http.StatusInternalServerError) - } - w.Header().Set("Content-Type", "application/json; charset=utf-8") - json.NewEncoder(w).Encode(map[string]interface{}{ - "error": err.Error(), - }) -} diff --git a/vendor/github.com/go-kit/kit/examples/shipping/voyage/sample_voyages.go b/vendor/github.com/go-kit/kit/examples/shipping/voyage/sample_voyages.go deleted file mode 100644 index 51b7a05..0000000 --- a/vendor/github.com/go-kit/kit/examples/shipping/voyage/sample_voyages.go +++ /dev/null @@ -1,40 +0,0 @@ -package voyage - -import "github.com/go-kit/kit/examples/shipping/location" - -// A set of sample voyages. -var ( - V100 = New("V100", Schedule{ - []CarrierMovement{ - {DepartureLocation: location.Hongkong, ArrivalLocation: location.Tokyo}, - {DepartureLocation: location.Tokyo, ArrivalLocation: location.NewYork}, - }, - }) - - V300 = New("V300", Schedule{ - []CarrierMovement{ - {DepartureLocation: location.Tokyo, ArrivalLocation: location.Rotterdam}, - {DepartureLocation: location.Rotterdam, ArrivalLocation: location.Hamburg}, - {DepartureLocation: location.Hamburg, ArrivalLocation: location.Melbourne}, - {DepartureLocation: location.Melbourne, ArrivalLocation: location.Tokyo}, - }, - }) - - V400 = New("V400", Schedule{ - []CarrierMovement{ - {DepartureLocation: location.Hamburg, ArrivalLocation: location.Stockholm}, - {DepartureLocation: location.Stockholm, ArrivalLocation: location.Helsinki}, - {DepartureLocation: location.Helsinki, ArrivalLocation: location.Hamburg}, - }, - }) -) - -// These voyages are hard-coded into the current pathfinder. Make sure -// they exist. 
-var ( - V0100S = New("0100S", Schedule{[]CarrierMovement{}}) - V0200T = New("0200T", Schedule{[]CarrierMovement{}}) - V0300A = New("0300A", Schedule{[]CarrierMovement{}}) - V0301S = New("0301S", Schedule{[]CarrierMovement{}}) - V0400S = New("0400S", Schedule{[]CarrierMovement{}}) -) diff --git a/vendor/github.com/go-kit/kit/examples/shipping/voyage/voyage.go b/vendor/github.com/go-kit/kit/examples/shipping/voyage/voyage.go deleted file mode 100644 index 57a70b0..0000000 --- a/vendor/github.com/go-kit/kit/examples/shipping/voyage/voyage.go +++ /dev/null @@ -1,44 +0,0 @@ -// Package voyage provides the Voyage aggregate. -package voyage - -import ( - "errors" - "time" - - "github.com/go-kit/kit/examples/shipping/location" -) - -// Number uniquely identifies a particular Voyage. -type Number string - -// Voyage is a uniquely identifiable series of carrier movements. -type Voyage struct { - Number Number - Schedule Schedule -} - -// New creates a voyage with a voyage number and a provided schedule. -func New(n Number, s Schedule) *Voyage { - return &Voyage{Number: n, Schedule: s} -} - -// Schedule describes a voyage schedule. -type Schedule struct { - CarrierMovements []CarrierMovement -} - -// CarrierMovement is a vessel voyage from one location to another. -type CarrierMovement struct { - DepartureLocation location.Location - ArrivalLocation location.Location - DepartureTime time.Time - ArrivalTime time.Time -} - -// ErrUnknown is used when a voyage could not be found. -var ErrUnknown = errors.New("unknown voyage") - -// Repository provides access a voyage store. -type Repository interface { - Find(Number) (*Voyage, error) -} diff --git a/vendor/github.com/go-kit/kit/examples/stringsvc1/main.go b/vendor/github.com/go-kit/kit/examples/stringsvc1/main.go deleted file mode 100644 index 876eb9c..0000000 --- a/vendor/github.com/go-kit/kit/examples/stringsvc1/main.go +++ /dev/null @@ -1,115 +0,0 @@ -package main - -import ( - "encoding/json" - "errors" - "log" - "net/http" - "strings" - - "golang.org/x/net/context" - - "github.com/go-kit/kit/endpoint" - httptransport "github.com/go-kit/kit/transport/http" -) - -// StringService provides operations on strings. 
-type StringService interface { - Uppercase(string) (string, error) - Count(string) int -} - -type stringService struct{} - -func (stringService) Uppercase(s string) (string, error) { - if s == "" { - return "", ErrEmpty - } - return strings.ToUpper(s), nil -} - -func (stringService) Count(s string) int { - return len(s) -} - -func main() { - ctx := context.Background() - svc := stringService{} - - uppercaseHandler := httptransport.NewServer( - ctx, - makeUppercaseEndpoint(svc), - decodeUppercaseRequest, - encodeResponse, - ) - - countHandler := httptransport.NewServer( - ctx, - makeCountEndpoint(svc), - decodeCountRequest, - encodeResponse, - ) - - http.Handle("/uppercase", uppercaseHandler) - http.Handle("/count", countHandler) - log.Fatal(http.ListenAndServe(":8080", nil)) -} - -func makeUppercaseEndpoint(svc StringService) endpoint.Endpoint { - return func(ctx context.Context, request interface{}) (interface{}, error) { - req := request.(uppercaseRequest) - v, err := svc.Uppercase(req.S) - if err != nil { - return uppercaseResponse{v, err.Error()}, nil - } - return uppercaseResponse{v, ""}, nil - } -} - -func makeCountEndpoint(svc StringService) endpoint.Endpoint { - return func(ctx context.Context, request interface{}) (interface{}, error) { - req := request.(countRequest) - v := svc.Count(req.S) - return countResponse{v}, nil - } -} - -func decodeUppercaseRequest(_ context.Context, r *http.Request) (interface{}, error) { - var request uppercaseRequest - if err := json.NewDecoder(r.Body).Decode(&request); err != nil { - return nil, err - } - return request, nil -} - -func decodeCountRequest(_ context.Context, r *http.Request) (interface{}, error) { - var request countRequest - if err := json.NewDecoder(r.Body).Decode(&request); err != nil { - return nil, err - } - return request, nil -} - -func encodeResponse(_ context.Context, w http.ResponseWriter, response interface{}) error { - return json.NewEncoder(w).Encode(response) -} - -type uppercaseRequest struct { - S string `json:"s"` -} - -type uppercaseResponse struct { - V string `json:"v"` - Err string `json:"err,omitempty"` // errors don't define JSON marshaling -} - -type countRequest struct { - S string `json:"s"` -} - -type countResponse struct { - V int `json:"v"` -} - -// ErrEmpty is returned when an input string is empty. 
-var ErrEmpty = errors.New("empty string") diff --git a/vendor/github.com/go-kit/kit/examples/stringsvc2/instrumenting.go b/vendor/github.com/go-kit/kit/examples/stringsvc2/instrumenting.go deleted file mode 100644 index f461845..0000000 --- a/vendor/github.com/go-kit/kit/examples/stringsvc2/instrumenting.go +++ /dev/null @@ -1,40 +0,0 @@ -package main - -import ( - "fmt" - "time" - - "github.com/go-kit/kit/metrics" -) - -type instrumentingMiddleware struct { - requestCount metrics.Counter - requestLatency metrics.TimeHistogram - countResult metrics.Histogram - next StringService -} - -func (mw instrumentingMiddleware) Uppercase(s string) (output string, err error) { - defer func(begin time.Time) { - methodField := metrics.Field{Key: "method", Value: "uppercase"} - errorField := metrics.Field{Key: "error", Value: fmt.Sprintf("%v", err)} - mw.requestCount.With(methodField).With(errorField).Add(1) - mw.requestLatency.With(methodField).With(errorField).Observe(time.Since(begin)) - }(time.Now()) - - output, err = mw.next.Uppercase(s) - return -} - -func (mw instrumentingMiddleware) Count(s string) (n int) { - defer func(begin time.Time) { - methodField := metrics.Field{Key: "method", Value: "count"} - errorField := metrics.Field{Key: "error", Value: fmt.Sprintf("%v", error(nil))} - mw.requestCount.With(methodField).With(errorField).Add(1) - mw.requestLatency.With(methodField).With(errorField).Observe(time.Since(begin)) - mw.countResult.Observe(int64(n)) - }(time.Now()) - - n = mw.next.Count(s) - return -} diff --git a/vendor/github.com/go-kit/kit/examples/stringsvc2/logging.go b/vendor/github.com/go-kit/kit/examples/stringsvc2/logging.go deleted file mode 100644 index b958f3b..0000000 --- a/vendor/github.com/go-kit/kit/examples/stringsvc2/logging.go +++ /dev/null @@ -1,41 +0,0 @@ -package main - -import ( - "time" - - "github.com/go-kit/kit/log" -) - -type loggingMiddleware struct { - logger log.Logger - next StringService -} - -func (mw loggingMiddleware) Uppercase(s string) (output string, err error) { - defer func(begin time.Time) { - _ = mw.logger.Log( - "method", "uppercase", - "input", s, - "output", output, - "err", err, - "took", time.Since(begin), - ) - }(time.Now()) - - output, err = mw.next.Uppercase(s) - return -} - -func (mw loggingMiddleware) Count(s string) (n int) { - defer func(begin time.Time) { - _ = mw.logger.Log( - "method", "count", - "input", s, - "n", n, - "took", time.Since(begin), - ) - }(time.Now()) - - n = mw.next.Count(s) - return -} diff --git a/vendor/github.com/go-kit/kit/examples/stringsvc2/main.go b/vendor/github.com/go-kit/kit/examples/stringsvc2/main.go deleted file mode 100644 index dbe4788..0000000 --- a/vendor/github.com/go-kit/kit/examples/stringsvc2/main.go +++ /dev/null @@ -1,65 +0,0 @@ -package main - -import ( - "net/http" - "os" - "time" - - stdprometheus "github.com/prometheus/client_golang/prometheus" - "golang.org/x/net/context" - - "github.com/go-kit/kit/log" - "github.com/go-kit/kit/metrics" - kitprometheus "github.com/go-kit/kit/metrics/prometheus" - httptransport "github.com/go-kit/kit/transport/http" -) - -func main() { - ctx := context.Background() - logger := log.NewLogfmtLogger(os.Stderr) - - fieldKeys := []string{"method", "error"} - requestCount := kitprometheus.NewCounter(stdprometheus.CounterOpts{ - Namespace: "my_group", - Subsystem: "string_service", - Name: "request_count", - Help: "Number of requests received.", - }, fieldKeys) - requestLatency := metrics.NewTimeHistogram(time.Microsecond, 
kitprometheus.NewSummary(stdprometheus.SummaryOpts{ - Namespace: "my_group", - Subsystem: "string_service", - Name: "request_latency_microseconds", - Help: "Total duration of requests in microseconds.", - }, fieldKeys)) - countResult := kitprometheus.NewSummary(stdprometheus.SummaryOpts{ - Namespace: "my_group", - Subsystem: "string_service", - Name: "count_result", - Help: "The result of each count method.", - }, []string{}) // no fields here - - var svc StringService - svc = stringService{} - svc = loggingMiddleware{logger, svc} - svc = instrumentingMiddleware{requestCount, requestLatency, countResult, svc} - - uppercaseHandler := httptransport.NewServer( - ctx, - makeUppercaseEndpoint(svc), - decodeUppercaseRequest, - encodeResponse, - ) - - countHandler := httptransport.NewServer( - ctx, - makeCountEndpoint(svc), - decodeCountRequest, - encodeResponse, - ) - - http.Handle("/uppercase", uppercaseHandler) - http.Handle("/count", countHandler) - http.Handle("/metrics", stdprometheus.Handler()) - logger.Log("msg", "HTTP", "addr", ":8080") - logger.Log("err", http.ListenAndServe(":8080", nil)) -} diff --git a/vendor/github.com/go-kit/kit/examples/stringsvc2/service.go b/vendor/github.com/go-kit/kit/examples/stringsvc2/service.go deleted file mode 100644 index 1da2f3e..0000000 --- a/vendor/github.com/go-kit/kit/examples/stringsvc2/service.go +++ /dev/null @@ -1,28 +0,0 @@ -package main - -import ( - "errors" - "strings" -) - -// StringService provides operations on strings. -type StringService interface { - Uppercase(string) (string, error) - Count(string) int -} - -type stringService struct{} - -func (stringService) Uppercase(s string) (string, error) { - if s == "" { - return "", ErrEmpty - } - return strings.ToUpper(s), nil -} - -func (stringService) Count(s string) int { - return len(s) -} - -// ErrEmpty is returned when an input string is empty. 
-var ErrEmpty = errors.New("empty string") diff --git a/vendor/github.com/go-kit/kit/examples/stringsvc2/transport.go b/vendor/github.com/go-kit/kit/examples/stringsvc2/transport.go deleted file mode 100644 index a70ad3f..0000000 --- a/vendor/github.com/go-kit/kit/examples/stringsvc2/transport.go +++ /dev/null @@ -1,66 +0,0 @@ -package main - -import ( - "encoding/json" - "net/http" - - "golang.org/x/net/context" - - "github.com/go-kit/kit/endpoint" -) - -func makeUppercaseEndpoint(svc StringService) endpoint.Endpoint { - return func(ctx context.Context, request interface{}) (interface{}, error) { - req := request.(uppercaseRequest) - v, err := svc.Uppercase(req.S) - if err != nil { - return uppercaseResponse{v, err.Error()}, nil - } - return uppercaseResponse{v, ""}, nil - } -} - -func makeCountEndpoint(svc StringService) endpoint.Endpoint { - return func(ctx context.Context, request interface{}) (interface{}, error) { - req := request.(countRequest) - v := svc.Count(req.S) - return countResponse{v}, nil - } -} - -func decodeUppercaseRequest(_ context.Context, r *http.Request) (interface{}, error) { - var request uppercaseRequest - if err := json.NewDecoder(r.Body).Decode(&request); err != nil { - return nil, err - } - return request, nil -} - -func decodeCountRequest(_ context.Context, r *http.Request) (interface{}, error) { - var request countRequest - if err := json.NewDecoder(r.Body).Decode(&request); err != nil { - return nil, err - } - return request, nil -} - -func encodeResponse(_ context.Context, w http.ResponseWriter, response interface{}) error { - return json.NewEncoder(w).Encode(response) -} - -type uppercaseRequest struct { - S string `json:"s"` -} - -type uppercaseResponse struct { - V string `json:"v"` - Err string `json:"err,omitempty"` -} - -type countRequest struct { - S string `json:"s"` -} - -type countResponse struct { - V int `json:"v"` -} diff --git a/vendor/github.com/go-kit/kit/examples/stringsvc3/instrumenting.go b/vendor/github.com/go-kit/kit/examples/stringsvc3/instrumenting.go deleted file mode 100644 index 1b27b61..0000000 --- a/vendor/github.com/go-kit/kit/examples/stringsvc3/instrumenting.go +++ /dev/null @@ -1,50 +0,0 @@ -package main - -import ( - "fmt" - "time" - - "github.com/go-kit/kit/metrics" -) - -func instrumentingMiddleware( - requestCount metrics.Counter, - requestLatency metrics.TimeHistogram, - countResult metrics.Histogram, -) ServiceMiddleware { - return func(next StringService) StringService { - return instrmw{requestCount, requestLatency, countResult, next} - } -} - -type instrmw struct { - requestCount metrics.Counter - requestLatency metrics.TimeHistogram - countResult metrics.Histogram - StringService -} - -func (mw instrmw) Uppercase(s string) (output string, err error) { - defer func(begin time.Time) { - methodField := metrics.Field{Key: "method", Value: "uppercase"} - errorField := metrics.Field{Key: "error", Value: fmt.Sprintf("%v", err)} - mw.requestCount.With(methodField).With(errorField).Add(1) - mw.requestLatency.With(methodField).With(errorField).Observe(time.Since(begin)) - }(time.Now()) - - output, err = mw.StringService.Uppercase(s) - return -} - -func (mw instrmw) Count(s string) (n int) { - defer func(begin time.Time) { - methodField := metrics.Field{Key: "method", Value: "count"} - errorField := metrics.Field{Key: "error", Value: fmt.Sprintf("%v", error(nil))} - mw.requestCount.With(methodField).With(errorField).Add(1) - mw.requestLatency.With(methodField).With(errorField).Observe(time.Since(begin)) - 
mw.countResult.Observe(int64(n)) - }(time.Now()) - - n = mw.StringService.Count(s) - return -} diff --git a/vendor/github.com/go-kit/kit/examples/stringsvc3/logging.go b/vendor/github.com/go-kit/kit/examples/stringsvc3/logging.go deleted file mode 100644 index 72a2709..0000000 --- a/vendor/github.com/go-kit/kit/examples/stringsvc3/logging.go +++ /dev/null @@ -1,47 +0,0 @@ -package main - -import ( - "time" - - "github.com/go-kit/kit/log" -) - -func loggingMiddleware(logger log.Logger) ServiceMiddleware { - return func(next StringService) StringService { - return logmw{logger, next} - } -} - -type logmw struct { - logger log.Logger - StringService -} - -func (mw logmw) Uppercase(s string) (output string, err error) { - defer func(begin time.Time) { - _ = mw.logger.Log( - "method", "uppercase", - "input", s, - "output", output, - "err", err, - "took", time.Since(begin), - ) - }(time.Now()) - - output, err = mw.StringService.Uppercase(s) - return -} - -func (mw logmw) Count(s string) (n int) { - defer func(begin time.Time) { - _ = mw.logger.Log( - "method", "count", - "input", s, - "n", n, - "took", time.Since(begin), - ) - }(time.Now()) - - n = mw.StringService.Count(s) - return -} diff --git a/vendor/github.com/go-kit/kit/examples/stringsvc3/main.go b/vendor/github.com/go-kit/kit/examples/stringsvc3/main.go deleted file mode 100644 index b62952a..0000000 --- a/vendor/github.com/go-kit/kit/examples/stringsvc3/main.go +++ /dev/null @@ -1,75 +0,0 @@ -package main - -import ( - "flag" - "net/http" - "os" - "time" - - stdprometheus "github.com/prometheus/client_golang/prometheus" - "golang.org/x/net/context" - - "github.com/go-kit/kit/log" - "github.com/go-kit/kit/metrics" - kitprometheus "github.com/go-kit/kit/metrics/prometheus" - httptransport "github.com/go-kit/kit/transport/http" -) - -func main() { - var ( - listen = flag.String("listen", ":8080", "HTTP listen address") - proxy = flag.String("proxy", "", "Optional comma-separated list of URLs to proxy uppercase requests") - ) - flag.Parse() - - var logger log.Logger - logger = log.NewLogfmtLogger(os.Stderr) - logger = log.NewContext(logger).With("listen", *listen).With("caller", log.DefaultCaller) - - ctx := context.Background() - - fieldKeys := []string{"method", "error"} - requestCount := kitprometheus.NewCounter(stdprometheus.CounterOpts{ - Namespace: "my_group", - Subsystem: "string_service", - Name: "request_count", - Help: "Number of requests received.", - }, fieldKeys) - requestLatency := metrics.NewTimeHistogram(time.Microsecond, kitprometheus.NewSummary(stdprometheus.SummaryOpts{ - Namespace: "my_group", - Subsystem: "string_service", - Name: "request_latency_microseconds", - Help: "Total duration of requests in microseconds.", - }, fieldKeys)) - countResult := kitprometheus.NewSummary(stdprometheus.SummaryOpts{ - Namespace: "my_group", - Subsystem: "string_service", - Name: "count_result", - Help: "The result of each count method.", - }, []string{}) - - var svc StringService - svc = stringService{} - svc = proxyingMiddleware(*proxy, ctx, logger)(svc) - svc = loggingMiddleware(logger)(svc) - svc = instrumentingMiddleware(requestCount, requestLatency, countResult)(svc) - - uppercaseHandler := httptransport.NewServer( - ctx, - makeUppercaseEndpoint(svc), - decodeUppercaseRequest, - encodeResponse, - ) - countHandler := httptransport.NewServer( - ctx, - makeCountEndpoint(svc), - decodeCountRequest, - encodeResponse, - ) - - http.Handle("/uppercase", uppercaseHandler) - http.Handle("/count", countHandler) - http.Handle("/metrics", 
stdprometheus.Handler()) - logger.Log("msg", "HTTP", "addr", *listen) - logger.Log("err", http.ListenAndServe(*listen, nil)) -} diff --git a/vendor/github.com/go-kit/kit/examples/stringsvc3/proxying.go b/vendor/github.com/go-kit/kit/examples/stringsvc3/proxying.go deleted file mode 100644 index 33bc156..0000000 --- a/vendor/github.com/go-kit/kit/examples/stringsvc3/proxying.go +++ /dev/null @@ -1,116 +0,0 @@ -package main - -import ( - "errors" - "fmt" - "net/url" - "strings" - "time" - - jujuratelimit "github.com/juju/ratelimit" - "github.com/sony/gobreaker" - "golang.org/x/net/context" - - "github.com/go-kit/kit/circuitbreaker" - "github.com/go-kit/kit/endpoint" - "github.com/go-kit/kit/log" - "github.com/go-kit/kit/ratelimit" - "github.com/go-kit/kit/sd" - "github.com/go-kit/kit/sd/lb" - httptransport "github.com/go-kit/kit/transport/http" -) - -func proxyingMiddleware(instances string, ctx context.Context, logger log.Logger) ServiceMiddleware { - // If instances is empty, don't proxy. - if instances == "" { - logger.Log("proxy_to", "none") - return func(next StringService) StringService { return next } - } - - // Set some parameters for our client. - var ( - qps = 100 // beyond which we will return an error - maxAttempts = 3 // per request, before giving up - maxTime = 250 * time.Millisecond // wallclock time, before giving up - ) - - // Otherwise, construct an endpoint for each instance in the list, and add - // it to a fixed set of endpoints. In a real service, rather than doing this - // by hand, you'd probably use package sd's support for your service - // discovery system. - var ( - instanceList = split(instances) - subscriber sd.FixedSubscriber - ) - logger.Log("proxy_to", fmt.Sprint(instanceList)) - for _, instance := range instanceList { - var e endpoint.Endpoint - e = makeUppercaseProxy(ctx, instance) - e = circuitbreaker.Gobreaker(gobreaker.NewCircuitBreaker(gobreaker.Settings{}))(e) - e = ratelimit.NewTokenBucketLimiter(jujuratelimit.NewBucketWithRate(float64(qps), int64(qps)))(e) - subscriber = append(subscriber, e) - } - - // Now, build a single, retrying, load-balancing endpoint out of all of - // those individual endpoints. - balancer := lb.NewRoundRobin(subscriber) - retry := lb.Retry(maxAttempts, maxTime, balancer) - - // And finally, return the ServiceMiddleware, implemented by proxymw. - return func(next StringService) StringService { - return proxymw{ctx, next, retry} - } -} - -// proxymw implements StringService, forwarding Uppercase requests to the -// provided endpoint, and serving all other (i.e. Count) requests via the -// next StringService. -type proxymw struct { - ctx context.Context - next StringService // Serve most requests via this service... 
- uppercase endpoint.Endpoint // ...except Uppercase, which gets served by this endpoint -} - -func (mw proxymw) Count(s string) int { - return mw.next.Count(s) -} - -func (mw proxymw) Uppercase(s string) (string, error) { - response, err := mw.uppercase(mw.ctx, uppercaseRequest{S: s}) - if err != nil { - return "", err - } - - resp := response.(uppercaseResponse) - if resp.Err != "" { - return resp.V, errors.New(resp.Err) - } - return resp.V, nil -} - -func makeUppercaseProxy(ctx context.Context, instance string) endpoint.Endpoint { - if !strings.HasPrefix(instance, "http") { - instance = "http://" + instance - } - u, err := url.Parse(instance) - if err != nil { - panic(err) - } - if u.Path == "" { - u.Path = "/uppercase" - } - return httptransport.NewClient( - "GET", - u, - encodeRequest, - decodeUppercaseResponse, - ).Endpoint() -} - -func split(s string) []string { - a := strings.Split(s, ",") - for i := range a { - a[i] = strings.TrimSpace(a[i]) - } - return a -} diff --git a/vendor/github.com/go-kit/kit/examples/stringsvc3/service.go b/vendor/github.com/go-kit/kit/examples/stringsvc3/service.go deleted file mode 100644 index 7e1773a..0000000 --- a/vendor/github.com/go-kit/kit/examples/stringsvc3/service.go +++ /dev/null @@ -1,31 +0,0 @@ -package main - -import ( - "errors" - "strings" -) - -// StringService provides operations on strings. -type StringService interface { - Uppercase(string) (string, error) - Count(string) int -} - -type stringService struct{} - -func (stringService) Uppercase(s string) (string, error) { - if s == "" { - return "", ErrEmpty - } - return strings.ToUpper(s), nil -} - -func (stringService) Count(s string) int { - return len(s) -} - -// ErrEmpty is returned when an input string is empty. -var ErrEmpty = errors.New("empty string") - -// ServiceMiddleware is a chainable behavior modifier for StringService. 
-type ServiceMiddleware func(StringService) StringService diff --git a/vendor/github.com/go-kit/kit/examples/stringsvc3/transport.go b/vendor/github.com/go-kit/kit/examples/stringsvc3/transport.go deleted file mode 100644 index c6341c1..0000000 --- a/vendor/github.com/go-kit/kit/examples/stringsvc3/transport.go +++ /dev/null @@ -1,85 +0,0 @@ -package main - -import ( - "bytes" - "encoding/json" - "io/ioutil" - "net/http" - - "golang.org/x/net/context" - - "github.com/go-kit/kit/endpoint" -) - -func makeUppercaseEndpoint(svc StringService) endpoint.Endpoint { - return func(ctx context.Context, request interface{}) (interface{}, error) { - req := request.(uppercaseRequest) - v, err := svc.Uppercase(req.S) - if err != nil { - return uppercaseResponse{v, err.Error()}, nil - } - return uppercaseResponse{v, ""}, nil - } -} - -func makeCountEndpoint(svc StringService) endpoint.Endpoint { - return func(ctx context.Context, request interface{}) (interface{}, error) { - req := request.(countRequest) - v := svc.Count(req.S) - return countResponse{v}, nil - } -} - -func decodeUppercaseRequest(_ context.Context, r *http.Request) (interface{}, error) { - var request uppercaseRequest - if err := json.NewDecoder(r.Body).Decode(&request); err != nil { - return nil, err - } - return request, nil -} - -func decodeCountRequest(_ context.Context, r *http.Request) (interface{}, error) { - var request countRequest - if err := json.NewDecoder(r.Body).Decode(&request); err != nil { - return nil, err - } - return request, nil -} - -func decodeUppercaseResponse(_ context.Context, r *http.Response) (interface{}, error) { - var response uppercaseResponse - if err := json.NewDecoder(r.Body).Decode(&response); err != nil { - return nil, err - } - return response, nil -} - -func encodeResponse(_ context.Context, w http.ResponseWriter, response interface{}) error { - return json.NewEncoder(w).Encode(response) -} - -func encodeRequest(_ context.Context, r *http.Request, request interface{}) error { - var buf bytes.Buffer - if err := json.NewEncoder(&buf).Encode(request); err != nil { - return err - } - r.Body = ioutil.NopCloser(&buf) - return nil -} - -type uppercaseRequest struct { - S string `json:"s"` -} - -type uppercaseResponse struct { - V string `json:"v"` - Err string `json:"err,omitempty"` -} - -type countRequest struct { - S string `json:"s"` -} - -type countResponse struct { - V int `json:"v"` -} diff --git a/vendor/github.com/go-kit/kit/lint b/vendor/github.com/go-kit/kit/lint deleted file mode 100755 index 12e3072..0000000 --- a/vendor/github.com/go-kit/kit/lint +++ /dev/null @@ -1,26 +0,0 @@ -#!/usr/bin/env bash - -set -o errexit -set -o nounset -set -o pipefail - -if [ ! $(command -v gometalinter) ] -then - go get github.com/alecthomas/gometalinter - gometalinter --update --install -fi - -time gometalinter \ - --exclude='error return value not checked.*(Close|Log|Print).*\(errcheck\)$' \ - --exclude='.*_test\.go:.*error return value not checked.*\(errcheck\)$' \ - --exclude='/thrift/' \ - --exclude='/pb/' \ - --exclude='no args in Log call \(vet\)' \ - --disable=dupl \ - --disable=aligncheck \ - --disable=gotype \ - --cyclo-over=20 \ - --tests \ - --concurrency=2 \ - --deadline=300s \ - ./... 
diff --git a/vendor/github.com/go-kit/kit/log/README.md b/vendor/github.com/go-kit/kit/log/README.md deleted file mode 100644 index 452157d..0000000 --- a/vendor/github.com/go-kit/kit/log/README.md +++ /dev/null @@ -1,147 +0,0 @@ -# package log - -`package log` provides a minimal interface for structured logging in services. -It may be wrapped to encode conventions, enforce type-safety, provide leveled logging, and so on. -It can be used for both typical application log events, and log-structured data streams. - -## Structured logging - -Structured logging is, basically, conceding to the reality that logs are _data_, - and warrant some level of schematic rigor. -Using a stricter, key/value-oriented message format for our logs, - containing contextual and semantic information, - makes it much easier to get insight into the operational activity of the systems we build. -Consequently, `package log` is of the strong belief that - "[the benefits of structured logging outweigh the minimal effort involved](https://www.thoughtworks.com/radar/techniques/structured-logging)". - -Migrating from unstructured to structured logging is probably a lot easier than you'd expect. - -```go -// Unstructured -log.Printf("HTTP server listening on %s", addr) - -// Structured -logger.Log("transport", "HTTP", "addr", addr, "msg", "listening") -``` - -## Usage - -### Typical application logging - -```go -logger := log.NewLogfmtLogger(os.Stderr) -logger.Log("question", "what is the meaning of life?", "answer", 42) - -// Output: -// question="what is the meaning of life?" answer=42 -``` - -### Log contexts - -```go -func main() { - var logger log.Logger - logger = log.NewLogfmtLogger(os.Stderr) - logger = log.NewContext(logger).With("instance_id", 123) - - logger.Log("msg", "starting") - NewWorker(log.NewContext(logger).With("component", "worker")).Run() - NewSlacker(log.NewContext(logger).With("component", "slacker")).Run() -} - -// Output: -// instance_id=123 msg=starting -// instance_id=123 component=worker msg=running -// instance_id=123 component=slacker msg=running -``` - -### Interact with stdlib logger - -Redirect stdlib logger to Go kit logger. - -```go -import ( - "os" - stdlog "log" - kitlog "github.com/go-kit/kit/log" -) - -func main() { - logger := kitlog.NewJSONLogger(os.Stdout) - stdlog.SetOutput(kitlog.NewStdlibAdapter(logger)) - stdlog.Print("I sure like pie") -} - -// Output: -// {"msg":"I sure like pie","ts":"2016/01/01 12:34:56"} -``` - -Or, if, for legacy reasons, - you need to pipe all of your logging through the stdlib log package, - you can redirect Go kit logger to the stdlib logger. - -```go -logger := kitlog.NewLogfmtLogger(kitlog.StdlibWriter{}) -logger.Log("legacy", true, "msg", "at least it's something") - -// Output: -// 2016/01/01 12:34:56 legacy=true msg="at least it's something" -``` - -### Timestamps and callers - -```go -var logger log.Logger -logger = log.NewLogfmtLogger(os.Stderr) -logger = log.NewContext(logger).With("ts", log.DefaultTimestampUTC, "caller", log.DefaultCaller) - -logger.Log("msg", "hello") - -// Output: -// ts=2016-01-01T12:34:56Z caller=main.go:15 msg=hello -``` - -## Supported output formats - -- [Logfmt](https://brandur.org/logfmt) -- JSON - -## Enhancements - -`package log` is centered on the one-method Logger interface. 
- -```go -type Logger interface { - Log(keyvals ...interface{}) error -} -``` - -This interface, and its supporting code like [log.Context](https://godoc.org/github.com/go-kit/kit/log#Context), - is the product of much iteration and evaluation. -For more details on the evolution of the Logger interface, - see [The Hunt for a Logger Interface](http://go-talks.appspot.com/github.com/ChrisHines/talks/structured-logging/structured-logging.slide#1), - a talk by [Chris Hines](https://github.com/ChrisHines). -Also, please see - [#63](https://github.com/go-kit/kit/issues/63), - [#76](https://github.com/go-kit/kit/pull/76), - [#131](https://github.com/go-kit/kit/issues/131), - [#157](https://github.com/go-kit/kit/pull/157), - [#164](https://github.com/go-kit/kit/issues/164), and - [#252](https://github.com/go-kit/kit/pull/252) - to review historical conversations about package log and the Logger interface. - -Value-add packages and suggestions, - like improvements to [the leveled logger](https://godoc.org/github.com/go-kit/kit/log/levels), - are of course welcome. -Good proposals should - -- Be composable with [log.Context](https://godoc.org/github.com/go-kit/kit/log#Context), -- Not break the behavior of [log.Caller](https://godoc.org/github.com/go-kit/kit/log#Caller) in any wrapped context, and -- Be friendly to packages that accept only an unadorned log.Logger. - -## Benchmarks & comparisons - -There are a few Go logging benchmarks and comparisons that include Go kit's package log. - -- [imkira/go-loggers-bench](https://github.com/imkira/go-loggers-bench) includes kit/log -- [uber-common/zap](https://github.com/uber-common/zap), a zero-alloc logging library, includes a comparison with kit/log diff --git a/vendor/github.com/go-kit/kit/log/benchmark_test.go b/vendor/github.com/go-kit/kit/log/benchmark_test.go deleted file mode 100644 index d3695b8..0000000 --- a/vendor/github.com/go-kit/kit/log/benchmark_test.go +++ /dev/null @@ -1,21 +0,0 @@ -package log_test - -import ( - "testing" - - "github.com/go-kit/kit/log" -) - -func benchmarkRunner(b *testing.B, logger log.Logger, f func(log.Logger)) { - lc := log.NewContext(logger).With("common_key", "common_value") - b.ReportAllocs() - b.ResetTimer() - for i := 0; i < b.N; i++ { - f(lc) - } -} - -var ( - baseMessage = func(logger log.Logger) { logger.Log("foo_key", "foo_value") } - withMessage = func(logger log.Logger) { log.NewContext(logger).With("a", "b").Log("c", "d") } -) diff --git a/vendor/github.com/go-kit/kit/log/concurrency_test.go b/vendor/github.com/go-kit/kit/log/concurrency_test.go deleted file mode 100644 index e68d16a..0000000 --- a/vendor/github.com/go-kit/kit/log/concurrency_test.go +++ /dev/null @@ -1,28 +0,0 @@ -package log_test - -import ( - "strconv" - "sync" - "testing" - - "github.com/go-kit/kit/log" -) - -// These test are designed to be run with the race detector. 
- -func testConcurrency(t *testing.T, logger log.Logger) { - for _, n := range []int{10, 100, 500} { - wg := sync.WaitGroup{} - wg.Add(n) - for i := 0; i < n; i++ { - go func() { spam(logger); wg.Done() }() - } - wg.Wait() - } -} - -func spam(logger log.Logger) { - for i := 0; i < 100; i++ { - logger.Log("key", strconv.FormatInt(int64(i), 10)) - } -} diff --git a/vendor/github.com/go-kit/kit/log/example_test.go b/vendor/github.com/go-kit/kit/log/example_test.go deleted file mode 100644 index 1741615..0000000 --- a/vendor/github.com/go-kit/kit/log/example_test.go +++ /dev/null @@ -1,23 +0,0 @@ -package log_test - -import ( - "os" - - "github.com/go-kit/kit/log" -) - -func ExampleContext() { - logger := log.NewLogfmtLogger(os.Stdout) - logger.Log("foo", 123) - ctx := log.NewContext(logger).With("level", "info") - ctx.Log() - ctx = ctx.With("msg", "hello") - ctx.Log() - ctx.With("a", 1).Log("b", 2) - - // Output: - // foo=123 - // level=info - // level=info msg=hello - // level=info msg=hello a=1 b=2 -} diff --git a/vendor/github.com/go-kit/kit/log/json_logger.go b/vendor/github.com/go-kit/kit/log/json_logger.go deleted file mode 100644 index cef0d19..0000000 --- a/vendor/github.com/go-kit/kit/log/json_logger.go +++ /dev/null @@ -1,90 +0,0 @@ -package log - -import ( - "encoding" - "encoding/json" - "fmt" - "io" - "reflect" -) - -type jsonLogger struct { - io.Writer -} - -// NewJSONLogger returns a Logger that encodes keyvals to the Writer as a -// single JSON object. -func NewJSONLogger(w io.Writer) Logger { - return &jsonLogger{w} -} - -func (l *jsonLogger) Log(keyvals ...interface{}) error { - n := (len(keyvals) + 1) / 2 // +1 to handle case when len is odd - m := make(map[string]interface{}, n) - for i := 0; i < len(keyvals); i += 2 { - k := keyvals[i] - var v interface{} = ErrMissingValue - if i+1 < len(keyvals) { - v = keyvals[i+1] - } - merge(m, k, v) - } - return json.NewEncoder(l.Writer).Encode(m) -} - -func merge(dst map[string]interface{}, k, v interface{}) { - var key string - switch x := k.(type) { - case string: - key = x - case fmt.Stringer: - key = safeString(x) - default: - key = fmt.Sprint(x) - } - if x, ok := v.(error); ok { - v = safeError(x) - } - - // We want json.Marshaler and encoding.TextMarshaller to take priority over - // err.Error() and v.String(). But json.Marshall (called later) does that by - // default so we force a no-op if it's one of those 2 case. 
- switch x := v.(type) { - case json.Marshaler: - case encoding.TextMarshaler: - case error: - v = safeError(x) - case fmt.Stringer: - v = safeString(x) - } - - dst[key] = v -} - -func safeString(str fmt.Stringer) (s string) { - defer func() { - if panicVal := recover(); panicVal != nil { - if v := reflect.ValueOf(str); v.Kind() == reflect.Ptr && v.IsNil() { - s = "NULL" - } else { - panic(panicVal) - } - } - }() - s = str.String() - return -} - -func safeError(err error) (s interface{}) { - defer func() { - if panicVal := recover(); panicVal != nil { - if v := reflect.ValueOf(err); v.Kind() == reflect.Ptr && v.IsNil() { - s = nil - } else { - panic(panicVal) - } - } - }() - s = err.Error() - return -} diff --git a/vendor/github.com/go-kit/kit/log/json_logger_test.go b/vendor/github.com/go-kit/kit/log/json_logger_test.go deleted file mode 100644 index 2911577..0000000 --- a/vendor/github.com/go-kit/kit/log/json_logger_test.go +++ /dev/null @@ -1,156 +0,0 @@ -package log_test - -import ( - "bytes" - "errors" - "io/ioutil" - "testing" - - "github.com/go-kit/kit/log" -) - -func TestJSONLoggerCaller(t *testing.T) { - t.Parallel() - buf := &bytes.Buffer{} - logger := log.NewJSONLogger(buf) - logger = log.NewContext(logger).With("caller", log.DefaultCaller) - - if err := logger.Log(); err != nil { - t.Fatal(err) - } - if want, have := `{"caller":"json_logger_test.go:18"}`+"\n", buf.String(); want != have { - t.Errorf("\nwant %#v\nhave %#v", want, have) - } -} - -func TestJSONLogger(t *testing.T) { - t.Parallel() - buf := &bytes.Buffer{} - logger := log.NewJSONLogger(buf) - if err := logger.Log("err", errors.New("err"), "m", map[string]int{"0": 0}, "a", []int{1, 2, 3}); err != nil { - t.Fatal(err) - } - if want, have := `{"a":[1,2,3],"err":"err","m":{"0":0}}`+"\n", buf.String(); want != have { - t.Errorf("\nwant %#v\nhave %#v", want, have) - } -} - -func TestJSONLoggerMissingValue(t *testing.T) { - t.Parallel() - buf := &bytes.Buffer{} - logger := log.NewJSONLogger(buf) - if err := logger.Log("k"); err != nil { - t.Fatal(err) - } - if want, have := `{"k":"(MISSING)"}`+"\n", buf.String(); want != have { - t.Errorf("\nwant %#v\nhave %#v", want, have) - } -} - -func TestJSONLoggerNilStringerKey(t *testing.T) { - t.Parallel() - - buf := &bytes.Buffer{} - logger := log.NewJSONLogger(buf) - if err := logger.Log((*stringer)(nil), "v"); err != nil { - t.Fatal(err) - } - if want, have := `{"NULL":"v"}`+"\n", buf.String(); want != have { - t.Errorf("\nwant %#v\nhave %#v", want, have) - } -} - -func TestJSONLoggerNilErrorValue(t *testing.T) { - t.Parallel() - - buf := &bytes.Buffer{} - logger := log.NewJSONLogger(buf) - if err := logger.Log("err", (*stringError)(nil)); err != nil { - t.Fatal(err) - } - if want, have := `{"err":null}`+"\n", buf.String(); want != have { - t.Errorf("\nwant %#v\nhave %#v", want, have) - } -} - -// aller implements json.Marshaler, encoding.TextMarshaler, and fmt.Stringer. -type aller struct{} - -func (aller) MarshalJSON() ([]byte, error) { - return []byte("\"json\""), nil -} - -func (aller) MarshalText() ([]byte, error) { - return []byte("text"), nil -} - -func (aller) String() string { - return "string" -} - -// textstringer implements encoding.TextMarshaler and fmt.Stringer. 
-type textstringer struct{} - -func (textstringer) MarshalText() ([]byte, error) { - return []byte("text"), nil -} - -func (textstringer) String() string { - return "string" -} - -func TestJSONLoggerStringValue(t *testing.T) { - tests := []struct { - v interface{} - expected string - }{ - { - v: aller{}, - expected: `{"v":"json"}`, - }, - { - v: textstringer{}, - expected: `{"v":"text"}`, - }, - { - v: stringer("string"), - expected: `{"v":"string"}`, - }, - } - - for _, test := range tests { - buf := &bytes.Buffer{} - logger := log.NewJSONLogger(buf) - if err := logger.Log("v", test.v); err != nil { - t.Fatal(err) - } - - if want, have := test.expected+"\n", buf.String(); want != have { - t.Errorf("\nwant %#v\nhave %#v", want, have) - } - } -} - -type stringer string - -func (s stringer) String() string { - return string(s) -} - -type stringError string - -func (s stringError) Error() string { - return string(s) -} - -func BenchmarkJSONLoggerSimple(b *testing.B) { - benchmarkRunner(b, log.NewJSONLogger(ioutil.Discard), baseMessage) -} - -func BenchmarkJSONLoggerContextual(b *testing.B) { - benchmarkRunner(b, log.NewJSONLogger(ioutil.Discard), withMessage) -} - -func TestJSONLoggerConcurrency(t *testing.T) { - testConcurrency(t, log.NewJSONLogger(ioutil.Discard)) -} diff --git a/vendor/github.com/go-kit/kit/log/levels/levels.go b/vendor/github.com/go-kit/kit/log/levels/levels.go deleted file mode 100644 index da6b681..0000000 --- a/vendor/github.com/go-kit/kit/log/levels/levels.go +++ /dev/null @@ -1,127 +0,0 @@ -package levels - -import "github.com/go-kit/kit/log" - -// Levels provides a leveled logging wrapper around a logger. It has five -// levels: debug, info, warning (warn), error, and critical (crit). If you -// want a different set of levels, you can create your own levels type very -// easily, and you can elide the configuration. -type Levels struct { - ctx *log.Context - levelKey string - - // We have a choice between storing level values in string fields or - // making a separate context for each level. When using string fields the - // Log method must combine the base context, the level data, and the - // logged keyvals; but the With method only requires updating one context. - // If we instead keep a separate context for each level the Log method - // must only append the new keyvals; but the With method would have to - // update all five contexts. - - // Roughly speaking, storing multiple contexts breaks even if the ratio of - // Log/With calls is more than the number of levels. We have chosen to - // make the With method cheap and the Log method a bit more costly because - // we do not expect most applications to Log more than five times for each - // call to With. - - debugValue string - infoValue string - warnValue string - errorValue string - critValue string -} - -// New creates a new leveled logger, wrapping the passed logger. -func New(logger log.Logger, options ...Option) Levels { - l := Levels{ - ctx: log.NewContext(logger), - levelKey: "level", - - debugValue: "debug", - infoValue: "info", - warnValue: "warn", - errorValue: "error", - critValue: "crit", - } - for _, option := range options { - option(&l) - } - return l -} - -// With returns a new leveled logger that includes keyvals in all log events. 
-func (l Levels) With(keyvals ...interface{}) Levels { - return Levels{ - ctx: l.ctx.With(keyvals...), - levelKey: l.levelKey, - debugValue: l.debugValue, - infoValue: l.infoValue, - warnValue: l.warnValue, - errorValue: l.errorValue, - critValue: l.critValue, - } -} - -// Debug returns a debug level logger. -func (l Levels) Debug() log.Logger { - return l.ctx.WithPrefix(l.levelKey, l.debugValue) -} - -// Info returns an info level logger. -func (l Levels) Info() log.Logger { - return l.ctx.WithPrefix(l.levelKey, l.infoValue) -} - -// Warn returns a warning level logger. -func (l Levels) Warn() log.Logger { - return l.ctx.WithPrefix(l.levelKey, l.warnValue) -} - -// Error returns an error level logger. -func (l Levels) Error() log.Logger { - return l.ctx.WithPrefix(l.levelKey, l.errorValue) -} - -// Crit returns a critical level logger. -func (l Levels) Crit() log.Logger { - return l.ctx.WithPrefix(l.levelKey, l.critValue) -} - -// Option sets a parameter for leveled loggers. -type Option func(*Levels) - -// Key sets the key for the field used to indicate log level. By default, -// the key is "level". -func Key(key string) Option { - return func(l *Levels) { l.levelKey = key } -} - -// DebugValue sets the value for the field used to indicate the debug log -// level. By default, the value is "debug". -func DebugValue(value string) Option { - return func(l *Levels) { l.debugValue = value } -} - -// InfoValue sets the value for the field used to indicate the info log level. -// By default, the value is "info". -func InfoValue(value string) Option { - return func(l *Levels) { l.infoValue = value } -} - -// WarnValue sets the value for the field used to indicate the warning log -// level. By default, the value is "warn". -func WarnValue(value string) Option { - return func(l *Levels) { l.warnValue = value } -} - -// ErrorValue sets the value for the field used to indicate the error log -// level. By default, the value is "error". -func ErrorValue(value string) Option { - return func(l *Levels) { l.errorValue = value } -} - -// CritValue sets the value for the field used to indicate the critical log -// level. By default, the value is "crit". 
-func CritValue(value string) Option { - return func(l *Levels) { l.critValue = value } -} diff --git a/vendor/github.com/go-kit/kit/log/levels/levels_test.go b/vendor/github.com/go-kit/kit/log/levels/levels_test.go deleted file mode 100644 index 270963c..0000000 --- a/vendor/github.com/go-kit/kit/log/levels/levels_test.go +++ /dev/null @@ -1,65 +0,0 @@ -package levels_test - -import ( - "bytes" - "os" - "testing" - - "github.com/go-kit/kit/log" - "github.com/go-kit/kit/log/levels" -) - -func TestDefaultLevels(t *testing.T) { - buf := bytes.Buffer{} - logger := levels.New(log.NewLogfmtLogger(&buf)) - - logger.Debug().Log("msg", "résumé") // of course you'd want to do this - if want, have := "level=debug msg=résumé\n", buf.String(); want != have { - t.Errorf("want %#v, have %#v", want, have) - } - - buf.Reset() - logger.Info().Log("msg", "Ã…hus") - if want, have := "level=info msg=Ã…hus\n", buf.String(); want != have { - t.Errorf("want %#v, have %#v", want, have) - } - - buf.Reset() - logger.Error().Log("msg", "© violation") - if want, have := "level=error msg=\"© violation\"\n", buf.String(); want != have { - t.Errorf("want %#v, have %#v", want, have) - } - - buf.Reset() - logger.Crit().Log("msg", " ") - if want, have := "level=crit msg=\"\\t\"\n", buf.String(); want != have { - t.Errorf("want %#v, have %#v", want, have) - } -} - -func TestModifiedLevels(t *testing.T) { - buf := bytes.Buffer{} - logger := levels.New( - log.NewJSONLogger(&buf), - levels.Key("l"), - levels.DebugValue("dbg"), - levels.InfoValue("nfo"), - levels.WarnValue("wrn"), - levels.ErrorValue("err"), - levels.CritValue("crt"), - ) - logger.With("easter_island", "176°").Debug().Log("msg", "moai") - if want, have := `{"easter_island":"176°","l":"dbg","msg":"moai"}`+"\n", buf.String(); want != have { - t.Errorf("want %#v, have %#v", want, have) - } -} - -func ExampleLevels() { - logger := levels.New(log.NewLogfmtLogger(os.Stdout)) - logger.Debug().Log("msg", "hello") - logger.With("context", "foo").Warn().Log("err", "error") - - // Output: - // level=debug msg=hello - // level=warn context=foo err=error -} diff --git a/vendor/github.com/go-kit/kit/log/log.go b/vendor/github.com/go-kit/kit/log/log.go deleted file mode 100644 index 25e76cb..0000000 --- a/vendor/github.com/go-kit/kit/log/log.go +++ /dev/null @@ -1,181 +0,0 @@ -// Package log provides basic interfaces for structured logging. -// -// The fundamental interface is Logger. Loggers create log events from -// key/value data. -package log - -import ( - "errors" - "sync/atomic" -) - -// Logger is the fundamental interface for all log operations. Log creates a -// log event from keyvals, a variadic sequence of alternating keys and values. -// Implementations must be safe for concurrent use by multiple goroutines. In -// particular, any implementation of Logger that appends to keyvals or -// modifies any of its elements must make a copy first. -type Logger interface { - Log(keyvals ...interface{}) error -} - -// ErrMissingValue is appended to keyvals slices with odd length to substitute -// the missing value. -var ErrMissingValue = errors.New("(MISSING)") - -// NewContext returns a new Context that logs to logger. -func NewContext(logger Logger) *Context { - if c, ok := logger.(*Context); ok { - return c - } - return &Context{logger: logger} -} - -// Context must always have the same number of stack frames between calls to -// its Log method and the eventual binding of Valuers to their value. 
This -// requirement comes from the functional requirement to allow a context to -// resolve application call site information for a log.Caller stored in the -// context. To do this we must be able to predict the number of logging -// functions on the stack when bindValues is called. -// -// Three implementation details provide the needed stack depth consistency. -// The first two of these details also result in better amortized performance, -// and thus make sense even without the requirements regarding stack depth. -// The third detail, however, is subtle and tied to the implementation of the -// Go compiler. -// -// 1. NewContext avoids introducing an additional layer when asked to -// wrap another Context. -// 2. With avoids introducing an additional layer by returning a newly -// constructed Context with a merged keyvals rather than simply -// wrapping the existing Context. -// 3. All of Context's methods take pointer receivers even though they -// do not mutate the Context. -// -// Before explaining the last detail, first some background. The Go compiler -// generates wrapper methods to implement the auto dereferencing behavior when -// calling a value method through a pointer variable. These wrapper methods -// are also used when calling a value method through an interface variable -// because interfaces store a pointer to the underlying concrete value. -// Calling a pointer receiver through an interface does not require generating -// an additional function. -// -// If Context had value methods then calling Context.Log through a variable -// with type Logger would have an extra stack frame compared to calling -// Context.Log through a variable with type Context. Using pointer receivers -// avoids this problem. - -// A Context wraps a Logger and holds keyvals that it includes in all log -// events. When logging, a Context replaces all value elements (odd indexes) -// containing a Valuer with their generated value for each call to its Log -// method. -type Context struct { - logger Logger - keyvals []interface{} - hasValuer bool -} - -// Log replaces all value elements (odd indexes) containing a Valuer in the -// stored context with their generated value, appends keyvals, and passes the -// result to the wrapped Logger. -func (l *Context) Log(keyvals ...interface{}) error { - kvs := append(l.keyvals, keyvals...) - if len(kvs)%2 != 0 { - kvs = append(kvs, ErrMissingValue) - } - if l.hasValuer { - // If no keyvals were appended above then we must copy l.keyvals so - // that future log events will reevaluate the stored Valuers. - if len(keyvals) == 0 { - kvs = append([]interface{}{}, l.keyvals...) - } - bindValues(kvs[:len(l.keyvals)]) - } - return l.logger.Log(kvs...) -} - -// With returns a new Context with keyvals appended to those of the receiver. -func (l *Context) With(keyvals ...interface{}) *Context { - if len(keyvals) == 0 { - return l - } - kvs := append(l.keyvals, keyvals...) - if len(kvs)%2 != 0 { - kvs = append(kvs, ErrMissingValue) - } - return &Context{ - logger: l.logger, - // Limiting the capacity of the stored keyvals ensures that a new - // backing array is created if the slice must grow in Log or With. - // Using the extra capacity without copying risks a data race that - // would violate the Logger interface contract. - keyvals: kvs[:len(kvs):len(kvs)], - hasValuer: l.hasValuer || containsValuer(keyvals), - } -} - -// WithPrefix returns a new Context with keyvals prepended to those of the -// receiver. 
-func (l *Context) WithPrefix(keyvals ...interface{}) *Context { - if len(keyvals) == 0 { - return l - } - // Limiting the capacity of the stored keyvals ensures that a new - // backing array is created if the slice must grow in Log or With. - // Using the extra capacity without copying risks a data race that - // would violate the Logger interface contract. - n := len(l.keyvals) + len(keyvals) - if len(keyvals)%2 != 0 { - n++ - } - kvs := make([]interface{}, 0, n) - kvs = append(kvs, keyvals...) - if len(kvs)%2 != 0 { - kvs = append(kvs, ErrMissingValue) - } - kvs = append(kvs, l.keyvals...) - return &Context{ - logger: l.logger, - keyvals: kvs, - hasValuer: l.hasValuer || containsValuer(keyvals), - } -} - -// LoggerFunc is an adapter to allow use of ordinary functions as Loggers. If -// f is a function with the appropriate signature, LoggerFunc(f) is a Logger -// object that calls f. -type LoggerFunc func(...interface{}) error - -// Log implements Logger by calling f(keyvals...). -func (f LoggerFunc) Log(keyvals ...interface{}) error { - return f(keyvals...) -} - -// SwapLogger wraps another logger that may be safely replaced while other -// goroutines use the SwapLogger concurrently. The zero value for a SwapLogger -// will discard all log events without error. -// -// SwapLogger serves well as a package global logger that can be changed by -// importers. -type SwapLogger struct { - logger atomic.Value -} - -type loggerStruct struct { - Logger -} - -// Log implements the Logger interface by forwarding keyvals to the currently -// wrapped logger. It does not log anything if the wrapped logger is nil. -func (l *SwapLogger) Log(keyvals ...interface{}) error { - s, ok := l.logger.Load().(loggerStruct) - if !ok || s.Logger == nil { - return nil - } - return s.Log(keyvals...) -} - -// Swap replaces the currently wrapped logger with logger. Swap may be called -// concurrently with calls to Log from other goroutines. -func (l *SwapLogger) Swap(logger Logger) { - l.logger.Store(loggerStruct{logger}) -} diff --git a/vendor/github.com/go-kit/kit/log/log_test.go b/vendor/github.com/go-kit/kit/log/log_test.go deleted file mode 100644 index 7cd0844..0000000 --- a/vendor/github.com/go-kit/kit/log/log_test.go +++ /dev/null @@ -1,255 +0,0 @@ -package log_test - -import ( - "bytes" - "fmt" - "sync" - "testing" - - "github.com/go-kit/kit/log" - "github.com/go-stack/stack" -) - -func TestContext(t *testing.T) { - t.Parallel() - buf := &bytes.Buffer{} - logger := log.NewLogfmtLogger(buf) - - kvs := []interface{}{"a", 123} - lc := log.NewContext(logger).With(kvs...) 
- kvs[1] = 0 // With should copy its key values - - lc = lc.With("b", "c") // With should stack - if err := lc.Log("msg", "message"); err != nil { - t.Fatal(err) - } - if want, have := "a=123 b=c msg=message\n", buf.String(); want != have { - t.Errorf("\nwant: %shave: %s", want, have) - } - - buf.Reset() - lc = lc.WithPrefix("p", "first") - if err := lc.Log("msg", "message"); err != nil { - t.Fatal(err) - } - if want, have := "p=first a=123 b=c msg=message\n", buf.String(); want != have { - t.Errorf("\nwant: %shave: %s", want, have) - } -} - -func TestContextMissingValue(t *testing.T) { - t.Parallel() - var output []interface{} - logger := log.Logger(log.LoggerFunc(func(keyvals ...interface{}) error { - output = keyvals - return nil - })) - - lc := log.NewContext(logger) - - lc.Log("k") - if want, have := 2, len(output); want != have { - t.Errorf("want len(output) == %v, have %v", want, have) - } - if want, have := log.ErrMissingValue, output[1]; want != have { - t.Errorf("want %#v, have %#v", want, have) - } - - lc.With("k1").WithPrefix("k0").Log("k2") - if want, have := 6, len(output); want != have { - t.Errorf("want len(output) == %v, have %v", want, have) - } - for i := 1; i < 6; i += 2 { - if want, have := log.ErrMissingValue, output[i]; want != have { - t.Errorf("want output[%d] == %#v, have %#v", i, want, have) - } - } -} - -// Test that Context.Log has a consistent function stack depth when binding -// log.Valuers, regardless of how many times Context.With has been called or -// whether Context.Log is called via an interface typed variable or a concrete -// typed variable. -func TestContextStackDepth(t *testing.T) { - fn := fmt.Sprintf("%n", stack.Caller(0)) - - var output []interface{} - - logger := log.Logger(log.LoggerFunc(func(keyvals ...interface{}) error { - output = keyvals - return nil - })) - - stackValuer := log.Valuer(func() interface{} { - for i, c := range stack.Trace() { - if fmt.Sprintf("%n", c) == fn { - return i - } - } - t.Fatal("Test function not found in stack trace.") - return nil - }) - - concrete := log.NewContext(logger).With("stack", stackValuer) - var iface log.Logger = concrete - - // Call through interface to get baseline. - iface.Log("k", "v") - want := output[1].(int) - - for len(output) < 10 { - concrete.Log("k", "v") - if have := output[1]; have != want { - t.Errorf("%d Withs: have %v, want %v", len(output)/2-1, have, want) - } - - iface.Log("k", "v") - if have := output[1]; have != want { - t.Errorf("%d Withs: have %v, want %v", len(output)/2-1, have, want) - } - - wrapped := log.NewContext(concrete) - wrapped.Log("k", "v") - if have := output[1]; have != want { - t.Errorf("%d Withs: have %v, want %v", len(output)/2-1, have, want) - } - - concrete = concrete.With("k", "v") - iface = concrete - } -} - -// Test that With returns a Logger safe for concurrent use. This test -// validates that the stored logging context does not get corrupted when -// multiple clients concurrently log additional keyvals. -// -// This test must be run with go test -cpu 2 (or more) to achieve its goal. -func TestWithConcurrent(t *testing.T) { - // Create some buckets to count how many events each goroutine logs. - const goroutines = 8 - counts := [goroutines]int{} - - // This logger extracts a goroutine id from the last value field and - // increments the referenced bucket. 
- logger := log.LoggerFunc(func(kv ...interface{}) error { - goroutine := kv[len(kv)-1].(int) - counts[goroutine]++ - return nil - }) - - // With must be careful about handling slices that can grow without - // copying the underlying array, so give it a challenge. - l := log.NewContext(logger).With(make([]interface{}, 0, 2)...) - - // Start logging concurrently. Each goroutine logs its id so the logger - // can bucket the event counts. - var wg sync.WaitGroup - wg.Add(goroutines) - const n = 10000 - for i := 0; i < goroutines; i++ { - go func(idx int) { - defer wg.Done() - for j := 0; j < n; j++ { - l.Log("goroutineIdx", idx) - } - }(i) - } - wg.Wait() - - for bucket, have := range counts { - if want := n; want != have { - t.Errorf("bucket %d: want %d, have %d", bucket, want, have) // note Errorf - } - } -} - -func BenchmarkDiscard(b *testing.B) { - logger := log.NewNopLogger() - b.ReportAllocs() - b.ResetTimer() - for i := 0; i < b.N; i++ { - logger.Log("k", "v") - } -} - -func BenchmarkOneWith(b *testing.B) { - logger := log.NewNopLogger() - lc := log.NewContext(logger).With("k", "v") - b.ReportAllocs() - b.ResetTimer() - for i := 0; i < b.N; i++ { - lc.Log("k", "v") - } -} - -func BenchmarkTwoWith(b *testing.B) { - logger := log.NewNopLogger() - lc := log.NewContext(logger).With("k", "v") - for i := 1; i < 2; i++ { - lc = lc.With("k", "v") - } - b.ReportAllocs() - b.ResetTimer() - for i := 0; i < b.N; i++ { - lc.Log("k", "v") - } -} - -func BenchmarkTenWith(b *testing.B) { - logger := log.NewNopLogger() - lc := log.NewContext(logger).With("k", "v") - for i := 1; i < 10; i++ { - lc = lc.With("k", "v") - } - b.ReportAllocs() - b.ResetTimer() - for i := 0; i < b.N; i++ { - lc.Log("k", "v") - } -} - -func TestSwapLogger(t *testing.T) { - var logger log.SwapLogger - - // Zero value does not panic or error. - err := logger.Log("k", "v") - if got, want := err, error(nil); got != want { - t.Errorf("got %v, want %v", got, want) - } - - buf := &bytes.Buffer{} - json := log.NewJSONLogger(buf) - logger.Swap(json) - - if err := logger.Log("k", "v"); err != nil { - t.Error(err) - } - if got, want := buf.String(), `{"k":"v"}`+"\n"; got != want { - t.Errorf("got %v, want %v", got, want) - } - - buf.Reset() - prefix := log.NewLogfmtLogger(buf) - logger.Swap(prefix) - - if err := logger.Log("k", "v"); err != nil { - t.Error(err) - } - if got, want := buf.String(), "k=v\n"; got != want { - t.Errorf("got %v, want %v", got, want) - } - - buf.Reset() - logger.Swap(nil) - - if err := logger.Log("k", "v"); err != nil { - t.Error(err) - } - if got, want := buf.String(), ""; got != want { - t.Errorf("got %v, want %v", got, want) - } -} - -func TestSwapLoggerConcurrency(t *testing.T) { - testConcurrency(t, &log.SwapLogger{}) -} diff --git a/vendor/github.com/go-kit/kit/log/logfmt_logger.go b/vendor/github.com/go-kit/kit/log/logfmt_logger.go deleted file mode 100644 index 4ff5555..0000000 --- a/vendor/github.com/go-kit/kit/log/logfmt_logger.go +++ /dev/null @@ -1,61 +0,0 @@ -package log - -import ( - "bytes" - "io" - "sync" - - "github.com/go-logfmt/logfmt" -) - -type logfmtEncoder struct { - *logfmt.Encoder - buf bytes.Buffer -} - -func (l *logfmtEncoder) Reset() { - l.Encoder.Reset() - l.buf.Reset() -} - -var logfmtEncoderPool = sync.Pool{ - New: func() interface{} { - var enc logfmtEncoder - enc.Encoder = logfmt.NewEncoder(&enc.buf) - return &enc - }, -} - -type logfmtLogger struct { - w io.Writer -} - -// NewLogfmtLogger returns a logger that encodes keyvals to the Writer in -// logfmt format. 
The passed Writer must be safe for concurrent use by -// multiple goroutines if the returned Logger will be used concurrently. -func NewLogfmtLogger(w io.Writer) Logger { - return &logfmtLogger{w} -} - -func (l logfmtLogger) Log(keyvals ...interface{}) error { - enc := logfmtEncoderPool.Get().(*logfmtEncoder) - enc.Reset() - defer logfmtEncoderPool.Put(enc) - - if err := enc.EncodeKeyvals(keyvals...); err != nil { - return err - } - - // Add newline to the end of the buffer - if err := enc.EndRecord(); err != nil { - return err - } - - // The Logger interface requires implementations to be safe for concurrent - // use by multiple goroutines. For this implementation that means making - // only one call to l.w.Write() for each call to Log. - if _, err := l.w.Write(enc.buf.Bytes()); err != nil { - return err - } - return nil -} diff --git a/vendor/github.com/go-kit/kit/log/logfmt_logger_test.go b/vendor/github.com/go-kit/kit/log/logfmt_logger_test.go deleted file mode 100644 index 185e948..0000000 --- a/vendor/github.com/go-kit/kit/log/logfmt_logger_test.go +++ /dev/null @@ -1,55 +0,0 @@ -package log_test - -import ( - "bytes" - "errors" - "io/ioutil" - "testing" - - "github.com/go-kit/kit/log" - "github.com/go-logfmt/logfmt" -) - -func TestLogfmtLogger(t *testing.T) { - buf := &bytes.Buffer{} - logger := log.NewLogfmtLogger(buf) - - if err := logger.Log("hello", "world"); err != nil { - t.Fatal(err) - } - if want, have := "hello=world\n", buf.String(); want != have { - t.Errorf("want %#v, have %#v", want, have) - } - - buf.Reset() - if err := logger.Log("a", 1, "err", errors.New("error")); err != nil { - t.Fatal(err) - } - if want, have := "a=1 err=error\n", buf.String(); want != have { - t.Errorf("want %#v, have %#v", want, have) - } - - buf.Reset() - if err := logger.Log("std_map", map[int]int{1: 2}, "my_map", mymap{0: 0}); err != nil { - t.Fatal(err) - } - if want, have := "std_map=\""+logfmt.ErrUnsupportedValueType.Error()+"\" my_map=special_behavior\n", buf.String(); want != have { - t.Errorf("want %#v, have %#v", want, have) - } -} - -func BenchmarkLogfmtLoggerSimple(b *testing.B) { - benchmarkRunner(b, log.NewLogfmtLogger(ioutil.Discard), baseMessage) -} - -func BenchmarkLogfmtLoggerContextual(b *testing.B) { - benchmarkRunner(b, log.NewLogfmtLogger(ioutil.Discard), withMessage) -} - -func TestLogfmtLoggerConcurrency(t *testing.T) { - testConcurrency(t, log.NewLogfmtLogger(ioutil.Discard)) -} - -type mymap map[int]int - -func (m mymap) String() string { return "special_behavior" } diff --git a/vendor/github.com/go-kit/kit/log/nop_logger.go b/vendor/github.com/go-kit/kit/log/nop_logger.go deleted file mode 100644 index 1047d62..0000000 --- a/vendor/github.com/go-kit/kit/log/nop_logger.go +++ /dev/null @@ -1,8 +0,0 @@ -package log - -type nopLogger struct{} - -// NewNopLogger returns a logger that doesn't do anything. 
-func NewNopLogger() Logger { return nopLogger{} } - -func (nopLogger) Log(...interface{}) error { return nil } diff --git a/vendor/github.com/go-kit/kit/log/nop_logger_test.go b/vendor/github.com/go-kit/kit/log/nop_logger_test.go deleted file mode 100644 index 043553e..0000000 --- a/vendor/github.com/go-kit/kit/log/nop_logger_test.go +++ /dev/null @@ -1,17 +0,0 @@ -package log_test - -import ( - "testing" - - "github.com/go-kit/kit/log" -) - -func TestNopLogger(t *testing.T) { - logger := log.NewNopLogger() - if err := logger.Log("abc", 123); err != nil { - t.Error(err) - } - if err := log.NewContext(logger).With("def", "ghi").Log(); err != nil { - t.Error(err) - } -} diff --git a/vendor/github.com/go-kit/kit/log/stdlib.go b/vendor/github.com/go-kit/kit/log/stdlib.go deleted file mode 100644 index 7ffd1ca..0000000 --- a/vendor/github.com/go-kit/kit/log/stdlib.go +++ /dev/null @@ -1,116 +0,0 @@ -package log - -import ( - "io" - "log" - "regexp" - "strings" -) - -// StdlibWriter implements io.Writer by invoking the stdlib log.Print. It's -// designed to be passed to a Go kit logger as the writer, for cases where -// it's necessary to redirect all Go kit log output to the stdlib logger. -// -// If you have any choice in the matter, you shouldn't use this. Prefer to -// redirect the stdlib log to the Go kit logger via NewStdlibAdapter. -type StdlibWriter struct{} - -// Write implements io.Writer. -func (w StdlibWriter) Write(p []byte) (int, error) { - log.Print(strings.TrimSpace(string(p))) - return len(p), nil -} - -// StdlibAdapter wraps a Logger and allows it to be passed to the stdlib -// logger's SetOutput. It will extract date/timestamps, filenames, and -// messages, and place them under relevant keys. -type StdlibAdapter struct { - Logger - timestampKey string - fileKey string - messageKey string -} - -// StdlibAdapterOption sets a parameter for the StdlibAdapter. -type StdlibAdapterOption func(*StdlibAdapter) - -// TimestampKey sets the key for the timestamp field. By default, it's "ts". -func TimestampKey(key string) StdlibAdapterOption { - return func(a *StdlibAdapter) { a.timestampKey = key } -} - -// FileKey sets the key for the file and line field. By default, it's "file". -func FileKey(key string) StdlibAdapterOption { - return func(a *StdlibAdapter) { a.fileKey = key } -} - -// MessageKey sets the key for the actual log message. By default, it's "msg". -func MessageKey(key string) StdlibAdapterOption { - return func(a *StdlibAdapter) { a.messageKey = key } -} - -// NewStdlibAdapter returns a new StdlibAdapter wrapper around the passed -// logger. It's designed to be passed to log.SetOutput. 
-func NewStdlibAdapter(logger Logger, options ...StdlibAdapterOption) io.Writer { - a := StdlibAdapter{ - Logger: logger, - timestampKey: "ts", - fileKey: "file", - messageKey: "msg", - } - for _, option := range options { - option(&a) - } - return a -} - -func (a StdlibAdapter) Write(p []byte) (int, error) { - result := subexps(p) - keyvals := []interface{}{} - var timestamp string - if date, ok := result["date"]; ok && date != "" { - timestamp = date - } - if time, ok := result["time"]; ok && time != "" { - if timestamp != "" { - timestamp += " " - } - timestamp += time - } - if timestamp != "" { - keyvals = append(keyvals, a.timestampKey, timestamp) - } - if file, ok := result["file"]; ok && file != "" { - keyvals = append(keyvals, a.fileKey, file) - } - if msg, ok := result["msg"]; ok { - keyvals = append(keyvals, a.messageKey, msg) - } - if err := a.Logger.Log(keyvals...); err != nil { - return 0, err - } - return len(p), nil -} - -const ( - logRegexpDate = `(?P[0-9]{4}/[0-9]{2}/[0-9]{2})?[ ]?` - logRegexpTime = `(?P[0-9]{2}:[0-9]{2}:[0-9]{2}(\.[0-9]+)?)?[ ]?` - logRegexpFile = `(?P.+?:[0-9]+)?` - logRegexpMsg = `(: )?(?P.*)` -) - -var ( - logRegexp = regexp.MustCompile(logRegexpDate + logRegexpTime + logRegexpFile + logRegexpMsg) -) - -func subexps(line []byte) map[string]string { - m := logRegexp.FindSubmatch(line) - if len(m) < len(logRegexp.SubexpNames()) { - return map[string]string{} - } - result := map[string]string{} - for i, name := range logRegexp.SubexpNames() { - result[name] = string(m[i]) - } - return result -} diff --git a/vendor/github.com/go-kit/kit/log/stdlib_test.go b/vendor/github.com/go-kit/kit/log/stdlib_test.go deleted file mode 100644 index bdf94e8..0000000 --- a/vendor/github.com/go-kit/kit/log/stdlib_test.go +++ /dev/null @@ -1,205 +0,0 @@ -package log - -import ( - "bytes" - "fmt" - "log" - "testing" - "time" -) - -func TestStdlibWriter(t *testing.T) { - buf := &bytes.Buffer{} - log.SetOutput(buf) - log.SetFlags(log.LstdFlags) - logger := NewLogfmtLogger(StdlibWriter{}) - logger.Log("key", "val") - timestamp := time.Now().Format("2006/01/02 15:04:05") - if want, have := timestamp+" key=val\n", buf.String(); want != have { - t.Errorf("want %q, have %q", want, have) - } -} - -func TestStdlibAdapterUsage(t *testing.T) { - buf := &bytes.Buffer{} - logger := NewLogfmtLogger(buf) - writer := NewStdlibAdapter(logger) - stdlog := log.New(writer, "", 0) - - now := time.Now() - date := now.Format("2006/01/02") - time := now.Format("15:04:05") - - for flag, want := range map[int]string{ - 0: "msg=hello\n", - log.Ldate: "ts=" + date + " msg=hello\n", - log.Ltime: "ts=" + time + " msg=hello\n", - log.Ldate | log.Ltime: "ts=\"" + date + " " + time + "\" msg=hello\n", - log.Lshortfile: "file=stdlib_test.go:44 msg=hello\n", - log.Lshortfile | log.Ldate: "ts=" + date + " file=stdlib_test.go:44 msg=hello\n", - log.Lshortfile | log.Ldate | log.Ltime: "ts=\"" + date + " " + time + "\" file=stdlib_test.go:44 msg=hello\n", - } { - buf.Reset() - stdlog.SetFlags(flag) - stdlog.Print("hello") - if have := buf.String(); want != have { - t.Errorf("flag=%d: want %#v, have %#v", flag, want, have) - } - } -} - -func TestStdLibAdapterExtraction(t *testing.T) { - buf := &bytes.Buffer{} - logger := NewLogfmtLogger(buf) - writer := NewStdlibAdapter(logger) - for input, want := range map[string]string{ - "hello": "msg=hello\n", - "2009/01/23: hello": "ts=2009/01/23 msg=hello\n", - "2009/01/23 01:23:23: hello": "ts=\"2009/01/23 01:23:23\" msg=hello\n", - "01:23:23: hello": "ts=01:23:23 
msg=hello\n", - "2009/01/23 01:23:23.123123: hello": "ts=\"2009/01/23 01:23:23.123123\" msg=hello\n", - "2009/01/23 01:23:23.123123 /a/b/c/d.go:23: hello": "ts=\"2009/01/23 01:23:23.123123\" file=/a/b/c/d.go:23 msg=hello\n", - "01:23:23.123123 /a/b/c/d.go:23: hello": "ts=01:23:23.123123 file=/a/b/c/d.go:23 msg=hello\n", - "2009/01/23 01:23:23 /a/b/c/d.go:23: hello": "ts=\"2009/01/23 01:23:23\" file=/a/b/c/d.go:23 msg=hello\n", - "2009/01/23 /a/b/c/d.go:23: hello": "ts=2009/01/23 file=/a/b/c/d.go:23 msg=hello\n", - "/a/b/c/d.go:23: hello": "file=/a/b/c/d.go:23 msg=hello\n", - } { - buf.Reset() - fmt.Fprint(writer, input) - if have := buf.String(); want != have { - t.Errorf("%q: want %#v, have %#v", input, want, have) - } - } -} - -func TestStdlibAdapterSubexps(t *testing.T) { - for input, wantMap := range map[string]map[string]string{ - "hello world": { - "date": "", - "time": "", - "file": "", - "msg": "hello world", - }, - "2009/01/23: hello world": { - "date": "2009/01/23", - "time": "", - "file": "", - "msg": "hello world", - }, - "2009/01/23 01:23:23: hello world": { - "date": "2009/01/23", - "time": "01:23:23", - "file": "", - "msg": "hello world", - }, - "01:23:23: hello world": { - "date": "", - "time": "01:23:23", - "file": "", - "msg": "hello world", - }, - "2009/01/23 01:23:23.123123: hello world": { - "date": "2009/01/23", - "time": "01:23:23.123123", - "file": "", - "msg": "hello world", - }, - "2009/01/23 01:23:23.123123 /a/b/c/d.go:23: hello world": { - "date": "2009/01/23", - "time": "01:23:23.123123", - "file": "/a/b/c/d.go:23", - "msg": "hello world", - }, - "01:23:23.123123 /a/b/c/d.go:23: hello world": { - "date": "", - "time": "01:23:23.123123", - "file": "/a/b/c/d.go:23", - "msg": "hello world", - }, - "2009/01/23 01:23:23 /a/b/c/d.go:23: hello world": { - "date": "2009/01/23", - "time": "01:23:23", - "file": "/a/b/c/d.go:23", - "msg": "hello world", - }, - "2009/01/23 /a/b/c/d.go:23: hello world": { - "date": "2009/01/23", - "time": "", - "file": "/a/b/c/d.go:23", - "msg": "hello world", - }, - "/a/b/c/d.go:23: hello world": { - "date": "", - "time": "", - "file": "/a/b/c/d.go:23", - "msg": "hello world", - }, - "2009/01/23 01:23:23.123123 C:/a/b/c/d.go:23: hello world": { - "date": "2009/01/23", - "time": "01:23:23.123123", - "file": "C:/a/b/c/d.go:23", - "msg": "hello world", - }, - "01:23:23.123123 C:/a/b/c/d.go:23: hello world": { - "date": "", - "time": "01:23:23.123123", - "file": "C:/a/b/c/d.go:23", - "msg": "hello world", - }, - "2009/01/23 01:23:23 C:/a/b/c/d.go:23: hello world": { - "date": "2009/01/23", - "time": "01:23:23", - "file": "C:/a/b/c/d.go:23", - "msg": "hello world", - }, - "2009/01/23 C:/a/b/c/d.go:23: hello world": { - "date": "2009/01/23", - "time": "", - "file": "C:/a/b/c/d.go:23", - "msg": "hello world", - }, - "C:/a/b/c/d.go:23: hello world": { - "date": "", - "time": "", - "file": "C:/a/b/c/d.go:23", - "msg": "hello world", - }, - "2009/01/23 01:23:23.123123 C:/a/b/c/d.go:23: :.;<>_#{[]}\"\\": { - "date": "2009/01/23", - "time": "01:23:23.123123", - "file": "C:/a/b/c/d.go:23", - "msg": ":.;<>_#{[]}\"\\", - }, - "01:23:23.123123 C:/a/b/c/d.go:23: :.;<>_#{[]}\"\\": { - "date": "", - "time": "01:23:23.123123", - "file": "C:/a/b/c/d.go:23", - "msg": ":.;<>_#{[]}\"\\", - }, - "2009/01/23 01:23:23 C:/a/b/c/d.go:23: :.;<>_#{[]}\"\\": { - "date": "2009/01/23", - "time": "01:23:23", - "file": "C:/a/b/c/d.go:23", - "msg": ":.;<>_#{[]}\"\\", - }, - "2009/01/23 C:/a/b/c/d.go:23: :.;<>_#{[]}\"\\": { - "date": "2009/01/23", - "time": "", - "file": 
"C:/a/b/c/d.go:23", - "msg": ":.;<>_#{[]}\"\\", - }, - "C:/a/b/c/d.go:23: :.;<>_#{[]}\"\\": { - "date": "", - "time": "", - "file": "C:/a/b/c/d.go:23", - "msg": ":.;<>_#{[]}\"\\", - }, - } { - haveMap := subexps([]byte(input)) - for key, want := range wantMap { - if have := haveMap[key]; want != have { - t.Errorf("%q: %q: want %q, have %q", input, key, want, have) - } - } - } -} diff --git a/vendor/github.com/go-kit/kit/log/term/LICENSE b/vendor/github.com/go-kit/kit/log/term/LICENSE deleted file mode 100644 index f090cb4..0000000 --- a/vendor/github.com/go-kit/kit/log/term/LICENSE +++ /dev/null @@ -1,21 +0,0 @@ -The MIT License (MIT) - -Copyright (c) 2014 Simon Eskildsen - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in -all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -THE SOFTWARE. diff --git a/vendor/github.com/go-kit/kit/log/term/colorlogger.go b/vendor/github.com/go-kit/kit/log/term/colorlogger.go deleted file mode 100644 index 76d94ea..0000000 --- a/vendor/github.com/go-kit/kit/log/term/colorlogger.go +++ /dev/null @@ -1,144 +0,0 @@ -package term - -import ( - "bytes" - "fmt" - "io" - "sync" - - "github.com/go-kit/kit/log" -) - -// Color represents an ANSI color. The zero value is Default. -type Color uint8 - -// ANSI colors. -const ( - Default = Color(iota) - - Black - DarkRed - DarkGreen - Brown - DarkBlue - DarkMagenta - DarkCyan - Gray - - DarkGray - Red - Green - Yellow - Blue - Magenta - Cyan - White - - numColors -) - -// For more on ANSI escape codes see -// https://en.wikipedia.org/wiki/ANSI_escape_code. See in particular -// https://en.wikipedia.org/wiki/ANSI_escape_code#Colors. - -var ( - resetColorBytes = []byte("\x1b[39;49m") - fgColorBytes [][]byte - bgColorBytes [][]byte -) - -func init() { - // Default - fgColorBytes = append(fgColorBytes, []byte("\x1b[39m")) - bgColorBytes = append(bgColorBytes, []byte("\x1b[49m")) - - // dark colors - for color := Black; color < DarkGray; color++ { - fgColorBytes = append(fgColorBytes, []byte(fmt.Sprintf("\x1b[%dm", 30+color-Black))) - bgColorBytes = append(bgColorBytes, []byte(fmt.Sprintf("\x1b[%dm", 40+color-Black))) - } - - // bright colors - for color := DarkGray; color < numColors; color++ { - fgColorBytes = append(fgColorBytes, []byte(fmt.Sprintf("\x1b[%d;1m", 30+color-DarkGray))) - bgColorBytes = append(bgColorBytes, []byte(fmt.Sprintf("\x1b[%d;1m", 40+color-DarkGray))) - } -} - -// FgBgColor represents a foreground and background color. 
-type FgBgColor struct { - Fg, Bg Color -} - -func (c FgBgColor) isZero() bool { - return c.Fg == Default && c.Bg == Default -} - -// NewColorLogger returns a Logger which writes colored logs to w. ANSI color -// codes for the colors returned by color are added to the formatted output -// from the Logger returned by newLogger and the combined result written to w. -func NewColorLogger(w io.Writer, newLogger func(io.Writer) log.Logger, color func(keyvals ...interface{}) FgBgColor) log.Logger { - if color == nil { - panic("color func nil") - } - return &colorLogger{ - w: w, - newLogger: newLogger, - color: color, - bufPool: sync.Pool{New: func() interface{} { return &loggerBuf{} }}, - noColorLogger: newLogger(w), - } -} - -type colorLogger struct { - w io.Writer - newLogger func(io.Writer) log.Logger - color func(keyvals ...interface{}) FgBgColor - bufPool sync.Pool - noColorLogger log.Logger -} - -func (l *colorLogger) Log(keyvals ...interface{}) error { - color := l.color(keyvals...) - if color.isZero() { - return l.noColorLogger.Log(keyvals...) - } - - lb := l.getLoggerBuf() - defer l.putLoggerBuf(lb) - if color.Fg != Default { - lb.buf.Write(fgColorBytes[color.Fg]) - } - if color.Bg != Default { - lb.buf.Write(bgColorBytes[color.Bg]) - } - err := lb.logger.Log(keyvals...) - if err != nil { - return err - } - if color.Fg != Default || color.Bg != Default { - lb.buf.Write(resetColorBytes) - } - _, err = io.Copy(l.w, lb.buf) - return err -} - -type loggerBuf struct { - buf *bytes.Buffer - logger log.Logger -} - -func (l *colorLogger) getLoggerBuf() *loggerBuf { - lb := l.bufPool.Get().(*loggerBuf) - if lb.buf == nil { - lb.buf = &bytes.Buffer{} - lb.logger = l.newLogger(lb.buf) - } else { - lb.buf.Reset() - } - return lb -} - -func (l *colorLogger) putLoggerBuf(cb *loggerBuf) { - l.bufPool.Put(cb) -} diff --git a/vendor/github.com/go-kit/kit/log/term/colorlogger_test.go b/vendor/github.com/go-kit/kit/log/term/colorlogger_test.go deleted file mode 100644 index 030651a..0000000 --- a/vendor/github.com/go-kit/kit/log/term/colorlogger_test.go +++ /dev/null @@ -1,88 +0,0 @@ -package term_test - -import ( - "bytes" - "io" - "io/ioutil" - "strconv" - "sync" - "testing" - - "github.com/go-kit/kit/log" - "github.com/go-kit/kit/log/term" -) - -func TestColorLogger(t *testing.T) { - var buf bytes.Buffer - logger := newColorLogger(&buf) - - if err := logger.Log("hello", "world"); err != nil { - t.Fatal(err) - } - if want, have := "hello=world\n", buf.String(); want != have { - t.Errorf("\nwant %#v\nhave %#v", want, have) - } - - buf.Reset() - if err := logger.Log("a", 1); err != nil { - t.Fatal(err) - } - if want, have := "\x1b[32;1m\x1b[47;1ma=1\n\x1b[39;49m", buf.String(); want != have { - t.Errorf("\nwant %#v\nhave %#v", want, have) - } -} - -func newColorLogger(w io.Writer) log.Logger { - return term.NewColorLogger(w, log.NewLogfmtLogger, - func(keyvals ...interface{}) term.FgBgColor { - if keyvals[0] == "a" { - return term.FgBgColor{Fg: term.Green, Bg: term.White} - } - return term.FgBgColor{} - }) -} - -func BenchmarkColorLoggerSimple(b *testing.B) { - benchmarkRunner(b, newColorLogger(ioutil.Discard), baseMessage) -} - -func BenchmarkColorLoggerContextual(b *testing.B) { - benchmarkRunner(b, newColorLogger(ioutil.Discard), withMessage) -} - -func TestColorLoggerConcurrency(t *testing.T) { - testConcurrency(t, newColorLogger(ioutil.Discard)) -} - -// copied from log/benchmark_test.go -func benchmarkRunner(b *testing.B, logger log.Logger, f func(log.Logger)) { - lc := 
log.NewContext(logger).With("common_key", "common_value") - b.ReportAllocs() - b.ResetTimer() - for i := 0; i < b.N; i++ { - f(lc) - } -} - -var ( - baseMessage = func(logger log.Logger) { logger.Log("foo_key", "foo_value") } - withMessage = func(logger log.Logger) { log.NewContext(logger).With("a", "b").Log("c", "d") } -) - -// copied from log/concurrency_test.go -func testConcurrency(t *testing.T, logger log.Logger) { - for _, n := range []int{10, 100, 500} { - wg := sync.WaitGroup{} - wg.Add(n) - for i := 0; i < n; i++ { - go func() { spam(logger); wg.Done() }() - } - wg.Wait() - } -} - -func spam(logger log.Logger) { - for i := 0; i < 100; i++ { - logger.Log("a", strconv.FormatInt(int64(i), 10)) - } -} diff --git a/vendor/github.com/go-kit/kit/log/term/colorwriter_others.go b/vendor/github.com/go-kit/kit/log/term/colorwriter_others.go deleted file mode 100644 index cc57102..0000000 --- a/vendor/github.com/go-kit/kit/log/term/colorwriter_others.go +++ /dev/null @@ -1,12 +0,0 @@ -// +build !windows - -package term - -import "io" - -// NewColorWriter returns an io.Writer that writes to w and provides cross -// platform support for ANSI color codes. If w is not a terminal it is -// returned unmodified. -func NewColorWriter(w io.Writer) io.Writer { - return w -} diff --git a/vendor/github.com/go-kit/kit/log/term/colorwriter_windows.go b/vendor/github.com/go-kit/kit/log/term/colorwriter_windows.go deleted file mode 100644 index 4d2d673..0000000 --- a/vendor/github.com/go-kit/kit/log/term/colorwriter_windows.go +++ /dev/null @@ -1,190 +0,0 @@ -// The code in this file is adapted from github.com/mattn/go-colorable. - -// +build windows - -package term - -import ( - "bytes" - "fmt" - "io" - "strconv" - "strings" - "syscall" - "unsafe" -) - -type colorWriter struct { - out io.Writer - handle syscall.Handle - lastbuf bytes.Buffer - oldattr word -} - -// NewColorWriter returns an io.Writer that writes to w and provides cross -// platform support for ANSI color codes. If w is not a terminal it is -// returned unmodified. 
-func NewColorWriter(w io.Writer) io.Writer { - if !IsTerminal(w) { - return w - } - - var csbi consoleScreenBufferInfo - handle := syscall.Handle(w.(fder).Fd()) - procGetConsoleScreenBufferInfo.Call(uintptr(handle), uintptr(unsafe.Pointer(&csbi))) - - return &colorWriter{ - out: w, - handle: handle, - oldattr: csbi.attributes, - } -} - -func (w *colorWriter) Write(data []byte) (n int, err error) { - var csbi consoleScreenBufferInfo - procGetConsoleScreenBufferInfo.Call(uintptr(w.handle), uintptr(unsafe.Pointer(&csbi))) - - er := bytes.NewBuffer(data) -loop: - for { - r1, _, err := procGetConsoleScreenBufferInfo.Call(uintptr(w.handle), uintptr(unsafe.Pointer(&csbi))) - if r1 == 0 { - break loop - } - - c1, _, err := er.ReadRune() - if err != nil { - break loop - } - if c1 != 0x1b { - fmt.Fprint(w.out, string(c1)) - continue - } - c2, _, err := er.ReadRune() - if err != nil { - w.lastbuf.WriteRune(c1) - break loop - } - if c2 != 0x5b { - w.lastbuf.WriteRune(c1) - w.lastbuf.WriteRune(c2) - continue - } - - var buf bytes.Buffer - var m rune - for { - c, _, err := er.ReadRune() - if err != nil { - w.lastbuf.WriteRune(c1) - w.lastbuf.WriteRune(c2) - w.lastbuf.Write(buf.Bytes()) - break loop - } - if ('a' <= c && c <= 'z') || ('A' <= c && c <= 'Z') || c == '@' { - m = c - break - } - buf.Write([]byte(string(c))) - } - - switch m { - case 'm': - attr := csbi.attributes - cs := buf.String() - if cs == "" { - procSetConsoleTextAttribute.Call(uintptr(w.handle), uintptr(w.oldattr)) - continue - } - token := strings.Split(cs, ";") - intensityMode := word(0) - for _, ns := range token { - if n, err = strconv.Atoi(ns); err == nil { - switch { - case n == 0: - attr = w.oldattr - case n == 1: - attr |= intensityMode - case 30 <= n && n <= 37: - attr = (attr & backgroundMask) - if (n-30)&1 != 0 { - attr |= foregroundRed - } - if (n-30)&2 != 0 { - attr |= foregroundGreen - } - if (n-30)&4 != 0 { - attr |= foregroundBlue - } - intensityMode = foregroundIntensity - case n == 39: // reset foreground color - attr &= backgroundMask - attr |= w.oldattr & foregroundMask - case 40 <= n && n <= 47: - attr = (attr & foregroundMask) - if (n-40)&1 != 0 { - attr |= backgroundRed - } - if (n-40)&2 != 0 { - attr |= backgroundGreen - } - if (n-40)&4 != 0 { - attr |= backgroundBlue - } - intensityMode = backgroundIntensity - case n == 49: // reset background color - attr &= foregroundMask - attr |= w.oldattr & backgroundMask - } - procSetConsoleTextAttribute.Call(uintptr(w.handle), uintptr(attr)) - } - } - } - } - return len(data) - w.lastbuf.Len(), nil -} - -var ( - procGetConsoleScreenBufferInfo = kernel32.NewProc("GetConsoleScreenBufferInfo") - procSetConsoleTextAttribute = kernel32.NewProc("SetConsoleTextAttribute") -) - -const ( - foregroundBlue = 0x1 - foregroundGreen = 0x2 - foregroundRed = 0x4 - foregroundIntensity = 0x8 - foregroundMask = (foregroundRed | foregroundBlue | foregroundGreen | foregroundIntensity) - backgroundBlue = 0x10 - backgroundGreen = 0x20 - backgroundRed = 0x40 - backgroundIntensity = 0x80 - backgroundMask = (backgroundRed | backgroundBlue | backgroundGreen | backgroundIntensity) -) - -type ( - wchar uint16 - short int16 - dword uint32 - word uint16 -) - -type coord struct { - x short - y short -} - -type smallRect struct { - left short - top short - right short - bottom short -} - -type consoleScreenBufferInfo struct { - size coord - cursorPosition coord - attributes word - window smallRect - maximumWindowSize coord -} diff --git a/vendor/github.com/go-kit/kit/log/term/example_test.go 
b/vendor/github.com/go-kit/kit/log/term/example_test.go deleted file mode 100644 index 12a3d2a..0000000 --- a/vendor/github.com/go-kit/kit/log/term/example_test.go +++ /dev/null @@ -1,57 +0,0 @@ -package term_test - -import ( - "errors" - "os" - - "github.com/go-kit/kit/log" - "github.com/go-kit/kit/log/term" -) - -func ExampleNewLogger_redErrors() { - // Color errors red - colorFn := func(keyvals ...interface{}) term.FgBgColor { - for i := 1; i < len(keyvals); i += 2 { - if _, ok := keyvals[i].(error); ok { - return term.FgBgColor{Fg: term.White, Bg: term.Red} - } - } - return term.FgBgColor{} - } - - logger := term.NewLogger(os.Stdout, log.NewLogfmtLogger, colorFn) - - logger.Log("msg", "default color", "err", nil) - logger.Log("msg", "colored because of error", "err", errors.New("coloring error")) -} - -func ExampleNewLogger_levelColors() { - // Color by level value - colorFn := func(keyvals ...interface{}) term.FgBgColor { - for i := 0; i < len(keyvals)-1; i += 2 { - if keyvals[i] != "level" { - continue - } - switch keyvals[i+1] { - case "debug": - return term.FgBgColor{Fg: term.DarkGray} - case "info": - return term.FgBgColor{Fg: term.Gray} - case "warn": - return term.FgBgColor{Fg: term.Yellow} - case "error": - return term.FgBgColor{Fg: term.Red} - case "crit": - return term.FgBgColor{Fg: term.Gray, Bg: term.DarkRed} - default: - return term.FgBgColor{} - } - } - return term.FgBgColor{} - } - - logger := term.NewLogger(os.Stdout, log.NewJSONLogger, colorFn) - - logger.Log("level", "warn", "msg", "yellow") - logger.Log("level", "debug", "msg", "dark gray") -} diff --git a/vendor/github.com/go-kit/kit/log/term/term.go b/vendor/github.com/go-kit/kit/log/term/term.go deleted file mode 100644 index 3965f1c..0000000 --- a/vendor/github.com/go-kit/kit/log/term/term.go +++ /dev/null @@ -1,22 +0,0 @@ -// Package term provides tools for logging to a terminal. -package term - -import ( - "io" - - "github.com/go-kit/kit/log" -) - -// NewLogger returns a Logger that takes advantage of terminal features if -// possible. Log events are formatted by the Logger returned by newLogger. If -// w is a terminal each log event is colored according to the color function. -func NewLogger(w io.Writer, newLogger func(io.Writer) log.Logger, color func(keyvals ...interface{}) FgBgColor) log.Logger { - if !IsTerminal(w) { - return newLogger(w) - } - return NewColorLogger(NewColorWriter(w), newLogger, color) -} - -type fder interface { - Fd() uintptr -} diff --git a/vendor/github.com/go-kit/kit/log/term/terminal_appengine.go b/vendor/github.com/go-kit/kit/log/term/terminal_appengine.go deleted file mode 100644 index b023121..0000000 --- a/vendor/github.com/go-kit/kit/log/term/terminal_appengine.go +++ /dev/null @@ -1,15 +0,0 @@ -// Based on ssh/terminal: -// Copyright 2013 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build appengine - -package term - -import "io" - -// IsTerminal always returns false on AppEngine. -func IsTerminal(w io.Writer) bool { - return false -} diff --git a/vendor/github.com/go-kit/kit/log/term/terminal_darwin.go b/vendor/github.com/go-kit/kit/log/term/terminal_darwin.go deleted file mode 100644 index 459cf54..0000000 --- a/vendor/github.com/go-kit/kit/log/term/terminal_darwin.go +++ /dev/null @@ -1,10 +0,0 @@ -// Based on ssh/terminal: -// Copyright 2013 The Go Authors. All rights reserved. 
-// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package term - -import "syscall" - -const ioctlReadTermios = syscall.TIOCGETA diff --git a/vendor/github.com/go-kit/kit/log/term/terminal_freebsd.go b/vendor/github.com/go-kit/kit/log/term/terminal_freebsd.go deleted file mode 100644 index 791d5c6..0000000 --- a/vendor/github.com/go-kit/kit/log/term/terminal_freebsd.go +++ /dev/null @@ -1,7 +0,0 @@ -package term - -import ( - "syscall" -) - -const ioctlReadTermios = syscall.TIOCGETA diff --git a/vendor/github.com/go-kit/kit/log/term/terminal_linux.go b/vendor/github.com/go-kit/kit/log/term/terminal_linux.go deleted file mode 100644 index ffeab4d..0000000 --- a/vendor/github.com/go-kit/kit/log/term/terminal_linux.go +++ /dev/null @@ -1,12 +0,0 @@ -// Based on ssh/terminal: -// Copyright 2013 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build !appengine - -package term - -import "syscall" - -const ioctlReadTermios = syscall.TCGETS diff --git a/vendor/github.com/go-kit/kit/log/term/terminal_notwindows.go b/vendor/github.com/go-kit/kit/log/term/terminal_notwindows.go deleted file mode 100644 index 9c72558..0000000 --- a/vendor/github.com/go-kit/kit/log/term/terminal_notwindows.go +++ /dev/null @@ -1,25 +0,0 @@ -// Based on ssh/terminal: -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build linux,!appengine darwin freebsd openbsd - -package term - -import ( - "io" - "syscall" - "unsafe" -) - -// IsTerminal returns true if w writes to a terminal. -func IsTerminal(w io.Writer) bool { - fw, ok := w.(fder) - if !ok { - return false - } - var termios syscall.Termios - _, _, err := syscall.Syscall6(syscall.SYS_IOCTL, fw.Fd(), ioctlReadTermios, uintptr(unsafe.Pointer(&termios)), 0, 0, 0) - return err == 0 -} diff --git a/vendor/github.com/go-kit/kit/log/term/terminal_openbsd.go b/vendor/github.com/go-kit/kit/log/term/terminal_openbsd.go deleted file mode 100644 index f993166..0000000 --- a/vendor/github.com/go-kit/kit/log/term/terminal_openbsd.go +++ /dev/null @@ -1,5 +0,0 @@ -package term - -import "syscall" - -const ioctlReadTermios = syscall.TIOCGETA diff --git a/vendor/github.com/go-kit/kit/log/term/terminal_windows.go b/vendor/github.com/go-kit/kit/log/term/terminal_windows.go deleted file mode 100644 index 5e797f4..0000000 --- a/vendor/github.com/go-kit/kit/log/term/terminal_windows.go +++ /dev/null @@ -1,31 +0,0 @@ -// Based on ssh/terminal: -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build windows - -package term - -import ( - "io" - "syscall" - "unsafe" -) - -var kernel32 = syscall.NewLazyDLL("kernel32.dll") - -var ( - procGetConsoleMode = kernel32.NewProc("GetConsoleMode") -) - -// IsTerminal returns true if w writes to a terminal. 
-func IsTerminal(w io.Writer) bool { - fw, ok := w.(fder) - if !ok { - return false - } - var st uint32 - r, _, e := syscall.Syscall(procGetConsoleMode.Addr(), 2, fw.Fd(), uintptr(unsafe.Pointer(&st)), 0) - return r != 0 && e == 0 -} diff --git a/vendor/github.com/go-kit/kit/log/value.go b/vendor/github.com/go-kit/kit/log/value.go deleted file mode 100644 index 2b0448e..0000000 --- a/vendor/github.com/go-kit/kit/log/value.go +++ /dev/null @@ -1,62 +0,0 @@ -package log - -import ( - "time" - - "github.com/go-stack/stack" -) - -// A Valuer generates a log value. When passed to Context.With in a value -// element (odd indexes), it represents a dynamic value which is re-evaluated -// with each log event. -type Valuer func() interface{} - -// bindValues replaces all value elements (odd indexes) containing a Valuer -// with their generated value. -func bindValues(keyvals []interface{}) { - for i := 1; i < len(keyvals); i += 2 { - if v, ok := keyvals[i].(Valuer); ok { - keyvals[i] = v() - } - } -} - -// containsValuer returns true if any of the value elements (odd indexes) -// contain a Valuer. -func containsValuer(keyvals []interface{}) bool { - for i := 1; i < len(keyvals); i += 2 { - if _, ok := keyvals[i].(Valuer); ok { - return true - } - } - return false -} - -// Timestamp returns a Valuer that invokes the underlying function when bound, -// returning a time.Time. Users will probably want to use DefaultTimestamp or -// DefaultTimestampUTC. -func Timestamp(t func() time.Time) Valuer { - return func() interface{} { return t() } -} - -var ( - // DefaultTimestamp is a Valuer that returns the current wallclock time, - // respecting time zones, when bound. - DefaultTimestamp Valuer = func() interface{} { return time.Now().Format(time.RFC3339) } - - // DefaultTimestampUTC is a Valuer that returns the current time in UTC - // when bound. - DefaultTimestampUTC Valuer = func() interface{} { return time.Now().UTC().Format(time.RFC3339) } -) - -// Caller returns a Valuer that returns a file and line from a specified depth -// in the callstack. Users will probably want to use DefaultCaller. -func Caller(depth int) Valuer { - return func() interface{} { return stack.Caller(depth) } -} - -var ( - // DefaultCaller is a Valuer that returns the file and line where the Log - // method was invoked. It can only be used with log.With. 
- DefaultCaller = Caller(3) -) diff --git a/vendor/github.com/go-kit/kit/log/value_test.go b/vendor/github.com/go-kit/kit/log/value_test.go deleted file mode 100644 index 5277361..0000000 --- a/vendor/github.com/go-kit/kit/log/value_test.go +++ /dev/null @@ -1,109 +0,0 @@ -package log_test - -import ( - "fmt" - "testing" - "time" - - "github.com/go-kit/kit/log" -) - -func TestValueBinding(t *testing.T) { - var output []interface{} - - logger := log.Logger(log.LoggerFunc(func(keyvals ...interface{}) error { - output = keyvals - return nil - })) - - start := time.Date(2015, time.April, 25, 0, 0, 0, 0, time.UTC) - now := start - mocktime := func() time.Time { - now = now.Add(time.Second) - return now - } - - lc := log.NewContext(logger).With("ts", log.Timestamp(mocktime), "caller", log.DefaultCaller) - - lc.Log("foo", "bar") - timestamp, ok := output[1].(time.Time) - if !ok { - t.Fatalf("want time.Time, have %T", output[1]) - } - if want, have := start.Add(time.Second), timestamp; want != have { - t.Errorf("output[1]: want %v, have %v", want, have) - } - if want, have := "value_test.go:28", fmt.Sprint(output[3]); want != have { - t.Errorf("output[3]: want %s, have %s", want, have) - } - - // A second attempt to confirm the bindings are truly dynamic. - lc.Log("foo", "bar") - timestamp, ok = output[1].(time.Time) - if !ok { - t.Fatalf("want time.Time, have %T", output[1]) - } - if want, have := start.Add(2*time.Second), timestamp; want != have { - t.Errorf("output[1]: want %v, have %v", want, have) - } - if want, have := "value_test.go:41", fmt.Sprint(output[3]); want != have { - t.Errorf("output[3]: want %s, have %s", want, have) - } -} - -func TestValueBinding_loggingZeroKeyvals(t *testing.T) { - var output []interface{} - - logger := log.Logger(log.LoggerFunc(func(keyvals ...interface{}) error { - output = keyvals - return nil - })) - - start := time.Date(2015, time.April, 25, 0, 0, 0, 0, time.UTC) - now := start - mocktime := func() time.Time { - now = now.Add(time.Second) - return now - } - - logger = log.NewContext(logger).With("ts", log.Timestamp(mocktime)) - - logger.Log() - timestamp, ok := output[1].(time.Time) - if !ok { - t.Fatalf("want time.Time, have %T", output[1]) - } - if want, have := start.Add(time.Second), timestamp; want != have { - t.Errorf("output[1]: want %v, have %v", want, have) - } - - // A second attempt to confirm the bindings are truly dynamic. - logger.Log() - timestamp, ok = output[1].(time.Time) - if !ok { - t.Fatalf("want time.Time, have %T", output[1]) - } - if want, have := start.Add(2*time.Second), timestamp; want != have { - t.Errorf("output[1]: want %v, have %v", want, have) - } -} - -func BenchmarkValueBindingTimestamp(b *testing.B) { - logger := log.NewNopLogger() - lc := log.NewContext(logger).With("ts", log.DefaultTimestamp) - b.ReportAllocs() - b.ResetTimer() - for i := 0; i < b.N; i++ { - lc.Log("k", "v") - } -} - -func BenchmarkValueBindingCaller(b *testing.B) { - logger := log.NewNopLogger() - lc := log.NewContext(logger).With("caller", log.DefaultCaller) - b.ReportAllocs() - b.ResetTimer() - for i := 0; i < b.N; i++ { - lc.Log("k", "v") - } -} diff --git a/vendor/github.com/go-kit/kit/metrics/README.md b/vendor/github.com/go-kit/kit/metrics/README.md deleted file mode 100644 index 9aa64aa..0000000 --- a/vendor/github.com/go-kit/kit/metrics/README.md +++ /dev/null @@ -1,83 +0,0 @@ -# package metrics - -`package metrics` provides a set of uniform interfaces for service instrumentation. 
-It has **[counters][]**, **[gauges][]**, and **[histograms][]**, - and provides adapters to popular metrics packages, like **[expvar][]**, **[statsd][]**, and **[Prometheus][]**. - -[counters]: http://prometheus.io/docs/concepts/metric_types/#counter -[gauges]: http://prometheus.io/docs/concepts/metric_types/#gauge -[histograms]: http://prometheus.io/docs/concepts/metric_types/#histogram -[expvar]: https://golang.org/pkg/expvar -[statsd]: https://github.com/etsy/statsd -[Prometheus]: http://prometheus.io - -## Rationale - -Code instrumentation is absolutely essential to achieve [observability][] into a distributed system. -Metrics and instrumentation tools have coalesced around a few well-defined idioms. -`package metrics` provides a common, minimal interface those idioms for service authors. - -[observability]: https://speakerdeck.com/mattheath/observability-in-micro-service-architectures - -## Usage - -A simple counter, exported via expvar. - -```go -import "github.com/go-kit/kit/metrics/expvar" - -func main() { - myCount := expvar.NewCounter("my_count") - myCount.Add(1) -} -``` - -A histogram for request duration, exported via a Prometheus summary with -dynamically-computed quantiles. - -```go -import ( - stdprometheus "github.com/prometheus/client_golang/prometheus" - - "github.com/go-kit/kit/metrics" - "github.com/go-kit/kit/metrics/prometheus" -) - -var requestDuration = prometheus.NewSummary(stdprometheus.SummaryOpts{ - Namespace: "myservice", - Subsystem: "api", - Name: "request_duration_nanoseconds_count", - Help: "Total time spent serving requests.", -}, []string{}) - -func handleRequest() { - defer func(begin time.Time) { requestDuration.Observe(time.Since(begin)) }(time.Now()) - // handle request -} -``` - -A gauge for the number of goroutines currently running, exported via statsd. - -```go -import ( - "net" - "os" - "runtime" - "time" - - "github.com/go-kit/kit/metrics/statsd" -) - -func main() { - statsdWriter, err := net.Dial("udp", "127.0.0.1:8126") - if err != nil { - panic(err) - } - - reportInterval := 5 * time.Second - goroutines := statsd.NewGauge(statsdWriter, "total_goroutines", reportInterval) - for range time.Tick(reportInterval) { - goroutines.Set(float64(runtime.NumGoroutine())) - } -} -``` diff --git a/vendor/github.com/go-kit/kit/metrics/discard/discard.go b/vendor/github.com/go-kit/kit/metrics/discard/discard.go deleted file mode 100644 index e99f766..0000000 --- a/vendor/github.com/go-kit/kit/metrics/discard/discard.go +++ /dev/null @@ -1,43 +0,0 @@ -// Package discard implements a backend for package metrics that succeeds -// without doing anything. -package discard - -import "github.com/go-kit/kit/metrics" - -type counter struct { - name string -} - -// NewCounter returns a Counter that does nothing. -func NewCounter(name string) metrics.Counter { return &counter{name} } - -func (c *counter) Name() string { return c.name } -func (c *counter) With(metrics.Field) metrics.Counter { return c } -func (c *counter) Add(delta uint64) {} - -type gauge struct { - name string -} - -// NewGauge returns a Gauge that does nothing. -func NewGauge(name string) metrics.Gauge { return &gauge{name} } - -func (g *gauge) Name() string { return g.name } -func (g *gauge) With(metrics.Field) metrics.Gauge { return g } -func (g *gauge) Set(value float64) {} -func (g *gauge) Add(delta float64) {} -func (g *gauge) Get() float64 { return 0 } - -type histogram struct { - name string -} - -// NewHistogram returns a Histogram that does nothing. 
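As an aside on the vendored interfaces being removed here: the `discard` backend only makes sense next to the `metrics.Counter`, `metrics.Gauge`, and `metrics.Histogram` interfaces it implements. Below is a minimal sketch of code that depends only on those interfaces and swaps in the no-op backend when instrumentation is disabled; the `service` type and metric names are illustrative, not part of the vendored code.

```go
package main

import (
	"github.com/go-kit/kit/metrics"
	"github.com/go-kit/kit/metrics/discard"
)

// service depends only on the metrics interfaces, so any backend
// (discard, expvar, statsd, Prometheus) can be wired in later.
type service struct {
	requests metrics.Counter
	latency  metrics.Histogram
}

func (s *service) handle() {
	s.requests.Add(1)
	s.latency.Observe(5) // hypothetical latency in milliseconds; dropped by the discard backend
}

func main() {
	s := &service{
		requests: discard.NewCounter("http_requests"),
		latency:  discard.NewHistogram("request_latency_ms"),
	}
	s.handle()
}
```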
-func NewHistogram(name string) metrics.Histogram { return &histogram{name} } - -func (h *histogram) Name() string { return h.name } -func (h *histogram) With(metrics.Field) metrics.Histogram { return h } -func (h *histogram) Observe(value int64) {} -func (h *histogram) Distribution() ([]metrics.Bucket, []metrics.Quantile) { - return []metrics.Bucket{}, []metrics.Quantile{} -} diff --git a/vendor/github.com/go-kit/kit/metrics/dogstatsd/dogstatsd.go b/vendor/github.com/go-kit/kit/metrics/dogstatsd/dogstatsd.go deleted file mode 100644 index b5c7d6d..0000000 --- a/vendor/github.com/go-kit/kit/metrics/dogstatsd/dogstatsd.go +++ /dev/null @@ -1,248 +0,0 @@ -// Package dogstatsd implements a DogStatsD backend for package metrics. -// -// This implementation supports Datadog tags that provide additional metric -// filtering capabilities. See the DogStatsD documentation for protocol -// specifics: -// http://docs.datadoghq.com/guides/dogstatsd/ -// -package dogstatsd - -import ( - "bytes" - "fmt" - "io" - "log" - "math" - "time" - - "sync/atomic" - - "github.com/go-kit/kit/metrics" -) - -// dogstatsd metrics were based on the statsd package in go-kit - -const maxBufferSize = 1400 // bytes - -type counter struct { - key string - c chan string - tags []metrics.Field -} - -// NewCounter returns a Counter that emits observations in the DogStatsD protocol -// to the passed writer. Observations are buffered for the report interval or -// until the buffer exceeds a max packet size, whichever comes first. -// -// TODO: support for sampling. -func NewCounter(w io.Writer, key string, reportInterval time.Duration, globalTags []metrics.Field) metrics.Counter { - return NewCounterTick(w, key, time.Tick(reportInterval), globalTags) -} - -// NewCounterTick is the same as NewCounter, but allows the user to pass in a -// ticker channel instead of invoking time.Tick. -func NewCounterTick(w io.Writer, key string, reportTicker <-chan time.Time, tags []metrics.Field) metrics.Counter { - c := &counter{ - key: key, - c: make(chan string), - tags: tags, - } - go fwd(w, key, reportTicker, c.c) - return c -} - -func (c *counter) Name() string { return c.key } - -func (c *counter) With(f metrics.Field) metrics.Counter { - return &counter{ - key: c.key, - c: c.c, - tags: append(c.tags, f), - } -} - -func (c *counter) Add(delta uint64) { c.c <- applyTags(fmt.Sprintf("%d|c", delta), c.tags) } - -type gauge struct { - key string - lastValue uint64 // math.Float64frombits - g chan string - tags []metrics.Field -} - -// NewGauge returns a Gauge that emits values in the DogStatsD protocol to the -// passed writer. Values are buffered for the report interval or until the -// buffer exceeds a max packet size, whichever comes first. -// -// TODO: support for sampling. -func NewGauge(w io.Writer, key string, reportInterval time.Duration, tags []metrics.Field) metrics.Gauge { - return NewGaugeTick(w, key, time.Tick(reportInterval), tags) -} - -// NewGaugeTick is the same as NewGauge, but allows the user to pass in a ticker -// channel instead of invoking time.Tick. 
-func NewGaugeTick(w io.Writer, key string, reportTicker <-chan time.Time, tags []metrics.Field) metrics.Gauge { - g := &gauge{ - key: key, - g: make(chan string), - tags: tags, - } - go fwd(w, key, reportTicker, g.g) - return g -} - -func (g *gauge) Name() string { return g.key } - -func (g *gauge) With(f metrics.Field) metrics.Gauge { - return &gauge{ - key: g.key, - lastValue: g.lastValue, - g: g.g, - tags: append(g.tags, f), - } -} - -func (g *gauge) Add(delta float64) { - // https://github.com/etsy/statsd/blob/master/docs/metric_types.md#gauges - sign := "+" - if delta < 0 { - sign, delta = "-", -delta - } - g.g <- applyTags(fmt.Sprintf("%s%f|g", sign, delta), g.tags) -} - -func (g *gauge) Set(value float64) { - atomic.StoreUint64(&g.lastValue, math.Float64bits(value)) - g.g <- applyTags(fmt.Sprintf("%f|g", value), g.tags) -} - -func (g *gauge) Get() float64 { - return math.Float64frombits(atomic.LoadUint64(&g.lastValue)) -} - -// NewCallbackGauge emits values in the DogStatsD protocol to the passed writer. -// It collects values every scrape interval from the callback. Values are -// buffered for the report interval or until the buffer exceeds a max packet -// size, whichever comes first. The report and scrape intervals may be the -// same. The callback determines the value, and fields are ignored, so -// NewCallbackGauge returns nothing. -func NewCallbackGauge(w io.Writer, key string, reportInterval, scrapeInterval time.Duration, callback func() float64) { - NewCallbackGaugeTick(w, key, time.Tick(reportInterval), time.Tick(scrapeInterval), callback) -} - -// NewCallbackGaugeTick is the same as NewCallbackGauge, but allows the user to -// pass in ticker channels instead of durations to control report and scrape -// intervals. -func NewCallbackGaugeTick(w io.Writer, key string, reportTicker, scrapeTicker <-chan time.Time, callback func() float64) { - go fwd(w, key, reportTicker, emitEvery(scrapeTicker, callback)) -} - -func emitEvery(emitTicker <-chan time.Time, callback func() float64) <-chan string { - c := make(chan string) - go func() { - for range emitTicker { - c <- fmt.Sprintf("%f|g", callback()) - } - }() - return c -} - -type histogram struct { - key string - h chan string - tags []metrics.Field -} - -// NewHistogram returns a Histogram that emits observations in the DogStatsD -// protocol to the passed writer. Observations are buffered for the reporting -// interval or until the buffer exceeds a max packet size, whichever comes -// first. -// -// NewHistogram is mapped to a statsd Timing, so observations should represent -// milliseconds. If you observe in units of nanoseconds, you can make the -// translation with a ScaledHistogram: -// -// NewScaledHistogram(dogstatsdHistogram, time.Millisecond) -// -// You can also enforce the constraint in a typesafe way with a millisecond -// TimeHistogram: -// -// NewTimeHistogram(dogstatsdHistogram, time.Millisecond) -// -// TODO: support for sampling. -func NewHistogram(w io.Writer, key string, reportInterval time.Duration, tags []metrics.Field) metrics.Histogram { - return NewHistogramTick(w, key, time.Tick(reportInterval), tags) -} - -// NewHistogramTick is the same as NewHistogram, but allows the user to pass a -// ticker channel instead of invoking time.Tick. 
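Read together, the constructor comments above describe a raw-writer API: observations are formatted with optional Datadog tags and buffered until the report interval fires or the packet buffer fills. A rough usage sketch, assuming a local DogStatsD agent on UDP 8125; the address, tag keys, and metric name are illustrative.

```go
package main

import (
	"net"
	"time"

	"github.com/go-kit/kit/metrics"
	"github.com/go-kit/kit/metrics/dogstatsd"
)

func main() {
	// Any io.Writer works; a UDP connection to the agent is the usual choice.
	conn, err := net.Dial("udp", "127.0.0.1:8125")
	if err != nil {
		panic(err)
	}

	globalTags := []metrics.Field{{Key: "service", Value: "squirrel"}}
	requests := dogstatsd.NewCounter(conn, "http_requests", 5*time.Second, globalTags)

	// With appends per-observation tags; the formatted line is buffered
	// until the next report tick or until the max packet size is reached.
	requests.With(metrics.Field{Key: "handler", Value: "catalogs"}).Add(1)

	// Give the forwarding goroutine one report interval to flush.
	time.Sleep(6 * time.Second)
}
```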
-func NewHistogramTick(w io.Writer, key string, reportTicker <-chan time.Time, tags []metrics.Field) metrics.Histogram { - h := &histogram{ - key: key, - h: make(chan string), - tags: tags, - } - go fwd(w, key, reportTicker, h.h) - return h -} - -func (h *histogram) Name() string { return h.key } - -func (h *histogram) With(f metrics.Field) metrics.Histogram { - return &histogram{ - key: h.key, - h: h.h, - tags: append(h.tags, f), - } -} - -func (h *histogram) Observe(value int64) { - h.h <- applyTags(fmt.Sprintf("%d|ms", value), h.tags) -} - -func (h *histogram) Distribution() ([]metrics.Bucket, []metrics.Quantile) { - // TODO(pb): no way to do this without introducing e.g. codahale/hdrhistogram - return []metrics.Bucket{}, []metrics.Quantile{} -} - -func fwd(w io.Writer, key string, reportTicker <-chan time.Time, c <-chan string) { - buf := &bytes.Buffer{} - for { - select { - case s := <-c: - fmt.Fprintf(buf, "%s:%s\n", key, s) - if buf.Len() > maxBufferSize { - flush(w, buf) - } - - case <-reportTicker: - flush(w, buf) - } - } -} - -func flush(w io.Writer, buf *bytes.Buffer) { - if buf.Len() <= 0 { - return - } - if _, err := w.Write(buf.Bytes()); err != nil { - log.Printf("error: could not write to dogstatsd: %v", err) - } - buf.Reset() -} - -func applyTags(value string, tags []metrics.Field) string { - if len(tags) > 0 { - var tagsString string - for _, t := range tags { - switch tagsString { - case "": - tagsString = t.Key + ":" + t.Value - default: - tagsString = tagsString + "," + t.Key + ":" + t.Value - } - } - value = value + "|#" + tagsString - } - return value -} diff --git a/vendor/github.com/go-kit/kit/metrics/dogstatsd/dogstatsd_test.go b/vendor/github.com/go-kit/kit/metrics/dogstatsd/dogstatsd_test.go deleted file mode 100644 index 383dbd7..0000000 --- a/vendor/github.com/go-kit/kit/metrics/dogstatsd/dogstatsd_test.go +++ /dev/null @@ -1,266 +0,0 @@ -package dogstatsd - -import ( - "bytes" - "fmt" - "net" - "strings" - "sync" - "testing" - "time" - - "github.com/go-kit/kit/log" - "github.com/go-kit/kit/metrics" - "github.com/go-kit/kit/util/conn" -) - -func TestEmitterCounter(t *testing.T) { - e, buf := testEmitter() - - c := e.NewCounter("test_statsd_counter") - c.Add(1) - c.Add(2) - - // give time for things to emit - time.Sleep(time.Millisecond * 250) - // force a flush and stop - e.Stop() - - want := "prefix.test_statsd_counter:1|c\nprefix.test_statsd_counter:2|c\n" - have := buf.String() - if want != have { - t.Errorf("want %q, have %q", want, have) - } -} - -func TestEmitterGauge(t *testing.T) { - e, buf := testEmitter() - - g := e.NewGauge("test_statsd_gauge") - - delta := 1.0 - g.Add(delta) - - // give time for things to emit - time.Sleep(time.Millisecond * 250) - // force a flush and stop - e.Stop() - - want := fmt.Sprintf("prefix.test_statsd_gauge:+%f|g\n", delta) - have := buf.String() - if want != have { - t.Errorf("want %q, have %q", want, have) - } -} - -func TestEmitterHistogram(t *testing.T) { - e, buf := testEmitter() - h := e.NewHistogram("test_statsd_histogram") - - h.Observe(123) - - // give time for things to emit - time.Sleep(time.Millisecond * 250) - // force a flush and stop - e.Stop() - - want := "prefix.test_statsd_histogram:123|ms\n" - have := buf.String() - if want != have { - t.Errorf("want %q, have %q", want, have) - } -} - -func TestCounter(t *testing.T) { - buf := &syncbuf{buf: &bytes.Buffer{}} - reportc := make(chan time.Time) - tags := []metrics.Field{} - c := NewCounterTick(buf, "test_statsd_counter", reportc, tags) - - c.Add(1) - 
c.With(metrics.Field{"foo", "bar"}).Add(2) - c.With(metrics.Field{"foo", "bar"}).With(metrics.Field{"abc", "123"}).Add(2) - c.Add(3) - - want, have := "test_statsd_counter:1|c\ntest_statsd_counter:2|c|#foo:bar\ntest_statsd_counter:2|c|#foo:bar,abc:123\ntest_statsd_counter:3|c\n", "" - by(t, 100*time.Millisecond, func() bool { - have = buf.String() - return want == have - }, func() { - reportc <- time.Now() - }, fmt.Sprintf("want %q, have %q", want, have)) -} - -func TestGauge(t *testing.T) { - buf := &syncbuf{buf: &bytes.Buffer{}} - reportc := make(chan time.Time) - tags := []metrics.Field{} - g := NewGaugeTick(buf, "test_statsd_gauge", reportc, tags) - - delta := 1.0 - g.Add(delta) - - want, have := fmt.Sprintf("test_statsd_gauge:+%f|g\n", delta), "" - by(t, 100*time.Millisecond, func() bool { - have = buf.String() - return want == have - }, func() { - reportc <- time.Now() - }, fmt.Sprintf("want %q, have %q", want, have)) - - buf.Reset() - delta = -2.0 - g.With(metrics.Field{"foo", "bar"}).Add(delta) - - want, have = fmt.Sprintf("test_statsd_gauge:%f|g|#foo:bar\n", delta), "" - by(t, 100*time.Millisecond, func() bool { - have = buf.String() - return want == have - }, func() { - reportc <- time.Now() - }, fmt.Sprintf("want %q, have %q", want, have)) - - buf.Reset() - value := 3.0 - g.With(metrics.Field{"foo", "bar"}).With(metrics.Field{"abc", "123"}).Set(value) - - want, have = fmt.Sprintf("test_statsd_gauge:%f|g|#foo:bar,abc:123\n", value), "" - by(t, 100*time.Millisecond, func() bool { - have = buf.String() - return want == have - }, func() { - reportc <- time.Now() - }, fmt.Sprintf("want %q, have %q", want, have)) -} - -func TestCallbackGauge(t *testing.T) { - buf := &syncbuf{buf: &bytes.Buffer{}} - reportc, scrapec := make(chan time.Time), make(chan time.Time) - value := 55.55 - cb := func() float64 { return value } - NewCallbackGaugeTick(buf, "test_statsd_callback_gauge", reportc, scrapec, cb) - - scrapec <- time.Now() - reportc <- time.Now() - - // Travis is annoying - by(t, time.Second, func() bool { - return buf.String() != "" - }, func() { - reportc <- time.Now() - }, "buffer never got write+flush") - - want, have := fmt.Sprintf("test_statsd_callback_gauge:%f|g\n", value), "" - by(t, 100*time.Millisecond, func() bool { - have = buf.String() - return strings.HasPrefix(have, want) // HasPrefix because we might get multiple writes - }, func() { - reportc <- time.Now() - }, fmt.Sprintf("want %q, have %q", want, have)) -} - -func TestHistogram(t *testing.T) { - buf := &syncbuf{buf: &bytes.Buffer{}} - reportc := make(chan time.Time) - tags := []metrics.Field{} - h := NewHistogramTick(buf, "test_statsd_histogram", reportc, tags) - - h.Observe(123) - h.With(metrics.Field{"foo", "bar"}).Observe(456) - - want, have := "test_statsd_histogram:123|ms\ntest_statsd_histogram:456|ms|#foo:bar\n", "" - by(t, 100*time.Millisecond, func() bool { - have = buf.String() - return want == have - }, func() { - reportc <- time.Now() - }, fmt.Sprintf("want %q, have %q", want, have)) -} - -func by(t *testing.T, d time.Duration, check func() bool, execute func(), msg string) { - deadline := time.Now().Add(d) - for !check() { - if time.Now().After(deadline) { - t.Fatal(msg) - } - execute() - } -} - -type syncbuf struct { - mtx sync.Mutex - buf *bytes.Buffer -} - -func (s *syncbuf) Write(p []byte) (int, error) { - s.mtx.Lock() - defer s.mtx.Unlock() - return s.buf.Write(p) -} - -func (s *syncbuf) String() string { - s.mtx.Lock() - defer s.mtx.Unlock() - return s.buf.String() -} - -func (s *syncbuf) Reset() { - 
s.mtx.Lock() - defer s.mtx.Unlock() - s.buf.Reset() -} - -func testEmitter() (*Emitter, *syncbuf) { - buf := &syncbuf{buf: &bytes.Buffer{}} - e := &Emitter{ - prefix: "prefix.", - mgr: conn.NewManager(mockDialer(buf), "", "", time.After, log.NewNopLogger()), - logger: log.NewNopLogger(), - keyVals: make(chan keyVal), - quitc: make(chan chan struct{}), - } - go e.loop(time.Millisecond * 20) - return e, buf -} - -func mockDialer(buf *syncbuf) conn.Dialer { - return func(net, addr string) (net.Conn, error) { - return &mockConn{buf}, nil - } -} - -type mockConn struct { - buf *syncbuf -} - -func (c *mockConn) Read(b []byte) (n int, err error) { - panic("not implemented") -} - -func (c *mockConn) Write(b []byte) (n int, err error) { - return c.buf.Write(b) -} - -func (c *mockConn) Close() error { - panic("not implemented") -} - -func (c *mockConn) LocalAddr() net.Addr { - panic("not implemented") -} - -func (c *mockConn) RemoteAddr() net.Addr { - panic("not implemented") -} - -func (c *mockConn) SetDeadline(t time.Time) error { - panic("not implemented") -} - -func (c *mockConn) SetReadDeadline(t time.Time) error { - panic("not implemented") -} - -func (c *mockConn) SetWriteDeadline(t time.Time) error { - panic("not implemented") -} diff --git a/vendor/github.com/go-kit/kit/metrics/dogstatsd/emitter.go b/vendor/github.com/go-kit/kit/metrics/dogstatsd/emitter.go deleted file mode 100644 index 9d8cdf3..0000000 --- a/vendor/github.com/go-kit/kit/metrics/dogstatsd/emitter.go +++ /dev/null @@ -1,159 +0,0 @@ -package dogstatsd - -import ( - "bytes" - "fmt" - "net" - "time" - - "github.com/go-kit/kit/log" - "github.com/go-kit/kit/metrics" - "github.com/go-kit/kit/util/conn" -) - -// Emitter is a struct to manage connections and orchestrate the emission of -// metrics to a DogStatsd process. -type Emitter struct { - prefix string - keyVals chan keyVal - mgr *conn.Manager - logger log.Logger - quitc chan chan struct{} -} - -type keyVal struct { - key string - val string -} - -func stringToKeyVal(key string, keyVals chan keyVal) chan string { - vals := make(chan string) - go func() { - for val := range vals { - keyVals <- keyVal{key: key, val: val} - } - }() - return vals -} - -// NewEmitter will return an Emitter that will prefix all metrics names with the -// given prefix. Once started, it will attempt to create a connection with the -// given network and address via `net.Dial` and periodically post metrics to the -// connection in the DogStatsD protocol. -func NewEmitter(network, address string, metricsPrefix string, flushInterval time.Duration, logger log.Logger) *Emitter { - return NewEmitterDial(net.Dial, network, address, metricsPrefix, flushInterval, logger) -} - -// NewEmitterDial is the same as NewEmitter, but allows you to specify your own -// Dialer function. This is primarily useful for tests. -func NewEmitterDial(dialer conn.Dialer, network, address string, metricsPrefix string, flushInterval time.Duration, logger log.Logger) *Emitter { - e := &Emitter{ - prefix: metricsPrefix, - mgr: conn.NewManager(dialer, network, address, time.After, logger), - logger: logger, - keyVals: make(chan keyVal), - quitc: make(chan chan struct{}), - } - go e.loop(flushInterval) - return e -} - -// NewCounter returns a Counter that emits observations in the DogStatsD protocol -// via the Emitter's connection manager. Observations are buffered for the -// report interval or until the buffer exceeds a max packet size, whichever -// comes first. Fields are ignored. 
-func (e *Emitter) NewCounter(key string) metrics.Counter { - key = e.prefix + key - return &counter{ - key: key, - c: stringToKeyVal(key, e.keyVals), - } -} - -// NewHistogram returns a Histogram that emits observations in the DogStatsD -// protocol via the Emitter's conection manager. Observations are buffered for -// the reporting interval or until the buffer exceeds a max packet size, -// whichever comes first. Fields are ignored. -// -// NewHistogram is mapped to a statsd Timing, so observations should represent -// milliseconds. If you observe in units of nanoseconds, you can make the -// translation with a ScaledHistogram: -// -// NewScaledHistogram(histogram, time.Millisecond) -// -// You can also enforce the constraint in a typesafe way with a millisecond -// TimeHistogram: -// -// NewTimeHistogram(histogram, time.Millisecond) -// -// TODO: support for sampling. -func (e *Emitter) NewHistogram(key string) metrics.Histogram { - key = e.prefix + key - return &histogram{ - key: key, - h: stringToKeyVal(key, e.keyVals), - } -} - -// NewGauge returns a Gauge that emits values in the DogStatsD protocol via the -// the Emitter's connection manager. Values are buffered for the report -// interval or until the buffer exceeds a max packet size, whichever comes -// first. Fields are ignored. -// -// TODO: support for sampling -func (e *Emitter) NewGauge(key string) metrics.Gauge { - key = e.prefix + key - return &gauge{ - key: key, - g: stringToKeyVal(key, e.keyVals), - } -} - -func (e *Emitter) loop(d time.Duration) { - ticker := time.NewTicker(d) - defer ticker.Stop() - buf := &bytes.Buffer{} - for { - select { - case kv := <-e.keyVals: - fmt.Fprintf(buf, "%s:%s\n", kv.key, kv.val) - if buf.Len() > maxBufferSize { - e.Flush(buf) - } - - case <-ticker.C: - e.Flush(buf) - - case q := <-e.quitc: - e.Flush(buf) - close(q) - return - } - } -} - -// Stop will flush the current metrics and close the active connection. Calling -// stop more than once is a programmer error. -func (e *Emitter) Stop() { - q := make(chan struct{}) - e.quitc <- q - <-q -} - -// Flush will write the given buffer to a connection provided by the Emitter's -// connection manager. -func (e *Emitter) Flush(buf *bytes.Buffer) { - conn := e.mgr.Take() - if conn == nil { - e.logger.Log("during", "flush", "err", "connection unavailable") - return - } - - _, err := conn.Write(buf.Bytes()) - if err != nil { - e.logger.Log("during", "flush", "err", err) - } - buf.Reset() - - e.mgr.Put(err) -} diff --git a/vendor/github.com/go-kit/kit/metrics/expvar/expvar.go b/vendor/github.com/go-kit/kit/metrics/expvar/expvar.go deleted file mode 100644 index 371d103..0000000 --- a/vendor/github.com/go-kit/kit/metrics/expvar/expvar.go +++ /dev/null @@ -1,172 +0,0 @@ -// Package expvar implements an expvar backend for package metrics. -// -// The current implementation ignores fields. In the future, it would be good -// to have an implementation that accepted a set of predeclared field names at -// construction time, and used field values to produce delimiter-separated -// bucket (key) names. That is, -// -// c := NewFieldedCounter(..., "path", "status") -// c.Add(1) // "myprefix_unknown_unknown" += 1 -// c2 := c.With("path", "foo").With("status": "200") -// c2.Add(1) // "myprefix_foo_200" += 1 -// -// It would also be possible to have an implementation that generated more -// sophisticated expvar.Values. For example, a Counter could be implemented as -// a map, representing a tree of key/value pairs whose leaves were the actual -// expvar.Ints. 
-package expvar - -import ( - "expvar" - "fmt" - "sort" - "strconv" - "sync" - "time" - - "github.com/codahale/hdrhistogram" - - "github.com/go-kit/kit/metrics" -) - -type counter struct { - name string - v *expvar.Int -} - -// NewCounter returns a new Counter backed by an expvar with the given name. -// Fields are ignored. -func NewCounter(name string) metrics.Counter { - return &counter{ - name: name, - v: expvar.NewInt(name), - } -} - -func (c *counter) Name() string { return c.name } -func (c *counter) With(metrics.Field) metrics.Counter { return c } -func (c *counter) Add(delta uint64) { c.v.Add(int64(delta)) } - -type gauge struct { - name string - v *expvar.Float -} - -// NewGauge returns a new Gauge backed by an expvar with the given name. It -// should be updated manually; for a callback-based approach, see -// PublishCallbackGauge. Fields are ignored. -func NewGauge(name string) metrics.Gauge { - return &gauge{ - name: name, - v: expvar.NewFloat(name), - } -} - -func (g *gauge) Name() string { return g.name } -func (g *gauge) With(metrics.Field) metrics.Gauge { return g } -func (g *gauge) Add(delta float64) { g.v.Add(delta) } -func (g *gauge) Set(value float64) { g.v.Set(value) } -func (g *gauge) Get() float64 { return mustParseFloat64(g.v.String()) } - -// PublishCallbackGauge publishes a Gauge as an expvar with the given name, -// whose value is determined at collect time by the passed callback function. -// The callback determines the value, and fields are ignored, so -// PublishCallbackGauge returns nothing. -func PublishCallbackGauge(name string, callback func() float64) { - expvar.Publish(name, callbackGauge(callback)) -} - -type callbackGauge func() float64 - -func (g callbackGauge) String() string { return strconv.FormatFloat(g(), 'g', -1, 64) } - -type histogram struct { - mu sync.Mutex - hist *hdrhistogram.WindowedHistogram - - name string - gauges map[int]metrics.Gauge -} - -// NewHistogram is taken from http://github.com/codahale/metrics. It returns a -// windowed HDR histogram which drops data older than five minutes. -// -// The histogram exposes metrics for each passed quantile as gauges. Quantiles -// should be integers in the range 1..99. The gauge names are assigned by -// using the passed name as a prefix and appending "_pNN" e.g. "_p50". 
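Since the expvar backend above publishes through the standard library's `/debug/vars` endpoint, here is a short sketch of exposing the histogram quantile gauges and a callback gauge; the port, metric names, and observed value are illustrative.

```go
package main

import (
	"net/http"
	"runtime"

	"github.com/go-kit/kit/metrics/expvar"
)

func main() {
	// Quantile gauges are published as request_latency_ms_p50, _p90, and _p99.
	latency := expvar.NewHistogram("request_latency_ms", 0, 5000, 3, 50, 90, 99)
	latency.Observe(42) // hypothetical observation, in the histogram's units (here: ms)

	// The callback is re-evaluated each time /debug/vars is scraped.
	expvar.PublishCallbackGauge("goroutines", func() float64 {
		return float64(runtime.NumGoroutine())
	})

	// The standard library expvar handler, pulled in by this package, serves
	// the values above on /debug/vars of the default mux.
	http.ListenAndServe(":8080", nil)
}
```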
-func NewHistogram(name string, minValue, maxValue int64, sigfigs int, quantiles ...int) metrics.Histogram { - gauges := map[int]metrics.Gauge{} - for _, quantile := range quantiles { - if quantile <= 0 || quantile >= 100 { - panic(fmt.Sprintf("invalid quantile %d", quantile)) - } - gauges[quantile] = NewGauge(fmt.Sprintf("%s_p%02d", name, quantile)) - } - h := &histogram{ - hist: hdrhistogram.NewWindowed(5, minValue, maxValue, sigfigs), - name: name, - gauges: gauges, - } - go h.rotateLoop(1 * time.Minute) - return h -} - -func (h *histogram) Name() string { return h.name } -func (h *histogram) With(metrics.Field) metrics.Histogram { return h } - -func (h *histogram) Observe(value int64) { - h.mu.Lock() - err := h.hist.Current.RecordValue(value) - h.mu.Unlock() - - if err != nil { - panic(err.Error()) - } - - for q, gauge := range h.gauges { - gauge.Set(float64(h.hist.Current.ValueAtQuantile(float64(q)))) - } -} - -func (h *histogram) Distribution() ([]metrics.Bucket, []metrics.Quantile) { - bars := h.hist.Merge().Distribution() - buckets := make([]metrics.Bucket, len(bars)) - for i, bar := range bars { - buckets[i] = metrics.Bucket{ - From: bar.From, - To: bar.To, - Count: bar.Count, - } - } - quantiles := make([]metrics.Quantile, 0, len(h.gauges)) - for quantile, gauge := range h.gauges { - quantiles = append(quantiles, metrics.Quantile{ - Quantile: quantile, - Value: int64(gauge.Get()), - }) - } - sort.Sort(quantileSlice(quantiles)) - return buckets, quantiles -} - -func (h *histogram) rotateLoop(d time.Duration) { - for range time.Tick(d) { - h.mu.Lock() - h.hist.Rotate() - h.mu.Unlock() - } -} - -func mustParseFloat64(s string) float64 { - f, err := strconv.ParseFloat(s, 64) - if err != nil { - panic(err) - } - return f -} - -type quantileSlice []metrics.Quantile - -func (a quantileSlice) Len() int { return len(a) } -func (a quantileSlice) Less(i, j int) bool { return a[i].Quantile < a[j].Quantile } -func (a quantileSlice) Swap(i, j int) { a[i], a[j] = a[j], a[i] } diff --git a/vendor/github.com/go-kit/kit/metrics/expvar/expvar_test.go b/vendor/github.com/go-kit/kit/metrics/expvar/expvar_test.go deleted file mode 100644 index 644bb40..0000000 --- a/vendor/github.com/go-kit/kit/metrics/expvar/expvar_test.go +++ /dev/null @@ -1,69 +0,0 @@ -package expvar_test - -import ( - stdexpvar "expvar" - "fmt" - "testing" - - "github.com/go-kit/kit/metrics" - "github.com/go-kit/kit/metrics/expvar" - "github.com/go-kit/kit/metrics/teststat" -) - -func TestHistogramQuantiles(t *testing.T) { - var ( - name = "test_histogram_quantiles" - quantiles = []int{50, 90, 95, 99} - h = expvar.NewHistogram(name, 0, 100, 3, quantiles...).With(metrics.Field{Key: "ignored", Value: "field"}) - ) - const seed, mean, stdev int64 = 424242, 50, 10 - teststat.PopulateNormalHistogram(t, h, seed, mean, stdev) - teststat.AssertExpvarNormalHistogram(t, name, mean, stdev, quantiles) -} - -func TestCallbackGauge(t *testing.T) { - var ( - name = "foo" - value = 42.43 - ) - expvar.PublishCallbackGauge(name, func() float64 { return value }) - if want, have := fmt.Sprint(value), stdexpvar.Get(name).String(); want != have { - t.Errorf("want %q, have %q", want, have) - } -} - -func TestCounter(t *testing.T) { - var ( - name = "m" - value = 123 - ) - expvar.NewCounter(name).With(metrics.Field{Key: "ignored", Value: "field"}).Add(uint64(value)) - if want, have := fmt.Sprint(value), stdexpvar.Get(name).String(); want != have { - t.Errorf("want %q, have %q", want, have) - } -} - -func TestGauge(t *testing.T) { - var ( - name = "xyz" - 
value = 54321 - delta = 12345 - g = expvar.NewGauge(name).With(metrics.Field{Key: "ignored", Value: "field"}) - ) - g.Set(float64(value)) - g.Add(float64(delta)) - if want, have := fmt.Sprint(value+delta), stdexpvar.Get(name).String(); want != have { - t.Errorf("want %q, have %q", want, have) - } -} - -func TestInvalidQuantile(t *testing.T) { - defer func() { - if err := recover(); err == nil { - t.Errorf("expected panic, got none") - } else { - t.Logf("got expected panic: %v", err) - } - }() - expvar.NewHistogram("foo", 0.0, 100.0, 3, 50, 90, 95, 99, 101) -} diff --git a/vendor/github.com/go-kit/kit/metrics/graphite/emitter.go b/vendor/github.com/go-kit/kit/metrics/graphite/emitter.go deleted file mode 100644 index 87e633c..0000000 --- a/vendor/github.com/go-kit/kit/metrics/graphite/emitter.go +++ /dev/null @@ -1,159 +0,0 @@ -package graphite - -import ( - "bufio" - "fmt" - "io" - "net" - "sync" - "time" - - "github.com/go-kit/kit/log" - "github.com/go-kit/kit/metrics" - "github.com/go-kit/kit/util/conn" -) - -// Emitter is a struct to manage connections and orchestrate the emission of -// metrics to a Graphite system. -type Emitter struct { - mtx sync.Mutex - prefix string - mgr *conn.Manager - counters []*counter - histograms []*windowedHistogram - gauges []*gauge - logger log.Logger - quitc chan chan struct{} -} - -// NewEmitter will return an Emitter that will prefix all metrics names with the -// given prefix. Once started, it will attempt to create a connection with the -// given network and address via `net.Dial` and periodically post metrics to the -// connection in the Graphite plaintext protocol. -func NewEmitter(network, address string, metricsPrefix string, flushInterval time.Duration, logger log.Logger) *Emitter { - return NewEmitterDial(net.Dial, network, address, metricsPrefix, flushInterval, logger) -} - -// NewEmitterDial is the same as NewEmitter, but allows you to specify your own -// Dialer function. This is primarily useful for tests. -func NewEmitterDial(dialer conn.Dialer, network, address string, metricsPrefix string, flushInterval time.Duration, logger log.Logger) *Emitter { - e := &Emitter{ - prefix: metricsPrefix, - mgr: conn.NewManager(dialer, network, address, time.After, logger), - logger: logger, - quitc: make(chan chan struct{}), - } - go e.loop(flushInterval) - return e -} - -// NewCounter returns a Counter whose value will be periodically emitted in -// a Graphite-compatible format once the Emitter is started. Fields are ignored. -func (e *Emitter) NewCounter(name string) metrics.Counter { - e.mtx.Lock() - defer e.mtx.Unlock() - c := newCounter(name) - e.counters = append(e.counters, c) - return c -} - -// NewHistogram is taken from http://github.com/codahale/metrics. It returns a -// windowed HDR histogram which drops data older than five minutes. -// -// The histogram exposes metrics for each passed quantile as gauges. Quantiles -// should be integers in the range 1..99. The gauge names are assigned by using -// the passed name as a prefix and appending "_pNN" e.g. "_p50". -// -// The values of this histogram will be periodically emitted in a -// Graphite-compatible format once the Emitter is started. Fields are ignored. 
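Putting the Emitter pieces above together, a hedged sketch of emitting to a Carbon plaintext listener; the address, prefix, and metric names are placeholders.

```go
package main

import (
	"time"

	"github.com/go-kit/kit/log"
	"github.com/go-kit/kit/metrics/graphite"
)

func main() {
	logger := log.NewNopLogger()

	// Carbon's plaintext listener conventionally runs on TCP 2003; every
	// metric name is prefixed with "squirrel." before it is written.
	e := graphite.NewEmitter("tcp", "carbon.example.com:2003", "squirrel.", 10*time.Second, logger)
	defer e.Stop()

	requests := e.NewCounter("http_requests")
	latency, err := e.NewHistogram("request_latency_ms", 0, 5000, 3, 50, 99)
	if err != nil {
		panic(err) // only returned for quantiles outside 1..99
	}

	requests.Add(1)
	latency.Observe(42)
}
```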
-func (e *Emitter) NewHistogram(name string, minValue, maxValue int64, sigfigs int, quantiles ...int) (metrics.Histogram, error) { - gauges := map[int]metrics.Gauge{} - for _, quantile := range quantiles { - if quantile <= 0 || quantile >= 100 { - return nil, fmt.Errorf("invalid quantile %d", quantile) - } - gauges[quantile] = e.gauge(fmt.Sprintf("%s_p%02d", name, quantile)) - } - h := newWindowedHistogram(name, minValue, maxValue, sigfigs, gauges, e.logger) - - e.mtx.Lock() - defer e.mtx.Unlock() - e.histograms = append(e.histograms, h) - return h, nil -} - -// NewGauge returns a Gauge whose value will be periodically emitted in a -// Graphite-compatible format once the Emitter is started. Fields are ignored. -func (e *Emitter) NewGauge(name string) metrics.Gauge { - e.mtx.Lock() - defer e.mtx.Unlock() - return e.gauge(name) -} - -func (e *Emitter) gauge(name string) metrics.Gauge { - g := &gauge{name, 0} - e.gauges = append(e.gauges, g) - return g -} - -func (e *Emitter) loop(d time.Duration) { - ticker := time.NewTicker(d) - defer ticker.Stop() - - for { - select { - case <-ticker.C: - e.Flush() - - case q := <-e.quitc: - e.Flush() - close(q) - return - } - } -} - -// Stop will flush the current metrics and close the active connection. Calling -// stop more than once is a programmer error. -func (e *Emitter) Stop() { - q := make(chan struct{}) - e.quitc <- q - <-q -} - -// Flush will write the current metrics to the Emitter's connection in the -// Graphite plaintext protocol. -func (e *Emitter) Flush() { - e.mtx.Lock() // one flush at a time - defer e.mtx.Unlock() - - conn := e.mgr.Take() - if conn == nil { - e.logger.Log("during", "flush", "err", "connection unavailable") - return - } - - err := e.flush(conn) - if err != nil { - e.logger.Log("during", "flush", "err", err) - } - e.mgr.Put(err) -} - -func (e *Emitter) flush(w io.Writer) error { - bw := bufio.NewWriter(w) - - for _, c := range e.counters { - c.flush(bw, e.prefix) - } - - for _, h := range e.histograms { - h.flush(bw, e.prefix) - } - - for _, g := range e.gauges { - g.flush(bw, e.prefix) - } - - return bw.Flush() -} diff --git a/vendor/github.com/go-kit/kit/metrics/graphite/graphite.go b/vendor/github.com/go-kit/kit/metrics/graphite/graphite.go deleted file mode 100644 index 6736cc2..0000000 --- a/vendor/github.com/go-kit/kit/metrics/graphite/graphite.go +++ /dev/null @@ -1,186 +0,0 @@ -// Package graphite implements a Graphite backend for package metrics. Metrics -// will be emitted to a Graphite server in the plaintext protocol which looks -// like: -// -// "<metric path> <metric value> <metric timestamp>" -// -// See http://graphite.readthedocs.io/en/latest/feeding-carbon.html#the-plaintext-protocol. -// The current implementation ignores fields. -package graphite - -import ( - "fmt" - "io" - "math" - "sort" - "sync" - "sync/atomic" - "time" - - "github.com/codahale/hdrhistogram" - - "github.com/go-kit/kit/log" - "github.com/go-kit/kit/metrics" -) - -func newCounter(name string) *counter { - return &counter{name, 0} -} - -func newGauge(name string) *gauge { - return &gauge{name, 0} -} - -// counter implements the metrics.counter interface but also provides a -// Flush method to emit the current counter values in the Graphite plaintext -// protocol. -type counter struct { - key string - count uint64 -} - -func (c *counter) Name() string { return c.key } - -// With currently ignores fields. 
-func (c *counter) With(metrics.Field) metrics.Counter { return c } - -func (c *counter) Add(delta uint64) { atomic.AddUint64(&c.count, delta) } - -func (c *counter) get() uint64 { return atomic.LoadUint64(&c.count) } - -// flush will emit the current counter value in the Graphite plaintext -// protocol to the given io.Writer. -func (c *counter) flush(w io.Writer, prefix string) { - fmt.Fprintf(w, "%s.count %d %d\n", prefix+c.Name(), c.get(), time.Now().Unix()) -} - -// gauge implements the metrics.gauge interface but also provides a -// Flush method to emit the current counter values in the Graphite plaintext -// protocol. -type gauge struct { - key string - value uint64 // math.Float64bits -} - -func (g *gauge) Name() string { return g.key } - -// With currently ignores fields. -func (g *gauge) With(metrics.Field) metrics.Gauge { return g } - -func (g *gauge) Add(delta float64) { - for { - old := atomic.LoadUint64(&g.value) - new := math.Float64bits(math.Float64frombits(old) + delta) - if atomic.CompareAndSwapUint64(&g.value, old, new) { - return - } - } -} - -func (g *gauge) Set(value float64) { - atomic.StoreUint64(&g.value, math.Float64bits(value)) -} - -func (g *gauge) Get() float64 { - return math.Float64frombits(atomic.LoadUint64(&g.value)) -} - -// Flush will emit the current gauge value in the Graphite plaintext -// protocol to the given io.Writer. -func (g *gauge) flush(w io.Writer, prefix string) { - fmt.Fprintf(w, "%s %.2f %d\n", prefix+g.Name(), g.Get(), time.Now().Unix()) -} - -// windowedHistogram is taken from http://github.com/codahale/metrics. It -// is a windowed HDR histogram which drops data older than five minutes. -// -// The histogram exposes metrics for each passed quantile as gauges. Quantiles -// should be integers in the range 1..99. The gauge names are assigned by using -// the passed name as a prefix and appending "_pNN" e.g. "_p50". -// -// The values of this histogram will be periodically emitted in a -// Graphite-compatible format once the GraphiteProvider is started. Fields are ignored. 
-type windowedHistogram struct { - mtx sync.Mutex - hist *hdrhistogram.WindowedHistogram - - name string - gauges map[int]metrics.Gauge - logger log.Logger -} - -func newWindowedHistogram(name string, minValue, maxValue int64, sigfigs int, quantiles map[int]metrics.Gauge, logger log.Logger) *windowedHistogram { - h := &windowedHistogram{ - hist: hdrhistogram.NewWindowed(5, minValue, maxValue, sigfigs), - name: name, - gauges: quantiles, - logger: logger, - } - go h.rotateLoop(1 * time.Minute) - return h -} - -func (h *windowedHistogram) Name() string { return h.name } - -func (h *windowedHistogram) With(metrics.Field) metrics.Histogram { return h } - -func (h *windowedHistogram) Observe(value int64) { - h.mtx.Lock() - err := h.hist.Current.RecordValue(value) - h.mtx.Unlock() - - if err != nil { - h.logger.Log("err", err, "msg", "unable to record histogram value") - return - } - - for q, gauge := range h.gauges { - gauge.Set(float64(h.hist.Current.ValueAtQuantile(float64(q)))) - } -} - -func (h *windowedHistogram) Distribution() ([]metrics.Bucket, []metrics.Quantile) { - bars := h.hist.Merge().Distribution() - buckets := make([]metrics.Bucket, len(bars)) - for i, bar := range bars { - buckets[i] = metrics.Bucket{ - From: bar.From, - To: bar.To, - Count: bar.Count, - } - } - quantiles := make([]metrics.Quantile, 0, len(h.gauges)) - for quantile, gauge := range h.gauges { - quantiles = append(quantiles, metrics.Quantile{ - Quantile: quantile, - Value: int64(gauge.Get()), - }) - } - sort.Sort(quantileSlice(quantiles)) - return buckets, quantiles -} - -func (h *windowedHistogram) flush(w io.Writer, prefix string) { - name := prefix + h.Name() - hist := h.hist.Merge() - now := time.Now().Unix() - fmt.Fprintf(w, "%s.count %d %d\n", name, hist.TotalCount(), now) - fmt.Fprintf(w, "%s.min %d %d\n", name, hist.Min(), now) - fmt.Fprintf(w, "%s.max %d %d\n", name, hist.Max(), now) - fmt.Fprintf(w, "%s.mean %.2f %d\n", name, hist.Mean(), now) - fmt.Fprintf(w, "%s.std-dev %.2f %d\n", name, hist.StdDev(), now) -} - -func (h *windowedHistogram) rotateLoop(d time.Duration) { - for range time.Tick(d) { - h.mtx.Lock() - h.hist.Rotate() - h.mtx.Unlock() - } -} - -type quantileSlice []metrics.Quantile - -func (a quantileSlice) Len() int { return len(a) } -func (a quantileSlice) Less(i, j int) bool { return a[i].Quantile < a[j].Quantile } -func (a quantileSlice) Swap(i, j int) { a[i], a[j] = a[j], a[i] } diff --git a/vendor/github.com/go-kit/kit/metrics/graphite/graphite_test.go b/vendor/github.com/go-kit/kit/metrics/graphite/graphite_test.go deleted file mode 100644 index 627efea..0000000 --- a/vendor/github.com/go-kit/kit/metrics/graphite/graphite_test.go +++ /dev/null @@ -1,80 +0,0 @@ -package graphite - -import ( - "bytes" - "fmt" - "strings" - "testing" - "time" - - "github.com/go-kit/kit/log" - "github.com/go-kit/kit/metrics" - "github.com/go-kit/kit/metrics/teststat" -) - -func TestHistogramQuantiles(t *testing.T) { - prefix := "prefix." - e := NewEmitter("", "", prefix, time.Second, log.NewNopLogger()) - var ( - name = "test_histogram_quantiles" - quantiles = []int{50, 90, 95, 99} - ) - h, err := e.NewHistogram(name, 0, 100, 3, quantiles...) 
- if err != nil { - t.Fatalf("unable to create test histogram: %v", err) - } - h = h.With(metrics.Field{Key: "ignored", Value: "field"}) - const seed, mean, stdev int64 = 424242, 50, 10 - teststat.PopulateNormalHistogram(t, h, seed, mean, stdev) - - // flush the current metrics into a buffer to examine - var b bytes.Buffer - e.flush(&b) - teststat.AssertGraphiteNormalHistogram(t, prefix, name, mean, stdev, quantiles, b.String()) -} - -func TestCounter(t *testing.T) { - var ( - prefix = "prefix." - name = "m" - value = 123 - e = NewEmitter("", "", prefix, time.Second, log.NewNopLogger()) - b bytes.Buffer - ) - e.NewCounter(name).With(metrics.Field{Key: "ignored", Value: "field"}).Add(uint64(value)) - e.flush(&b) - want := fmt.Sprintf("%s%s.count %d", prefix, name, value) - payload := b.String() - if !strings.HasPrefix(payload, want) { - t.Errorf("counter %s want\n%s, have\n%s", name, want, payload) - } -} - -func TestGauge(t *testing.T) { - var ( - prefix = "prefix." - name = "xyz" - value = 54321 - delta = 12345 - e = NewEmitter("", "", prefix, time.Second, log.NewNopLogger()) - b bytes.Buffer - g = e.NewGauge(name).With(metrics.Field{Key: "ignored", Value: "field"}) - ) - - g.Set(float64(value)) - g.Add(float64(delta)) - - e.flush(&b) - payload := b.String() - - want := fmt.Sprintf("%s%s %d", prefix, name, value+delta) - if !strings.HasPrefix(payload, want) { - t.Errorf("gauge %s want\n%s, have\n%s", name, want, payload) - } -} - -func TestEmitterStops(t *testing.T) { - e := NewEmitter("foo", "bar", "baz", time.Second, log.NewNopLogger()) - time.Sleep(100 * time.Millisecond) - e.Stop() -} diff --git a/vendor/github.com/go-kit/kit/metrics/influxdb/influxdb.go b/vendor/github.com/go-kit/kit/metrics/influxdb/influxdb.go deleted file mode 100644 index 26bccc4..0000000 --- a/vendor/github.com/go-kit/kit/metrics/influxdb/influxdb.go +++ /dev/null @@ -1,254 +0,0 @@ -// Package influxdb implements a InfluxDB backend for package metrics. -package influxdb - -import ( - "fmt" - "sort" - "sync" - "time" - - "github.com/codahale/hdrhistogram" - stdinflux "github.com/influxdata/influxdb/client/v2" - - "github.com/go-kit/kit/metrics" -) - -type counter struct { - key string - tags []metrics.Field - fields []metrics.Field - value uint64 - bp stdinflux.BatchPoints -} - -// NewCounter returns a Counter that writes values in the reportInterval -// to the given InfluxDB client, utilizing batching. -func NewCounter(client stdinflux.Client, bp stdinflux.BatchPoints, key string, tags []metrics.Field, reportInterval time.Duration) metrics.Counter { - return NewCounterTick(client, bp, key, tags, time.Tick(reportInterval)) -} - -// NewCounterTick is the same as NewCounter, but allows the user to pass a own -// channel to trigger the write process to the client. 
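The graphite tests deleted above cover the emitter's whole surface: build an Emitter, hand out counters, gauges, and quantile-backed histograms, and let it flush in the Graphite plaintext protocol on its interval. A hedged usage sketch under those assumptions; the network address, prefix, and metric names are placeholders, not values from the original repo:

```go
package main

import (
	"log"
	"time"

	kitlog "github.com/go-kit/kit/log"
	"github.com/go-kit/kit/metrics/graphite"
)

func main() {
	// Placeholder Graphite plaintext endpoint and prefix.
	e := graphite.NewEmitter("tcp", "localhost:2003", "myapp.", 10*time.Second, kitlog.NewNopLogger())
	defer e.Stop()

	requests := e.NewCounter("requests")
	inflight := e.NewGauge("inflight")
	latency, err := e.NewHistogram("latency_ms", 0, 5000, 3, 50, 90, 95, 99)
	if err != nil {
		log.Fatal(err)
	}

	// Instrument one fake request; values are flushed on the emitter's interval.
	inflight.Add(1)
	requests.Add(1)
	latency.Observe(42) // milliseconds, within the histogram's 0..5000 range
	inflight.Add(-1)
}
```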
-func NewCounterTick(client stdinflux.Client, bp stdinflux.BatchPoints, key string, tags []metrics.Field, reportTicker <-chan time.Time) metrics.Counter { - c := &counter{ - key: key, - tags: tags, - value: 0, - bp: bp, - } - go watch(client, bp, reportTicker) - return c -} - -func (c *counter) Name() string { - return c.key -} - -func (c *counter) With(field metrics.Field) metrics.Counter { - return &counter{ - key: c.key, - tags: c.tags, - value: c.value, - bp: c.bp, - fields: append(c.fields, field), - } -} - -func (c *counter) Add(delta uint64) { - c.value = c.value + delta - - tags := map[string]string{} - - for _, tag := range c.tags { - tags[tag.Key] = tag.Value - } - - fields := map[string]interface{}{} - - for _, field := range c.fields { - fields[field.Key] = field.Value - } - fields["value"] = c.value - pt, _ := stdinflux.NewPoint(c.key, tags, fields, time.Now()) - c.bp.AddPoint(pt) -} - -type gauge struct { - key string - tags []metrics.Field - fields []metrics.Field - value float64 - bp stdinflux.BatchPoints -} - -// NewGauge creates a new gauge instance, reporting points in the defined reportInterval. -func NewGauge(client stdinflux.Client, bp stdinflux.BatchPoints, key string, tags []metrics.Field, reportInterval time.Duration) metrics.Gauge { - return NewGaugeTick(client, bp, key, tags, time.Tick(reportInterval)) -} - -// NewGaugeTick is the same as NewGauge with a ticker channel instead of a interval. -func NewGaugeTick(client stdinflux.Client, bp stdinflux.BatchPoints, key string, tags []metrics.Field, reportTicker <-chan time.Time) metrics.Gauge { - g := &gauge{ - key: key, - tags: tags, - value: 0, - bp: bp, - } - go watch(client, bp, reportTicker) - return g -} - -func (g *gauge) Name() string { - return g.key -} - -func (g *gauge) With(field metrics.Field) metrics.Gauge { - return &gauge{ - key: g.key, - tags: g.tags, - value: g.value, - bp: g.bp, - fields: append(g.fields, field), - } -} - -func (g *gauge) Add(delta float64) { - g.value = g.value + delta - g.createPoint() -} - -func (g *gauge) Set(value float64) { - g.value = value - g.createPoint() -} - -func (g *gauge) Get() float64 { - return g.value -} - -func (g *gauge) createPoint() { - tags := map[string]string{} - - for _, tag := range g.tags { - tags[tag.Key] = tag.Value - } - - fields := map[string]interface{}{} - - for _, field := range g.fields { - fields[field.Key] = field.Value - } - fields["value"] = g.value - pt, _ := stdinflux.NewPoint(g.key, tags, fields, time.Now()) - g.bp.AddPoint(pt) -} - -// The implementation from histogram is taken from metrics/expvar - -type histogram struct { - mu sync.Mutex - hist *hdrhistogram.WindowedHistogram - - key string - gauges map[int]metrics.Gauge -} - -// NewHistogram is taken from http://github.com/codahale/metrics. It returns a -// windowed HDR histogram which drops data older than five minutes. -// -// The histogram exposes metrics for each passed quantile as gauges. Quantiles -// should be integers in the range 1..99. The gauge names are assigned by -// using the passed name as a prefix and appending "_pNN" e.g. "_p50". -func NewHistogram(client stdinflux.Client, bp stdinflux.BatchPoints, key string, tags []metrics.Field, - reportInterval time.Duration, minValue, maxValue int64, sigfigs int, quantiles ...int) metrics.Histogram { - return NewHistogramTick(client, bp, key, tags, time.Tick(reportInterval), minValue, maxValue, sigfigs, quantiles...) -} - -// NewHistogramTick is the same as NewHistoGram, but allows to pass a custom reportTicker. 
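The influxdb constructors above come in pairs: NewCounter, NewGauge, and NewHistogram take a report interval, while the *Tick variants accept a caller-supplied <-chan time.Time, and a background watch goroutine writes the accumulated BatchPoints to the client on every receive. A hedged sketch of driving a counter with a manual trigger channel; the InfluxDB URL and database name are placeholders, and NewHTTPClient is the standard influxdb client/v2 constructor rather than anything defined in this package:

```go
package main

import (
	"log"
	"time"

	stdinflux "github.com/influxdata/influxdb/client/v2"

	"github.com/go-kit/kit/metrics"
	"github.com/go-kit/kit/metrics/influxdb"
)

func main() {
	// Placeholder connection details for an InfluxDB instance.
	client, err := stdinflux.NewHTTPClient(stdinflux.HTTPConfig{Addr: "http://localhost:8086"})
	if err != nil {
		log.Fatal(err)
	}
	defer client.Close()

	bp, err := stdinflux.NewBatchPoints(stdinflux.BatchPointsConfig{Database: "metrics", Precision: "s"})
	if err != nil {
		log.Fatal(err)
	}

	// A manual trigger channel stands in for time.Tick, exactly as the tests below do.
	trigger := make(chan time.Time)
	requests := influxdb.NewCounterTick(client, bp, "http_requests",
		[]metrics.Field{{Key: "host", Value: "web-1"}}, trigger)

	requests.Add(1)
	requests.Add(2)

	trigger <- time.Now()              // watch() writes the batch to the client
	time.Sleep(100 * time.Millisecond) // crude wait; the write happens in a goroutine
}
```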
-func NewHistogramTick(client stdinflux.Client, bp stdinflux.BatchPoints, key string, tags []metrics.Field, - reportTicker <-chan time.Time, minValue, maxValue int64, sigfigs int, quantiles ...int) metrics.Histogram { - gauges := map[int]metrics.Gauge{} - - for _, quantile := range quantiles { - if quantile <= 0 || quantile >= 100 { - panic(fmt.Sprintf("invalid quantile %d", quantile)) - } - gauges[quantile] = NewGaugeTick(client, bp, fmt.Sprintf("%s_p%02d", key, quantile), tags, reportTicker) - } - - h := &histogram{ - hist: hdrhistogram.NewWindowed(5, minValue, maxValue, sigfigs), - key: key, - gauges: gauges, - } - - go h.rotateLoop(1 * time.Minute) - return h -} - -func (h *histogram) Name() string { - return h.key -} - -func (h *histogram) With(field metrics.Field) metrics.Histogram { - for q, gauge := range h.gauges { - h.gauges[q] = gauge.With(field) - } - - return h -} - -func (h *histogram) Observe(value int64) { - h.mu.Lock() - err := h.hist.Current.RecordValue(value) - h.mu.Unlock() - - if err != nil { - panic(err.Error()) - } - - for q, gauge := range h.gauges { - gauge.Set(float64(h.hist.Current.ValueAtQuantile(float64(q)))) - } -} - -func (h *histogram) Distribution() ([]metrics.Bucket, []metrics.Quantile) { - bars := h.hist.Merge().Distribution() - buckets := make([]metrics.Bucket, len(bars)) - for i, bar := range bars { - buckets[i] = metrics.Bucket{ - From: bar.From, - To: bar.To, - Count: bar.Count, - } - } - quantiles := make([]metrics.Quantile, 0, len(h.gauges)) - for quantile, gauge := range h.gauges { - quantiles = append(quantiles, metrics.Quantile{ - Quantile: quantile, - Value: int64(gauge.Get()), - }) - } - sort.Sort(quantileSlice(quantiles)) - return buckets, quantiles -} - -func (h *histogram) rotateLoop(d time.Duration) { - for range time.Tick(d) { - h.mu.Lock() - h.hist.Rotate() - h.mu.Unlock() - } -} - -type quantileSlice []metrics.Quantile - -func (a quantileSlice) Len() int { return len(a) } -func (a quantileSlice) Less(i, j int) bool { return a[i].Quantile < a[j].Quantile } -func (a quantileSlice) Swap(i, j int) { a[i], a[j] = a[j], a[i] } - -func watch(client stdinflux.Client, bp stdinflux.BatchPoints, reportTicker <-chan time.Time) { - for range reportTicker { - client.Write(bp) - } -} diff --git a/vendor/github.com/go-kit/kit/metrics/influxdb/influxdb_test.go b/vendor/github.com/go-kit/kit/metrics/influxdb/influxdb_test.go deleted file mode 100644 index 5bb4b3f..0000000 --- a/vendor/github.com/go-kit/kit/metrics/influxdb/influxdb_test.go +++ /dev/null @@ -1,348 +0,0 @@ -package influxdb_test - -import ( - "reflect" - "sync" - "testing" - "time" - - stdinflux "github.com/influxdata/influxdb/client/v2" - - "github.com/go-kit/kit/metrics" - "github.com/go-kit/kit/metrics/influxdb" -) - -func TestCounter(t *testing.T) { - expectedName := "test_counter" - expectedTags := map[string]string{} - expectedFields := []map[string]interface{}{ - {"value": "2"}, - {"value": "7"}, - {"value": "10"}, - } - - cl := &mockClient{} - cl.Add(3) - bp, _ := stdinflux.NewBatchPoints(stdinflux.BatchPointsConfig{ - Database: "testing", - Precision: "s", - }) - - tags := []metrics.Field{} - for key, value := range expectedTags { - tags = append(tags, metrics.Field{Key: key, Value: value}) - } - - triggerChan := make(chan time.Time) - counter := influxdb.NewCounterTick(cl, bp, expectedName, tags, triggerChan) - counter.Add(2) - counter.Add(5) - counter.Add(3) - - triggerChan <- time.Now() - cl.Wait() - - for i := 0; i <= 2; i++ { - givenPoint := mockPoint{ - Name: expectedName, - 
Tags: expectedTags, - Fields: expectedFields[i], - } - comparePoint(t, i, givenPoint, cl.Points[i]) - } -} - -func TestCounterWithTags(t *testing.T) { - expectedName := "test_counter" - expectedTags := map[string]string{ - "key1": "value1", - "key2": "value2", - } - expectedFields := []map[string]interface{}{ - {"value": "2"}, - {"Test": "Test", "value": "7"}, - {"Test": "Test", "value": "10"}, - } - - cl := &mockClient{} - cl.Add(3) - bp, _ := stdinflux.NewBatchPoints(stdinflux.BatchPointsConfig{ - Database: "testing", - Precision: "s", - }) - - tags := []metrics.Field{} - for key, value := range expectedTags { - tags = append(tags, metrics.Field{Key: key, Value: value}) - } - - triggerChan := make(chan time.Time) - counter := influxdb.NewCounterTick(cl, bp, expectedName, tags, triggerChan) - counter.Add(2) - counter = counter.With(metrics.Field{Key: "Test", Value: "Test"}) - counter.Add(5) - counter.Add(3) - - triggerChan <- time.Now() - cl.Wait() - - for i := 0; i <= 2; i++ { - givenPoint := mockPoint{ - Name: expectedName, - Tags: expectedTags, - Fields: expectedFields[i], - } - comparePoint(t, i, givenPoint, cl.Points[i]) - } -} - -func TestGauge(t *testing.T) { - expectedName := "test_gauge" - expectedTags := map[string]string{} - expectedFields := []map[string]interface{}{ - {"value": 2.1}, - {"value": 1.0}, - {"value": 10.5}, - } - - cl := &mockClient{} - cl.Add(3) - bp, _ := stdinflux.NewBatchPoints(stdinflux.BatchPointsConfig{ - Database: "testing", - Precision: "s", - }) - - tags := []metrics.Field{} - for key, value := range expectedTags { - tags = append(tags, metrics.Field{Key: key, Value: value}) - } - - triggerChan := make(chan time.Time) - counter := influxdb.NewGaugeTick(cl, bp, expectedName, tags, triggerChan) - counter.Add(2.1) - counter.Set(1) - counter.Add(9.5) - - triggerChan <- time.Now() - cl.Wait() - - for i := 0; i <= 2; i++ { - givenPoint := mockPoint{ - Name: expectedName, - Tags: expectedTags, - Fields: expectedFields[i], - } - comparePoint(t, i, givenPoint, cl.Points[i]) - } -} - -func TestGaugeWithTags(t *testing.T) { - expectedName := "test_counter" - expectedTags := map[string]string{ - "key1": "value1", - "key2": "value2", - } - expectedFields := []map[string]interface{}{ - {"value": 2.3}, - {"Test": "Test", "value": 1.0}, - {"Test": "Test", "value": 13.6}, - } - - cl := &mockClient{} - cl.Add(3) - bp, _ := stdinflux.NewBatchPoints(stdinflux.BatchPointsConfig{ - Database: "testing", - Precision: "s", - }) - - tags := []metrics.Field{} - for key, value := range expectedTags { - tags = append(tags, metrics.Field{Key: key, Value: value}) - } - - triggerChan := make(chan time.Time) - gauge := influxdb.NewGaugeTick(cl, bp, expectedName, tags, triggerChan) - gauge.Add(2.3) - gauge = gauge.With(metrics.Field{Key: "Test", Value: "Test"}) - gauge.Set(1) - gauge.Add(12.6) - - triggerChan <- time.Now() - cl.Wait() - - for i := 0; i <= 2; i++ { - givenPoint := mockPoint{ - Name: expectedName, - Tags: expectedTags, - Fields: expectedFields[i], - } - comparePoint(t, i, givenPoint, cl.Points[i]) - } -} - -func TestHistogram(t *testing.T) { - expectedName := "test_histogram" - expectedTags := map[string]string{} - expectedFields := []map[string]map[string]interface{}{ - { - "test_histogram_p50": {"value": 5.0}, - "test_histogram_p90": {"value": 5.0}, - "test_histogram_p95": {"value": 5.0}, - "test_histogram_p99": {"value": 5.0}, - }, - { - "test_histogram_p50": {"Test": "Test", "value": 5.0}, - "test_histogram_p90": {"Test": "Test", "value": 10.0}, - 
"test_histogram_p95": {"Test": "Test", "value": 10.0}, - "test_histogram_p99": {"Test": "Test", "value": 10.0}, - }, - { - "test_histogram_p50": {"Test": "Test", "value": 5.0}, - "test_histogram_p90": {"Test": "Test", "value": 10.0}, - "test_histogram_p95": {"Test": "Test", "value": 10.0}, - "test_histogram_p99": {"Test": "Test", "value": 10.0}, - }, - } - quantiles := []int{50, 90, 95, 99} - - cl := &mockClient{} - cl.Add(12) - bp, _ := stdinflux.NewBatchPoints(stdinflux.BatchPointsConfig{ - Database: "testing", - Precision: "s", - }) - - tags := []metrics.Field{} - for key, value := range expectedTags { - tags = append(tags, metrics.Field{Key: key, Value: value}) - } - - triggerChan := make(chan time.Time) - histogram := influxdb.NewHistogramTick(cl, bp, expectedName, tags, triggerChan, 0, 100, 3, quantiles...) - histogram.Observe(5) - histogram = histogram.With(metrics.Field{Key: "Test", Value: "Test"}) - histogram.Observe(10) - histogram.Observe(4) - triggerChan <- time.Now() - cl.Wait() - - for i := 0; i <= 11; i++ { - actualName := cl.Points[i].Name() - givenName := expectedName + actualName[len(actualName)-4:] - givenPoint := mockPoint{ - Name: givenName, - Tags: expectedTags, - Fields: expectedFields[i/4][actualName], - } - comparePoint(t, i, givenPoint, cl.Points[i]) - } -} - -func TestHistogramWithTags(t *testing.T) { - expectedName := "test_histogram" - expectedTags := map[string]string{ - "key1": "value1", - "key2": "value2", - } - expectedFields := []map[string]map[string]interface{}{ - { - "test_histogram_p50": {"value": 5.0}, - "test_histogram_p90": {"value": 5.0}, - "test_histogram_p95": {"value": 5.0}, - "test_histogram_p99": {"value": 5.0}, - }, - { - "test_histogram_p50": {"Test": "Test", "value": 5.0}, - "test_histogram_p90": {"Test": "Test", "value": 10.0}, - "test_histogram_p95": {"Test": "Test", "value": 10.0}, - "test_histogram_p99": {"Test": "Test", "value": 10.0}, - }, - { - "test_histogram_p50": {"Test": "Test", "value": 5.0}, - "test_histogram_p90": {"Test": "Test", "value": 10.0}, - "test_histogram_p95": {"Test": "Test", "value": 10.0}, - "test_histogram_p99": {"Test": "Test", "value": 10.0}, - }, - } - quantiles := []int{50, 90, 95, 99} - - cl := &mockClient{} - cl.Add(12) - bp, _ := stdinflux.NewBatchPoints(stdinflux.BatchPointsConfig{ - Database: "testing", - Precision: "s", - }) - - tags := []metrics.Field{} - for key, value := range expectedTags { - tags = append(tags, metrics.Field{Key: key, Value: value}) - } - - triggerChan := make(chan time.Time) - histogram := influxdb.NewHistogramTick(cl, bp, expectedName, tags, triggerChan, 0, 100, 3, quantiles...) 
- histogram.Observe(5) - histogram = histogram.With(metrics.Field{Key: "Test", Value: "Test"}) - histogram.Observe(10) - histogram.Observe(4) - triggerChan <- time.Now() - cl.Wait() - - for i := 0; i <= 11; i++ { - actualName := cl.Points[i].Name() - givenName := expectedName + actualName[len(actualName)-4:] - givenPoint := mockPoint{ - Name: givenName, - Tags: expectedTags, - Fields: expectedFields[i/4][actualName], - } - comparePoint(t, i, givenPoint, cl.Points[i]) - } -} - -func comparePoint(t *testing.T, i int, expected mockPoint, given stdinflux.Point) { - - if want, have := expected.Name, given.Name(); want != have { - t.Errorf("point %d: want %q, have %q", i, want, have) - } - - if want, have := expected.Tags, given.Tags(); !reflect.DeepEqual(want, have) { - t.Errorf("point %d: want %v, have %v", i, want, have) - } - - if want, have := expected.Fields, given.Fields(); !reflect.DeepEqual(want, have) { - t.Errorf("point %d: want %v, have %v", i, want, have) - } -} - -type mockClient struct { - Points []stdinflux.Point - sync.WaitGroup -} - -func (m *mockClient) Ping(timeout time.Duration) (time.Duration, string, error) { - t := 0 * time.Millisecond - return t, "", nil -} - -func (m *mockClient) Write(bp stdinflux.BatchPoints) error { - for _, p := range bp.Points() { - m.Points = append(m.Points, *p) - m.Done() - } - - return nil -} - -func (m *mockClient) Query(q stdinflux.Query) (*stdinflux.Response, error) { - return nil, nil -} - -func (m *mockClient) Close() error { - return nil -} - -type mockPoint struct { - Name string - Tags map[string]string - Fields map[string]interface{} -} diff --git a/vendor/github.com/go-kit/kit/metrics/metrics.go b/vendor/github.com/go-kit/kit/metrics/metrics.go deleted file mode 100644 index 3871dd6..0000000 --- a/vendor/github.com/go-kit/kit/metrics/metrics.go +++ /dev/null @@ -1,55 +0,0 @@ -// Package metrics provides an extensible framework to instrument your -// application. All metrics are safe for concurrent use. Considerable design -// influence has been taken from https://github.com/codahale/metrics and -// https://prometheus.io. -package metrics - -// Counter is a monotonically-increasing, unsigned, 64-bit integer used to -// capture the number of times an event has occurred. By tracking the deltas -// between measurements of a counter over intervals of time, an aggregation -// layer can derive rates, acceleration, etc. -type Counter interface { - Name() string - With(Field) Counter - Add(delta uint64) -} - -// Gauge captures instantaneous measurements of something using signed, 64-bit -// floats. The value does not need to be monotonic. -type Gauge interface { - Name() string - With(Field) Gauge - Set(value float64) - Add(delta float64) - Get() float64 -} - -// Histogram tracks the distribution of a stream of values (e.g. the number of -// milliseconds it takes to handle requests). Implementations may choose to -// add gauges for values at meaningful quantiles. -type Histogram interface { - Name() string - With(Field) Histogram - Observe(value int64) - Distribution() ([]Bucket, []Quantile) -} - -// Field is a key/value pair associated with an observation for a specific -// metric. Fields may be ignored by implementations. -type Field struct { - Key string - Value string -} - -// Bucket is a range in a histogram which aggregates observations. -type Bucket struct { - From int64 - To int64 - Count int64 -} - -// Quantile is a pair of quantile (0..100) and its observed maximum value. 
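metrics.go above is the entire contract: Counter, Gauge, and Histogram are small interfaces, and Field is an optional key/value pair that implementations may ignore. As a rough illustration of how little a custom backend needs, here is a sketch of an in-memory counter; the memCounter type is hypothetical and not part of the package:

```go
package main

import (
	"fmt"
	"sync/atomic"

	"github.com/go-kit/kit/metrics"
)

// memCounter is a hypothetical backend: an atomic in-memory counter that,
// like several real backends in this tree, simply ignores fields.
type memCounter struct {
	name  string
	count uint64
}

func (c *memCounter) Name() string                       { return c.name }
func (c *memCounter) With(metrics.Field) metrics.Counter { return c }
func (c *memCounter) Add(delta uint64)                   { atomic.AddUint64(&c.count, delta) }

func main() {
	mc := &memCounter{name: "events"}
	var c metrics.Counter = mc
	c.With(metrics.Field{Key: "ignored", Value: "field"}).Add(3)
	fmt.Println(atomic.LoadUint64(&mc.count)) // 3
}
```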
-type Quantile struct { - Quantile int // 0..100 - Value int64 -} diff --git a/vendor/github.com/go-kit/kit/metrics/multi.go b/vendor/github.com/go-kit/kit/metrics/multi.go deleted file mode 100644 index 114d0c1..0000000 --- a/vendor/github.com/go-kit/kit/metrics/multi.go +++ /dev/null @@ -1,112 +0,0 @@ -package metrics - -type multiCounter struct { - name string - a []Counter -} - -// NewMultiCounter returns a wrapper around multiple Counters. -func NewMultiCounter(name string, counters ...Counter) Counter { - return &multiCounter{ - name: name, - a: counters, - } -} - -func (c multiCounter) Name() string { return c.name } - -func (c multiCounter) With(f Field) Counter { - next := &multiCounter{ - name: c.name, - a: make([]Counter, len(c.a)), - } - for i, counter := range c.a { - next.a[i] = counter.With(f) - } - return next -} - -func (c multiCounter) Add(delta uint64) { - for _, counter := range c.a { - counter.Add(delta) - } -} - -type multiGauge struct { - name string - a []Gauge -} - -func (g multiGauge) Name() string { return g.name } - -// NewMultiGauge returns a wrapper around multiple Gauges. -func NewMultiGauge(name string, gauges ...Gauge) Gauge { - return &multiGauge{ - name: name, - a: gauges, - } -} - -func (g multiGauge) With(f Field) Gauge { - next := &multiGauge{ - name: g.name, - a: make([]Gauge, len(g.a)), - } - for i, gauge := range g.a { - next.a[i] = gauge.With(f) - } - return next -} - -func (g multiGauge) Set(value float64) { - for _, gauge := range g.a { - gauge.Set(value) - } -} - -func (g multiGauge) Add(delta float64) { - for _, gauge := range g.a { - gauge.Add(delta) - } -} - -func (g multiGauge) Get() float64 { - panic("cannot call Get on a MultiGauge") -} - -type multiHistogram struct { - name string - a []Histogram -} - -// NewMultiHistogram returns a wrapper around multiple Histograms. 
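multi.go above wraps several backends behind a single metrics.Counter, Gauge, or Histogram so one instrumentation call fans out to all of them, which is exactly what the test file that follows exercises. A short sketch of that fan-out, assuming the expvar and Prometheus backends from this same vendored tree; the metric names and label key are illustrative:

```go
package main

import (
	stdprometheus "github.com/prometheus/client_golang/prometheus"

	"github.com/go-kit/kit/metrics"
	"github.com/go-kit/kit/metrics/expvar"
	"github.com/go-kit/kit/metrics/prometheus"
)

func main() {
	// One logical counter, recorded in both expvar and Prometheus.
	c := metrics.NewMultiCounter(
		"requests",
		expvar.NewCounter("requests"),
		prometheus.NewCounter(stdprometheus.CounterOpts{
			Namespace: "myapp",
			Subsystem: "http",
			Name:      "requests_total",
			Help:      "Total HTTP requests.",
		}, []string{"code"}),
	)

	// The field reaches every wrapped counter; expvar ignores it, while
	// Prometheus uses it for its predeclared "code" label.
	c.With(metrics.Field{Key: "code", Value: "200"}).Add(1)
}
```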
-func NewMultiHistogram(name string, histograms ...Histogram) Histogram { - return &multiHistogram{ - name: name, - a: histograms, - } -} - -func (h multiHistogram) Name() string { return h.name } - -func (h multiHistogram) With(f Field) Histogram { - next := &multiHistogram{ - name: h.name, - a: make([]Histogram, len(h.a)), - } - for i, histogram := range h.a { - next.a[i] = histogram.With(f) - } - return next -} - -func (h multiHistogram) Observe(value int64) { - for _, histogram := range h.a { - histogram.Observe(value) - } -} - -func (h multiHistogram) Distribution() ([]Bucket, []Quantile) { - // TODO(pb): there may be a way to do this - panic("cannot call Distribution on a MultiHistogram") -} diff --git a/vendor/github.com/go-kit/kit/metrics/multi_test.go b/vendor/github.com/go-kit/kit/metrics/multi_test.go deleted file mode 100644 index 8102dac..0000000 --- a/vendor/github.com/go-kit/kit/metrics/multi_test.go +++ /dev/null @@ -1,233 +0,0 @@ -package metrics_test - -import ( - stdexpvar "expvar" - "fmt" - "io/ioutil" - "math" - "net/http" - "net/http/httptest" - "regexp" - "strconv" - "strings" - "testing" - - stdprometheus "github.com/prometheus/client_golang/prometheus" - - "github.com/go-kit/kit/metrics" - "github.com/go-kit/kit/metrics/expvar" - "github.com/go-kit/kit/metrics/prometheus" - "github.com/go-kit/kit/metrics/teststat" -) - -func TestMultiWith(t *testing.T) { - c := metrics.NewMultiCounter( - "multifoo", - expvar.NewCounter("foo"), - prometheus.NewCounter(stdprometheus.CounterOpts{ - Namespace: "test", - Subsystem: "multi_with", - Name: "bar", - Help: "Bar counter.", - }, []string{"a"}), - ) - - c.Add(1) - c.With(metrics.Field{Key: "a", Value: "1"}).Add(2) - c.Add(3) - - if want, have := strings.Join([]string{ - `# HELP test_multi_with_bar Bar counter.`, - `# TYPE test_multi_with_bar counter`, - `test_multi_with_bar{a="1"} 2`, - `test_multi_with_bar{a="unknown"} 4`, - }, "\n"), scrapePrometheus(t); !strings.Contains(have, want) { - t.Errorf("Prometheus metric stanza not found or incorrect\n%s", have) - } -} - -func TestMultiCounter(t *testing.T) { - metrics.NewMultiCounter( - "multialpha", - expvar.NewCounter("alpha"), - prometheus.NewCounter(stdprometheus.CounterOpts{ - Namespace: "test", - Subsystem: "multi_counter", - Name: "beta", - Help: "Beta counter.", - }, []string{"a"}), - ).With(metrics.Field{Key: "a", Value: "b"}).Add(123) - - if want, have := "123", stdexpvar.Get("alpha").String(); want != have { - t.Errorf("expvar: want %q, have %q", want, have) - } - - if want, have := strings.Join([]string{ - `# HELP test_multi_counter_beta Beta counter.`, - `# TYPE test_multi_counter_beta counter`, - `test_multi_counter_beta{a="b"} 123`, - }, "\n"), scrapePrometheus(t); !strings.Contains(have, want) { - t.Errorf("Prometheus metric stanza not found or incorrect\n%s", have) - } -} - -func TestMultiGauge(t *testing.T) { - g := metrics.NewMultiGauge( - "multidelta", - expvar.NewGauge("delta"), - prometheus.NewGauge(stdprometheus.GaugeOpts{ - Namespace: "test", - Subsystem: "multi_gauge", - Name: "kappa", - Help: "Kappa gauge.", - }, []string{"a"}), - ) - - f := metrics.Field{Key: "a", Value: "aaa"} - g.With(f).Set(34) - - if want, have := "34", stdexpvar.Get("delta").String(); want != have { - t.Errorf("expvar: want %q, have %q", want, have) - } - if want, have := strings.Join([]string{ - `# HELP test_multi_gauge_kappa Kappa gauge.`, - `# TYPE test_multi_gauge_kappa gauge`, - `test_multi_gauge_kappa{a="aaa"} 34`, - }, "\n"), scrapePrometheus(t); !strings.Contains(have, want) 
{ - t.Errorf("Prometheus metric stanza not found or incorrect\n%s", have) - } - - g.With(f).Add(-40) - - if want, have := "-6", stdexpvar.Get("delta").String(); want != have { - t.Errorf("expvar: want %q, have %q", want, have) - } - if want, have := strings.Join([]string{ - `# HELP test_multi_gauge_kappa Kappa gauge.`, - `# TYPE test_multi_gauge_kappa gauge`, - `test_multi_gauge_kappa{a="aaa"} -6`, - }, "\n"), scrapePrometheus(t); !strings.Contains(have, want) { - t.Errorf("Prometheus metric stanza not found or incorrect\n%s", have) - } -} - -func TestMultiHistogram(t *testing.T) { - quantiles := []int{50, 90, 99} - h := metrics.NewMultiHistogram( - "multiomicron", - expvar.NewHistogram("omicron", 0, 100, 3, quantiles...), - prometheus.NewSummary(stdprometheus.SummaryOpts{ - Namespace: "test", - Subsystem: "multi_histogram", - Name: "nu", - Help: "Nu histogram.", - }, []string{}), - ) - - const seed, mean, stdev int64 = 123, 50, 10 - teststat.PopulateNormalHistogram(t, h, seed, mean, stdev) - assertExpvarNormalHistogram(t, "omicron", mean, stdev, quantiles) - assertPrometheusNormalHistogram(t, `test_multi_histogram_nu`, mean, stdev) -} - -func assertExpvarNormalHistogram(t *testing.T, metricName string, mean, stdev int64, quantiles []int) { - const tolerance int = 2 - for _, quantile := range quantiles { - want := normalValueAtQuantile(mean, stdev, quantile) - s := stdexpvar.Get(fmt.Sprintf("%s_p%02d", metricName, quantile)).String() - have, err := strconv.Atoi(s) - if err != nil { - t.Fatal(err) - } - if int(math.Abs(float64(want)-float64(have))) > tolerance { - t.Errorf("quantile %d: want %d, have %d", quantile, want, have) - } - } -} - -func assertPrometheusNormalHistogram(t *testing.T, metricName string, mean, stdev int64) { - scrape := scrapePrometheus(t) - const tolerance int = 5 // Prometheus approximates higher quantiles badly -_-; - for quantileInt, quantileStr := range map[int]string{50: "0.5", 90: "0.9", 99: "0.99"} { - want := normalValueAtQuantile(mean, stdev, quantileInt) - have := getPrometheusQuantile(t, scrape, metricName, quantileStr) - if int(math.Abs(float64(want)-float64(have))) > tolerance { - t.Errorf("%q: want %d, have %d", quantileStr, want, have) - } - } -} - -// https://en.wikipedia.org/wiki/Normal_distribution#Quantile_function -func normalValueAtQuantile(mean, stdev int64, quantile int) int64 { - return int64(float64(mean) + float64(stdev)*math.Sqrt2*erfinv(2*(float64(quantile)/100)-1)) -} - -// https://stackoverflow.com/questions/5971830/need-code-for-inverse-error-function -func erfinv(y float64) float64 { - if y < -1.0 || y > 1.0 { - panic("invalid input") - } - - var ( - a = [4]float64{0.886226899, -1.645349621, 0.914624893, -0.140543331} - b = [4]float64{-2.118377725, 1.442710462, -0.329097515, 0.012229801} - c = [4]float64{-1.970840454, -1.624906493, 3.429567803, 1.641345311} - d = [2]float64{3.543889200, 1.637067800} - ) - - const y0 = 0.7 - var x, z float64 - - if math.Abs(y) == 1.0 { - x = -y * math.Log(0.0) - } else if y < -y0 { - z = math.Sqrt(-math.Log((1.0 + y) / 2.0)) - x = -(((c[3]*z+c[2])*z+c[1])*z + c[0]) / ((d[1]*z+d[0])*z + 1.0) - } else { - if y < y0 { - z = y * y - x = y * (((a[3]*z+a[2])*z+a[1])*z + a[0]) / ((((b[3]*z+b[3])*z+b[1])*z+b[0])*z + 1.0) - } else { - z = math.Sqrt(-math.Log((1.0 - y) / 2.0)) - x = (((c[3]*z+c[2])*z+c[1])*z + c[0]) / ((d[1]*z+d[0])*z + 1.0) - } - x = x - (math.Erf(x)-y)/(2.0/math.SqrtPi*math.Exp(-x*x)) - x = x - (math.Erf(x)-y)/(2.0/math.SqrtPi*math.Exp(-x*x)) - } - - return x -} - -func scrapePrometheus(t 
*testing.T) string { - server := httptest.NewServer(stdprometheus.UninstrumentedHandler()) - defer server.Close() - - resp, err := http.Get(server.URL) - if err != nil { - t.Fatal(err) - } - defer resp.Body.Close() - - buf, err := ioutil.ReadAll(resp.Body) - if err != nil { - t.Fatal(err) - } - - return strings.TrimSpace(string(buf)) -} - -func getPrometheusQuantile(t *testing.T, scrape, name, quantileStr string) int { - re := name + `{quantile="` + quantileStr + `"} ([0-9]+)` - matches := regexp.MustCompile(re).FindAllStringSubmatch(scrape, -1) - if len(matches) < 1 { - t.Fatalf("%q: quantile %q not found in scrape (%s)", name, quantileStr, re) - } - if len(matches[0]) < 2 { - t.Fatalf("%q: quantile %q not found in scrape (%s)", name, quantileStr, re) - } - i, err := strconv.Atoi(matches[0][1]) - if err != nil { - t.Fatal(err) - } - return i -} diff --git a/vendor/github.com/go-kit/kit/metrics/print.go b/vendor/github.com/go-kit/kit/metrics/print.go deleted file mode 100644 index d3feae7..0000000 --- a/vendor/github.com/go-kit/kit/metrics/print.go +++ /dev/null @@ -1,42 +0,0 @@ -package metrics - -import ( - "fmt" - "io" - "text/tabwriter" -) - -const ( - bs = "####################################################################################################" - bsz = float64(len(bs)) -) - -// PrintDistribution writes a human-readable graph of the distribution to the -// passed writer. -func PrintDistribution(w io.Writer, h Histogram) { - buckets, quantiles := h.Distribution() - - fmt.Fprintf(w, "name: %v\n", h.Name()) - fmt.Fprintf(w, "quantiles: %v\n", quantiles) - - var total float64 - for _, bucket := range buckets { - total += float64(bucket.Count) - } - - tw := tabwriter.NewWriter(w, 0, 2, 2, ' ', 0) - fmt.Fprintf(tw, "From\tTo\tCount\tProb\tBar\n") - - axis := "|" - for _, bucket := range buckets { - if bucket.Count > 0 { - p := float64(bucket.Count) / total - fmt.Fprintf(tw, "%d\t%d\t%d\t%.4f\t%s%s\n", bucket.From, bucket.To, bucket.Count, p, axis, bs[:int(p*bsz)]) - axis = "|" - } else { - axis = ":" // show that some bars were skipped - } - } - - tw.Flush() -} diff --git a/vendor/github.com/go-kit/kit/metrics/print_test.go b/vendor/github.com/go-kit/kit/metrics/print_test.go deleted file mode 100644 index 5291784..0000000 --- a/vendor/github.com/go-kit/kit/metrics/print_test.go +++ /dev/null @@ -1,40 +0,0 @@ -package metrics_test - -import ( - "bytes" - "testing" - - "math" - - "github.com/go-kit/kit/metrics" - "github.com/go-kit/kit/metrics/expvar" - "github.com/go-kit/kit/metrics/teststat" -) - -func TestPrintDistribution(t *testing.T) { - var ( - quantiles = []int{50, 90, 95, 99} - h = expvar.NewHistogram("test_print_distribution", 0, 100, 3, quantiles...) - seed = int64(555) - mean = int64(5) - stdev = int64(1) - ) - teststat.PopulateNormalHistogram(t, h, seed, mean, stdev) - - var buf bytes.Buffer - metrics.PrintDistribution(&buf, h) - t.Logf("\n%s\n", buf.String()) - - // Count the number of bar chart characters. - // We should have ca. 100 in any distribution with a small-enough stdev. 
- - var n int - for _, r := range buf.String() { - if r == '#' { - n++ - } - } - if want, have, tol := 100, n, 5; int(math.Abs(float64(want-have))) > tol { - t.Errorf("want %d, have %d (tolerance %d)", want, have, tol) - } -} diff --git a/vendor/github.com/go-kit/kit/metrics/prometheus/prometheus.go b/vendor/github.com/go-kit/kit/metrics/prometheus/prometheus.go deleted file mode 100644 index 3fc774d..0000000 --- a/vendor/github.com/go-kit/kit/metrics/prometheus/prometheus.go +++ /dev/null @@ -1,202 +0,0 @@ -// Package prometheus implements a Prometheus backend for package metrics. -package prometheus - -import ( - "github.com/prometheus/client_golang/prometheus" - - "github.com/go-kit/kit/metrics" -) - -// Prometheus has strong opinions about the dimensionality of fields. Users -// must predeclare every field key they intend to use. On every observation, -// fields with keys that haven't been predeclared will be silently dropped, -// and predeclared field keys without values will receive the value -// PrometheusLabelValueUnknown. -var PrometheusLabelValueUnknown = "unknown" - -type counter struct { - *prometheus.CounterVec - name string - Pairs map[string]string -} - -// NewCounter returns a new Counter backed by a Prometheus metric. The counter -// is automatically registered via prometheus.Register. -func NewCounter(opts prometheus.CounterOpts, fieldKeys []string) metrics.Counter { - m := prometheus.NewCounterVec(opts, fieldKeys) - prometheus.MustRegister(m) - p := map[string]string{} - for _, fieldName := range fieldKeys { - p[fieldName] = PrometheusLabelValueUnknown - } - return counter{ - CounterVec: m, - name: opts.Name, - Pairs: p, - } -} - -func (c counter) Name() string { return c.name } - -func (c counter) With(f metrics.Field) metrics.Counter { - return counter{ - CounterVec: c.CounterVec, - name: c.name, - Pairs: merge(c.Pairs, f), - } -} - -func (c counter) Add(delta uint64) { - c.CounterVec.With(prometheus.Labels(c.Pairs)).Add(float64(delta)) -} - -type gauge struct { - *prometheus.GaugeVec - name string - Pairs map[string]string -} - -// NewGauge returns a new Gauge backed by a Prometheus metric. The gauge is -// automatically registered via prometheus.Register. -func NewGauge(opts prometheus.GaugeOpts, fieldKeys []string) metrics.Gauge { - m := prometheus.NewGaugeVec(opts, fieldKeys) - prometheus.MustRegister(m) - return gauge{ - GaugeVec: m, - name: opts.Name, - Pairs: pairsFrom(fieldKeys), - } -} - -func (g gauge) Name() string { return g.name } - -func (g gauge) With(f metrics.Field) metrics.Gauge { - return gauge{ - GaugeVec: g.GaugeVec, - name: g.name, - Pairs: merge(g.Pairs, f), - } -} - -func (g gauge) Set(value float64) { - g.GaugeVec.With(prometheus.Labels(g.Pairs)).Set(value) -} - -func (g gauge) Add(delta float64) { - g.GaugeVec.With(prometheus.Labels(g.Pairs)).Add(delta) -} - -func (g gauge) Get() float64 { - // TODO(pb): see https://github.com/prometheus/client_golang/issues/58 - return 0.0 -} - -// RegisterCallbackGauge registers a Gauge with Prometheus whose value is -// determined at collect time by the passed callback function. The callback -// determines the value, and fields are ignored, so RegisterCallbackGauge -// returns nothing. -func RegisterCallbackGauge(opts prometheus.GaugeOpts, callback func() float64) { - prometheus.MustRegister(prometheus.NewGaugeFunc(opts, callback)) -} - -type summary struct { - *prometheus.SummaryVec - name string - Pairs map[string]string -} - -// NewSummary returns a new Histogram backed by a Prometheus summary. 
The -// histogram is automatically registered via prometheus.Register. -// -// For more information on Prometheus histograms and summaries, refer to -// http://prometheus.io/docs/practices/histograms. -func NewSummary(opts prometheus.SummaryOpts, fieldKeys []string) metrics.Histogram { - m := prometheus.NewSummaryVec(opts, fieldKeys) - prometheus.MustRegister(m) - return summary{ - SummaryVec: m, - name: opts.Name, - Pairs: pairsFrom(fieldKeys), - } -} - -func (s summary) Name() string { return s.name } - -func (s summary) With(f metrics.Field) metrics.Histogram { - return summary{ - SummaryVec: s.SummaryVec, - name: s.name, - Pairs: merge(s.Pairs, f), - } -} - -func (s summary) Observe(value int64) { - s.SummaryVec.With(prometheus.Labels(s.Pairs)).Observe(float64(value)) -} - -func (s summary) Distribution() ([]metrics.Bucket, []metrics.Quantile) { - // TODO(pb): see https://github.com/prometheus/client_golang/issues/58 - return []metrics.Bucket{}, []metrics.Quantile{} -} - -type histogram struct { - *prometheus.HistogramVec - name string - Pairs map[string]string -} - -// NewHistogram returns a new Histogram backed by a Prometheus Histogram. The -// histogram is automatically registered via prometheus.Register. -// -// For more information on Prometheus histograms and summaries, refer to -// http://prometheus.io/docs/practices/histograms. -func NewHistogram(opts prometheus.HistogramOpts, fieldKeys []string) metrics.Histogram { - m := prometheus.NewHistogramVec(opts, fieldKeys) - prometheus.MustRegister(m) - return histogram{ - HistogramVec: m, - name: opts.Name, - Pairs: pairsFrom(fieldKeys), - } -} - -func (h histogram) Name() string { return h.name } - -func (h histogram) With(f metrics.Field) metrics.Histogram { - return histogram{ - HistogramVec: h.HistogramVec, - name: h.name, - Pairs: merge(h.Pairs, f), - } -} - -func (h histogram) Observe(value int64) { - h.HistogramVec.With(prometheus.Labels(h.Pairs)).Observe(float64(value)) -} - -func (h histogram) Distribution() ([]metrics.Bucket, []metrics.Quantile) { - // TODO(pb): see https://github.com/prometheus/client_golang/issues/58 - return []metrics.Bucket{}, []metrics.Quantile{} -} - -func pairsFrom(fieldKeys []string) map[string]string { - p := map[string]string{} - for _, fieldName := range fieldKeys { - p[fieldName] = PrometheusLabelValueUnknown - } - return p -} - -func merge(orig map[string]string, f metrics.Field) map[string]string { - if _, ok := orig[f.Key]; !ok { - return orig - } - - newPairs := make(map[string]string, len(orig)) - for k, v := range orig { - newPairs[k] = v - } - - newPairs[f.Key] = f.Value - return newPairs -} diff --git a/vendor/github.com/go-kit/kit/metrics/prometheus/prometheus_test.go b/vendor/github.com/go-kit/kit/metrics/prometheus/prometheus_test.go deleted file mode 100644 index 5115f17..0000000 --- a/vendor/github.com/go-kit/kit/metrics/prometheus/prometheus_test.go +++ /dev/null @@ -1,130 +0,0 @@ -package prometheus_test - -import ( - "strings" - "testing" - - stdprometheus "github.com/prometheus/client_golang/prometheus" - - "github.com/go-kit/kit/metrics" - "github.com/go-kit/kit/metrics/prometheus" - "github.com/go-kit/kit/metrics/teststat" -) - -func TestPrometheusLabelBehavior(t *testing.T) { - c := prometheus.NewCounter(stdprometheus.CounterOpts{ - Namespace: "test", - Subsystem: "prometheus_label_behavior", - Name: "foobar", - Help: "Abc def.", - }, []string{"used_key", "unused_key"}) - c.With(metrics.Field{Key: "used_key", Value: "declared"}).Add(1) - c.Add(1) - - if want, have := 
strings.Join([]string{ - `# HELP test_prometheus_label_behavior_foobar Abc def.`, - `# TYPE test_prometheus_label_behavior_foobar counter`, - `test_prometheus_label_behavior_foobar{unused_key="unknown",used_key="declared"} 1`, - `test_prometheus_label_behavior_foobar{unused_key="unknown",used_key="unknown"} 1`, - }, "\n"), teststat.ScrapePrometheus(t); !strings.Contains(have, want) { - t.Errorf("metric stanza not found or incorrect\n%s", have) - } -} - -func TestPrometheusCounter(t *testing.T) { - c := prometheus.NewCounter(stdprometheus.CounterOpts{ - Namespace: "test", - Subsystem: "prometheus_counter", - Name: "foobar", - Help: "Lorem ipsum.", - }, []string{}) - c.Add(1) - c.Add(2) - if want, have := strings.Join([]string{ - `# HELP test_prometheus_counter_foobar Lorem ipsum.`, - `# TYPE test_prometheus_counter_foobar counter`, - `test_prometheus_counter_foobar 3`, - }, "\n"), teststat.ScrapePrometheus(t); !strings.Contains(have, want) { - t.Errorf("metric stanza not found or incorrect\n%s", have) - } - c.Add(3) - c.Add(4) - if want, have := strings.Join([]string{ - `# HELP test_prometheus_counter_foobar Lorem ipsum.`, - `# TYPE test_prometheus_counter_foobar counter`, - `test_prometheus_counter_foobar 10`, - }, "\n"), teststat.ScrapePrometheus(t); !strings.Contains(have, want) { - t.Errorf("metric stanza not found or incorrect\n%s", have) - } -} - -func TestPrometheusGauge(t *testing.T) { - c := prometheus.NewGauge(stdprometheus.GaugeOpts{ - Namespace: "test", - Subsystem: "prometheus_gauge", - Name: "foobar", - Help: "Dolor sit.", - }, []string{}) - c.Set(42) - if want, have := strings.Join([]string{ - `# HELP test_prometheus_gauge_foobar Dolor sit.`, - `# TYPE test_prometheus_gauge_foobar gauge`, - `test_prometheus_gauge_foobar 42`, - }, "\n"), teststat.ScrapePrometheus(t); !strings.Contains(have, want) { - t.Errorf("metric stanza not found or incorrect\n%s", have) - } - c.Add(-43) - if want, have := strings.Join([]string{ - `# HELP test_prometheus_gauge_foobar Dolor sit.`, - `# TYPE test_prometheus_gauge_foobar gauge`, - `test_prometheus_gauge_foobar -1`, - }, "\n"), teststat.ScrapePrometheus(t); !strings.Contains(have, want) { - t.Errorf("metric stanza not found or incorrect\n%s", have) - } -} - -func TestPrometheusCallbackGauge(t *testing.T) { - value := 123.456 - cb := func() float64 { return value } - prometheus.RegisterCallbackGauge(stdprometheus.GaugeOpts{ - Namespace: "test", - Subsystem: "prometheus_gauge", - Name: "bazbaz", - Help: "Help string.", - }, cb) - if want, have := strings.Join([]string{ - `# HELP test_prometheus_gauge_bazbaz Help string.`, - `# TYPE test_prometheus_gauge_bazbaz gauge`, - `test_prometheus_gauge_bazbaz 123.456`, - }, "\n"), teststat.ScrapePrometheus(t); !strings.Contains(have, want) { - t.Errorf("metric stanza not found or incorrect\n%s", have) - } -} - -func TestPrometheusSummary(t *testing.T) { - h := prometheus.NewSummary(stdprometheus.SummaryOpts{ - Namespace: "test", - Subsystem: "prometheus_summary_histogram", - Name: "foobar", - Help: "Qwerty asdf.", - }, []string{}) - - const mean, stdev int64 = 50, 10 - teststat.PopulateNormalHistogram(t, h, 34, mean, stdev) - teststat.AssertPrometheusNormalSummary(t, "test_prometheus_summary_histogram_foobar", mean, stdev) -} - -func TestPrometheusHistogram(t *testing.T) { - buckets := []float64{20, 40, 60, 80, 100} - h := prometheus.NewHistogram(stdprometheus.HistogramOpts{ - Namespace: "test", - Subsystem: "prometheus_histogram_histogram", - Name: "quux", - Help: "Qwerty asdf.", - Buckets: buckets, - }, 
[]string{}) - - const mean, stdev int64 = 50, 10 - teststat.PopulateNormalHistogram(t, h, 34, mean, stdev) - teststat.AssertPrometheusBucketedHistogram(t, "test_prometheus_histogram_histogram_quux_bucket", mean, stdev, buckets) -} diff --git a/vendor/github.com/go-kit/kit/metrics/provider/providers.go b/vendor/github.com/go-kit/kit/metrics/provider/providers.go deleted file mode 100644 index 34cdf33..0000000 --- a/vendor/github.com/go-kit/kit/metrics/provider/providers.go +++ /dev/null @@ -1,259 +0,0 @@ -package provider - -import ( - "errors" - "time" - - "github.com/prometheus/client_golang/prometheus" - - "github.com/go-kit/kit/log" - "github.com/go-kit/kit/metrics" - "github.com/go-kit/kit/metrics/discard" - "github.com/go-kit/kit/metrics/dogstatsd" - kitexp "github.com/go-kit/kit/metrics/expvar" - "github.com/go-kit/kit/metrics/graphite" - kitprom "github.com/go-kit/kit/metrics/prometheus" - "github.com/go-kit/kit/metrics/statsd" -) - -// Provider represents a union set of constructors and lifecycle management -// functions for each supported metrics backend. It should be used by those who -// need to easily swap out implementations, e.g. dynamically, or at a single -// point in an intermediating framework. -type Provider interface { - NewCounter(name, help string) metrics.Counter - NewHistogram(name, help string, min, max int64, sigfigs int, quantiles ...int) (metrics.Histogram, error) - NewGauge(name, help string) metrics.Gauge - Stop() -} - -// NewGraphiteProvider will return a Provider implementation that is a simple -// wrapper around a graphite.Emitter. All metric names will be prefixed with the -// given value and data will be emitted once every interval. If no network value -// is given, it will default to "udp". -func NewGraphiteProvider(network, address, prefix string, interval time.Duration, logger log.Logger) (Provider, error) { - if network == "" { - network = "udp" - } - if address == "" { - return nil, errors.New("address is required") - } - return graphiteProvider{ - e: graphite.NewEmitter(network, address, prefix, interval, logger), - }, nil -} - -type graphiteProvider struct { - e *graphite.Emitter -} - -var _ Provider = graphiteProvider{} - -// NewCounter implements Provider. Help is ignored. -func (p graphiteProvider) NewCounter(name, _ string) metrics.Counter { - return p.e.NewCounter(name) -} - -// NewHistogram implements Provider. Help is ignored. -func (p graphiteProvider) NewHistogram(name, _ string, min, max int64, sigfigs int, quantiles ...int) (metrics.Histogram, error) { - return p.e.NewHistogram(name, min, max, sigfigs, quantiles...) -} - -// NewGauge implements Provider. Help is ignored. -func (p graphiteProvider) NewGauge(name, _ string) metrics.Gauge { - return p.e.NewGauge(name) -} - -// Stop implements Provider. -func (p graphiteProvider) Stop() { - p.e.Stop() -} - -// NewStatsdProvider will return a Provider implementation that is a simple -// wrapper around a statsd.Emitter. All metric names will be prefixed with the -// given value and data will be emitted once every interval or when the buffer -// has reached its max size. If no network value is given, it will default to -// "udp". 
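providers.go above defines the small Provider interface (NewCounter, NewHistogram, NewGauge, Stop) precisely so application code can be written once and pointed at Graphite, statsd, dogstatsd, expvar, Prometheus, or a discard backend. A hedged sketch of code written against that interface; the expvar backend, prefix, and metric names are illustrative choices, not anything prescribed by the package:

```go
package main

import (
	"log"
	"time"

	"github.com/go-kit/kit/metrics/provider"
)

// instrument only knows about the Provider interface, so the backend can be
// swapped without touching this code.
func instrument(p provider.Provider) error {
	requests := p.NewCounter("requests", "Total requests handled.")
	inflight := p.NewGauge("inflight", "Requests currently in flight.")
	latency, err := p.NewHistogram("latency_ms", "Request latency in milliseconds.", 0, 5000, 3, 50, 95, 99)
	if err != nil {
		return err
	}

	// Instrument one fake request.
	inflight.Add(1)
	requests.Add(1)
	begin := time.Now()
	latency.Observe(time.Since(begin).Nanoseconds() / int64(time.Millisecond))
	inflight.Add(-1)
	return nil
}

func main() {
	// Swapping backends is a one-line change, e.g. NewPrometheusProvider("myapp", "http").
	p := provider.NewExpvarProvider("myapp_")
	defer p.Stop()

	if err := instrument(p); err != nil {
		log.Fatal(err)
	}
}
```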
-func NewStatsdProvider(network, address, prefix string, interval time.Duration, logger log.Logger) (Provider, error) { - if network == "" { - network = "udp" - } - if address == "" { - return nil, errors.New("address is required") - } - return statsdProvider{ - e: statsd.NewEmitter(network, address, prefix, interval, logger), - }, nil -} - -type statsdProvider struct { - e *statsd.Emitter -} - -var _ Provider = statsdProvider{} - -// NewCounter implements Provider. Help is ignored. -func (p statsdProvider) NewCounter(name, _ string) metrics.Counter { - return p.e.NewCounter(name) -} - -// NewHistogram implements Provider. Help is ignored. -func (p statsdProvider) NewHistogram(name, _ string, min, max int64, sigfigs int, quantiles ...int) (metrics.Histogram, error) { - return p.e.NewHistogram(name), nil -} - -// NewGauge implements Provider. Help is ignored. -func (p statsdProvider) NewGauge(name, _ string) metrics.Gauge { - return p.e.NewGauge(name) -} - -// Stop will call the underlying statsd.Emitter's Stop method. -func (p statsdProvider) Stop() { - p.e.Stop() -} - -// NewDogStatsdProvider will return a Provider implementation that is a simple -// wrapper around a dogstatsd.Emitter. All metric names will be prefixed with -// the given value and data will be emitted once every interval or when the -// buffer has reached its max size. If no network value is given, it will -// default to "udp". -func NewDogStatsdProvider(network, address, prefix string, interval time.Duration, logger log.Logger) (Provider, error) { - if network == "" { - network = "udp" - } - if address == "" { - return nil, errors.New("address is required") - } - return dogstatsdProvider{ - e: dogstatsd.NewEmitter(network, address, prefix, interval, logger), - }, nil -} - -type dogstatsdProvider struct { - e *dogstatsd.Emitter -} - -var _ Provider = dogstatsdProvider{} - -// NewCounter implements Provider. Help is ignored. -func (p dogstatsdProvider) NewCounter(name, _ string) metrics.Counter { - return p.e.NewCounter(name) -} - -// NewHistogram implements Provider. Help is ignored. -func (p dogstatsdProvider) NewHistogram(name, _ string, min, max int64, sigfigs int, quantiles ...int) (metrics.Histogram, error) { - return p.e.NewHistogram(name), nil -} - -// NewGauge implements Provider. Help is ignored. -func (p dogstatsdProvider) NewGauge(name, _ string) metrics.Gauge { - return p.e.NewGauge(name) -} - -// Stop will call the underlying statsd.Emitter's Stop method. -func (p dogstatsdProvider) Stop() { - p.e.Stop() -} - -// NewExpvarProvider is a very thin wrapper over the expvar package. -// If a prefix is provided, it will prefix all metric names. -func NewExpvarProvider(prefix string) Provider { - return expvarProvider{prefix: prefix} -} - -type expvarProvider struct { - prefix string -} - -var _ Provider = expvarProvider{} - -// NewCounter implements Provider. Help is ignored. -func (p expvarProvider) NewCounter(name, _ string) metrics.Counter { - return kitexp.NewCounter(p.prefix + name) -} - -// NewHistogram implements Provider. Help is ignored. -func (p expvarProvider) NewHistogram(name, _ string, min, max int64, sigfigs int, quantiles ...int) (metrics.Histogram, error) { - return kitexp.NewHistogram(p.prefix+name, min, max, sigfigs, quantiles...), nil -} - -// NewGauge implements Provider. Help is ignored. -func (p expvarProvider) NewGauge(name, _ string) metrics.Gauge { - return kitexp.NewGauge(p.prefix + name) -} - -// Stop is a no-op. 
-func (expvarProvider) Stop() {} - -type prometheusProvider struct { - namespace string - subsystem string -} - -var _ Provider = prometheusProvider{} - -// NewPrometheusProvider returns a Prometheus provider that uses the provided -// namespace and subsystem for all metrics. -func NewPrometheusProvider(namespace, subsystem string) Provider { - return prometheusProvider{ - namespace: namespace, - subsystem: subsystem, - } -} - -// NewCounter implements Provider. -func (p prometheusProvider) NewCounter(name, help string) metrics.Counter { - return kitprom.NewCounter(prometheus.CounterOpts{ - Namespace: p.namespace, - Subsystem: p.subsystem, - Name: name, - Help: help, - }, nil) -} - -// NewHistogram ignores all parameters except name and help. -func (p prometheusProvider) NewHistogram(name, help string, _, _ int64, _ int, _ ...int) (metrics.Histogram, error) { - return kitprom.NewHistogram(prometheus.HistogramOpts{ - Namespace: p.namespace, - Subsystem: p.subsystem, - Name: name, - Help: help, - }, nil), nil -} - -// NewGauge implements Provider. -func (p prometheusProvider) NewGauge(name, help string) metrics.Gauge { - return kitprom.NewGauge(prometheus.GaugeOpts{ - Namespace: p.namespace, - Subsystem: p.subsystem, - Name: name, - Help: help, - }, nil) -} - -// Stop is a no-op. -func (prometheusProvider) Stop() {} - -var _ Provider = discardProvider{} - -// NewDiscardProvider returns a provider that will discard all metrics. -func NewDiscardProvider() Provider { - return discardProvider{} -} - -type discardProvider struct{} - -func (p discardProvider) NewCounter(name string, _ string) metrics.Counter { - return discard.NewCounter(name) -} - -func (p discardProvider) NewHistogram(name string, _ string, _ int64, _ int64, _ int, _ ...int) (metrics.Histogram, error) { - return discard.NewHistogram(name), nil -} - -func (p discardProvider) NewGauge(name string, _ string) metrics.Gauge { - return discard.NewGauge(name) -} - -// Stop is a no-op. 
-func (p discardProvider) Stop() {} diff --git a/vendor/github.com/go-kit/kit/metrics/provider/providers_test.go b/vendor/github.com/go-kit/kit/metrics/provider/providers_test.go deleted file mode 100644 index 15d3e4d..0000000 --- a/vendor/github.com/go-kit/kit/metrics/provider/providers_test.go +++ /dev/null @@ -1,56 +0,0 @@ -package provider - -import ( - "testing" - "time" - - "github.com/go-kit/kit/log" -) - -func TestGraphite(t *testing.T) { - p, err := NewGraphiteProvider("network", "address", "prefix", time.Second, log.NewNopLogger()) - if err != nil { - t.Fatal(err) - } - testProvider(t, "Graphite", p) -} - -func TestStatsd(t *testing.T) { - p, err := NewStatsdProvider("network", "address", "prefix", time.Second, log.NewNopLogger()) - if err != nil { - t.Fatal(err) - } - testProvider(t, "Statsd", p) -} - -func TestDogStatsd(t *testing.T) { - p, err := NewDogStatsdProvider("network", "address", "prefix", time.Second, log.NewNopLogger()) - if err != nil { - t.Fatal(err) - } - testProvider(t, "DogStatsd", p) -} - -func TestExpvar(t *testing.T) { - testProvider(t, "Expvar", NewExpvarProvider("prefix")) -} - -func TestPrometheus(t *testing.T) { - testProvider(t, "Prometheus", NewPrometheusProvider("namespace", "subsystem")) -} - -func testProvider(t *testing.T, what string, p Provider) { - c := p.NewCounter("counter", "Counter help.") - c.Add(1) - - h, err := p.NewHistogram("histogram", "Histogram help.", 1, 100, 3, 50, 95, 99) - if err != nil { - t.Errorf("%s: NewHistogram: %v", what, err) - } - h.Observe(99) - - g := p.NewGauge("gauge", "Gauge help.") - g.Set(123) - - p.Stop() -} diff --git a/vendor/github.com/go-kit/kit/metrics/scaled_histogram.go b/vendor/github.com/go-kit/kit/metrics/scaled_histogram.go deleted file mode 100644 index d63bb97..0000000 --- a/vendor/github.com/go-kit/kit/metrics/scaled_histogram.go +++ /dev/null @@ -1,23 +0,0 @@ -package metrics - -type scaledHistogram struct { - Histogram - scale int64 -} - -// NewScaledHistogram returns a Histogram whose observed values are downscaled -// (divided) by scale. -func NewScaledHistogram(h Histogram, scale int64) Histogram { - return scaledHistogram{h, scale} -} - -func (h scaledHistogram) With(f Field) Histogram { - return scaledHistogram{ - Histogram: h.Histogram.With(f), - scale: h.scale, - } -} - -func (h scaledHistogram) Observe(value int64) { - h.Histogram.Observe(value / h.scale) -} diff --git a/vendor/github.com/go-kit/kit/metrics/scaled_histogram_test.go b/vendor/github.com/go-kit/kit/metrics/scaled_histogram_test.go deleted file mode 100644 index 138f572..0000000 --- a/vendor/github.com/go-kit/kit/metrics/scaled_histogram_test.go +++ /dev/null @@ -1,26 +0,0 @@ -package metrics_test - -import ( - "testing" - - "github.com/go-kit/kit/metrics" - "github.com/go-kit/kit/metrics/expvar" - "github.com/go-kit/kit/metrics/teststat" -) - -func TestScaledHistogram(t *testing.T) { - var ( - quantiles = []int{50, 90, 99} - scale = int64(10) - metricName = "test_scaled_histogram" - ) - - var h metrics.Histogram - h = expvar.NewHistogram(metricName, 0, 1000, 3, quantiles...) 
- h = metrics.NewScaledHistogram(h, scale) - h = h.With(metrics.Field{Key: "a", Value: "b"}) - - const seed, mean, stdev = 333, 500, 100 // input values - teststat.PopulateNormalHistogram(t, h, seed, mean, stdev) // will be scaled down - assertExpvarNormalHistogram(t, metricName, mean/scale, stdev/scale, quantiles) -} diff --git a/vendor/github.com/go-kit/kit/metrics/statsd/emitter.go b/vendor/github.com/go-kit/kit/metrics/statsd/emitter.go deleted file mode 100644 index f47aada..0000000 --- a/vendor/github.com/go-kit/kit/metrics/statsd/emitter.go +++ /dev/null @@ -1,159 +0,0 @@ -package statsd - -import ( - "bytes" - "fmt" - "net" - "time" - - "github.com/go-kit/kit/log" - "github.com/go-kit/kit/metrics" - "github.com/go-kit/kit/util/conn" -) - -// Emitter is a struct to manage connections and orchestrate the emission of -// metrics to a Statsd process. -type Emitter struct { - prefix string - keyVals chan keyVal - mgr *conn.Manager - logger log.Logger - quitc chan chan struct{} -} - -type keyVal struct { - key string - val string -} - -func stringToKeyVal(key string, keyVals chan keyVal) chan string { - vals := make(chan string) - go func() { - for val := range vals { - keyVals <- keyVal{key: key, val: val} - } - }() - return vals -} - -// NewEmitter will return an Emitter that will prefix all metrics names with the -// given prefix. Once started, it will attempt to create a connection with the -// given network and address via `net.Dial` and periodically post metrics to the -// connection in the statsd protocol. -func NewEmitter(network, address string, metricsPrefix string, flushInterval time.Duration, logger log.Logger) *Emitter { - return NewEmitterDial(net.Dial, network, address, metricsPrefix, flushInterval, logger) -} - -// NewEmitterDial is the same as NewEmitter, but allows you to specify your own -// Dialer function. This is primarily useful for tests. -func NewEmitterDial(dialer conn.Dialer, network, address string, metricsPrefix string, flushInterval time.Duration, logger log.Logger) *Emitter { - e := &Emitter{ - prefix: metricsPrefix, - mgr: conn.NewManager(dialer, network, address, time.After, logger), - logger: logger, - keyVals: make(chan keyVal), - quitc: make(chan chan struct{}), - } - go e.loop(flushInterval) - return e -} - -// NewCounter returns a Counter that emits observations in the statsd protocol -// via the Emitter's connection manager. Observations are buffered for the -// report interval or until the buffer exceeds a max packet size, whichever -// comes first. Fields are ignored. -func (e *Emitter) NewCounter(key string) metrics.Counter { - key = e.prefix + key - return &counter{ - key: key, - c: stringToKeyVal(key, e.keyVals), - } -} - -// NewHistogram returns a Histogram that emits observations in the statsd -// protocol via the Emitter's conection manager. Observations are buffered for -// the reporting interval or until the buffer exceeds a max packet size, -// whichever comes first. Fields are ignored. -// -// NewHistogram is mapped to a statsd Timing, so observations should represent -// milliseconds. If you observe in units of nanoseconds, you can make the -// translation with a ScaledHistogram: -// -// NewScaledHistogram(histogram, time.Millisecond) -// -// You can also enforce the constraint in a typesafe way with a millisecond -// TimeHistogram: -// -// NewTimeHistogram(histogram, time.Millisecond) -// -// TODO: support for sampling. 
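The comment above notes that the emitter's Histogram maps to a statsd Timing, so observations should be milliseconds, and suggests wrapping it when you measure in nanoseconds. A hedged sketch of that wiring using the NewScaledHistogram helper deleted a little earlier; the statsd address, prefix, and metric name are placeholders:

```go
package main

import (
	"time"

	kitlog "github.com/go-kit/kit/log"
	"github.com/go-kit/kit/metrics"
	"github.com/go-kit/kit/metrics/statsd"
)

func main() {
	// Placeholder statsd address; the emitter dials lazily and flushes once
	// per interval or when its buffer fills.
	e := statsd.NewEmitter("udp", "localhost:8125", "myapp.", 5*time.Second, kitlog.NewNopLogger())
	defer e.Stop()

	// Observations are recorded in nanoseconds but emitted as milliseconds,
	// as the comment above suggests.
	var latency metrics.Histogram = e.NewHistogram("request_latency")
	latency = metrics.NewScaledHistogram(latency, int64(time.Millisecond))

	begin := time.Now()
	time.Sleep(25 * time.Millisecond) // stand-in for real work
	latency.Observe(time.Since(begin).Nanoseconds())
}
```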
-func (e *Emitter) NewHistogram(key string) metrics.Histogram { - key = e.prefix + key - return &histogram{ - key: key, - h: stringToKeyVal(key, e.keyVals), - } -} - -// NewGauge returns a Gauge that emits values in the statsd protocol via the -// the Emitter's connection manager. Values are buffered for the report -// interval or until the buffer exceeds a max packet size, whichever comes -// first. Fields are ignored. -// -// TODO: support for sampling -func (e *Emitter) NewGauge(key string) metrics.Gauge { - key = e.prefix + key - return &gauge{ - key: key, - g: stringToKeyVal(key, e.keyVals), - } -} - -func (e *Emitter) loop(d time.Duration) { - ticker := time.NewTicker(d) - defer ticker.Stop() - buf := &bytes.Buffer{} - for { - select { - case kv := <-e.keyVals: - fmt.Fprintf(buf, "%s:%s\n", kv.key, kv.val) - if buf.Len() > maxBufferSize { - e.Flush(buf) - } - - case <-ticker.C: - e.Flush(buf) - - case q := <-e.quitc: - e.Flush(buf) - close(q) - return - } - } -} - -// Stop will flush the current metrics and close the active connection. Calling -// stop more than once is a programmer error. -func (e *Emitter) Stop() { - q := make(chan struct{}) - e.quitc <- q - <-q -} - -// Flush will write the given buffer to a connection provided by the Emitter's -// connection manager. -func (e *Emitter) Flush(buf *bytes.Buffer) { - conn := e.mgr.Take() - if conn == nil { - e.logger.Log("during", "flush", "err", "connection unavailable") - return - } - - _, err := conn.Write(buf.Bytes()) - if err != nil { - e.logger.Log("during", "flush", "err", err) - } - buf.Reset() - - e.mgr.Put(err) -} diff --git a/vendor/github.com/go-kit/kit/metrics/statsd/statsd.go b/vendor/github.com/go-kit/kit/metrics/statsd/statsd.go deleted file mode 100644 index 4a87b7a..0000000 --- a/vendor/github.com/go-kit/kit/metrics/statsd/statsd.go +++ /dev/null @@ -1,215 +0,0 @@ -// Package statsd implements a statsd backend for package metrics. -// -// The current implementation ignores fields. In the future, it would be good -// to have an implementation that accepted a set of predeclared field names at -// construction time, and used field values to produce delimiter-separated -// bucket (key) names. That is, -// -// c := NewFieldedCounter(..., "path", "status") -// c.Add(1) // "myprefix.unknown.unknown:1|c\n" -// c2 := c.With("path", "foo").With("status": "200") -// c2.Add(1) // "myprefix.foo.200:1|c\n" -// -package statsd - -import ( - "bytes" - "fmt" - "io" - "log" - "math" - "time" - - "sync/atomic" - - "github.com/go-kit/kit/metrics" -) - -// statsd metrics take considerable influence from -// https://github.com/streadway/handy package statsd. - -const maxBufferSize = 1400 // bytes - -type counter struct { - key string - c chan string -} - -// NewCounter returns a Counter that emits observations in the statsd protocol -// to the passed writer. Observations are buffered for the report interval or -// until the buffer exceeds a max packet size, whichever comes first. Fields -// are ignored. -// -// TODO: support for sampling. -func NewCounter(w io.Writer, key string, reportInterval time.Duration) metrics.Counter { - return NewCounterTick(w, key, time.Tick(reportInterval)) -} - -// NewCounterTick is the same as NewCounter, but allows the user to pass in a -// ticker channel instead of invoking time.Tick. 
-func NewCounterTick(w io.Writer, key string, reportTicker <-chan time.Time) metrics.Counter { - c := &counter{ - key: key, - c: make(chan string), - } - go fwd(w, key, reportTicker, c.c) - return c -} - -func (c *counter) Name() string { return c.key } - -func (c *counter) With(metrics.Field) metrics.Counter { return c } - -func (c *counter) Add(delta uint64) { c.c <- fmt.Sprintf("%d|c", delta) } - -type gauge struct { - key string - lastValue uint64 // math.Float64frombits - g chan string -} - -// NewGauge returns a Gauge that emits values in the statsd protocol to the -// passed writer. Values are buffered for the report interval or until the -// buffer exceeds a max packet size, whichever comes first. Fields are -// ignored. -// -// TODO: support for sampling. -func NewGauge(w io.Writer, key string, reportInterval time.Duration) metrics.Gauge { - return NewGaugeTick(w, key, time.Tick(reportInterval)) -} - -// NewGaugeTick is the same as NewGauge, but allows the user to pass in a ticker -// channel instead of invoking time.Tick. -func NewGaugeTick(w io.Writer, key string, reportTicker <-chan time.Time) metrics.Gauge { - g := &gauge{ - key: key, - g: make(chan string), - } - go fwd(w, key, reportTicker, g.g) - return g -} - -func (g *gauge) Name() string { return g.key } - -func (g *gauge) With(metrics.Field) metrics.Gauge { return g } - -func (g *gauge) Add(delta float64) { - // https://github.com/etsy/statsd/blob/master/docs/metric_types.md#gauges - sign := "+" - if delta < 0 { - sign, delta = "-", -delta - } - g.g <- fmt.Sprintf("%s%f|g", sign, delta) -} - -func (g *gauge) Set(value float64) { - atomic.StoreUint64(&g.lastValue, math.Float64bits(value)) - g.g <- fmt.Sprintf("%f|g", value) -} - -func (g *gauge) Get() float64 { - return math.Float64frombits(atomic.LoadUint64(&g.lastValue)) -} - -// NewCallbackGauge emits values in the statsd protocol to the passed writer. -// It collects values every scrape interval from the callback. Values are -// buffered for the report interval or until the buffer exceeds a max packet -// size, whichever comes first. The report and scrape intervals may be the -// same. The callback determines the value, and fields are ignored, so -// NewCallbackGauge returns nothing. -func NewCallbackGauge(w io.Writer, key string, reportInterval, scrapeInterval time.Duration, callback func() float64) { - NewCallbackGaugeTick(w, key, time.Tick(reportInterval), time.Tick(scrapeInterval), callback) -} - -// NewCallbackGaugeTick is the same as NewCallbackGauge, but allows the user to -// pass in ticker channels instead of durations to control report and scrape -// intervals. -func NewCallbackGaugeTick(w io.Writer, key string, reportTicker, scrapeTicker <-chan time.Time, callback func() float64) { - go fwd(w, key, reportTicker, emitEvery(scrapeTicker, callback)) -} - -func emitEvery(emitTicker <-chan time.Time, callback func() float64) <-chan string { - c := make(chan string) - go func() { - for range emitTicker { - c <- fmt.Sprintf("%f|g", callback()) - } - }() - return c -} - -type histogram struct { - key string - h chan string -} - -// NewHistogram returns a Histogram that emits observations in the statsd -// protocol to the passed writer. Observations are buffered for the reporting -// interval or until the buffer exceeds a max packet size, whichever comes -// first. Fields are ignored. -// -// NewHistogram is mapped to a statsd Timing, so observations should represent -// milliseconds. 
If you observe in units of nanoseconds, you can make the -// translation with a ScaledHistogram: -// -// NewScaledHistogram(statsdHistogram, time.Millisecond) -// -// You can also enforce the constraint in a typesafe way with a millisecond -// TimeHistogram: -// -// NewTimeHistogram(statsdHistogram, time.Millisecond) -// -// TODO: support for sampling. -func NewHistogram(w io.Writer, key string, reportInterval time.Duration) metrics.Histogram { - return NewHistogramTick(w, key, time.Tick(reportInterval)) -} - -// NewHistogramTick is the same as NewHistogram, but allows the user to pass a -// ticker channel instead of invoking time.Tick. -func NewHistogramTick(w io.Writer, key string, reportTicker <-chan time.Time) metrics.Histogram { - h := &histogram{ - key: key, - h: make(chan string), - } - go fwd(w, key, reportTicker, h.h) - return h -} - -func (h *histogram) Name() string { return h.key } - -func (h *histogram) With(metrics.Field) metrics.Histogram { return h } - -func (h *histogram) Observe(value int64) { - h.h <- fmt.Sprintf("%d|ms", value) -} - -func (h *histogram) Distribution() ([]metrics.Bucket, []metrics.Quantile) { - // TODO(pb): no way to do this without introducing e.g. codahale/hdrhistogram - return []metrics.Bucket{}, []metrics.Quantile{} -} - -func fwd(w io.Writer, key string, reportTicker <-chan time.Time, c <-chan string) { - buf := &bytes.Buffer{} - for { - select { - case s := <-c: - fmt.Fprintf(buf, "%s:%s\n", key, s) - if buf.Len() > maxBufferSize { - flush(w, buf) - } - - case <-reportTicker: - flush(w, buf) - } - } -} - -func flush(w io.Writer, buf *bytes.Buffer) { - if buf.Len() <= 0 { - return - } - if _, err := w.Write(buf.Bytes()); err != nil { - log.Printf("error: could not write to statsd: %v", err) - } - buf.Reset() -} diff --git a/vendor/github.com/go-kit/kit/metrics/statsd/statsd_test.go b/vendor/github.com/go-kit/kit/metrics/statsd/statsd_test.go deleted file mode 100644 index 516520d..0000000 --- a/vendor/github.com/go-kit/kit/metrics/statsd/statsd_test.go +++ /dev/null @@ -1,259 +0,0 @@ -package statsd - -import ( - "bytes" - "fmt" - "net" - "strings" - "sync" - "testing" - "time" - - "github.com/go-kit/kit/log" - "github.com/go-kit/kit/util/conn" -) - -func TestEmitterCounter(t *testing.T) { - e, buf := testEmitter() - - c := e.NewCounter("test_statsd_counter") - c.Add(1) - c.Add(2) - - // give time for things to emit - time.Sleep(time.Millisecond * 250) - // force a flush and stop - e.Stop() - - want := "prefix.test_statsd_counter:1|c\nprefix.test_statsd_counter:2|c\n" - have := buf.String() - if want != have { - t.Errorf("want %q, have %q", want, have) - } -} - -func TestEmitterGauge(t *testing.T) { - e, buf := testEmitter() - - g := e.NewGauge("test_statsd_gauge") - - delta := 1.0 - g.Add(delta) - - // give time for things to emit - time.Sleep(time.Millisecond * 250) - // force a flush and stop - e.Stop() - - want := fmt.Sprintf("prefix.test_statsd_gauge:+%f|g\n", delta) - have := buf.String() - if want != have { - t.Errorf("want %q, have %q", want, have) - } -} - -func TestEmitterHistogram(t *testing.T) { - e, buf := testEmitter() - h := e.NewHistogram("test_statsd_histogram") - - h.Observe(123) - - // give time for things to emit - time.Sleep(time.Millisecond * 250) - // force a flush and stop - e.Stop() - - want := "prefix.test_statsd_histogram:123|ms\n" - have := buf.String() - if want != have { - t.Errorf("want %q, have %q", want, have) - } -} - -func TestCounter(t *testing.T) { - buf := &syncbuf{buf: &bytes.Buffer{}} - reportc := make(chan 
time.Time) - c := NewCounterTick(buf, "test_statsd_counter", reportc) - - c.Add(1) - c.Add(2) - - want, have := "test_statsd_counter:1|c\ntest_statsd_counter:2|c\n", "" - by(t, 100*time.Millisecond, func() bool { - have = buf.String() - return want == have - }, func() { - reportc <- time.Now() - }, fmt.Sprintf("want %q, have %q", want, have)) -} - -func TestGauge(t *testing.T) { - buf := &syncbuf{buf: &bytes.Buffer{}} - reportc := make(chan time.Time) - g := NewGaugeTick(buf, "test_statsd_gauge", reportc) - - delta := 1.0 - g.Add(delta) - - want, have := fmt.Sprintf("test_statsd_gauge:+%f|g\n", delta), "" - by(t, 100*time.Millisecond, func() bool { - have = buf.String() - return want == have - }, func() { - reportc <- time.Now() - }, fmt.Sprintf("want %q, have %q", want, have)) - - buf.Reset() - delta = -2.0 - g.Add(delta) - - want, have = fmt.Sprintf("test_statsd_gauge:%f|g\n", delta), "" - by(t, 100*time.Millisecond, func() bool { - have = buf.String() - return want == have - }, func() { - reportc <- time.Now() - }, fmt.Sprintf("want %q, have %q", want, have)) - - buf.Reset() - value := 3.0 - g.Set(value) - - want, have = fmt.Sprintf("test_statsd_gauge:%f|g\n", value), "" - by(t, 100*time.Millisecond, func() bool { - have = buf.String() - return want == have - }, func() { - reportc <- time.Now() - }, fmt.Sprintf("want %q, have %q", want, have)) -} - -func TestCallbackGauge(t *testing.T) { - buf := &syncbuf{buf: &bytes.Buffer{}} - reportc, scrapec := make(chan time.Time), make(chan time.Time) - value := 55.55 - cb := func() float64 { return value } - NewCallbackGaugeTick(buf, "test_statsd_callback_gauge", reportc, scrapec, cb) - - scrapec <- time.Now() - reportc <- time.Now() - - // Travis is annoying - by(t, time.Second, func() bool { - return buf.String() != "" - }, func() { - reportc <- time.Now() - }, "buffer never got write+flush") - - want, have := fmt.Sprintf("test_statsd_callback_gauge:%f|g\n", value), "" - by(t, 100*time.Millisecond, func() bool { - have = buf.String() - return strings.HasPrefix(have, want) // HasPrefix because we might get multiple writes - }, func() { - reportc <- time.Now() - }, fmt.Sprintf("want %q, have %q", want, have)) -} - -func TestHistogram(t *testing.T) { - buf := &syncbuf{buf: &bytes.Buffer{}} - reportc := make(chan time.Time) - h := NewHistogramTick(buf, "test_statsd_histogram", reportc) - - h.Observe(123) - - want, have := "test_statsd_histogram:123|ms\n", "" - by(t, 100*time.Millisecond, func() bool { - have = buf.String() - return want == have - }, func() { - reportc <- time.Now() - }, fmt.Sprintf("want %q, have %q", want, have)) -} - -func by(t *testing.T, d time.Duration, check func() bool, execute func(), msg string) { - deadline := time.Now().Add(d) - for !check() { - if time.Now().After(deadline) { - t.Fatal(msg) - } - execute() - } -} - -type syncbuf struct { - mtx sync.Mutex - buf *bytes.Buffer -} - -func (s *syncbuf) Write(p []byte) (int, error) { - s.mtx.Lock() - defer s.mtx.Unlock() - return s.buf.Write(p) -} - -func (s *syncbuf) String() string { - s.mtx.Lock() - defer s.mtx.Unlock() - return s.buf.String() -} - -func (s *syncbuf) Reset() { - s.mtx.Lock() - defer s.mtx.Unlock() - s.buf.Reset() -} - -func testEmitter() (*Emitter, *syncbuf) { - buf := &syncbuf{buf: &bytes.Buffer{}} - e := &Emitter{ - prefix: "prefix.", - mgr: conn.NewManager(mockDialer(buf), "", "", time.After, log.NewNopLogger()), - logger: log.NewNopLogger(), - keyVals: make(chan keyVal), - quitc: make(chan chan struct{}), - } - go e.loop(time.Millisecond * 20) - return 
e, buf -} - -func mockDialer(buf *syncbuf) conn.Dialer { - return func(net, addr string) (net.Conn, error) { - return &mockConn{buf}, nil - } -} - -type mockConn struct { - buf *syncbuf -} - -func (c *mockConn) Read(b []byte) (n int, err error) { - panic("not implemented") -} - -func (c *mockConn) Write(b []byte) (n int, err error) { - return c.buf.Write(b) -} - -func (c *mockConn) Close() error { - panic("not implemented") -} - -func (c *mockConn) LocalAddr() net.Addr { - panic("not implemented") -} - -func (c *mockConn) RemoteAddr() net.Addr { - panic("not implemented") -} - -func (c *mockConn) SetDeadline(t time.Time) error { - panic("not implemented") -} - -func (c *mockConn) SetReadDeadline(t time.Time) error { - panic("not implemented") -} - -func (c *mockConn) SetWriteDeadline(t time.Time) error { - panic("not implemented") -} diff --git a/vendor/github.com/go-kit/kit/metrics/teststat/circonus.go b/vendor/github.com/go-kit/kit/metrics/teststat/circonus.go deleted file mode 100644 index c070d37..0000000 --- a/vendor/github.com/go-kit/kit/metrics/teststat/circonus.go +++ /dev/null @@ -1,55 +0,0 @@ -package teststat - -import ( - "math" - "strconv" - "strings" - "testing" - - "github.com/codahale/hdrhistogram" -) - -// AssertCirconusNormalHistogram ensures the Circonus Histogram data captured in -// the result slice abides a normal distribution. -func AssertCirconusNormalHistogram(t *testing.T, mean, stdev, min, max int64, result []string) { - if len(result) <= 0 { - t.Fatal("no results") - } - - // Circonus just dumps the raw counts. We need to do our own statistical analysis. - h := hdrhistogram.New(min, max, 3) - - for _, s := range result { - // "H[1.23e04]=123" - toks := strings.Split(s, "=") - if len(toks) != 2 { - t.Fatalf("bad H value: %q", s) - } - - var bucket string - bucket = toks[0] - bucket = bucket[2 : len(bucket)-1] // "H[1.23e04]" -> "1.23e04" - f, err := strconv.ParseFloat(bucket, 64) - if err != nil { - t.Fatalf("error parsing H value: %q: %v", s, err) - } - - count, err := strconv.ParseFloat(toks[1], 64) - if err != nil { - t.Fatalf("error parsing H count: %q: %v", s, err) - } - - h.RecordValues(int64(f), int64(count)) - } - - // Apparently Circonus buckets observations by dropping a sigfig, so we have - // very coarse tolerance. - var tolerance int64 = 30 - for _, quantile := range []int{50, 90, 99} { - want := normalValueAtQuantile(mean, stdev, quantile) - have := h.ValueAtQuantile(float64(quantile)) - if int64(math.Abs(float64(want)-float64(have))) > tolerance { - t.Errorf("quantile %d: want %d, have %d", quantile, want, have) - } - } -} diff --git a/vendor/github.com/go-kit/kit/metrics/teststat/common.go b/vendor/github.com/go-kit/kit/metrics/teststat/common.go deleted file mode 100644 index 9f2d1af..0000000 --- a/vendor/github.com/go-kit/kit/metrics/teststat/common.go +++ /dev/null @@ -1,73 +0,0 @@ -// Package teststat contains helper functions for statistical testing of -// metrics implementations. -package teststat - -import ( - "math" - "math/rand" - "testing" - - "github.com/go-kit/kit/metrics" -) - -const population = 1234 - -// PopulateNormalHistogram populates the Histogram with a normal distribution -// of observations. 
-func PopulateNormalHistogram(t *testing.T, h metrics.Histogram, seed int64, mean, stdev int64) { - r := rand.New(rand.NewSource(seed)) - for i := 0; i < population; i++ { - sample := int64(r.NormFloat64()*float64(stdev) + float64(mean)) - if sample < 0 { - sample = 0 - } - h.Observe(sample) - } -} - -// https://en.wikipedia.org/wiki/Normal_distribution#Quantile_function -func normalValueAtQuantile(mean, stdev int64, quantile int) int64 { - return int64(float64(mean) + float64(stdev)*math.Sqrt2*erfinv(2*(float64(quantile)/100)-1)) -} - -// https://code.google.com/p/gostat/source/browse/stat/normal.go -func observationsLessThan(mean, stdev int64, x float64, total int) int { - cdf := ((1.0 / 2.0) * (1 + math.Erf((x-float64(mean))/(float64(stdev)*math.Sqrt2)))) - return int(cdf * float64(total)) -} - -// https://stackoverflow.com/questions/5971830/need-code-for-inverse-error-function -func erfinv(y float64) float64 { - if y < -1.0 || y > 1.0 { - panic("invalid input") - } - - var ( - a = [4]float64{0.886226899, -1.645349621, 0.914624893, -0.140543331} - b = [4]float64{-2.118377725, 1.442710462, -0.329097515, 0.012229801} - c = [4]float64{-1.970840454, -1.624906493, 3.429567803, 1.641345311} - d = [2]float64{3.543889200, 1.637067800} - ) - - const y0 = 0.7 - var x, z float64 - - if math.Abs(y) == 1.0 { - x = -y * math.Log(0.0) - } else if y < -y0 { - z = math.Sqrt(-math.Log((1.0 + y) / 2.0)) - x = -(((c[3]*z+c[2])*z+c[1])*z + c[0]) / ((d[1]*z+d[0])*z + 1.0) - } else { - if y < y0 { - z = y * y - x = y * (((a[3]*z+a[2])*z+a[1])*z + a[0]) / ((((b[3]*z+b[3])*z+b[1])*z+b[0])*z + 1.0) - } else { - z = math.Sqrt(-math.Log((1.0 - y) / 2.0)) - x = (((c[3]*z+c[2])*z+c[1])*z + c[0]) / ((d[1]*z+d[0])*z + 1.0) - } - x = x - (math.Erf(x)-y)/(2.0/math.SqrtPi*math.Exp(-x*x)) - x = x - (math.Erf(x)-y)/(2.0/math.SqrtPi*math.Exp(-x*x)) - } - - return x -} diff --git a/vendor/github.com/go-kit/kit/metrics/teststat/expvar.go b/vendor/github.com/go-kit/kit/metrics/teststat/expvar.go deleted file mode 100644 index 35215b0..0000000 --- a/vendor/github.com/go-kit/kit/metrics/teststat/expvar.go +++ /dev/null @@ -1,26 +0,0 @@ -package teststat - -import ( - "expvar" - "fmt" - "math" - "strconv" - "testing" -) - -// AssertExpvarNormalHistogram ensures the expvar Histogram referenced by -// metricName abides a normal distribution. -func AssertExpvarNormalHistogram(t *testing.T, metricName string, mean, stdev int64, quantiles []int) { - const tolerance int = 2 - for _, quantile := range quantiles { - want := normalValueAtQuantile(mean, stdev, quantile) - s := expvar.Get(fmt.Sprintf("%s_p%02d", metricName, quantile)).String() - have, err := strconv.Atoi(s) - if err != nil { - t.Fatal(err) - } - if int(math.Abs(float64(want)-float64(have))) > tolerance { - t.Errorf("quantile %d: want %d, have %d", quantile, want, have) - } - } -} diff --git a/vendor/github.com/go-kit/kit/metrics/teststat/graphite.go b/vendor/github.com/go-kit/kit/metrics/teststat/graphite.go deleted file mode 100644 index 6b52e71..0000000 --- a/vendor/github.com/go-kit/kit/metrics/teststat/graphite.go +++ /dev/null @@ -1,63 +0,0 @@ -package teststat - -import ( - "fmt" - "math" - "regexp" - "strconv" - "testing" -) - -// AssertGraphiteNormalHistogram ensures the expvar Histogram referenced by -// metricName abides a normal distribution. 
-func AssertGraphiteNormalHistogram(t *testing.T, prefix, metricName string, mean, stdev int64, quantiles []int, gPayload string) { - // check for hdr histo data - wants := map[string]int64{"count": 1234, "min": 15, "max": 83} - for key, want := range wants { - re := regexp.MustCompile(fmt.Sprintf("%s%s.%s (\\d*)", prefix, metricName, key)) - res := re.FindAllStringSubmatch(gPayload, 1) - if res == nil { - t.Error("did not find metrics log for", key, "in \n", gPayload) - continue - } - - if len(res[0]) == 1 { - t.Fatalf("%q: bad regex, please check the test scenario", key) - } - - have, err := strconv.ParseInt(res[0][1], 10, 64) - if err != nil { - t.Fatal(err) - } - - if want != have { - t.Errorf("key %s: want %d, have %d", key, want, have) - } - } - - const tolerance int = 2 - wants = map[string]int64{".std-dev": stdev, ".mean": mean} - for _, quantile := range quantiles { - wants[fmt.Sprintf("_p%02d", quantile)] = normalValueAtQuantile(mean, stdev, quantile) - } - // check for quantile gauges - for key, want := range wants { - re := regexp.MustCompile(fmt.Sprintf("%s%s%s (\\d*\\.\\d*)", prefix, metricName, key)) - res := re.FindAllStringSubmatch(gPayload, 1) - if res == nil { - t.Errorf("did not find metrics log for %s", key) - continue - } - - if len(res[0]) == 1 { - t.Fatalf("%q: bad regex found, please check the test scenario", key) - } - have, err := strconv.ParseFloat(res[0][1], 64) - if err != nil { - t.Fatal(err) - } - if int(math.Abs(float64(want)-have)) > tolerance { - t.Errorf("key %s: want %.2f, have %.2f", key, want, have) - } - } -} diff --git a/vendor/github.com/go-kit/kit/metrics/teststat/prometheus.go b/vendor/github.com/go-kit/kit/metrics/teststat/prometheus.go deleted file mode 100644 index d3cae89..0000000 --- a/vendor/github.com/go-kit/kit/metrics/teststat/prometheus.go +++ /dev/null @@ -1,93 +0,0 @@ -package teststat - -import ( - "io/ioutil" - "math" - "net/http" - "net/http/httptest" - "regexp" - "strconv" - "strings" - "testing" - - "github.com/prometheus/client_golang/prometheus" -) - -// ScrapePrometheus returns the text encoding of the current state of -// Prometheus. -func ScrapePrometheus(t *testing.T) string { - server := httptest.NewServer(prometheus.UninstrumentedHandler()) - defer server.Close() - - resp, err := http.Get(server.URL) - if err != nil { - t.Fatal(err) - } - defer resp.Body.Close() - - buf, err := ioutil.ReadAll(resp.Body) - if err != nil { - t.Fatal(err) - } - - return strings.TrimSpace(string(buf)) -} - -// AssertPrometheusNormalSummary ensures the Prometheus Summary referenced by -// name abides a normal distribution. -func AssertPrometheusNormalSummary(t *testing.T, metricName string, mean, stdev int64) { - scrape := ScrapePrometheus(t) - const tolerance int = 5 // Prometheus approximates higher quantiles badly -_-; - for quantileInt, quantileStr := range map[int]string{50: "0.5", 90: "0.9", 99: "0.99"} { - want := normalValueAtQuantile(mean, stdev, quantileInt) - have := getPrometheusQuantile(t, scrape, metricName, quantileStr) - if int(math.Abs(float64(want)-float64(have))) > tolerance { - t.Errorf("%q: want %d, have %d", quantileStr, want, have) - } - } -} - -// AssertPrometheusBucketedHistogram ensures the Prometheus Histogram -// referenced by name has observations in the expected quantity and bucket. 
-func AssertPrometheusBucketedHistogram(t *testing.T, metricName string, mean, stdev int64, buckets []float64) { - scrape := ScrapePrometheus(t) - const tolerance int = population / 50 // pretty coarse-grained - for _, bucket := range buckets { - want := observationsLessThan(mean, stdev, bucket, population) - have := getPrometheusLessThan(t, scrape, metricName, strconv.FormatFloat(bucket, 'f', 0, 64)) - if int(math.Abs(float64(want)-float64(have))) > tolerance { - t.Errorf("%.0f: want %d, have %d", bucket, want, have) - } - } -} - -func getPrometheusQuantile(t *testing.T, scrape, name, quantileStr string) int { - matches := regexp.MustCompile(name+`{quantile="`+quantileStr+`"} ([0-9]+)`).FindAllStringSubmatch(scrape, -1) - if len(matches) < 1 { - t.Fatalf("%q: quantile %q not found in scrape", name, quantileStr) - } - if len(matches[0]) < 2 { - t.Fatalf("%q: quantile %q not found in scrape", name, quantileStr) - } - i, err := strconv.Atoi(matches[0][1]) - if err != nil { - t.Fatal(err) - } - return i -} - -func getPrometheusLessThan(t *testing.T, scrape, name, target string) int { - matches := regexp.MustCompile(name+`{le="`+target+`"} ([0-9]+)`).FindAllStringSubmatch(scrape, -1) - if len(matches) < 1 { - t.Logf(">>>\n%s\n", scrape) - t.Fatalf("%q: bucket %q not found in scrape", name, target) - } - if len(matches[0]) < 2 { - t.Fatalf("%q: bucket %q not found in scrape", name, target) - } - i, err := strconv.Atoi(matches[0][1]) - if err != nil { - t.Fatal(err) - } - return i -} diff --git a/vendor/github.com/go-kit/kit/metrics/time_histogram.go b/vendor/github.com/go-kit/kit/metrics/time_histogram.go deleted file mode 100644 index a8fc54c..0000000 --- a/vendor/github.com/go-kit/kit/metrics/time_histogram.go +++ /dev/null @@ -1,34 +0,0 @@ -package metrics - -import "time" - -// TimeHistogram is a convenience wrapper for a Histogram of time.Durations. -type TimeHistogram interface { - With(Field) TimeHistogram - Observe(time.Duration) -} - -type timeHistogram struct { - unit time.Duration - Histogram -} - -// NewTimeHistogram returns a TimeHistogram wrapper around the passed -// Histogram, in units of unit. -func NewTimeHistogram(unit time.Duration, h Histogram) TimeHistogram { - return &timeHistogram{ - unit: unit, - Histogram: h, - } -} - -func (h *timeHistogram) With(f Field) TimeHistogram { - return &timeHistogram{ - Histogram: h.Histogram.With(f), - unit: h.unit, - } -} - -func (h *timeHistogram) Observe(d time.Duration) { - h.Histogram.Observe(int64(d / h.unit)) -} diff --git a/vendor/github.com/go-kit/kit/metrics/time_histogram_test.go b/vendor/github.com/go-kit/kit/metrics/time_histogram_test.go deleted file mode 100644 index e7ea24b..0000000 --- a/vendor/github.com/go-kit/kit/metrics/time_histogram_test.go +++ /dev/null @@ -1,32 +0,0 @@ -package metrics_test - -import ( - "math/rand" - "testing" - "time" - - "github.com/go-kit/kit/metrics" - "github.com/go-kit/kit/metrics/expvar" -) - -func TestTimeHistogram(t *testing.T) { - var ( - metricName = "test_time_histogram" - minValue = int64(0) - maxValue = int64(200) - sigfigs = 3 - quantiles = []int{50, 90, 99} - h = expvar.NewHistogram(metricName, minValue, maxValue, sigfigs, quantiles...) 
- th = metrics.NewTimeHistogram(time.Millisecond, h).With(metrics.Field{Key: "a", Value: "b"}) - ) - - const seed, mean, stdev int64 = 321, 100, 20 - r := rand.New(rand.NewSource(seed)) - - for i := 0; i < 4321; i++ { - sample := time.Duration(r.NormFloat64()*float64(stdev)+float64(mean)) * time.Millisecond - th.Observe(sample) - } - - assertExpvarNormalHistogram(t, metricName, mean, stdev, quantiles) -} diff --git a/vendor/github.com/go-kit/kit/ratelimit/token_bucket.go b/vendor/github.com/go-kit/kit/ratelimit/token_bucket.go deleted file mode 100644 index 48a4f60..0000000 --- a/vendor/github.com/go-kit/kit/ratelimit/token_bucket.go +++ /dev/null @@ -1,42 +0,0 @@ -package ratelimit - -import ( - "errors" - "time" - - "github.com/juju/ratelimit" - "golang.org/x/net/context" - - "github.com/go-kit/kit/endpoint" -) - -// ErrLimited is returned in the request path when the rate limiter is -// triggered and the request is rejected. -var ErrLimited = errors.New("rate limit exceeded") - -// NewTokenBucketLimiter returns an endpoint.Middleware that acts as a rate -// limiter based on a token-bucket algorithm. Requests that would exceed the -// maximum request rate are simply rejected with an error. -func NewTokenBucketLimiter(tb *ratelimit.Bucket) endpoint.Middleware { - return func(next endpoint.Endpoint) endpoint.Endpoint { - return func(ctx context.Context, request interface{}) (interface{}, error) { - if tb.TakeAvailable(1) == 0 { - return nil, ErrLimited - } - return next(ctx, request) - } - } -} - -// NewTokenBucketThrottler returns an endpoint.Middleware that acts as a -// request throttler based on a token-bucket algorithm. Requests that would -// exceed the maximum request rate are delayed via the parameterized sleep -// function. By default you may pass time.Sleep. -func NewTokenBucketThrottler(tb *ratelimit.Bucket, sleep func(time.Duration)) endpoint.Middleware { - return func(next endpoint.Endpoint) endpoint.Endpoint { - return func(ctx context.Context, request interface{}) (interface{}, error) { - sleep(tb.Take(1)) - return next(ctx, request) - } - } -} diff --git a/vendor/github.com/go-kit/kit/ratelimit/token_bucket_test.go b/vendor/github.com/go-kit/kit/ratelimit/token_bucket_test.go deleted file mode 100644 index 6f815de..0000000 --- a/vendor/github.com/go-kit/kit/ratelimit/token_bucket_test.go +++ /dev/null @@ -1,55 +0,0 @@ -package ratelimit_test - -import ( - "math" - "testing" - "time" - - jujuratelimit "github.com/juju/ratelimit" - "golang.org/x/net/context" - - "github.com/go-kit/kit/endpoint" - "github.com/go-kit/kit/ratelimit" -) - -func TestTokenBucketLimiter(t *testing.T) { - e := func(context.Context, interface{}) (interface{}, error) { return struct{}{}, nil } - for _, n := range []int{1, 2, 100} { - tb := jujuratelimit.NewBucketWithRate(float64(n), int64(n)) - testLimiter(t, ratelimit.NewTokenBucketLimiter(tb)(e), n) - } -} - -func TestTokenBucketThrottler(t *testing.T) { - d := time.Duration(0) - s := func(d0 time.Duration) { d = d0 } - - e := func(context.Context, interface{}) (interface{}, error) { return struct{}{}, nil } - e = ratelimit.NewTokenBucketThrottler(jujuratelimit.NewBucketWithRate(1, 1), s)(e) - - // First request should go through with no delay. - e(context.Background(), struct{}{}) - if want, have := time.Duration(0), d; want != have { - t.Errorf("want %s, have %s", want, have) - } - - // Next request should request a ~1s sleep. 
- e(context.Background(), struct{}{}) - if want, have, tol := time.Second, d, time.Millisecond; math.Abs(float64(want-have)) > float64(tol) { - t.Errorf("want %s, have %s", want, have) - } -} - -func testLimiter(t *testing.T, e endpoint.Endpoint, rate int) { - // First requests should succeed. - for i := 0; i < rate; i++ { - if _, err := e(context.Background(), struct{}{}); err != nil { - t.Fatalf("rate=%d: request %d/%d failed: %v", rate, i+1, rate, err) - } - } - - // Next request should fail. - if _, err := e(context.Background(), struct{}{}); err != ratelimit.ErrLimited { - t.Errorf("rate=%d: want %v, have %v", rate, ratelimit.ErrLimited, err) - } -} diff --git a/vendor/github.com/go-kit/kit/sd/cache/benchmark_test.go b/vendor/github.com/go-kit/kit/sd/cache/benchmark_test.go deleted file mode 100644 index 41f1821..0000000 --- a/vendor/github.com/go-kit/kit/sd/cache/benchmark_test.go +++ /dev/null @@ -1,29 +0,0 @@ -package cache - -import ( - "io" - "testing" - - "github.com/go-kit/kit/endpoint" - "github.com/go-kit/kit/log" -) - -func BenchmarkEndpoints(b *testing.B) { - var ( - ca = make(closer) - cb = make(closer) - cmap = map[string]io.Closer{"a": ca, "b": cb} - factory = func(instance string) (endpoint.Endpoint, io.Closer, error) { return endpoint.Nop, cmap[instance], nil } - c = New(factory, log.NewNopLogger()) - ) - - b.ReportAllocs() - - c.Update([]string{"a", "b"}) - - b.RunParallel(func(pb *testing.PB) { - for pb.Next() { - c.Endpoints() - } - }) -} diff --git a/vendor/github.com/go-kit/kit/sd/cache/cache.go b/vendor/github.com/go-kit/kit/sd/cache/cache.go deleted file mode 100644 index 82af86b..0000000 --- a/vendor/github.com/go-kit/kit/sd/cache/cache.go +++ /dev/null @@ -1,96 +0,0 @@ -package cache - -import ( - "io" - "sort" - "sync" - "sync/atomic" - - "github.com/go-kit/kit/endpoint" - "github.com/go-kit/kit/log" - "github.com/go-kit/kit/sd" -) - -// Cache collects the most recent set of endpoints from a service discovery -// system via a subscriber, and makes them available to consumers. Cache is -// meant to be embedded inside of a concrete subscriber, and can serve Service -// invocations directly. -type Cache struct { - mtx sync.RWMutex - factory sd.Factory - cache map[string]endpointCloser - slice atomic.Value // []endpoint.Endpoint - logger log.Logger -} - -type endpointCloser struct { - endpoint.Endpoint - io.Closer -} - -// New returns a new, empty endpoint cache. -func New(factory sd.Factory, logger log.Logger) *Cache { - return &Cache{ - factory: factory, - cache: map[string]endpointCloser{}, - logger: logger, - } -} - -// Update should be invoked by clients with a complete set of current instance -// strings whenever that set changes. The cache manufactures new endpoints via -// the factory, closes old endpoints when they disappear, and persists existing -// endpoints if they survive through an update. -func (c *Cache) Update(instances []string) { - c.mtx.Lock() - defer c.mtx.Unlock() - - // Deterministic order (for later). - sort.Strings(instances) - - // Produce the current set of services. - cache := make(map[string]endpointCloser, len(instances)) - for _, instance := range instances { - // If it already exists, just copy it over. - if sc, ok := c.cache[instance]; ok { - cache[instance] = sc - delete(c.cache, instance) - continue - } - - // If it doesn't exist, create it. 
- service, closer, err := c.factory(instance) - if err != nil { - c.logger.Log("instance", instance, "err", err) - continue - } - cache[instance] = endpointCloser{service, closer} - } - - // Close any leftover endpoints. - for _, sc := range c.cache { - if sc.Closer != nil { - sc.Closer.Close() - } - } - - // Populate the slice of endpoints. - slice := make([]endpoint.Endpoint, 0, len(cache)) - for _, instance := range instances { - // A bad factory may mean an instance is not present. - if _, ok := cache[instance]; !ok { - continue - } - slice = append(slice, cache[instance].Endpoint) - } - - // Swap and trigger GC for old copies. - c.slice.Store(slice) - c.cache = cache -} - -// Endpoints yields the current set of (presumably identical) endpoints, ordered -// lexicographically by the corresponding instance string. -func (c *Cache) Endpoints() []endpoint.Endpoint { - return c.slice.Load().([]endpoint.Endpoint) -} diff --git a/vendor/github.com/go-kit/kit/sd/cache/cache_test.go b/vendor/github.com/go-kit/kit/sd/cache/cache_test.go deleted file mode 100644 index be9abaf..0000000 --- a/vendor/github.com/go-kit/kit/sd/cache/cache_test.go +++ /dev/null @@ -1,91 +0,0 @@ -package cache - -import ( - "errors" - "io" - "testing" - "time" - - "github.com/go-kit/kit/endpoint" - "github.com/go-kit/kit/log" -) - -func TestCache(t *testing.T) { - var ( - ca = make(closer) - cb = make(closer) - c = map[string]io.Closer{"a": ca, "b": cb} - f = func(instance string) (endpoint.Endpoint, io.Closer, error) { return endpoint.Nop, c[instance], nil } - cache = New(f, log.NewNopLogger()) - ) - - // Populate - cache.Update([]string{"a", "b"}) - select { - case <-ca: - t.Errorf("endpoint a closed, not good") - case <-cb: - t.Errorf("endpoint b closed, not good") - case <-time.After(time.Millisecond): - t.Logf("no closures yet, good") - } - if want, have := 2, len(cache.Endpoints()); want != have { - t.Errorf("want %d, have %d", want, have) - } - - // Duplicate, should be no-op - cache.Update([]string{"a", "b"}) - select { - case <-ca: - t.Errorf("endpoint a closed, not good") - case <-cb: - t.Errorf("endpoint b closed, not good") - case <-time.After(time.Millisecond): - t.Logf("no closures yet, good") - } - if want, have := 2, len(cache.Endpoints()); want != have { - t.Errorf("want %d, have %d", want, have) - } - - // Delete b - go cache.Update([]string{"a"}) - select { - case <-ca: - t.Errorf("endpoint a closed, not good") - case <-cb: - t.Logf("endpoint b closed, good") - case <-time.After(time.Second): - t.Errorf("didn't close the deleted instance in time") - } - if want, have := 1, len(cache.Endpoints()); want != have { - t.Errorf("want %d, have %d", want, have) - } - - // Delete a - go cache.Update([]string{}) - select { - // case <-cb: will succeed, as it's closed - case <-ca: - t.Logf("endpoint a closed, good") - case <-time.After(time.Second): - t.Errorf("didn't close the deleted instance in time") - } - if want, have := 0, len(cache.Endpoints()); want != have { - t.Errorf("want %d, have %d", want, have) - } -} - -func TestBadFactory(t *testing.T) { - cache := New(func(string) (endpoint.Endpoint, io.Closer, error) { - return nil, nil, errors.New("bad factory") - }, log.NewNopLogger()) - - cache.Update([]string{"foo:1234", "bar:5678"}) - if want, have := 0, len(cache.Endpoints()); want != have { - t.Errorf("want %d, have %d", want, have) - } -} - -type closer chan struct{} - -func (c closer) Close() error { close(c); return nil } diff --git a/vendor/github.com/go-kit/kit/sd/consul/client.go 
b/vendor/github.com/go-kit/kit/sd/consul/client.go deleted file mode 100644 index 4d88ce3..0000000 --- a/vendor/github.com/go-kit/kit/sd/consul/client.go +++ /dev/null @@ -1,37 +0,0 @@ -package consul - -import consul "github.com/hashicorp/consul/api" - -// Client is a wrapper around the Consul API. -type Client interface { - // Register a service with the local agent. - Register(r *consul.AgentServiceRegistration) error - - // Deregister a service with the local agent. - Deregister(r *consul.AgentServiceRegistration) error - - // Service - Service(service, tag string, passingOnly bool, queryOpts *consul.QueryOptions) ([]*consul.ServiceEntry, *consul.QueryMeta, error) -} - -type client struct { - consul *consul.Client -} - -// NewClient returns an implementation of the Client interface, wrapping a -// concrete Consul client. -func NewClient(c *consul.Client) Client { - return &client{consul: c} -} - -func (c *client) Register(r *consul.AgentServiceRegistration) error { - return c.consul.Agent().ServiceRegister(r) -} - -func (c *client) Deregister(r *consul.AgentServiceRegistration) error { - return c.consul.Agent().ServiceDeregister(r.ID) -} - -func (c *client) Service(service, tag string, passingOnly bool, queryOpts *consul.QueryOptions) ([]*consul.ServiceEntry, *consul.QueryMeta, error) { - return c.consul.Health().Service(service, tag, passingOnly, queryOpts) -} diff --git a/vendor/github.com/go-kit/kit/sd/consul/client_test.go b/vendor/github.com/go-kit/kit/sd/consul/client_test.go deleted file mode 100644 index cf02aea..0000000 --- a/vendor/github.com/go-kit/kit/sd/consul/client_test.go +++ /dev/null @@ -1,156 +0,0 @@ -package consul - -import ( - "errors" - "io" - "reflect" - "testing" - - stdconsul "github.com/hashicorp/consul/api" - "golang.org/x/net/context" - - "github.com/go-kit/kit/endpoint" -) - -func TestClientRegistration(t *testing.T) { - c := newTestClient(nil) - - services, _, err := c.Service(testRegistration.Name, "", true, &stdconsul.QueryOptions{}) - if err != nil { - t.Error(err) - } - if want, have := 0, len(services); want != have { - t.Errorf("want %d, have %d", want, have) - } - - if err := c.Register(testRegistration); err != nil { - t.Error(err) - } - - if err := c.Register(testRegistration); err == nil { - t.Errorf("want error, have %v", err) - } - - services, _, err = c.Service(testRegistration.Name, "", true, &stdconsul.QueryOptions{}) - if err != nil { - t.Error(err) - } - if want, have := 1, len(services); want != have { - t.Errorf("want %d, have %d", want, have) - } - - if err := c.Deregister(testRegistration); err != nil { - t.Error(err) - } - - if err := c.Deregister(testRegistration); err == nil { - t.Errorf("want error, have %v", err) - } - - services, _, err = c.Service(testRegistration.Name, "", true, &stdconsul.QueryOptions{}) - if err != nil { - t.Error(err) - } - if want, have := 0, len(services); want != have { - t.Errorf("want %d, have %d", want, have) - } -} - -type testClient struct { - entries []*stdconsul.ServiceEntry -} - -func newTestClient(entries []*stdconsul.ServiceEntry) *testClient { - return &testClient{ - entries: entries, - } -} - -var _ Client = &testClient{} - -func (c *testClient) Service(service, tag string, _ bool, opts *stdconsul.QueryOptions) ([]*stdconsul.ServiceEntry, *stdconsul.QueryMeta, error) { - var results []*stdconsul.ServiceEntry - - for _, entry := range c.entries { - if entry.Service.Service != service { - continue - } - if tag != "" { - tagMap := map[string]struct{}{} - - for _, t := range entry.Service.Tags { 
- tagMap[t] = struct{}{} - } - - if _, ok := tagMap[tag]; !ok { - continue - } - } - - results = append(results, entry) - } - - return results, &stdconsul.QueryMeta{}, nil -} - -func (c *testClient) Register(r *stdconsul.AgentServiceRegistration) error { - toAdd := registration2entry(r) - - for _, entry := range c.entries { - if reflect.DeepEqual(*entry, *toAdd) { - return errors.New("duplicate") - } - } - - c.entries = append(c.entries, toAdd) - return nil -} - -func (c *testClient) Deregister(r *stdconsul.AgentServiceRegistration) error { - toDelete := registration2entry(r) - - var newEntries []*stdconsul.ServiceEntry - for _, entry := range c.entries { - if reflect.DeepEqual(*entry, *toDelete) { - continue - } - newEntries = append(newEntries, entry) - } - if len(newEntries) == len(c.entries) { - return errors.New("not found") - } - - c.entries = newEntries - return nil -} - -func registration2entry(r *stdconsul.AgentServiceRegistration) *stdconsul.ServiceEntry { - return &stdconsul.ServiceEntry{ - Node: &stdconsul.Node{ - Node: "some-node", - Address: r.Address, - }, - Service: &stdconsul.AgentService{ - ID: r.ID, - Service: r.Name, - Tags: r.Tags, - Port: r.Port, - Address: r.Address, - }, - // Checks ignored - } -} - -func testFactory(instance string) (endpoint.Endpoint, io.Closer, error) { - return func(context.Context, interface{}) (interface{}, error) { - return instance, nil - }, nil, nil -} - -var testRegistration = &stdconsul.AgentServiceRegistration{ - ID: "my-id", - Name: "my-name", - Tags: []string{"my-tag-1", "my-tag-2"}, - Port: 12345, - Address: "my-address", -} diff --git a/vendor/github.com/go-kit/kit/sd/consul/integration_test.go b/vendor/github.com/go-kit/kit/sd/consul/integration_test.go deleted file mode 100644 index 495adad..0000000 --- a/vendor/github.com/go-kit/kit/sd/consul/integration_test.go +++ /dev/null @@ -1,86 +0,0 @@ -// +build integration - -package consul - -import ( - "io" - "os" - "testing" - "time" - - "github.com/go-kit/kit/log" - "github.com/go-kit/kit/service" - stdconsul "github.com/hashicorp/consul/api" -) - -func TestIntegration(t *testing.T) { - // Connect to Consul. - // docker run -p 8500:8500 progrium/consul -server -bootstrap - consulAddr := os.Getenv("CONSUL_ADDRESS") - if consulAddr == "" { - t.Fatal("CONSUL_ADDRESS is not set") - } - stdClient, err := stdconsul.NewClient(&stdconsul.Config{ - Address: consulAddr, - }) - if err != nil { - t.Fatal(err) - } - client := NewClient(stdClient) - logger := log.NewLogfmtLogger(os.Stderr) - - // Produce a fake service registration. - r := &stdconsul.AgentServiceRegistration{ - ID: "my-service-ID", - Name: "my-service-name", - Tags: []string{"alpha", "beta"}, - Port: 12345, - Address: "my-address", - EnableTagOverride: false, - // skipping check(s) - } - - // Build a subscriber on r.Name + r.Tags. - factory := func(instance string) (service.Service, io.Closer, error) { - t.Logf("factory invoked for %q", instance) - return service.Fixed{}, nil, nil - } - subscriber, err := NewSubscriber( - client, - factory, - log.NewContext(logger).With("component", "subscriber"), - r.Name, - r.Tags, - true, - ) - if err != nil { - t.Fatal(err) - } - - time.Sleep(time.Second) - - // Before we publish, we should have no services. - services, err := subscriber.Services() - if err != nil { - t.Error(err) - } - if want, have := 0, len(services); want != have { - t.Errorf("want %d, have %d", want, have) - } - - // Build a registrar for r. 
- registrar := NewRegistrar(client, r, log.NewContext(logger).With("component", "registrar")) - registrar.Register() - defer registrar.Deregister() - - time.Sleep(time.Second) - - // Now we should have one active service. - services, err = subscriber.Services() - if err != nil { - t.Error(err) - } - if want, have := 1, len(services); want != have { - t.Errorf("want %d, have %d", want, have) - } -} diff --git a/vendor/github.com/go-kit/kit/sd/consul/registrar.go b/vendor/github.com/go-kit/kit/sd/consul/registrar.go deleted file mode 100644 index e89fef6..0000000 --- a/vendor/github.com/go-kit/kit/sd/consul/registrar.go +++ /dev/null @@ -1,44 +0,0 @@ -package consul - -import ( - "fmt" - - stdconsul "github.com/hashicorp/consul/api" - - "github.com/go-kit/kit/log" -) - -// Registrar registers service instance liveness information to Consul. -type Registrar struct { - client Client - registration *stdconsul.AgentServiceRegistration - logger log.Logger -} - -// NewRegistrar returns a Consul Registrar acting on the provided catalog -// registration. -func NewRegistrar(client Client, r *stdconsul.AgentServiceRegistration, logger log.Logger) *Registrar { - return &Registrar{ - client: client, - registration: r, - logger: log.NewContext(logger).With("service", r.Name, "tags", fmt.Sprint(r.Tags), "address", r.Address), - } -} - -// Register implements sd.Registrar interface. -func (p *Registrar) Register() { - if err := p.client.Register(p.registration); err != nil { - p.logger.Log("err", err) - } else { - p.logger.Log("action", "register") - } -} - -// Deregister implements sd.Registrar interface. -func (p *Registrar) Deregister() { - if err := p.client.Deregister(p.registration); err != nil { - p.logger.Log("err", err) - } else { - p.logger.Log("action", "deregister") - } -} diff --git a/vendor/github.com/go-kit/kit/sd/consul/registrar_test.go b/vendor/github.com/go-kit/kit/sd/consul/registrar_test.go deleted file mode 100644 index edc7723..0000000 --- a/vendor/github.com/go-kit/kit/sd/consul/registrar_test.go +++ /dev/null @@ -1,27 +0,0 @@ -package consul - -import ( - "testing" - - stdconsul "github.com/hashicorp/consul/api" - - "github.com/go-kit/kit/log" -) - -func TestRegistrar(t *testing.T) { - client := newTestClient([]*stdconsul.ServiceEntry{}) - p := NewRegistrar(client, testRegistration, log.NewNopLogger()) - if want, have := 0, len(client.entries); want != have { - t.Errorf("want %d, have %d", want, have) - } - - p.Register() - if want, have := 1, len(client.entries); want != have { - t.Errorf("want %d, have %d", want, have) - } - - p.Deregister() - if want, have := 0, len(client.entries); want != have { - t.Errorf("want %d, have %d", want, have) - } -} diff --git a/vendor/github.com/go-kit/kit/sd/consul/subscriber.go b/vendor/github.com/go-kit/kit/sd/consul/subscriber.go deleted file mode 100644 index ee3ae34..0000000 --- a/vendor/github.com/go-kit/kit/sd/consul/subscriber.go +++ /dev/null @@ -1,166 +0,0 @@ -package consul - -import ( - "fmt" - "io" - - consul "github.com/hashicorp/consul/api" - - "github.com/go-kit/kit/endpoint" - "github.com/go-kit/kit/log" - "github.com/go-kit/kit/sd" - "github.com/go-kit/kit/sd/cache" -) - -const defaultIndex = 0 - -// Subscriber yields endpoints for a service in Consul. Updates to the service -// are watched and will update the Subscriber endpoints. 
-type Subscriber struct { - cache *cache.Cache - client Client - logger log.Logger - service string - tags []string - passingOnly bool - endpointsc chan []endpoint.Endpoint - quitc chan struct{} -} - -var _ sd.Subscriber = &Subscriber{} - -// NewSubscriber returns a Consul subscriber which returns endpoints for the -// requested service. It only returns instances for which all of the passed tags -// are present. -func NewSubscriber(client Client, factory sd.Factory, logger log.Logger, service string, tags []string, passingOnly bool) *Subscriber { - s := &Subscriber{ - cache: cache.New(factory, logger), - client: client, - logger: log.NewContext(logger).With("service", service, "tags", fmt.Sprint(tags)), - service: service, - tags: tags, - passingOnly: passingOnly, - quitc: make(chan struct{}), - } - - instances, index, err := s.getInstances(defaultIndex, nil) - if err == nil { - s.logger.Log("instances", len(instances)) - } else { - s.logger.Log("err", err) - } - - s.cache.Update(instances) - go s.loop(index) - return s -} - -// Endpoints implements the Subscriber interface. -func (s *Subscriber) Endpoints() ([]endpoint.Endpoint, error) { - return s.cache.Endpoints(), nil -} - -// Stop terminates the subscriber. -func (s *Subscriber) Stop() { - close(s.quitc) -} - -func (s *Subscriber) loop(lastIndex uint64) { - var ( - instances []string - err error - ) - for { - instances, lastIndex, err = s.getInstances(lastIndex, s.quitc) - switch { - case err == io.EOF: - return // stopped via quitc - case err != nil: - s.logger.Log("err", err) - default: - s.cache.Update(instances) - } - } -} - -func (s *Subscriber) getInstances(lastIndex uint64, interruptc chan struct{}) ([]string, uint64, error) { - tag := "" - if len(s.tags) > 0 { - tag = s.tags[0] - } - - // Consul doesn't support more than one tag in its service query method. - // https://github.com/hashicorp/consul/issues/294 - // Hashi suggest prepared queries, but they don't support blocking. - // https://www.consul.io/docs/agent/http/query.html#execute - // If we want blocking for efficiency, we must filter tags manually. - - type response struct { - instances []string - index uint64 - } - - var ( - errc = make(chan error, 1) - resc = make(chan response, 1) - ) - - go func() { - entries, meta, err := s.client.Service(s.service, tag, s.passingOnly, &consul.QueryOptions{ - WaitIndex: lastIndex, - }) - if err != nil { - errc <- err - return - } - if len(s.tags) > 1 { - entries = filterEntries(entries, s.tags[1:]...) 
- } - resc <- response{ - instances: makeInstances(entries), - index: meta.LastIndex, - } - }() - - select { - case err := <-errc: - return nil, 0, err - case res := <-resc: - return res.instances, res.index, nil - case <-interruptc: - return nil, 0, io.EOF - } -} - -func filterEntries(entries []*consul.ServiceEntry, tags ...string) []*consul.ServiceEntry { - var es []*consul.ServiceEntry - -ENTRIES: - for _, entry := range entries { - ts := make(map[string]struct{}, len(entry.Service.Tags)) - for _, tag := range entry.Service.Tags { - ts[tag] = struct{}{} - } - - for _, tag := range tags { - if _, ok := ts[tag]; !ok { - continue ENTRIES - } - } - es = append(es, entry) - } - - return es -} - -func makeInstances(entries []*consul.ServiceEntry) []string { - instances := make([]string, len(entries)) - for i, entry := range entries { - addr := entry.Node.Address - if entry.Service.Address != "" { - addr = entry.Service.Address - } - instances[i] = fmt.Sprintf("%s:%d", addr, entry.Service.Port) - } - return instances -} diff --git a/vendor/github.com/go-kit/kit/sd/consul/subscriber_test.go b/vendor/github.com/go-kit/kit/sd/consul/subscriber_test.go deleted file mode 100644 index f581216..0000000 --- a/vendor/github.com/go-kit/kit/sd/consul/subscriber_test.go +++ /dev/null @@ -1,138 +0,0 @@ -package consul - -import ( - "testing" - - consul "github.com/hashicorp/consul/api" - "golang.org/x/net/context" - - "github.com/go-kit/kit/log" -) - -var consulState = []*consul.ServiceEntry{ - { - Node: &consul.Node{ - Address: "10.0.0.0", - Node: "app00.local", - }, - Service: &consul.AgentService{ - ID: "search-api-0", - Port: 8000, - Service: "search", - Tags: []string{ - "api", - "v1", - }, - }, - }, - { - Node: &consul.Node{ - Address: "10.0.0.1", - Node: "app01.local", - }, - Service: &consul.AgentService{ - ID: "search-api-1", - Port: 8001, - Service: "search", - Tags: []string{ - "api", - "v2", - }, - }, - }, - { - Node: &consul.Node{ - Address: "10.0.0.1", - Node: "app01.local", - }, - Service: &consul.AgentService{ - Address: "10.0.0.10", - ID: "search-db-0", - Port: 9000, - Service: "search", - Tags: []string{ - "db", - }, - }, - }, -} - -func TestSubscriber(t *testing.T) { - var ( - logger = log.NewNopLogger() - client = newTestClient(consulState) - ) - - s := NewSubscriber(client, testFactory, logger, "search", []string{"api"}, true) - defer s.Stop() - - endpoints, err := s.Endpoints() - if err != nil { - t.Fatal(err) - } - - if want, have := 2, len(endpoints); want != have { - t.Errorf("want %d, have %d", want, have) - } -} - -func TestSubscriberNoService(t *testing.T) { - var ( - logger = log.NewNopLogger() - client = newTestClient(consulState) - ) - - s := NewSubscriber(client, testFactory, logger, "feed", []string{}, true) - defer s.Stop() - - endpoints, err := s.Endpoints() - if err != nil { - t.Fatal(err) - } - - if want, have := 0, len(endpoints); want != have { - t.Fatalf("want %d, have %d", want, have) - } -} - -func TestSubscriberWithTags(t *testing.T) { - var ( - logger = log.NewNopLogger() - client = newTestClient(consulState) - ) - - s := NewSubscriber(client, testFactory, logger, "search", []string{"api", "v2"}, true) - defer s.Stop() - - endpoints, err := s.Endpoints() - if err != nil { - t.Fatal(err) - } - - if want, have := 1, len(endpoints); want != have { - t.Fatalf("want %d, have %d", want, have) - } -} - -func TestSubscriberAddressOverride(t *testing.T) { - s := NewSubscriber(newTestClient(consulState), testFactory, log.NewNopLogger(), "search", []string{"db"}, true) - 
defer s.Stop() - - endpoints, err := s.Endpoints() - if err != nil { - t.Fatal(err) - } - - if want, have := 1, len(endpoints); want != have { - t.Fatalf("want %d, have %d", want, have) - } - - response, err := endpoints[0](context.Background(), struct{}{}) - if err != nil { - t.Fatal(err) - } - - if want, have := "10.0.0.10:9000", response.(string); want != have { - t.Errorf("want %q, have %q", want, have) - } -} diff --git a/vendor/github.com/go-kit/kit/sd/dnssrv/lookup.go b/vendor/github.com/go-kit/kit/sd/dnssrv/lookup.go deleted file mode 100644 index 9d46ea6..0000000 --- a/vendor/github.com/go-kit/kit/sd/dnssrv/lookup.go +++ /dev/null @@ -1,7 +0,0 @@ -package dnssrv - -import "net" - -// Lookup is a function that resolves a DNS SRV record to multiple addresses. -// It has the same signature as net.LookupSRV. -type Lookup func(service, proto, name string) (cname string, addrs []*net.SRV, err error) diff --git a/vendor/github.com/go-kit/kit/sd/dnssrv/subscriber.go b/vendor/github.com/go-kit/kit/sd/dnssrv/subscriber.go deleted file mode 100644 index 422fdaa..0000000 --- a/vendor/github.com/go-kit/kit/sd/dnssrv/subscriber.go +++ /dev/null @@ -1,100 +0,0 @@ -package dnssrv - -import ( - "fmt" - "net" - "time" - - "github.com/go-kit/kit/endpoint" - "github.com/go-kit/kit/log" - "github.com/go-kit/kit/sd" - "github.com/go-kit/kit/sd/cache" -) - -// Subscriber yields endpoints taken from the named DNS SRV record. The name is -// resolved on a fixed schedule. Priorities and weights are ignored. -type Subscriber struct { - name string - cache *cache.Cache - logger log.Logger - quit chan struct{} -} - -// NewSubscriber returns a DNS SRV subscriber. -func NewSubscriber( - name string, - ttl time.Duration, - factory sd.Factory, - logger log.Logger, -) *Subscriber { - return NewSubscriberDetailed(name, time.NewTicker(ttl), net.LookupSRV, factory, logger) -} - -// NewSubscriberDetailed is the same as NewSubscriber, but allows users to -// provide an explicit lookup refresh ticker instead of a TTL, and specify the -// lookup function instead of using net.LookupSRV. -func NewSubscriberDetailed( - name string, - refresh *time.Ticker, - lookup Lookup, - factory sd.Factory, - logger log.Logger, -) *Subscriber { - p := &Subscriber{ - name: name, - cache: cache.New(factory, logger), - logger: logger, - quit: make(chan struct{}), - } - - instances, err := p.resolve(lookup) - if err == nil { - logger.Log("name", name, "instances", len(instances)) - } else { - logger.Log("name", name, "err", err) - } - p.cache.Update(instances) - - go p.loop(refresh, lookup) - return p -} - -// Stop terminates the Subscriber. -func (p *Subscriber) Stop() { - close(p.quit) -} - -func (p *Subscriber) loop(t *time.Ticker, lookup Lookup) { - defer t.Stop() - for { - select { - case <-t.C: - instances, err := p.resolve(lookup) - if err != nil { - p.logger.Log("name", p.name, "err", err) - continue // don't replace potentially-good with bad - } - p.cache.Update(instances) - - case <-p.quit: - return - } - } -} - -// Endpoints implements the Subscriber interface. 
-func (p *Subscriber) Endpoints() ([]endpoint.Endpoint, error) { - return p.cache.Endpoints(), nil -} - -func (p *Subscriber) resolve(lookup Lookup) ([]string, error) { - _, addrs, err := lookup("", "", p.name) - if err != nil { - return []string{}, err - } - instances := make([]string, len(addrs)) - for i, addr := range addrs { - instances[i] = net.JoinHostPort(addr.Target, fmt.Sprint(addr.Port)) - } - return instances, nil -} diff --git a/vendor/github.com/go-kit/kit/sd/dnssrv/subscriber_test.go b/vendor/github.com/go-kit/kit/sd/dnssrv/subscriber_test.go deleted file mode 100644 index 5a9036a..0000000 --- a/vendor/github.com/go-kit/kit/sd/dnssrv/subscriber_test.go +++ /dev/null @@ -1,85 +0,0 @@ -package dnssrv - -import ( - "io" - "net" - "sync/atomic" - "testing" - "time" - - "github.com/go-kit/kit/endpoint" - "github.com/go-kit/kit/log" -) - -func TestRefresh(t *testing.T) { - name := "some.service.internal" - - ticker := time.NewTicker(time.Second) - ticker.Stop() - tickc := make(chan time.Time) - ticker.C = tickc - - var lookups uint64 - records := []*net.SRV{} - lookup := func(service, proto, name string) (string, []*net.SRV, error) { - t.Logf("lookup(%q, %q, %q)", service, proto, name) - atomic.AddUint64(&lookups, 1) - return "cname", records, nil - } - - var generates uint64 - factory := func(instance string) (endpoint.Endpoint, io.Closer, error) { - t.Logf("factory(%q)", instance) - atomic.AddUint64(&generates, 1) - return endpoint.Nop, nopCloser{}, nil - } - - subscriber := NewSubscriberDetailed(name, ticker, lookup, factory, log.NewNopLogger()) - defer subscriber.Stop() - - // First lookup, empty - endpoints, err := subscriber.Endpoints() - if err != nil { - t.Error(err) - } - if want, have := 0, len(endpoints); want != have { - t.Errorf("want %d, have %d", want, have) - } - if want, have := uint64(1), atomic.LoadUint64(&lookups); want != have { - t.Errorf("want %d, have %d", want, have) - } - if want, have := uint64(0), atomic.LoadUint64(&generates); want != have { - t.Errorf("want %d, have %d", want, have) - } - - // Load some records and lookup again - records = []*net.SRV{ - &net.SRV{Target: "1.0.0.1", Port: 1001}, - &net.SRV{Target: "1.0.0.2", Port: 1002}, - &net.SRV{Target: "1.0.0.3", Port: 1003}, - } - tickc <- time.Now() - - // There is a race condition where the subscriber.Endpoints call below - // invokes the cache before it is updated by the tick above. - // TODO(pb): solve by running the read through the loop goroutine. - time.Sleep(100 * time.Millisecond) - - endpoints, err = subscriber.Endpoints() - if err != nil { - t.Error(err) - } - if want, have := 3, len(endpoints); want != have { - t.Errorf("want %d, have %d", want, have) - } - if want, have := uint64(2), atomic.LoadUint64(&lookups); want != have { - t.Errorf("want %d, have %d", want, have) - } - if want, have := uint64(len(records)), atomic.LoadUint64(&generates); want != have { - t.Errorf("want %d, have %d", want, have) - } -} - -type nopCloser struct{} - -func (nopCloser) Close() error { return nil } diff --git a/vendor/github.com/go-kit/kit/sd/doc.go b/vendor/github.com/go-kit/kit/sd/doc.go deleted file mode 100644 index b10d96f..0000000 --- a/vendor/github.com/go-kit/kit/sd/doc.go +++ /dev/null @@ -1,5 +0,0 @@ -// Package sd provides utilities related to service discovery. That includes -// subscribing to service discovery systems in order to reach remote instances, -// and publishing to service discovery systems to make an instance available. -// Implementations are provided for most common systems. 
-package sd diff --git a/vendor/github.com/go-kit/kit/sd/etcd/client.go b/vendor/github.com/go-kit/kit/sd/etcd/client.go deleted file mode 100644 index b9e2904..0000000 --- a/vendor/github.com/go-kit/kit/sd/etcd/client.go +++ /dev/null @@ -1,131 +0,0 @@ -package etcd - -import ( - "crypto/tls" - "crypto/x509" - "io/ioutil" - "net" - "net/http" - "time" - - etcd "github.com/coreos/etcd/client" - "golang.org/x/net/context" -) - -// Client is a wrapper around the etcd client. -type Client interface { - // GetEntries will query the given prefix in etcd and returns a set of entries. - GetEntries(prefix string) ([]string, error) - - // WatchPrefix starts watching every change for given prefix in etcd. When an - // change is detected it will populate the responseChan when an *etcd.Response. - WatchPrefix(prefix string, responseChan chan *etcd.Response) -} - -type client struct { - keysAPI etcd.KeysAPI - ctx context.Context -} - -// ClientOptions defines options for the etcd client. -type ClientOptions struct { - Cert string - Key string - CaCert string - DialTimeout time.Duration - DialKeepAline time.Duration - HeaderTimeoutPerRequest time.Duration -} - -// NewClient returns an *etcd.Client with a connection to the named machines. -// It will return an error if a connection to the cluster cannot be made. -// The parameter machines needs to be a full URL with schemas. -// e.g. "http://localhost:2379" will work, but "localhost:2379" will not. -func NewClient(ctx context.Context, machines []string, options ClientOptions) (Client, error) { - var ( - c etcd.KeysAPI - err error - caCertCt []byte - tlsCert tls.Certificate - ) - - if options.Cert != "" && options.Key != "" { - tlsCert, err = tls.LoadX509KeyPair(options.Cert, options.Key) - if err != nil { - return nil, err - } - - caCertCt, err = ioutil.ReadFile(options.CaCert) - if err != nil { - return nil, err - } - caCertPool := x509.NewCertPool() - caCertPool.AppendCertsFromPEM(caCertCt) - - tlsConfig := &tls.Config{ - Certificates: []tls.Certificate{tlsCert}, - RootCAs: caCertPool, - } - - transport := &http.Transport{ - TLSClientConfig: tlsConfig, - Dial: func(network, addr string) (net.Conn, error) { - dial := &net.Dialer{ - Timeout: options.DialTimeout, - KeepAlive: options.DialKeepAline, - } - return dial.Dial(network, addr) - }, - } - - cfg := etcd.Config{ - Endpoints: machines, - Transport: transport, - HeaderTimeoutPerRequest: options.HeaderTimeoutPerRequest, - } - ce, err := etcd.New(cfg) - if err != nil { - return nil, err - } - c = etcd.NewKeysAPI(ce) - } else { - cfg := etcd.Config{ - Endpoints: machines, - Transport: etcd.DefaultTransport, - HeaderTimeoutPerRequest: options.HeaderTimeoutPerRequest, - } - ce, err := etcd.New(cfg) - if err != nil { - return nil, err - } - c = etcd.NewKeysAPI(ce) - } - - return &client{c, ctx}, nil -} - -// GetEntries implements the etcd Client interface. -func (c *client) GetEntries(key string) ([]string, error) { - resp, err := c.keysAPI.Get(c.ctx, key, &etcd.GetOptions{Recursive: true}) - if err != nil { - return nil, err - } - - entries := make([]string, len(resp.Node.Nodes)) - for i, node := range resp.Node.Nodes { - entries[i] = node.Value - } - return entries, nil -} - -// WatchPrefix implements the etcd Client interface. 
-func (c *client) WatchPrefix(prefix string, responseChan chan *etcd.Response) { - watch := c.keysAPI.Watcher(prefix, &etcd.WatcherOptions{AfterIndex: 0, Recursive: true}) - for { - res, err := watch.Next(c.ctx) - if err != nil { - return - } - responseChan <- res - } -} diff --git a/vendor/github.com/go-kit/kit/sd/etcd/subscriber.go b/vendor/github.com/go-kit/kit/sd/etcd/subscriber.go deleted file mode 100644 index 1d579eb..0000000 --- a/vendor/github.com/go-kit/kit/sd/etcd/subscriber.go +++ /dev/null @@ -1,74 +0,0 @@ -package etcd - -import ( - etcd "github.com/coreos/etcd/client" - - "github.com/go-kit/kit/endpoint" - "github.com/go-kit/kit/log" - "github.com/go-kit/kit/sd" - "github.com/go-kit/kit/sd/cache" -) - -// Subscriber yield endpoints stored in a certain etcd keyspace. Any kind of -// change in that keyspace is watched and will update the Subscriber endpoints. -type Subscriber struct { - client Client - prefix string - cache *cache.Cache - logger log.Logger - quitc chan struct{} -} - -var _ sd.Subscriber = &Subscriber{} - -// NewSubscriber returns an etcd subscriber. It will start watching the given -// prefix for changes, and update the endpoints. -func NewSubscriber(c Client, prefix string, factory sd.Factory, logger log.Logger) (*Subscriber, error) { - s := &Subscriber{ - client: c, - prefix: prefix, - cache: cache.New(factory, logger), - logger: logger, - quitc: make(chan struct{}), - } - - instances, err := s.client.GetEntries(s.prefix) - if err == nil { - logger.Log("prefix", s.prefix, "instances", len(instances)) - } else { - logger.Log("prefix", s.prefix, "err", err) - } - s.cache.Update(instances) - - go s.loop() - return s, nil -} - -func (s *Subscriber) loop() { - responseChan := make(chan *etcd.Response) - go s.client.WatchPrefix(s.prefix, responseChan) - for { - select { - case <-responseChan: - instances, err := s.client.GetEntries(s.prefix) - if err != nil { - s.logger.Log("msg", "failed to retrieve entries", "err", err) - continue - } - s.cache.Update(instances) - - case <-s.quitc: - return - } - } -} - -// Endpoints implements the Subscriber interface. -func (s *Subscriber) Endpoints() ([]endpoint.Endpoint, error) { - return s.cache.Endpoints(), nil -} - -// Stop terminates the Subscriber. 
-func (s *Subscriber) Stop() { - close(s.quitc) -} diff --git a/vendor/github.com/go-kit/kit/sd/etcd/subscriber_test.go b/vendor/github.com/go-kit/kit/sd/etcd/subscriber_test.go deleted file mode 100644 index 0073e1e..0000000 --- a/vendor/github.com/go-kit/kit/sd/etcd/subscriber_test.go +++ /dev/null @@ -1,89 +0,0 @@ -package etcd - -import ( - "errors" - "io" - "testing" - - stdetcd "github.com/coreos/etcd/client" - - "github.com/go-kit/kit/endpoint" - "github.com/go-kit/kit/log" -) - -var ( - node = &stdetcd.Node{ - Key: "/foo", - Nodes: []*stdetcd.Node{ - {Key: "/foo/1", Value: "1:1"}, - {Key: "/foo/2", Value: "1:2"}, - }, - } - fakeResponse = &stdetcd.Response{ - Node: node, - } -) - -func TestSubscriber(t *testing.T) { - factory := func(string) (endpoint.Endpoint, io.Closer, error) { - return endpoint.Nop, nil, nil - } - - client := &fakeClient{ - responses: map[string]*stdetcd.Response{"/foo": fakeResponse}, - } - - s, err := NewSubscriber(client, "/foo", factory, log.NewNopLogger()) - if err != nil { - t.Fatal(err) - } - defer s.Stop() - - if _, err := s.Endpoints(); err != nil { - t.Fatal(err) - } -} - -func TestBadFactory(t *testing.T) { - factory := func(string) (endpoint.Endpoint, io.Closer, error) { - return nil, nil, errors.New("kaboom") - } - - client := &fakeClient{ - responses: map[string]*stdetcd.Response{"/foo": fakeResponse}, - } - - s, err := NewSubscriber(client, "/foo", factory, log.NewNopLogger()) - if err != nil { - t.Fatal(err) - } - defer s.Stop() - - endpoints, err := s.Endpoints() - if err != nil { - t.Fatal(err) - } - - if want, have := 0, len(endpoints); want != have { - t.Errorf("want %d, have %d", want, have) - } -} - -type fakeClient struct { - responses map[string]*stdetcd.Response -} - -func (c *fakeClient) GetEntries(prefix string) ([]string, error) { - response, ok := c.responses[prefix] - if !ok { - return nil, errors.New("key not exist") - } - - entries := make([]string, len(response.Node.Nodes)) - for i, node := range response.Node.Nodes { - entries[i] = node.Value - } - return entries, nil -} - -func (c *fakeClient) WatchPrefix(prefix string, responseChan chan *stdetcd.Response) {} diff --git a/vendor/github.com/go-kit/kit/sd/factory.go b/vendor/github.com/go-kit/kit/sd/factory.go deleted file mode 100644 index af99817..0000000 --- a/vendor/github.com/go-kit/kit/sd/factory.go +++ /dev/null @@ -1,17 +0,0 @@ -package sd - -import ( - "io" - - "github.com/go-kit/kit/endpoint" -) - -// Factory is a function that converts an instance string (e.g. host:port) to a -// specific endpoint. Instances that provide multiple endpoints require multiple -// factories. A factory also returns an io.Closer that's invoked when the -// instance goes away and needs to be cleaned up. Factories may return nil -// closers. -// -// Users are expected to provide their own factory functions that assume -// specific transports, or can deduce transports by parsing the instance string. -type Factory func(instance string) (endpoint.Endpoint, io.Closer, error) diff --git a/vendor/github.com/go-kit/kit/sd/fixed_subscriber.go b/vendor/github.com/go-kit/kit/sd/fixed_subscriber.go deleted file mode 100644 index 98fd503..0000000 --- a/vendor/github.com/go-kit/kit/sd/fixed_subscriber.go +++ /dev/null @@ -1,9 +0,0 @@ -package sd - -import "github.com/go-kit/kit/endpoint" - -// FixedSubscriber yields a fixed set of services. -type FixedSubscriber []endpoint.Endpoint - -// Endpoints implements Subscriber. 
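The `sd.Factory` contract removed above leaves endpoint construction entirely to the caller: it turns an instance string such as `host:port` into an `endpoint.Endpoint`, and may return a nil `io.Closer`. A minimal sketch of such a user-supplied factory, assuming an HTTP transport and a hypothetical `/healthz` path (neither comes from this repo), which any of the subscribers in this diff (dnssrv, etcd, zk) could consume:

```go
package main

import (
	"fmt"
	"io"
	"net/http"

	"golang.org/x/net/context"

	"github.com/go-kit/kit/endpoint"
	"github.com/go-kit/kit/sd"
)

// healthFactory converts an instance string ("host:port") into an endpoint
// that issues a GET against an assumed /healthz path on that instance.
// The nil io.Closer is allowed by the Factory contract.
func healthFactory(instance string) (endpoint.Endpoint, io.Closer, error) {
	url := fmt.Sprintf("http://%s/healthz", instance) // /healthz is illustrative only
	e := func(ctx context.Context, request interface{}) (interface{}, error) {
		// ctx and request are ignored in this sketch.
		resp, err := http.Get(url)
		if err != nil {
			return nil, err
		}
		defer resp.Body.Close()
		return resp.StatusCode, nil
	}
	return e, nil, nil
}

// Compile-time check that healthFactory satisfies sd.Factory.
var _ sd.Factory = healthFactory

func main() {
	e, _, _ := healthFactory("10.0.0.10:9000")
	_ = e // in real use, pass healthFactory to a dnssrv, etcd, or zk subscriber
}
```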
-func (s FixedSubscriber) Endpoints() ([]endpoint.Endpoint, error) { return s, nil } diff --git a/vendor/github.com/go-kit/kit/sd/lb/balancer.go b/vendor/github.com/go-kit/kit/sd/lb/balancer.go deleted file mode 100644 index 40aa0ef..0000000 --- a/vendor/github.com/go-kit/kit/sd/lb/balancer.go +++ /dev/null @@ -1,15 +0,0 @@ -package lb - -import ( - "errors" - - "github.com/go-kit/kit/endpoint" -) - -// Balancer yields endpoints according to some heuristic. -type Balancer interface { - Endpoint() (endpoint.Endpoint, error) -} - -// ErrNoEndpoints is returned when no qualifying endpoints are available. -var ErrNoEndpoints = errors.New("no endpoints available") diff --git a/vendor/github.com/go-kit/kit/sd/lb/doc.go b/vendor/github.com/go-kit/kit/sd/lb/doc.go deleted file mode 100644 index 82a9516..0000000 --- a/vendor/github.com/go-kit/kit/sd/lb/doc.go +++ /dev/null @@ -1,5 +0,0 @@ -// Package lb deals with client-side load balancing across multiple identical -// instances of services and endpoints. When combined with a service discovery -// system of record, it enables a more decentralized architecture, removing the -// need for separate load balancers like HAProxy. -package lb diff --git a/vendor/github.com/go-kit/kit/sd/lb/random.go b/vendor/github.com/go-kit/kit/sd/lb/random.go deleted file mode 100644 index 78b0956..0000000 --- a/vendor/github.com/go-kit/kit/sd/lb/random.go +++ /dev/null @@ -1,32 +0,0 @@ -package lb - -import ( - "math/rand" - - "github.com/go-kit/kit/endpoint" - "github.com/go-kit/kit/sd" -) - -// NewRandom returns a load balancer that selects services randomly. -func NewRandom(s sd.Subscriber, seed int64) Balancer { - return &random{ - s: s, - r: rand.New(rand.NewSource(seed)), - } -} - -type random struct { - s sd.Subscriber - r *rand.Rand -} - -func (r *random) Endpoint() (endpoint.Endpoint, error) { - endpoints, err := r.s.Endpoints() - if err != nil { - return nil, err - } - if len(endpoints) <= 0 { - return nil, ErrNoEndpoints - } - return endpoints[r.r.Intn(len(endpoints))], nil -} diff --git a/vendor/github.com/go-kit/kit/sd/lb/random_test.go b/vendor/github.com/go-kit/kit/sd/lb/random_test.go deleted file mode 100644 index c9b0117..0000000 --- a/vendor/github.com/go-kit/kit/sd/lb/random_test.go +++ /dev/null @@ -1,52 +0,0 @@ -package lb - -import ( - "math" - "testing" - - "github.com/go-kit/kit/endpoint" - "github.com/go-kit/kit/sd" - "golang.org/x/net/context" -) - -func TestRandom(t *testing.T) { - var ( - n = 7 - endpoints = make([]endpoint.Endpoint, n) - counts = make([]int, n) - seed = int64(12345) - iterations = 1000000 - want = iterations / n - tolerance = want / 100 // 1% - ) - - for i := 0; i < n; i++ { - i0 := i - endpoints[i] = func(context.Context, interface{}) (interface{}, error) { counts[i0]++; return struct{}{}, nil } - } - - subscriber := sd.FixedSubscriber(endpoints) - balancer := NewRandom(subscriber, seed) - - for i := 0; i < iterations; i++ { - endpoint, _ := balancer.Endpoint() - endpoint(context.Background(), struct{}{}) - } - - for i, have := range counts { - delta := int(math.Abs(float64(want - have))) - if delta > tolerance { - t.Errorf("%d: want %d, have %d, delta %d > %d tolerance", i, want, have, delta, tolerance) - } - } -} - -func TestRandomNoEndpoints(t *testing.T) { - subscriber := sd.FixedSubscriber{} - balancer := NewRandom(subscriber, 1415926) - _, err := balancer.Endpoint() - if want, have := ErrNoEndpoints, err; want != have { - t.Errorf("want %v, have %v", want, have) - } - -} diff --git 
a/vendor/github.com/go-kit/kit/sd/lb/retry.go b/vendor/github.com/go-kit/kit/sd/lb/retry.go deleted file mode 100644 index a933eeb..0000000 --- a/vendor/github.com/go-kit/kit/sd/lb/retry.go +++ /dev/null @@ -1,57 +0,0 @@ -package lb - -import ( - "fmt" - "strings" - "time" - - "golang.org/x/net/context" - - "github.com/go-kit/kit/endpoint" -) - -// Retry wraps a service load balancer and returns an endpoint oriented load -// balancer for the specified service method. -// Requests to the endpoint will be automatically load balanced via the load -// balancer. Requests that return errors will be retried until they succeed, -// up to max times, or until the timeout is elapsed, whichever comes first. -func Retry(max int, timeout time.Duration, b Balancer) endpoint.Endpoint { - if b == nil { - panic("nil Balancer") - } - return func(ctx context.Context, request interface{}) (response interface{}, err error) { - var ( - newctx, cancel = context.WithTimeout(ctx, timeout) - responses = make(chan interface{}, 1) - errs = make(chan error, 1) - a = []string{} - ) - defer cancel() - for i := 1; i <= max; i++ { - go func() { - e, err := b.Endpoint() - if err != nil { - errs <- err - return - } - response, err := e(newctx, request) - if err != nil { - errs <- err - return - } - responses <- response - }() - - select { - case <-newctx.Done(): - return nil, newctx.Err() - case response := <-responses: - return response, nil - case err := <-errs: - a = append(a, err.Error()) - continue - } - } - return nil, fmt.Errorf("retry attempts exceeded (%s)", strings.Join(a, "; ")) - } -} diff --git a/vendor/github.com/go-kit/kit/sd/lb/retry_test.go b/vendor/github.com/go-kit/kit/sd/lb/retry_test.go deleted file mode 100644 index 07b1afd..0000000 --- a/vendor/github.com/go-kit/kit/sd/lb/retry_test.go +++ /dev/null @@ -1,90 +0,0 @@ -package lb_test - -import ( - "errors" - "testing" - "time" - - "golang.org/x/net/context" - - "github.com/go-kit/kit/endpoint" - "github.com/go-kit/kit/sd" - loadbalancer "github.com/go-kit/kit/sd/lb" -) - -func TestRetryMaxTotalFail(t *testing.T) { - var ( - endpoints = sd.FixedSubscriber{} // no endpoints - lb = loadbalancer.NewRoundRobin(endpoints) - retry = loadbalancer.Retry(999, time.Second, lb) // lots of retries - ctx = context.Background() - ) - if _, err := retry(ctx, struct{}{}); err == nil { - t.Errorf("expected error, got none") // should fail - } -} - -func TestRetryMaxPartialFail(t *testing.T) { - var ( - endpoints = []endpoint.Endpoint{ - func(context.Context, interface{}) (interface{}, error) { return nil, errors.New("error one") }, - func(context.Context, interface{}) (interface{}, error) { return nil, errors.New("error two") }, - func(context.Context, interface{}) (interface{}, error) { return struct{}{}, nil /* OK */ }, - } - subscriber = sd.FixedSubscriber{ - 0: endpoints[0], - 1: endpoints[1], - 2: endpoints[2], - } - retries = len(endpoints) - 1 // not quite enough retries - lb = loadbalancer.NewRoundRobin(subscriber) - ctx = context.Background() - ) - if _, err := loadbalancer.Retry(retries, time.Second, lb)(ctx, struct{}{}); err == nil { - t.Errorf("expected error, got none") - } -} - -func TestRetryMaxSuccess(t *testing.T) { - var ( - endpoints = []endpoint.Endpoint{ - func(context.Context, interface{}) (interface{}, error) { return nil, errors.New("error one") }, - func(context.Context, interface{}) (interface{}, error) { return nil, errors.New("error two") }, - func(context.Context, interface{}) (interface{}, error) { return struct{}{}, nil /* OK */ }, - } - 
subscriber = sd.FixedSubscriber{ - 0: endpoints[0], - 1: endpoints[1], - 2: endpoints[2], - } - retries = len(endpoints) // exactly enough retries - lb = loadbalancer.NewRoundRobin(subscriber) - ctx = context.Background() - ) - if _, err := loadbalancer.Retry(retries, time.Second, lb)(ctx, struct{}{}); err != nil { - t.Error(err) - } -} - -func TestRetryTimeout(t *testing.T) { - var ( - step = make(chan struct{}) - e = func(context.Context, interface{}) (interface{}, error) { <-step; return struct{}{}, nil } - timeout = time.Millisecond - retry = loadbalancer.Retry(999, timeout, loadbalancer.NewRoundRobin(sd.FixedSubscriber{0: e})) - errs = make(chan error, 1) - invoke = func() { _, err := retry(context.Background(), struct{}{}); errs <- err } - ) - - go func() { step <- struct{}{} }() // queue up a flush of the endpoint - invoke() // invoke the endpoint and trigger the flush - if err := <-errs; err != nil { // that should succeed - t.Error(err) - } - - go func() { time.Sleep(10 * timeout); step <- struct{}{} }() // a delayed flush - invoke() // invoke the endpoint - if err := <-errs; err != context.DeadlineExceeded { // that should not succeed - t.Errorf("wanted %v, got none", context.DeadlineExceeded) - } -} diff --git a/vendor/github.com/go-kit/kit/sd/lb/round_robin.go b/vendor/github.com/go-kit/kit/sd/lb/round_robin.go deleted file mode 100644 index 74b86ca..0000000 --- a/vendor/github.com/go-kit/kit/sd/lb/round_robin.go +++ /dev/null @@ -1,34 +0,0 @@ -package lb - -import ( - "sync/atomic" - - "github.com/go-kit/kit/endpoint" - "github.com/go-kit/kit/sd" -) - -// NewRoundRobin returns a load balancer that returns services in sequence. -func NewRoundRobin(s sd.Subscriber) Balancer { - return &roundRobin{ - s: s, - c: 0, - } -} - -type roundRobin struct { - s sd.Subscriber - c uint64 -} - -func (rr *roundRobin) Endpoint() (endpoint.Endpoint, error) { - endpoints, err := rr.s.Endpoints() - if err != nil { - return nil, err - } - if len(endpoints) <= 0 { - return nil, ErrNoEndpoints - } - old := atomic.AddUint64(&rr.c, 1) - 1 - idx := old % uint64(len(endpoints)) - return endpoints[idx], nil -} diff --git a/vendor/github.com/go-kit/kit/sd/lb/round_robin_test.go b/vendor/github.com/go-kit/kit/sd/lb/round_robin_test.go deleted file mode 100644 index 64a8baa..0000000 --- a/vendor/github.com/go-kit/kit/sd/lb/round_robin_test.go +++ /dev/null @@ -1,96 +0,0 @@ -package lb - -import ( - "reflect" - "sync" - "sync/atomic" - "testing" - "time" - - "golang.org/x/net/context" - - "github.com/go-kit/kit/endpoint" - "github.com/go-kit/kit/sd" -) - -func TestRoundRobin(t *testing.T) { - var ( - counts = []int{0, 0, 0} - endpoints = []endpoint.Endpoint{ - func(context.Context, interface{}) (interface{}, error) { counts[0]++; return struct{}{}, nil }, - func(context.Context, interface{}) (interface{}, error) { counts[1]++; return struct{}{}, nil }, - func(context.Context, interface{}) (interface{}, error) { counts[2]++; return struct{}{}, nil }, - } - ) - - subscriber := sd.FixedSubscriber(endpoints) - balancer := NewRoundRobin(subscriber) - - for i, want := range [][]int{ - {1, 0, 0}, - {1, 1, 0}, - {1, 1, 1}, - {2, 1, 1}, - {2, 2, 1}, - {2, 2, 2}, - {3, 2, 2}, - } { - endpoint, err := balancer.Endpoint() - if err != nil { - t.Fatal(err) - } - endpoint(context.Background(), struct{}{}) - if have := counts; !reflect.DeepEqual(want, have) { - t.Fatalf("%d: want %v, have %v", i, want, have) - } - } -} - -func TestRoundRobinNoEndpoints(t *testing.T) { - subscriber := sd.FixedSubscriber{} - balancer := 
NewRoundRobin(subscriber) - _, err := balancer.Endpoint() - if want, have := ErrNoEndpoints, err; want != have { - t.Errorf("want %v, have %v", want, have) - } -} - -func TestRoundRobinNoRace(t *testing.T) { - balancer := NewRoundRobin(sd.FixedSubscriber([]endpoint.Endpoint{ - endpoint.Nop, - endpoint.Nop, - endpoint.Nop, - endpoint.Nop, - endpoint.Nop, - })) - - var ( - n = 100 - done = make(chan struct{}) - wg sync.WaitGroup - count uint64 - ) - - wg.Add(n) - - for i := 0; i < n; i++ { - go func() { - defer wg.Done() - for { - select { - case <-done: - return - default: - _, _ = balancer.Endpoint() - atomic.AddUint64(&count, 1) - } - } - }() - } - - time.Sleep(time.Second) - close(done) - wg.Wait() - - t.Logf("made %d calls", atomic.LoadUint64(&count)) -} diff --git a/vendor/github.com/go-kit/kit/sd/registrar.go b/vendor/github.com/go-kit/kit/sd/registrar.go deleted file mode 100644 index 49a0c9f..0000000 --- a/vendor/github.com/go-kit/kit/sd/registrar.go +++ /dev/null @@ -1,13 +0,0 @@ -package sd - -// Registrar registers instance information to a service discovery system when -// an instance becomes alive and healthy, and deregisters that information when -// the service becomes unhealthy or goes away. -// -// Registrar implementations exist for various service discovery systems. Note -// that identifying instance information (e.g. host:port) must be given via the -// concrete constructor; this interface merely signals lifecycle changes. -type Registrar interface { - Register() - Deregister() -} diff --git a/vendor/github.com/go-kit/kit/sd/subscriber.go b/vendor/github.com/go-kit/kit/sd/subscriber.go deleted file mode 100644 index 8267b51..0000000 --- a/vendor/github.com/go-kit/kit/sd/subscriber.go +++ /dev/null @@ -1,11 +0,0 @@ -package sd - -import "github.com/go-kit/kit/endpoint" - -// Subscriber listens to a service discovery system and yields a set of -// identical endpoints on demand. An error indicates a problem with connectivity -// to the service discovery system, or within the system itself; a subscriber -// may yield no endpoints without error. -type Subscriber interface { - Endpoints() ([]endpoint.Endpoint, error) -} diff --git a/vendor/github.com/go-kit/kit/sd/zk/client.go b/vendor/github.com/go-kit/kit/sd/zk/client.go deleted file mode 100644 index 70cdab3..0000000 --- a/vendor/github.com/go-kit/kit/sd/zk/client.go +++ /dev/null @@ -1,273 +0,0 @@ -package zk - -import ( - "errors" - "net" - "strings" - "time" - - "github.com/samuel/go-zookeeper/zk" - - "github.com/go-kit/kit/log" -) - -// DefaultACL is the default ACL to use for creating znodes. -var ( - DefaultACL = zk.WorldACL(zk.PermAll) - ErrInvalidCredentials = errors.New("invalid credentials provided") - ErrClientClosed = errors.New("client service closed") - ErrNotRegistered = errors.New("not registered") - ErrNodeNotFound = errors.New("node not found") -) - -const ( - // DefaultConnectTimeout is the default timeout to establish a connection to - // a ZooKeeper node. - DefaultConnectTimeout = 2 * time.Second - // DefaultSessionTimeout is the default timeout to keep the current - // ZooKeeper session alive during a temporary disconnect. - DefaultSessionTimeout = 5 * time.Second -) - -// Client is a wrapper around a lower level ZooKeeper client implementation. -type Client interface { - // GetEntries should query the provided path in ZooKeeper, place a watch on - // it and retrieve data from its current child nodes. 
- GetEntries(path string) ([]string, <-chan zk.Event, error) - // CreateParentNodes should try to create the path in case it does not exist - // yet on ZooKeeper. - CreateParentNodes(path string) error - // Register a service with ZooKeeper. - Register(s *Service) error - // Deregister a service with ZooKeeper. - Deregister(s *Service) error - // Stop should properly shutdown the client implementation - Stop() -} - -type clientConfig struct { - logger log.Logger - acl []zk.ACL - credentials []byte - connectTimeout time.Duration - sessionTimeout time.Duration - rootNodePayload [][]byte - eventHandler func(zk.Event) -} - -// Option functions enable friendly APIs. -type Option func(*clientConfig) error - -type client struct { - *zk.Conn - clientConfig - active bool - quit chan struct{} -} - -// ACL returns an Option specifying a non-default ACL for creating parent nodes. -func ACL(acl []zk.ACL) Option { - return func(c *clientConfig) error { - c.acl = acl - return nil - } -} - -// Credentials returns an Option specifying a user/password combination which -// the client will use to authenticate itself with. -func Credentials(user, pass string) Option { - return func(c *clientConfig) error { - if user == "" || pass == "" { - return ErrInvalidCredentials - } - c.credentials = []byte(user + ":" + pass) - return nil - } -} - -// ConnectTimeout returns an Option specifying a non-default connection timeout -// when we try to establish a connection to a ZooKeeper server. -func ConnectTimeout(t time.Duration) Option { - return func(c *clientConfig) error { - if t.Seconds() < 1 { - return errors.New("invalid connect timeout (minimum value is 1 second)") - } - c.connectTimeout = t - return nil - } -} - -// SessionTimeout returns an Option specifying a non-default session timeout. -func SessionTimeout(t time.Duration) Option { - return func(c *clientConfig) error { - if t.Seconds() < 1 { - return errors.New("invalid session timeout (minimum value is 1 second)") - } - c.sessionTimeout = t - return nil - } -} - -// Payload returns an Option specifying non-default data values for each znode -// created by CreateParentNodes. -func Payload(payload [][]byte) Option { - return func(c *clientConfig) error { - c.rootNodePayload = payload - return nil - } -} - -// EventHandler returns an Option specifying a callback function to handle -// incoming zk.Event payloads (ZooKeeper connection events). -func EventHandler(handler func(zk.Event)) Option { - return func(c *clientConfig) error { - c.eventHandler = handler - return nil - } -} - -// NewClient returns a ZooKeeper client with a connection to the server cluster. -// It will return an error if the server cluster cannot be resolved. -func NewClient(servers []string, logger log.Logger, options ...Option) (Client, error) { - defaultEventHandler := func(event zk.Event) { - logger.Log("eventtype", event.Type.String(), "server", event.Server, "state", event.State.String(), "err", event.Err) - } - config := clientConfig{ - acl: DefaultACL, - connectTimeout: DefaultConnectTimeout, - sessionTimeout: DefaultSessionTimeout, - eventHandler: defaultEventHandler, - logger: logger, - } - for _, option := range options { - if err := option(&config); err != nil { - return nil, err - } - } - // dialer overrides the default ZooKeeper library Dialer so we can configure - // the connectTimeout. The current library has a hardcoded value of 1 second - // and there are reports of race conditions, due to slow DNS resolvers and - // other network latency issues. 
- dialer := func(network, address string, _ time.Duration) (net.Conn, error) { - return net.DialTimeout(network, address, config.connectTimeout) - } - conn, eventc, err := zk.Connect(servers, config.sessionTimeout, withLogger(logger), zk.WithDialer(dialer)) - - if err != nil { - return nil, err - } - - if len(config.credentials) > 0 { - err = conn.AddAuth("digest", config.credentials) - if err != nil { - return nil, err - } - } - - c := &client{conn, config, true, make(chan struct{})} - - // Start listening for incoming Event payloads and callback the set - // eventHandler. - go func() { - for { - select { - case event := <-eventc: - config.eventHandler(event) - case <-c.quit: - return - } - } - }() - return c, nil -} - -// CreateParentNodes implements the ZooKeeper Client interface. -func (c *client) CreateParentNodes(path string) error { - if !c.active { - return ErrClientClosed - } - if path[0] != '/' { - return zk.ErrInvalidPath - } - payload := []byte("") - pathString := "" - pathNodes := strings.Split(path, "/") - for i := 1; i < len(pathNodes); i++ { - if i <= len(c.rootNodePayload) { - payload = c.rootNodePayload[i-1] - } else { - payload = []byte("") - } - pathString += "/" + pathNodes[i] - _, err := c.Create(pathString, payload, 0, c.acl) - // not being able to create the node because it exists or not having - // sufficient rights is not an issue. It is ok for the node to already - // exist and/or us to only have read rights - if err != nil && err != zk.ErrNodeExists && err != zk.ErrNoAuth { - return err - } - } - return nil -} - -// GetEntries implements the ZooKeeper Client interface. -func (c *client) GetEntries(path string) ([]string, <-chan zk.Event, error) { - // retrieve list of child nodes for given path and add watch to path - znodes, _, eventc, err := c.ChildrenW(path) - - if err != nil { - return nil, eventc, err - } - - var resp []string - for _, znode := range znodes { - // retrieve payload for child znode and add to response array - if data, _, err := c.Get(path + "/" + znode); err == nil { - resp = append(resp, string(data)) - } - } - return resp, eventc, nil -} - -// Register implements the ZooKeeper Client interface. -func (c *client) Register(s *Service) error { - if s.Path[len(s.Path)-1] != '/' { - s.Path += "/" - } - path := s.Path + s.Name - if err := c.CreateParentNodes(path); err != nil { - return err - } - node, err := c.CreateProtectedEphemeralSequential(path, s.Data, c.acl) - if err != nil { - return err - } - s.node = node - return nil -} - -// Deregister implements the ZooKeeper Client interface. -func (c *client) Deregister(s *Service) error { - if s.node == "" { - return ErrNotRegistered - } - path := s.Path + s.Name - found, stat, err := c.Exists(path) - if err != nil { - return err - } - if !found { - return ErrNodeNotFound - } - if err := c.Delete(path, stat.Version); err != nil { - return err - } - return nil -} - -// Stop implements the ZooKeeper Client interface. 
-func (c *client) Stop() { - c.active = false - close(c.quit) - c.Close() -} diff --git a/vendor/github.com/go-kit/kit/sd/zk/client_test.go b/vendor/github.com/go-kit/kit/sd/zk/client_test.go deleted file mode 100644 index fbb2a5a..0000000 --- a/vendor/github.com/go-kit/kit/sd/zk/client_test.go +++ /dev/null @@ -1,157 +0,0 @@ -package zk - -import ( - "bytes" - "testing" - "time" - - stdzk "github.com/samuel/go-zookeeper/zk" - - "github.com/go-kit/kit/log" -) - -func TestNewClient(t *testing.T) { - var ( - acl = stdzk.WorldACL(stdzk.PermRead) - connectTimeout = 3 * time.Second - sessionTimeout = 20 * time.Second - payload = [][]byte{[]byte("Payload"), []byte("Test")} - ) - - c, err := NewClient( - []string{"FailThisInvalidHost!!!"}, - log.NewNopLogger(), - ) - if err == nil { - t.Errorf("expected error, got nil") - } - - hasFired := false - calledEventHandler := make(chan struct{}) - eventHandler := func(event stdzk.Event) { - if !hasFired { - // test is successful if this function has fired at least once - hasFired = true - close(calledEventHandler) - } - } - - c, err = NewClient( - []string{"localhost"}, - log.NewNopLogger(), - ACL(acl), - ConnectTimeout(connectTimeout), - SessionTimeout(sessionTimeout), - Payload(payload), - EventHandler(eventHandler), - ) - if err != nil { - t.Fatal(err) - } - defer c.Stop() - - clientImpl, ok := c.(*client) - if !ok { - t.Fatal("retrieved incorrect Client implementation") - } - if want, have := acl, clientImpl.acl; want[0] != have[0] { - t.Errorf("want %+v, have %+v", want, have) - } - if want, have := connectTimeout, clientImpl.connectTimeout; want != have { - t.Errorf("want %d, have %d", want, have) - } - if want, have := sessionTimeout, clientImpl.sessionTimeout; want != have { - t.Errorf("want %d, have %d", want, have) - } - if want, have := payload, clientImpl.rootNodePayload; bytes.Compare(want[0], have[0]) != 0 || bytes.Compare(want[1], have[1]) != 0 { - t.Errorf("want %s, have %s", want, have) - } - - select { - case <-calledEventHandler: - case <-time.After(100 * time.Millisecond): - t.Errorf("event handler never called") - } -} - -func TestOptions(t *testing.T) { - _, err := NewClient([]string{"localhost"}, log.NewNopLogger(), Credentials("valid", "credentials")) - if err != nil && err != stdzk.ErrNoServer { - t.Errorf("unexpected error: %v", err) - } - - _, err = NewClient([]string{"localhost"}, log.NewNopLogger(), Credentials("nopass", "")) - if want, have := err, ErrInvalidCredentials; want != have { - t.Errorf("want %v, have %v", want, have) - } - - _, err = NewClient([]string{"localhost"}, log.NewNopLogger(), ConnectTimeout(0)) - if err == nil { - t.Errorf("expected connect timeout error") - } - - _, err = NewClient([]string{"localhost"}, log.NewNopLogger(), SessionTimeout(0)) - if err == nil { - t.Errorf("expected connect timeout error") - } -} - -func TestCreateParentNodes(t *testing.T) { - payload := [][]byte{[]byte("Payload"), []byte("Test")} - - c, err := NewClient([]string{"localhost:65500"}, log.NewNopLogger()) - if err != nil { - t.Errorf("unexpected error: %v", err) - } - if c == nil { - t.Fatal("expected new Client, got nil") - } - - s, err := NewSubscriber(c, "/validpath", newFactory(""), log.NewNopLogger()) - if err != stdzk.ErrNoServer { - t.Errorf("unexpected error: %v", err) - } - if s != nil { - t.Error("expected failed new Subscriber") - } - - s, err = NewSubscriber(c, "invalidpath", newFactory(""), log.NewNopLogger()) - if err != stdzk.ErrInvalidPath { - t.Errorf("unexpected error: %v", err) - } - _, _, err = 
c.GetEntries("/validpath") - if err != stdzk.ErrNoServer { - t.Errorf("unexpected error: %v", err) - } - - c.Stop() - - err = c.CreateParentNodes("/validpath") - if err != ErrClientClosed { - t.Errorf("unexpected error: %v", err) - } - - s, err = NewSubscriber(c, "/validpath", newFactory(""), log.NewNopLogger()) - if err != ErrClientClosed { - t.Errorf("unexpected error: %v", err) - } - if s != nil { - t.Error("expected failed new Subscriber") - } - - c, err = NewClient([]string{"localhost:65500"}, log.NewNopLogger(), Payload(payload)) - if err != nil { - t.Errorf("unexpected error: %v", err) - } - if c == nil { - t.Fatal("expected new Client, got nil") - } - - s, err = NewSubscriber(c, "/validpath", newFactory(""), log.NewNopLogger()) - if err != stdzk.ErrNoServer { - t.Errorf("unexpected error: %v", err) - } - if s != nil { - t.Error("expected failed new Subscriber") - } -} diff --git a/vendor/github.com/go-kit/kit/sd/zk/integration_test.go b/vendor/github.com/go-kit/kit/sd/zk/integration_test.go deleted file mode 100644 index 6084415..0000000 --- a/vendor/github.com/go-kit/kit/sd/zk/integration_test.go +++ /dev/null @@ -1,212 +0,0 @@ -// +build integration - -package zk - -import ( - "bytes" - "flag" - "fmt" - "os" - "testing" - "time" - - stdzk "github.com/samuel/go-zookeeper/zk" -) - -var ( - host []string -) - -func TestMain(m *testing.M) { - flag.Parse() - - fmt.Println("Starting ZooKeeper server...") - - ts, err := stdzk.StartTestCluster(1, nil, nil) - if err != nil { - fmt.Printf("ZooKeeper server error: %v\n", err) - os.Exit(1) - } - - host = []string{fmt.Sprintf("localhost:%d", ts.Servers[0].Port)} - code := m.Run() - - ts.Stop() - os.Exit(code) -} - -func TestCreateParentNodesOnServer(t *testing.T) { - payload := [][]byte{[]byte("Payload"), []byte("Test")} - c1, err := NewClient(host, logger, Payload(payload)) - if err != nil { - t.Fatalf("Connect returned error: %v", err) - } - if c1 == nil { - t.Fatal("Expected pointer to client, got nil") - } - defer c1.Stop() - - s, err := NewSubscriber(c1, path, newFactory(""), logger) - if err != nil { - t.Fatalf("Unable to create Subscriber: %v", err) - } - defer s.Stop() - - services, err := s.Endpoints() - if err != nil { - t.Fatal(err) - } - if want, have := 0, len(services); want != have { - t.Errorf("want %d, have %d", want, have) - } - - c2, err := NewClient(host, logger) - if err != nil { - t.Fatalf("Connect returned error: %v", err) - } - defer c2.Stop() - data, _, err := c2.(*client).Get(path) - if err != nil { - t.Fatal(err) - } - // test Client implementation of CreateParentNodes. 
It should have created - // our payload - if bytes.Compare(data, payload[1]) != 0 { - t.Errorf("want %s, have %s", payload[1], data) - } - -} - -func TestCreateBadParentNodesOnServer(t *testing.T) { - c, _ := NewClient(host, logger) - defer c.Stop() - - _, err := NewSubscriber(c, "invalid/path", newFactory(""), logger) - - if want, have := stdzk.ErrInvalidPath, err; want != have { - t.Errorf("want %v, have %v", want, have) - } -} - -func TestCredentials1(t *testing.T) { - acl := stdzk.DigestACL(stdzk.PermAll, "user", "secret") - c, _ := NewClient(host, logger, ACL(acl), Credentials("user", "secret")) - defer c.Stop() - - _, err := NewSubscriber(c, "/acl-issue-test", newFactory(""), logger) - - if err != nil { - t.Fatal(err) - } -} - -func TestCredentials2(t *testing.T) { - acl := stdzk.DigestACL(stdzk.PermAll, "user", "secret") - c, _ := NewClient(host, logger, ACL(acl)) - defer c.Stop() - - _, err := NewSubscriber(c, "/acl-issue-test", newFactory(""), logger) - - if err != stdzk.ErrNoAuth { - t.Errorf("want %v, have %v", stdzk.ErrNoAuth, err) - } -} - -func TestConnection(t *testing.T) { - c, _ := NewClient(host, logger) - c.Stop() - - _, err := NewSubscriber(c, "/acl-issue-test", newFactory(""), logger) - - if err != ErrClientClosed { - t.Errorf("want %v, have %v", ErrClientClosed, err) - } -} - -func TestGetEntriesOnServer(t *testing.T) { - var instancePayload = "10.0.3.204:8002" - - c1, err := NewClient(host, logger) - if err != nil { - t.Fatalf("Connect returned error: %v", err) - } - - defer c1.Stop() - - c2, err := NewClient(host, logger) - s, err := NewSubscriber(c2, path, newFactory(""), logger) - if err != nil { - t.Fatal(err) - } - defer c2.Stop() - - instance1 := &Service{ - Path: path, - Name: "instance1", - Data: []byte(instancePayload), - } - if err = c2.Register(instance1); err != nil { - t.Fatalf("Unable to create test ephemeral znode 1: %+v", err) - } - instance2 := &Service{ - Path: path, - Name: "instance2", - Data: []byte(instancePayload), - } - if err = c2.Register(instance2); err != nil { - t.Fatalf("Unable to create test ephemeral znode 2: %+v", err) - } - - time.Sleep(50 * time.Millisecond) - - services, err := s.Endpoints() - if err != nil { - t.Fatal(err) - } - if want, have := 2, len(services); want != have { - t.Errorf("want %d, have %d", want, have) - } -} - -func TestGetEntriesPayloadOnServer(t *testing.T) { - c, err := NewClient(host, logger) - if err != nil { - t.Fatalf("Connect returned error: %v", err) - } - _, eventc, err := c.GetEntries(path) - if err != nil { - t.Fatal(err) - } - - instance3 := Service{ - Path: path, - Name: "instance3", - Data: []byte("just some payload"), - } - registrar := NewRegistrar(c, instance3, logger) - registrar.Register() - select { - case event := <-eventc: - if want, have := stdzk.EventNodeChildrenChanged.String(), event.Type.String(); want != have { - t.Errorf("want %s, have %s", want, have) - } - case <-time.After(100 * time.Millisecond): - t.Errorf("expected incoming watch event, timeout occurred") - } - - _, eventc, err = c.GetEntries(path) - if err != nil { - t.Fatal(err) - } - - registrar.Deregister() - select { - case event := <-eventc: - if want, have := stdzk.EventNodeChildrenChanged.String(), event.Type.String(); want != have { - t.Errorf("want %s, have %s", want, have) - } - case <-time.After(100 * time.Millisecond): - t.Errorf("expected incoming watch event, timeout occurred") - } - -} diff --git a/vendor/github.com/go-kit/kit/sd/zk/logwrapper.go b/vendor/github.com/go-kit/kit/sd/zk/logwrapper.go deleted file 
mode 100644 index abb7b6d..0000000 --- a/vendor/github.com/go-kit/kit/sd/zk/logwrapper.go +++ /dev/null @@ -1,27 +0,0 @@ -package zk - -import ( - "fmt" - - "github.com/samuel/go-zookeeper/zk" - - "github.com/go-kit/kit/log" -) - -// wrapLogger wraps a Go kit logger so we can use it as the logging service for -// the ZooKeeper library, which expects a Printf method to be available. -type wrapLogger struct { - log.Logger -} - -func (logger wrapLogger) Printf(format string, args ...interface{}) { - logger.Log("msg", fmt.Sprintf(format, args...)) -} - -// withLogger replaces the ZooKeeper library's default logging service with our -// own Go kit logger. -func withLogger(logger log.Logger) func(c *zk.Conn) { - return func(c *zk.Conn) { - c.SetLogger(wrapLogger{logger}) - } -} diff --git a/vendor/github.com/go-kit/kit/sd/zk/registrar.go b/vendor/github.com/go-kit/kit/sd/zk/registrar.go deleted file mode 100644 index dcfae39..0000000 --- a/vendor/github.com/go-kit/kit/sd/zk/registrar.go +++ /dev/null @@ -1,51 +0,0 @@ -package zk - -import "github.com/go-kit/kit/log" - -// Registrar registers service instance liveness information to ZooKeeper. -type Registrar struct { - client Client - service Service - logger log.Logger -} - -// Service holds the root path, service name and instance identifying data you -// want to publish to ZooKeeper. -type Service struct { - Path string // discovery namespace, example: /myorganization/myplatform/ - Name string // service name, example: addscv - Data []byte // instance data to store for discovery, example: 10.0.2.10:80 - node string // Client will record the ephemeral node name so we can deregister -} - -// NewRegistrar returns a ZooKeeper Registrar acting on the provided catalog -// registration. -func NewRegistrar(client Client, service Service, logger log.Logger) *Registrar { - return &Registrar{ - client: client, - service: service, - logger: log.NewContext(logger).With( - "service", service.Name, - "path", service.Path, - "data", string(service.Data), - ), - } -} - -// Register implements sd.Registrar interface. -func (r *Registrar) Register() { - if err := r.client.Register(&r.service); err != nil { - r.logger.Log("err", err) - } else { - r.logger.Log("action", "register") - } -} - -// Deregister implements sd.Registrar interface. -func (r *Registrar) Deregister() { - if err := r.client.Deregister(&r.service); err != nil { - r.logger.Log("err", err) - } else { - r.logger.Log("action", "deregister") - } -} diff --git a/vendor/github.com/go-kit/kit/sd/zk/subscriber.go b/vendor/github.com/go-kit/kit/sd/zk/subscriber.go deleted file mode 100644 index b9c67db..0000000 --- a/vendor/github.com/go-kit/kit/sd/zk/subscriber.go +++ /dev/null @@ -1,86 +0,0 @@ -package zk - -import ( - "github.com/samuel/go-zookeeper/zk" - - "github.com/go-kit/kit/endpoint" - "github.com/go-kit/kit/log" - "github.com/go-kit/kit/sd" - "github.com/go-kit/kit/sd/cache" -) - -// Subscriber yield endpoints stored in a certain ZooKeeper path. Any kind of -// change in that path is watched and will update the Subscriber endpoints. -type Subscriber struct { - client Client - path string - cache *cache.Cache - logger log.Logger - quitc chan struct{} -} - -var _ sd.Subscriber = &Subscriber{} - -// NewSubscriber returns a ZooKeeper subscriber. ZooKeeper will start watching -// the given path for changes and update the Subscriber endpoints. 
-func NewSubscriber(c Client, path string, factory sd.Factory, logger log.Logger) (*Subscriber, error) { - s := &Subscriber{ - client: c, - path: path, - cache: cache.New(factory, logger), - logger: logger, - quitc: make(chan struct{}), - } - - err := s.client.CreateParentNodes(s.path) - if err != nil { - return nil, err - } - - instances, eventc, err := s.client.GetEntries(s.path) - if err != nil { - logger.Log("path", s.path, "msg", "failed to retrieve entries", "err", err) - return nil, err - } - logger.Log("path", s.path, "instances", len(instances)) - s.cache.Update(instances) - - go s.loop(eventc) - - return s, nil -} - -func (s *Subscriber) loop(eventc <-chan zk.Event) { - var ( - instances []string - err error - ) - for { - select { - case <-eventc: - // We received a path update notification. Call GetEntries to - // retrieve child node data, and set a new watch, as ZK watches are - // one-time triggers. - instances, eventc, err = s.client.GetEntries(s.path) - if err != nil { - s.logger.Log("path", s.path, "msg", "failed to retrieve entries", "err", err) - continue - } - s.logger.Log("path", s.path, "instances", len(instances)) - s.cache.Update(instances) - - case <-s.quitc: - return - } - } -} - -// Endpoints implements the Subscriber interface. -func (s *Subscriber) Endpoints() ([]endpoint.Endpoint, error) { - return s.cache.Endpoints(), nil -} - -// Stop terminates the Subscriber. -func (s *Subscriber) Stop() { - close(s.quitc) -} diff --git a/vendor/github.com/go-kit/kit/sd/zk/subscriber_test.go b/vendor/github.com/go-kit/kit/sd/zk/subscriber_test.go deleted file mode 100644 index 79bdb84..0000000 --- a/vendor/github.com/go-kit/kit/sd/zk/subscriber_test.go +++ /dev/null @@ -1,117 +0,0 @@ -package zk - -import ( - "testing" - "time" -) - -func TestSubscriber(t *testing.T) { - client := newFakeClient() - - s, err := NewSubscriber(client, path, newFactory(""), logger) - if err != nil { - t.Fatalf("failed to create new Subscriber: %v", err) - } - defer s.Stop() - - if _, err := s.Endpoints(); err != nil { - t.Fatal(err) - } -} - -func TestBadFactory(t *testing.T) { - client := newFakeClient() - - s, err := NewSubscriber(client, path, newFactory("kaboom"), logger) - if err != nil { - t.Fatalf("failed to create new Subscriber: %v", err) - } - defer s.Stop() - - // instance1 came online - client.AddService(path+"/instance1", "kaboom") - - // instance2 came online - client.AddService(path+"/instance2", "zookeeper_node_data") - - if err = asyncTest(100*time.Millisecond, 1, s); err != nil { - t.Error(err) - } -} - -func TestServiceUpdate(t *testing.T) { - client := newFakeClient() - - s, err := NewSubscriber(client, path, newFactory(""), logger) - if err != nil { - t.Fatalf("failed to create new Subscriber: %v", err) - } - defer s.Stop() - - endpoints, err := s.Endpoints() - if err != nil { - t.Fatal(err) - } - if want, have := 0, len(endpoints); want != have { - t.Errorf("want %d, have %d", want, have) - } - - // instance1 came online - client.AddService(path+"/instance1", "zookeeper_node_data1") - - // instance2 came online - client.AddService(path+"/instance2", "zookeeper_node_data2") - - // we should have 2 instances - if err = asyncTest(100*time.Millisecond, 2, s); err != nil { - t.Error(err) - } - - // TODO(pb): this bit is flaky - // - //// watch triggers an error... 
- //client.SendErrorOnWatch() - // - //// test if error was consumed - //if err = client.ErrorIsConsumedWithin(100 * time.Millisecond); err != nil { - // t.Error(err) - //} - - // instance3 came online - client.AddService(path+"/instance3", "zookeeper_node_data3") - - // we should have 3 instances - if err = asyncTest(100*time.Millisecond, 3, s); err != nil { - t.Error(err) - } - - // instance1 goes offline - client.RemoveService(path + "/instance1") - - // instance2 goes offline - client.RemoveService(path + "/instance2") - - // we should have 1 instance - if err = asyncTest(100*time.Millisecond, 1, s); err != nil { - t.Error(err) - } -} - -func TestBadSubscriberCreate(t *testing.T) { - client := newFakeClient() - client.SendErrorOnWatch() - s, err := NewSubscriber(client, path, newFactory(""), logger) - if err == nil { - t.Error("expected error on new Subscriber") - } - if s != nil { - t.Error("expected Subscriber not to be created") - } - s, err = NewSubscriber(client, "BadPath", newFactory(""), logger) - if err == nil { - t.Error("expected error on new Subscriber") - } - if s != nil { - t.Error("expected Subscriber not to be created") - } -} diff --git a/vendor/github.com/go-kit/kit/sd/zk/util_test.go b/vendor/github.com/go-kit/kit/sd/zk/util_test.go deleted file mode 100644 index c77fde9..0000000 --- a/vendor/github.com/go-kit/kit/sd/zk/util_test.go +++ /dev/null @@ -1,134 +0,0 @@ -package zk - -import ( - "errors" - "fmt" - "io" - "sync" - "time" - - "github.com/samuel/go-zookeeper/zk" - "golang.org/x/net/context" - - "github.com/go-kit/kit/endpoint" - "github.com/go-kit/kit/log" - "github.com/go-kit/kit/sd" -) - -var ( - path = "/gokit.test/service.name" - e = func(context.Context, interface{}) (interface{}, error) { return struct{}{}, nil } - logger = log.NewNopLogger() -) - -type fakeClient struct { - mtx sync.Mutex - ch chan zk.Event - responses map[string]string - result bool -} - -func newFakeClient() *fakeClient { - return &fakeClient{ - ch: make(chan zk.Event, 1), - responses: make(map[string]string), - result: true, - } -} - -func (c *fakeClient) CreateParentNodes(path string) error { - if path == "BadPath" { - return errors.New("dummy error") - } - return nil -} - -func (c *fakeClient) GetEntries(path string) ([]string, <-chan zk.Event, error) { - c.mtx.Lock() - defer c.mtx.Unlock() - if c.result == false { - c.result = true - return []string{}, c.ch, errors.New("dummy error") - } - responses := []string{} - for _, data := range c.responses { - responses = append(responses, data) - } - return responses, c.ch, nil -} - -func (c *fakeClient) AddService(node, data string) { - c.mtx.Lock() - defer c.mtx.Unlock() - c.responses[node] = data - c.ch <- zk.Event{} -} - -func (c *fakeClient) RemoveService(node string) { - c.mtx.Lock() - defer c.mtx.Unlock() - delete(c.responses, node) - c.ch <- zk.Event{} -} - -func (c *fakeClient) Register(s *Service) error { - return nil -} - -func (c *fakeClient) Deregister(s *Service) error { - return nil -} - -func (c *fakeClient) SendErrorOnWatch() { - c.mtx.Lock() - defer c.mtx.Unlock() - c.result = false - c.ch <- zk.Event{} -} - -func (c *fakeClient) ErrorIsConsumedWithin(timeout time.Duration) error { - t := time.After(timeout) - for { - select { - case <-t: - return fmt.Errorf("expected error not consumed after timeout %s", timeout) - default: - c.mtx.Lock() - if c.result == false { - c.mtx.Unlock() - return nil - } - c.mtx.Unlock() - } - } -} - -func (c *fakeClient) Stop() {} - -func newFactory(fakeError string) sd.Factory { - return 
func(instance string) (endpoint.Endpoint, io.Closer, error) { - if fakeError == instance { - return nil, nil, errors.New(fakeError) - } - return endpoint.Nop, nil, nil - } -} - -func asyncTest(timeout time.Duration, want int, s *Subscriber) (err error) { - var endpoints []endpoint.Endpoint - have := -1 // want can never be <0 - t := time.After(timeout) - for { - select { - case <-t: - return fmt.Errorf("want %d, have %d (timeout %s)", want, have, timeout.String()) - default: - endpoints, err = s.Endpoints() - have = len(endpoints) - if err != nil || want == have { - return - } - time.Sleep(timeout / 10) - } - } -} diff --git a/vendor/github.com/go-kit/kit/tracing/README.md b/vendor/github.com/go-kit/kit/tracing/README.md deleted file mode 100644 index d5e60e8..0000000 --- a/vendor/github.com/go-kit/kit/tracing/README.md +++ /dev/null @@ -1,60 +0,0 @@ -# package tracing - -`package tracing` provides [Dapper][]-style request tracing to services. - -## Rationale - -Request tracing is a fundamental building block for large distributed -applications. It's instrumental in understanding request flows, identifying -hot spots, and diagnosing errors. All microservice infrastructures will -benefit from request tracing; sufficiently large infrastructures will require -it. - -## OpenTracing - -Go kit builds on top of the [OpenTracing] API and uses the [opentracing-go] -package to provide tracing middlewares for its servers and clients. Currently -`kit/transport/http` and `kit/transport/grpc` transports are supported. - -Since [OpenTracing] is an upcoming standard API, Go kit should support a -multitude of tracing backends. If a Tracer implementation in Go for your -back-end exists, it should work out of the box. The following tracing back-ends -are known to work with Go kit through the OpenTracing interface and are -highlighted in the [addsvc] example. - - -### LightStep - -[LightStep] support is available through their standard Go package -[lightstep-tracer-go]. - -### AppDash - -[Appdash] support is available straight from their system repository in the -[appdash/opentracing] directory. - -### Zipkin - -[Zipkin] support is now available from the [zipkin-go-opentracing] package which -can be found at the [Open Zipkin GitHub] page. This means our old custom -`tracing/zipkin` package is now deprecated. In the `kit/tracing/zipkin` -directory you can still find the `docker-compose` script to bootstrap a Zipkin -development environment and a [README] detailing how to transition from the -old package to the new. 
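The tracing middlewares this README refers to are plain endpoint decorators; the `opentracing/endpoint.go` removed later in this diff defines `TraceServer` and `TraceClient`. A minimal usage sketch, mirroring the deleted `endpoint_test.go` and using `mocktracer` in place of a real backend such as Zipkin, LightStep, or Appdash (the operation name and endpoint body are placeholders):

```go
package main

import (
	"fmt"

	"github.com/opentracing/opentracing-go/mocktracer"
	"golang.org/x/net/context"

	"github.com/go-kit/kit/endpoint"
	kitot "github.com/go-kit/kit/tracing/opentracing"
)

func main() {
	// mocktracer stands in for a real OpenTracing backend.
	tracer := mocktracer.New()

	// A stand-in endpoint for some service method.
	var myEndpoint endpoint.Endpoint = func(ctx context.Context, request interface{}) (interface{}, error) {
		return "ok", nil
	}

	// TraceServer wraps the endpoint so every call runs inside a span
	// named "myOperation" (the name is arbitrary here).
	traced := kitot.TraceServer(tracer, "myOperation")(myEndpoint)

	if _, err := traced(context.Background(), struct{}{}); err != nil {
		fmt.Println("err:", err)
	}
	fmt.Println("finished spans:", len(tracer.FinishedSpans))
}
```

`TraceClient` is applied the same way on the caller side, so the same decorator pattern covers both halves of an RPC.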
- -[Dapper]: http://research.google.com/pubs/pub36356.html -[addsvc]:https://github.com/go-kit/kit/tree/master/examples/addsvc -[README]: https://github.com/go-kit/kit/blob/master/tracing/zipkin/README.md - -[OpenTracing]: http://opentracing.io -[opentracing-go]: https://github.com/opentracing/opentracing-go - -[Zipkin]: http://zipkin.io/ -[Open Zipkin GitHub]: https://github.com/openzipkin -[zipkin-go-opentracing]: https://github.com/openzipkin/zipkin-go-opentracing - -[Appdash]: https://github.com/sourcegraph/appdash -[appdash/opentracing]: https://github.com/sourcegraph/appdash/tree/master/opentracing - -[LightStep]: http://lightstep.com/ -[lightstep-tracer-go]: https://github.com/lightstep/lightstep-tracer-go diff --git a/vendor/github.com/go-kit/kit/tracing/opentracing/endpoint.go b/vendor/github.com/go-kit/kit/tracing/opentracing/endpoint.go deleted file mode 100644 index 9f626a7..0000000 --- a/vendor/github.com/go-kit/kit/tracing/opentracing/endpoint.go +++ /dev/null @@ -1,50 +0,0 @@ -package opentracing - -import ( - "github.com/opentracing/opentracing-go" - otext "github.com/opentracing/opentracing-go/ext" - "golang.org/x/net/context" - - "github.com/go-kit/kit/endpoint" -) - -// TraceServer returns a Middleware that wraps the `next` Endpoint in an -// OpenTracing Span called `operationName`. -// -// If `ctx` already has a Span, it is re-used and the operation name is -// overwritten. If `ctx` does not yet have a Span, one is created here. -func TraceServer(tracer opentracing.Tracer, operationName string) endpoint.Middleware { - return func(next endpoint.Endpoint) endpoint.Endpoint { - return func(ctx context.Context, request interface{}) (interface{}, error) { - serverSpan := opentracing.SpanFromContext(ctx) - if serverSpan == nil { - // All we can do is create a new root span. - serverSpan = tracer.StartSpan(operationName) - } else { - serverSpan.SetOperationName(operationName) - } - defer serverSpan.Finish() - otext.SpanKind.Set(serverSpan, otext.SpanKindRPCServer) - ctx = opentracing.ContextWithSpan(ctx, serverSpan) - return next(ctx, request) - } - } -} - -// TraceClient returns a Middleware that wraps the `next` Endpoint in an -// OpenTracing Span called `operationName`. -func TraceClient(tracer opentracing.Tracer, operationName string) endpoint.Middleware { - return func(next endpoint.Endpoint) endpoint.Endpoint { - return func(ctx context.Context, request interface{}) (interface{}, error) { - parentSpan := opentracing.SpanFromContext(ctx) - clientSpan := tracer.StartSpanWithOptions(opentracing.StartSpanOptions{ - OperationName: operationName, - Parent: parentSpan, // may be nil - }) - defer clientSpan.Finish() - otext.SpanKind.Set(clientSpan, otext.SpanKindRPCClient) - ctx = opentracing.ContextWithSpan(ctx, clientSpan) - return next(ctx, request) - } - } -} diff --git a/vendor/github.com/go-kit/kit/tracing/opentracing/endpoint_test.go b/vendor/github.com/go-kit/kit/tracing/opentracing/endpoint_test.go deleted file mode 100644 index 02da72e..0000000 --- a/vendor/github.com/go-kit/kit/tracing/opentracing/endpoint_test.go +++ /dev/null @@ -1,96 +0,0 @@ -package opentracing_test - -import ( - "testing" - - "github.com/opentracing/opentracing-go" - "github.com/opentracing/opentracing-go/mocktracer" - "golang.org/x/net/context" - - "github.com/go-kit/kit/endpoint" - kitot "github.com/go-kit/kit/tracing/opentracing" -) - -func TestTraceServer(t *testing.T) { - tracer := mocktracer.New() - - // Initialize the ctx with a nameless Span. 
- contextSpan := tracer.StartSpan("").(*mocktracer.MockSpan) - ctx := opentracing.ContextWithSpan(context.Background(), contextSpan) - - var innerEndpoint endpoint.Endpoint - innerEndpoint = func(context.Context, interface{}) (interface{}, error) { - return struct{}{}, nil - } - tracedEndpoint := kitot.TraceServer(tracer, "testOp")(innerEndpoint) - if _, err := tracedEndpoint(ctx, struct{}{}); err != nil { - t.Fatal(err) - } - if want, have := 1, len(tracer.FinishedSpans); want != have { - t.Fatalf("Want %v span(s), found %v", want, have) - } - - endpointSpan := tracer.FinishedSpans[0] - // Test that the op name is updated - if want, have := "testOp", endpointSpan.OperationName; want != have { - t.Fatalf("Want %q, have %q", want, have) - } - // ... and that the ID is unmodified. - if want, have := contextSpan.SpanID, endpointSpan.SpanID; want != have { - t.Errorf("Want SpanID %q, have %q", want, have) - } -} - -func TestTraceServerNoContextSpan(t *testing.T) { - tracer := mocktracer.New() - - var innerEndpoint endpoint.Endpoint - innerEndpoint = func(context.Context, interface{}) (interface{}, error) { - return struct{}{}, nil - } - tracedEndpoint := kitot.TraceServer(tracer, "testOp")(innerEndpoint) - // Empty/background context: - if _, err := tracedEndpoint(context.Background(), struct{}{}); err != nil { - t.Fatal(err) - } - // tracedEndpoint created a new Span: - if want, have := 1, len(tracer.FinishedSpans); want != have { - t.Fatalf("Want %v span(s), found %v", want, have) - } - - endpointSpan := tracer.FinishedSpans[0] - if want, have := "testOp", endpointSpan.OperationName; want != have { - t.Fatalf("Want %q, have %q", want, have) - } -} - -func TestTraceClient(t *testing.T) { - tracer := mocktracer.New() - - // Initialize the ctx with a parent Span. - parentSpan := tracer.StartSpan("parent").(*mocktracer.MockSpan) - defer parentSpan.Finish() - ctx := opentracing.ContextWithSpan(context.Background(), parentSpan) - - var innerEndpoint endpoint.Endpoint - innerEndpoint = func(context.Context, interface{}) (interface{}, error) { - return struct{}{}, nil - } - tracedEndpoint := kitot.TraceClient(tracer, "testOp")(innerEndpoint) - if _, err := tracedEndpoint(ctx, struct{}{}); err != nil { - t.Fatal(err) - } - // tracedEndpoint created a new Span: - if want, have := 1, len(tracer.FinishedSpans); want != have { - t.Fatalf("Want %v span(s), found %v", want, have) - } - - endpointSpan := tracer.FinishedSpans[0] - if want, have := "testOp", endpointSpan.OperationName; want != have { - t.Fatalf("Want %q, have %q", want, have) - } - // ... and that the parent ID is set appropriately. - if want, have := parentSpan.SpanID, endpointSpan.ParentID; want != have { - t.Errorf("Want ParentID %q, have %q", want, have) - } -} diff --git a/vendor/github.com/go-kit/kit/tracing/opentracing/grpc.go b/vendor/github.com/go-kit/kit/tracing/opentracing/grpc.go deleted file mode 100644 index 258dff2..0000000 --- a/vendor/github.com/go-kit/kit/tracing/opentracing/grpc.go +++ /dev/null @@ -1,74 +0,0 @@ -package opentracing - -import ( - "encoding/base64" - "strings" - - "github.com/opentracing/opentracing-go" - "golang.org/x/net/context" - "google.golang.org/grpc/metadata" - - "github.com/go-kit/kit/log" -) - -// ToGRPCRequest returns a grpc RequestFunc that injects an OpenTracing Span -// found in `ctx` into the grpc Metadata. If no such Span can be found, the -// RequestFunc is a noop. -// -// The logger is used to report errors and may be nil. 
-func ToGRPCRequest(tracer opentracing.Tracer, logger log.Logger) func(ctx context.Context, md *metadata.MD) context.Context { - return func(ctx context.Context, md *metadata.MD) context.Context { - if span := opentracing.SpanFromContext(ctx); span != nil { - // There's nothing we can do with an error here. - if err := tracer.Inject(span, opentracing.TextMap, metadataReaderWriter{md}); err != nil { - logger.Log("err", err) - } - } - return ctx - } -} - -// FromGRPCRequest returns a grpc RequestFunc that tries to join with an -// OpenTracing trace found in `req` and starts a new Span called -// `operationName` accordingly. If no trace could be found in `req`, the Span -// will be a trace root. The Span is incorporated in the returned Context and -// can be retrieved with opentracing.SpanFromContext(ctx). -// -// The logger is used to report errors and may be nil. -func FromGRPCRequest(tracer opentracing.Tracer, operationName string, logger log.Logger) func(ctx context.Context, md *metadata.MD) context.Context { - return func(ctx context.Context, md *metadata.MD) context.Context { - span, err := tracer.Join(operationName, opentracing.TextMap, metadataReaderWriter{md}) - if err != nil { - span = tracer.StartSpan(operationName) - if err != opentracing.ErrTraceNotFound { - logger.Log("err", err) - } - } - return opentracing.ContextWithSpan(ctx, span) - } -} - -// A type that conforms to opentracing.TextMapReader and -// opentracing.TextMapWriter. -type metadataReaderWriter struct { - *metadata.MD -} - -func (w metadataReaderWriter) Set(key, val string) { - key = strings.ToLower(key) - if strings.HasSuffix(key, "-bin") { - val = string(base64.StdEncoding.EncodeToString([]byte(val))) - } - (*w.MD)[key] = append((*w.MD)[key], val) -} - -func (w metadataReaderWriter) ForeachKey(handler func(key, val string) error) error { - for k, vals := range *w.MD { - for _, v := range vals { - if err := handler(k, v); err != nil { - return err - } - } - } - return nil -} diff --git a/vendor/github.com/go-kit/kit/tracing/opentracing/grpc_test.go b/vendor/github.com/go-kit/kit/tracing/opentracing/grpc_test.go deleted file mode 100644 index d9de49b..0000000 --- a/vendor/github.com/go-kit/kit/tracing/opentracing/grpc_test.go +++ /dev/null @@ -1,59 +0,0 @@ -package opentracing_test - -import ( - "testing" - - "github.com/opentracing/opentracing-go" - "github.com/opentracing/opentracing-go/mocktracer" - "golang.org/x/net/context" - "google.golang.org/grpc/metadata" - - kitot "github.com/go-kit/kit/tracing/opentracing" - "github.com/go-kit/kit/transport/grpc" -) - -func TestTraceGRPCRequestRoundtrip(t *testing.T) { - tracer := mocktracer.New() - - // Initialize the ctx with a Span to inject. - beforeSpan := tracer.StartSpan("to_inject").(*mocktracer.MockSpan) - defer beforeSpan.Finish() - beforeSpan.SetBaggageItem("baggage", "check") - beforeCtx := opentracing.ContextWithSpan(context.Background(), beforeSpan) - - var toGRPCFunc grpc.RequestFunc = kitot.ToGRPCRequest(tracer, nil) - md := metadata.Pairs() - // Call the RequestFunc. - afterCtx := toGRPCFunc(beforeCtx, &md) - - // The Span should not have changed. - afterSpan := opentracing.SpanFromContext(afterCtx) - if beforeSpan != afterSpan { - t.Errorf("Should not swap in a new span") - } - - // No spans should have finished yet. - if want, have := 0, len(tracer.FinishedSpans); want != have { - t.Errorf("Want %v span(s), found %v", want, have) - } - - // Use FromGRPCRequest to verify that we can join with the trace given MD. 
- var fromGRPCFunc grpc.RequestFunc = kitot.FromGRPCRequest(tracer, "joined", nil) - joinCtx := fromGRPCFunc(afterCtx, &md) - joinedSpan := opentracing.SpanFromContext(joinCtx).(*mocktracer.MockSpan) - - if joinedSpan.SpanID == beforeSpan.SpanID { - t.Error("SpanID should have changed", joinedSpan.SpanID, beforeSpan.SpanID) - } - - // Check that the parent/child relationship is as expected for the joined span. - if want, have := beforeSpan.SpanID, joinedSpan.ParentID; want != have { - t.Errorf("Want ParentID %q, have %q", want, have) - } - if want, have := "joined", joinedSpan.OperationName; want != have { - t.Errorf("Want %q, have %q", want, have) - } - if want, have := "check", joinedSpan.BaggageItem("baggage"); want != have { - t.Errorf("Want %q, have %q", want, have) - } -} diff --git a/vendor/github.com/go-kit/kit/tracing/opentracing/http.go b/vendor/github.com/go-kit/kit/tracing/opentracing/http.go deleted file mode 100644 index 63c59f8..0000000 --- a/vendor/github.com/go-kit/kit/tracing/opentracing/http.go +++ /dev/null @@ -1,73 +0,0 @@ -package opentracing - -import ( - "net" - "net/http" - "strconv" - - "github.com/opentracing/opentracing-go" - "github.com/opentracing/opentracing-go/ext" - "golang.org/x/net/context" - - "github.com/go-kit/kit/log" - kithttp "github.com/go-kit/kit/transport/http" -) - -// ToHTTPRequest returns an http RequestFunc that injects an OpenTracing Span -// found in `ctx` into the http headers. If no such Span can be found, the -// RequestFunc is a noop. -// -// The logger is used to report errors and may be nil. -func ToHTTPRequest(tracer opentracing.Tracer, logger log.Logger) kithttp.RequestFunc { - return func(ctx context.Context, req *http.Request) context.Context { - // Try to find a Span in the Context. - if span := opentracing.SpanFromContext(ctx); span != nil { - // Add standard OpenTracing tags. - ext.HTTPMethod.Set(span, req.URL.RequestURI()) - host, portString, err := net.SplitHostPort(req.URL.Host) - if err == nil { - ext.PeerHostname.Set(span, host) - if port, err := strconv.Atoi(portString); err != nil { - ext.PeerPort.Set(span, uint16(port)) - } - } else { - ext.PeerHostname.Set(span, req.URL.Host) - } - - // There's nothing we can do with any errors here. - if err = tracer.Inject( - span, - opentracing.TextMap, - opentracing.HTTPHeaderTextMapCarrier(req.Header), - ); err != nil { - logger.Log("err", err) - } - } - return ctx - } -} - -// FromHTTPRequest returns an http RequestFunc that tries to join with an -// OpenTracing trace found in `req` and starts a new Span called -// `operationName` accordingly. If no trace could be found in `req`, the Span -// will be a trace root. The Span is incorporated in the returned Context and -// can be retrieved with opentracing.SpanFromContext(ctx). -// -// The logger is used to report errors and may be nil. -func FromHTTPRequest(tracer opentracing.Tracer, operationName string, logger log.Logger) kithttp.RequestFunc { - return func(ctx context.Context, req *http.Request) context.Context { - // Try to join to a trace propagated in `req`. 
- span, err := tracer.Join( - operationName, - opentracing.TextMap, - opentracing.HTTPHeaderTextMapCarrier(req.Header), - ) - if err != nil { - span = tracer.StartSpan(operationName) - if err != opentracing.ErrTraceNotFound { - logger.Log("err", err) - } - } - return opentracing.ContextWithSpan(ctx, span) - } -} diff --git a/vendor/github.com/go-kit/kit/tracing/opentracing/http_test.go b/vendor/github.com/go-kit/kit/tracing/opentracing/http_test.go deleted file mode 100644 index d49acbf..0000000 --- a/vendor/github.com/go-kit/kit/tracing/opentracing/http_test.go +++ /dev/null @@ -1,59 +0,0 @@ -package opentracing_test - -import ( - "net/http" - "testing" - - "github.com/opentracing/opentracing-go" - "github.com/opentracing/opentracing-go/mocktracer" - "golang.org/x/net/context" - - kitot "github.com/go-kit/kit/tracing/opentracing" - kithttp "github.com/go-kit/kit/transport/http" -) - -func TestTraceHTTPRequestRoundtrip(t *testing.T) { - tracer := mocktracer.New() - - // Initialize the ctx with a Span to inject. - beforeSpan := tracer.StartSpan("to_inject").(*mocktracer.MockSpan) - defer beforeSpan.Finish() - beforeSpan.SetBaggageItem("baggage", "check") - beforeCtx := opentracing.ContextWithSpan(context.Background(), beforeSpan) - - var toHTTPFunc kithttp.RequestFunc = kitot.ToHTTPRequest(tracer, nil) - req, _ := http.NewRequest("GET", "http://test.biz/url", nil) - // Call the RequestFunc. - afterCtx := toHTTPFunc(beforeCtx, req) - - // The Span should not have changed. - afterSpan := opentracing.SpanFromContext(afterCtx) - if beforeSpan != afterSpan { - t.Errorf("Should not swap in a new span") - } - - // No spans should have finished yet. - if want, have := 0, len(tracer.FinishedSpans); want != have { - t.Errorf("Want %v span(s), found %v", want, have) - } - - // Use FromHTTPRequest to verify that we can join with the trace given a req. - var fromHTTPFunc kithttp.RequestFunc = kitot.FromHTTPRequest(tracer, "joined", nil) - joinCtx := fromHTTPFunc(afterCtx, req) - joinedSpan := opentracing.SpanFromContext(joinCtx).(*mocktracer.MockSpan) - - if joinedSpan.SpanID == beforeSpan.SpanID { - t.Error("SpanID should have changed", joinedSpan.SpanID, beforeSpan.SpanID) - } - - // Check that the parent/child relationship is as expected for the joined span. - if want, have := beforeSpan.SpanID, joinedSpan.ParentID; want != have { - t.Errorf("Want ParentID %q, have %q", want, have) - } - if want, have := "joined", joinedSpan.OperationName; want != have { - t.Errorf("Want %q, have %q", want, have) - } - if want, have := "check", joinedSpan.BaggageItem("baggage"); want != have { - t.Errorf("Want %q, have %q", want, have) - } -} diff --git a/vendor/github.com/go-kit/kit/tracing/zipkin/README.md b/vendor/github.com/go-kit/kit/tracing/zipkin/README.md deleted file mode 100644 index 0606ea9..0000000 --- a/vendor/github.com/go-kit/kit/tracing/zipkin/README.md +++ /dev/null @@ -1,168 +0,0 @@ -# Zipkin - -## Development and Testing Set-up - -Setting up [Zipkin] is not an easy thing to do. It will also demand quite some -resources. To help you get started with development and testing we've made a -docker-compose file available for running a full Zipkin stack. - -You will need [docker-compose] 1.6.0+ and [docker-engine] 1.10.0+. - -If running on Linux `HOSTNAME` can be set to `localhost`. If running on Mac OS X -or Windows you probably need to set the hostname environment variable to the -hostname of the VM running the docker containers. 
- -```sh -cd tracing/zipkin -HOSTNAME=localhost docker-compose -f docker-compose-zipkin.yml up -``` - -[Zipkin]: http://zipkin.io/ -[docker-compose]: https://docs.docker.com/compose/ -[docker-engine]: https://docs.docker.com/engine/ - -As mentioned the [Zipkin] stack is quite heavy and may take a few minutes to -fully initialize. - -The following services have been set-up to run: -- Apache Cassandra (port: 9160 (thrift), 9042 (native)) -- Apache ZooKeeper (port: 2181) -- Apache Kafka (port: 9092) -- Zipkin Collector -- Zipkin Query -- Zipkin Web (port: 8080, 9990) - - -## Middleware Usage - -Follow the [addsvc] example to check out how to wire the Zipkin Middleware. The -changes should be relatively minor. - -The [zipkin-go-opentracing] package has support for Kafka and Scribe collectors -as well as using Go Kit's [Log] package for logging. - -### Span per Node vs. Span per RPC -By default Zipkin V1 considers either side of an RPC to have the same identity -and differs in that respect from many other tracing systems which consider the -caller to be the parent and the receiver the child. The OpenTracing -specification does not dictate one model over the other, but the Zipkin team is -looking into these [single-host-spans] to potentially bring Zipkin more in-line -with the other tracing systems. - -[single-host-spans]: https://github.com/openzipkin/zipkin/issues/963 - -In case of a `span per node` the receiver will create a child span from the -propagated parent span like this: - -``` -Span per Node propagation and identities - -CALLER: RECEIVER: ---------------------------------- -traceId -> traceId - spanId (new) -spanId -> parentSpanId -parentSpanId -``` - -**Note:** most tracing implementations supporting the `span per node` model -therefore do not propagate their `parentSpanID` as its not needed. - -A typical Zipkin implementation will use the `span per RPC` model and recreate -the span identity from the caller on the receiver's end and then annotates its -values on top of it. Propagation will happen like this: - -``` -Span per RPC propagation and identities - -CALLER: RECEIVER: ---------------------------------- -traceId -> traceId -spanId -> spanId -parentSpanId -> parentSpanId -``` - -The [zipkin-go-opentracing] implementation allows you to choose which model you -wish to use. Make sure you select the same model consistently for all your -services that are required to communicate with each other or you will have trace -propagation issues. If using non OpenTracing / legacy instrumentation, it's -probably best to use the `span per RPC call` model. - -To adhere to the more common tracing philosophy of `span per node`, the Tracer -defaults to `span per node`. To set the `span per RPC call` mode start your -tracer like this: - -```go -tracer, err = zipkin.NewTracer( - zipkin.NewRecorder(...), - zipkin.ClientServerSameSpan(true), -) -``` - -[zipkin-go-opentracing]: https://github.com/openzipkin/zipkin-go-opentracing -[addsvc]:https://github.com/go-kit/kit/tree/master/examples/addsvc -[Log]: https://github.com/go-kit/kit/tree/master/log - -### Tracing Resources - -In our legacy implementation we had the `NewChildSpan` method to allow -annotation of resources such as databases, caches and other services that do not -have server side tracing support. Since OpenTracing has no specific method of -dealing with these items explicitely that is compatible with Zipkin's `SA` -annotation, the [zipkin-go-opentracing] has implemented support using the -OpenTracing Tags system. 
Here is an example of how one would be able to record -a resource span compatible with standard OpenTracing and triggering an `SA` -annotation in [zipkin-go-opentracing]: - -```go -// you need to import the ext package for the Tag helper functions -import ( - "github.com/opentracing/opentracing-go" - "github.com/opentracing/opentracing-go/ext" -) - -func (svc *Service) GetMeSomeExamples(ctx context.Context, ...) ([]Examples, error) { - // Example of annotating a database query: - var ( - serviceName = "MySQL" - serviceHost = "mysql.example.com" - servicePort = uint16(3306) - queryLabel = "GetExamplesByParam" - query = "select * from example where param = 'value'" - ) - - // retrieve the parent span, if not found create a new trace - parentSpan := opentracing.SpanFromContext(ctx) - if parentSpan == nil { - parentSpan = opentracing.StartSpan(queryLabel) - } - - // create a new span to record the resource interaction - span := opentracing.StartChildSpan(parentSpan, queryLabel) - - // span.kind "resource" triggers SA annotation - ext.SpanKind.Set(span, "resource") - - // this will label the span's service & hostPort (called Endpoint in Zipkin) - ext.PeerService.Set(span, serviceName) - ext.PeerHostname,Set(span, serviceHost) - ext.PeerPort.Set(span, servicePort) - - // a Tag is the equivalent of a Zipkin Binary Annotation (key:value pair) - span.SetTag("query", query) - - // a LogEvent is the equivalent of a Zipkin Annotation (timestamped) - span.LogEvent("query:start") - - // do the actual query... - - // let's annotate the end... - span.LogEvent("query:end") - - // we're done with this span. - span.Finish() - - // do other stuff - ... -} -``` diff --git a/vendor/github.com/go-kit/kit/tracing/zipkin/docker-compose-zipkin.yml b/vendor/github.com/go-kit/kit/tracing/zipkin/docker-compose-zipkin.yml deleted file mode 100644 index 6e0fa4c..0000000 --- a/vendor/github.com/go-kit/kit/tracing/zipkin/docker-compose-zipkin.yml +++ /dev/null @@ -1,76 +0,0 @@ - # This file uses the version 2 docker-compose file format, described here: -# https://docs.docker.com/compose/compose-file/#version-2 -# -# It runs the zipkin-cassandra, zipkin-collector, zipkin-query, zipkin-web, and -# zookeeper-exhibitor containers. 
-# -# On linux you probably want to start this composition like this: -# -# HOSTNAME=localhost docker-compose -f docker-compose-zipkin.yml up -# -# On OS X you will probably start like this: -# -# HOSTNAME=default docker-compose -f docker-compose-zipkin.yml up - -version: '2' -services: - cassandra: - image: openzipkin/zipkin-cassandra:1.39.4 - network_mode: host - - zookeeper: - image: mbabineau/zookeeper-exhibitor:latest - network_mode: host - environment: - HOSTNAME: ${HOSTNAME} - - kafka: - image: wurstmeister/kafka - network_mode: host - environment: - KAFKA_CREATE_TOPICS: "zipkin:1:1" - KAFKA_ZOOKEEPER_CONNECTION_TIMEOUT_MS: 60000 - KAFKA_ADVERTISED_PORT: 9092 - KAFKA_ADVERTISED_HOST_NAME: ${HOSTNAME} - KAFKA_ZOOKEEPER_CONNECT: ${HOSTNAME}:2181 - depends_on: - - zookeeper - - collector: - image: openzipkin/zipkin-collector:1.39.4 - network_mode: host - environment: - STORAGE_TYPE: cassandra - TRANSPORT_TYPE: kafka - CASSANDRA_CONTACT_POINTS: ${HOSTNAME} - KAFKA_ZOOKEEPER: ${HOSTNAME}:2181 - METADATA_BROKER_LIST: ${HOSTNAME}:9092 - depends_on: - - cassandra - - kafka - - query: - image: openzipkin/zipkin-query:1.39.4 - network_mode: host - environment: - STORAGE_TYPE: cassandra - TRANSPORT_TYPE: kafka - CASSANDRA_CONTACT_POINTS: ${HOSTNAME} - KAFKA_ZOOKEEPER: ${HOSTNAME}:2181 - METADATA_BROKER_LIST: ${HOSTNAME}:9092 - depends_on: - - cassandra - - kafka - - web: - image: openzipkin/zipkin-web:1.39.4 - network_mode: host - environment: - TRANSPORT_TYPE: kafka - KAFKA_ZOOKEEPER: ${HOSTNAME}:2181 - METADATA_BROKER_LIST: ${HOSTNAME}:9092 - QUERY_PORT_9411_TCP_ADDR: ${HOSTNAME} - ROOTURL: http://${HOSTNAME}:8080 - depends_on: - - cassandra - - kafka diff --git a/vendor/github.com/go-kit/kit/transport/grpc/README.md b/vendor/github.com/go-kit/kit/transport/grpc/README.md deleted file mode 100644 index 6c35583..0000000 --- a/vendor/github.com/go-kit/kit/transport/grpc/README.md +++ /dev/null @@ -1,37 +0,0 @@ -# grpc - -[gRPC](http://www.grpc.io/) is an excellent, modern IDL and transport for microservices. -If you're starting a greenfield project, Go kit strongly recommends gRPC as your default transport. -And using gRPC and Go kit together is very simple. - -First, define your service using protobuf3. -This is explained [in gRPC documentation](http://www.grpc.io/docs/#defining-a-service). -See [add.proto](https://github.com/go-kit/kit/blob/ec8b02591ee873433565a1ae9d317353412d1d27/examples/addsvc/pb/add.proto) for an example. -Make sure the proto definition matches your service's Go kit (interface) definition. - -Next, get the protoc compiler. -Unfortunately, this needs to be done from source. -Fortunately, it's pretty straightforward. - -``` -brew install autoconf automake libtool -git clone https://github.com/google/protobuf -cd protobuf -./autogen.sh ; ./configure ; make ; make install -``` - -Then, compile your service definition, from .proto to .go. - -``` -protoc add.proto --go_out=plugins=grpc:. -``` - -Finally, write a tiny binding from your service definition to the gRPC definition. -It's a simple conversion from one domain to another. -See [grpc_binding.go](https://github.com/go-kit/kit/blob/ec8b02591ee873433565a1ae9d317353412d1d27/examples/addsvc/grpc_binding.go) for an example. - -That's it! -The gRPC binding can be bound to a listener and serve normal gRPC requests. -And within your service, you can use standard Go kit components and idioms. -See [addsvc](https://github.com/go-kit/kit/tree/master/examples/addsvc) for a complete working example with gRPC support. 
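For orientation, here is a minimal sketch of what such a binding can look like. The `pb` package with its `SumRequest`/`SumReply` types is a hypothetical stand-in for whatever protoc generates from your .proto file; only the `grpctransport.Handler` interface and its `ServeGRPC` method come from this package.

```go
package main

import (
	"golang.org/x/net/context"

	grpctransport "github.com/go-kit/kit/transport/grpc"

	pb "github.com/my-org/my-repo/pb" // hypothetical generated protobuf package
)

// sumServer satisfies the generated pb.SumServer interface by delegating to a
// go-kit Handler built with grpctransport.NewServer.
type sumServer struct {
	sum grpctransport.Handler
}

// Sum converts the gRPC request into a call on the wrapped Handler and asserts
// the reply back to the generated protobuf response type.
func (s *sumServer) Sum(ctx context.Context, req *pb.SumRequest) (*pb.SumReply, error) {
	_, reply, err := s.sum.ServeGRPC(ctx, req)
	if err != nil {
		return nil, err
	}
	return reply.(*pb.SumReply), nil
}
```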
-And remember: Go kit services can support multiple transports simultaneously. diff --git a/vendor/github.com/go-kit/kit/transport/grpc/client.go b/vendor/github.com/go-kit/kit/transport/grpc/client.go deleted file mode 100644 index 2dffb4f..0000000 --- a/vendor/github.com/go-kit/kit/transport/grpc/client.go +++ /dev/null @@ -1,102 +0,0 @@ -package grpc - -import ( - "fmt" - "reflect" - "strings" - - "golang.org/x/net/context" - "google.golang.org/grpc" - "google.golang.org/grpc/metadata" - - "github.com/go-kit/kit/endpoint" -) - -// Client wraps a gRPC connection and provides a method that implements -// endpoint.Endpoint. -type Client struct { - client *grpc.ClientConn - serviceName string - method string - enc EncodeRequestFunc - dec DecodeResponseFunc - grpcReply reflect.Type - before []RequestFunc -} - -// NewClient constructs a usable Client for a single remote endpoint. -// Pass an zero-value protobuf message of the RPC response type as -// the grpcReply argument. -func NewClient( - cc *grpc.ClientConn, - serviceName string, - method string, - enc EncodeRequestFunc, - dec DecodeResponseFunc, - grpcReply interface{}, - options ...ClientOption, -) *Client { - if strings.IndexByte(serviceName, '.') == -1 { - serviceName = "pb." + serviceName - } - c := &Client{ - client: cc, - method: fmt.Sprintf("/%s/%s", serviceName, method), - enc: enc, - dec: dec, - // We are using reflect.Indirect here to allow both reply structs and - // pointers to these reply structs. New consumers of the client should - // use structs directly, while existing consumers will not break if they - // remain to use pointers to structs. - grpcReply: reflect.TypeOf( - reflect.Indirect( - reflect.ValueOf(grpcReply), - ).Interface(), - ), - before: []RequestFunc{}, - } - for _, option := range options { - option(c) - } - return c -} - -// ClientOption sets an optional parameter for clients. -type ClientOption func(*Client) - -// ClientBefore sets the RequestFuncs that are applied to the outgoing gRPC -// request before it's invoked. -func ClientBefore(before ...RequestFunc) ClientOption { - return func(c *Client) { c.before = before } -} - -// Endpoint returns a usable endpoint that will invoke the gRPC specified by the -// client. -func (c Client) Endpoint() endpoint.Endpoint { - return func(ctx context.Context, request interface{}) (interface{}, error) { - ctx, cancel := context.WithCancel(ctx) - defer cancel() - - req, err := c.enc(ctx, request) - if err != nil { - return nil, fmt.Errorf("Encode: %v", err) - } - - md := &metadata.MD{} - for _, f := range c.before { - ctx = f(ctx, md) - } - ctx = metadata.NewContext(ctx, *md) - - grpcReply := reflect.New(c.grpcReply).Interface() - if err = grpc.Invoke(ctx, c.method, req, grpcReply, c.client); err != nil { - return nil, fmt.Errorf("Invoke: %v", err) - } - - response, err := c.dec(ctx, grpcReply) - if err != nil { - return nil, fmt.Errorf("Decode: %v", err) - } - return response, nil - } -} diff --git a/vendor/github.com/go-kit/kit/transport/grpc/encode_decode.go b/vendor/github.com/go-kit/kit/transport/grpc/encode_decode.go deleted file mode 100644 index fb16c99..0000000 --- a/vendor/github.com/go-kit/kit/transport/grpc/encode_decode.go +++ /dev/null @@ -1,27 +0,0 @@ -package grpc - -import "golang.org/x/net/context" - -// DecodeRequestFunc extracts a user-domain request object from a gRPC request. -// It's designed to be used in gRPC servers, for server-side endpoints. 
One -// straightforward DecodeRequestFunc could be something that -// decodes from the gRPC request message to the concrete request type. -type DecodeRequestFunc func(context.Context, interface{}) (request interface{}, err error) - -// EncodeRequestFunc encodes the passed request object into the gRPC request -// object. It's designed to be used in gRPC clients, for client-side -// endpoints. One straightforward EncodeRequestFunc could something that -// encodes the object directly to the gRPC request message. -type EncodeRequestFunc func(context.Context, interface{}) (request interface{}, err error) - -// EncodeResponseFunc encodes the passed response object to the gRPC response -// message. It's designed to be used in gRPC servers, for server-side -// endpoints. One straightforward EncodeResponseFunc could be something that -// encodes the object directly to the gRPC response message. -type EncodeResponseFunc func(context.Context, interface{}) (response interface{}, err error) - -// DecodeResponseFunc extracts a user-domain response object from a gRPC -// response object. It's designed to be used in gRPC clients, for client-side -// endpoints. One straightforward DecodeResponseFunc could be something that -// decodes from the gRPC response message to the concrete response type. -type DecodeResponseFunc func(context.Context, interface{}) (response interface{}, err error) diff --git a/vendor/github.com/go-kit/kit/transport/grpc/request_response_funcs.go b/vendor/github.com/go-kit/kit/transport/grpc/request_response_funcs.go deleted file mode 100644 index aceb84f..0000000 --- a/vendor/github.com/go-kit/kit/transport/grpc/request_response_funcs.go +++ /dev/null @@ -1,53 +0,0 @@ -package grpc - -import ( - "encoding/base64" - "strings" - - "golang.org/x/net/context" - "google.golang.org/grpc/metadata" -) - -const ( - binHdrSuffix = "-bin" -) - -// RequestFunc may take information from an gRPC request and put it into a -// request context. In Servers, BeforeFuncs are executed prior to invoking the -// endpoint. In Clients, BeforeFuncs are executed after creating the request -// but prior to invoking the gRPC client. -type RequestFunc func(context.Context, *metadata.MD) context.Context - -// ResponseFunc may take information from a request context and use it to -// manipulate the gRPC metadata header. ResponseFuncs are only executed in -// servers, after invoking the endpoint but prior to writing a response. -type ResponseFunc func(context.Context, *metadata.MD) - -// SetResponseHeader returns a ResponseFunc that sets the specified metadata -// key-value pair. -func SetResponseHeader(key, val string) ResponseFunc { - return func(_ context.Context, md *metadata.MD) { - key, val := EncodeKeyValue(key, val) - (*md)[key] = append((*md)[key], val) - } -} - -// SetRequestHeader returns a RequestFunc that sets the specified metadata -// key-value pair. -func SetRequestHeader(key, val string) RequestFunc { - return func(ctx context.Context, md *metadata.MD) context.Context { - key, val := EncodeKeyValue(key, val) - (*md)[key] = append((*md)[key], val) - return ctx - } -} - -// EncodeKeyValue sanitizes a key-value pair for use in gRPC metadata headers. 
-func EncodeKeyValue(key, val string) (string, string) { - key = strings.ToLower(key) - if strings.HasSuffix(key, binHdrSuffix) { - v := base64.StdEncoding.EncodeToString([]byte(val)) - val = string(v) - } - return key, val -} diff --git a/vendor/github.com/go-kit/kit/transport/grpc/server.go b/vendor/github.com/go-kit/kit/transport/grpc/server.go deleted file mode 100644 index acb542a..0000000 --- a/vendor/github.com/go-kit/kit/transport/grpc/server.go +++ /dev/null @@ -1,129 +0,0 @@ -package grpc - -import ( - "golang.org/x/net/context" - "google.golang.org/grpc/metadata" - - "github.com/go-kit/kit/endpoint" - "github.com/go-kit/kit/log" -) - -// Handler which should be called from the grpc binding of the service -// implementation. The incoming request parameter, and returned response -// parameter, are both gRPC types, not user-domain. -type Handler interface { - ServeGRPC(ctx context.Context, request interface{}) (context.Context, interface{}, error) -} - -// Server wraps an endpoint and implements grpc.Handler. -type Server struct { - ctx context.Context - e endpoint.Endpoint - dec DecodeRequestFunc - enc EncodeResponseFunc - before []RequestFunc - after []ResponseFunc - logger log.Logger -} - -// NewServer constructs a new server, which implements wraps the provided -// endpoint and implements the Handler interface. Consumers should write -// bindings that adapt the concrete gRPC methods from their compiled protobuf -// definitions to individual handlers. Request and response objects are from the -// caller business domain, not gRPC request and reply types. -func NewServer( - ctx context.Context, - e endpoint.Endpoint, - dec DecodeRequestFunc, - enc EncodeResponseFunc, - options ...ServerOption, -) *Server { - s := &Server{ - ctx: ctx, - e: e, - dec: dec, - enc: enc, - logger: log.NewNopLogger(), - } - for _, option := range options { - option(s) - } - return s -} - -// ServerOption sets an optional parameter for servers. -type ServerOption func(*Server) - -// ServerBefore functions are executed on the HTTP request object before the -// request is decoded. -func ServerBefore(before ...RequestFunc) ServerOption { - return func(s *Server) { s.before = before } -} - -// ServerAfter functions are executed on the HTTP response writer after the -// endpoint is invoked, but before anything is written to the client. -func ServerAfter(after ...ResponseFunc) ServerOption { - return func(s *Server) { s.after = after } -} - -// ServerErrorLogger is used to log non-terminal errors. By default, no errors -// are logged. -func ServerErrorLogger(logger log.Logger) ServerOption { - return func(s *Server) { s.logger = logger } -} - -// ServeGRPC implements the Handler interface. -func (s Server) ServeGRPC(grpcCtx context.Context, req interface{}) (context.Context, interface{}, error) { - ctx, cancel := context.WithCancel(s.ctx) - defer cancel() - - // Retrieve gRPC metadata. - md, ok := metadata.FromContext(grpcCtx) - if !ok { - md = metadata.MD{} - } - - for _, f := range s.before { - ctx = f(ctx, &md) - } - - // Store potentially updated metadata in the gRPC context. - grpcCtx = metadata.NewContext(grpcCtx, md) - - request, err := s.dec(grpcCtx, req) - if err != nil { - s.logger.Log("err", err) - return grpcCtx, nil, BadRequestError{err} - } - - response, err := s.e(ctx, request) - if err != nil { - s.logger.Log("err", err) - return grpcCtx, nil, err - } - - for _, f := range s.after { - f(ctx, &md) - } - - // Store potentially updated metadata in the gRPC context. 
- grpcCtx = metadata.NewContext(grpcCtx, md) - - grpcResp, err := s.enc(grpcCtx, response) - if err != nil { - s.logger.Log("err", err) - return grpcCtx, nil, err - } - - return grpcCtx, grpcResp, nil -} - -// BadRequestError is an error in decoding the request. -type BadRequestError struct { - Err error -} - -// Error implements the error interface. -func (err BadRequestError) Error() string { - return err.Err.Error() -} diff --git a/vendor/github.com/go-kit/kit/transport/http/client.go b/vendor/github.com/go-kit/kit/transport/http/client.go deleted file mode 100644 index a8aca26..0000000 --- a/vendor/github.com/go-kit/kit/transport/http/client.go +++ /dev/null @@ -1,115 +0,0 @@ -package http - -import ( - "net/http" - "net/url" - - "golang.org/x/net/context" - "golang.org/x/net/context/ctxhttp" - - "github.com/go-kit/kit/endpoint" -) - -// Client wraps a URL and provides a method that implements endpoint.Endpoint. -type Client struct { - client *http.Client - method string - tgt *url.URL - enc EncodeRequestFunc - dec DecodeResponseFunc - before []RequestFunc - after []ClientResponseFunc - bufferedStream bool -} - -// NewClient constructs a usable Client for a single remote method. -func NewClient( - method string, - tgt *url.URL, - enc EncodeRequestFunc, - dec DecodeResponseFunc, - options ...ClientOption, -) *Client { - c := &Client{ - client: http.DefaultClient, - method: method, - tgt: tgt, - enc: enc, - dec: dec, - before: []RequestFunc{}, - after: []ClientResponseFunc{}, - bufferedStream: false, - } - for _, option := range options { - option(c) - } - return c -} - -// ClientOption sets an optional parameter for clients. -type ClientOption func(*Client) - -// SetClient sets the underlying HTTP client used for requests. -// By default, http.DefaultClient is used. -func SetClient(client *http.Client) ClientOption { - return func(c *Client) { c.client = client } -} - -// ClientBefore sets the RequestFuncs that are applied to the outgoing HTTP -// request before it's invoked. -func ClientBefore(before ...RequestFunc) ClientOption { - return func(c *Client) { c.before = before } -} - -// ClientAfter sets the ClientResponseFuncs applied to the incoming HTTP -// request prior to it being decoded. This is useful for obtaining anything off -// of the response and adding onto the context prior to decoding. -func ClientAfter(after ...ClientResponseFunc) ClientOption { - return func(c *Client) { c.after = after } -} - -// BufferedStream sets whether the Response.Body is left open, allowing it -// to be read from later. Useful for transporting a file as a buffered stream. -func BufferedStream(buffered bool) ClientOption { - return func(c *Client) { c.bufferedStream = buffered } -} - -// Endpoint returns a usable endpoint that invokes the remote endpoint. 
-func (c Client) Endpoint() endpoint.Endpoint { - return func(ctx context.Context, request interface{}) (interface{}, error) { - ctx, cancel := context.WithCancel(ctx) - defer cancel() - - req, err := http.NewRequest(c.method, c.tgt.String(), nil) - if err != nil { - return nil, Error{Domain: DomainNewRequest, Err: err} - } - - if err = c.enc(ctx, req, request); err != nil { - return nil, Error{Domain: DomainEncode, Err: err} - } - - for _, f := range c.before { - ctx = f(ctx, req) - } - - resp, err := ctxhttp.Do(ctx, c.client, req) - if err != nil { - return nil, Error{Domain: DomainDo, Err: err} - } - if !c.bufferedStream { - defer resp.Body.Close() - } - - for _, f := range c.after { - ctx = f(ctx, resp) - } - - response, err := c.dec(ctx, resp) - if err != nil { - return nil, Error{Domain: DomainDecode, Err: err} - } - - return response, nil - } -} diff --git a/vendor/github.com/go-kit/kit/transport/http/client_test.go b/vendor/github.com/go-kit/kit/transport/http/client_test.go deleted file mode 100644 index 5569681..0000000 --- a/vendor/github.com/go-kit/kit/transport/http/client_test.go +++ /dev/null @@ -1,149 +0,0 @@ -package http_test - -import ( - "io" - "net/http" - "net/http/httptest" - "net/url" - "testing" - "time" - - "golang.org/x/net/context" - - httptransport "github.com/go-kit/kit/transport/http" -) - -type TestResponse struct { - Body io.ReadCloser - String string -} - -func TestHTTPClient(t *testing.T) { - var ( - testbody = "testbody" - encode = func(context.Context, *http.Request, interface{}) error { return nil } - decode = func(_ context.Context, r *http.Response) (interface{}, error) { - buffer := make([]byte, len(testbody)) - r.Body.Read(buffer) - return TestResponse{r.Body, string(buffer)}, nil - } - headers = make(chan string, 1) - headerKey = "X-Foo" - headerVal = "abcde" - afterHeaderKey = "X-The-Dude" - afterHeaderVal = "Abides" - afterVal = "" - afterFunc = func(ctx context.Context, r *http.Response) context.Context { - afterVal = r.Header.Get(afterHeaderKey) - return ctx - } - ) - - server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - headers <- r.Header.Get(headerKey) - w.Header().Set(afterHeaderKey, afterHeaderVal) - w.WriteHeader(http.StatusOK) - w.Write([]byte(testbody)) - })) - - client := httptransport.NewClient( - "GET", - mustParse(server.URL), - encode, - decode, - httptransport.ClientBefore(httptransport.SetRequestHeader(headerKey, headerVal)), - httptransport.ClientAfter(afterFunc), - ) - - res, err := client.Endpoint()(context.Background(), struct{}{}) - if err != nil { - t.Fatal(err) - } - - var have string - select { - case have = <-headers: - case <-time.After(time.Millisecond): - t.Fatalf("timeout waiting for %s", headerKey) - } - // Check that Request Header was successfully received - if want := headerVal; want != have { - t.Errorf("want %q, have %q", want, have) - } - - // Check that Response header set from server was received in SetClientAfter - if want, have := afterVal, afterHeaderVal; want != have { - t.Errorf("want %q, have %q", want, have) - } - - // Check that the response was successfully decoded - response, ok := res.(TestResponse) - if !ok { - t.Fatal("response should be TestResponse") - } - if want, have := testbody, response.String; want != have { - t.Errorf("want %q, have %q", want, have) - } - - // Check that response body was closed - b := make([]byte, 1) - _, err = response.Body.Read(b) - if err == nil { - t.Fatal("wanted error, got none") - } - if doNotWant, have := io.EOF, err; 
doNotWant == have { - t.Errorf("do not want %q, have %q", doNotWant, have) - } -} - -func TestHTTPClientBufferedStream(t *testing.T) { - var ( - testbody = "testbody" - encode = func(context.Context, *http.Request, interface{}) error { return nil } - decode = func(_ context.Context, r *http.Response) (interface{}, error) { - return TestResponse{r.Body, ""}, nil - } - ) - - server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - w.WriteHeader(http.StatusOK) - w.Write([]byte(testbody)) - })) - - client := httptransport.NewClient( - "GET", - mustParse(server.URL), - encode, - decode, - httptransport.BufferedStream(true), - ) - - res, err := client.Endpoint()(context.Background(), struct{}{}) - if err != nil { - t.Fatal(err) - } - - // Check that the response was successfully decoded - response, ok := res.(TestResponse) - if !ok { - t.Fatal("response should be TestResponse") - } - - // Check that response body was NOT closed - b := make([]byte, len(testbody)) - _, err = response.Body.Read(b) - if want, have := io.EOF, err; have != want { - t.Fatalf("want %q, have %q", want, have) - } - if want, have := testbody, string(b); want != have { - t.Errorf("want %q, have %q", want, have) - } -} - -func mustParse(s string) *url.URL { - u, err := url.Parse(s) - if err != nil { - panic(err) - } - return u -} diff --git a/vendor/github.com/go-kit/kit/transport/http/encode_decode.go b/vendor/github.com/go-kit/kit/transport/http/encode_decode.go deleted file mode 100644 index 8b8f6d7..0000000 --- a/vendor/github.com/go-kit/kit/transport/http/encode_decode.go +++ /dev/null @@ -1,31 +0,0 @@ -package http - -import ( - "net/http" - - "golang.org/x/net/context" -) - -// DecodeRequestFunc extracts a user-domain request object from an HTTP -// request object. It's designed to be used in HTTP servers, for server-side -// endpoints. One straightforward DecodeRequestFunc could be something that -// JSON decodes from the request body to the concrete response type. -type DecodeRequestFunc func(context.Context, *http.Request) (request interface{}, err error) - -// EncodeRequestFunc encodes the passed request object into the HTTP request -// object. It's designed to be used in HTTP clients, for client-side -// endpoints. One straightforward EncodeRequestFunc could something that JSON -// encodes the object directly to the request body. -type EncodeRequestFunc func(context.Context, *http.Request, interface{}) error - -// EncodeResponseFunc encodes the passed response object to the HTTP response -// writer. It's designed to be used in HTTP servers, for server-side -// endpoints. One straightforward EncodeResponseFunc could be something that -// JSON encodes the object directly to the response body. -type EncodeResponseFunc func(context.Context, http.ResponseWriter, interface{}) error - -// DecodeResponseFunc extracts a user-domain response object from an HTTP -// response object. It's designed to be used in HTTP clients, for client-side -// endpoints. One straightforward DecodeResponseFunc could be something that -// JSON decodes from the response body to the concrete response type. 
-type DecodeResponseFunc func(context.Context, *http.Response) (response interface{}, err error) diff --git a/vendor/github.com/go-kit/kit/transport/http/err.go b/vendor/github.com/go-kit/kit/transport/http/err.go deleted file mode 100644 index 15270c6..0000000 --- a/vendor/github.com/go-kit/kit/transport/http/err.go +++ /dev/null @@ -1,33 +0,0 @@ -package http - -import ( - "fmt" -) - -const ( - // DomainNewRequest is an error during request generation. - DomainNewRequest = "NewRequest" - - // DomainEncode is an error during request or response encoding. - DomainEncode = "Encode" - - // DomainDo is an error during the execution phase of the request. - DomainDo = "Do" - - // DomainDecode is an error during request or response decoding. - DomainDecode = "Decode" -) - -// Error is an error that occurred at some phase within the transport. -type Error struct { - // Domain is the phase in which the error was generated. - Domain string - - // Err is the concrete error. - Err error -} - -// Error implements the error interface. -func (e Error) Error() string { - return fmt.Sprintf("%s: %s", e.Domain, e.Err) -} diff --git a/vendor/github.com/go-kit/kit/transport/http/err_test.go b/vendor/github.com/go-kit/kit/transport/http/err_test.go deleted file mode 100644 index 75a1838..0000000 --- a/vendor/github.com/go-kit/kit/transport/http/err_test.go +++ /dev/null @@ -1,56 +0,0 @@ -package http_test - -import ( - "errors" - "fmt" - "net/http" - "net/url" - "testing" - - "golang.org/x/net/context" - - httptransport "github.com/go-kit/kit/transport/http" -) - -func TestClientEndpointEncodeError(t *testing.T) { - var ( - sampleErr = errors.New("Oh no, an error") - enc = func(context.Context, *http.Request, interface{}) error { return sampleErr } - dec = func(context.Context, *http.Response) (interface{}, error) { return nil, nil } - ) - - u := &url.URL{ - Scheme: "https", - Host: "localhost", - Path: "/does/not/matter", - } - - c := httptransport.NewClient( - "GET", - u, - enc, - dec, - ) - - _, err := c.Endpoint()(context.Background(), nil) - if err == nil { - t.Fatal("err == nil") - } - - e, ok := err.(httptransport.Error) - if !ok { - t.Fatal("err is not of type github.com/go-kit/kit/transport/http.Error") - } - - if want, have := sampleErr, e.Err; want != have { - t.Fatalf("want %v, have %v", want, have) - } -} - -func ExampleErrOutput() { - sampleErr := errors.New("oh no, an error") - err := httptransport.Error{Domain: httptransport.DomainDo, Err: sampleErr} - fmt.Println(err) - // Output: - // Do: oh no, an error -} diff --git a/vendor/github.com/go-kit/kit/transport/http/request_response_funcs.go b/vendor/github.com/go-kit/kit/transport/http/request_response_funcs.go deleted file mode 100644 index 1a3ef9b..0000000 --- a/vendor/github.com/go-kit/kit/transport/http/request_response_funcs.go +++ /dev/null @@ -1,45 +0,0 @@ -package http - -import ( - "net/http" - - "golang.org/x/net/context" -) - -// RequestFunc may take information from an HTTP request and put it into a -// request context. In Servers, RequestFuncs are executed prior to invoking the -// endpoint. In Clients, RequestFuncs are executed after creating the request -// but prior to invoking the HTTP client. -type RequestFunc func(context.Context, *http.Request) context.Context - -// ServerResponseFunc may take information from a request context and use it to -// manipulate a ResponseWriter. ServerResponseFuncs are only executed in -// servers, after invoking the endpoint but prior to writing a response. 
-type ServerResponseFunc func(context.Context, http.ResponseWriter) context.Context - -// ClientResponseFunc may take information from an HTTP request and make the -// response available for consumption. ClientResponseFuncs are only executed in -// clients, after a request has been made, but prior to it being decoded. -type ClientResponseFunc func(context.Context, *http.Response) context.Context - -// SetContentType returns a ResponseFunc that sets the Content-Type header to -// the provided value. -func SetContentType(contentType string) ServerResponseFunc { - return SetResponseHeader("Content-Type", contentType) -} - -// SetResponseHeader returns a ResponseFunc that sets the specified header. -func SetResponseHeader(key, val string) ServerResponseFunc { - return func(ctx context.Context, w http.ResponseWriter) context.Context { - w.Header().Set(key, val) - return ctx - } -} - -// SetRequestHeader returns a RequestFunc that sets the specified header. -func SetRequestHeader(key, val string) RequestFunc { - return func(ctx context.Context, r *http.Request) context.Context { - r.Header.Set(key, val) - return ctx - } -} diff --git a/vendor/github.com/go-kit/kit/transport/http/request_response_funcs_test.go b/vendor/github.com/go-kit/kit/transport/http/request_response_funcs_test.go deleted file mode 100644 index 4fb87d0..0000000 --- a/vendor/github.com/go-kit/kit/transport/http/request_response_funcs_test.go +++ /dev/null @@ -1,31 +0,0 @@ -package http_test - -import ( - "net/http/httptest" - "testing" - - "golang.org/x/net/context" - - httptransport "github.com/go-kit/kit/transport/http" -) - -func TestSetHeader(t *testing.T) { - const ( - key = "X-Foo" - val = "12345" - ) - r := httptest.NewRecorder() - httptransport.SetResponseHeader(key, val)(context.Background(), r) - if want, have := val, r.Header().Get(key); want != have { - t.Errorf("want %q, have %q", want, have) - } -} - -func TestSetContentType(t *testing.T) { - const contentType = "application/json" - r := httptest.NewRecorder() - httptransport.SetContentType(contentType)(context.Background(), r) - if want, have := contentType, r.Header().Get("Content-Type"); want != have { - t.Errorf("want %q, have %q", want, have) - } -} diff --git a/vendor/github.com/go-kit/kit/transport/http/server.go b/vendor/github.com/go-kit/kit/transport/http/server.go deleted file mode 100644 index 4b1b734..0000000 --- a/vendor/github.com/go-kit/kit/transport/http/server.go +++ /dev/null @@ -1,134 +0,0 @@ -package http - -import ( - "net/http" - - "golang.org/x/net/context" - - "github.com/go-kit/kit/endpoint" - "github.com/go-kit/kit/log" -) - -// Server wraps an endpoint and implements http.Handler. -type Server struct { - ctx context.Context - e endpoint.Endpoint - dec DecodeRequestFunc - enc EncodeResponseFunc - before []RequestFunc - after []ServerResponseFunc - errorEncoder ErrorEncoder - logger log.Logger -} - -// NewServer constructs a new server, which implements http.Server and wraps -// the provided endpoint. -func NewServer( - ctx context.Context, - e endpoint.Endpoint, - dec DecodeRequestFunc, - enc EncodeResponseFunc, - options ...ServerOption, -) *Server { - s := &Server{ - ctx: ctx, - e: e, - dec: dec, - enc: enc, - errorEncoder: defaultErrorEncoder, - logger: log.NewNopLogger(), - } - for _, option := range options { - option(s) - } - return s -} - -// ServerOption sets an optional parameter for servers. -type ServerOption func(*Server) - -// ServerBefore functions are executed on the HTTP request object before the -// request is decoded. 
-func ServerBefore(before ...RequestFunc) ServerOption { - return func(s *Server) { s.before = before } -} - -// ServerAfter functions are executed on the HTTP response writer after the -// endpoint is invoked, but before anything is written to the client. -func ServerAfter(after ...ServerResponseFunc) ServerOption { - return func(s *Server) { s.after = after } -} - -// ServerErrorEncoder is used to encode errors to the http.ResponseWriter -// whenever they're encountered in the processing of a request. Clients can -// use this to provide custom error formatting and response codes. By default, -// errors will be written as plain text with an appropriate, if generic, -// status code. -func ServerErrorEncoder(ee ErrorEncoder) ServerOption { - return func(s *Server) { s.errorEncoder = ee } -} - -// ServerErrorLogger is used to log non-terminal errors. By default, no errors -// are logged. -func ServerErrorLogger(logger log.Logger) ServerOption { - return func(s *Server) { s.logger = logger } -} - -// ServeHTTP implements http.Handler. -func (s Server) ServeHTTP(w http.ResponseWriter, r *http.Request) { - ctx, cancel := context.WithCancel(s.ctx) - defer cancel() - - for _, f := range s.before { - ctx = f(ctx, r) - } - - request, err := s.dec(ctx, r) - if err != nil { - s.logger.Log("err", err) - s.errorEncoder(ctx, Error{Domain: DomainDecode, Err: err}, w) - return - } - - response, err := s.e(ctx, request) - if err != nil { - s.logger.Log("err", err) - s.errorEncoder(ctx, Error{Domain: DomainDo, Err: err}, w) - return - } - - for _, f := range s.after { - ctx = f(ctx, w) - } - - if err := s.enc(ctx, w, response); err != nil { - s.logger.Log("err", err) - s.errorEncoder(ctx, Error{Domain: DomainEncode, Err: err}, w) - return - } -} - -// ErrorEncoder is responsible for encoding an error to the ResponseWriter. -// -// In the server implementation, only kit/transport/http.Error values are ever -// passed to this function, so you might be tempted to have this function take -// one of those directly. But, users are encouraged to use custom ErrorEncoders -// to encode all HTTP errors to their clients, and so may want to pass and check -// for their own error types. See the example shipping/handling service. -type ErrorEncoder func(ctx context.Context, err error, w http.ResponseWriter) - -func defaultErrorEncoder(_ context.Context, err error, w http.ResponseWriter) { - switch e := err.(type) { - case Error: - switch e.Domain { - case DomainDecode: - http.Error(w, err.Error(), http.StatusBadRequest) - case DomainDo: - http.Error(w, err.Error(), http.StatusServiceUnavailable) // too aggressive? 
- default: - http.Error(w, err.Error(), http.StatusInternalServerError) - } - default: - http.Error(w, err.Error(), http.StatusInternalServerError) - } -} diff --git a/vendor/github.com/go-kit/kit/transport/http/server_test.go b/vendor/github.com/go-kit/kit/transport/http/server_test.go deleted file mode 100644 index 752f010..0000000 --- a/vendor/github.com/go-kit/kit/transport/http/server_test.go +++ /dev/null @@ -1,120 +0,0 @@ -package http_test - -import ( - "errors" - "io/ioutil" - "net/http" - "net/http/httptest" - "testing" - - "golang.org/x/net/context" - - httptransport "github.com/go-kit/kit/transport/http" -) - -func TestServerBadDecode(t *testing.T) { - handler := httptransport.NewServer( - context.Background(), - func(context.Context, interface{}) (interface{}, error) { return struct{}{}, nil }, - func(context.Context, *http.Request) (interface{}, error) { return struct{}{}, errors.New("dang") }, - func(context.Context, http.ResponseWriter, interface{}) error { return nil }, - ) - server := httptest.NewServer(handler) - defer server.Close() - resp, _ := http.Get(server.URL) - if want, have := http.StatusBadRequest, resp.StatusCode; want != have { - t.Errorf("want %d, have %d", want, have) - } -} - -func TestServerBadEndpoint(t *testing.T) { - handler := httptransport.NewServer( - context.Background(), - func(context.Context, interface{}) (interface{}, error) { return struct{}{}, errors.New("dang") }, - func(context.Context, *http.Request) (interface{}, error) { return struct{}{}, nil }, - func(context.Context, http.ResponseWriter, interface{}) error { return nil }, - ) - server := httptest.NewServer(handler) - defer server.Close() - resp, _ := http.Get(server.URL) - if want, have := http.StatusServiceUnavailable, resp.StatusCode; want != have { - t.Errorf("want %d, have %d", want, have) - } -} - -func TestServerBadEncode(t *testing.T) { - handler := httptransport.NewServer( - context.Background(), - func(context.Context, interface{}) (interface{}, error) { return struct{}{}, nil }, - func(context.Context, *http.Request) (interface{}, error) { return struct{}{}, nil }, - func(context.Context, http.ResponseWriter, interface{}) error { return errors.New("dang") }, - ) - server := httptest.NewServer(handler) - defer server.Close() - resp, _ := http.Get(server.URL) - if want, have := http.StatusInternalServerError, resp.StatusCode; want != have { - t.Errorf("want %d, have %d", want, have) - } -} - -func TestServerErrorEncoder(t *testing.T) { - errTeapot := errors.New("teapot") - code := func(err error) int { - if e, ok := err.(httptransport.Error); ok && e.Err == errTeapot { - return http.StatusTeapot - } - return http.StatusInternalServerError - } - handler := httptransport.NewServer( - context.Background(), - func(context.Context, interface{}) (interface{}, error) { return struct{}{}, errTeapot }, - func(context.Context, *http.Request) (interface{}, error) { return struct{}{}, nil }, - func(context.Context, http.ResponseWriter, interface{}) error { return nil }, - httptransport.ServerErrorEncoder(func(_ context.Context, err error, w http.ResponseWriter) { w.WriteHeader(code(err)) }), - ) - server := httptest.NewServer(handler) - defer server.Close() - resp, _ := http.Get(server.URL) - if want, have := http.StatusTeapot, resp.StatusCode; want != have { - t.Errorf("want %d, have %d", want, have) - } -} - -func TestServerHappyPath(t *testing.T) { - _, step, response := testServer(t) - step() - resp := <-response - defer resp.Body.Close() - buf, _ := ioutil.ReadAll(resp.Body) - if 
want, have := http.StatusOK, resp.StatusCode; want != have { - t.Errorf("want %d, have %d (%s)", want, have, buf) - } -} - -func testServer(t *testing.T) (cancel, step func(), resp <-chan *http.Response) { - var ( - ctx, cancelfn = context.WithCancel(context.Background()) - stepch = make(chan bool) - endpoint = func(context.Context, interface{}) (interface{}, error) { <-stepch; return struct{}{}, nil } - response = make(chan *http.Response) - handler = httptransport.NewServer( - ctx, - endpoint, - func(context.Context, *http.Request) (interface{}, error) { return struct{}{}, nil }, - func(context.Context, http.ResponseWriter, interface{}) error { return nil }, - httptransport.ServerBefore(func(ctx context.Context, r *http.Request) context.Context { return ctx }), - httptransport.ServerAfter(func(ctx context.Context, w http.ResponseWriter) context.Context { return ctx }), - ) - ) - go func() { - server := httptest.NewServer(handler) - defer server.Close() - resp, err := http.Get(server.URL) - if err != nil { - t.Error(err) - return - } - response <- resp - }() - return cancelfn, func() { stepch <- true }, response -} diff --git a/vendor/github.com/go-kit/kit/transport/httprp/README.md b/vendor/github.com/go-kit/kit/transport/httprp/README.md deleted file mode 100644 index 23853e2..0000000 --- a/vendor/github.com/go-kit/kit/transport/httprp/README.md +++ /dev/null @@ -1,48 +0,0 @@ -# package transport/httprp - -`package transport/httprp` provides an HTTP reverse-proxy transport. - -## Rationale - -HTTP server applications often associate multiple handlers with a single HTTP listener, each handler differentiated by the request URI and/or HTTP method. Handlers that perform business-logic in the app can implement the `Endpoint` interface and be exposed using the `package transport/http` server. Handlers that need to proxy the request to another HTTP endpoint can do so with this package by simply specifying the base URL to forward the request to. - -## Usage - -The following example uses the [Gorilla Mux](https://github.com/gorilla/mux) router to illustrate how a mixture of proxying and non-proxying request handlers can be used with a single listener: - -```go -import ( - "net/http" - "net/url" - - kithttp "github.com/go-kit/kit/transport/http" - kithttprp "github.com/go-kit/kit/transport/httprp" - "github.com/gorilla/mux" - "golang.org/x/net/context" -) - -func main() { - router := mux.NewRouter() - - // server HTTP endpoint handled here - router.Handle("/foo", - kithttp.NewServer( - context.Background(), - func(context.Context, interface{}) (interface{}, error) { return struct{}{}, nil }, - func(*http.Request) (interface{}, error) { return struct{}{}, nil }, - func(http.ResponseWriter, interface{}) error { return nil }, - )).Methods("GET") - - // proxy endpoint, forwards requests to http://other.service.local/base/bar - remoteServiceURL, _ := url.Parse("http://other.service.local/base") - router.Handle("/bar", - kithttprp.NewServer( - context.Background(), - remoteServiceURL, - )).Methods("GET") - - http.ListenAndServe(":8080", router) -} -``` - -You can also supply a set of `RequestFunc` functions to be run before proxying the request. This can be useful for adding request headers required by the backend system (e.g. API tokens). 
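A minimal sketch of such a `RequestFunc`, assuming a bearer-token scheme (the header name and token value below are illustrative placeholders):

```go
import (
	"net/http"

	"golang.org/x/net/context"

	kithttprp "github.com/go-kit/kit/transport/httprp"
)

// addAPIToken returns a RequestFunc that attaches an Authorization header to
// every request before it is proxied to the backend.
func addAPIToken(token string) kithttprp.RequestFunc {
	return func(ctx context.Context, r *http.Request) context.Context {
		r.Header.Set("Authorization", "Bearer "+token)
		return ctx
	}
}
```

Wired into the example above, it is passed as an option when constructing the proxy handler, e.g. `kithttprp.NewServer(context.Background(), remoteServiceURL, kithttprp.ServerBefore(addAPIToken("my-token")))`.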
diff --git a/vendor/github.com/go-kit/kit/transport/httprp/server.go b/vendor/github.com/go-kit/kit/transport/httprp/server.go deleted file mode 100644 index d34e22b..0000000 --- a/vendor/github.com/go-kit/kit/transport/httprp/server.go +++ /dev/null @@ -1,62 +0,0 @@ -package httprp - -import ( - "net/http" - "net/http/httputil" - "net/url" - - "golang.org/x/net/context" -) - -// RequestFunc may take information from an HTTP request and put it into a -// request context. BeforeFuncs are executed prior to invoking the -// endpoint. -type RequestFunc func(context.Context, *http.Request) context.Context - -// Server is a proxying request handler. -type Server struct { - ctx context.Context - proxy http.Handler - before []RequestFunc - errorEncoder func(w http.ResponseWriter, err error) -} - -// NewServer constructs a new server that implements http.Server and will proxy -// requests to the given base URL using its scheme, host, and base path. -// If the target's path is "/base" and the incoming request was for "/dir", -// the target request will be for /base/dir. -func NewServer( - ctx context.Context, - baseURL *url.URL, - options ...ServerOption, -) *Server { - s := &Server{ - ctx: ctx, - proxy: httputil.NewSingleHostReverseProxy(baseURL), - } - for _, option := range options { - option(s) - } - return s -} - -// ServerOption sets an optional parameter for servers. -type ServerOption func(*Server) - -// ServerBefore functions are executed on the HTTP request object before the -// request is decoded. -func ServerBefore(before ...RequestFunc) ServerOption { - return func(s *Server) { s.before = before } -} - -// ServeHTTP implements http.Handler. -func (s Server) ServeHTTP(w http.ResponseWriter, r *http.Request) { - ctx, cancel := context.WithCancel(s.ctx) - defer cancel() - - for _, f := range s.before { - ctx = f(ctx, r) - } - - s.proxy.ServeHTTP(w, r) -} diff --git a/vendor/github.com/go-kit/kit/transport/httprp/server_test.go b/vendor/github.com/go-kit/kit/transport/httprp/server_test.go deleted file mode 100644 index 06946a7..0000000 --- a/vendor/github.com/go-kit/kit/transport/httprp/server_test.go +++ /dev/null @@ -1,120 +0,0 @@ -package httprp_test - -import ( - "io/ioutil" - "net/http" - "net/http/httptest" - "net/url" - "testing" - - "golang.org/x/net/context" - - httptransport "github.com/go-kit/kit/transport/httprp" -) - -func TestServerHappyPathSingleServer(t *testing.T) { - originServer := httptest.NewServer( - http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - w.WriteHeader(http.StatusOK) - w.Write([]byte("hey")) - })) - defer originServer.Close() - originURL, _ := url.Parse(originServer.URL) - - handler := httptransport.NewServer( - context.Background(), - originURL, - ) - proxyServer := httptest.NewServer(handler) - defer proxyServer.Close() - - resp, _ := http.Get(proxyServer.URL) - if want, have := http.StatusOK, resp.StatusCode; want != have { - t.Errorf("want %d, have %d", want, have) - } - - responseBody, _ := ioutil.ReadAll(resp.Body) - if want, have := "hey", string(responseBody); want != have { - t.Errorf("want %d, have %d", want, have) - } -} - -func TestServerHappyPathSingleServerWithServerOptions(t *testing.T) { - const ( - headerKey = "X-TEST-HEADER" - headerVal = "go-kit-proxy" - ) - - originServer := httptest.NewServer( - http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - if want, have := headerVal, r.Header.Get(headerKey); want != have { - t.Errorf("want %d, have %d", want, have) - } - - w.WriteHeader(http.StatusOK) - 
w.Write([]byte("hey")) - })) - defer originServer.Close() - originURL, _ := url.Parse(originServer.URL) - - handler := httptransport.NewServer( - context.Background(), - originURL, - httptransport.ServerBefore(func(ctx context.Context, r *http.Request) context.Context { - r.Header.Add(headerKey, headerVal) - return ctx - }), - ) - proxyServer := httptest.NewServer(handler) - defer proxyServer.Close() - - resp, _ := http.Get(proxyServer.URL) - if want, have := http.StatusOK, resp.StatusCode; want != have { - t.Errorf("want %d, have %d", want, have) - } - - responseBody, _ := ioutil.ReadAll(resp.Body) - if want, have := "hey", string(responseBody); want != have { - t.Errorf("want %d, have %d", want, have) - } -} - -func TestServerOriginServerNotFoundResponse(t *testing.T) { - originServer := httptest.NewServer(http.NotFoundHandler()) - defer originServer.Close() - originURL, _ := url.Parse(originServer.URL) - - handler := httptransport.NewServer( - context.Background(), - originURL, - ) - proxyServer := httptest.NewServer(handler) - defer proxyServer.Close() - - resp, _ := http.Get(proxyServer.URL) - if want, have := http.StatusNotFound, resp.StatusCode; want != have { - t.Errorf("want %d, have %d", want, have) - } -} - -func TestServerOriginServerUnreachable(t *testing.T) { - // create a server, then promptly shut it down - originServer := httptest.NewServer( - http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - w.WriteHeader(http.StatusOK) - })) - originURL, _ := url.Parse(originServer.URL) - originServer.Close() - - handler := httptransport.NewServer( - context.Background(), - originURL, - ) - proxyServer := httptest.NewServer(handler) - defer proxyServer.Close() - - resp, _ := http.Get(proxyServer.URL) - if want, have := http.StatusInternalServerError, resp.StatusCode; want != have { - t.Errorf("want %d, have %d", want, have) - } -} diff --git a/vendor/github.com/go-kit/kit/transport/netrpc/README.md b/vendor/github.com/go-kit/kit/transport/netrpc/README.md deleted file mode 100644 index d899501..0000000 --- a/vendor/github.com/go-kit/kit/transport/netrpc/README.md +++ /dev/null @@ -1,14 +0,0 @@ -# net/rpc - -[net/rpc](https://golang.org/pkg/net/rpc) is an RPC transport that's part of the Go standard library. -It's a simple and fast transport that's appropriate when all of your services are written in Go. - -Using net/rpc with Go kit is very simple. -Just write a simple binding from your service definition to the net/rpc definition. -See [netrpc_binding.go](https://github.com/go-kit/kit/blob/ec8b02591ee873433565a1ae9d317353412d1d27/examples/addsvc/netrpc_binding.go) for an example. - -That's it! -The net/rpc binding can be registered to a name, and bound to an HTTP handler, the same as any other net/rpc endpoint. -And within your service, you can use standard Go kit components and idioms. -See [addsvc](https://github.com/go-kit/kit/tree/master/examples/addsvc) for a complete working example with net/rpc support. -And remember: Go kit services can support multiple transports simultaneously. diff --git a/vendor/github.com/go-kit/kit/transport/thrift/README.md b/vendor/github.com/go-kit/kit/transport/thrift/README.md deleted file mode 100644 index 04cd5ae..0000000 --- a/vendor/github.com/go-kit/kit/transport/thrift/README.md +++ /dev/null @@ -1,32 +0,0 @@ -# Thrift - -[Thrift](https://thrift.apache.org/) is a large IDL and transport package from Apache, popularized by Facebook. 
-Thrift is well-supported in Go kit, for organizations that already have significant Thrift investment. -And using Thrift with Go kit is very simple. - -First, define your service in the Thrift IDL. -The [Thrift IDL documentation](https://thrift.apache.org/docs/idl) provides more details. -See [add.thrift](https://github.com/go-kit/kit/blob/ec8b02591ee873433565a1ae9d317353412d1d27/examples/addsvc/_thrift/add.thrift) for an example. -Make sure the Thrift definition matches your service's Go kit (interface) definition. - -Next, [download Thrift](https://thrift.apache.org/download) and [install the compiler](https://thrift.apache.org/docs/install/). -On a Mac, you may be able to `brew install thrift`. - -Then, compile your service definition, from .thrift to .go. -You'll probably want to specify the package_prefix option to the --gen go flag. -See [THRIFT-3021](https://issues.apache.org/jira/browse/THRIFT-3021) for more details. - -``` -thrift -r --gen go:package_prefix=github.com/my-org/my-repo/thrift/gen-go/ add.thrift -``` - -Finally, write a tiny binding from your service definition to the Thrift definition. -It's a straightforward conversion from one domain to the other. -See [thrift_binding.go](https://github.com/go-kit/kit/blob/ec8b02591ee873433565a1ae9d317353412d1d27/examples/addsvc/thrift_binding.go) for an example. - -That's it! -The Thrift binding can be bound to a listener and serve normal Thrift requests. -And within your service, you can use standard Go kit components and idioms. -Unfortunately, setting up a Thrift listener is rather laborious and nonidiomatic in Go. -Fortunately, [addsvc](https://github.com/go-kit/kit/tree/master/examples/addsvc) is a complete working example with Thrift support. -And remember: Go kit services can support multiple transports simultaneously. diff --git a/vendor/github.com/go-kit/kit/update_deps.bash b/vendor/github.com/go-kit/kit/update_deps.bash deleted file mode 100755 index 0a349a8..0000000 --- a/vendor/github.com/go-kit/kit/update_deps.bash +++ /dev/null @@ -1,28 +0,0 @@ -#!/usr/bin/env bash - -# This script updates each non-stdlib, non-Go-kit dependency to its most recent -# commit. It can be invoked to aid in debugging after a dependency-related -# failure on continuous integration. - -function deps { - go list -f '{{join .Deps "\n"}}' ./... -} - -function not_stdlib { - xargs go list -f '{{if not .Standard}}{{.ImportPath}}{{end}}' -} - -function not_gokit { - grep -v 'go-kit/kit' -} - -function go_get_update { - while read d - do - echo $d - go get -u $d - done -} - -deps | not_stdlib | not_gokit | go_get_update - diff --git a/vendor/github.com/go-kit/kit/util/README.md b/vendor/github.com/go-kit/kit/util/README.md deleted file mode 100644 index 20ed85c..0000000 --- a/vendor/github.com/go-kit/kit/util/README.md +++ /dev/null @@ -1,5 +0,0 @@ -# util - -This directory holds packages of general utility to multiple consumers within Go kit, - and potentially other consumers in the wider Go ecosystem. -There is no `package util` and will never be. diff --git a/vendor/github.com/go-kit/kit/util/conn/doc.go b/vendor/github.com/go-kit/kit/util/conn/doc.go deleted file mode 100644 index 7ad4d96..0000000 --- a/vendor/github.com/go-kit/kit/util/conn/doc.go +++ /dev/null @@ -1,2 +0,0 @@ -// Package conn provides utilities related to connections. 
-package conn diff --git a/vendor/github.com/go-kit/kit/util/conn/manager.go b/vendor/github.com/go-kit/kit/util/conn/manager.go deleted file mode 100644 index 75c997f..0000000 --- a/vendor/github.com/go-kit/kit/util/conn/manager.go +++ /dev/null @@ -1,124 +0,0 @@ -package conn - -import ( - "net" - "time" - - "github.com/go-kit/kit/log" -) - -// Dialer imitates net.Dial. Dialer is assumed to yield connections that are -// safe for use by multiple concurrent goroutines. -type Dialer func(network, address string) (net.Conn, error) - -// AfterFunc imitates time.After. -type AfterFunc func(time.Duration) <-chan time.Time - -// Manager manages a net.Conn. -// -// Clients provide a way to create the connection with a Dialer, network, and -// address. Clients should Take the connection when they want to use it, and Put -// back whatever error they receive from its use. When a non-nil error is Put, -// the connection is invalidated, and a new connection is established. -// Connection failures are retried after an exponential backoff. -type Manager struct { - dialer Dialer - network string - address string - after AfterFunc - logger log.Logger - - takec chan net.Conn - putc chan error -} - -// NewManager returns a connection manager using the passed Dialer, network, and -// address. The AfterFunc is used to control exponential backoff and retries. -// For normal use, pass net.Dial and time.After as the Dialer and AfterFunc -// respectively. The logger is used to log errors; pass a log.NopLogger if you -// don't care to receive them. -func NewManager(d Dialer, network, address string, after AfterFunc, logger log.Logger) *Manager { - m := &Manager{ - dialer: d, - network: network, - address: address, - after: after, - logger: logger, - - takec: make(chan net.Conn), - putc: make(chan error), - } - go m.loop() - return m -} - -// Take yields the current connection. It may be nil. -func (m *Manager) Take() net.Conn { - return <-m.takec -} - -// Put accepts an error that came from a previously yielded connection. If the -// error is non-nil, the manager will invalidate the current connection and try -// to reconnect, with exponential backoff. Putting a nil error is a no-op. -func (m *Manager) Put(err error) { - m.putc <- err -} - -func (m *Manager) loop() { - var ( - conn = dial(m.dialer, m.network, m.address, m.logger) // may block slightly - connc = make(chan net.Conn, 1) - reconnectc <-chan time.Time // initially nil - backoff = time.Second - ) - - // If the initial dial fails, we need to trigger a reconnect via the loop - // body, below. If we did this in a goroutine, we would race on the conn - // variable. So we use a buffered chan instead. - connc <- conn - - for { - select { - case <-reconnectc: - reconnectc = nil // one-shot - go func() { connc <- dial(m.dialer, m.network, m.address, m.logger) }() - - case conn = <-connc: - if conn == nil { - // didn't work - backoff = exponential(backoff) // wait longer - reconnectc = m.after(backoff) // try again - } else { - // worked! 
- backoff = time.Second // reset wait time - reconnectc = nil // no retry necessary - } - - case m.takec <- conn: - - case err := <-m.putc: - if err != nil && conn != nil { - m.logger.Log("err", err) - conn = nil // connection is bad - reconnectc = m.after(time.Nanosecond) // trigger immediately - } - } - } -} - -func dial(d Dialer, network, address string, logger log.Logger) net.Conn { - conn, err := d(network, address) - if err != nil { - logger.Log("err", err) - conn = nil // just to be sure - } - return conn -} - -func exponential(d time.Duration) time.Duration { - d *= 2 - if d > time.Minute { - d = time.Minute - } - return d -} diff --git a/vendor/github.com/go-kit/kit/util/conn/manager_test.go b/vendor/github.com/go-kit/kit/util/conn/manager_test.go deleted file mode 100644 index 86bddbc..0000000 --- a/vendor/github.com/go-kit/kit/util/conn/manager_test.go +++ /dev/null @@ -1,159 +0,0 @@ -package conn - -import ( - "errors" - "net" - "sync/atomic" - "testing" - "time" - - "github.com/go-kit/kit/log" -) - -func TestManager(t *testing.T) { - var ( - tickc = make(chan time.Time) - after = func(time.Duration) <-chan time.Time { return tickc } - dialconn = &mockConn{} - dialerr = error(nil) - dialer = func(string, string) (net.Conn, error) { return dialconn, dialerr } - mgr = NewManager(dialer, "netw", "addr", after, log.NewNopLogger()) - ) - - // First conn should be fine. - conn := mgr.Take() - if conn == nil { - t.Fatal("nil conn") - } - - // Write and check it went thru. - if _, err := conn.Write([]byte{1, 2, 3}); err != nil { - t.Fatal(err) - } - if want, have := uint64(3), atomic.LoadUint64(&dialconn.wr); want != have { - t.Errorf("want %d, have %d", want, have) - } - - // Put an error to kill the conn. - mgr.Put(errors.New("should kill the connection")) - - // First takes should fail. - for i := 0; i < 10; i++ { - if conn = mgr.Take(); conn != nil { - t.Fatalf("want nil conn, got real conn") - } - } - - // Trigger the reconnect. - tickc <- time.Now() - - // The dial should eventually succeed and yield a good conn. - if !within(100*time.Millisecond, func() bool { - conn = mgr.Take() - return conn != nil - }) { - t.Fatal("conn remained nil") - } - - // Write and check it went thru. - if _, err := conn.Write([]byte{4, 5}); err != nil { - t.Fatal(err) - } - if want, have := uint64(5), atomic.LoadUint64(&dialconn.wr); want != have { - t.Errorf("want %d, have %d", want, have) - } - - // Dial starts failing. - dialconn, dialerr = nil, errors.New("oh noes") - mgr.Put(errors.New("trigger that reconnect y'all")) - if conn = mgr.Take(); conn != nil { - t.Fatalf("want nil conn, got real conn") - } - - // As many reconnects as they want. - go func() { - done := time.After(100 * time.Millisecond) - for { - select { - case tickc <- time.Now(): - case <-done: - return - } - } - }() - - // The dial should never succeed. - if within(100*time.Millisecond, func() bool { - conn = mgr.Take() - return conn != nil - }) { - t.Fatal("eventually got a good conn, despite failing dialer") - } -} - -func TestIssue292(t *testing.T) { - // The util/conn.Manager won't attempt to reconnect to the provided endpoint - // if the endpoint is initially unavailable (e.g. dial tcp :8080: - // getsockopt: connection refused). If the endpoint is up when - // conn.NewManager is called and then goes down/up, it reconnects just fine. 
- - var ( - tickc = make(chan time.Time) - after = func(time.Duration) <-chan time.Time { return tickc } - dialconn = net.Conn(nil) - dialerr = errors.New("fail") - dialer = func(string, string) (net.Conn, error) { return dialconn, dialerr } - mgr = NewManager(dialer, "netw", "addr", after, log.NewNopLogger()) - ) - - if conn := mgr.Take(); conn != nil { - t.Fatal("first Take should have yielded nil conn, but didn't") - } - - dialconn, dialerr = &mockConn{}, nil - select { - case tickc <- time.Now(): - case <-time.After(time.Second): - t.Fatal("manager isn't listening for a tick, despite a failed dial") - } - - if !within(time.Second, func() bool { - return mgr.Take() != nil - }) { - t.Fatal("second Take should have yielded good conn, but didn't") - } -} - -type mockConn struct { - rd, wr uint64 -} - -func (c *mockConn) Read(b []byte) (n int, err error) { - atomic.AddUint64(&c.rd, uint64(len(b))) - return len(b), nil -} - -func (c *mockConn) Write(b []byte) (n int, err error) { - atomic.AddUint64(&c.wr, uint64(len(b))) - return len(b), nil -} - -func (c *mockConn) Close() error { return nil } -func (c *mockConn) LocalAddr() net.Addr { return nil } -func (c *mockConn) RemoteAddr() net.Addr { return nil } -func (c *mockConn) SetDeadline(t time.Time) error { return nil } -func (c *mockConn) SetReadDeadline(t time.Time) error { return nil } -func (c *mockConn) SetWriteDeadline(t time.Time) error { return nil } - -func within(d time.Duration, f func() bool) bool { - deadline := time.Now().Add(d) - for { - if time.Now().After(deadline) { - return false - } - if f() { - return true - } - time.Sleep(d / 10) - } -} diff --git a/vendor/github.com/go-logfmt/logfmt/.gitignore b/vendor/github.com/go-logfmt/logfmt/.gitignore deleted file mode 100644 index 320e53e..0000000 --- a/vendor/github.com/go-logfmt/logfmt/.gitignore +++ /dev/null @@ -1,4 +0,0 @@ -_testdata/ -_testdata2/ -logfmt-fuzz.zip -logfmt.test.exe diff --git a/vendor/github.com/go-logfmt/logfmt/.travis.yml b/vendor/github.com/go-logfmt/logfmt/.travis.yml deleted file mode 100644 index b599f65..0000000 --- a/vendor/github.com/go-logfmt/logfmt/.travis.yml +++ /dev/null @@ -1,15 +0,0 @@ -language: go -sudo: false -go: - - 1.3 - - 1.4 - - 1.5 - - 1.6 - - tip - -before_install: - - go get github.com/mattn/goveralls - - go get golang.org/x/tools/cmd/cover - -script: - - goveralls -service=travis-ci diff --git a/vendor/github.com/go-logfmt/logfmt/LICENSE b/vendor/github.com/go-logfmt/logfmt/LICENSE deleted file mode 100644 index c026508..0000000 --- a/vendor/github.com/go-logfmt/logfmt/LICENSE +++ /dev/null @@ -1,22 +0,0 @@ -The MIT License (MIT) - -Copyright (c) 2015 go-logfmt - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. - diff --git a/vendor/github.com/go-logfmt/logfmt/README.md b/vendor/github.com/go-logfmt/logfmt/README.md deleted file mode 100644 index 3a8f10b..0000000 --- a/vendor/github.com/go-logfmt/logfmt/README.md +++ /dev/null @@ -1,33 +0,0 @@ -[![GoDoc](https://godoc.org/github.com/go-logfmt/logfmt?status.svg)](https://godoc.org/github.com/go-logfmt/logfmt) -[![Go Report Card](https://goreportcard.com/badge/go-logfmt/logfmt)](https://goreportcard.com/report/go-logfmt/logfmt) -[![TravisCI](https://travis-ci.org/go-logfmt/logfmt.svg?branch=master)](https://travis-ci.org/go-logfmt/logfmt) -[![Coverage Status](https://coveralls.io/repos/github/go-logfmt/logfmt/badge.svg?branch=master)](https://coveralls.io/github/go-logfmt/logfmt?branch=master) - -# logfmt - -Package logfmt implements utilities to marshal and unmarshal data in the [logfmt -format](https://brandur.org/logfmt). It provides an API similar to -[encoding/json](http://golang.org/pkg/encoding/json/) and -[encoding/xml](http://golang.org/pkg/encoding/xml/). - -The logfmt format was first documented by Brandur Leach in [this -article](https://brandur.org/logfmt). The format has not been formally -standardized. The most authoritative public specification to date has been the -documentation of a Go Language [package](http://godoc.org/github.com/kr/logfmt) -written by Blake Mizerany and Keith Rarick. - -## Goals - -This project attempts to conform as closely as possible to the prior art, while -also removing ambiguity where necessary to provide well behaved encoder and -decoder implementations. - -## Non-goals - -This project does not attempt to formally standardize the logfmt format. In the -event that logfmt is standardized this project would take conforming to the -standard as a goal. - -## Versioning - -Package logfmt publishes releases via [semver](http://semver.org/) compatible Git tags prefixed with a single 'v'. diff --git a/vendor/github.com/go-logfmt/logfmt/decode-bench_test.go b/vendor/github.com/go-logfmt/logfmt/decode-bench_test.go deleted file mode 100644 index f66dc25..0000000 --- a/vendor/github.com/go-logfmt/logfmt/decode-bench_test.go +++ /dev/null @@ -1,75 +0,0 @@ -package logfmt - -import ( - "bufio" - "bytes" - "testing" - - kr "github.com/kr/logfmt" -) - -func BenchmarkDecodeKeyval(b *testing.B) { - const rows = 10000 - data := []byte{} - for i := 0; i < rows; i++ { - data = append(data, "a=1 b=\"bar\" ƒ=2h3s r=\"esc\\tmore stuff\" d x=sf \n"...) - } - - b.SetBytes(int64(len(data))) - b.ResetTimer() - for i := 0; i < b.N; i++ { - var ( - dec = NewDecoder(bytes.NewReader(data)) - j = 0 - ) - for dec.ScanRecord() { - for dec.ScanKeyval() { - } - j++ - } - if err := dec.Err(); err != nil { - b.Errorf("got %v, want %v", err, nil) - } - if j != rows { - b.Errorf("got %v, want %v", j, rows) - } - } -} - -func BenchmarkKRDecode(b *testing.B) { - const rows = 10000 - data := []byte{} - for i := 0; i < rows; i++ { - data = append(data, "a=1 b=\"bar\" ƒ=2h3s r=\"esc\\tmore stuff\" d x=sf \n"...) 
- } - - b.SetBytes(int64(len(data))) - b.ResetTimer() - for i := 0; i < b.N; i++ { - var ( - s = bufio.NewScanner(bytes.NewReader(data)) - err error - j = 0 - dh discardHandler - ) - for err == nil && s.Scan() { - err = kr.Unmarshal(s.Bytes(), &dh) - j++ - } - if err == nil { - err = s.Err() - } - if err != nil { - b.Errorf("got %v, want %v", err, nil) - } - if j != rows { - b.Errorf("got %v, want %v", j, rows) - } - } -} - -type discardHandler struct{} - -func (discardHandler) HandleLogfmt(key, val []byte) error { - return nil -} diff --git a/vendor/github.com/go-logfmt/logfmt/decode.go b/vendor/github.com/go-logfmt/logfmt/decode.go deleted file mode 100644 index a04981f..0000000 --- a/vendor/github.com/go-logfmt/logfmt/decode.go +++ /dev/null @@ -1,222 +0,0 @@ -package logfmt - -import ( - "bufio" - "fmt" - "io" -) - -// A Decoder reads and decodes logfmt records from an input stream. -type Decoder struct { - pos int - key []byte - value []byte - lineNum int - s *bufio.Scanner - err error -} - -// NewDecoder returns a new decoder that reads from r. -// -// The decoder introduces its own buffering and may read data from r beyond -// the logfmt records requested. -func NewDecoder(r io.Reader) *Decoder { - dec := &Decoder{ - s: bufio.NewScanner(r), - } - return dec -} - -// ScanRecord advances the Decoder to the next record, which can then be -// parsed with the ScanKeyval method. It returns false when decoding stops, -// either by reaching the end of the input or an error. After ScanRecord -// returns false, the Err method will return any error that occurred during -// decoding, except that if it was io.EOF, Err will return nil. -func (dec *Decoder) ScanRecord() bool { - if dec.err != nil { - return false - } - if !dec.s.Scan() { - dec.err = dec.s.Err() - return false - } - dec.lineNum++ - dec.pos = 0 - return true -} - -// ScanKeyval advances the Decoder to the next key/value pair of the current -// record, which can then be retrieved with the Key and Value methods. It -// returns false when decoding stops, either by reaching the end of the -// current record or an error. 
-func (dec *Decoder) ScanKeyval() bool { - dec.key, dec.value = nil, nil - if dec.err != nil { - return false - } - - line := dec.s.Bytes() - - // garbage - for p, c := range line[dec.pos:] { - if c > ' ' { - dec.pos += p - goto key - } - } - dec.pos = len(line) - return false - -key: - start := dec.pos - for p, c := range line[dec.pos:] { - switch { - case c == '=': - dec.pos += p - if dec.pos > start { - dec.key = line[start:dec.pos] - } - if dec.key == nil { - dec.unexpectedByte(c) - return false - } - goto equal - case c == '"': - dec.pos += p - dec.unexpectedByte(c) - return false - case c <= ' ': - dec.pos += p - if dec.pos > start { - dec.key = line[start:dec.pos] - } - return true - } - } - dec.pos = len(line) - if dec.pos > start { - dec.key = line[start:dec.pos] - } - return true - -equal: - dec.pos++ - if dec.pos >= len(line) { - return true - } - switch c := line[dec.pos]; { - case c <= ' ': - return true - case c == '"': - goto qvalue - } - - // value - start = dec.pos - for p, c := range line[dec.pos:] { - switch { - case c == '=' || c == '"': - dec.pos += p - dec.unexpectedByte(c) - return false - case c <= ' ': - dec.pos += p - if dec.pos > start { - dec.value = line[start:dec.pos] - } - return true - } - } - dec.pos = len(line) - if dec.pos > start { - dec.value = line[start:dec.pos] - } - return true - -qvalue: - const ( - untermQuote = "unterminated quoted value" - invalidQuote = "invalid quoted value" - ) - - hasEsc, esc := false, false - start = dec.pos - for p, c := range line[dec.pos+1:] { - switch { - case esc: - esc = false - case c == '\\': - hasEsc, esc = true, true - case c == '"': - dec.pos += p + 2 - if hasEsc { - v, ok := unquoteBytes(line[start:dec.pos]) - if !ok { - dec.syntaxError(invalidQuote) - return false - } - dec.value = v - } else { - start++ - end := dec.pos - 1 - if end > start { - dec.value = line[start:end] - } - } - return true - } - } - dec.pos = len(line) - dec.syntaxError(untermQuote) - return false -} - -// Key returns the most recent key found by a call to ScanKeyval. The returned -// slice may point to internal buffers and is only valid until the next call -// to ScanRecord. It does no allocation. -func (dec *Decoder) Key() []byte { - return dec.key -} - -// Value returns the most recent value found by a call to ScanKeyval. The -// returned slice may point to internal buffers and is only valid until the -// next call to ScanRecord. It does no allocation when the value has no -// escape sequences. -func (dec *Decoder) Value() []byte { - return dec.value -} - -// func (dec *Decoder) DecodeValue() ([]byte, error) { -// } - -// Err returns the first non-EOF error that was encountered by the Scanner. -func (dec *Decoder) Err() error { - return dec.err -} - -func (dec *Decoder) syntaxError(msg string) { - dec.err = &SyntaxError{ - Msg: msg, - Line: dec.lineNum, - Pos: dec.pos + 1, - } -} - -func (dec *Decoder) unexpectedByte(c byte) { - dec.err = &SyntaxError{ - Msg: fmt.Sprintf("unexpected %q", c), - Line: dec.lineNum, - Pos: dec.pos + 1, - } -} - -// A SyntaxError represents a syntax error in the logfmt input stream. 
-type SyntaxError struct { - Msg string - Line int - Pos int -} - -func (e *SyntaxError) Error() string { - return fmt.Sprintf("logfmt syntax error at pos %d on line %d: %s", e.Pos, e.Line, e.Msg) -} diff --git a/vendor/github.com/go-logfmt/logfmt/decode_test.go b/vendor/github.com/go-logfmt/logfmt/decode_test.go deleted file mode 100644 index d261880..0000000 --- a/vendor/github.com/go-logfmt/logfmt/decode_test.go +++ /dev/null @@ -1,181 +0,0 @@ -package logfmt - -import ( - "bytes" - "fmt" - "reflect" - "strings" - "testing" -) - -type kv struct { - k, v []byte -} - -func (s kv) String() string { - return fmt.Sprintf("{k:%q v:%q}", s.k, s.v) -} - -func TestDecoder_scan(t *testing.T) { - tests := []struct { - data string - want [][]kv - }{ - {"", nil}, - {"\n\n", [][]kv{nil, nil}}, - {`x= `, [][]kv{{{[]byte("x"), nil}}}}, - {`y=`, [][]kv{{{[]byte("y"), nil}}}}, - {`y`, [][]kv{{{[]byte("y"), nil}}}}, - {`y=f`, [][]kv{{{[]byte("y"), []byte("f")}}}}, - {"y=\"\\tf\"", [][]kv{{{[]byte("y"), []byte("\tf")}}}}, - {"a=1\n", [][]kv{{{[]byte("a"), []byte("1")}}}}, - { - `a=1 b="bar" ƒ=2h3s r="esc\t" d x=sf `, - [][]kv{{ - {[]byte("a"), []byte("1")}, - {[]byte("b"), []byte("bar")}, - {[]byte("ƒ"), []byte("2h3s")}, - {[]byte("r"), []byte("esc\t")}, - {[]byte("d"), nil}, - {[]byte("x"), []byte("sf")}, - }}, - }, - { - "y=f\ny=g", - [][]kv{ - {{[]byte("y"), []byte("f")}}, - {{[]byte("y"), []byte("g")}}, - }, - }, - { - "y=f \n\x1e y=g", - [][]kv{ - {{[]byte("y"), []byte("f")}}, - {{[]byte("y"), []byte("g")}}, - }, - }, - { - "y= d y=g", - [][]kv{{ - {[]byte("y"), nil}, - {[]byte("d"), nil}, - {[]byte("y"), []byte("g")}, - }}, - }, - { - "y=\"f\"\ny=g", - [][]kv{ - {{[]byte("y"), []byte("f")}}, - {{[]byte("y"), []byte("g")}}, - }, - }, - { - "y=\"f\\n\"y=g", - [][]kv{{ - {[]byte("y"), []byte("f\n")}, - {[]byte("y"), []byte("g")}, - }}, - }, - } - - for _, test := range tests { - var got [][]kv - dec := NewDecoder(strings.NewReader(test.data)) - - for dec.ScanRecord() { - var kvs []kv - for dec.ScanKeyval() { - k := dec.Key() - v := dec.Value() - if k != nil { - kvs = append(kvs, kv{k, v}) - } - } - got = append(got, kvs) - } - if err := dec.Err(); err != nil { - t.Errorf("got err: %v", err) - } - if !reflect.DeepEqual(got, test.want) { - t.Errorf("\n in: %q\n got: %+v\nwant: %+v", test.data, got, test.want) - } - } -} - -func TestDecoder_errors(t *testing.T) { - tests := []struct { - data string - want error - }{ - {"a=1\n=bar", &SyntaxError{Msg: "unexpected '='", Line: 2, Pos: 1}}, - {"a=1\n\"k\"=bar", &SyntaxError{Msg: "unexpected '\"'", Line: 2, Pos: 1}}, - {"a=1\nk\"ey=bar", &SyntaxError{Msg: "unexpected '\"'", Line: 2, Pos: 2}}, - {"a=1\nk=b\"ar", &SyntaxError{Msg: "unexpected '\"'", Line: 2, Pos: 4}}, - {"a=1\nk=b =ar", &SyntaxError{Msg: "unexpected '='", Line: 2, Pos: 5}}, - {"a==", &SyntaxError{Msg: "unexpected '='", Line: 1, Pos: 3}}, - {"a=1\nk=b=ar", &SyntaxError{Msg: "unexpected '='", Line: 2, Pos: 4}}, - {"a=\"1", &SyntaxError{Msg: "unterminated quoted value", Line: 1, Pos: 5}}, - {"a=\"1\\", &SyntaxError{Msg: "unterminated quoted value", Line: 1, Pos: 6}}, - {"a=\"\\t1", &SyntaxError{Msg: "unterminated quoted value", Line: 1, Pos: 7}}, - {"a=\"\\u1\"", &SyntaxError{Msg: "invalid quoted value", Line: 1, Pos: 8}}, - } - - for _, test := range tests { - dec := NewDecoder(strings.NewReader(test.data)) - - for dec.ScanRecord() { - for dec.ScanKeyval() { - } - } - if got, want := dec.Err(), test.want; !reflect.DeepEqual(got, want) { - t.Errorf("got: %v, want: %v", got, want) - } - } -} - -func 
TestDecoder_decode_encode(t *testing.T) { - tests := []struct { - in, out string - }{ - {"", ""}, - {"\n", "\n"}, - {"\n \n", "\n\n"}, - { - "a=1\nb=2\n", - "a=1\nb=2\n", - }, - { - "a=1 b=\"bar\" ƒ=2h3s r=\"esc\\t\" d x=sf ", - "a=1 b=bar ƒ=2h3s r=\"esc\\t\" d= x=sf\n", - }, - } - - for _, test := range tests { - dec := NewDecoder(strings.NewReader(test.in)) - buf := bytes.Buffer{} - enc := NewEncoder(&buf) - - var err error - loop: - for dec.ScanRecord() && err == nil { - for dec.ScanKeyval() { - if dec.Key() == nil { - continue - } - if err = enc.EncodeKeyval(dec.Key(), dec.Value()); err != nil { - break loop - } - } - enc.EndRecord() - } - if err == nil { - err = dec.Err() - } - if err != nil { - t.Errorf("got err: %v", err) - } - if got, want := buf.String(), test.out; got != want { - t.Errorf("\n got: %q\nwant: %q", got, want) - } - } -} diff --git a/vendor/github.com/go-logfmt/logfmt/doc.go b/vendor/github.com/go-logfmt/logfmt/doc.go deleted file mode 100644 index 378e9ad..0000000 --- a/vendor/github.com/go-logfmt/logfmt/doc.go +++ /dev/null @@ -1,6 +0,0 @@ -// Package logfmt implements utilities to marshal and unmarshal data in the -// logfmt format. The logfmt format records key/value pairs in a way that -// balances readability for humans and simplicity of computer parsing. It is -// most commonly used as a more human friendly alternative to JSON for -// structured logging. -package logfmt diff --git a/vendor/github.com/go-logfmt/logfmt/encode.go b/vendor/github.com/go-logfmt/logfmt/encode.go deleted file mode 100644 index 4d0fa23..0000000 --- a/vendor/github.com/go-logfmt/logfmt/encode.go +++ /dev/null @@ -1,312 +0,0 @@ -package logfmt - -import ( - "bytes" - "encoding" - "errors" - "fmt" - "io" - "reflect" - "strings" -) - -// MarshalKeyvals returns the logfmt encoding of keyvals, a variadic sequence -// of alternating keys and values. -func MarshalKeyvals(keyvals ...interface{}) ([]byte, error) { - buf := &bytes.Buffer{} - if err := NewEncoder(buf).EncodeKeyvals(keyvals...); err != nil { - return nil, err - } - return buf.Bytes(), nil -} - -// An Encoder writes logfmt data to an output stream. -type Encoder struct { - w io.Writer - scratch bytes.Buffer - needSep bool -} - -// NewEncoder returns a new encoder that writes to w. -func NewEncoder(w io.Writer) *Encoder { - return &Encoder{ - w: w, - } -} - -var ( - space = []byte(" ") - equals = []byte("=") - newline = []byte("\n") - null = []byte("null") -) - -// EncodeKeyval writes the logfmt encoding of key and value to the stream. A -// single space is written before the second and subsequent keys in a record. -// Nothing is written if a non-nil error is returned. -func (enc *Encoder) EncodeKeyval(key, value interface{}) error { - enc.scratch.Reset() - if enc.needSep { - if _, err := enc.scratch.Write(space); err != nil { - return err - } - } - if err := writeKey(&enc.scratch, key); err != nil { - return err - } - if _, err := enc.scratch.Write(equals); err != nil { - return err - } - if err := writeValue(&enc.scratch, value); err != nil { - return err - } - _, err := enc.w.Write(enc.scratch.Bytes()) - enc.needSep = true - return err -} - -// EncodeKeyvals writes the logfmt encoding of keyvals to the stream. Keyvals -// is a variadic sequence of alternating keys and values. Keys of unsupported -// type are skipped along with their corresponding value. Values of -// unsupported type or that cause a MarshalerError are replaced by their error -// but do not cause EncodeKeyvals to return an error. 
If a non-nil error is -// returned some key/value pairs may not have be written. -func (enc *Encoder) EncodeKeyvals(keyvals ...interface{}) error { - if len(keyvals) == 0 { - return nil - } - if len(keyvals)%2 == 1 { - keyvals = append(keyvals, nil) - } - for i := 0; i < len(keyvals); i += 2 { - k, v := keyvals[i], keyvals[i+1] - err := enc.EncodeKeyval(k, v) - if err == ErrUnsupportedKeyType { - continue - } - if _, ok := err.(*MarshalerError); ok || err == ErrUnsupportedValueType { - v = err - err = enc.EncodeKeyval(k, v) - } - if err != nil { - return err - } - } - return nil -} - -// MarshalerError represents an error encountered while marshaling a value. -type MarshalerError struct { - Type reflect.Type - Err error -} - -func (e *MarshalerError) Error() string { - return "error marshaling value of type " + e.Type.String() + ": " + e.Err.Error() -} - -// ErrNilKey is returned by Marshal functions and Encoder methods if a key is -// a nil interface or pointer value. -var ErrNilKey = errors.New("nil key") - -// ErrInvalidKey is returned by Marshal functions and Encoder methods if a key -// contains an invalid character. -var ErrInvalidKey = errors.New("invalid key") - -// ErrUnsupportedKeyType is returned by Encoder methods if a key has an -// unsupported type. -var ErrUnsupportedKeyType = errors.New("unsupported key type") - -// ErrUnsupportedValueType is returned by Encoder methods if a value has an -// unsupported type. -var ErrUnsupportedValueType = errors.New("unsupported value type") - -func writeKey(w io.Writer, key interface{}) error { - if key == nil { - return ErrNilKey - } - - switch k := key.(type) { - case string: - return writeStringKey(w, k) - case []byte: - if k == nil { - return ErrNilKey - } - return writeBytesKey(w, k) - case encoding.TextMarshaler: - kb, err := safeMarshal(k) - if err != nil { - return err - } - if kb == nil { - return ErrNilKey - } - return writeBytesKey(w, kb) - case fmt.Stringer: - ks, ok := safeString(k) - if !ok { - return ErrNilKey - } - return writeStringKey(w, ks) - default: - rkey := reflect.ValueOf(key) - switch rkey.Kind() { - case reflect.Array, reflect.Chan, reflect.Func, reflect.Map, reflect.Slice, reflect.Struct: - return ErrUnsupportedKeyType - case reflect.Ptr: - if rkey.IsNil() { - return ErrNilKey - } - return writeKey(w, rkey.Elem().Interface()) - } - return writeStringKey(w, fmt.Sprint(k)) - } -} - -func invalidKeyRune(r rune) bool { - return r <= ' ' || r == '=' || r == '"' -} - -func writeStringKey(w io.Writer, key string) error { - if len(key) == 0 || strings.IndexFunc(key, invalidKeyRune) != -1 { - return ErrInvalidKey - } - _, err := io.WriteString(w, key) - return err -} - -func writeBytesKey(w io.Writer, key []byte) error { - if len(key) == 0 || bytes.IndexFunc(key, invalidKeyRune) != -1 { - return ErrInvalidKey - } - _, err := w.Write(key) - return err -} - -func writeValue(w io.Writer, value interface{}) error { - switch v := value.(type) { - case nil: - return writeBytesValue(w, null) - case string: - return writeStringValue(w, v, true) - case []byte: - return writeBytesValue(w, v) - case encoding.TextMarshaler: - vb, err := safeMarshal(v) - if err != nil { - return err - } - if vb == nil { - vb = null - } - return writeBytesValue(w, vb) - case error: - se, ok := safeError(v) - return writeStringValue(w, se, ok) - case fmt.Stringer: - ss, ok := safeString(v) - return writeStringValue(w, ss, ok) - default: - rvalue := reflect.ValueOf(value) - switch rvalue.Kind() { - case reflect.Array, reflect.Chan, reflect.Func, 
reflect.Map, reflect.Slice, reflect.Struct: - return ErrUnsupportedValueType - case reflect.Ptr: - if rvalue.IsNil() { - return writeBytesValue(w, null) - } - return writeValue(w, rvalue.Elem().Interface()) - } - return writeStringValue(w, fmt.Sprint(v), true) - } -} - -func needsQuotedValueRune(r rune) bool { - return r <= ' ' || r == '=' || r == '"' -} - -func writeStringValue(w io.Writer, value string, ok bool) error { - var err error - if ok && value == "null" { - _, err = io.WriteString(w, `"null"`) - } else if strings.IndexFunc(value, needsQuotedValueRune) != -1 { - _, err = writeQuotedString(w, value) - } else { - _, err = io.WriteString(w, value) - } - return err -} - -func writeBytesValue(w io.Writer, value []byte) error { - var err error - if bytes.IndexFunc(value, needsQuotedValueRune) >= 0 { - _, err = writeQuotedBytes(w, value) - } else { - _, err = w.Write(value) - } - return err -} - -// EndRecord writes a newline character to the stream and resets the encoder -// to the beginning of a new record. -func (enc *Encoder) EndRecord() error { - _, err := enc.w.Write(newline) - if err == nil { - enc.needSep = false - } - return err -} - -// Reset resets the encoder to the beginning of a new record. -func (enc *Encoder) Reset() { - enc.needSep = false -} - -func safeError(err error) (s string, ok bool) { - defer func() { - if panicVal := recover(); panicVal != nil { - if v := reflect.ValueOf(err); v.Kind() == reflect.Ptr && v.IsNil() { - s, ok = "null", false - } else { - panic(panicVal) - } - } - }() - s, ok = err.Error(), true - return -} - -func safeString(str fmt.Stringer) (s string, ok bool) { - defer func() { - if panicVal := recover(); panicVal != nil { - if v := reflect.ValueOf(str); v.Kind() == reflect.Ptr && v.IsNil() { - s, ok = "null", false - } else { - panic(panicVal) - } - } - }() - s, ok = str.String(), true - return -} - -func safeMarshal(tm encoding.TextMarshaler) (b []byte, err error) { - defer func() { - if panicVal := recover(); panicVal != nil { - if v := reflect.ValueOf(tm); v.Kind() == reflect.Ptr && v.IsNil() { - b, err = nil, nil - } else { - panic(panicVal) - } - } - }() - b, err = tm.MarshalText() - if err != nil { - return nil, &MarshalerError{ - Type: reflect.TypeOf(tm), - Err: err, - } - } - return -} diff --git a/vendor/github.com/go-logfmt/logfmt/encode_internal_test.go b/vendor/github.com/go-logfmt/logfmt/encode_internal_test.go deleted file mode 100644 index 6271ce8..0000000 --- a/vendor/github.com/go-logfmt/logfmt/encode_internal_test.go +++ /dev/null @@ -1,233 +0,0 @@ -package logfmt - -import ( - "bytes" - "errors" - "fmt" - "reflect" - "testing" -) - -func TestSafeString(t *testing.T) { - _, ok := safeString((*stringStringer)(nil)) - if got, want := ok, false; got != want { - t.Errorf(" got %v, want %v", got, want) - } -} - -func TestSafeMarshal(t *testing.T) { - kb, err := safeMarshal((*stringMarshaler)(nil)) - if got := kb; got != nil { - t.Errorf(" got %v, want nil", got) - } - if got, want := err, error(nil); got != want { - t.Errorf(" got %v, want %v", got, want) - } -} - -func TestWriteKeyStrings(t *testing.T) { - keygen := []func(string) interface{}{ - func(s string) interface{} { return s }, - func(s string) interface{} { return stringData(s) }, - func(s string) interface{} { return stringStringer(s) }, - func(s string) interface{} { return stringMarshaler(s) }, - } - - data := []struct { - key string - want string - err error - }{ - {key: "k", want: "k"}, - {key: `\`, want: `\`}, - {key: "\n", err: ErrInvalidKey}, - {key: "\x00", err: 
ErrInvalidKey}, - {key: "\x10", err: ErrInvalidKey}, - {key: "\x1F", err: ErrInvalidKey}, - {key: "", err: ErrInvalidKey}, - {key: " ", err: ErrInvalidKey}, - {key: "=", err: ErrInvalidKey}, - {key: `"`, err: ErrInvalidKey}, - } - - for _, g := range keygen { - for _, d := range data { - w := &bytes.Buffer{} - key := g(d.key) - err := writeKey(w, key) - if err != d.err { - t.Errorf("%#v (%[1]T): got error: %v, want error: %v", key, err, d.err) - } - if err != nil { - continue - } - if got, want := w.String(), d.want; got != want { - t.Errorf("%#v (%[1]T): got '%s', want '%s'", key, got, want) - } - } - } -} - -func TestWriteKey(t *testing.T) { - var ( - nilPtr *int - one = 1 - ptr = &one - ) - - data := []struct { - key interface{} - want string - err error - }{ - {key: nil, err: ErrNilKey}, - {key: nilPtr, err: ErrNilKey}, - {key: (*stringStringer)(nil), err: ErrNilKey}, - {key: (*stringMarshaler)(nil), err: ErrNilKey}, - {key: (*stringerMarshaler)(nil), err: ErrNilKey}, - {key: ptr, want: "1"}, - - {key: errorMarshaler{}, err: &MarshalerError{Type: reflect.TypeOf(errorMarshaler{}), Err: errMarshaling}}, - {key: make(chan int), err: ErrUnsupportedKeyType}, - {key: []int{}, err: ErrUnsupportedKeyType}, - {key: map[int]int{}, err: ErrUnsupportedKeyType}, - {key: [2]int{}, err: ErrUnsupportedKeyType}, - {key: struct{}{}, err: ErrUnsupportedKeyType}, - {key: fmt.Sprint, err: ErrUnsupportedKeyType}, - } - - for _, d := range data { - w := &bytes.Buffer{} - err := writeKey(w, d.key) - if !reflect.DeepEqual(err, d.err) { - t.Errorf("%#v: got error: %v, want error: %v", d.key, err, d.err) - } - if err != nil { - continue - } - if got, want := w.String(), d.want; got != want { - t.Errorf("%#v: got '%s', want '%s'", d.key, got, want) - } - } -} - -func TestWriteValueStrings(t *testing.T) { - keygen := []func(string) interface{}{ - func(s string) interface{} { return s }, - func(s string) interface{} { return errors.New(s) }, - func(s string) interface{} { return stringData(s) }, - func(s string) interface{} { return stringStringer(s) }, - func(s string) interface{} { return stringMarshaler(s) }, - } - - data := []struct { - value string - want string - err error - }{ - {value: "", want: ""}, - {value: "v", want: "v"}, - {value: " ", want: `" "`}, - {value: "=", want: `"="`}, - {value: `\`, want: `\`}, - {value: `"`, want: `"\""`}, - {value: `\"`, want: `"\\\""`}, - {value: "\n", want: `"\n"`}, - {value: "\x00", want: `"\u0000"`}, - {value: "\x10", want: `"\u0010"`}, - {value: "\x1F", want: `"\u001f"`}, - {value: "µ", want: `µ`}, - } - - for _, g := range keygen { - for _, d := range data { - w := &bytes.Buffer{} - value := g(d.value) - err := writeValue(w, value) - if err != d.err { - t.Errorf("%#v (%[1]T): got error: %v, want error: %v", value, err, d.err) - } - if err != nil { - continue - } - if got, want := w.String(), d.want; got != want { - t.Errorf("%#v (%[1]T): got '%s', want '%s'", value, got, want) - } - } - } -} - -func TestWriteValue(t *testing.T) { - var ( - nilPtr *int - one = 1 - ptr = &one - ) - - data := []struct { - value interface{} - want string - err error - }{ - {value: nil, want: "null"}, - {value: nilPtr, want: "null"}, - {value: (*stringStringer)(nil), want: "null"}, - {value: (*stringMarshaler)(nil), want: "null"}, - {value: (*stringerMarshaler)(nil), want: "null"}, - {value: ptr, want: "1"}, - - {value: errorMarshaler{}, err: &MarshalerError{Type: reflect.TypeOf(errorMarshaler{}), Err: errMarshaling}}, - {value: make(chan int), err: ErrUnsupportedValueType}, - {value: 
[]int{}, err: ErrUnsupportedValueType}, - {value: map[int]int{}, err: ErrUnsupportedValueType}, - {value: [2]int{}, err: ErrUnsupportedValueType}, - {value: struct{}{}, err: ErrUnsupportedValueType}, - {value: fmt.Sprint, err: ErrUnsupportedValueType}, - } - - for _, d := range data { - w := &bytes.Buffer{} - err := writeValue(w, d.value) - if !reflect.DeepEqual(err, d.err) { - t.Errorf("%#v: got error: %v, want error: %v", d.value, err, d.err) - } - if err != nil { - continue - } - if got, want := w.String(), d.want; got != want { - t.Errorf("%#v: got '%s', want '%s'", d.value, got, want) - } - } -} - -type stringData string - -type stringStringer string - -func (s stringStringer) String() string { - return string(s) -} - -type stringMarshaler string - -func (s stringMarshaler) MarshalText() ([]byte, error) { - return []byte(s), nil -} - -type stringerMarshaler string - -func (s stringerMarshaler) String() string { - return "String() called" -} - -func (s stringerMarshaler) MarshalText() ([]byte, error) { - return []byte(s), nil -} - -var errMarshaling = errors.New("marshal error") - -type errorMarshaler struct{} - -func (errorMarshaler) MarshalText() ([]byte, error) { - return nil, errMarshaling -} diff --git a/vendor/github.com/go-logfmt/logfmt/encode_test.go b/vendor/github.com/go-logfmt/logfmt/encode_test.go deleted file mode 100644 index c7aa1d5..0000000 --- a/vendor/github.com/go-logfmt/logfmt/encode_test.go +++ /dev/null @@ -1,199 +0,0 @@ -package logfmt_test - -import ( - "bytes" - "errors" - "fmt" - "io/ioutil" - "reflect" - "testing" - "time" - - "github.com/go-logfmt/logfmt" -) - -func TestEncodeKeyval(t *testing.T) { - data := []struct { - key, value interface{} - want string - err error - }{ - {key: "k", value: "v", want: "k=v"}, - {key: "k", value: nil, want: "k=null"}, - {key: `\`, value: "v", want: `\=v`}, - {key: "k", value: "", want: "k="}, - {key: "k", value: "null", want: `k="null"`}, - {key: "k", value: "", want: `k=`}, - {key: "k", value: true, want: "k=true"}, - {key: "k", value: 1, want: "k=1"}, - {key: "k", value: 1.025, want: "k=1.025"}, - {key: "k", value: 1e-3, want: "k=0.001"}, - {key: "k", value: 3.5 + 2i, want: "k=(3.5+2i)"}, - {key: "k", value: "v v", want: `k="v v"`}, - {key: "k", value: " ", want: `k=" "`}, - {key: "k", value: `"`, want: `k="\""`}, - {key: "k", value: `=`, want: `k="="`}, - {key: "k", value: `\`, want: `k=\`}, - {key: "k", value: `=\`, want: `k="=\\"`}, - {key: "k", value: `\"`, want: `k="\\\""`}, - {key: "k", value: [2]int{2, 19}, err: logfmt.ErrUnsupportedValueType}, - {key: "k", value: []string{"e1", "e 2"}, err: logfmt.ErrUnsupportedValueType}, - {key: "k", value: structData{"a a", 9}, err: logfmt.ErrUnsupportedValueType}, - {key: "k", value: decimalMarshaler{5, 9}, want: "k=5.9"}, - {key: "k", value: (*decimalMarshaler)(nil), want: "k=null"}, - {key: "k", value: decimalStringer{5, 9}, want: "k=5.9"}, - {key: "k", value: (*decimalStringer)(nil), want: "k=null"}, - {key: "k", value: marshalerStringer{5, 9}, want: "k=5.9"}, - {key: "k", value: (*marshalerStringer)(nil), want: "k=null"}, - {key: "k", value: new(nilMarshaler), want: "k=notnilmarshaler"}, - {key: "k", value: (*nilMarshaler)(nil), want: "k=nilmarshaler"}, - {key: (*marshalerStringer)(nil), value: "v", err: logfmt.ErrNilKey}, - {key: decimalMarshaler{5, 9}, value: "v", want: "5.9=v"}, - {key: (*decimalMarshaler)(nil), value: "v", err: logfmt.ErrNilKey}, - {key: decimalStringer{5, 9}, value: "v", want: "5.9=v"}, - {key: (*decimalStringer)(nil), value: "v", err: 
logfmt.ErrNilKey}, - {key: marshalerStringer{5, 9}, value: "v", want: "5.9=v"}, - } - - for _, d := range data { - w := &bytes.Buffer{} - enc := logfmt.NewEncoder(w) - err := enc.EncodeKeyval(d.key, d.value) - if err != d.err { - t.Errorf("%#v, %#v: got error: %v, want error: %v", d.key, d.value, err, d.err) - } - if got, want := w.String(), d.want; got != want { - t.Errorf("%#v, %#v: got '%s', want '%s'", d.key, d.value, got, want) - } - } -} - -func TestMarshalKeyvals(t *testing.T) { - one := 1 - ptr := &one - nilPtr := (*int)(nil) - - data := []struct { - in []interface{} - want []byte - err error - }{ - {in: nil, want: nil}, - {in: kv(), want: nil}, - {in: kv(nil, "v"), err: logfmt.ErrNilKey}, - {in: kv(nilPtr, "v"), err: logfmt.ErrNilKey}, - {in: kv("k"), want: []byte("k=null")}, - {in: kv("k", nil), want: []byte("k=null")}, - {in: kv("k", ""), want: []byte("k=")}, - {in: kv("k", "null"), want: []byte(`k="null"`)}, - {in: kv("k", "v"), want: []byte("k=v")}, - {in: kv("k", true), want: []byte("k=true")}, - {in: kv("k", 1), want: []byte("k=1")}, - {in: kv("k", ptr), want: []byte("k=1")}, - {in: kv("k", nilPtr), want: []byte("k=null")}, - {in: kv("k", 1.025), want: []byte("k=1.025")}, - {in: kv("k", 1e-3), want: []byte("k=0.001")}, - {in: kv("k", "v v"), want: []byte(`k="v v"`)}, - {in: kv("k", `"`), want: []byte(`k="\""`)}, - {in: kv("k", `=`), want: []byte(`k="="`)}, - {in: kv("k", `\`), want: []byte(`k=\`)}, - {in: kv("k", `=\`), want: []byte(`k="=\\"`)}, - {in: kv("k", `\"`), want: []byte(`k="\\\""`)}, - {in: kv("k1", "v1", "k2", "v2"), want: []byte("k1=v1 k2=v2")}, - {in: kv("k1", "v1", "k2", [2]int{}), want: []byte("k1=v1 k2=\"unsupported value type\"")}, - {in: kv([2]int{}, "v1", "k2", "v2"), want: []byte("k2=v2")}, - {in: kv("k", time.Date(2009, time.November, 10, 23, 0, 0, 0, time.UTC)), want: []byte("k=2009-11-10T23:00:00Z")}, - {in: kv("k", errorMarshaler{}), want: []byte("k=\"error marshaling value of type logfmt_test.errorMarshaler: marshal error\"")}, - {in: kv("k", decimalMarshaler{5, 9}), want: []byte("k=5.9")}, - {in: kv("k", (*decimalMarshaler)(nil)), want: []byte("k=null")}, - {in: kv("k", decimalStringer{5, 9}), want: []byte("k=5.9")}, - {in: kv("k", (*decimalStringer)(nil)), want: []byte("k=null")}, - {in: kv("k", marshalerStringer{5, 9}), want: []byte("k=5.9")}, - {in: kv("k", (*marshalerStringer)(nil)), want: []byte("k=null")}, - {in: kv(one, "v"), want: []byte("1=v")}, - {in: kv(ptr, "v"), want: []byte("1=v")}, - {in: kv((*marshalerStringer)(nil), "v"), err: logfmt.ErrNilKey}, - {in: kv(decimalMarshaler{5, 9}, "v"), want: []byte("5.9=v")}, - {in: kv((*decimalMarshaler)(nil), "v"), err: logfmt.ErrNilKey}, - {in: kv(decimalStringer{5, 9}, "v"), want: []byte("5.9=v")}, - {in: kv((*decimalStringer)(nil), "v"), err: logfmt.ErrNilKey}, - {in: kv(marshalerStringer{5, 9}, "v"), want: []byte("5.9=v")}, - } - - for _, d := range data { - got, err := logfmt.MarshalKeyvals(d.in...) 
- if err != d.err { - t.Errorf("%#v: got error: %v, want error: %v", d.in, err, d.err) - } - if got, want := got, d.want; !reflect.DeepEqual(got, want) { - t.Errorf("%#v: got '%s', want '%s'", d.in, got, want) - } - } -} - -func kv(keyvals ...interface{}) []interface{} { - return keyvals -} - -type structData struct { - A string `logfmt:"fieldA"` - B int -} - -type nilMarshaler int - -func (m *nilMarshaler) MarshalText() ([]byte, error) { - if m == nil { - return []byte("nilmarshaler"), nil - } - return []byte("notnilmarshaler"), nil -} - -type decimalMarshaler struct { - a, b int -} - -func (t decimalMarshaler) MarshalText() ([]byte, error) { - buf := &bytes.Buffer{} - fmt.Fprintf(buf, "%d.%d", t.a, t.b) - return buf.Bytes(), nil -} - -type decimalStringer struct { - a, b int -} - -func (s decimalStringer) String() string { - return fmt.Sprintf("%d.%d", s.a, s.b) -} - -type marshalerStringer struct { - a, b int -} - -func (t marshalerStringer) MarshalText() ([]byte, error) { - buf := &bytes.Buffer{} - fmt.Fprintf(buf, "%d.%d", t.a, t.b) - return buf.Bytes(), nil -} - -func (t marshalerStringer) String() string { - return fmt.Sprint(t.a + t.b) -} - -var marshalError = errors.New("marshal error") - -type errorMarshaler struct{} - -func (errorMarshaler) MarshalText() ([]byte, error) { - return nil, marshalError -} - -func BenchmarkEncodeKeyval(b *testing.B) { - b.ReportAllocs() - enc := logfmt.NewEncoder(ioutil.Discard) - for i := 0; i < b.N; i++ { - enc.EncodeKeyval("sk", "10") - enc.EncodeKeyval("some-key", "a rather long string with spaces") - } -} diff --git a/vendor/github.com/go-logfmt/logfmt/example_test.go b/vendor/github.com/go-logfmt/logfmt/example_test.go deleted file mode 100644 index 829dbd5..0000000 --- a/vendor/github.com/go-logfmt/logfmt/example_test.go +++ /dev/null @@ -1,60 +0,0 @@ -package logfmt_test - -import ( - "errors" - "fmt" - "os" - "strings" - "time" - - "github.com/go-logfmt/logfmt" -) - -func ExampleEncoder() { - check := func(err error) { - if err != nil { - panic(err) - } - } - - e := logfmt.NewEncoder(os.Stdout) - - check(e.EncodeKeyval("id", 1)) - check(e.EncodeKeyval("dur", time.Second+time.Millisecond)) - check(e.EndRecord()) - - check(e.EncodeKeyval("id", 1)) - check(e.EncodeKeyval("path", "/path/to/file")) - check(e.EncodeKeyval("err", errors.New("file not found"))) - check(e.EndRecord()) - - // Output: - // id=1 dur=1.001s - // id=1 path=/path/to/file err="file not found" -} - -func ExampleDecoder() { - in := ` -id=1 dur=1.001s -id=1 path=/path/to/file err="file not found" -` - - d := logfmt.NewDecoder(strings.NewReader(in)) - for d.ScanRecord() { - for d.ScanKeyval() { - fmt.Printf("k: %s v: %s\n", d.Key(), d.Value()) - } - fmt.Println() - } - if d.Err() != nil { - panic(d.Err()) - } - - // Output: - // k: id v: 1 - // k: dur v: 1.001s - // - // k: id v: 1 - // k: path v: /path/to/file - // k: err v: file not found -} diff --git a/vendor/github.com/go-logfmt/logfmt/fuzz.go b/vendor/github.com/go-logfmt/logfmt/fuzz.go deleted file mode 100644 index ab916a9..0000000 --- a/vendor/github.com/go-logfmt/logfmt/fuzz.go +++ /dev/null @@ -1,125 +0,0 @@ -// +build gofuzz - -package logfmt - -import ( - "bufio" - "bytes" - "fmt" - "io" - "reflect" - - kr "github.com/kr/logfmt" -) - -func Fuzz(data []byte) int { - parsed, err := parse(data) - if err != nil { - return 0 - } - var w1 bytes.Buffer - if err := write(parsed, &w1); err != nil { - panic(err) - } - parsed, err = parse(data) - if err != nil { - panic(err) - } - var w2 bytes.Buffer - if err := write(parsed, 
&w2); err != nil { - panic(err) - } - if !bytes.Equal(w1.Bytes(), w2.Bytes()) { - panic(fmt.Sprintf("reserialized data does not match:\n%q\n%q\n", w1.Bytes(), w2.Bytes())) - } - return 1 -} - -func FuzzVsKR(data []byte) int { - parsed, err := parse(data) - parsedKR, errKR := parseKR(data) - - // github.com/go-logfmt/logfmt is a stricter parser. It returns errors for - // more inputs than github.com/kr/logfmt. Ignore any inputs that have a - // stict error. - if err != nil { - return 0 - } - - // Fail if the more forgiving parser finds an error not found by the - // stricter parser. - if errKR != nil { - panic(fmt.Sprintf("unmatched error: %v", errKR)) - } - - if !reflect.DeepEqual(parsed, parsedKR) { - panic(fmt.Sprintf("parsers disagree:\n%+v\n%+v\n", parsed, parsedKR)) - } - return 1 -} - -type kv struct { - k, v []byte -} - -func parse(data []byte) ([][]kv, error) { - var got [][]kv - dec := NewDecoder(bytes.NewReader(data)) - for dec.ScanRecord() { - var kvs []kv - for dec.ScanKeyval() { - kvs = append(kvs, kv{dec.Key(), dec.Value()}) - } - got = append(got, kvs) - kvs = nil - } - return got, dec.Err() -} - -func parseKR(data []byte) ([][]kv, error) { - var ( - s = bufio.NewScanner(bytes.NewReader(data)) - err error - h saveHandler - got [][]kv - ) - for err == nil && s.Scan() { - h.kvs = nil - err = kr.Unmarshal(s.Bytes(), &h) - got = append(got, h.kvs) - } - if err == nil { - err = s.Err() - } - return got, err -} - -type saveHandler struct { - kvs []kv -} - -func (h *saveHandler) HandleLogfmt(key, val []byte) error { - if len(key) == 0 { - key = nil - } - if len(val) == 0 { - val = nil - } - h.kvs = append(h.kvs, kv{key, val}) - return nil -} - -func write(recs [][]kv, w io.Writer) error { - enc := NewEncoder(w) - for _, rec := range recs { - for _, f := range rec { - if err := enc.EncodeKeyval(f.k, f.v); err != nil { - return err - } - } - if err := enc.EndRecord(); err != nil { - return err - } - } - return nil -} diff --git a/vendor/github.com/go-logfmt/logfmt/jsonstring.go b/vendor/github.com/go-logfmt/logfmt/jsonstring.go deleted file mode 100644 index 53b6532..0000000 --- a/vendor/github.com/go-logfmt/logfmt/jsonstring.go +++ /dev/null @@ -1,277 +0,0 @@ -package logfmt - -import ( - "bytes" - "io" - "strconv" - "sync" - "unicode" - "unicode/utf16" - "unicode/utf8" -) - -// Taken from Go's encoding/json and modified for use here. - -// Copyright 2010 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -var hex = "0123456789abcdef" - -var bufferPool = sync.Pool{ - New: func() interface{} { - return &bytes.Buffer{} - }, -} - -func getBuffer() *bytes.Buffer { - return bufferPool.Get().(*bytes.Buffer) -} - -func poolBuffer(buf *bytes.Buffer) { - buf.Reset() - bufferPool.Put(buf) -} - -// NOTE: keep in sync with writeQuotedBytes below. -func writeQuotedString(w io.Writer, s string) (int, error) { - buf := getBuffer() - buf.WriteByte('"') - start := 0 - for i := 0; i < len(s); { - if b := s[i]; b < utf8.RuneSelf { - if 0x20 <= b && b != '\\' && b != '"' { - i++ - continue - } - if start < i { - buf.WriteString(s[start:i]) - } - switch b { - case '\\', '"': - buf.WriteByte('\\') - buf.WriteByte(b) - case '\n': - buf.WriteByte('\\') - buf.WriteByte('n') - case '\r': - buf.WriteByte('\\') - buf.WriteByte('r') - case '\t': - buf.WriteByte('\\') - buf.WriteByte('t') - default: - // This encodes bytes < 0x20 except for \n, \r, and \t. 
- buf.WriteString(`\u00`) - buf.WriteByte(hex[b>>4]) - buf.WriteByte(hex[b&0xF]) - } - i++ - start = i - continue - } - c, size := utf8.DecodeRuneInString(s[i:]) - if c == utf8.RuneError && size == 1 { - if start < i { - buf.WriteString(s[start:i]) - } - buf.WriteString(`\ufffd`) - i += size - start = i - continue - } - i += size - } - if start < len(s) { - buf.WriteString(s[start:]) - } - buf.WriteByte('"') - n, err := w.Write(buf.Bytes()) - poolBuffer(buf) - return n, err -} - -// NOTE: keep in sync with writeQuoteString above. -func writeQuotedBytes(w io.Writer, s []byte) (int, error) { - buf := getBuffer() - buf.WriteByte('"') - start := 0 - for i := 0; i < len(s); { - if b := s[i]; b < utf8.RuneSelf { - if 0x20 <= b && b != '\\' && b != '"' { - i++ - continue - } - if start < i { - buf.Write(s[start:i]) - } - switch b { - case '\\', '"': - buf.WriteByte('\\') - buf.WriteByte(b) - case '\n': - buf.WriteByte('\\') - buf.WriteByte('n') - case '\r': - buf.WriteByte('\\') - buf.WriteByte('r') - case '\t': - buf.WriteByte('\\') - buf.WriteByte('t') - default: - // This encodes bytes < 0x20 except for \n, \r, and \t. - buf.WriteString(`\u00`) - buf.WriteByte(hex[b>>4]) - buf.WriteByte(hex[b&0xF]) - } - i++ - start = i - continue - } - c, size := utf8.DecodeRune(s[i:]) - if c == utf8.RuneError && size == 1 { - if start < i { - buf.Write(s[start:i]) - } - buf.WriteString(`\ufffd`) - i += size - start = i - continue - } - i += size - } - if start < len(s) { - buf.Write(s[start:]) - } - buf.WriteByte('"') - n, err := w.Write(buf.Bytes()) - poolBuffer(buf) - return n, err -} - -// getu4 decodes \uXXXX from the beginning of s, returning the hex value, -// or it returns -1. -func getu4(s []byte) rune { - if len(s) < 6 || s[0] != '\\' || s[1] != 'u' { - return -1 - } - r, err := strconv.ParseUint(string(s[2:6]), 16, 64) - if err != nil { - return -1 - } - return rune(r) -} - -func unquoteBytes(s []byte) (t []byte, ok bool) { - if len(s) < 2 || s[0] != '"' || s[len(s)-1] != '"' { - return - } - s = s[1 : len(s)-1] - - // Check for unusual characters. If there are none, - // then no unquoting is needed, so return a slice of the - // original bytes. - r := 0 - for r < len(s) { - c := s[r] - if c == '\\' || c == '"' || c < ' ' { - break - } - if c < utf8.RuneSelf { - r++ - continue - } - rr, size := utf8.DecodeRune(s[r:]) - if rr == utf8.RuneError && size == 1 { - break - } - r += size - } - if r == len(s) { - return s, true - } - - b := make([]byte, len(s)+2*utf8.UTFMax) - w := copy(b, s[0:r]) - for r < len(s) { - // Out of room? Can only happen if s is full of - // malformed UTF-8 and we're replacing each - // byte with RuneError. - if w >= len(b)-2*utf8.UTFMax { - nb := make([]byte, (len(b)+utf8.UTFMax)*2) - copy(nb, b[0:w]) - b = nb - } - switch c := s[r]; { - case c == '\\': - r++ - if r >= len(s) { - return - } - switch s[r] { - default: - return - case '"', '\\', '/', '\'': - b[w] = s[r] - r++ - w++ - case 'b': - b[w] = '\b' - r++ - w++ - case 'f': - b[w] = '\f' - r++ - w++ - case 'n': - b[w] = '\n' - r++ - w++ - case 'r': - b[w] = '\r' - r++ - w++ - case 't': - b[w] = '\t' - r++ - w++ - case 'u': - r-- - rr := getu4(s[r:]) - if rr < 0 { - return - } - r += 6 - if utf16.IsSurrogate(rr) { - rr1 := getu4(s[r:]) - if dec := utf16.DecodeRune(rr, rr1); dec != unicode.ReplacementChar { - // A valid pair; consume. - r += 6 - w += utf8.EncodeRune(b[w:], dec) - break - } - // Invalid surrogate; fall back to replacement rune. 
- rr = unicode.ReplacementChar - } - w += utf8.EncodeRune(b[w:], rr) - } - - // Quote, control characters are invalid. - case c == '"', c < ' ': - return - - // ASCII - case c < utf8.RuneSelf: - b[w] = c - r++ - w++ - - // Coerce to well-formed UTF-8. - default: - rr, size := utf8.DecodeRune(s[r:]) - r += size - w += utf8.EncodeRune(b[w:], rr) - } - } - return b[0:w], true -} diff --git a/vendor/github.com/go-stack/stack/.travis.yml b/vendor/github.com/go-stack/stack/.travis.yml deleted file mode 100644 index d5e5dd5..0000000 --- a/vendor/github.com/go-stack/stack/.travis.yml +++ /dev/null @@ -1,16 +0,0 @@ -language: go -sudo: false -go: - - 1.2 - - 1.3 - - 1.4 - - 1.5 - - 1.6 - - tip - -before_install: - - go get github.com/mattn/goveralls - - go get golang.org/x/tools/cmd/cover - -script: - - goveralls -service=travis-ci diff --git a/vendor/github.com/go-stack/stack/LICENSE.md b/vendor/github.com/go-stack/stack/LICENSE.md deleted file mode 100644 index c8ca66c..0000000 --- a/vendor/github.com/go-stack/stack/LICENSE.md +++ /dev/null @@ -1,13 +0,0 @@ -Copyright 2014 Chris Hines - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. diff --git a/vendor/github.com/go-stack/stack/README.md b/vendor/github.com/go-stack/stack/README.md deleted file mode 100644 index f11cccc..0000000 --- a/vendor/github.com/go-stack/stack/README.md +++ /dev/null @@ -1,38 +0,0 @@ -[![GoDoc](https://godoc.org/github.com/go-stack/stack?status.svg)](https://godoc.org/github.com/go-stack/stack) -[![Go Report Card](https://goreportcard.com/badge/go-stack/stack)](https://goreportcard.com/report/go-stack/stack) -[![TravisCI](https://travis-ci.org/go-stack/stack.svg?branch=master)](https://travis-ci.org/go-stack/stack) -[![Coverage Status](https://coveralls.io/repos/github/go-stack/stack/badge.svg?branch=master)](https://coveralls.io/github/go-stack/stack?branch=master) - -# stack - -Package stack implements utilities to capture, manipulate, and format call -stacks. It provides a simpler API than package runtime. - -The implementation takes care of the minutia and special cases of interpreting -the program counter (pc) values returned by runtime.Callers. - -## Versioning - -Package stack publishes releases via [semver](http://semver.org/) compatible Git -tags prefixed with a single 'v'. The master branch always contains the latest -release. The develop branch contains unreleased commits. - -## Formatting - -Package stack's types implement fmt.Formatter, which provides a simple and -flexible way to declaratively configure formatting when used with logging or -error tracking packages. - -```go -func DoTheThing() { - c := stack.Caller(0) - log.Print(c) // "source.go:10" - log.Printf("%+v", c) // "pkg/path/source.go:10" - log.Printf("%n", c) // "DoTheThing" - - s := stack.Trace().TrimRuntime() - log.Print(s) // "[source.go:15 caller.go:42 main.go:14]" -} -``` - -See the docs for all of the supported formatting options. 
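For orientation on the package being removed above, here is a minimal, illustrative sketch (not part of this diff) of how its caller API is typically wired into logging. The `logf` wrapper is a hypothetical helper, but every `stack` call it makes (`Caller`, `Trace`, `TrimRuntime`, and the `%+v` verb) is documented in the README and source shown in this diff.

```go
// Hypothetical sketch: annotating log lines with caller information using
// the go-stack/stack API described above. Not part of this repository.
package main

import (
	"log"

	"github.com/go-stack/stack"
)

// logf prefixes each message with the file:line of its caller, formatted
// with the %+v verb provided by package stack.
func logf(format string, args ...interface{}) {
	c := stack.Caller(1) // skip one frame so we report logf's caller
	log.Printf("%+v: "+format, append([]interface{}{c}, args...)...)
}

func main() {
	logf("starting up")

	// Capture the current call stack and drop runtime frames before
	// printing it, e.g. for an error report.
	trace := stack.Trace().TrimRuntime()
	log.Printf("trace: %v", trace)
}
```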
diff --git a/vendor/github.com/go-stack/stack/format_test.go b/vendor/github.com/go-stack/stack/format_test.go deleted file mode 100644 index 013ad67..0000000 --- a/vendor/github.com/go-stack/stack/format_test.go +++ /dev/null @@ -1,21 +0,0 @@ -// +build go1.2 - -package stack_test - -import ( - "fmt" - - "github.com/go-stack/stack" -) - -func Example_callFormat() { - logCaller("%+s") - logCaller("%v %[1]n()") - // Output: - // github.com/go-stack/stack/format_test.go - // format_test.go:13 Example_callFormat() -} - -func logCaller(format string) { - fmt.Printf(format+"\n", stack.Caller(1)) -} diff --git a/vendor/github.com/go-stack/stack/stack.go b/vendor/github.com/go-stack/stack/stack.go deleted file mode 100644 index a614eee..0000000 --- a/vendor/github.com/go-stack/stack/stack.go +++ /dev/null @@ -1,349 +0,0 @@ -// Package stack implements utilities to capture, manipulate, and format call -// stacks. It provides a simpler API than package runtime. -// -// The implementation takes care of the minutia and special cases of -// interpreting the program counter (pc) values returned by runtime.Callers. -// -// Package stack's types implement fmt.Formatter, which provides a simple and -// flexible way to declaratively configure formatting when used with logging -// or error tracking packages. -package stack - -import ( - "bytes" - "errors" - "fmt" - "io" - "runtime" - "strconv" - "strings" -) - -// Call records a single function invocation from a goroutine stack. -type Call struct { - fn *runtime.Func - pc uintptr -} - -// Caller returns a Call from the stack of the current goroutine. The argument -// skip is the number of stack frames to ascend, with 0 identifying the -// calling function. -func Caller(skip int) Call { - var pcs [2]uintptr - n := runtime.Callers(skip+1, pcs[:]) - - var c Call - - if n < 2 { - return c - } - - c.pc = pcs[1] - if runtime.FuncForPC(pcs[0]) != sigpanic { - c.pc-- - } - c.fn = runtime.FuncForPC(c.pc) - return c -} - -// String implements fmt.Stinger. It is equivalent to fmt.Sprintf("%v", c). -func (c Call) String() string { - return fmt.Sprint(c) -} - -// MarshalText implements encoding.TextMarshaler. It formats the Call the same -// as fmt.Sprintf("%v", c). -func (c Call) MarshalText() ([]byte, error) { - if c.fn == nil { - return nil, ErrNoFunc - } - buf := bytes.Buffer{} - fmt.Fprint(&buf, c) - return buf.Bytes(), nil -} - -// ErrNoFunc means that the Call has a nil *runtime.Func. The most likely -// cause is a Call with the zero value. -var ErrNoFunc = errors.New("no call stack information") - -// Format implements fmt.Formatter with support for the following verbs. -// -// %s source file -// %d line number -// %n function name -// %v equivalent to %s:%d -// -// It accepts the '+' and '#' flags for most of the verbs as follows. 
-// -// %+s path of source file relative to the compile time GOPATH -// %#s full path of source file -// %+n import path qualified function name -// %+v equivalent to %+s:%d -// %#v equivalent to %#s:%d -func (c Call) Format(s fmt.State, verb rune) { - if c.fn == nil { - fmt.Fprintf(s, "%%!%c(NOFUNC)", verb) - return - } - - switch verb { - case 's', 'v': - file, line := c.fn.FileLine(c.pc) - switch { - case s.Flag('#'): - // done - case s.Flag('+'): - file = file[pkgIndex(file, c.fn.Name()):] - default: - const sep = "/" - if i := strings.LastIndex(file, sep); i != -1 { - file = file[i+len(sep):] - } - } - io.WriteString(s, file) - if verb == 'v' { - buf := [7]byte{':'} - s.Write(strconv.AppendInt(buf[:1], int64(line), 10)) - } - - case 'd': - _, line := c.fn.FileLine(c.pc) - buf := [6]byte{} - s.Write(strconv.AppendInt(buf[:0], int64(line), 10)) - - case 'n': - name := c.fn.Name() - if !s.Flag('+') { - const pathSep = "/" - if i := strings.LastIndex(name, pathSep); i != -1 { - name = name[i+len(pathSep):] - } - const pkgSep = "." - if i := strings.Index(name, pkgSep); i != -1 { - name = name[i+len(pkgSep):] - } - } - io.WriteString(s, name) - } -} - -// PC returns the program counter for this call frame; multiple frames may -// have the same PC value. -func (c Call) PC() uintptr { - return c.pc -} - -// name returns the import path qualified name of the function containing the -// call. -func (c Call) name() string { - if c.fn == nil { - return "???" - } - return c.fn.Name() -} - -func (c Call) file() string { - if c.fn == nil { - return "???" - } - file, _ := c.fn.FileLine(c.pc) - return file -} - -func (c Call) line() int { - if c.fn == nil { - return 0 - } - _, line := c.fn.FileLine(c.pc) - return line -} - -// CallStack records a sequence of function invocations from a goroutine -// stack. -type CallStack []Call - -// String implements fmt.Stinger. It is equivalent to fmt.Sprintf("%v", cs). -func (cs CallStack) String() string { - return fmt.Sprint(cs) -} - -var ( - openBracketBytes = []byte("[") - closeBracketBytes = []byte("]") - spaceBytes = []byte(" ") -) - -// MarshalText implements encoding.TextMarshaler. It formats the CallStack the -// same as fmt.Sprintf("%v", cs). -func (cs CallStack) MarshalText() ([]byte, error) { - buf := bytes.Buffer{} - buf.Write(openBracketBytes) - for i, pc := range cs { - if pc.fn == nil { - return nil, ErrNoFunc - } - if i > 0 { - buf.Write(spaceBytes) - } - fmt.Fprint(&buf, pc) - } - buf.Write(closeBracketBytes) - return buf.Bytes(), nil -} - -// Format implements fmt.Formatter by printing the CallStack as square brackets -// ([, ]) surrounding a space separated list of Calls each formatted with the -// supplied verb and options. -func (cs CallStack) Format(s fmt.State, verb rune) { - s.Write(openBracketBytes) - for i, pc := range cs { - if i > 0 { - s.Write(spaceBytes) - } - pc.Format(s, verb) - } - s.Write(closeBracketBytes) -} - -// findSigpanic intentionally executes faulting code to generate a stack trace -// containing an entry for runtime.sigpanic. 
-func findSigpanic() *runtime.Func { - var fn *runtime.Func - var p *int - func() int { - defer func() { - if p := recover(); p != nil { - var pcs [512]uintptr - n := runtime.Callers(2, pcs[:]) - for _, pc := range pcs[:n] { - f := runtime.FuncForPC(pc) - if f.Name() == "runtime.sigpanic" { - fn = f - break - } - } - } - }() - // intentional nil pointer dereference to trigger sigpanic - return *p - }() - return fn -} - -var sigpanic = findSigpanic() - -// Trace returns a CallStack for the current goroutine with element 0 -// identifying the calling function. -func Trace() CallStack { - var pcs [512]uintptr - n := runtime.Callers(2, pcs[:]) - cs := make([]Call, n) - - for i, pc := range pcs[:n] { - pcFix := pc - if i > 0 && cs[i-1].fn != sigpanic { - pcFix-- - } - cs[i] = Call{ - fn: runtime.FuncForPC(pcFix), - pc: pcFix, - } - } - - return cs -} - -// TrimBelow returns a slice of the CallStack with all entries below c -// removed. -func (cs CallStack) TrimBelow(c Call) CallStack { - for len(cs) > 0 && cs[0].pc != c.pc { - cs = cs[1:] - } - return cs -} - -// TrimAbove returns a slice of the CallStack with all entries above c -// removed. -func (cs CallStack) TrimAbove(c Call) CallStack { - for len(cs) > 0 && cs[len(cs)-1].pc != c.pc { - cs = cs[:len(cs)-1] - } - return cs -} - -// pkgIndex returns the index that results in file[index:] being the path of -// file relative to the compile time GOPATH, and file[:index] being the -// $GOPATH/src/ portion of file. funcName must be the name of a function in -// file as returned by runtime.Func.Name. -func pkgIndex(file, funcName string) int { - // As of Go 1.6.2 there is no direct way to know the compile time GOPATH - // at runtime, but we can infer the number of path segments in the GOPATH. - // We note that runtime.Func.Name() returns the function name qualified by - // the import path, which does not include the GOPATH. Thus we can trim - // segments from the beginning of the file path until the number of path - // separators remaining is one more than the number of path separators in - // the function name. For example, given: - // - // GOPATH /home/user - // file /home/user/src/pkg/sub/file.go - // fn.Name() pkg/sub.Type.Method - // - // We want to produce: - // - // file[:idx] == /home/user/src/ - // file[idx:] == pkg/sub/file.go - // - // From this we can easily see that fn.Name() has one less path separator - // than our desired result for file[idx:]. We count separators from the - // end of the file path until it finds two more than in the function name - // and then move one character forward to preserve the initial path - // segment without a leading separator. - const sep = "/" - i := len(file) - for n := strings.Count(funcName, sep) + 2; n > 0; n-- { - i = strings.LastIndex(file[:i], sep) - if i == -1 { - i = -len(sep) - break - } - } - // get back to 0 or trim the leading separator - return i + len(sep) -} - -var runtimePath string - -func init() { - var pcs [1]uintptr - runtime.Callers(0, pcs[:]) - fn := runtime.FuncForPC(pcs[0]) - file, _ := fn.FileLine(pcs[0]) - - idx := pkgIndex(file, fn.Name()) - - runtimePath = file[:idx] - if runtime.GOOS == "windows" { - runtimePath = strings.ToLower(runtimePath) - } -} - -func inGoroot(c Call) bool { - file := c.file() - if len(file) == 0 || file[0] == '?' 
{ - return true - } - if runtime.GOOS == "windows" { - file = strings.ToLower(file) - } - return strings.HasPrefix(file, runtimePath) || strings.HasSuffix(file, "/_testmain.go") -} - -// TrimRuntime returns a slice of the CallStack with the topmost entries from -// the go runtime removed. It considers any calls originating from unknown -// files, files under GOROOT, or _testmain.go as part of the runtime. -func (cs CallStack) TrimRuntime() CallStack { - for len(cs) > 0 && inGoroot(cs[len(cs)-1]) { - cs = cs[:len(cs)-1] - } - return cs -} diff --git a/vendor/github.com/go-stack/stack/stack_test.go b/vendor/github.com/go-stack/stack/stack_test.go deleted file mode 100644 index f203680..0000000 --- a/vendor/github.com/go-stack/stack/stack_test.go +++ /dev/null @@ -1,398 +0,0 @@ -package stack_test - -import ( - "fmt" - "io/ioutil" - "path" - "path/filepath" - "reflect" - "runtime" - "strings" - "testing" - - "github.com/go-stack/stack" -) - -const importPath = "github.com/go-stack/stack" - -type testType struct{} - -func (tt testType) testMethod() (c stack.Call, pc uintptr, file string, line int, ok bool) { - c = stack.Caller(0) - pc, file, line, ok = runtime.Caller(0) - line-- - return -} - -func TestCallFormat(t *testing.T) { - t.Parallel() - - c := stack.Caller(0) - pc, file, line, ok := runtime.Caller(0) - line-- - if !ok { - t.Fatal("runtime.Caller(0) failed") - } - relFile := path.Join(importPath, filepath.Base(file)) - - c2, pc2, file2, line2, ok2 := testType{}.testMethod() - if !ok2 { - t.Fatal("runtime.Caller(0) failed") - } - relFile2 := path.Join(importPath, filepath.Base(file2)) - - data := []struct { - c stack.Call - desc string - fmt string - out string - }{ - {stack.Call{}, "error", "%s", "%!s(NOFUNC)"}, - - {c, "func", "%s", path.Base(file)}, - {c, "func", "%+s", relFile}, - {c, "func", "%#s", file}, - {c, "func", "%d", fmt.Sprint(line)}, - {c, "func", "%n", "TestCallFormat"}, - {c, "func", "%+n", runtime.FuncForPC(pc - 1).Name()}, - {c, "func", "%v", fmt.Sprint(path.Base(file), ":", line)}, - {c, "func", "%+v", fmt.Sprint(relFile, ":", line)}, - {c, "func", "%#v", fmt.Sprint(file, ":", line)}, - - {c2, "meth", "%s", path.Base(file2)}, - {c2, "meth", "%+s", relFile2}, - {c2, "meth", "%#s", file2}, - {c2, "meth", "%d", fmt.Sprint(line2)}, - {c2, "meth", "%n", "testType.testMethod"}, - {c2, "meth", "%+n", runtime.FuncForPC(pc2).Name()}, - {c2, "meth", "%v", fmt.Sprint(path.Base(file2), ":", line2)}, - {c2, "meth", "%+v", fmt.Sprint(relFile2, ":", line2)}, - {c2, "meth", "%#v", fmt.Sprint(file2, ":", line2)}, - } - - for _, d := range data { - got := fmt.Sprintf(d.fmt, d.c) - if got != d.out { - t.Errorf("fmt.Sprintf(%q, Call(%s)) = %s, want %s", d.fmt, d.desc, got, d.out) - } - } -} - -func TestCallString(t *testing.T) { - t.Parallel() - - c := stack.Caller(0) - _, file, line, ok := runtime.Caller(0) - line-- - if !ok { - t.Fatal("runtime.Caller(0) failed") - } - - c2, _, file2, line2, ok2 := testType{}.testMethod() - if !ok2 { - t.Fatal("runtime.Caller(0) failed") - } - - data := []struct { - c stack.Call - desc string - out string - }{ - {stack.Call{}, "error", "%!v(NOFUNC)"}, - {c, "func", fmt.Sprint(path.Base(file), ":", line)}, - {c2, "meth", fmt.Sprint(path.Base(file2), ":", line2)}, - } - - for _, d := range data { - got := d.c.String() - if got != d.out { - t.Errorf("got %s, want %s", got, d.out) - } - } -} - -func TestCallMarshalText(t *testing.T) { - t.Parallel() - - c := stack.Caller(0) - _, file, line, ok := runtime.Caller(0) - line-- - if !ok { - 
t.Fatal("runtime.Caller(0) failed") - } - - c2, _, file2, line2, ok2 := testType{}.testMethod() - if !ok2 { - t.Fatal("runtime.Caller(0) failed") - } - - data := []struct { - c stack.Call - desc string - out []byte - err error - }{ - {stack.Call{}, "error", nil, stack.ErrNoFunc}, - {c, "func", []byte(fmt.Sprint(path.Base(file), ":", line)), nil}, - {c2, "meth", []byte(fmt.Sprint(path.Base(file2), ":", line2)), nil}, - } - - for _, d := range data { - text, err := d.c.MarshalText() - if got, want := err, d.err; got != want { - t.Errorf("%s: got err %v, want err %v", d.desc, got, want) - } - if got, want := text, d.out; !reflect.DeepEqual(got, want) { - t.Errorf("%s: got %s, want %s", d.desc, got, want) - } - } -} - -func TestCallStackString(t *testing.T) { - cs, line0 := getTrace(t) - _, file, line1, ok := runtime.Caller(0) - line1-- - if !ok { - t.Fatal("runtime.Caller(0) failed") - } - file = path.Base(file) - if got, want := cs.String(), fmt.Sprintf("[%s:%d %s:%d]", file, line0, file, line1); got != want { - t.Errorf("\n got %v\nwant %v", got, want) - } -} - -func TestCallStackMarshalText(t *testing.T) { - cs, line0 := getTrace(t) - _, file, line1, ok := runtime.Caller(0) - line1-- - if !ok { - t.Fatal("runtime.Caller(0) failed") - } - file = path.Base(file) - text, _ := cs.MarshalText() - if got, want := text, []byte(fmt.Sprintf("[%s:%d %s:%d]", file, line0, file, line1)); !reflect.DeepEqual(got, want) { - t.Errorf("\n got %v\nwant %v", got, want) - } -} -func getTrace(t *testing.T) (stack.CallStack, int) { - cs := stack.Trace().TrimRuntime() - _, _, line, ok := runtime.Caller(0) - line-- - if !ok { - t.Fatal("runtime.Caller(0) failed") - } - return cs, line -} - -func TestTrimAbove(t *testing.T) { - trace := trimAbove() - if got, want := len(trace), 2; got != want { - t.Errorf("got len(trace) == %v, want %v, trace: %n", got, want, trace) - } - if got, want := fmt.Sprintf("%n", trace[1]), "TestTrimAbove"; got != want { - t.Errorf("got %q, want %q", got, want) - } -} - -func trimAbove() stack.CallStack { - call := stack.Caller(1) - trace := stack.Trace() - return trace.TrimAbove(call) -} - -func TestTrimBelow(t *testing.T) { - trace := trimBelow() - if got, want := fmt.Sprintf("%n", trace[0]), "TestTrimBelow"; got != want { - t.Errorf("got %q, want %q", got, want) - } -} - -func trimBelow() stack.CallStack { - call := stack.Caller(1) - trace := stack.Trace() - return trace.TrimBelow(call) -} - -func TestTrimRuntime(t *testing.T) { - trace := stack.Trace().TrimRuntime() - if got, want := len(trace), 1; got != want { - t.Errorf("got len(trace) == %v, want %v, goroot: %q, trace: %#v", got, want, runtime.GOROOT(), trace) - } -} - -func BenchmarkCallVFmt(b *testing.B) { - c := stack.Caller(0) - b.ResetTimer() - for i := 0; i < b.N; i++ { - fmt.Fprint(ioutil.Discard, c) - } -} - -func BenchmarkCallPlusVFmt(b *testing.B) { - c := stack.Caller(0) - b.ResetTimer() - for i := 0; i < b.N; i++ { - fmt.Fprintf(ioutil.Discard, "%+v", c) - } -} - -func BenchmarkCallSharpVFmt(b *testing.B) { - c := stack.Caller(0) - b.ResetTimer() - for i := 0; i < b.N; i++ { - fmt.Fprintf(ioutil.Discard, "%#v", c) - } -} - -func BenchmarkCallSFmt(b *testing.B) { - c := stack.Caller(0) - b.ResetTimer() - for i := 0; i < b.N; i++ { - fmt.Fprintf(ioutil.Discard, "%s", c) - } -} - -func BenchmarkCallPlusSFmt(b *testing.B) { - c := stack.Caller(0) - b.ResetTimer() - for i := 0; i < b.N; i++ { - fmt.Fprintf(ioutil.Discard, "%+s", c) - } -} - -func BenchmarkCallSharpSFmt(b *testing.B) { - c := stack.Caller(0) - b.ResetTimer() 
- for i := 0; i < b.N; i++ { - fmt.Fprintf(ioutil.Discard, "%#s", c) - } -} - -func BenchmarkCallDFmt(b *testing.B) { - c := stack.Caller(0) - b.ResetTimer() - for i := 0; i < b.N; i++ { - fmt.Fprintf(ioutil.Discard, "%d", c) - } -} - -func BenchmarkCallNFmt(b *testing.B) { - c := stack.Caller(0) - b.ResetTimer() - for i := 0; i < b.N; i++ { - fmt.Fprintf(ioutil.Discard, "%n", c) - } -} - -func BenchmarkCallPlusNFmt(b *testing.B) { - c := stack.Caller(0) - b.ResetTimer() - for i := 0; i < b.N; i++ { - fmt.Fprintf(ioutil.Discard, "%+n", c) - } -} - -func BenchmarkCaller(b *testing.B) { - for i := 0; i < b.N; i++ { - stack.Caller(0) - } -} - -func BenchmarkTrace(b *testing.B) { - for i := 0; i < b.N; i++ { - stack.Trace() - } -} - -func deepStack(depth int, b *testing.B) stack.CallStack { - if depth > 0 { - return deepStack(depth-1, b) - } - b.StartTimer() - s := stack.Trace() - return s -} - -func BenchmarkTrace10(b *testing.B) { - for i := 0; i < b.N; i++ { - b.StopTimer() - deepStack(10, b) - } -} - -func BenchmarkTrace50(b *testing.B) { - b.StopTimer() - for i := 0; i < b.N; i++ { - deepStack(50, b) - } -} - -func BenchmarkTrace100(b *testing.B) { - b.StopTimer() - for i := 0; i < b.N; i++ { - deepStack(100, b) - } -} - -//////////////// -// Benchmark functions followed by formatting -//////////////// - -func BenchmarkCallerAndVFmt(b *testing.B) { - for i := 0; i < b.N; i++ { - fmt.Fprint(ioutil.Discard, stack.Caller(0)) - } -} - -func BenchmarkTraceAndVFmt(b *testing.B) { - for i := 0; i < b.N; i++ { - fmt.Fprint(ioutil.Discard, stack.Trace()) - } -} - -func BenchmarkTrace10AndVFmt(b *testing.B) { - for i := 0; i < b.N; i++ { - b.StopTimer() - fmt.Fprint(ioutil.Discard, deepStack(10, b)) - } -} - -//////////////// -// Baseline against package runtime. 
-//////////////// - -func BenchmarkRuntimeCaller(b *testing.B) { - for i := 0; i < b.N; i++ { - runtime.Caller(0) - } -} - -func BenchmarkRuntimeCallerAndFmt(b *testing.B) { - for i := 0; i < b.N; i++ { - _, file, line, _ := runtime.Caller(0) - const sep = "/" - if i := strings.LastIndex(file, sep); i != -1 { - file = file[i+len(sep):] - } - fmt.Fprint(ioutil.Discard, file, ":", line) - } -} - -func BenchmarkFuncForPC(b *testing.B) { - pc, _, _, _ := runtime.Caller(0) - pc-- - b.ResetTimer() - for i := 0; i < b.N; i++ { - runtime.FuncForPC(pc) - } -} - -func BenchmarkFuncFileLine(b *testing.B) { - pc, _, _, _ := runtime.Caller(0) - pc-- - fn := runtime.FuncForPC(pc) - b.ResetTimer() - for i := 0; i < b.N; i++ { - fn.FileLine(pc) - } -} diff --git a/vendor/github.com/go-stack/stack/stackinternal_test.go b/vendor/github.com/go-stack/stack/stackinternal_test.go deleted file mode 100644 index 14ee9a5..0000000 --- a/vendor/github.com/go-stack/stack/stackinternal_test.go +++ /dev/null @@ -1,65 +0,0 @@ -package stack - -import ( - "runtime" - "testing" -) - -func TestFindSigpanic(t *testing.T) { - t.Parallel() - sp := findSigpanic() - if got, want := sp.Name(), "runtime.sigpanic"; got != want { - t.Errorf("got == %v, want == %v", got, want) - } -} - -func TestCaller(t *testing.T) { - t.Parallel() - - c := Caller(0) - _, file, line, ok := runtime.Caller(0) - line-- - if !ok { - t.Fatal("runtime.Caller(0) failed") - } - - if got, want := c.file(), file; got != want { - t.Errorf("got file == %v, want file == %v", got, want) - } - - if got, want := c.line(), line; got != want { - t.Errorf("got line == %v, want line == %v", got, want) - } -} - -type fholder struct { - f func() CallStack -} - -func (fh *fholder) labyrinth() CallStack { - for { - return fh.f() - } - panic("this line only needed for go 1.0") -} - -func TestTrace(t *testing.T) { - t.Parallel() - - fh := fholder{ - f: func() CallStack { - cs := Trace() - return cs - }, - } - - cs := fh.labyrinth() - - lines := []int{51, 41, 56} - - for i, line := range lines { - if got, want := cs[i].line(), line; got != want { - t.Errorf("got line[%d] == %v, want line[%d] == %v", i, got, i, want) - } - } -} diff --git a/vendor/github.com/gorilla/websocket/.gitignore b/vendor/github.com/gorilla/websocket/.gitignore deleted file mode 100644 index ac71020..0000000 --- a/vendor/github.com/gorilla/websocket/.gitignore +++ /dev/null @@ -1,25 +0,0 @@ -# Compiled Object files, Static and Dynamic libs (Shared Objects) -*.o -*.a -*.so - -# Folders -_obj -_test - -# Architecture specific extensions/prefixes -*.[568vq] -[568vq].out - -*.cgo1.go -*.cgo2.c -_cgo_defun.c -_cgo_gotypes.go -_cgo_export.* - -_testmain.go - -*.exe - -.idea/ -*.iml \ No newline at end of file diff --git a/vendor/github.com/gorilla/websocket/.travis.yml b/vendor/github.com/gorilla/websocket/.travis.yml deleted file mode 100644 index 66435ac..0000000 --- a/vendor/github.com/gorilla/websocket/.travis.yml +++ /dev/null @@ -1,17 +0,0 @@ -language: go -sudo: false - -matrix: - include: - - go: 1.4 - - go: 1.5 - - go: 1.6 - - go: tip - allow_failures: - - go: tip - -script: - - go get -t -v ./... - - diff -u <(echo -n) <(gofmt -d .) - - go vet $(go list ./... | grep -v /vendor/) - - go test -v -race ./... 
diff --git a/vendor/github.com/gorilla/websocket/AUTHORS b/vendor/github.com/gorilla/websocket/AUTHORS deleted file mode 100644 index b003eca..0000000 --- a/vendor/github.com/gorilla/websocket/AUTHORS +++ /dev/null @@ -1,8 +0,0 @@ -# This is the official list of Gorilla WebSocket authors for copyright -# purposes. -# -# Please keep the list sorted. - -Gary Burd -Joachim Bauch - diff --git a/vendor/github.com/gorilla/websocket/LICENSE b/vendor/github.com/gorilla/websocket/LICENSE deleted file mode 100644 index 9171c97..0000000 --- a/vendor/github.com/gorilla/websocket/LICENSE +++ /dev/null @@ -1,22 +0,0 @@ -Copyright (c) 2013 The Gorilla WebSocket Authors. All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are met: - - Redistributions of source code must retain the above copyright notice, this - list of conditions and the following disclaimer. - - Redistributions in binary form must reproduce the above copyright notice, - this list of conditions and the following disclaimer in the documentation - and/or other materials provided with the distribution. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND -ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED -WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE -FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL -DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR -SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER -CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, -OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/gorilla/websocket/README.md b/vendor/github.com/gorilla/websocket/README.md deleted file mode 100644 index 9d71959..0000000 --- a/vendor/github.com/gorilla/websocket/README.md +++ /dev/null @@ -1,61 +0,0 @@ -# Gorilla WebSocket - -Gorilla WebSocket is a [Go](http://golang.org/) implementation of the -[WebSocket](http://www.rfc-editor.org/rfc/rfc6455.txt) protocol. - -### Documentation - -* [API Reference](http://godoc.org/github.com/gorilla/websocket) -* [Chat example](https://github.com/gorilla/websocket/tree/master/examples/chat) -* [Command example](https://github.com/gorilla/websocket/tree/master/examples/command) -* [Client and server example](https://github.com/gorilla/websocket/tree/master/examples/echo) -* [File watch example](https://github.com/gorilla/websocket/tree/master/examples/filewatch) - -### Status - -The Gorilla WebSocket package provides a complete and tested implementation of -the [WebSocket](http://www.rfc-editor.org/rfc/rfc6455.txt) protocol. The -package API is stable. - -### Installation - - go get github.com/gorilla/websocket - -### Protocol Compliance - -The Gorilla WebSocket package passes the server tests in the [Autobahn Test -Suite](http://autobahn.ws/testsuite) using the application in the [examples/autobahn -subdirectory](https://github.com/gorilla/websocket/tree/master/examples/autobahn). 
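Before the feature comparison that follows, a minimal usage sketch may help show what is being removed. It is assembled only from the client API that appears later in this diff (`Dialer.Dial`, `WriteMessage`, `ReadMessage`, `TextMessage`, `Close`); the echo endpoint URL is a placeholder, not something defined in this repository.

```go
// Hypothetical sketch: a minimal echo client using the gorilla/websocket
// client API shown in this diff. The ws://localhost:8080/echo endpoint is
// a placeholder.
package main

import (
	"log"

	"github.com/gorilla/websocket"
)

func main() {
	conn, _, err := websocket.DefaultDialer.Dial("ws://localhost:8080/echo", nil)
	if err != nil {
		log.Fatalf("dial: %v", err)
	}
	defer conn.Close()

	// Send a text message and read back the echoed reply.
	if err := conn.WriteMessage(websocket.TextMessage, []byte("hello")); err != nil {
		log.Fatalf("write: %v", err)
	}
	_, msg, err := conn.ReadMessage()
	if err != nil {
		log.Fatalf("read: %v", err)
	}
	log.Printf("received: %s", msg)
}
```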
- -### Gorilla WebSocket compared with other packages - - - - -github.com/gorilla -golang.org/x/net - - -RFC 6455 Features -Passes Autobahn Test SuiteYesNo -Receive fragmented messageYesNo, see note 1 -Send close messageYesNo -Send pings and receive pongsYesNo -Get the type of a received data messageYesYes, see note 2 -Other Features -Limit size of received messageYesNo -Read message using io.ReaderYesNo, see note 3 -Write message using io.WriteCloserYesNo, see note 3 - - -Notes: - -1. Large messages are fragmented in [Chrome's new WebSocket implementation](http://www.ietf.org/mail-archive/web/hybi/current/msg10503.html). -2. The application can get the type of a received data message by implementing - a [Codec marshal](http://godoc.org/golang.org/x/net/websocket#Codec.Marshal) - function. -3. The go.net io.Reader and io.Writer operate across WebSocket frame boundaries. - Read returns when the input buffer is full or a frame boundary is - encountered. Each call to Write sends a single frame message. The Gorilla - io.Reader and io.WriteCloser operate on a single WebSocket message. - diff --git a/vendor/github.com/gorilla/websocket/bench_test.go b/vendor/github.com/gorilla/websocket/bench_test.go deleted file mode 100644 index f66fc36..0000000 --- a/vendor/github.com/gorilla/websocket/bench_test.go +++ /dev/null @@ -1,19 +0,0 @@ -// Copyright 2014 The Gorilla WebSocket Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package websocket - -import ( - "testing" -) - -func BenchmarkMaskBytes(b *testing.B) { - var key [4]byte - data := make([]byte, 1024) - pos := 0 - for i := 0; i < b.N; i++ { - pos = maskBytes(key, pos, data) - } - b.SetBytes(int64(len(data))) -} diff --git a/vendor/github.com/gorilla/websocket/client.go b/vendor/github.com/gorilla/websocket/client.go deleted file mode 100644 index 879d33e..0000000 --- a/vendor/github.com/gorilla/websocket/client.go +++ /dev/null @@ -1,375 +0,0 @@ -// Copyright 2013 The Gorilla WebSocket Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package websocket - -import ( - "bufio" - "bytes" - "crypto/tls" - "encoding/base64" - "errors" - "io" - "io/ioutil" - "net" - "net/http" - "net/url" - "strings" - "time" -) - -// ErrBadHandshake is returned when the server response to opening handshake is -// invalid. -var ErrBadHandshake = errors.New("websocket: bad handshake") - -// NewClient creates a new client connection using the given net connection. -// The URL u specifies the host and request URI. Use requestHeader to specify -// the origin (Origin), subprotocols (Sec-WebSocket-Protocol) and cookies -// (Cookie). Use the response.Header to get the selected subprotocol -// (Sec-WebSocket-Protocol) and cookies (Set-Cookie). -// -// If the WebSocket handshake fails, ErrBadHandshake is returned along with a -// non-nil *http.Response so that callers can handle redirects, authentication, -// etc. -// -// Deprecated: Use Dialer instead. -func NewClient(netConn net.Conn, u *url.URL, requestHeader http.Header, readBufSize, writeBufSize int) (c *Conn, response *http.Response, err error) { - d := Dialer{ - ReadBufferSize: readBufSize, - WriteBufferSize: writeBufSize, - NetDial: func(net, addr string) (net.Conn, error) { - return netConn, nil - }, - } - return d.Dial(u.String(), requestHeader) -} - -// A Dialer contains options for connecting to WebSocket server. 
-type Dialer struct { - // NetDial specifies the dial function for creating TCP connections. If - // NetDial is nil, net.Dial is used. - NetDial func(network, addr string) (net.Conn, error) - - // Proxy specifies a function to return a proxy for a given - // Request. If the function returns a non-nil error, the - // request is aborted with the provided error. - // If Proxy is nil or returns a nil *URL, no proxy is used. - Proxy func(*http.Request) (*url.URL, error) - - // TLSClientConfig specifies the TLS configuration to use with tls.Client. - // If nil, the default configuration is used. - TLSClientConfig *tls.Config - - // HandshakeTimeout specifies the duration for the handshake to complete. - HandshakeTimeout time.Duration - - // Input and output buffer sizes. If the buffer size is zero, then a - // default value of 4096 is used. - ReadBufferSize, WriteBufferSize int - - // Subprotocols specifies the client's requested subprotocols. - Subprotocols []string -} - -var errMalformedURL = errors.New("malformed ws or wss URL") - -// parseURL parses the URL. -// -// This function is a replacement for the standard library url.Parse function. -// In Go 1.4 and earlier, url.Parse loses information from the path. -func parseURL(s string) (*url.URL, error) { - // From the RFC: - // - // ws-URI = "ws:" "//" host [ ":" port ] path [ "?" query ] - // wss-URI = "wss:" "//" host [ ":" port ] path [ "?" query ] - - var u url.URL - switch { - case strings.HasPrefix(s, "ws://"): - u.Scheme = "ws" - s = s[len("ws://"):] - case strings.HasPrefix(s, "wss://"): - u.Scheme = "wss" - s = s[len("wss://"):] - default: - return nil, errMalformedURL - } - - if i := strings.Index(s, "?"); i >= 0 { - u.RawQuery = s[i+1:] - s = s[:i] - } - - if i := strings.Index(s, "/"); i >= 0 { - u.Opaque = s[i:] - s = s[:i] - } else { - u.Opaque = "/" - } - - u.Host = s - - if strings.Contains(u.Host, "@") { - // Don't bother parsing user information because user information is - // not allowed in websocket URIs. - return nil, errMalformedURL - } - - return &u, nil -} - -func hostPortNoPort(u *url.URL) (hostPort, hostNoPort string) { - hostPort = u.Host - hostNoPort = u.Host - if i := strings.LastIndex(u.Host, ":"); i > strings.LastIndex(u.Host, "]") { - hostNoPort = hostNoPort[:i] - } else { - switch u.Scheme { - case "wss": - hostPort += ":443" - case "https": - hostPort += ":443" - default: - hostPort += ":80" - } - } - return hostPort, hostNoPort -} - -// DefaultDialer is a dialer with all fields set to the default zero values. -var DefaultDialer = &Dialer{ - Proxy: http.ProxyFromEnvironment, -} - -// Dial creates a new client connection. Use requestHeader to specify the -// origin (Origin), subprotocols (Sec-WebSocket-Protocol) and cookies (Cookie). -// Use the response.Header to get the selected subprotocol -// (Sec-WebSocket-Protocol) and cookies (Set-Cookie). -// -// If the WebSocket handshake fails, ErrBadHandshake is returned along with a -// non-nil *http.Response so that callers can handle redirects, authentication, -// etcetera. The response body may not contain the entire response and does not -// need to be closed by the application. 
-func (d *Dialer) Dial(urlStr string, requestHeader http.Header) (*Conn, *http.Response, error) { - - if d == nil { - d = &Dialer{ - Proxy: http.ProxyFromEnvironment, - } - } - - challengeKey, err := generateChallengeKey() - if err != nil { - return nil, nil, err - } - - u, err := parseURL(urlStr) - if err != nil { - return nil, nil, err - } - - switch u.Scheme { - case "ws": - u.Scheme = "http" - case "wss": - u.Scheme = "https" - default: - return nil, nil, errMalformedURL - } - - if u.User != nil { - // User name and password are not allowed in websocket URIs. - return nil, nil, errMalformedURL - } - - req := &http.Request{ - Method: "GET", - URL: u, - Proto: "HTTP/1.1", - ProtoMajor: 1, - ProtoMinor: 1, - Header: make(http.Header), - Host: u.Host, - } - - // Set the request headers using the capitalization for names and values in - // RFC examples. Although the capitalization shouldn't matter, there are - // servers that depend on it. The Header.Set method is not used because the - // method canonicalizes the header names. - req.Header["Upgrade"] = []string{"websocket"} - req.Header["Connection"] = []string{"Upgrade"} - req.Header["Sec-WebSocket-Key"] = []string{challengeKey} - req.Header["Sec-WebSocket-Version"] = []string{"13"} - if len(d.Subprotocols) > 0 { - req.Header["Sec-WebSocket-Protocol"] = []string{strings.Join(d.Subprotocols, ", ")} - } - for k, vs := range requestHeader { - switch { - case k == "Host": - if len(vs) > 0 { - req.Host = vs[0] - } - case k == "Upgrade" || - k == "Connection" || - k == "Sec-Websocket-Key" || - k == "Sec-Websocket-Version" || - (k == "Sec-Websocket-Protocol" && len(d.Subprotocols) > 0): - return nil, nil, errors.New("websocket: duplicate header not allowed: " + k) - default: - req.Header[k] = vs - } - } - - hostPort, hostNoPort := hostPortNoPort(u) - - var proxyURL *url.URL - // Check wether the proxy method has been configured - if d.Proxy != nil { - proxyURL, err = d.Proxy(req) - } - if err != nil { - return nil, nil, err - } - - var targetHostPort string - if proxyURL != nil { - targetHostPort, _ = hostPortNoPort(proxyURL) - } else { - targetHostPort = hostPort - } - - var deadline time.Time - if d.HandshakeTimeout != 0 { - deadline = time.Now().Add(d.HandshakeTimeout) - } - - netDial := d.NetDial - if netDial == nil { - netDialer := &net.Dialer{Deadline: deadline} - netDial = netDialer.Dial - } - - netConn, err := netDial("tcp", targetHostPort) - if err != nil { - return nil, nil, err - } - - defer func() { - if netConn != nil { - netConn.Close() - } - }() - - if err := netConn.SetDeadline(deadline); err != nil { - return nil, nil, err - } - - if proxyURL != nil { - connectHeader := make(http.Header) - if user := proxyURL.User; user != nil { - proxyUser := user.Username() - if proxyPassword, passwordSet := user.Password(); passwordSet { - credential := base64.StdEncoding.EncodeToString([]byte(proxyUser + ":" + proxyPassword)) - connectHeader.Set("Proxy-Authorization", "Basic "+credential) - } - } - connectReq := &http.Request{ - Method: "CONNECT", - URL: &url.URL{Opaque: hostPort}, - Host: hostPort, - Header: connectHeader, - } - - connectReq.Write(netConn) - - // Read response. - // Okay to use and discard buffered reader here, because - // TLS server will not speak until spoken to. 
- br := bufio.NewReader(netConn) - resp, err := http.ReadResponse(br, connectReq) - if err != nil { - return nil, nil, err - } - if resp.StatusCode != 200 { - f := strings.SplitN(resp.Status, " ", 2) - return nil, nil, errors.New(f[1]) - } - } - - if u.Scheme == "https" { - cfg := cloneTLSConfig(d.TLSClientConfig) - if cfg.ServerName == "" { - cfg.ServerName = hostNoPort - } - tlsConn := tls.Client(netConn, cfg) - netConn = tlsConn - if err := tlsConn.Handshake(); err != nil { - return nil, nil, err - } - if !cfg.InsecureSkipVerify { - if err := tlsConn.VerifyHostname(cfg.ServerName); err != nil { - return nil, nil, err - } - } - } - - conn := newConn(netConn, false, d.ReadBufferSize, d.WriteBufferSize) - - if err := req.Write(netConn); err != nil { - return nil, nil, err - } - - resp, err := http.ReadResponse(conn.br, req) - if err != nil { - return nil, nil, err - } - if resp.StatusCode != 101 || - !strings.EqualFold(resp.Header.Get("Upgrade"), "websocket") || - !strings.EqualFold(resp.Header.Get("Connection"), "upgrade") || - resp.Header.Get("Sec-Websocket-Accept") != computeAcceptKey(challengeKey) { - // Before closing the network connection on return from this - // function, slurp up some of the response to aid application - // debugging. - buf := make([]byte, 1024) - n, _ := io.ReadFull(resp.Body, buf) - resp.Body = ioutil.NopCloser(bytes.NewReader(buf[:n])) - return nil, resp, ErrBadHandshake - } - - resp.Body = ioutil.NopCloser(bytes.NewReader([]byte{})) - conn.subprotocol = resp.Header.Get("Sec-Websocket-Protocol") - - netConn.SetDeadline(time.Time{}) - netConn = nil // to avoid close in defer. - return conn, resp, nil -} - -// cloneTLSConfig clones all public fields except the fields -// SessionTicketsDisabled and SessionTicketKey. This avoids copying the -// sync.Mutex in the sync.Once and makes it safe to call cloneTLSConfig on a -// config in active use. -func cloneTLSConfig(cfg *tls.Config) *tls.Config { - if cfg == nil { - return &tls.Config{} - } - return &tls.Config{ - Rand: cfg.Rand, - Time: cfg.Time, - Certificates: cfg.Certificates, - NameToCertificate: cfg.NameToCertificate, - GetCertificate: cfg.GetCertificate, - RootCAs: cfg.RootCAs, - NextProtos: cfg.NextProtos, - ServerName: cfg.ServerName, - ClientAuth: cfg.ClientAuth, - ClientCAs: cfg.ClientCAs, - InsecureSkipVerify: cfg.InsecureSkipVerify, - CipherSuites: cfg.CipherSuites, - PreferServerCipherSuites: cfg.PreferServerCipherSuites, - ClientSessionCache: cfg.ClientSessionCache, - MinVersion: cfg.MinVersion, - MaxVersion: cfg.MaxVersion, - CurvePreferences: cfg.CurvePreferences, - } -} diff --git a/vendor/github.com/gorilla/websocket/client_server_test.go b/vendor/github.com/gorilla/websocket/client_server_test.go deleted file mode 100644 index 3f7345d..0000000 --- a/vendor/github.com/gorilla/websocket/client_server_test.go +++ /dev/null @@ -1,451 +0,0 @@ -// Copyright 2013 The Gorilla WebSocket Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -package websocket - -import ( - "crypto/tls" - "crypto/x509" - "encoding/base64" - "io" - "io/ioutil" - "net" - "net/http" - "net/http/httptest" - "net/url" - "reflect" - "strings" - "testing" - "time" -) - -var cstUpgrader = Upgrader{ - Subprotocols: []string{"p0", "p1"}, - ReadBufferSize: 1024, - WriteBufferSize: 1024, - Error: func(w http.ResponseWriter, r *http.Request, status int, reason error) { - http.Error(w, reason.Error(), status) - }, -} - -var cstDialer = Dialer{ - Subprotocols: []string{"p1", "p2"}, - ReadBufferSize: 1024, - WriteBufferSize: 1024, -} - -type cstHandler struct{ *testing.T } - -type cstServer struct { - *httptest.Server - URL string -} - -const ( - cstPath = "/a/b" - cstRawQuery = "x=y" - cstRequestURI = cstPath + "?" + cstRawQuery -) - -func newServer(t *testing.T) *cstServer { - var s cstServer - s.Server = httptest.NewServer(cstHandler{t}) - s.Server.URL += cstRequestURI - s.URL = makeWsProto(s.Server.URL) - return &s -} - -func newTLSServer(t *testing.T) *cstServer { - var s cstServer - s.Server = httptest.NewTLSServer(cstHandler{t}) - s.Server.URL += cstRequestURI - s.URL = makeWsProto(s.Server.URL) - return &s -} - -func (t cstHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { - if r.URL.Path != cstPath { - t.Logf("path=%v, want %v", r.URL.Path, cstPath) - http.Error(w, "bad path", 400) - return - } - if r.URL.RawQuery != cstRawQuery { - t.Logf("query=%v, want %v", r.URL.RawQuery, cstRawQuery) - http.Error(w, "bad path", 400) - return - } - subprotos := Subprotocols(r) - if !reflect.DeepEqual(subprotos, cstDialer.Subprotocols) { - t.Logf("subprotols=%v, want %v", subprotos, cstDialer.Subprotocols) - http.Error(w, "bad protocol", 400) - return - } - ws, err := cstUpgrader.Upgrade(w, r, http.Header{"Set-Cookie": {"sessionID=1234"}}) - if err != nil { - t.Logf("Upgrade: %v", err) - return - } - defer ws.Close() - - if ws.Subprotocol() != "p1" { - t.Logf("Subprotocol() = %s, want p1", ws.Subprotocol()) - ws.Close() - return - } - op, rd, err := ws.NextReader() - if err != nil { - t.Logf("NextReader: %v", err) - return - } - wr, err := ws.NextWriter(op) - if err != nil { - t.Logf("NextWriter: %v", err) - return - } - if _, err = io.Copy(wr, rd); err != nil { - t.Logf("NextWriter: %v", err) - return - } - if err := wr.Close(); err != nil { - t.Logf("Close: %v", err) - return - } -} - -func makeWsProto(s string) string { - return "ws" + strings.TrimPrefix(s, "http") -} - -func sendRecv(t *testing.T, ws *Conn) { - const message = "Hello World!" - if err := ws.SetWriteDeadline(time.Now().Add(time.Second)); err != nil { - t.Fatalf("SetWriteDeadline: %v", err) - } - if err := ws.WriteMessage(TextMessage, []byte(message)); err != nil { - t.Fatalf("WriteMessage: %v", err) - } - if err := ws.SetReadDeadline(time.Now().Add(time.Second)); err != nil { - t.Fatalf("SetReadDeadline: %v", err) - } - _, p, err := ws.ReadMessage() - if err != nil { - t.Fatalf("ReadMessage: %v", err) - } - if string(p) != message { - t.Fatalf("message=%s, want %s", p, message) - } -} - -func TestProxyDial(t *testing.T) { - - s := newServer(t) - defer s.Close() - - surl, _ := url.Parse(s.URL) - - cstDialer.Proxy = http.ProxyURL(surl) - - connect := false - origHandler := s.Server.Config.Handler - - // Capture the request Host header. 
- s.Server.Config.Handler = http.HandlerFunc( - func(w http.ResponseWriter, r *http.Request) { - if r.Method == "CONNECT" { - connect = true - w.WriteHeader(200) - return - } - - if !connect { - t.Log("connect not recieved") - http.Error(w, "connect not recieved", 405) - return - } - origHandler.ServeHTTP(w, r) - }) - - ws, _, err := cstDialer.Dial(s.URL, nil) - if err != nil { - t.Fatalf("Dial: %v", err) - } - defer ws.Close() - sendRecv(t, ws) - - cstDialer.Proxy = http.ProxyFromEnvironment -} - -func TestProxyAuthorizationDial(t *testing.T) { - s := newServer(t) - defer s.Close() - - surl, _ := url.Parse(s.URL) - surl.User = url.UserPassword("username", "password") - cstDialer.Proxy = http.ProxyURL(surl) - - connect := false - origHandler := s.Server.Config.Handler - - // Capture the request Host header. - s.Server.Config.Handler = http.HandlerFunc( - func(w http.ResponseWriter, r *http.Request) { - proxyAuth := r.Header.Get("Proxy-Authorization") - expectedProxyAuth := "Basic " + base64.StdEncoding.EncodeToString([]byte("username:password")) - if r.Method == "CONNECT" && proxyAuth == expectedProxyAuth { - connect = true - w.WriteHeader(200) - return - } - - if !connect { - t.Log("connect with proxy authorization not recieved") - http.Error(w, "connect with proxy authorization not recieved", 405) - return - } - origHandler.ServeHTTP(w, r) - }) - - ws, _, err := cstDialer.Dial(s.URL, nil) - if err != nil { - t.Fatalf("Dial: %v", err) - } - defer ws.Close() - sendRecv(t, ws) - - cstDialer.Proxy = http.ProxyFromEnvironment -} - -func TestDial(t *testing.T) { - s := newServer(t) - defer s.Close() - - ws, _, err := cstDialer.Dial(s.URL, nil) - if err != nil { - t.Fatalf("Dial: %v", err) - } - defer ws.Close() - sendRecv(t, ws) -} - -func TestDialTLS(t *testing.T) { - s := newTLSServer(t) - defer s.Close() - - certs := x509.NewCertPool() - for _, c := range s.TLS.Certificates { - roots, err := x509.ParseCertificates(c.Certificate[len(c.Certificate)-1]) - if err != nil { - t.Fatalf("error parsing server's root cert: %v", err) - } - for _, root := range roots { - certs.AddCert(root) - } - } - - u, _ := url.Parse(s.URL) - d := cstDialer - d.NetDial = func(network, addr string) (net.Conn, error) { return net.Dial(network, u.Host) } - d.TLSClientConfig = &tls.Config{RootCAs: certs} - ws, _, err := d.Dial("wss://example.com"+cstRequestURI, nil) - if err != nil { - t.Fatalf("Dial: %v", err) - } - defer ws.Close() - sendRecv(t, ws) -} - -func xTestDialTLSBadCert(t *testing.T) { - // This test is deactivated because of noisy logging from the net/http package. 
- s := newTLSServer(t) - defer s.Close() - - ws, _, err := cstDialer.Dial(s.URL, nil) - if err == nil { - ws.Close() - t.Fatalf("Dial: nil") - } -} - -func xTestDialTLSNoVerify(t *testing.T) { - s := newTLSServer(t) - defer s.Close() - - d := cstDialer - d.TLSClientConfig = &tls.Config{InsecureSkipVerify: true} - ws, _, err := d.Dial(s.URL, nil) - if err != nil { - t.Fatalf("Dial: %v", err) - } - defer ws.Close() - sendRecv(t, ws) -} - -func TestDialTimeout(t *testing.T) { - s := newServer(t) - defer s.Close() - - d := cstDialer - d.HandshakeTimeout = -1 - ws, _, err := d.Dial(s.URL, nil) - if err == nil { - ws.Close() - t.Fatalf("Dial: nil") - } -} - -func TestDialBadScheme(t *testing.T) { - s := newServer(t) - defer s.Close() - - ws, _, err := cstDialer.Dial(s.Server.URL, nil) - if err == nil { - ws.Close() - t.Fatalf("Dial: nil") - } -} - -func TestDialBadOrigin(t *testing.T) { - s := newServer(t) - defer s.Close() - - ws, resp, err := cstDialer.Dial(s.URL, http.Header{"Origin": {"bad"}}) - if err == nil { - ws.Close() - t.Fatalf("Dial: nil") - } - if resp == nil { - t.Fatalf("resp=nil, err=%v", err) - } - if resp.StatusCode != http.StatusForbidden { - t.Fatalf("status=%d, want %d", resp.StatusCode, http.StatusForbidden) - } -} - -func TestDialBadHeader(t *testing.T) { - s := newServer(t) - defer s.Close() - - for _, k := range []string{"Upgrade", - "Connection", - "Sec-Websocket-Key", - "Sec-Websocket-Version", - "Sec-Websocket-Protocol"} { - h := http.Header{} - h.Set(k, "bad") - ws, _, err := cstDialer.Dial(s.URL, http.Header{"Origin": {"bad"}}) - if err == nil { - ws.Close() - t.Errorf("Dial with header %s returned nil", k) - } - } -} - -func TestBadMethod(t *testing.T) { - s := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - ws, err := cstUpgrader.Upgrade(w, r, nil) - if err == nil { - t.Errorf("handshake succeeded, expect fail") - ws.Close() - } - })) - defer s.Close() - - resp, err := http.PostForm(s.URL, url.Values{}) - if err != nil { - t.Fatalf("PostForm returned error %v", err) - } - resp.Body.Close() - if resp.StatusCode != http.StatusMethodNotAllowed { - t.Errorf("Status = %d, want %d", resp.StatusCode, http.StatusMethodNotAllowed) - } -} - -func TestHandshake(t *testing.T) { - s := newServer(t) - defer s.Close() - - ws, resp, err := cstDialer.Dial(s.URL, http.Header{"Origin": {s.URL}}) - if err != nil { - t.Fatalf("Dial: %v", err) - } - defer ws.Close() - - var sessionID string - for _, c := range resp.Cookies() { - if c.Name == "sessionID" { - sessionID = c.Value - } - } - if sessionID != "1234" { - t.Error("Set-Cookie not received from the server.") - } - - if ws.Subprotocol() != "p1" { - t.Errorf("ws.Subprotocol() = %s, want p1", ws.Subprotocol()) - } - sendRecv(t, ws) -} - -func TestRespOnBadHandshake(t *testing.T) { - const expectedStatus = http.StatusGone - const expectedBody = "This is the response body." 
- - s := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - w.WriteHeader(expectedStatus) - io.WriteString(w, expectedBody) - })) - defer s.Close() - - ws, resp, err := cstDialer.Dial(makeWsProto(s.URL), nil) - if err == nil { - ws.Close() - t.Fatalf("Dial: nil") - } - - if resp == nil { - t.Fatalf("resp=nil, err=%v", err) - } - - if resp.StatusCode != expectedStatus { - t.Errorf("resp.StatusCode=%d, want %d", resp.StatusCode, expectedStatus) - } - - p, err := ioutil.ReadAll(resp.Body) - if err != nil { - t.Fatalf("ReadFull(resp.Body) returned error %v", err) - } - - if string(p) != expectedBody { - t.Errorf("resp.Body=%s, want %s", p, expectedBody) - } -} - -// TestHostHeader confirms that the host header provided in the call to Dial is -// sent to the server. -func TestHostHeader(t *testing.T) { - s := newServer(t) - defer s.Close() - - specifiedHost := make(chan string, 1) - origHandler := s.Server.Config.Handler - - // Capture the request Host header. - s.Server.Config.Handler = http.HandlerFunc( - func(w http.ResponseWriter, r *http.Request) { - specifiedHost <- r.Host - origHandler.ServeHTTP(w, r) - }) - - ws, _, err := cstDialer.Dial(s.URL, http.Header{"Host": {"testhost"}}) - if err != nil { - t.Fatalf("Dial: %v", err) - } - defer ws.Close() - - if gotHost := <-specifiedHost; gotHost != "testhost" { - t.Fatalf("gotHost = %q, want \"testhost\"", gotHost) - } - - sendRecv(t, ws) -} diff --git a/vendor/github.com/gorilla/websocket/client_test.go b/vendor/github.com/gorilla/websocket/client_test.go deleted file mode 100644 index 7d2b084..0000000 --- a/vendor/github.com/gorilla/websocket/client_test.go +++ /dev/null @@ -1,72 +0,0 @@ -// Copyright 2014 The Gorilla WebSocket Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -package websocket - -import ( - "net/url" - "reflect" - "testing" -) - -var parseURLTests = []struct { - s string - u *url.URL - rui string -}{ - {"ws://example.com/", &url.URL{Scheme: "ws", Host: "example.com", Opaque: "/"}, "/"}, - {"ws://example.com", &url.URL{Scheme: "ws", Host: "example.com", Opaque: "/"}, "/"}, - {"ws://example.com:7777/", &url.URL{Scheme: "ws", Host: "example.com:7777", Opaque: "/"}, "/"}, - {"wss://example.com/", &url.URL{Scheme: "wss", Host: "example.com", Opaque: "/"}, "/"}, - {"wss://example.com/a/b", &url.URL{Scheme: "wss", Host: "example.com", Opaque: "/a/b"}, "/a/b"}, - {"ss://example.com/a/b", nil, ""}, - {"ws://webmaster@example.com/", nil, ""}, - {"wss://example.com/a/b?x=y", &url.URL{Scheme: "wss", Host: "example.com", Opaque: "/a/b", RawQuery: "x=y"}, "/a/b?x=y"}, - {"wss://example.com?x=y", &url.URL{Scheme: "wss", Host: "example.com", Opaque: "/", RawQuery: "x=y"}, "/?x=y"}, -} - -func TestParseURL(t *testing.T) { - for _, tt := range parseURLTests { - u, err := parseURL(tt.s) - if tt.u != nil && err != nil { - t.Errorf("parseURL(%q) returned error %v", tt.s, err) - continue - } - if tt.u == nil { - if err == nil { - t.Errorf("parseURL(%q) did not return error", tt.s) - } - continue - } - if !reflect.DeepEqual(u, tt.u) { - t.Errorf("parseURL(%q) = %v, want %v", tt.s, u, tt.u) - continue - } - if u.RequestURI() != tt.rui { - t.Errorf("parseURL(%q).RequestURI() = %v, want %v", tt.s, u.RequestURI(), tt.rui) - } - } -} - -var hostPortNoPortTests = []struct { - u *url.URL - hostPort, hostNoPort string -}{ - {&url.URL{Scheme: "ws", Host: "example.com"}, "example.com:80", "example.com"}, - {&url.URL{Scheme: "wss", Host: "example.com"}, "example.com:443", "example.com"}, - {&url.URL{Scheme: "ws", Host: "example.com:7777"}, "example.com:7777", "example.com"}, - {&url.URL{Scheme: "wss", Host: "example.com:7777"}, "example.com:7777", "example.com"}, -} - -func TestHostPortNoPort(t *testing.T) { - for _, tt := range hostPortNoPortTests { - hostPort, hostNoPort := hostPortNoPort(tt.u) - if hostPort != tt.hostPort { - t.Errorf("hostPortNoPort(%v) returned hostPort %q, want %q", tt.u, hostPort, tt.hostPort) - } - if hostNoPort != tt.hostNoPort { - t.Errorf("hostPortNoPort(%v) returned hostNoPort %q, want %q", tt.u, hostNoPort, tt.hostNoPort) - } - } -} diff --git a/vendor/github.com/gorilla/websocket/conn.go b/vendor/github.com/gorilla/websocket/conn.go deleted file mode 100644 index 794c2ef..0000000 --- a/vendor/github.com/gorilla/websocket/conn.go +++ /dev/null @@ -1,951 +0,0 @@ -// Copyright 2013 The Gorilla WebSocket Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package websocket - -import ( - "bufio" - "encoding/binary" - "errors" - "io" - "io/ioutil" - "math/rand" - "net" - "strconv" - "time" - "unicode/utf8" -) - -const ( - maxFrameHeaderSize = 2 + 8 + 4 // Fixed header + length + mask - maxControlFramePayloadSize = 125 - finalBit = 1 << 7 - maskBit = 1 << 7 - writeWait = time.Second - - defaultReadBufferSize = 4096 - defaultWriteBufferSize = 4096 - - continuationFrame = 0 - noFrame = -1 -) - -// Close codes defined in RFC 6455, section 11.7. 
-const ( - CloseNormalClosure = 1000 - CloseGoingAway = 1001 - CloseProtocolError = 1002 - CloseUnsupportedData = 1003 - CloseNoStatusReceived = 1005 - CloseAbnormalClosure = 1006 - CloseInvalidFramePayloadData = 1007 - ClosePolicyViolation = 1008 - CloseMessageTooBig = 1009 - CloseMandatoryExtension = 1010 - CloseInternalServerErr = 1011 - CloseServiceRestart = 1012 - CloseTryAgainLater = 1013 - CloseTLSHandshake = 1015 -) - -// The message types are defined in RFC 6455, section 11.8. -const ( - // TextMessage denotes a text data message. The text message payload is - // interpreted as UTF-8 encoded text data. - TextMessage = 1 - - // BinaryMessage denotes a binary data message. - BinaryMessage = 2 - - // CloseMessage denotes a close control message. The optional message - // payload contains a numeric code and text. Use the FormatCloseMessage - // function to format a close message payload. - CloseMessage = 8 - - // PingMessage denotes a ping control message. The optional message payload - // is UTF-8 encoded text. - PingMessage = 9 - - // PongMessage denotes a ping control message. The optional message payload - // is UTF-8 encoded text. - PongMessage = 10 -) - -// ErrCloseSent is returned when the application writes a message to the -// connection after sending a close message. -var ErrCloseSent = errors.New("websocket: close sent") - -// ErrReadLimit is returned when reading a message that is larger than the -// read limit set for the connection. -var ErrReadLimit = errors.New("websocket: read limit exceeded") - -// netError satisfies the net Error interface. -type netError struct { - msg string - temporary bool - timeout bool -} - -func (e *netError) Error() string { return e.msg } -func (e *netError) Temporary() bool { return e.temporary } -func (e *netError) Timeout() bool { return e.timeout } - -// CloseError represents close frame. -type CloseError struct { - - // Code is defined in RFC 6455, section 11.7. - Code int - - // Text is the optional text payload. - Text string -} - -func (e *CloseError) Error() string { - s := []byte("websocket: close ") - s = strconv.AppendInt(s, int64(e.Code), 10) - switch e.Code { - case CloseNormalClosure: - s = append(s, " (normal)"...) - case CloseGoingAway: - s = append(s, " (going away)"...) - case CloseProtocolError: - s = append(s, " (protocol error)"...) - case CloseUnsupportedData: - s = append(s, " (unsupported data)"...) - case CloseNoStatusReceived: - s = append(s, " (no status)"...) - case CloseAbnormalClosure: - s = append(s, " (abnormal closure)"...) - case CloseInvalidFramePayloadData: - s = append(s, " (invalid payload data)"...) - case ClosePolicyViolation: - s = append(s, " (policy violation)"...) - case CloseMessageTooBig: - s = append(s, " (message too big)"...) - case CloseMandatoryExtension: - s = append(s, " (mandatory extension missing)"...) - case CloseInternalServerErr: - s = append(s, " (internal server error)"...) - case CloseTLSHandshake: - s = append(s, " (TLS handshake error)"...) - } - if e.Text != "" { - s = append(s, ": "...) - s = append(s, e.Text...) - } - return string(s) -} - -// IsCloseError returns boolean indicating whether the error is a *CloseError -// with one of the specified codes. 
-func IsCloseError(err error, codes ...int) bool { - if e, ok := err.(*CloseError); ok { - for _, code := range codes { - if e.Code == code { - return true - } - } - } - return false -} - -// IsUnexpectedCloseError returns boolean indicating whether the error is a -// *CloseError with a code not in the list of expected codes. -func IsUnexpectedCloseError(err error, expectedCodes ...int) bool { - if e, ok := err.(*CloseError); ok { - for _, code := range expectedCodes { - if e.Code == code { - return false - } - } - return true - } - return false -} - -var ( - errWriteTimeout = &netError{msg: "websocket: write timeout", timeout: true, temporary: true} - errUnexpectedEOF = &CloseError{Code: CloseAbnormalClosure, Text: io.ErrUnexpectedEOF.Error()} - errBadWriteOpCode = errors.New("websocket: bad write message type") - errWriteClosed = errors.New("websocket: write closed") - errInvalidControlFrame = errors.New("websocket: invalid control frame") -) - -func hideTempErr(err error) error { - if e, ok := err.(net.Error); ok && e.Temporary() { - err = &netError{msg: e.Error(), timeout: e.Timeout()} - } - return err -} - -func isControl(frameType int) bool { - return frameType == CloseMessage || frameType == PingMessage || frameType == PongMessage -} - -func isData(frameType int) bool { - return frameType == TextMessage || frameType == BinaryMessage -} - -var validReceivedCloseCodes = map[int]bool{ - // see http://www.iana.org/assignments/websocket/websocket.xhtml#close-code-number - - CloseNormalClosure: true, - CloseGoingAway: true, - CloseProtocolError: true, - CloseUnsupportedData: true, - CloseNoStatusReceived: false, - CloseAbnormalClosure: false, - CloseInvalidFramePayloadData: true, - ClosePolicyViolation: true, - CloseMessageTooBig: true, - CloseMandatoryExtension: true, - CloseInternalServerErr: true, - CloseServiceRestart: true, - CloseTryAgainLater: true, - CloseTLSHandshake: false, -} - -func isValidReceivedCloseCode(code int) bool { - return validReceivedCloseCodes[code] || (code >= 3000 && code <= 4999) -} - -func maskBytes(key [4]byte, pos int, b []byte) int { - for i := range b { - b[i] ^= key[pos&3] - pos++ - } - return pos & 3 -} - -func newMaskKey() [4]byte { - n := rand.Uint32() - return [4]byte{byte(n), byte(n >> 8), byte(n >> 16), byte(n >> 24)} -} - -// Conn represents a WebSocket connection. -type Conn struct { - conn net.Conn - isServer bool - subprotocol string - - // Write fields - mu chan bool // used as mutex to protect write to conn and closeSent - closeSent bool // true if close message was sent - - // Message writer fields. - writeErr error - writeBuf []byte // frame is constructed in this buffer. - writePos int // end of data in writeBuf. - writeFrameType int // type of the current frame. - writeDeadline time.Time - isWriting bool // for best-effort concurrent write detection - messageWriter *messageWriter // the current writer - - // Read fields - readErr error - br *bufio.Reader - readRemaining int64 // bytes remaining in current frame. - readFinal bool // true the current message has more frames. - readLength int64 // Message size. - readLimit int64 // Maximum message size. 
- readMaskPos int - readMaskKey [4]byte - handlePong func(string) error - handlePing func(string) error - readErrCount int - messageReader *messageReader // the current reader -} - -func newConn(conn net.Conn, isServer bool, readBufferSize, writeBufferSize int) *Conn { - mu := make(chan bool, 1) - mu <- true - - if readBufferSize == 0 { - readBufferSize = defaultReadBufferSize - } - if readBufferSize < maxControlFramePayloadSize { - readBufferSize = maxControlFramePayloadSize - } - if writeBufferSize == 0 { - writeBufferSize = defaultWriteBufferSize - } - - c := &Conn{ - isServer: isServer, - br: bufio.NewReaderSize(conn, readBufferSize), - conn: conn, - mu: mu, - readFinal: true, - writeBuf: make([]byte, writeBufferSize+maxFrameHeaderSize), - writeFrameType: noFrame, - writePos: maxFrameHeaderSize, - } - c.SetPingHandler(nil) - c.SetPongHandler(nil) - return c -} - -// Subprotocol returns the negotiated protocol for the connection. -func (c *Conn) Subprotocol() string { - return c.subprotocol -} - -// Close closes the underlying network connection without sending or waiting for a close frame. -func (c *Conn) Close() error { - return c.conn.Close() -} - -// LocalAddr returns the local network address. -func (c *Conn) LocalAddr() net.Addr { - return c.conn.LocalAddr() -} - -// RemoteAddr returns the remote network address. -func (c *Conn) RemoteAddr() net.Addr { - return c.conn.RemoteAddr() -} - -// Write methods - -func (c *Conn) write(frameType int, deadline time.Time, bufs ...[]byte) error { - <-c.mu - defer func() { c.mu <- true }() - - if c.closeSent { - return ErrCloseSent - } else if frameType == CloseMessage { - c.closeSent = true - } - - c.conn.SetWriteDeadline(deadline) - for _, buf := range bufs { - if len(buf) > 0 { - n, err := c.conn.Write(buf) - if n != len(buf) { - // Close on partial write. - c.conn.Close() - } - if err != nil { - return err - } - } - } - return nil -} - -// WriteControl writes a control message with the given deadline. The allowed -// message types are CloseMessage, PingMessage and PongMessage. -func (c *Conn) WriteControl(messageType int, data []byte, deadline time.Time) error { - if !isControl(messageType) { - return errBadWriteOpCode - } - if len(data) > maxControlFramePayloadSize { - return errInvalidControlFrame - } - - b0 := byte(messageType) | finalBit - b1 := byte(len(data)) - if !c.isServer { - b1 |= maskBit - } - - buf := make([]byte, 0, maxFrameHeaderSize+maxControlFramePayloadSize) - buf = append(buf, b0, b1) - - if c.isServer { - buf = append(buf, data...) - } else { - key := newMaskKey() - buf = append(buf, key[:]...) - buf = append(buf, data...) - maskBytes(key, 0, buf[6:]) - } - - d := time.Hour * 1000 - if !deadline.IsZero() { - d = deadline.Sub(time.Now()) - if d < 0 { - return errWriteTimeout - } - } - - timer := time.NewTimer(d) - select { - case <-c.mu: - timer.Stop() - case <-timer.C: - return errWriteTimeout - } - defer func() { c.mu <- true }() - - if c.closeSent { - return ErrCloseSent - } else if messageType == CloseMessage { - c.closeSent = true - } - - c.conn.SetWriteDeadline(deadline) - n, err := c.conn.Write(buf) - if n != 0 && n != len(buf) { - c.conn.Close() - } - return hideTempErr(err) -} - -// NextWriter returns a writer for the next message to send. The writer's Close -// method flushes the complete message to the network. -// -// There can be at most one open writer on a connection. NextWriter closes the -// previous writer if the application has not already done so. 
-func (c *Conn) NextWriter(messageType int) (io.WriteCloser, error) { - if c.writeErr != nil { - return nil, c.writeErr - } - - if c.writeFrameType != noFrame { - if err := c.flushFrame(true, nil); err != nil { - return nil, err - } - } - - if !isControl(messageType) && !isData(messageType) { - return nil, errBadWriteOpCode - } - - c.writeFrameType = messageType - w := &messageWriter{c} - c.messageWriter = w - return w, nil -} - -func (c *Conn) flushFrame(final bool, extra []byte) error { - length := c.writePos - maxFrameHeaderSize + len(extra) - - // Check for invalid control frames. - if isControl(c.writeFrameType) && - (!final || length > maxControlFramePayloadSize) { - c.messageWriter = nil - c.writeFrameType = noFrame - c.writePos = maxFrameHeaderSize - return errInvalidControlFrame - } - - b0 := byte(c.writeFrameType) - if final { - b0 |= finalBit - } - b1 := byte(0) - if !c.isServer { - b1 |= maskBit - } - - // Assume that the frame starts at beginning of c.writeBuf. - framePos := 0 - if c.isServer { - // Adjust up if mask not included in the header. - framePos = 4 - } - - switch { - case length >= 65536: - c.writeBuf[framePos] = b0 - c.writeBuf[framePos+1] = b1 | 127 - binary.BigEndian.PutUint64(c.writeBuf[framePos+2:], uint64(length)) - case length > 125: - framePos += 6 - c.writeBuf[framePos] = b0 - c.writeBuf[framePos+1] = b1 | 126 - binary.BigEndian.PutUint16(c.writeBuf[framePos+2:], uint16(length)) - default: - framePos += 8 - c.writeBuf[framePos] = b0 - c.writeBuf[framePos+1] = b1 | byte(length) - } - - if !c.isServer { - key := newMaskKey() - copy(c.writeBuf[maxFrameHeaderSize-4:], key[:]) - maskBytes(key, 0, c.writeBuf[maxFrameHeaderSize:c.writePos]) - if len(extra) > 0 { - c.writeErr = errors.New("websocket: internal error, extra used in client mode") - return c.writeErr - } - } - - // Write the buffers to the connection with best-effort detection of - // concurrent writes. See the concurrency section in the package - // documentation for more info. - - if c.isWriting { - panic("concurrent write to websocket connection") - } - c.isWriting = true - - c.writeErr = c.write(c.writeFrameType, c.writeDeadline, c.writeBuf[framePos:c.writePos], extra) - - if !c.isWriting { - panic("concurrent write to websocket connection") - } - c.isWriting = false - - // Setup for next frame. - c.writePos = maxFrameHeaderSize - c.writeFrameType = continuationFrame - if final { - c.messageWriter = nil - c.writeFrameType = noFrame - } - return c.writeErr -} - -type messageWriter struct{ c *Conn } - -func (w *messageWriter) err() error { - c := w.c - if c.messageWriter != w { - return errWriteClosed - } - if c.writeErr != nil { - return c.writeErr - } - return nil -} - -func (w *messageWriter) ncopy(max int) (int, error) { - n := len(w.c.writeBuf) - w.c.writePos - if n <= 0 { - if err := w.c.flushFrame(false, nil); err != nil { - return 0, err - } - n = len(w.c.writeBuf) - w.c.writePos - } - if n > max { - n = max - } - return n, nil -} - -func (w *messageWriter) write(final bool, p []byte) (int, error) { - if err := w.err(); err != nil { - return 0, err - } - - if len(p) > 2*len(w.c.writeBuf) && w.c.isServer { - // Don't buffer large messages. 
- err := w.c.flushFrame(final, p) - if err != nil { - return 0, err - } - return len(p), nil - } - - nn := len(p) - for len(p) > 0 { - n, err := w.ncopy(len(p)) - if err != nil { - return 0, err - } - copy(w.c.writeBuf[w.c.writePos:], p[:n]) - w.c.writePos += n - p = p[n:] - } - return nn, nil -} - -func (w *messageWriter) Write(p []byte) (int, error) { - return w.write(false, p) -} - -func (w *messageWriter) WriteString(p string) (int, error) { - if err := w.err(); err != nil { - return 0, err - } - - nn := len(p) - for len(p) > 0 { - n, err := w.ncopy(len(p)) - if err != nil { - return 0, err - } - copy(w.c.writeBuf[w.c.writePos:], p[:n]) - w.c.writePos += n - p = p[n:] - } - return nn, nil -} - -func (w *messageWriter) ReadFrom(r io.Reader) (nn int64, err error) { - if err := w.err(); err != nil { - return 0, err - } - for { - if w.c.writePos == len(w.c.writeBuf) { - err = w.c.flushFrame(false, nil) - if err != nil { - break - } - } - var n int - n, err = r.Read(w.c.writeBuf[w.c.writePos:]) - w.c.writePos += n - nn += int64(n) - if err != nil { - if err == io.EOF { - err = nil - } - break - } - } - return nn, err -} - -func (w *messageWriter) Close() error { - if err := w.err(); err != nil { - return err - } - return w.c.flushFrame(true, nil) -} - -// WriteMessage is a helper method for getting a writer using NextWriter, -// writing the message and closing the writer. -func (c *Conn) WriteMessage(messageType int, data []byte) error { - w, err := c.NextWriter(messageType) - if err != nil { - return err - } - if _, ok := w.(*messageWriter); ok && c.isServer { - // Optimize write as a single frame. - n := copy(c.writeBuf[c.writePos:], data) - c.writePos += n - data = data[n:] - err = c.flushFrame(true, data) - return err - } - if _, err = w.Write(data); err != nil { - return err - } - return w.Close() -} - -// SetWriteDeadline sets the write deadline on the underlying network -// connection. After a write has timed out, the websocket state is corrupt and -// all future writes will return an error. A zero value for t means writes will -// not time out. -func (c *Conn) SetWriteDeadline(t time.Time) error { - c.writeDeadline = t - return nil -} - -// Read methods - -func (c *Conn) advanceFrame() (int, error) { - - // 1. Skip remainder of previous frame. - - if c.readRemaining > 0 { - if _, err := io.CopyN(ioutil.Discard, c.br, c.readRemaining); err != nil { - return noFrame, err - } - } - - // 2. Read and parse first two bytes of frame header. - - p, err := c.read(2) - if err != nil { - return noFrame, err - } - - final := p[0]&finalBit != 0 - frameType := int(p[0] & 0xf) - reserved := int((p[0] >> 4) & 0x7) - mask := p[1]&maskBit != 0 - c.readRemaining = int64(p[1] & 0x7f) - - if reserved != 0 { - return noFrame, c.handleProtocolError("unexpected reserved bits " + strconv.Itoa(reserved)) - } - - switch frameType { - case CloseMessage, PingMessage, PongMessage: - if c.readRemaining > maxControlFramePayloadSize { - return noFrame, c.handleProtocolError("control frame length > 125") - } - if !final { - return noFrame, c.handleProtocolError("control frame not final") - } - case TextMessage, BinaryMessage: - if !c.readFinal { - return noFrame, c.handleProtocolError("message start before final message frame") - } - c.readFinal = final - case continuationFrame: - if c.readFinal { - return noFrame, c.handleProtocolError("continuation after final message frame") - } - c.readFinal = final - default: - return noFrame, c.handleProtocolError("unknown opcode " + strconv.Itoa(frameType)) - } - - // 3. 
Read and parse frame length. - - switch c.readRemaining { - case 126: - p, err := c.read(2) - if err != nil { - return noFrame, err - } - c.readRemaining = int64(binary.BigEndian.Uint16(p)) - case 127: - p, err := c.read(8) - if err != nil { - return noFrame, err - } - c.readRemaining = int64(binary.BigEndian.Uint64(p)) - } - - // 4. Handle frame masking. - - if mask != c.isServer { - return noFrame, c.handleProtocolError("incorrect mask flag") - } - - if mask { - c.readMaskPos = 0 - p, err := c.read(len(c.readMaskKey)) - if err != nil { - return noFrame, err - } - copy(c.readMaskKey[:], p) - } - - // 5. For text and binary messages, enforce read limit and return. - - if frameType == continuationFrame || frameType == TextMessage || frameType == BinaryMessage { - - c.readLength += c.readRemaining - if c.readLimit > 0 && c.readLength > c.readLimit { - c.WriteControl(CloseMessage, FormatCloseMessage(CloseMessageTooBig, ""), time.Now().Add(writeWait)) - return noFrame, ErrReadLimit - } - - return frameType, nil - } - - // 6. Read control frame payload. - - var payload []byte - if c.readRemaining > 0 { - payload, err = c.read(int(c.readRemaining)) - c.readRemaining = 0 - if err != nil { - return noFrame, err - } - if c.isServer { - maskBytes(c.readMaskKey, 0, payload) - } - } - - // 7. Process control frame payload. - - switch frameType { - case PongMessage: - if err := c.handlePong(string(payload)); err != nil { - return noFrame, err - } - case PingMessage: - if err := c.handlePing(string(payload)); err != nil { - return noFrame, err - } - case CloseMessage: - echoMessage := []byte{} - closeCode := CloseNoStatusReceived - closeText := "" - if len(payload) >= 2 { - echoMessage = payload[:2] - closeCode = int(binary.BigEndian.Uint16(payload)) - if !isValidReceivedCloseCode(closeCode) { - return noFrame, c.handleProtocolError("invalid close code") - } - closeText = string(payload[2:]) - if !utf8.ValidString(closeText) { - return noFrame, c.handleProtocolError("invalid utf8 payload in close frame") - } - } - c.WriteControl(CloseMessage, echoMessage, time.Now().Add(writeWait)) - return noFrame, &CloseError{Code: closeCode, Text: closeText} - } - - return frameType, nil -} - -func (c *Conn) handleProtocolError(message string) error { - c.WriteControl(CloseMessage, FormatCloseMessage(CloseProtocolError, message), time.Now().Add(writeWait)) - return errors.New("websocket: " + message) -} - -// NextReader returns the next data message received from the peer. The -// returned messageType is either TextMessage or BinaryMessage. -// -// There can be at most one open reader on a connection. NextReader discards -// the previous message if the application has not already consumed it. -// -// Applications must break out of the application's read loop when this method -// returns a non-nil error value. Errors returned from this method are -// permanent. Once this method returns a non-nil error, all subsequent calls to -// this method return the same error. -func (c *Conn) NextReader() (messageType int, r io.Reader, err error) { - - c.messageReader = nil - c.readLength = 0 - - for c.readErr == nil { - frameType, err := c.advanceFrame() - if err != nil { - c.readErr = hideTempErr(err) - break - } - if frameType == TextMessage || frameType == BinaryMessage { - r := &messageReader{c} - c.messageReader = r - return frameType, r, nil - } - } - - // Applications that do handle the error returned from this method spin in - // tight loop on connection failure. 
To help application developers detect - // this error, panic on repeated reads to the failed connection. - c.readErrCount++ - if c.readErrCount >= 1000 { - panic("repeated read on failed websocket connection") - } - - return noFrame, nil, c.readErr -} - -type messageReader struct{ c *Conn } - -func (r *messageReader) Read(b []byte) (int, error) { - c := r.c - if c.messageReader != r { - return 0, io.EOF - } - - for c.readErr == nil { - - if c.readRemaining > 0 { - if int64(len(b)) > c.readRemaining { - b = b[:c.readRemaining] - } - n, err := c.br.Read(b) - c.readErr = hideTempErr(err) - if c.isServer { - c.readMaskPos = maskBytes(c.readMaskKey, c.readMaskPos, b[:n]) - } - c.readRemaining -= int64(n) - if c.readRemaining > 0 && c.readErr == io.EOF { - c.readErr = errUnexpectedEOF - } - return n, c.readErr - } - - if c.readFinal { - c.messageReader = nil - return 0, io.EOF - } - - frameType, err := c.advanceFrame() - switch { - case err != nil: - c.readErr = hideTempErr(err) - case frameType == TextMessage || frameType == BinaryMessage: - c.readErr = errors.New("websocket: internal error, unexpected text or binary in Reader") - } - } - - err := c.readErr - if err == io.EOF && c.messageReader == r { - err = errUnexpectedEOF - } - return 0, err -} - -// ReadMessage is a helper method for getting a reader using NextReader and -// reading from that reader to a buffer. -func (c *Conn) ReadMessage() (messageType int, p []byte, err error) { - var r io.Reader - messageType, r, err = c.NextReader() - if err != nil { - return messageType, nil, err - } - p, err = ioutil.ReadAll(r) - return messageType, p, err -} - -// SetReadDeadline sets the read deadline on the underlying network connection. -// After a read has timed out, the websocket connection state is corrupt and -// all future reads will return an error. A zero value for t means reads will -// not time out. -func (c *Conn) SetReadDeadline(t time.Time) error { - return c.conn.SetReadDeadline(t) -} - -// SetReadLimit sets the maximum size for a message read from the peer. If a -// message exceeds the limit, the connection sends a close frame to the peer -// and returns ErrReadLimit to the application. -func (c *Conn) SetReadLimit(limit int64) { - c.readLimit = limit -} - -// PingHandler returns the current ping handler -func (c *Conn) PingHandler() func(appData string) error { - return c.handlePing -} - -// SetPingHandler sets the handler for ping messages received from the peer. -// The appData argument to h is the PING frame application data. The default -// ping handler sends a pong to the peer. -func (c *Conn) SetPingHandler(h func(appData string) error) { - if h == nil { - h = func(message string) error { - err := c.WriteControl(PongMessage, []byte(message), time.Now().Add(writeWait)) - if err == ErrCloseSent { - return nil - } else if e, ok := err.(net.Error); ok && e.Temporary() { - return nil - } - return err - } - } - c.handlePing = h -} - -// PongHandler returns the current pong handler -func (c *Conn) PongHandler() func(appData string) error { - return c.handlePong -} - -// SetPongHandler sets the handler for pong messages received from the peer. -// The appData argument to h is the PONG frame application data. The default -// pong handler does nothing. -func (c *Conn) SetPongHandler(h func(appData string) error) { - if h == nil { - h = func(string) error { return nil } - } - c.handlePong = h -} - -// UnderlyingConn returns the internal net.Conn. This can be used to further -// modifications to connection specific flags. 
-func (c *Conn) UnderlyingConn() net.Conn { - return c.conn -} - -// FormatCloseMessage formats closeCode and text as a WebSocket close message. -func FormatCloseMessage(closeCode int, text string) []byte { - buf := make([]byte, 2+len(text)) - binary.BigEndian.PutUint16(buf, uint16(closeCode)) - copy(buf[2:], text) - return buf -} diff --git a/vendor/github.com/gorilla/websocket/conn_read.go b/vendor/github.com/gorilla/websocket/conn_read.go deleted file mode 100644 index 1ea1505..0000000 --- a/vendor/github.com/gorilla/websocket/conn_read.go +++ /dev/null @@ -1,18 +0,0 @@ -// Copyright 2016 The Gorilla WebSocket Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build go1.5 - -package websocket - -import "io" - -func (c *Conn) read(n int) ([]byte, error) { - p, err := c.br.Peek(n) - if err == io.EOF { - err = errUnexpectedEOF - } - c.br.Discard(len(p)) - return p, err -} diff --git a/vendor/github.com/gorilla/websocket/conn_read_legacy.go b/vendor/github.com/gorilla/websocket/conn_read_legacy.go deleted file mode 100644 index 018541c..0000000 --- a/vendor/github.com/gorilla/websocket/conn_read_legacy.go +++ /dev/null @@ -1,21 +0,0 @@ -// Copyright 2016 The Gorilla WebSocket Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build !go1.5 - -package websocket - -import "io" - -func (c *Conn) read(n int) ([]byte, error) { - p, err := c.br.Peek(n) - if err == io.EOF { - err = errUnexpectedEOF - } - if len(p) > 0 { - // advance over the bytes just read - io.ReadFull(c.br, p) - } - return p, err -} diff --git a/vendor/github.com/gorilla/websocket/conn_test.go b/vendor/github.com/gorilla/websocket/conn_test.go deleted file mode 100644 index 0243c11..0000000 --- a/vendor/github.com/gorilla/websocket/conn_test.go +++ /dev/null @@ -1,402 +0,0 @@ -// Copyright 2013 The Gorilla WebSocket Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -package websocket - -import ( - "bufio" - "bytes" - "errors" - "fmt" - "io" - "io/ioutil" - "net" - "reflect" - "testing" - "testing/iotest" - "time" -) - -var _ net.Error = errWriteTimeout - -type fakeNetConn struct { - io.Reader - io.Writer -} - -func (c fakeNetConn) Close() error { return nil } -func (c fakeNetConn) LocalAddr() net.Addr { return nil } -func (c fakeNetConn) RemoteAddr() net.Addr { return nil } -func (c fakeNetConn) SetDeadline(t time.Time) error { return nil } -func (c fakeNetConn) SetReadDeadline(t time.Time) error { return nil } -func (c fakeNetConn) SetWriteDeadline(t time.Time) error { return nil } - -func TestFraming(t *testing.T) { - frameSizes := []int{0, 1, 2, 124, 125, 126, 127, 128, 129, 65534, 65535, 65536, 65537} - var readChunkers = []struct { - name string - f func(io.Reader) io.Reader - }{ - {"half", iotest.HalfReader}, - {"one", iotest.OneByteReader}, - {"asis", func(r io.Reader) io.Reader { return r }}, - } - - writeBuf := make([]byte, 65537) - for i := range writeBuf { - writeBuf[i] = byte(i) - } - - for _, isServer := range []bool{true, false} { - for _, chunker := range readChunkers { - - var connBuf bytes.Buffer - wc := newConn(fakeNetConn{Reader: nil, Writer: &connBuf}, isServer, 1024, 1024) - rc := newConn(fakeNetConn{Reader: chunker.f(&connBuf), Writer: nil}, !isServer, 1024, 1024) - - for _, n := range frameSizes { - for _, iocopy := range []bool{true, false} { - name := fmt.Sprintf("s:%v, r:%s, n:%d c:%v", isServer, chunker.name, n, iocopy) - - w, err := wc.NextWriter(TextMessage) - if err != nil { - t.Errorf("%s: wc.NextWriter() returned %v", name, err) - continue - } - var nn int - if iocopy { - var n64 int64 - n64, err = io.Copy(w, bytes.NewReader(writeBuf[:n])) - nn = int(n64) - } else { - nn, err = w.Write(writeBuf[:n]) - } - if err != nil || nn != n { - t.Errorf("%s: w.Write(writeBuf[:n]) returned %d, %v", name, nn, err) - continue - } - err = w.Close() - if err != nil { - t.Errorf("%s: w.Close() returned %v", name, err) - continue - } - - opCode, r, err := rc.NextReader() - if err != nil || opCode != TextMessage { - t.Errorf("%s: NextReader() returned %d, r, %v", name, opCode, err) - continue - } - rbuf, err := ioutil.ReadAll(r) - if err != nil { - t.Errorf("%s: ReadFull() returned rbuf, %v", name, err) - continue - } - - if len(rbuf) != n { - t.Errorf("%s: len(rbuf) is %d, want %d", name, len(rbuf), n) - continue - } - - for i, b := range rbuf { - if byte(i) != b { - t.Errorf("%s: bad byte at offset %d", name, i) - break - } - } - } - } - } - } -} - -func TestControl(t *testing.T) { - const message = "this is a ping/pong messsage" - for _, isServer := range []bool{true, false} { - for _, isWriteControl := range []bool{true, false} { - name := fmt.Sprintf("s:%v, wc:%v", isServer, isWriteControl) - var connBuf bytes.Buffer - wc := newConn(fakeNetConn{Reader: nil, Writer: &connBuf}, isServer, 1024, 1024) - rc := newConn(fakeNetConn{Reader: &connBuf, Writer: nil}, !isServer, 1024, 1024) - if isWriteControl { - wc.WriteControl(PongMessage, []byte(message), time.Now().Add(time.Second)) - } else { - w, err := wc.NextWriter(PongMessage) - if err != nil { - t.Errorf("%s: wc.NextWriter() returned %v", name, err) - continue - } - if _, err := w.Write([]byte(message)); err != nil { - t.Errorf("%s: w.Write() returned %v", name, err) - continue - } - if err := w.Close(); err != nil { - t.Errorf("%s: w.Close() returned %v", name, err) - continue - } - var actualMessage string - rc.SetPongHandler(func(s string) error { actualMessage = s; return nil 
}) - rc.NextReader() - if actualMessage != message { - t.Errorf("%s: pong=%q, want %q", name, actualMessage, message) - continue - } - } - } - } -} - -func TestCloseBeforeFinalFrame(t *testing.T) { - const bufSize = 512 - - expectedErr := &CloseError{Code: CloseNormalClosure, Text: "hello"} - - var b1, b2 bytes.Buffer - wc := newConn(fakeNetConn{Reader: nil, Writer: &b1}, false, 1024, bufSize) - rc := newConn(fakeNetConn{Reader: &b1, Writer: &b2}, true, 1024, 1024) - - w, _ := wc.NextWriter(BinaryMessage) - w.Write(make([]byte, bufSize+bufSize/2)) - wc.WriteControl(CloseMessage, FormatCloseMessage(expectedErr.Code, expectedErr.Text), time.Now().Add(10*time.Second)) - w.Close() - - op, r, err := rc.NextReader() - if op != BinaryMessage || err != nil { - t.Fatalf("NextReader() returned %d, %v", op, err) - } - _, err = io.Copy(ioutil.Discard, r) - if !reflect.DeepEqual(err, expectedErr) { - t.Fatalf("io.Copy() returned %v, want %v", err, expectedErr) - } - _, _, err = rc.NextReader() - if !reflect.DeepEqual(err, expectedErr) { - t.Fatalf("NextReader() returned %v, want %v", err, expectedErr) - } -} - -func TestEOFWithinFrame(t *testing.T) { - const bufSize = 64 - - for n := 0; ; n++ { - var b bytes.Buffer - wc := newConn(fakeNetConn{Reader: nil, Writer: &b}, false, 1024, 1024) - rc := newConn(fakeNetConn{Reader: &b, Writer: nil}, true, 1024, 1024) - - w, _ := wc.NextWriter(BinaryMessage) - w.Write(make([]byte, bufSize)) - w.Close() - - if n >= b.Len() { - break - } - b.Truncate(n) - - op, r, err := rc.NextReader() - if err == errUnexpectedEOF { - continue - } - if op != BinaryMessage || err != nil { - t.Fatalf("%d: NextReader() returned %d, %v", n, op, err) - } - _, err = io.Copy(ioutil.Discard, r) - if err != errUnexpectedEOF { - t.Fatalf("%d: io.Copy() returned %v, want %v", n, err, errUnexpectedEOF) - } - _, _, err = rc.NextReader() - if err != errUnexpectedEOF { - t.Fatalf("%d: NextReader() returned %v, want %v", n, err, errUnexpectedEOF) - } - } -} - -func TestEOFBeforeFinalFrame(t *testing.T) { - const bufSize = 512 - - var b1, b2 bytes.Buffer - wc := newConn(fakeNetConn{Reader: nil, Writer: &b1}, false, 1024, bufSize) - rc := newConn(fakeNetConn{Reader: &b1, Writer: &b2}, true, 1024, 1024) - - w, _ := wc.NextWriter(BinaryMessage) - w.Write(make([]byte, bufSize+bufSize/2)) - - op, r, err := rc.NextReader() - if op != BinaryMessage || err != nil { - t.Fatalf("NextReader() returned %d, %v", op, err) - } - _, err = io.Copy(ioutil.Discard, r) - if err != errUnexpectedEOF { - t.Fatalf("io.Copy() returned %v, want %v", err, errUnexpectedEOF) - } - _, _, err = rc.NextReader() - if err != errUnexpectedEOF { - t.Fatalf("NextReader() returned %v, want %v", err, errUnexpectedEOF) - } -} - -func TestReadLimit(t *testing.T) { - - const readLimit = 512 - message := make([]byte, readLimit+1) - - var b1, b2 bytes.Buffer - wc := newConn(fakeNetConn{Reader: nil, Writer: &b1}, false, 1024, readLimit-2) - rc := newConn(fakeNetConn{Reader: &b1, Writer: &b2}, true, 1024, 1024) - rc.SetReadLimit(readLimit) - - // Send message at the limit with interleaved pong. - w, _ := wc.NextWriter(BinaryMessage) - w.Write(message[:readLimit-1]) - wc.WriteControl(PongMessage, []byte("this is a pong"), time.Now().Add(10*time.Second)) - w.Write(message[:1]) - w.Close() - - // Send message larger than the limit. 
- wc.WriteMessage(BinaryMessage, message[:readLimit+1]) - - op, _, err := rc.NextReader() - if op != BinaryMessage || err != nil { - t.Fatalf("1: NextReader() returned %d, %v", op, err) - } - op, r, err := rc.NextReader() - if op != BinaryMessage || err != nil { - t.Fatalf("2: NextReader() returned %d, %v", op, err) - } - _, err = io.Copy(ioutil.Discard, r) - if err != ErrReadLimit { - t.Fatalf("io.Copy() returned %v", err) - } -} - -func TestUnderlyingConn(t *testing.T) { - var b1, b2 bytes.Buffer - fc := fakeNetConn{Reader: &b1, Writer: &b2} - c := newConn(fc, true, 1024, 1024) - ul := c.UnderlyingConn() - if ul != fc { - t.Fatalf("Underlying conn is not what it should be.") - } -} - -func TestBufioReadBytes(t *testing.T) { - - // Test calling bufio.ReadBytes for value longer than read buffer size. - - m := make([]byte, 512) - m[len(m)-1] = '\n' - - var b1, b2 bytes.Buffer - wc := newConn(fakeNetConn{Reader: nil, Writer: &b1}, false, len(m)+64, len(m)+64) - rc := newConn(fakeNetConn{Reader: &b1, Writer: &b2}, true, len(m)-64, len(m)-64) - - w, _ := wc.NextWriter(BinaryMessage) - w.Write(m) - w.Close() - - op, r, err := rc.NextReader() - if op != BinaryMessage || err != nil { - t.Fatalf("NextReader() returned %d, %v", op, err) - } - - br := bufio.NewReader(r) - p, err := br.ReadBytes('\n') - if err != nil { - t.Fatalf("ReadBytes() returned %v", err) - } - if len(p) != len(m) { - t.Fatalf("read returnd %d bytes, want %d bytes", len(p), len(m)) - } -} - -var closeErrorTests = []struct { - err error - codes []int - ok bool -}{ - {&CloseError{Code: CloseNormalClosure}, []int{CloseNormalClosure}, true}, - {&CloseError{Code: CloseNormalClosure}, []int{CloseNoStatusReceived}, false}, - {&CloseError{Code: CloseNormalClosure}, []int{CloseNoStatusReceived, CloseNormalClosure}, true}, - {errors.New("hello"), []int{CloseNormalClosure}, false}, -} - -func TestCloseError(t *testing.T) { - for _, tt := range closeErrorTests { - ok := IsCloseError(tt.err, tt.codes...) - if ok != tt.ok { - t.Errorf("IsCloseError(%#v, %#v) returned %v, want %v", tt.err, tt.codes, ok, tt.ok) - } - } -} - -var unexpectedCloseErrorTests = []struct { - err error - codes []int - ok bool -}{ - {&CloseError{Code: CloseNormalClosure}, []int{CloseNormalClosure}, false}, - {&CloseError{Code: CloseNormalClosure}, []int{CloseNoStatusReceived}, true}, - {&CloseError{Code: CloseNormalClosure}, []int{CloseNoStatusReceived, CloseNormalClosure}, false}, - {errors.New("hello"), []int{CloseNormalClosure}, false}, -} - -func TestUnexpectedCloseErrors(t *testing.T) { - for _, tt := range unexpectedCloseErrorTests { - ok := IsUnexpectedCloseError(tt.err, tt.codes...) - if ok != tt.ok { - t.Errorf("IsUnexpectedCloseError(%#v, %#v) returned %v, want %v", tt.err, tt.codes, ok, tt.ok) - } - } -} - -type blockingWriter struct { - c1, c2 chan struct{} -} - -func (w blockingWriter) Write(p []byte) (int, error) { - // Allow main to continue - close(w.c1) - // Wait for panic in main - <-w.c2 - return len(p), nil -} - -func TestConcurrentWritePanic(t *testing.T) { - w := blockingWriter{make(chan struct{}), make(chan struct{})} - c := newConn(fakeNetConn{Reader: nil, Writer: w}, false, 1024, 1024) - go func() { - c.WriteMessage(TextMessage, []byte{}) - }() - - // wait for goroutine to block in write. 
- <-w.c1 - - defer func() { - close(w.c2) - if v := recover(); v != nil { - return - } - }() - - c.WriteMessage(TextMessage, []byte{}) - t.Fatal("should not get here") -} - -type failingReader struct{} - -func (r failingReader) Read(p []byte) (int, error) { - return 0, io.EOF -} - -func TestFailedConnectionReadPanic(t *testing.T) { - c := newConn(fakeNetConn{Reader: failingReader{}, Writer: nil}, false, 1024, 1024) - - defer func() { - if v := recover(); v != nil { - return - } - }() - - for i := 0; i < 20000; i++ { - c.ReadMessage() - } - t.Fatal("should not get here") -} diff --git a/vendor/github.com/gorilla/websocket/doc.go b/vendor/github.com/gorilla/websocket/doc.go deleted file mode 100644 index c901a7a..0000000 --- a/vendor/github.com/gorilla/websocket/doc.go +++ /dev/null @@ -1,152 +0,0 @@ -// Copyright 2013 The Gorilla WebSocket Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package websocket implements the WebSocket protocol defined in RFC 6455. -// -// Overview -// -// The Conn type represents a WebSocket connection. A server application uses -// the Upgrade function from an Upgrader object with a HTTP request handler -// to get a pointer to a Conn: -// -// var upgrader = websocket.Upgrader{ -// ReadBufferSize: 1024, -// WriteBufferSize: 1024, -// } -// -// func handler(w http.ResponseWriter, r *http.Request) { -// conn, err := upgrader.Upgrade(w, r, nil) -// if err != nil { -// log.Println(err) -// return -// } -// ... Use conn to send and receive messages. -// } -// -// Call the connection's WriteMessage and ReadMessage methods to send and -// receive messages as a slice of bytes. This snippet of code shows how to echo -// messages using these methods: -// -// for { -// messageType, p, err := conn.ReadMessage() -// if err != nil { -// return -// } -// if err = conn.WriteMessage(messageType, p); err != nil { -// return err -// } -// } -// -// In above snippet of code, p is a []byte and messageType is an int with value -// websocket.BinaryMessage or websocket.TextMessage. -// -// An application can also send and receive messages using the io.WriteCloser -// and io.Reader interfaces. To send a message, call the connection NextWriter -// method to get an io.WriteCloser, write the message to the writer and close -// the writer when done. To receive a message, call the connection NextReader -// method to get an io.Reader and read until io.EOF is returned. This snippet -// shows how to echo messages using the NextWriter and NextReader methods: -// -// for { -// messageType, r, err := conn.NextReader() -// if err != nil { -// return -// } -// w, err := conn.NextWriter(messageType) -// if err != nil { -// return err -// } -// if _, err := io.Copy(w, r); err != nil { -// return err -// } -// if err := w.Close(); err != nil { -// return err -// } -// } -// -// Data Messages -// -// The WebSocket protocol distinguishes between text and binary data messages. -// Text messages are interpreted as UTF-8 encoded text. The interpretation of -// binary messages is left to the application. -// -// This package uses the TextMessage and BinaryMessage integer constants to -// identify the two data message types. The ReadMessage and NextReader methods -// return the type of the received message. The messageType argument to the -// WriteMessage and NextWriter methods specifies the type of a sent message. 
-// -// It is the application's responsibility to ensure that text messages are -// valid UTF-8 encoded text. -// -// Control Messages -// -// The WebSocket protocol defines three types of control messages: close, ping -// and pong. Call the connection WriteControl, WriteMessage or NextWriter -// methods to send a control message to the peer. -// -// Connections handle received close messages by sending a close message to the -// peer and returning a *CloseError from the the NextReader, ReadMessage or the -// message Read method. -// -// Connections handle received ping and pong messages by invoking callback -// functions set with SetPingHandler and SetPongHandler methods. The callback -// functions are called from the NextReader, ReadMessage and the message Read -// methods. -// -// The default ping handler sends a pong to the peer. The application's reading -// goroutine can block for a short time while the handler writes the pong data -// to the connection. -// -// The application must read the connection to process ping, pong and close -// messages sent from the peer. If the application is not otherwise interested -// in messages from the peer, then the application should start a goroutine to -// read and discard messages from the peer. A simple example is: -// -// func readLoop(c *websocket.Conn) { -// for { -// if _, _, err := c.NextReader(); err != nil { -// c.Close() -// break -// } -// } -// } -// -// Concurrency -// -// Connections support one concurrent reader and one concurrent writer. -// -// Applications are responsible for ensuring that no more than one goroutine -// calls the write methods (NextWriter, SetWriteDeadline, WriteMessage, -// WriteJSON) concurrently and that no more than one goroutine calls the read -// methods (NextReader, SetReadDeadline, ReadMessage, ReadJSON, SetPongHandler, -// SetPingHandler) concurrently. -// -// The Close and WriteControl methods can be called concurrently with all other -// methods. -// -// Origin Considerations -// -// Web browsers allow Javascript applications to open a WebSocket connection to -// any host. It's up to the server to enforce an origin policy using the Origin -// request header sent by the browser. -// -// The Upgrader calls the function specified in the CheckOrigin field to check -// the origin. If the CheckOrigin function returns false, then the Upgrade -// method fails the WebSocket handshake with HTTP status 403. -// -// If the CheckOrigin field is nil, then the Upgrader uses a safe default: fail -// the handshake if the Origin request header is present and not equal to the -// Host request header. -// -// An application can allow connections from any origin by specifying a -// function that always returns true: -// -// var upgrader = websocket.Upgrader{ -// CheckOrigin: func(r *http.Request) bool { return true }, -// } -// -// The deprecated Upgrade function does not enforce an origin policy. It's the -// application's responsibility to check the Origin header before calling -// Upgrade. -package websocket diff --git a/vendor/github.com/gorilla/websocket/example_test.go b/vendor/github.com/gorilla/websocket/example_test.go deleted file mode 100644 index 96449ea..0000000 --- a/vendor/github.com/gorilla/websocket/example_test.go +++ /dev/null @@ -1,46 +0,0 @@ -// Copyright 2015 The Gorilla WebSocket Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -package websocket_test - -import ( - "log" - "net/http" - "testing" - - "github.com/gorilla/websocket" -) - -var ( - c *websocket.Conn - req *http.Request -) - -// The websocket.IsUnexpectedCloseError function is useful for identifying -// application and protocol errors. -// -// This server application works with a client application running in the -// browser. The client application does not explicitly close the websocket. The -// only expected close message from the client has the code -// websocket.CloseGoingAway. All other other close messages are likely the -// result of an application or protocol error and are logged to aid debugging. -func ExampleIsUnexpectedCloseError() { - - for { - messageType, p, err := c.ReadMessage() - if err != nil { - if websocket.IsUnexpectedCloseError(err, websocket.CloseGoingAway) { - log.Printf("error: %v, user-agent: %v", err, req.Header.Get("User-Agent")) - } - return - } - processMesage(messageType, p) - } -} - -func processMesage(mt int, p []byte) {} - -// TestX prevents godoc from showing this entire file in the example. Remove -// this function when a second example is added. -func TestX(t *testing.T) {} diff --git a/vendor/github.com/gorilla/websocket/examples/autobahn/README.md b/vendor/github.com/gorilla/websocket/examples/autobahn/README.md deleted file mode 100644 index 075ac15..0000000 --- a/vendor/github.com/gorilla/websocket/examples/autobahn/README.md +++ /dev/null @@ -1,13 +0,0 @@ -# Test Server - -This package contains a server for the [Autobahn WebSockets Test Suite](http://autobahn.ws/testsuite). - -To test the server, run - - go run server.go - -and start the client test driver - - wstest -m fuzzingclient -s fuzzingclient.json - -When the client completes, it writes a report to reports/clients/index.html. diff --git a/vendor/github.com/gorilla/websocket/examples/autobahn/fuzzingclient.json b/vendor/github.com/gorilla/websocket/examples/autobahn/fuzzingclient.json deleted file mode 100644 index 27d5a5b..0000000 --- a/vendor/github.com/gorilla/websocket/examples/autobahn/fuzzingclient.json +++ /dev/null @@ -1,14 +0,0 @@ - -{ - "options": {"failByDrop": false}, - "outdir": "./reports/clients", - "servers": [ - {"agent": "ReadAllWriteMessage", "url": "ws://localhost:9000/m", "options": {"version": 18}}, - {"agent": "ReadAllWrite", "url": "ws://localhost:9000/r", "options": {"version": 18}}, - {"agent": "CopyFull", "url": "ws://localhost:9000/f", "options": {"version": 18}}, - {"agent": "CopyWriterOnly", "url": "ws://localhost:9000/c", "options": {"version": 18}} - ], - "cases": ["*"], - "exclude-cases": [], - "exclude-agent-cases": {} -} diff --git a/vendor/github.com/gorilla/websocket/examples/autobahn/server.go b/vendor/github.com/gorilla/websocket/examples/autobahn/server.go deleted file mode 100644 index d96ac84..0000000 --- a/vendor/github.com/gorilla/websocket/examples/autobahn/server.go +++ /dev/null @@ -1,246 +0,0 @@ -// Copyright 2013 The Gorilla WebSocket Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Command server is a test server for the Autobahn WebSockets Test Suite. -package main - -import ( - "errors" - "flag" - "github.com/gorilla/websocket" - "io" - "log" - "net/http" - "time" - "unicode/utf8" -) - -var upgrader = websocket.Upgrader{ - ReadBufferSize: 4096, - WriteBufferSize: 4096, - CheckOrigin: func(r *http.Request) bool { - return true - }, -} - -// echoCopy echoes messages from the client using io.Copy. 
-func echoCopy(w http.ResponseWriter, r *http.Request, writerOnly bool) { - conn, err := upgrader.Upgrade(w, r, nil) - if err != nil { - log.Println("Upgrade:", err) - return - } - defer conn.Close() - for { - mt, r, err := conn.NextReader() - if err != nil { - if err != io.EOF { - log.Println("NextReader:", err) - } - return - } - if mt == websocket.TextMessage { - r = &validator{r: r} - } - w, err := conn.NextWriter(mt) - if err != nil { - log.Println("NextWriter:", err) - return - } - if mt == websocket.TextMessage { - r = &validator{r: r} - } - if writerOnly { - _, err = io.Copy(struct{ io.Writer }{w}, r) - } else { - _, err = io.Copy(w, r) - } - if err != nil { - if err == errInvalidUTF8 { - conn.WriteControl(websocket.CloseMessage, - websocket.FormatCloseMessage(websocket.CloseInvalidFramePayloadData, ""), - time.Time{}) - } - log.Println("Copy:", err) - return - } - err = w.Close() - if err != nil { - log.Println("Close:", err) - return - } - } -} - -func echoCopyWriterOnly(w http.ResponseWriter, r *http.Request) { - echoCopy(w, r, true) -} - -func echoCopyFull(w http.ResponseWriter, r *http.Request) { - echoCopy(w, r, false) -} - -// echoReadAll echoes messages from the client by reading the entire message -// with ioutil.ReadAll. -func echoReadAll(w http.ResponseWriter, r *http.Request, writeMessage bool) { - conn, err := upgrader.Upgrade(w, r, nil) - if err != nil { - log.Println("Upgrade:", err) - return - } - defer conn.Close() - for { - mt, b, err := conn.ReadMessage() - if err != nil { - if err != io.EOF { - log.Println("NextReader:", err) - } - return - } - if mt == websocket.TextMessage { - if !utf8.Valid(b) { - conn.WriteControl(websocket.CloseMessage, - websocket.FormatCloseMessage(websocket.CloseInvalidFramePayloadData, ""), - time.Time{}) - log.Println("ReadAll: invalid utf8") - } - } - if writeMessage { - err = conn.WriteMessage(mt, b) - if err != nil { - log.Println("WriteMessage:", err) - } - } else { - w, err := conn.NextWriter(mt) - if err != nil { - log.Println("NextWriter:", err) - return - } - if _, err := w.Write(b); err != nil { - log.Println("Writer:", err) - return - } - if err := w.Close(); err != nil { - log.Println("Close:", err) - return - } - } - } -} - -func echoReadAllWriter(w http.ResponseWriter, r *http.Request) { - echoReadAll(w, r, false) -} - -func echoReadAllWriteMessage(w http.ResponseWriter, r *http.Request) { - echoReadAll(w, r, true) -} - -func serveHome(w http.ResponseWriter, r *http.Request) { - if r.URL.Path != "/" { - http.Error(w, "Not found.", 404) - return - } - if r.Method != "GET" { - http.Error(w, "Method not allowed", 405) - return - } - w.Header().Set("Content-Type", "text/html; charset=utf-8") - io.WriteString(w, "Echo Server") -} - -var addr = flag.String("addr", ":9000", "http service address") - -func main() { - flag.Parse() - http.HandleFunc("/", serveHome) - http.HandleFunc("/c", echoCopyWriterOnly) - http.HandleFunc("/f", echoCopyFull) - http.HandleFunc("/r", echoReadAllWriter) - http.HandleFunc("/m", echoReadAllWriteMessage) - err := http.ListenAndServe(*addr, nil) - if err != nil { - log.Fatal("ListenAndServe: ", err) - } -} - -type validator struct { - state int - x rune - r io.Reader -} - -var errInvalidUTF8 = errors.New("invalid utf8") - -func (r *validator) Read(p []byte) (int, error) { - n, err := r.r.Read(p) - state := r.state - x := r.x - for _, b := range p[:n] { - state, x = decode(state, x, b) - if state == utf8Reject { - break - } - } - r.state = state - r.x = x - if state == utf8Reject || (err == io.EOF && 
state != utf8Accept) { - return n, errInvalidUTF8 - } - return n, err -} - -// UTF-8 decoder from http://bjoern.hoehrmann.de/utf-8/decoder/dfa/ -// -// Copyright (c) 2008-2009 Bjoern Hoehrmann -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to -// deal in the Software without restriction, including without limitation the -// rights to use, copy, modify, merge, publish, distribute, sublicense, and/or -// sell copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING -// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS -// IN THE SOFTWARE. -var utf8d = [...]byte{ - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, // 00..1f - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, // 20..3f - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, // 40..5f - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, // 60..7f - 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, // 80..9f - 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, // a0..bf - 8, 8, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, // c0..df - 0xa, 0x3, 0x3, 0x3, 0x3, 0x3, 0x3, 0x3, 0x3, 0x3, 0x3, 0x3, 0x3, 0x4, 0x3, 0x3, // e0..ef - 0xb, 0x6, 0x6, 0x6, 0x5, 0x8, 0x8, 0x8, 0x8, 0x8, 0x8, 0x8, 0x8, 0x8, 0x8, 0x8, // f0..ff - 0x0, 0x1, 0x2, 0x3, 0x5, 0x8, 0x7, 0x1, 0x1, 0x1, 0x4, 0x6, 0x1, 0x1, 0x1, 0x1, // s0..s0 - 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, // s1..s2 - 1, 2, 1, 1, 1, 1, 1, 2, 1, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 1, 1, 1, 1, 1, 1, 1, 1, // s3..s4 - 1, 2, 1, 1, 1, 1, 1, 1, 1, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 3, 1, 3, 1, 1, 1, 1, 1, 1, // s5..s6 - 1, 3, 1, 1, 1, 1, 1, 3, 1, 3, 1, 1, 1, 1, 1, 1, 1, 3, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, // s7..s8 -} - -const ( - utf8Accept = 0 - utf8Reject = 1 -) - -func decode(state int, x rune, b byte) (int, rune) { - t := utf8d[b] - if state != utf8Accept { - x = rune(b&0x3f) | (x << 6) - } else { - x = rune((0xff >> t) & b) - } - state = int(utf8d[256+state*16+int(t)]) - return state, x -} diff --git a/vendor/github.com/gorilla/websocket/examples/chat/README.md b/vendor/github.com/gorilla/websocket/examples/chat/README.md deleted file mode 100644 index 5df3cf1..0000000 --- a/vendor/github.com/gorilla/websocket/examples/chat/README.md +++ /dev/null @@ -1,20 +0,0 @@ -# Chat Example - -This application shows how to use use the -[websocket](https://github.com/gorilla/websocket) package and -[jQuery](http://jquery.com) to implement a simple web chat application. 
- -## Running the example - -The example requires a working Go development environment. The [Getting -Started](http://golang.org/doc/install) page describes how to install the -development environment. - -Once you have Go up and running, you can download, build and run the example -using the following commands. - - $ go get github.com/gorilla/websocket - $ cd `go list -f '{{.Dir}}' github.com/gorilla/websocket/examples/chat` - $ go run *.go - -To use the chat example, open http://localhost:8080/ in your browser. diff --git a/vendor/github.com/gorilla/websocket/examples/chat/conn.go b/vendor/github.com/gorilla/websocket/examples/chat/conn.go deleted file mode 100644 index 44f3df0..0000000 --- a/vendor/github.com/gorilla/websocket/examples/chat/conn.go +++ /dev/null @@ -1,129 +0,0 @@ -// Copyright 2013 The Gorilla WebSocket Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package main - -import ( - "bytes" - "log" - "net/http" - "time" - - "github.com/gorilla/websocket" -) - -const ( - // Time allowed to write a message to the peer. - writeWait = 10 * time.Second - - // Time allowed to read the next pong message from the peer. - pongWait = 60 * time.Second - - // Send pings to peer with this period. Must be less than pongWait. - pingPeriod = (pongWait * 9) / 10 - - // Maximum message size allowed from peer. - maxMessageSize = 512 -) - -var ( - newline = []byte{'\n'} - space = []byte{' '} -) - -var upgrader = websocket.Upgrader{ - ReadBufferSize: 1024, - WriteBufferSize: 1024, -} - -// Conn is an middleman between the websocket connection and the hub. -type Conn struct { - // The websocket connection. - ws *websocket.Conn - - // Buffered channel of outbound messages. - send chan []byte -} - -// readPump pumps messages from the websocket connection to the hub. -func (c *Conn) readPump() { - defer func() { - hub.unregister <- c - c.ws.Close() - }() - c.ws.SetReadLimit(maxMessageSize) - c.ws.SetReadDeadline(time.Now().Add(pongWait)) - c.ws.SetPongHandler(func(string) error { c.ws.SetReadDeadline(time.Now().Add(pongWait)); return nil }) - for { - _, message, err := c.ws.ReadMessage() - if err != nil { - if websocket.IsUnexpectedCloseError(err, websocket.CloseGoingAway) { - log.Printf("error: %v", err) - } - break - } - message = bytes.TrimSpace(bytes.Replace(message, newline, space, -1)) - hub.broadcast <- message - } -} - -// write writes a message with the given message type and payload. -func (c *Conn) write(mt int, payload []byte) error { - c.ws.SetWriteDeadline(time.Now().Add(writeWait)) - return c.ws.WriteMessage(mt, payload) -} - -// writePump pumps messages from the hub to the websocket connection. -func (c *Conn) writePump() { - ticker := time.NewTicker(pingPeriod) - defer func() { - ticker.Stop() - c.ws.Close() - }() - for { - select { - case message, ok := <-c.send: - if !ok { - // The hub closed the channel. - c.write(websocket.CloseMessage, []byte{}) - return - } - - c.ws.SetWriteDeadline(time.Now().Add(writeWait)) - w, err := c.ws.NextWriter(websocket.TextMessage) - if err != nil { - return - } - w.Write(message) - - // Add queued chat messages to the current websocket message. - n := len(c.send) - for i := 0; i < n; i++ { - w.Write(newline) - w.Write(<-c.send) - } - - if err := w.Close(); err != nil { - return - } - case <-ticker.C: - if err := c.write(websocket.PingMessage, []byte{}); err != nil { - return - } - } - } -} - -// serveWs handles websocket requests from the peer. 
-func serveWs(w http.ResponseWriter, r *http.Request) { - ws, err := upgrader.Upgrade(w, r, nil) - if err != nil { - log.Println(err) - return - } - conn := &Conn{send: make(chan []byte, 256), ws: ws} - hub.register <- conn - go conn.writePump() - conn.readPump() -} diff --git a/vendor/github.com/gorilla/websocket/examples/chat/home.html b/vendor/github.com/gorilla/websocket/examples/chat/home.html deleted file mode 100644 index c0c4c41..0000000 --- a/vendor/github.com/gorilla/websocket/examples/chat/home.html +++ /dev/null @@ -1,94 +0,0 @@ - - - -Chat Example - - - - - - - - - - - - diff --git a/vendor/github.com/gorilla/websocket/examples/chat/hub.go b/vendor/github.com/gorilla/websocket/examples/chat/hub.go deleted file mode 100644 index 92da865..0000000 --- a/vendor/github.com/gorilla/websocket/examples/chat/hub.go +++ /dev/null @@ -1,51 +0,0 @@ -// Copyright 2013 The Gorilla WebSocket Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package main - -// hub maintains the set of active connections and broadcasts messages to the -// connections. -type Hub struct { - // Registered connections. - connections map[*Conn]bool - - // Inbound messages from the connections. - broadcast chan []byte - - // Register requests from the connections. - register chan *Conn - - // Unregister requests from connections. - unregister chan *Conn -} - -var hub = Hub{ - broadcast: make(chan []byte), - register: make(chan *Conn), - unregister: make(chan *Conn), - connections: make(map[*Conn]bool), -} - -func (h *Hub) run() { - for { - select { - case conn := <-h.register: - h.connections[conn] = true - case conn := <-h.unregister: - if _, ok := h.connections[conn]; ok { - delete(h.connections, conn) - close(conn.send) - } - case message := <-h.broadcast: - for conn := range h.connections { - select { - case conn.send <- message: - default: - close(conn.send) - delete(hub.connections, conn) - } - } - } - } -} diff --git a/vendor/github.com/gorilla/websocket/examples/chat/main.go b/vendor/github.com/gorilla/websocket/examples/chat/main.go deleted file mode 100644 index 39943e6..0000000 --- a/vendor/github.com/gorilla/websocket/examples/chat/main.go +++ /dev/null @@ -1,39 +0,0 @@ -// Copyright 2013 The Gorilla WebSocket Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -package main - -import ( - "flag" - "log" - "net/http" - "text/template" -) - -var addr = flag.String("addr", ":8080", "http service address") -var homeTemplate = template.Must(template.ParseFiles("home.html")) - -func serveHome(w http.ResponseWriter, r *http.Request) { - if r.URL.Path != "/" { - http.Error(w, "Not found", 404) - return - } - if r.Method != "GET" { - http.Error(w, "Method not allowed", 405) - return - } - w.Header().Set("Content-Type", "text/html; charset=utf-8") - homeTemplate.Execute(w, r.Host) -} - -func main() { - flag.Parse() - go hub.run() - http.HandleFunc("/", serveHome) - http.HandleFunc("/ws", serveWs) - err := http.ListenAndServe(*addr, nil) - if err != nil { - log.Fatal("ListenAndServe: ", err) - } -} diff --git a/vendor/github.com/gorilla/websocket/examples/command/README.md b/vendor/github.com/gorilla/websocket/examples/command/README.md deleted file mode 100644 index c30d397..0000000 --- a/vendor/github.com/gorilla/websocket/examples/command/README.md +++ /dev/null @@ -1,19 +0,0 @@ -# Command example - -This example connects a websocket connection to stdin and stdout of a command. -Received messages are written to stdin followed by a `\n`. Each line read from -from standard out is sent as a message to the client. - - $ go get github.com/gorilla/websocket - $ cd `go list -f '{{.Dir}}' github.com/gorilla/websocket/examples/command` - $ go run main.go - # Open http://localhost:8080/ . - -Try the following commands. - - # Echo sent messages to the output area. - $ go run main.go cat - - # Run a shell.Try sending "ls" and "cat main.go". - $ go run main.go sh - diff --git a/vendor/github.com/gorilla/websocket/examples/command/home.html b/vendor/github.com/gorilla/websocket/examples/command/home.html deleted file mode 100644 index 72fd02b..0000000 --- a/vendor/github.com/gorilla/websocket/examples/command/home.html +++ /dev/null @@ -1,96 +0,0 @@ - - - -Command Example - - - - - - - - - - - - diff --git a/vendor/github.com/gorilla/websocket/examples/command/main.go b/vendor/github.com/gorilla/websocket/examples/command/main.go deleted file mode 100644 index f3f022e..0000000 --- a/vendor/github.com/gorilla/websocket/examples/command/main.go +++ /dev/null @@ -1,188 +0,0 @@ -// Copyright 2015 The Gorilla WebSocket Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package main - -import ( - "bufio" - "flag" - "io" - "log" - "net/http" - "os" - "os/exec" - "text/template" - "time" - - "github.com/gorilla/websocket" -) - -var ( - addr = flag.String("addr", "127.0.0.1:8080", "http service address") - cmdPath string - homeTempl = template.Must(template.ParseFiles("home.html")) -) - -const ( - // Time allowed to write a message to the peer. - writeWait = 10 * time.Second - - // Maximum message size allowed from peer. - maxMessageSize = 8192 - - // Time allowed to read the next pong message from the peer. - pongWait = 60 * time.Second - - // Send pings to peer with this period. Must be less than pongWait. 
- pingPeriod = (pongWait * 9) / 10 -) - -func pumpStdin(ws *websocket.Conn, w io.Writer) { - defer ws.Close() - ws.SetReadLimit(maxMessageSize) - ws.SetReadDeadline(time.Now().Add(pongWait)) - ws.SetPongHandler(func(string) error { ws.SetReadDeadline(time.Now().Add(pongWait)); return nil }) - for { - _, message, err := ws.ReadMessage() - if err != nil { - break - } - message = append(message, '\n') - if _, err := w.Write(message); err != nil { - break - } - } -} - -func pumpStdout(ws *websocket.Conn, r io.Reader, done chan struct{}) { - defer func() { - ws.Close() - close(done) - }() - s := bufio.NewScanner(r) - for s.Scan() { - ws.SetWriteDeadline(time.Now().Add(writeWait)) - if err := ws.WriteMessage(websocket.TextMessage, s.Bytes()); err != nil { - break - } - } - if s.Err() != nil { - log.Println("scan:", s.Err()) - } -} - -func ping(ws *websocket.Conn, done chan struct{}) { - ticker := time.NewTicker(pingPeriod) - defer ticker.Stop() - for { - select { - case <-ticker.C: - if err := ws.WriteControl(websocket.PingMessage, []byte{}, time.Now().Add(writeWait)); err != nil { - log.Println("ping:", err) - } - case <-done: - return - } - } -} - -func internalError(ws *websocket.Conn, msg string, err error) { - log.Println(msg, err) - ws.WriteMessage(websocket.TextMessage, []byte("Internal server error.")) -} - -var upgrader = websocket.Upgrader{} - -func serveWs(w http.ResponseWriter, r *http.Request) { - ws, err := upgrader.Upgrade(w, r, nil) - if err != nil { - log.Println("upgrade:", err) - return - } - - defer ws.Close() - - outr, outw, err := os.Pipe() - if err != nil { - internalError(ws, "stdout:", err) - return - } - defer outr.Close() - defer outw.Close() - - inr, inw, err := os.Pipe() - if err != nil { - internalError(ws, "stdin:", err) - return - } - defer inr.Close() - defer inw.Close() - - proc, err := os.StartProcess(cmdPath, flag.Args(), &os.ProcAttr{ - Files: []*os.File{inr, outw, outw}, - }) - if err != nil { - internalError(ws, "start:", err) - return - } - - inr.Close() - outw.Close() - - stdoutDone := make(chan struct{}) - go pumpStdout(ws, outr, stdoutDone) - go ping(ws, stdoutDone) - - pumpStdin(ws, inw) - - // Some commands will exit when stdin is closed. - inw.Close() - - // Other commands need a bonk on the head. - if err := proc.Signal(os.Interrupt); err != nil { - log.Println("inter:", err) - } - - select { - case <-stdoutDone: - case <-time.After(time.Second): - // A bigger bonk on the head. 
- if err := proc.Signal(os.Kill); err != nil { - log.Println("term:", err) - } - <-stdoutDone - } - - if _, err := proc.Wait(); err != nil { - log.Println("wait:", err) - } -} - -func serveHome(w http.ResponseWriter, r *http.Request) { - if r.URL.Path != "/" { - http.Error(w, "Not found", 404) - return - } - if r.Method != "GET" { - http.Error(w, "Method not allowed", 405) - return - } - w.Header().Set("Content-Type", "text/html; charset=utf-8") - homeTempl.Execute(w, r.Host) -} - -func main() { - flag.Parse() - if len(flag.Args()) < 1 { - log.Fatal("must specify at least one argument") - } - var err error - cmdPath, err = exec.LookPath(flag.Args()[0]) - if err != nil { - log.Fatal(err) - } - http.HandleFunc("/", serveHome) - http.HandleFunc("/ws", serveWs) - log.Fatal(http.ListenAndServe(*addr, nil)) -} diff --git a/vendor/github.com/gorilla/websocket/examples/echo/README.md b/vendor/github.com/gorilla/websocket/examples/echo/README.md deleted file mode 100644 index 6ad79ed..0000000 --- a/vendor/github.com/gorilla/websocket/examples/echo/README.md +++ /dev/null @@ -1,17 +0,0 @@ -# Client and server example - -This example shows a simple client and server. - -The server echoes messages sent to it. The client sends a message every second -and prints all messages received. - -To run the example, start the server: - - $ go run server.go - -Next, start the client: - - $ go run client.go - -The server includes a simple web client. To use the client, open -http://127.0.0.1:8080 in the browser and follow the instructions on the page. diff --git a/vendor/github.com/gorilla/websocket/examples/echo/client.go b/vendor/github.com/gorilla/websocket/examples/echo/client.go deleted file mode 100644 index 6578094..0000000 --- a/vendor/github.com/gorilla/websocket/examples/echo/client.go +++ /dev/null @@ -1,81 +0,0 @@ -// Copyright 2015 The Gorilla WebSocket Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build ignore - -package main - -import ( - "flag" - "log" - "net/url" - "os" - "os/signal" - "time" - - "github.com/gorilla/websocket" -) - -var addr = flag.String("addr", "localhost:8080", "http service address") - -func main() { - flag.Parse() - log.SetFlags(0) - - interrupt := make(chan os.Signal, 1) - signal.Notify(interrupt, os.Interrupt) - - u := url.URL{Scheme: "ws", Host: *addr, Path: "/echo"} - log.Printf("connecting to %s", u.String()) - - c, _, err := websocket.DefaultDialer.Dial(u.String(), nil) - if err != nil { - log.Fatal("dial:", err) - } - defer c.Close() - - done := make(chan struct{}) - - go func() { - defer c.Close() - defer close(done) - for { - _, message, err := c.ReadMessage() - if err != nil { - log.Println("read:", err) - return - } - log.Printf("recv: %s", message) - } - }() - - ticker := time.NewTicker(time.Second) - defer ticker.Stop() - - for { - select { - case t := <-ticker.C: - err := c.WriteMessage(websocket.TextMessage, []byte(t.String())) - if err != nil { - log.Println("write:", err) - return - } - case <-interrupt: - log.Println("interrupt") - // To cleanly close a connection, a client should send a close - // frame and wait for the server to close the connection. 
- err := c.WriteMessage(websocket.CloseMessage, websocket.FormatCloseMessage(websocket.CloseNormalClosure, "")) - if err != nil { - log.Println("write close:", err) - return - } - select { - case <-done: - case <-time.After(time.Second): - } - c.Close() - return - } - } -} diff --git a/vendor/github.com/gorilla/websocket/examples/echo/server.go b/vendor/github.com/gorilla/websocket/examples/echo/server.go deleted file mode 100644 index a685b09..0000000 --- a/vendor/github.com/gorilla/websocket/examples/echo/server.go +++ /dev/null @@ -1,132 +0,0 @@ -// Copyright 2015 The Gorilla WebSocket Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build ignore - -package main - -import ( - "flag" - "html/template" - "log" - "net/http" - - "github.com/gorilla/websocket" -) - -var addr = flag.String("addr", "localhost:8080", "http service address") - -var upgrader = websocket.Upgrader{} // use default options - -func echo(w http.ResponseWriter, r *http.Request) { - c, err := upgrader.Upgrade(w, r, nil) - if err != nil { - log.Print("upgrade:", err) - return - } - defer c.Close() - for { - mt, message, err := c.ReadMessage() - if err != nil { - log.Println("read:", err) - break - } - log.Printf("recv: %s", message) - err = c.WriteMessage(mt, message) - if err != nil { - log.Println("write:", err) - break - } - } -} - -func home(w http.ResponseWriter, r *http.Request) { - homeTemplate.Execute(w, "ws://"+r.Host+"/echo") -} - -func main() { - flag.Parse() - log.SetFlags(0) - http.HandleFunc("/echo", echo) - http.HandleFunc("/", home) - log.Fatal(http.ListenAndServe(*addr, nil)) -} - -var homeTemplate = template.Must(template.New("").Parse(` - - - - - - - - -Click "Open" to create a connection to the server, -"Send" to send a message to the server and "Close" to close the connection. -You can change the message and send multiple times. - - -Open -Close - -Send - - - - - -
Click "Open" to create a connection to the server, -"Send" to send a message to the server and "Close" to close the connection. -You can change the message and send multiple times. -
-
-Send -