diff --git a/constants/constants.go b/constants/constants.go new file mode 100644 index 000000000..5a7da08cc --- /dev/null +++ b/constants/constants.go @@ -0,0 +1,4 @@ +package constants + +// DefaultDockerRegistry - default docker registry +const DefaultDockerRegistry = "https://index.docker.io" diff --git a/glide.yaml b/glide.yaml index 6ab48ff69..4eb9164cd 100644 --- a/glide.yaml +++ b/glide.yaml @@ -17,9 +17,10 @@ import: version: a94a7ac054dc76e5ab6cf170a9d82faba8aaf33f subpackages: - status -- package: github.com/coreos/go-semver +- package: github.com/sirupsen/logrus subpackages: - - semver + - formatters + - formatters/logstash - package: github.com/gorilla/mux - package: github.com/urfave/negroni - package: golang.org/x/net @@ -46,7 +47,11 @@ import: - pkg/apis/extensions/v1beta1 - rest - tools/clientcmd +- package: github.com/docker/distribution + version: ^2.6.1 + subpackages: + - digest - package: github.com/Masterminds/semver version: ^1.3.0 -- package: github.com/sirupsen/logrus - version: ^1.0.0 +- package: github.com/opencontainers/go-digest + version: ^1.0.0-rc0 diff --git a/main.go b/main.go index 11c6bcc58..7f5ffa5c1 100644 --- a/main.go +++ b/main.go @@ -9,7 +9,9 @@ import ( "github.com/rusenask/keel/provider" "github.com/rusenask/keel/provider/kubernetes" + "github.com/rusenask/keel/registry" "github.com/rusenask/keel/trigger/http" + "github.com/rusenask/keel/trigger/poll" "github.com/rusenask/keel/trigger/pubsub" "github.com/rusenask/keel/types" "github.com/rusenask/keel/version" @@ -20,6 +22,7 @@ import ( // gcloud pubsub related config const ( EnvTriggerPubSub = "PUBSUB" // set to 1 or something to enable pub/sub trigger + EnvTriggerPoll = "POLL" // set to 1 or something to enable poll trigger EnvProjectID = "PROJECT_ID" ) @@ -154,6 +157,17 @@ func setupTriggers(ctx context.Context, k8sImplementer kubernetes.Implementer, p go subManager.Start(ctx) } + if os.Getenv(EnvTriggerPoll) != "" { + + registryClient := registry.New() + watcher := poll.NewRepositoryWatcher(providers, registryClient) + pollManager := poll.NewPollManager(k8sImplementer, watcher) + + // start poll manager, will finish with ctx + go watcher.Start(ctx) + go pollManager.Start(ctx) + } + teardown = func() { whs.Stop() } diff --git a/provider/kubernetes/common.go b/provider/kubernetes/common.go new file mode 100644 index 000000000..24defd046 --- /dev/null +++ b/provider/kubernetes/common.go @@ -0,0 +1,36 @@ +package kubernetes + +import ( + "strings" +) + +func addImageToPull(annotations map[string]string, image string) map[string]string { + existing, ok := annotations[forceUpdateImageAnnotation] + if ok { + // check if it's already there + if shouldPullImage(annotations, image) { + // skipping + return annotations + } + + annotations[forceUpdateImageAnnotation] = existing + "," + image + return annotations + } + annotations[forceUpdateImageAnnotation] = image + return annotations +} + +func shouldPullImage(annotations map[string]string, image string) bool { + imagesStr, ok := annotations[forceUpdateImageAnnotation] + if !ok { + return false + } + + images := strings.Split(imagesStr, ",") + for _, img := range images { + if img == image { + return true + } + } + return false +} diff --git a/provider/kubernetes/common_test.go b/provider/kubernetes/common_test.go new file mode 100644 index 000000000..8cb79eb62 --- /dev/null +++ b/provider/kubernetes/common_test.go @@ -0,0 +1,76 @@ +package kubernetes + +import ( + "reflect" + "testing" +) + +func Test_addImageToPull(t *testing.T) { + type args struct { 
+ annotations map[string]string + image string + } + tests := []struct { + name string + args args + want map[string]string + }{ + { + name: "empty", + args: args{annotations: make(map[string]string), image: "whatever"}, + want: map[string]string{forceUpdateImageAnnotation: "whatever"}, + }, + { + name: "not empty", + args: args{annotations: map[string]string{forceUpdateImageAnnotation: "foo"}, image: "bar"}, + want: map[string]string{forceUpdateImageAnnotation: "foo,bar"}, + }, + { + name: "not empty with same image", + args: args{annotations: map[string]string{forceUpdateImageAnnotation: "foo"}, image: "foo"}, + want: map[string]string{forceUpdateImageAnnotation: "foo"}, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + if got := addImageToPull(tt.args.annotations, tt.args.image); !reflect.DeepEqual(got, tt.want) { + t.Errorf("addImageToPull() = %v, want %v", got, tt.want) + } + }) + } +} + +func Test_shouldPullImage(t *testing.T) { + type args struct { + annotations map[string]string + image string + } + tests := []struct { + name string + args args + want bool + }{ + { + name: "should pull single image", + args: args{annotations: map[string]string{forceUpdateImageAnnotation: "bar"}, image: "bar"}, + want: true, + }, + { + name: "should pull multiple image", + args: args{annotations: map[string]string{forceUpdateImageAnnotation: "foo,bar,whatever"}, image: "bar"}, + want: true, + }, + { + name: "should not pull multiple image", + args: args{annotations: map[string]string{forceUpdateImageAnnotation: "foo,bar,whatever"}, image: "alpha"}, + want: false, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + if got := shouldPullImage(tt.args.annotations, tt.args.image); got != tt.want { + t.Errorf("shouldPullImage() = %v, want %v", got, tt.want) + } + }) + } +} diff --git a/provider/kubernetes/kubernetes.go b/provider/kubernetes/kubernetes.go index b89b7e823..2c682e38a 100644 --- a/provider/kubernetes/kubernetes.go +++ b/provider/kubernetes/kubernetes.go @@ -3,11 +3,15 @@ package kubernetes import ( "fmt" "regexp" + "strings" + "time" "k8s.io/client-go/pkg/api/v1" "k8s.io/client-go/pkg/apis/extensions/v1beta1" "github.com/rusenask/keel/types" + "github.com/rusenask/keel/util/image" + "github.com/rusenask/keel/util/policies" "github.com/rusenask/keel/util/version" log "github.com/Sirupsen/logrus" @@ -18,6 +22,12 @@ const ProviderName = "kubernetes" var versionreg = regexp.MustCompile(`:[^:]*$`) +// annotation used to specify which image to force pull +const forceUpdateImageAnnotation = "keel.sh/update-image" + +// forceUpdateResetTag - tag used to reset container to force pull image +const forceUpdateResetTag = "0.0.0" + // Provider - kubernetes provider for auto update type Provider struct { implementer Implementer @@ -99,7 +109,59 @@ func (p *Provider) processEvent(event *types.Event) (updated []*v1beta1.Deployme func (p *Provider) updateDeployments(deployments []v1beta1.Deployment) (updated []*v1beta1.Deployment, err error) { for _, deployment := range deployments { - err := p.implementer.Update(&deployment) + + reset, delta, err := checkForReset(deployment, p.implementer) + if err != nil { + log.WithFields(log.Fields{ + "error": err, + "namespace": deployment.Namespace, + "annotations": deployment.GetAnnotations(), + "deployment": deployment.Name, + }).Error("provider.kubernetes: got error while checking deployment for reset") + continue + } + + // need to get the new version in order to update + if reset { + // FIXME: giving some time 
for k8s to start updating as it + // throws an error if you try to modify deployment that's currently being updated + time.Sleep(2 * time.Second) + + current, err := p.getDeployment(deployment.Namespace, deployment.Name) + if err != nil { + log.WithFields(log.Fields{ + "error": err, + "namespace": deployment.Namespace, + "annotations": deployment.GetAnnotations(), + "deployment": deployment.Name, + }).Error("provider.kubernetes: got error while refreshing deployment after reset") + continue + } + // apply back our changes for images + refresh, err := applyChanges(*current, delta) + if err != nil { + log.WithFields(log.Fields{ + "error": err, + "namespace": deployment.Namespace, + "annotations": deployment.GetAnnotations(), + "deployment": deployment.Name, + }).Error("provider.kubernetes: got error while applying deployment changes after reset") + continue + } + err = p.implementer.Update(&refresh) + if err != nil { + log.WithFields(log.Fields{ + "error": err, + "namespace": deployment.Namespace, + "deployment": deployment.Name, + }).Error("provider.kubernetes: got error while update deployment") + continue + } + // success + continue + } + + err = p.implementer.Update(&deployment) if err != nil { log.WithFields(log.Fields{ "error": err, @@ -118,6 +180,76 @@ func (p *Provider) updateDeployments(deployments []v1beta1.Deployment) (updated return } +// applies required changes for deployment, looks for images with tag 0.0.0 and +// updates +func applyChanges(current v1beta1.Deployment, delta map[string]string) (v1beta1.Deployment, error) { + for idx, c := range current.Spec.Template.Spec.Containers { + if strings.HasSuffix(c.Image, forceUpdateResetTag) { + desiredImage, err := getDesiredImage(delta, c.Image) + if err != nil { + return v1beta1.Deployment{}, err + } + current.Spec.Template.Spec.Containers[idx].Image = desiredImage + log.Infof("provider.kubernetes: delta changed applied: %s", current.Spec.Template.Spec.Containers[idx].Image) + } + } + return current, nil +} + +func getDesiredImage(delta map[string]string, currentImage string) (string, error) { + currentRef, err := image.Parse(currentImage) + if err != nil { + return "", err + } + for repository, tag := range delta { + if repository == currentRef.Repository() { + ref, err := image.Parse(repository) + if err != nil { + return "", err + } + + // updating image + if ref.Registry() == image.DefaultRegistryHostname { + return fmt.Sprintf("%s:%s", ref.ShortName(), tag), nil + } + return fmt.Sprintf("%s:%s", ref.Repository(), tag), nil + } + } + return "", fmt.Errorf("image %s not found in deltas", currentImage) +} + +// checkForReset returns delta to apply +func checkForReset(deployment v1beta1.Deployment, implementer Implementer) (bool, map[string]string, error) { + reset := false + annotations := deployment.GetAnnotations() + delta := make(map[string]string) + for idx, c := range deployment.Spec.Template.Spec.Containers { + if shouldPullImage(annotations, c.Image) { + ref, err := image.Parse(c.Image) + if err != nil { + return false, nil, err + } + + c = updateContainer(c, ref, forceUpdateResetTag) + + // ensuring pull policy + c.ImagePullPolicy = v1.PullAlways + log.WithFields(log.Fields{ + "image": c.Image, + "namespace": deployment.Namespace, + "deployment": deployment.Name, + }).Info("provider.kubernetes: reseting image for force pull...") + deployment.Spec.Template.Spec.Containers[idx] = c + reset = true + delta[ref.Repository()] = ref.Tag() + } + } + if reset { + return reset, delta, implementer.Update(&deployment) + } + return 
false, nil, nil +} + // getDeployment - helper function to get specific deployment func (p *Provider) getDeployment(namespace, name string) (*v1beta1.Deployment, error) { return p.implementer.Deployment(namespace, name) @@ -125,10 +257,6 @@ func (p *Provider) getDeployment(namespace, name string) (*v1beta1.Deployment, e // gets impacted deployments by changed repository func (p *Provider) impactedDeployments(repo *types.Repository) ([]v1beta1.Deployment, error) { - newVersion, err := version.GetVersion(repo.Tag) - if err != nil { - return nil, fmt.Errorf("failed to parse version from repository tag, error: %s", err) - } deploymentLists, err := p.deployments() if err != nil { @@ -142,101 +270,64 @@ func (p *Provider) impactedDeployments(repo *types.Repository) ([]v1beta1.Deploy for _, deploymentList := range deploymentLists { for _, deployment := range deploymentList.Items { + labels := deployment.GetLabels() - policyStr, ok := labels[types.KeelPolicyLabel] - // if no policy is set - skipping this deployment - if !ok { + + policy := policies.GetPolicy(labels) + if policy == types.PolicyTypeNone { + // skip continue } - policy := types.ParsePolicy(policyStr) - - log.WithFields(log.Fields{ - "labels": labels, - "name": deployment.Name, - "namespace": deployment.Namespace, - "policy": policy, - }).Info("provider.kubernetes: keel policy found, checking deployment...") - - shouldUpdateDeployment := false - for idx, c := range deployment.Spec.Template.Spec.Containers { - // Remove version if any - containerImageName := versionreg.ReplaceAllString(c.Image, "") - - log.WithFields(log.Fields{ - "name": deployment.Name, - "namespace": deployment.Namespace, - "parsed_image_name": containerImageName, - "target_image_name": repo.Name, - "target_tag": repo.Tag, - "policy": policy, - "image": c.Image, - }).Info("provider.kubernetes: checking image") - - if containerImageName != repo.Name { - continue - } + // annotation cleanup + annotations := deployment.GetAnnotations() + delete(annotations, forceUpdateImageAnnotation) + deployment.SetAnnotations(annotations) - currentVersion, err := version.GetVersionFromImageName(c.Image) - if err != nil { - log.WithFields(log.Fields{ - "error": err, - "image_name": c.Image, - "keel_policy": policy, - }).Error("provider.kubernetes: failed to get image version, is it tagged as semver?") + newVersion, err := version.GetVersion(repo.Tag) + if err != nil { + // failed to get new version tag + if policy == types.PolicyTypeForce { + updated, shouldUpdateDeployment, err := p.checkUnversionedDeployment(policy, repo, deployment) + if err != nil { + log.WithFields(log.Fields{ + "error": err, + "deployment": deployment.Name, + "namespace": deployment.Namespace, + }).Error("provider.kubernetes: got error while checking unversioned deployment") + continue + } + + if shouldUpdateDeployment { + impacted = append(impacted, updated) + } + + // success, unversioned deployment marked for update continue } log.WithFields(log.Fields{ - "labels": labels, - "name": deployment.Name, - "namespace": deployment.Namespace, - "image": c.Image, - "current_version": currentVersion.String(), - "policy": policy, - }).Info("provider.kubernetes: current image version") - - shouldUpdateContainer, err := version.ShouldUpdate(currentVersion, newVersion, policy) - if err != nil { - log.WithFields(log.Fields{ - "error": err, - "new_version": newVersion.String(), - "current_version": currentVersion.String(), - "keel_policy": policy, - }).Error("provider.kubernetes: got error while checking whether deployment 
should be updated") - continue - } + "error": err, + "repository_tag": repo.Tag, + "deployment": deployment.Name, + "namespace": deployment.Namespace, + "policy": policy, + }).Warn("provider.kubernetes: got error while parsing repository tag") + continue + } + updated, shouldUpdateDeployment, err := p.checkVersionedDeployment(newVersion, policy, repo, deployment) + if err != nil { log.WithFields(log.Fields{ - "labels": labels, - "name": deployment.Name, - "namespace": deployment.Namespace, - "image": c.Image, - "current_version": currentVersion.String(), - "new_version": newVersion.String(), - "policy": policy, - "should_update": shouldUpdateContainer, - }).Info("provider.kubernetes: checked version, deciding whether to update") - - if shouldUpdateContainer { - // updating image - c.Image = fmt.Sprintf("%s:%s", containerImageName, newVersion.String()) - deployment.Spec.Template.Spec.Containers[idx] = c - // marking this deployment for update - shouldUpdateDeployment = true - - log.WithFields(log.Fields{ - "parsed_image": containerImageName, - "raw_image_name": c.Image, - "target_image": repo.Name, - "target_image_tag": repo.Tag, - "policy": policy, - }).Info("provider.kubernetes: impacted deployment container found") - } + "error": err, + "deployment": deployment.Name, + "namespace": deployment.Namespace, + }).Error("provider.kubernetes: got error while checking versioned deployment") + continue } if shouldUpdateDeployment { - impacted = append(impacted, deployment) + impacted = append(impacted, updated) } } } diff --git a/provider/kubernetes/kubernetes_test.go b/provider/kubernetes/kubernetes_test.go index 12f569eca..66f8444f8 100644 --- a/provider/kubernetes/kubernetes_test.go +++ b/provider/kubernetes/kubernetes_test.go @@ -1,14 +1,12 @@ package kubernetes import ( - meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "testing" + "github.com/rusenask/keel/types" + meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/client-go/pkg/api/v1" "k8s.io/client-go/pkg/apis/extensions/v1beta1" - - "github.com/rusenask/keel/types" - - "testing" ) type fakeImplementer struct { @@ -202,7 +200,7 @@ func TestGetImpacted(t *testing.T) { } if len(deps) != 1 { - t.Errorf("expected to find 1 deployment but found %s", len(deps)) + t.Errorf("expected to find 1 deployment but found %d", len(deps)) } found := false @@ -510,16 +508,17 @@ func TestGetImpactedUntaggedImage(t *testing.T) { v1beta1.Deployment{ meta_v1.TypeMeta{}, meta_v1.ObjectMeta{ - Name: "dep-1", - Namespace: "xxxx", - Labels: map[string]string{types.KeelPolicyLabel: "all"}, + Name: "dep-1", + Namespace: "xxxx", + Labels: map[string]string{types.KeelPolicyLabel: "all"}, + Annotations: map[string]string{}, }, v1beta1.DeploymentSpec{ Template: v1.PodTemplateSpec{ Spec: v1.PodSpec{ Containers: []v1.Container{ v1.Container{ - Image: "gcr.io/v2-namespace/hello-world", + Image: "gcr.io/v2-namespace/foo-world", }, }, }, @@ -530,9 +529,10 @@ func TestGetImpactedUntaggedImage(t *testing.T) { v1beta1.Deployment{ meta_v1.TypeMeta{}, meta_v1.ObjectMeta{ - Name: "dep-2", - Namespace: "xxxx", - Labels: map[string]string{types.KeelPolicyLabel: "all"}, + Name: "dep-2", + Namespace: "xxxx", + Annotations: map[string]string{}, + Labels: map[string]string{types.KeelPolicyLabel: "all"}, }, v1beta1.DeploymentSpec{ Template: v1.PodTemplateSpec{ @@ -585,3 +585,99 @@ func TestGetImpactedUntaggedImage(t *testing.T) { } } + +// test to check whether we get impacted deployment when it's untagged (we should) +func TestGetImpactedUntaggedOneImage(t *testing.T) { + fp := 
&fakeImplementer{} + fp.namespaces = &v1.NamespaceList{ + Items: []v1.Namespace{ + v1.Namespace{ + meta_v1.TypeMeta{}, + meta_v1.ObjectMeta{Name: "xxxx"}, + v1.NamespaceSpec{}, + v1.NamespaceStatus{}, + }, + }, + } + fp.deploymentList = &v1beta1.DeploymentList{ + Items: []v1beta1.Deployment{ + v1beta1.Deployment{ + meta_v1.TypeMeta{}, + meta_v1.ObjectMeta{ + Name: "dep-1", + Namespace: "xxxx", + Labels: map[string]string{types.KeelPolicyLabel: "all"}, + Annotations: map[string]string{}, + }, + v1beta1.DeploymentSpec{ + Template: v1.PodTemplateSpec{ + Spec: v1.PodSpec{ + Containers: []v1.Container{ + v1.Container{ + Image: "gcr.io/v2-namespace/hello-world", + }, + }, + }, + }, + }, + v1beta1.DeploymentStatus{}, + }, + v1beta1.Deployment{ + meta_v1.TypeMeta{}, + meta_v1.ObjectMeta{ + Name: "dep-2", + Namespace: "xxxx", + Annotations: map[string]string{}, + Labels: map[string]string{types.KeelPolicyLabel: "all"}, + }, + v1beta1.DeploymentSpec{ + Template: v1.PodTemplateSpec{ + Spec: v1.PodSpec{ + Containers: []v1.Container{ + v1.Container{ + Image: "gcr.io/v2-namespace/hello-world:1.1.1", + }, + }, + }, + }, + }, + v1beta1.DeploymentStatus{}, + }, + }, + } + + provider, err := NewProvider(fp) + if err != nil { + t.Fatalf("failed to get provider: %s", err) + } + + // creating "new version" event + repo := &types.Repository{ + Name: "gcr.io/v2-namespace/hello-world", + Tag: "1.1.2", + } + + deps, err := provider.impactedDeployments(repo) + if err != nil { + t.Errorf("failed to get deployments: %s", err) + } + + if len(deps) != 2 { + t.Errorf("expected to find 2 deployment but found %s", len(deps)) + } + + found := false + for _, c := range deps[0].Spec.Template.Spec.Containers { + + containerImageName := versionreg.ReplaceAllString(c.Image, "") + + if containerImageName == repo.Name { + found = true + } + } + + if !found { + t.Errorf("couldn't find expected deployment in impacted deployment list") + } + +} diff --git a/provider/kubernetes/unversioned_updates.go b/provider/kubernetes/unversioned_updates.go new file mode 100644 index 000000000..384d885c4 --- /dev/null +++ b/provider/kubernetes/unversioned_updates.go @@ -0,0 +1,96 @@ +package kubernetes + +import ( + "fmt" + + "k8s.io/client-go/pkg/apis/extensions/v1beta1" + + "github.com/rusenask/keel/types" + "github.com/rusenask/keel/util/image" + + log "github.com/Sirupsen/logrus" +) + +func (p *Provider) checkUnversionedDeployment(policy types.PolicyType, repo *types.Repository, deployment v1beta1.Deployment) (updated v1beta1.Deployment, shouldUpdateDeployment bool, err error) { + eventRepoRef, err := image.Parse(repo.Name) + if err != nil { + return + } + + labels := deployment.GetLabels() + log.WithFields(log.Fields{ + "labels": labels, + "name": deployment.Name, + "namespace": deployment.Namespace, + "policy": policy, + }).Info("provider.kubernetes.checkVersionedDeployment: keel policy found, checking deployment...") + + shouldUpdateDeployment = false + + for idx, c := range deployment.Spec.Template.Spec.Containers { + // Remove version if any + // containerImageName := versionreg.ReplaceAllString(c.Image, "") + + containerImageRef, err := image.Parse(c.Image) + if err != nil { + log.WithFields(log.Fields{ + "error": err, + "image_name": c.Image, + }).Error("provider.kubernetes: failed to parse image name") + continue + } + + log.WithFields(log.Fields{ + "name": deployment.Name, + "namespace": deployment.Namespace, + "parsed_image_name": containerImageRef.Remote(), + "target_image_name": repo.Name, + "target_tag": repo.Tag, + "policy": 
policy, + "image": c.Image, + }).Info("provider.kubernetes: checking image") + + if containerImageRef.Repository() != eventRepoRef.Repository() { + log.WithFields(log.Fields{ + "parsed_image_name": containerImageRef.Remote(), + "target_image_name": repo.Name, + }).Info("provider.kubernetes: images do not match, ignoring") + continue + } + + // updating image + if containerImageRef.Registry() == image.DefaultRegistryHostname { + c.Image = fmt.Sprintf("%s:%s", containerImageRef.ShortName(), repo.Tag) + } else { + c.Image = fmt.Sprintf("%s:%s", containerImageRef.Repository(), repo.Tag) + } + + deployment.Spec.Template.Spec.Containers[idx] = c + // marking this deployment for update + shouldUpdateDeployment = true + + // updating annotations + annotations := deployment.GetAnnotations() + // updating digest if available + if repo.Digest != "" { + + // annotations[types.KeelDigestAnnotation+"/"+containerImageRef.Remote()] = repo.Digest + } + + // adding image for updates + annotations = addImageToPull(annotations, c.Image) + + deployment.SetAnnotations(annotations) + + log.WithFields(log.Fields{ + "parsed_image": containerImageRef.Remote(), + "raw_image_name": c.Image, + "target_image": repo.Name, + "target_image_tag": repo.Tag, + "policy": policy, + }).Info("provider.kubernetes: impacted deployment container found") + + } + + return deployment, shouldUpdateDeployment, nil +} diff --git a/provider/kubernetes/unversioned_updates_test.go b/provider/kubernetes/unversioned_updates_test.go new file mode 100644 index 000000000..3b1a160d5 --- /dev/null +++ b/provider/kubernetes/unversioned_updates_test.go @@ -0,0 +1,206 @@ +package kubernetes + +import ( + "reflect" + "testing" + + "github.com/rusenask/keel/types" + meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/client-go/pkg/api/v1" + "k8s.io/client-go/pkg/apis/extensions/v1beta1" +) + +func TestProvider_checkUnversionedDeployment(t *testing.T) { + type fields struct { + implementer Implementer + events chan *types.Event + stop chan struct{} + } + type args struct { + policy types.PolicyType + repo *types.Repository + deployment v1beta1.Deployment + } + tests := []struct { + name string + fields fields + args args + wantUpdated v1beta1.Deployment + wantShouldUpdateDeployment bool + wantErr bool + }{ + { + name: "force update untagged to latest", + args: args{ + policy: types.PolicyTypeForce, + repo: &types.Repository{Name: "gcr.io/v2-namespace/hello-world", Tag: "latest"}, + deployment: v1beta1.Deployment{ + meta_v1.TypeMeta{}, + meta_v1.ObjectMeta{ + Name: "dep-1", + Namespace: "xxxx", + Annotations: map[string]string{}, + Labels: map[string]string{types.KeelPolicyLabel: "all"}, + }, + v1beta1.DeploymentSpec{ + Template: v1.PodTemplateSpec{ + Spec: v1.PodSpec{ + Containers: []v1.Container{ + v1.Container{ + Image: "gcr.io/v2-namespace/hello-world", + }, + }, + }, + }, + }, + v1beta1.DeploymentStatus{}, + }, + }, + wantUpdated: v1beta1.Deployment{ + meta_v1.TypeMeta{}, + meta_v1.ObjectMeta{ + Name: "dep-1", + Namespace: "xxxx", + Annotations: map[string]string{forceUpdateImageAnnotation: "gcr.io/v2-namespace/hello-world:latest"}, + Labels: map[string]string{types.KeelPolicyLabel: "all"}, + }, + v1beta1.DeploymentSpec{ + Template: v1.PodTemplateSpec{ + Spec: v1.PodSpec{ + Containers: []v1.Container{ + v1.Container{ + Image: "gcr.io/v2-namespace/hello-world:latest", + }, + }, + }, + }, + }, + v1beta1.DeploymentStatus{}, + }, + wantShouldUpdateDeployment: true, + wantErr: false, + }, + { + name: "different image name ", + args: args{ + policy: 
types.PolicyTypeForce, + repo: &types.Repository{Name: "gcr.io/v2-namespace/hello-world", Tag: "latest"}, + deployment: v1beta1.Deployment{ + meta_v1.TypeMeta{}, + meta_v1.ObjectMeta{ + Name: "dep-1", + Namespace: "xxxx", + Annotations: map[string]string{}, + Labels: map[string]string{types.KeelPolicyLabel: "all"}, + }, + v1beta1.DeploymentSpec{ + Template: v1.PodTemplateSpec{ + Spec: v1.PodSpec{ + Containers: []v1.Container{ + v1.Container{ + Image: "gcr.io/v2-namespace/goodbye-world:earliest", + }, + }, + }, + }, + }, + v1beta1.DeploymentStatus{}, + }, + }, + wantUpdated: v1beta1.Deployment{ + meta_v1.TypeMeta{}, + meta_v1.ObjectMeta{ + Name: "dep-1", + Namespace: "xxxx", + Annotations: map[string]string{}, + Labels: map[string]string{types.KeelPolicyLabel: "all"}, + }, + v1beta1.DeploymentSpec{ + Template: v1.PodTemplateSpec{ + Spec: v1.PodSpec{ + Containers: []v1.Container{ + v1.Container{ + Image: "gcr.io/v2-namespace/goodbye-world:earliest", + }, + }, + }, + }, + }, + v1beta1.DeploymentStatus{}, + }, + wantShouldUpdateDeployment: false, + wantErr: false, + }, + { + name: "dockerhub short image name ", + args: args{ + policy: types.PolicyTypeForce, + repo: &types.Repository{Name: "karolisr/keel", Tag: "0.2.0"}, + deployment: v1beta1.Deployment{ + meta_v1.TypeMeta{}, + meta_v1.ObjectMeta{ + Name: "dep-1", + Namespace: "xxxx", + Annotations: map[string]string{}, + Labels: map[string]string{types.KeelPolicyLabel: "force"}, + }, + v1beta1.DeploymentSpec{ + Template: v1.PodTemplateSpec{ + Spec: v1.PodSpec{ + Containers: []v1.Container{ + v1.Container{ + Image: "karolisr/keel:latest", + }, + }, + }, + }, + }, + v1beta1.DeploymentStatus{}, + }, + }, + wantUpdated: v1beta1.Deployment{ + meta_v1.TypeMeta{}, + meta_v1.ObjectMeta{ + Name: "dep-1", + Namespace: "xxxx", + Annotations: map[string]string{forceUpdateImageAnnotation: "karolisr/keel:0.2.0"}, + Labels: map[string]string{types.KeelPolicyLabel: "force"}, + }, + v1beta1.DeploymentSpec{ + Template: v1.PodTemplateSpec{ + Spec: v1.PodSpec{ + Containers: []v1.Container{ + v1.Container{ + Image: "karolisr/keel:0.2.0", + }, + }, + }, + }, + }, + v1beta1.DeploymentStatus{}, + }, + wantShouldUpdateDeployment: true, + wantErr: false, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + p := &Provider{ + implementer: tt.fields.implementer, + events: tt.fields.events, + stop: tt.fields.stop, + } + gotUpdated, gotShouldUpdateDeployment, err := p.checkUnversionedDeployment(tt.args.policy, tt.args.repo, tt.args.deployment) + if (err != nil) != tt.wantErr { + t.Errorf("Provider.checkUnversionedDeployment() error = %v, wantErr %v", err, tt.wantErr) + return + } + if !reflect.DeepEqual(gotUpdated, tt.wantUpdated) { + t.Errorf("Provider.checkUnversionedDeployment() gotUpdated = %v, want %v", gotUpdated, tt.wantUpdated) + } + if gotShouldUpdateDeployment != tt.wantShouldUpdateDeployment { + t.Errorf("Provider.checkUnversionedDeployment() gotShouldUpdateDeployment = %v, want %v", gotShouldUpdateDeployment, tt.wantShouldUpdateDeployment) + } + }) + } +} diff --git a/provider/kubernetes/versioned_updates.go b/provider/kubernetes/versioned_updates.go new file mode 100644 index 000000000..40eb8f6d5 --- /dev/null +++ b/provider/kubernetes/versioned_updates.go @@ -0,0 +1,173 @@ +package kubernetes + +import ( + "fmt" + + "k8s.io/client-go/pkg/api/v1" + "k8s.io/client-go/pkg/apis/extensions/v1beta1" + + "github.com/rusenask/keel/types" + "github.com/rusenask/keel/util/image" + + "github.com/rusenask/keel/util/version" + + log 
"github.com/Sirupsen/logrus" +) + +func (p *Provider) checkVersionedDeployment(newVersion *types.Version, policy types.PolicyType, repo *types.Repository, deployment v1beta1.Deployment) (updated v1beta1.Deployment, shouldUpdateDeployment bool, err error) { + eventRepoRef, err := image.Parse(repo.Name) + if err != nil { + return + } + + labels := deployment.GetLabels() + + log.WithFields(log.Fields{ + "labels": labels, + "name": deployment.Name, + "namespace": deployment.Namespace, + "policy": policy, + }).Info("provider.kubernetes.checkVersionedDeployment: keel policy found, checking deployment...") + + shouldUpdateDeployment = false + + for idx, c := range deployment.Spec.Template.Spec.Containers { + // Remove version if any + // containerImageName := versionreg.ReplaceAllString(c.Image, "") + + conatinerImageRef, err := image.Parse(c.Image) + if err != nil { + log.WithFields(log.Fields{ + "error": err, + "image_name": c.Image, + }).Error("provider.kubernetes: failed to parse image name") + continue + } + + log.WithFields(log.Fields{ + "name": deployment.Name, + "namespace": deployment.Namespace, + "parsed_image_name": conatinerImageRef.Remote(), + "target_image_name": repo.Name, + "target_tag": repo.Tag, + "policy": policy, + "image": c.Image, + }).Info("provider.kubernetes: checking image") + + if conatinerImageRef.Repository() != eventRepoRef.Repository() { + log.WithFields(log.Fields{ + "parsed_image_name": conatinerImageRef.Remote(), + "target_image_name": repo.Name, + }).Info("provider.kubernetes: images do not match, ignoring") + continue + } + + // if policy is force, don't bother with version checking + // same with `latest` images, update them to versioned ones + if policy == types.PolicyTypeForce || conatinerImageRef.Tag() == "latest" { + c = updateContainer(c, conatinerImageRef, newVersion.String()) + + deployment.Spec.Template.Spec.Containers[idx] = c + + // marking this deployment for update + shouldUpdateDeployment = true + // updating digest if available + annotations := deployment.GetAnnotations() + + if repo.Digest != "" { + // annotations[types.KeelDigestAnnotation+"/"+conatinerImageRef.Remote()] = repo.Digest + } + annotations = addImageToPull(annotations, c.Image) + + deployment.SetAnnotations(annotations) + log.WithFields(log.Fields{ + "parsed_image": conatinerImageRef.Remote(), + "raw_image_name": c.Image, + "target_image": repo.Name, + "target_image_tag": repo.Tag, + "policy": policy, + }).Info("provider.kubernetes: impacted deployment container found") + + // success, moving to next container + continue + } + + currentVersion, err := version.GetVersionFromImageName(c.Image) + if err != nil { + log.WithFields(log.Fields{ + "error": err, + "container_image": c.Image, + "container_image_tag": conatinerImageRef.Tag(), + "keel_policy": policy, + }).Error("provider.kubernetes: failed to get image version, is it tagged as semver?") + continue + } + + log.WithFields(log.Fields{ + "labels": labels, + "name": deployment.Name, + "namespace": deployment.Namespace, + "image": c.Image, + "current_version": currentVersion.String(), + "policy": policy, + }).Info("provider.kubernetes: current image version") + + shouldUpdateContainer, err := version.ShouldUpdate(currentVersion, newVersion, policy) + if err != nil { + log.WithFields(log.Fields{ + "error": err, + "new_version": newVersion.String(), + "current_version": currentVersion.String(), + "keel_policy": policy, + }).Error("provider.kubernetes: got error while checking whether deployment should be updated") + continue + } + + 
log.WithFields(log.Fields{ + "labels": labels, + "name": deployment.Name, + "namespace": deployment.Namespace, + "image": c.Image, + "current_version": currentVersion.String(), + "new_version": newVersion.String(), + "policy": policy, + "should_update": shouldUpdateContainer, + }).Info("provider.kubernetes: checked version, deciding whether to update") + + if shouldUpdateContainer { + c = updateContainer(c, conatinerImageRef, newVersion.String()) + deployment.Spec.Template.Spec.Containers[idx] = c + // marking this deployment for update + shouldUpdateDeployment = true + + // updating annotations + annotations := deployment.GetAnnotations() + // updating digest if available + if repo.Digest != "" { + // annotations[types.KeelDigestAnnotation+"/"+conatinerImageRef.Remote()] = repo.Digest + } + deployment.SetAnnotations(annotations) + + log.WithFields(log.Fields{ + "parsed_image": conatinerImageRef.Remote(), + "raw_image_name": c.Image, + "target_image": repo.Name, + "target_image_tag": repo.Tag, + "policy": policy, + }).Info("provider.kubernetes: impacted deployment container found") + } + } + + return deployment, shouldUpdateDeployment, nil +} + +func updateContainer(container v1.Container, ref *image.Reference, version string) v1.Container { + // updating image + if ref.Registry() == image.DefaultRegistryHostname { + container.Image = fmt.Sprintf("%s:%s", ref.ShortName(), version) + } else { + container.Image = fmt.Sprintf("%s:%s", ref.Repository(), version) + } + + return container +} diff --git a/provider/kubernetes/versioned_updates_test.go b/provider/kubernetes/versioned_updates_test.go new file mode 100644 index 000000000..8e87ee69b --- /dev/null +++ b/provider/kubernetes/versioned_updates_test.go @@ -0,0 +1,284 @@ +package kubernetes + +import ( + "reflect" + "testing" + + "github.com/rusenask/keel/types" + "github.com/rusenask/keel/util/version" + + meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/client-go/pkg/api/v1" + "k8s.io/client-go/pkg/apis/extensions/v1beta1" +) + +func unsafeGetVersion(ver string) *types.Version { + v, err := version.GetVersion(ver) + if err != nil { + panic(err) + } + return v +} + +func TestProvider_checkVersionedDeployment(t *testing.T) { + type fields struct { + implementer Implementer + events chan *types.Event + stop chan struct{} + } + type args struct { + newVersion *types.Version + policy types.PolicyType + repo *types.Repository + deployment v1beta1.Deployment + } + tests := []struct { + name string + fields fields + args args + wantUpdated v1beta1.Deployment + wantShouldUpdateDeployment bool + wantErr bool + }{ + { + name: "standard version bump", + args: args{ + newVersion: unsafeGetVersion("1.1.2"), + policy: types.PolicyTypeAll, + repo: &types.Repository{Name: "gcr.io/v2-namespace/hello-world", Tag: "1.1.2"}, + deployment: v1beta1.Deployment{ + meta_v1.TypeMeta{}, + meta_v1.ObjectMeta{ + Name: "dep-1", + Namespace: "xxxx", + Annotations: map[string]string{}, + Labels: map[string]string{types.KeelPolicyLabel: "all"}, + }, + v1beta1.DeploymentSpec{ + Template: v1.PodTemplateSpec{ + Spec: v1.PodSpec{ + Containers: []v1.Container{ + v1.Container{ + Image: "gcr.io/v2-namespace/hello-world:1.1.1", + }, + }, + }, + }, + }, + v1beta1.DeploymentStatus{}, + }, + }, + wantUpdated: v1beta1.Deployment{ + meta_v1.TypeMeta{}, + meta_v1.ObjectMeta{ + Name: "dep-1", + Namespace: "xxxx", + Annotations: map[string]string{}, + Labels: map[string]string{types.KeelPolicyLabel: "all"}, + }, + v1beta1.DeploymentSpec{ + Template: v1.PodTemplateSpec{ + Spec: 
v1.PodSpec{ + Containers: []v1.Container{ + v1.Container{ + Image: "gcr.io/v2-namespace/hello-world:1.1.2", + }, + }, + }, + }, + }, + v1beta1.DeploymentStatus{}, + }, + wantShouldUpdateDeployment: true, + wantErr: false, + }, + { + name: "standard ignore version bump", + args: args{ + newVersion: unsafeGetVersion("1.1.1"), + policy: types.PolicyTypeAll, + repo: &types.Repository{Name: "gcr.io/v2-namespace/hello-world", Tag: "1.1.1"}, + deployment: v1beta1.Deployment{ + meta_v1.TypeMeta{}, + meta_v1.ObjectMeta{ + Name: "dep-1", + Namespace: "xxxx", + Annotations: map[string]string{}, + Labels: map[string]string{types.KeelPolicyLabel: "all"}, + }, + v1beta1.DeploymentSpec{ + Template: v1.PodTemplateSpec{ + Spec: v1.PodSpec{ + Containers: []v1.Container{ + v1.Container{ + Image: "gcr.io/v2-namespace/hello-world:1.1.1", + }, + }, + }, + }, + }, + v1beta1.DeploymentStatus{}, + }, + }, + wantUpdated: v1beta1.Deployment{ + meta_v1.TypeMeta{}, + meta_v1.ObjectMeta{ + Name: "dep-1", + Namespace: "xxxx", + Annotations: map[string]string{}, + Labels: map[string]string{types.KeelPolicyLabel: "all"}, + }, + v1beta1.DeploymentSpec{ + Template: v1.PodTemplateSpec{ + Spec: v1.PodSpec{ + Containers: []v1.Container{ + v1.Container{ + Image: "gcr.io/v2-namespace/hello-world:1.1.1", + }, + }, + }, + }, + }, + v1beta1.DeploymentStatus{}, + }, + wantShouldUpdateDeployment: false, + wantErr: false, + }, + { + name: "multiple containers, version bump one", + args: args{ + newVersion: unsafeGetVersion("1.1.2"), + policy: types.PolicyTypeAll, + repo: &types.Repository{Name: "gcr.io/v2-namespace/hello-world", Tag: "1.1.2"}, + deployment: v1beta1.Deployment{ + meta_v1.TypeMeta{}, + meta_v1.ObjectMeta{ + Name: "dep-1", + Namespace: "xxxx", + Annotations: map[string]string{}, + Labels: map[string]string{types.KeelPolicyLabel: "all"}, + }, + v1beta1.DeploymentSpec{ + Template: v1.PodTemplateSpec{ + Spec: v1.PodSpec{ + Containers: []v1.Container{ + v1.Container{ + Image: "gcr.io/v2-namespace/hello-world:1.1.1", + }, + v1.Container{ + Image: "yo-world:1.1.1", + }, + }, + }, + }, + }, + v1beta1.DeploymentStatus{}, + }, + }, + wantUpdated: v1beta1.Deployment{ + meta_v1.TypeMeta{}, + meta_v1.ObjectMeta{ + Name: "dep-1", + Namespace: "xxxx", + Annotations: map[string]string{}, + Labels: map[string]string{types.KeelPolicyLabel: "all"}, + }, + v1beta1.DeploymentSpec{ + Template: v1.PodTemplateSpec{ + Spec: v1.PodSpec{ + Containers: []v1.Container{ + v1.Container{ + Image: "gcr.io/v2-namespace/hello-world:1.1.2", + }, + v1.Container{ + Image: "yo-world:1.1.1", + }, + }, + }, + }, + }, + v1beta1.DeploymentStatus{}, + }, + wantShouldUpdateDeployment: true, + wantErr: false, + }, + { + name: "force update untagged container", + args: args{ + newVersion: unsafeGetVersion("1.1.2"), + policy: types.PolicyTypeForce, + repo: &types.Repository{Name: "gcr.io/v2-namespace/hello-world", Tag: "1.1.2"}, + deployment: v1beta1.Deployment{ + meta_v1.TypeMeta{}, + meta_v1.ObjectMeta{ + Name: "dep-1", + Namespace: "xxxx", + Annotations: map[string]string{}, + Labels: map[string]string{types.KeelPolicyLabel: "force"}, + }, + v1beta1.DeploymentSpec{ + Template: v1.PodTemplateSpec{ + Spec: v1.PodSpec{ + Containers: []v1.Container{ + v1.Container{ + Image: "gcr.io/v2-namespace/hello-world:latest", + }, + v1.Container{ + Image: "yo-world:1.1.1", + }, + }, + }, + }, + }, + v1beta1.DeploymentStatus{}, + }, + }, + wantUpdated: v1beta1.Deployment{ + meta_v1.TypeMeta{}, + meta_v1.ObjectMeta{ + Name: "dep-1", + Namespace: "xxxx", + Annotations: 
map[string]string{forceUpdateImageAnnotation: "gcr.io/v2-namespace/hello-world:1.1.2"}, + Labels: map[string]string{types.KeelPolicyLabel: "force"}, + }, + v1beta1.DeploymentSpec{ + Template: v1.PodTemplateSpec{ + Spec: v1.PodSpec{ + Containers: []v1.Container{ + v1.Container{ + Image: "gcr.io/v2-namespace/hello-world:1.1.2", + }, + v1.Container{ + Image: "yo-world:1.1.1", + }, + }, + }, + }, + }, + v1beta1.DeploymentStatus{}, + }, + wantShouldUpdateDeployment: true, + wantErr: false, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + p := &Provider{ + implementer: tt.fields.implementer, + events: tt.fields.events, + stop: tt.fields.stop, + } + gotUpdated, gotShouldUpdateDeployment, err := p.checkVersionedDeployment(tt.args.newVersion, tt.args.policy, tt.args.repo, tt.args.deployment) + if (err != nil) != tt.wantErr { + t.Errorf("Provider.checkVersionedDeployment() error = %v, wantErr %v", err, tt.wantErr) + return + } + if !reflect.DeepEqual(gotUpdated, tt.wantUpdated) { + t.Errorf("Provider.checkVersionedDeployment() gotUpdated = %v, want %v", gotUpdated, tt.wantUpdated) + } + if gotShouldUpdateDeployment != tt.wantShouldUpdateDeployment { + t.Errorf("Provider.checkVersionedDeployment() gotShouldUpdateDeployment = %v, want %v", gotShouldUpdateDeployment, tt.wantShouldUpdateDeployment) + } + }) + } +} diff --git a/readme.md b/readme.md index 0f56b550c..7e6b906e4 100644 --- a/readme.md +++ b/readme.md @@ -4,7 +4,8 @@ Lightweight (uses ~10MB RAM when running) [Kubernetes](https://kubernetes.io/) s * Google's pubsub integration with [Google Container Registry](https://cloud.google.com/container-registry/) * [DockerHub Webhooks](https://docs.docker.com/docker-hub/webhooks/) -* Webhooks +* [Webhooks](https://github.com/rusenask/keel#webhook) +* [Polling](https://github.com/rusenask/keel#polling) (watch specific tag and update on SHA digest change) ## Keel overview @@ -40,7 +41,7 @@ metadata: namespace: default labels: name: "wd" - keel.observer/policy: all + keel.sh/sh: all spec: replicas: 1 template: @@ -73,6 +74,7 @@ Available policy options: * __major__ - update major versions * __minor__ - update only minor versions (ignores major) * __patch__ - update only patch versions (ignores minor and major versions) +* __force__ - force update even if tag is not semver, ie: `latest` ## Deployment and triggers @@ -88,7 +90,7 @@ gcloud container node-pools create new-pool --cluster CLUSTER_NAME --scopes http Make sure that in the Keel's deployment.yml you have set environment variables __PUBSUB=1__ and __PROJECT_ID=your-project-id__. -#### Webhook integration +#### Webhooks Keel supports two types of webhooks: @@ -99,7 +101,65 @@ Keel supports two types of webhooks: If you don't want to expose your Keel service - I would recommend using [https://webhookrelay.com/](https://webhookrelay.com/) which can deliver webhooks to your internal Keel service through a sidecar container. +#### Polling +Since only the owners of docker registries can control webhooks - it's sometimes convenient to use +polling. Be aware that registries can be rate limited so it's a good practice to set up reasonable polling intervals. 
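+
+Under the hood the poll trigger watches the manifest digest behind a tag rather than the tag
+itself. Roughly, simplified from `trigger/poll/watcher.go` in this PR (error handling omitted
+and the watcher's internal state collapsed into local variables), each scheduled check does:
+
+```
+currentDigest, err := registryClient.Digest(registry.Opts{
+	Registry: "https://index.docker.io", // or the image's own registry
+	Name:     ref.ShortName(),
+	Tag:      ref.Tag(),
+})
+if err == nil && currentDigest != lastKnownDigest {
+	// digest changed - notify providers so matching deployments get updated
+	providers.Submit(types.Event{
+		Repository: types.Repository{
+			Name:   ref.Remote(),
+			Tag:    ref.Tag(),
+			Digest: currentDigest,
+		},
+		TriggerName: types.TriggerTypePoll.String(),
+	})
+	lastKnownDigest = currentDigest
+}
+```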
+ +Add label: +``` +keel.sh/trigger=poll +``` + +To specify custom polling schedule, use annotations: +``` +keel.sh/pollSchedule=@every 1m +``` + +Example deployment file for polling: + +``` +--- +apiVersion: extensions/v1beta1 +kind: Deployment +metadata: + name: wd + namespace: default + labels: + name: "wd" + keel.sh/policy: force + keel.sh/trigger: poll + annotations: + keel.sh/pollSchedule: "@every 10m" +spec: + replicas: 1 + template: + metadata: + name: wd + labels: + app: wd + + spec: + containers: + - image: karolisr/webhook-demo + imagePullPolicy: Always + name: wd + command: ["/bin/webhook-demo"] + ports: + - containerPort: 8090 + livenessProbe: + httpGet: + path: /healthz + port: 8090 + initialDelaySeconds: 30 + timeoutSeconds: 10 + securityContext: + privileged: true +``` + +Authenticated Registries + +__comming soon__ ### Step 2: Kubernetes @@ -114,6 +174,7 @@ Now, edit [deployment file](https://github.com/rusenask/keel/blob/master/hack/de kubectl create -f hack/deployment.yml ``` -Once Keel is deployed in your Kubernetes cluster - it occasionally scans your current deployments and looks for ones that have label _keel.observer/policy_. It then checks whether appropriate subscriptions and topics are set for GCR registries, if not - auto-creates them. +Once Keel is deployed in your Kubernetes cluster - it occasionally scans your current deployments and looks for ones that have label _keel.sh/policy_. It then checks whether appropriate subscriptions and topics are set for GCR registries, if not - auto-creates them. If you have any quetions or notice a problem - raise an issue. + diff --git a/registry/registry.go b/registry/registry.go new file mode 100644 index 000000000..baf45fc6c --- /dev/null +++ b/registry/registry.go @@ -0,0 +1,80 @@ +package registry + +import ( + "errors" + + "github.com/rusenask/docker-registry-client/registry" + + log "github.com/Sirupsen/logrus" +) + +// errors +var ( + ErrTagNotSupplied = errors.New("tag not supplied") +) + +// Repository - holds repository related info +type Repository struct { + Name string + Tags []string // available tags +} + +type Client interface { + Get(opts Opts) (*Repository, error) + Digest(opts Opts) (digest string, err error) +} + +func New() *DefaultClient { + return &DefaultClient{} +} + +type DefaultClient struct { +} + +type Opts struct { + Registry, Name, Tag string + Username, Password string // if "" - anonymous +} + +// Get - get repository +func (c *DefaultClient) Get(opts Opts) (*Repository, error) { + + repo := &Repository{} + hub, err := registry.New(opts.Registry, opts.Username, opts.Password) + if err != nil { + return nil, err + } + + tags, err := hub.Tags(opts.Name) + if err != nil { + return nil, err + } + repo.Tags = tags + + return repo, nil +} + +// Digest - get digest for repo +func (c *DefaultClient) Digest(opts Opts) (digest string, err error) { + if opts.Tag == "" { + return "", ErrTagNotSupplied + } + + log.WithFields(log.Fields{ + "registry": opts.Registry, + "repository": opts.Name, + "tag": opts.Tag, + }).Info("registry client: getting digest") + + hub, err := registry.New(opts.Registry, opts.Username, opts.Password) + if err != nil { + return + } + + manifestDigest, err := hub.ManifestDigest(opts.Name, opts.Tag) + if err != nil { + return + } + + return manifestDigest.String(), nil +} diff --git a/registry/registry_test.go b/registry/registry_test.go new file mode 100644 index 000000000..784b9b1b0 --- /dev/null +++ b/registry/registry_test.go @@ -0,0 +1,43 @@ +package registry + +import ( + 
"github.com/rusenask/keel/constants" + + "testing" + + "fmt" +) + +func TestDigest(t *testing.T) { + + client := New() + digest, err := client.Digest(Opts{ + Registry: "https://index.docker.io", + Name: "karolisr/keel", + Tag: "0.2.2", + }) + + if err != nil { + t.Errorf("error while getting digest: %s", err) + } + + if digest != "sha256:0604af35299dd37ff23937d115d103532948b568a9dd8197d14c256a8ab8b0bb" { + t.Errorf("unexpected digest: %s", digest) + } +} + +func TestGet(t *testing.T) { + client := New() + repo, err := client.Get(Opts{ + // Registry: "https://index.docker.io", + Registry: constants.DefaultDockerRegistry, + Name: "karolisr/keel", + }) + + if err != nil { + t.Errorf("error while getting repo: %s", err) + } + + fmt.Println(repo.Name) + fmt.Println(repo.Tags) +} diff --git a/trigger/poll/manager.go b/trigger/poll/manager.go new file mode 100644 index 000000000..104ee8a4e --- /dev/null +++ b/trigger/poll/manager.go @@ -0,0 +1,173 @@ +package poll + +import ( + "context" + "sync" + "time" + + "k8s.io/client-go/pkg/apis/extensions/v1beta1" + + "github.com/rusenask/cron" + // "github.com/rusenask/keel/image" + "github.com/rusenask/keel/provider/kubernetes" + "github.com/rusenask/keel/types" + "github.com/rusenask/keel/util/policies" + + log "github.com/Sirupsen/logrus" +) + +// DefaultManager - default manager is responsible for scanning deployments and identifying +// deployments that have market +type DefaultManager struct { + implementer kubernetes.Implementer + // repository watcher + watcher Watcher + + mu *sync.Mutex + + // scanTick - scan interval in seconds, defaults to 60 seconds + scanTick int + + // root context + ctx context.Context +} + +// NewPollManager - new default poller +func NewPollManager(implementer kubernetes.Implementer, watcher Watcher) *DefaultManager { + return &DefaultManager{ + implementer: implementer, + watcher: watcher, + mu: &sync.Mutex{}, + scanTick: 55, + } +} + +// Start - start scanning deployment for changes +func (s *DefaultManager) Start(ctx context.Context) error { + // setting root context + s.ctx = ctx + + // initial scan + err := s.scan(ctx) + if err != nil { + log.WithFields(log.Fields{ + "error": err, + }).Error("trigger.poll.manager: scan failed") + } + + for _ = range time.Tick(time.Duration(s.scanTick) * time.Second) { + select { + case <-ctx.Done(): + return nil + default: + log.Debug("performing scan") + err := s.scan(ctx) + if err != nil { + log.WithFields(log.Fields{ + "error": err, + }).Error("trigger.poll.manager: scan failed") + } + } + } + + return nil +} + +func (s *DefaultManager) scan(ctx context.Context) error { + deploymentLists, err := s.deployments() + if err != nil { + return err + } + + for _, deploymentList := range deploymentLists { + for _, deployment := range deploymentList.Items { + labels := deployment.GetLabels() + + // ignoring unlabelled deployments + policy := policies.GetPolicy(labels) + if policy == types.PolicyTypeNone { + continue + } + + // trigger type, we only care for "poll" type triggers + trigger := policies.GetTriggerPolicy(labels) + if trigger != types.TriggerTypePoll { + continue + } + + err = s.checkDeployment(&deployment) + if err != nil { + log.WithFields(log.Fields{ + "error": err, + "deployment": deployment.Name, + "namespace": deployment.Namespace, + }).Error("trigger.poll.manager: failed to check deployment poll status") + } + } + } + return nil +} + +// checkDeployment - checks whether we are already watching for this deployment +func (s *DefaultManager) checkDeployment(deployment 
*v1beta1.Deployment) error { + annotations := deployment.GetAnnotations() + + for _, c := range deployment.Spec.Template.Spec.Containers { + + schedule, ok := annotations[types.KeelPollScheduleAnnotation] + if ok { + _, err := cron.Parse(schedule) + if err != nil { + log.WithFields(log.Fields{ + "error": err, + "schedule": schedule, + "image": c.Image, + "deployment": deployment.Name, + "namespace": deployment.Namespace, + }).Error("trigger.poll.manager: failed to parse poll schedule") + return err + } + } else { + schedule = types.KeelPollDefaultSchedule + } + + err := s.watcher.Watch(c.Image, schedule, "", "") + if err != nil { + log.WithFields(log.Fields{ + "error": err, + "schedule": schedule, + "image": c.Image, + "deployment": deployment.Name, + "namespace": deployment.Namespace, + }).Error("trigger.poll.manager: failed to start watching repository") + return err + } + // continue + } + + return nil +} + +func (s *DefaultManager) deployments() ([]*v1beta1.DeploymentList, error) { + // namespaces := p.client.Namespaces() + deployments := []*v1beta1.DeploymentList{} + + n, err := s.implementer.Namespaces() + if err != nil { + return nil, err + } + + for _, n := range n.Items { + l, err := s.implementer.Deployments(n.GetName()) + if err != nil { + log.WithFields(log.Fields{ + "error": err, + "namespace": n.GetName(), + }).Error("trigger.pubsub.manager: failed to list deployments") + continue + } + deployments = append(deployments, l) + } + + return deployments, nil +} diff --git a/trigger/poll/manager_test.go b/trigger/poll/manager_test.go new file mode 100644 index 000000000..e966de87f --- /dev/null +++ b/trigger/poll/manager_test.go @@ -0,0 +1,99 @@ +package poll + +import ( + meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/client-go/pkg/api/v1" + "k8s.io/client-go/pkg/apis/extensions/v1beta1" + + "github.com/rusenask/keel/provider" + "github.com/rusenask/keel/types" + "github.com/rusenask/keel/util/image" + keelTesting "github.com/rusenask/keel/util/testing" + + "testing" +) + +func TestCheckDeployment(t *testing.T) { + // fake provider listening for events + fp := &fakeProvider{} + providers := provider.New([]provider.Provider{fp}) + // implementer should not be called in this case + k8sImplementer := &keelTesting.FakeK8sImplementer{} + + // returning some sha + frc := &fakeRegistryClient{ + digestToReturn: "sha256:0604af35299dd37ff23937d115d103532948b568a9dd8197d14c256a8ab8b0bb", + } + + watcher := NewRepositoryWatcher(providers, frc) + + pm := NewPollManager(k8sImplementer, watcher) + + imageA := "gcr.io/v2-namespace/hello-world:1.1.1" + imageB := "gcr.io/v2-namespace/greetings-world:1.1.1" + + dep := &v1beta1.Deployment{ + meta_v1.TypeMeta{}, + meta_v1.ObjectMeta{ + Name: "dep-1", + Namespace: "xxxx", + Labels: map[string]string{types.KeelPolicyLabel: "all"}, + }, + v1beta1.DeploymentSpec{ + Template: v1.PodTemplateSpec{ + Spec: v1.PodSpec{ + Containers: []v1.Container{ + v1.Container{ + Image: imageA, + }, + v1.Container{ + Image: imageB, + }, + }, + }, + }, + }, + v1beta1.DeploymentStatus{}, + } + + err := pm.checkDeployment(dep) + if err != nil { + t.Errorf("deployment check failed: %s", err) + } + + // 2 subscriptions should be added + entries := watcher.cron.Entries() + if len(entries) != 2 { + t.Errorf("unexpected list of cron entries: %d", len(entries)) + } + + ref, _ := image.Parse(imageA) + keyA := getImageIdentifier(ref) + if watcher.watched[keyA].digest != frc.digestToReturn { + t.Errorf("unexpected digest") + } + if watcher.watched[keyA].schedule != 
types.KeelPollDefaultSchedule { + t.Errorf("unexpected schedule: %s", watcher.watched[keyA].schedule) + } + if watcher.watched[keyA].imageRef.Remote() != ref.Remote() { + t.Errorf("unexpected remote remote: %s", watcher.watched[keyA].imageRef.Remote()) + } + if watcher.watched[keyA].imageRef.Tag() != ref.Tag() { + t.Errorf("unexpected tag: %s", watcher.watched[keyA].imageRef.Tag()) + } + + refB, _ := image.Parse(imageB) + keyB := getImageIdentifier(refB) + if watcher.watched[keyB].digest != frc.digestToReturn { + t.Errorf("unexpected digest") + } + if watcher.watched[keyB].schedule != types.KeelPollDefaultSchedule { + t.Errorf("unexpected schedule: %s", watcher.watched[keyB].schedule) + } + if watcher.watched[keyB].imageRef.Remote() != refB.Remote() { + t.Errorf("unexpected remote remote: %s", watcher.watched[keyB].imageRef.Remote()) + } + if watcher.watched[keyB].imageRef.Tag() != refB.Tag() { + t.Errorf("unexpected tag: %s", watcher.watched[keyB].imageRef.Tag()) + } +} diff --git a/trigger/poll/watcher.go b/trigger/poll/watcher.go new file mode 100644 index 000000000..5e7459d23 --- /dev/null +++ b/trigger/poll/watcher.go @@ -0,0 +1,245 @@ +package poll + +import ( + "context" + + "github.com/rusenask/cron" + "github.com/rusenask/keel/provider" + "github.com/rusenask/keel/registry" + "github.com/rusenask/keel/types" + "github.com/rusenask/keel/util/image" + + log "github.com/Sirupsen/logrus" +) + +type Watcher interface { + Watch(imageName, registryUsername, registryPassword, schedule string) error + Unwatch(image string) error +} + +type watchDetails struct { + imageRef *image.Reference + registryUsername string // "" for anonymous + registryPassword string // "" for anonymous + digest string // image digest + schedule string +} + +// RepositoryWatcher - repository watcher cron +type RepositoryWatcher struct { + providers provider.Providers + + // registry client + registryClient registry.Client + + // internal map of internal watches + // map[registry/name]=image.Reference + watched map[string]*watchDetails + + cron *cron.Cron +} + +// NewRepositoryWatcher - create new repository watcher +func NewRepositoryWatcher(providers provider.Providers, registryClient registry.Client) *RepositoryWatcher { + c := cron.New() + + return &RepositoryWatcher{ + providers: providers, + registryClient: registryClient, + watched: make(map[string]*watchDetails), + cron: c, + } +} + +func (w *RepositoryWatcher) Start(ctx context.Context) { + // starting cron job + w.cron.Start() + go func() { + for { + select { + case <-ctx.Done(): + w.cron.Stop() + } + } + }() +} + +func getImageIdentifier(ref *image.Reference) string { + return ref.Registry() + "/" + ref.ShortName() +} + +// Unwatch - stop watching for changes +func (w *RepositoryWatcher) Unwatch(imageName string) error { + imageRef, err := image.Parse(imageName) + if err != nil { + log.WithFields(log.Fields{ + "error": err, + "image_name": imageName, + }).Error("trigger.poll.RepositoryWatcher.Unwatch: failed to parse image") + return err + } + key := getImageIdentifier(imageRef) + _, ok := w.watched[key] + if ok { + w.cron.DeleteJob(key) + delete(w.watched, key) + } + + return nil +} + +// Watch - starts watching repository for changes, if it's already watching - ignores, +// if details changed - updates details +func (w *RepositoryWatcher) Watch(imageName, schedule, registryUsername, registryPassword string) error { + + imageRef, err := image.Parse(imageName) + if err != nil { + log.WithFields(log.Fields{ + "error": err, + "image_name": imageName, + 
}).Error("trigger.poll.RepositoryWatcher.Watch: failed to parse image") + return err + } + + key := getImageIdentifier(imageRef) + + // checking whether it's already being watched + details, ok := w.watched[key] + if !ok { + err = w.addJob(imageRef, registryUsername, registryPassword, schedule) + if err != nil { + log.WithFields(log.Fields{ + "error": err, + "image_name": imageName, + "registry_username": registryUsername, + }).Error("trigger.poll.RepositoryWatcher.Watch: failed to add image watch job") + + } + return err + } + + // checking schedule + if details.schedule != schedule { + w.cron.UpdateJob(key, schedule) + } + + // checking auth details, if changed - need to update + if details.registryPassword != registryPassword || details.registryUsername != registryUsername { + // recreating job + w.cron.DeleteJob(key) + err = w.addJob(imageRef, registryUsername, registryPassword, schedule) + if err != nil { + log.WithFields(log.Fields{ + "error": err, + "image_name": imageName, + "registry_username": registryUsername, + }).Error("trigger.poll.RepositoryWatcher.Watch: failed to add image watch job") + } + return err + } + + // nothing to do + + return nil +} + +func (w *RepositoryWatcher) addJob(ref *image.Reference, registryUsername, registryPassword, schedule string) error { + // getting initial digest + reg := ref.Scheme() + "://" + ref.Registry() + + digest, err := w.registryClient.Digest(registry.Opts{ + Registry: reg, + Name: ref.ShortName(), + Tag: ref.Tag(), + }) + if err != nil { + log.WithFields(log.Fields{ + "error": err, + "image": ref.Remote(), + }).Error("trigger.poll.RepositoryWatcher.addJob: failed to get image digest") + return err + } + + key := getImageIdentifier(ref) + details := &watchDetails{ + imageRef: ref, + digest: digest, // current image digest + registryUsername: registryUsername, + registryPassword: registryPassword, + schedule: schedule, + } + + // adding job to internal map + w.watched[key] = details + + // adding new job + job := NewWatchTagJob(w.providers, w.registryClient, details) + log.WithFields(log.Fields{ + "job_name": key, + "image": ref.Remote(), + "digest": digest, + "schedule": schedule, + }).Info("trigger.poll.RepositoryWatcher: new job added") + return w.cron.AddJob(key, schedule, job) + +} + +// WatchTagJob - Watch specific tag job +type WatchTagJob struct { + providers provider.Providers + registryClient registry.Client + details *watchDetails +} + +// NewWatchTagJob - new watch tag job monitors specific tag by checking digest based on specified +// cron style schedule +func NewWatchTagJob(providers provider.Providers, registryClient registry.Client, details *watchDetails) *WatchTagJob { + return &WatchTagJob{ + providers: providers, + registryClient: registryClient, + details: details, + } +} + +// Run - main function to check schedule +func (j *WatchTagJob) Run() { + reg := j.details.imageRef.Scheme() + "://" + j.details.imageRef.Registry() + currentDigest, err := j.registryClient.Digest(registry.Opts{ + Registry: reg, + Name: j.details.imageRef.ShortName(), + Tag: j.details.imageRef.Tag(), + }) + + if err != nil { + log.WithFields(log.Fields{ + "error": err, + "image": j.details.imageRef.Remote(), + }).Error("trigger.poll.WatchTagJob: failed to check digest") + return + } + + log.WithFields(log.Fields{ + "current_digest": j.details.digest, + "new_digest": currentDigest, + "image_name": j.details.imageRef.Remote(), + }).Info("trigger.poll.WatchTagJob: checking digest") + + // checking whether image digest has changed + if j.details.digest 
!= currentDigest { + // updating digest + j.details.digest = currentDigest + + event := types.Event{ + Repository: types.Repository{ + Name: j.details.imageRef.Remote(), + Tag: j.details.imageRef.Tag(), + Digest: currentDigest, + }, + TriggerName: types.TriggerTypePoll.String(), + } + log.Info("trigger.poll.WatchTagJob: digest change detected, submitting event to providers") + + j.providers.Submit(event) + + } +} diff --git a/trigger/poll/watcher_test.go b/trigger/poll/watcher_test.go new file mode 100644 index 000000000..e7b054d77 --- /dev/null +++ b/trigger/poll/watcher_test.go @@ -0,0 +1,125 @@ +package poll + +import ( + "testing" + + "github.com/rusenask/keel/provider" + "github.com/rusenask/keel/registry" + "github.com/rusenask/keel/types" + "github.com/rusenask/keel/util/image" +) + +// ======== fake registry client for testing ======= +type fakeRegistryClient struct { + opts registry.Opts // opts set if anything called Digest(opts Opts) + + digestToReturn string +} + +func (c *fakeRegistryClient) Get(opts registry.Opts) (*registry.Repository, error) { + return nil, nil +} + +func (c *fakeRegistryClient) Digest(opts registry.Opts) (digest string, err error) { + return c.digestToReturn, nil +} + +// ======== fake provider for testing ======= +type fakeProvider struct { + submitted []types.Event +} + +func (p *fakeProvider) Submit(event types.Event) error { + p.submitted = append(p.submitted, event) + return nil +} + +func (p *fakeProvider) GetName() string { + return "fakeProvider" +} + +func TestWatchTagJob(t *testing.T) { + + fp := &fakeProvider{} + providers := provider.New([]provider.Provider{fp}) + + frc := &fakeRegistryClient{ + digestToReturn: "sha256:0604af35299dd37ff23937d115d103532948b568a9dd8197d14c256a8ab8b0bb", + } + + reference, _ := image.Parse("foo/bar:1.1") + + details := &watchDetails{ + imageRef: reference, + digest: "sha256:123123123", + } + + job := NewWatchTagJob(providers, frc, details) + + job.Run() + + // checking whether the event was submitted + + submitted := fp.submitted[0] + + if submitted.Repository.Name != "index.docker.io/foo/bar:1.1" { + t.Errorf("unexpected event repository name: %s", submitted.Repository.Name) + } + + if submitted.Repository.Tag != "1.1" { + t.Errorf("unexpected event repository tag: %s", submitted.Repository.Tag) + } + + if submitted.Repository.Digest != frc.digestToReturn { + t.Errorf("unexpected event repository digest: %s", submitted.Repository.Digest) + } + + // digest should be updated + + if job.details.digest != frc.digestToReturn { + t.Errorf("job details digest wasn't updated") + } +} + +func TestWatchTagJobLatest(t *testing.T) { + + fp := &fakeProvider{} + providers := provider.New([]provider.Provider{fp}) + + frc := &fakeRegistryClient{ + digestToReturn: "sha256:0604af35299dd37ff23937d115d103532948b568a9dd8197d14c256a8ab8b0bb", + } + + reference, _ := image.Parse("foo/bar:latest") + + details := &watchDetails{ + imageRef: reference, + digest: "sha256:123123123", + } + + job := NewWatchTagJob(providers, frc, details) + + job.Run() + + // checking whether the event was submitted + + submitted := fp.submitted[0] + + if submitted.Repository.Name != "index.docker.io/foo/bar:latest" { + t.Errorf("unexpected event repository name: %s", submitted.Repository.Name) + } + + if submitted.Repository.Tag != "latest" { + t.Errorf("unexpected event repository tag: %s", submitted.Repository.Tag) + } + + if submitted.Repository.Digest != frc.digestToReturn { + t.Errorf("unexpected event repository digest: %s", submitted.Repository.Digest) + }
+ + // digest should be updated + + if job.details.digest != frc.digestToReturn { + t.Errorf("job details digest wasn't updated") + } +} diff --git a/trigger/pubsub/manager.go b/trigger/pubsub/manager.go index 094a522b9..6cb259d38 100644 --- a/trigger/pubsub/manager.go +++ b/trigger/pubsub/manager.go @@ -10,6 +10,7 @@ import ( "github.com/rusenask/keel/provider/kubernetes" "github.com/rusenask/keel/types" + "github.com/rusenask/keel/util/policies" log "github.com/Sirupsen/logrus" ) @@ -35,6 +36,8 @@ type DefaultManager struct { ctx context.Context } +// Subscriber - a subscriber is responsible for listening for repository events and +// informing providers type Subscriber interface { Subscribe(ctx context.Context, topic, subscription string) error } @@ -91,9 +94,10 @@ func (s *DefaultManager) scan(ctx context.Context) error { for _, deploymentList := range deploymentLists { for _, deployment := range deploymentList.Items { labels := deployment.GetLabels() - _, ok := labels[types.KeelPolicyLabel] - // if no keel policy is set - skipping this deployment - if !ok { + + // ignoring unlabelled deployments + policy := policies.GetPolicy(labels) + if policy == types.PolicyTypeNone { continue } diff --git a/types/types.go b/types/types.go index f8ff1d7cf..38cfffbcb 100644 --- a/types/types.go +++ b/types/types.go @@ -7,12 +7,29 @@ import ( ) const KeelDefaultPort = 9300 -const KeelPolicyLabel = "keel.observer/policy" + +// KeelPolicyLabel - keel update policies (version checking) +const KeelPolicyLabel = "keel.sh/policy" + +// KeelTriggerLabel - trigger label is used to specify custom trigger types, +// for example keel.sh/trigger=poll signals the poll trigger to start watching for repository +// changes +const KeelTriggerLabel = "keel.sh/trigger" + +// KeelPollScheduleAnnotation - optional annotation to set up a custom polling schedule, defaults to KeelPollDefaultSchedule +const KeelPollScheduleAnnotation = "keel.sh/pollSchedule" + +// KeelPollDefaultSchedule - default polling schedule +const KeelPollDefaultSchedule = "@every 1m" + +// KeelDigestAnnotation - digest annotation +const KeelDigestAnnotation = "keel.sh/digest" type Repository struct { - Host string `json:"host,omitempty"` - Name string `json:"name,omitempty"` - Tag string `json:"tag,omitempty"` + Host string `json:"host"` + Name string `json:"name"` + Tag string `json:"tag"` + Digest string `json:"digest"` // optional digest field } type Event struct { @@ -49,6 +66,34 @@ func (v Version) String() string { return buf.String() } +// TriggerType - trigger types +type TriggerType int + +// Available trigger types +const ( + TriggerTypeDefault TriggerType = iota // default behaviour is to wait for external triggers + TriggerTypePoll // poll trigger sets up watchers for the affected repositories +) + +func (t TriggerType) String() string { + switch t { + case TriggerTypeDefault: + return "default" + case TriggerTypePoll: + return "poll" + default: + return "unknown" + } +} + +func ParseTrigger(trigger string) TriggerType { + switch trigger { + case "poll": + return TriggerTypePoll + } + return TriggerTypeDefault +} + // PolicyType - policy type type PolicyType int @@ -63,15 +108,17 @@ func ParsePolicy(policy string) PolicyType { return PolicyTypeMinor case "patch": return PolicyTypePatch + case "force": + return PolicyTypeForce default: - return PolicyTypeUnknown + return PolicyTypeNone } } func (t PolicyType) String() string { switch t { - case PolicyTypeUnknown: - return "unknown" + case PolicyTypeNone: + return "none" case PolicyTypeAll: return "all" case
PolicyTypeMajor: @@ -89,7 +136,7 @@ func (t PolicyType) String() string { // available policies const ( - PolicyTypeUnknown = iota + PolicyTypeNone = iota PolicyTypeAll PolicyTypeMajor PolicyTypeMinor diff --git a/types/types_test.go b/types/types_test.go index 6a01abed1..244b0ed37 100644 --- a/types/types_test.go +++ b/types/types_test.go @@ -36,7 +36,12 @@ func TestParsePolicy(t *testing.T) { { name: "random", args: args{policy: "rand"}, - want: PolicyTypeUnknown, + want: PolicyTypeNone, + }, + { + name: "force", + args: args{policy: "force"}, + want: PolicyTypeForce, }, } for _, tt := range tests { diff --git a/util/image/parse.go b/util/image/parse.go new file mode 100644 index 000000000..0960d7822 --- /dev/null +++ b/util/image/parse.go @@ -0,0 +1,127 @@ +package image + +import ( + "strings" +) + +// Reference is an opaque object that include identifier such as a name, tag, repository, registry, etc... +type Reference struct { + named Named + tag string + scheme string // registry scheme, i.e. http, https +} + +// Name returns the image's name. (ie: debian[:8.2]) +func (r Reference) Name() string { + return r.named.RemoteName() + r.tag +} + +// ShortName returns the image's name (ie: debian) +func (r Reference) ShortName() string { + return r.named.RemoteName() +} + +// Tag returns the image's tag (or digest). +func (r Reference) Tag() string { + if len(r.tag) > 1 { + return r.tag[1:] + } + return "" +} + +// Registry returns the image's registry. (ie: host[:port]) +func (r Reference) Registry() string { + return r.named.Hostname() +} + +// Scheme returns registry's scheme. (ie: https) +func (r Reference) Scheme() string { + return r.scheme +} + +// Repository returns the image's repository. (ie: registry/name) +func (r Reference) Repository() string { + return r.named.FullName() +} + +// Remote returns the image's remote identifier. (ie: registry/name[:tag]) +func (r Reference) Remote() string { + return r.named.FullName() + r.tag +} + +func clean(url string) (cleaned string, scheme string) { + + s := url + + if strings.HasPrefix(url, "http://") { + scheme = "http" + s = strings.Replace(url, "http://", "", 1) + } else if strings.HasPrefix(url, "https://") { + scheme = "https" + s = strings.Replace(url, "https://", "", 1) + } + + if scheme == "" { + scheme = DefaultScheme + } + + return s, scheme +} + +// Parse returns a Reference from analyzing the given remote identifier. 
+func Parse(remote string) (*Reference, error) { + + cleaned, scheme := clean(remote) + n, err := ParseNamed(cleaned) + + if err != nil { + return nil, err + } + + n = WithDefaultTag(n) + + var t string + switch x := n.(type) { + case Canonical: + t = "@" + x.Digest().String() + case NamedTagged: + t = ":" + x.Tag() + } + + return &Reference{named: n, tag: t, scheme: scheme}, nil +} + +// ParseRepo - parses remote +// pretty much the same as Parse but better for testing +func ParseRepo(remote string) (*Repository, error) { + + cleaned, scheme := clean(remote) + + n, err := ParseNamed(cleaned) + + if err != nil { + return nil, err + } + + n = WithDefaultTag(n) + + var t string + switch x := n.(type) { + case Canonical: + t = "@" + x.Digest().String() + case NamedTagged: + t = ":" + x.Tag() + } + + ref := &Reference{named: n, tag: t, scheme: scheme} + + return &Repository{ + Name: ref.Name(), + Repository: ref.Repository(), + Registry: ref.Registry(), + Remote: ref.Remote(), + ShortName: ref.ShortName(), + Tag: ref.Tag(), + Scheme: ref.scheme, + }, nil +} diff --git a/util/image/parse_test.go b/util/image/parse_test.go new file mode 100644 index 000000000..203eea076 --- /dev/null +++ b/util/image/parse_test.go @@ -0,0 +1,116 @@ +package image + +import ( + "reflect" + "testing" +) + +func TestShortParseWithTag(t *testing.T) { + + reference, err := Parse("foo/bar:1.1") + if err != nil { + t.Errorf("error while parsing tag: %s", err) + } + + if reference.Remote() != DefaultRegistryHostname+"/foo/bar:1.1" { + t.Errorf("unexpected remote: %s", reference.Remote()) + } + + if reference.Tag() != "1.1" { + t.Errorf("unexpected tag: %s", reference.Tag()) + } + + if reference.Registry() != DefaultRegistryHostname { + t.Errorf("unexpected registry: %s", reference.Registry()) + } + + if reference.ShortName() != "foo/bar" { + t.Errorf("unexpected name: %s", reference.ShortName()) + } + + if reference.Name() != "foo/bar:1.1" { + t.Errorf("unexpected name: %s", reference.Name()) + } + +} + +func TestParseRepo(t *testing.T) { + type args struct { + remote string + } + tests := []struct { + name string + args args + want *Repository + wantErr bool + }{ + { + name: "foo/bar:1.1", + args: args{remote: "foo/bar:1.1"}, + want: &Repository{ + Name: "foo/bar:1.1", + Repository: "index.docker.io/foo/bar", + Remote: "index.docker.io/foo/bar:1.1", + Registry: DefaultRegistryHostname, + ShortName: "foo/bar", + Tag: "1.1", + Scheme: "https", + }, + wantErr: false, + }, + { + name: "localhost.localdomain/foo/bar:1.1", + args: args{remote: "localhost.localdomain/foo/bar:1.1"}, + want: &Repository{ + Name: "foo/bar:1.1", + Repository: "localhost.localdomain/foo/bar", + Remote: "localhost.localdomain/foo/bar:1.1", + Registry: "localhost.localdomain", + ShortName: "foo/bar", + Tag: "1.1", + Scheme: "https", + }, + wantErr: false, + }, + { + name: "https://httphost.sh/foo/bar:1.1", + args: args{remote: "https://httphost.sh/foo/bar:1.1"}, + want: &Repository{ + Name: "foo/bar:1.1", + Repository: "httphost.sh/foo/bar", + Remote: "httphost.sh/foo/bar:1.1", + Registry: "httphost.sh", + ShortName: "foo/bar", + Tag: "1.1", + Scheme: "https", + }, + wantErr: false, + }, + { + name: "localhost.localdomain/foo/bar (no tag)", + args: args{remote: "localhost.localdomain/foo/bar"}, + want: &Repository{ + Name: "foo/bar:latest", + Repository: "localhost.localdomain/foo/bar", + Remote: "localhost.localdomain/foo/bar:latest", + Registry: "localhost.localdomain", + ShortName: "foo/bar", + Tag: "latest", + Scheme: "https", + }, + wantErr: 
false, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + got, err := ParseRepo(tt.args.remote) + if (err != nil) != tt.wantErr { + t.Errorf("ParseRepo() error = %v, wantErr %v", err, tt.wantErr) + return + } + if !reflect.DeepEqual(got, tt.want) { + t.Errorf("ParseRepo() = %v, want %v", got, tt.want) + } + }) + } +} diff --git a/util/image/reference.go b/util/image/reference.go new file mode 100644 index 000000000..3ea9d1172 --- /dev/null +++ b/util/image/reference.go @@ -0,0 +1,214 @@ +package image + +import ( + "errors" + "fmt" + "strings" + + "github.com/docker/distribution/digest" + "github.com/docker/distribution/reference" +) + +const ( + // DefaultTag defines the default tag used when performing images related actions and no tag or digest is specified + DefaultTag = "latest" + // DefaultRegistryHostname is the default built-in hostname + DefaultRegistryHostname = "index.docker.io" + + // DefaultScheme is default scheme for registries + DefaultScheme = "https" + + // // LegacyDefaultHostname is automatically converted to DefaultHostname + // LegacyDefaultHostname = "index.docker.io" + // DefaultRepoPrefix is the prefix used for default repositories in default host + DefaultRepoPrefix = "library/" +) + +// Repository is an object created from Named interface +type Repository struct { + Name string // Name returns the image's name. (ie: debian[:8.2]) + Repository string // Repository returns the image's repository. (ie: registry/name) + Registry string // Registry returns the image's registry. (ie: host[:port]) + Scheme string // Registry scheme. (ie: http) + ShortName string // ShortName returns the image's name (ie: debian) + Remote string // Remote returns the image's remote identifier. (ie: registry/name[:tag]) + Tag string // Tag returns the image's tag (or digest). +} + +// Named is an object with a full name +type Named interface { + // Name returns normalized repository name, like "ubuntu". + Name() string + // String returns full reference, like "ubuntu@sha256:abcdef..." + String() string + // FullName returns full repository name with hostname, like "docker.io/library/ubuntu" + FullName() string + // Hostname returns hostname for the reference, like "docker.io" + Hostname() string + // RemoteName returns the repository component of the full name, like "library/ubuntu" + RemoteName() string +} + +// NamedTagged is an object including a name and tag. +type NamedTagged interface { + Named + Tag() string +} + +// Canonical reference is an object with a fully unique +// name including a name with hostname and digest +type Canonical interface { + Named + Digest() digest.Digest +} + +// ParseNamed parses s and returns a syntactically valid reference implementing +// the Named interface. The reference must have a name, otherwise an error is +// returned. +// If an error was encountered it is returned, along with a nil Reference. +func ParseNamed(s string) (Named, error) { + named, err := reference.ParseNamed(s) + if err != nil { + return nil, fmt.Errorf("Error parsing reference: %q is not a valid repository/tag", s) + } + r, err := WithName(named.Name()) + if err != nil { + return nil, err + } + if canonical, isCanonical := named.(reference.Canonical); isCanonical { + return WithDigest(r, canonical.Digest()) + } + + if tagged, isTagged := named.(reference.NamedTagged); isTagged { + return WithTag(r, tagged.Tag()) + } + return r, nil +} + +// WithName returns a named object representing the given string. 
If the input +// is invalid ErrReferenceInvalidFormat will be returned. +func WithName(name string) (Named, error) { + name, err := normalize(name) + if err != nil { + return nil, err + } + if err := validateName(name); err != nil { + return nil, err + } + r, err := reference.WithName(name) + if err != nil { + return nil, err + } + return &namedRef{r}, nil +} + +// WithTag combines the name from "name" and the tag from "tag" to form a +// reference incorporating both the name and the tag. +func WithTag(name Named, tag string) (NamedTagged, error) { + r, err := reference.WithTag(name, tag) + if err != nil { + return nil, err + } + return &taggedRef{namedRef{r}}, nil +} + +// WithDigest combines the name from "name" and the digest from "digest" to form +// a reference incorporating both the name and the digest. +func WithDigest(name Named, digest digest.Digest) (Canonical, error) { + r, err := reference.WithDigest(name, digest) + if err != nil { + return nil, err + } + return &canonicalRef{namedRef{r}}, nil +} + +type namedRef struct { + reference.Named +} +type taggedRef struct { + namedRef +} +type canonicalRef struct { + namedRef +} + +func (r *namedRef) FullName() string { + hostname, remoteName := splitHostname(r.Name()) + return hostname + "/" + remoteName +} +func (r *namedRef) Hostname() string { + hostname, _ := splitHostname(r.Name()) + return hostname +} +func (r *namedRef) RemoteName() string { + _, remoteName := splitHostname(r.Name()) + return remoteName +} +func (r *taggedRef) Tag() string { + return r.namedRef.Named.(reference.NamedTagged).Tag() +} +func (r *canonicalRef) Digest() digest.Digest { + return r.namedRef.Named.(reference.Canonical).Digest() +} + +// WithDefaultTag adds a default tag to a reference if it only has a repo name. +func WithDefaultTag(ref Named) Named { + if IsNameOnly(ref) { + ref, _ = WithTag(ref, DefaultTag) + } + return ref +} + +// IsNameOnly returns true if reference only contains a repo name. +func IsNameOnly(ref Named) bool { + if _, ok := ref.(NamedTagged); ok { + return false + } + if _, ok := ref.(Canonical); ok { + return false + } + return true +} + +// splitHostname splits a repository name to hostname and remotename string. +// If no valid hostname is found, the default hostname is used. Repository name +// needs to be already validated before. +func splitHostname(name string) (hostname, remoteName string) { + i := strings.IndexRune(name, '/') + if i == -1 || (!strings.ContainsAny(name[:i], ".:") && name[:i] != "localhost") { + hostname, remoteName = DefaultRegistryHostname, name + } else { + hostname, remoteName = name[:i], name[i+1:] + } + // if hostname == LegacyDefaultHostname { + // hostname = DefaultHostname + // } + if hostname == DefaultRegistryHostname && !strings.ContainsRune(remoteName, '/') { + remoteName = DefaultRepoPrefix + remoteName + } + + return +} + +// normalize returns a repository name in its normalized form, meaning it +// will not contain default hostname nor library/ prefix for official images. 
+func normalize(name string) (string, error) { + host, remoteName := splitHostname(name) + if strings.ToLower(remoteName) != remoteName { + return "", errors.New("invalid reference format: repository name must be lowercase") + } + if host == DefaultRegistryHostname { + if strings.HasPrefix(remoteName, DefaultRepoPrefix) { + return strings.TrimPrefix(remoteName, DefaultRepoPrefix), nil + } + return remoteName, nil + } + return name, nil +} + +func validateName(name string) error { + if err := ValidateID(name); err == nil { + return fmt.Errorf("Invalid repository name (%s), cannot specify 64-byte hexadecimal strings", name) + } + return nil +} diff --git a/util/image/validation.go b/util/image/validation.go new file mode 100644 index 000000000..15f9aac61 --- /dev/null +++ b/util/image/validation.go @@ -0,0 +1,16 @@ +package image + +import ( + "fmt" + "regexp" +) + +var validHex = regexp.MustCompile(`^([a-f0-9]{64})$`) + +// ValidateID checks whether an ID string is a valid image ID. +func ValidateID(id string) error { + if ok := validHex.MatchString(id); !ok { + return fmt.Errorf("image ID '%s' is invalid ", id) + } + return nil +} \ No newline at end of file diff --git a/util/policies/policies.go b/util/policies/policies.go new file mode 100644 index 000000000..2061e59f3 --- /dev/null +++ b/util/policies/policies.go @@ -0,0 +1,29 @@ +package policies + +import ( + "github.com/rusenask/keel/types" +) + +// GetPolicy - gets policy +func GetPolicy(labels map[string]string) types.PolicyType { + for k, v := range labels { + switch k { + case types.KeelPolicyLabel: + return types.ParsePolicy(v) + case "keel.observer/policy": + return types.ParsePolicy(v) + } + } + + return types.PolicyTypeNone +} + +// GetTriggerPolicy - checks for trigger label, if not set - returns +// default trigger type +func GetTriggerPolicy(labels map[string]string) types.TriggerType { + trigger, ok := labels[types.KeelTriggerLabel] + if ok { + return types.ParseTrigger(trigger) + } + return types.TriggerTypeDefault +} diff --git a/util/policies/policies_test.go b/util/policies/policies_test.go new file mode 100644 index 000000000..2dfe843a0 --- /dev/null +++ b/util/policies/policies_test.go @@ -0,0 +1,42 @@ +package policies + +import ( + "reflect" + "testing" + + "github.com/rusenask/keel/types" +) + +func TestGetPolicy(t *testing.T) { + type args struct { + labels map[string]string + } + tests := []struct { + name string + args args + want types.PolicyType + }{ + { + name: "policy all", + args: args{labels: map[string]string{types.KeelPolicyLabel: "all"}}, + want: types.PolicyTypeAll, + }, + { + name: "policy minor", + args: args{labels: map[string]string{types.KeelPolicyLabel: "minor"}}, + want: types.PolicyTypeMinor, + }, + { + name: "legacy policy minor", + args: args{labels: map[string]string{"keel.observer/policy": "minor"}}, + want: types.PolicyTypeMinor, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + if got := GetPolicy(tt.args.labels); !reflect.DeepEqual(got, tt.want) { + t.Errorf("GetPolicy() = %v, want %v", got, tt.want) + } + }) + } +} diff --git a/util/testing/testing.go b/util/testing/testing.go new file mode 100644 index 000000000..e8c25092d --- /dev/null +++ b/util/testing/testing.go @@ -0,0 +1,34 @@ +package testing + +import ( + // meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + "k8s.io/client-go/pkg/api/v1" + "k8s.io/client-go/pkg/apis/extensions/v1beta1" +) + +type FakeK8sImplementer struct { + NamespacesList *v1.NamespaceList + DeploymentSingle *v1beta1.Deployment + 
DeploymentList *v1beta1.DeploymentList + + // stores value of an updated deployment + Updated *v1beta1.Deployment +} + +func (i *FakeK8sImplementer) Namespaces() (*v1.NamespaceList, error) { + return i.NamespacesList, nil +} + +func (i *FakeK8sImplementer) Deployment(namespace, name string) (*v1beta1.Deployment, error) { + return i.DeploymentSingle, nil +} + +func (i *FakeK8sImplementer) Deployments(namespace string) (*v1beta1.DeploymentList, error) { + return i.DeploymentList, nil +} + +func (i *FakeK8sImplementer) Update(deployment *v1beta1.Deployment) error { + i.Updated = deployment + return nil +} diff --git a/vendor/github.com/docker/distribution/.gitignore b/vendor/github.com/docker/distribution/.gitignore new file mode 100644 index 000000000..1c3ae0a77 --- /dev/null +++ b/vendor/github.com/docker/distribution/.gitignore @@ -0,0 +1,37 @@ +# Compiled Object files, Static and Dynamic libs (Shared Objects) +*.o +*.a +*.so + +# Folders +_obj +_test + +# Architecture specific extensions/prefixes +*.[568vq] +[568vq].out + +*.cgo1.go +*.cgo2.c +_cgo_defun.c +_cgo_gotypes.go +_cgo_export.* + +_testmain.go + +*.exe +*.test +*.prof + +# never checkin from the bin file (for now) +bin/* + +# Test key files +*.pem + +# Cover profiles +*.out + +# Editor/IDE specific files. +*.sublime-project +*.sublime-workspace diff --git a/vendor/github.com/docker/distribution/.mailmap b/vendor/github.com/docker/distribution/.mailmap new file mode 100644 index 000000000..d99106019 --- /dev/null +++ b/vendor/github.com/docker/distribution/.mailmap @@ -0,0 +1,18 @@ +Stephen J Day Stephen Day +Stephen J Day Stephen Day +Olivier Gambier Olivier Gambier +Brian Bland Brian Bland +Brian Bland Brian Bland +Josh Hawn Josh Hawn +Richard Scothern Richard +Richard Scothern Richard Scothern +Andrew Meredith Andrew Meredith +harche harche +Jessie Frazelle +Sharif Nassar Sharif Nassar +Sven Dowideit Sven Dowideit +Vincent Giersch Vincent Giersch +davidli davidli +Omer Cohen Omer Cohen +Eric Yang Eric Yang +Nikita Tarasov Nikita diff --git a/vendor/github.com/docker/distribution/AUTHORS b/vendor/github.com/docker/distribution/AUTHORS new file mode 100644 index 000000000..252ff8aa2 --- /dev/null +++ b/vendor/github.com/docker/distribution/AUTHORS @@ -0,0 +1,182 @@ +a-palchikov +Aaron Lehmann +Aaron Schlesinger +Aaron Vinson +Adam Duke +Adam Enger +Adrian Mouat +Ahmet Alp Balkan +Alex Chan +Alex Elman +Alexey Gladkov +allencloud +amitshukla +Amy Lindburg +Andrew Hsu +Andrew Meredith +Andrew T Nguyen +Andrey Kostov +Andy Goldstein +Anis Elleuch +Anton Tiurin +Antonio Mercado +Antonio Murdaca +Anusha Ragunathan +Arien Holthuizen +Arnaud Porterie +Arthur Baars +Asuka Suzuki +Avi Miller +Ayose Cazorla +BadZen +Ben Bodenmiller +Ben Firshman +bin liu +Brian Bland +burnettk +Carson A +Cezar Sa Espinola +Charles Smith +Chris Dillon +cuiwei13 +cyli +Daisuke Fujita +Daniel Huhn +Darren Shepherd +Dave Trombley +Dave Tucker +David Lawrence +David Verhasselt +David Xia +davidli +Dejan Golja +Derek McGowan +Diogo Mónica +DJ Enriquez +Donald Huang +Doug Davis +Edgar Lee +Eric Yang +Fabio Berchtold +Fabio Huser +farmerworking +Felix Yan +Florentin Raud +Frank Chen +Frederick F. 
Kautz IV +gabriell nascimento +Gleb Schukin +harche +Henri Gomez +Hu Keping +Hua Wang +HuKeping +Ian Babrou +igayoso +Jack Griffin +James Findley +Jason Freidman +Jason Heiss +Jeff Nickoloff +Jess Frazelle +Jessie Frazelle +jhaohai +Jianqing Wang +Jihoon Chung +Joao Fernandes +John Mulhausen +John Starks +Jon Johnson +Jon Poler +Jonathan Boulle +Jordan Liggitt +Josh Chorlton +Josh Hawn +Julien Fernandez +Ke Xu +Keerthan Mala +Kelsey Hightower +Kenneth Lim +Kenny Leung +Li Yi +Liu Hua +liuchang0812 +Lloyd Ramey +Louis Kottmann +Luke Carpenter +Marcus Martins +Mary Anthony +Matt Bentley +Matt Duch +Matt Moore +Matt Robenolt +Matthew Green +Michael Prokop +Michal Minar +Michal Minář +Mike Brown +Miquel Sabaté +Misty Stanley-Jones +Misty Stanley-Jones +Morgan Bauer +moxiegirl +Nathan Sullivan +nevermosby +Nghia Tran +Nikita Tarasov +Noah Treuhaft +Nuutti Kotivuori +Oilbeater +Olivier Gambier +Olivier Jacques +Omer Cohen +Patrick Devine +Phil Estes +Philip Misiowiec +Pierre-Yves Ritschard +Qiao Anran +Randy Barlow +Richard Scothern +Rodolfo Carvalho +Rusty Conover +Sean Boran +Sebastiaan van Stijn +Sebastien Coavoux +Serge Dubrouski +Sharif Nassar +Shawn Falkner-Horine +Shreyas Karnik +Simon Thulbourn +spacexnice +Spencer Rinehart +Stan Hu +Stefan Majewsky +Stefan Weil +Stephen J Day +Sungho Moon +Sven Dowideit +Sylvain Baubeau +Ted Reed +tgic +Thomas Sjögren +Tianon Gravi +Tibor Vass +Tonis Tiigi +Tony Holdstock-Brown +Trevor Pounds +Troels Thomsen +Victor Vieux +Victoria Bialas +Vincent Batts +Vincent Demeester +Vincent Giersch +W. Trevor King +weiyuan.yl +xg.song +xiekeyang +Yann ROBERT +yaoyao.xyy +yuexiao-wang +yuzou +zhouhaibing089 +姜继忠 diff --git a/vendor/github.com/docker/distribution/BUILDING.md b/vendor/github.com/docker/distribution/BUILDING.md new file mode 100644 index 000000000..2d5a10119 --- /dev/null +++ b/vendor/github.com/docker/distribution/BUILDING.md @@ -0,0 +1,119 @@ + +# Building the registry source + +## Use-case + +This is useful if you intend to actively work on the registry. + +### Alternatives + +Most people should use the [official Registry docker image](https://hub.docker.com/r/library/registry/). + +People looking for advanced operational use cases might consider rolling their own image with a custom Dockerfile inheriting `FROM registry:2`. + +OS X users who want to run natively can do so following [the instructions here](https://github.com/docker/docker.github.io/blob/master/registry/recipes/osx-setup-guide.md). + +### Gotchas + +You are expected to know your way around with go & git. + +If you are a casual user with no development experience, and no preliminary knowledge of go, building from source is probably not a good solution for you. + +## Build the development environment + +The first prerequisite of properly building distribution targets is to have a Go +development environment setup. Please follow [How to Write Go Code](https://golang.org/doc/code.html) +for proper setup. If done correctly, you should have a GOROOT and GOPATH set in the +environment. + +If a Go development environment is setup, one can use `go get` to install the +`registry` command from the current latest: + + go get github.com/docker/distribution/cmd/registry + +The above will install the source repository into the `GOPATH`. + +Now create the directory for the registry data (this might require you to set permissions properly) + + mkdir -p /var/lib/registry + +... 
or alternatively `export REGISTRY_STORAGE_FILESYSTEM_ROOTDIRECTORY=/somewhere` if you want to store data into another location. + +The `registry` +binary can then be run with the following: + + $ $GOPATH/bin/registry --version + $GOPATH/bin/registry github.com/docker/distribution v2.0.0-alpha.1+unknown + +> __NOTE:__ While you do not need to use `go get` to checkout the distribution +> project, for these build instructions to work, the project must be checked +> out in the correct location in the `GOPATH`. This should almost always be +> `$GOPATH/src/github.com/docker/distribution`. + +The registry can be run with the default config using the following +incantation: + + $ $GOPATH/bin/registry serve $GOPATH/src/github.com/docker/distribution/cmd/registry/config-example.yml + INFO[0000] endpoint local-5003 disabled, skipping app.id=34bbec38-a91a-494a-9a3f-b72f9010081f version=v2.0.0-alpha.1+unknown + INFO[0000] endpoint local-8083 disabled, skipping app.id=34bbec38-a91a-494a-9a3f-b72f9010081f version=v2.0.0-alpha.1+unknown + INFO[0000] listening on :5000 app.id=34bbec38-a91a-494a-9a3f-b72f9010081f version=v2.0.0-alpha.1+unknown + INFO[0000] debug server listening localhost:5001 + +If it is working, one should see the above log messages. + +### Repeatable Builds + +For the full development experience, one should `cd` into +`$GOPATH/src/github.com/docker/distribution`. From there, the regular `go` +commands, such as `go test`, should work per package (please see +[Developing](#developing) if they don't work). + +A `Makefile` has been provided as a convenience to support repeatable builds. +Please install the following into `GOPATH` for it to work: + + go get github.com/tools/godep github.com/golang/lint/golint + +**TODO(stevvooe):** Add a `make setup` command to Makefile to run this. Have to think about how to interact with Godeps properly. + +Once these commands are available in the `GOPATH`, run `make` to get a full +build: + + $ make + + clean + + fmt + + vet + + lint + + build + github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar + github.com/Sirupsen/logrus + github.com/docker/libtrust + ... + github.com/yvasiyarov/gorelic + github.com/docker/distribution/registry/handlers + github.com/docker/distribution/cmd/registry + + test + ... + ok github.com/docker/distribution/digest 7.875s + ok github.com/docker/distribution/manifest 0.028s + ok github.com/docker/distribution/notifications 17.322s + ? github.com/docker/distribution/registry [no test files] + ok github.com/docker/distribution/registry/api/v2 0.101s + ? github.com/docker/distribution/registry/auth [no test files] + ok github.com/docker/distribution/registry/auth/silly 0.011s + ... + + /Users/sday/go/src/github.com/docker/distribution/bin/registry + + /Users/sday/go/src/github.com/docker/distribution/bin/registry-api-descriptor-template + + binaries + +The above provides a repeatable build using the contents of the vendored +Godeps directory. This includes formatting, vetting, linting, building, +testing and generating tagged binaries. We can verify this worked by running +the registry binary generated in the "./bin" directory: + + $ ./bin/registry -version + ./bin/registry github.com/docker/distribution v2.0.0-alpha.2-80-g16d8b2c.m + +### Optional build tags + +Optional [build tags](http://golang.org/pkg/go/build/) can be provided using +the environment variable `DOCKER_BUILDTAGS`. 
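+For example, a build that enables the optional OSS and GCS storage drivers (the same tags this repository's Dockerfile sets) might look like: + + DOCKER_BUILDTAGS='include_oss include_gcs' make binaries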
diff --git a/vendor/github.com/docker/distribution/CHANGELOG.md b/vendor/github.com/docker/distribution/CHANGELOG.md new file mode 100644 index 000000000..b1a5c6824 --- /dev/null +++ b/vendor/github.com/docker/distribution/CHANGELOG.md @@ -0,0 +1,114 @@ +# Changelog + +## 2.6.1 (2017-04-05) + +#### Registry +- Fix `Forwarded` header handling, revert use of `X-Forwarded-Port` +- Use driver `Stat` for registry health check + +## 2.6.0 (2017-01-18) + +#### Storage +- S3: fixed bug in delete due to read-after-write inconsistency +- S3: allow EC2 IAM roles to be used when authorizing region endpoints +- S3: add Object ACL Support +- S3: fix delete method's notion of subpaths +- S3: use multipart upload API in `Move` method for performance +- S3: add v2 signature signing for legacy S3 clones +- Swift: add simple heuristic to detect incomplete DLOs during read ops +- Swift: support different user and tenant domains +- Swift: bulk deletes in chunks +- Aliyun OSS: fix delete method's notion of subpaths +- Aliyun OSS: optimize data copy after upload finishes +- Azure: close leaking response body +- Fix storage drivers dropping non-EOF errors when listing repositories +- Compare path properly when listing repositories in catalog +- Add a foreign layer URL host whitelist +- Improve catalog enumerate runtime + +#### Registry +- Export `storage.CreateOptions` in top-level package +- Enable notifications to endpoints that use self-signed certificates +- Properly validate multi-URL foreign layers +- Add control over validation of URLs in pushed manifests +- Proxy mode: fix socket leak when pull is cancelled +- Tag service: properly handle error responses on HEAD request +- Support for custom authentication URL in proxying registry +- Add configuration option to disable access logging +- Add notification filtering by target media type +- Manifest: `References()` returns all children +- Honor `X-Forwarded-Port` and Forwarded headers +- Reference: Preserve tag and digest in With* functions +- Add policy configuration for enforcing repository classes + +#### Client +- Changes the client Tags `All()` method to follow links +- Allow registry clients to connect via HTTP2 +- Better handling of OAuth errors in client + +#### Spec +- Manifest: clarify relationship between urls and foreign layers +- Authorization: add support for repository classes + +#### Manifest +- Override media type returned from `Stat()` for existing manifests +- Add plugin mediatype to distribution manifest + +#### Docs +- Document `TOOMANYREQUESTS` error code +- Document required Let's Encrypt port +- Improve documentation around implementation of OAuth2 +- Improve documentation for configuration + +#### Auth +- Add support for registry type in scope +- Add support for using v2 ping challenges for v1 +- Add leeway to JWT `nbf` and `exp` checking +- htpasswd: dynamically parse htpasswd file +- Fix missing auth headers with PATCH HTTP request when pushing to default port + +#### Dockerfile +- Update to go1.7 +- Reorder Dockerfile steps for better layer caching + +#### Notes + +Documentation has moved to the documentation repository at +`github.com/docker/docker.github.io/tree/master/registry` + +The registry is go 1.7 compliant, and passes newer, more restrictive `lint` and `vet` ing. 
+ + +## 2.5.0 (2016-06-14) + +#### Storage +- Ensure uploads directory is cleaned after upload is committed +- Add ability to cap concurrent operations in filesystem driver +- S3: Add 'us-gov-west-1' to the valid region list +- Swift: Handle ceph not returning Last-Modified header for HEAD requests +- Add redirect middleware + +#### Registry +- Add support for blobAccessController middleware +- Add support for layers from foreign sources +- Remove signature store +- Add support for Let's Encrypt +- Correct yaml key names in configuration + +#### Client +- Add option to get content digest from manifest get + +#### Spec +- Update the auth spec scope grammar to reflect the fact that hostnames are optionally supported +- Clarify API documentation around catalog fetch behavior + +#### API +- Support returning HTTP 429 (Too Many Requests) + +#### Documentation +- Update auth documentation examples to show "expires in" as int + +#### Docker Image +- Use Alpine Linux as base image + + diff --git a/vendor/github.com/docker/distribution/CONTRIBUTING.md b/vendor/github.com/docker/distribution/CONTRIBUTING.md new file mode 100644 index 000000000..7cc7aedff --- /dev/null +++ b/vendor/github.com/docker/distribution/CONTRIBUTING.md @@ -0,0 +1,140 @@ +# Contributing to the registry + +## Before reporting an issue... + +### If your problem is with... + + - automated builds + - your account on the [Docker Hub](https://hub.docker.com/) + - any other [Docker Hub](https://hub.docker.com/) issue + +Then please do not report your issue here - you should instead report it to [https://support.docker.com](https://support.docker.com) + +### If you... + + - need help setting up your registry + - can't figure out something + - are not sure what's going on or what your problem is + +Then please do not open an issue here yet - you should first try one of the following support forums: + + - irc: #docker-distribution on freenode + - mailing-list: or https://groups.google.com/a/dockerproject.org/forum/#!forum/distribution + +## Reporting an issue properly + +By following these simple rules you will get better and faster feedback on your issue. + + - search the bugtracker for an already reported issue + +### If you found an issue that describes your problem: + + - please read other user comments first, and confirm this is the same issue: a given error condition might be indicative of different problems - you may also find a workaround in the comments + - please refrain from adding "same thing here" or "+1" comments + - you don't need to comment on an issue to get notified of updates: just hit the "subscribe" button + - comment if you have some new, technical and relevant information to add to the case + - __DO NOT__ comment on closed issues or merged PRs. If you think you have a related problem, open up a new issue and reference the PR or issue. + +### If you have not found an existing issue that describes your problem: + + 1. create a new issue, with a succinct title that describes your issue: + - bad title: "It doesn't work with my docker" + - good title: "Private registry push fail: 400 error with E_INVALID_DIGEST" + 2. copy the output of: + - `docker version` + - `docker info` + - `docker exec registry -version` + 3. copy the command line you used to launch your Registry + 4. restart your docker daemon in debug mode (add `-D` to the daemon launch arguments) + 5. reproduce your problem and get your docker daemon logs showing the error + 6. if relevant, copy your registry logs that show the error + 7. 
provide any relevant detail about your specific Registry configuration (e.g., storage backend used) + 8. indicate if you are using an enterprise proxy, Nginx, or anything else between you and your Registry + +## Contributing a patch for a known bug, or a small correction + +You should follow the basic GitHub workflow: + + 1. fork + 2. commit a change + 3. make sure the tests pass + 4. PR + +Additionally, you must [sign your commits](https://github.com/docker/docker/blob/master/CONTRIBUTING.md#sign-your-work). It's very simple: + + - configure your name with git: `git config user.name "Real Name" && git config user.email mail@example.com` + - sign your commits using `-s`: `git commit -s -m "My commit"` + +Some simple rules to ensure quick merge: + + - clearly point to the issue(s) you want to fix in your PR comment (e.g., `closes #12345`) + - prefer multiple (smaller) PRs addressing individual issues over a big one trying to address multiple issues at once + - if you need to amend your PR following comments, please squash instead of adding more commits + +## Contributing new features + +You are heavily encouraged to first discuss what you want to do. You can do so on the irc channel, or by opening an issue that clearly describes the use case you want to fulfill, or the problem you are trying to solve. + +If this is a major new feature, you should then submit a proposal that describes your technical solution and reasoning. +If you did discuss it first, this will likely be greenlighted very fast. It's advisable to address all feedback on this proposal before starting actual work. + +Then you should submit your implementation, clearly linking to the issue (and possible proposal). + +Your PR will be reviewed by the community, then ultimately by the project maintainers, before being merged. + +It's mandatory to: + + - interact respectfully with other community members and maintainers - more generally, you are expected to abide by the [Docker community rules](https://github.com/docker/docker/blob/master/CONTRIBUTING.md#docker-community-guidelines) + - address maintainers' comments and modify your submission accordingly + - write tests for any new code + +Complying to these simple rules will greatly accelerate the review process, and will ensure you have a pleasant experience in contributing code to the Registry. + +Have a look at a great, successful contribution: the [Swift driver PR](https://github.com/docker/distribution/pull/493) + +## Coding Style + +Unless explicitly stated, we follow all coding guidelines from the Go +community. While some of these standards may seem arbitrary, they somehow seem +to result in a solid, consistent codebase. + +It is possible that the code base does not currently comply with these +guidelines. We are not looking for a massive PR that fixes this, since that +goes against the spirit of the guidelines. All new contributions should make a +best effort to clean up and make the code base better than they left it. +Obviously, apply your best judgement. Remember, the goal here is to make the +code base easier for humans to navigate and understand. Always keep that in +mind when nudging others to comply. + +The rules: + +1. All code should be formatted with `gofmt -s`. +2. All code should pass the default levels of + [`golint`](https://github.com/golang/lint). +3. All code should follow the guidelines covered in [Effective + Go](http://golang.org/doc/effective_go.html) and [Go Code Review + Comments](https://github.com/golang/go/wiki/CodeReviewComments). +4. 
Comment the code. Tell us the why, the history and the context. +5. Document _all_ declarations and methods, even private ones. Declare + expectations, caveats and anything else that may be important. If a type + gets exported, having the comments already there will ensure it's ready. +6. Variable name length should be proportional to its context and no longer. + `noCommaALongVariableNameLikeThisIsNotMoreClearWhenASimpleCommentWouldDo`. + In practice, short methods will have short variable names and globals will + have longer names. +7. No underscores in package names. If you need a compound name, step back, + and re-examine why you need a compound name. If you still think you need a + compound name, lose the underscore. +8. No utils or helpers packages. If a function is not general enough to + warrant its own package, it has not been written generally enough to be a + part of a util package. Just leave it unexported and well-documented. +9. All tests should run with `go test` and outside tooling should not be + required. No, we don't need another unit testing framework. Assertion + packages are acceptable if they provide _real_ incremental value. +10. Even though we call these "rules" above, they are actually just + guidelines. Since you've read all the rules, you now know that. + +If you are having trouble getting into the mood of idiomatic Go, we recommend +reading through [Effective Go](http://golang.org/doc/effective_go.html). The +[Go Blog](http://blog.golang.org/) is also a great resource. Drinking the +kool-aid is a lot easier than going thirsty. diff --git a/vendor/github.com/docker/distribution/Dockerfile b/vendor/github.com/docker/distribution/Dockerfile new file mode 100644 index 000000000..426954a11 --- /dev/null +++ b/vendor/github.com/docker/distribution/Dockerfile @@ -0,0 +1,18 @@ +FROM golang:1.7-alpine + +ENV DISTRIBUTION_DIR /go/src/github.com/docker/distribution +ENV DOCKER_BUILDTAGS include_oss include_gcs + +RUN set -ex \ + && apk add --no-cache make git + +WORKDIR $DISTRIBUTION_DIR +COPY . $DISTRIBUTION_DIR +COPY cmd/registry/config-dev.yml /etc/docker/registry/config.yml + +RUN make PREFIX=/go clean binaries + +VOLUME ["/var/lib/registry"] +EXPOSE 5000 +ENTRYPOINT ["registry"] +CMD ["serve", "/etc/docker/registry/config.yml"] diff --git a/vendor/github.com/docker/distribution/Godeps/Godeps.json b/vendor/github.com/docker/distribution/Godeps/Godeps.json new file mode 100644 index 000000000..dbdd89149 --- /dev/null +++ b/vendor/github.com/docker/distribution/Godeps/Godeps.json @@ -0,0 +1,458 @@ +{ + "ImportPath": "github.com/docker/distribution", + "GoVersion": "go1.6", + "GodepVersion": "v74", + "Packages": [ + "./..." 
+ ], + "Deps": [ + { + "ImportPath": "github.com/Azure/azure-sdk-for-go/storage", + "Comment": "v5.0.0-beta-6-g0b5fe2a", + "Rev": "0b5fe2abe0271ba07049eacaa65922d67c319543" + }, + { + "ImportPath": "github.com/Sirupsen/logrus", + "Comment": "v0.7.3", + "Rev": "55eb11d21d2a31a3cc93838241d04800f52e823d" + }, + { + "ImportPath": "github.com/Sirupsen/logrus/formatters/logstash", + "Comment": "v0.7.3", + "Rev": "55eb11d21d2a31a3cc93838241d04800f52e823d" + }, + { + "ImportPath": "github.com/aws/aws-sdk-go/aws", + "Comment": "v1.2.4", + "Rev": "90dec2183a5f5458ee79cbaf4b8e9ab910bc81a6" + }, + { + "ImportPath": "github.com/aws/aws-sdk-go/aws/awserr", + "Comment": "v1.2.4", + "Rev": "90dec2183a5f5458ee79cbaf4b8e9ab910bc81a6" + }, + { + "ImportPath": "github.com/aws/aws-sdk-go/aws/awsutil", + "Comment": "v1.2.4", + "Rev": "90dec2183a5f5458ee79cbaf4b8e9ab910bc81a6" + }, + { + "ImportPath": "github.com/aws/aws-sdk-go/aws/client", + "Comment": "v1.2.4", + "Rev": "90dec2183a5f5458ee79cbaf4b8e9ab910bc81a6" + }, + { + "ImportPath": "github.com/aws/aws-sdk-go/aws/client/metadata", + "Comment": "v1.2.4", + "Rev": "90dec2183a5f5458ee79cbaf4b8e9ab910bc81a6" + }, + { + "ImportPath": "github.com/aws/aws-sdk-go/aws/corehandlers", + "Comment": "v1.2.4", + "Rev": "90dec2183a5f5458ee79cbaf4b8e9ab910bc81a6" + }, + { + "ImportPath": "github.com/aws/aws-sdk-go/aws/credentials", + "Comment": "v1.2.4", + "Rev": "90dec2183a5f5458ee79cbaf4b8e9ab910bc81a6" + }, + { + "ImportPath": "github.com/aws/aws-sdk-go/aws/credentials/ec2rolecreds", + "Comment": "v1.2.4", + "Rev": "90dec2183a5f5458ee79cbaf4b8e9ab910bc81a6" + }, + { + "ImportPath": "github.com/aws/aws-sdk-go/aws/defaults", + "Comment": "v1.2.4", + "Rev": "90dec2183a5f5458ee79cbaf4b8e9ab910bc81a6" + }, + { + "ImportPath": "github.com/aws/aws-sdk-go/aws/ec2metadata", + "Comment": "v1.2.4", + "Rev": "90dec2183a5f5458ee79cbaf4b8e9ab910bc81a6" + }, + { + "ImportPath": "github.com/aws/aws-sdk-go/aws/request", + "Comment": "v1.2.4", + "Rev": "90dec2183a5f5458ee79cbaf4b8e9ab910bc81a6" + }, + { + "ImportPath": "github.com/aws/aws-sdk-go/aws/session", + "Comment": "v1.2.4", + "Rev": "90dec2183a5f5458ee79cbaf4b8e9ab910bc81a6" + }, + { + "ImportPath": "github.com/aws/aws-sdk-go/aws/signer/v4", + "Comment": "v1.2.4", + "Rev": "90dec2183a5f5458ee79cbaf4b8e9ab910bc81a6" + }, + { + "ImportPath": "github.com/aws/aws-sdk-go/private/endpoints", + "Comment": "v1.2.4", + "Rev": "90dec2183a5f5458ee79cbaf4b8e9ab910bc81a6" + }, + { + "ImportPath": "github.com/aws/aws-sdk-go/private/protocol", + "Comment": "v1.2.4", + "Rev": "90dec2183a5f5458ee79cbaf4b8e9ab910bc81a6" + }, + { + "ImportPath": "github.com/aws/aws-sdk-go/private/protocol/query", + "Comment": "v1.2.4", + "Rev": "90dec2183a5f5458ee79cbaf4b8e9ab910bc81a6" + }, + { + "ImportPath": "github.com/aws/aws-sdk-go/private/protocol/query/queryutil", + "Comment": "v1.2.4", + "Rev": "90dec2183a5f5458ee79cbaf4b8e9ab910bc81a6" + }, + { + "ImportPath": "github.com/aws/aws-sdk-go/private/protocol/rest", + "Comment": "v1.2.4", + "Rev": "90dec2183a5f5458ee79cbaf4b8e9ab910bc81a6" + }, + { + "ImportPath": "github.com/aws/aws-sdk-go/private/protocol/restxml", + "Comment": "v1.2.4", + "Rev": "90dec2183a5f5458ee79cbaf4b8e9ab910bc81a6" + }, + { + "ImportPath": "github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil", + "Comment": "v1.2.4", + "Rev": "90dec2183a5f5458ee79cbaf4b8e9ab910bc81a6" + }, + { + "ImportPath": "github.com/aws/aws-sdk-go/private/waiter", + "Comment": "v1.2.4", + "Rev": "90dec2183a5f5458ee79cbaf4b8e9ab910bc81a6" + }, + { + 
"ImportPath": "github.com/aws/aws-sdk-go/service/cloudfront/sign", + "Comment": "v1.2.4", + "Rev": "90dec2183a5f5458ee79cbaf4b8e9ab910bc81a6" + }, + { + "ImportPath": "github.com/aws/aws-sdk-go/service/s3", + "Comment": "v1.2.4", + "Rev": "90dec2183a5f5458ee79cbaf4b8e9ab910bc81a6" + }, + { + "ImportPath": "github.com/aws/aws-sdk-go/vendor/github.com/go-ini/ini", + "Comment": "v1.2.4", + "Rev": "90dec2183a5f5458ee79cbaf4b8e9ab910bc81a6" + }, + { + "ImportPath": "github.com/aws/aws-sdk-go/vendor/github.com/jmespath/go-jmespath", + "Comment": "v1.2.4", + "Rev": "90dec2183a5f5458ee79cbaf4b8e9ab910bc81a6" + }, + { + "ImportPath": "github.com/bugsnag/bugsnag-go", + "Comment": "v1.0.2-5-gb1d1530", + "Rev": "b1d153021fcd90ca3f080db36bec96dc690fb274" + }, + { + "ImportPath": "github.com/bugsnag/bugsnag-go/errors", + "Comment": "v1.0.2-5-gb1d1530", + "Rev": "b1d153021fcd90ca3f080db36bec96dc690fb274" + }, + { + "ImportPath": "github.com/bugsnag/osext", + "Rev": "0dd3f918b21bec95ace9dc86c7e70266cfc5c702" + }, + { + "ImportPath": "github.com/bugsnag/panicwrap", + "Comment": "1.0.0-2-ge2c2850", + "Rev": "e2c28503fcd0675329da73bf48b33404db873782" + }, + { + "ImportPath": "github.com/denverdino/aliyungo/common", + "Rev": "afedced274aa9a7fcdd47ac97018f0f8db4e5de2" + }, + { + "ImportPath": "github.com/denverdino/aliyungo/oss", + "Rev": "afedced274aa9a7fcdd47ac97018f0f8db4e5de2" + }, + { + "ImportPath": "github.com/denverdino/aliyungo/util", + "Rev": "afedced274aa9a7fcdd47ac97018f0f8db4e5de2" + }, + { + "ImportPath": "github.com/docker/goamz/aws", + "Rev": "f0a21f5b2e12f83a505ecf79b633bb2035cf6f85" + }, + { + "ImportPath": "github.com/docker/goamz/s3", + "Rev": "f0a21f5b2e12f83a505ecf79b633bb2035cf6f85" + }, + { + "ImportPath": "github.com/docker/libtrust", + "Rev": "fa567046d9b14f6aa788882a950d69651d230b21" + }, + { + "ImportPath": "github.com/garyburd/redigo/internal", + "Rev": "535138d7bcd717d6531c701ef5933d98b1866257" + }, + { + "ImportPath": "github.com/garyburd/redigo/redis", + "Rev": "535138d7bcd717d6531c701ef5933d98b1866257" + }, + { + "ImportPath": "github.com/golang/protobuf/proto", + "Rev": "8d92cf5fc15a4382f8964b08e1f42a75c0591aa3" + }, + { + "ImportPath": "github.com/gorilla/context", + "Rev": "14f550f51af52180c2eefed15e5fd18d63c0a64a" + }, + { + "ImportPath": "github.com/gorilla/handlers", + "Rev": "60c7bfde3e33c201519a200a4507a158cc03a17b" + }, + { + "ImportPath": "github.com/gorilla/mux", + "Rev": "e444e69cbd2e2e3e0749a2f3c717cec491552bbf" + }, + { + "ImportPath": "github.com/inconshreveable/mousetrap", + "Rev": "76626ae9c91c4f2a10f34cad8ce83ea42c93bb75" + }, + { + "ImportPath": "github.com/mitchellh/mapstructure", + "Rev": "482a9fd5fa83e8c4e7817413b80f3eb8feec03ef" + }, + { + "ImportPath": "github.com/ncw/swift", + "Rev": "ce444d6d47c51d4dda9202cd38f5094dd8e27e86" + }, + { + "ImportPath": "github.com/ncw/swift/swifttest", + "Rev": "ce444d6d47c51d4dda9202cd38f5094dd8e27e86" + }, + { + "ImportPath": "github.com/spf13/cobra", + "Rev": "312092086bed4968099259622145a0c9ae280064" + }, + { + "ImportPath": "github.com/spf13/pflag", + "Rev": "5644820622454e71517561946e3d94b9f9db6842" + }, + { + "ImportPath": "github.com/stevvooe/resumable", + "Rev": "51ad44105773cafcbe91927f70ac68e1bf78f8b4" + }, + { + "ImportPath": "github.com/stevvooe/resumable/sha256", + "Rev": "51ad44105773cafcbe91927f70ac68e1bf78f8b4" + }, + { + "ImportPath": "github.com/stevvooe/resumable/sha512", + "Rev": "51ad44105773cafcbe91927f70ac68e1bf78f8b4" + }, + { + "ImportPath": "github.com/yvasiyarov/go-metrics", + "Rev": 
"57bccd1ccd43f94bb17fdd8bf3007059b802f85e" + }, + { + "ImportPath": "github.com/yvasiyarov/gorelic", + "Comment": "v0.0.6-8-ga9bba5b", + "Rev": "a9bba5b9ab508a086f9a12b8c51fab68478e2128" + }, + { + "ImportPath": "github.com/yvasiyarov/newrelic_platform_go", + "Rev": "b21fdbd4370f3717f3bbd2bf41c223bc273068e6" + }, + { + "ImportPath": "golang.org/x/crypto/bcrypt", + "Rev": "c10c31b5e94b6f7a0283272dc2bb27163dcea24b" + }, + { + "ImportPath": "golang.org/x/crypto/blowfish", + "Rev": "c10c31b5e94b6f7a0283272dc2bb27163dcea24b" + }, + { + "ImportPath": "golang.org/x/crypto/ocsp", + "Rev": "c10c31b5e94b6f7a0283272dc2bb27163dcea24b" + }, + { + "ImportPath": "golang.org/x/net/context", + "Rev": "4876518f9e71663000c348837735820161a42df7" + }, + { + "ImportPath": "golang.org/x/net/context/ctxhttp", + "Rev": "4876518f9e71663000c348837735820161a42df7" + }, + { + "ImportPath": "golang.org/x/net/http2", + "Rev": "4876518f9e71663000c348837735820161a42df7" + }, + { + "ImportPath": "golang.org/x/net/http2/hpack", + "Rev": "4876518f9e71663000c348837735820161a42df7" + }, + { + "ImportPath": "golang.org/x/net/internal/timeseries", + "Rev": "4876518f9e71663000c348837735820161a42df7" + }, + { + "ImportPath": "golang.org/x/net/trace", + "Rev": "4876518f9e71663000c348837735820161a42df7" + }, + { + "ImportPath": "golang.org/x/oauth2", + "Rev": "045497edb6234273d67dbc25da3f2ddbc4c4cacf" + }, + { + "ImportPath": "golang.org/x/oauth2/google", + "Rev": "045497edb6234273d67dbc25da3f2ddbc4c4cacf" + }, + { + "ImportPath": "golang.org/x/oauth2/internal", + "Rev": "045497edb6234273d67dbc25da3f2ddbc4c4cacf" + }, + { + "ImportPath": "golang.org/x/oauth2/jws", + "Rev": "045497edb6234273d67dbc25da3f2ddbc4c4cacf" + }, + { + "ImportPath": "golang.org/x/oauth2/jwt", + "Rev": "045497edb6234273d67dbc25da3f2ddbc4c4cacf" + }, + { + "ImportPath": "golang.org/x/time/rate", + "Rev": "a4bde12657593d5e90d0533a3e4fd95e635124cb" + }, + { + "ImportPath": "google.golang.org/api/gensupport", + "Rev": "9bf6e6e569ff057f75d9604a46c52928f17d2b54" + }, + { + "ImportPath": "google.golang.org/api/googleapi", + "Rev": "9bf6e6e569ff057f75d9604a46c52928f17d2b54" + }, + { + "ImportPath": "google.golang.org/api/googleapi/internal/uritemplates", + "Rev": "9bf6e6e569ff057f75d9604a46c52928f17d2b54" + }, + { + "ImportPath": "google.golang.org/api/storage/v1", + "Rev": "9bf6e6e569ff057f75d9604a46c52928f17d2b54" + }, + { + "ImportPath": "google.golang.org/appengine", + "Rev": "12d5545dc1cfa6047a286d5e853841b6471f4c19" + }, + { + "ImportPath": "google.golang.org/appengine/internal", + "Rev": "12d5545dc1cfa6047a286d5e853841b6471f4c19" + }, + { + "ImportPath": "google.golang.org/appengine/internal/app_identity", + "Rev": "12d5545dc1cfa6047a286d5e853841b6471f4c19" + }, + { + "ImportPath": "google.golang.org/appengine/internal/base", + "Rev": "12d5545dc1cfa6047a286d5e853841b6471f4c19" + }, + { + "ImportPath": "google.golang.org/appengine/internal/datastore", + "Rev": "12d5545dc1cfa6047a286d5e853841b6471f4c19" + }, + { + "ImportPath": "google.golang.org/appengine/internal/log", + "Rev": "12d5545dc1cfa6047a286d5e853841b6471f4c19" + }, + { + "ImportPath": "google.golang.org/appengine/internal/modules", + "Rev": "12d5545dc1cfa6047a286d5e853841b6471f4c19" + }, + { + "ImportPath": "google.golang.org/appengine/internal/remote_api", + "Rev": "12d5545dc1cfa6047a286d5e853841b6471f4c19" + }, + { + "ImportPath": "google.golang.org/cloud", + "Rev": "975617b05ea8a58727e6c1a06b6161ff4185a9f2" + }, + { + "ImportPath": "google.golang.org/cloud/compute/metadata", + "Rev": 
"975617b05ea8a58727e6c1a06b6161ff4185a9f2" + }, + { + "ImportPath": "google.golang.org/cloud/internal", + "Rev": "975617b05ea8a58727e6c1a06b6161ff4185a9f2" + }, + { + "ImportPath": "google.golang.org/cloud/internal/opts", + "Rev": "975617b05ea8a58727e6c1a06b6161ff4185a9f2" + }, + { + "ImportPath": "google.golang.org/cloud/storage", + "Rev": "975617b05ea8a58727e6c1a06b6161ff4185a9f2" + }, + { + "ImportPath": "google.golang.org/grpc", + "Rev": "d3ddb4469d5a1b949fc7a7da7c1d6a0d1b6de994" + }, + { + "ImportPath": "google.golang.org/grpc/codes", + "Rev": "d3ddb4469d5a1b949fc7a7da7c1d6a0d1b6de994" + }, + { + "ImportPath": "google.golang.org/grpc/credentials", + "Rev": "d3ddb4469d5a1b949fc7a7da7c1d6a0d1b6de994" + }, + { + "ImportPath": "google.golang.org/grpc/grpclog", + "Rev": "d3ddb4469d5a1b949fc7a7da7c1d6a0d1b6de994" + }, + { + "ImportPath": "google.golang.org/grpc/internal", + "Rev": "d3ddb4469d5a1b949fc7a7da7c1d6a0d1b6de994" + }, + { + "ImportPath": "google.golang.org/grpc/metadata", + "Rev": "d3ddb4469d5a1b949fc7a7da7c1d6a0d1b6de994" + }, + { + "ImportPath": "google.golang.org/grpc/naming", + "Rev": "d3ddb4469d5a1b949fc7a7da7c1d6a0d1b6de994" + }, + { + "ImportPath": "google.golang.org/grpc/peer", + "Rev": "d3ddb4469d5a1b949fc7a7da7c1d6a0d1b6de994" + }, + { + "ImportPath": "google.golang.org/grpc/transport", + "Rev": "d3ddb4469d5a1b949fc7a7da7c1d6a0d1b6de994" + }, + { + "ImportPath": "gopkg.in/check.v1", + "Rev": "64131543e7896d5bcc6bd5a76287eb75ea96c673" + }, + { + "ImportPath": "gopkg.in/yaml.v2", + "Rev": "bef53efd0c76e49e6de55ead051f886bea7e9420" + }, + { + "ImportPath": "rsc.io/letsencrypt", + "Rev": "a019c9e6fce0c7132679dea13bd8df7c86ffe26c" + }, + { + "ImportPath": "rsc.io/letsencrypt/vendor/github.com/xenolf/lego/acme", + "Rev": "a019c9e6fce0c7132679dea13bd8df7c86ffe26c" + }, + { + "ImportPath": "rsc.io/letsencrypt/vendor/gopkg.in/square/go-jose.v1", + "Rev": "a019c9e6fce0c7132679dea13bd8df7c86ffe26c" + }, + { + "ImportPath": "rsc.io/letsencrypt/vendor/gopkg.in/square/go-jose.v1/cipher", + "Rev": "a019c9e6fce0c7132679dea13bd8df7c86ffe26c" + }, + { + "ImportPath": "rsc.io/letsencrypt/vendor/gopkg.in/square/go-jose.v1/json", + "Rev": "a019c9e6fce0c7132679dea13bd8df7c86ffe26c" + } + ] +} diff --git a/vendor/github.com/docker/distribution/Godeps/Readme b/vendor/github.com/docker/distribution/Godeps/Readme new file mode 100644 index 000000000..4cdaa53d5 --- /dev/null +++ b/vendor/github.com/docker/distribution/Godeps/Readme @@ -0,0 +1,5 @@ +This directory tree is generated automatically by godep. + +Please do not edit. + +See https://github.com/tools/godep for more information. diff --git a/vendor/github.com/docker/distribution/LICENSE b/vendor/github.com/docker/distribution/LICENSE new file mode 100644 index 000000000..e06d20818 --- /dev/null +++ b/vendor/github.com/docker/distribution/LICENSE @@ -0,0 +1,202 @@ +Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. 
For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "{}" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright {yyyy} {name of copyright owner} + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+ diff --git a/vendor/github.com/docker/distribution/MAINTAINERS b/vendor/github.com/docker/distribution/MAINTAINERS new file mode 100644 index 000000000..bda400150 --- /dev/null +++ b/vendor/github.com/docker/distribution/MAINTAINERS @@ -0,0 +1,58 @@ +# Distribution maintainers file +# +# This file describes who runs the docker/distribution project and how. +# This is a living document - if you see something out of date or missing, speak up! +# +# It is structured to be consumable by both humans and programs. +# To extract its contents programmatically, use any TOML-compliant parser. +# +# This file is compiled into the MAINTAINERS file in docker/opensource. +# +[Org] + [Org."Core maintainers"] + people = [ + "aaronlehmann", + "dmcgowan", + "dmp42", + "richardscothern", + "shykes", + "stevvooe", + ] + +[people] + +# A reference list of all people associated with the project. +# All other sections should refer to people by their canonical key +# in the people section. + + # ADD YOURSELF HERE IN ALPHABETICAL ORDER + + [people.aaronlehmann] + Name = "Aaron Lehmann" + Email = "aaron.lehmann@docker.com" + GitHub = "aaronlehmann" + + [people.dmcgowan] + Name = "Derek McGowan" + Email = "derek@mcgstyle.net" + GitHub = "dmcgowan" + + [people.dmp42] + Name = "Olivier Gambier" + Email = "olivier@docker.com" + GitHub = "dmp42" + + [people.richardscothern] + Name = "Richard Scothern" + Email = "richard.scothern@gmail.com" + GitHub = "richardscothern" + + [people.shykes] + Name = "Solomon Hykes" + Email = "solomon@docker.com" + GitHub = "shykes" + + [people.stevvooe] + Name = "Stephen Day" + Email = "stephen.day@docker.com" + GitHub = "stevvooe" diff --git a/vendor/github.com/docker/distribution/Makefile b/vendor/github.com/docker/distribution/Makefile new file mode 100644 index 000000000..47b8f1d0b --- /dev/null +++ b/vendor/github.com/docker/distribution/Makefile @@ -0,0 +1,109 @@ +# Set an output prefix, which is the local directory if not specified +PREFIX?=$(shell pwd) + + +# Used to populate version variable in main package. +VERSION=$(shell git describe --match 'v[0-9]*' --dirty='.m' --always) + +# Allow turning off function inlining and variable registerization +ifeq (${DISABLE_OPTIMIZATION},true) + GO_GCFLAGS=-gcflags "-N -l" + VERSION:="$(VERSION)-noopt" +endif + +GO_LDFLAGS=-ldflags "-X `go list ./version`.Version=$(VERSION)" + +.PHONY: all build binaries clean dep-restore dep-save dep-validate fmt lint test test-full vet +.DEFAULT: all +all: fmt vet lint build test binaries + +AUTHORS: .mailmap .git/HEAD + git log --format='%aN <%aE>' | sort -fu > $@ + +# This only needs to be generated by hand when cutting full releases. +version/version.go: + ./version/version.sh > $@ + +# Required for go 1.5 to build +GO15VENDOREXPERIMENT := 1 + +# Go files +GOFILES=$(shell find . -type f -name '*.go') + +# Package list +PKGS=$(shell go list -tags "${DOCKER_BUILDTAGS}" ./... 
| grep -v ^github.com/docker/distribution/vendor/) + +# Resolving binary dependencies for specific targets +GOLINT=$(shell which golint || echo '') +GODEP=$(shell which godep || echo '') + +${PREFIX}/bin/registry: $(GOFILES) + @echo "+ $@" + @go build -tags "${DOCKER_BUILDTAGS}" -o $@ ${GO_LDFLAGS} ${GO_GCFLAGS} ./cmd/registry + +${PREFIX}/bin/digest: $(GOFILES) + @echo "+ $@" + @go build -tags "${DOCKER_BUILDTAGS}" -o $@ ${GO_LDFLAGS} ${GO_GCFLAGS} ./cmd/digest + +${PREFIX}/bin/registry-api-descriptor-template: $(GOFILES) + @echo "+ $@" + @go build -o $@ ${GO_LDFLAGS} ${GO_GCFLAGS} ./cmd/registry-api-descriptor-template + +docs/spec/api.md: docs/spec/api.md.tmpl ${PREFIX}/bin/registry-api-descriptor-template + ./bin/registry-api-descriptor-template $< > $@ + +vet: + @echo "+ $@" + @go vet -tags "${DOCKER_BUILDTAGS}" $(PKGS) + +fmt: + @echo "+ $@" + @test -z "$$(gofmt -s -l . 2>&1 | grep -v ^vendor/ | tee /dev/stderr)" || \ + (echo >&2 "+ please format Go code with 'gofmt -s'" && false) + +lint: + @echo "+ $@" + $(if $(GOLINT), , \ + $(error Please install golint: `go get -u github.com/golang/lint/golint`)) + @test -z "$$($(GOLINT) ./... 2>&1 | grep -v ^vendor/ | tee /dev/stderr)" + +build: + @echo "+ $@" + @go build -tags "${DOCKER_BUILDTAGS}" -v ${GO_LDFLAGS} $(PKGS) + +test: + @echo "+ $@" + @go test -test.short -tags "${DOCKER_BUILDTAGS}" $(PKGS) + +test-full: + @echo "+ $@" + @go test -tags "${DOCKER_BUILDTAGS}" $(PKGS) + +binaries: ${PREFIX}/bin/registry ${PREFIX}/bin/digest ${PREFIX}/bin/registry-api-descriptor-template + @echo "+ $@" + +clean: + @echo "+ $@" + @rm -rf "${PREFIX}/bin/registry" "${PREFIX}/bin/digest" "${PREFIX}/bin/registry-api-descriptor-template" + +dep-save: + @echo "+ $@" + $(if $(GODEP), , \ + $(error Please install godep: go get github.com/tools/godep)) + @$(GODEP) save $(PKGS) + +dep-restore: + @echo "+ $@" + $(if $(GODEP), , \ + $(error Please install godep: go get github.com/tools/godep)) + @$(GODEP) restore -v + +dep-validate: dep-restore + @echo "+ $@" + @rm -Rf .vendor.bak + @mv vendor .vendor.bak + @rm -Rf Godeps + @$(GODEP) save ./... + @test -z "$$(diff -r vendor .vendor.bak 2>&1 | tee /dev/stderr)" || \ + (echo >&2 "+ borked dependencies! what you have in Godeps/Godeps.json does not match with what you have in vendor" && false) + @rm -Rf .vendor.bak diff --git a/vendor/github.com/docker/distribution/README.md b/vendor/github.com/docker/distribution/README.md new file mode 100644 index 000000000..a6e8db0fb --- /dev/null +++ b/vendor/github.com/docker/distribution/README.md @@ -0,0 +1,131 @@ +# Distribution + +The Docker toolset to pack, ship, store, and deliver content. + +This repository's main product is the Docker Registry 2.0 implementation +for storing and distributing Docker images. It supersedes the +[docker/docker-registry](https://github.com/docker/docker-registry) +project with a new API design, focused around security and performance. 
+ + + +[![Circle CI](https://circleci.com/gh/docker/distribution/tree/master.svg?style=svg)](https://circleci.com/gh/docker/distribution/tree/master) +[![GoDoc](https://godoc.org/github.com/docker/distribution?status.svg)](https://godoc.org/github.com/docker/distribution) + +This repository contains the following components: + +|**Component** |Description | +|--------------------|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| **registry** | An implementation of the [Docker Registry HTTP API V2](docs/spec/api.md) for use with docker 1.6+. | +| **libraries** | A rich set of libraries for interacting with distribution components. Please see [godoc](https://godoc.org/github.com/docker/distribution) for details. **Note**: These libraries are **unstable**. | +| **specifications** | _Distribution_ related specifications are available in [docs/spec](docs/spec) | +| **documentation** | Docker's full documentation set is available at [docs.docker.com](https://docs.docker.com). This repository [contains the subset](docs/) related just to the registry. | + +### How does this integrate with Docker engine? + +This project should provide an implementation to a V2 API for use in the [Docker +core project](https://github.com/docker/docker). The API should be embeddable +and simplify the process of securely pulling and pushing content from `docker` +daemons. + +### What are the long term goals of the Distribution project? + +The _Distribution_ project has the further long term goal of providing a +secure tool chain for distributing content. The specifications, APIs and tools +should be as useful with Docker as they are without. + +Our goal is to design a professional grade and extensible content distribution +system that allow users to: + +* Enjoy an efficient, secured and reliable way to store, manage, package and + exchange content +* Hack/roll their own on top of healthy open-source components +* Implement their own home made solution through good specs, and solid + extensions mechanism. + +## More about Registry 2.0 + +The new registry implementation provides the following benefits: + +- faster push and pull +- new, more efficient implementation +- simplified deployment +- pluggable storage backend +- webhook notifications + +For information on upcoming functionality, please see [ROADMAP.md](ROADMAP.md). + +### Who needs to deploy a registry? + +By default, Docker users pull images from Docker's public registry instance. +[Installing Docker](https://docs.docker.com/engine/installation/) gives users this +ability. Users can also push images to a repository on Docker's public registry, +if they have a [Docker Hub](https://hub.docker.com/) account. + +For some users and even companies, this default behavior is sufficient. For +others, it is not. + +For example, users with their own software products may want to maintain a +registry for private, company images. Also, you may wish to deploy your own +image repository for images used to test or in continuous integration. For these +use cases and others, [deploying your own registry instance](https://github.com/docker/docker.github.io/blob/master/registry/deploying.md) +may be the better choice. 
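+
+For example, a private registry can be started locally from the official image and used as a
+push/pull target. This is a minimal, illustrative sketch (it assumes Docker is installed and
+uses `alpine` only as a stand-in image), not a substitute for the deployment guide linked above:
+
+```
+docker run -d -p 5000:5000 --restart=always --name registry registry:2
+docker pull alpine:latest
+docker tag alpine:latest localhost:5000/alpine:latest
+docker push localhost:5000/alpine:latest
+```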
+ +### Migration to Registry 2.0 + +For those who have previously deployed their own registry based on the Registry +1.0 implementation and wish to deploy a Registry 2.0 while retaining images, +data migration is required. A tool to assist with migration efforts has been +created. For more information see [docker/migrator] +(https://github.com/docker/migrator). + +## Contribute + +Please see [CONTRIBUTING.md](CONTRIBUTING.md) for details on how to contribute +issues, fixes, and patches to this project. If you are contributing code, see +the instructions for [building a development environment](BUILDING.md). + +## Support + +If any issues are encountered while using the _Distribution_ project, several +avenues are available for support: + + + + + + + + + + + + + + + + + + +
+- IRC: #docker-distribution on FreeNode
+- Issue Tracker: github.com/docker/distribution/issues
+- Google Groups: https://groups.google.com/a/dockerproject.org/forum/#!forum/distribution
+- Mailing List: docker@dockerproject.org
+ + +## License + +This project is distributed under [Apache License, Version 2.0](LICENSE). diff --git a/vendor/github.com/docker/distribution/RELEASE-CHECKLIST.md b/vendor/github.com/docker/distribution/RELEASE-CHECKLIST.md new file mode 100644 index 000000000..49235cecd --- /dev/null +++ b/vendor/github.com/docker/distribution/RELEASE-CHECKLIST.md @@ -0,0 +1,36 @@ +## Registry Release Checklist + +10. Compile release notes detailing features and since the last release. Update the `CHANGELOG.md` file. + +20. Update the version file: `https://github.com/docker/distribution/blob/master/version/version.go` + +30. Update the `MAINTAINERS` (if necessary), `AUTHORS` and `.mailmap` files. + + ``` +make AUTHORS +``` + +40. Create a signed tag. + + Distribution uses semantic versioning. Tags are of the format `vx.y.z[-rcn]` +You will need PGP installed and a PGP key which has been added to your Github account. The comment for the tag should include the release notes. + +50. Push the signed tag + +60. Create a new [release](https://github.com/docker/distribution/releases). In the case of a release candidate, tick the `pre-release` checkbox. + +70. Update the registry binary in [distribution library image repo](https://github.com/docker/distribution-library-image) by running the update script and opening a pull request. + +80. Update the official image. Add the new version in the [official images repo](https://github.com/docker-library/official-images) by appending a new version to the `registry/registry` file with the git hash pointed to by the signed tag. Update the major version to point to the latest version and the minor version to point to new patch release if necessary. +e.g. to release `2.3.1` + + `2.3.1 (new)` + + `2.3.0 -> 2.3.0` can be removed + + `2 -> 2.3.1` + + `2.3 -> 2.3.1` + +90. Build a new distribution/registry image on [Docker hub](https://hub.docker.com/u/distribution/dashboard) by adding a new automated build with the new tag and re-building the images. + diff --git a/vendor/github.com/docker/distribution/ROADMAP.md b/vendor/github.com/docker/distribution/ROADMAP.md new file mode 100644 index 000000000..701127afe --- /dev/null +++ b/vendor/github.com/docker/distribution/ROADMAP.md @@ -0,0 +1,267 @@ +# Roadmap + +The Distribution Project consists of several components, some of which are +still being defined. This document defines the high-level goals of the +project, identifies the current components, and defines the release- +relationship to the Docker Platform. + +* [Distribution Goals](#distribution-goals) +* [Distribution Components](#distribution-components) +* [Project Planning](#project-planning): release-relationship to the Docker Platform. + +This road map is a living document, providing an overview of the goals and +considerations made in respect of the future of the project. + +## Distribution Goals + +- Replace the existing [docker registry](github.com/docker/docker-registry) + implementation as the primary implementation. +- Replace the existing push and pull code in the docker engine with the + distribution package. +- Define a strong data model for distributing docker images +- Provide a flexible distribution tool kit for use in the docker platform +- Unlock new distribution models + +## Distribution Components + +Components of the Distribution Project are managed via github [milestones](https://github.com/docker/distribution/milestones). Upcoming +features and bugfixes for a component will be added to the relevant milestone. 
If a feature or +bugfix is not part of a milestone, it is currently unscheduled for +implementation. + +* [Registry](#registry) +* [Distribution Package](#distribution-package) + +*** + +### Registry + +The new Docker registry is the main portion of the distribution repository. +Registry 2.0 is the first release of the next-generation registry. This was +primarily focused on implementing the [new registry +API](https://github.com/docker/distribution/blob/master/docs/spec/api.md), +with a focus on security and performance. + +Following from the Distribution project goals above, we have a set of goals +for registry v2 that we would like to follow in the design. New features +should be compared against these goals. + +#### Data Storage and Distribution First + +The registry's first goal is to provide a reliable, consistent storage +location for Docker images. The registry should only provide the minimal +amount of indexing required to fetch image data and no more. + +This means we should be selective in new features and API additions, including +those that may require expensive, ever growing indexes. Requests should be +servable in "constant time". + +#### Content Addressability + +All data objects used in the registry API should be content addressable. +Content identifiers should be secure and verifiable. This provides a secure, +reliable base from which to build more advanced content distribution systems. + +#### Content Agnostic + +In the past, changes to the image format would require large changes in Docker +and the Registry. By decoupling the distribution and image format, we can +allow the formats to progress without having to coordinate between the two. +This means that we should be focused on decoupling Docker from the registry +just as much as decoupling the registry from Docker. Such an approach will +allow us to unlock new distribution models that haven't been possible before. + +We can take this further by saying that the new registry should be content +agnostic. The registry provides a model of names, tags, manifests and content +addresses and that model can be used to work with content. + +#### Simplicity + +The new registry should be closer to a microservice component than its +predecessor. This means it should have a narrower API and a low number of +service dependencies. It should be easy to deploy. + +This means that other solutions should be explored before changing the API or +adding extra dependencies. If functionality is required, can it be added as an +extension or companion service. + +#### Extensibility + +The registry should provide extension points to add functionality. By keeping +the scope narrow, but providing the ability to add functionality. + +Features like search, indexing, synchronization and registry explorers fall +into this category. No such feature should be added unless we've found it +impossible to do through an extension. + +#### Active Feature Discussions + +The following are feature discussions that are currently active. + +If you don't see your favorite, unimplemented feature, feel free to contact us +via IRC or the mailing list and we can talk about adding it. The goal here is +to make sure that new features go through a rigid design process before +landing in the registry. + +##### Proxying to other Registries + +A _pull-through caching_ mode exists for the registry, but is restricted from +within the docker client to only mirror the official Docker Hub. 
This functionality +can be expanded when image provenance has been specified and implemented in the +distribution project. + +##### Metadata storage + +Metadata for the registry is currently stored with the manifest and layer data on +the storage backend. While this is a big win for simplicity and reliably maintaining +state, it comes with the cost of consistency and high latency. The mutable registry +metadata operations should be abstracted behind an API which will allow ACID compliant +storage systems to handle metadata. + +##### Peer to Peer transfer + +Discussion has started here: https://docs.google.com/document/d/1rYDpSpJiQWmCQy8Cuiaa3NH-Co33oK_SC9HeXYo87QA/edit + +##### Indexing, Search and Discovery + +The original registry provided some implementation of search for use with +private registries. Support has been elided from V2 since we'd like to both +decouple search functionality from the registry. The makes the registry +simpler to deploy, especially in use cases where search is not needed, and +let's us decouple the image format from the registry. + +There are explorations into using the catalog API and notification system to +build external indexes. The current line of thought is that we will define a +common search API to index and query docker images. Such a system could be run +as a companion to a registry or set of registries to power discovery. + +The main issue with search and discovery is that there are so many ways to +accomplish it. There are two aspects to this project. The first is deciding on +how it will be done, including an API definition that can work with changing +data formats. The second is the process of integrating with `docker search`. +We expect that someone attempts to address the problem with the existing tools +and propose it as a standard search API or uses it to inform a standardization +process. Once this has been explored, we integrate with the docker client. + +Please see the following for more detail: + +- https://github.com/docker/distribution/issues/206 + +##### Deletes + +> __NOTE:__ Deletes are a much asked for feature. Before requesting this +feature or participating in discussion, we ask that you read this section in +full and understand the problems behind deletes. + +While, at first glance, implementing deleting seems simple, there are a number +mitigating factors that make many solutions not ideal or even pathological in +the context of a registry. The following paragraph discuss the background and +approaches that could be applied to arrive at a solution. + +The goal of deletes in any system is to remove unused or unneeded data. Only +data requested for deletion should be removed and no other data. Removing +unintended data is worse than _not_ removing data that was requested for +removal but ideally, both are supported. Generally, according to this rule, we +err on holding data longer than needed, ensuring that it is only removed when +we can be certain that it can be removed. With the current behavior, we opt to +hold onto the data forever, ensuring that data cannot be incorrectly removed. + +To understand the problems with implementing deletes, one must understand the +data model. All registry data is stored in a filesystem layout, implemented on +a "storage driver", effectively a _virtual file system_ (VFS). The storage +system must assume that this VFS layer will be eventually consistent and has +poor read- after-write consistency, since this is the lower common denominator +among the storage drivers. 
This is mitigated by writing values in reverse- +dependent order, but makes wider transactional operations unsafe. + +Layered on the VFS model is a content-addressable _directed, acyclic graph_ +(DAG) made up of blobs. Manifests reference layers. Tags reference manifests. +Since the same data can be referenced by multiple manifests, we only store +data once, even if it is in different repositories. Thus, we have a set of +blobs, referenced by tags and manifests. If we want to delete a blob we need +to be certain that it is no longer referenced by another manifest or tag. When +we delete a manifest, we also can try to delete the referenced blobs. Deciding +whether or not a blob has an active reference is the crux of the problem. + +Conceptually, deleting a manifest and its resources is quite simple. Just find +all the manifests, enumerate the referenced blobs and delete the blobs not in +that set. An astute observer will recognize this as a garbage collection +problem. As with garbage collection in programming languages, this is very +simple when one always has a consistent view. When one adds parallelism and an +inconsistent view of data, it becomes very challenging. + +A simple example can demonstrate this. Let's say we are deleting a manifest +_A_ in one process. We scan the manifest and decide that all the blobs are +ready for deletion. Concurrently, we have another process accepting a new +manifest _B_ referencing one or more blobs from the manifest _A_. Manifest _B_ +is accepted and all the blobs are considered present, so the operation +proceeds. The original process then deletes the referenced blobs, assuming +they were unreferenced. The manifest _B_, which we thought had all of its data +present, can no longer be served by the registry, since the dependent data has +been deleted. + +Deleting data from the registry safely requires some way to coordinate this +operation. The following approaches are being considered: + +- _Reference Counting_ - Maintain a count of references to each blob. This is + challenging for a number of reasons: 1. maintaining a consistent consensus + of reference counts across a set of Registries and 2. Building the initial + list of reference counts for an existing registry. These challenges can be + met with a consensus protocol like Paxos or Raft in the first case and a + necessary but simple scan in the second.. +- _Lock the World GC_ - Halt all writes to the data store. Walk the data store + and find all blob references. Delete all unreferenced blobs. This approach + is very simple but requires disabling writes for a period of time while the + service reads all data. This is slow and expensive but very accurate and + effective. +- _Generational GC_ - Do something similar to above but instead of blocking + writes, writes are sent to another storage backend while reads are broadcast + to the new and old backends. GC is then performed on the read-only portion. + Because writes land in the new backend, the data in the read-only section + can be safely deleted. The main drawbacks of this approach are complexity + and coordination. +- _Centralized Oracle_ - Using a centralized, transactional database, we can + know exactly which data is referenced at any given time. This avoids + coordination problem by managing this data in a single location. We trade + off metadata scalability for simplicity and performance. This is a very good + option for most registry deployments. This would create a bottleneck for + registry metadata. 
However, metadata is generally not the main bottleneck + when serving images. + +Please let us know if other solutions exist that we have yet to enumerate. +Note that for any approach, implementation is a massive consideration. For +example, a mark-sweep based solution may seem simple but the amount of work in +coordination offset the extra work it might take to build a _Centralized +Oracle_. We'll accept proposals for any solution but please coordinate with us +before dropping code. + +At this time, we have traded off simplicity and ease of deployment for disk +space. Simplicity and ease of deployment tend to reduce developer involvement, +which is currently the most expensive resource in software engineering. Taking +on any solution for deletes will greatly effect these factors, trading off +very cheap disk space for a complex deployment and operational story. + +Please see the following issues for more detail: + +- https://github.com/docker/distribution/issues/422 +- https://github.com/docker/distribution/issues/461 +- https://github.com/docker/distribution/issues/462 + +### Distribution Package + +At its core, the Distribution Project is a set of Go packages that make up +Distribution Components. At this time, most of these packages make up the +Registry implementation. + +The package itself is considered unstable. If you're using it, please take care to vendor the dependent version. + +For feature additions, please see the Registry section. In the future, we may break out a +separate Roadmap for distribution-specific features that apply to more than +just the registry. + +*** + +### Project Planning + +An [Open-Source Planning Process](https://github.com/docker/distribution/wiki/Open-Source-Planning-Process) is used to define the Roadmap. [Project Pages](https://github.com/docker/distribution/wiki) define the goals for each Milestone and identify current progress. + diff --git a/vendor/github.com/docker/distribution/blobs.go b/vendor/github.com/docker/distribution/blobs.go new file mode 100644 index 000000000..1f91ae21e --- /dev/null +++ b/vendor/github.com/docker/distribution/blobs.go @@ -0,0 +1,257 @@ +package distribution + +import ( + "errors" + "fmt" + "io" + "net/http" + "time" + + "github.com/docker/distribution/context" + "github.com/docker/distribution/digest" + "github.com/docker/distribution/reference" +) + +var ( + // ErrBlobExists returned when blob already exists + ErrBlobExists = errors.New("blob exists") + + // ErrBlobDigestUnsupported when blob digest is an unsupported version. + ErrBlobDigestUnsupported = errors.New("unsupported blob digest") + + // ErrBlobUnknown when blob is not found. + ErrBlobUnknown = errors.New("unknown blob") + + // ErrBlobUploadUnknown returned when upload is not found. + ErrBlobUploadUnknown = errors.New("blob upload unknown") + + // ErrBlobInvalidLength returned when the blob has an expected length on + // commit, meaning mismatched with the descriptor or an invalid value. + ErrBlobInvalidLength = errors.New("blob invalid length") +) + +// ErrBlobInvalidDigest returned when digest check fails. +type ErrBlobInvalidDigest struct { + Digest digest.Digest + Reason error +} + +func (err ErrBlobInvalidDigest) Error() string { + return fmt.Sprintf("invalid digest for referenced layer: %v, %v", + err.Digest, err.Reason) +} + +// ErrBlobMounted returned when a blob is mounted from another repository +// instead of initiating an upload session. 
+type ErrBlobMounted struct { + From reference.Canonical + Descriptor Descriptor +} + +func (err ErrBlobMounted) Error() string { + return fmt.Sprintf("blob mounted from: %v to: %v", + err.From, err.Descriptor) +} + +// Descriptor describes targeted content. Used in conjunction with a blob +// store, a descriptor can be used to fetch, store and target any kind of +// blob. The struct also describes the wire protocol format. Fields should +// only be added but never changed. +type Descriptor struct { + // MediaType describe the type of the content. All text based formats are + // encoded as utf-8. + MediaType string `json:"mediaType,omitempty"` + + // Size in bytes of content. + Size int64 `json:"size,omitempty"` + + // Digest uniquely identifies the content. A byte stream can be verified + // against against this digest. + Digest digest.Digest `json:"digest,omitempty"` + + // URLs contains the source URLs of this content. + URLs []string `json:"urls,omitempty"` + + // NOTE: Before adding a field here, please ensure that all + // other options have been exhausted. Much of the type relationships + // depend on the simplicity of this type. +} + +// Descriptor returns the descriptor, to make it satisfy the Describable +// interface. Note that implementations of Describable are generally objects +// which can be described, not simply descriptors; this exception is in place +// to make it more convenient to pass actual descriptors to functions that +// expect Describable objects. +func (d Descriptor) Descriptor() Descriptor { + return d +} + +// BlobStatter makes blob descriptors available by digest. The service may +// provide a descriptor of a different digest if the provided digest is not +// canonical. +type BlobStatter interface { + // Stat provides metadata about a blob identified by the digest. If the + // blob is unknown to the describer, ErrBlobUnknown will be returned. + Stat(ctx context.Context, dgst digest.Digest) (Descriptor, error) +} + +// BlobDeleter enables deleting blobs from storage. +type BlobDeleter interface { + Delete(ctx context.Context, dgst digest.Digest) error +} + +// BlobEnumerator enables iterating over blobs from storage +type BlobEnumerator interface { + Enumerate(ctx context.Context, ingester func(dgst digest.Digest) error) error +} + +// BlobDescriptorService manages metadata about a blob by digest. Most +// implementations will not expose such an interface explicitly. Such mappings +// should be maintained by interacting with the BlobIngester. Hence, this is +// left off of BlobService and BlobStore. +type BlobDescriptorService interface { + BlobStatter + + // SetDescriptor assigns the descriptor to the digest. The provided digest and + // the digest in the descriptor must map to identical content but they may + // differ on their algorithm. The descriptor must have the canonical + // digest of the content and the digest algorithm must match the + // annotators canonical algorithm. + // + // Such a facility can be used to map blobs between digest domains, with + // the restriction that the algorithm of the descriptor must match the + // canonical algorithm (ie sha256) of the annotator. + SetDescriptor(ctx context.Context, dgst digest.Digest, desc Descriptor) error + + // Clear enables descriptors to be unlinked + Clear(ctx context.Context, dgst digest.Digest) error +} + +// BlobDescriptorServiceFactory creates middleware for BlobDescriptorService. 
+type BlobDescriptorServiceFactory interface { + BlobAccessController(svc BlobDescriptorService) BlobDescriptorService +} + +// ReadSeekCloser is the primary reader type for blob data, combining +// io.ReadSeeker with io.Closer. +type ReadSeekCloser interface { + io.ReadSeeker + io.Closer +} + +// BlobProvider describes operations for getting blob data. +type BlobProvider interface { + // Get returns the entire blob identified by digest along with the descriptor. + Get(ctx context.Context, dgst digest.Digest) ([]byte, error) + + // Open provides a ReadSeekCloser to the blob identified by the provided + // descriptor. If the blob is not known to the service, an error will be + // returned. + Open(ctx context.Context, dgst digest.Digest) (ReadSeekCloser, error) +} + +// BlobServer can serve blobs via http. +type BlobServer interface { + // ServeBlob attempts to serve the blob, identifed by dgst, via http. The + // service may decide to redirect the client elsewhere or serve the data + // directly. + // + // This handler only issues successful responses, such as 2xx or 3xx, + // meaning it serves data or issues a redirect. If the blob is not + // available, an error will be returned and the caller may still issue a + // response. + // + // The implementation may serve the same blob from a different digest + // domain. The appropriate headers will be set for the blob, unless they + // have already been set by the caller. + ServeBlob(ctx context.Context, w http.ResponseWriter, r *http.Request, dgst digest.Digest) error +} + +// BlobIngester ingests blob data. +type BlobIngester interface { + // Put inserts the content p into the blob service, returning a descriptor + // or an error. + Put(ctx context.Context, mediaType string, p []byte) (Descriptor, error) + + // Create allocates a new blob writer to add a blob to this service. The + // returned handle can be written to and later resumed using an opaque + // identifier. With this approach, one can Close and Resume a BlobWriter + // multiple times until the BlobWriter is committed or cancelled. + Create(ctx context.Context, options ...BlobCreateOption) (BlobWriter, error) + + // Resume attempts to resume a write to a blob, identified by an id. + Resume(ctx context.Context, id string) (BlobWriter, error) +} + +// BlobCreateOption is a general extensible function argument for blob creation +// methods. A BlobIngester may choose to honor any or none of the given +// BlobCreateOptions, which can be specific to the implementation of the +// BlobIngester receiving them. +// TODO (brianbland): unify this with ManifestServiceOption in the future +type BlobCreateOption interface { + Apply(interface{}) error +} + +// CreateOptions is a collection of blob creation modifiers relevant to general +// blob storage intended to be configured by the BlobCreateOption.Apply method. +type CreateOptions struct { + Mount struct { + ShouldMount bool + From reference.Canonical + // Stat allows to pass precalculated descriptor to link and return. + // Blob access check will be skipped if set. + Stat *Descriptor + } +} + +// BlobWriter provides a handle for inserting data into a blob store. +// Instances should be obtained from BlobWriteService.Writer and +// BlobWriteService.Resume. If supported by the store, a writer can be +// recovered with the id. +type BlobWriter interface { + io.WriteCloser + io.ReaderFrom + + // Size returns the number of bytes written to this blob. + Size() int64 + + // ID returns the identifier for this writer. 
The ID can be used with the + // Blob service to later resume the write. + ID() string + + // StartedAt returns the time this blob write was started. + StartedAt() time.Time + + // Commit completes the blob writer process. The content is verified + // against the provided provisional descriptor, which may result in an + // error. Depending on the implementation, written data may be validated + // against the provisional descriptor fields. If MediaType is not present, + // the implementation may reject the commit or assign "application/octet- + // stream" to the blob. The returned descriptor may have a different + // digest depending on the blob store, referred to as the canonical + // descriptor. + Commit(ctx context.Context, provisional Descriptor) (canonical Descriptor, err error) + + // Cancel ends the blob write without storing any data and frees any + // associated resources. Any data written thus far will be lost. Cancel + // implementations should allow multiple calls even after a commit that + // result in a no-op. This allows use of Cancel in a defer statement, + // increasing the assurance that it is correctly called. + Cancel(ctx context.Context) error +} + +// BlobService combines the operations to access, read and write blobs. This +// can be used to describe remote blob services. +type BlobService interface { + BlobStatter + BlobProvider + BlobIngester +} + +// BlobStore represent the entire suite of blob related operations. Such an +// implementation can access, read, write, delete and serve blobs. +type BlobStore interface { + BlobService + BlobServer + BlobDeleter +} diff --git a/vendor/github.com/docker/distribution/circle.yml b/vendor/github.com/docker/distribution/circle.yml new file mode 100644 index 000000000..61f8be0cb --- /dev/null +++ b/vendor/github.com/docker/distribution/circle.yml @@ -0,0 +1,93 @@ +# Pony-up! +machine: + pre: + # Install gvm + - bash < <(curl -s -S -L https://raw.githubusercontent.com/moovweb/gvm/1.0.22/binscripts/gvm-installer) + # Install codecov for coverage + - pip install --user codecov + + post: + # go + - gvm install go1.7 --prefer-binary --name=stable + + environment: + # Convenient shortcuts to "common" locations + CHECKOUT: /home/ubuntu/$CIRCLE_PROJECT_REPONAME + BASE_DIR: src/github.com/$CIRCLE_PROJECT_USERNAME/$CIRCLE_PROJECT_REPONAME + # Trick circle brainflat "no absolute path" behavior + BASE_STABLE: ../../../$HOME/.gvm/pkgsets/stable/global/$BASE_DIR + DOCKER_BUILDTAGS: "include_oss include_gcs" + # Workaround Circle parsing dumb bugs and/or YAML wonkyness + CIRCLE_PAIN: "mode: set" + + hosts: + # Not used yet + fancy: 127.0.0.1 + +dependencies: + pre: + # Copy the code to the gopath of all go versions + - > + gvm use stable && + mkdir -p "$(dirname $BASE_STABLE)" && + cp -R "$CHECKOUT" "$BASE_STABLE" + + override: + # Install dependencies for every copied clone/go version + - gvm use stable && go get github.com/tools/godep: + pwd: $BASE_STABLE + + post: + # For the stable go version, additionally install linting tools + - > + gvm use stable && + go get github.com/axw/gocov/gocov github.com/golang/lint/golint + +test: + pre: + # Output the go versions we are going to test + # - gvm use old && go version + - gvm use stable && go version + + # todo(richard): replace with a more robust vendoring solution. Removed due to a fundamental disagreement in godep philosophies. 
+ # Ensure validation of dependencies + # - gvm use stable && if test -n "`git diff --stat=1000 master | grep -Ei \"vendor|godeps\"`"; then make dep-validate; fi: + # pwd: $BASE_STABLE + + # First thing: build everything. This will catch compile errors, and it's + # also necessary for go vet to work properly (see #807). + - gvm use stable && godep go install $(go list ./... | grep -v "/vendor/"): + pwd: $BASE_STABLE + + # FMT + - gvm use stable && make fmt: + pwd: $BASE_STABLE + + # VET + - gvm use stable && make vet: + pwd: $BASE_STABLE + + # LINT + - gvm use stable && make lint: + pwd: $BASE_STABLE + + override: + # Test stable, and report + - gvm use stable; export ROOT_PACKAGE=$(go list .); go list -tags "$DOCKER_BUILDTAGS" ./... | grep -v "/vendor/" | xargs -L 1 -I{} bash -c 'export PACKAGE={}; godep go test -tags "$DOCKER_BUILDTAGS" -test.short -coverprofile=$GOPATH/src/$PACKAGE/coverage.out -coverpkg=$(./coverpkg.sh $PACKAGE $ROOT_PACKAGE) $PACKAGE': + timeout: 1000 + pwd: $BASE_STABLE + + # Test stable with race + - gvm use stable; export ROOT_PACKAGE=$(go list .); go list -tags "$DOCKER_BUILDTAGS" ./... | grep -v "/vendor/" | grep -v "registry/handlers" | grep -v "registry/storage/driver" | xargs -L 1 -I{} bash -c 'export PACKAGE={}; godep go test -race -tags "$DOCKER_BUILDTAGS" -test.short $PACKAGE': + timeout: 1000 + pwd: $BASE_STABLE + post: + # Report to codecov + - bash <(curl -s https://codecov.io/bash): + pwd: $BASE_STABLE + + ## Notes + # Do we want these as well? + # - go get code.google.com/p/go.tools/cmd/goimports + # - test -z "$(goimports -l -w ./... | tee /dev/stderr)" + # http://labix.org/gocheck diff --git a/vendor/github.com/docker/distribution/cmd/digest/main.go b/vendor/github.com/docker/distribution/cmd/digest/main.go new file mode 100644 index 000000000..49426a886 --- /dev/null +++ b/vendor/github.com/docker/distribution/cmd/digest/main.go @@ -0,0 +1,97 @@ +package main + +import ( + "flag" + "fmt" + "io" + "log" + "os" + + "github.com/docker/distribution/digest" + "github.com/docker/distribution/version" +) + +var ( + algorithm = digest.Canonical + showVersion bool +) + +type job struct { + name string + reader io.Reader +} + +func init() { + flag.Var(&algorithm, "a", "select the digest algorithm (shorthand)") + flag.Var(&algorithm, "algorithm", "select the digest algorithm") + flag.BoolVar(&showVersion, "version", false, "show the version and exit") + + log.SetFlags(0) + log.SetPrefix(os.Args[0] + ": ") +} + +func usage() { + fmt.Fprintf(os.Stderr, "usage: %s [files...]\n", os.Args[0]) + fmt.Fprintf(os.Stderr, ` +Calculate the digest of one or more input files, emitting the result +to standard out. If no files are provided, the digest of stdin will +be calculated. 
+ +`) + flag.PrintDefaults() +} + +func unsupported() { + log.Fatalf("unsupported digest algorithm: %v", algorithm) +} + +func main() { + var jobs []job + + flag.Usage = usage + flag.Parse() + if showVersion { + version.PrintVersion() + return + } + + var fail bool // if we fail on one item, foul the exit code + if flag.NArg() > 0 { + for _, path := range flag.Args() { + fp, err := os.Open(path) + + if err != nil { + log.Printf("%s: %v", path, err) + fail = true + continue + } + defer fp.Close() + + jobs = append(jobs, job{name: path, reader: fp}) + } + } else { + // just read stdin + jobs = append(jobs, job{name: "-", reader: os.Stdin}) + } + + digestFn := algorithm.FromReader + + if !algorithm.Available() { + unsupported() + } + + for _, job := range jobs { + dgst, err := digestFn(job.reader) + if err != nil { + log.Printf("%s: %v", job.name, err) + fail = true + continue + } + + fmt.Printf("%v\t%s\n", dgst, job.name) + } + + if fail { + os.Exit(1) + } +} diff --git a/vendor/github.com/docker/distribution/cmd/registry-api-descriptor-template/main.go b/vendor/github.com/docker/distribution/cmd/registry-api-descriptor-template/main.go new file mode 100644 index 000000000..e9cbc42a4 --- /dev/null +++ b/vendor/github.com/docker/distribution/cmd/registry-api-descriptor-template/main.go @@ -0,0 +1,131 @@ +// registry-api-descriptor-template uses the APIDescriptor defined in the +// api/v2 package to execute templates passed to the command line. +// +// For example, to generate a new API specification, one would execute the +// following command from the repo root: +// +// $ registry-api-descriptor-template docs/spec/api.md.tmpl > docs/spec/api.md +// +// The templates are passed in the api/v2.APIDescriptor object. Please see the +// package documentation for fields available on that object. The template +// syntax is from Go's standard library text/template package. For information +// on Go's template syntax, please see golang.org/pkg/text/template. +package main + +import ( + "log" + "net/http" + "os" + "path/filepath" + "regexp" + "text/template" + + "github.com/docker/distribution/registry/api/errcode" + "github.com/docker/distribution/registry/api/v2" +) + +var spaceRegex = regexp.MustCompile(`\n\s*`) + +func main() { + + if len(os.Args) != 2 { + log.Fatalln("please specify a template to execute.") + } + + path := os.Args[1] + filename := filepath.Base(path) + + funcMap := template.FuncMap{ + "removenewlines": func(s string) string { + return spaceRegex.ReplaceAllString(s, " ") + }, + "statustext": http.StatusText, + "prettygorilla": prettyGorillaMuxPath, + } + + tmpl := template.Must(template.New(filename).Funcs(funcMap).ParseFiles(path)) + + data := struct { + RouteDescriptors []v2.RouteDescriptor + ErrorDescriptors []errcode.ErrorDescriptor + }{ + RouteDescriptors: v2.APIDescriptor.RouteDescriptors, + ErrorDescriptors: append(errcode.GetErrorCodeGroup("registry.api.v2"), + // The following are part of the specification but provided by errcode default. + errcode.ErrorCodeUnauthorized.Descriptor(), + errcode.ErrorCodeDenied.Descriptor(), + errcode.ErrorCodeUnsupported.Descriptor()), + } + + if err := tmpl.Execute(os.Stdout, data); err != nil { + log.Fatalln(err) + } +} + +// prettyGorillaMuxPath removes the regular expressions from a gorilla/mux +// route string, making it suitable for documentation. +func prettyGorillaMuxPath(s string) string { + // Stateful parser that removes regular expressions from gorilla + // routes. It correctly handles balanced bracket pairs. 
+ + var output string + var label string + var level int + +start: + if s[0] == '{' { + s = s[1:] + level++ + goto capture + } + + output += string(s[0]) + s = s[1:] + + goto end +capture: + switch s[0] { + case '{': + level++ + case '}': + level-- + + if level == 0 { + s = s[1:] + goto label + } + case ':': + s = s[1:] + goto skip + default: + label += string(s[0]) + } + s = s[1:] + goto capture +skip: + switch s[0] { + case '{': + level++ + case '}': + level-- + } + s = s[1:] + + if level == 0 { + goto label + } + + goto skip +label: + if label != "" { + output += "<" + label + ">" + label = "" + } +end: + if s != "" { + goto start + } + + return output + +} diff --git a/vendor/github.com/docker/distribution/cmd/registry/config-cache.yml b/vendor/github.com/docker/distribution/cmd/registry/config-cache.yml new file mode 100644 index 000000000..7a274ea59 --- /dev/null +++ b/vendor/github.com/docker/distribution/cmd/registry/config-cache.yml @@ -0,0 +1,55 @@ +version: 0.1 +log: + level: debug + fields: + service: registry + environment: development +storage: + cache: + blobdescriptor: redis + filesystem: + rootdirectory: /var/lib/registry-cache + maintenance: + uploadpurging: + enabled: false +http: + addr: :5000 + secret: asecretforlocaldevelopment + debug: + addr: localhost:5001 + headers: + X-Content-Type-Options: [nosniff] +redis: + addr: localhost:6379 + pool: + maxidle: 16 + maxactive: 64 + idletimeout: 300s + dialtimeout: 10ms + readtimeout: 10ms + writetimeout: 10ms +notifications: + endpoints: + - name: local-8082 + url: http://localhost:5003/callback + headers: + Authorization: [Bearer ] + timeout: 1s + threshold: 10 + backoff: 1s + disabled: true + - name: local-8083 + url: http://localhost:8083/callback + timeout: 1s + threshold: 10 + backoff: 1s + disabled: true +proxy: + remoteurl: https://registry-1.docker.io + username: username + password: password +health: + storagedriver: + enabled: true + interval: 10s + threshold: 3 diff --git a/vendor/github.com/docker/distribution/cmd/registry/config-dev.yml b/vendor/github.com/docker/distribution/cmd/registry/config-dev.yml new file mode 100644 index 000000000..b6438be50 --- /dev/null +++ b/vendor/github.com/docker/distribution/cmd/registry/config-dev.yml @@ -0,0 +1,66 @@ +version: 0.1 +log: + level: debug + fields: + service: registry + environment: development + hooks: + - type: mail + disabled: true + levels: + - panic + options: + smtp: + addr: mail.example.com:25 + username: mailuser + password: password + insecure: true + from: sender@example.com + to: + - errors@example.com +storage: + delete: + enabled: true + cache: + blobdescriptor: redis + filesystem: + rootdirectory: /var/lib/registry + maintenance: + uploadpurging: + enabled: false +http: + addr: :5000 + debug: + addr: localhost:5001 + headers: + X-Content-Type-Options: [nosniff] +redis: + addr: localhost:6379 + pool: + maxidle: 16 + maxactive: 64 + idletimeout: 300s + dialtimeout: 10ms + readtimeout: 10ms + writetimeout: 10ms +notifications: + endpoints: + - name: local-5003 + url: http://localhost:5003/callback + headers: + Authorization: [Bearer ] + timeout: 1s + threshold: 10 + backoff: 1s + disabled: true + - name: local-8083 + url: http://localhost:8083/callback + timeout: 1s + threshold: 10 + backoff: 1s + disabled: true +health: + storagedriver: + enabled: true + interval: 10s + threshold: 3 diff --git a/vendor/github.com/docker/distribution/cmd/registry/config-example.yml b/vendor/github.com/docker/distribution/cmd/registry/config-example.yml new file mode 
100644 index 000000000..3277f9a2e --- /dev/null +++ b/vendor/github.com/docker/distribution/cmd/registry/config-example.yml @@ -0,0 +1,18 @@ +version: 0.1 +log: + fields: + service: registry +storage: + cache: + blobdescriptor: inmemory + filesystem: + rootdirectory: /var/lib/registry +http: + addr: :5000 + headers: + X-Content-Type-Options: [nosniff] +health: + storagedriver: + enabled: true + interval: 10s + threshold: 3 diff --git a/vendor/github.com/docker/distribution/cmd/registry/main.go b/vendor/github.com/docker/distribution/cmd/registry/main.go new file mode 100644 index 000000000..c077a0c18 --- /dev/null +++ b/vendor/github.com/docker/distribution/cmd/registry/main.go @@ -0,0 +1,25 @@ +package main + +import ( + _ "net/http/pprof" + + "github.com/docker/distribution/registry" + _ "github.com/docker/distribution/registry/auth/htpasswd" + _ "github.com/docker/distribution/registry/auth/silly" + _ "github.com/docker/distribution/registry/auth/token" + _ "github.com/docker/distribution/registry/proxy" + _ "github.com/docker/distribution/registry/storage/driver/azure" + _ "github.com/docker/distribution/registry/storage/driver/filesystem" + _ "github.com/docker/distribution/registry/storage/driver/gcs" + _ "github.com/docker/distribution/registry/storage/driver/inmemory" + _ "github.com/docker/distribution/registry/storage/driver/middleware/cloudfront" + _ "github.com/docker/distribution/registry/storage/driver/middleware/redirect" + _ "github.com/docker/distribution/registry/storage/driver/oss" + _ "github.com/docker/distribution/registry/storage/driver/s3-aws" + _ "github.com/docker/distribution/registry/storage/driver/s3-goamz" + _ "github.com/docker/distribution/registry/storage/driver/swift" +) + +func main() { + registry.RootCmd.Execute() +} diff --git a/vendor/github.com/docker/distribution/configuration/configuration.go b/vendor/github.com/docker/distribution/configuration/configuration.go new file mode 100644 index 000000000..ec50a6b18 --- /dev/null +++ b/vendor/github.com/docker/distribution/configuration/configuration.go @@ -0,0 +1,643 @@ +package configuration + +import ( + "fmt" + "io" + "io/ioutil" + "net/http" + "reflect" + "strings" + "time" +) + +// Configuration is a versioned registry configuration, intended to be provided by a yaml file, and +// optionally modified by environment variables. +// +// Note that yaml field names should never include _ characters, since this is the separator used +// in environment variable names. +type Configuration struct { + // Version is the version which defines the format of the rest of the configuration + Version Version `yaml:"version"` + + // Log supports setting various parameters related to the logging + // subsystem. + Log struct { + // AccessLog configures access logging. + AccessLog struct { + // Disabled disables access logging. + Disabled bool `yaml:"disabled,omitempty"` + } `yaml:"accesslog,omitempty"` + + // Level is the granularity at which registry operations are logged. + Level Loglevel `yaml:"level"` + + // Formatter overrides the default formatter with another. Options + // include "text", "json" and "logstash". + Formatter string `yaml:"formatter,omitempty"` + + // Fields allows users to specify static string fields to include in + // the logger context. + Fields map[string]interface{} `yaml:"fields,omitempty"` + + // Hooks allows users to configure the log hooks, to enabling the + // sequent handling behavior, when defined levels of log message emit. 
+ Hooks []LogHook `yaml:"hooks,omitempty"` + } + + // Loglevel is the level at which registry operations are logged. This is + // deprecated. Please use Log.Level in the future. + Loglevel Loglevel `yaml:"loglevel,omitempty"` + + // Storage is the configuration for the registry's storage driver + Storage Storage `yaml:"storage"` + + // Auth allows configuration of various authorization methods that may be + // used to gate requests. + Auth Auth `yaml:"auth,omitempty"` + + // Middleware lists all middlewares to be used by the registry. + Middleware map[string][]Middleware `yaml:"middleware,omitempty"` + + // Reporting is the configuration for error reporting + Reporting Reporting `yaml:"reporting,omitempty"` + + // HTTP contains configuration parameters for the registry's http + // interface. + HTTP struct { + // Addr specifies the bind address for the registry instance. + Addr string `yaml:"addr,omitempty"` + + // Net specifies the net portion of the bind address. A default empty value means tcp. + Net string `yaml:"net,omitempty"` + + // Host specifies an externally-reachable address for the registry, as a fully + // qualified URL. + Host string `yaml:"host,omitempty"` + + Prefix string `yaml:"prefix,omitempty"` + + // Secret specifies the secret key which HMAC tokens are created with. + Secret string `yaml:"secret,omitempty"` + + // RelativeURLs specifies that relative URLs should be returned in + // Location headers + RelativeURLs bool `yaml:"relativeurls,omitempty"` + + // TLS instructs the http server to listen with a TLS configuration. + // This only support simple tls configuration with a cert and key. + // Mostly, this is useful for testing situations or simple deployments + // that require tls. If more complex configurations are required, use + // a proxy or make a proposal to add support here. + TLS struct { + // Certificate specifies the path to an x509 certificate file to + // be used for TLS. + Certificate string `yaml:"certificate,omitempty"` + + // Key specifies the path to the x509 key file, which should + // contain the private portion for the file specified in + // Certificate. + Key string `yaml:"key,omitempty"` + + // Specifies the CA certs for client authentication + // A file may contain multiple CA certificates encoded as PEM + ClientCAs []string `yaml:"clientcas,omitempty"` + + // LetsEncrypt is used to configuration setting up TLS through + // Let's Encrypt instead of manually specifying certificate and + // key. If a TLS certificate is specified, the Let's Encrypt + // section will not be used. + LetsEncrypt struct { + // CacheFile specifies cache file to use for lets encrypt + // certificates and keys. + CacheFile string `yaml:"cachefile,omitempty"` + + // Email is the email to use during Let's Encrypt registration + Email string `yaml:"email,omitempty"` + } `yaml:"letsencrypt,omitempty"` + } `yaml:"tls,omitempty"` + + // Headers is a set of headers to include in HTTP responses. A common + // use case for this would be security headers such as + // Strict-Transport-Security. The map keys are the header names, and + // the values are the associated header payloads. + Headers http.Header `yaml:"headers,omitempty"` + + // Debug configures the http debug interface, if specified. This can + // include services such as pprof, expvar and other data that should + // not be exposed externally. Left disabled by default. + Debug struct { + // Addr specifies the bind address for the debug server. 
+ Addr string `yaml:"addr,omitempty"` + } `yaml:"debug,omitempty"` + + // HTTP2 configuration options + HTTP2 struct { + // Specifies wether the registry should disallow clients attempting + // to connect via http2. If set to true, only http/1.1 is supported. + Disabled bool `yaml:"disabled,omitempty"` + } `yaml:"http2,omitempty"` + } `yaml:"http,omitempty"` + + // Notifications specifies configuration about various endpoint to which + // registry events are dispatched. + Notifications Notifications `yaml:"notifications,omitempty"` + + // Redis configures the redis pool available to the registry webapp. + Redis struct { + // Addr specifies the the redis instance available to the application. + Addr string `yaml:"addr,omitempty"` + + // Password string to use when making a connection. + Password string `yaml:"password,omitempty"` + + // DB specifies the database to connect to on the redis instance. + DB int `yaml:"db,omitempty"` + + DialTimeout time.Duration `yaml:"dialtimeout,omitempty"` // timeout for connect + ReadTimeout time.Duration `yaml:"readtimeout,omitempty"` // timeout for reads of data + WriteTimeout time.Duration `yaml:"writetimeout,omitempty"` // timeout for writes of data + + // Pool configures the behavior of the redis connection pool. + Pool struct { + // MaxIdle sets the maximum number of idle connections. + MaxIdle int `yaml:"maxidle,omitempty"` + + // MaxActive sets the maximum number of connections that should be + // opened before blocking a connection request. + MaxActive int `yaml:"maxactive,omitempty"` + + // IdleTimeout sets the amount time to wait before closing + // inactive connections. + IdleTimeout time.Duration `yaml:"idletimeout,omitempty"` + } `yaml:"pool,omitempty"` + } `yaml:"redis,omitempty"` + + Health Health `yaml:"health,omitempty"` + + Proxy Proxy `yaml:"proxy,omitempty"` + + // Compatibility is used for configurations of working with older or deprecated features. + Compatibility struct { + // Schema1 configures how schema1 manifests will be handled + Schema1 struct { + // TrustKey is the signing key to use for adding the signature to + // schema1 manifests. + TrustKey string `yaml:"signingkeyfile,omitempty"` + } `yaml:"schema1,omitempty"` + } `yaml:"compatibility,omitempty"` + + // Validation configures validation options for the registry. + Validation struct { + // Enabled enables the other options in this section. + Enabled bool `yaml:"enabled,omitempty"` + // Manifests configures manifest validation. + Manifests struct { + // URLs configures validation for URLs in pushed manifests. + URLs struct { + // Allow specifies regular expressions (https://godoc.org/regexp/syntax) + // that URLs in pushed manifests must match. + Allow []string `yaml:"allow,omitempty"` + // Deny specifies regular expressions (https://godoc.org/regexp/syntax) + // that URLs in pushed manifests must not match. + Deny []string `yaml:"deny,omitempty"` + } `yaml:"urls,omitempty"` + } `yaml:"manifests,omitempty"` + } `yaml:"validation,omitempty"` + + // Policy configures registry policy options. + Policy struct { + // Repository configures policies for repositories + Repository struct { + // Classes is a list of repository classes which the + // registry allows content for. This class is matched + // against the configuration media type inside uploaded + // manifests. When non-empty, the registry will enforce + // the class in authorized resources. 
+ Classes []string `yaml:"classes"` + } `yaml:"repository,omitempty"` + } `yaml:"policy,omitempty"` +} + +// LogHook is composed of hook Level and Type. +// After hooks configuration, it can execute the next handling automatically, +// when defined levels of log message emitted. +// Example: hook can sending an email notification when error log happens in app. +type LogHook struct { + // Disable lets user select to enable hook or not. + Disabled bool `yaml:"disabled,omitempty"` + + // Type allows user to select which type of hook handler they want. + Type string `yaml:"type,omitempty"` + + // Levels set which levels of log message will let hook executed. + Levels []string `yaml:"levels,omitempty"` + + // MailOptions allows user to configurate email parameters. + MailOptions MailOptions `yaml:"options,omitempty"` +} + +// MailOptions provides the configuration sections to user, for specific handler. +type MailOptions struct { + SMTP struct { + // Addr defines smtp host address + Addr string `yaml:"addr,omitempty"` + + // Username defines user name to smtp host + Username string `yaml:"username,omitempty"` + + // Password defines password of login user + Password string `yaml:"password,omitempty"` + + // Insecure defines if smtp login skips the secure certification. + Insecure bool `yaml:"insecure,omitempty"` + } `yaml:"smtp,omitempty"` + + // From defines mail sending address + From string `yaml:"from,omitempty"` + + // To defines mail receiving address + To []string `yaml:"to,omitempty"` +} + +// FileChecker is a type of entry in the health section for checking files. +type FileChecker struct { + // Interval is the duration in between checks + Interval time.Duration `yaml:"interval,omitempty"` + // File is the path to check + File string `yaml:"file,omitempty"` + // Threshold is the number of times a check must fail to trigger an + // unhealthy state + Threshold int `yaml:"threshold,omitempty"` +} + +// HTTPChecker is a type of entry in the health section for checking HTTP URIs. +type HTTPChecker struct { + // Timeout is the duration to wait before timing out the HTTP request + Timeout time.Duration `yaml:"timeout,omitempty"` + // StatusCode is the expected status code + StatusCode int + // Interval is the duration in between checks + Interval time.Duration `yaml:"interval,omitempty"` + // URI is the HTTP URI to check + URI string `yaml:"uri,omitempty"` + // Headers lists static headers that should be added to all requests + Headers http.Header `yaml:"headers"` + // Threshold is the number of times a check must fail to trigger an + // unhealthy state + Threshold int `yaml:"threshold,omitempty"` +} + +// TCPChecker is a type of entry in the health section for checking TCP servers. +type TCPChecker struct { + // Timeout is the duration to wait before timing out the TCP connection + Timeout time.Duration `yaml:"timeout,omitempty"` + // Interval is the duration in between checks + Interval time.Duration `yaml:"interval,omitempty"` + // Addr is the TCP address to check + Addr string `yaml:"addr,omitempty"` + // Threshold is the number of times a check must fail to trigger an + // unhealthy state + Threshold int `yaml:"threshold,omitempty"` +} + +// Health provides the configuration section for health checks. 
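Before the Health struct defined just below, a sketch of what one of the checkers above looks like once parsed. The URI, durations and threshold are invented, the snippet assumes the time package is imported, and none of it is part of the vendored file.

```go
// Sketch: an HTTP health check equivalent to a "health: http: ..." stanza.
h := Health{
	HTTPCheckers: []HTTPChecker{{
		URI:       "http://localhost:5001/debug/health",
		Timeout:   3 * time.Second,
		Interval:  10 * time.Second,
		Threshold: 2,
	}},
}
_ = h
```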
+type Health struct { + // FileCheckers is a list of paths to check + FileCheckers []FileChecker `yaml:"file,omitempty"` + // HTTPCheckers is a list of URIs to check + HTTPCheckers []HTTPChecker `yaml:"http,omitempty"` + // TCPCheckers is a list of URIs to check + TCPCheckers []TCPChecker `yaml:"tcp,omitempty"` + // StorageDriver configures a health check on the configured storage + // driver + StorageDriver struct { + // Enabled turns on the health check for the storage driver + Enabled bool `yaml:"enabled,omitempty"` + // Interval is the duration in between checks + Interval time.Duration `yaml:"interval,omitempty"` + // Threshold is the number of times a check must fail to trigger an + // unhealthy state + Threshold int `yaml:"threshold,omitempty"` + } `yaml:"storagedriver,omitempty"` +} + +// v0_1Configuration is a Version 0.1 Configuration struct +// This is currently aliased to Configuration, as it is the current version +type v0_1Configuration Configuration + +// UnmarshalYAML implements the yaml.Unmarshaler interface +// Unmarshals a string of the form X.Y into a Version, validating that X and Y can represent uints +func (version *Version) UnmarshalYAML(unmarshal func(interface{}) error) error { + var versionString string + err := unmarshal(&versionString) + if err != nil { + return err + } + + newVersion := Version(versionString) + if _, err := newVersion.major(); err != nil { + return err + } + + if _, err := newVersion.minor(); err != nil { + return err + } + + *version = newVersion + return nil +} + +// CurrentVersion is the most recent Version that can be parsed +var CurrentVersion = MajorMinorVersion(0, 1) + +// Loglevel is the level at which operations are logged +// This can be error, warn, info, or debug +type Loglevel string + +// UnmarshalYAML implements the yaml.Umarshaler interface +// Unmarshals a string into a Loglevel, lowercasing the string and validating that it represents a +// valid loglevel +func (loglevel *Loglevel) UnmarshalYAML(unmarshal func(interface{}) error) error { + var loglevelString string + err := unmarshal(&loglevelString) + if err != nil { + return err + } + + loglevelString = strings.ToLower(loglevelString) + switch loglevelString { + case "error", "warn", "info", "debug": + default: + return fmt.Errorf("Invalid loglevel %s Must be one of [error, warn, info, debug]", loglevelString) + } + + *loglevel = Loglevel(loglevelString) + return nil +} + +// Parameters defines a key-value parameters mapping +type Parameters map[string]interface{} + +// Storage defines the configuration for registry object storage +type Storage map[string]Parameters + +// Type returns the storage driver type, such as filesystem or s3 +func (storage Storage) Type() string { + var storageType []string + + // Return only key in this map + for k := range storage { + switch k { + case "maintenance": + // allow configuration of maintenance + case "cache": + // allow configuration of caching + case "delete": + // allow configuration of delete + case "redirect": + // allow configuration of redirect + default: + storageType = append(storageType, k) + } + } + if len(storageType) > 1 { + panic("multiple storage drivers specified in configuration or environment: " + strings.Join(storageType, ", ")) + } + if len(storageType) == 1 { + return storageType[0] + } + return "" +} + +// Parameters returns the Parameters map for a Storage configuration +func (storage Storage) Parameters() Parameters { + return storage[storage.Type()] +} + +// setParameter changes the parameter at the provided 
key to the new value +func (storage Storage) setParameter(key string, value interface{}) { + storage[storage.Type()][key] = value +} + +// UnmarshalYAML implements the yaml.Unmarshaler interface +// Unmarshals a single item map into a Storage or a string into a Storage type with no parameters +func (storage *Storage) UnmarshalYAML(unmarshal func(interface{}) error) error { + var storageMap map[string]Parameters + err := unmarshal(&storageMap) + if err == nil { + if len(storageMap) > 1 { + types := make([]string, 0, len(storageMap)) + for k := range storageMap { + switch k { + case "maintenance": + // allow for configuration of maintenance + case "cache": + // allow configuration of caching + case "delete": + // allow configuration of delete + case "redirect": + // allow configuration of redirect + default: + types = append(types, k) + } + } + + if len(types) > 1 { + return fmt.Errorf("Must provide exactly one storage type. Provided: %v", types) + } + } + *storage = storageMap + return nil + } + + var storageType string + err = unmarshal(&storageType) + if err == nil { + *storage = Storage{storageType: Parameters{}} + return nil + } + + return err +} + +// MarshalYAML implements the yaml.Marshaler interface +func (storage Storage) MarshalYAML() (interface{}, error) { + if storage.Parameters() == nil { + return storage.Type(), nil + } + return map[string]Parameters(storage), nil +} + +// Auth defines the configuration for registry authorization. +type Auth map[string]Parameters + +// Type returns the auth type, such as htpasswd or token +func (auth Auth) Type() string { + // Return only key in this map + for k := range auth { + return k + } + return "" +} + +// Parameters returns the Parameters map for an Auth configuration +func (auth Auth) Parameters() Parameters { + return auth[auth.Type()] +} + +// setParameter changes the parameter at the provided key to the new value +func (auth Auth) setParameter(key string, value interface{}) { + auth[auth.Type()][key] = value +} + +// UnmarshalYAML implements the yaml.Unmarshaler interface +// Unmarshals a single item map into a Storage or a string into a Storage type with no parameters +func (auth *Auth) UnmarshalYAML(unmarshal func(interface{}) error) error { + var m map[string]Parameters + err := unmarshal(&m) + if err == nil { + if len(m) > 1 { + types := make([]string, 0, len(m)) + for k := range m { + types = append(types, k) + } + + // TODO(stevvooe): May want to change this slightly for + // authorization to allow multiple challenges. + return fmt.Errorf("must provide exactly one type. Provided: %v", types) + + } + *auth = m + return nil + } + + var authType string + err = unmarshal(&authType) + if err == nil { + *auth = Auth{authType: Parameters{}} + return nil + } + + return err +} + +// MarshalYAML implements the yaml.Marshaler interface +func (auth Auth) MarshalYAML() (interface{}, error) { + if auth.Parameters() == nil { + return auth.Type(), nil + } + return map[string]Parameters(auth), nil +} + +// Notifications configures multiple http endpoints. +type Notifications struct { + // Endpoints is a list of http configurations for endpoints that + // respond to webhook notifications. In the future, we may allow other + // kinds of endpoints, such as external queues. + Endpoints []Endpoint `yaml:"endpoints,omitempty"` +} + +// Endpoint describes the configuration of an http webhook notification +// endpoint. +type Endpoint struct { + Name string `yaml:"name"` // identifies the endpoint in the registry instance. 
+ Disabled bool `yaml:"disabled"` // disables the endpoint + URL string `yaml:"url"` // post url for the endpoint. + Headers http.Header `yaml:"headers"` // static headers that should be added to all requests + Timeout time.Duration `yaml:"timeout"` // HTTP timeout + Threshold int `yaml:"threshold"` // circuit breaker threshold before backing off on failure + Backoff time.Duration `yaml:"backoff"` // backoff duration + IgnoredMediaTypes []string `yaml:"ignoredmediatypes"` // target media types to ignore +} + +// Reporting defines error reporting methods. +type Reporting struct { + // Bugsnag configures error reporting for Bugsnag (bugsnag.com). + Bugsnag BugsnagReporting `yaml:"bugsnag,omitempty"` + // NewRelic configures error reporting for NewRelic (newrelic.com) + NewRelic NewRelicReporting `yaml:"newrelic,omitempty"` +} + +// BugsnagReporting configures error reporting for Bugsnag (bugsnag.com). +type BugsnagReporting struct { + // APIKey is the Bugsnag api key. + APIKey string `yaml:"apikey,omitempty"` + // ReleaseStage tracks where the registry is deployed. + // Examples: production, staging, development + ReleaseStage string `yaml:"releasestage,omitempty"` + // Endpoint is used for specifying an enterprise Bugsnag endpoint. + Endpoint string `yaml:"endpoint,omitempty"` +} + +// NewRelicReporting configures error reporting for NewRelic (newrelic.com) +type NewRelicReporting struct { + // LicenseKey is the NewRelic user license key + LicenseKey string `yaml:"licensekey,omitempty"` + // Name is the component name of the registry in NewRelic + Name string `yaml:"name,omitempty"` + // Verbose configures debug output to STDOUT + Verbose bool `yaml:"verbose,omitempty"` +} + +// Middleware configures named middlewares to be applied at injection points. 
+type Middleware struct { + // Name the middleware registers itself as + Name string `yaml:"name"` + // Flag to disable middleware easily + Disabled bool `yaml:"disabled,omitempty"` + // Map of parameters that will be passed to the middleware's initialization function + Options Parameters `yaml:"options"` +} + +// Proxy configures the registry as a pull through cache +type Proxy struct { + // RemoteURL is the URL of the remote registry + RemoteURL string `yaml:"remoteurl"` + + // Username of the hub user + Username string `yaml:"username"` + + // Password of the hub user + Password string `yaml:"password"` +} + +// Parse parses an input configuration yaml document into a Configuration struct +// This should generally be capable of handling old configuration format versions +// +// Environment variables may be used to override configuration parameters other than version, +// following the scheme below: +// Configuration.Abc may be replaced by the value of REGISTRY_ABC, +// Configuration.Abc.Xyz may be replaced by the value of REGISTRY_ABC_XYZ, and so forth +func Parse(rd io.Reader) (*Configuration, error) { + in, err := ioutil.ReadAll(rd) + if err != nil { + return nil, err + } + + p := NewParser("registry", []VersionedParseInfo{ + { + Version: MajorMinorVersion(0, 1), + ParseAs: reflect.TypeOf(v0_1Configuration{}), + ConversionFunc: func(c interface{}) (interface{}, error) { + if v0_1, ok := c.(*v0_1Configuration); ok { + if v0_1.Loglevel == Loglevel("") { + v0_1.Loglevel = Loglevel("info") + } + if v0_1.Storage.Type() == "" { + return nil, fmt.Errorf("No storage configuration provided") + } + return (*Configuration)(v0_1), nil + } + return nil, fmt.Errorf("Expected *v0_1Configuration, received %#v", c) + }, + }, + }) + + config := new(Configuration) + err = p.Parse(in, config) + if err != nil { + return nil, err + } + + return config, nil +} diff --git a/vendor/github.com/docker/distribution/configuration/configuration_test.go b/vendor/github.com/docker/distribution/configuration/configuration_test.go new file mode 100644 index 000000000..3e1583dd1 --- /dev/null +++ b/vendor/github.com/docker/distribution/configuration/configuration_test.go @@ -0,0 +1,529 @@ +package configuration + +import ( + "bytes" + "net/http" + "os" + "reflect" + "strings" + "testing" + + . 
"gopkg.in/check.v1" + "gopkg.in/yaml.v2" +) + +// Hook up gocheck into the "go test" runner +func Test(t *testing.T) { TestingT(t) } + +// configStruct is a canonical example configuration, which should map to configYamlV0_1 +var configStruct = Configuration{ + Version: "0.1", + Log: struct { + AccessLog struct { + Disabled bool `yaml:"disabled,omitempty"` + } `yaml:"accesslog,omitempty"` + Level Loglevel `yaml:"level"` + Formatter string `yaml:"formatter,omitempty"` + Fields map[string]interface{} `yaml:"fields,omitempty"` + Hooks []LogHook `yaml:"hooks,omitempty"` + }{ + Fields: map[string]interface{}{"environment": "test"}, + }, + Loglevel: "info", + Storage: Storage{ + "s3": Parameters{ + "region": "us-east-1", + "bucket": "my-bucket", + "rootdirectory": "/registry", + "encrypt": true, + "secure": false, + "accesskey": "SAMPLEACCESSKEY", + "secretkey": "SUPERSECRET", + "host": nil, + "port": 42, + }, + }, + Auth: Auth{ + "silly": Parameters{ + "realm": "silly", + "service": "silly", + }, + }, + Reporting: Reporting{ + Bugsnag: BugsnagReporting{ + APIKey: "BugsnagApiKey", + }, + }, + Notifications: Notifications{ + Endpoints: []Endpoint{ + { + Name: "endpoint-1", + URL: "http://example.com", + Headers: http.Header{ + "Authorization": []string{"Bearer "}, + }, + IgnoredMediaTypes: []string{"application/octet-stream"}, + }, + }, + }, + HTTP: struct { + Addr string `yaml:"addr,omitempty"` + Net string `yaml:"net,omitempty"` + Host string `yaml:"host,omitempty"` + Prefix string `yaml:"prefix,omitempty"` + Secret string `yaml:"secret,omitempty"` + RelativeURLs bool `yaml:"relativeurls,omitempty"` + TLS struct { + Certificate string `yaml:"certificate,omitempty"` + Key string `yaml:"key,omitempty"` + ClientCAs []string `yaml:"clientcas,omitempty"` + LetsEncrypt struct { + CacheFile string `yaml:"cachefile,omitempty"` + Email string `yaml:"email,omitempty"` + } `yaml:"letsencrypt,omitempty"` + } `yaml:"tls,omitempty"` + Headers http.Header `yaml:"headers,omitempty"` + Debug struct { + Addr string `yaml:"addr,omitempty"` + } `yaml:"debug,omitempty"` + HTTP2 struct { + Disabled bool `yaml:"disabled,omitempty"` + } `yaml:"http2,omitempty"` + }{ + TLS: struct { + Certificate string `yaml:"certificate,omitempty"` + Key string `yaml:"key,omitempty"` + ClientCAs []string `yaml:"clientcas,omitempty"` + LetsEncrypt struct { + CacheFile string `yaml:"cachefile,omitempty"` + Email string `yaml:"email,omitempty"` + } `yaml:"letsencrypt,omitempty"` + }{ + ClientCAs: []string{"/path/to/ca.pem"}, + }, + Headers: http.Header{ + "X-Content-Type-Options": []string{"nosniff"}, + }, + HTTP2: struct { + Disabled bool `yaml:"disabled,omitempty"` + }{ + Disabled: false, + }, + }, +} + +// configYamlV0_1 is a Version 0.1 yaml document representing configStruct +var configYamlV0_1 = ` +version: 0.1 +log: + fields: + environment: test +loglevel: info +storage: + s3: + region: us-east-1 + bucket: my-bucket + rootdirectory: /registry + encrypt: true + secure: false + accesskey: SAMPLEACCESSKEY + secretkey: SUPERSECRET + host: ~ + port: 42 +auth: + silly: + realm: silly + service: silly +notifications: + endpoints: + - name: endpoint-1 + url: http://example.com + headers: + Authorization: [Bearer ] + ignoredmediatypes: + - application/octet-stream +reporting: + bugsnag: + apikey: BugsnagApiKey +http: + clientcas: + - /path/to/ca.pem + headers: + X-Content-Type-Options: [nosniff] +` + +// inmemoryConfigYamlV0_1 is a Version 0.1 yaml document specifying an inmemory +// storage driver with no parameters +var 
inmemoryConfigYamlV0_1 = ` +version: 0.1 +loglevel: info +storage: inmemory +auth: + silly: + realm: silly + service: silly +notifications: + endpoints: + - name: endpoint-1 + url: http://example.com + headers: + Authorization: [Bearer ] + ignoredmediatypes: + - application/octet-stream +http: + headers: + X-Content-Type-Options: [nosniff] +` + +type ConfigSuite struct { + expectedConfig *Configuration +} + +var _ = Suite(new(ConfigSuite)) + +func (suite *ConfigSuite) SetUpTest(c *C) { + os.Clearenv() + suite.expectedConfig = copyConfig(configStruct) +} + +// TestMarshalRoundtrip validates that configStruct can be marshaled and +// unmarshaled without changing any parameters +func (suite *ConfigSuite) TestMarshalRoundtrip(c *C) { + configBytes, err := yaml.Marshal(suite.expectedConfig) + c.Assert(err, IsNil) + config, err := Parse(bytes.NewReader(configBytes)) + c.Assert(err, IsNil) + c.Assert(config, DeepEquals, suite.expectedConfig) +} + +// TestParseSimple validates that configYamlV0_1 can be parsed into a struct +// matching configStruct +func (suite *ConfigSuite) TestParseSimple(c *C) { + config, err := Parse(bytes.NewReader([]byte(configYamlV0_1))) + c.Assert(err, IsNil) + c.Assert(config, DeepEquals, suite.expectedConfig) +} + +// TestParseInmemory validates that configuration yaml with storage provided as +// a string can be parsed into a Configuration struct with no storage parameters +func (suite *ConfigSuite) TestParseInmemory(c *C) { + suite.expectedConfig.Storage = Storage{"inmemory": Parameters{}} + suite.expectedConfig.Reporting = Reporting{} + suite.expectedConfig.Log.Fields = nil + + config, err := Parse(bytes.NewReader([]byte(inmemoryConfigYamlV0_1))) + c.Assert(err, IsNil) + c.Assert(config, DeepEquals, suite.expectedConfig) +} + +// TestParseIncomplete validates that an incomplete yaml configuration cannot +// be parsed without providing environment variables to fill in the missing +// components. 
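The test below exercises the environment override scheme described in configuration.go, where yaml field names must never contain underscores because "_" is the separator in variable names. As a sketch (the values are the test's own), one of its variables decomposes like this:

```go
// "REGISTRY_STORAGE" selects the storage type; the longer variable then sets a
// parameter on that type: Configuration.Storage["filesystem"]["rootdirectory"].
path := strings.Split("REGISTRY_STORAGE_FILESYSTEM_ROOTDIRECTORY", "_")[1:]
// path == []string{"STORAGE", "FILESYSTEM", "ROOTDIRECTORY"}
_ = path
```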
+func (suite *ConfigSuite) TestParseIncomplete(c *C) { + incompleteConfigYaml := "version: 0.1" + _, err := Parse(bytes.NewReader([]byte(incompleteConfigYaml))) + c.Assert(err, NotNil) + + suite.expectedConfig.Log.Fields = nil + suite.expectedConfig.Storage = Storage{"filesystem": Parameters{"rootdirectory": "/tmp/testroot"}} + suite.expectedConfig.Auth = Auth{"silly": Parameters{"realm": "silly"}} + suite.expectedConfig.Reporting = Reporting{} + suite.expectedConfig.Notifications = Notifications{} + suite.expectedConfig.HTTP.Headers = nil + + // Note: this also tests that REGISTRY_STORAGE and + // REGISTRY_STORAGE_FILESYSTEM_ROOTDIRECTORY can be used together + os.Setenv("REGISTRY_STORAGE", "filesystem") + os.Setenv("REGISTRY_STORAGE_FILESYSTEM_ROOTDIRECTORY", "/tmp/testroot") + os.Setenv("REGISTRY_AUTH", "silly") + os.Setenv("REGISTRY_AUTH_SILLY_REALM", "silly") + + config, err := Parse(bytes.NewReader([]byte(incompleteConfigYaml))) + c.Assert(err, IsNil) + c.Assert(config, DeepEquals, suite.expectedConfig) +} + +// TestParseWithSameEnvStorage validates that providing environment variables +// that match the given storage type will only include environment-defined +// parameters and remove yaml-defined parameters +func (suite *ConfigSuite) TestParseWithSameEnvStorage(c *C) { + suite.expectedConfig.Storage = Storage{"s3": Parameters{"region": "us-east-1"}} + + os.Setenv("REGISTRY_STORAGE", "s3") + os.Setenv("REGISTRY_STORAGE_S3_REGION", "us-east-1") + + config, err := Parse(bytes.NewReader([]byte(configYamlV0_1))) + c.Assert(err, IsNil) + c.Assert(config, DeepEquals, suite.expectedConfig) +} + +// TestParseWithDifferentEnvStorageParams validates that providing environment variables that change +// and add to the given storage parameters will change and add parameters to the parsed +// Configuration struct +func (suite *ConfigSuite) TestParseWithDifferentEnvStorageParams(c *C) { + suite.expectedConfig.Storage.setParameter("region", "us-west-1") + suite.expectedConfig.Storage.setParameter("secure", true) + suite.expectedConfig.Storage.setParameter("newparam", "some Value") + + os.Setenv("REGISTRY_STORAGE_S3_REGION", "us-west-1") + os.Setenv("REGISTRY_STORAGE_S3_SECURE", "true") + os.Setenv("REGISTRY_STORAGE_S3_NEWPARAM", "some Value") + + config, err := Parse(bytes.NewReader([]byte(configYamlV0_1))) + c.Assert(err, IsNil) + c.Assert(config, DeepEquals, suite.expectedConfig) +} + +// TestParseWithDifferentEnvStorageType validates that providing an environment variable that +// changes the storage type will be reflected in the parsed Configuration struct +func (suite *ConfigSuite) TestParseWithDifferentEnvStorageType(c *C) { + suite.expectedConfig.Storage = Storage{"inmemory": Parameters{}} + + os.Setenv("REGISTRY_STORAGE", "inmemory") + + config, err := Parse(bytes.NewReader([]byte(configYamlV0_1))) + c.Assert(err, IsNil) + c.Assert(config, DeepEquals, suite.expectedConfig) +} + +// TestParseWithDifferentEnvStorageTypeAndParams validates that providing an environment variable +// that changes the storage type will be reflected in the parsed Configuration struct and that +// environment storage parameters will also be included +func (suite *ConfigSuite) TestParseWithDifferentEnvStorageTypeAndParams(c *C) { + suite.expectedConfig.Storage = Storage{"filesystem": Parameters{}} + suite.expectedConfig.Storage.setParameter("rootdirectory", "/tmp/testroot") + + os.Setenv("REGISTRY_STORAGE", "filesystem") + os.Setenv("REGISTRY_STORAGE_FILESYSTEM_ROOTDIRECTORY", "/tmp/testroot") + + config, err := 
Parse(bytes.NewReader([]byte(configYamlV0_1))) + c.Assert(err, IsNil) + c.Assert(config, DeepEquals, suite.expectedConfig) +} + +// TestParseWithSameEnvLoglevel validates that providing an environment variable defining the log +// level to the same as the one provided in the yaml will not change the parsed Configuration struct +func (suite *ConfigSuite) TestParseWithSameEnvLoglevel(c *C) { + os.Setenv("REGISTRY_LOGLEVEL", "info") + + config, err := Parse(bytes.NewReader([]byte(configYamlV0_1))) + c.Assert(err, IsNil) + c.Assert(config, DeepEquals, suite.expectedConfig) +} + +// TestParseWithDifferentEnvLoglevel validates that providing an environment variable defining the +// log level will override the value provided in the yaml document +func (suite *ConfigSuite) TestParseWithDifferentEnvLoglevel(c *C) { + suite.expectedConfig.Loglevel = "error" + + os.Setenv("REGISTRY_LOGLEVEL", "error") + + config, err := Parse(bytes.NewReader([]byte(configYamlV0_1))) + c.Assert(err, IsNil) + c.Assert(config, DeepEquals, suite.expectedConfig) +} + +// TestParseInvalidLoglevel validates that the parser will fail to parse a +// configuration if the loglevel is malformed +func (suite *ConfigSuite) TestParseInvalidLoglevel(c *C) { + invalidConfigYaml := "version: 0.1\nloglevel: derp\nstorage: inmemory" + _, err := Parse(bytes.NewReader([]byte(invalidConfigYaml))) + c.Assert(err, NotNil) + + os.Setenv("REGISTRY_LOGLEVEL", "derp") + + _, err = Parse(bytes.NewReader([]byte(configYamlV0_1))) + c.Assert(err, NotNil) + +} + +// TestParseWithDifferentEnvReporting validates that environment variables +// properly override reporting parameters +func (suite *ConfigSuite) TestParseWithDifferentEnvReporting(c *C) { + suite.expectedConfig.Reporting.Bugsnag.APIKey = "anotherBugsnagApiKey" + suite.expectedConfig.Reporting.Bugsnag.Endpoint = "localhost:8080" + suite.expectedConfig.Reporting.NewRelic.LicenseKey = "NewRelicLicenseKey" + suite.expectedConfig.Reporting.NewRelic.Name = "some NewRelic NAME" + + os.Setenv("REGISTRY_REPORTING_BUGSNAG_APIKEY", "anotherBugsnagApiKey") + os.Setenv("REGISTRY_REPORTING_BUGSNAG_ENDPOINT", "localhost:8080") + os.Setenv("REGISTRY_REPORTING_NEWRELIC_LICENSEKEY", "NewRelicLicenseKey") + os.Setenv("REGISTRY_REPORTING_NEWRELIC_NAME", "some NewRelic NAME") + + config, err := Parse(bytes.NewReader([]byte(configYamlV0_1))) + c.Assert(err, IsNil) + c.Assert(config, DeepEquals, suite.expectedConfig) +} + +// TestParseInvalidVersion validates that the parser will fail to parse a newer configuration +// version than the CurrentVersion +func (suite *ConfigSuite) TestParseInvalidVersion(c *C) { + suite.expectedConfig.Version = MajorMinorVersion(CurrentVersion.Major(), CurrentVersion.Minor()+1) + configBytes, err := yaml.Marshal(suite.expectedConfig) + c.Assert(err, IsNil) + _, err = Parse(bytes.NewReader(configBytes)) + c.Assert(err, NotNil) +} + +// TestParseExtraneousVars validates that environment variables referring to +// nonexistent variables don't cause side effects. 
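The parser only honours variables carrying the exact upper-case prefix, which is why the lower-case "registry_..." name set in the test below is expected to be ignored. A minimal sketch of that check:

```go
// Prefix matching is case-sensitive, mirroring the HasPrefix check in parser.go.
considered := strings.HasPrefix("registry_REPORTING_NEWRELIC_LICENSEKEY", "REGISTRY_")
// considered == false
_ = considered
```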
+func (suite *ConfigSuite) TestParseExtraneousVars(c *C) { + suite.expectedConfig.Reporting.Bugsnag.Endpoint = "localhost:8080" + + // A valid environment variable + os.Setenv("REGISTRY_REPORTING_BUGSNAG_ENDPOINT", "localhost:8080") + + // Environment variables which shouldn't set config items + os.Setenv("registry_REPORTING_NEWRELIC_LICENSEKEY", "NewRelicLicenseKey") + os.Setenv("REPORTING_NEWRELIC_NAME", "some NewRelic NAME") + os.Setenv("REGISTRY_DUCKS", "quack") + os.Setenv("REGISTRY_REPORTING_ASDF", "ghjk") + + config, err := Parse(bytes.NewReader([]byte(configYamlV0_1))) + c.Assert(err, IsNil) + c.Assert(config, DeepEquals, suite.expectedConfig) +} + +// TestParseEnvVarImplicitMaps validates that environment variables can set +// values in maps that don't already exist. +func (suite *ConfigSuite) TestParseEnvVarImplicitMaps(c *C) { + readonly := make(map[string]interface{}) + readonly["enabled"] = true + + maintenance := make(map[string]interface{}) + maintenance["readonly"] = readonly + + suite.expectedConfig.Storage["maintenance"] = maintenance + + os.Setenv("REGISTRY_STORAGE_MAINTENANCE_READONLY_ENABLED", "true") + + config, err := Parse(bytes.NewReader([]byte(configYamlV0_1))) + c.Assert(err, IsNil) + c.Assert(config, DeepEquals, suite.expectedConfig) +} + +// TestParseEnvWrongTypeMap validates that incorrectly attempting to unmarshal a +// string over existing map fails. +func (suite *ConfigSuite) TestParseEnvWrongTypeMap(c *C) { + os.Setenv("REGISTRY_STORAGE_S3", "somestring") + + _, err := Parse(bytes.NewReader([]byte(configYamlV0_1))) + c.Assert(err, NotNil) +} + +// TestParseEnvWrongTypeStruct validates that incorrectly attempting to +// unmarshal a string into a struct fails. +func (suite *ConfigSuite) TestParseEnvWrongTypeStruct(c *C) { + os.Setenv("REGISTRY_STORAGE_LOG", "somestring") + + _, err := Parse(bytes.NewReader([]byte(configYamlV0_1))) + c.Assert(err, NotNil) +} + +// TestParseEnvWrongTypeSlice validates that incorrectly attempting to +// unmarshal a string into a slice fails. +func (suite *ConfigSuite) TestParseEnvWrongTypeSlice(c *C) { + os.Setenv("REGISTRY_LOG_HOOKS", "somestring") + + _, err := Parse(bytes.NewReader([]byte(configYamlV0_1))) + c.Assert(err, NotNil) +} + +// TestParseEnvMany tests several environment variable overrides. +// The result is not checked - the goal of this test is to detect panics +// from misuse of reflection. 
+func (suite *ConfigSuite) TestParseEnvMany(c *C) { + os.Setenv("REGISTRY_VERSION", "0.1") + os.Setenv("REGISTRY_LOG_LEVEL", "debug") + os.Setenv("REGISTRY_LOG_FORMATTER", "json") + os.Setenv("REGISTRY_LOG_HOOKS", "json") + os.Setenv("REGISTRY_LOG_FIELDS", "abc: xyz") + os.Setenv("REGISTRY_LOG_HOOKS", "- type: asdf") + os.Setenv("REGISTRY_LOGLEVEL", "debug") + os.Setenv("REGISTRY_STORAGE", "s3") + os.Setenv("REGISTRY_AUTH_PARAMS", "param1: value1") + os.Setenv("REGISTRY_AUTH_PARAMS_VALUE2", "value2") + os.Setenv("REGISTRY_AUTH_PARAMS_VALUE2", "value2") + + _, err := Parse(bytes.NewReader([]byte(configYamlV0_1))) + c.Assert(err, IsNil) +} + +func checkStructs(c *C, t reflect.Type, structsChecked map[string]struct{}) { + for t.Kind() == reflect.Ptr || t.Kind() == reflect.Map || t.Kind() == reflect.Slice { + t = t.Elem() + } + + if t.Kind() != reflect.Struct { + return + } + if _, present := structsChecked[t.String()]; present { + // Already checked this type + return + } + + structsChecked[t.String()] = struct{}{} + + byUpperCase := make(map[string]int) + for i := 0; i < t.NumField(); i++ { + sf := t.Field(i) + + // Check that the yaml tag does not contain an _. + yamlTag := sf.Tag.Get("yaml") + if strings.Contains(yamlTag, "_") { + c.Fatalf("yaml field name includes _ character: %s", yamlTag) + } + upper := strings.ToUpper(sf.Name) + if _, present := byUpperCase[upper]; present { + c.Fatalf("field name collision in configuration object: %s", sf.Name) + } + byUpperCase[upper] = i + + checkStructs(c, sf.Type, structsChecked) + } +} + +// TestValidateConfigStruct makes sure that the config struct has no members +// with yaml tags that would be ambiguous to the environment variable parser. +func (suite *ConfigSuite) TestValidateConfigStruct(c *C) { + structsChecked := make(map[string]struct{}) + checkStructs(c, reflect.TypeOf(Configuration{}), structsChecked) +} + +func copyConfig(config Configuration) *Configuration { + configCopy := new(Configuration) + + configCopy.Version = MajorMinorVersion(config.Version.Major(), config.Version.Minor()) + configCopy.Loglevel = config.Loglevel + configCopy.Log = config.Log + configCopy.Log.Fields = make(map[string]interface{}, len(config.Log.Fields)) + for k, v := range config.Log.Fields { + configCopy.Log.Fields[k] = v + } + + configCopy.Storage = Storage{config.Storage.Type(): Parameters{}} + for k, v := range config.Storage.Parameters() { + configCopy.Storage.setParameter(k, v) + } + configCopy.Reporting = Reporting{ + Bugsnag: BugsnagReporting{config.Reporting.Bugsnag.APIKey, config.Reporting.Bugsnag.ReleaseStage, config.Reporting.Bugsnag.Endpoint}, + NewRelic: NewRelicReporting{config.Reporting.NewRelic.LicenseKey, config.Reporting.NewRelic.Name, config.Reporting.NewRelic.Verbose}, + } + + configCopy.Auth = Auth{config.Auth.Type(): Parameters{}} + for k, v := range config.Auth.Parameters() { + configCopy.Auth.setParameter(k, v) + } + + configCopy.Notifications = Notifications{Endpoints: []Endpoint{}} + for _, v := range config.Notifications.Endpoints { + configCopy.Notifications.Endpoints = append(configCopy.Notifications.Endpoints, v) + } + + configCopy.HTTP.Headers = make(http.Header) + for k, v := range config.HTTP.Headers { + configCopy.HTTP.Headers[k] = v + } + + return configCopy +} diff --git a/vendor/github.com/docker/distribution/configuration/parser.go b/vendor/github.com/docker/distribution/configuration/parser.go new file mode 100644 index 000000000..8b81dd5d5 --- /dev/null +++ 
b/vendor/github.com/docker/distribution/configuration/parser.go @@ -0,0 +1,283 @@ +package configuration + +import ( + "fmt" + "os" + "reflect" + "sort" + "strconv" + "strings" + + "github.com/Sirupsen/logrus" + "gopkg.in/yaml.v2" +) + +// Version is a major/minor version pair of the form Major.Minor +// Major version upgrades indicate structure or type changes +// Minor version upgrades should be strictly additive +type Version string + +// MajorMinorVersion constructs a Version from its Major and Minor components +func MajorMinorVersion(major, minor uint) Version { + return Version(fmt.Sprintf("%d.%d", major, minor)) +} + +func (version Version) major() (uint, error) { + majorPart := strings.Split(string(version), ".")[0] + major, err := strconv.ParseUint(majorPart, 10, 0) + return uint(major), err +} + +// Major returns the major version portion of a Version +func (version Version) Major() uint { + major, _ := version.major() + return major +} + +func (version Version) minor() (uint, error) { + minorPart := strings.Split(string(version), ".")[1] + minor, err := strconv.ParseUint(minorPart, 10, 0) + return uint(minor), err +} + +// Minor returns the minor version portion of a Version +func (version Version) Minor() uint { + minor, _ := version.minor() + return minor +} + +// VersionedParseInfo defines how a specific version of a configuration should +// be parsed into the current version +type VersionedParseInfo struct { + // Version is the version which this parsing information relates to + Version Version + // ParseAs defines the type which a configuration file of this version + // should be parsed into + ParseAs reflect.Type + // ConversionFunc defines a method for converting the parsed configuration + // (of type ParseAs) into the current configuration version + // Note: this method signature is very unclear with the absence of generics + ConversionFunc func(interface{}) (interface{}, error) +} + +type envVar struct { + name string + value string +} + +type envVars []envVar + +func (a envVars) Len() int { return len(a) } +func (a envVars) Swap(i, j int) { a[i], a[j] = a[j], a[i] } +func (a envVars) Less(i, j int) bool { return a[i].name < a[j].name } + +// Parser can be used to parse a configuration file and environment of a defined +// version into a unified output structure +type Parser struct { + prefix string + mapping map[Version]VersionedParseInfo + env envVars +} + +// NewParser returns a *Parser with the given environment prefix which handles +// versioned configurations which match the given parseInfos +func NewParser(prefix string, parseInfos []VersionedParseInfo) *Parser { + p := Parser{prefix: prefix, mapping: make(map[Version]VersionedParseInfo)} + + for _, parseInfo := range parseInfos { + p.mapping[parseInfo.Version] = parseInfo + } + + for _, env := range os.Environ() { + envParts := strings.SplitN(env, "=", 2) + p.env = append(p.env, envVar{envParts[0], envParts[1]}) + } + + // We must sort the environment variables lexically by name so that + // more specific variables are applied before less specific ones + // (i.e. REGISTRY_STORAGE before + // REGISTRY_STORAGE_FILESYSTEM_ROOTDIRECTORY). This sucks, but it's a + // lot simpler and easier to get right than unmarshalling map entries + // into temporaries and merging with the existing entry. 
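To make the ordering concern above concrete, a sketch using the envVar/envVars types defined in this file; the variable names and values are illustrative, not part of the vendored source:

```go
// Lexical order puts the broader variable first, so the storage type is
// replaced before its rootdirectory parameter is written into the new map.
vars := envVars{
	{name: "REGISTRY_STORAGE_FILESYSTEM_ROOTDIRECTORY", value: "/tmp/testroot"},
	{name: "REGISTRY_STORAGE", value: "filesystem"},
}
sort.Sort(vars)
// vars[0].name == "REGISTRY_STORAGE"
```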
+ sort.Sort(p.env) + + return &p +} + +// Parse reads in the given []byte and environment and writes the resulting +// configuration into the input v +// +// Environment variables may be used to override configuration parameters other +// than version, following the scheme below: +// v.Abc may be replaced by the value of PREFIX_ABC, +// v.Abc.Xyz may be replaced by the value of PREFIX_ABC_XYZ, and so forth +func (p *Parser) Parse(in []byte, v interface{}) error { + var versionedStruct struct { + Version Version + } + + if err := yaml.Unmarshal(in, &versionedStruct); err != nil { + return err + } + + parseInfo, ok := p.mapping[versionedStruct.Version] + if !ok { + return fmt.Errorf("Unsupported version: %q", versionedStruct.Version) + } + + parseAs := reflect.New(parseInfo.ParseAs) + err := yaml.Unmarshal(in, parseAs.Interface()) + if err != nil { + return err + } + + for _, envVar := range p.env { + pathStr := envVar.name + if strings.HasPrefix(pathStr, strings.ToUpper(p.prefix)+"_") { + path := strings.Split(pathStr, "_") + + err = p.overwriteFields(parseAs, pathStr, path[1:], envVar.value) + if err != nil { + return err + } + } + } + + c, err := parseInfo.ConversionFunc(parseAs.Interface()) + if err != nil { + return err + } + reflect.ValueOf(v).Elem().Set(reflect.Indirect(reflect.ValueOf(c))) + return nil +} + +// overwriteFields replaces configuration values with alternate values specified +// through the environment. Precondition: an empty path slice must never be +// passed in. +func (p *Parser) overwriteFields(v reflect.Value, fullpath string, path []string, payload string) error { + for v.Kind() == reflect.Ptr { + if v.IsNil() { + panic("encountered nil pointer while handling environment variable " + fullpath) + } + v = reflect.Indirect(v) + } + switch v.Kind() { + case reflect.Struct: + return p.overwriteStruct(v, fullpath, path, payload) + case reflect.Map: + return p.overwriteMap(v, fullpath, path, payload) + case reflect.Interface: + if v.NumMethod() == 0 { + if !v.IsNil() { + return p.overwriteFields(v.Elem(), fullpath, path, payload) + } + // Interface was empty; create an implicit map + var template map[string]interface{} + wrappedV := reflect.MakeMap(reflect.TypeOf(template)) + v.Set(wrappedV) + return p.overwriteMap(wrappedV, fullpath, path, payload) + } + } + return nil +} + +func (p *Parser) overwriteStruct(v reflect.Value, fullpath string, path []string, payload string) error { + // Generate case-insensitive map of struct fields + byUpperCase := make(map[string]int) + for i := 0; i < v.NumField(); i++ { + sf := v.Type().Field(i) + upper := strings.ToUpper(sf.Name) + if _, present := byUpperCase[upper]; present { + panic(fmt.Sprintf("field name collision in configuration object: %s", sf.Name)) + } + byUpperCase[upper] = i + } + + fieldIndex, present := byUpperCase[path[0]] + if !present { + logrus.Warnf("Ignoring unrecognized environment variable %s", fullpath) + return nil + } + field := v.Field(fieldIndex) + sf := v.Type().Field(fieldIndex) + + if len(path) == 1 { + // Env var specifies this field directly + fieldVal := reflect.New(sf.Type) + err := yaml.Unmarshal([]byte(payload), fieldVal.Interface()) + if err != nil { + return err + } + field.Set(reflect.Indirect(fieldVal)) + return nil + } + + // If the field is nil, must create an object + switch sf.Type.Kind() { + case reflect.Map: + if field.IsNil() { + field.Set(reflect.MakeMap(sf.Type)) + } + case reflect.Ptr: + if field.IsNil() { + field.Set(reflect.New(sf.Type)) + } + } + + err := p.overwriteFields(field, 
fullpath, path[1:], payload) + if err != nil { + return err + } + + return nil +} + +func (p *Parser) overwriteMap(m reflect.Value, fullpath string, path []string, payload string) error { + if m.Type().Key().Kind() != reflect.String { + // non-string keys unsupported + logrus.Warnf("Ignoring environment variable %s involving map with non-string keys", fullpath) + return nil + } + + if len(path) > 1 { + // If a matching key exists, get its value and continue the + // overwriting process. + for _, k := range m.MapKeys() { + if strings.ToUpper(k.String()) == path[0] { + mapValue := m.MapIndex(k) + // If the existing value is nil, we want to + // recreate it instead of using this value. + if (mapValue.Kind() == reflect.Ptr || + mapValue.Kind() == reflect.Interface || + mapValue.Kind() == reflect.Map) && + mapValue.IsNil() { + break + } + return p.overwriteFields(mapValue, fullpath, path[1:], payload) + } + } + } + + // (Re)create this key + var mapValue reflect.Value + if m.Type().Elem().Kind() == reflect.Map { + mapValue = reflect.MakeMap(m.Type().Elem()) + } else { + mapValue = reflect.New(m.Type().Elem()) + } + if len(path) > 1 { + err := p.overwriteFields(mapValue, fullpath, path[1:], payload) + if err != nil { + return err + } + } else { + err := yaml.Unmarshal([]byte(payload), mapValue.Interface()) + if err != nil { + return err + } + } + + m.SetMapIndex(reflect.ValueOf(strings.ToLower(path[0])), reflect.Indirect(mapValue)) + + return nil +} diff --git a/vendor/github.com/docker/distribution/context/context.go b/vendor/github.com/docker/distribution/context/context.go new file mode 100644 index 000000000..23cbf5b54 --- /dev/null +++ b/vendor/github.com/docker/distribution/context/context.go @@ -0,0 +1,85 @@ +package context + +import ( + "sync" + + "github.com/docker/distribution/uuid" + "golang.org/x/net/context" +) + +// Context is a copy of Context from the golang.org/x/net/context package. +type Context interface { + context.Context +} + +// instanceContext is a context that provides only an instance id. It is +// provided as the main background context. +type instanceContext struct { + Context + id string // id of context, logged as "instance.id" + once sync.Once // once protect generation of the id +} + +func (ic *instanceContext) Value(key interface{}) interface{} { + if key == "instance.id" { + ic.once.Do(func() { + // We want to lazy initialize the UUID such that we don't + // call a random generator from the package initialization + // code. For various reasons random could not be available + // https://github.com/docker/distribution/issues/782 + ic.id = uuid.Generate().String() + }) + return ic.id + } + + return ic.Context.Value(key) +} + +var background = &instanceContext{ + Context: context.Background(), +} + +// Background returns a non-nil, empty Context. The background context +// provides a single key, "instance.id" that is globally unique to the +// process. +func Background() Context { + return background +} + +// WithValue returns a copy of parent in which the value associated with key is +// val. Use context Values only for request-scoped data that transits processes +// and APIs, not for passing optional parameters to functions. +func WithValue(parent Context, key, val interface{}) Context { + return context.WithValue(parent, key, val) +} + +// stringMapContext is a simple context implementation that checks a map for a +// key, falling back to a parent if not present. 
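Ahead of the map-backed context defined next, a small usage sketch for WithValues; the key and value are arbitrary and not part of the vendored file:

```go
// Only string keys are looked up in the map; anything else falls through to
// the parent, so "instance.id" is still answered by the background context.
ctx := WithValues(Background(), map[string]interface{}{"vars.name": "library/redis"})
_ = ctx.Value("vars.name")   // "library/redis"
_ = ctx.Value("instance.id") // generated by the parent instanceContext
```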
+type stringMapContext struct { + context.Context + m map[string]interface{} +} + +// WithValues returns a context that proxies lookups through a map. Only +// supports string keys. +func WithValues(ctx context.Context, m map[string]interface{}) context.Context { + mo := make(map[string]interface{}, len(m)) // make our own copy. + for k, v := range m { + mo[k] = v + } + + return stringMapContext{ + Context: ctx, + m: mo, + } +} + +func (smc stringMapContext) Value(key interface{}) interface{} { + if ks, ok := key.(string); ok { + if v, ok := smc.m[ks]; ok { + return v + } + } + + return smc.Context.Value(key) +} diff --git a/vendor/github.com/docker/distribution/context/doc.go b/vendor/github.com/docker/distribution/context/doc.go new file mode 100644 index 000000000..3b4ab8882 --- /dev/null +++ b/vendor/github.com/docker/distribution/context/doc.go @@ -0,0 +1,89 @@ +// Package context provides several utilities for working with +// golang.org/x/net/context in http requests. Primarily, the focus is on +// logging relevant request information but this package is not limited to +// that purpose. +// +// The easiest way to get started is to get the background context: +// +// ctx := context.Background() +// +// The returned context should be passed around your application and be the +// root of all other context instances. If the application has a version, this +// line should be called before anything else: +// +// ctx := context.WithVersion(context.Background(), version) +// +// The above will store the version in the context and will be available to +// the logger. +// +// Logging +// +// The most useful aspect of this package is GetLogger. This function takes +// any context.Context interface and returns the current logger from the +// context. Canonical usage looks like this: +// +// GetLogger(ctx).Infof("something interesting happened") +// +// GetLogger also takes optional key arguments. The keys will be looked up in +// the context and reported with the logger. The following example would +// return a logger that prints the version with each log message: +// +// ctx := context.Context(context.Background(), "version", version) +// GetLogger(ctx, "version").Infof("this log message has a version field") +// +// The above would print out a log message like this: +// +// INFO[0000] this log message has a version field version=v2.0.0-alpha.2.m +// +// When used with WithLogger, we gain the ability to decorate the context with +// loggers that have information from disparate parts of the call stack. +// Following from the version example, we can build a new context with the +// configured logger such that we always print the version field: +// +// ctx = WithLogger(ctx, GetLogger(ctx, "version")) +// +// Since the logger has been pushed to the context, we can now get the version +// field for free with our log messages. Future calls to GetLogger on the new +// context will have the version field: +// +// GetLogger(ctx).Infof("this log message has a version field") +// +// This becomes more powerful when we start stacking loggers. Let's say we +// have the version logger from above but also want a request id. 
Using the +// context above, in our request scoped function, we place another logger in +// the context: +// +// ctx = context.WithValue(ctx, "http.request.id", "unique id") // called when building request context +// ctx = WithLogger(ctx, GetLogger(ctx, "http.request.id")) +// +// When GetLogger is called on the new context, "http.request.id" will be +// included as a logger field, along with the original "version" field: +// +// INFO[0000] this log message has a version field http.request.id=unique id version=v2.0.0-alpha.2.m +// +// Note that this only affects the new context, the previous context, with the +// version field, can be used independently. Put another way, the new logger, +// added to the request context, is unique to that context and can have +// request scoped varaibles. +// +// HTTP Requests +// +// This package also contains several methods for working with http requests. +// The concepts are very similar to those described above. We simply place the +// request in the context using WithRequest. This makes the request variables +// available. GetRequestLogger can then be called to get request specific +// variables in a log line: +// +// ctx = WithRequest(ctx, req) +// GetRequestLogger(ctx).Infof("request variables") +// +// Like above, if we want to include the request data in all log messages in +// the context, we push the logger to a new context and use that one: +// +// ctx = WithLogger(ctx, GetRequestLogger(ctx)) +// +// The concept is fairly powerful and ensures that calls throughout the stack +// can be traced in log messages. Using the fields like "http.request.id", one +// can analyze call flow for a particular request with a simple grep of the +// logs. +package context diff --git a/vendor/github.com/docker/distribution/context/http.go b/vendor/github.com/docker/distribution/context/http.go new file mode 100644 index 000000000..7fe9b8ab0 --- /dev/null +++ b/vendor/github.com/docker/distribution/context/http.go @@ -0,0 +1,366 @@ +package context + +import ( + "errors" + "net" + "net/http" + "strings" + "sync" + "time" + + log "github.com/Sirupsen/logrus" + "github.com/docker/distribution/uuid" + "github.com/gorilla/mux" +) + +// Common errors used with this package. +var ( + ErrNoRequestContext = errors.New("no http request in context") + ErrNoResponseWriterContext = errors.New("no http response in context") +) + +func parseIP(ipStr string) net.IP { + ip := net.ParseIP(ipStr) + if ip == nil { + log.Warnf("invalid remote IP address: %q", ipStr) + } + return ip +} + +// RemoteAddr extracts the remote address of the request, taking into +// account proxy headers. +func RemoteAddr(r *http.Request) string { + if prior := r.Header.Get("X-Forwarded-For"); prior != "" { + proxies := strings.Split(prior, ",") + if len(proxies) > 0 { + remoteAddr := strings.Trim(proxies[0], " ") + if parseIP(remoteAddr) != nil { + return remoteAddr + } + } + } + // X-Real-Ip is less supported, but worth checking in the + // absence of X-Forwarded-For + if realIP := r.Header.Get("X-Real-Ip"); realIP != "" { + if parseIP(realIP) != nil { + return realIP + } + } + + return r.RemoteAddr +} + +// RemoteIP extracts the remote IP of the request, taking into +// account proxy headers. +func RemoteIP(r *http.Request) string { + addr := RemoteAddr(r) + + // Try parsing it as "IP:port" + if ip, _, err := net.SplitHostPort(addr); err == nil { + return ip + } + + return addr +} + +// WithRequest places the request on the context. 
The context of the request +// is assigned a unique id, available at "http.request.id". The request itself +// is available at "http.request". Other common attributes are available under +// the prefix "http.request.". If a request is already present on the context, +// this method will panic. +func WithRequest(ctx Context, r *http.Request) Context { + if ctx.Value("http.request") != nil { + // NOTE(stevvooe): This needs to be considered a programming error. It + // is unlikely that we'd want to have more than one request in + // context. + panic("only one request per context") + } + + return &httpRequestContext{ + Context: ctx, + startedAt: time.Now(), + id: uuid.Generate().String(), + r: r, + } +} + +// GetRequest returns the http request in the given context. Returns +// ErrNoRequestContext if the context does not have an http request associated +// with it. +func GetRequest(ctx Context) (*http.Request, error) { + if r, ok := ctx.Value("http.request").(*http.Request); r != nil && ok { + return r, nil + } + return nil, ErrNoRequestContext +} + +// GetRequestID attempts to resolve the current request id, if possible. An +// error is return if it is not available on the context. +func GetRequestID(ctx Context) string { + return GetStringValue(ctx, "http.request.id") +} + +// WithResponseWriter returns a new context and response writer that makes +// interesting response statistics available within the context. +func WithResponseWriter(ctx Context, w http.ResponseWriter) (Context, http.ResponseWriter) { + if closeNotifier, ok := w.(http.CloseNotifier); ok { + irwCN := &instrumentedResponseWriterCN{ + instrumentedResponseWriter: instrumentedResponseWriter{ + ResponseWriter: w, + Context: ctx, + }, + CloseNotifier: closeNotifier, + } + + return irwCN, irwCN + } + + irw := instrumentedResponseWriter{ + ResponseWriter: w, + Context: ctx, + } + return &irw, &irw +} + +// GetResponseWriter returns the http.ResponseWriter from the provided +// context. If not present, ErrNoResponseWriterContext is returned. The +// returned instance provides instrumentation in the context. +func GetResponseWriter(ctx Context) (http.ResponseWriter, error) { + v := ctx.Value("http.response") + + rw, ok := v.(http.ResponseWriter) + if !ok || rw == nil { + return nil, ErrNoResponseWriterContext + } + + return rw, nil +} + +// getVarsFromRequest let's us change request vars implementation for testing +// and maybe future changes. +var getVarsFromRequest = mux.Vars + +// WithVars extracts gorilla/mux vars and makes them available on the returned +// context. Variables are available at keys with the prefix "vars.". For +// example, if looking for the variable "name", it can be accessed as +// "vars.name". Implementations that are accessing values need not know that +// the underlying context is implemented with gorilla/mux vars. +func WithVars(ctx Context, r *http.Request) Context { + return &muxVarsContext{ + Context: ctx, + vars: getVarsFromRequest(r), + } +} + +// GetRequestLogger returns a logger that contains fields from the request in +// the current context. If the request is not available in the context, no +// fields will display. Request loggers can safely be pushed onto the context. 
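
A rough sketch of how these request helpers compose inside an HTTP handler, assuming the package is imported as dcontext; the handler, port, and log messages are illustrative, and GetResponseLogger is defined just below:

    package main

    import (
        "net/http"

        dcontext "github.com/docker/distribution/context"
    )

    func handler(w http.ResponseWriter, r *http.Request) {
        // Attach the request; "http.request.*" keys become resolvable
        // and a unique "http.request.id" is generated.
        ctx := dcontext.WithRequest(dcontext.Background(), r)

        // Wrap the response writer so status and size can be read back later.
        ctx, w = dcontext.WithResponseWriter(ctx, w)

        // Logger decorated with the request fields.
        dcontext.GetRequestLogger(ctx).Infof("handling request")

        w.WriteHeader(http.StatusNoContent)

        // Read response stats only at the end of the request.
        dcontext.GetResponseLogger(ctx).Infof("request complete")
    }

    func main() {
        http.HandleFunc("/", handler)
        _ = http.ListenAndServe(":8080", nil)
    }
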
+func GetRequestLogger(ctx Context) Logger { + return GetLogger(ctx, + "http.request.id", + "http.request.method", + "http.request.host", + "http.request.uri", + "http.request.referer", + "http.request.useragent", + "http.request.remoteaddr", + "http.request.contenttype") +} + +// GetResponseLogger reads the current response stats and builds a logger. +// Because the values are read at call time, pushing a logger returned from +// this function on the context will lead to missing or invalid data. Only +// call this at the end of a request, after the response has been written. +func GetResponseLogger(ctx Context) Logger { + l := getLogrusLogger(ctx, + "http.response.written", + "http.response.status", + "http.response.contenttype") + + duration := Since(ctx, "http.request.startedat") + + if duration > 0 { + l = l.WithField("http.response.duration", duration.String()) + } + + return l +} + +// httpRequestContext makes information about a request available to context. +type httpRequestContext struct { + Context + + startedAt time.Time + id string + r *http.Request +} + +// Value returns a keyed element of the request for use in the context. To get +// the request itself, query "request". For other components, access them as +// "request.". For example, r.RequestURI +func (ctx *httpRequestContext) Value(key interface{}) interface{} { + if keyStr, ok := key.(string); ok { + if keyStr == "http.request" { + return ctx.r + } + + if !strings.HasPrefix(keyStr, "http.request.") { + goto fallback + } + + parts := strings.Split(keyStr, ".") + + if len(parts) != 3 { + goto fallback + } + + switch parts[2] { + case "uri": + return ctx.r.RequestURI + case "remoteaddr": + return RemoteAddr(ctx.r) + case "method": + return ctx.r.Method + case "host": + return ctx.r.Host + case "referer": + referer := ctx.r.Referer() + if referer != "" { + return referer + } + case "useragent": + return ctx.r.UserAgent() + case "id": + return ctx.id + case "startedat": + return ctx.startedAt + case "contenttype": + ct := ctx.r.Header.Get("Content-Type") + if ct != "" { + return ct + } + } + } + +fallback: + return ctx.Context.Value(key) +} + +type muxVarsContext struct { + Context + vars map[string]string +} + +func (ctx *muxVarsContext) Value(key interface{}) interface{} { + if keyStr, ok := key.(string); ok { + if keyStr == "vars" { + return ctx.vars + } + + if strings.HasPrefix(keyStr, "vars.") { + keyStr = strings.TrimPrefix(keyStr, "vars.") + } + + if v, ok := ctx.vars[keyStr]; ok { + return v + } + } + + return ctx.Context.Value(key) +} + +// instrumentedResponseWriterCN provides response writer information in a +// context. It implements http.CloseNotifier so that users can detect +// early disconnects. +type instrumentedResponseWriterCN struct { + instrumentedResponseWriter + http.CloseNotifier +} + +// instrumentedResponseWriter provides response writer information in a +// context. This variant is only used in the case where CloseNotifier is not +// implemented by the parent ResponseWriter. +type instrumentedResponseWriter struct { + http.ResponseWriter + Context + + mu sync.Mutex + status int + written int64 +} + +func (irw *instrumentedResponseWriter) Write(p []byte) (n int, err error) { + n, err = irw.ResponseWriter.Write(p) + + irw.mu.Lock() + irw.written += int64(n) + + // Guess the likely status if not set. 
+ if irw.status == 0 { + irw.status = http.StatusOK + } + + irw.mu.Unlock() + + return +} + +func (irw *instrumentedResponseWriter) WriteHeader(status int) { + irw.ResponseWriter.WriteHeader(status) + + irw.mu.Lock() + irw.status = status + irw.mu.Unlock() +} + +func (irw *instrumentedResponseWriter) Flush() { + if flusher, ok := irw.ResponseWriter.(http.Flusher); ok { + flusher.Flush() + } +} + +func (irw *instrumentedResponseWriter) Value(key interface{}) interface{} { + if keyStr, ok := key.(string); ok { + if keyStr == "http.response" { + return irw + } + + if !strings.HasPrefix(keyStr, "http.response.") { + goto fallback + } + + parts := strings.Split(keyStr, ".") + + if len(parts) != 3 { + goto fallback + } + + irw.mu.Lock() + defer irw.mu.Unlock() + + switch parts[2] { + case "written": + return irw.written + case "status": + return irw.status + case "contenttype": + contentType := irw.Header().Get("Content-Type") + if contentType != "" { + return contentType + } + } + } + +fallback: + return irw.Context.Value(key) +} + +func (irw *instrumentedResponseWriterCN) Value(key interface{}) interface{} { + if keyStr, ok := key.(string); ok { + if keyStr == "http.response" { + return irw + } + } + + return irw.instrumentedResponseWriter.Value(key) +} diff --git a/vendor/github.com/docker/distribution/context/http_test.go b/vendor/github.com/docker/distribution/context/http_test.go new file mode 100644 index 000000000..3d4b3c8eb --- /dev/null +++ b/vendor/github.com/docker/distribution/context/http_test.go @@ -0,0 +1,285 @@ +package context + +import ( + "net/http" + "net/http/httptest" + "net/http/httputil" + "net/url" + "reflect" + "testing" + "time" +) + +func TestWithRequest(t *testing.T) { + var req http.Request + + start := time.Now() + req.Method = "GET" + req.Host = "example.com" + req.RequestURI = "/test-test" + req.Header = make(http.Header) + req.Header.Set("Referer", "foo.com/referer") + req.Header.Set("User-Agent", "test/0.1") + + ctx := WithRequest(Background(), &req) + for _, testcase := range []struct { + key string + expected interface{} + }{ + { + key: "http.request", + expected: &req, + }, + { + key: "http.request.id", + }, + { + key: "http.request.method", + expected: req.Method, + }, + { + key: "http.request.host", + expected: req.Host, + }, + { + key: "http.request.uri", + expected: req.RequestURI, + }, + { + key: "http.request.referer", + expected: req.Referer(), + }, + { + key: "http.request.useragent", + expected: req.UserAgent(), + }, + { + key: "http.request.remoteaddr", + expected: req.RemoteAddr, + }, + { + key: "http.request.startedat", + }, + } { + v := ctx.Value(testcase.key) + + if v == nil { + t.Fatalf("value not found for %q", testcase.key) + } + + if testcase.expected != nil && v != testcase.expected { + t.Fatalf("%s: %v != %v", testcase.key, v, testcase.expected) + } + + // Key specific checks! 
+ switch testcase.key { + case "http.request.id": + if _, ok := v.(string); !ok { + t.Fatalf("request id not a string: %v", v) + } + case "http.request.startedat": + vt, ok := v.(time.Time) + if !ok { + t.Fatalf("value not a time: %v", v) + } + + now := time.Now() + if vt.After(now) { + t.Fatalf("time generated too late: %v > %v", vt, now) + } + + if vt.Before(start) { + t.Fatalf("time generated too early: %v < %v", vt, start) + } + } + } +} + +type testResponseWriter struct { + flushed bool + status int + written int64 + header http.Header +} + +func (trw *testResponseWriter) Header() http.Header { + if trw.header == nil { + trw.header = make(http.Header) + } + + return trw.header +} + +func (trw *testResponseWriter) Write(p []byte) (n int, err error) { + if trw.status == 0 { + trw.status = http.StatusOK + } + + n = len(p) + trw.written += int64(n) + return +} + +func (trw *testResponseWriter) WriteHeader(status int) { + trw.status = status +} + +func (trw *testResponseWriter) Flush() { + trw.flushed = true +} + +func TestWithResponseWriter(t *testing.T) { + trw := testResponseWriter{} + ctx, rw := WithResponseWriter(Background(), &trw) + + if ctx.Value("http.response") != rw { + t.Fatalf("response not available in context: %v != %v", ctx.Value("http.response"), rw) + } + + grw, err := GetResponseWriter(ctx) + if err != nil { + t.Fatalf("error getting response writer: %v", err) + } + + if grw != rw { + t.Fatalf("unexpected response writer returned: %#v != %#v", grw, rw) + } + + if ctx.Value("http.response.status") != 0 { + t.Fatalf("response status should always be a number and should be zero here: %v != 0", ctx.Value("http.response.status")) + } + + if n, err := rw.Write(make([]byte, 1024)); err != nil { + t.Fatalf("unexpected error writing: %v", err) + } else if n != 1024 { + t.Fatalf("unexpected number of bytes written: %v != %v", n, 1024) + } + + if ctx.Value("http.response.status") != http.StatusOK { + t.Fatalf("unexpected response status in context: %v != %v", ctx.Value("http.response.status"), http.StatusOK) + } + + if ctx.Value("http.response.written") != int64(1024) { + t.Fatalf("unexpected number reported bytes written: %v != %v", ctx.Value("http.response.written"), 1024) + } + + // Make sure flush propagates + rw.(http.Flusher).Flush() + + if !trw.flushed { + t.Fatalf("response writer not flushed") + } + + // Write another status and make sure context is correct. This normally + // wouldn't work except for in this contrived testcase. + rw.WriteHeader(http.StatusBadRequest) + + if ctx.Value("http.response.status") != http.StatusBadRequest { + t.Fatalf("unexpected response status in context: %v != %v", ctx.Value("http.response.status"), http.StatusBadRequest) + } +} + +func TestWithVars(t *testing.T) { + var req http.Request + vars := map[string]string{ + "foo": "asdf", + "bar": "qwer", + } + + getVarsFromRequest = func(r *http.Request) map[string]string { + if r != &req { + t.Fatalf("unexpected request: %v != %v", r, req) + } + + return vars + } + + ctx := WithVars(Background(), &req) + for _, testcase := range []struct { + key string + expected interface{} + }{ + { + key: "vars", + expected: vars, + }, + { + key: "vars.foo", + expected: "asdf", + }, + { + key: "vars.bar", + expected: "qwer", + }, + } { + v := ctx.Value(testcase.key) + + if !reflect.DeepEqual(v, testcase.expected) { + t.Fatalf("%q: %v != %v", testcase.key, v, testcase.expected) + } + } +} + +// SingleHostReverseProxy will insert an X-Forwarded-For header, and can be used to test +// RemoteAddr(). 
A fake RemoteAddr cannot be set on the HTTP request - it is overwritten +// at the transport layer to 127.0.0.1: . However, as the X-Forwarded-For header +// just contains the IP address, it is different enough for testing. +func TestRemoteAddr(t *testing.T) { + var expectedRemote string + backend := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + defer r.Body.Close() + + if r.RemoteAddr == expectedRemote { + t.Errorf("Unexpected matching remote addresses") + } + + actualRemote := RemoteAddr(r) + if expectedRemote != actualRemote { + t.Errorf("Mismatching remote hosts: %v != %v", expectedRemote, actualRemote) + } + + w.WriteHeader(200) + })) + + defer backend.Close() + backendURL, err := url.Parse(backend.URL) + if err != nil { + t.Fatal(err) + } + + proxy := httputil.NewSingleHostReverseProxy(backendURL) + frontend := httptest.NewServer(proxy) + defer frontend.Close() + + // X-Forwarded-For set by proxy + expectedRemote = "127.0.0.1" + proxyReq, err := http.NewRequest("GET", frontend.URL, nil) + if err != nil { + t.Fatal(err) + } + + _, err = http.DefaultClient.Do(proxyReq) + if err != nil { + t.Fatal(err) + } + + // RemoteAddr in X-Real-Ip + getReq, err := http.NewRequest("GET", backend.URL, nil) + if err != nil { + t.Fatal(err) + } + + expectedRemote = "1.2.3.4" + getReq.Header["X-Real-ip"] = []string{expectedRemote} + _, err = http.DefaultClient.Do(getReq) + if err != nil { + t.Fatal(err) + } + + // Valid X-Real-Ip and invalid X-Forwarded-For + getReq.Header["X-forwarded-for"] = []string{"1.2.3"} + _, err = http.DefaultClient.Do(getReq) + if err != nil { + t.Fatal(err) + } +} diff --git a/vendor/github.com/docker/distribution/context/logger.go b/vendor/github.com/docker/distribution/context/logger.go new file mode 100644 index 000000000..fbb6a0511 --- /dev/null +++ b/vendor/github.com/docker/distribution/context/logger.go @@ -0,0 +1,116 @@ +package context + +import ( + "fmt" + + "github.com/Sirupsen/logrus" + "runtime" +) + +// Logger provides a leveled-logging interface. +type Logger interface { + // standard logger methods + Print(args ...interface{}) + Printf(format string, args ...interface{}) + Println(args ...interface{}) + + Fatal(args ...interface{}) + Fatalf(format string, args ...interface{}) + Fatalln(args ...interface{}) + + Panic(args ...interface{}) + Panicf(format string, args ...interface{}) + Panicln(args ...interface{}) + + // Leveled methods, from logrus + Debug(args ...interface{}) + Debugf(format string, args ...interface{}) + Debugln(args ...interface{}) + + Error(args ...interface{}) + Errorf(format string, args ...interface{}) + Errorln(args ...interface{}) + + Info(args ...interface{}) + Infof(format string, args ...interface{}) + Infoln(args ...interface{}) + + Warn(args ...interface{}) + Warnf(format string, args ...interface{}) + Warnln(args ...interface{}) +} + +// WithLogger creates a new context with provided logger. +func WithLogger(ctx Context, logger Logger) Context { + return WithValue(ctx, "logger", logger) +} + +// GetLoggerWithField returns a logger instance with the specified field key +// and value without affecting the context. Extra specified keys will be +// resolved from the context. +func GetLoggerWithField(ctx Context, key, value interface{}, keys ...interface{}) Logger { + return getLogrusLogger(ctx, keys...).WithField(fmt.Sprint(key), value) +} + +// GetLoggerWithFields returns a logger instance with the specified fields +// without affecting the context. 
Extra specified keys will be resolved from +// the context. +func GetLoggerWithFields(ctx Context, fields map[interface{}]interface{}, keys ...interface{}) Logger { + // must convert from interface{} -> interface{} to string -> interface{} for logrus. + lfields := make(logrus.Fields, len(fields)) + for key, value := range fields { + lfields[fmt.Sprint(key)] = value + } + + return getLogrusLogger(ctx, keys...).WithFields(lfields) +} + +// GetLogger returns the logger from the current context, if present. If one +// or more keys are provided, they will be resolved on the context and +// included in the logger. While context.Value takes an interface, any key +// argument passed to GetLogger will be passed to fmt.Sprint when expanded as +// a logging key field. If context keys are integer constants, for example, +// its recommended that a String method is implemented. +func GetLogger(ctx Context, keys ...interface{}) Logger { + return getLogrusLogger(ctx, keys...) +} + +// GetLogrusLogger returns the logrus logger for the context. If one more keys +// are provided, they will be resolved on the context and included in the +// logger. Only use this function if specific logrus functionality is +// required. +func getLogrusLogger(ctx Context, keys ...interface{}) *logrus.Entry { + var logger *logrus.Entry + + // Get a logger, if it is present. + loggerInterface := ctx.Value("logger") + if loggerInterface != nil { + if lgr, ok := loggerInterface.(*logrus.Entry); ok { + logger = lgr + } + } + + if logger == nil { + fields := logrus.Fields{} + + // Fill in the instance id, if we have it. + instanceID := ctx.Value("instance.id") + if instanceID != nil { + fields["instance.id"] = instanceID + } + + fields["go.version"] = runtime.Version() + // If no logger is found, just return the standard logger. + logger = logrus.StandardLogger().WithFields(fields) + } + + fields := logrus.Fields{} + for _, key := range keys { + v := ctx.Value(key) + if v != nil { + fields[fmt.Sprint(key)] = v + } + } + + return logger.WithFields(fields) +} diff --git a/vendor/github.com/docker/distribution/context/trace.go b/vendor/github.com/docker/distribution/context/trace.go new file mode 100644 index 000000000..721964a84 --- /dev/null +++ b/vendor/github.com/docker/distribution/context/trace.go @@ -0,0 +1,104 @@ +package context + +import ( + "runtime" + "time" + + "github.com/docker/distribution/uuid" +) + +// WithTrace allocates a traced timing span in a new context. This allows a +// caller to track the time between calling WithTrace and the returned done +// function. When the done function is called, a log message is emitted with a +// "trace.duration" field, corresponding to the elapsed time and a +// "trace.func" field, corresponding to the function that called WithTrace. +// +// The logging keys "trace.id" and "trace.parent.id" are provided to implement +// dapper-like tracing. This function should be complemented with a WithSpan +// method that could be used for tracing distributed RPC calls. +// +// The main benefit of this function is to post-process log messages or +// intercept them in a hook to provide timing data. Trace ids and parent ids +// can also be linked to provide call tracing, if so required. +// +// Here is an example of the usage: +// +// func timedOperation(ctx Context) { +// ctx, done := WithTrace(ctx) +// defer done("this will be the log message") +// // ... function body ... 
+// } +// +// If the function ran for roughly 1s, such a usage would emit a log message +// as follows: +// +// INFO[0001] this will be the log message trace.duration=1.004575763s trace.func=github.com/docker/distribution/context.traceOperation trace.id= ... +// +// Notice that the function name is automatically resolved, along with the +// package and a trace id is emitted that can be linked with parent ids. +func WithTrace(ctx Context) (Context, func(format string, a ...interface{})) { + if ctx == nil { + ctx = Background() + } + + pc, file, line, _ := runtime.Caller(1) + f := runtime.FuncForPC(pc) + ctx = &traced{ + Context: ctx, + id: uuid.Generate().String(), + start: time.Now(), + parent: GetStringValue(ctx, "trace.id"), + fnname: f.Name(), + file: file, + line: line, + } + + return ctx, func(format string, a ...interface{}) { + GetLogger(ctx, + "trace.duration", + "trace.id", + "trace.parent.id", + "trace.func", + "trace.file", + "trace.line"). + Debugf(format, a...) + } +} + +// traced represents a context that is traced for function call timing. It +// also provides fast lookup for the various attributes that are available on +// the trace. +type traced struct { + Context + id string + parent string + start time.Time + fnname string + file string + line int +} + +func (ts *traced) Value(key interface{}) interface{} { + switch key { + case "trace.start": + return ts.start + case "trace.duration": + return time.Since(ts.start) + case "trace.id": + return ts.id + case "trace.parent.id": + if ts.parent == "" { + return nil // must return nil to signal no parent. + } + + return ts.parent + case "trace.func": + return ts.fnname + case "trace.file": + return ts.file + case "trace.line": + return ts.line + } + + return ts.Context.Value(key) +} diff --git a/vendor/github.com/docker/distribution/context/trace_test.go b/vendor/github.com/docker/distribution/context/trace_test.go new file mode 100644 index 000000000..4b969fbb0 --- /dev/null +++ b/vendor/github.com/docker/distribution/context/trace_test.go @@ -0,0 +1,85 @@ +package context + +import ( + "runtime" + "testing" + "time" +) + +// TestWithTrace ensures that tracing has the expected values in the context. +func TestWithTrace(t *testing.T) { + pc, file, _, _ := runtime.Caller(0) // get current caller. + f := runtime.FuncForPC(pc) + + base := []valueTestCase{ + { + key: "trace.id", + notnilorempty: true, + }, + + { + key: "trace.file", + expected: file, + notnilorempty: true, + }, + { + key: "trace.line", + notnilorempty: true, + }, + { + key: "trace.start", + notnilorempty: true, + }, + } + + ctx, done := WithTrace(Background()) + defer done("this will be emitted at end of test") + + checkContextForValues(t, ctx, append(base, valueTestCase{ + key: "trace.func", + expected: f.Name(), + })) + + traced := func() { + parentID := ctx.Value("trace.id") // ensure the parent trace id is correct. + + pc, _, _, _ := runtime.Caller(0) // get current caller. 
+ f := runtime.FuncForPC(pc) + ctx, done := WithTrace(ctx) + defer done("this should be subordinate to the other trace") + time.Sleep(time.Second) + checkContextForValues(t, ctx, append(base, valueTestCase{ + key: "trace.func", + expected: f.Name(), + }, valueTestCase{ + key: "trace.parent.id", + expected: parentID, + })) + } + traced() + + time.Sleep(time.Second) +} + +type valueTestCase struct { + key string + expected interface{} + notnilorempty bool // just check not empty/not nil +} + +func checkContextForValues(t *testing.T, ctx Context, values []valueTestCase) { + + for _, testcase := range values { + v := ctx.Value(testcase.key) + if testcase.notnilorempty { + if v == nil || v == "" { + t.Fatalf("value was nil or empty for %q: %#v", testcase.key, v) + } + continue + } + + if v != testcase.expected { + t.Fatalf("unexpected value for key %q: %v != %v", testcase.key, v, testcase.expected) + } + } +} diff --git a/vendor/github.com/docker/distribution/context/util.go b/vendor/github.com/docker/distribution/context/util.go new file mode 100644 index 000000000..cb9ef52e3 --- /dev/null +++ b/vendor/github.com/docker/distribution/context/util.go @@ -0,0 +1,24 @@ +package context + +import ( + "time" +) + +// Since looks up key, which should be a time.Time, and returns the duration +// since that time. If the key is not found, the value returned will be zero. +// This is helpful when inferring metrics related to context execution times. +func Since(ctx Context, key interface{}) time.Duration { + if startedAt, ok := ctx.Value(key).(time.Time); ok { + return time.Since(startedAt) + } + return 0 +} + +// GetStringValue returns a string value from the context. The empty string +// will be returned if not found. +func GetStringValue(ctx Context, key interface{}) (value string) { + if valuev, ok := ctx.Value(key).(string); ok { + value = valuev + } + return value +} diff --git a/vendor/github.com/docker/distribution/context/version.go b/vendor/github.com/docker/distribution/context/version.go new file mode 100644 index 000000000..746cda02e --- /dev/null +++ b/vendor/github.com/docker/distribution/context/version.go @@ -0,0 +1,16 @@ +package context + +// WithVersion stores the application version in the context. The new context +// gets a logger to ensure log messages are marked with the application +// version. +func WithVersion(ctx Context, version string) Context { + ctx = WithValue(ctx, "version", version) + // push a new logger onto the stack + return WithLogger(ctx, GetLogger(ctx, "version")) +} + +// GetVersion returns the application version from the context. An empty +// string may returned if the version was not set on the context. 
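
A brief sketch tying WithVersion and GetVersion (defined in version.go below) back to the logging behaviour described in doc.go; the version string "0.1.0" is illustrative:

    package main

    import (
        dcontext "github.com/docker/distribution/context"
    )

    func main() {
        // Store the application version; the returned context also carries a
        // logger decorated with the "version" field.
        ctx := dcontext.WithVersion(dcontext.Background(), "0.1.0")

        // Every log line emitted through this context now includes version=0.1.0.
        dcontext.GetLogger(ctx).Infof("starting up")

        // The raw value remains retrievable.
        _ = dcontext.GetVersion(ctx) // "0.1.0"
    }
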
+func GetVersion(ctx Context) string { + return GetStringValue(ctx, "version") +} diff --git a/vendor/github.com/docker/distribution/context/version_test.go b/vendor/github.com/docker/distribution/context/version_test.go new file mode 100644 index 000000000..b81652691 --- /dev/null +++ b/vendor/github.com/docker/distribution/context/version_test.go @@ -0,0 +1,19 @@ +package context + +import "testing" + +func TestVersionContext(t *testing.T) { + ctx := Background() + + if GetVersion(ctx) != "" { + t.Fatalf("context should not yet have a version") + } + + expected := "2.1-whatever" + ctx = WithVersion(ctx, expected) + version := GetVersion(ctx) + + if version != expected { + t.Fatalf("version was not set: %q != %q", version, expected) + } +} diff --git a/vendor/github.com/docker/distribution/contrib/apache/README.MD b/vendor/github.com/docker/distribution/contrib/apache/README.MD new file mode 100644 index 000000000..29f6bae18 --- /dev/null +++ b/vendor/github.com/docker/distribution/contrib/apache/README.MD @@ -0,0 +1,36 @@ +# Apache HTTPd sample for Registry v1, v2 and mirror + +3 containers involved + +* Docker Registry v1 (registry 0.9.1) +* Docker Registry v2 (registry 2.0.0) +* Docker Registry v1 in mirror mode + +HTTP for mirror and HTTPS for v1 & v2 + +* http://registry.example.com proxify Docker Registry 1.0 in Mirror mode +* https://registry.example.com proxify Docker Registry 1.0 or 2.0 in Hosting mode + +## 3 Docker containers should be started + +* Docker Registry 1.0 in Mirror mode : port 5001 +* Docker Registry 1.0 in Hosting mode : port 5000 +* Docker Registry 2.0 in Hosting mode : port 5002 + +### Registry v1 + + docker run -d -e SETTINGS_FLAVOR=dev -v /var/lib/docker-registry/storage/hosting-v1:/tmp -p 5000:5000 registry:0.9.1" + +### Mirror + + docker run -d -e SETTINGS_FLAVOR=dev -e STANDALONE=false -e MIRROR_SOURCE=https://registry-1.docker.io -e MIRROR_SOURCE_INDEX=https://index.docker.io \ + -e MIRROR_TAGS_CACHE_TTL=172800 -v /var/lib/docker-registry/storage/mirror:/tmp -p 5001:5000 registry:0.9.1" + +### Registry v2 + + docker run -d -e SETTINGS_FLAVOR=dev -v /var/lib/axway/docker-registry/storage/hosting2-v2:/tmp -p 5002:5000 registry:2" + +# For Hosting mode access + +* users should have account (valid-user) to be able to fetch images +* only users using account docker-deployer will be allowed to push images diff --git a/vendor/github.com/docker/distribution/contrib/apache/apache.conf b/vendor/github.com/docker/distribution/contrib/apache/apache.conf new file mode 100644 index 000000000..3300a7c02 --- /dev/null +++ b/vendor/github.com/docker/distribution/contrib/apache/apache.conf @@ -0,0 +1,127 @@ +# +# Sample Apache 2.x configuration where : +# + + + + ServerName registry.example.com + ServerAlias www.registry.example.com + + ProxyRequests off + ProxyPreserveHost on + + # no proxy for /error/ (Apache HTTPd errors messages) + ProxyPass /error/ ! 
+ + ProxyPass /_ping http://localhost:5001/_ping + ProxyPassReverse /_ping http://localhost:5001/_ping + + ProxyPass /v1 http://localhost:5001/v1 + ProxyPassReverse /v1 http://localhost:5001/v1 + + # Logs + ErrorLog ${APACHE_LOG_DIR}/mirror_error_log + CustomLog ${APACHE_LOG_DIR}/mirror_access_log combined env=!dontlog + + + + + + + ServerName registry.example.com + ServerAlias www.registry.example.com + + SSLEngine on + SSLCertificateFile /etc/apache2/ssl/registry.example.com.crt + SSLCertificateKeyFile /etc/apache2/ssl/registry.example.com.key + + # Higher Strength SSL Ciphers + SSLProtocol all -SSLv2 -SSLv3 -TLSv1 + SSLCipherSuite RC4-SHA:HIGH + SSLHonorCipherOrder on + + # Logs + ErrorLog ${APACHE_LOG_DIR}/registry_error_ssl_log + CustomLog ${APACHE_LOG_DIR}/registry_access_ssl_log combined env=!dontlog + + Header always set "Docker-Distribution-Api-Version" "registry/2.0" + Header onsuccess set "Docker-Distribution-Api-Version" "registry/2.0" + RequestHeader set X-Forwarded-Proto "https" + + ProxyRequests off + ProxyPreserveHost on + + # no proxy for /error/ (Apache HTTPd errors messages) + ProxyPass /error/ ! + + # + # Registry v1 + # + + ProxyPass /v1 http://localhost:5000/v1 + ProxyPassReverse /v1 http://localhost:5000/v1 + + ProxyPass /_ping http://localhost:5000/_ping + ProxyPassReverse /_ping http://localhost:5000/_ping + + # Authentication require for push + + Order deny,allow + Allow from all + AuthName "Registry Authentication" + AuthType basic + AuthUserFile "/etc/apache2/htpasswd/registry-htpasswd" + + # Read access to authentified users + + Require valid-user + + + # Write access to docker-deployer account only + + Require user docker-deployer + + + + + # Allow ping to run unauthenticated. + + Satisfy any + Allow from all + + + # Allow ping to run unauthenticated. + + Satisfy any + Allow from all + + + # + # Registry v2 + # + + ProxyPass /v2 http://localhost:5002/v2 + ProxyPassReverse /v2 http://localhost:5002/v2 + + + Order deny,allow + Allow from all + AuthName "Registry Authentication" + AuthType basic + AuthUserFile "/etc/apache2/htpasswd/registry-htpasswd" + + # Read access to authentified users + + Require valid-user + + + # Write access to docker-deployer only + + Require user docker-deployer + + + + + + + diff --git a/vendor/github.com/docker/distribution/contrib/compose/README.md b/vendor/github.com/docker/distribution/contrib/compose/README.md new file mode 100644 index 000000000..a9522fd24 --- /dev/null +++ b/vendor/github.com/docker/distribution/contrib/compose/README.md @@ -0,0 +1,147 @@ +# Docker Compose V1 + V2 registry + +This compose configuration configures a `v1` and `v2` registry behind an `nginx` +proxy. By default, you can access the combined registry at `localhost:5000`. + +The configuration does not support pushing images to `v2` and pulling from `v1`. +If a `docker` client has a version less than 1.6, Nginx will route its requests +to the 1.0 registry. Requests from newer clients will route to the 2.0 registry. + +### Install Docker Compose + +1. Open a new terminal on the host with your `distribution` source. + +2. Get the `docker-compose` binary. + + $ sudo wget https://github.com/docker/compose/releases/download/1.1.0/docker-compose-`uname -s`-`uname -m` -O /usr/local/bin/docker-compose + + This command installs the binary in the `/usr/local/bin` directory. + +3. Add executable permissions to the binary. + + $ sudo chmod +x /usr/local/bin/docker-compose + +## Build and run with Compose + +1. 
In your terminal, navigate to the `distribution/contrib/compose` directory + + This directory includes a single `docker-compose.yml` configuration. + + nginx: + build: "nginx" + ports: + - "5000:5000" + links: + - registryv1:registryv1 + - registryv2:registryv2 + registryv1: + image: registry + ports: + - "5000" + registryv2: + build: "../../" + ports: + - "5000" + + This configuration builds a new `nginx` image as specified by the + `nginx/Dockerfile` file. The 1.0 registry comes from Docker's official + public image. Finally, the registry 2.0 image is built from the + `distribution/Dockerfile` you've used previously. + +2. Get a registry 1.0 image. + + $ docker pull registry:0.9.1 + + The Compose configuration looks for this image locally. If you don't do this + step, later steps can fail. + +3. Build `nginx`, the registry 2.0 image, and + + $ docker-compose build + registryv1 uses an image, skipping + Building registryv2... + Step 0 : FROM golang:1.4 + + ... + + Removing intermediate container 9f5f5068c3f3 + Step 4 : COPY docker-registry-v2.conf /etc/nginx/docker-registry-v2.conf + ---> 74acc70fa106 + Removing intermediate container edb84c2b40cb + Successfully built 74acc70fa106 + + The commmand outputs its progress until it completes. + +4. Start your configuration with compose. + + $ docker-compose up + Recreating compose_registryv1_1... + Recreating compose_registryv2_1... + Recreating compose_nginx_1... + Attaching to compose_registryv1_1, compose_registryv2_1, compose_nginx_1 + ... + + +5. In another terminal, display the running configuration. + + $ docker ps + CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES + a81ad2557702 compose_nginx:latest "nginx -g 'daemon of 8 minutes ago Up 8 minutes 80/tcp, 443/tcp, 0.0.0.0:5000->5000/tcp compose_nginx_1 + 0618437450dd compose_registryv2:latest "registry cmd/regist 8 minutes ago Up 8 minutes 0.0.0.0:32777->5000/tcp compose_registryv2_1 + aa82b1ed8e61 registry:latest "docker-registry" 8 minutes ago Up 8 minutes 0.0.0.0:32776->5000/tcp compose_registryv1_1 + +### Explore a bit + +1. Check for TLS on your `nginx` server. + + $ curl -v https://localhost:5000 + * Rebuilt URL to: https://localhost:5000/ + * Hostname was NOT found in DNS cache + * Trying 127.0.0.1... + * Connected to localhost (127.0.0.1) port 5000 (#0) + * successfully set certificate verify locations: + * CAfile: none + CApath: /etc/ssl/certs + * SSLv3, TLS handshake, Client hello (1): + * SSLv3, TLS handshake, Server hello (2): + * SSLv3, TLS handshake, CERT (11): + * SSLv3, TLS alert, Server hello (2): + * SSL certificate problem: self signed certificate + * Closing connection 0 + curl: (60) SSL certificate problem: self signed certificate + More details here: http://curl.haxx.se/docs/sslcerts.html + +2. Tag the `v1` registry image. + + $ docker tag registry:latest localhost:5000/registry_one:latest + +2. Push it to the localhost. + + $ docker push localhost:5000/registry_one:latest + + If you are using the 1.6 Docker client, this pushes the image the `v2 `registry. + +4. Use `curl` to list the image in the registry. + + $ curl -v -X GET http://localhost:32777/v2/registry1/tags/list + * Hostname was NOT found in DNS cache + * Trying 127.0.0.1... 
+ * Connected to localhost (127.0.0.1) port 32777 (#0) + > GET /v2/registry1/tags/list HTTP/1.1 + > User-Agent: curl/7.36.0 + > Host: localhost:32777 + > Accept: */* + > + < HTTP/1.1 200 OK + < Content-Type: application/json; charset=utf-8 + < Docker-Distribution-Api-Version: registry/2.0 + < Date: Tue, 14 Apr 2015 22:34:13 GMT + < Content-Length: 39 + < + {"name":"registry1","tags":["latest"]} + * Connection #0 to host localhost left intact + + This example refers to the specific port assigned to the 2.0 registry. You saw + this port earlier, when you used `docker ps` to show your running containers. + + diff --git a/vendor/github.com/docker/distribution/contrib/compose/docker-compose.yml b/vendor/github.com/docker/distribution/contrib/compose/docker-compose.yml new file mode 100644 index 000000000..5cd048588 --- /dev/null +++ b/vendor/github.com/docker/distribution/contrib/compose/docker-compose.yml @@ -0,0 +1,15 @@ +nginx: + build: "nginx" + ports: + - "5000:5000" + links: + - registryv1:registryv1 + - registryv2:registryv2 +registryv1: + image: registry + ports: + - "5000" +registryv2: + build: "../../" + ports: + - "5000" diff --git a/vendor/github.com/docker/distribution/contrib/compose/nginx/Dockerfile b/vendor/github.com/docker/distribution/contrib/compose/nginx/Dockerfile new file mode 100644 index 000000000..2b252ec7d --- /dev/null +++ b/vendor/github.com/docker/distribution/contrib/compose/nginx/Dockerfile @@ -0,0 +1,6 @@ +FROM nginx:1.7 + +COPY nginx.conf /etc/nginx/nginx.conf +COPY registry.conf /etc/nginx/conf.d/registry.conf +COPY docker-registry.conf /etc/nginx/docker-registry.conf +COPY docker-registry-v2.conf /etc/nginx/docker-registry-v2.conf diff --git a/vendor/github.com/docker/distribution/contrib/compose/nginx/docker-registry-v2.conf b/vendor/github.com/docker/distribution/contrib/compose/nginx/docker-registry-v2.conf new file mode 100644 index 000000000..65c4d7766 --- /dev/null +++ b/vendor/github.com/docker/distribution/contrib/compose/nginx/docker-registry-v2.conf @@ -0,0 +1,6 @@ +proxy_pass http://docker-registry-v2; +proxy_set_header Host $http_host; # required for docker client's sake +proxy_set_header X-Real-IP $remote_addr; # pass on real client's IP +proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; +proxy_set_header X-Forwarded-Proto $scheme; +proxy_read_timeout 900; diff --git a/vendor/github.com/docker/distribution/contrib/compose/nginx/docker-registry.conf b/vendor/github.com/docker/distribution/contrib/compose/nginx/docker-registry.conf new file mode 100644 index 000000000..7b039a54a --- /dev/null +++ b/vendor/github.com/docker/distribution/contrib/compose/nginx/docker-registry.conf @@ -0,0 +1,7 @@ +proxy_pass http://docker-registry; +proxy_set_header Host $http_host; # required for docker client's sake +proxy_set_header X-Real-IP $remote_addr; # pass on real client's IP +proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; +proxy_set_header X-Forwarded-Proto $scheme; +proxy_set_header Authorization ""; # For basic auth through nginx in v1 to work, please comment this line +proxy_read_timeout 900; diff --git a/vendor/github.com/docker/distribution/contrib/compose/nginx/nginx.conf b/vendor/github.com/docker/distribution/contrib/compose/nginx/nginx.conf new file mode 100644 index 000000000..63cd180d6 --- /dev/null +++ b/vendor/github.com/docker/distribution/contrib/compose/nginx/nginx.conf @@ -0,0 +1,27 @@ +user nginx; +worker_processes 1; + +error_log /var/log/nginx/error.log warn; +pid /var/run/nginx.pid; + +events { + 
worker_connections 1024; +} + +http { + include /etc/nginx/mime.types; + default_type application/octet-stream; + + log_format main '$remote_addr - $remote_user [$time_local] "$request" ' + '$status $body_bytes_sent "$http_referer" ' + '"$http_user_agent" "$http_x_forwarded_for"'; + + access_log /var/log/nginx/access.log main; + + sendfile on; + + keepalive_timeout 65; + + include /etc/nginx/conf.d/*.conf; +} + diff --git a/vendor/github.com/docker/distribution/contrib/compose/nginx/registry.conf b/vendor/github.com/docker/distribution/contrib/compose/nginx/registry.conf new file mode 100644 index 000000000..47ffd2379 --- /dev/null +++ b/vendor/github.com/docker/distribution/contrib/compose/nginx/registry.conf @@ -0,0 +1,41 @@ +# Docker registry proxy for api versions 1 and 2 + +upstream docker-registry { + server registryv1:5000; +} + +upstream docker-registry-v2 { + server registryv2:5000; +} + +# No client auth or TLS +server { + listen 5000; + server_name localhost; + + # disable any limits to avoid HTTP 413 for large image uploads + client_max_body_size 0; + + # required to avoid HTTP 411: see Issue #1486 (https://github.com/docker/docker/issues/1486) + chunked_transfer_encoding on; + + location /v2/ { + # Do not allow connections from docker 1.5 and earlier + # docker pre-1.6.0 did not properly set the user agent on ping, catch "Go *" user agents + if ($http_user_agent ~ "^(docker\/1\.(3|4|5(?!\.[0-9]-dev))|Go ).*$" ) { + return 404; + } + + # To add basic authentication to v2 use auth_basic setting plus add_header + # auth_basic "registry.localhost"; + # auth_basic_user_file test.password; + # add_header 'Docker-Distribution-Api-Version' 'registry/2.0' always; + + include docker-registry-v2.conf; + } + + location / { + include docker-registry.conf; + } +} + diff --git a/vendor/github.com/docker/distribution/contrib/docker-integration/Dockerfile b/vendor/github.com/docker/distribution/contrib/docker-integration/Dockerfile new file mode 100644 index 000000000..7a047a689 --- /dev/null +++ b/vendor/github.com/docker/distribution/contrib/docker-integration/Dockerfile @@ -0,0 +1,9 @@ +FROM distribution/golem:0.1 + +MAINTAINER Docker Distribution Team + +RUN apk add --no-cache git + +ENV TMPDIR /var/lib/docker/tmp + +WORKDIR /go/src/github.com/docker/distribution/contrib/docker-integration diff --git a/vendor/github.com/docker/distribution/contrib/docker-integration/README.md b/vendor/github.com/docker/distribution/contrib/docker-integration/README.md new file mode 100644 index 000000000..bc5be9d96 --- /dev/null +++ b/vendor/github.com/docker/distribution/contrib/docker-integration/README.md @@ -0,0 +1,63 @@ +# Docker Registry Integration Testing + +These integration tests cover interactions between registry clients such as +the docker daemon and the registry server. All tests can be run using the +[golem integration test runner](https://github.com/docker/golem) + +The integration tests configure components using docker compose +(see docker-compose.yaml) and the runner can be using the golem +configuration file (see golem.conf). + +## Running integration tests + +### Run using multiversion script + +The integration tests in the `contrib/docker-integration` directory can be simply +run by executing the run script `./run_multiversion.sh`. If there is no running +daemon to connect to, run as `./run_multiversion.sh -d`. + +This command will build the distribution image from the locally checked out +version and run against multiple versions of docker defined in the script. 
To +run a specific version of the registry or docker, Golem will need to be +executed manually. + +### Run manually using Golem + +Using the golem tool directly allows running against multiple versions of +the registry and docker. Running against multiple versions of the registry +can be useful for testing changes in the docker daemon which are not +covered by the default run script. + +#### Installing Golem + +Golem is distributed as an executable binary which can be installed from +the [release page](https://github.com/docker/golem/releases/tag/v0.1). + +#### Running golem with docker + +Additionally golem can be run as a docker image requiring no additonal +installation. + +`docker run --privileged -v "$GOPATH/src/github.com/docker/distribution/contrib/docker-integration:/test" -w /test distribution/golem golem -rundaemon .` + +#### Golem custom images + +Golem tests version of software by defining the docker image to test. + +Run with registry 2.2.1 and docker 1.10.3 + +`golem -i golem-dind:latest,docker:1.10.3-dind,1.10.3 -i golem-distribution:latest,registry:2.2.1 .` + + +#### Use golem caching for developing tests + +Golem allows caching image configuration to reduce test start up time. +Using this cache will allow tests with the same set of images to start +up quickly. This can be useful when developing tests and needing the +test to run quickly. If there are changes which effect the image (such as +building a new registry image), then startup time will be slower. + +Run this command multiple times and after the first time test runs +should start much quicker. +`golem -cache ~/.cache/docker/golem -i golem-dind:latest,docker:1.10.3-dind,1.10.3 -i golem-distribution:latest,registry:2.2.1 .` + diff --git a/vendor/github.com/docker/distribution/contrib/docker-integration/docker-compose.yml b/vendor/github.com/docker/distribution/contrib/docker-integration/docker-compose.yml new file mode 100644 index 000000000..4d4f3856f --- /dev/null +++ b/vendor/github.com/docker/distribution/contrib/docker-integration/docker-compose.yml @@ -0,0 +1,91 @@ +nginx: + build: "nginx" + ports: + - "5000:5000" + - "5002:5002" + - "5440:5440" + - "5441:5441" + - "5442:5442" + - "5443:5443" + - "5444:5444" + - "5445:5445" + - "5446:5446" + - "5447:5447" + - "5448:5448" + - "5554:5554" + - "5555:5555" + - "5556:5556" + - "5557:5557" + - "5558:5558" + - "5559:5559" + - "5600:5600" + - "6666:6666" + links: + - registryv2:registryv2 + - malevolent:malevolent + - registryv2token:registryv2token + - tokenserver:tokenserver + - registryv2tokenoauth:registryv2tokenoauth + - registryv2tokenoauthnotls:registryv2tokenoauthnotls + - tokenserveroauth:tokenserveroauth +registryv2: + image: golem-distribution:latest + ports: + - "5000" +registryv2token: + image: golem-distribution:latest + ports: + - "5000" + volumes: + - ./tokenserver/registry-config.yml:/etc/docker/registry/config.yml + - ./tokenserver/certs/localregistry.cert:/etc/docker/registry/localregistry.cert + - ./tokenserver/certs/localregistry.key:/etc/docker/registry/localregistry.key + - ./tokenserver/certs/signing.cert:/etc/docker/registry/tokenbundle.pem +tokenserver: + build: "tokenserver" + command: "--debug -addr 0.0.0.0:5556 -issuer registry-test -passwd .htpasswd -tlscert tls.cert -tlskey tls.key -key sign.key -realm http://auth.localregistry:5556" + ports: + - "5556" +registryv2tokenoauth: + image: golem-distribution:latest + ports: + - "5000" + volumes: + - ./tokenserver-oauth/registry-config.yml:/etc/docker/registry/config.yml + - 
./tokenserver-oauth/certs/localregistry.cert:/etc/docker/registry/localregistry.cert + - ./tokenserver-oauth/certs/localregistry.key:/etc/docker/registry/localregistry.key + - ./tokenserver-oauth/certs/signing.cert:/etc/docker/registry/tokenbundle.pem +registryv2tokenoauthnotls: + image: golem-distribution:latest + ports: + - "5000" + volumes: + - ./tokenserver-oauth/registry-config-notls.yml:/etc/docker/registry/config.yml + - ./tokenserver-oauth/certs/signing.cert:/etc/docker/registry/tokenbundle.pem +tokenserveroauth: + build: "tokenserver-oauth" + command: "--debug -addr 0.0.0.0:5559 -issuer registry-test -passwd .htpasswd -tlscert tls.cert -tlskey tls.key -key sign.key -realm http://auth.localregistry:5559" + ports: + - "5559" +malevolent: + image: "dmcgowan/malevolent:0.1.0" + command: "-l 0.0.0.0:6666 -r http://registryv2:5000 -c /certs/localregistry.cert -k /certs/localregistry.key" + links: + - registryv2:registryv2 + volumes: + - ./malevolent-certs:/certs:ro + ports: + - "6666" +docker: + image: golem-dind:latest + container_name: dockerdaemon + command: "docker daemon --debug -s $DOCKER_GRAPHDRIVER" + privileged: true + environment: + DOCKER_GRAPHDRIVER: + volumes: + - /etc/generated_certs.d:/etc/docker/certs.d + - /var/lib/docker + links: + - nginx:localregistry + - nginx:auth.localregistry diff --git a/vendor/github.com/docker/distribution/contrib/docker-integration/golem.conf b/vendor/github.com/docker/distribution/contrib/docker-integration/golem.conf new file mode 100644 index 000000000..99c8d600c --- /dev/null +++ b/vendor/github.com/docker/distribution/contrib/docker-integration/golem.conf @@ -0,0 +1,18 @@ +[[suite]] + dind=true + images=[ "nginx:1.9", "dmcgowan/token-server:simple", "dmcgowan/token-server:oauth", "dmcgowan/malevolent:0.1.0" ] + + [[suite.pretest]] + command="sh ./install_certs.sh /etc/generated_certs.d" + [[suite.testrunner]] + command="bats -t ." + format="tap" + env=["TEST_REPO=hello-world", "TEST_TAG=latest", "TEST_USER=testuser", "TEST_PASSWORD=passpassword", "TEST_REGISTRY=localregistry", "TEST_SKIP_PULL=true"] + [[suite.customimage]] + tag="golem-distribution:latest" + default="registry:2.2.1" + [[suite.customimage]] + tag="golem-dind:latest" + default="docker:1.10.1-dind" + version="1.10.1" + diff --git a/vendor/github.com/docker/distribution/contrib/docker-integration/helpers.bash b/vendor/github.com/docker/distribution/contrib/docker-integration/helpers.bash new file mode 100644 index 000000000..e1813d3e5 --- /dev/null +++ b/vendor/github.com/docker/distribution/contrib/docker-integration/helpers.bash @@ -0,0 +1,101 @@ +# has_digest enforces the last output line is "Digest: sha256:..." 
+# the input is the output from a docker push cli command +function has_digest() { + filtered=$(echo "$1" |sed -rn '/[dD]igest\: sha(256|384|512)/ p') + [ "$filtered" != "" ] + # See http://wiki.alpinelinux.org/wiki/Regex#BREs before making changes to regex + digest=$(expr "$filtered" : ".*\(sha[0-9]\{3,3\}:[a-z0-9]*\)") +} + +# tempImage creates a new image using the provided name +# requires bats +function tempImage() { + dir=$(mktemp -d) + run dd if=/dev/urandom of="$dir/f" bs=1024 count=512 + cat < "$dir/Dockerfile" +FROM scratch +COPY f /f + +CMD [] +DockerFileContent + + cp_t $dir "/tmpbuild/" + exec_t "cd /tmpbuild/; docker build --no-cache -t $1 .; rm -rf /tmpbuild/" +} + +# skip basic auth tests with Docker 1.6, where they don't pass due to +# certificate issues, requires bats +function basic_auth_version_check() { + run sh -c 'docker version | fgrep -q "Client version: 1.6."' + if [ "$status" -eq 0 ]; then + skip "Basic auth tests don't support 1.6.x" + fi +} + +# login issues a login to docker to the provided server +# uses user, password, and email variables set outside of function +# requies bats +function login() { + rm -f /root/.docker/config.json + run docker_t login -u $user -p $password -e $email $1 + if [ "$status" -ne 0 ]; then + echo $output + fi + [ "$status" -eq 0 ] + # First line is WARNING about credential save or email deprecation (maybe both) + [ "${lines[2]}" = "Login Succeeded" -o "${lines[1]}" = "Login Succeeded" ] +} + +function login_oauth() { + login $@ + + tmpFile=$(mktemp) + get_file_t /root/.docker/config.json $tmpFile + run awk -v RS="" "/\"$1\": \\{[[:space:]]+\"auth\": \"[[:alnum:]]+\",[[:space:]]+\"identitytoken\"/ {exit 3}" $tmpFile + [ "$status" -eq 3 ] +} + +function parse_version() { + version=$(echo "$1" | cut -d '-' -f1) # Strip anything after '-' + major=$(echo "$version" | cut -d . -f1) + minor=$(echo "$version" | cut -d . -f2) + rev=$(echo "$version" | cut -d . 
-f3) + + version=$((major * 1000 * 1000 + minor * 1000 + rev)) +} + +function version_check() { + name=$1 + checkv=$2 + minv=$3 + parse_version "$checkv" + v=$version + parse_version "$minv" + if [ "$v" -lt "$version" ]; then + skip "$name version \"$checkv\" does not meet required version \"$minv\"" + fi +} + +function get_file_t() { + docker cp dockerdaemon:$1 $2 +} + +function cp_t() { + docker cp $1 dockerdaemon:$2 +} + +function exec_t() { + docker exec dockerdaemon sh -c "$@" +} + +function docker_t() { + docker exec dockerdaemon docker $@ +} + +# build reates a new docker image id from another image +function build() { + docker exec -i dockerdaemon docker build --no-cache -t $1 - <> $2/ca.crt +} + +install_test_certs $installdir + +# Malevolent server +install_ca_file ./malevolent-certs/ca.pem $installdir/$hostname:6666 + +# Token server +install_ca_file ./tokenserver/certs/ca.pem $installdir/$hostname:5554 +install_ca_file ./tokenserver/certs/ca.pem $installdir/$hostname:5555 +install_ca_file ./tokenserver/certs/ca.pem $installdir/$hostname:5557 +install_ca_file ./tokenserver/certs/ca.pem $installdir/$hostname:5558 +append_ca_file ./tokenserver/certs/ca.pem $installdir/$hostname:5600 + diff --git a/vendor/github.com/docker/distribution/contrib/docker-integration/malevolent-certs/localregistry.cert b/vendor/github.com/docker/distribution/contrib/docker-integration/malevolent-certs/localregistry.cert new file mode 100644 index 000000000..071e7a2bc --- /dev/null +++ b/vendor/github.com/docker/distribution/contrib/docker-integration/malevolent-certs/localregistry.cert @@ -0,0 +1,19 @@ +-----BEGIN CERTIFICATE----- +MIIDETCCAfugAwIBAgIQZRKt7OeG+TlC2riszYwQQTALBgkqhkiG9w0BAQswJjER +MA8GA1UEChMIUXVpY2tUTFMxETAPBgNVBAMTCFF1aWNrVExTMB4XDTE1MDgyMDIz +MjE0OVoXDTE4MDgwNDIzMjE0OVowKzERMA8GA1UEChMIUXVpY2tUTFMxFjAUBgNV +BAMTDWxvY2FscmVnaXN0cnkwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIB +AQDPdsUBStNMz4coXfQVIJIafG85VkngM4fV7hrg7AbiGLCWvq8cWOrYM50G9Wmo +twK1WeQ6bigYOjINgSfTxcy3adciVZIIJyXqboz6n2V0yRPWpakof939bvuAurAP +tSqQ2V5fGN0ZZn4J4IbXMSovKwo7sG3X6i4q/8DYHZ/mKjvCRMPC3MGWqunknpkm +dzyKbIFHaDKlAqIOwTsDhHvGzm/9n3D+h4sl5ZPBobuBEV2u5GR0H5ujak4+Kczt +thCWtRkzCfnjW0TEanheSYJGu8OgCGoFjQnHotgqvOO6iHZCsrB3gf8WQeou+y9e ++OyLZv3FmqdC9SXr3b0LGQTFAgMBAAGjOjA4MA4GA1UdDwEB/wQEAwIAoDAMBgNV +HRMBAf8EAjAAMBgGA1UdEQQRMA+CDWxvY2FscmVnaXN0cnkwCwYJKoZIhvcNAQEL +A4IBAQC/PP2Y9QVhO8t4BXML1QpNRWqXG8Gg0P1XIh6M6FoxcGIodLdbzui828YB +wm9ZlyKars+nDdgLdQWawdV7hSd6s2NeQlHYQSGLsdTAVkgIxiD7D2Tw3kAZ6Zrj +dPikoVAc+rBMm/BXQLzy95IAbBVOHOpBkOOgF+TYxeLnOc3GzbUqBi1Pq97DMaxr +DaDuywH55P/6v7qt610UIsZ6+RZ78iiRx4Q+oRxEqGT0rXI76gVxOFabbJuFr1n1 +kEWa3u/BssJzX3KVAm7oUtaBnj2SH5fokFmvZ5lBXA4QO/5doOa8yZiFFvvQs7EY +SWDxLrvS33UCtsCcpPggjehnxKaC +-----END CERTIFICATE----- diff --git a/vendor/github.com/docker/distribution/contrib/docker-integration/malevolent-certs/localregistry.key b/vendor/github.com/docker/distribution/contrib/docker-integration/malevolent-certs/localregistry.key new file mode 100644 index 000000000..c5bf7ac12 --- /dev/null +++ b/vendor/github.com/docker/distribution/contrib/docker-integration/malevolent-certs/localregistry.key @@ -0,0 +1,27 @@ +-----BEGIN RSA PRIVATE KEY----- +MIIEpQIBAAKCAQEAz3bFAUrTTM+HKF30FSCSGnxvOVZJ4DOH1e4a4OwG4hiwlr6v +HFjq2DOdBvVpqLcCtVnkOm4oGDoyDYEn08XMt2nXIlWSCCcl6m6M+p9ldMkT1qWp +KH/d/W77gLqwD7UqkNleXxjdGWZ+CeCG1zEqLysKO7Bt1+ouKv/A2B2f5io7wkTD +wtzBlqrp5J6ZJnc8imyBR2gypQKiDsE7A4R7xs5v/Z9w/oeLJeWTwaG7gRFdruRk +dB+bo2pOPinM7bYQlrUZMwn541tExGp4XkmCRrvDoAhqBY0Jx6LYKrzjuoh2QrKw 
+d4H/FkHqLvsvXvjsi2b9xZqnQvUl6929CxkExQIDAQABAoIBAQCZjCUI7NFwwxQc +m1UAogeglMJZJHUu+9SoUD8Sg34grvdbyqueBm1iMOkiclaOKU1W3b4eRNNmAwRy +nEnW4km+4hX48m5PnHHijYnIIFsd0YjeT+Pf9qtdXFvGjeWq6oIjjM3dAnD50LKu +KsCB2oCHQoqjXNQfftJGvt2C1oI2/WvdOR4prnGXElVfASswX4PkP5LCfLhIx+Fr +7ErfaRIKigLSaAWLKaw3IlL12Q/KkuGcnzYIzIRwY4VJ64ENN6M3+KknfGovQItL +sCxceSe61THDP9AAI3Mequm8z3H0CImOWhJCge5l7ttLLMXZXqGxDCVx+3zvqlCa +X0cgGSVBAoGBAOvTN3oJJx1vnh1mRj8+hqzFq1bjm4T/Wp314QWLeo++43II4uMM +5hxUlO5ViY1sKxQrGwK+9c9ddxAvm5OAFFkzgW9EhDCu0tXUb2/vAJQ93SgqbcRu +coXWJpk0eNW/ouk2s1X8dzs+sCs3a4H64fEEj8yhwoyovjfucspsn7t1AoGBAOE2 +ayLKx7CcWCiD/VGNvP7714MDst2isyq8reg8LEMmAaXR2IWWj5eGwKrImTQCsrjW +P37aBp1lcWuuYRKl/WEGBy6JLNdATyUoYc1Yo+8YdenekkOtOHHJerlK3OKi3ZVp +q4HJY9wzKg/wYLcbTmjjzKj+OBIZWwig73XUHwoRAoGBAJnuIrYbp1aFdvXFvnCl +xY6c8DwlEWx8qY+V4S2XX4bYmOnkdwSxdLplU1lGqCSRyIS/pj/imdyjK4Z7LNfY +sG+RORmB5a9JTgGZSqwLm5snzmXbXA7t8P7/S+6Q25baIeKMe/7SbplTT/bFk/0h +371MtvhhVfYuZwtnL7KFuLXJAoGBAMQ3UHKYsBC8tsZd8Pf8AL07mFHKiC04Etfa +Wb5rpri+RVM+mGITgnmnavehHHHHJAWMjPetZ3P8rSv/Ww4PVsoQoXM3Cr1jh1E9 +dLCfWPz4l8syIscaBYKF4wnLItXGxj3mOgoy93EjlrMaYHlILjGOv4JBM4L5WmoT +JW7IaF6xAoGAZ4K8MwU/cAah8VinMmLGxvWWuBSgTTebuY5zN603MvFLKv5necuc +BZfTTxD+gOnxRT6QAh++tOsbBmsgR9HmTSlQSSgw1L7cwGyXzLCDYw+5K/03KXSU +DaFdgtfcDDJO8WtjOgjyTRzEAOsqFta1ige4pIu5fTilNVMQlhts5Iw= +-----END RSA PRIVATE KEY----- diff --git a/vendor/github.com/docker/distribution/contrib/docker-integration/malevolent.bats b/vendor/github.com/docker/distribution/contrib/docker-integration/malevolent.bats new file mode 100644 index 000000000..0112ff6f1 --- /dev/null +++ b/vendor/github.com/docker/distribution/contrib/docker-integration/malevolent.bats @@ -0,0 +1,192 @@ +#!/usr/bin/env bats + +# This tests various expected error scenarios when pulling bad content + +load helpers + +host="localregistry:6666" +base="malevolent-test" + +function setup() { + tempImage $base:latest +} + +@test "Test malevolent proxy pass through" { + docker_t tag -f $base:latest $host/$base/nochange:latest + run docker_t push $host/$base/nochange:latest + echo $output + [ "$status" -eq 0 ] + has_digest "$output" + + run docker_t pull $host/$base/nochange:latest + echo "$output" + [ "$status" -eq 0 ] +} + +@test "Test malevolent image name change" { + imagename="$host/$base/rename" + image="$imagename:lastest" + docker_t tag -f $base:latest $image + run docker_t push $image + [ "$status" -eq 0 ] + has_digest "$output" + + # Pull attempt should fail to verify manifest digest + run docker_t pull "$imagename@$digest" + echo "$output" + [ "$status" -ne 0 ] +} + +@test "Test malevolent altered layer" { + image="$host/$base/addfile:latest" + tempImage $image + run docker_t push $image + echo "$output" + [ "$status" -eq 0 ] + has_digest "$output" + + # Remove image to ensure layer is pulled and digest verified + docker_t rmi -f $image + + run docker_t pull $image + echo "$output" + [ "$status" -ne 0 ] +} + +@test "Test malevolent altered layer (by digest)" { + imagename="$host/$base/addfile" + image="$imagename:latest" + tempImage $image + run docker_t push $image + echo "$output" + [ "$status" -eq 0 ] + has_digest "$output" + + # Remove image to ensure layer is pulled and digest verified + docker_t rmi -f $image + + run docker_t pull "$imagename@$digest" + echo "$output" + [ "$status" -ne 0 ] +} + +@test "Test malevolent poisoned images" { + truncid="777cf9284131" + poison="${truncid}d77ca0863fb7f054c0a276d7e227b5e9a5d62b497979a481fa32" + image1="$host/$base/image1/poison:$poison" + tempImage $image1 + run docker_t push $image1 + 
echo "$output" + [ "$status" -eq 0 ] + has_digest "$output" + + image2="$host/$base/image2/poison:$poison" + tempImage $image2 + run docker_t push $image2 + echo "$output" + [ "$status" -eq 0 ] + has_digest "$output" + + + # Remove image to ensure layer is pulled and digest verified + docker_t rmi -f $image1 + docker_t rmi -f $image2 + + run docker_t pull $image1 + echo "$output" + [ "$status" -eq 0 ] + run docker_t pull $image2 + echo "$output" + [ "$status" -eq 0 ] + + # Test if there are multiple images + run docker_t images + echo "$output" + [ "$status" -eq 0 ] + + # Test images have same ID and not the poison + id1=$(docker_t inspect --format="{{.Id}}" $image1) + id2=$(docker_t inspect --format="{{.Id}}" $image2) + + # Remove old images + docker_t rmi -f $image1 + docker_t rmi -f $image2 + + [ "$id1" != "$id2" ] + + [ "$id1" != "$truncid" ] + + [ "$id2" != "$truncid" ] +} + +@test "Test malevolent altered identical images" { + truncid1="777cf9284131" + poison1="${truncid1}d77ca0863fb7f054c0a276d7e227b5e9a5d62b497979a481fa32" + truncid2="888cf9284131" + poison2="${truncid2}d77ca0863fb7f054c0a276d7e227b5e9a5d62b497979a481fa64" + + image1="$host/$base/image1/alteredid:$poison1" + tempImage $image1 + run docker_t push $image1 + echo "$output" + [ "$status" -eq 0 ] + has_digest "$output" + + image2="$host/$base/image2/alteredid:$poison2" + docker_t tag -f $image1 $image2 + run docker_t push $image2 + echo "$output" + [ "$status" -eq 0 ] + has_digest "$output" + + + # Remove image to ensure layer is pulled and digest verified + docker_t rmi -f $image1 + docker_t rmi -f $image2 + + run docker_t pull $image1 + echo "$output" + [ "$status" -eq 0 ] + run docker_t pull $image2 + echo "$output" + [ "$status" -eq 0 ] + + # Test if there are multiple images + run docker_t images + echo "$output" + [ "$status" -eq 0 ] + + # Test images have same ID and not the poison + id1=$(docker_t inspect --format="{{.Id}}" $image1) + id2=$(docker_t inspect --format="{{.Id}}" $image2) + + # Remove old images + docker_t rmi -f $image1 + docker_t rmi -f $image2 + + [ "$id1" == "$id2" ] + + [ "$id1" != "$truncid1" ] + + [ "$id2" != "$truncid2" ] +} + +@test "Test malevolent resumeable pull" { + version_check docker "$GOLEM_DIND_VERSION" "1.11.0" + version_check registry "$GOLEM_DISTRIBUTION_VERSION" "2.3.0" + + imagename="$host/$base/resumeable" + image="$imagename:latest" + tempImage $image + run docker_t push $image + echo "$output" + [ "$status" -eq 0 ] + has_digest "$output" + + # Remove image to ensure layer is pulled and digest verified + docker_t rmi -f $image + + run docker_t pull "$imagename@$digest" + echo "$output" + [ "$status" -eq 0 ] +} diff --git a/vendor/github.com/docker/distribution/contrib/docker-integration/nginx/Dockerfile b/vendor/github.com/docker/distribution/contrib/docker-integration/nginx/Dockerfile new file mode 100644 index 000000000..17f999d24 --- /dev/null +++ b/vendor/github.com/docker/distribution/contrib/docker-integration/nginx/Dockerfile @@ -0,0 +1,10 @@ +FROM nginx:1.9 + +COPY nginx.conf /etc/nginx/nginx.conf +COPY registry.conf /etc/nginx/conf.d/registry.conf +COPY docker-registry-v2.conf /etc/nginx/docker-registry-v2.conf +COPY registry-noauth.conf /etc/nginx/registry-noauth.conf +COPY registry-basic.conf /etc/nginx/registry-basic.conf +COPY test.passwd /etc/nginx/test.passwd +COPY ssl /etc/nginx/ssl +COPY v1 /var/www/html/v1 diff --git a/vendor/github.com/docker/distribution/contrib/docker-integration/nginx/docker-registry-v2.conf 
b/vendor/github.com/docker/distribution/contrib/docker-integration/nginx/docker-registry-v2.conf new file mode 100644 index 000000000..65c4d7766 --- /dev/null +++ b/vendor/github.com/docker/distribution/contrib/docker-integration/nginx/docker-registry-v2.conf @@ -0,0 +1,6 @@ +proxy_pass http://docker-registry-v2; +proxy_set_header Host $http_host; # required for docker client's sake +proxy_set_header X-Real-IP $remote_addr; # pass on real client's IP +proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; +proxy_set_header X-Forwarded-Proto $scheme; +proxy_read_timeout 900; diff --git a/vendor/github.com/docker/distribution/contrib/docker-integration/nginx/nginx.conf b/vendor/github.com/docker/distribution/contrib/docker-integration/nginx/nginx.conf new file mode 100644 index 000000000..543eab69e --- /dev/null +++ b/vendor/github.com/docker/distribution/contrib/docker-integration/nginx/nginx.conf @@ -0,0 +1,61 @@ +user nginx; +worker_processes 1; + +error_log /var/log/nginx/error.log warn; +pid /var/run/nginx.pid; + +events { + worker_connections 1024; +} + +http { + include /etc/nginx/mime.types; + default_type application/octet-stream; + + log_format main '$remote_addr - $remote_user [$time_local] "$request" ' + '$status $body_bytes_sent "$http_referer" ' + '"$http_user_agent" "$http_x_forwarded_for"'; + + access_log /var/log/nginx/access.log main; + + sendfile on; + + keepalive_timeout 65; + + include /etc/nginx/conf.d/*.conf; +} + +# Setup TCP proxies +stream { + # Malevolent proxy + server { + listen 6666; + proxy_pass malevolent:6666; + } + + # Registry configured for token server + server { + listen 5554; + listen 5555; + proxy_pass registryv2token:5000; + } + + # Token server + server { + listen 5556; + proxy_pass tokenserver:5556; + } + + # Registry configured for token server with oauth + server { + listen 5557; + listen 5558; + proxy_pass registryv2tokenoauth:5000; + } + + # Token server with oauth + server { + listen 5559; + proxy_pass tokenserveroauth:5559; + } +} diff --git a/vendor/github.com/docker/distribution/contrib/docker-integration/nginx/registry-basic.conf b/vendor/github.com/docker/distribution/contrib/docker-integration/nginx/registry-basic.conf new file mode 100644 index 000000000..117ea5849 --- /dev/null +++ b/vendor/github.com/docker/distribution/contrib/docker-integration/nginx/registry-basic.conf @@ -0,0 +1,8 @@ +client_max_body_size 0; +chunked_transfer_encoding on; +location /v2/ { + auth_basic "registry.localhost"; + auth_basic_user_file test.passwd; + add_header 'Docker-Distribution-Api-Version' 'registry/2.0' always; + include docker-registry-v2.conf; +} diff --git a/vendor/github.com/docker/distribution/contrib/docker-integration/nginx/registry-noauth.conf b/vendor/github.com/docker/distribution/contrib/docker-integration/nginx/registry-noauth.conf new file mode 100644 index 000000000..6e182d446 --- /dev/null +++ b/vendor/github.com/docker/distribution/contrib/docker-integration/nginx/registry-noauth.conf @@ -0,0 +1,5 @@ +client_max_body_size 0; +chunked_transfer_encoding on; +location /v2/ { + include docker-registry-v2.conf; +} diff --git a/vendor/github.com/docker/distribution/contrib/docker-integration/nginx/registry.conf b/vendor/github.com/docker/distribution/contrib/docker-integration/nginx/registry.conf new file mode 100644 index 000000000..e693d569a --- /dev/null +++ b/vendor/github.com/docker/distribution/contrib/docker-integration/nginx/registry.conf @@ -0,0 +1,260 @@ +# Docker registry proxy for api version 2 + +upstream 
docker-registry-v2 { + server registryv2:5000; +} + +# No client auth or TLS +server { + listen 5000; + server_name localhost; + + # disable any limits to avoid HTTP 413 for large image uploads + client_max_body_size 0; + + # required to avoid HTTP 411: see Issue #1486 (https://github.com/docker/docker/issues/1486) + chunked_transfer_encoding on; + + location /v2/ { + # Do not allow connections from docker 1.5 and earlier + # docker pre-1.6.0 did not properly set the user agent on ping, catch "Go *" user agents + if ($http_user_agent ~ "^(docker\/1\.(3|4|5(?!\.[0-9]-dev))|Go ).*$" ) { + return 404; + } + + include docker-registry-v2.conf; + } +} + +# No client auth or TLS (V2 Only) +server { + listen 5002; + server_name localhost; + + # disable any limits to avoid HTTP 413 for large image uploads + client_max_body_size 0; + + # required to avoid HTTP 411: see Issue #1486 (https://github.com/docker/docker/issues/1486) + chunked_transfer_encoding on; + + location / { + include docker-registry-v2.conf; + } +} + +# TLS Configuration chart +# Username/Password: testuser/passpassword +# | ca | client | basic | notes +# 5440 | yes | no | no | Tests CA certificate +# 5441 | yes | no | yes | Tests basic auth over TLS +# 5442 | yes | yes | no | Tests client auth with client CA +# 5443 | yes | yes | no | Tests client auth without client CA +# 5444 | yes | yes | yes | Tests using basic auth + tls auth +# 5445 | no | no | no | Tests insecure using TLS +# 5446 | no | no | yes | Tests sending credentials to server with insecure TLS +# 5447 | no | yes | no | Tests client auth to insecure +# 5448 | yes | no | no | Bad SSL version + +server { + listen 5440; + server_name localhost; + ssl on; + ssl_certificate /etc/nginx/ssl/registry-ca+localhost-cert.pem; + ssl_certificate_key /etc/nginx/ssl/registry-ca+localhost-key.pem; + include registry-noauth.conf; +} + +server { + listen 5441; + server_name localhost; + ssl on; + ssl_certificate /etc/nginx/ssl/registry-ca+localhost-cert.pem; + ssl_certificate_key /etc/nginx/ssl/registry-ca+localhost-key.pem; + include registry-basic.conf; +} + +server { + listen 5442; + listen 5443; + server_name localhost; + ssl on; + ssl_certificate /etc/nginx/ssl/registry-ca+localhost-cert.pem; + ssl_certificate_key /etc/nginx/ssl/registry-ca+localhost-key.pem; + ssl_client_certificate /etc/nginx/ssl/registry-ca+ca.pem; + ssl_verify_client on; + include registry-noauth.conf; +} + +server { + listen 5444; + server_name localhost; + ssl on; + ssl_certificate /etc/nginx/ssl/registry-ca+localhost-cert.pem; + ssl_certificate_key /etc/nginx/ssl/registry-ca+localhost-key.pem; + ssl_client_certificate /etc/nginx/ssl/registry-ca+ca.pem; + ssl_verify_client on; + include registry-basic.conf; +} + +server { + listen 5445; + server_name localhost; + ssl on; + ssl_certificate /etc/nginx/ssl/registry-noca+localhost-cert.pem; + ssl_certificate_key /etc/nginx/ssl/registry-noca+localhost-key.pem; + include registry-noauth.conf; +} + +server { + listen 5446; + server_name localhost; + ssl on; + ssl_certificate /etc/nginx/ssl/registry-noca+localhost-cert.pem; + ssl_certificate_key /etc/nginx/ssl/registry-noca+localhost-key.pem; + include registry-basic.conf; +} + +server { + listen 5447; + server_name localhost; + ssl on; + ssl_certificate /etc/nginx/ssl/registry-noca+localhost-cert.pem; + ssl_certificate_key /etc/nginx/ssl/registry-noca+localhost-key.pem; + ssl_client_certificate /etc/nginx/ssl/registry-ca+ca.pem; + ssl_verify_client on; + include registry-noauth.conf; +} + +server { + listen 5448; 
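+ # This endpoint intentionally speaks only SSLv3 (see ssl_protocols below);
+ # per the configuration chart above it exists to verify that clients
+ # refusing to fall back to SSLv3 fail to connect.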
+ server_name localhost; + ssl on; + ssl_certificate /etc/nginx/ssl/registry-ca+localhost-cert.pem; + ssl_certificate_key /etc/nginx/ssl/registry-ca+localhost-key.pem; + ssl_protocols SSLv3; + include registry-noauth.conf; +} + +# Add configuration for localregistry server_name +# Requires configuring /etc/hosts to use +# Set /etc/hosts entry to external IP, not 127.0.0.1 for testing +# Docker secure/insecure registry features +server { + listen 5440; + server_name localregistry; + ssl on; + ssl_certificate /etc/nginx/ssl/registry-ca+localregistry-cert.pem; + ssl_certificate_key /etc/nginx/ssl/registry-ca+localregistry-key.pem; + include registry-noauth.conf; +} + +server { + listen 5441; + server_name localregistry; + ssl on; + ssl_certificate /etc/nginx/ssl/registry-ca+localregistry-cert.pem; + ssl_certificate_key /etc/nginx/ssl/registry-ca+localregistry-key.pem; + include registry-basic.conf; +} + +server { + listen 5442; + listen 5443; + server_name localregistry; + ssl on; + ssl_certificate /etc/nginx/ssl/registry-ca+localregistry-cert.pem; + ssl_certificate_key /etc/nginx/ssl/registry-ca+localregistry-key.pem; + ssl_client_certificate /etc/nginx/ssl/registry-ca+ca.pem; + ssl_verify_client on; + include registry-noauth.conf; +} + +server { + listen 5444; + server_name localregistry; + ssl on; + ssl_certificate /etc/nginx/ssl/registry-ca+localregistry-cert.pem; + ssl_certificate_key /etc/nginx/ssl/registry-ca+localregistry-key.pem; + ssl_client_certificate /etc/nginx/ssl/registry-ca+ca.pem; + ssl_verify_client on; + include registry-basic.conf; +} + +server { + listen 5445; + server_name localregistry; + ssl on; + ssl_certificate /etc/nginx/ssl/registry-noca+localregistry-cert.pem; + ssl_certificate_key /etc/nginx/ssl/registry-noca+localregistry-key.pem; + include registry-noauth.conf; +} + +server { + listen 5446; + server_name localregistry; + ssl on; + ssl_certificate /etc/nginx/ssl/registry-noca+localregistry-cert.pem; + ssl_certificate_key /etc/nginx/ssl/registry-noca+localregistry-key.pem; + include registry-basic.conf; +} + +server { + listen 5447; + server_name localregistry; + ssl on; + ssl_certificate /etc/nginx/ssl/registry-noca+localregistry-cert.pem; + ssl_certificate_key /etc/nginx/ssl/registry-noca+localregistry-key.pem; + ssl_client_certificate /etc/nginx/ssl/registry-ca+ca.pem; + ssl_verify_client on; + include registry-noauth.conf; +} + +server { + listen 5448; + server_name localregistry; + ssl on; + ssl_certificate /etc/nginx/ssl/registry-ca+localregistry-cert.pem; + ssl_certificate_key /etc/nginx/ssl/registry-ca+localregistry-key.pem; + ssl_protocols SSLv3; + include registry-noauth.conf; +} + + +# V1 search test +# Registry configured with token auth and no tls +# TLS termination done by nginx, search results +# served by nginx + +upstream docker-registry-v2-oauth { + server registryv2tokenoauthnotls:5000; +} + +server { + listen 5600; + server_name localregistry; + ssl on; + ssl_certificate /etc/nginx/ssl/registry-ca+localregistry-cert.pem; + ssl_certificate_key /etc/nginx/ssl/registry-ca+localregistry-key.pem; + + root /var/www/html; + + client_max_body_size 0; + chunked_transfer_encoding on; + location /v2/ { + proxy_buffering off; + proxy_pass http://docker-registry-v2-oauth; + proxy_set_header Host $http_host; # required for docker client's sake + proxy_set_header X-Real-IP $remote_addr; # pass on real client's IP + proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; + proxy_set_header X-Forwarded-Proto $scheme; + proxy_read_timeout 900; + } + + 
location /v1/search { + if ($http_authorization !~ "Bearer [a-zA-Z0-9\._-]+") { + return 401; + } + try_files /v1/search.json =404; + add_header Content-Type application/json; + } +} diff --git a/vendor/github.com/docker/distribution/contrib/docker-integration/nginx/test.passwd b/vendor/github.com/docker/distribution/contrib/docker-integration/nginx/test.passwd new file mode 100644 index 000000000..4e55de816 --- /dev/null +++ b/vendor/github.com/docker/distribution/contrib/docker-integration/nginx/test.passwd @@ -0,0 +1 @@ +testuser:$apr1$YmLhHjm6$AjP4z8J1WgcUNxU8J4ue5. diff --git a/vendor/github.com/docker/distribution/contrib/docker-integration/nginx/v1/search.json b/vendor/github.com/docker/distribution/contrib/docker-integration/nginx/v1/search.json new file mode 100644 index 000000000..3da8f1adb --- /dev/null +++ b/vendor/github.com/docker/distribution/contrib/docker-integration/nginx/v1/search.json @@ -0,0 +1 @@ +{"num_pages":1,"num_results":2,"page":1,"page_size": 25,"query":"testsearch","results":[{"description":"","is_automated":false,"is_official":false,"is_trusted":false, "name":"dmcgowan/testsearch-1","star_count":1000},{"description":"Some automated build","is_automated":true,"is_official":false,"is_trusted":false,"name":"dmcgowan/testsearch-2","star_count":10}]} diff --git a/vendor/github.com/docker/distribution/contrib/docker-integration/run_multiversion.sh b/vendor/github.com/docker/distribution/contrib/docker-integration/run_multiversion.sh new file mode 100755 index 000000000..b673d3114 --- /dev/null +++ b/vendor/github.com/docker/distribution/contrib/docker-integration/run_multiversion.sh @@ -0,0 +1,64 @@ +#!/usr/bin/env bash + +# Run the integration tests with multiple versions of the Docker engine + +set -e +set -x + +DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) + + +if [ "$TMPDIR" != "" ] && [ ! -d "$TMPDIR" ]; then + mkdir -p $TMPDIR +fi + +cachedir=`mktemp -t -d golem-cache.XXXXXX` +trap "rm -rf $cachedir" EXIT + +if [ "$1" == "-d" ]; then + # Drivers to use for Docker engines the tests are going to create. + STORAGE_DRIVER=${STORAGE_DRIVER:-overlay} + + docker daemon --log-level=panic --storage-driver="$STORAGE_DRIVER" & + DOCKER_PID=$! + + # Wait for it to become reachable. + tries=10 + until docker version &> /dev/null; do + (( tries-- )) + if [ $tries -le 0 ]; then + echo >&2 "error: daemon failed to start" + exit 1 + fi + sleep 1 + done + + trap "kill $DOCKER_PID" EXIT +fi + +distimage=$(docker build -q $DIR/../..) 
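+# distimage above is the ID of the distribution image built from the repository
+# root two levels up; the git describe output below (for example "v2.6.1-5-gabcdef.m"
+# when the tree is dirty) has its leading "v" stripped to produce distversion.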
+fullversion=$(git describe --match 'v[0-9]*' --dirty='.m' --always) +distversion=${fullversion:1} + +echo "Testing image $distimage with distribution version $distversion" + +# Pull needed images before invoking golem to get pull time +# These images are defined in golem.conf +time docker pull nginx:1.9 +time docker pull golang:1.6 +time docker pull registry:0.9.1 +time docker pull dmcgowan/token-server:simple +time docker pull dmcgowan/token-server:oauth +time docker pull distribution/golem-runner:0.1-bats + +time docker pull docker:1.9.1-dind +time docker pull docker:1.10.3-dind +time docker pull docker:1.11.1-dind + +golem -cache $cachedir \ + -i "golem-distribution:latest,$distimage,$distversion" \ + -i "golem-dind:latest,docker:1.9.1-dind,1.9.1" \ + -i "golem-dind:latest,docker:1.10.3-dind,1.10.3" \ + -i "golem-dind:latest,docker:1.11.1-dind,1.11.1" \ + $DIR + diff --git a/vendor/github.com/docker/distribution/contrib/docker-integration/tls.bats b/vendor/github.com/docker/distribution/contrib/docker-integration/tls.bats new file mode 100644 index 000000000..46f85e398 --- /dev/null +++ b/vendor/github.com/docker/distribution/contrib/docker-integration/tls.bats @@ -0,0 +1,109 @@ +#!/usr/bin/env bats + +# Registry host name, should be set to non-localhost address and match +# DNS name in nginx/ssl certificates and what is installed in /etc/docker/cert.d + +load helpers + +hostname="localregistry" +base="hello-world" +image="${base}:latest" + +# Login information, should match values in nginx/test.passwd +user=${TEST_USER:-"testuser"} +password=${TEST_PASSWORD:-"passpassword"} +email="distribution@docker.com" + +function setup() { + tempImage $image +} + +@test "Test valid certificates" { + docker_t tag -f $image $hostname:5440/$image + run docker_t push $hostname:5440/$image + [ "$status" -eq 0 ] + has_digest "$output" +} + +@test "Test basic auth" { + basic_auth_version_check + login $hostname:5441 + docker_t tag -f $image $hostname:5441/$image + run docker_t push $hostname:5441/$image + [ "$status" -eq 0 ] + has_digest "$output" +} + +@test "Test basic auth with build" { + basic_auth_version_check + login $hostname:5441 + + image1=$hostname:5441/$image-build + image2=$hostname:5441/$image-build-2 + + tempImage $image1 + + run docker_t push $image1 + [ "$status" -eq 0 ] + has_digest "$output" + + docker_t rmi $image1 + + run build $image2 $image1 + echo $output + [ "$status" -eq 0 ] + + run docker_t push $image2 + echo $output + [ "$status" -eq 0 ] + has_digest "$output" +} + +@test "Test TLS client auth" { + docker_t tag -f $image $hostname:5442/$image + run docker_t push $hostname:5442/$image + [ "$status" -eq 0 ] + has_digest "$output" +} + +@test "Test TLS client with invalid certificate authority fails" { + docker_t tag -f $image $hostname:5443/$image + run docker_t push $hostname:5443/$image + [ "$status" -ne 0 ] +} + +@test "Test basic auth with TLS client auth" { + basic_auth_version_check + login $hostname:5444 + docker_t tag -f $image $hostname:5444/$image + run docker_t push $hostname:5444/$image + [ "$status" -eq 0 ] + has_digest "$output" +} + +@test "Test unknown certificate authority fails" { + docker_t tag -f $image $hostname:5445/$image + run docker_t push $hostname:5445/$image + [ "$status" -ne 0 ] +} + +@test "Test basic auth with unknown certificate authority fails" { + run login $hostname:5446 + [ "$status" -ne 0 ] + docker_t tag -f $image $hostname:5446/$image + run docker_t push $hostname:5446/$image + [ "$status" -ne 0 ] +} + +@test "Test TLS client auth to 
server with unknown certificate authority fails" { + docker_t tag -f $image $hostname:5447/$image + run docker_t push $hostname:5447/$image + [ "$status" -ne 0 ] +} + +@test "Test failure to connect to server fails to fallback to SSLv3" { + docker_t tag -f $image $hostname:5448/$image + run docker_t push $hostname:5448/$image + [ "$status" -ne 0 ] +} + diff --git a/vendor/github.com/docker/distribution/contrib/docker-integration/token.bats b/vendor/github.com/docker/distribution/contrib/docker-integration/token.bats new file mode 100644 index 000000000..256885a22 --- /dev/null +++ b/vendor/github.com/docker/distribution/contrib/docker-integration/token.bats @@ -0,0 +1,135 @@ +#!/usr/bin/env bats + +# This tests contacting a registry using a token server + +load helpers + +user="testuser" +password="testpassword" +email="a@nowhere.com" +base="hello-world" + +@test "Test token server login" { + run docker_t login -u $user -p $password -e $email localregistry:5554 + echo $output + [ "$status" -eq 0 ] + + # First line is WARNING about credential save or email deprecation + [ "${lines[2]}" = "Login Succeeded" -o "${lines[1]}" = "Login Succeeded" ] +} + +@test "Test token server bad login" { + run docker_t login -u "testuser" -p "badpassword" -e $email localregistry:5554 + [ "$status" -ne 0 ] + + run docker_t login -u "baduser" -p "testpassword" -e $email localregistry:5554 + [ "$status" -ne 0 ] +} + +@test "Test push and pull with token auth" { + login localregistry:5555 + image="localregistry:5555/testuser/token" + build $image "$base:latest" + + run docker_t push $image + echo $output + [ "$status" -eq 0 ] + + docker_t rmi $image + + docker_t pull $image +} + +@test "Test push and pull with token auth wrong namespace" { + login localregistry:5555 + image="localregistry:5555/notuser/token" + build $image "$base:latest" + + run docker_t push $image + [ "$status" -ne 0 ] +} + +@test "Test oauth token server login" { + version_check docker "$GOLEM_DIND_VERSION" "1.11.0" + + login_oauth localregistry:5557 +} + +@test "Test oauth token server bad login" { + version_check docker "$GOLEM_DIND_VERSION" "1.11.0" + + run docker_t login -u "testuser" -p "badpassword" -e $email localregistry:5557 + [ "$status" -ne 0 ] + + run docker_t login -u "baduser" -p "testpassword" -e $email localregistry:5557 + [ "$status" -ne 0 ] +} + +@test "Test oauth push and pull with token auth" { + version_check docker "$GOLEM_DIND_VERSION" "1.11.0" + + login_oauth localregistry:5558 + image="localregistry:5558/testuser/token" + build $image "$base:latest" + + run docker_t push $image + echo $output + [ "$status" -eq 0 ] + + docker_t rmi $image + + docker_t pull $image +} + +@test "Test oauth push and build with token auth" { + version_check docker "$GOLEM_DIND_VERSION" "1.11.0" + + login_oauth localregistry:5558 + image="localregistry:5558/testuser/token-build" + tempImage $image + + run docker_t push $image + echo $output + [ "$status" -eq 0 ] + has_digest "$output" + + docker_t rmi $image + + image2="localregistry:5558/testuser/token-build-2" + run build $image2 $image + echo $output + [ "$status" -eq 0 ] + + run docker_t push $image2 + echo $output + [ "$status" -eq 0 ] + has_digest "$output" + +} + +@test "Test oauth push and pull with token auth wrong namespace" { + version_check docker "$GOLEM_DIND_VERSION" "1.11.0" + + login_oauth localregistry:5558 + image="localregistry:5558/notuser/token" + build $image "$base:latest" + + run docker_t push $image + [ "$status" -ne 0 ] +} + +@test "Test oauth with v1 search" { + 
version_check docker "$GOLEM_DIND_VERSION" "1.12.0" + + run docker_t search localregistry:5600/testsearch + [ "$status" -ne 0 ] + + login_oauth localregistry:5600 + + run docker_t search localregistry:5600/testsearch + echo $output + [ "$status" -eq 0 ] + + echo $output | grep "testsearch-1" + echo $output | grep "testsearch-2" +} diff --git a/vendor/github.com/docker/distribution/contrib/docker-integration/tokenserver-oauth/.htpasswd b/vendor/github.com/docker/distribution/contrib/docker-integration/tokenserver-oauth/.htpasswd new file mode 100644 index 000000000..0bbf57400 --- /dev/null +++ b/vendor/github.com/docker/distribution/contrib/docker-integration/tokenserver-oauth/.htpasswd @@ -0,0 +1 @@ +testuser:$2y$05$T2MlBvkN1R/yICNnLuf1leOlOfAY0DvybctbbWUFKlojfkShVgn4m diff --git a/vendor/github.com/docker/distribution/contrib/docker-integration/tokenserver-oauth/Dockerfile b/vendor/github.com/docker/distribution/contrib/docker-integration/tokenserver-oauth/Dockerfile new file mode 100644 index 000000000..d8c7b5cc0 --- /dev/null +++ b/vendor/github.com/docker/distribution/contrib/docker-integration/tokenserver-oauth/Dockerfile @@ -0,0 +1,8 @@ +FROM dmcgowan/token-server:oauth + +WORKDIR / + +COPY ./.htpasswd /.htpasswd +COPY ./certs/auth.localregistry.cert /tls.cert +COPY ./certs/auth.localregistry.key /tls.key +COPY ./certs/signing.key /sign.key diff --git a/vendor/github.com/docker/distribution/contrib/docker-integration/tokenserver-oauth/certs/auth.localregistry.cert b/vendor/github.com/docker/distribution/contrib/docker-integration/tokenserver-oauth/certs/auth.localregistry.cert new file mode 100644 index 000000000..4144ca168 --- /dev/null +++ b/vendor/github.com/docker/distribution/contrib/docker-integration/tokenserver-oauth/certs/auth.localregistry.cert @@ -0,0 +1,19 @@ +-----BEGIN CERTIFICATE----- +MIIDHDCCAgagAwIBAgIRAKhhQMnqZx+hkOmoUYgPb+kwCwYJKoZIhvcNAQELMCYx +ETAPBgNVBAoTCFF1aWNrVExTMREwDwYDVQQDEwhRdWlja1RMUzAeFw0xNjAxMjgw +MDQyMzFaFw0xOTAxMTIwMDQyMzFaMDAxETAPBgNVBAoTCFF1aWNrVExTMRswGQYD +VQQDExJhdXRoLmxvY2FscmVnaXN0cnkwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAw +ggEKAoIBAQD1tUf1EghBlIRrE83yF4zDgRu7vH2Jo0kygKJUWtQQe+DfXyjjE/fg +FdKnnoEjsIeF9hxNbTt0ldDz7/n97pbMhoiXULi9iq4jlgSzVL2XEAgrON0YSY/c +Lmmd1KSa/pOUZr2WMAYPZ+FdQfE1W7SMNbErPefBqYdFzpZ+esAtvbajYwIjl8Vy +9c4bidx4vgnNrR9GcFYibjC5sj8syh/OtbzzqiVGT8YcPpmMG6KNRkausa4gqpon +NKYG8C3WDaiPCLYKcvFrFfdEWF/m2oj14eXACXT9iwp8r4bsLgXrZwqcpKOWfVRu +qHC8aV476EYgxWCAOANExUdUaRt5wL/jAgMBAAGjPzA9MA4GA1UdDwEB/wQEAwIA +oDAMBgNVHRMBAf8EAjAAMB0GA1UdEQQWMBSCEmF1dGgubG9jYWxyZWdpc3RyeTAL +BgkqhkiG9w0BAQsDggEBABxPGK9FdGDxcLowNsExKnnZvmQT3H0u+Dux1gkp0AhH +KOrmx3LUENUKLSgotzx133tgOgR5lzAWVFy7bhLwlPhOslxf2oEfztsAMd/tY8rW +PrG2ZqYqlzEQQ9INbAc3woo5A3slN07uhP3F16jNqoMM4zRmw6Ba70CluGKT7x5+ +xVjKoWITLjWDXT5m35PnsN8CpBaFzXYcod/5p9XwCFp0s+aNxfpZECCV/3yqIr+J +ALzroPh43FAlG96o4NyYZ2Msp63newN19R2+TgpV4nXuw2mLVDpvetP7RRqnpvj/ +qwRgt5j4hFjJWb61M0ELL7A9fA71h1ImdGCvnArdBQs= +-----END CERTIFICATE----- diff --git a/vendor/github.com/docker/distribution/contrib/docker-integration/tokenserver-oauth/certs/auth.localregistry.key b/vendor/github.com/docker/distribution/contrib/docker-integration/tokenserver-oauth/certs/auth.localregistry.key new file mode 100644 index 000000000..4c499bb21 --- /dev/null +++ b/vendor/github.com/docker/distribution/contrib/docker-integration/tokenserver-oauth/certs/auth.localregistry.key @@ -0,0 +1,27 @@ +-----BEGIN RSA PRIVATE KEY----- +MIIEpAIBAAKCAQEA9bVH9RIIQZSEaxPN8heMw4Ebu7x9iaNJMoCiVFrUEHvg318o 
+4xP34BXSp56BI7CHhfYcTW07dJXQ8+/5/e6WzIaIl1C4vYquI5YEs1S9lxAIKzjd +GEmP3C5pndSkmv6TlGa9ljAGD2fhXUHxNVu0jDWxKz3nwamHRc6WfnrALb22o2MC +I5fFcvXOG4nceL4Jza0fRnBWIm4wubI/LMofzrW886olRk/GHD6ZjBuijUZGrrGu +IKqaJzSmBvAt1g2ojwi2CnLxaxX3RFhf5tqI9eHlwAl0/YsKfK+G7C4F62cKnKSj +ln1UbqhwvGleO+hGIMVggDgDRMVHVGkbecC/4wIDAQABAoIBAQCrsjXKRwOF8CZo +PLqZBWPT6hBbK+f9miC4LbNBhwbRTf9hl7mWlImOCTHe95/+NIk/Ty+P21jEqzwM +ehETJPoziX9BXaL6sEHnlBlMx1aEjStoKKA3LJBeqAAdzk4IEQVHmlO4824IreqJ +pF7Njnunzo0zTlr4tWJVoXsAfv5z9tNtdkxYBbIa0fjfGtlqXU3gLq58FCON3mB/ +NGc0AyA1UFGp0FzpdEcwTGD4InsXbcmsl2l/VPBJuZbryITRqWs6BbK++80DRhNt +afMhP+IzKrWSCp0rBYrqqz6AevtlKdEfQK1yXPEjN/63QLMevt8mF/1JCp//TQnf +Z6bIQbAhAoGBAP7vFA0PcvoXt9MXvvAwrKY1s6pNw4nWPG27qY1/m+DkBwP8IQms +4AWGv1wscZzXJYTvaLO5/qjmGUj50ohcVEvyZJioh1pKXA8Chxvd6rBA/O/Lj5E0 +3MOSA5Q0gxJ0Mhv0zGbbyN5fY8D8zhxoqQP4LoW+UdZG2Oi6JxsQ9c9dAoGBAPa8 +U3bGuM5OGA9EWP7mkB/VnjDTL1aEIN3cOHbHIKwH/loxdYcNMBE7vwxV1CzgIzXT +wsL0iE15fQdK938u0+um8aH5QtbWNI8tdk1XVjEC/i3C7N6WVUutneCKUDb4QxiB +9OvWCbNNiN+xTKBBM93YlwO3GYfrW9Pmm9q1+hg/AoGBALJlUS22gun50PxaIJZq +KVcCO2DQnCYHki/j48mN4+HjD/m85M2lePrFCYIR48syTyIQer9SR5+frVAA6k/b +9G1VCQo+3MDVSkiCp1Nb3tBKGfYgB65ARMBinDiI6rPuNeaUTrkn0g+yxtaU0hLV +Nnj9omia/x+oYj+xjI4HN0xNAoGARy92dSJIV104m88ATip/EnAzP6ruUWu1f8z1 +jW9OAdQckjEK03f+kjpGmGx61qekAPejjVO3r4KJi/0ZAtyjz61OsYiUvB748wYO +x6mW+HUAmHtQk7eTzE2+6vV8xx9BXGTCIPiTu+N2xfMFRIcLS8odZ7j/6LMCv1Qd +SzCNg0kCgYBaNlEs4pK1VxZZpEWwVmFpgIxfEfxLIaGrek6wBTcCn/VA2M0oHuez +mlMio8VY0yWPBJz30JflDiTmYIvteLPMHT0N0J6isiXLhzJSFI4+cAMLE2Q5v8rz +W+W5/L8YZeierW0qJat1BrgStaf5ZLpiOc9pKBSwycydPH5BfVdK/A== +-----END RSA PRIVATE KEY----- diff --git a/vendor/github.com/docker/distribution/contrib/docker-integration/tokenserver-oauth/certs/localregistry.cert b/vendor/github.com/docker/distribution/contrib/docker-integration/tokenserver-oauth/certs/localregistry.cert new file mode 100644 index 000000000..105acc4f3 --- /dev/null +++ b/vendor/github.com/docker/distribution/contrib/docker-integration/tokenserver-oauth/certs/localregistry.cert @@ -0,0 +1,19 @@ +-----BEGIN CERTIFICATE----- +MIIDETCCAfugAwIBAgIQN7rT95eAy75c4n6/AsDJODALBgkqhkiG9w0BAQswJjER +MA8GA1UEChMIUXVpY2tUTFMxETAPBgNVBAMTCFF1aWNrVExTMB4XDTE2MDEyODAw +NDIzMloXDTE5MDExMjAwNDIzMlowKzERMA8GA1UEChMIUXVpY2tUTFMxFjAUBgNV +BAMTDWxvY2FscmVnaXN0cnkwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIB +AQDLi75QEkl/qekcoOJlNv9y1IXvrbU2ssl4ViJiZRjWx+/CkyCCOyf9YUpAgRLr +Pskqde2mwhuNP8yBlOBb17Sapz7N3+hJi5j9vLBAFcamPeF3PqxjFv7j5TKkRmSI +dFYQclREwMUd3qEH322KkqOnsEEfdmCgFqWORe+QR5AxzxQP3Pnd4OYH1yZCh0MQ +P2pJgrxxf2I5I/m1AUgoHV1cdBbCv9LGohJPpMtwPC0dJpgMFcnf6hT37At236AY +V437HiRruY7iPWkYFrSPWpwdslJ32MZvRN5RS163jZXjiZ7qWnQOiiDJfXe4evB/ +yQLN4m0qVQxsMz7rkY7OsqaXAgMBAAGjOjA4MA4GA1UdDwEB/wQEAwIAoDAMBgNV +HRMBAf8EAjAAMBgGA1UdEQQRMA+CDWxvY2FscmVnaXN0cnkwCwYJKoZIhvcNAQEL +A4IBAQAyUb3EuMaOylBeV8+4KeBiE4lxykDOwLLSk3jXRsVVtfJpX3v8l5vwo/Jf +iG8tzzz+7uiskI96u3TsekUtVkUxujfKevMP+369K/59s7NRmwwlFMyB2fvL14B2 +oweVjWvM/8fZl6irtFdbJFXXRm7paKso5cmfImxhojAwohgcd4XTVLE/7juYa582 +AaBdRuIiyL71MU9qa1mC5+57AaSLPYaPKpahemgYYkV1Z403Kd6rXchxdQ8JIAL8 ++0oYTSC+svnz1tUU/V5E5id9LQaTmDN5iIVFhNpqAaZmR45UI86woWvnkMb8Ants +4aknwTwY3300PuTqBdQufvOFDRN5 +-----END CERTIFICATE----- diff --git a/vendor/github.com/docker/distribution/contrib/docker-integration/tokenserver-oauth/certs/localregistry.key b/vendor/github.com/docker/distribution/contrib/docker-integration/tokenserver-oauth/certs/localregistry.key new file mode 100644 index 000000000..cb69a0f3e --- /dev/null +++ 
b/vendor/github.com/docker/distribution/contrib/docker-integration/tokenserver-oauth/certs/localregistry.key @@ -0,0 +1,27 @@ +-----BEGIN RSA PRIVATE KEY----- +MIIEpAIBAAKCAQEAy4u+UBJJf6npHKDiZTb/ctSF7621NrLJeFYiYmUY1sfvwpMg +gjsn/WFKQIES6z7JKnXtpsIbjT/MgZTgW9e0mqc+zd/oSYuY/bywQBXGpj3hdz6s +Yxb+4+UypEZkiHRWEHJURMDFHd6hB99tipKjp7BBH3ZgoBaljkXvkEeQMc8UD9z5 +3eDmB9cmQodDED9qSYK8cX9iOSP5tQFIKB1dXHQWwr/SxqIST6TLcDwtHSaYDBXJ +3+oU9+wLdt+gGFeN+x4ka7mO4j1pGBa0j1qcHbJSd9jGb0TeUUtet42V44me6lp0 +DoogyX13uHrwf8kCzeJtKlUMbDM+65GOzrKmlwIDAQABAoIBAF6vFMp+lz4RteSh +Wm8m1FGAVwWVUpStOlcGClynFpTi0L88XYT3K7UMStQSttBDlqRv0ysdZF+ia+lj +bbKLdvHyFp8CJzX/AB4YZgyJlKzEYFtuBhbaHZu5hIMyU5W+OELSTCznV0p7w4C8 +CGLLr+FTdhfCo1QU9NJn6fa9s2/XRdSClBBalAHYs0ZS7ZckaF/sPiC/VapfBMet +qjJXNYiO6pXYriGWKF9zdAMfk2CM0BVWbnwQZkMSEQirrTcJwm3ezyloXCv2nywK +/VzbUT1HJVyzo5oAwTd0MwDc2oEMiFzlfO028zY4LDltpia+SyWvFi5NaIqzFESc +yLgJacECgYEA3jvH+ZQHQf42Md8TCciokaYvwWIKJdk4WRjbvE5cBZekyXAm7/3b +/1VFDKsy2RPlfmfHP3wy9rlnjzsRveB5qaclgS8aI67AYsWd/yRgfRatl7Ve9bHl +LY6VM5L/DZTxykcqivwjc77XoDuBfUKs6tyuSLQku+FOTbLtNYlUCHECgYEA6nkR +lkXufyLmDhNb3093RsYvPcs1kGaIIGTnz3cxWNh485DgsyLBuYQ5ugupQkzM8YSt +ohDTmVpggqjlXQxCg0Zw8gkEV0v8KsLGjn1CuTJg/mBArXlelq1FEeRAYC9/YfOz +ocXegHV7wDKKtcraNZFsEc7Z0LwbC9wtzSFG44cCgYASkMX1CLPOhJE8e1lY0OWc +PVjx++HDJbF6aAQ7aARyBygiF/d4xylw3EvHcinuTqY2eC8CE7siN3z6T0H9Ldqc +HLWaZDf30SqLVd0MKprQ+GsKKIHFXtY5hxbZ1ybtmIrWjjl0oPnJOqFC5pW7xC0z +9bmtozcKZxkmjpMYjN9zUQKBgQCqV6KLRerqunPgLfhE1/qTlE+l2QflDFhBEI3I +j5NuNHZKnSphehK7sHAv1WD2Jc2OeRGb+BWCB8Ktqf5YBxwbOwW7EQnyUeW1OyP9 +SMs8uHj21P6oCNDLLr5LLUQHnPoyM1aBZLstICzziMR1JhY5bJjSpzBfEQmlKCSu +LkrN6QKBgQCRXrBJRUxeJj7wCnCSq0Clf9NhCpQnwo4bEx8sKlj8K8ku8MvwQwoM +3KfWc7bOl6A2/mM/k4yoHtBMM9X9xqYtsgeFhxuiWBcfTmTxWh73LQ48Kgbrgodt +6yTccnjr7OtBidD85c6lgjAUgcL43QY8mlw0OhzXAZ2R5HWFp4ht+w== +-----END RSA PRIVATE KEY----- diff --git a/vendor/github.com/docker/distribution/contrib/docker-integration/tokenserver-oauth/certs/signing.cert b/vendor/github.com/docker/distribution/contrib/docker-integration/tokenserver-oauth/certs/signing.cert new file mode 100644 index 000000000..45166f2d8 --- /dev/null +++ b/vendor/github.com/docker/distribution/contrib/docker-integration/tokenserver-oauth/certs/signing.cert @@ -0,0 +1,18 @@ +-----BEGIN CERTIFICATE----- +MIIC9TCCAd+gAwIBAgIRAJ6IIisIZxL86oe3oeoAgWUwCwYJKoZIhvcNAQELMCYx +ETAPBgNVBAoTCFF1aWNrVExTMREwDwYDVQQDEwhRdWlja1RMUzAeFw0xNjAxMjgw +MDQyMzNaFw0xOTAxMTIwMDQyMzNaMBMxETAPBgNVBAoTCFF1aWNrVExTMIIBIjAN +BgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA3IXUwqSdO2QTj2ET6fJPGe+KWVnt +QCQQWjkWVpOz8L2A29BRvv9z6lYNf9sOM0Xb5IUAgoZ/s3U6LNYT/RWYFBfeo40r +Xd/MNKAn0kFsSb6BIKmUwPqFeqc8wiPX6yY4SbF1sUTkCTkw3yFHg/AIlwmhpFH3 +9mAmV+x0kTzFR/78ZDD5CUNS59bbu+7UqB06YrJuVEwPY98YixSPXTcaKimsUe+K +IY8FQ6yN6l27MK56wlj4hw2gYz+cyBUBCExCgYMQlOSg2ilH4qYyFvccSDUH7jTA +NwpsIBfdoUVbI+j2ivn+ZGD614LtIStGgUu0mDDVxVOWnRvq/z7LMaa2jwIDAQAB +ozUwMzAOBgNVHQ8BAf8EBAMCAKAwEwYDVR0lBAwwCgYIKwYBBQUHAwIwDAYDVR0T +AQH/BAIwADALBgkqhkiG9w0BAQsDggEBAJq3JzTLrIWCF8rHLTTm1icE9PjOO0sV +a1wrmdJ6NwRbJ66dLZ/4G/NZjVOnce9WFHYLFSEG+wx5YVUPuJXpJaSdy0h8F0Uw +hiJwgeVsGg7vcf4G6mWHrsauDOhylnD31UtYPX1Ao/jcntyyf+gCQpY1J/B8l1yU +LNOwvWLVLpZwZ4ehbKA/UnDXgA+3uHvpzl//cPe0cnt+Mhrgzk5mIMwVR6zCZw1G +oVutAHpv2PXxRwTMu51J+QtSL2b2w3mGHxDLpmz8UdXOtkxdpmDT8kIOtX0T5yGL +29F3fa81iZPs02GWjSGOfOzmCCvaA4C5KJvY/WulF7OOgwvrBpQwqTI= +-----END CERTIFICATE----- diff --git a/vendor/github.com/docker/distribution/contrib/docker-integration/tokenserver-oauth/certs/signing.key 
b/vendor/github.com/docker/distribution/contrib/docker-integration/tokenserver-oauth/certs/signing.key new file mode 100644 index 000000000..475625402 --- /dev/null +++ b/vendor/github.com/docker/distribution/contrib/docker-integration/tokenserver-oauth/certs/signing.key @@ -0,0 +1,27 @@ +-----BEGIN RSA PRIVATE KEY----- +MIIEowIBAAKCAQEA3IXUwqSdO2QTj2ET6fJPGe+KWVntQCQQWjkWVpOz8L2A29BR +vv9z6lYNf9sOM0Xb5IUAgoZ/s3U6LNYT/RWYFBfeo40rXd/MNKAn0kFsSb6BIKmU +wPqFeqc8wiPX6yY4SbF1sUTkCTkw3yFHg/AIlwmhpFH39mAmV+x0kTzFR/78ZDD5 +CUNS59bbu+7UqB06YrJuVEwPY98YixSPXTcaKimsUe+KIY8FQ6yN6l27MK56wlj4 +hw2gYz+cyBUBCExCgYMQlOSg2ilH4qYyFvccSDUH7jTANwpsIBfdoUVbI+j2ivn+ +ZGD614LtIStGgUu0mDDVxVOWnRvq/z7LMaa2jwIDAQABAoIBAD2tiNZv6DImSXo+ +sq0qQomEf/OBvWPFMnWppd/NK/TXa+UPHO4I0MjoDJqIEC6zCU+fC4d2St1MmlrT +/X85vPFRw8mGwGxfHeRSLxEVj04I5GDYTWy0JQUrJUk/cTKp2/Bwm/RaylTyFAM0 +caYrSpvD69vjuTDFr7PDxM6iaqM53zK/vD8kCe81z+wN0UbAKsLlUOKztjH6SzL9 +uVOkekIT/j3L2xxyQhjmhfA3TuCP4uNK/+6/4ovl9Nj4pQsFomsCk4phgqy9SOm1 +4yufmVd8k7J3cppMlMPNc+7tqe2Xn593Y8QT95y3yhtkFECF70yBw64HMDDpA22p +5b/JV9ECgYEA9H4RBXOwbdjcpCa9H3mFjHqUQCqNme1vOSGiflZh9KBCDKgdqugm +KHpvAECADie0p6XRHpxRvufKnGFkJwedfeiKz51T+0dqgPxWncYT1TC+cAjOSzfM +wBpUOcAyvTTviwGbg4bLanHo4remzCbcnRvHQX4YfPFCjT9GhsU+XEUCgYEA5ubz +IlSu1wwFJpoO24ZykGUyqGUQXzR0NrXiLrpF0764qjmHyF8SPJPv1XegSxP/nUTz +SjVfJ7wye/X9qlOpBY8mzy9qQMMKc1cQBV1yVW8IRZ7pMYQZO7qmrZD/DWTa5qWt +pqSbIH2FKedELsKJA/SBtczKjspOdDKyh0UelsMCgYA7DyTfc0XAEy2hPXZb3wgC +mi2rnlvcPf2rCFPvPsCkzf2GfynDehaVmpWrsuj8Al1iTezI/yvD+Mv5oJEH2JAT +tROq+S8rOOIiTFJEBHAQBJlMCOSESPNdyD5mQOZAzEO9CWNejzYd/WwrL//Luut5 +zBcC3AngTIsuAYXw0j6xHQKBgQDamkAJep7k3W5q82OplgoUhpqFLtlnKSP1QBFZ +J+U/6Mqv7jONEeUUEQL42H6bVd2kqUikMw9ZcSVikquLfBUDPFoDwOIZWg4k0IJM +cgHyvGHad+5SgLva/oUawbGWnqtXvfc/U4vCINPXrimxE1/grLW4xp/mu8W24OCA +jIG/PQKBgD/Apl+sfqiB/6ONBjjIswA4yFkEXHSZNpAgcPwhA+cO5D0afEWz2HIx +VeOh5NjN1EL0hX8clFW4bfkK1Vr0kjvbMUXnBWaibUgpiVQl9O9WjaKQLZrp4sRu +x2kJ07Qn6ri7f/lsqOELZwBy95iHWRdePptaAKkRGxJstHI7dgUt +-----END RSA PRIVATE KEY----- diff --git a/vendor/github.com/docker/distribution/contrib/docker-integration/tokenserver-oauth/registry-config-notls.yml b/vendor/github.com/docker/distribution/contrib/docker-integration/tokenserver-oauth/registry-config-notls.yml new file mode 100644 index 000000000..ed6b3ea5d --- /dev/null +++ b/vendor/github.com/docker/distribution/contrib/docker-integration/tokenserver-oauth/registry-config-notls.yml @@ -0,0 +1,15 @@ +version: 0.1 +loglevel: debug +storage: + cache: + blobdescriptor: inmemory + filesystem: + rootdirectory: /tmp/registry-dev +http: + addr: 0.0.0.0:5000 +auth: + token: + realm: "https://auth.localregistry:5559/token/" + issuer: "registry-test" + service: "registry-test" + rootcertbundle: "/etc/docker/registry/tokenbundle.pem" diff --git a/vendor/github.com/docker/distribution/contrib/docker-integration/tokenserver-oauth/registry-config.yml b/vendor/github.com/docker/distribution/contrib/docker-integration/tokenserver-oauth/registry-config.yml new file mode 100644 index 000000000..630ef057c --- /dev/null +++ b/vendor/github.com/docker/distribution/contrib/docker-integration/tokenserver-oauth/registry-config.yml @@ -0,0 +1,18 @@ +version: 0.1 +loglevel: debug +storage: + cache: + blobdescriptor: inmemory + filesystem: + rootdirectory: /tmp/registry-dev +http: + addr: 0.0.0.0:5000 + tls: + certificate: "/etc/docker/registry/localregistry.cert" + key: "/etc/docker/registry/localregistry.key" +auth: + token: + realm: "https://auth.localregistry:5559/token/" + issuer: "registry-test" + service: "registry-test" + 
rootcertbundle: "/etc/docker/registry/tokenbundle.pem" diff --git a/vendor/github.com/docker/distribution/contrib/docker-integration/tokenserver/.htpasswd b/vendor/github.com/docker/distribution/contrib/docker-integration/tokenserver/.htpasswd new file mode 100644 index 000000000..0bbf57400 --- /dev/null +++ b/vendor/github.com/docker/distribution/contrib/docker-integration/tokenserver/.htpasswd @@ -0,0 +1 @@ +testuser:$2y$05$T2MlBvkN1R/yICNnLuf1leOlOfAY0DvybctbbWUFKlojfkShVgn4m diff --git a/vendor/github.com/docker/distribution/contrib/docker-integration/tokenserver/Dockerfile b/vendor/github.com/docker/distribution/contrib/docker-integration/tokenserver/Dockerfile new file mode 100644 index 000000000..345246874 --- /dev/null +++ b/vendor/github.com/docker/distribution/contrib/docker-integration/tokenserver/Dockerfile @@ -0,0 +1,8 @@ +FROM dmcgowan/token-server:simple + +WORKDIR / + +COPY ./.htpasswd /.htpasswd +COPY ./certs/auth.localregistry.cert /tls.cert +COPY ./certs/auth.localregistry.key /tls.key +COPY ./certs/signing.key /sign.key diff --git a/vendor/github.com/docker/distribution/contrib/docker-integration/tokenserver/certs/auth.localregistry.cert b/vendor/github.com/docker/distribution/contrib/docker-integration/tokenserver/certs/auth.localregistry.cert new file mode 100644 index 000000000..4144ca168 --- /dev/null +++ b/vendor/github.com/docker/distribution/contrib/docker-integration/tokenserver/certs/auth.localregistry.cert @@ -0,0 +1,19 @@ +-----BEGIN CERTIFICATE----- +MIIDHDCCAgagAwIBAgIRAKhhQMnqZx+hkOmoUYgPb+kwCwYJKoZIhvcNAQELMCYx +ETAPBgNVBAoTCFF1aWNrVExTMREwDwYDVQQDEwhRdWlja1RMUzAeFw0xNjAxMjgw +MDQyMzFaFw0xOTAxMTIwMDQyMzFaMDAxETAPBgNVBAoTCFF1aWNrVExTMRswGQYD +VQQDExJhdXRoLmxvY2FscmVnaXN0cnkwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAw +ggEKAoIBAQD1tUf1EghBlIRrE83yF4zDgRu7vH2Jo0kygKJUWtQQe+DfXyjjE/fg +FdKnnoEjsIeF9hxNbTt0ldDz7/n97pbMhoiXULi9iq4jlgSzVL2XEAgrON0YSY/c +Lmmd1KSa/pOUZr2WMAYPZ+FdQfE1W7SMNbErPefBqYdFzpZ+esAtvbajYwIjl8Vy +9c4bidx4vgnNrR9GcFYibjC5sj8syh/OtbzzqiVGT8YcPpmMG6KNRkausa4gqpon +NKYG8C3WDaiPCLYKcvFrFfdEWF/m2oj14eXACXT9iwp8r4bsLgXrZwqcpKOWfVRu +qHC8aV476EYgxWCAOANExUdUaRt5wL/jAgMBAAGjPzA9MA4GA1UdDwEB/wQEAwIA +oDAMBgNVHRMBAf8EAjAAMB0GA1UdEQQWMBSCEmF1dGgubG9jYWxyZWdpc3RyeTAL +BgkqhkiG9w0BAQsDggEBABxPGK9FdGDxcLowNsExKnnZvmQT3H0u+Dux1gkp0AhH +KOrmx3LUENUKLSgotzx133tgOgR5lzAWVFy7bhLwlPhOslxf2oEfztsAMd/tY8rW +PrG2ZqYqlzEQQ9INbAc3woo5A3slN07uhP3F16jNqoMM4zRmw6Ba70CluGKT7x5+ +xVjKoWITLjWDXT5m35PnsN8CpBaFzXYcod/5p9XwCFp0s+aNxfpZECCV/3yqIr+J +ALzroPh43FAlG96o4NyYZ2Msp63newN19R2+TgpV4nXuw2mLVDpvetP7RRqnpvj/ +qwRgt5j4hFjJWb61M0ELL7A9fA71h1ImdGCvnArdBQs= +-----END CERTIFICATE----- diff --git a/vendor/github.com/docker/distribution/contrib/docker-integration/tokenserver/certs/auth.localregistry.key b/vendor/github.com/docker/distribution/contrib/docker-integration/tokenserver/certs/auth.localregistry.key new file mode 100644 index 000000000..4c499bb21 --- /dev/null +++ b/vendor/github.com/docker/distribution/contrib/docker-integration/tokenserver/certs/auth.localregistry.key @@ -0,0 +1,27 @@ +-----BEGIN RSA PRIVATE KEY----- +MIIEpAIBAAKCAQEA9bVH9RIIQZSEaxPN8heMw4Ebu7x9iaNJMoCiVFrUEHvg318o +4xP34BXSp56BI7CHhfYcTW07dJXQ8+/5/e6WzIaIl1C4vYquI5YEs1S9lxAIKzjd +GEmP3C5pndSkmv6TlGa9ljAGD2fhXUHxNVu0jDWxKz3nwamHRc6WfnrALb22o2MC +I5fFcvXOG4nceL4Jza0fRnBWIm4wubI/LMofzrW886olRk/GHD6ZjBuijUZGrrGu +IKqaJzSmBvAt1g2ojwi2CnLxaxX3RFhf5tqI9eHlwAl0/YsKfK+G7C4F62cKnKSj +ln1UbqhwvGleO+hGIMVggDgDRMVHVGkbecC/4wIDAQABAoIBAQCrsjXKRwOF8CZo +PLqZBWPT6hBbK+f9miC4LbNBhwbRTf9hl7mWlImOCTHe95/+NIk/Ty+P21jEqzwM 
+ehETJPoziX9BXaL6sEHnlBlMx1aEjStoKKA3LJBeqAAdzk4IEQVHmlO4824IreqJ +pF7Njnunzo0zTlr4tWJVoXsAfv5z9tNtdkxYBbIa0fjfGtlqXU3gLq58FCON3mB/ +NGc0AyA1UFGp0FzpdEcwTGD4InsXbcmsl2l/VPBJuZbryITRqWs6BbK++80DRhNt +afMhP+IzKrWSCp0rBYrqqz6AevtlKdEfQK1yXPEjN/63QLMevt8mF/1JCp//TQnf +Z6bIQbAhAoGBAP7vFA0PcvoXt9MXvvAwrKY1s6pNw4nWPG27qY1/m+DkBwP8IQms +4AWGv1wscZzXJYTvaLO5/qjmGUj50ohcVEvyZJioh1pKXA8Chxvd6rBA/O/Lj5E0 +3MOSA5Q0gxJ0Mhv0zGbbyN5fY8D8zhxoqQP4LoW+UdZG2Oi6JxsQ9c9dAoGBAPa8 +U3bGuM5OGA9EWP7mkB/VnjDTL1aEIN3cOHbHIKwH/loxdYcNMBE7vwxV1CzgIzXT +wsL0iE15fQdK938u0+um8aH5QtbWNI8tdk1XVjEC/i3C7N6WVUutneCKUDb4QxiB +9OvWCbNNiN+xTKBBM93YlwO3GYfrW9Pmm9q1+hg/AoGBALJlUS22gun50PxaIJZq +KVcCO2DQnCYHki/j48mN4+HjD/m85M2lePrFCYIR48syTyIQer9SR5+frVAA6k/b +9G1VCQo+3MDVSkiCp1Nb3tBKGfYgB65ARMBinDiI6rPuNeaUTrkn0g+yxtaU0hLV +Nnj9omia/x+oYj+xjI4HN0xNAoGARy92dSJIV104m88ATip/EnAzP6ruUWu1f8z1 +jW9OAdQckjEK03f+kjpGmGx61qekAPejjVO3r4KJi/0ZAtyjz61OsYiUvB748wYO +x6mW+HUAmHtQk7eTzE2+6vV8xx9BXGTCIPiTu+N2xfMFRIcLS8odZ7j/6LMCv1Qd +SzCNg0kCgYBaNlEs4pK1VxZZpEWwVmFpgIxfEfxLIaGrek6wBTcCn/VA2M0oHuez +mlMio8VY0yWPBJz30JflDiTmYIvteLPMHT0N0J6isiXLhzJSFI4+cAMLE2Q5v8rz +W+W5/L8YZeierW0qJat1BrgStaf5ZLpiOc9pKBSwycydPH5BfVdK/A== +-----END RSA PRIVATE KEY----- diff --git a/vendor/github.com/docker/distribution/contrib/docker-integration/tokenserver/certs/localregistry.cert b/vendor/github.com/docker/distribution/contrib/docker-integration/tokenserver/certs/localregistry.cert new file mode 100644 index 000000000..105acc4f3 --- /dev/null +++ b/vendor/github.com/docker/distribution/contrib/docker-integration/tokenserver/certs/localregistry.cert @@ -0,0 +1,19 @@ +-----BEGIN CERTIFICATE----- +MIIDETCCAfugAwIBAgIQN7rT95eAy75c4n6/AsDJODALBgkqhkiG9w0BAQswJjER +MA8GA1UEChMIUXVpY2tUTFMxETAPBgNVBAMTCFF1aWNrVExTMB4XDTE2MDEyODAw +NDIzMloXDTE5MDExMjAwNDIzMlowKzERMA8GA1UEChMIUXVpY2tUTFMxFjAUBgNV +BAMTDWxvY2FscmVnaXN0cnkwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIB +AQDLi75QEkl/qekcoOJlNv9y1IXvrbU2ssl4ViJiZRjWx+/CkyCCOyf9YUpAgRLr +Pskqde2mwhuNP8yBlOBb17Sapz7N3+hJi5j9vLBAFcamPeF3PqxjFv7j5TKkRmSI +dFYQclREwMUd3qEH322KkqOnsEEfdmCgFqWORe+QR5AxzxQP3Pnd4OYH1yZCh0MQ +P2pJgrxxf2I5I/m1AUgoHV1cdBbCv9LGohJPpMtwPC0dJpgMFcnf6hT37At236AY +V437HiRruY7iPWkYFrSPWpwdslJ32MZvRN5RS163jZXjiZ7qWnQOiiDJfXe4evB/ +yQLN4m0qVQxsMz7rkY7OsqaXAgMBAAGjOjA4MA4GA1UdDwEB/wQEAwIAoDAMBgNV +HRMBAf8EAjAAMBgGA1UdEQQRMA+CDWxvY2FscmVnaXN0cnkwCwYJKoZIhvcNAQEL +A4IBAQAyUb3EuMaOylBeV8+4KeBiE4lxykDOwLLSk3jXRsVVtfJpX3v8l5vwo/Jf +iG8tzzz+7uiskI96u3TsekUtVkUxujfKevMP+369K/59s7NRmwwlFMyB2fvL14B2 +oweVjWvM/8fZl6irtFdbJFXXRm7paKso5cmfImxhojAwohgcd4XTVLE/7juYa582 +AaBdRuIiyL71MU9qa1mC5+57AaSLPYaPKpahemgYYkV1Z403Kd6rXchxdQ8JIAL8 ++0oYTSC+svnz1tUU/V5E5id9LQaTmDN5iIVFhNpqAaZmR45UI86woWvnkMb8Ants +4aknwTwY3300PuTqBdQufvOFDRN5 +-----END CERTIFICATE----- diff --git a/vendor/github.com/docker/distribution/contrib/docker-integration/tokenserver/certs/localregistry.key b/vendor/github.com/docker/distribution/contrib/docker-integration/tokenserver/certs/localregistry.key new file mode 100644 index 000000000..cb69a0f3e --- /dev/null +++ b/vendor/github.com/docker/distribution/contrib/docker-integration/tokenserver/certs/localregistry.key @@ -0,0 +1,27 @@ +-----BEGIN RSA PRIVATE KEY----- +MIIEpAIBAAKCAQEAy4u+UBJJf6npHKDiZTb/ctSF7621NrLJeFYiYmUY1sfvwpMg +gjsn/WFKQIES6z7JKnXtpsIbjT/MgZTgW9e0mqc+zd/oSYuY/bywQBXGpj3hdz6s +Yxb+4+UypEZkiHRWEHJURMDFHd6hB99tipKjp7BBH3ZgoBaljkXvkEeQMc8UD9z5 +3eDmB9cmQodDED9qSYK8cX9iOSP5tQFIKB1dXHQWwr/SxqIST6TLcDwtHSaYDBXJ +3+oU9+wLdt+gGFeN+x4ka7mO4j1pGBa0j1qcHbJSd9jGb0TeUUtet42V44me6lp0 
+DoogyX13uHrwf8kCzeJtKlUMbDM+65GOzrKmlwIDAQABAoIBAF6vFMp+lz4RteSh +Wm8m1FGAVwWVUpStOlcGClynFpTi0L88XYT3K7UMStQSttBDlqRv0ysdZF+ia+lj +bbKLdvHyFp8CJzX/AB4YZgyJlKzEYFtuBhbaHZu5hIMyU5W+OELSTCznV0p7w4C8 +CGLLr+FTdhfCo1QU9NJn6fa9s2/XRdSClBBalAHYs0ZS7ZckaF/sPiC/VapfBMet +qjJXNYiO6pXYriGWKF9zdAMfk2CM0BVWbnwQZkMSEQirrTcJwm3ezyloXCv2nywK +/VzbUT1HJVyzo5oAwTd0MwDc2oEMiFzlfO028zY4LDltpia+SyWvFi5NaIqzFESc +yLgJacECgYEA3jvH+ZQHQf42Md8TCciokaYvwWIKJdk4WRjbvE5cBZekyXAm7/3b +/1VFDKsy2RPlfmfHP3wy9rlnjzsRveB5qaclgS8aI67AYsWd/yRgfRatl7Ve9bHl +LY6VM5L/DZTxykcqivwjc77XoDuBfUKs6tyuSLQku+FOTbLtNYlUCHECgYEA6nkR +lkXufyLmDhNb3093RsYvPcs1kGaIIGTnz3cxWNh485DgsyLBuYQ5ugupQkzM8YSt +ohDTmVpggqjlXQxCg0Zw8gkEV0v8KsLGjn1CuTJg/mBArXlelq1FEeRAYC9/YfOz +ocXegHV7wDKKtcraNZFsEc7Z0LwbC9wtzSFG44cCgYASkMX1CLPOhJE8e1lY0OWc +PVjx++HDJbF6aAQ7aARyBygiF/d4xylw3EvHcinuTqY2eC8CE7siN3z6T0H9Ldqc +HLWaZDf30SqLVd0MKprQ+GsKKIHFXtY5hxbZ1ybtmIrWjjl0oPnJOqFC5pW7xC0z +9bmtozcKZxkmjpMYjN9zUQKBgQCqV6KLRerqunPgLfhE1/qTlE+l2QflDFhBEI3I +j5NuNHZKnSphehK7sHAv1WD2Jc2OeRGb+BWCB8Ktqf5YBxwbOwW7EQnyUeW1OyP9 +SMs8uHj21P6oCNDLLr5LLUQHnPoyM1aBZLstICzziMR1JhY5bJjSpzBfEQmlKCSu +LkrN6QKBgQCRXrBJRUxeJj7wCnCSq0Clf9NhCpQnwo4bEx8sKlj8K8ku8MvwQwoM +3KfWc7bOl6A2/mM/k4yoHtBMM9X9xqYtsgeFhxuiWBcfTmTxWh73LQ48Kgbrgodt +6yTccnjr7OtBidD85c6lgjAUgcL43QY8mlw0OhzXAZ2R5HWFp4ht+w== +-----END RSA PRIVATE KEY----- diff --git a/vendor/github.com/docker/distribution/contrib/docker-integration/tokenserver/certs/signing.cert b/vendor/github.com/docker/distribution/contrib/docker-integration/tokenserver/certs/signing.cert new file mode 100644 index 000000000..45166f2d8 --- /dev/null +++ b/vendor/github.com/docker/distribution/contrib/docker-integration/tokenserver/certs/signing.cert @@ -0,0 +1,18 @@ +-----BEGIN CERTIFICATE----- +MIIC9TCCAd+gAwIBAgIRAJ6IIisIZxL86oe3oeoAgWUwCwYJKoZIhvcNAQELMCYx +ETAPBgNVBAoTCFF1aWNrVExTMREwDwYDVQQDEwhRdWlja1RMUzAeFw0xNjAxMjgw +MDQyMzNaFw0xOTAxMTIwMDQyMzNaMBMxETAPBgNVBAoTCFF1aWNrVExTMIIBIjAN +BgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA3IXUwqSdO2QTj2ET6fJPGe+KWVnt +QCQQWjkWVpOz8L2A29BRvv9z6lYNf9sOM0Xb5IUAgoZ/s3U6LNYT/RWYFBfeo40r +Xd/MNKAn0kFsSb6BIKmUwPqFeqc8wiPX6yY4SbF1sUTkCTkw3yFHg/AIlwmhpFH3 +9mAmV+x0kTzFR/78ZDD5CUNS59bbu+7UqB06YrJuVEwPY98YixSPXTcaKimsUe+K +IY8FQ6yN6l27MK56wlj4hw2gYz+cyBUBCExCgYMQlOSg2ilH4qYyFvccSDUH7jTA +NwpsIBfdoUVbI+j2ivn+ZGD614LtIStGgUu0mDDVxVOWnRvq/z7LMaa2jwIDAQAB +ozUwMzAOBgNVHQ8BAf8EBAMCAKAwEwYDVR0lBAwwCgYIKwYBBQUHAwIwDAYDVR0T +AQH/BAIwADALBgkqhkiG9w0BAQsDggEBAJq3JzTLrIWCF8rHLTTm1icE9PjOO0sV +a1wrmdJ6NwRbJ66dLZ/4G/NZjVOnce9WFHYLFSEG+wx5YVUPuJXpJaSdy0h8F0Uw +hiJwgeVsGg7vcf4G6mWHrsauDOhylnD31UtYPX1Ao/jcntyyf+gCQpY1J/B8l1yU +LNOwvWLVLpZwZ4ehbKA/UnDXgA+3uHvpzl//cPe0cnt+Mhrgzk5mIMwVR6zCZw1G +oVutAHpv2PXxRwTMu51J+QtSL2b2w3mGHxDLpmz8UdXOtkxdpmDT8kIOtX0T5yGL +29F3fa81iZPs02GWjSGOfOzmCCvaA4C5KJvY/WulF7OOgwvrBpQwqTI= +-----END CERTIFICATE----- diff --git a/vendor/github.com/docker/distribution/contrib/docker-integration/tokenserver/certs/signing.key b/vendor/github.com/docker/distribution/contrib/docker-integration/tokenserver/certs/signing.key new file mode 100644 index 000000000..475625402 --- /dev/null +++ b/vendor/github.com/docker/distribution/contrib/docker-integration/tokenserver/certs/signing.key @@ -0,0 +1,27 @@ +-----BEGIN RSA PRIVATE KEY----- +MIIEowIBAAKCAQEA3IXUwqSdO2QTj2ET6fJPGe+KWVntQCQQWjkWVpOz8L2A29BR +vv9z6lYNf9sOM0Xb5IUAgoZ/s3U6LNYT/RWYFBfeo40rXd/MNKAn0kFsSb6BIKmU +wPqFeqc8wiPX6yY4SbF1sUTkCTkw3yFHg/AIlwmhpFH39mAmV+x0kTzFR/78ZDD5 +CUNS59bbu+7UqB06YrJuVEwPY98YixSPXTcaKimsUe+KIY8FQ6yN6l27MK56wlj4 
+hw2gYz+cyBUBCExCgYMQlOSg2ilH4qYyFvccSDUH7jTANwpsIBfdoUVbI+j2ivn+ +ZGD614LtIStGgUu0mDDVxVOWnRvq/z7LMaa2jwIDAQABAoIBAD2tiNZv6DImSXo+ +sq0qQomEf/OBvWPFMnWppd/NK/TXa+UPHO4I0MjoDJqIEC6zCU+fC4d2St1MmlrT +/X85vPFRw8mGwGxfHeRSLxEVj04I5GDYTWy0JQUrJUk/cTKp2/Bwm/RaylTyFAM0 +caYrSpvD69vjuTDFr7PDxM6iaqM53zK/vD8kCe81z+wN0UbAKsLlUOKztjH6SzL9 +uVOkekIT/j3L2xxyQhjmhfA3TuCP4uNK/+6/4ovl9Nj4pQsFomsCk4phgqy9SOm1 +4yufmVd8k7J3cppMlMPNc+7tqe2Xn593Y8QT95y3yhtkFECF70yBw64HMDDpA22p +5b/JV9ECgYEA9H4RBXOwbdjcpCa9H3mFjHqUQCqNme1vOSGiflZh9KBCDKgdqugm +KHpvAECADie0p6XRHpxRvufKnGFkJwedfeiKz51T+0dqgPxWncYT1TC+cAjOSzfM +wBpUOcAyvTTviwGbg4bLanHo4remzCbcnRvHQX4YfPFCjT9GhsU+XEUCgYEA5ubz +IlSu1wwFJpoO24ZykGUyqGUQXzR0NrXiLrpF0764qjmHyF8SPJPv1XegSxP/nUTz +SjVfJ7wye/X9qlOpBY8mzy9qQMMKc1cQBV1yVW8IRZ7pMYQZO7qmrZD/DWTa5qWt +pqSbIH2FKedELsKJA/SBtczKjspOdDKyh0UelsMCgYA7DyTfc0XAEy2hPXZb3wgC +mi2rnlvcPf2rCFPvPsCkzf2GfynDehaVmpWrsuj8Al1iTezI/yvD+Mv5oJEH2JAT +tROq+S8rOOIiTFJEBHAQBJlMCOSESPNdyD5mQOZAzEO9CWNejzYd/WwrL//Luut5 +zBcC3AngTIsuAYXw0j6xHQKBgQDamkAJep7k3W5q82OplgoUhpqFLtlnKSP1QBFZ +J+U/6Mqv7jONEeUUEQL42H6bVd2kqUikMw9ZcSVikquLfBUDPFoDwOIZWg4k0IJM +cgHyvGHad+5SgLva/oUawbGWnqtXvfc/U4vCINPXrimxE1/grLW4xp/mu8W24OCA +jIG/PQKBgD/Apl+sfqiB/6ONBjjIswA4yFkEXHSZNpAgcPwhA+cO5D0afEWz2HIx +VeOh5NjN1EL0hX8clFW4bfkK1Vr0kjvbMUXnBWaibUgpiVQl9O9WjaKQLZrp4sRu +x2kJ07Qn6ri7f/lsqOELZwBy95iHWRdePptaAKkRGxJstHI7dgUt +-----END RSA PRIVATE KEY----- diff --git a/vendor/github.com/docker/distribution/contrib/docker-integration/tokenserver/registry-config.yml b/vendor/github.com/docker/distribution/contrib/docker-integration/tokenserver/registry-config.yml new file mode 100644 index 000000000..bc269056a --- /dev/null +++ b/vendor/github.com/docker/distribution/contrib/docker-integration/tokenserver/registry-config.yml @@ -0,0 +1,18 @@ +version: 0.1 +loglevel: debug +storage: + cache: + blobdescriptor: inmemory + filesystem: + rootdirectory: /tmp/registry-dev +http: + addr: 0.0.0.0:5000 + tls: + certificate: "/etc/docker/registry/localregistry.cert" + key: "/etc/docker/registry/localregistry.key" +auth: + token: + realm: "https://auth.localregistry:5556/token/" + issuer: "registry-test" + service: "registry-test" + rootcertbundle: "/etc/docker/registry/tokenbundle.pem" diff --git a/vendor/github.com/docker/distribution/contrib/token-server/errors.go b/vendor/github.com/docker/distribution/contrib/token-server/errors.go new file mode 100644 index 000000000..bcac8ee35 --- /dev/null +++ b/vendor/github.com/docker/distribution/contrib/token-server/errors.go @@ -0,0 +1,38 @@ +package main + +import ( + "net/http" + + "github.com/docker/distribution/registry/api/errcode" +) + +var ( + errGroup = "tokenserver" + + // ErrorBadTokenOption is returned when a token parameter is invalid + ErrorBadTokenOption = errcode.Register(errGroup, errcode.ErrorDescriptor{ + Value: "BAD_TOKEN_OPTION", + Message: "bad token option", + Description: `This error may be returned when a request for a + token contains an option which is not valid`, + HTTPStatusCode: http.StatusBadRequest, + }) + + // ErrorMissingRequiredField is returned when a required form field is missing + ErrorMissingRequiredField = errcode.Register(errGroup, errcode.ErrorDescriptor{ + Value: "MISSING_REQUIRED_FIELD", + Message: "missing required field", + Description: `This error may be returned when a request for a + token does not contain a required form field`, + HTTPStatusCode: http.StatusBadRequest, + }) + + // ErrorUnsupportedValue is returned when a form field has an unsupported value + ErrorUnsupportedValue = 
errcode.Register(errGroup, errcode.ErrorDescriptor{ + Value: "UNSUPPORTED_VALUE", + Message: "unsupported value", + Description: `This error may be returned when a request for a + token contains a form field with an unsupported value`, + HTTPStatusCode: http.StatusBadRequest, + }) +) diff --git a/vendor/github.com/docker/distribution/contrib/token-server/main.go b/vendor/github.com/docker/distribution/contrib/token-server/main.go new file mode 100644 index 000000000..e9d6d64fa --- /dev/null +++ b/vendor/github.com/docker/distribution/contrib/token-server/main.go @@ -0,0 +1,425 @@ +package main + +import ( + "encoding/json" + "flag" + "math/rand" + "net/http" + "strconv" + "strings" + "time" + + "github.com/Sirupsen/logrus" + "github.com/docker/distribution/context" + "github.com/docker/distribution/registry/api/errcode" + "github.com/docker/distribution/registry/auth" + _ "github.com/docker/distribution/registry/auth/htpasswd" + "github.com/docker/libtrust" + "github.com/gorilla/mux" +) + +var ( + enforceRepoClass bool +) + +func main() { + var ( + issuer = &TokenIssuer{} + pkFile string + addr string + debug bool + err error + + passwdFile string + realm string + + cert string + certKey string + ) + + flag.StringVar(&issuer.Issuer, "issuer", "distribution-token-server", "Issuer string for token") + flag.StringVar(&pkFile, "key", "", "Private key file") + flag.StringVar(&addr, "addr", "localhost:8080", "Address to listen on") + flag.BoolVar(&debug, "debug", false, "Debug mode") + + flag.StringVar(&passwdFile, "passwd", ".htpasswd", "Passwd file") + flag.StringVar(&realm, "realm", "", "Authentication realm") + + flag.StringVar(&cert, "tlscert", "", "Certificate file for TLS") + flag.StringVar(&certKey, "tlskey", "", "Certificate key for TLS") + + flag.BoolVar(&enforceRepoClass, "enforce-class", false, "Enforce policy for single repository class") + + flag.Parse() + + if debug { + logrus.SetLevel(logrus.DebugLevel) + } + + if pkFile == "" { + issuer.SigningKey, err = libtrust.GenerateECP256PrivateKey() + if err != nil { + logrus.Fatalf("Error generating private key: %v", err) + } + logrus.Debugf("Using newly generated key with id %s", issuer.SigningKey.KeyID()) + } else { + issuer.SigningKey, err = libtrust.LoadKeyFile(pkFile) + if err != nil { + logrus.Fatalf("Error loading key file %s: %v", pkFile, err) + } + logrus.Debugf("Loaded private key with id %s", issuer.SigningKey.KeyID()) + } + + if realm == "" { + logrus.Fatalf("Must provide realm") + } + + ac, err := auth.GetAccessController("htpasswd", map[string]interface{}{ + "realm": realm, + "path": passwdFile, + }) + if err != nil { + logrus.Fatalf("Error initializing access controller: %v", err) + } + + // TODO: Make configurable + issuer.Expiration = 15 * time.Minute + + ctx := context.Background() + + ts := &tokenServer{ + issuer: issuer, + accessController: ac, + refreshCache: map[string]refreshToken{}, + } + + router := mux.NewRouter() + router.Path("/token/").Methods("GET").Handler(handlerWithContext(ctx, ts.getToken)) + router.Path("/token/").Methods("POST").Handler(handlerWithContext(ctx, ts.postToken)) + + if cert == "" { + err = http.ListenAndServe(addr, router) + } else if certKey == "" { + logrus.Fatalf("Must provide certficate (-tlscert) and key (-tlskey)") + } else { + err = http.ListenAndServeTLS(addr, cert, certKey, router) + } + + if err != nil { + logrus.Infof("Error serving: %v", err) + } + +} + +// handlerWithContext wraps the given context-aware handler by setting up the +// request context from a base context. 
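+// In addition to the base context, the returned handler attaches the incoming
+// request and a request-scoped logger to the context before invoking handler.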
+func handlerWithContext(ctx context.Context, handler func(context.Context, http.ResponseWriter, *http.Request)) http.Handler { + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + ctx := context.WithRequest(ctx, r) + logger := context.GetRequestLogger(ctx) + ctx = context.WithLogger(ctx, logger) + + handler(ctx, w, r) + }) +} + +func handleError(ctx context.Context, err error, w http.ResponseWriter) { + ctx, w = context.WithResponseWriter(ctx, w) + + if serveErr := errcode.ServeJSON(w, err); serveErr != nil { + context.GetResponseLogger(ctx).Errorf("error sending error response: %v", serveErr) + return + } + + context.GetResponseLogger(ctx).Info("application error") +} + +var refreshCharacters = []rune("0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ") + +const refreshTokenLength = 15 + +func newRefreshToken() string { + s := make([]rune, refreshTokenLength) + for i := range s { + s[i] = refreshCharacters[rand.Intn(len(refreshCharacters))] + } + return string(s) +} + +type refreshToken struct { + subject string + service string +} + +type tokenServer struct { + issuer *TokenIssuer + accessController auth.AccessController + refreshCache map[string]refreshToken +} + +type tokenResponse struct { + Token string `json:"access_token"` + RefreshToken string `json:"refresh_token,omitempty"` + ExpiresIn int `json:"expires_in,omitempty"` +} + +var repositoryClassCache = map[string]string{} + +func filterAccessList(ctx context.Context, scope string, requestedAccessList []auth.Access) []auth.Access { + if !strings.HasSuffix(scope, "/") { + scope = scope + "/" + } + grantedAccessList := make([]auth.Access, 0, len(requestedAccessList)) + for _, access := range requestedAccessList { + if access.Type == "repository" { + if !strings.HasPrefix(access.Name, scope) { + context.GetLogger(ctx).Debugf("Resource scope not allowed: %s", access.Name) + continue + } + if enforceRepoClass { + if class, ok := repositoryClassCache[access.Name]; ok { + if class != access.Class { + context.GetLogger(ctx).Debugf("Different repository class: %q, previously %q", access.Class, class) + continue + } + } else if strings.EqualFold(access.Action, "push") { + repositoryClassCache[access.Name] = access.Class + } + } + } else if access.Type == "registry" { + if access.Name != "catalog" { + context.GetLogger(ctx).Debugf("Unknown registry resource: %s", access.Name) + continue + } + // TODO: Limit some actions to "admin" users + } else { + context.GetLogger(ctx).Debugf("Skipping unsupported resource type: %s", access.Type) + continue + } + grantedAccessList = append(grantedAccessList, access) + } + return grantedAccessList +} + +type acctSubject struct{} + +func (acctSubject) String() string { return "acctSubject" } + +type requestedAccess struct{} + +func (requestedAccess) String() string { return "requestedAccess" } + +type grantedAccess struct{} + +func (grantedAccess) String() string { return "grantedAccess" } + +// getToken handles authenticating the request and authorizing access to the +// requested scopes. 
+func (ts *tokenServer) getToken(ctx context.Context, w http.ResponseWriter, r *http.Request) { + context.GetLogger(ctx).Info("getToken") + + params := r.URL.Query() + service := params.Get("service") + scopeSpecifiers := params["scope"] + var offline bool + if offlineStr := params.Get("offline_token"); offlineStr != "" { + var err error + offline, err = strconv.ParseBool(offlineStr) + if err != nil { + handleError(ctx, ErrorBadTokenOption.WithDetail(err), w) + return + } + } + + requestedAccessList := ResolveScopeSpecifiers(ctx, scopeSpecifiers) + + authorizedCtx, err := ts.accessController.Authorized(ctx, requestedAccessList...) + if err != nil { + challenge, ok := err.(auth.Challenge) + if !ok { + handleError(ctx, err, w) + return + } + + // Get response context. + ctx, w = context.WithResponseWriter(ctx, w) + + challenge.SetHeaders(w) + handleError(ctx, errcode.ErrorCodeUnauthorized.WithDetail(challenge.Error()), w) + + context.GetResponseLogger(ctx).Info("get token authentication challenge") + + return + } + ctx = authorizedCtx + + username := context.GetStringValue(ctx, "auth.user.name") + + ctx = context.WithValue(ctx, acctSubject{}, username) + ctx = context.WithLogger(ctx, context.GetLogger(ctx, acctSubject{})) + + context.GetLogger(ctx).Info("authenticated client") + + ctx = context.WithValue(ctx, requestedAccess{}, requestedAccessList) + ctx = context.WithLogger(ctx, context.GetLogger(ctx, requestedAccess{})) + + grantedAccessList := filterAccessList(ctx, username, requestedAccessList) + ctx = context.WithValue(ctx, grantedAccess{}, grantedAccessList) + ctx = context.WithLogger(ctx, context.GetLogger(ctx, grantedAccess{})) + + token, err := ts.issuer.CreateJWT(username, service, grantedAccessList) + if err != nil { + handleError(ctx, err, w) + return + } + + context.GetLogger(ctx).Info("authorized client") + + response := tokenResponse{ + Token: token, + ExpiresIn: int(ts.issuer.Expiration.Seconds()), + } + + if offline { + response.RefreshToken = newRefreshToken() + ts.refreshCache[response.RefreshToken] = refreshToken{ + subject: username, + service: service, + } + } + + ctx, w = context.WithResponseWriter(ctx, w) + + w.Header().Set("Content-Type", "application/json") + json.NewEncoder(w).Encode(response) + + context.GetResponseLogger(ctx).Info("get token complete") +} + +type postTokenResponse struct { + Token string `json:"access_token"` + Scope string `json:"scope,omitempty"` + ExpiresIn int `json:"expires_in,omitempty"` + IssuedAt string `json:"issued_at,omitempty"` + RefreshToken string `json:"refresh_token,omitempty"` +} + +// postToken handles authenticating the request and authorizing access to the +// requested scopes. 
+func (ts *tokenServer) postToken(ctx context.Context, w http.ResponseWriter, r *http.Request) { + grantType := r.PostFormValue("grant_type") + if grantType == "" { + handleError(ctx, ErrorMissingRequiredField.WithDetail("missing grant_type value"), w) + return + } + + service := r.PostFormValue("service") + if service == "" { + handleError(ctx, ErrorMissingRequiredField.WithDetail("missing service value"), w) + return + } + + clientID := r.PostFormValue("client_id") + if clientID == "" { + handleError(ctx, ErrorMissingRequiredField.WithDetail("missing client_id value"), w) + return + } + + var offline bool + switch r.PostFormValue("access_type") { + case "", "online": + case "offline": + offline = true + default: + handleError(ctx, ErrorUnsupportedValue.WithDetail("unknown access_type value"), w) + return + } + + requestedAccessList := ResolveScopeList(ctx, r.PostFormValue("scope")) + + var subject string + var rToken string + switch grantType { + case "refresh_token": + rToken = r.PostFormValue("refresh_token") + if rToken == "" { + handleError(ctx, ErrorUnsupportedValue.WithDetail("missing refresh_token value"), w) + return + } + rt, ok := ts.refreshCache[rToken] + if !ok || rt.service != service { + handleError(ctx, errcode.ErrorCodeUnauthorized.WithDetail("invalid refresh token"), w) + return + } + subject = rt.subject + case "password": + ca, ok := ts.accessController.(auth.CredentialAuthenticator) + if !ok { + handleError(ctx, ErrorUnsupportedValue.WithDetail("password grant type not supported"), w) + return + } + subject = r.PostFormValue("username") + if subject == "" { + handleError(ctx, ErrorUnsupportedValue.WithDetail("missing username value"), w) + return + } + password := r.PostFormValue("password") + if password == "" { + handleError(ctx, ErrorUnsupportedValue.WithDetail("missing password value"), w) + return + } + if err := ca.AuthenticateUser(subject, password); err != nil { + handleError(ctx, errcode.ErrorCodeUnauthorized.WithDetail("invalid credentials"), w) + return + } + default: + handleError(ctx, ErrorUnsupportedValue.WithDetail("unknown grant_type value"), w) + return + } + + ctx = context.WithValue(ctx, acctSubject{}, subject) + ctx = context.WithLogger(ctx, context.GetLogger(ctx, acctSubject{})) + + context.GetLogger(ctx).Info("authenticated client") + + ctx = context.WithValue(ctx, requestedAccess{}, requestedAccessList) + ctx = context.WithLogger(ctx, context.GetLogger(ctx, requestedAccess{})) + + grantedAccessList := filterAccessList(ctx, subject, requestedAccessList) + ctx = context.WithValue(ctx, grantedAccess{}, grantedAccessList) + ctx = context.WithLogger(ctx, context.GetLogger(ctx, grantedAccess{})) + + token, err := ts.issuer.CreateJWT(subject, service, grantedAccessList) + if err != nil { + handleError(ctx, err, w) + return + } + + context.GetLogger(ctx).Info("authorized client") + + response := postTokenResponse{ + Token: token, + ExpiresIn: int(ts.issuer.Expiration.Seconds()), + IssuedAt: time.Now().UTC().Format(time.RFC3339), + Scope: ToScopeList(grantedAccessList), + } + + if offline { + rToken = newRefreshToken() + ts.refreshCache[rToken] = refreshToken{ + subject: subject, + service: service, + } + } + + if rToken != "" { + response.RefreshToken = rToken + } + + ctx, w = context.WithResponseWriter(ctx, w) + + w.Header().Set("Content-Type", "application/json") + json.NewEncoder(w).Encode(response) + + context.GetResponseLogger(ctx).Info("post token complete") +} diff --git a/vendor/github.com/docker/distribution/contrib/token-server/token.go 
b/vendor/github.com/docker/distribution/contrib/token-server/token.go new file mode 100644 index 000000000..b0c2abff7 --- /dev/null +++ b/vendor/github.com/docker/distribution/contrib/token-server/token.go @@ -0,0 +1,219 @@ +package main + +import ( + "crypto" + "crypto/rand" + "encoding/base64" + "encoding/json" + "fmt" + "io" + "regexp" + "strings" + "time" + + "github.com/docker/distribution/context" + "github.com/docker/distribution/registry/auth" + "github.com/docker/distribution/registry/auth/token" + "github.com/docker/libtrust" +) + +// ResolveScopeSpecifiers converts a list of scope specifiers from a token +// request's `scope` query parameters into a list of standard access objects. +func ResolveScopeSpecifiers(ctx context.Context, scopeSpecs []string) []auth.Access { + requestedAccessSet := make(map[auth.Access]struct{}, 2*len(scopeSpecs)) + + for _, scopeSpecifier := range scopeSpecs { + // There should be 3 parts, separated by a `:` character. + parts := strings.SplitN(scopeSpecifier, ":", 3) + + if len(parts) != 3 { + context.GetLogger(ctx).Infof("ignoring unsupported scope format %s", scopeSpecifier) + continue + } + + resourceType, resourceName, actions := parts[0], parts[1], parts[2] + + resourceType, resourceClass := splitResourceClass(resourceType) + if resourceType == "" { + continue + } + + // Actions should be a comma-separated list of actions. + for _, action := range strings.Split(actions, ",") { + requestedAccess := auth.Access{ + Resource: auth.Resource{ + Type: resourceType, + Class: resourceClass, + Name: resourceName, + }, + Action: action, + } + + // Add this access to the requested access set. + requestedAccessSet[requestedAccess] = struct{}{} + } + } + + requestedAccessList := make([]auth.Access, 0, len(requestedAccessSet)) + for requestedAccess := range requestedAccessSet { + requestedAccessList = append(requestedAccessList, requestedAccess) + } + + return requestedAccessList +} + +var typeRegexp = regexp.MustCompile(`^([a-z0-9]+)(\([a-z0-9]+\))?$`) + +func splitResourceClass(t string) (string, string) { + matches := typeRegexp.FindStringSubmatch(t) + if len(matches) < 2 { + return "", "" + } + if len(matches) == 2 || len(matches[2]) < 2 { + return matches[1], "" + } + return matches[1], matches[2][1 : len(matches[2])-1] +} + +// ResolveScopeList converts a scope list from a token request's +// `scope` parameter into a list of standard access objects. +func ResolveScopeList(ctx context.Context, scopeList string) []auth.Access { + scopes := strings.Split(scopeList, " ") + return ResolveScopeSpecifiers(ctx, scopes) +} + +func scopeString(a auth.Access) string { + if a.Class != "" { + return fmt.Sprintf("%s(%s):%s:%s", a.Type, a.Class, a.Name, a.Action) + } + return fmt.Sprintf("%s:%s:%s", a.Type, a.Name, a.Action) +} + +// ToScopeList converts a list of access to a +// scope list string +func ToScopeList(access []auth.Access) string { + var s []string + for _, a := range access { + s = append(s, scopeString(a)) + } + return strings.Join(s, ",") +} + +// TokenIssuer represents an issuer capable of generating JWT tokens +type TokenIssuer struct { + Issuer string + SigningKey libtrust.PrivateKey + Expiration time.Duration +} + +// CreateJWT creates and signs a JSON Web Token for the given subject and +// audience with the granted access. +func (issuer *TokenIssuer) CreateJWT(subject string, audience string, grantedAccessList []auth.Access) (string, error) { + // Make a set of access entries to put in the token's claimset. 
+ resourceActionSets := make(map[auth.Resource]map[string]struct{}, len(grantedAccessList)) + for _, access := range grantedAccessList { + actionSet, exists := resourceActionSets[access.Resource] + if !exists { + actionSet = map[string]struct{}{} + resourceActionSets[access.Resource] = actionSet + } + actionSet[access.Action] = struct{}{} + } + + accessEntries := make([]*token.ResourceActions, 0, len(resourceActionSets)) + for resource, actionSet := range resourceActionSets { + actions := make([]string, 0, len(actionSet)) + for action := range actionSet { + actions = append(actions, action) + } + + accessEntries = append(accessEntries, &token.ResourceActions{ + Type: resource.Type, + Class: resource.Class, + Name: resource.Name, + Actions: actions, + }) + } + + randomBytes := make([]byte, 15) + _, err := io.ReadFull(rand.Reader, randomBytes) + if err != nil { + return "", err + } + randomID := base64.URLEncoding.EncodeToString(randomBytes) + + now := time.Now() + + signingHash := crypto.SHA256 + var alg string + switch issuer.SigningKey.KeyType() { + case "RSA": + alg = "RS256" + case "EC": + alg = "ES256" + default: + panic(fmt.Errorf("unsupported signing key type %q", issuer.SigningKey.KeyType())) + } + + joseHeader := token.Header{ + Type: "JWT", + SigningAlg: alg, + } + + if x5c := issuer.SigningKey.GetExtendedField("x5c"); x5c != nil { + joseHeader.X5c = x5c.([]string) + } else { + var jwkMessage json.RawMessage + jwkMessage, err = issuer.SigningKey.PublicKey().MarshalJSON() + if err != nil { + return "", err + } + joseHeader.RawJWK = &jwkMessage + } + + exp := issuer.Expiration + if exp == 0 { + exp = 5 * time.Minute + } + + claimSet := token.ClaimSet{ + Issuer: issuer.Issuer, + Subject: subject, + Audience: audience, + Expiration: now.Add(exp).Unix(), + NotBefore: now.Unix(), + IssuedAt: now.Unix(), + JWTID: randomID, + + Access: accessEntries, + } + + var ( + joseHeaderBytes []byte + claimSetBytes []byte + ) + + if joseHeaderBytes, err = json.Marshal(joseHeader); err != nil { + return "", fmt.Errorf("unable to encode jose header: %s", err) + } + if claimSetBytes, err = json.Marshal(claimSet); err != nil { + return "", fmt.Errorf("unable to encode claim set: %s", err) + } + + encodedJoseHeader := joseBase64Encode(joseHeaderBytes) + encodedClaimSet := joseBase64Encode(claimSetBytes) + encodingToSign := fmt.Sprintf("%s.%s", encodedJoseHeader, encodedClaimSet) + + var signatureBytes []byte + if signatureBytes, _, err = issuer.SigningKey.Sign(strings.NewReader(encodingToSign), signingHash); err != nil { + return "", fmt.Errorf("unable to sign jwt payload: %s", err) + } + + signature := joseBase64Encode(signatureBytes) + + return fmt.Sprintf("%s.%s", encodingToSign, signature), nil +} + +func joseBase64Encode(data []byte) string { + return strings.TrimRight(base64.URLEncoding.EncodeToString(data), "=") +} diff --git a/vendor/github.com/docker/distribution/coverpkg.sh b/vendor/github.com/docker/distribution/coverpkg.sh new file mode 100755 index 000000000..25d419ae8 --- /dev/null +++ b/vendor/github.com/docker/distribution/coverpkg.sh @@ -0,0 +1,7 @@ +#!/usr/bin/env bash +# Given a subpackage and the containing package, figures out which packages +# need to be passed to `go test -coverpkg`: this includes all of the +# subpackage's dependencies within the containing package, as well as the +# subpackage itself. 
+DEPENDENCIES="$(go list -f $'{{range $f := .Deps}}{{$f}}\n{{end}}' ${1} | grep ${2} | grep -v github.com/docker/distribution/vendor)" +echo "${1} ${DEPENDENCIES}" | xargs echo -n | tr ' ' ',' diff --git a/vendor/github.com/docker/distribution/digest/digest.go b/vendor/github.com/docker/distribution/digest/digest.go new file mode 100644 index 000000000..31d821bba --- /dev/null +++ b/vendor/github.com/docker/distribution/digest/digest.go @@ -0,0 +1,139 @@ +package digest + +import ( + "fmt" + "hash" + "io" + "regexp" + "strings" +) + +const ( + // DigestSha256EmptyTar is the canonical sha256 digest of empty data + DigestSha256EmptyTar = "sha256:e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855" +) + +// Digest allows simple protection of hex formatted digest strings, prefixed +// by their algorithm. Strings of type Digest have some guarantee of being in +// the correct format and it provides quick access to the components of a +// digest string. +// +// The following is an example of the contents of Digest types: +// +// sha256:7173b809ca12ec5dee4506cd86be934c4596dd234ee82c0662eac04a8c2c71dc +// +// This allows to abstract the digest behind this type and work only in those +// terms. +type Digest string + +// NewDigest returns a Digest from alg and a hash.Hash object. +func NewDigest(alg Algorithm, h hash.Hash) Digest { + return NewDigestFromBytes(alg, h.Sum(nil)) +} + +// NewDigestFromBytes returns a new digest from the byte contents of p. +// Typically, this can come from hash.Hash.Sum(...) or xxx.SumXXX(...) +// functions. This is also useful for rebuilding digests from binary +// serializations. +func NewDigestFromBytes(alg Algorithm, p []byte) Digest { + return Digest(fmt.Sprintf("%s:%x", alg, p)) +} + +// NewDigestFromHex returns a Digest from alg and a the hex encoded digest. +func NewDigestFromHex(alg, hex string) Digest { + return Digest(fmt.Sprintf("%s:%s", alg, hex)) +} + +// DigestRegexp matches valid digest types. +var DigestRegexp = regexp.MustCompile(`[a-zA-Z0-9-_+.]+:[a-fA-F0-9]+`) + +// DigestRegexpAnchored matches valid digest types, anchored to the start and end of the match. +var DigestRegexpAnchored = regexp.MustCompile(`^` + DigestRegexp.String() + `$`) + +var ( + // ErrDigestInvalidFormat returned when digest format invalid. + ErrDigestInvalidFormat = fmt.Errorf("invalid checksum digest format") + + // ErrDigestInvalidLength returned when digest has invalid length. + ErrDigestInvalidLength = fmt.Errorf("invalid checksum digest length") + + // ErrDigestUnsupported returned when the digest algorithm is unsupported. + ErrDigestUnsupported = fmt.Errorf("unsupported digest algorithm") +) + +// ParseDigest parses s and returns the validated digest object. An error will +// be returned if the format is invalid. +func ParseDigest(s string) (Digest, error) { + d := Digest(s) + + return d, d.Validate() +} + +// FromReader returns the most valid digest for the underlying content using +// the canonical digest algorithm. +func FromReader(rd io.Reader) (Digest, error) { + return Canonical.FromReader(rd) +} + +// FromBytes digests the input and returns a Digest. +func FromBytes(p []byte) Digest { + return Canonical.FromBytes(p) +} + +// Validate checks that the contents of d is a valid digest, returning an +// error if not. 
+func (d Digest) Validate() error { + s := string(d) + + if !DigestRegexpAnchored.MatchString(s) { + return ErrDigestInvalidFormat + } + + i := strings.Index(s, ":") + if i < 0 { + return ErrDigestInvalidFormat + } + + // case: "sha256:" with no hex. + if i+1 == len(s) { + return ErrDigestInvalidFormat + } + + switch algorithm := Algorithm(s[:i]); algorithm { + case SHA256, SHA384, SHA512: + if algorithm.Size()*2 != len(s[i+1:]) { + return ErrDigestInvalidLength + } + break + default: + return ErrDigestUnsupported + } + + return nil +} + +// Algorithm returns the algorithm portion of the digest. This will panic if +// the underlying digest is not in a valid format. +func (d Digest) Algorithm() Algorithm { + return Algorithm(d[:d.sepIndex()]) +} + +// Hex returns the hex digest portion of the digest. This will panic if the +// underlying digest is not in a valid format. +func (d Digest) Hex() string { + return string(d[d.sepIndex()+1:]) +} + +func (d Digest) String() string { + return string(d) +} + +func (d Digest) sepIndex() int { + i := strings.Index(string(d), ":") + + if i < 0 { + panic("could not find ':' in digest: " + d) + } + + return i +} diff --git a/vendor/github.com/docker/distribution/digest/digest_test.go b/vendor/github.com/docker/distribution/digest/digest_test.go new file mode 100644 index 000000000..afb4ebf63 --- /dev/null +++ b/vendor/github.com/docker/distribution/digest/digest_test.go @@ -0,0 +1,82 @@ +package digest + +import ( + "testing" +) + +func TestParseDigest(t *testing.T) { + for _, testcase := range []struct { + input string + err error + algorithm Algorithm + hex string + }{ + { + input: "sha256:e58fcf7418d4390dec8e8fb69d88c06ec07039d651fedd3aa72af9972e7d046b", + algorithm: "sha256", + hex: "e58fcf7418d4390dec8e8fb69d88c06ec07039d651fedd3aa72af9972e7d046b", + }, + { + input: "sha384:d3fc7881460b7e22e3d172954463dddd7866d17597e7248453c48b3e9d26d9596bf9c4a9cf8072c9d5bad76e19af801d", + algorithm: "sha384", + hex: "d3fc7881460b7e22e3d172954463dddd7866d17597e7248453c48b3e9d26d9596bf9c4a9cf8072c9d5bad76e19af801d", + }, + { + // empty hex + input: "sha256:", + err: ErrDigestInvalidFormat, + }, + { + // just hex + input: "d41d8cd98f00b204e9800998ecf8427e", + err: ErrDigestInvalidFormat, + }, + { + // not hex + input: "sha256:d41d8cd98f00b204e9800m98ecf8427e", + err: ErrDigestInvalidFormat, + }, + { + // too short + input: "sha256:abcdef0123456789", + err: ErrDigestInvalidLength, + }, + { + // too short (from different algorithm) + input: "sha512:abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789", + err: ErrDigestInvalidLength, + }, + { + input: "foo:d41d8cd98f00b204e9800998ecf8427e", + err: ErrDigestUnsupported, + }, + } { + digest, err := ParseDigest(testcase.input) + if err != testcase.err { + t.Fatalf("error differed from expected while parsing %q: %v != %v", testcase.input, err, testcase.err) + } + + if testcase.err != nil { + continue + } + + if digest.Algorithm() != testcase.algorithm { + t.Fatalf("incorrect algorithm for parsed digest: %q != %q", digest.Algorithm(), testcase.algorithm) + } + + if digest.Hex() != testcase.hex { + t.Fatalf("incorrect hex for parsed digest: %q != %q", digest.Hex(), testcase.hex) + } + + // Parse string return value and check equality + newParsed, err := ParseDigest(digest.String()) + + if err != nil { + t.Fatalf("unexpected error parsing input %q: %v", testcase.input, err) + } + + if newParsed != digest { + t.Fatalf("expected equal: %q != %q", newParsed, digest) + } + } +} diff --git 
a/vendor/github.com/docker/distribution/digest/digester.go b/vendor/github.com/docker/distribution/digest/digester.go new file mode 100644 index 000000000..f3105a45b --- /dev/null +++ b/vendor/github.com/docker/distribution/digest/digester.go @@ -0,0 +1,155 @@ +package digest + +import ( + "crypto" + "fmt" + "hash" + "io" +) + +// Algorithm identifies and implementation of a digester by an identifier. +// Note the that this defines both the hash algorithm used and the string +// encoding. +type Algorithm string + +// supported digest types +const ( + SHA256 Algorithm = "sha256" // sha256 with hex encoding + SHA384 Algorithm = "sha384" // sha384 with hex encoding + SHA512 Algorithm = "sha512" // sha512 with hex encoding + + // Canonical is the primary digest algorithm used with the distribution + // project. Other digests may be used but this one is the primary storage + // digest. + Canonical = SHA256 +) + +var ( + // TODO(stevvooe): Follow the pattern of the standard crypto package for + // registration of digests. Effectively, we are a registerable set and + // common symbol access. + + // algorithms maps values to hash.Hash implementations. Other algorithms + // may be available but they cannot be calculated by the digest package. + algorithms = map[Algorithm]crypto.Hash{ + SHA256: crypto.SHA256, + SHA384: crypto.SHA384, + SHA512: crypto.SHA512, + } +) + +// Available returns true if the digest type is available for use. If this +// returns false, New and Hash will return nil. +func (a Algorithm) Available() bool { + h, ok := algorithms[a] + if !ok { + return false + } + + // check availability of the hash, as well + return h.Available() +} + +func (a Algorithm) String() string { + return string(a) +} + +// Size returns number of bytes returned by the hash. +func (a Algorithm) Size() int { + h, ok := algorithms[a] + if !ok { + return 0 + } + return h.Size() +} + +// Set implemented to allow use of Algorithm as a command line flag. +func (a *Algorithm) Set(value string) error { + if value == "" { + *a = Canonical + } else { + // just do a type conversion, support is queried with Available. + *a = Algorithm(value) + } + + return nil +} + +// New returns a new digester for the specified algorithm. If the algorithm +// does not have a digester implementation, nil will be returned. This can be +// checked by calling Available before calling New. +func (a Algorithm) New() Digester { + return &digester{ + alg: a, + hash: a.Hash(), + } +} + +// Hash returns a new hash as used by the algorithm. If not available, the +// method will panic. Check Algorithm.Available() before calling. +func (a Algorithm) Hash() hash.Hash { + if !a.Available() { + // NOTE(stevvooe): A missing hash is usually a programming error that + // must be resolved at compile time. We don't import in the digest + // package to allow users to choose their hash implementation (such as + // when using stevvooe/resumable or a hardware accelerated package). + // + // Applications that may want to resolve the hash at runtime should + // call Algorithm.Available before call Algorithm.Hash(). + panic(fmt.Sprintf("%v not available (make sure it is imported)", a)) + } + + return algorithms[a].New() +} + +// FromReader returns the digest of the reader using the algorithm. +func (a Algorithm) FromReader(rd io.Reader) (Digest, error) { + digester := a.New() + + if _, err := io.Copy(digester.Hash(), rd); err != nil { + return "", err + } + + return digester.Digest(), nil +} + +// FromBytes digests the input and returns a Digest. 
+func (a Algorithm) FromBytes(p []byte) Digest { + digester := a.New() + + if _, err := digester.Hash().Write(p); err != nil { + // Writes to a Hash should never fail. None of the existing + // hash implementations in the stdlib or hashes vendored + // here can return errors from Write. Having a panic in this + // condition instead of having FromBytes return an error value + // avoids unnecessary error handling paths in all callers. + panic("write to hash function returned error: " + err.Error()) + } + + return digester.Digest() +} + +// TODO(stevvooe): Allow resolution of verifiers using the digest type and +// this registration system. + +// Digester calculates the digest of written data. Writes should go directly +// to the return value of Hash, while calling Digest will return the current +// value of the digest. +type Digester interface { + Hash() hash.Hash // provides direct access to underlying hash instance. + Digest() Digest +} + +// digester provides a simple digester definition that embeds a hasher. +type digester struct { + alg Algorithm + hash hash.Hash +} + +func (d *digester) Hash() hash.Hash { + return d.hash +} + +func (d *digester) Digest() Digest { + return NewDigest(d.alg, d.hash) +} diff --git a/vendor/github.com/docker/distribution/digest/digester_resumable_test.go b/vendor/github.com/docker/distribution/digest/digester_resumable_test.go new file mode 100644 index 000000000..6ba21c801 --- /dev/null +++ b/vendor/github.com/docker/distribution/digest/digester_resumable_test.go @@ -0,0 +1,21 @@ +// +build !noresumabledigest + +package digest + +import ( + "testing" + + "github.com/stevvooe/resumable" + _ "github.com/stevvooe/resumable/sha256" +) + +// TestResumableDetection just ensures that the resumable capability of a hash +// is exposed through the digester type, which is just a hash plus a Digest +// method. +func TestResumableDetection(t *testing.T) { + d := Canonical.New() + + if _, ok := d.Hash().(resumable.Hash); !ok { + t.Fatalf("expected digester to implement resumable.Hash: %#v, %v", d, d.Hash()) + } +} diff --git a/vendor/github.com/docker/distribution/digest/doc.go b/vendor/github.com/docker/distribution/digest/doc.go new file mode 100644 index 000000000..f64b0db32 --- /dev/null +++ b/vendor/github.com/docker/distribution/digest/doc.go @@ -0,0 +1,42 @@ +// Package digest provides a generalized type to opaquely represent message +// digests and their operations within the registry. The Digest type is +// designed to serve as a flexible identifier in a content-addressable system. +// More importantly, it provides tools and wrappers to work with +// hash.Hash-based digests with little effort. +// +// Basics +// +// The format of a digest is simply a string with two parts, dubbed the +// "algorithm" and the "digest", separated by a colon: +// +// : +// +// An example of a sha256 digest representation follows: +// +// sha256:7173b809ca12ec5dee4506cd86be934c4596dd234ee82c0662eac04a8c2c71dc +// +// In this case, the string "sha256" is the algorithm and the hex bytes are +// the "digest". +// +// Because the Digest type is simply a string, once a valid Digest is +// obtained, comparisons are cheap, quick and simple to express with the +// standard equality operator. +// +// Verification +// +// The main benefit of using the Digest type is simple verification against a +// given digest. The Verifier interface, modeled after the stdlib hash.Hash +// interface, provides a common write sink for digest verification. 
After +// writing is complete, calling the Verifier.Verified method will indicate +// whether or not the stream of bytes matches the target digest. +// +// Missing Features +// +// In addition to the above, we intend to add the following features to this +// package: +// +// 1. A Digester type that supports write sink digest calculation. +// +// 2. Suspend and resume of ongoing digest calculations to support efficient digest verification in the registry. +// +package digest diff --git a/vendor/github.com/docker/distribution/digest/set.go b/vendor/github.com/docker/distribution/digest/set.go new file mode 100644 index 000000000..4b9313c1a --- /dev/null +++ b/vendor/github.com/docker/distribution/digest/set.go @@ -0,0 +1,245 @@ +package digest + +import ( + "errors" + "sort" + "strings" + "sync" +) + +var ( + // ErrDigestNotFound is used when a matching digest + // could not be found in a set. + ErrDigestNotFound = errors.New("digest not found") + + // ErrDigestAmbiguous is used when multiple digests + // are found in a set. None of the matching digests + // should be considered valid matches. + ErrDigestAmbiguous = errors.New("ambiguous digest string") +) + +// Set is used to hold a unique set of digests which +// may be easily referenced by easily referenced by a string +// representation of the digest as well as short representation. +// The uniqueness of the short representation is based on other +// digests in the set. If digests are omitted from this set, +// collisions in a larger set may not be detected, therefore it +// is important to always do short representation lookups on +// the complete set of digests. To mitigate collisions, an +// appropriately long short code should be used. +type Set struct { + mutex sync.RWMutex + entries digestEntries +} + +// NewSet creates an empty set of digests +// which may have digests added. +func NewSet() *Set { + return &Set{ + entries: digestEntries{}, + } +} + +// checkShortMatch checks whether two digests match as either whole +// values or short values. This function does not test equality, +// rather whether the second value could match against the first +// value. +func checkShortMatch(alg Algorithm, hex, shortAlg, shortHex string) bool { + if len(hex) == len(shortHex) { + if hex != shortHex { + return false + } + if len(shortAlg) > 0 && string(alg) != shortAlg { + return false + } + } else if !strings.HasPrefix(hex, shortHex) { + return false + } else if len(shortAlg) > 0 && string(alg) != shortAlg { + return false + } + return true +} + +// Lookup looks for a digest matching the given string representation. +// If no digests could be found ErrDigestNotFound will be returned +// with an empty digest value. If multiple matches are found +// ErrDigestAmbiguous will be returned with an empty digest value. 
+func (dst *Set) Lookup(d string) (Digest, error) { + dst.mutex.RLock() + defer dst.mutex.RUnlock() + if len(dst.entries) == 0 { + return "", ErrDigestNotFound + } + var ( + searchFunc func(int) bool + alg Algorithm + hex string + ) + dgst, err := ParseDigest(d) + if err == ErrDigestInvalidFormat { + hex = d + searchFunc = func(i int) bool { + return dst.entries[i].val >= d + } + } else { + hex = dgst.Hex() + alg = dgst.Algorithm() + searchFunc = func(i int) bool { + if dst.entries[i].val == hex { + return dst.entries[i].alg >= alg + } + return dst.entries[i].val >= hex + } + } + idx := sort.Search(len(dst.entries), searchFunc) + if idx == len(dst.entries) || !checkShortMatch(dst.entries[idx].alg, dst.entries[idx].val, string(alg), hex) { + return "", ErrDigestNotFound + } + if dst.entries[idx].alg == alg && dst.entries[idx].val == hex { + return dst.entries[idx].digest, nil + } + if idx+1 < len(dst.entries) && checkShortMatch(dst.entries[idx+1].alg, dst.entries[idx+1].val, string(alg), hex) { + return "", ErrDigestAmbiguous + } + + return dst.entries[idx].digest, nil +} + +// Add adds the given digest to the set. An error will be returned +// if the given digest is invalid. If the digest already exists in the +// set, this operation will be a no-op. +func (dst *Set) Add(d Digest) error { + if err := d.Validate(); err != nil { + return err + } + dst.mutex.Lock() + defer dst.mutex.Unlock() + entry := &digestEntry{alg: d.Algorithm(), val: d.Hex(), digest: d} + searchFunc := func(i int) bool { + if dst.entries[i].val == entry.val { + return dst.entries[i].alg >= entry.alg + } + return dst.entries[i].val >= entry.val + } + idx := sort.Search(len(dst.entries), searchFunc) + if idx == len(dst.entries) { + dst.entries = append(dst.entries, entry) + return nil + } else if dst.entries[idx].digest == d { + return nil + } + + entries := append(dst.entries, nil) + copy(entries[idx+1:], entries[idx:len(entries)-1]) + entries[idx] = entry + dst.entries = entries + return nil +} + +// Remove removes the given digest from the set. An err will be +// returned if the given digest is invalid. If the digest does +// not exist in the set, this operation will be a no-op. +func (dst *Set) Remove(d Digest) error { + if err := d.Validate(); err != nil { + return err + } + dst.mutex.Lock() + defer dst.mutex.Unlock() + entry := &digestEntry{alg: d.Algorithm(), val: d.Hex(), digest: d} + searchFunc := func(i int) bool { + if dst.entries[i].val == entry.val { + return dst.entries[i].alg >= entry.alg + } + return dst.entries[i].val >= entry.val + } + idx := sort.Search(len(dst.entries), searchFunc) + // Not found if idx is after or value at idx is not digest + if idx == len(dst.entries) || dst.entries[idx].digest != d { + return nil + } + + entries := dst.entries + copy(entries[idx:], entries[idx+1:]) + entries = entries[:len(entries)-1] + dst.entries = entries + + return nil +} + +// All returns all the digests in the set +func (dst *Set) All() []Digest { + dst.mutex.RLock() + defer dst.mutex.RUnlock() + retValues := make([]Digest, len(dst.entries)) + for i := range dst.entries { + retValues[i] = dst.entries[i].digest + } + + return retValues +} + +// ShortCodeTable returns a map of Digest to unique short codes. The +// length represents the minimum value, the maximum length may be the +// entire value of digest if uniqueness cannot be achieved without the +// full value. This function will attempt to make short codes as short +// as possible to be unique. 
+func ShortCodeTable(dst *Set, length int) map[Digest]string { + dst.mutex.RLock() + defer dst.mutex.RUnlock() + m := make(map[Digest]string, len(dst.entries)) + l := length + resetIdx := 0 + for i := 0; i < len(dst.entries); i++ { + var short string + extended := true + for extended { + extended = false + if len(dst.entries[i].val) <= l { + short = dst.entries[i].digest.String() + } else { + short = dst.entries[i].val[:l] + for j := i + 1; j < len(dst.entries); j++ { + if checkShortMatch(dst.entries[j].alg, dst.entries[j].val, "", short) { + if j > resetIdx { + resetIdx = j + } + extended = true + } else { + break + } + } + if extended { + l++ + } + } + } + m[dst.entries[i].digest] = short + if i >= resetIdx { + l = length + } + } + return m +} + +type digestEntry struct { + alg Algorithm + val string + digest Digest +} + +type digestEntries []*digestEntry + +func (d digestEntries) Len() int { + return len(d) +} + +func (d digestEntries) Less(i, j int) bool { + if d[i].val != d[j].val { + return d[i].val < d[j].val + } + return d[i].alg < d[j].alg +} + +func (d digestEntries) Swap(i, j int) { + d[i], d[j] = d[j], d[i] +} diff --git a/vendor/github.com/docker/distribution/digest/set_test.go b/vendor/github.com/docker/distribution/digest/set_test.go new file mode 100644 index 000000000..e9dab8795 --- /dev/null +++ b/vendor/github.com/docker/distribution/digest/set_test.go @@ -0,0 +1,368 @@ +package digest + +import ( + "crypto/sha256" + "encoding/binary" + "math/rand" + "testing" +) + +func assertEqualDigests(t *testing.T, d1, d2 Digest) { + if d1 != d2 { + t.Fatalf("Digests do not match:\n\tActual: %s\n\tExpected: %s", d1, d2) + } +} + +func TestLookup(t *testing.T) { + digests := []Digest{ + "sha256:1234511111111111111111111111111111111111111111111111111111111111", + "sha256:1234111111111111111111111111111111111111111111111111111111111111", + "sha256:1234611111111111111111111111111111111111111111111111111111111111", + "sha256:5432111111111111111111111111111111111111111111111111111111111111", + "sha256:6543111111111111111111111111111111111111111111111111111111111111", + "sha256:6432111111111111111111111111111111111111111111111111111111111111", + "sha256:6542111111111111111111111111111111111111111111111111111111111111", + "sha256:6532111111111111111111111111111111111111111111111111111111111111", + } + + dset := NewSet() + for i := range digests { + if err := dset.Add(digests[i]); err != nil { + t.Fatal(err) + } + } + + dgst, err := dset.Lookup("54") + if err != nil { + t.Fatal(err) + } + assertEqualDigests(t, dgst, digests[3]) + + dgst, err = dset.Lookup("1234") + if err == nil { + t.Fatal("Expected ambiguous error looking up: 1234") + } + if err != ErrDigestAmbiguous { + t.Fatal(err) + } + + dgst, err = dset.Lookup("9876") + if err == nil { + t.Fatal("Expected ambiguous error looking up: 9876") + } + if err != ErrDigestNotFound { + t.Fatal(err) + } + + dgst, err = dset.Lookup("sha256:1234") + if err == nil { + t.Fatal("Expected ambiguous error looking up: sha256:1234") + } + if err != ErrDigestAmbiguous { + t.Fatal(err) + } + + dgst, err = dset.Lookup("sha256:12345") + if err != nil { + t.Fatal(err) + } + assertEqualDigests(t, dgst, digests[0]) + + dgst, err = dset.Lookup("sha256:12346") + if err != nil { + t.Fatal(err) + } + assertEqualDigests(t, dgst, digests[2]) + + dgst, err = dset.Lookup("12346") + if err != nil { + t.Fatal(err) + } + assertEqualDigests(t, dgst, digests[2]) + + dgst, err = dset.Lookup("12345") + if err != nil { + t.Fatal(err) + } + assertEqualDigests(t, dgst, 
digests[0]) +} + +func TestAddDuplication(t *testing.T) { + digests := []Digest{ + "sha256:1234111111111111111111111111111111111111111111111111111111111111", + "sha256:1234511111111111111111111111111111111111111111111111111111111111", + "sha256:1234611111111111111111111111111111111111111111111111111111111111", + "sha256:5432111111111111111111111111111111111111111111111111111111111111", + "sha256:6543111111111111111111111111111111111111111111111111111111111111", + "sha512:65431111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111", + "sha512:65421111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111", + "sha512:65321111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111", + } + + dset := NewSet() + for i := range digests { + if err := dset.Add(digests[i]); err != nil { + t.Fatal(err) + } + } + + if len(dset.entries) != 8 { + t.Fatal("Invalid dset size") + } + + if err := dset.Add(Digest("sha256:1234511111111111111111111111111111111111111111111111111111111111")); err != nil { + t.Fatal(err) + } + + if len(dset.entries) != 8 { + t.Fatal("Duplicate digest insert allowed") + } + + if err := dset.Add(Digest("sha384:123451111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111")); err != nil { + t.Fatal(err) + } + + if len(dset.entries) != 9 { + t.Fatal("Insert with different algorithm not allowed") + } +} + +func TestRemove(t *testing.T) { + digests, err := createDigests(10) + if err != nil { + t.Fatal(err) + } + + dset := NewSet() + for i := range digests { + if err := dset.Add(digests[i]); err != nil { + t.Fatal(err) + } + } + + dgst, err := dset.Lookup(digests[0].String()) + if err != nil { + t.Fatal(err) + } + if dgst != digests[0] { + t.Fatalf("Unexpected digest value:\n\tExpected: %s\n\tActual: %s", digests[0], dgst) + } + + if err := dset.Remove(digests[0]); err != nil { + t.Fatal(err) + } + + if _, err := dset.Lookup(digests[0].String()); err != ErrDigestNotFound { + t.Fatalf("Expected error %v when looking up removed digest, got %v", ErrDigestNotFound, err) + } +} + +func TestAll(t *testing.T) { + digests, err := createDigests(100) + if err != nil { + t.Fatal(err) + } + + dset := NewSet() + for i := range digests { + if err := dset.Add(digests[i]); err != nil { + t.Fatal(err) + } + } + + all := map[Digest]struct{}{} + for _, dgst := range dset.All() { + all[dgst] = struct{}{} + } + + if len(all) != len(digests) { + t.Fatalf("Unexpected number of unique digests found:\n\tExpected: %d\n\tActual: %d", len(digests), len(all)) + } + + for i, dgst := range digests { + if _, ok := all[dgst]; !ok { + t.Fatalf("Missing element at position %d: %s", i, dgst) + } + } + +} + +func assertEqualShort(t *testing.T, actual, expected string) { + if actual != expected { + t.Fatalf("Unexpected short value:\n\tExpected: %s\n\tActual: %s", expected, actual) + } +} + +func TestShortCodeTable(t *testing.T) { + digests := []Digest{ + "sha256:1234111111111111111111111111111111111111111111111111111111111111", + "sha256:1234511111111111111111111111111111111111111111111111111111111111", + "sha256:1234611111111111111111111111111111111111111111111111111111111111", + "sha256:5432111111111111111111111111111111111111111111111111111111111111", + "sha256:6543111111111111111111111111111111111111111111111111111111111111", + 
"sha256:6432111111111111111111111111111111111111111111111111111111111111", + "sha256:6542111111111111111111111111111111111111111111111111111111111111", + "sha256:6532111111111111111111111111111111111111111111111111111111111111", + } + + dset := NewSet() + for i := range digests { + if err := dset.Add(digests[i]); err != nil { + t.Fatal(err) + } + } + + dump := ShortCodeTable(dset, 2) + + if len(dump) < len(digests) { + t.Fatalf("Error unexpected size: %d, expecting %d", len(dump), len(digests)) + } + assertEqualShort(t, dump[digests[0]], "12341") + assertEqualShort(t, dump[digests[1]], "12345") + assertEqualShort(t, dump[digests[2]], "12346") + assertEqualShort(t, dump[digests[3]], "54") + assertEqualShort(t, dump[digests[4]], "6543") + assertEqualShort(t, dump[digests[5]], "64") + assertEqualShort(t, dump[digests[6]], "6542") + assertEqualShort(t, dump[digests[7]], "653") +} + +func createDigests(count int) ([]Digest, error) { + r := rand.New(rand.NewSource(25823)) + digests := make([]Digest, count) + for i := range digests { + h := sha256.New() + if err := binary.Write(h, binary.BigEndian, r.Int63()); err != nil { + return nil, err + } + digests[i] = NewDigest("sha256", h) + } + return digests, nil +} + +func benchAddNTable(b *testing.B, n int) { + digests, err := createDigests(n) + if err != nil { + b.Fatal(err) + } + b.ResetTimer() + for i := 0; i < b.N; i++ { + dset := &Set{entries: digestEntries(make([]*digestEntry, 0, n))} + for j := range digests { + if err = dset.Add(digests[j]); err != nil { + b.Fatal(err) + } + } + } +} + +func benchLookupNTable(b *testing.B, n int, shortLen int) { + digests, err := createDigests(n) + if err != nil { + b.Fatal(err) + } + dset := &Set{entries: digestEntries(make([]*digestEntry, 0, n))} + for i := range digests { + if err := dset.Add(digests[i]); err != nil { + b.Fatal(err) + } + } + shorts := make([]string, 0, n) + for _, short := range ShortCodeTable(dset, shortLen) { + shorts = append(shorts, short) + } + + b.ResetTimer() + for i := 0; i < b.N; i++ { + if _, err = dset.Lookup(shorts[i%n]); err != nil { + b.Fatal(err) + } + } +} + +func benchRemoveNTable(b *testing.B, n int) { + digests, err := createDigests(n) + if err != nil { + b.Fatal(err) + } + b.ResetTimer() + for i := 0; i < b.N; i++ { + dset := &Set{entries: digestEntries(make([]*digestEntry, 0, n))} + b.StopTimer() + for j := range digests { + if err = dset.Add(digests[j]); err != nil { + b.Fatal(err) + } + } + b.StartTimer() + for j := range digests { + if err = dset.Remove(digests[j]); err != nil { + b.Fatal(err) + } + } + } +} + +func benchShortCodeNTable(b *testing.B, n int, shortLen int) { + digests, err := createDigests(n) + if err != nil { + b.Fatal(err) + } + dset := &Set{entries: digestEntries(make([]*digestEntry, 0, n))} + for i := range digests { + if err := dset.Add(digests[i]); err != nil { + b.Fatal(err) + } + } + + b.ResetTimer() + for i := 0; i < b.N; i++ { + ShortCodeTable(dset, shortLen) + } +} + +func BenchmarkAdd10(b *testing.B) { + benchAddNTable(b, 10) +} + +func BenchmarkAdd100(b *testing.B) { + benchAddNTable(b, 100) +} + +func BenchmarkAdd1000(b *testing.B) { + benchAddNTable(b, 1000) +} + +func BenchmarkRemove10(b *testing.B) { + benchRemoveNTable(b, 10) +} + +func BenchmarkRemove100(b *testing.B) { + benchRemoveNTable(b, 100) +} + +func BenchmarkRemove1000(b *testing.B) { + benchRemoveNTable(b, 1000) +} + +func BenchmarkLookup10(b *testing.B) { + benchLookupNTable(b, 10, 12) +} + +func BenchmarkLookup100(b *testing.B) { + benchLookupNTable(b, 100, 12) +} + 
+func BenchmarkLookup1000(b *testing.B) { + benchLookupNTable(b, 1000, 12) +} + +func BenchmarkShortCode10(b *testing.B) { + benchShortCodeNTable(b, 10, 12) +} +func BenchmarkShortCode100(b *testing.B) { + benchShortCodeNTable(b, 100, 12) +} +func BenchmarkShortCode1000(b *testing.B) { + benchShortCodeNTable(b, 1000, 12) +} diff --git a/vendor/github.com/docker/distribution/digest/verifiers.go b/vendor/github.com/docker/distribution/digest/verifiers.go new file mode 100644 index 000000000..9af3be134 --- /dev/null +++ b/vendor/github.com/docker/distribution/digest/verifiers.go @@ -0,0 +1,44 @@ +package digest + +import ( + "hash" + "io" +) + +// Verifier presents a general verification interface to be used with message +// digests and other byte stream verifications. Users instantiate a Verifier +// from one of the various methods, write the data under test to it then check +// the result with the Verified method. +type Verifier interface { + io.Writer + + // Verified will return true if the content written to Verifier matches + // the digest. + Verified() bool +} + +// NewDigestVerifier returns a verifier that compares the written bytes +// against a passed in digest. +func NewDigestVerifier(d Digest) (Verifier, error) { + if err := d.Validate(); err != nil { + return nil, err + } + + return hashVerifier{ + hash: d.Algorithm().Hash(), + digest: d, + }, nil +} + +type hashVerifier struct { + digest Digest + hash hash.Hash +} + +func (hv hashVerifier) Write(p []byte) (n int, err error) { + return hv.hash.Write(p) +} + +func (hv hashVerifier) Verified() bool { + return hv.digest == NewDigest(hv.digest.Algorithm(), hv.hash) +} diff --git a/vendor/github.com/docker/distribution/digest/verifiers_test.go b/vendor/github.com/docker/distribution/digest/verifiers_test.go new file mode 100644 index 000000000..c342d6e7c --- /dev/null +++ b/vendor/github.com/docker/distribution/digest/verifiers_test.go @@ -0,0 +1,49 @@ +package digest + +import ( + "bytes" + "crypto/rand" + "io" + "testing" +) + +func TestDigestVerifier(t *testing.T) { + p := make([]byte, 1<<20) + rand.Read(p) + digest := FromBytes(p) + + verifier, err := NewDigestVerifier(digest) + if err != nil { + t.Fatalf("unexpected error getting digest verifier: %s", err) + } + + io.Copy(verifier, bytes.NewReader(p)) + + if !verifier.Verified() { + t.Fatalf("bytes not verified") + } +} + +// TestVerifierUnsupportedDigest ensures that unsupported digest validation is +// flowing through verifier creation. +func TestVerifierUnsupportedDigest(t *testing.T) { + unsupported := Digest("bean:0123456789abcdef") + + _, err := NewDigestVerifier(unsupported) + if err == nil { + t.Fatalf("expected error when creating verifier") + } + + if err != ErrDigestUnsupported { + t.Fatalf("incorrect error for unsupported digest: %v", err) + } +} + +// TODO(stevvooe): Add benchmarks to measure bytes/second throughput for +// DigestVerifier. +// +// The relevant benchmark for comparison can be run with the following +// commands: +// +// go test -bench . crypto/sha1 +// diff --git a/vendor/github.com/docker/distribution/doc.go b/vendor/github.com/docker/distribution/doc.go new file mode 100644 index 000000000..bdd8cb708 --- /dev/null +++ b/vendor/github.com/docker/distribution/doc.go @@ -0,0 +1,7 @@ +// Package distribution will define the interfaces for the components of +// docker distribution. The goal is to allow users to reliably package, ship +// and store content related to docker images. +// +// This is currently a work in progress. 
More details are available in the +// README.md. +package distribution diff --git a/vendor/github.com/docker/distribution/docs/README.md b/vendor/github.com/docker/distribution/docs/README.md new file mode 100644 index 000000000..b26dc3754 --- /dev/null +++ b/vendor/github.com/docker/distribution/docs/README.md @@ -0,0 +1,16 @@ +# The docs have been moved! + +The documentation for Registry has been merged into +[the general documentation repo](https://github.com/docker/docker.github.io). +Commit history has been preserved. + +The docs for Registry are now here: +https://github.com/docker/docker.github.io/tree/master/registry + +> Note: The definitive [./spec directory](spec/) directory and +[configuration.md](configuration.md) file will be maintained in this repository +and be refreshed periodically in +[the general documentation repo](https://github.com/docker/docker.github.io). + +As always, the docs in the general repo remain open-source and we appreciate +your feedback and pull requests! diff --git a/vendor/github.com/docker/distribution/docs/configuration.md b/vendor/github.com/docker/distribution/docs/configuration.md new file mode 100644 index 000000000..a1c66b21c --- /dev/null +++ b/vendor/github.com/docker/distribution/docs/configuration.md @@ -0,0 +1,1121 @@ +--- +title: "Configuring a registry" +description: "Explains how to configure a registry" +keywords: registry, on-prem, images, tags, repository, distribution, configuration +--- + +The Registry configuration is based on a YAML file, detailed below. While it +comes with sane default values out of the box, you should review it exhaustively +before moving your systems to production. + +## Override specific configuration options + +In a typical setup where you run your Registry from the official image, you can +specify a configuration variable from the environment by passing `-e` arguments +to your `docker run` stanza or from within a Dockerfile using the `ENV` +instruction. + +To override a configuration option, create an environment variable named +`REGISTRY_variable` where `variable` is the name of the configuration option +and the `_` (underscore) represents indention levels. For example, you can +configure the `rootdirectory` of the `filesystem` storage backend: + +```none +storage: + filesystem: + rootdirectory: /var/lib/registry +``` + +To override this value, set an environment variable like this: + +```none +REGISTRY_STORAGE_FILESYSTEM_ROOTDIRECTORY=/somewhere +``` + +This variable overrides the `/var/lib/registry` value to the `/somewhere` +directory. + +> **Note**: Create a base configuration file with environment variables that can +> be configured to tweak individual values. Overriding configuration sections +> with environment variables is not recommended. + +## Overriding the entire configuration file + +If the default configuration is not a sound basis for your usage, or if you are +having issues overriding keys from the environment, you can specify an alternate +YAML configuration file by mounting it as a volume in the container. + +Typically, create a new configuration file from scratch,named `config.yml`, then +specify it in the `docker run` command: + +```bash +$ docker run -d -p 5000:5000 --restart=always --name registry \ + -v `pwd`/config.yml:/etc/docker/registry/config.yml \ + registry:2 +``` + +Use this +[example YAML file](https://github.com/docker/distribution/blob/master/cmd/registry/config-example.yml) +as a starting point. 
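Before the full option list, the environment-variable naming rule from the start of this section can be pinned down with a tiny worked example. The helper below is purely illustrative (`configEnvVar` is not part of the registry); it simply reproduces the documented convention of a `REGISTRY_` prefix, upper-casing, and one underscore per indentation level.

```go
package main

import (
	"fmt"
	"strings"
)

// configEnvVar is a hypothetical helper that builds the override variable
// name for a configuration option, following the convention described above.
func configEnvVar(path ...string) string {
	return "REGISTRY_" + strings.ToUpper(strings.Join(path, "_"))
}

func main() {
	fmt.Println(configEnvVar("storage", "filesystem", "rootdirectory"))
	// REGISTRY_STORAGE_FILESYSTEM_ROOTDIRECTORY
	fmt.Println(configEnvVar("http", "addr"))
	// REGISTRY_HTTP_ADDR
}
```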
+ +## List of configuration options + +These are all configuration options for the registry. Some options in the list +are mutually exclusive. Read the detailed reference information about each +option before finalizing your configuration. + +```none +version: 0.1 +log: + accesslog: + disabled: true + level: debug + formatter: text + fields: + service: registry + environment: staging + hooks: + - type: mail + disabled: true + levels: + - panic + options: + smtp: + addr: mail.example.com:25 + username: mailuser + password: password + insecure: true + from: sender@example.com + to: + - errors@example.com +loglevel: debug # deprecated: use "log" +storage: + filesystem: + rootdirectory: /var/lib/registry + maxthreads: 100 + azure: + accountname: accountname + accountkey: base64encodedaccountkey + container: containername + gcs: + bucket: bucketname + keyfile: /path/to/keyfile + rootdirectory: /gcs/object/name/prefix + chunksize: 5242880 + s3: + accesskey: awsaccesskey + secretkey: awssecretkey + region: us-west-1 + regionendpoint: http://myobjects.local + bucket: bucketname + encrypt: true + keyid: mykeyid + secure: true + v4auth: true + chunksize: 5242880 + multipartcopychunksize: 33554432 + multipartcopymaxconcurrency: 100 + multipartcopythresholdsize: 33554432 + rootdirectory: /s3/object/name/prefix + swift: + username: username + password: password + authurl: https://storage.myprovider.com/auth/v1.0 or https://storage.myprovider.com/v2.0 or https://storage.myprovider.com/v3/auth + tenant: tenantname + tenantid: tenantid + domain: domain name for Openstack Identity v3 API + domainid: domain id for Openstack Identity v3 API + insecureskipverify: true + region: fr + container: containername + rootdirectory: /swift/object/name/prefix + oss: + accesskeyid: accesskeyid + accesskeysecret: accesskeysecret + region: OSS region name + endpoint: optional endpoints + internal: optional internal endpoint + bucket: OSS bucket + encrypt: optional data encryption setting + secure: optional ssl setting + chunksize: optional size valye + rootdirectory: optional root directory + inmemory: # This driver takes no parameters + delete: + enabled: false + redirect: + disable: false + cache: + blobdescriptor: redis + maintenance: + uploadpurging: + enabled: true + age: 168h + interval: 24h + dryrun: false + readonly: + enabled: false +auth: + silly: + realm: silly-realm + service: silly-service + token: + realm: token-realm + service: token-service + issuer: registry-token-issuer + rootcertbundle: /root/certs/bundle + htpasswd: + realm: basic-realm + path: /path/to/htpasswd +middleware: + registry: + - name: ARegistryMiddleware + options: + foo: bar + repository: + - name: ARepositoryMiddleware + options: + foo: bar + storage: + - name: cloudfront + options: + baseurl: https://my.cloudfronted.domain.com/ + privatekey: /path/to/pem + keypairid: cloudfrontkeypairid + duration: 3000s + storage: + - name: redirect + options: + baseurl: https://example.com/ +reporting: + bugsnag: + apikey: bugsnagapikey + releasestage: bugsnagreleasestage + endpoint: bugsnagendpoint + newrelic: + licensekey: newreliclicensekey + name: newrelicname + verbose: true +http: + addr: localhost:5000 + prefix: /my/nested/registry/ + host: https://myregistryaddress.org:5000 + secret: asecretforlocaldevelopment + relativeurls: false + tls: + certificate: /path/to/x509/public + key: /path/to/x509/private + clientcas: + - /path/to/ca.pem + - /path/to/another/ca.pem + letsencrypt: + cachefile: /path/to/cache-file + email: emailused@letsencrypt.com 
+ debug: + addr: localhost:5001 + headers: + X-Content-Type-Options: [nosniff] + http2: + disabled: false +notifications: + endpoints: + - name: alistener + disabled: false + url: https://my.listener.com/event + headers: + timeout: 500 + threshold: 5 + backoff: 1000 + ignoredmediatypes: + - application/octet-stream +redis: + addr: localhost:6379 + password: asecret + db: 0 + dialtimeout: 10ms + readtimeout: 10ms + writetimeout: 10ms + pool: + maxidle: 16 + maxactive: 64 + idletimeout: 300s +health: + storagedriver: + enabled: true + interval: 10s + threshold: 3 + file: + - file: /path/to/checked/file + interval: 10s + http: + - uri: http://server.to.check/must/return/200 + headers: + Authorization: [Basic QWxhZGRpbjpvcGVuIHNlc2FtZQ==] + statuscode: 200 + timeout: 3s + interval: 10s + threshold: 3 + tcp: + - addr: redis-server.domain.com:6379 + timeout: 3s + interval: 10s + threshold: 3 +proxy: + remoteurl: https://registry-1.docker.io + username: [username] + password: [password] +compatibility: + schema1: + signingkeyfile: /etc/registry/key.json +validation: + enabled: true + manifests: + urls: + allow: + - ^https?://([^/]+\.)*example\.com/ + deny: + - ^https?://www\.example\.com/ +``` + +In some instances a configuration option is **optional** but it contains child +options marked as **required**. In these cases, you can omit the parent with +all its children. However, if the parent is included, you must also include all +the children marked **required**. + +## `version` + +```none +version: 0.1 +``` + +The `version` option is **required**. It specifies the configuration's version. +It is expected to remain a top-level field, to allow for a consistent version +check before parsing the remainder of the configuration file. + +## `log` + +The `log` subsection configures the behavior of the logging system. The logging +system outputs everything to stdout. You can adjust the granularity and format +with this configuration section. + +```none +log: + accesslog: + disabled: true + level: debug + formatter: text + fields: + service: registry + environment: staging +``` + +| Parameter | Required | Description | +|-------------|----------|-------------| +| `level` | no | Sets the sensitivity of logging output. Permitted values are `error`, `warn`, `info`, and `debug`. The default is `info`. | +| `formatter` | no | This selects the format of logging output. The format primarily affects how keyed attributes for a log line are encoded. Options are `text`, `json`, and `logstash`. The default is `text`. | +| `fields` | no | A map of field names to values. These are added to every log line for the context. This is useful for identifying log messages source after being mixed in other systems. | + +### `accesslog` + +```none +accesslog: + disabled: true +``` + +Within `log`, `accesslog` configures the behavior of the access logging +system. By default, the access logging system outputs to stdout in +[Combined Log Format](https://httpd.apache.org/docs/2.4/logs.html#combined). +Access logging can be disabled by setting the boolean flag `disabled` to `true`. + +## `hooks` + +```none +hooks: + - type: mail + levels: + - panic + options: + smtp: + addr: smtp.sendhost.com:25 + username: sendername + password: password + insecure: true + from: name@sendhost.com + to: + - name@receivehost.com +``` + +The `hooks` subsection configures the logging hooks' behavior. This subsection +includes a sequence handler which you can use for sending mail, for example. 
+Refer to `loglevel` to configure the level of messages printed. + +## `loglevel` + +> **DEPRECATED:** Please use [log](#log) instead. + +```none +loglevel: debug +``` + +Permitted values are `error`, `warn`, `info` and `debug`. The default is +`info`. + +## `storage` + +```none +storage: + filesystem: + rootdirectory: /var/lib/registry + azure: + accountname: accountname + accountkey: base64encodedaccountkey + container: containername + gcs: + bucket: bucketname + keyfile: /path/to/keyfile + rootdirectory: /gcs/object/name/prefix + s3: + accesskey: awsaccesskey + secretkey: awssecretkey + region: us-west-1 + regionendpoint: http://myobjects.local + bucket: bucketname + encrypt: true + keyid: mykeyid + secure: true + v4auth: true + chunksize: 5242880 + multipartcopychunksize: 33554432 + multipartcopymaxconcurrency: 100 + multipartcopythresholdsize: 33554432 + rootdirectory: /s3/object/name/prefix + swift: + username: username + password: password + authurl: https://storage.myprovider.com/auth/v1.0 or https://storage.myprovider.com/v2.0 or https://storage.myprovider.com/v3/auth + tenant: tenantname + tenantid: tenantid + domain: domain name for Openstack Identity v3 API + domainid: domain id for Openstack Identity v3 API + insecureskipverify: true + region: fr + container: containername + rootdirectory: /swift/object/name/prefix + oss: + accesskeyid: accesskeyid + accesskeysecret: accesskeysecret + region: OSS region name + endpoint: optional endpoints + internal: optional internal endpoint + bucket: OSS bucket + encrypt: optional data encryption setting + secure: optional ssl setting + chunksize: optional size valye + rootdirectory: optional root directory + inmemory: + delete: + enabled: false + cache: + blobdescriptor: inmemory + maintenance: + uploadpurging: + enabled: true + age: 168h + interval: 24h + dryrun: false + readonly: + enabled: false + redirect: + disable: false +``` + +The `storage` option is **required** and defines which storage backend is in +use. You must configure exactly one backend. If you configure more, the registry +returns an error. You can choose any of these backend storage drivers: + +| Storage driver | Description | +|---------------------|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| `filesystem` | Uses the local disk to store registry files. It is ideal for development and may be appropriate for some small-scale production applications. See the [driver's reference documentation](https://github.com/docker/docker.github.io/tree/master/registry/storage-drivers/filesystem.md). | +| `azure` | Uses Microsoft Azure Blob Storage. See the [driver's reference documentation](https://github.com/docker/docker.github.io/tree/master/registry/storage-drivers/azure.md). | +| `gcs` | Uses Google Cloud Storage. See the [driver's reference documentation](https://github.com/docker/docker.github.io/tree/master/registry/storage-drivers/gcs.md). | +| `s3` | Uses Amazon Simple Storage Service (S3) and compatible Storage Services. See the [driver's reference documentation](https://github.com/docker/docker.github.io/tree/master/registry/storage-drivers/s3.md). | +| `swift` | Uses Openstack Swift object storage. See the [driver's reference documentation](https://github.com/docker/docker.github.io/tree/master/registry/storage-drivers/swift.md). 
|
+| `oss` | Uses Aliyun OSS for object storage. See the [driver's reference documentation](https://github.com/docker/docker.github.io/tree/master/registry/storage-drivers/oss.md). |
+
+For testing only, you can use the [`inmemory` storage
+driver](https://github.com/docker/docker.github.io/tree/master/registry/storage-drivers/inmemory.md).
+If you would like to run a registry from volatile memory, use the
+[`filesystem` driver](https://github.com/docker/docker.github.io/tree/master/registry/storage-drivers/filesystem.md)
+on a ramdisk.
+
+If you are deploying a registry on Windows, a Windows volume mounted from the
+host is not recommended. Instead, you can use an S3 or Azure backing
+data-store. If you do use a Windows volume, the length of the `PATH` to
+the mount point must be within the `MAX_PATH` limits (typically 255 characters),
+or this error will occur:
+
+```none
+mkdir /XXX protocol error and your registry will not function properly.
+```
+
+### `maintenance`
+
+Currently, upload purging and read-only mode are the only `maintenance`
+functions available.
+
+### `uploadpurging`
+
+Upload purging is a background process that periodically removes orphaned files
+from the upload directories of the registry. Upload purging is enabled by
+default. To configure upload directory purging, the following parameters must
+be set.
+
+| Parameter | Required | Description |
+|------------|----------|----------------------------------------------------------------------------------------------------|
+| `enabled` | yes | Set to `true` to enable upload purging. Defaults to `true`. |
+| `age` | yes | Upload directories which are older than this age will be deleted. Defaults to `168h` (1 week). |
+| `interval` | yes | The interval between upload directory purging. Defaults to `24h`. |
+| `dryrun` | yes | Set `dryrun` to `true` to obtain a summary of what directories will be deleted. Defaults to `false`.|
+
+> **Note**: `age` and `interval` are strings containing a number with optional
+> fraction and a unit suffix. Some examples: `45m`, `2h10m`, `168h`.
+
+### `readonly`
+
+If the `readonly` section under `maintenance` has `enabled` set to `true`,
+clients will not be allowed to write to the registry. This mode is useful to
+temporarily prevent writes to the backend storage so a garbage collection pass
+can be run. Before running garbage collection, the registry should be
+restarted with readonly's `enabled` set to `true`. After the garbage collection
+pass finishes, the registry may be restarted again, this time with `readonly`
+removed from the configuration (or set to `false`).
+
+### `delete`
+
+Use the `delete` structure to enable the deletion of image blobs and manifests
+by digest. It defaults to `false`, but it can be enabled by adding the following
+to the configuration file:
+
+```none
+delete:
+  enabled: true
+```
+
+### `cache`
+
+Use the `cache` structure to enable caching of data accessed in the storage
+backend. Currently, the only available cache provides fast access to layer
+metadata, which uses the `blobdescriptor` field if configured.
+
+You can set the `blobdescriptor` field to `redis` or `inmemory`. If set to `redis`, a
+Redis pool caches layer metadata. If set to `inmemory`, an in-memory map caches
+layer metadata.
+
+> **NOTE**: Formerly, `blobdescriptor` was known as `layerinfo`. While these
+> are equivalent, `layerinfo` has been deprecated.
+
+### `redirect`
+
+The `redirect` subsection provides configuration for managing redirects from
+content backends.
+For backends that support it, redirecting is enabled by
+default. In certain deployment scenarios, you may decide to route all data
+through the Registry, rather than redirecting to the backend. This may be more
+efficient when using a backend that is not co-located or when a registry
+instance is aggressively caching.
+
+To disable redirects, add a single flag `disable`, set to `true`
+under the `redirect` section:
+
+```none
+redirect:
+  disable: true
+```
+
+## `auth`
+
+```none
+auth:
+  silly:
+    realm: silly-realm
+    service: silly-service
+  token:
+    realm: token-realm
+    service: token-service
+    issuer: registry-token-issuer
+    rootcertbundle: /root/certs/bundle
+  htpasswd:
+    realm: basic-realm
+    path: /path/to/htpasswd
+```
+
+The `auth` option is **optional**. Possible auth providers include:
+
+- [`silly`](#silly)
+- [`token`](#token)
+- [`htpasswd`](#htpasswd)
+
+You can configure only one authentication provider.
+
+### `silly`
+
+The `silly` authentication provider is only appropriate for development. It simply checks
+for the existence of the `Authorization` header in the HTTP request. It does not
+check the header's value. If the header does not exist, the `silly` auth
+responds with a challenge response, echoing back the realm, service, and scope
+for which access was denied.
+
+The following values are used to configure the response:
+
+| Parameter | Required | Description |
+|-----------|----------|-------------------------------------------------------|
+| `realm` | yes | The realm in which the registry server authenticates. |
+| `service` | yes | The service being authenticated. |
+
+### `token`
+
+Token-based authentication allows you to decouple the authentication system from
+the registry. It is an established authentication paradigm with a high degree of
+security.
+
+| Parameter | Required | Description |
+|-----------|----------|-------------------------------------------------------|
+| `realm` | yes | The realm in which the registry server authenticates. |
+| `service` | yes | The service being authenticated. |
+| `issuer` | yes | The name of the token issuer. The issuer inserts this into the token so it must match the value configured for the issuer. |
+| `rootcertbundle` | yes | The absolute path to the root certificate bundle. This bundle contains the public part of the certificates used to sign authentication tokens. |
+
+For more information about Token based authentication configuration, see the
+[specification](spec/auth/token.md).
+
+### `htpasswd`
+
+The _htpasswd_ authentication backend allows you to configure basic
+authentication using an
+[Apache htpasswd file](https://httpd.apache.org/docs/2.4/programs/htpasswd.html).
+The only supported password format is
+[`bcrypt`](http://en.wikipedia.org/wiki/Bcrypt). Entries with other hash types
+are ignored. The `htpasswd` file is loaded once, at startup. If the file is
+invalid, the registry will display an error and will not start.
+
+> **Warning**: Only use the `htpasswd` authentication scheme with TLS
+> configured, since basic authentication sends passwords as part of the HTTP
+> header.
+
+| Parameter | Required | Description |
+|-----------|----------|-------------------------------------------------------|
+| `realm` | yes | The realm in which the registry server authenticates. |
+| `path` | yes | The path to the `htpasswd` file to load at startup. |
+
+## `middleware`
+
+The `middleware` structure is **optional**. Use this option to inject middleware at
+named hook points.
Each middleware must implement the same interface as the +object it is wrapping. For instance, a registry middleware must implement the +`distribution.Namespace` interface, while a repository middleware must implement +`distribution.Repository`, and a storage middleware must implement +`driver.StorageDriver`. + +This is an example configuration of the `cloudfront` middleware, a storage +middleware: + +```none +middleware: + registry: + - name: ARegistryMiddleware + options: + foo: bar + repository: + - name: ARepositoryMiddleware + options: + foo: bar + storage: + - name: cloudfront + options: + baseurl: https://my.cloudfronted.domain.com/ + privatekey: /path/to/pem + keypairid: cloudfrontkeypairid + duration: 3000s +``` + +Each middleware entry has `name` and `options` entries. The `name` must +correspond to the name under which the middleware registers itself. The +`options` field is a map that details custom configuration required to +initialize the middleware. It is treated as a `map[string]interface{}`. As such, +it supports any interesting structures desired, leaving it up to the middleware +initialization function to best determine how to handle the specific +interpretation of the options. + +### `cloudfront` + + +| Parameter | Required | Description | +|-----------|----------|-------------------------------------------------------| +| `baseurl` | yes | The `SCHEME://HOST[/PATH]` at which Cloudfront is served. | +| `privatekey` | yes | The private key for Cloudfront, provided by AWS. | +| `keypairid` | yes | The key pair ID provided by AWS. | +| `duration` | no | An integer and unit for the duration of the Cloudfront session. Valid time units are `ns`, `us` (or `µs`), `ms`, `s`, `m`, or `h`. For example, `3000s` is valid, but `3000 s` is not. If you do not specify a `duration` or you specify an integer without a time unit, the duration defaults to `20m` (20 minutes).| + +### `redirect` + +You can use the `redirect` storage middleware to specify a custom URL to a +location of a proxy for the layer stored by the S3 storage driver. + +| Parameter | Required | Description | +|-----------|----------|-------------------------------------------------------------------------------------------------------------| +| `baseurl` | yes | `SCHEME://HOST` at which layers are served. Can also contain port. For example, `https://example.com:5443`. | + +## `reporting` + +``` +reporting: + bugsnag: + apikey: bugsnagapikey + releasestage: bugsnagreleasestage + endpoint: bugsnagendpoint + newrelic: + licensekey: newreliclicensekey + name: newrelicname + verbose: true +``` + +The `reporting` option is **optional** and configures error and metrics +reporting tools. At the moment only two services are supported: + +- [Bugsnag](#bugsnag) +- [New Relic](#new-relic) + +A valid configuration may contain both. + +### `bugsnag` + +| Parameter | Required | Description | +|-----------|----------|-------------------------------------------------------| +| `apikey` | yes | The API Key provided by Bugsnag. | +| `releasestage` | no | Tracks where the registry is deployed, using a string like `production`, `staging`, or `development`.| +| `endpoint`| no | The enterprise Bugsnag endpoint. | + +### `newrelic` + +| Parameter | Required | Description | +|-----------|----------|-------------------------------------------------------| +| `licensekey` | yes | License key provided by New Relic. | +| `name` | no | New Relic application name. | +| `verbose`| no | Set to `true` to enable New Relic debugging output on `stdout`. 
| + +## `http` + +```none +http: + addr: localhost:5000 + net: tcp + prefix: /my/nested/registry/ + host: https://myregistryaddress.org:5000 + secret: asecretforlocaldevelopment + relativeurls: false + tls: + certificate: /path/to/x509/public + key: /path/to/x509/private + clientcas: + - /path/to/ca.pem + - /path/to/another/ca.pem + letsencrypt: + cachefile: /path/to/cache-file + email: emailused@letsencrypt.com + debug: + addr: localhost:5001 + headers: + X-Content-Type-Options: [nosniff] + http2: + disabled: false +``` + +The `http` option details the configuration for the HTTP server that hosts the +registry. + +| Parameter | Required | Description | +|-----------|----------|-------------------------------------------------------| +| `addr` | yes | The address for which the server should accept connections. The form depends on a network type (see the `net` option). Use `HOST:PORT` for TCP and `FILE` for a UNIX socket. | +| `net` | no | The network used to create a listening socket. Known networks are `unix` and `tcp`. | +| `prefix` | no | If the server does not run at the root path, set this to the value of the prefix. The root path is the section before `v2`. It requires both preceding and trailing slashes, such as in the example `/path/`. | +| `host` | no | A fully-qualified URL for an externally-reachable address for the registry. If present, it is used when creating generated URLs. Otherwise, these URLs are derived from client requests. | +| `secret` | no | A random piece of data used to sign state that may be stored with the client to protect against tampering. For production environments you should generate a random piece of data using a cryptographically secure random generator. If you omit the secret, the registry will automatically generate a secret when it starts. **If you are building a cluster of registries behind a load balancer, you MUST ensure the secret is the same for all registries.**| +| `relativeurls`| no | If `true`, the registry returns relative URLs in Location headers. The client is responsible for resolving the correct URL. **This option is not compatible with Docker 1.7 and earlier.**| + + +### `tls` + +The `tls` structure within `http` is **optional**. Use this to configure TLS +for the server. If you already have a web server running on +the same host as the registry, you may prefer to configure TLS on that web server +and proxy connections to the registry server. + +| Parameter | Required | Description | +|-----------|----------|-------------------------------------------------------| +| `certificate` | yes | Absolute path to the x509 certificate file. | +| `key` | yes | Absolute path to the x509 private key file. | +| `clientcas` | no | An array of absolute paths to x509 CA files. | + +### `letsencrypt` + +The `letsencrypt` structure within `tls` is **optional**. Use this to configure +TLS certificates provided by +[Let's Encrypt](https://letsencrypt.org/how-it-works/). + +>**NOTE**: When using Let's Encrypt, ensure that the outward-facing address is +> accessible on port `443`. The registry defaults to listening on port `5000`. +> If you run the registry as a container, consider adding the flag `-p 443:5000` +> to the `docker run` command or using a similar setting in a cloud +> configuration. + +| Parameter | Required | Description | +|-----------|----------|-------------------------------------------------------| +| `cachefile` | yes | Absolute path to a file where the Let's Encrypt agent can cache data. 
| +| `email` | yes | The email address used to register with Let's Encrypt. | + +### `debug` + +The `debug` option is **optional** . Use it to configure a debug server that +can be helpful in diagnosing problems. The debug endpoint can be used for +monitoring registry metrics and health, as well as profiling. Sensitive +information may be available via the debug endpoint. Please be certain that +access to the debug endpoint is locked down in a production environment. + +The `debug` section takes a single required `addr` parameter, which specifies +the `HOST:PORT` on which the debug server should accept connections. + +### `headers` + +The `headers` option is **optional** . Use it to specify headers that the HTTP +server should include in responses. This can be used for security headers such +as `Strict-Transport-Security`. + +The `headers` option should contain an option for each header to include, where +the parameter name is the header's name, and the parameter value a list of the +header's payload values. + +Including `X-Content-Type-Options: [nosniff]` is recommended, so that browsers +will not interpret content as HTML if they are directed to load a page from the +registry. This header is included in the example configuration file. + +### `http2` + +The `http2` structure within `http` is **optional**. Use this to control http2 +settings for the registry. + +| Parameter | Required | Description | +|-----------|----------|-------------------------------------------------------| +| `disabled` | no | If `true`, then `http2` support is disabled. | + +## `notifications` + +```none +notifications: + endpoints: + - name: alistener + disabled: false + url: https://my.listener.com/event + headers: + timeout: 500 + threshold: 5 + backoff: 1000 + ignoredmediatypes: + - application/octet-stream +``` + +The notifications option is **optional** and currently may contain a single +option, `endpoints`. + +### `endpoints` + +The `endpoints` structure contains a list of named services (URLs) that can +accept event notifications. + +| Parameter | Required | Description | +|-----------|----------|-------------------------------------------------------| +| `name` | yes | A human-readable name for the service. | +| `disabled` | no | If `true`, notifications are disabled for the service.| +| `url` | yes | The URL to which events should be published. | +| `headers` | yes | A list of static headers to add to each request. Each header's name is a key beneath `headers`, and each value is a list of payloads for that header name. Values must always be lists. | +| `timeout` | yes | A value for the HTTP timeout. A positive integer and an optional suffix indicating the unit of time, which may be `ns`, `us`, `ms`, `s`, `m`, or `h`. If you omit the unit of time, `ns` is used. | +| `threshold` | yes | An integer specifying how long to wait before backing off a failure. | +| `backoff` | yes | How long the system backs off before retrying after a failure. A positive integer and an optional suffix indicating the unit of time, which may be `ns`, `us`, `ms`, `s`, `m`, or `h`. If you omit the unit of time, `ns` is used. | +| `ignoredmediatypes`|no| A list of target media types to ignore. Events with these target media types are not published to the endpoint. 
| + +## `redis` + +```none +redis: + addr: localhost:6379 + password: asecret + db: 0 + dialtimeout: 10ms + readtimeout: 10ms + writetimeout: 10ms + pool: + maxidle: 16 + maxactive: 64 + idletimeout: 300s +``` + +Declare parameters for constructing the `redis` connections. Registry instances +may use the Redis instance for several applications. Currently, it caches +information about immutable blobs. Most of the `redis` options control +how the registry connects to the `redis` instance. You can control the pool's +behavior with the [pool](#pool) subsection. + +You should configure Redis with the **allkeys-lru** eviction policy, because the +registry does not set an expiration value on keys. + +| Parameter | Required | Description | +|-----------|----------|-------------------------------------------------------| +| `addr` | yes | The address (host and port) of the Redis instance. | +| `password`| no | A password used to authenticate to the Redis instance.| +| `db` | no | The name of the database to use for each connection. | +| `dialtimeout` | no | The timeout for connecting to the Redis instance. | +| `readtimeout` | no | The timeout for reading from the Redis instance. | +| `writetimeout` | no | The timeout for writing to the Redis instance. | + +### `pool` + +```none +pool: + maxidle: 16 + maxactive: 64 + idletimeout: 300s +``` + +Use these settings to configure the behavior of the Redis connection pool. + +| Parameter | Required | Description | +|-----------|----------|-------------------------------------------------------| +| `maxidle` | no | The maximum number of idle connections in the pool. | +| `maxactive`| no | The maximum number of connections which can be open before blocking a connection request. | +| `idletimeout`| no | How long to wait before closing inactive connections. | + +## `health` + +```none +health: + storagedriver: + enabled: true + interval: 10s + threshold: 3 + file: + - file: /path/to/checked/file + interval: 10s + http: + - uri: http://server.to.check/must/return/200 + headers: + Authorization: [Basic QWxhZGRpbjpvcGVuIHNlc2FtZQ==] + statuscode: 200 + timeout: 3s + interval: 10s + threshold: 3 + tcp: + - addr: redis-server.domain.com:6379 + timeout: 3s + interval: 10s + threshold: 3 +``` + +The health option is **optional**, and contains preferences for a periodic +health check on the storage driver's backend storage, as well as optional +periodic checks on local files, HTTP URIs, and/or TCP servers. The results of +the health checks are available at the `/debug/health` endpoint on the debug +HTTP server if the debug HTTP server is enabled (see http section). + +### `storagedriver` + +The `storagedriver` structure contains options for a health check on the +configured storage driver's backend storage. The health check is only active +when `enabled` is set to `true`. + +| Parameter | Required | Description | +|-----------|----------|-------------------------------------------------------| +| `enabled` | yes | Set to `true` to enable storage driver health checks or `false` to disable them. | +| `interval`| no | How long to wait between repetitions of the storage driver health check. A positive integer and an optional suffix indicating the unit of time. The suffix is one of `ns`, `us`, `ms`, `s`, `m`, or `h`. Defaults to `10s` if the value is omitted. If you specify a value but omit the suffix, the value is interpreted as a number of nanoseconds. 
|
+| `threshold`| no | A positive integer which represents the number of times the check must fail before the state is marked as unhealthy. If not specified, a single failure marks the state as unhealthy. |
+
+### `file`
+
+The `file` structure includes a list of paths to be periodically checked for the
+existence of a file. If a file exists at the given path, the health check will
+fail. You can use this mechanism to bring a registry out of rotation by creating
+a file.
+
+| Parameter | Required | Description |
+|-----------|----------|-------------------------------------------------------|
+| `file` | yes | The path to check for existence of a file. |
+| `interval`| no | How long to wait before repeating the check. A positive integer and an optional suffix indicating the unit of time. The suffix is one of `ns`, `us`, `ms`, `s`, `m`, or `h`. Defaults to `10s` if the value is omitted. |
+
+### `http`
+
+The `http` structure includes a list of HTTP URIs to periodically check with
+`HEAD` requests. If a `HEAD` request does not complete or returns an unexpected
+status code, the health check will fail.
+
+| Parameter | Required | Description |
+|-----------|----------|-------------------------------------------------------|
+| `uri` | yes | The URI to check. |
+| `headers` | no | Static headers to add to each request. Each header's name is a key beneath `headers`, and each value is a list of payloads for that header name. Values must always be lists. |
+| `statuscode` | no | The expected status code from the HTTP URI. Defaults to `200`. |
+| `timeout` | no | How long to wait before timing out the HTTP request. A positive integer and an optional suffix indicating the unit of time. The suffix is one of `ns`, `us`, `ms`, `s`, `m`, or `h`. If you specify a value but omit the suffix, the value is interpreted as a number of nanoseconds. |
+| `interval`| no | How long to wait before repeating the check. A positive integer and an optional suffix indicating the unit of time. The suffix is one of `ns`, `us`, `ms`, `s`, `m`, or `h`. Defaults to `10s` if the value is omitted. If you specify a value but omit the suffix, the value is interpreted as a number of nanoseconds. |
+| `threshold`| no | The number of times the check must fail before the state is marked as unhealthy. If this field is not specified, a single failure marks the state as unhealthy. |
+
+### `tcp`
+
+The `tcp` structure includes a list of TCP addresses to periodically check using
+TCP connection attempts. Addresses must include port numbers. If a connection
+attempt fails, the health check will fail.
+
+| Parameter | Required | Description |
+|-----------|----------|-------------------------------------------------------|
+| `addr` | yes | The TCP address and port to connect to. |
+| `timeout` | no | How long to wait before timing out the TCP connection. A positive integer and an optional suffix indicating the unit of time. The suffix is one of `ns`, `us`, `ms`, `s`, `m`, or `h`. If you specify a value but omit the suffix, the value is interpreted as a number of nanoseconds. |
+| `interval`| no | How long to wait between repetitions of the check. A positive integer and an optional suffix indicating the unit of time. The suffix is one of `ns`, `us`, `ms`, `s`, `m`, or `h`. Defaults to `10s` if the value is omitted. If you specify a value but omit the suffix, the value is interpreted as a number of nanoseconds. |
+| `threshold`| no | The number of times the check must fail before the state is marked as unhealthy.
If this field is not specified, a single failure marks the state as unhealthy. | + + +## `proxy` + +``` +proxy: + remoteurl: https://registry-1.docker.io + username: [username] + password: [password] +``` + +The `proxy` structure allows a registry to be configured as a pull-through cache +to Docker Hub. See +[mirror](https://github.com/docker/docker.github.io/tree/master/registry/recipes/mirror.md) +for more information. Pushing to a registry configured as a pull-through cache +is unsupported. + +| Parameter | Required | Description | +|-----------|----------|-------------------------------------------------------| +| `remoteurl`| yes | The URL for the repository on Docker Hub. | +| `username` | no | The username registered with Docker Hub which has access to the repository. | +| `password` | no | The password used to authenticate to Docker Hub using the username specified in `username`. | + + +To enable pulling private repositories (e.g. `batman/robin`) specify the +username (such as `batman`) and the password for that username. + +> **Note**: These private repositories are stored in the proxy cache's storage. +> Take appropriate measures to protect access to the proxy cache. + +## `compatibility` + +```none +compatibility: + schema1: + signingkeyfile: /etc/registry/key.json +``` + +Use the `compatibility` structure to configure handling of older and deprecated +features. Each subsection defines such a feature with configurable behavior. + +### `schema1` + +| Parameter | Required | Description | +|-----------|----------|-------------------------------------------------------| +| `signingkeyfile` | no | The signing private key used to add signatures to `schema1` manifests. If no signing key is provided, a new ECDSA key is generated when the registry starts. | + +## `validation` + +```none +validation: + enabled: true + manifests: + urls: + allow: + - ^https?://([^/]+\.)*example\.com/ + deny: + - ^https?://www\.example\.com/ +``` + +### `enabled` + +Use the `enabled` flag to enable the other options in the `validation` +section. They are disabled by default. + +### `manifests` + +Use the `manifest` subsection to configure manifest validation. + +#### `urls` + +The `allow` and `deny` options are each a list of +[regular expressions](https://godoc.org/regexp/syntax) that restrict the URLs in +pushed manifests. + +If `allow` is unset, pushing a manifest containing URLs fails. + +If `allow` is set, pushing a manifest succeeds only if all URLs match +one of the `allow` regular expressions **and** one of the following holds: + +1. `deny` is unset. +2. `deny` is set but no URLs within the manifest match any of the `deny` regular + expressions. + +## Example: Development configuration + +You can use this simple example for local development: + +```none +version: 0.1 +log: + level: debug +storage: + filesystem: + rootdirectory: /var/lib/registry +http: + addr: localhost:5000 + secret: asecretforlocaldevelopment + debug: + addr: localhost:5001 +``` + +This example configures the registry instance to run on port `5000`, binding to +`localhost`, with the `debug` server enabled. Registry data is stored in the +`/var/lib/registry` directory. Logging is set to `debug` mode, which is the most +verbose. + +See +[config-example.yml](https://github.com/docker/distribution/blob/master/cmd/registry/config-example.yml) +for another simple configuration. Both examples are generally useful for local +development. 
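+
+As a quick sanity check for a registry started with the development
+configuration above, you can probe the version-check endpoint described in the
+HTTP API V2 specification later in this document. The snippet below is an
+illustrative sketch only, not part of the registry itself; it assumes the
+registry is listening on `localhost:5000`, as in the example configuration.
+
+```go
+package main
+
+import (
+	"fmt"
+	"net/http"
+)
+
+func main() {
+	// The development configuration above serves the registry on localhost:5000.
+	resp, err := http.Get("http://localhost:5000/v2/")
+	if err != nil {
+		fmt.Println("registry not reachable:", err)
+		return
+	}
+	defer resp.Body.Close()
+
+	// A 200 OK (or 401 Unauthorized, if auth is configured) indicates a V2-capable registry.
+	fmt.Println("status:", resp.Status)
+	fmt.Println("api version:", resp.Header.Get("Docker-Distribution-API-Version"))
+}
+```
+
+A `404 Not Found` or other unexpected status from this endpoint indicates that
+the server does not implement V2 of the API.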
+ + +## Example: Middleware configuration + +This example configures [Amazon Cloudfront](http://aws.amazon.com/cloudfront/) +as the storage middleware in a registry. Middleware allows the registry to serve +layers via a content delivery network (CDN). This reduces requests to the +storage layer. + +Cloudfront requires the S3 storage driver. + +This is the configuration expressed in YAML: + +```none +middleware: + storage: + - name: cloudfront + disabled: false + options: + baseurl: http://d111111abcdef8.cloudfront.net + privatekey: /path/to/asecret.pem + keypairid: asecret + duration: 60 +``` + +See the configuration reference for [Cloudfront](#cloudfront) for more +information about configuration options. + +> **Note**: Cloudfront keys exist separately from other AWS keys. See +> [the documentation on AWS credentials](http://docs.aws.amazon.com/general/latest/gr/aws-security-credentials.html) +> for more information. diff --git a/vendor/github.com/docker/distribution/docs/spec/api.md b/vendor/github.com/docker/distribution/docs/spec/api.md new file mode 100644 index 000000000..74ac8e24f --- /dev/null +++ b/vendor/github.com/docker/distribution/docs/spec/api.md @@ -0,0 +1,5485 @@ +--- +title: "HTTP API V2" +description: "Specification for the Registry API." +keywords: ["registry, on-prem, images, tags, repository, distribution, api, advanced"] +--- + +# Docker Registry HTTP API V2 + +## Introduction + +The _Docker Registry HTTP API_ is the protocol to facilitate distribution of +images to the docker engine. It interacts with instances of the docker +registry, which is a service to manage information about docker images and +enable their distribution. The specification covers the operation of version 2 +of this API, known as _Docker Registry HTTP API V2_. + +While the V1 registry protocol is usable, there are several problems with the +architecture that have led to this new version. The main driver of this +specification is a set of changes to the docker the image format, covered in +[docker/docker#8093](https://github.com/docker/docker/issues/8093). +The new, self-contained image manifest simplifies image definition and improves +security. This specification will build on that work, leveraging new properties +of the manifest format to improve performance, reduce bandwidth usage and +decrease the likelihood of backend corruption. + +For relevant details and history leading up to this specification, please see +the following issues: + +- [docker/docker#8093](https://github.com/docker/docker/issues/8093) +- [docker/docker#9015](https://github.com/docker/docker/issues/9015) +- [docker/docker-registry#612](https://github.com/docker/docker-registry/issues/612) + +### Scope + +This specification covers the URL layout and protocols of the interaction +between docker registry and docker core. This will affect the docker core +registry API and the rewrite of docker-registry. Docker registry +implementations may implement other API endpoints, but they are not covered by +this specification. + +This includes the following features: + +- Namespace-oriented URI Layout +- PUSH/PULL registry server for V2 image manifest format +- Resumable layer PUSH support +- V2 Client library implementation + +While authentication and authorization support will influence this +specification, details of the protocol will be left to a future specification. +Relevant header definitions and error codes are present to provide an +indication of what a client may encounter. 
+ +#### Future + +There are features that have been discussed during the process of cutting this +specification. The following is an incomplete list: + +- Immutable image references +- Multiple architecture support +- Migration from v2compatibility representation + +These may represent features that are either out of the scope of this +specification, the purview of another specification or have been deferred to a +future version. + +### Use Cases + +For the most part, the use cases of the former registry API apply to the new +version. Differentiating use cases are covered below. + +#### Image Verification + +A docker engine instance would like to run verified image named +"library/ubuntu", with the tag "latest". The engine contacts the registry, +requesting the manifest for "library/ubuntu:latest". An untrusted registry +returns a manifest. Before proceeding to download the individual layers, the +engine verifies the manifest's signature, ensuring that the content was +produced from a trusted source and no tampering has occurred. After each layer +is downloaded, the engine verifies the digest of the layer, ensuring that the +content matches that specified by the manifest. + +#### Resumable Push + +Company X's build servers lose connectivity to docker registry before +completing an image layer transfer. After connectivity returns, the build +server attempts to re-upload the image. The registry notifies the build server +that the upload has already been partially attempted. The build server +responds by only sending the remaining data to complete the image file. + +#### Resumable Pull + +Company X is having more connectivity problems but this time in their +deployment datacenter. When downloading an image, the connection is +interrupted before completion. The client keeps the partial data and uses http +`Range` requests to avoid downloading repeated data. + +#### Layer Upload De-duplication + +Company Y's build system creates two identical docker layers from build +processes A and B. Build process A completes uploading the layer before B. +When process B attempts to upload the layer, the registry indicates that its +not necessary because the layer is already known. + +If process A and B upload the same layer at the same time, both operations +will proceed and the first to complete will be stored in the registry (Note: +we may modify this to prevent dogpile with some locking mechanism). + +### Changes + +The V2 specification has been written to work as a living document, specifying +only what is certain and leaving what is not specified open or to future +changes. Only non-conflicting additions should be made to the API and accepted +changes should avoid preventing future changes from happening. + +This section should be updated when changes are made to the specification, +indicating what is different. Optionally, we may start marking parts of the +specification to correspond with the versions enumerated here. + +Each set of changes is given a letter corresponding to a set of modifications +that were applied to the baseline specification. These are merely for +reference and shouldn't be used outside the specification other than to +identify a set of modifications. + +
+- `l`
+  - Document TOOMANYREQUESTS error code.
+
+- `k`
+  - Document use of Accept and Content-Type headers in manifests endpoint.
+
+- `j`
+  - Add ability to mount blobs across repositories.
+
+- `i`
+  - Clarified expected behavior response to manifest HEAD request.
+
+- `h`
+  - All mention of tarsum removed.
+
+- `g`
+  - Clarify behavior of pagination behavior with unspecified parameters.
+
+- `f`
+  - Specify the delete API for layers and manifests.
+
+- `e`
+  - Added support for listing registry contents.
+  - Added pagination to tags API.
+  - Added common approach to support pagination.
+
+- `d`
+  - Allow repository name components to be one character.
+  - Clarified that single component names are allowed.
+
+- `c`
+  - Added section covering digest format.
+  - Added more clarification that manifest cannot be deleted by tag.
+
+- `b`
+  - Added capability of doing streaming upload to PATCH blob upload.
+  - Updated PUT blob upload to no longer take final chunk, now requires entire data or no data.
+  - Removed `416 Requested Range Not Satisfiable` response status from PUT blob upload.
+
+- `a`
+  - Added support for immutable manifest references in manifest endpoints.
+  - Deleting a manifest by tag has been deprecated.
+  - Specified `Docker-Content-Digest` header for appropriate entities.
+  - Added error code for unsupported operations.
+ +## Overview + +This section covers client flows and details of the API endpoints. The URI +layout of the new API is structured to support a rich authentication and +authorization model by leveraging namespaces. All endpoints will be prefixed +by the API version and the repository name: + + /v2// + +For example, an API endpoint that will work with the `library/ubuntu` +repository, the URI prefix will be: + + /v2/library/ubuntu/ + +This scheme provides rich access control over various operations and methods +using the URI prefix and http methods that can be controlled in variety of +ways. + +Classically, repository names have always been two path components where each +path component is less than 30 characters. The V2 registry API does not +enforce this. The rules for a repository name are as follows: + +1. A repository name is broken up into _path components_. A component of a + repository name must be at least one lowercase, alpha-numeric characters, + optionally separated by periods, dashes or underscores. More strictly, it + must match the regular expression `[a-z0-9]+(?:[._-][a-z0-9]+)*`. +2. If a repository name has two or more path components, they must be + separated by a forward slash ("/"). +3. The total length of a repository name, including slashes, must be less than + 256 characters. + +These name requirements _only_ apply to the registry API and should accept a +superset of what is supported by other docker ecosystem components. + +All endpoints should support aggressive http caching, compression and range +headers, where appropriate. The new API attempts to leverage HTTP semantics +where possible but may break from standards to implement targeted features. + +For detail on individual endpoints, please see the [_Detail_](#detail) +section. + +### Errors + +Actionable failure conditions, covered in detail in their relevant sections, +are reported as part of 4xx responses, in a json response body. One or more +errors will be returned in the following format: + + { + "errors:" [{ + "code": , + "message": , + "detail": + }, + ... + ] + } + +The `code` field will be a unique identifier, all caps with underscores by +convention. The `message` field will be a human readable string. The optional +`detail` field may contain arbitrary json data providing information the +client can use to resolve the issue. + +While the client can take action on certain error codes, the registry may add +new error codes over time. All client implementations should treat unknown +error codes as `UNKNOWN`, allowing future error codes to be added without +breaking API compatibility. For the purposes of the specification error codes +will only be added and never removed. + +For a complete account of all error codes, please see the [_Errors_](#errors-2) +section. + +### API Version Check + +A minimal endpoint, mounted at `/v2/` will provide version support information +based on its response statuses. The request format is as follows: + + GET /v2/ + +If a `200 OK` response is returned, the registry implements the V2(.1) +registry API and the client may proceed safely with other V2 operations. +Optionally, the response may contain information about the supported paths in +the response body. The client should be prepared to ignore this data. + +If a `401 Unauthorized` response is returned, the client should take action +based on the contents of the "WWW-Authenticate" header and try the endpoint +again. 
Depending on access control setup, the client may still have to +authenticate against different resources, even if this check succeeds. + +If `404 Not Found` response status, or other unexpected status, is returned, +the client should proceed with the assumption that the registry does not +implement V2 of the API. + +When a `200 OK` or `401 Unauthorized` response is returned, the +"Docker-Distribution-API-Version" header should be set to "registry/2.0". +Clients may require this header value to determine if the endpoint serves this +API. When this header is omitted, clients may fallback to an older API version. + +### Content Digests + +This API design is driven heavily by [content addressability](http://en.wikipedia.org/wiki/Content-addressable_storage). +The core of this design is the concept of a content addressable identifier. It +uniquely identifies content by taking a collision-resistant hash of the bytes. +Such an identifier can be independently calculated and verified by selection +of a common _algorithm_. If such an identifier can be communicated in a secure +manner, one can retrieve the content from an insecure source, calculate it +independently and be certain that the correct content was obtained. Put simply, +the identifier is a property of the content. + +To disambiguate from other concepts, we call this identifier a _digest_. A +_digest_ is a serialized hash result, consisting of a _algorithm_ and _hex_ +portion. The _algorithm_ identifies the methodology used to calculate the +digest. The _hex_ portion is the hex-encoded result of the hash. + +We define a _digest_ string to match the following grammar: +``` +digest := algorithm ":" hex +algorithm := /[A-Fa-f0-9_+.-]+/ +hex := /[A-Fa-f0-9]+/ +``` + +Some examples of _digests_ include the following: + +digest | description | +----------------------------------------------------------------------------------|------------------------------------------------ +sha256:6c3c624b58dbbcd3c0dd82b4c53f04194d1247c6eebdaab7c610cf7d66709b3b | Common sha256 based digest | + +While the _algorithm_ does allow one to implement a wide variety of +algorithms, compliant implementations should use sha256. Heavy processing of +input before calculating a hash is discouraged to avoid degrading the +uniqueness of the _digest_ but some canonicalization may be performed to +ensure consistent identifiers. + +Let's use a simple example in pseudo-code to demonstrate a digest calculation: +``` +let C = 'a small string' +let B = sha256(C) +let D = 'sha256:' + EncodeHex(B) +let ID(C) = D +``` + +Above, we have bytestring `C` passed into a function, `SHA256`, that returns a +bytestring `B`, which is the hash of `C`. `D` gets the algorithm concatenated +with the hex encoding of `B`. We then define the identifier of `C` to `ID(C)` +as equal to `D`. A digest can be verified by independently calculating `D` and +comparing it with identifier `ID(C)`. + +#### Digest Header + +To provide verification of http content, any response may include a +`Docker-Content-Digest` header. This will include the digest of the target +entity returned in the response. For blobs, this is the entire blob content. For +manifests, this is the manifest body without the signature content, also known +as the JWS payload. Note that the commonly used canonicalization for digest +calculation may be dependent on the mediatype of the content, such as with +manifests. + +The client may choose to ignore the header or may verify it to ensure content +integrity and transport security. 
This is most important when fetching by a +digest. To ensure security, the content should be verified against the digest +used to fetch the content. At times, the returned digest may differ from that +used to initiate a request. Such digests are considered to be from different +_domains_, meaning they have different values for _algorithm_. In such a case, +the client may choose to verify the digests in both domains or ignore the +server's digest. To maintain security, the client _must_ always verify the +content against the _digest_ used to fetch the content. + +> __IMPORTANT:__ If a _digest_ is used to fetch content, the client should use +> the same digest used to fetch the content to verify it. The header +> `Docker-Content-Digest` should not be trusted over the "local" digest. + +### Pulling An Image + +An "image" is a combination of a JSON manifest and individual layer files. The +process of pulling an image centers around retrieving these two components. + +The first step in pulling an image is to retrieve the manifest. For reference, +the relevant manifest fields for the registry are the following: + + field | description | +----------|------------------------------------------------| +name | The name of the image. | +tag | The tag for this version of the image. | +fsLayers | A list of layer descriptors (including digest) | +signature | A JWS used to verify the manifest content | + +For more information about the manifest format, please see +[docker/docker#8093](https://github.com/docker/docker/issues/8093). + +When the manifest is in hand, the client must verify the signature to ensure +the names and layers are valid. Once confirmed, the client will then use the +digests to download the individual layers. Layers are stored in as blobs in +the V2 registry API, keyed by their digest. + +#### Pulling an Image Manifest + +The image manifest can be fetched with the following url: + +``` +GET /v2//manifests/ +``` + +The `name` and `reference` parameter identify the image and are required. The +reference may include a tag or digest. + +The client should include an Accept header indicating which manifest content +types it supports. For more details on the manifest formats and their content +types, see [manifest-v2-1.md](manifest-v2-1.md) and +[manifest-v2-2.md](manifest-v2-2.md). In a successful response, the Content-Type +header will indicate which manifest type is being returned. + +A `404 Not Found` response will be returned if the image is unknown to the +registry. If the image exists and the response is successful, the image +manifest will be returned, with the following format (see +[docker/docker#8093](https://github.com/docker/docker/issues/8093) for details): + + { + "name": , + "tag": , + "fsLayers": [ + { + "blobSum": + }, + ... + ] + ], + "history": , + "signature": + } + +The client should verify the returned manifest signature for authenticity +before fetching layers. + +##### Existing Manifests + +The image manifest can be checked for existence with the following url: + +``` +HEAD /v2//manifests/ +``` + +The `name` and `reference` parameter identify the image and are required. The +reference may include a tag or digest. + +A `404 Not Found` response will be returned if the image is unknown to the +registry. If the image exists and the response is successful the response will +be as follows: + +``` +200 OK +Content-Length: +Docker-Content-Digest: +``` + + +#### Pulling a Layer + +Layers are stored in the blob portion of the registry, keyed by digest. 
+Pulling a layer is carried out by a standard http request. The URL is as +follows: + + GET /v2//blobs/ + +Access to a layer will be gated by the `name` of the repository but is +identified uniquely in the registry by `digest`. + +This endpoint may issue a 307 (302 for /blobs/uploads/ +``` + +The parameters of this request are the image namespace under which the layer +will be linked. Responses to this request are covered below. + +##### Existing Layers + +The existence of a layer can be checked via a `HEAD` request to the blob store +API. The request should be formatted as follows: + +``` +HEAD /v2//blobs/ +``` + +If the layer with the digest specified in `digest` is available, a 200 OK +response will be received, with no actual body content (this is according to +http specification). The response will look as follows: + +``` +200 OK +Content-Length: +Docker-Content-Digest: +``` + +When this response is received, the client can assume that the layer is +already available in the registry under the given name and should take no +further action to upload the layer. Note that the binary digests may differ +for the existing registry layer, but the digests will be guaranteed to match. + +##### Uploading the Layer + +If the POST request is successful, a `202 Accepted` response will be returned +with the upload URL in the `Location` header: + +``` +202 Accepted +Location: /v2//blobs/uploads/ +Range: bytes=0- +Content-Length: 0 +Docker-Upload-UUID: +``` + +The rest of the upload process can be carried out with the returned url, +called the "Upload URL" from the `Location` header. All responses to the +upload url, whether sending data or getting status, will be in this format. +Though the URI format (`/v2//blobs/uploads/`) for the `Location` +header is specified, clients should treat it as an opaque url and should never +try to assemble it. While the `uuid` parameter may be an actual UUID, this +proposal imposes no constraints on the format and clients should never impose +any. + +If clients need to correlate local upload state with remote upload state, the +contents of the `Docker-Upload-UUID` header should be used. Such an id can be +used to key the last used location header when implementing resumable uploads. + +##### Upload Progress + +The progress and chunk coordination of the upload process will be coordinated +through the `Range` header. While this is a non-standard use of the `Range` +header, there are examples of [similar approaches](https://developers.google.com/youtube/v3/guides/using_resumable_upload_protocol) in APIs with heavy use. +For an upload that just started, for an example with a 1000 byte layer file, +the `Range` header would be as follows: + +``` +Range: bytes=0-0 +``` + +To get the status of an upload, issue a GET request to the upload URL: + +``` +GET /v2//blobs/uploads/ +Host: +``` + +The response will be similar to the above, except will return 204 status: + +``` +204 No Content +Location: /v2//blobs/uploads/ +Range: bytes=0- +Docker-Upload-UUID: +``` + +Note that the HTTP `Range` header byte ranges are inclusive and that will be +honored, even in non-standard use cases. + +##### Monolithic Upload + +A monolithic upload is simply a chunked upload with a single chunk and may be +favored by clients that would like to avoided the complexity of chunking. 
To +carry out a "monolithic" upload, one can simply put the entire content blob to +the provided URL: + +``` +PUT /v2//blobs/uploads/?digest= +Content-Length: +Content-Type: application/octet-stream + + +``` + +The "digest" parameter must be included with the PUT request. Please see the +[_Completed Upload_](#completed-upload) section for details on the parameters +and expected responses. + +##### Chunked Upload + +To carry out an upload of a chunk, the client can specify a range header and +only include that part of the layer file: + +``` +PATCH /v2//blobs/uploads/ +Content-Length: +Content-Range: - +Content-Type: application/octet-stream + + +``` + +There is no enforcement on layer chunk splits other than that the server must +receive them in order. The server may enforce a minimum chunk size. If the +server cannot accept the chunk, a `416 Requested Range Not Satisfiable` +response will be returned and will include a `Range` header indicating the +current status: + +``` +416 Requested Range Not Satisfiable +Location: /v2//blobs/uploads/ +Range: 0- +Content-Length: 0 +Docker-Upload-UUID: +``` + +If this response is received, the client should resume from the "last valid +range" and upload the subsequent chunk. A 416 will be returned under the +following conditions: + +- Invalid Content-Range header format +- Out of order chunk: the range of the next chunk must start immediately after + the "last valid range" from the previous response. + +When a chunk is accepted as part of the upload, a `202 Accepted` response will +be returned, including a `Range` header with the current upload status: + +``` +202 Accepted +Location: /v2//blobs/uploads/ +Range: bytes=0- +Content-Length: 0 +Docker-Upload-UUID: +``` + +##### Completed Upload + +For an upload to be considered complete, the client must submit a `PUT` +request on the upload endpoint with a digest parameter. If it is not provided, +the upload will not be considered complete. The format for the final chunk +will be as follows: + +``` +PUT /v2//blob/uploads/?digest= +Content-Length: +Content-Range: - +Content-Type: application/octet-stream + + +``` + +Optionally, if all chunks have already been uploaded, a `PUT` request with a +`digest` parameter and zero-length body may be sent to complete and validated +the upload. Multiple "digest" parameters may be provided with different +digests. The server may verify none or all of them but _must_ notify the +client if the content is rejected. + +When the last chunk is received and the layer has been validated, the client +will receive a `201 Created` response: + +``` +201 Created +Location: /v2//blobs/ +Content-Length: 0 +Docker-Content-Digest: +``` + +The `Location` header will contain the registry URL to access the accepted +layer file. The `Docker-Content-Digest` header returns the canonical digest of +the uploaded blob which may differ from the provided digest. Most clients may +ignore the value but if it is used, the client should verify the value against +the uploaded blob data. + +###### Digest Parameter + +The "digest" parameter is designed as an opaque parameter to support +verification of a successful transfer. For example, an HTTP URI parameter +might be as follows: + +``` +sha256:6c3c624b58dbbcd3c0dd82b4c53f04194d1247c6eebdaab7c610cf7d66709b3b +``` + +Given this parameter, the registry will verify that the provided content does +match this digest. + +##### Canceling an Upload + +An upload can be cancelled by issuing a DELETE request to the upload endpoint. 
+The format will be as follows: + +``` +DELETE /v2//blobs/uploads/ +``` + +After this request is issued, the upload uuid will no longer be valid and the +registry server will dump all intermediate data. While uploads will time out +if not completed, clients should issue this request if they encounter a fatal +error but still have the ability to issue an http request. + +##### Cross Repository Blob Mount + +A blob may be mounted from another repository that the client has read access +to, removing the need to upload a blob already known to the registry. To issue +a blob mount instead of an upload, a POST request should be issued in the +following format: + +``` +POST /v2//blobs/uploads/?mount=&from= +Content-Length: 0 +``` + +If the blob is successfully mounted, the client will receive a `201 Created` +response: + +``` +201 Created +Location: /v2//blobs/ +Content-Length: 0 +Docker-Content-Digest: +``` + +The `Location` header will contain the registry URL to access the accepted +layer file. The `Docker-Content-Digest` header returns the canonical digest of +the uploaded blob which may differ from the provided digest. Most clients may +ignore the value but if it is used, the client should verify the value against +the uploaded blob data. + +If a mount fails due to invalid repository or digest arguments, the registry +will fall back to the standard upload behavior and return a `202 Accepted` with +the upload URL in the `Location` header: + +``` +202 Accepted +Location: /v2//blobs/uploads/ +Range: bytes=0- +Content-Length: 0 +Docker-Upload-UUID: +``` + +This behavior is consistent with older versions of the registry, which do not +recognize the repository mount query parameters. + +Note: a client may issue a HEAD request to check existence of a blob in a source +repository to distinguish between the registry not supporting blob mounts and +the blob not existing in the expected repository. + +##### Errors + +If an 502, 503 or 504 error is received, the client should assume that the +download can proceed due to a temporary condition, honoring the appropriate +retry mechanism. Other 5xx errors should be treated as terminal. + +If there is a problem with the upload, a 4xx error will be returned indicating +the problem. After receiving a 4xx response (except 416, as called out above), +the upload will be considered failed and the client should take appropriate +action. + +Note that the upload url will not be available forever. If the upload uuid is +unknown to the registry, a `404 Not Found` response will be returned and the +client must restart the upload process. + +### Deleting a Layer + +A layer may be deleted from the registry via its `name` and `digest`. A +delete may be issued with the following request format: + + DELETE /v2//blobs/ + +If the blob exists and has been successfully deleted, the following response +will be issued: + + 202 Accepted + Content-Length: None + +If the blob had already been deleted or did not exist, a `404 Not Found` +response will be issued instead. + +If a layer is deleted which is referenced by a manifest in the registry, +then the complete images will not be resolvable. + +#### Pushing an Image Manifest + +Once all of the layers for an image are uploaded, the client can upload the +image manifest. An image can be pushed using the following request format: + + PUT /v2//manifests/ + Content-Type: + + { + "name": , + "tag": , + "fsLayers": [ + { + "blobSum": + }, + ... + ] + ], + "history": , + "signature": , + ... 
+ } + +The `name` and `reference` fields of the response body must match those +specified in the URL. The `reference` field may be a "tag" or a "digest". The +content type should match the type of the manifest being uploaded, as specified +in [manifest-v2-1.md](manifest-v2-1.md) and [manifest-v2-2.md](manifest-v2-2.md). + +If there is a problem with pushing the manifest, a relevant 4xx response will +be returned with a JSON error message. Please see the +[_PUT Manifest_](#put-manifest) section for details on possible error codes that +may be returned. + +If one or more layers are unknown to the registry, `BLOB_UNKNOWN` errors are +returned. The `detail` field of the error response will have a `digest` field +identifying the missing blob. An error is returned for each unknown blob. The +response format is as follows: + + { + "errors:" [{ + "code": "BLOB_UNKNOWN", + "message": "blob unknown to registry", + "detail": { + "digest": + } + }, + ... + ] + } + +### Listing Repositories + +Images are stored in collections, known as a _repository_, which is keyed by a +`name`, as seen throughout the API specification. A registry instance may +contain several repositories. The list of available repositories is made +available through the _catalog_. + +The catalog for a given registry can be retrieved with the following request: + +``` +GET /v2/_catalog +``` + +The response will be in the following format: + +``` +200 OK +Content-Type: application/json + +{ + "repositories": [ + , + ... + ] +} +``` + +Note that the contents of the response are specific to the registry +implementation. Some registries may opt to provide a full catalog output, +limit it based on the user's access level or omit upstream results, if +providing mirroring functionality. Subsequently, the presence of a repository +in the catalog listing only means that the registry *may* provide access to +the repository at the time of the request. Conversely, a missing entry does +*not* mean that the registry does not have the repository. More succinctly, +the presence of a repository only guarantees that it is there but not that it +is _not_ there. + +For registries with a large number of repositories, this response may be quite +large. If such a response is expected, one should use pagination. A registry +may also limit the amount of responses returned even if pagination was not +explicitly requested. In this case the `Link` header will be returned along +with the results, and subsequent results can be obtained by following the link +as if pagination had been initially requested. + +For details of the `Link` header, please see the [_Pagination_](#pagination) +section. + +#### Pagination + +Paginated catalog results can be retrieved by adding an `n` parameter to the +request URL, declaring that the response should be limited to `n` results. +Starting a paginated flow begins as follows: + +``` +GET /v2/_catalog?n= +``` + +The above specifies that a catalog response should be returned, from the start of +the result set, ordered lexically, limiting the number of results to `n`. The +response to such a request would look as follows: + +``` +200 OK +Content-Type: application/json +Link: <?n=&last=>; rel="next" + +{ + "repositories": [ + , + ... + ] +} +``` + +The above includes the _first_ `n` entries from the result set. To get the +_next_ `n` entries, one can create a URL where the argument `last` has the +value from `repositories[len(repositories)-1]`. 
If there are indeed more +results, the URL for the next block is encoded in an +[RFC5988](https://tools.ietf.org/html/rfc5988) `Link` header, as a "next" +relation. The presence of the `Link` header communicates to the client that +the entire result set has not been returned and another request must be +issued. If the header is not present, the client can assume that all results +have been received. + +> __NOTE:__ In the request template above, note that the brackets +> are required. For example, if the url is +> `http://example.com/v2/_catalog?n=20&last=b`, the value of the header would +> be `; rel="next"`. Please see +> [RFC5988](https://tools.ietf.org/html/rfc5988) for details. + +Compliant client implementations should always use the `Link` header +value when proceeding through results linearly. The client may construct URLs +to skip forward in the catalog. + +To get the next result set, a client would issue the request as follows, using +the URL encoded in the described `Link` header: + +``` +GET /v2/_catalog?n=&last= +``` + +The above process should then be repeated until the `Link` header is no longer +set. + +The catalog result set is represented abstractly as a lexically sorted list, +where the position in that list can be specified by the query term `last`. The +entries in the response start _after_ the term specified by `last`, up to `n` +entries. + +The behavior of `last` is quite simple when demonstrated with an example. Let +us say the registry has the following repositories: + +``` +a +b +c +d +``` + +If the value of `n` is 2, _a_ and _b_ will be returned on the first response. +The `Link` header returned on the response will have `n` set to 2 and last set +to _b_: + +``` +Link: <?n=2&last=b>; rel="next" +``` + +The client can then issue the request with the above value from the `Link` +header, receiving the values _c_ and _d_. Note that `n` may change on the second +to last response or be fully omitted, depending on the server implementation. + +### Listing Image Tags + +It may be necessary to list all of the tags under a given repository. The tags +for an image repository can be retrieved with the following request: + + GET /v2//tags/list + +The response will be in the following format: + + 200 OK + Content-Type: application/json + + { + "name": , + "tags": [ + , + ... + ] + } + +For repositories with a large number of tags, this response may be quite +large. If such a response is expected, one should use the pagination. + +#### Pagination + +Paginated tag results can be retrieved by adding the appropriate parameters to +the request URL described above. The behavior of tag pagination is identical +to that specified for catalog pagination. We cover a simple flow to highlight +any differences. + +Starting a paginated flow may begin as follows: + +``` +GET /v2//tags/list?n= +``` + +The above specifies that a tags response should be returned, from the start of +the result set, ordered lexically, limiting the number of results to `n`. The +response to such a request would look as follows: + +``` +200 OK +Content-Type: application/json +Link: <?n=&last=>; rel="next" + +{ + "name": , + "tags": [ + , + ... + ] +} +``` + +To get the next result set, a client would issue the request as follows, using +the value encoded in the [RFC5988](https://tools.ietf.org/html/rfc5988) `Link` +header: + +``` +GET /v2//tags/list?n=&last= +``` + +The above process should then be repeated until the `Link` header is no longer +set in the response. 
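+
+One way such a loop might look in a Go client is sketched below; the registry
+address, error handling and the minimal `Link` header parsing are illustrative
+assumptions, not requirements of this specification.
+
+```
+package registry
+
+import (
+	"encoding/json"
+	"fmt"
+	"net/http"
+	"strings"
+)
+
+// listAllTags pages through the tags list for a repository, following the
+// RFC 5988 Link header until the registry stops returning one.
+func listAllTags(registry, name string, pageSize int) ([]string, error) {
+	var tags []string
+	next := fmt.Sprintf("%s/v2/%s/tags/list?n=%d", registry, name, pageSize)
+
+	for next != "" {
+		resp, err := http.Get(next)
+		if err != nil {
+			return nil, err
+		}
+		if resp.StatusCode != http.StatusOK {
+			resp.Body.Close()
+			return nil, fmt.Errorf("unexpected status: %s", resp.Status)
+		}
+
+		var page struct {
+			Name string   `json:"name"`
+			Tags []string `json:"tags"`
+		}
+		if err := json.NewDecoder(resp.Body).Decode(&page); err != nil {
+			resp.Body.Close()
+			return nil, err
+		}
+		resp.Body.Close()
+		tags = append(tags, page.Tags...)
+
+		// The next page, if any, is carried as: Link: <url>; rel="next"
+		next = ""
+		if link := resp.Header.Get("Link"); link != "" {
+			start, end := strings.Index(link, "<"), strings.Index(link, ">")
+			if start >= 0 && end > start {
+				next = link[start+1 : end]
+				if strings.HasPrefix(next, "/") {
+					next = registry + next
+				}
+			}
+		}
+	}
+	return tags, nil
+}
+```
+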
The behavior of the `last` parameter, the provided +response result, lexical ordering and encoding of the `Link` header are +identical to that of catalog pagination. + +### Deleting an Image + +An image may be deleted from the registry via its `name` and `reference`. A +delete may be issued with the following request format: + + DELETE /v2//manifests/ + +For deletes, `reference` *must* be a digest or the delete will fail. If the +image exists and has been successfully deleted, the following response will be +issued: + + 202 Accepted + Content-Length: None + +If the image had already been deleted or did not exist, a `404 Not Found` +response will be issued instead. + +> **Note** When deleting a manifest from a registry version 2.3 or later, the +> following header must be used when `HEAD` or `GET`-ing the manifest to obtain +> the correct digest to delete: + + Accept: application/vnd.docker.distribution.manifest.v2+json + +> for more details, see: [compatibility.md](../compatibility.md#content-addressable-storage-cas) + +## Detail + +> **Note**: This section is still under construction. For the purposes of +> implementation, if any details below differ from the described request flows +> above, the section below should be corrected. When they match, this note +> should be removed. + +The behavior of the endpoints are covered in detail in this section, organized +by route and entity. All aspects of the request and responses are covered, +including headers, parameters and body formats. Examples of requests and their +corresponding responses, with success and failure, are enumerated. + +> **Note**: The sections on endpoint detail are arranged with an example +> request, a description of the request, followed by information about that +> request. + +A list of methods and URIs are covered in the table below: + +|Method|Path|Entity|Description| +|------|----|------|-----------| +| GET | `/v2/` | Base | Check that the endpoint implements Docker Registry API V2. | +| GET | `/v2//tags/list` | Tags | Fetch the tags under the repository identified by `name`. | +| GET | `/v2//manifests/` | Manifest | Fetch the manifest identified by `name` and `reference` where `reference` can be a tag or digest. A `HEAD` request can also be issued to this endpoint to obtain resource information without receiving all data. | +| PUT | `/v2//manifests/` | Manifest | Put the manifest identified by `name` and `reference` where `reference` can be a tag or digest. | +| DELETE | `/v2//manifests/` | Manifest | Delete the manifest identified by `name` and `reference`. Note that a manifest can _only_ be deleted by `digest`. | +| GET | `/v2//blobs/` | Blob | Retrieve the blob from the registry identified by `digest`. A `HEAD` request can also be issued to this endpoint to obtain resource information without receiving all data. | +| DELETE | `/v2//blobs/` | Blob | Delete the blob identified by `name` and `digest` | +| POST | `/v2//blobs/uploads/` | Initiate Blob Upload | Initiate a resumable blob upload. If successful, an upload location will be provided to complete the upload. Optionally, if the `digest` parameter is present, the request body will be used to complete the upload in a single request. | +| GET | `/v2//blobs/uploads/` | Blob Upload | Retrieve status of upload identified by `uuid`. The primary purpose of this endpoint is to resolve the current status of a resumable upload. | +| PATCH | `/v2//blobs/uploads/` | Blob Upload | Upload a chunk of data for the specified upload. 
| +| PUT | `/v2//blobs/uploads/` | Blob Upload | Complete the upload specified by `uuid`, optionally appending the body as the final chunk. | +| DELETE | `/v2//blobs/uploads/` | Blob Upload | Cancel outstanding upload processes, releasing associated resources. If this is not called, the unfinished uploads will eventually timeout. | +| GET | `/v2/_catalog` | Catalog | Retrieve a sorted, json list of repositories available in the registry. | + + +The detail for each endpoint is covered in the following sections. + +### Errors + +The error codes encountered via the API are enumerated in the following table: + +|Code|Message|Description| +|----|-------|-----------| + `BLOB_UNKNOWN` | blob unknown to registry | This error may be returned when a blob is unknown to the registry in a specified repository. This can be returned with a standard get or if a manifest references an unknown layer during upload. + `BLOB_UPLOAD_INVALID` | blob upload invalid | The blob upload encountered an error and can no longer proceed. + `BLOB_UPLOAD_UNKNOWN` | blob upload unknown to registry | If a blob upload has been cancelled or was never started, this error code may be returned. + `DIGEST_INVALID` | provided digest did not match uploaded content | When a blob is uploaded, the registry will check that the content matches the digest provided by the client. The error may include a detail structure with the key "digest", including the invalid digest string. This error may also be returned when a manifest includes an invalid layer digest. + `MANIFEST_BLOB_UNKNOWN` | blob unknown to registry | This error may be returned when a manifest blob is unknown to the registry. + `MANIFEST_INVALID` | manifest invalid | During upload, manifests undergo several checks ensuring validity. If those checks fail, this error may be returned, unless a more specific error is included. The detail will contain information the failed validation. + `MANIFEST_UNKNOWN` | manifest unknown | This error is returned when the manifest, identified by name and tag is unknown to the repository. + `MANIFEST_UNVERIFIED` | manifest failed signature verification | During manifest upload, if the manifest fails signature verification, this error will be returned. + `NAME_INVALID` | invalid repository name | Invalid repository name encountered either during manifest validation or any API operation. + `NAME_UNKNOWN` | repository name not known to registry | This is returned if the name used during an operation is unknown to the registry. + `SIZE_INVALID` | provided length did not match content length | When a layer is uploaded, the provided size will be checked against the uploaded content. If they do not match, this error will be returned. + `TAG_INVALID` | manifest tag did not match URI | During a manifest upload, if the tag in the manifest does not match the uri tag, this error will be returned. + `UNAUTHORIZED` | authentication required | The access controller was unable to authenticate the client. Often this will be accompanied by a Www-Authenticate HTTP response header indicating how to authenticate. + `DENIED` | requested access to the resource is denied | The access controller denied access for the operation on a resource. + `UNSUPPORTED` | The operation is unsupported. | The operation was unsupported due to a missing implementation or invalid set of parameters. + + + +### Base + +Base V2 API route. Typically, this can be used for lightweight version checks and to validate registry authentication. 
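+
+For illustration, a client-side version check against this route might look
+as follows. This is a sketch limited to the status codes defined for the
+endpoint below; the function name is an assumption rather than part of the
+specification.
+
+```
+package registry
+
+import (
+	"fmt"
+	"net/http"
+)
+
+// checkV2Support probes GET /v2/ and interprets the status codes described
+// in the GET Base section: 200 means the V2 API is implemented, 401 means
+// authentication is required first, and 404 means V2 is not supported.
+func checkV2Support(registry string) error {
+	resp, err := http.Get(registry + "/v2/")
+	if err != nil {
+		return err
+	}
+	defer resp.Body.Close()
+
+	switch resp.StatusCode {
+	case http.StatusOK:
+		return nil
+	case http.StatusUnauthorized:
+		return fmt.Errorf("authentication required: %s", resp.Header.Get("WWW-Authenticate"))
+	case http.StatusNotFound:
+		return fmt.Errorf("registry does not implement the V2 API")
+	default:
+		return fmt.Errorf("unexpected status: %s", resp.Status)
+	}
+}
+```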
+ + + +#### GET Base + +Check that the endpoint implements Docker Registry API V2. + + + +``` +GET /v2/ +Host: +Authorization: +``` + + + + +The following parameters should be specified on the request: + +|Name|Kind|Description| +|----|----|-----------| +|`Host`|header|Standard HTTP Host Header. Should be set to the registry host.| +|`Authorization`|header|An RFC7235 compliant authorization header.| + + + + +###### On Success: OK + +``` +200 OK +``` + +The API implements V2 protocol and is accessible. + + + + +###### On Failure: Not Found + +``` +404 Not Found +``` + +The registry does not implement the V2 API. + + + +###### On Failure: Authentication Required + +``` +401 Unauthorized +WWW-Authenticate: realm="", ..." +Content-Length: +Content-Type: application/json; charset=utf-8 + +{ + "errors:" [ + { + "code": , + "message": "", + "detail": ... + }, + ... + ] +} +``` + +The client is not authenticated. + +The following headers will be returned on the response: + +|Name|Description| +|----|-----------| +|`WWW-Authenticate`|An RFC7235 compliant authentication challenge header.| +|`Content-Length`|Length of the JSON response body.| + + + +The error codes that may be included in the response body are enumerated below: + +|Code|Message|Description| +|----|-------|-----------| +| `UNAUTHORIZED` | authentication required | The access controller was unable to authenticate the client. Often this will be accompanied by a Www-Authenticate HTTP response header indicating how to authenticate. | + + + +###### On Failure: Too Many Requests + +``` +429 Too Many Requests +Content-Length: +Content-Type: application/json; charset=utf-8 + +{ + "errors:" [ + { + "code": , + "message": "", + "detail": ... + }, + ... + ] +} +``` + +The client made too many requests within a time interval. + +The following headers will be returned on the response: + +|Name|Description| +|----|-----------| +|`Content-Length`|Length of the JSON response body.| + + + +The error codes that may be included in the response body are enumerated below: + +|Code|Message|Description| +|----|-------|-----------| +| `TOOMANYREQUESTS` | too many requests | Returned when a client attempts to contact a service too many times | + + + + + +### Tags + +Retrieve information about tags. + + + +#### GET Tags + +Fetch the tags under the repository identified by `name`. + + +##### Tags + +``` +GET /v2//tags/list +Host: +Authorization: +``` + +Return all tags for the repository + + +The following parameters should be specified on the request: + +|Name|Kind|Description| +|----|----|-----------| +|`Host`|header|Standard HTTP Host Header. Should be set to the registry host.| +|`Authorization`|header|An RFC7235 compliant authorization header.| +|`name`|path|Name of the target repository.| + + + + +###### On Success: OK + +``` +200 OK +Content-Length: +Content-Type: application/json; charset=utf-8 + +{ + "name": , + "tags": [ + , + ... + ] +} +``` + +A list of tags for the named repository. + +The following headers will be returned with the response: + +|Name|Description| +|----|-----------| +|`Content-Length`|Length of the JSON response body.| + + + + +###### On Failure: Authentication Required + +``` +401 Unauthorized +WWW-Authenticate: realm="", ..." +Content-Length: +Content-Type: application/json; charset=utf-8 + +{ + "errors:" [ + { + "code": , + "message": "", + "detail": ... + }, + ... + ] +} +``` + +The client is not authenticated. 
+ +The following headers will be returned on the response: + +|Name|Description| +|----|-----------| +|`WWW-Authenticate`|An RFC7235 compliant authentication challenge header.| +|`Content-Length`|Length of the JSON response body.| + + + +The error codes that may be included in the response body are enumerated below: + +|Code|Message|Description| +|----|-------|-----------| +| `UNAUTHORIZED` | authentication required | The access controller was unable to authenticate the client. Often this will be accompanied by a Www-Authenticate HTTP response header indicating how to authenticate. | + + + +###### On Failure: No Such Repository Error + +``` +404 Not Found +Content-Length: +Content-Type: application/json; charset=utf-8 + +{ + "errors:" [ + { + "code": , + "message": "", + "detail": ... + }, + ... + ] +} +``` + +The repository is not known to the registry. + +The following headers will be returned on the response: + +|Name|Description| +|----|-----------| +|`Content-Length`|Length of the JSON response body.| + + + +The error codes that may be included in the response body are enumerated below: + +|Code|Message|Description| +|----|-------|-----------| +| `NAME_UNKNOWN` | repository name not known to registry | This is returned if the name used during an operation is unknown to the registry. | + + + +###### On Failure: Access Denied + +``` +403 Forbidden +Content-Length: +Content-Type: application/json; charset=utf-8 + +{ + "errors:" [ + { + "code": , + "message": "", + "detail": ... + }, + ... + ] +} +``` + +The client does not have required access to the repository. + +The following headers will be returned on the response: + +|Name|Description| +|----|-----------| +|`Content-Length`|Length of the JSON response body.| + + + +The error codes that may be included in the response body are enumerated below: + +|Code|Message|Description| +|----|-------|-----------| +| `DENIED` | requested access to the resource is denied | The access controller denied access for the operation on a resource. | + + + +###### On Failure: Too Many Requests + +``` +429 Too Many Requests +Content-Length: +Content-Type: application/json; charset=utf-8 + +{ + "errors:" [ + { + "code": , + "message": "", + "detail": ... + }, + ... + ] +} +``` + +The client made too many requests within a time interval. + +The following headers will be returned on the response: + +|Name|Description| +|----|-----------| +|`Content-Length`|Length of the JSON response body.| + + + +The error codes that may be included in the response body are enumerated below: + +|Code|Message|Description| +|----|-------|-----------| +| `TOOMANYREQUESTS` | too many requests | Returned when a client attempts to contact a service too many times | + + + +##### Tags Paginated + +``` +GET /v2//tags/list?n=&last= +``` + +Return a portion of the tags for the specified repository. + + +The following parameters should be specified on the request: + +|Name|Kind|Description| +|----|----|-----------| +|`name`|path|Name of the target repository.| +|`n`|query|Limit the number of entries in each response. It not present, all entries will be returned.| +|`last`|query|Result set will include values lexically after last.| + + + + +###### On Success: OK + +``` +200 OK +Content-Length: +Link: <?n=&last=>; rel="next" +Content-Type: application/json; charset=utf-8 + +{ + "name": , + "tags": [ + , + ... + ], +} +``` + +A list of tags for the named repository. 
+ +The following headers will be returned with the response: + +|Name|Description| +|----|-----------| +|`Content-Length`|Length of the JSON response body.| +|`Link`|RFC5988 compliant rel='next' with URL to next result set, if available| + + + + +###### On Failure: Authentication Required + +``` +401 Unauthorized +WWW-Authenticate: realm="", ..." +Content-Length: +Content-Type: application/json; charset=utf-8 + +{ + "errors:" [ + { + "code": , + "message": "", + "detail": ... + }, + ... + ] +} +``` + +The client is not authenticated. + +The following headers will be returned on the response: + +|Name|Description| +|----|-----------| +|`WWW-Authenticate`|An RFC7235 compliant authentication challenge header.| +|`Content-Length`|Length of the JSON response body.| + + + +The error codes that may be included in the response body are enumerated below: + +|Code|Message|Description| +|----|-------|-----------| +| `UNAUTHORIZED` | authentication required | The access controller was unable to authenticate the client. Often this will be accompanied by a Www-Authenticate HTTP response header indicating how to authenticate. | + + + +###### On Failure: No Such Repository Error + +``` +404 Not Found +Content-Length: +Content-Type: application/json; charset=utf-8 + +{ + "errors:" [ + { + "code": , + "message": "", + "detail": ... + }, + ... + ] +} +``` + +The repository is not known to the registry. + +The following headers will be returned on the response: + +|Name|Description| +|----|-----------| +|`Content-Length`|Length of the JSON response body.| + + + +The error codes that may be included in the response body are enumerated below: + +|Code|Message|Description| +|----|-------|-----------| +| `NAME_UNKNOWN` | repository name not known to registry | This is returned if the name used during an operation is unknown to the registry. | + + + +###### On Failure: Access Denied + +``` +403 Forbidden +Content-Length: +Content-Type: application/json; charset=utf-8 + +{ + "errors:" [ + { + "code": , + "message": "", + "detail": ... + }, + ... + ] +} +``` + +The client does not have required access to the repository. + +The following headers will be returned on the response: + +|Name|Description| +|----|-----------| +|`Content-Length`|Length of the JSON response body.| + + + +The error codes that may be included in the response body are enumerated below: + +|Code|Message|Description| +|----|-------|-----------| +| `DENIED` | requested access to the resource is denied | The access controller denied access for the operation on a resource. | + + + +###### On Failure: Too Many Requests + +``` +429 Too Many Requests +Content-Length: +Content-Type: application/json; charset=utf-8 + +{ + "errors:" [ + { + "code": , + "message": "", + "detail": ... + }, + ... + ] +} +``` + +The client made too many requests within a time interval. + +The following headers will be returned on the response: + +|Name|Description| +|----|-----------| +|`Content-Length`|Length of the JSON response body.| + + + +The error codes that may be included in the response body are enumerated below: + +|Code|Message|Description| +|----|-------|-----------| +| `TOOMANYREQUESTS` | too many requests | Returned when a client attempts to contact a service too many times | + + + + + +### Manifest + +Create, update, delete and retrieve manifests. + + + +#### GET Manifest + +Fetch the manifest identified by `name` and `reference` where `reference` can be a tag or digest. 
A `HEAD` request can also be issued to this endpoint to obtain resource information without receiving all data. + + + +``` +GET /v2//manifests/ +Host: +Authorization: +``` + + + + +The following parameters should be specified on the request: + +|Name|Kind|Description| +|----|----|-----------| +|`Host`|header|Standard HTTP Host Header. Should be set to the registry host.| +|`Authorization`|header|An RFC7235 compliant authorization header.| +|`name`|path|Name of the target repository.| +|`reference`|path|Tag or digest of the target manifest.| + + + + +###### On Success: OK + +``` +200 OK +Docker-Content-Digest: +Content-Type: + +{ + "name": , + "tag": , + "fsLayers": [ + { + "blobSum": "" + }, + ... + ] + ], + "history": , + "signature": +} +``` + +The manifest identified by `name` and `reference`. The contents can be used to identify and resolve resources required to run the specified image. + +The following headers will be returned with the response: + +|Name|Description| +|----|-----------| +|`Docker-Content-Digest`|Digest of the targeted content for the request.| + + + + +###### On Failure: Bad Request + +``` +400 Bad Request +Content-Type: application/json; charset=utf-8 + +{ + "errors:" [ + { + "code": , + "message": "", + "detail": ... + }, + ... + ] +} +``` + +The name or reference was invalid. + + + +The error codes that may be included in the response body are enumerated below: + +|Code|Message|Description| +|----|-------|-----------| +| `NAME_INVALID` | invalid repository name | Invalid repository name encountered either during manifest validation or any API operation. | +| `TAG_INVALID` | manifest tag did not match URI | During a manifest upload, if the tag in the manifest does not match the uri tag, this error will be returned. | + + + +###### On Failure: Authentication Required + +``` +401 Unauthorized +WWW-Authenticate: realm="", ..." +Content-Length: +Content-Type: application/json; charset=utf-8 + +{ + "errors:" [ + { + "code": , + "message": "", + "detail": ... + }, + ... + ] +} +``` + +The client is not authenticated. + +The following headers will be returned on the response: + +|Name|Description| +|----|-----------| +|`WWW-Authenticate`|An RFC7235 compliant authentication challenge header.| +|`Content-Length`|Length of the JSON response body.| + + + +The error codes that may be included in the response body are enumerated below: + +|Code|Message|Description| +|----|-------|-----------| +| `UNAUTHORIZED` | authentication required | The access controller was unable to authenticate the client. Often this will be accompanied by a Www-Authenticate HTTP response header indicating how to authenticate. | + + + +###### On Failure: No Such Repository Error + +``` +404 Not Found +Content-Length: +Content-Type: application/json; charset=utf-8 + +{ + "errors:" [ + { + "code": , + "message": "", + "detail": ... + }, + ... + ] +} +``` + +The repository is not known to the registry. + +The following headers will be returned on the response: + +|Name|Description| +|----|-----------| +|`Content-Length`|Length of the JSON response body.| + + + +The error codes that may be included in the response body are enumerated below: + +|Code|Message|Description| +|----|-------|-----------| +| `NAME_UNKNOWN` | repository name not known to registry | This is returned if the name used during an operation is unknown to the registry. 
| + + + +###### On Failure: Access Denied + +``` +403 Forbidden +Content-Length: +Content-Type: application/json; charset=utf-8 + +{ + "errors:" [ + { + "code": , + "message": "", + "detail": ... + }, + ... + ] +} +``` + +The client does not have required access to the repository. + +The following headers will be returned on the response: + +|Name|Description| +|----|-----------| +|`Content-Length`|Length of the JSON response body.| + + + +The error codes that may be included in the response body are enumerated below: + +|Code|Message|Description| +|----|-------|-----------| +| `DENIED` | requested access to the resource is denied | The access controller denied access for the operation on a resource. | + + + +###### On Failure: Too Many Requests + +``` +429 Too Many Requests +Content-Length: +Content-Type: application/json; charset=utf-8 + +{ + "errors:" [ + { + "code": , + "message": "", + "detail": ... + }, + ... + ] +} +``` + +The client made too many requests within a time interval. + +The following headers will be returned on the response: + +|Name|Description| +|----|-----------| +|`Content-Length`|Length of the JSON response body.| + + + +The error codes that may be included in the response body are enumerated below: + +|Code|Message|Description| +|----|-------|-----------| +| `TOOMANYREQUESTS` | too many requests | Returned when a client attempts to contact a service too many times | + + + + +#### PUT Manifest + +Put the manifest identified by `name` and `reference` where `reference` can be a tag or digest. + + + +``` +PUT /v2//manifests/ +Host: +Authorization: +Content-Type: + +{ + "name": , + "tag": , + "fsLayers": [ + { + "blobSum": "" + }, + ... + ] + ], + "history": , + "signature": +} +``` + + + + +The following parameters should be specified on the request: + +|Name|Kind|Description| +|----|----|-----------| +|`Host`|header|Standard HTTP Host Header. Should be set to the registry host.| +|`Authorization`|header|An RFC7235 compliant authorization header.| +|`name`|path|Name of the target repository.| +|`reference`|path|Tag or digest of the target manifest.| + + + + +###### On Success: Created + +``` +201 Created +Location: +Content-Length: 0 +Docker-Content-Digest: +``` + +The manifest has been accepted by the registry and is stored under the specified `name` and `tag`. + +The following headers will be returned with the response: + +|Name|Description| +|----|-----------| +|`Location`|The canonical location url of the uploaded manifest.| +|`Content-Length`|The `Content-Length` header must be zero and the body must be empty.| +|`Docker-Content-Digest`|Digest of the targeted content for the request.| + + + + +###### On Failure: Invalid Manifest + +``` +400 Bad Request +Content-Type: application/json; charset=utf-8 + +{ + "errors:" [ + { + "code": , + "message": "", + "detail": ... + }, + ... + ] +} +``` + +The received manifest was invalid in some way, as described by the error codes. The client should resolve the issue and retry the request. + + + +The error codes that may be included in the response body are enumerated below: + +|Code|Message|Description| +|----|-------|-----------| +| `NAME_INVALID` | invalid repository name | Invalid repository name encountered either during manifest validation or any API operation. | +| `TAG_INVALID` | manifest tag did not match URI | During a manifest upload, if the tag in the manifest does not match the uri tag, this error will be returned. 
| +| `MANIFEST_INVALID` | manifest invalid | During upload, manifests undergo several checks ensuring validity. If those checks fail, this error may be returned, unless a more specific error is included. The detail will contain information the failed validation. | +| `MANIFEST_UNVERIFIED` | manifest failed signature verification | During manifest upload, if the manifest fails signature verification, this error will be returned. | +| `BLOB_UNKNOWN` | blob unknown to registry | This error may be returned when a blob is unknown to the registry in a specified repository. This can be returned with a standard get or if a manifest references an unknown layer during upload. | + + + +###### On Failure: Authentication Required + +``` +401 Unauthorized +WWW-Authenticate: realm="", ..." +Content-Length: +Content-Type: application/json; charset=utf-8 + +{ + "errors:" [ + { + "code": , + "message": "", + "detail": ... + }, + ... + ] +} +``` + +The client is not authenticated. + +The following headers will be returned on the response: + +|Name|Description| +|----|-----------| +|`WWW-Authenticate`|An RFC7235 compliant authentication challenge header.| +|`Content-Length`|Length of the JSON response body.| + + + +The error codes that may be included in the response body are enumerated below: + +|Code|Message|Description| +|----|-------|-----------| +| `UNAUTHORIZED` | authentication required | The access controller was unable to authenticate the client. Often this will be accompanied by a Www-Authenticate HTTP response header indicating how to authenticate. | + + + +###### On Failure: No Such Repository Error + +``` +404 Not Found +Content-Length: +Content-Type: application/json; charset=utf-8 + +{ + "errors:" [ + { + "code": , + "message": "", + "detail": ... + }, + ... + ] +} +``` + +The repository is not known to the registry. + +The following headers will be returned on the response: + +|Name|Description| +|----|-----------| +|`Content-Length`|Length of the JSON response body.| + + + +The error codes that may be included in the response body are enumerated below: + +|Code|Message|Description| +|----|-------|-----------| +| `NAME_UNKNOWN` | repository name not known to registry | This is returned if the name used during an operation is unknown to the registry. | + + + +###### On Failure: Access Denied + +``` +403 Forbidden +Content-Length: +Content-Type: application/json; charset=utf-8 + +{ + "errors:" [ + { + "code": , + "message": "", + "detail": ... + }, + ... + ] +} +``` + +The client does not have required access to the repository. + +The following headers will be returned on the response: + +|Name|Description| +|----|-----------| +|`Content-Length`|Length of the JSON response body.| + + + +The error codes that may be included in the response body are enumerated below: + +|Code|Message|Description| +|----|-------|-----------| +| `DENIED` | requested access to the resource is denied | The access controller denied access for the operation on a resource. | + + + +###### On Failure: Too Many Requests + +``` +429 Too Many Requests +Content-Length: +Content-Type: application/json; charset=utf-8 + +{ + "errors:" [ + { + "code": , + "message": "", + "detail": ... + }, + ... + ] +} +``` + +The client made too many requests within a time interval. 
+ +The following headers will be returned on the response: + +|Name|Description| +|----|-----------| +|`Content-Length`|Length of the JSON response body.| + + + +The error codes that may be included in the response body are enumerated below: + +|Code|Message|Description| +|----|-------|-----------| +| `TOOMANYREQUESTS` | too many requests | Returned when a client attempts to contact a service too many times | + + + +###### On Failure: Missing Layer(s) + +``` +400 Bad Request +Content-Type: application/json; charset=utf-8 + +{ + "errors:" [{ + "code": "BLOB_UNKNOWN", + "message": "blob unknown to registry", + "detail": { + "digest": "" + } + }, + ... + ] +} +``` + +One or more layers may be missing during a manifest upload. If so, the missing layers will be enumerated in the error response. + + + +The error codes that may be included in the response body are enumerated below: + +|Code|Message|Description| +|----|-------|-----------| +| `BLOB_UNKNOWN` | blob unknown to registry | This error may be returned when a blob is unknown to the registry in a specified repository. This can be returned with a standard get or if a manifest references an unknown layer during upload. | + + + +###### On Failure: Not allowed + +``` +405 Method Not Allowed +``` + +Manifest put is not allowed because the registry is configured as a pull-through cache or for some other reason + + + +The error codes that may be included in the response body are enumerated below: + +|Code|Message|Description| +|----|-------|-----------| +| `UNSUPPORTED` | The operation is unsupported. | The operation was unsupported due to a missing implementation or invalid set of parameters. | + + + + +#### DELETE Manifest + +Delete the manifest identified by `name` and `reference`. Note that a manifest can _only_ be deleted by `digest`. + + + +``` +DELETE /v2//manifests/ +Host: +Authorization: +``` + + + + +The following parameters should be specified on the request: + +|Name|Kind|Description| +|----|----|-----------| +|`Host`|header|Standard HTTP Host Header. Should be set to the registry host.| +|`Authorization`|header|An RFC7235 compliant authorization header.| +|`name`|path|Name of the target repository.| +|`reference`|path|Tag or digest of the target manifest.| + + + + +###### On Success: Accepted + +``` +202 Accepted +``` + + + + + + +###### On Failure: Invalid Name or Reference + +``` +400 Bad Request +Content-Type: application/json; charset=utf-8 + +{ + "errors:" [ + { + "code": , + "message": "", + "detail": ... + }, + ... + ] +} +``` + +The specified `name` or `reference` were invalid and the delete was unable to proceed. + + + +The error codes that may be included in the response body are enumerated below: + +|Code|Message|Description| +|----|-------|-----------| +| `NAME_INVALID` | invalid repository name | Invalid repository name encountered either during manifest validation or any API operation. | +| `TAG_INVALID` | manifest tag did not match URI | During a manifest upload, if the tag in the manifest does not match the uri tag, this error will be returned. | + + + +###### On Failure: Authentication Required + +``` +401 Unauthorized +WWW-Authenticate: realm="", ..." +Content-Length: +Content-Type: application/json; charset=utf-8 + +{ + "errors:" [ + { + "code": , + "message": "", + "detail": ... + }, + ... + ] +} +``` + +The client is not authenticated. 
+ +The following headers will be returned on the response: + +|Name|Description| +|----|-----------| +|`WWW-Authenticate`|An RFC7235 compliant authentication challenge header.| +|`Content-Length`|Length of the JSON response body.| + + + +The error codes that may be included in the response body are enumerated below: + +|Code|Message|Description| +|----|-------|-----------| +| `UNAUTHORIZED` | authentication required | The access controller was unable to authenticate the client. Often this will be accompanied by a Www-Authenticate HTTP response header indicating how to authenticate. | + + + +###### On Failure: No Such Repository Error + +``` +404 Not Found +Content-Length: +Content-Type: application/json; charset=utf-8 + +{ + "errors:" [ + { + "code": , + "message": "", + "detail": ... + }, + ... + ] +} +``` + +The repository is not known to the registry. + +The following headers will be returned on the response: + +|Name|Description| +|----|-----------| +|`Content-Length`|Length of the JSON response body.| + + + +The error codes that may be included in the response body are enumerated below: + +|Code|Message|Description| +|----|-------|-----------| +| `NAME_UNKNOWN` | repository name not known to registry | This is returned if the name used during an operation is unknown to the registry. | + + + +###### On Failure: Access Denied + +``` +403 Forbidden +Content-Length: +Content-Type: application/json; charset=utf-8 + +{ + "errors:" [ + { + "code": , + "message": "", + "detail": ... + }, + ... + ] +} +``` + +The client does not have required access to the repository. + +The following headers will be returned on the response: + +|Name|Description| +|----|-----------| +|`Content-Length`|Length of the JSON response body.| + + + +The error codes that may be included in the response body are enumerated below: + +|Code|Message|Description| +|----|-------|-----------| +| `DENIED` | requested access to the resource is denied | The access controller denied access for the operation on a resource. | + + + +###### On Failure: Too Many Requests + +``` +429 Too Many Requests +Content-Length: +Content-Type: application/json; charset=utf-8 + +{ + "errors:" [ + { + "code": , + "message": "", + "detail": ... + }, + ... + ] +} +``` + +The client made too many requests within a time interval. + +The following headers will be returned on the response: + +|Name|Description| +|----|-----------| +|`Content-Length`|Length of the JSON response body.| + + + +The error codes that may be included in the response body are enumerated below: + +|Code|Message|Description| +|----|-------|-----------| +| `TOOMANYREQUESTS` | too many requests | Returned when a client attempts to contact a service too many times | + + + +###### On Failure: Unknown Manifest + +``` +404 Not Found +Content-Type: application/json; charset=utf-8 + +{ + "errors:" [ + { + "code": , + "message": "", + "detail": ... + }, + ... + ] +} +``` + +The specified `name` or `reference` are unknown to the registry and the delete was unable to proceed. Clients can assume the manifest was already deleted if this response is returned. + + + +The error codes that may be included in the response body are enumerated below: + +|Code|Message|Description| +|----|-------|-----------| +| `NAME_UNKNOWN` | repository name not known to registry | This is returned if the name used during an operation is unknown to the registry. | +| `MANIFEST_UNKNOWN` | manifest unknown | This error is returned when the manifest, identified by name and tag is unknown to the repository. 
| + + + +###### On Failure: Not allowed + +``` +405 Method Not Allowed +``` + +Manifest delete is not allowed because the registry is configured as a pull-through cache or `delete` has been disabled. + + + +The error codes that may be included in the response body are enumerated below: + +|Code|Message|Description| +|----|-------|-----------| +| `UNSUPPORTED` | The operation is unsupported. | The operation was unsupported due to a missing implementation or invalid set of parameters. | + + + + + +### Blob + +Operations on blobs identified by `name` and `digest`. Used to fetch or delete layers by digest. + + + +#### GET Blob + +Retrieve the blob from the registry identified by `digest`. A `HEAD` request can also be issued to this endpoint to obtain resource information without receiving all data. + + +##### Fetch Blob + +``` +GET /v2//blobs/ +Host: +Authorization: +``` + + + + +The following parameters should be specified on the request: + +|Name|Kind|Description| +|----|----|-----------| +|`Host`|header|Standard HTTP Host Header. Should be set to the registry host.| +|`Authorization`|header|An RFC7235 compliant authorization header.| +|`name`|path|Name of the target repository.| +|`digest`|path|Digest of desired blob.| + + + + +###### On Success: OK + +``` +200 OK +Content-Length: +Docker-Content-Digest: +Content-Type: application/octet-stream + + +``` + +The blob identified by `digest` is available. The blob content will be present in the body of the request. + +The following headers will be returned with the response: + +|Name|Description| +|----|-----------| +|`Content-Length`|The length of the requested blob content.| +|`Docker-Content-Digest`|Digest of the targeted content for the request.| + +###### On Success: Temporary Redirect + +``` +307 Temporary Redirect +Location: +Docker-Content-Digest: +``` + +The blob identified by `digest` is available at the provided location. + +The following headers will be returned with the response: + +|Name|Description| +|----|-----------| +|`Location`|The location where the layer should be accessible.| +|`Docker-Content-Digest`|Digest of the targeted content for the request.| + + + + +###### On Failure: Bad Request + +``` +400 Bad Request +Content-Type: application/json; charset=utf-8 + +{ + "errors:" [ + { + "code": , + "message": "", + "detail": ... + }, + ... + ] +} +``` + +There was a problem with the request that needs to be addressed by the client, such as an invalid `name` or `tag`. + + + +The error codes that may be included in the response body are enumerated below: + +|Code|Message|Description| +|----|-------|-----------| +| `NAME_INVALID` | invalid repository name | Invalid repository name encountered either during manifest validation or any API operation. | +| `DIGEST_INVALID` | provided digest did not match uploaded content | When a blob is uploaded, the registry will check that the content matches the digest provided by the client. The error may include a detail structure with the key "digest", including the invalid digest string. This error may also be returned when a manifest includes an invalid layer digest. | + + + +###### On Failure: Not Found + +``` +404 Not Found +Content-Type: application/json; charset=utf-8 + +{ + "errors:" [ + { + "code": , + "message": "", + "detail": ... + }, + ... + ] +} +``` + +The blob, identified by `name` and `digest`, is unknown to the registry. 
+ + + +The error codes that may be included in the response body are enumerated below: + +|Code|Message|Description| +|----|-------|-----------| +| `NAME_UNKNOWN` | repository name not known to registry | This is returned if the name used during an operation is unknown to the registry. | +| `BLOB_UNKNOWN` | blob unknown to registry | This error may be returned when a blob is unknown to the registry in a specified repository. This can be returned with a standard get or if a manifest references an unknown layer during upload. | + + + +###### On Failure: Authentication Required + +``` +401 Unauthorized +WWW-Authenticate: realm="", ..." +Content-Length: +Content-Type: application/json; charset=utf-8 + +{ + "errors:" [ + { + "code": , + "message": "", + "detail": ... + }, + ... + ] +} +``` + +The client is not authenticated. + +The following headers will be returned on the response: + +|Name|Description| +|----|-----------| +|`WWW-Authenticate`|An RFC7235 compliant authentication challenge header.| +|`Content-Length`|Length of the JSON response body.| + + + +The error codes that may be included in the response body are enumerated below: + +|Code|Message|Description| +|----|-------|-----------| +| `UNAUTHORIZED` | authentication required | The access controller was unable to authenticate the client. Often this will be accompanied by a Www-Authenticate HTTP response header indicating how to authenticate. | + + + +###### On Failure: No Such Repository Error + +``` +404 Not Found +Content-Length: +Content-Type: application/json; charset=utf-8 + +{ + "errors:" [ + { + "code": , + "message": "", + "detail": ... + }, + ... + ] +} +``` + +The repository is not known to the registry. + +The following headers will be returned on the response: + +|Name|Description| +|----|-----------| +|`Content-Length`|Length of the JSON response body.| + + + +The error codes that may be included in the response body are enumerated below: + +|Code|Message|Description| +|----|-------|-----------| +| `NAME_UNKNOWN` | repository name not known to registry | This is returned if the name used during an operation is unknown to the registry. | + + + +###### On Failure: Access Denied + +``` +403 Forbidden +Content-Length: +Content-Type: application/json; charset=utf-8 + +{ + "errors:" [ + { + "code": , + "message": "", + "detail": ... + }, + ... + ] +} +``` + +The client does not have required access to the repository. + +The following headers will be returned on the response: + +|Name|Description| +|----|-----------| +|`Content-Length`|Length of the JSON response body.| + + + +The error codes that may be included in the response body are enumerated below: + +|Code|Message|Description| +|----|-------|-----------| +| `DENIED` | requested access to the resource is denied | The access controller denied access for the operation on a resource. | + + + +###### On Failure: Too Many Requests + +``` +429 Too Many Requests +Content-Length: +Content-Type: application/json; charset=utf-8 + +{ + "errors:" [ + { + "code": , + "message": "", + "detail": ... + }, + ... + ] +} +``` + +The client made too many requests within a time interval. 
+ +The following headers will be returned on the response: + +|Name|Description| +|----|-----------| +|`Content-Length`|Length of the JSON response body.| + + + +The error codes that may be included in the response body are enumerated below: + +|Code|Message|Description| +|----|-------|-----------| +| `TOOMANYREQUESTS` | too many requests | Returned when a client attempts to contact a service too many times | + + + +##### Fetch Blob Part + +``` +GET /v2//blobs/ +Host: +Authorization: +Range: bytes=- +``` + +This endpoint may also support RFC7233 compliant range requests. Support can be detected by issuing a HEAD request. If the header `Accept-Range: bytes` is returned, range requests can be used to fetch partial content. + + +The following parameters should be specified on the request: + +|Name|Kind|Description| +|----|----|-----------| +|`Host`|header|Standard HTTP Host Header. Should be set to the registry host.| +|`Authorization`|header|An RFC7235 compliant authorization header.| +|`Range`|header|HTTP Range header specifying blob chunk.| +|`name`|path|Name of the target repository.| +|`digest`|path|Digest of desired blob.| + + + + +###### On Success: Partial Content + +``` +206 Partial Content +Content-Length: +Content-Range: bytes -/ +Content-Type: application/octet-stream + + +``` + +The blob identified by `digest` is available. The specified chunk of blob content will be present in the body of the request. + +The following headers will be returned with the response: + +|Name|Description| +|----|-----------| +|`Content-Length`|The length of the requested blob chunk.| +|`Content-Range`|Content range of blob chunk.| + + + + +###### On Failure: Bad Request + +``` +400 Bad Request +Content-Type: application/json; charset=utf-8 + +{ + "errors:" [ + { + "code": , + "message": "", + "detail": ... + }, + ... + ] +} +``` + +There was a problem with the request that needs to be addressed by the client, such as an invalid `name` or `tag`. + + + +The error codes that may be included in the response body are enumerated below: + +|Code|Message|Description| +|----|-------|-----------| +| `NAME_INVALID` | invalid repository name | Invalid repository name encountered either during manifest validation or any API operation. | +| `DIGEST_INVALID` | provided digest did not match uploaded content | When a blob is uploaded, the registry will check that the content matches the digest provided by the client. The error may include a detail structure with the key "digest", including the invalid digest string. This error may also be returned when a manifest includes an invalid layer digest. | + + + +###### On Failure: Not Found + +``` +404 Not Found +Content-Type: application/json; charset=utf-8 + +{ + "errors:" [ + { + "code": , + "message": "", + "detail": ... + }, + ... + ] +} +``` + + + + + +The error codes that may be included in the response body are enumerated below: + +|Code|Message|Description| +|----|-------|-----------| +| `NAME_UNKNOWN` | repository name not known to registry | This is returned if the name used during an operation is unknown to the registry. | +| `BLOB_UNKNOWN` | blob unknown to registry | This error may be returned when a blob is unknown to the registry in a specified repository. This can be returned with a standard get or if a manifest references an unknown layer during upload. | + + + +###### On Failure: Requested Range Not Satisfiable + +``` +416 Requested Range Not Satisfiable +``` + +The range specification cannot be satisfied for the requested content. 
This can happen when the range is not formatted correctly or if the range is outside of the valid size of the content. + + + +###### On Failure: Authentication Required + +``` +401 Unauthorized +WWW-Authenticate: realm="", ..." +Content-Length: +Content-Type: application/json; charset=utf-8 + +{ + "errors:" [ + { + "code": , + "message": "", + "detail": ... + }, + ... + ] +} +``` + +The client is not authenticated. + +The following headers will be returned on the response: + +|Name|Description| +|----|-----------| +|`WWW-Authenticate`|An RFC7235 compliant authentication challenge header.| +|`Content-Length`|Length of the JSON response body.| + + + +The error codes that may be included in the response body are enumerated below: + +|Code|Message|Description| +|----|-------|-----------| +| `UNAUTHORIZED` | authentication required | The access controller was unable to authenticate the client. Often this will be accompanied by a Www-Authenticate HTTP response header indicating how to authenticate. | + + + +###### On Failure: No Such Repository Error + +``` +404 Not Found +Content-Length: +Content-Type: application/json; charset=utf-8 + +{ + "errors:" [ + { + "code": , + "message": "", + "detail": ... + }, + ... + ] +} +``` + +The repository is not known to the registry. + +The following headers will be returned on the response: + +|Name|Description| +|----|-----------| +|`Content-Length`|Length of the JSON response body.| + + + +The error codes that may be included in the response body are enumerated below: + +|Code|Message|Description| +|----|-------|-----------| +| `NAME_UNKNOWN` | repository name not known to registry | This is returned if the name used during an operation is unknown to the registry. | + + + +###### On Failure: Access Denied + +``` +403 Forbidden +Content-Length: +Content-Type: application/json; charset=utf-8 + +{ + "errors:" [ + { + "code": , + "message": "", + "detail": ... + }, + ... + ] +} +``` + +The client does not have required access to the repository. + +The following headers will be returned on the response: + +|Name|Description| +|----|-----------| +|`Content-Length`|Length of the JSON response body.| + + + +The error codes that may be included in the response body are enumerated below: + +|Code|Message|Description| +|----|-------|-----------| +| `DENIED` | requested access to the resource is denied | The access controller denied access for the operation on a resource. | + + + +###### On Failure: Too Many Requests + +``` +429 Too Many Requests +Content-Length: +Content-Type: application/json; charset=utf-8 + +{ + "errors:" [ + { + "code": , + "message": "", + "detail": ... + }, + ... + ] +} +``` + +The client made too many requests within a time interval. + +The following headers will be returned on the response: + +|Name|Description| +|----|-----------| +|`Content-Length`|Length of the JSON response body.| + + + +The error codes that may be included in the response body are enumerated below: + +|Code|Message|Description| +|----|-------|-----------| +| `TOOMANYREQUESTS` | too many requests | Returned when a client attempts to contact a service too many times | + + + + +#### DELETE Blob + +Delete the blob identified by `name` and `digest` + + + +``` +DELETE /v2//blobs/ +Host: +Authorization: +``` + + + + +The following parameters should be specified on the request: + +|Name|Kind|Description| +|----|----|-----------| +|`Host`|header|Standard HTTP Host Header. 
Should be set to the registry host.| +|`Authorization`|header|An RFC7235 compliant authorization header.| +|`name`|path|Name of the target repository.| +|`digest`|path|Digest of desired blob.| + + + + +###### On Success: Accepted + +``` +202 Accepted +Content-Length: 0 +Docker-Content-Digest: +``` + + + +The following headers will be returned with the response: + +|Name|Description| +|----|-----------| +|`Content-Length`|0| +|`Docker-Content-Digest`|Digest of the targeted content for the request.| + + + + +###### On Failure: Invalid Name or Digest + +``` +400 Bad Request +``` + + + + + +The error codes that may be included in the response body are enumerated below: + +|Code|Message|Description| +|----|-------|-----------| +| `DIGEST_INVALID` | provided digest did not match uploaded content | When a blob is uploaded, the registry will check that the content matches the digest provided by the client. The error may include a detail structure with the key "digest", including the invalid digest string. This error may also be returned when a manifest includes an invalid layer digest. | +| `NAME_INVALID` | invalid repository name | Invalid repository name encountered either during manifest validation or any API operation. | + + + +###### On Failure: Not Found + +``` +404 Not Found +Content-Type: application/json; charset=utf-8 + +{ + "errors:" [ + { + "code": , + "message": "", + "detail": ... + }, + ... + ] +} +``` + +The blob, identified by `name` and `digest`, is unknown to the registry. + + + +The error codes that may be included in the response body are enumerated below: + +|Code|Message|Description| +|----|-------|-----------| +| `NAME_UNKNOWN` | repository name not known to registry | This is returned if the name used during an operation is unknown to the registry. | +| `BLOB_UNKNOWN` | blob unknown to registry | This error may be returned when a blob is unknown to the registry in a specified repository. This can be returned with a standard get or if a manifest references an unknown layer during upload. | + + + +###### On Failure: Method Not Allowed + +``` +405 Method Not Allowed +Content-Type: application/json; charset=utf-8 + +{ + "errors:" [ + { + "code": , + "message": "", + "detail": ... + }, + ... + ] +} +``` + +Blob delete is not allowed because the registry is configured as a pull-through cache or `delete` has been disabled + + + +The error codes that may be included in the response body are enumerated below: + +|Code|Message|Description| +|----|-------|-----------| +| `UNSUPPORTED` | The operation is unsupported. | The operation was unsupported due to a missing implementation or invalid set of parameters. | + + + +###### On Failure: Authentication Required + +``` +401 Unauthorized +WWW-Authenticate: realm="", ..." +Content-Length: +Content-Type: application/json; charset=utf-8 + +{ + "errors:" [ + { + "code": , + "message": "", + "detail": ... + }, + ... + ] +} +``` + +The client is not authenticated. + +The following headers will be returned on the response: + +|Name|Description| +|----|-----------| +|`WWW-Authenticate`|An RFC7235 compliant authentication challenge header.| +|`Content-Length`|Length of the JSON response body.| + + + +The error codes that may be included in the response body are enumerated below: + +|Code|Message|Description| +|----|-------|-----------| +| `UNAUTHORIZED` | authentication required | The access controller was unable to authenticate the client. Often this will be accompanied by a Www-Authenticate HTTP response header indicating how to authenticate. 
| + + + +###### On Failure: No Such Repository Error + +``` +404 Not Found +Content-Length: +Content-Type: application/json; charset=utf-8 + +{ + "errors:" [ + { + "code": , + "message": "", + "detail": ... + }, + ... + ] +} +``` + +The repository is not known to the registry. + +The following headers will be returned on the response: + +|Name|Description| +|----|-----------| +|`Content-Length`|Length of the JSON response body.| + + + +The error codes that may be included in the response body are enumerated below: + +|Code|Message|Description| +|----|-------|-----------| +| `NAME_UNKNOWN` | repository name not known to registry | This is returned if the name used during an operation is unknown to the registry. | + + + +###### On Failure: Access Denied + +``` +403 Forbidden +Content-Length: +Content-Type: application/json; charset=utf-8 + +{ + "errors:" [ + { + "code": , + "message": "", + "detail": ... + }, + ... + ] +} +``` + +The client does not have required access to the repository. + +The following headers will be returned on the response: + +|Name|Description| +|----|-----------| +|`Content-Length`|Length of the JSON response body.| + + + +The error codes that may be included in the response body are enumerated below: + +|Code|Message|Description| +|----|-------|-----------| +| `DENIED` | requested access to the resource is denied | The access controller denied access for the operation on a resource. | + + + +###### On Failure: Too Many Requests + +``` +429 Too Many Requests +Content-Length: +Content-Type: application/json; charset=utf-8 + +{ + "errors:" [ + { + "code": , + "message": "", + "detail": ... + }, + ... + ] +} +``` + +The client made too many requests within a time interval. + +The following headers will be returned on the response: + +|Name|Description| +|----|-----------| +|`Content-Length`|Length of the JSON response body.| + + + +The error codes that may be included in the response body are enumerated below: + +|Code|Message|Description| +|----|-------|-----------| +| `TOOMANYREQUESTS` | too many requests | Returned when a client attempts to contact a service too many times | + + + + + +### Initiate Blob Upload + +Initiate a blob upload. This endpoint can be used to create resumable uploads or monolithic uploads. + + + +#### POST Initiate Blob Upload + +Initiate a resumable blob upload. If successful, an upload location will be provided to complete the upload. Optionally, if the `digest` parameter is present, the request body will be used to complete the upload in a single request. + + +##### Initiate Monolithic Blob Upload + +``` +POST /v2//blobs/uploads/?digest= +Host: +Authorization: +Content-Length: +Content-Type: application/octect-stream + + +``` + +Upload a blob identified by the `digest` parameter in single request. This upload will not be resumable unless a recoverable error is returned. + + +The following parameters should be specified on the request: + +|Name|Kind|Description| +|----|----|-----------| +|`Host`|header|Standard HTTP Host Header. Should be set to the registry host.| +|`Authorization`|header|An RFC7235 compliant authorization header.| +|`Content-Length`|header|| +|`name`|path|Name of the target repository.| +|`digest`|query|Digest of uploaded blob. 
If present, the upload will be completed, in a single request, with contents of the request body as the resulting blob.| + + + + +###### On Success: Created + +``` +201 Created +Location: +Content-Length: 0 +Docker-Upload-UUID: +``` + +The blob has been created in the registry and is available at the provided location. + +The following headers will be returned with the response: + +|Name|Description| +|----|-----------| +|`Location`|| +|`Content-Length`|The `Content-Length` header must be zero and the body must be empty.| +|`Docker-Upload-UUID`|Identifies the docker upload uuid for the current request.| + + + + +###### On Failure: Invalid Name or Digest + +``` +400 Bad Request +``` + + + + + +The error codes that may be included in the response body are enumerated below: + +|Code|Message|Description| +|----|-------|-----------| +| `DIGEST_INVALID` | provided digest did not match uploaded content | When a blob is uploaded, the registry will check that the content matches the digest provided by the client. The error may include a detail structure with the key "digest", including the invalid digest string. This error may also be returned when a manifest includes an invalid layer digest. | +| `NAME_INVALID` | invalid repository name | Invalid repository name encountered either during manifest validation or any API operation. | + + + +###### On Failure: Not allowed + +``` +405 Method Not Allowed +``` + +Blob upload is not allowed because the registry is configured as a pull-through cache or for some other reason + + + +The error codes that may be included in the response body are enumerated below: + +|Code|Message|Description| +|----|-------|-----------| +| `UNSUPPORTED` | The operation is unsupported. | The operation was unsupported due to a missing implementation or invalid set of parameters. | + + + +###### On Failure: Authentication Required + +``` +401 Unauthorized +WWW-Authenticate: realm="", ..." +Content-Length: +Content-Type: application/json; charset=utf-8 + +{ + "errors:" [ + { + "code": , + "message": "", + "detail": ... + }, + ... + ] +} +``` + +The client is not authenticated. + +The following headers will be returned on the response: + +|Name|Description| +|----|-----------| +|`WWW-Authenticate`|An RFC7235 compliant authentication challenge header.| +|`Content-Length`|Length of the JSON response body.| + + + +The error codes that may be included in the response body are enumerated below: + +|Code|Message|Description| +|----|-------|-----------| +| `UNAUTHORIZED` | authentication required | The access controller was unable to authenticate the client. Often this will be accompanied by a Www-Authenticate HTTP response header indicating how to authenticate. | + + + +###### On Failure: No Such Repository Error + +``` +404 Not Found +Content-Length: +Content-Type: application/json; charset=utf-8 + +{ + "errors:" [ + { + "code": , + "message": "", + "detail": ... + }, + ... + ] +} +``` + +The repository is not known to the registry. + +The following headers will be returned on the response: + +|Name|Description| +|----|-----------| +|`Content-Length`|Length of the JSON response body.| + + + +The error codes that may be included in the response body are enumerated below: + +|Code|Message|Description| +|----|-------|-----------| +| `NAME_UNKNOWN` | repository name not known to registry | This is returned if the name used during an operation is unknown to the registry. 
| + + + +###### On Failure: Access Denied + +``` +403 Forbidden +Content-Length: +Content-Type: application/json; charset=utf-8 + +{ + "errors:" [ + { + "code": , + "message": "", + "detail": ... + }, + ... + ] +} +``` + +The client does not have required access to the repository. + +The following headers will be returned on the response: + +|Name|Description| +|----|-----------| +|`Content-Length`|Length of the JSON response body.| + + + +The error codes that may be included in the response body are enumerated below: + +|Code|Message|Description| +|----|-------|-----------| +| `DENIED` | requested access to the resource is denied | The access controller denied access for the operation on a resource. | + + + +###### On Failure: Too Many Requests + +``` +429 Too Many Requests +Content-Length: +Content-Type: application/json; charset=utf-8 + +{ + "errors:" [ + { + "code": , + "message": "", + "detail": ... + }, + ... + ] +} +``` + +The client made too many requests within a time interval. + +The following headers will be returned on the response: + +|Name|Description| +|----|-----------| +|`Content-Length`|Length of the JSON response body.| + + + +The error codes that may be included in the response body are enumerated below: + +|Code|Message|Description| +|----|-------|-----------| +| `TOOMANYREQUESTS` | too many requests | Returned when a client attempts to contact a service too many times | + + + +##### Initiate Resumable Blob Upload + +``` +POST /v2//blobs/uploads/ +Host: +Authorization: +Content-Length: 0 +``` + +Initiate a resumable blob upload with an empty request body. + + +The following parameters should be specified on the request: + +|Name|Kind|Description| +|----|----|-----------| +|`Host`|header|Standard HTTP Host Header. Should be set to the registry host.| +|`Authorization`|header|An RFC7235 compliant authorization header.| +|`Content-Length`|header|The `Content-Length` header must be zero and the body must be empty.| +|`name`|path|Name of the target repository.| + + + + +###### On Success: Accepted + +``` +202 Accepted +Content-Length: 0 +Location: /v2//blobs/uploads/ +Range: 0-0 +Docker-Upload-UUID: +``` + +The upload has been created. The `Location` header must be used to complete the upload. The response should be identical to a `GET` request on the contents of the returned `Location` header. + +The following headers will be returned with the response: + +|Name|Description| +|----|-----------| +|`Content-Length`|The `Content-Length` header must be zero and the body must be empty.| +|`Location`|The location of the created upload. Clients should use the contents verbatim to complete the upload, adding parameters where required.| +|`Range`|Range header indicating the progress of the upload. When starting an upload, it will return an empty range, since no content has been received.| +|`Docker-Upload-UUID`|Identifies the docker upload uuid for the current request.| + + + + +###### On Failure: Invalid Name or Digest + +``` +400 Bad Request +``` + + + + + +The error codes that may be included in the response body are enumerated below: + +|Code|Message|Description| +|----|-------|-----------| +| `DIGEST_INVALID` | provided digest did not match uploaded content | When a blob is uploaded, the registry will check that the content matches the digest provided by the client. The error may include a detail structure with the key "digest", including the invalid digest string. This error may also be returned when a manifest includes an invalid layer digest. 
| +| `NAME_INVALID` | invalid repository name | Invalid repository name encountered either during manifest validation or any API operation. | + + + +###### On Failure: Authentication Required + +``` +401 Unauthorized +WWW-Authenticate: realm="", ..." +Content-Length: +Content-Type: application/json; charset=utf-8 + +{ + "errors:" [ + { + "code": , + "message": "", + "detail": ... + }, + ... + ] +} +``` + +The client is not authenticated. + +The following headers will be returned on the response: + +|Name|Description| +|----|-----------| +|`WWW-Authenticate`|An RFC7235 compliant authentication challenge header.| +|`Content-Length`|Length of the JSON response body.| + + + +The error codes that may be included in the response body are enumerated below: + +|Code|Message|Description| +|----|-------|-----------| +| `UNAUTHORIZED` | authentication required | The access controller was unable to authenticate the client. Often this will be accompanied by a Www-Authenticate HTTP response header indicating how to authenticate. | + + + +###### On Failure: No Such Repository Error + +``` +404 Not Found +Content-Length: +Content-Type: application/json; charset=utf-8 + +{ + "errors:" [ + { + "code": , + "message": "", + "detail": ... + }, + ... + ] +} +``` + +The repository is not known to the registry. + +The following headers will be returned on the response: + +|Name|Description| +|----|-----------| +|`Content-Length`|Length of the JSON response body.| + + + +The error codes that may be included in the response body are enumerated below: + +|Code|Message|Description| +|----|-------|-----------| +| `NAME_UNKNOWN` | repository name not known to registry | This is returned if the name used during an operation is unknown to the registry. | + + + +###### On Failure: Access Denied + +``` +403 Forbidden +Content-Length: +Content-Type: application/json; charset=utf-8 + +{ + "errors:" [ + { + "code": , + "message": "", + "detail": ... + }, + ... + ] +} +``` + +The client does not have required access to the repository. + +The following headers will be returned on the response: + +|Name|Description| +|----|-----------| +|`Content-Length`|Length of the JSON response body.| + + + +The error codes that may be included in the response body are enumerated below: + +|Code|Message|Description| +|----|-------|-----------| +| `DENIED` | requested access to the resource is denied | The access controller denied access for the operation on a resource. | + + + +###### On Failure: Too Many Requests + +``` +429 Too Many Requests +Content-Length: +Content-Type: application/json; charset=utf-8 + +{ + "errors:" [ + { + "code": , + "message": "", + "detail": ... + }, + ... + ] +} +``` + +The client made too many requests within a time interval. + +The following headers will be returned on the response: + +|Name|Description| +|----|-----------| +|`Content-Length`|Length of the JSON response body.| + + + +The error codes that may be included in the response body are enumerated below: + +|Code|Message|Description| +|----|-------|-----------| +| `TOOMANYREQUESTS` | too many requests | Returned when a client attempts to contact a service too many times | + + + +##### Mount Blob + +``` +POST /v2//blobs/uploads/?mount=&from= +Host: +Authorization: +Content-Length: 0 +``` + +Mount a blob identified by the `mount` parameter from another repository. + + +The following parameters should be specified on the request: + +|Name|Kind|Description| +|----|----|-----------| +|`Host`|header|Standard HTTP Host Header. 
Should be set to the registry host.| +|`Authorization`|header|An RFC7235 compliant authorization header.| +|`Content-Length`|header|The `Content-Length` header must be zero and the body must be empty.| +|`name`|path|Name of the target repository.| +|`mount`|query|Digest of blob to mount from the source repository.| +|`from`|query|Name of the source repository.| + + + + +###### On Success: Created + +``` +201 Created +Location: +Content-Length: 0 +Docker-Upload-UUID: +``` + +The blob has been mounted in the repository and is available at the provided location. + +The following headers will be returned with the response: + +|Name|Description| +|----|-----------| +|`Location`|| +|`Content-Length`|The `Content-Length` header must be zero and the body must be empty.| +|`Docker-Upload-UUID`|Identifies the docker upload uuid for the current request.| + + + + +###### On Failure: Invalid Name or Digest + +``` +400 Bad Request +``` + + + + + +The error codes that may be included in the response body are enumerated below: + +|Code|Message|Description| +|----|-------|-----------| +| `DIGEST_INVALID` | provided digest did not match uploaded content | When a blob is uploaded, the registry will check that the content matches the digest provided by the client. The error may include a detail structure with the key "digest", including the invalid digest string. This error may also be returned when a manifest includes an invalid layer digest. | +| `NAME_INVALID` | invalid repository name | Invalid repository name encountered either during manifest validation or any API operation. | + + + +###### On Failure: Not allowed + +``` +405 Method Not Allowed +``` + +Blob mount is not allowed because the registry is configured as a pull-through cache or for some other reason + + + +The error codes that may be included in the response body are enumerated below: + +|Code|Message|Description| +|----|-------|-----------| +| `UNSUPPORTED` | The operation is unsupported. | The operation was unsupported due to a missing implementation or invalid set of parameters. | + + + +###### On Failure: Authentication Required + +``` +401 Unauthorized +WWW-Authenticate: realm="", ..." +Content-Length: +Content-Type: application/json; charset=utf-8 + +{ + "errors:" [ + { + "code": , + "message": "", + "detail": ... + }, + ... + ] +} +``` + +The client is not authenticated. + +The following headers will be returned on the response: + +|Name|Description| +|----|-----------| +|`WWW-Authenticate`|An RFC7235 compliant authentication challenge header.| +|`Content-Length`|Length of the JSON response body.| + + + +The error codes that may be included in the response body are enumerated below: + +|Code|Message|Description| +|----|-------|-----------| +| `UNAUTHORIZED` | authentication required | The access controller was unable to authenticate the client. Often this will be accompanied by a Www-Authenticate HTTP response header indicating how to authenticate. | + + + +###### On Failure: No Such Repository Error + +``` +404 Not Found +Content-Length: +Content-Type: application/json; charset=utf-8 + +{ + "errors:" [ + { + "code": , + "message": "", + "detail": ... + }, + ... + ] +} +``` + +The repository is not known to the registry. 
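+
+When a mount request cannot be satisfied — for example because the repository named in `from` is unknown or not readable by the client — a client will usually fall back to a regular upload rather than fail the push. The Go sketch below is illustrative only; the fallback policy, helper names and the omission of authentication are assumptions, not part of this specification:
+
+```
+package registryclient
+
+import (
+	"fmt"
+	"net/http"
+)
+
+// mountOrBeginUpload first asks the registry to mount an existing blob from
+// another repository; if that does not create the blob, it falls back to
+// initiating an ordinary resumable upload and returns its Location.
+func mountOrBeginUpload(c *http.Client, base, name, from, digest string) (location string, mounted bool, err error) {
+	mountURL := fmt.Sprintf("%s/v2/%s/blobs/uploads/?mount=%s&from=%s", base, name, digest, from)
+	resp, err := c.Post(mountURL, "", nil)
+	if err != nil {
+		return "", false, err
+	}
+	resp.Body.Close()
+	if resp.StatusCode == http.StatusCreated {
+		return resp.Header.Get("Location"), true, nil // blob mounted, nothing to upload
+	}
+	// Fall back to a plain upload initiation.
+	resp, err = c.Post(fmt.Sprintf("%s/v2/%s/blobs/uploads/", base, name), "", nil)
+	if err != nil {
+		return "", false, err
+	}
+	resp.Body.Close()
+	return resp.Header.Get("Location"), false, nil
+}
+```
+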
+ +The following headers will be returned on the response: + +|Name|Description| +|----|-----------| +|`Content-Length`|Length of the JSON response body.| + + + +The error codes that may be included in the response body are enumerated below: + +|Code|Message|Description| +|----|-------|-----------| +| `NAME_UNKNOWN` | repository name not known to registry | This is returned if the name used during an operation is unknown to the registry. | + + + +###### On Failure: Access Denied + +``` +403 Forbidden +Content-Length: +Content-Type: application/json; charset=utf-8 + +{ + "errors:" [ + { + "code": , + "message": "", + "detail": ... + }, + ... + ] +} +``` + +The client does not have required access to the repository. + +The following headers will be returned on the response: + +|Name|Description| +|----|-----------| +|`Content-Length`|Length of the JSON response body.| + + + +The error codes that may be included in the response body are enumerated below: + +|Code|Message|Description| +|----|-------|-----------| +| `DENIED` | requested access to the resource is denied | The access controller denied access for the operation on a resource. | + + + +###### On Failure: Too Many Requests + +``` +429 Too Many Requests +Content-Length: +Content-Type: application/json; charset=utf-8 + +{ + "errors:" [ + { + "code": , + "message": "", + "detail": ... + }, + ... + ] +} +``` + +The client made too many requests within a time interval. + +The following headers will be returned on the response: + +|Name|Description| +|----|-----------| +|`Content-Length`|Length of the JSON response body.| + + + +The error codes that may be included in the response body are enumerated below: + +|Code|Message|Description| +|----|-------|-----------| +| `TOOMANYREQUESTS` | too many requests | Returned when a client attempts to contact a service too many times | + + + + + +### Blob Upload + +Interact with blob uploads. Clients should never assemble URLs for this endpoint and should only take it through the `Location` header on related API requests. The `Location` header and its parameters should be preserved by clients, using the latest value returned via upload related API calls. + + + +#### GET Blob Upload + +Retrieve status of upload identified by `uuid`. The primary purpose of this endpoint is to resolve the current status of a resumable upload. + + + +``` +GET /v2//blobs/uploads/ +Host: +Authorization: +``` + +Retrieve the progress of the current upload, as reported by the `Range` header. + + +The following parameters should be specified on the request: + +|Name|Kind|Description| +|----|----|-----------| +|`Host`|header|Standard HTTP Host Header. Should be set to the registry host.| +|`Authorization`|header|An RFC7235 compliant authorization header.| +|`name`|path|Name of the target repository.| +|`uuid`|path|A uuid identifying the upload. This field can accept characters that match `[a-zA-Z0-9-_.=]+`.| + + + + +###### On Success: Upload Progress + +``` +204 No Content +Range: 0- +Content-Length: 0 +Docker-Upload-UUID: +``` + +The upload is known and in progress. The last received offset is available in the `Range` header. 
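+
+For example, a client resuming an interrupted upload can read the end of the reported `Range` and start its next chunk one byte later. The Go sketch below is illustrative only; the helper name and the omission of authentication are assumptions:
+
+```
+package registryclient
+
+import (
+	"fmt"
+	"net/http"
+)
+
+// nextOffset asks the registry how much of an upload it has received and
+// returns the offset at which the next chunk should start.
+func nextOffset(c *http.Client, uploadLocation string) (int64, error) {
+	resp, err := c.Get(uploadLocation)
+	if err != nil {
+		return 0, err
+	}
+	defer resp.Body.Close()
+	if resp.StatusCode != http.StatusNoContent {
+		return 0, fmt.Errorf("unexpected status %s", resp.Status)
+	}
+	var start, end int64
+	// The header is reported as "0-<last received offset>".
+	if _, err := fmt.Sscanf(resp.Header.Get("Range"), "%d-%d", &start, &end); err != nil {
+		return 0, err
+	}
+	return end + 1, nil
+}
+```
+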
+ +The following headers will be returned with the response: + +|Name|Description| +|----|-----------| +|`Range`|Range indicating the current progress of the upload.| +|`Content-Length`|The `Content-Length` header must be zero and the body must be empty.| +|`Docker-Upload-UUID`|Identifies the docker upload uuid for the current request.| + + + + +###### On Failure: Bad Request + +``` +400 Bad Request +Content-Type: application/json; charset=utf-8 + +{ + "errors:" [ + { + "code": , + "message": "", + "detail": ... + }, + ... + ] +} +``` + +There was an error processing the upload and it must be restarted. + + + +The error codes that may be included in the response body are enumerated below: + +|Code|Message|Description| +|----|-------|-----------| +| `DIGEST_INVALID` | provided digest did not match uploaded content | When a blob is uploaded, the registry will check that the content matches the digest provided by the client. The error may include a detail structure with the key "digest", including the invalid digest string. This error may also be returned when a manifest includes an invalid layer digest. | +| `NAME_INVALID` | invalid repository name | Invalid repository name encountered either during manifest validation or any API operation. | +| `BLOB_UPLOAD_INVALID` | blob upload invalid | The blob upload encountered an error and can no longer proceed. | + + + +###### On Failure: Not Found + +``` +404 Not Found +Content-Type: application/json; charset=utf-8 + +{ + "errors:" [ + { + "code": , + "message": "", + "detail": ... + }, + ... + ] +} +``` + +The upload is unknown to the registry. The upload must be restarted. + + + +The error codes that may be included in the response body are enumerated below: + +|Code|Message|Description| +|----|-------|-----------| +| `BLOB_UPLOAD_UNKNOWN` | blob upload unknown to registry | If a blob upload has been cancelled or was never started, this error code may be returned. | + + + +###### On Failure: Authentication Required + +``` +401 Unauthorized +WWW-Authenticate: realm="", ..." +Content-Length: +Content-Type: application/json; charset=utf-8 + +{ + "errors:" [ + { + "code": , + "message": "", + "detail": ... + }, + ... + ] +} +``` + +The client is not authenticated. + +The following headers will be returned on the response: + +|Name|Description| +|----|-----------| +|`WWW-Authenticate`|An RFC7235 compliant authentication challenge header.| +|`Content-Length`|Length of the JSON response body.| + + + +The error codes that may be included in the response body are enumerated below: + +|Code|Message|Description| +|----|-------|-----------| +| `UNAUTHORIZED` | authentication required | The access controller was unable to authenticate the client. Often this will be accompanied by a Www-Authenticate HTTP response header indicating how to authenticate. | + + + +###### On Failure: No Such Repository Error + +``` +404 Not Found +Content-Length: +Content-Type: application/json; charset=utf-8 + +{ + "errors:" [ + { + "code": , + "message": "", + "detail": ... + }, + ... + ] +} +``` + +The repository is not known to the registry. + +The following headers will be returned on the response: + +|Name|Description| +|----|-----------| +|`Content-Length`|Length of the JSON response body.| + + + +The error codes that may be included in the response body are enumerated below: + +|Code|Message|Description| +|----|-------|-----------| +| `NAME_UNKNOWN` | repository name not known to registry | This is returned if the name used during an operation is unknown to the registry. 
| + + + +###### On Failure: Access Denied + +``` +403 Forbidden +Content-Length: +Content-Type: application/json; charset=utf-8 + +{ + "errors:" [ + { + "code": , + "message": "", + "detail": ... + }, + ... + ] +} +``` + +The client does not have required access to the repository. + +The following headers will be returned on the response: + +|Name|Description| +|----|-----------| +|`Content-Length`|Length of the JSON response body.| + + + +The error codes that may be included in the response body are enumerated below: + +|Code|Message|Description| +|----|-------|-----------| +| `DENIED` | requested access to the resource is denied | The access controller denied access for the operation on a resource. | + + + +###### On Failure: Too Many Requests + +``` +429 Too Many Requests +Content-Length: +Content-Type: application/json; charset=utf-8 + +{ + "errors:" [ + { + "code": , + "message": "", + "detail": ... + }, + ... + ] +} +``` + +The client made too many requests within a time interval. + +The following headers will be returned on the response: + +|Name|Description| +|----|-----------| +|`Content-Length`|Length of the JSON response body.| + + + +The error codes that may be included in the response body are enumerated below: + +|Code|Message|Description| +|----|-------|-----------| +| `TOOMANYREQUESTS` | too many requests | Returned when a client attempts to contact a service too many times | + + + + +#### PATCH Blob Upload + +Upload a chunk of data for the specified upload. + + +##### Stream upload + +``` +PATCH /v2//blobs/uploads/ +Host: +Authorization: +Content-Type: application/octet-stream + + +``` + +Upload a stream of data to upload without completing the upload. + + +The following parameters should be specified on the request: + +|Name|Kind|Description| +|----|----|-----------| +|`Host`|header|Standard HTTP Host Header. Should be set to the registry host.| +|`Authorization`|header|An RFC7235 compliant authorization header.| +|`name`|path|Name of the target repository.| +|`uuid`|path|A uuid identifying the upload. This field can accept characters that match `[a-zA-Z0-9-_.=]+`.| + + + + +###### On Success: Data Accepted + +``` +204 No Content +Location: /v2//blobs/uploads/ +Range: 0- +Content-Length: 0 +Docker-Upload-UUID: +``` + +The stream of data has been accepted and the current progress is available in the range header. The updated upload location is available in the `Location` header. + +The following headers will be returned with the response: + +|Name|Description| +|----|-----------| +|`Location`|The location of the upload. Clients should assume this changes after each request. Clients should use the contents verbatim to complete the upload, adding parameters where required.| +|`Range`|Range indicating the current progress of the upload.| +|`Content-Length`|The `Content-Length` header must be zero and the body must be empty.| +|`Docker-Upload-UUID`|Identifies the docker upload uuid for the current request.| + + + + +###### On Failure: Bad Request + +``` +400 Bad Request +Content-Type: application/json; charset=utf-8 + +{ + "errors:" [ + { + "code": , + "message": "", + "detail": ... + }, + ... + ] +} +``` + +There was an error processing the upload and it must be restarted. 
+ + + +The error codes that may be included in the response body are enumerated below: + +|Code|Message|Description| +|----|-------|-----------| +| `DIGEST_INVALID` | provided digest did not match uploaded content | When a blob is uploaded, the registry will check that the content matches the digest provided by the client. The error may include a detail structure with the key "digest", including the invalid digest string. This error may also be returned when a manifest includes an invalid layer digest. | +| `NAME_INVALID` | invalid repository name | Invalid repository name encountered either during manifest validation or any API operation. | +| `BLOB_UPLOAD_INVALID` | blob upload invalid | The blob upload encountered an error and can no longer proceed. | + + + +###### On Failure: Not Found + +``` +404 Not Found +Content-Type: application/json; charset=utf-8 + +{ + "errors:" [ + { + "code": , + "message": "", + "detail": ... + }, + ... + ] +} +``` + +The upload is unknown to the registry. The upload must be restarted. + + + +The error codes that may be included in the response body are enumerated below: + +|Code|Message|Description| +|----|-------|-----------| +| `BLOB_UPLOAD_UNKNOWN` | blob upload unknown to registry | If a blob upload has been cancelled or was never started, this error code may be returned. | + + + +###### On Failure: Authentication Required + +``` +401 Unauthorized +WWW-Authenticate: realm="", ..." +Content-Length: +Content-Type: application/json; charset=utf-8 + +{ + "errors:" [ + { + "code": , + "message": "", + "detail": ... + }, + ... + ] +} +``` + +The client is not authenticated. + +The following headers will be returned on the response: + +|Name|Description| +|----|-----------| +|`WWW-Authenticate`|An RFC7235 compliant authentication challenge header.| +|`Content-Length`|Length of the JSON response body.| + + + +The error codes that may be included in the response body are enumerated below: + +|Code|Message|Description| +|----|-------|-----------| +| `UNAUTHORIZED` | authentication required | The access controller was unable to authenticate the client. Often this will be accompanied by a Www-Authenticate HTTP response header indicating how to authenticate. | + + + +###### On Failure: No Such Repository Error + +``` +404 Not Found +Content-Length: +Content-Type: application/json; charset=utf-8 + +{ + "errors:" [ + { + "code": , + "message": "", + "detail": ... + }, + ... + ] +} +``` + +The repository is not known to the registry. + +The following headers will be returned on the response: + +|Name|Description| +|----|-----------| +|`Content-Length`|Length of the JSON response body.| + + + +The error codes that may be included in the response body are enumerated below: + +|Code|Message|Description| +|----|-------|-----------| +| `NAME_UNKNOWN` | repository name not known to registry | This is returned if the name used during an operation is unknown to the registry. | + + + +###### On Failure: Access Denied + +``` +403 Forbidden +Content-Length: +Content-Type: application/json; charset=utf-8 + +{ + "errors:" [ + { + "code": , + "message": "", + "detail": ... + }, + ... + ] +} +``` + +The client does not have required access to the repository. 
+ +The following headers will be returned on the response: + +|Name|Description| +|----|-----------| +|`Content-Length`|Length of the JSON response body.| + + + +The error codes that may be included in the response body are enumerated below: + +|Code|Message|Description| +|----|-------|-----------| +| `DENIED` | requested access to the resource is denied | The access controller denied access for the operation on a resource. | + + + +###### On Failure: Too Many Requests + +``` +429 Too Many Requests +Content-Length: +Content-Type: application/json; charset=utf-8 + +{ + "errors:" [ + { + "code": , + "message": "", + "detail": ... + }, + ... + ] +} +``` + +The client made too many requests within a time interval. + +The following headers will be returned on the response: + +|Name|Description| +|----|-----------| +|`Content-Length`|Length of the JSON response body.| + + + +The error codes that may be included in the response body are enumerated below: + +|Code|Message|Description| +|----|-------|-----------| +| `TOOMANYREQUESTS` | too many requests | Returned when a client attempts to contact a service too many times | + + + +##### Chunked upload + +``` +PATCH /v2//blobs/uploads/ +Host: +Authorization: +Content-Range: - +Content-Length: +Content-Type: application/octet-stream + + +``` + +Upload a chunk of data to specified upload without completing the upload. The data will be uploaded to the specified Content Range. + + +The following parameters should be specified on the request: + +|Name|Kind|Description| +|----|----|-----------| +|`Host`|header|Standard HTTP Host Header. Should be set to the registry host.| +|`Authorization`|header|An RFC7235 compliant authorization header.| +|`Content-Range`|header|Range of bytes identifying the desired block of content represented by the body. Start must the end offset retrieved via status check plus one. Note that this is a non-standard use of the `Content-Range` header.| +|`Content-Length`|header|Length of the chunk being uploaded, corresponding the length of the request body.| +|`name`|path|Name of the target repository.| +|`uuid`|path|A uuid identifying the upload. This field can accept characters that match `[a-zA-Z0-9-_.=]+`.| + + + + +###### On Success: Chunk Accepted + +``` +204 No Content +Location: /v2//blobs/uploads/ +Range: 0- +Content-Length: 0 +Docker-Upload-UUID: +``` + +The chunk of data has been accepted and the current progress is available in the range header. The updated upload location is available in the `Location` header. + +The following headers will be returned with the response: + +|Name|Description| +|----|-----------| +|`Location`|The location of the upload. Clients should assume this changes after each request. Clients should use the contents verbatim to complete the upload, adding parameters where required.| +|`Range`|Range indicating the current progress of the upload.| +|`Content-Length`|The `Content-Length` header must be zero and the body must be empty.| +|`Docker-Upload-UUID`|Identifies the docker upload uuid for the current request.| + + + + +###### On Failure: Bad Request + +``` +400 Bad Request +Content-Type: application/json; charset=utf-8 + +{ + "errors:" [ + { + "code": , + "message": "", + "detail": ... + }, + ... + ] +} +``` + +There was an error processing the upload and it must be restarted. 
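+
+A common cause of this failure (and of `416 Requested Range Not Satisfiable`) is a `Content-Range` that does not start exactly one byte past the offset last reported by the registry. As an illustration only, a chunk request might be assembled as in the hypothetical Go helper below; authentication is omitted:
+
+```
+package registryclient
+
+import (
+	"bytes"
+	"fmt"
+	"net/http"
+)
+
+// patchChunk sends one chunk of blob data to an in-progress upload. start
+// must be one byte past the offset last reported by the registry's Range
+// header, otherwise the registry may answer 400 or 416.
+func patchChunk(c *http.Client, uploadLocation string, start int64, chunk []byte) (*http.Response, error) {
+	req, err := http.NewRequest(http.MethodPatch, uploadLocation, bytes.NewReader(chunk))
+	if err != nil {
+		return nil, err
+	}
+	end := start + int64(len(chunk)) - 1
+	req.Header.Set("Content-Range", fmt.Sprintf("%d-%d", start, end))
+	req.Header.Set("Content-Type", "application/octet-stream")
+	// net/http derives Content-Length from the bytes.Reader automatically.
+	return c.Do(req)
+}
+```
+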
+ + + +The error codes that may be included in the response body are enumerated below: + +|Code|Message|Description| +|----|-------|-----------| +| `DIGEST_INVALID` | provided digest did not match uploaded content | When a blob is uploaded, the registry will check that the content matches the digest provided by the client. The error may include a detail structure with the key "digest", including the invalid digest string. This error may also be returned when a manifest includes an invalid layer digest. | +| `NAME_INVALID` | invalid repository name | Invalid repository name encountered either during manifest validation or any API operation. | +| `BLOB_UPLOAD_INVALID` | blob upload invalid | The blob upload encountered an error and can no longer proceed. | + + + +###### On Failure: Not Found + +``` +404 Not Found +Content-Type: application/json; charset=utf-8 + +{ + "errors:" [ + { + "code": , + "message": "", + "detail": ... + }, + ... + ] +} +``` + +The upload is unknown to the registry. The upload must be restarted. + + + +The error codes that may be included in the response body are enumerated below: + +|Code|Message|Description| +|----|-------|-----------| +| `BLOB_UPLOAD_UNKNOWN` | blob upload unknown to registry | If a blob upload has been cancelled or was never started, this error code may be returned. | + + + +###### On Failure: Requested Range Not Satisfiable + +``` +416 Requested Range Not Satisfiable +``` + +The `Content-Range` specification cannot be accepted, either because it does not overlap with the current progress or it is invalid. + + + +###### On Failure: Authentication Required + +``` +401 Unauthorized +WWW-Authenticate: realm="", ..." +Content-Length: +Content-Type: application/json; charset=utf-8 + +{ + "errors:" [ + { + "code": , + "message": "", + "detail": ... + }, + ... + ] +} +``` + +The client is not authenticated. + +The following headers will be returned on the response: + +|Name|Description| +|----|-----------| +|`WWW-Authenticate`|An RFC7235 compliant authentication challenge header.| +|`Content-Length`|Length of the JSON response body.| + + + +The error codes that may be included in the response body are enumerated below: + +|Code|Message|Description| +|----|-------|-----------| +| `UNAUTHORIZED` | authentication required | The access controller was unable to authenticate the client. Often this will be accompanied by a Www-Authenticate HTTP response header indicating how to authenticate. | + + + +###### On Failure: No Such Repository Error + +``` +404 Not Found +Content-Length: +Content-Type: application/json; charset=utf-8 + +{ + "errors:" [ + { + "code": , + "message": "", + "detail": ... + }, + ... + ] +} +``` + +The repository is not known to the registry. + +The following headers will be returned on the response: + +|Name|Description| +|----|-----------| +|`Content-Length`|Length of the JSON response body.| + + + +The error codes that may be included in the response body are enumerated below: + +|Code|Message|Description| +|----|-------|-----------| +| `NAME_UNKNOWN` | repository name not known to registry | This is returned if the name used during an operation is unknown to the registry. | + + + +###### On Failure: Access Denied + +``` +403 Forbidden +Content-Length: +Content-Type: application/json; charset=utf-8 + +{ + "errors:" [ + { + "code": , + "message": "", + "detail": ... + }, + ... + ] +} +``` + +The client does not have required access to the repository. 
+ +The following headers will be returned on the response: + +|Name|Description| +|----|-----------| +|`Content-Length`|Length of the JSON response body.| + + + +The error codes that may be included in the response body are enumerated below: + +|Code|Message|Description| +|----|-------|-----------| +| `DENIED` | requested access to the resource is denied | The access controller denied access for the operation on a resource. | + + + +###### On Failure: Too Many Requests + +``` +429 Too Many Requests +Content-Length: +Content-Type: application/json; charset=utf-8 + +{ + "errors:" [ + { + "code": , + "message": "", + "detail": ... + }, + ... + ] +} +``` + +The client made too many requests within a time interval. + +The following headers will be returned on the response: + +|Name|Description| +|----|-----------| +|`Content-Length`|Length of the JSON response body.| + + + +The error codes that may be included in the response body are enumerated below: + +|Code|Message|Description| +|----|-------|-----------| +| `TOOMANYREQUESTS` | too many requests | Returned when a client attempts to contact a service too many times | + + + + +#### PUT Blob Upload + +Complete the upload specified by `uuid`, optionally appending the body as the final chunk. + + + +``` +PUT /v2//blobs/uploads/?digest= +Host: +Authorization: +Content-Length: +Content-Type: application/octet-stream + + +``` + +Complete the upload, providing all the data in the body, if necessary. A request without a body will just complete the upload with previously uploaded content. + + +The following parameters should be specified on the request: + +|Name|Kind|Description| +|----|----|-----------| +|`Host`|header|Standard HTTP Host Header. Should be set to the registry host.| +|`Authorization`|header|An RFC7235 compliant authorization header.| +|`Content-Length`|header|Length of the data being uploaded, corresponding to the length of the request body. May be zero if no data is provided.| +|`name`|path|Name of the target repository.| +|`uuid`|path|A uuid identifying the upload. This field can accept characters that match `[a-zA-Z0-9-_.=]+`.| +|`digest`|query|Digest of uploaded blob.| + + + + +###### On Success: Upload Complete + +``` +204 No Content +Location: +Content-Range: - +Content-Length: 0 +Docker-Content-Digest: +``` + +The upload has been completed and accepted by the registry. The canonical location will be available in the `Location` header. + +The following headers will be returned with the response: + +|Name|Description| +|----|-----------| +|`Location`|The canonical location of the blob for retrieval| +|`Content-Range`|Range of bytes identifying the desired block of content represented by the body. Start must match the end of offset retrieved via status check. Note that this is a non-standard use of the `Content-Range` header.| +|`Content-Length`|The `Content-Length` header must be zero and the body must be empty.| +|`Docker-Content-Digest`|Digest of the targeted content for the request.| + + + + +###### On Failure: Bad Request + +``` +400 Bad Request +Content-Type: application/json; charset=utf-8 + +{ + "errors:" [ + { + "code": , + "message": "", + "detail": ... + }, + ... + ] +} +``` + +There was an error processing the upload and it must be restarted. 
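+
+A common trigger for this error is a `digest` query parameter that does not match the bytes the registry actually received. As a hedged illustration, a client can compute the digest over exactly the content it uploaded and then complete the upload as sketched below; the helper name is hypothetical and authentication is omitted:
+
+```
+package registryclient
+
+import (
+	"crypto/sha256"
+	"encoding/hex"
+	"fmt"
+	"net/http"
+	"net/url"
+)
+
+// completeUpload finishes an upload whose bytes were already sent via PATCH.
+// The digest is computed over the full blob so that it matches what the
+// registry received; a mismatch is what typically produces DIGEST_INVALID.
+func completeUpload(c *http.Client, uploadLocation string, blob []byte) error {
+	sum := sha256.Sum256(blob)
+	digest := "sha256:" + hex.EncodeToString(sum[:])
+
+	u, err := url.Parse(uploadLocation)
+	if err != nil {
+		return err
+	}
+	q := u.Query()
+	q.Set("digest", digest) // keep any parameters already present on the Location
+	u.RawQuery = q.Encode()
+
+	req, err := http.NewRequest(http.MethodPut, u.String(), nil)
+	if err != nil {
+		return err
+	}
+	resp, err := c.Do(req)
+	if err != nil {
+		return err
+	}
+	defer resp.Body.Close()
+	if resp.StatusCode != http.StatusNoContent && resp.StatusCode != http.StatusCreated {
+		return fmt.Errorf("upload not accepted: %s", resp.Status)
+	}
+	return nil
+}
+```
+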
+ + + +The error codes that may be included in the response body are enumerated below: + +|Code|Message|Description| +|----|-------|-----------| +| `DIGEST_INVALID` | provided digest did not match uploaded content | When a blob is uploaded, the registry will check that the content matches the digest provided by the client. The error may include a detail structure with the key "digest", including the invalid digest string. This error may also be returned when a manifest includes an invalid layer digest. | +| `NAME_INVALID` | invalid repository name | Invalid repository name encountered either during manifest validation or any API operation. | +| `BLOB_UPLOAD_INVALID` | blob upload invalid | The blob upload encountered an error and can no longer proceed. | +| `UNSUPPORTED` | The operation is unsupported. | The operation was unsupported due to a missing implementation or invalid set of parameters. | + + + +###### On Failure: Not Found + +``` +404 Not Found +Content-Type: application/json; charset=utf-8 + +{ + "errors:" [ + { + "code": , + "message": "", + "detail": ... + }, + ... + ] +} +``` + +The upload is unknown to the registry. The upload must be restarted. + + + +The error codes that may be included in the response body are enumerated below: + +|Code|Message|Description| +|----|-------|-----------| +| `BLOB_UPLOAD_UNKNOWN` | blob upload unknown to registry | If a blob upload has been cancelled or was never started, this error code may be returned. | + + + +###### On Failure: Authentication Required + +``` +401 Unauthorized +WWW-Authenticate: realm="", ..." +Content-Length: +Content-Type: application/json; charset=utf-8 + +{ + "errors:" [ + { + "code": , + "message": "", + "detail": ... + }, + ... + ] +} +``` + +The client is not authenticated. + +The following headers will be returned on the response: + +|Name|Description| +|----|-----------| +|`WWW-Authenticate`|An RFC7235 compliant authentication challenge header.| +|`Content-Length`|Length of the JSON response body.| + + + +The error codes that may be included in the response body are enumerated below: + +|Code|Message|Description| +|----|-------|-----------| +| `UNAUTHORIZED` | authentication required | The access controller was unable to authenticate the client. Often this will be accompanied by a Www-Authenticate HTTP response header indicating how to authenticate. | + + + +###### On Failure: No Such Repository Error + +``` +404 Not Found +Content-Length: +Content-Type: application/json; charset=utf-8 + +{ + "errors:" [ + { + "code": , + "message": "", + "detail": ... + }, + ... + ] +} +``` + +The repository is not known to the registry. + +The following headers will be returned on the response: + +|Name|Description| +|----|-----------| +|`Content-Length`|Length of the JSON response body.| + + + +The error codes that may be included in the response body are enumerated below: + +|Code|Message|Description| +|----|-------|-----------| +| `NAME_UNKNOWN` | repository name not known to registry | This is returned if the name used during an operation is unknown to the registry. | + + + +###### On Failure: Access Denied + +``` +403 Forbidden +Content-Length: +Content-Type: application/json; charset=utf-8 + +{ + "errors:" [ + { + "code": , + "message": "", + "detail": ... + }, + ... + ] +} +``` + +The client does not have required access to the repository. 
+ +The following headers will be returned on the response: + +|Name|Description| +|----|-----------| +|`Content-Length`|Length of the JSON response body.| + + + +The error codes that may be included in the response body are enumerated below: + +|Code|Message|Description| +|----|-------|-----------| +| `DENIED` | requested access to the resource is denied | The access controller denied access for the operation on a resource. | + + + +###### On Failure: Too Many Requests + +``` +429 Too Many Requests +Content-Length: +Content-Type: application/json; charset=utf-8 + +{ + "errors:" [ + { + "code": , + "message": "", + "detail": ... + }, + ... + ] +} +``` + +The client made too many requests within a time interval. + +The following headers will be returned on the response: + +|Name|Description| +|----|-----------| +|`Content-Length`|Length of the JSON response body.| + + + +The error codes that may be included in the response body are enumerated below: + +|Code|Message|Description| +|----|-------|-----------| +| `TOOMANYREQUESTS` | too many requests | Returned when a client attempts to contact a service too many times | + + + + +#### DELETE Blob Upload + +Cancel outstanding upload processes, releasing associated resources. If this is not called, the unfinished uploads will eventually timeout. + + + +``` +DELETE /v2//blobs/uploads/ +Host: +Authorization: +Content-Length: 0 +``` + +Cancel the upload specified by `uuid`. + + +The following parameters should be specified on the request: + +|Name|Kind|Description| +|----|----|-----------| +|`Host`|header|Standard HTTP Host Header. Should be set to the registry host.| +|`Authorization`|header|An RFC7235 compliant authorization header.| +|`Content-Length`|header|The `Content-Length` header must be zero and the body must be empty.| +|`name`|path|Name of the target repository.| +|`uuid`|path|A uuid identifying the upload. This field can accept characters that match `[a-zA-Z0-9-_.=]+`.| + + + + +###### On Success: Upload Deleted + +``` +204 No Content +Content-Length: 0 +``` + +The upload has been successfully deleted. + +The following headers will be returned with the response: + +|Name|Description| +|----|-----------| +|`Content-Length`|The `Content-Length` header must be zero and the body must be empty.| + + + + +###### On Failure: Bad Request + +``` +400 Bad Request +Content-Type: application/json; charset=utf-8 + +{ + "errors:" [ + { + "code": , + "message": "", + "detail": ... + }, + ... + ] +} +``` + +An error was encountered processing the delete. The client may ignore this error. + + + +The error codes that may be included in the response body are enumerated below: + +|Code|Message|Description| +|----|-------|-----------| +| `NAME_INVALID` | invalid repository name | Invalid repository name encountered either during manifest validation or any API operation. | +| `BLOB_UPLOAD_INVALID` | blob upload invalid | The blob upload encountered an error and can no longer proceed. | + + + +###### On Failure: Not Found + +``` +404 Not Found +Content-Type: application/json; charset=utf-8 + +{ + "errors:" [ + { + "code": , + "message": "", + "detail": ... + }, + ... + ] +} +``` + +The upload is unknown to the registry. The client may ignore this error and assume the upload has been deleted. 
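+
+In client code this means a `404` from the cancel request can safely be treated like success. The Go sketch below is illustrative only; names are assumptions and authentication is omitted:
+
+```
+package registryclient
+
+import (
+	"fmt"
+	"net/http"
+)
+
+// cancelUpload aborts an in-progress upload. A 404 means the upload is
+// already gone, so it is treated as success here as well.
+func cancelUpload(c *http.Client, uploadLocation string) error {
+	req, err := http.NewRequest(http.MethodDelete, uploadLocation, nil)
+	if err != nil {
+		return err
+	}
+	resp, err := c.Do(req)
+	if err != nil {
+		return err
+	}
+	defer resp.Body.Close()
+	switch resp.StatusCode {
+	case http.StatusNoContent, http.StatusNotFound:
+		return nil
+	default:
+		return fmt.Errorf("cancel failed: %s", resp.Status)
+	}
+}
+```
+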
+ + + +The error codes that may be included in the response body are enumerated below: + +|Code|Message|Description| +|----|-------|-----------| +| `BLOB_UPLOAD_UNKNOWN` | blob upload unknown to registry | If a blob upload has been cancelled or was never started, this error code may be returned. | + + + +###### On Failure: Authentication Required + +``` +401 Unauthorized +WWW-Authenticate: realm="", ..." +Content-Length: +Content-Type: application/json; charset=utf-8 + +{ + "errors:" [ + { + "code": , + "message": "", + "detail": ... + }, + ... + ] +} +``` + +The client is not authenticated. + +The following headers will be returned on the response: + +|Name|Description| +|----|-----------| +|`WWW-Authenticate`|An RFC7235 compliant authentication challenge header.| +|`Content-Length`|Length of the JSON response body.| + + + +The error codes that may be included in the response body are enumerated below: + +|Code|Message|Description| +|----|-------|-----------| +| `UNAUTHORIZED` | authentication required | The access controller was unable to authenticate the client. Often this will be accompanied by a Www-Authenticate HTTP response header indicating how to authenticate. | + + + +###### On Failure: No Such Repository Error + +``` +404 Not Found +Content-Length: +Content-Type: application/json; charset=utf-8 + +{ + "errors:" [ + { + "code": , + "message": "", + "detail": ... + }, + ... + ] +} +``` + +The repository is not known to the registry. + +The following headers will be returned on the response: + +|Name|Description| +|----|-----------| +|`Content-Length`|Length of the JSON response body.| + + + +The error codes that may be included in the response body are enumerated below: + +|Code|Message|Description| +|----|-------|-----------| +| `NAME_UNKNOWN` | repository name not known to registry | This is returned if the name used during an operation is unknown to the registry. | + + + +###### On Failure: Access Denied + +``` +403 Forbidden +Content-Length: +Content-Type: application/json; charset=utf-8 + +{ + "errors:" [ + { + "code": , + "message": "", + "detail": ... + }, + ... + ] +} +``` + +The client does not have required access to the repository. + +The following headers will be returned on the response: + +|Name|Description| +|----|-----------| +|`Content-Length`|Length of the JSON response body.| + + + +The error codes that may be included in the response body are enumerated below: + +|Code|Message|Description| +|----|-------|-----------| +| `DENIED` | requested access to the resource is denied | The access controller denied access for the operation on a resource. | + + + +###### On Failure: Too Many Requests + +``` +429 Too Many Requests +Content-Length: +Content-Type: application/json; charset=utf-8 + +{ + "errors:" [ + { + "code": , + "message": "", + "detail": ... + }, + ... + ] +} +``` + +The client made too many requests within a time interval. + +The following headers will be returned on the response: + +|Name|Description| +|----|-----------| +|`Content-Length`|Length of the JSON response body.| + + + +The error codes that may be included in the response body are enumerated below: + +|Code|Message|Description| +|----|-------|-----------| +| `TOOMANYREQUESTS` | too many requests | Returned when a client attempts to contact a service too many times | + + + + + +### Catalog + +List a set of available repositories in the local registry cluster. Does not provide any indication of what may be available upstream. 
Applications can only determine if a repository is available but not if it is not available. + + + +#### GET Catalog + +Retrieve a sorted, json list of repositories available in the registry. + + +##### Catalog Fetch + +``` +GET /v2/_catalog +``` + +Request an unabridged list of repositories available. The implementation may impose a maximum limit and return a partial set with pagination links. + + + + + +###### On Success: OK + +``` +200 OK +Content-Length: +Content-Type: application/json; charset=utf-8 + +{ + "repositories": [ + , + ... + ] +} +``` + +Returns the unabridged list of repositories as a json response. + +The following headers will be returned with the response: + +|Name|Description| +|----|-----------| +|`Content-Length`|Length of the JSON response body.| + + + +##### Catalog Fetch Paginated + +``` +GET /v2/_catalog?n=&last= +``` + +Return the specified portion of repositories. + + +The following parameters should be specified on the request: + +|Name|Kind|Description| +|----|----|-----------| +|`n`|query|Limit the number of entries in each response. It not present, all entries will be returned.| +|`last`|query|Result set will include values lexically after last.| + + + + +###### On Success: OK + +``` +200 OK +Content-Length: +Link: <?n=&last=>; rel="next" +Content-Type: application/json; charset=utf-8 + +{ + "repositories": [ + , + ... + ] + "next": "?last=&n=" +} +``` + + + +The following headers will be returned with the response: + +|Name|Description| +|----|-----------| +|`Content-Length`|Length of the JSON response body.| +|`Link`|RFC5988 compliant rel='next' with URL to next result set, if available| + + + + + diff --git a/vendor/github.com/docker/distribution/docs/spec/api.md.tmpl b/vendor/github.com/docker/distribution/docs/spec/api.md.tmpl new file mode 100644 index 000000000..406eda5be --- /dev/null +++ b/vendor/github.com/docker/distribution/docs/spec/api.md.tmpl @@ -0,0 +1,1215 @@ +--- +title: "HTTP API V2" +description: "Specification for the Registry API." +keywords: ["registry, on-prem, images, tags, repository, distribution, api, advanced"] +--- + +# Docker Registry HTTP API V2 + +## Introduction + +The _Docker Registry HTTP API_ is the protocol to facilitate distribution of +images to the docker engine. It interacts with instances of the docker +registry, which is a service to manage information about docker images and +enable their distribution. The specification covers the operation of version 2 +of this API, known as _Docker Registry HTTP API V2_. + +While the V1 registry protocol is usable, there are several problems with the +architecture that have led to this new version. The main driver of this +specification is a set of changes to the docker the image format, covered in +[docker/docker#8093](https://github.com/docker/docker/issues/8093). +The new, self-contained image manifest simplifies image definition and improves +security. This specification will build on that work, leveraging new properties +of the manifest format to improve performance, reduce bandwidth usage and +decrease the likelihood of backend corruption. 
+ +For relevant details and history leading up to this specification, please see +the following issues: + +- [docker/docker#8093](https://github.com/docker/docker/issues/8093) +- [docker/docker#9015](https://github.com/docker/docker/issues/9015) +- [docker/docker-registry#612](https://github.com/docker/docker-registry/issues/612) + +### Scope + +This specification covers the URL layout and protocols of the interaction +between docker registry and docker core. This will affect the docker core +registry API and the rewrite of docker-registry. Docker registry +implementations may implement other API endpoints, but they are not covered by +this specification. + +This includes the following features: + +- Namespace-oriented URI Layout +- PUSH/PULL registry server for V2 image manifest format +- Resumable layer PUSH support +- V2 Client library implementation + +While authentication and authorization support will influence this +specification, details of the protocol will be left to a future specification. +Relevant header definitions and error codes are present to provide an +indication of what a client may encounter. + +#### Future + +There are features that have been discussed during the process of cutting this +specification. The following is an incomplete list: + +- Immutable image references +- Multiple architecture support +- Migration from v2compatibility representation + +These may represent features that are either out of the scope of this +specification, the purview of another specification or have been deferred to a +future version. + +### Use Cases + +For the most part, the use cases of the former registry API apply to the new +version. Differentiating use cases are covered below. + +#### Image Verification + +A docker engine instance would like to run verified image named +"library/ubuntu", with the tag "latest". The engine contacts the registry, +requesting the manifest for "library/ubuntu:latest". An untrusted registry +returns a manifest. Before proceeding to download the individual layers, the +engine verifies the manifest's signature, ensuring that the content was +produced from a trusted source and no tampering has occurred. After each layer +is downloaded, the engine verifies the digest of the layer, ensuring that the +content matches that specified by the manifest. + +#### Resumable Push + +Company X's build servers lose connectivity to docker registry before +completing an image layer transfer. After connectivity returns, the build +server attempts to re-upload the image. The registry notifies the build server +that the upload has already been partially attempted. The build server +responds by only sending the remaining data to complete the image file. + +#### Resumable Pull + +Company X is having more connectivity problems but this time in their +deployment datacenter. When downloading an image, the connection is +interrupted before completion. The client keeps the partial data and uses http +`Range` requests to avoid downloading repeated data. + +#### Layer Upload De-duplication + +Company Y's build system creates two identical docker layers from build +processes A and B. Build process A completes uploading the layer before B. +When process B attempts to upload the layer, the registry indicates that its +not necessary because the layer is already known. 
+ +If process A and B upload the same layer at the same time, both operations +will proceed and the first to complete will be stored in the registry (Note: +we may modify this to prevent dogpile with some locking mechanism). + +### Changes + +The V2 specification has been written to work as a living document, specifying +only what is certain and leaving what is not specified open or to future +changes. Only non-conflicting additions should be made to the API and accepted +changes should avoid preventing future changes from happening. + +This section should be updated when changes are made to the specification, +indicating what is different. Optionally, we may start marking parts of the +specification to correspond with the versions enumerated here. + +Each set of changes is given a letter corresponding to a set of modifications +that were applied to the baseline specification. These are merely for +reference and shouldn't be used outside the specification other than to +identify a set of modifications. + +
+- l
+  - Document `TOOMANYREQUESTS` error code.
+
+- k
+  - Document use of `Accept` and `Content-Type` headers in manifests endpoint.
+
+- j
+  - Add ability to mount blobs across repositories.
+
+- i
+  - Clarified expected behavior response to manifest HEAD request.
+
+- h
+  - All mention of tarsum removed.
+
+- g
+  - Clarify pagination behavior with unspecified parameters.
+
+- f
+  - Specify the delete API for layers and manifests.
+
+- e
+  - Added support for listing registry contents.
+  - Added pagination to tags API.
+  - Added common approach to support pagination.
+
+- d
+  - Allow repository name components to be one character.
+  - Clarified that single component names are allowed.
+
+- c
+  - Added section covering digest format.
+  - Added more clarification that manifest cannot be deleted by tag.
+
+- b
+  - Added capability of doing streaming upload to PATCH blob upload.
+  - Updated PUT blob upload to no longer take final chunk, now requires entire data or no data.
+  - Removed `416 Requested Range Not Satisfiable` response status from PUT blob upload.
+
+- a
+  - Added support for immutable manifest references in manifest endpoints.
+  - Deleting a manifest by tag has been deprecated.
+  - Specified `Docker-Content-Digest` header for appropriate entities.
+  - Added error code for unsupported operations.
+
+ +## Overview + +This section covers client flows and details of the API endpoints. The URI +layout of the new API is structured to support a rich authentication and +authorization model by leveraging namespaces. All endpoints will be prefixed +by the API version and the repository name: + + /v2// + +For example, an API endpoint that will work with the `library/ubuntu` +repository, the URI prefix will be: + + /v2/library/ubuntu/ + +This scheme provides rich access control over various operations and methods +using the URI prefix and http methods that can be controlled in variety of +ways. + +Classically, repository names have always been two path components where each +path component is less than 30 characters. The V2 registry API does not +enforce this. The rules for a repository name are as follows: + +1. A repository name is broken up into _path components_. A component of a + repository name must be at least one lowercase, alpha-numeric characters, + optionally separated by periods, dashes or underscores. More strictly, it + must match the regular expression `[a-z0-9]+(?:[._-][a-z0-9]+)*`. +2. If a repository name has two or more path components, they must be + separated by a forward slash ("/"). +3. The total length of a repository name, including slashes, must be less than + 256 characters. + +These name requirements _only_ apply to the registry API and should accept a +superset of what is supported by other docker ecosystem components. + +All endpoints should support aggressive http caching, compression and range +headers, where appropriate. The new API attempts to leverage HTTP semantics +where possible but may break from standards to implement targeted features. + +For detail on individual endpoints, please see the [_Detail_](#detail) +section. + +### Errors + +Actionable failure conditions, covered in detail in their relevant sections, +are reported as part of 4xx responses, in a json response body. One or more +errors will be returned in the following format: + + { + "errors:" [{ + "code": , + "message": , + "detail": + }, + ... + ] + } + +The `code` field will be a unique identifier, all caps with underscores by +convention. The `message` field will be a human readable string. The optional +`detail` field may contain arbitrary json data providing information the +client can use to resolve the issue. + +While the client can take action on certain error codes, the registry may add +new error codes over time. All client implementations should treat unknown +error codes as `UNKNOWN`, allowing future error codes to be added without +breaking API compatibility. For the purposes of the specification error codes +will only be added and never removed. + +For a complete account of all error codes, please see the [_Errors_](#errors-2) +section. + +### API Version Check + +A minimal endpoint, mounted at `/v2/` will provide version support information +based on its response statuses. The request format is as follows: + + GET /v2/ + +If a `200 OK` response is returned, the registry implements the V2(.1) +registry API and the client may proceed safely with other V2 operations. +Optionally, the response may contain information about the supported paths in +the response body. The client should be prepared to ignore this data. + +If a `401 Unauthorized` response is returned, the client should take action +based on the contents of the "WWW-Authenticate" header and try the endpoint +again. 
Depending on access control setup, the client may still have to +authenticate against different resources, even if this check succeeds. + +If `404 Not Found` response status, or other unexpected status, is returned, +the client should proceed with the assumption that the registry does not +implement V2 of the API. + +When a `200 OK` or `401 Unauthorized` response is returned, the +"Docker-Distribution-API-Version" header should be set to "registry/2.0". +Clients may require this header value to determine if the endpoint serves this +API. When this header is omitted, clients may fallback to an older API version. + +### Content Digests + +This API design is driven heavily by [content addressability](http://en.wikipedia.org/wiki/Content-addressable_storage). +The core of this design is the concept of a content addressable identifier. It +uniquely identifies content by taking a collision-resistant hash of the bytes. +Such an identifier can be independently calculated and verified by selection +of a common _algorithm_. If such an identifier can be communicated in a secure +manner, one can retrieve the content from an insecure source, calculate it +independently and be certain that the correct content was obtained. Put simply, +the identifier is a property of the content. + +To disambiguate from other concepts, we call this identifier a _digest_. A +_digest_ is a serialized hash result, consisting of a _algorithm_ and _hex_ +portion. The _algorithm_ identifies the methodology used to calculate the +digest. The _hex_ portion is the hex-encoded result of the hash. + +We define a _digest_ string to match the following grammar: +``` +digest := algorithm ":" hex +algorithm := /[A-Fa-f0-9_+.-]+/ +hex := /[A-Fa-f0-9]+/ +``` + +Some examples of _digests_ include the following: + +digest | description | +----------------------------------------------------------------------------------|------------------------------------------------ +sha256:6c3c624b58dbbcd3c0dd82b4c53f04194d1247c6eebdaab7c610cf7d66709b3b | Common sha256 based digest | + +While the _algorithm_ does allow one to implement a wide variety of +algorithms, compliant implementations should use sha256. Heavy processing of +input before calculating a hash is discouraged to avoid degrading the +uniqueness of the _digest_ but some canonicalization may be performed to +ensure consistent identifiers. + +Let's use a simple example in pseudo-code to demonstrate a digest calculation: +``` +let C = 'a small string' +let B = sha256(C) +let D = 'sha256:' + EncodeHex(B) +let ID(C) = D +``` + +Above, we have bytestring `C` passed into a function, `SHA256`, that returns a +bytestring `B`, which is the hash of `C`. `D` gets the algorithm concatenated +with the hex encoding of `B`. We then define the identifier of `C` to `ID(C)` +as equal to `D`. A digest can be verified by independently calculating `D` and +comparing it with identifier `ID(C)`. + +#### Digest Header + +To provide verification of http content, any response may include a +`Docker-Content-Digest` header. This will include the digest of the target +entity returned in the response. For blobs, this is the entire blob content. For +manifests, this is the manifest body without the signature content, also known +as the JWS payload. Note that the commonly used canonicalization for digest +calculation may be dependent on the mediatype of the content, such as with +manifests. + +The client may choose to ignore the header or may verify it to ensure content +integrity and transport security. 
This is most important when fetching by a +digest. To ensure security, the content should be verified against the digest +used to fetch the content. At times, the returned digest may differ from that +used to initiate a request. Such digests are considered to be from different +_domains_, meaning they have different values for _algorithm_. In such a case, +the client may choose to verify the digests in both domains or ignore the +server's digest. To maintain security, the client _must_ always verify the +content against the _digest_ used to fetch the content. + +> __IMPORTANT:__ If a _digest_ is used to fetch content, the client should use +> the same digest used to fetch the content to verify it. The header +> `Docker-Content-Digest` should not be trusted over the "local" digest. + +### Pulling An Image + +An "image" is a combination of a JSON manifest and individual layer files. The +process of pulling an image centers around retrieving these two components. + +The first step in pulling an image is to retrieve the manifest. For reference, +the relevant manifest fields for the registry are the following: + + field | description | +----------|------------------------------------------------| +name | The name of the image. | +tag | The tag for this version of the image. | +fsLayers | A list of layer descriptors (including digest) | +signature | A JWS used to verify the manifest content | + +For more information about the manifest format, please see +[docker/docker#8093](https://github.com/docker/docker/issues/8093). + +When the manifest is in hand, the client must verify the signature to ensure +the names and layers are valid. Once confirmed, the client will then use the +digests to download the individual layers. Layers are stored in as blobs in +the V2 registry API, keyed by their digest. + +#### Pulling an Image Manifest + +The image manifest can be fetched with the following url: + +``` +GET /v2//manifests/ +``` + +The `name` and `reference` parameter identify the image and are required. The +reference may include a tag or digest. + +The client should include an Accept header indicating which manifest content +types it supports. For more details on the manifest formats and their content +types, see [manifest-v2-1.md](manifest-v2-1.md) and +[manifest-v2-2.md](manifest-v2-2.md). In a successful response, the Content-Type +header will indicate which manifest type is being returned. + +A `404 Not Found` response will be returned if the image is unknown to the +registry. If the image exists and the response is successful, the image +manifest will be returned, with the following format (see +[docker/docker#8093](https://github.com/docker/docker/issues/8093) for details): + + { + "name": , + "tag": , + "fsLayers": [ + { + "blobSum": + }, + ... + ] + ], + "history": , + "signature": + } + +The client should verify the returned manifest signature for authenticity +before fetching layers. + +##### Existing Manifests + +The image manifest can be checked for existence with the following url: + +``` +HEAD /v2//manifests/ +``` + +The `name` and `reference` parameter identify the image and are required. The +reference may include a tag or digest. + +A `404 Not Found` response will be returned if the image is unknown to the +registry. If the image exists and the response is successful the response will +be as follows: + +``` +200 OK +Content-Length: +Docker-Content-Digest: +``` + + +#### Pulling a Layer + +Layers are stored in the blob portion of the registry, keyed by digest. 
+Pulling a layer is carried out by a standard http request. The URL is as +follows: + + GET /v2//blobs/ + +Access to a layer will be gated by the `name` of the repository but is +identified uniquely in the registry by `digest`. + +This endpoint may issue a 307 (302 for /blobs/uploads/ +``` + +The parameters of this request are the image namespace under which the layer +will be linked. Responses to this request are covered below. + +##### Existing Layers + +The existence of a layer can be checked via a `HEAD` request to the blob store +API. The request should be formatted as follows: + +``` +HEAD /v2//blobs/ +``` + +If the layer with the digest specified in `digest` is available, a 200 OK +response will be received, with no actual body content (this is according to +http specification). The response will look as follows: + +``` +200 OK +Content-Length: +Docker-Content-Digest: +``` + +When this response is received, the client can assume that the layer is +already available in the registry under the given name and should take no +further action to upload the layer. Note that the binary digests may differ +for the existing registry layer, but the digests will be guaranteed to match. + +##### Uploading the Layer + +If the POST request is successful, a `202 Accepted` response will be returned +with the upload URL in the `Location` header: + +``` +202 Accepted +Location: /v2//blobs/uploads/ +Range: bytes=0- +Content-Length: 0 +Docker-Upload-UUID: +``` + +The rest of the upload process can be carried out with the returned url, +called the "Upload URL" from the `Location` header. All responses to the +upload url, whether sending data or getting status, will be in this format. +Though the URI format (`/v2//blobs/uploads/`) for the `Location` +header is specified, clients should treat it as an opaque url and should never +try to assemble it. While the `uuid` parameter may be an actual UUID, this +proposal imposes no constraints on the format and clients should never impose +any. + +If clients need to correlate local upload state with remote upload state, the +contents of the `Docker-Upload-UUID` header should be used. Such an id can be +used to key the last used location header when implementing resumable uploads. + +##### Upload Progress + +The progress and chunk coordination of the upload process will be coordinated +through the `Range` header. While this is a non-standard use of the `Range` +header, there are examples of [similar approaches](https://developers.google.com/youtube/v3/guides/using_resumable_upload_protocol) in APIs with heavy use. +For an upload that just started, for an example with a 1000 byte layer file, +the `Range` header would be as follows: + +``` +Range: bytes=0-0 +``` + +To get the status of an upload, issue a GET request to the upload URL: + +``` +GET /v2//blobs/uploads/ +Host: +``` + +The response will be similar to the above, except will return 204 status: + +``` +204 No Content +Location: /v2//blobs/uploads/ +Range: bytes=0- +Docker-Upload-UUID: +``` + +Note that the HTTP `Range` header byte ranges are inclusive and that will be +honored, even in non-standard use cases. + +##### Monolithic Upload + +A monolithic upload is simply a chunked upload with a single chunk and may be +favored by clients that would like to avoided the complexity of chunking. 
To +carry out a "monolithic" upload, one can simply put the entire content blob to +the provided URL: + +``` +PUT /v2//blobs/uploads/?digest= +Content-Length: +Content-Type: application/octet-stream + + +``` + +The "digest" parameter must be included with the PUT request. Please see the +[_Completed Upload_](#completed-upload) section for details on the parameters +and expected responses. + +##### Chunked Upload + +To carry out an upload of a chunk, the client can specify a range header and +only include that part of the layer file: + +``` +PATCH /v2//blobs/uploads/ +Content-Length: +Content-Range: - +Content-Type: application/octet-stream + + +``` + +There is no enforcement on layer chunk splits other than that the server must +receive them in order. The server may enforce a minimum chunk size. If the +server cannot accept the chunk, a `416 Requested Range Not Satisfiable` +response will be returned and will include a `Range` header indicating the +current status: + +``` +416 Requested Range Not Satisfiable +Location: /v2//blobs/uploads/ +Range: 0- +Content-Length: 0 +Docker-Upload-UUID: +``` + +If this response is received, the client should resume from the "last valid +range" and upload the subsequent chunk. A 416 will be returned under the +following conditions: + +- Invalid Content-Range header format +- Out of order chunk: the range of the next chunk must start immediately after + the "last valid range" from the previous response. + +When a chunk is accepted as part of the upload, a `202 Accepted` response will +be returned, including a `Range` header with the current upload status: + +``` +202 Accepted +Location: /v2//blobs/uploads/ +Range: bytes=0- +Content-Length: 0 +Docker-Upload-UUID: +``` + +##### Completed Upload + +For an upload to be considered complete, the client must submit a `PUT` +request on the upload endpoint with a digest parameter. If it is not provided, +the upload will not be considered complete. The format for the final chunk +will be as follows: + +``` +PUT /v2//blob/uploads/?digest= +Content-Length: +Content-Range: - +Content-Type: application/octet-stream + + +``` + +Optionally, if all chunks have already been uploaded, a `PUT` request with a +`digest` parameter and zero-length body may be sent to complete and validated +the upload. Multiple "digest" parameters may be provided with different +digests. The server may verify none or all of them but _must_ notify the +client if the content is rejected. + +When the last chunk is received and the layer has been validated, the client +will receive a `201 Created` response: + +``` +201 Created +Location: /v2//blobs/ +Content-Length: 0 +Docker-Content-Digest: +``` + +The `Location` header will contain the registry URL to access the accepted +layer file. The `Docker-Content-Digest` header returns the canonical digest of +the uploaded blob which may differ from the provided digest. Most clients may +ignore the value but if it is used, the client should verify the value against +the uploaded blob data. + +###### Digest Parameter + +The "digest" parameter is designed as an opaque parameter to support +verification of a successful transfer. For example, an HTTP URI parameter +might be as follows: + +``` +sha256:6c3c624b58dbbcd3c0dd82b4c53f04194d1247c6eebdaab7c610cf7d66709b3b +``` + +Given this parameter, the registry will verify that the provided content does +match this digest. + +##### Canceling an Upload + +An upload can be cancelled by issuing a DELETE request to the upload endpoint. 
+The format will be as follows: + +``` +DELETE /v2//blobs/uploads/ +``` + +After this request is issued, the upload uuid will no longer be valid and the +registry server will dump all intermediate data. While uploads will time out +if not completed, clients should issue this request if they encounter a fatal +error but still have the ability to issue an http request. + +##### Cross Repository Blob Mount + +A blob may be mounted from another repository that the client has read access +to, removing the need to upload a blob already known to the registry. To issue +a blob mount instead of an upload, a POST request should be issued in the +following format: + +``` +POST /v2//blobs/uploads/?mount=&from= +Content-Length: 0 +``` + +If the blob is successfully mounted, the client will receive a `201 Created` +response: + +``` +201 Created +Location: /v2//blobs/ +Content-Length: 0 +Docker-Content-Digest: +``` + +The `Location` header will contain the registry URL to access the accepted +layer file. The `Docker-Content-Digest` header returns the canonical digest of +the uploaded blob which may differ from the provided digest. Most clients may +ignore the value but if it is used, the client should verify the value against +the uploaded blob data. + +If a mount fails due to invalid repository or digest arguments, the registry +will fall back to the standard upload behavior and return a `202 Accepted` with +the upload URL in the `Location` header: + +``` +202 Accepted +Location: /v2//blobs/uploads/ +Range: bytes=0- +Content-Length: 0 +Docker-Upload-UUID: +``` + +This behavior is consistent with older versions of the registry, which do not +recognize the repository mount query parameters. + +Note: a client may issue a HEAD request to check existence of a blob in a source +repository to distinguish between the registry not supporting blob mounts and +the blob not existing in the expected repository. + +##### Errors + +If an 502, 503 or 504 error is received, the client should assume that the +download can proceed due to a temporary condition, honoring the appropriate +retry mechanism. Other 5xx errors should be treated as terminal. + +If there is a problem with the upload, a 4xx error will be returned indicating +the problem. After receiving a 4xx response (except 416, as called out above), +the upload will be considered failed and the client should take appropriate +action. + +Note that the upload url will not be available forever. If the upload uuid is +unknown to the registry, a `404 Not Found` response will be returned and the +client must restart the upload process. + +### Deleting a Layer + +A layer may be deleted from the registry via its `name` and `digest`. A +delete may be issued with the following request format: + + DELETE /v2//blobs/ + +If the blob exists and has been successfully deleted, the following response +will be issued: + + 202 Accepted + Content-Length: None + +If the blob had already been deleted or did not exist, a `404 Not Found` +response will be issued instead. + +If a layer is deleted which is referenced by a manifest in the registry, +then the complete images will not be resolvable. + +#### Pushing an Image Manifest + +Once all of the layers for an image are uploaded, the client can upload the +image manifest. An image can be pushed using the following request format: + + PUT /v2//manifests/ + Content-Type: + + { + "name": , + "tag": , + "fsLayers": [ + { + "blobSum": + }, + ... + ] + ], + "history": , + "signature": , + ... 
+ } + +The `name` and `reference` fields of the response body must match those +specified in the URL. The `reference` field may be a "tag" or a "digest". The +content type should match the type of the manifest being uploaded, as specified +in [manifest-v2-1.md](manifest-v2-1.md) and [manifest-v2-2.md](manifest-v2-2.md). + +If there is a problem with pushing the manifest, a relevant 4xx response will +be returned with a JSON error message. Please see the +[_PUT Manifest_](#put-manifest) section for details on possible error codes that +may be returned. + +If one or more layers are unknown to the registry, `BLOB_UNKNOWN` errors are +returned. The `detail` field of the error response will have a `digest` field +identifying the missing blob. An error is returned for each unknown blob. The +response format is as follows: + + { + "errors:" [{ + "code": "BLOB_UNKNOWN", + "message": "blob unknown to registry", + "detail": { + "digest": + } + }, + ... + ] + } + +### Listing Repositories + +Images are stored in collections, known as a _repository_, which is keyed by a +`name`, as seen throughout the API specification. A registry instance may +contain several repositories. The list of available repositories is made +available through the _catalog_. + +The catalog for a given registry can be retrieved with the following request: + +``` +GET /v2/_catalog +``` + +The response will be in the following format: + +``` +200 OK +Content-Type: application/json + +{ + "repositories": [ + , + ... + ] +} +``` + +Note that the contents of the response are specific to the registry +implementation. Some registries may opt to provide a full catalog output, +limit it based on the user's access level or omit upstream results, if +providing mirroring functionality. Subsequently, the presence of a repository +in the catalog listing only means that the registry *may* provide access to +the repository at the time of the request. Conversely, a missing entry does +*not* mean that the registry does not have the repository. More succinctly, +the presence of a repository only guarantees that it is there but not that it +is _not_ there. + +For registries with a large number of repositories, this response may be quite +large. If such a response is expected, one should use pagination. A registry +may also limit the amount of responses returned even if pagination was not +explicitly requested. In this case the `Link` header will be returned along +with the results, and subsequent results can be obtained by following the link +as if pagination had been initially requested. + +For details of the `Link` header, please see the [_Pagination_](#pagination) +section. + +#### Pagination + +Paginated catalog results can be retrieved by adding an `n` parameter to the +request URL, declaring that the response should be limited to `n` results. +Starting a paginated flow begins as follows: + +``` +GET /v2/_catalog?n= +``` + +The above specifies that a catalog response should be returned, from the start of +the result set, ordered lexically, limiting the number of results to `n`. The +response to such a request would look as follows: + +``` +200 OK +Content-Type: application/json +Link: <?n=&last=>; rel="next" + +{ + "repositories": [ + , + ... + ] +} +``` + +The above includes the _first_ `n` entries from the result set. To get the +_next_ `n` entries, one can create a URL where the argument `last` has the +value from `repositories[len(repositories)-1]`. 
If there are indeed more +results, the URL for the next block is encoded in an +[RFC5988](https://tools.ietf.org/html/rfc5988) `Link` header, as a "next" +relation. The presence of the `Link` header communicates to the client that +the entire result set has not been returned and another request must be +issued. If the header is not present, the client can assume that all results +have been received. + +> __NOTE:__ In the request template above, note that the brackets +> are required. For example, if the url is +> `http://example.com/v2/_catalog?n=20&last=b`, the value of the header would +> be `; rel="next"`. Please see +> [RFC5988](https://tools.ietf.org/html/rfc5988) for details. + +Compliant client implementations should always use the `Link` header +value when proceeding through results linearly. The client may construct URLs +to skip forward in the catalog. + +To get the next result set, a client would issue the request as follows, using +the URL encoded in the described `Link` header: + +``` +GET /v2/_catalog?n=&last= +``` + +The above process should then be repeated until the `Link` header is no longer +set. + +The catalog result set is represented abstractly as a lexically sorted list, +where the position in that list can be specified by the query term `last`. The +entries in the response start _after_ the term specified by `last`, up to `n` +entries. + +The behavior of `last` is quite simple when demonstrated with an example. Let +us say the registry has the following repositories: + +``` +a +b +c +d +``` + +If the value of `n` is 2, _a_ and _b_ will be returned on the first response. +The `Link` header returned on the response will have `n` set to 2 and last set +to _b_: + +``` +Link: <?n=2&last=b>; rel="next" +``` + +The client can then issue the request with the above value from the `Link` +header, receiving the values _c_ and _d_. Note that `n` may change on the second +to last response or be fully omitted, depending on the server implementation. + +### Listing Image Tags + +It may be necessary to list all of the tags under a given repository. The tags +for an image repository can be retrieved with the following request: + + GET /v2//tags/list + +The response will be in the following format: + + 200 OK + Content-Type: application/json + + { + "name": , + "tags": [ + , + ... + ] + } + +For repositories with a large number of tags, this response may be quite +large. If such a response is expected, one should use the pagination. + +#### Pagination + +Paginated tag results can be retrieved by adding the appropriate parameters to +the request URL described above. The behavior of tag pagination is identical +to that specified for catalog pagination. We cover a simple flow to highlight +any differences. + +Starting a paginated flow may begin as follows: + +``` +GET /v2//tags/list?n= +``` + +The above specifies that a tags response should be returned, from the start of +the result set, ordered lexically, limiting the number of results to `n`. The +response to such a request would look as follows: + +``` +200 OK +Content-Type: application/json +Link: <?n=&last=>; rel="next" + +{ + "name": , + "tags": [ + , + ... + ] +} +``` + +To get the next result set, a client would issue the request as follows, using +the value encoded in the [RFC5988](https://tools.ietf.org/html/rfc5988) `Link` +header: + +``` +GET /v2//tags/list?n=&last= +``` + +The above process should then be repeated until the `Link` header is no longer +set in the response. 
The behavior of the `last` parameter, the provided +response result, lexical ordering and encoding of the `Link` header are +identical to that of catalog pagination. + +### Deleting an Image + +An image may be deleted from the registry via its `name` and `reference`. A +delete may be issued with the following request format: + + DELETE /v2//manifests/ + +For deletes, `reference` *must* be a digest or the delete will fail. If the +image exists and has been successfully deleted, the following response will be +issued: + + 202 Accepted + Content-Length: None + +If the image had already been deleted or did not exist, a `404 Not Found` +response will be issued instead. + +> **Note** When deleting a manifest from a registry version 2.3 or later, the +> following header must be used when `HEAD` or `GET`-ing the manifest to obtain +> the correct digest to delete: + + Accept: application/vnd.docker.distribution.manifest.v2+json + +> for more details, see: [compatibility.md](../compatibility.md#content-addressable-storage-cas) + +## Detail + +> **Note**: This section is still under construction. For the purposes of +> implementation, if any details below differ from the described request flows +> above, the section below should be corrected. When they match, this note +> should be removed. + +The behavior of the endpoints are covered in detail in this section, organized +by route and entity. All aspects of the request and responses are covered, +including headers, parameters and body formats. Examples of requests and their +corresponding responses, with success and failure, are enumerated. + +> **Note**: The sections on endpoint detail are arranged with an example +> request, a description of the request, followed by information about that +> request. + +A list of methods and URIs are covered in the table below: + +|Method|Path|Entity|Description| +|------|----|------|-----------| +{{range $route := .RouteDescriptors}}{{range $method := .Methods}}| {{$method.Method}} | `{{$route.Path|prettygorilla}}` | {{$route.Entity}} | {{$method.Description}} | +{{end}}{{end}} + +The detail for each endpoint is covered in the following sections. 
+ +### Errors + +The error codes encountered via the API are enumerated in the following table: + +|Code|Message|Description| +|----|-------|-----------| +{{range $err := .ErrorDescriptors}} `{{$err.Value}}` | {{$err.Message}} | {{$err.Description|removenewlines}} +{{end}} + +{{range $route := .RouteDescriptors}} +### {{.Entity}} + +{{.Description}} + +{{range $method := $route.Methods}} + +#### {{.Method}} {{$route.Entity}} + +{{.Description}} + +{{if .Requests}}{{range .Requests}}{{if .Name}} +##### {{.Name}}{{end}} + +``` +{{$method.Method}} {{$route.Path|prettygorilla}}{{range $i, $param := .QueryParameters}}{{if eq $i 0}}?{{else}}&{{end}}{{$param.Name}}={{$param.Format}}{{end}}{{range .Headers}} +{{.Name}}: {{.Format}}{{end}}{{if .Body.ContentType}} +Content-Type: {{.Body.ContentType}}{{end}}{{if .Body.Format}} + +{{.Body.Format}}{{end}} +``` + +{{.Description}} + +{{if or .Headers .PathParameters .QueryParameters}} +The following parameters should be specified on the request: + +|Name|Kind|Description| +|----|----|-----------| +{{range .Headers}}|`{{.Name}}`|header|{{.Description}}| +{{end}}{{range .PathParameters}}|`{{.Name}}`|path|{{.Description}}| +{{end}}{{range .QueryParameters}}|`{{.Name}}`|query|{{.Description}}| +{{end}}{{end}} + +{{if .Successes}} +{{range .Successes}} +###### On Success: {{if .Name}}{{.Name}}{{else}}{{.StatusCode | statustext}}{{end}} + +``` +{{.StatusCode}} {{.StatusCode | statustext}}{{range .Headers}} +{{.Name}}: {{.Format}}{{end}}{{if .Body.ContentType}} +Content-Type: {{.Body.ContentType}}{{end}}{{if .Body.Format}} + +{{.Body.Format}}{{end}} +``` + +{{.Description}} +{{if .Fields}}The following fields may be returned in the response body: + +|Name|Description| +|----|-----------| +{{range .Fields}}|`{{.Name}}`|{{.Description}}| +{{end}}{{end}}{{if .Headers}} +The following headers will be returned with the response: + +|Name|Description| +|----|-----------| +{{range .Headers}}|`{{.Name}}`|{{.Description}}| +{{end}}{{end}}{{end}}{{end}} + +{{if .Failures}} +{{range .Failures}} +###### On Failure: {{if .Name}}{{.Name}}{{else}}{{.StatusCode | statustext}}{{end}} + +``` +{{.StatusCode}} {{.StatusCode | statustext}}{{range .Headers}} +{{.Name}}: {{.Format}}{{end}}{{if .Body.ContentType}} +Content-Type: {{.Body.ContentType}}{{end}}{{if .Body.Format}} + +{{.Body.Format}}{{end}} +``` + +{{.Description}} +{{if .Headers}} +The following headers will be returned on the response: + +|Name|Description| +|----|-----------| +{{range .Headers}}|`{{.Name}}`|{{.Description}}| +{{end}}{{end}} + +{{if .ErrorCodes}} +The error codes that may be included in the response body are enumerated below: + +|Code|Message|Description| +|----|-------|-----------| +{{range $err := .ErrorCodes}}| `{{$err.Descriptor.Value}}` | {{$err.Descriptor.Message}} | {{$err.Descriptor.Description|removenewlines}} | +{{end}} + +{{end}}{{end}}{{end}}{{end}}{{end}}{{end}} + +{{end}} diff --git a/vendor/github.com/docker/distribution/docs/spec/auth/index.md b/vendor/github.com/docker/distribution/docs/spec/auth/index.md new file mode 100644 index 000000000..d1aa94225 --- /dev/null +++ b/vendor/github.com/docker/distribution/docs/spec/auth/index.md @@ -0,0 +1,12 @@ +--- +title: "Docker Registry Token Authentication" +description: "Docker Registry v2 authentication schema" +keywords: ["registry, on-prem, images, tags, repository, distribution, authentication, advanced"] +--- + +# Docker Registry v2 authentication + +See the [Token Authentication Specification](token.md), +[Token Authentication 
Implementation](jwt.md), +[Token Scope Documentation](scope.md), +[OAuth2 Token Authentication](oauth.md) for more information. diff --git a/vendor/github.com/docker/distribution/docs/spec/auth/jwt.md b/vendor/github.com/docker/distribution/docs/spec/auth/jwt.md new file mode 100644 index 000000000..aa9941b0b --- /dev/null +++ b/vendor/github.com/docker/distribution/docs/spec/auth/jwt.md @@ -0,0 +1,329 @@ +--- +title: "Token Authentication Implementation" +description: "Describe the reference implementation of the Docker Registry v2 authentication schema" +keywords: ["registry, on-prem, images, tags, repository, distribution, JWT authentication, advanced"] +--- + +# Docker Registry v2 Bearer token specification + +This specification covers the `docker/distribution` implementation of the +v2 Registry's authentication schema. Specifically, it describes the JSON +Web Token schema that `docker/distribution` has adopted to implement the +client-opaque Bearer token issued by an authentication service and +understood by the registry. + +This document borrows heavily from the [JSON Web Token Draft Spec](https://tools.ietf.org/html/draft-ietf-oauth-json-web-token-32) + +## Getting a Bearer Token + +For this example, the client makes an HTTP GET request to the following URL: + +``` +https://auth.docker.io/token?service=registry.docker.io&scope=repository:samalba/my-app:pull,push +``` + +The token server should first attempt to authenticate the client using any +authentication credentials provided with the request. As of Docker 1.8, the +registry client in the Docker Engine only supports Basic Authentication to +these token servers. If an attempt to authenticate to the token server fails, +the token server should return a `401 Unauthorized` response indicating that +the provided credentials are invalid. + +Whether the token server requires authentication is up to the policy of that +access control provider. Some requests may require authentication to determine +access (such as pushing or pulling a private repository) while others may not +(such as pulling from a public repository). + +After authenticating the client (which may simply be an anonymous client if +no attempt was made to authenticate), the token server must next query its +access control list to determine whether the client has the requested scope. In +this example request, if I have authenticated as user `jlhawn`, the token +server will determine what access I have to the repository `samalba/my-app` +hosted by the entity `registry.docker.io`. + +Once the token server has determined what access the client has to the +resources requested in the `scope` parameter, it will take the intersection of +the set of requested actions on each resource and the set of actions that the +client has in fact been granted. If the client only has a subset of the +requested access **it must not be considered an error** as it is not the +responsibility of the token server to indicate authorization errors as part of +this workflow. + +Continuing with the example request, the token server will find that the +client's set of granted access to the repository is `[pull, push]` which when +intersected with the requested access `[pull, push]` yields an equal set. If +the granted access set was found only to be `[pull]` then the intersected set +would only be `[pull]`. If the client has no access to the repository then the +intersected set would be empty, `[]`. + +It is this intersected set of access which is placed in the returned token. 
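To make this intersection step concrete, the following is a minimal Go sketch of how a token server might compute it for a single resource. It is illustrative only: the package, function, and variable names are assumptions made for this example and are not part of this specification or of any particular token server implementation.

```go
package main

import "fmt"

// intersectActions returns the actions present in both the requested and the
// granted sets for a single resource. An empty result is not an error; it
// simply produces an empty access entry in the issued token.
func intersectActions(requested, granted []string) []string {
	grantedSet := make(map[string]bool, len(granted))
	for _, a := range granted {
		grantedSet[a] = true
	}
	var out []string
	for _, a := range requested {
		if grantedSet[a] {
			out = append(out, a)
		}
	}
	return out
}

func main() {
	// Requested [pull push] against granted [pull] yields [pull], matching
	// the narrative above.
	fmt.Println(intersectActions([]string{"pull", "push"}, []string{"pull"}))
}
```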
+ +The server will now construct a JSON Web Token to sign and return. A JSON Web +Token has 3 main parts: + +1. Headers + + The header of a JSON Web Token is a standard JOSE header. The "typ" field + will be "JWT" and it will also contain the "alg" which identifies the + signing algorithm used to produce the signature. It also must have a "kid" + field, representing the ID of the key which was used to sign the token. + + The "kid" field has to be in a libtrust fingerprint compatible format. + Such a format can be generated by following steps: + + 1. Take the DER encoded public key which the JWT token was signed against. + + 2. Create a SHA256 hash out of it and truncate to 240bits. + + 3. Split the result into 12 base32 encoded groups with `:` as delimiter. + + Here is an example JOSE Header for a JSON Web Token (formatted with + whitespace for readability): + + ``` + { + "typ": "JWT", + "alg": "ES256", + "kid": "PYYO:TEWU:V7JH:26JV:AQTZ:LJC3:SXVJ:XGHA:34F2:2LAQ:ZRMK:Z7Q6" + } + ``` + + It specifies that this object is going to be a JSON Web token signed using + the key with the given ID using the Elliptic Curve signature algorithm + using a SHA256 hash. + +2. Claim Set + + The Claim Set is a JSON struct containing these standard registered claim + name fields: + +
+   - `iss` (Issuer): The issuer of the token, typically the fqdn of the
+     authorization server.
+   - `sub` (Subject): The subject of the token; the name or id of the client
+     which requested it. This should be empty (`""`) if the client did not
+     authenticate.
+   - `aud` (Audience): The intended audience of the token; the name or id of
+     the service which will verify the token to authorize the client/subject.
+   - `exp` (Expiration): The token should only be considered valid up to this
+     specified date and time.
+   - `nbf` (Not Before): The token should not be considered valid before this
+     specified date and time.
+   - `iat` (Issued At): Specifies the date and time at which the Authorization
+     server generated this token.
+   - `jti` (JWT ID): A unique identifier for this token. Can be used by the
+     intended audience to prevent replays of the token.
+
+ + The Claim Set will also contain a private claim name unique to this + authorization server specification: + +
+   - `access`: An array of access entry objects with the following fields:
+     - `type`: The type of resource hosted by the service.
+     - `name`: The name of the resource of the given type hosted by the
+       service.
+     - `actions`: An array of strings which give the actions authorized on
+       this resource.
+
+ + Here is an example of such a JWT Claim Set (formatted with whitespace for + readability): + + ``` + { + "iss": "auth.docker.com", + "sub": "jlhawn", + "aud": "registry.docker.com", + "exp": 1415387315, + "nbf": 1415387015, + "iat": 1415387015, + "jti": "tYJCO1c6cnyy7kAn0c7rKPgbV1H1bFws", + "access": [ + { + "type": "repository", + "name": "samalba/my-app", + "actions": [ + "pull", + "push" + ] + } + ] + } + ``` + +3. Signature + + The authorization server will produce a JOSE header and Claim Set with no + extraneous whitespace, i.e., the JOSE Header from above would be + + ``` + {"typ":"JWT","alg":"ES256","kid":"PYYO:TEWU:V7JH:26JV:AQTZ:LJC3:SXVJ:XGHA:34F2:2LAQ:ZRMK:Z7Q6"} + ``` + + and the Claim Set from above would be + + ``` + {"iss":"auth.docker.com","sub":"jlhawn","aud":"registry.docker.com","exp":1415387315,"nbf":1415387015,"iat":1415387015,"jti":"tYJCO1c6cnyy7kAn0c7rKPgbV1H1bFws","access":[{"type":"repository","name":"samalba/my-app","actions":["push","pull"]}]} + ``` + + The utf-8 representation of this JOSE header and Claim Set are then + url-safe base64 encoded (sans trailing '=' buffer), producing: + + ``` + eyJ0eXAiOiJKV1QiLCJhbGciOiJFUzI1NiIsImtpZCI6IlBZWU86VEVXVTpWN0pIOjI2SlY6QVFUWjpMSkMzOlNYVko6WEdIQTozNEYyOjJMQVE6WlJNSzpaN1E2In0 + ``` + + for the JOSE Header and + + ``` + eyJpc3MiOiJhdXRoLmRvY2tlci5jb20iLCJzdWIiOiJqbGhhd24iLCJhdWQiOiJyZWdpc3RyeS5kb2NrZXIuY29tIiwiZXhwIjoxNDE1Mzg3MzE1LCJuYmYiOjE0MTUzODcwMTUsImlhdCI6MTQxNTM4NzAxNSwianRpIjoidFlKQ08xYzZjbnl5N2tBbjBjN3JLUGdiVjFIMWJGd3MiLCJhY2Nlc3MiOlt7InR5cGUiOiJyZXBvc2l0b3J5IiwibmFtZSI6InNhbWFsYmEvbXktYXBwIiwiYWN0aW9ucyI6WyJwdXNoIl19XX0 + ``` + + for the Claim Set. These two are concatenated using a '.' character, + yielding the string: + + ``` + eyJ0eXAiOiJKV1QiLCJhbGciOiJFUzI1NiIsImtpZCI6IlBZWU86VEVXVTpWN0pIOjI2SlY6QVFUWjpMSkMzOlNYVko6WEdIQTozNEYyOjJMQVE6WlJNSzpaN1E2In0.eyJpc3MiOiJhdXRoLmRvY2tlci5jb20iLCJzdWIiOiJqbGhhd24iLCJhdWQiOiJyZWdpc3RyeS5kb2NrZXIuY29tIiwiZXhwIjoxNDE1Mzg3MzE1LCJuYmYiOjE0MTUzODcwMTUsImlhdCI6MTQxNTM4NzAxNSwianRpIjoidFlKQ08xYzZjbnl5N2tBbjBjN3JLUGdiVjFIMWJGd3MiLCJhY2Nlc3MiOlt7InR5cGUiOiJyZXBvc2l0b3J5IiwibmFtZSI6InNhbWFsYmEvbXktYXBwIiwiYWN0aW9ucyI6WyJwdXNoIl19XX0 + ``` + + This is then used as the payload to a the `ES256` signature algorithm + specified in the JOSE header and specified fully in [Section 3.4 of the JSON Web Algorithms (JWA) + draft specification](https://tools.ietf.org/html/draft-ietf-jose-json-web-algorithms-38#section-3.4) + + This example signature will use the following ECDSA key for the server: + + ``` + { + "kty": "EC", + "crv": "P-256", + "kid": "PYYO:TEWU:V7JH:26JV:AQTZ:LJC3:SXVJ:XGHA:34F2:2LAQ:ZRMK:Z7Q6", + "d": "R7OnbfMaD5J2jl7GeE8ESo7CnHSBm_1N2k9IXYFrKJA", + "x": "m7zUpx3b-zmVE5cymSs64POG9QcyEpJaYCD82-549_Q", + "y": "dU3biz8sZ_8GPB-odm8Wxz3lNDr1xcAQQPQaOcr1fmc" + } + ``` + + A resulting signature of the above payload using this key is: + + ``` + QhflHPfbd6eVF4lM9bwYpFZIV0PfikbyXuLx959ykRTBpe3CYnzs6YBK8FToVb5R47920PVLrh8zuLzdCr9t3w + ``` + + Concatenating all of these together with a `.` character gives the + resulting JWT: + + ``` + 
eyJ0eXAiOiJKV1QiLCJhbGciOiJFUzI1NiIsImtpZCI6IlBZWU86VEVXVTpWN0pIOjI2SlY6QVFUWjpMSkMzOlNYVko6WEdIQTozNEYyOjJMQVE6WlJNSzpaN1E2In0.eyJpc3MiOiJhdXRoLmRvY2tlci5jb20iLCJzdWIiOiJqbGhhd24iLCJhdWQiOiJyZWdpc3RyeS5kb2NrZXIuY29tIiwiZXhwIjoxNDE1Mzg3MzE1LCJuYmYiOjE0MTUzODcwMTUsImlhdCI6MTQxNTM4NzAxNSwianRpIjoidFlKQ08xYzZjbnl5N2tBbjBjN3JLUGdiVjFIMWJGd3MiLCJhY2Nlc3MiOlt7InR5cGUiOiJyZXBvc2l0b3J5IiwibmFtZSI6InNhbWFsYmEvbXktYXBwIiwiYWN0aW9ucyI6WyJwdXNoIl19XX0.QhflHPfbd6eVF4lM9bwYpFZIV0PfikbyXuLx959ykRTBpe3CYnzs6YBK8FToVb5R47920PVLrh8zuLzdCr9t3w + ``` + +This can now be placed in an HTTP response and returned to the client to use to +authenticate to the audience service: + + +``` +HTTP/1.1 200 OK +Content-Type: application/json + +{"token": "eyJ0eXAiOiJKV1QiLCJhbGciOiJFUzI1NiIsImtpZCI6IlBZWU86VEVXVTpWN0pIOjI2SlY6QVFUWjpMSkMzOlNYVko6WEdIQTozNEYyOjJMQVE6WlJNSzpaN1E2In0.eyJpc3MiOiJhdXRoLmRvY2tlci5jb20iLCJzdWIiOiJqbGhhd24iLCJhdWQiOiJyZWdpc3RyeS5kb2NrZXIuY29tIiwiZXhwIjoxNDE1Mzg3MzE1LCJuYmYiOjE0MTUzODcwMTUsImlhdCI6MTQxNTM4NzAxNSwianRpIjoidFlKQ08xYzZjbnl5N2tBbjBjN3JLUGdiVjFIMWJGd3MiLCJhY2Nlc3MiOlt7InR5cGUiOiJyZXBvc2l0b3J5IiwibmFtZSI6InNhbWFsYmEvbXktYXBwIiwiYWN0aW9ucyI6WyJwdXNoIl19XX0.QhflHPfbd6eVF4lM9bwYpFZIV0PfikbyXuLx959ykRTBpe3CYnzs6YBK8FToVb5R47920PVLrh8zuLzdCr9t3w"} +``` + +## Using the signed token + +Once the client has a token, it will try the registry request again with the +token placed in the HTTP `Authorization` header like so: + +``` +Authorization: Bearer eyJ0eXAiOiJKV1QiLCJhbGciOiJFUzI1NiIsImtpZCI6IkJWM0Q6MkFWWjpVQjVaOktJQVA6SU5QTDo1RU42Ok40SjQ6Nk1XTzpEUktFOkJWUUs6M0ZKTDpQT1RMIn0.eyJpc3MiOiJhdXRoLmRvY2tlci5jb20iLCJzdWIiOiJCQ0NZOk9VNlo6UUVKNTpXTjJDOjJBVkM6WTdZRDpBM0xZOjQ1VVc6NE9HRDpLQUxMOkNOSjU6NUlVTCIsImF1ZCI6InJlZ2lzdHJ5LmRvY2tlci5jb20iLCJleHAiOjE0MTUzODczMTUsIm5iZiI6MTQxNTM4NzAxNSwiaWF0IjoxNDE1Mzg3MDE1LCJqdGkiOiJ0WUpDTzFjNmNueXk3a0FuMGM3cktQZ2JWMUgxYkZ3cyIsInNjb3BlIjoiamxoYXduOnJlcG9zaXRvcnk6c2FtYWxiYS9teS1hcHA6cHVzaCxwdWxsIGpsaGF3bjpuYW1lc3BhY2U6c2FtYWxiYTpwdWxsIn0.Y3zZSwaZPqy4y9oRBVRImZyv3m_S9XDHF1tWwN7mL52C_IiA73SJkWVNsvNqpJIn5h7A2F8biv_S2ppQ1lgkbw +``` + +This is also described in [Section 2.1 of RFC 6750: The OAuth 2.0 Authorization Framework: Bearer Token Usage](https://tools.ietf.org/html/rfc6750#section-2.1) + +## Verifying the token + +The registry must now verify the token presented by the user by inspecting the +claim set within. The registry will: + +- Ensure that the issuer (`iss` claim) is an authority it trusts. +- Ensure that the registry identifies as the audience (`aud` claim). +- Check that the current time is between the `nbf` and `exp` claim times. +- If enforcing single-use tokens, check that the JWT ID (`jti` claim) value has + not been seen before. + - To enforce this, the registry may keep a record of `jti`s it has seen for + up to the `exp` time of the token to prevent token replays. +- Check the `access` claim value and use the identified resources and the list + of actions authorized to determine whether the token grants the required + level of access for the operation the client is attempting to perform. +- Verify that the signature of the token is valid. + +If any of these requirements are not met, the registry will return a +`403 Forbidden` response to indicate that the token is invalid. + +**Note**: it is only at this point in the workflow that an authorization error +may occur. The token server should *not* return errors when the user does not +have the requested authorization. 
Instead, the returned token should indicate +whatever of the requested scope the client does have (the intersection of +requested and granted access). If the token does not supply proper +authorization then the registry will return the appropriate error. + +At no point in this process should the registry need to call back to the +authorization server. The registry only needs to be supplied with the trusted +public keys to verify the token signatures. diff --git a/vendor/github.com/docker/distribution/docs/spec/auth/oauth.md b/vendor/github.com/docker/distribution/docs/spec/auth/oauth.md new file mode 100644 index 000000000..d946da8a2 --- /dev/null +++ b/vendor/github.com/docker/distribution/docs/spec/auth/oauth.md @@ -0,0 +1,190 @@ +--- +title: "Oauth2 Token Authentication" +description: "Specifies the Docker Registry v2 authentication" +keywords: ["registry, on-prem, images, tags, repository, distribution, oauth2, advanced"] +--- + +# Docker Registry v2 authentication using OAuth2 + +This document describes support for the OAuth2 protocol within the authorization +server. [RFC6749](https://tools.ietf.org/html/rfc6749) should be used as a +reference for the protocol and HTTP endpoints described here. + +**Note**: Not all token servers implement oauth2. If the request to the endpoint +returns `404` using the HTTP `POST` method, refer to +[Token Documentation](token.md) for using the HTTP `GET` method supported by all +token servers. + +## Refresh token format + +The format of the refresh token is completely opaque to the client and should be +determined by the authorization server. The authorization should ensure the +token is sufficiently long and is responsible for storing any information about +long-lived tokens which may be needed for revoking. Any information stored +inside the token will not be extracted and presented by clients. + +## Getting a token + +POST /token + +#### Headers +Content-Type: application/x-www-form-urlencoded + +#### Post parameters + +
+
+ grant_type +
+
+ (REQUIRED) Type of grant used to get token. When getting a refresh token + using credentials this type should be set to "password" and have the + accompanying username and password parameters. Type "authorization_code" + is reserved for future use for authenticating to an authorization server + without having to send credentials directly from the client. When + requesting an access token with a refresh token this should be set to + "refresh_token". +
+
+ service +
+
+ (REQUIRED) The name of the service which hosts the resource to get + access for. Refresh tokens will only be good for getting tokens for + this service. +
+
+ client_id +
+
+ (REQUIRED) String identifying the client. This client_id does not need + to be registered with the authorization server but should be set to a + meaningful value in order to allow auditing keys created by unregistered + clients. Accepted syntax is defined in + [RFC6749 Appendix A.1](https://tools.ietf.org/html/rfc6749#appendix-A.1) +
+
+ access_type +
+
+ (OPTIONAL) Access which is being requested. If "offline" is provided + then a refresh token will be returned. The default is "online", which + returns only a short-lived access token. If the grant type is "refresh_token" + this will only return the same refresh token and not a new one. +
+
+ scope +
+
+ (OPTIONAL) The resource in question, formatted as one of the space-delimited + entries from the scope parameters from the WWW-Authenticate header + shown above. This query parameter should only be specified once but may + contain multiple scopes using the scope list format defined in the scope + grammar. If multiple scopes are provided in the + WWW-Authenticate header, they should first be + converted to a scope list before requesting the token. The above example + would be specified as: scope=repository:samalba/my-app:push. + When requesting a refresh token the scopes may be empty, since the + refresh token is not limited by scope; only the returned short-lived + access token will have the scope limitation. +
+
+ refresh_token +
+
+ (OPTIONAL) The refresh token to use for authentication when grant type "refresh_token" is used. +
+
+ username +
+
+ (OPTIONAL) The username to use for authentication when grant type "password" is used. +
+
+ password +
+
+ (OPTIONAL) The password to use for authentication when grant type "password" is used. +
+
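As a rough illustration only, the sketch below assembles these parameters with Go's standard library and posts them to the token endpoint; the endpoint URL, service name, client_id, and credentials are placeholders rather than values defined by this specification.

```go
package main

import (
	"fmt"
	"net/http"
	"net/url"
)

// requestRefreshToken posts a password-grant token request using the
// parameters described above. All literal values are placeholders.
func requestRefreshToken() (*http.Response, error) {
	form := url.Values{}
	form.Set("grant_type", "password")
	form.Set("service", "registry.docker.io")
	form.Set("client_id", "example-client")
	form.Set("access_type", "offline") // also request a refresh token
	form.Set("username", "jlhawn")
	form.Set("password", "secret")
	return http.PostForm("https://auth.docker.io/token", form)
}

func main() {
	resp, err := requestRefreshToken()
	if err != nil {
		fmt.Println("token request failed:", err)
		return
	}
	defer resp.Body.Close()
	fmt.Println("status:", resp.Status)
}
```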
+ +#### Response fields + +
+
+ access_token +
+
+ (REQUIRED) An opaque Bearer token that clients should + supply to subsequent requests in the Authorization header. + The client should not attempt to parse or interpret this token; it must be + treated as an opaque string. +
+
+ scope +
+
+ (REQUIRED) The scope granted inside the access token. This may be the + same scope as requested or a subset. This requirement is stronger than + specified in [RFC6749 Section 4.2.2](https://tools.ietf.org/html/rfc6749#section-4.2.2) + by strictly requiring the scope in the return value. +
+
+ expires_in +
+
+ (REQUIRED) The duration in seconds since the token was issued that it + will remain valid. When omitted, this defaults to 60 seconds. For + compatibility with older clients, a token should never be returned with + less than 60 seconds to live. +
+
+ issued_at +
+
+ (Optional) The RFC3339-serialized UTC + standard time at which a given token was issued. If issued_at is omitted, the + expiration is from when the token exchange completed. +
+
+ refresh_token +
+
+ (Optional) Token which can be used to get additional access tokens for + the same subject with different scopes. This token should be kept secure + by the client and only sent to the authorization server which issues + bearer tokens. This field will only be set when `access_type=offline` is + provided in the request. +
+
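The sketch below shows one way a client might decode these fields with `encoding/json`; the package and type names are assumptions made for illustration, not part of the specification.

```go
package registryauth // illustrative package name

import (
	"encoding/json"
	"io"
)

// oauthTokenResponse mirrors the response fields listed above.
type oauthTokenResponse struct {
	AccessToken  string `json:"access_token"`
	Scope        string `json:"scope"`
	ExpiresIn    int    `json:"expires_in"`
	IssuedAt     string `json:"issued_at"`
	RefreshToken string `json:"refresh_token"`
}

// decodeOAuthTokenResponse reads a token endpoint reply from body and
// applies the documented 60 second default when expires_in is omitted.
func decodeOAuthTokenResponse(body io.Reader) (*oauthTokenResponse, error) {
	var tr oauthTokenResponse
	if err := json.NewDecoder(body).Decode(&tr); err != nil {
		return nil, err
	}
	if tr.ExpiresIn == 0 {
		tr.ExpiresIn = 60
	}
	return &tr, nil
}
```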
+ + +#### Example getting refresh token + +``` +POST /token HTTP/1.1 +Host: auth.docker.io +Content-Type: application/x-www-form-urlencoded + +grant_type=password&username=johndoe&password=A3ddj3w&service=hub.docker.io&client_id=dockerengine&access_type=offline + +HTTP/1.1 200 OK +Content-Type: application/json + +{"refresh_token":"kas9Da81Dfa8","access_token":"eyJhbGciOiJFUzI1NiIsInR5","expires_in":900,"scope":""} +``` + +#### Example refreshing an Access Token + +``` +POST /token HTTP/1.1 +Host: auth.docker.io +Content-Type: application/x-www-form-urlencoded + +grant_type=refresh_token&refresh_token=kas9Da81Dfa8&service=registry-1.docker.io&client_id=dockerengine&scope=repository:samalba/my-app:pull,push + +HTTP/1.1 200 OK +Content-Type: application/json + +{"refresh_token":"kas9Da81Dfa8","access_token":"eyJhbGciOiJFUzI1NiIsInR5":"expires_in":900,"scope":"repository:samalba/my-app:pull,repository:samalba/my-app:push"} +``` diff --git a/vendor/github.com/docker/distribution/docs/spec/auth/scope.md b/vendor/github.com/docker/distribution/docs/spec/auth/scope.md new file mode 100644 index 000000000..6ef61edf1 --- /dev/null +++ b/vendor/github.com/docker/distribution/docs/spec/auth/scope.md @@ -0,0 +1,148 @@ +--- +title: "Token Scope Documentation" +description: "Describes the scope and access fields used for registry authorization tokens" +keywords: ["registry, on-prem, images, tags, repository, distribution, advanced, access, scope"] +--- + +# Docker Registry Token Scope and Access + +Tokens used by the registry are always restricted what resources they may +be used to access, where those resources may be accessed, and what actions +may be done on those resources. Tokens always have the context of a user which +the token was originally created for. This document describes how these +restrictions are represented and enforced by the authorization server and +resource providers. + +## Scope Components + +### Subject (Authenticated User) + +The subject represents the user for which a token is valid. Any actions +performed using an access token should be considered on behalf of the subject. +This is included in the `sub` field of access token JWT. A refresh token should +be limited to a single subject and only be able to give out access tokens for +that subject. + +### Audience (Resource Provider) + +The audience represents a resource provider which is intended to be able to +perform the actions specified in the access token. Any resource provider which +does not match the audience should not use that access token. The audience is +included in the `aud` field of the access token JWT. A refresh token should be +limited to a single audience and only be able to give out access tokens for that +audience. + +### Resource Type + +The resource type represents the type of resource which the resource name is +intended to represent. This type may be specific to a resource provider but must +be understood by the authorization server in order to validate the subject +is authorized for a specific resource. + +#### Resource Class + +The resource type might have a resource class which further classifies the +the resource name within the resource type. A class is not required and +is specific to the resource type. + +#### Example Resource Types + + - `repository` - represents a single repository within a registry. A +repository may represent many manifest or content blobs, but the resource type +is considered the collections of those items. 
Actions which may be performed on +a `repository` are `pull` for accessing the collection and `push` for adding to +it. By default the `repository` type has the class of `image`. + - `repository(plugin)` - represents a single repository of plugins within a +registry. A plugin repository has the same content and actions as a repository. + - `registry` - represents the entire registry. Used for administrative actions +or lookup operations that span an entire registry. + +### Resource Name + +The resource name represent the name which identifies a resource for a resource +provider. A resource is identified by this name and the provided resource type. +An example of a resource name would be the name component of an image tag, such +as "samalba/myapp" or "hostname/samalba/myapp". + +### Resource Actions + +The resource actions define the actions which the access token allows to be +performed on the identified resource. These actions are type specific but will +normally have actions identifying read and write access on the resource. Example +for the `repository` type are `pull` for read access and `push` for write +access. + +## Authorization Server Use + +Each access token request may include a scope and an audience. The subject is +always derived from the passed in credentials or refresh token. When using +a refresh token the passed in audience must match the audience defined for +the refresh token. The audience (resource provider) is provided using the +`service` field. Multiple resource scopes may be provided using multiple `scope` +fields on the `GET` request. The `POST` request only takes in a single +`scope` field but may use a space to separate a list of multiple resource +scopes. + +### Resource Scope Grammar + +``` +scope := resourcescope [ ' ' resourcescope ]* +resourcescope := resourcetype ":" resourcename ":" action [ ',' action ]* +resourcetype := resourcetypevalue [ '(' resourcetypevalue ')' ] +resourcetypevalue := /[a-z0-9]+/ +resourcename := [ hostname '/' ] component [ '/' component ]* +hostname := hostcomponent ['.' hostcomponent]* [':' port-number] +hostcomponent := /([a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9-]*[a-zA-Z0-9])/ +port-number := /[0-9]+/ +action := /[a-z]*/ +component := alpha-numeric [ separator alpha-numeric ]* +alpha-numeric := /[a-z0-9]+/ +separator := /[_.]|__|[-]*/ +``` +Full reference grammar is defined +[here](https://godoc.org/github.com/docker/distribution/reference). Currently +the scope name grammar is a subset of the reference grammar. + +> **NOTE:** that the `resourcename` may contain one `:` due to a possible port +> number in the hostname component of the `resourcename`, so a naive +> implementation that interprets the first three `:`-delimited tokens of a +> `scope` to be the `resourcetype`, `resourcename`, and a list of `action` +> would be insufficient. + +## Resource Provider Use + +Once a resource provider has verified the authenticity of the scope through +JWT access token verification, the resource provider must ensure that scope +satisfies the request. The resource provider should match the given audience +according to name or URI the resource provider uses to identify itself. Any +denial based on subject is not defined here and is up to resource provider, the +subject is mainly provided for audit logs and any other user-specific rules +which may need to be provided but are not defined by the authorization server. + +The resource provider must ensure that ANY resource being accessed as the +result of a request has the appropriate access scope. 
Both the resource type +and resource name must match the accessed resource and an appropriate action +scope must be included. + +When appropriate authorization is not provided either due to lack of scope +or missing token, the resource provider to return a `WWW-AUTHENTICATE` HTTP +header with the `realm` as the authorization server, the `service` as the +expected audience identifying string, and a `scope` field for each required +resource scope to complete the request. + +## JWT Access Tokens + +Each JWT access token may only have a single subject and audience but multiple +resource scopes. The subject and audience are put into standard JWT fields +`sub` and `aud`. The resource scope is put into the `access` field. The +structure of the access field can be seen in the +[jwt documentation](jwt.md). + +## Refresh Tokens + +A refresh token must be defined for a single subject and audience. Further +restricting scope to specific type, name, and actions combinations should be +done by fetching an access token using the refresh token. Since the refresh +token is not scoped to specific resources for an audience, extra care should +be taken to only use the refresh token to negotiate new access tokens directly +with the authorization server, and never with a resource provider. diff --git a/vendor/github.com/docker/distribution/docs/spec/auth/token.md b/vendor/github.com/docker/distribution/docs/spec/auth/token.md new file mode 100644 index 000000000..f8391bd5a --- /dev/null +++ b/vendor/github.com/docker/distribution/docs/spec/auth/token.md @@ -0,0 +1,250 @@ +--- +title: "Token Authentication Specification" +description: "Specifies the Docker Registry v2 authentication" +keywords: ["registry, on-prem, images, tags, repository, distribution, Bearer authentication, advanced"] +--- + +# Docker Registry v2 authentication via central service + +This document outlines the v2 Docker registry authentication scheme: + +![v2 registry auth](../../images/v2-registry-auth.png) + +1. Attempt to begin a push/pull operation with the registry. +2. If the registry requires authorization it will return a `401 Unauthorized` + HTTP response with information on how to authenticate. +3. The registry client makes a request to the authorization service for a + Bearer token. +4. The authorization service returns an opaque Bearer token representing the + client's authorized access. +5. The client retries the original request with the Bearer token embedded in + the request's Authorization header. +6. The Registry authorizes the client by validating the Bearer token and the + claim set embedded within it and begins the push/pull session as usual. + +## Requirements + +- Registry clients which can understand and respond to token auth challenges + returned by the resource server. +- An authorization server capable of managing access controls to their + resources hosted by any given service (such as repositories in a Docker + Registry). +- A Docker Registry capable of trusting the authorization server to sign tokens + which clients can use for authorization and the ability to verify these + tokens for single use or for use during a sufficiently short period of time. + +## Authorization Server Endpoint Descriptions + +The described server is meant to serve as a standalone access control manager +for resources hosted by other services which wish to authenticate and manage +authorizations using a separate access control manager. 
+ +A service like this is used by the official Docker Registry to authenticate +clients and verify their authorization to Docker image repositories. + +As of Docker 1.6, the registry client within the Docker Engine has been updated +to handle such an authorization workflow. + +## How to authenticate + +Registry V1 clients first contact the index to initiate a push or pull. Under +the Registry V2 workflow, clients should contact the registry first. If the +registry server requires authentication it will return a `401 Unauthorized` +response with a `WWW-Authenticate` header detailing how to authenticate to this +registry. + +For example, say I (username `jlhawn`) am attempting to push an image to the +repository `samalba/my-app`. For the registry to authorize this, I will need +`push` access to the `samalba/my-app` repository. The registry will first +return this response: + +``` +HTTP/1.1 401 Unauthorized +Content-Type: application/json; charset=utf-8 +Docker-Distribution-Api-Version: registry/2.0 +Www-Authenticate: Bearer realm="https://auth.docker.io/token",service="registry.docker.io",scope="repository:samalba/my-app:pull,push" +Date: Thu, 10 Sep 2015 19:32:31 GMT +Content-Length: 235 +Strict-Transport-Security: max-age=31536000 + +{"errors":[{"code":"UNAUTHORIZED","message":"access to the requested resource is not authorized","detail":[{"Type":"repository","Name":"samalba/my-app","Action":"pull"},{"Type":"repository","Name":"samalba/my-app","Action":"push"}]}]} +``` + +Note the HTTP Response Header indicating the auth challenge: + +``` +Www-Authenticate: Bearer realm="https://auth.docker.io/token",service="registry.docker.io",scope="repository:samalba/my-app:pull,push" +``` + +This format is documented in [Section 3 of RFC 6750: The OAuth 2.0 Authorization Framework: Bearer Token Usage](https://tools.ietf.org/html/rfc6750#section-3) + +This challenge indicates that the registry requires a token issued by the +specified token server and that the request the client is attempting will +need to include sufficient access entries in its claim set. To respond to this +challenge, the client will need to make a `GET` request to the URL +`https://auth.docker.io/token` using the `service` and `scope` values from the +`WWW-Authenticate` header. + +## Requesting a Token + +Defines getting a bearer and refresh token using the token endpoint. + +#### Query Parameters + +
+
+ service +
+
+ The name of the service which hosts the resource. +
+
+ offline_token +
+
+ Whether to return a refresh token along with the bearer token. A refresh + token is capable of getting additional bearer tokens for the same + subject with different scopes. The refresh token does not have an + expiration and should be considered completely opaque to the client. +
+
+ client_id +
+
+ String identifying the client. This client_id does not need + to be registered with the authorization server but should be set to a + meaningful value in order to allow auditing keys created by unregistered + clients. Accepted syntax is defined in + [RFC6749 Appendix A.1](https://tools.ietf.org/html/rfc6749#appendix-A.1). +
+
+ scope +
+
+ The resource in question, formatted as one of the space-delimited + entries from the scope parameters from the WWW-Authenticate header + shown above. This query parameter should be specified multiple times if + there is more than one scope entry from the WWW-Authenticate + header. The above example would be specified as: + scope=repository:samalba/my-app:push. The scope field may + be empty to request a refresh token without providing any resource + permissions to the returned bearer token. +
+
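To make the relationship between the `WWW-Authenticate` challenge and these query parameters concrete, here is a small Go sketch; the function name and arguments are illustrative, and `realm`, `service`, and the scope strings would normally be parsed out of the challenge header.

```go
package main

import (
	"fmt"
	"net/url"
)

// buildTokenURL assembles a token request URL from values taken from the
// registry's WWW-Authenticate challenge. Names here are illustrative.
func buildTokenURL(realm, service string, offline bool, scopes ...string) (string, error) {
	u, err := url.Parse(realm)
	if err != nil {
		return "", err
	}
	q := u.Query()
	q.Set("service", service)
	if offline {
		q.Set("offline_token", "true")
	}
	for _, s := range scopes {
		q.Add("scope", s) // scope may be repeated, once per resource
	}
	u.RawQuery = q.Encode()
	return u.String(), nil
}

func main() {
	u, _ := buildTokenURL("https://auth.docker.io/token", "registry.docker.io",
		false, "repository:samalba/my-app:pull,push")
	fmt.Println(u)
}
```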
+ + +#### Token Response Fields + +
+
+ token +
+
+ An opaque Bearer token that clients should supply to subsequent + requests in the Authorization header. +
+
+ access_token +
+
+ For compatibility with OAuth 2.0, we will also accept token under the name + access_token. At least one of these fields must be specified, but + both may also appear (for compatibility with older clients). When both are specified, + they should be equivalent; if they differ the client's choice is undefined. +
+
+ expires_in +
+
+ (Optional) The duration in seconds since the token was issued that it + will remain valid. When omitted, this defaults to 60 seconds. For + compatibility with older clients, a token should never be returned with + less than 60 seconds to live. +
+
+ issued_at +
+
+ (Optional) The RFC3339-serialized UTC + standard time at which a given token was issued. If issued_at is omitted, the + expiration is from when the token exchange completed. +
+
+ refresh_token +
+
+ (Optional) Token which can be used to get additional access tokens for + the same subject with different scopes. This token should be kept secure + by the client and only sent to the authorization server which issues + bearer tokens. This field will only be set when `offline_token=true` is + provided in the request. +
+
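Because a server may populate `token`, `access_token`, or both, a client can normalize the two after decoding. The package, type, and method names below are assumptions for illustration only.

```go
package registryauth // illustrative package name

// bearerTokenResponse mirrors the fields documented above.
type bearerTokenResponse struct {
	Token        string `json:"token"`
	AccessToken  string `json:"access_token"`
	ExpiresIn    int    `json:"expires_in"`
	IssuedAt     string `json:"issued_at"`
	RefreshToken string `json:"refresh_token"`
}

// bearer prefers the token field and falls back to access_token; the two
// should be equivalent when both are present.
func (r bearerTokenResponse) bearer() string {
	if r.Token != "" {
		return r.Token
	}
	return r.AccessToken
}
```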
+ +#### Example + +For this example, the client makes an HTTP GET request to the following URL: + +``` +https://auth.docker.io/token?service=registry.docker.io&scope=repository:samalba/my-app:pull,push +``` + +The token server should first attempt to authenticate the client using any +authentication credentials provided with the request. From Docker 1.11 the +Docker engine supports both Basic Authentication and [OAuth2](oauth.md) for +getting tokens. Docker 1.10 and before, the registry client in the Docker Engine +only supports Basic Authentication. If an attempt to authenticate to the token +server fails, the token server should return a `401 Unauthorized` response +indicating that the provided credentials are invalid. + +Whether the token server requires authentication is up to the policy of that +access control provider. Some requests may require authentication to determine +access (such as pushing or pulling a private repository) while others may not +(such as pulling from a public repository). + +After authenticating the client (which may simply be an anonymous client if +no attempt was made to authenticate), the token server must next query its +access control list to determine whether the client has the requested scope. In +this example request, if I have authenticated as user `jlhawn`, the token +server will determine what access I have to the repository `samalba/my-app` +hosted by the entity `registry.docker.io`. + +Once the token server has determined what access the client has to the +resources requested in the `scope` parameter, it will take the intersection of +the set of requested actions on each resource and the set of actions that the +client has in fact been granted. If the client only has a subset of the +requested access **it must not be considered an error** as it is not the +responsibility of the token server to indicate authorization errors as part of +this workflow. + +Continuing with the example request, the token server will find that the +client's set of granted access to the repository is `[pull, push]` which when +intersected with the requested access `[pull, push]` yields an equal set. If +the granted access set was found only to be `[pull]` then the intersected set +would only be `[pull]`. If the client has no access to the repository then the +intersected set would be empty, `[]`. + +It is this intersected set of access which is placed in the returned token. 
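One way to picture that intersection step is the small helper below; it is purely illustrative and is not taken from any token server implementation.

```go
package main

import "fmt"

// intersectActions returns the actions present in both the requested and
// granted sets, preserving the requested order.
func intersectActions(requested, granted []string) []string {
	allowed := make(map[string]bool, len(granted))
	for _, a := range granted {
		allowed[a] = true
	}
	var out []string
	for _, a := range requested {
		if allowed[a] {
			out = append(out, a)
		}
	}
	return out
}

func main() {
	// Requested [pull push] against granted [pull] yields [pull]; against
	// granted [pull push] it yields [pull push]; with no grants, [].
	fmt.Println(intersectActions([]string{"pull", "push"}, []string{"pull"}))
}
```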
+ +The server then constructs an implementation-specific token with this +intersected set of access, and returns it to the Docker client to use to +authenticate to the audience service (within the indicated window of time): + +``` +HTTP/1.1 200 OK +Content-Type: application/json + +{"token": "eyJ0eXAiOiJKV1QiLCJhbGciOiJFUzI1NiIsImtpZCI6IlBZWU86VEVXVTpWN0pIOjI2SlY6QVFUWjpMSkMzOlNYVko6WEdIQTozNEYyOjJMQVE6WlJNSzpaN1E2In0.eyJpc3MiOiJhdXRoLmRvY2tlci5jb20iLCJzdWIiOiJqbGhhd24iLCJhdWQiOiJyZWdpc3RyeS5kb2NrZXIuY29tIiwiZXhwIjoxNDE1Mzg3MzE1LCJuYmYiOjE0MTUzODcwMTUsImlhdCI6MTQxNTM4NzAxNSwianRpIjoidFlKQ08xYzZjbnl5N2tBbjBjN3JLUGdiVjFIMWJGd3MiLCJhY2Nlc3MiOlt7InR5cGUiOiJyZXBvc2l0b3J5IiwibmFtZSI6InNhbWFsYmEvbXktYXBwIiwiYWN0aW9ucyI6WyJwdXNoIl19XX0.QhflHPfbd6eVF4lM9bwYpFZIV0PfikbyXuLx959ykRTBpe3CYnzs6YBK8FToVb5R47920PVLrh8zuLzdCr9t3w", "expires_in": 3600,"issued_at": "2009-11-10T23:00:00Z"} +``` + + +## Using the Bearer token + +Once the client has a token, it will try the registry request again with the +token placed in the HTTP `Authorization` header like so: + +``` +Authorization: Bearer eyJ0eXAiOiJKV1QiLCJhbGciOiJFUzI1NiIsImtpZCI6IkJWM0Q6MkFWWjpVQjVaOktJQVA6SU5QTDo1RU42Ok40SjQ6Nk1XTzpEUktFOkJWUUs6M0ZKTDpQT1RMIn0.eyJpc3MiOiJhdXRoLmRvY2tlci5jb20iLCJzdWIiOiJCQ0NZOk9VNlo6UUVKNTpXTjJDOjJBVkM6WTdZRDpBM0xZOjQ1VVc6NE9HRDpLQUxMOkNOSjU6NUlVTCIsImF1ZCI6InJlZ2lzdHJ5LmRvY2tlci5jb20iLCJleHAiOjE0MTUzODczMTUsIm5iZiI6MTQxNTM4NzAxNSwiaWF0IjoxNDE1Mzg3MDE1LCJqdGkiOiJ0WUpDTzFjNmNueXk3a0FuMGM3cktQZ2JWMUgxYkZ3cyIsInNjb3BlIjoiamxoYXduOnJlcG9zaXRvcnk6c2FtYWxiYS9teS1hcHA6cHVzaCxwdWxsIGpsaGF3bjpuYW1lc3BhY2U6c2FtYWxiYTpwdWxsIn0.Y3zZSwaZPqy4y9oRBVRImZyv3m_S9XDHF1tWwN7mL52C_IiA73SJkWVNsvNqpJIn5h7A2F8biv_S2ppQ1lgkbw +``` + +This is also described in [Section 2.1 of RFC 6750: The OAuth 2.0 Authorization Framework: Bearer Token Usage](https://tools.ietf.org/html/rfc6750#section-2.1) diff --git a/vendor/github.com/docker/distribution/docs/spec/implementations.md b/vendor/github.com/docker/distribution/docs/spec/implementations.md new file mode 100644 index 000000000..347465350 --- /dev/null +++ b/vendor/github.com/docker/distribution/docs/spec/implementations.md @@ -0,0 +1,30 @@ +--- +published: false +--- + +# Distribution API Implementations + +This is a list of known implementations of the Distribution API spec. + +## [Docker Distribution Registry](https://github.com/docker/distribution) + +Docker distribution is the reference implementation of the distribution API +specification. It aims to fully implement the entire specification. 
+ +### Releases +#### 2.0.1 (_in development_) +Implements API 2.0.1 + +_Known Issues_ + - No resumable push support + - Content ranges ignored + - Blob upload status will always return a starting range of 0 + +#### 2.0.0 +Implements API 2.0.0 + +_Known Issues_ + - No resumable push support + - No PATCH implementation for blob upload + - Content ranges ignored + diff --git a/vendor/github.com/docker/distribution/docs/spec/index.md b/vendor/github.com/docker/distribution/docs/spec/index.md new file mode 100644 index 000000000..952ebabd2 --- /dev/null +++ b/vendor/github.com/docker/distribution/docs/spec/index.md @@ -0,0 +1,12 @@ +--- +title: "Reference Overview" +description: "Explains registry JSON objects" +keywords: ["registry, service, images, repository, json"] +--- + +# Docker Registry Reference + +* [HTTP API V2](api.md) +* [Storage Driver](../storage-drivers/index.md) +* [Token Authentication Specification](auth/token.md) +* [Token Authentication Implementation](auth/jwt.md) diff --git a/vendor/github.com/docker/distribution/docs/spec/json.md b/vendor/github.com/docker/distribution/docs/spec/json.md new file mode 100644 index 000000000..825b17ac2 --- /dev/null +++ b/vendor/github.com/docker/distribution/docs/spec/json.md @@ -0,0 +1,90 @@ +--- +published: false +title: "Docker Distribution JSON Canonicalization" +description: "Explains registry JSON objects" +keywords: ["registry, service, images, repository, json"] +--- + + + +# Docker Distribution JSON Canonicalization + +To provide consistent content hashing of JSON objects throughout Docker +Distribution APIs, the specification defines a canonical JSON format. Adopting +such a canonicalization also aids in caching JSON responses. + +Note that protocols should not be designed to depend on identical JSON being +generated across different versions or clients. The canonicalization rules are +merely useful for caching and consistency. + +## Rules + +Compliant JSON should conform to the following rules: + +1. All generated JSON should comply with [RFC + 7159](http://www.ietf.org/rfc/rfc7159.txt). +2. Resulting "JSON text" shall always be encoded in UTF-8. +3. Unless a canonical key order is defined for a particular schema, object + keys shall always appear in lexically sorted order. +4. All whitespace between tokens should be removed. +5. No "trailing commas" are allowed in object or array definitions. +6. The angle brackets "<" and ">" are escaped to "\u003c" and "\u003e". + Ampersand "&" is escaped to "\u0026". + +## Examples + +The following is a simple example of a canonicalized JSON string: + +```json +{"asdf":1,"qwer":[],"zxcv":[{},true,1000000000,"tyui"]} +``` + +## Reference + +### Other Canonicalizations + +The OLPC project specifies [Canonical +JSON](http://wiki.laptop.org/go/Canonical_JSON). While this is used in +[TUF](http://theupdateframework.com/), which may be used with other +distribution-related protocols, this alternative format has been proposed in +case the original source changes. Specifications complying with either this +specification or an alternative should explicitly call out the +canonicalization format. Except for key ordering, this specification is mostly +compatible. + +### Go + +In Go, the [`encoding/json`](http://golang.org/pkg/encoding/json/) library +will emit canonical JSON by default. 
Simply using `json.Marshal` will suffice +in most cases: + +```go +incoming := map[string]interface{}{ + "asdf": 1, + "qwer": []interface{}{}, + "zxcv": []interface{}{ + map[string]interface{}{}, + true, + int(1e9), + "tyui", + }, +} + +canonical, err := json.Marshal(incoming) +if err != nil { + // ... handle error +} +``` + +To apply canonical JSON format spacing to an existing serialized JSON buffer, one +can use +[`json.Indent`](http://golang.org/src/encoding/json/indent.go?s=1918:1989#L65) +with the following arguments: + +```go +incoming := getBytes() +var canonical bytes.Buffer +if err := json.Indent(&canonical, incoming, "", ""); err != nil { + // ... handle error +} +``` diff --git a/vendor/github.com/docker/distribution/docs/spec/manifest-v2-1.md b/vendor/github.com/docker/distribution/docs/spec/manifest-v2-1.md new file mode 100644 index 000000000..b06c2f2a9 --- /dev/null +++ b/vendor/github.com/docker/distribution/docs/spec/manifest-v2-1.md @@ -0,0 +1,163 @@ +--- +title: "Image Manifest V 2, Schema 1 " +description: "image manifest for the Registry." +keywords: ["registry, on-prem, images, tags, repository, distribution, api, advanced, manifest"] +--- + +# Image Manifest Version 2, Schema 1 + +This document outlines the format of of the V2 image manifest. The image +manifest described herein was introduced in the Docker daemon in the [v1.3.0 +release](https://github.com/docker/docker/commit/9f482a66ab37ec396ac61ed0c00d59122ac07453). +It is a provisional manifest to provide a compatibility with the [V1 Image +format](https://github.com/docker/docker/blob/master/image/spec/v1.md), as the +requirements are defined for the [V2 Schema 2 +image](https://github.com/docker/distribution/pull/62). + + +Image manifests describe the various constituents of a docker image. Image +manifests can be serialized to JSON format with the following media types: + +Manifest Type | Media Type +------------- | ------------- +manifest | "application/vnd.docker.distribution.manifest.v1+json" +signed manifest | "application/vnd.docker.distribution.manifest.v1+prettyjws" + +*Note that "application/json" will also be accepted for schema 1.* + +References: + + - [Proposal: JSON Registry API V2.1](https://github.com/docker/docker/issues/9015) + - [Proposal: Provenance step 1 - Transform images for validation and verification](https://github.com/docker/docker/issues/8093) + +## *Manifest* Field Descriptions + +Manifest provides the base accessible fields for working with V2 image format + in the registry. + +- **`name`** *string* + + name is the name of the image's repository + +- **`tag`** *string* + + tag is the tag of the image + +- **`architecture`** *string* + + architecture is the host architecture on which this image is intended to + run. This is for information purposes and not currently used by the engine + +- **`fsLayers`** *array* + + fsLayers is a list of filesystem layer blob sums contained in this image. + + An fsLayer is a struct consisting of the following fields + - **`blobSum`** *digest.Digest* + + blobSum is the digest of the referenced filesystem image layer. A + digest must be a sha256 hash. + + +- **`history`** *array* + + history is a list of unstructured historical data for v1 compatibility. It + contains ID of the image layer and ID of the layer's parent layers. + + history is a struct consisting of the following fields + - **`v1Compatibility`** string + + V1Compatibility is the raw V1 compatibility information. This will + contain the JSON object describing the V1 of this image. 
+ +- **`schemaVersion`** *int* + + SchemaVersion is the image manifest schema that this image follows. + +>**Note**:the length of `history` must be equal to the length of `fsLayers` and +>entries in each are correlated by index. + +## Signed Manifests + +Signed manifests provides an envelope for a signed image manifest. A signed +manifest consists of an image manifest along with an additional field +containing the signature of the manifest. + +The docker client can verify signed manifests and displays a message to the user. + +### Signing Manifests + +Image manifests can be signed in two different ways: with a *libtrust* private + key or an x509 certificate chain. When signing with an x509 certificate chain, + the public key of the first element in the chain must be the public key + corresponding with the sign key. + +### Signed Manifest Field Description + +Signed manifests include an image manifest and a list of signatures generated +by *libtrust*. A signature consists of the following fields: + + +- **`header`** *[JOSE](http://tools.ietf.org/html/draft-ietf-jose-json-web-signature-31#section-2)* + + A [JSON Web Signature](http://self-issued.info/docs/draft-ietf-jose-json-web-signature.html) + +- **`signature`** *string* + + A signature for the image manifest, signed by a *libtrust* private key + +- **`protected`** *string* + + The signed protected header + +## Example Manifest + +*Example showing the official 'hello-world' image manifest.* + +``` +{ + "name": "hello-world", + "tag": "latest", + "architecture": "amd64", + "fsLayers": [ + { + "blobSum": "sha256:5f70bf18a086007016e948b04aed3b82103a36bea41755b6cddfaf10ace3c6ef" + }, + { + "blobSum": "sha256:5f70bf18a086007016e948b04aed3b82103a36bea41755b6cddfaf10ace3c6ef" + }, + { + "blobSum": "sha256:cc8567d70002e957612902a8e985ea129d831ebe04057d88fb644857caa45d11" + }, + { + "blobSum": "sha256:5f70bf18a086007016e948b04aed3b82103a36bea41755b6cddfaf10ace3c6ef" + } + ], + "history": [ + { + "v1Compatibility": "{\"id\":\"e45a5af57b00862e5ef5782a9925979a02ba2b12dff832fd0991335f4a11e5c5\",\"parent\":\"31cbccb51277105ba3ae35ce33c22b69c9e3f1002e76e4c736a2e8ebff9d7b5d\",\"created\":\"2014-12-31T22:57:59.178729048Z\",\"container\":\"27b45f8fb11795b52e9605b686159729b0d9ca92f76d40fb4f05a62e19c46b4f\",\"container_config\":{\"Hostname\":\"8ce6509d66e2\",\"Domainname\":\"\",\"User\":\"\",\"Memory\":0,\"MemorySwap\":0,\"CpuShares\":0,\"Cpuset\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"PortSpecs\":null,\"ExposedPorts\":null,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":[\"PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin\"],\"Cmd\":[\"/bin/sh\",\"-c\",\"#(nop) CMD 
[/hello]\"],\"Image\":\"31cbccb51277105ba3ae35ce33c22b69c9e3f1002e76e4c736a2e8ebff9d7b5d\",\"Volumes\":null,\"WorkingDir\":\"\",\"Entrypoint\":null,\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":[],\"SecurityOpt\":null,\"Labels\":null},\"docker_version\":\"1.4.1\",\"config\":{\"Hostname\":\"8ce6509d66e2\",\"Domainname\":\"\",\"User\":\"\",\"Memory\":0,\"MemorySwap\":0,\"CpuShares\":0,\"Cpuset\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"PortSpecs\":null,\"ExposedPorts\":null,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":[\"PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin\"],\"Cmd\":[\"/hello\"],\"Image\":\"31cbccb51277105ba3ae35ce33c22b69c9e3f1002e76e4c736a2e8ebff9d7b5d\",\"Volumes\":null,\"WorkingDir\":\"\",\"Entrypoint\":null,\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":[],\"SecurityOpt\":null,\"Labels\":null},\"architecture\":\"amd64\",\"os\":\"linux\",\"Size\":0}\n" + }, + { + "v1Compatibility": "{\"id\":\"e45a5af57b00862e5ef5782a9925979a02ba2b12dff832fd0991335f4a11e5c5\",\"parent\":\"31cbccb51277105ba3ae35ce33c22b69c9e3f1002e76e4c736a2e8ebff9d7b5d\",\"created\":\"2014-12-31T22:57:59.178729048Z\",\"container\":\"27b45f8fb11795b52e9605b686159729b0d9ca92f76d40fb4f05a62e19c46b4f\",\"container_config\":{\"Hostname\":\"8ce6509d66e2\",\"Domainname\":\"\",\"User\":\"\",\"Memory\":0,\"MemorySwap\":0,\"CpuShares\":0,\"Cpuset\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"PortSpecs\":null,\"ExposedPorts\":null,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":[\"PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin\"],\"Cmd\":[\"/bin/sh\",\"-c\",\"#(nop) CMD [/hello]\"],\"Image\":\"31cbccb51277105ba3ae35ce33c22b69c9e3f1002e76e4c736a2e8ebff9d7b5d\",\"Volumes\":null,\"WorkingDir\":\"\",\"Entrypoint\":null,\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":[],\"SecurityOpt\":null,\"Labels\":null},\"docker_version\":\"1.4.1\",\"config\":{\"Hostname\":\"8ce6509d66e2\",\"Domainname\":\"\",\"User\":\"\",\"Memory\":0,\"MemorySwap\":0,\"CpuShares\":0,\"Cpuset\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"PortSpecs\":null,\"ExposedPorts\":null,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":[\"PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin\"],\"Cmd\":[\"/hello\"],\"Image\":\"31cbccb51277105ba3ae35ce33c22b69c9e3f1002e76e4c736a2e8ebff9d7b5d\",\"Volumes\":null,\"WorkingDir\":\"\",\"Entrypoint\":null,\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":[],\"SecurityOpt\":null,\"Labels\":null},\"architecture\":\"amd64\",\"os\":\"linux\",\"Size\":0}\n" + }, + ], + "schemaVersion": 1, + "signatures": [ + { + "header": { + "jwk": { + "crv": "P-256", + "kid": "OD6I:6DRK:JXEJ:KBM4:255X:NSAA:MUSF:E4VM:ZI6W:CUN2:L4Z6:LSF4", + "kty": "EC", + "x": "3gAwX48IQ5oaYQAYSxor6rYYc_6yjuLCjtQ9LUakg4A", + "y": "t72ge6kIA1XOjqjVoEOiPPAURltJFBMGDSQvEGVB010" + }, + "alg": "ES256" + }, + "signature": "XREm0L8WNn27Ga_iE_vRnTxVMhhYY0Zst_FfkKopg6gWSoTOZTuW4rK0fg_IqnKkEKlbD83tD46LKEGi5aIVFg", + "protected": "eyJmb3JtYXRMZW5ndGgiOjY2MjgsImZvcm1hdFRhaWwiOiJDbjAiLCJ0aW1lIjoiMjAxNS0wNC0wOFQxODo1Mjo1OVoifQ" + } + ] +} + +``` diff --git a/vendor/github.com/docker/distribution/docs/spec/manifest-v2-2.md b/vendor/github.com/docker/distribution/docs/spec/manifest-v2-2.md new file mode 100644 index 000000000..4b41bb28c --- /dev/null +++ b/vendor/github.com/docker/distribution/docs/spec/manifest-v2-2.md @@ -0,0 +1,295 @@ +--- 
+title: "Image Manifest V 2, Schema 2 " +description: "image manifest for the Registry." +keywords: ["registry, on-prem, images, tags, repository, distribution, api, advanced, manifest"] +--- + +# Image Manifest Version 2, Schema 2 + +This document outlines the format of of the V2 image manifest, schema version 2. +The original (and provisional) image manifest for V2 (schema 1), was introduced +in the Docker daemon in the [v1.3.0 +release](https://github.com/docker/docker/commit/9f482a66ab37ec396ac61ed0c00d59122ac07453) +and is specified in the [schema 1 manifest definition](manifest-v2-1.md) + +This second schema version has two primary goals. The first is to allow +multi-architecture images, through a "fat manifest" which references image +manifests for platform-specific versions of an image. The second is to +move the Docker engine towards content-addressable images, by supporting +an image model where the image's configuration can be hashed to generate +an ID for the image. + +# Media Types + +The following media types are used by the manifest formats described here, and +the resources they reference: + +- `application/vnd.docker.distribution.manifest.v1+json`: schema1 (existing manifest format) +- `application/vnd.docker.distribution.manifest.v2+json`: New image manifest format (schemaVersion = 2) +- `application/vnd.docker.distribution.manifest.list.v2+json`: Manifest list, aka "fat manifest" +- `application/vnd.docker.container.image.v1+json`: Container config JSON +- `application/vnd.docker.image.rootfs.diff.tar.gzip`: "Layer", as a gzipped tar +- `application/vnd.docker.image.rootfs.foreign.diff.tar.gzip`: "Layer", as a gzipped tar that should never be pushed +- `application/vnd.docker.plugin.v1+json`: Plugin config JSON + +## Manifest List + +The manifest list is the "fat manifest" which points to specific image manifests +for one or more platforms. Its use is optional, and relatively few images will +use one of these manifests. A client will distinguish a manifest list from an +image manifest based on the Content-Type returned in the HTTP response. + +## *Manifest List* Field Descriptions + +- **`schemaVersion`** *int* + + This field specifies the image manifest schema version as an integer. This + schema uses the version `2`. + +- **`mediaType`** *string* + + The MIME type of the manifest list. This should be set to + `application/vnd.docker.distribution.manifest.list.v2+json`. + +- **`manifests`** *array* + + The manifests field contains a list of manifests for specific platforms. + + Fields of an object in the manifests list are: + + - **`mediaType`** *string* + + The MIME type of the referenced object. This will generally be + `application/vnd.docker.image.manifest.v2+json`, but it could also + be `application/vnd.docker.image.manifest.v1+json` if the manifest + list references a legacy schema-1 manifest. + + - **`size`** *int* + + The size in bytes of the object. This field exists so that a client + will have an expected size for the content before validating. If the + length of the retrieved content does not match the specified length, + the content should not be trusted. + + - **`digest`** *string* + + The digest of the content, as defined by the + [Registry V2 HTTP API Specificiation](api.md#digest-parameter). + + - **`platform`** *object* + + The platform object describes the platform which the image in the + manifest runs on. 
A full list of valid operating system and architecture + values are listed in the [Go language documentation for `$GOOS` and + `$GOARCH`](https://golang.org/doc/install/source#environment) + + - **`architecture`** *string* + + The architecture field specifies the CPU architecture, for example + `amd64` or `ppc64le`. + + - **`os`** *string* + + The os field specifies the operating system, for example + `linux` or `windows`. + + - **`os.version`** *string* + + The optional os.version field specifies the operating system version, + for example `10.0.10586`. + + - **`os.features`** *array* + + The optional os.features field specifies an array of strings, + each listing a required OS feature (for example on Windows + `win32k`). + + - **`variant`** *string* + + The optional variant field specifies a variant of the CPU, for + example `armv6l` to specify a particular CPU variant of the ARM CPU. + + - **`features`** *array* + + The optional features field specifies an array of strings, each + listing a required CPU feature (for example `sse4` or `aes`). + +## Example Manifest List + +*Example showing a simple manifest list pointing to image manifests for two platforms:* +```json +{ + "schemaVersion": 2, + "mediaType": "application/vnd.docker.distribution.manifest.list.v2+json", + "manifests": [ + { + "mediaType": "application/vnd.docker.image.manifest.v2+json", + "size": 7143, + "digest": "sha256:e692418e4cbaf90ca69d05a66403747baa33ee08806650b51fab815ad7fc331f", + "platform": { + "architecture": "ppc64le", + "os": "linux", + } + }, + { + "mediaType": "application/vnd.docker.image.manifest.v2+json", + "size": 7682, + "digest": "sha256:5b0bcabd1ed22e9fb1310cf6c2dec7cdef19f0ad69efa1f392e94a4333501270", + "platform": { + "architecture": "amd64", + "os": "linux", + "features": [ + "sse4" + ] + } + } + ] +} +``` + +# Image Manifest + +The image manifest provides a configuration and a set of layers for a container +image. It's the direct replacement for the schema-1 manifest. + +## *Image Manifest* Field Descriptions + +- **`schemaVersion`** *int* + + This field specifies the image manifest schema version as an integer. This + schema uses version `2`. + +- **`mediaType`** *string* + + The MIME type of the manifest. This should be set to + `application/vnd.docker.distribution.manifest.v2+json`. + +- **`config`** *object* + + The config field references a configuration object for a container, by + digest. This configuration item is a JSON blob that the runtime uses + to set up the container. This new schema uses a tweaked version + of this configuration to allow image content-addressability on the + daemon side. + + Fields of a config object are: + + - **`mediaType`** *string* + + The MIME type of the referenced object. This should generally be + `application/vnd.docker.container.image.v1+json`. + + - **`size`** *int* + + The size in bytes of the object. This field exists so that a client + will have an expected size for the content before validating. If the + length of the retrieved content does not match the specified length, + the content should not be trusted. + + - **`digest`** *string* + + The digest of the content, as defined by the + [Registry V2 HTTP API Specificiation](api.md#digest-parameter). + +- **`layers`** *array* + + The layer list is ordered starting from the base image (opposite order of schema1). + + Fields of an item in the layers list are: + + - **`mediaType`** *string* + + The MIME type of the referenced object. 
This should + generally be `application/vnd.docker.image.rootfs.diff.tar.gzip`. + Layers of type + `application/vnd.docker.image.rootfs.foreign.diff.tar.gzip` may be + pulled from a remote location but they should never be pushed. + + - **`size`** *int* + + The size in bytes of the object. This field exists so that a client + will have an expected size for the content before validating. If the + length of the retrieved content does not match the specified length, + the content should not be trusted. + + - **`digest`** *string* + + The digest of the content, as defined by the + [Registry V2 HTTP API Specificiation](api.md#digest-parameter). + + - **`urls`** *array* + + Provides a list of URLs from which the content may be fetched. Content + should be verified against the `digest` and `size`. This field is + optional and uncommon. + +## Example Image Manifest + +*Example showing an image manifest:* +```json +{ + "schemaVersion": 2, + "mediaType": "application/vnd.docker.distribution.manifest.v2+json", + "config": { + "mediaType": "application/vnd.docker.container.image.v1+json", + "size": 7023, + "digest": "sha256:b5b2b2c507a0944348e0303114d8d93aaaa081732b86451d9bce1f432a537bc7" + }, + "layers": [ + { + "mediaType": "application/vnd.docker.image.rootfs.diff.tar.gzip", + "size": 32654, + "digest": "sha256:e692418e4cbaf90ca69d05a66403747baa33ee08806650b51fab815ad7fc331f" + }, + { + "mediaType": "application/vnd.docker.image.rootfs.diff.tar.gzip", + "size": 16724, + "digest": "sha256:3c3a4604a545cdc127456d94e421cd355bca5b528f4a9c1905b15da2eb4a4c6b" + }, + { + "mediaType": "application/vnd.docker.image.rootfs.diff.tar.gzip", + "size": 73109, + "digest": "sha256:ec4b8955958665577945c89419d1af06b5f7636b4ac3da7f12184802ad867736" + } + ] +} +``` + +# Backward compatibility + +The registry will continue to accept uploads of manifests in both the old and +new formats. + +When pushing images, clients which support the new manifest format should first +construct a manifest in the new format. If uploading this manifest fails, +presumably because the registry only supports the old format, the client may +fall back to uploading a manifest in the old format. + +When pulling images, clients indicate support for this new version of the +manifest format by sending the +`application/vnd.docker.distribution.manifest.v2+json` and +`application/vnd.docker.distribution.manifest.list.v2+json` media types in an +`Accept` header when making a request to the `manifests` endpoint. Updated +clients should check the `Content-Type` header to see whether the manifest +returned from the endpoint is in the old format, or is an image manifest or +manifest list in the new format. + +If the manifest being requested uses the new format, and the appropriate media +type is not present in an `Accept` header, the registry will assume that the +client cannot handle the manifest as-is, and rewrite it on the fly into the old +format. If the object that would otherwise be returned is a manifest list, the +registry will look up the appropriate manifest for the amd64 platform and +linux OS, rewrite that manifest into the old format if necessary, and return +the result to the client. If no suitable manifest is found in the manifest +list, the registry will return a 404 error. + +One of the challenges in rewriting manifests to the old format is that the old +format involves an image configuration for each layer in the manifest, but the +new format only provides one image configuration. 
To work around this, the +registry will create synthetic image configurations for all layers except the +top layer. These image configurations will not result in runnable images on +their own, but only serve to fill in the parent chain in a compatible way. +The IDs in these synthetic configurations will be derived from hashes of their +respective blobs. The registry will create these configurations and their IDs +using the same scheme as Docker 1.10 when it creates a legacy manifest to push +to a registry which doesn't support the new format. diff --git a/vendor/github.com/docker/distribution/docs/spec/menu.md b/vendor/github.com/docker/distribution/docs/spec/menu.md new file mode 100644 index 000000000..9237e3ce8 --- /dev/null +++ b/vendor/github.com/docker/distribution/docs/spec/menu.md @@ -0,0 +1,7 @@ +--- +title: "Reference" +description: "Explains registry JSON objects" +keywords: ["registry, service, images, repository, json"] +type: "menu" +identifier: "smn_registry_ref" +--- diff --git a/vendor/github.com/docker/distribution/errors.go b/vendor/github.com/docker/distribution/errors.go new file mode 100644 index 000000000..c20f28113 --- /dev/null +++ b/vendor/github.com/docker/distribution/errors.go @@ -0,0 +1,115 @@ +package distribution + +import ( + "errors" + "fmt" + "strings" + + "github.com/docker/distribution/digest" +) + +// ErrAccessDenied is returned when an access to a requested resource is +// denied. +var ErrAccessDenied = errors.New("access denied") + +// ErrManifestNotModified is returned when a conditional manifest GetByTag +// returns nil due to the client indicating it has the latest version +var ErrManifestNotModified = errors.New("manifest not modified") + +// ErrUnsupported is returned when an unimplemented or unsupported action is +// performed +var ErrUnsupported = errors.New("operation unsupported") + +// ErrTagUnknown is returned if the given tag is not known by the tag service +type ErrTagUnknown struct { + Tag string +} + +func (err ErrTagUnknown) Error() string { + return fmt.Sprintf("unknown tag=%s", err.Tag) +} + +// ErrRepositoryUnknown is returned if the named repository is not known by +// the registry. +type ErrRepositoryUnknown struct { + Name string +} + +func (err ErrRepositoryUnknown) Error() string { + return fmt.Sprintf("unknown repository name=%s", err.Name) +} + +// ErrRepositoryNameInvalid should be used to denote an invalid repository +// name. Reason may set, indicating the cause of invalidity. +type ErrRepositoryNameInvalid struct { + Name string + Reason error +} + +func (err ErrRepositoryNameInvalid) Error() string { + return fmt.Sprintf("repository name %q invalid: %v", err.Name, err.Reason) +} + +// ErrManifestUnknown is returned if the manifest is not known by the +// registry. +type ErrManifestUnknown struct { + Name string + Tag string +} + +func (err ErrManifestUnknown) Error() string { + return fmt.Sprintf("unknown manifest name=%s tag=%s", err.Name, err.Tag) +} + +// ErrManifestUnknownRevision is returned when a manifest cannot be found by +// revision within a repository. +type ErrManifestUnknownRevision struct { + Name string + Revision digest.Digest +} + +func (err ErrManifestUnknownRevision) Error() string { + return fmt.Sprintf("unknown manifest name=%s revision=%s", err.Name, err.Revision) +} + +// ErrManifestUnverified is returned when the registry is unable to verify +// the manifest. 
+type ErrManifestUnverified struct{} + +func (ErrManifestUnverified) Error() string { + return fmt.Sprintf("unverified manifest") +} + +// ErrManifestVerification provides a type to collect errors encountered +// during manifest verification. Currently, it accepts errors of all types, +// but it may be narrowed to those involving manifest verification. +type ErrManifestVerification []error + +func (errs ErrManifestVerification) Error() string { + var parts []string + for _, err := range errs { + parts = append(parts, err.Error()) + } + + return fmt.Sprintf("errors verifying manifest: %v", strings.Join(parts, ",")) +} + +// ErrManifestBlobUnknown returned when a referenced blob cannot be found. +type ErrManifestBlobUnknown struct { + Digest digest.Digest +} + +func (err ErrManifestBlobUnknown) Error() string { + return fmt.Sprintf("unknown blob %v on manifest", err.Digest) +} + +// ErrManifestNameInvalid should be used to denote an invalid manifest +// name. Reason may set, indicating the cause of invalidity. +type ErrManifestNameInvalid struct { + Name string + Reason error +} + +func (err ErrManifestNameInvalid) Error() string { + return fmt.Sprintf("manifest name %q invalid: %v", err.Name, err.Reason) +} diff --git a/vendor/github.com/docker/distribution/health/api/api.go b/vendor/github.com/docker/distribution/health/api/api.go new file mode 100644 index 000000000..73fcc4535 --- /dev/null +++ b/vendor/github.com/docker/distribution/health/api/api.go @@ -0,0 +1,37 @@ +package api + +import ( + "errors" + "net/http" + + "github.com/docker/distribution/health" +) + +var ( + updater = health.NewStatusUpdater() +) + +// DownHandler registers a manual_http_status that always returns an Error +func DownHandler(w http.ResponseWriter, r *http.Request) { + if r.Method == "POST" { + updater.Update(errors.New("Manual Check")) + } else { + w.WriteHeader(http.StatusNotFound) + } +} + +// UpHandler registers a manual_http_status that always returns nil +func UpHandler(w http.ResponseWriter, r *http.Request) { + if r.Method == "POST" { + updater.Update(nil) + } else { + w.WriteHeader(http.StatusNotFound) + } +} + +// init sets up the two endpoints to bring the service up and down +func init() { + health.Register("manual_http_status", updater) + http.HandleFunc("/debug/health/down", DownHandler) + http.HandleFunc("/debug/health/up", UpHandler) +} diff --git a/vendor/github.com/docker/distribution/health/api/api_test.go b/vendor/github.com/docker/distribution/health/api/api_test.go new file mode 100644 index 000000000..ec82154f6 --- /dev/null +++ b/vendor/github.com/docker/distribution/health/api/api_test.go @@ -0,0 +1,86 @@ +package api + +import ( + "net/http" + "net/http/httptest" + "testing" + + "github.com/docker/distribution/health" +) + +// TestGETDownHandlerDoesNotChangeStatus ensures that calling the endpoint +// /debug/health/down with METHOD GET returns a 404 +func TestGETDownHandlerDoesNotChangeStatus(t *testing.T) { + recorder := httptest.NewRecorder() + + req, err := http.NewRequest("GET", "https://fakeurl.com/debug/health/down", nil) + if err != nil { + t.Errorf("Failed to create request.") + } + + DownHandler(recorder, req) + + if recorder.Code != 404 { + t.Errorf("Did not get a 404.") + } +} + +// TestGETUpHandlerDoesNotChangeStatus ensures that calling the endpoint +// /debug/health/down with METHOD GET returns a 404 +func TestGETUpHandlerDoesNotChangeStatus(t *testing.T) { + recorder := httptest.NewRecorder() + + req, err := http.NewRequest("GET", "https://fakeurl.com/debug/health/up", 
nil) + if err != nil { + t.Errorf("Failed to create request.") + } + + DownHandler(recorder, req) + + if recorder.Code != 404 { + t.Errorf("Did not get a 404.") + } +} + +// TestPOSTDownHandlerChangeStatus ensures the endpoint /debug/health/down changes +// the status code of the response to 503 +// This test is order dependent, and should come before TestPOSTUpHandlerChangeStatus +func TestPOSTDownHandlerChangeStatus(t *testing.T) { + recorder := httptest.NewRecorder() + + req, err := http.NewRequest("POST", "https://fakeurl.com/debug/health/down", nil) + if err != nil { + t.Errorf("Failed to create request.") + } + + DownHandler(recorder, req) + + if recorder.Code != 200 { + t.Errorf("Did not get a 200.") + } + + if len(health.CheckStatus()) != 1 { + t.Errorf("DownHandler didn't add an error check.") + } +} + +// TestPOSTUpHandlerChangeStatus ensures the endpoint /debug/health/up changes +// the status code of the response to 200 +func TestPOSTUpHandlerChangeStatus(t *testing.T) { + recorder := httptest.NewRecorder() + + req, err := http.NewRequest("POST", "https://fakeurl.com/debug/health/up", nil) + if err != nil { + t.Errorf("Failed to create request.") + } + + UpHandler(recorder, req) + + if recorder.Code != 200 { + t.Errorf("Did not get a 200.") + } + + if len(health.CheckStatus()) != 0 { + t.Errorf("UpHandler didn't remove the error check.") + } +} diff --git a/vendor/github.com/docker/distribution/health/checks/checks.go b/vendor/github.com/docker/distribution/health/checks/checks.go new file mode 100644 index 000000000..e3c3b08d3 --- /dev/null +++ b/vendor/github.com/docker/distribution/health/checks/checks.go @@ -0,0 +1,62 @@ +package checks + +import ( + "errors" + "net" + "net/http" + "os" + "strconv" + "time" + + "github.com/docker/distribution/health" +) + +// FileChecker checks the existence of a file and returns an error +// if the file exists. +func FileChecker(f string) health.Checker { + return health.CheckFunc(func() error { + if _, err := os.Stat(f); err == nil { + return errors.New("file exists") + } + return nil + }) +} + +// HTTPChecker does a HEAD request and verifies that the HTTP status code +// returned matches statusCode. +func HTTPChecker(r string, statusCode int, timeout time.Duration, headers http.Header) health.Checker { + return health.CheckFunc(func() error { + client := http.Client{ + Timeout: timeout, + } + req, err := http.NewRequest("HEAD", r, nil) + if err != nil { + return errors.New("error creating request: " + r) + } + for headerName, headerValues := range headers { + for _, headerValue := range headerValues { + req.Header.Add(headerName, headerValue) + } + } + response, err := client.Do(req) + if err != nil { + return errors.New("error while checking: " + r) + } + if response.StatusCode != statusCode { + return errors.New("downstream service returned unexpected status: " + strconv.Itoa(response.StatusCode)) + } + return nil + }) +} + +// TCPChecker attempts to open a TCP connection. 
+func TCPChecker(addr string, timeout time.Duration) health.Checker { + return health.CheckFunc(func() error { + conn, err := net.DialTimeout("tcp", addr, timeout) + if err != nil { + return errors.New("connection to " + addr + " failed") + } + conn.Close() + return nil + }) +} diff --git a/vendor/github.com/docker/distribution/health/checks/checks_test.go b/vendor/github.com/docker/distribution/health/checks/checks_test.go new file mode 100644 index 000000000..6b6dd14fa --- /dev/null +++ b/vendor/github.com/docker/distribution/health/checks/checks_test.go @@ -0,0 +1,25 @@ +package checks + +import ( + "testing" +) + +func TestFileChecker(t *testing.T) { + if err := FileChecker("/tmp").Check(); err == nil { + t.Errorf("/tmp was expected as exists") + } + + if err := FileChecker("NoSuchFileFromMoon").Check(); err != nil { + t.Errorf("NoSuchFileFromMoon was expected as not exists, error:%v", err) + } +} + +func TestHTTPChecker(t *testing.T) { + if err := HTTPChecker("https://www.google.cybertron", 200, 0, nil).Check(); err == nil { + t.Errorf("Google on Cybertron was expected as not exists") + } + + if err := HTTPChecker("https://www.google.pt", 200, 0, nil).Check(); err != nil { + t.Errorf("Google at Portugal was expected as exists, error:%v", err) + } +} diff --git a/vendor/github.com/docker/distribution/health/doc.go b/vendor/github.com/docker/distribution/health/doc.go new file mode 100644 index 000000000..8c106b42b --- /dev/null +++ b/vendor/github.com/docker/distribution/health/doc.go @@ -0,0 +1,130 @@ +// Package health provides a generic health checking framework. +// The health package works expvar style. By importing the package the debug +// server is getting a "/debug/health" endpoint that returns the current +// status of the application. +// If there are no errors, "/debug/health" will return an HTTP 200 status, +// together with an empty JSON reply "{}". If there are any checks +// with errors, the JSON reply will include all the failed checks, and the +// response will be have an HTTP 503 status. +// +// A Check can either be run synchronously, or asynchronously. We recommend +// that most checks are registered as an asynchronous check, so a call to the +// "/debug/health" endpoint always returns immediately. This pattern is +// particularly useful for checks that verify upstream connectivity or +// database status, since they might take a long time to return/timeout. +// +// Installing +// +// To install health, just import it in your application: +// +// import "github.com/docker/distribution/health" +// +// You can also (optionally) import "health/api" that will add two convenience +// endpoints: "/debug/health/down" and "/debug/health/up". These endpoints add +// "manual" checks that allow the service to quickly be brought in/out of +// rotation. +// +// import _ "github.com/docker/distribution/registry/health/api" +// +// # curl localhost:5001/debug/health +// {} +// # curl -X POST localhost:5001/debug/health/down +// # curl localhost:5001/debug/health +// {"manual_http_status":"Manual Check"} +// +// After importing these packages to your main application, you can start +// registering checks. +// +// Registering Checks +// +// The recommended way of registering checks is using a periodic Check. +// PeriodicChecks run on a certain schedule and asynchronously update the +// status of the check. This allows CheckStatus to return without blocking +// on an expensive check. 
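+// Periodic checks are registered with RegisterPeriodicFunc or
+// RegisterPeriodicThresholdFunc, which take a check name, the check function
+// and the period on which it should run.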
+// +// A trivial example of a check that runs every 5 seconds and shuts down our +// server if the current minute is even, could be added as follows: +// +// func currentMinuteEvenCheck() error { +// m := time.Now().Minute() +// if m%2 == 0 { +// return errors.New("Current minute is even!") +// } +// return nil +// } +// +// health.RegisterPeriodicFunc("minute_even", currentMinuteEvenCheck, time.Second*5) +// +// Alternatively, you can also make use of "RegisterPeriodicThresholdFunc" to +// implement the exact same check, but add a threshold of failures after which +// the check will be unhealthy. This is particularly useful for flaky Checks, +// ensuring some stability of the service when handling them. +// +// health.RegisterPeriodicThresholdFunc("minute_even", currentMinuteEvenCheck, time.Second*5, 4) +// +// The lowest-level way to interact with the health package is calling +// "Register" directly. Register allows you to pass in an arbitrary string and +// something that implements "Checker" and runs your check. If your method +// returns an error with nil, it is considered a healthy check, otherwise it +// will make the health check endpoint "/debug/health" start returning a 503 +// and list the specific check that failed. +// +// Assuming you wish to register a method called "currentMinuteEvenCheck() +// error" you could do that by doing: +// +// health.Register("even_minute", health.CheckFunc(currentMinuteEvenCheck)) +// +// CheckFunc is a convenience type that implements Checker. +// +// Another way of registering a check could be by using an anonymous function +// and the convenience method RegisterFunc. An example that makes the status +// endpoint always return an error: +// +// health.RegisterFunc("my_check", func() error { +// return Errors.new("This is an error!") +// })) +// +// Examples +// +// You could also use the health checker mechanism to ensure your application +// only comes up if certain conditions are met, or to allow the developer to +// take the service out of rotation immediately. An example that checks +// database connectivity and immediately takes the server out of rotation on +// err: +// +// updater = health.NewStatusUpdater() +// health.RegisterFunc("database_check", func() error { +// return updater.Check() +// })) +// +// conn, err := Connect(...) // database call here +// if err != nil { +// updater.Update(errors.New("Error connecting to the database: " + err.Error())) +// } +// +// You can also use the predefined Checkers that come included with the health +// package. First, import the checks: +// +// import "github.com/docker/distribution/health/checks +// +// After that you can make use of any of the provided checks. 
An example of +// using a `FileChecker` to take the application out of rotation if a certain +// file exists can be done as follows: +// +// health.Register("fileChecker", health.PeriodicChecker(checks.FileChecker("/tmp/disable"), time.Second*5)) +// +// After registering the check, it is trivial to take an application out of +// rotation from the console: +// +// # curl localhost:5001/debug/health +// {} +// # touch /tmp/disable +// # curl localhost:5001/debug/health +// {"fileChecker":"file exists"} +// +// You could also test the connectivity to a downstream service by using a +// "HTTPChecker", but ensure that you only mark the test unhealthy if there +// are a minimum of two failures in a row: +// +// health.Register("httpChecker", health.PeriodicThresholdChecker(checks.HTTPChecker("https://www.google.pt"), time.Second*5, 2)) +package health diff --git a/vendor/github.com/docker/distribution/health/health.go b/vendor/github.com/docker/distribution/health/health.go new file mode 100644 index 000000000..220282dcd --- /dev/null +++ b/vendor/github.com/docker/distribution/health/health.go @@ -0,0 +1,306 @@ +package health + +import ( + "encoding/json" + "fmt" + "net/http" + "sync" + "time" + + "github.com/docker/distribution/context" + "github.com/docker/distribution/registry/api/errcode" +) + +// A Registry is a collection of checks. Most applications will use the global +// registry defined in DefaultRegistry. However, unit tests may need to create +// separate registries to isolate themselves from other tests. +type Registry struct { + mu sync.RWMutex + registeredChecks map[string]Checker +} + +// NewRegistry creates a new registry. This isn't necessary for normal use of +// the package, but may be useful for unit tests so individual tests have their +// own set of checks. +func NewRegistry() *Registry { + return &Registry{ + registeredChecks: make(map[string]Checker), + } +} + +// DefaultRegistry is the default registry where checks are registered. It is +// the registry used by the HTTP handler. +var DefaultRegistry *Registry + +// Checker is the interface for a Health Checker +type Checker interface { + // Check returns nil if the service is okay. + Check() error +} + +// CheckFunc is a convenience type to create functions that implement +// the Checker interface +type CheckFunc func() error + +// Check Implements the Checker interface to allow for any func() error method +// to be passed as a Checker +func (cf CheckFunc) Check() error { + return cf() +} + +// Updater implements a health check that is explicitly set. +type Updater interface { + Checker + + // Update updates the current status of the health check. + Update(status error) +} + +// updater implements Checker and Updater, providing an asynchronous Update +// method. +// This allows us to have a Checker that returns the Check() call immediately +// not blocking on a potentially expensive check. +type updater struct { + mu sync.Mutex + status error +} + +// Check implements the Checker interface +func (u *updater) Check() error { + u.mu.Lock() + defer u.mu.Unlock() + + return u.status +} + +// Update implements the Updater interface, allowing asynchronous access to +// the status of a Checker. +func (u *updater) Update(status error) { + u.mu.Lock() + defer u.mu.Unlock() + + u.status = status +} + +// NewStatusUpdater returns a new updater +func NewStatusUpdater() Updater { + return &updater{} +} + +// thresholdUpdater implements Checker and Updater, providing an asynchronous Update +// method. 
+// This allows us to have a Checker that returns the Check() call immediately +// not blocking on a potentially expensive check. +type thresholdUpdater struct { + mu sync.Mutex + status error + threshold int + count int +} + +// Check implements the Checker interface +func (tu *thresholdUpdater) Check() error { + tu.mu.Lock() + defer tu.mu.Unlock() + + if tu.count >= tu.threshold { + return tu.status + } + + return nil +} + +// thresholdUpdater implements the Updater interface, allowing asynchronous +// access to the status of a Checker. +func (tu *thresholdUpdater) Update(status error) { + tu.mu.Lock() + defer tu.mu.Unlock() + + if status == nil { + tu.count = 0 + } else if tu.count < tu.threshold { + tu.count++ + } + + tu.status = status +} + +// NewThresholdStatusUpdater returns a new thresholdUpdater +func NewThresholdStatusUpdater(t int) Updater { + return &thresholdUpdater{threshold: t} +} + +// PeriodicChecker wraps an updater to provide a periodic checker +func PeriodicChecker(check Checker, period time.Duration) Checker { + u := NewStatusUpdater() + go func() { + t := time.NewTicker(period) + for { + <-t.C + u.Update(check.Check()) + } + }() + + return u +} + +// PeriodicThresholdChecker wraps an updater to provide a periodic checker that +// uses a threshold before it changes status +func PeriodicThresholdChecker(check Checker, period time.Duration, threshold int) Checker { + tu := NewThresholdStatusUpdater(threshold) + go func() { + t := time.NewTicker(period) + for { + <-t.C + tu.Update(check.Check()) + } + }() + + return tu +} + +// CheckStatus returns a map with all the current health check errors +func (registry *Registry) CheckStatus() map[string]string { // TODO(stevvooe) this needs a proper type + registry.mu.RLock() + defer registry.mu.RUnlock() + statusKeys := make(map[string]string) + for k, v := range registry.registeredChecks { + err := v.Check() + if err != nil { + statusKeys[k] = err.Error() + } + } + + return statusKeys +} + +// CheckStatus returns a map with all the current health check errors from the +// default registry. +func CheckStatus() map[string]string { + return DefaultRegistry.CheckStatus() +} + +// Register associates the checker with the provided name. +func (registry *Registry) Register(name string, check Checker) { + if registry == nil { + registry = DefaultRegistry + } + registry.mu.Lock() + defer registry.mu.Unlock() + _, ok := registry.registeredChecks[name] + if ok { + panic("Check already exists: " + name) + } + registry.registeredChecks[name] = check +} + +// Register associates the checker with the provided name in the default +// registry. +func Register(name string, check Checker) { + DefaultRegistry.Register(name, check) +} + +// RegisterFunc allows the convenience of registering a checker directly from +// an arbitrary func() error. +func (registry *Registry) RegisterFunc(name string, check func() error) { + registry.Register(name, CheckFunc(check)) +} + +// RegisterFunc allows the convenience of registering a checker in the default +// registry directly from an arbitrary func() error. +func RegisterFunc(name string, check func() error) { + DefaultRegistry.RegisterFunc(name, check) +} + +// RegisterPeriodicFunc allows the convenience of registering a PeriodicChecker +// from an arbitrary func() error. 
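+// The check is run once per period and its most recent result is what the
+// registry reports for this name.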
+func (registry *Registry) RegisterPeriodicFunc(name string, period time.Duration, check CheckFunc) { + registry.Register(name, PeriodicChecker(CheckFunc(check), period)) +} + +// RegisterPeriodicFunc allows the convenience of registering a PeriodicChecker +// in the default registry from an arbitrary func() error. +func RegisterPeriodicFunc(name string, period time.Duration, check CheckFunc) { + DefaultRegistry.RegisterPeriodicFunc(name, period, check) +} + +// RegisterPeriodicThresholdFunc allows the convenience of registering a +// PeriodicChecker from an arbitrary func() error. +func (registry *Registry) RegisterPeriodicThresholdFunc(name string, period time.Duration, threshold int, check CheckFunc) { + registry.Register(name, PeriodicThresholdChecker(CheckFunc(check), period, threshold)) +} + +// RegisterPeriodicThresholdFunc allows the convenience of registering a +// PeriodicChecker in the default registry from an arbitrary func() error. +func RegisterPeriodicThresholdFunc(name string, period time.Duration, threshold int, check CheckFunc) { + DefaultRegistry.RegisterPeriodicThresholdFunc(name, period, threshold, check) +} + +// StatusHandler returns a JSON blob with all the currently registered Health Checks +// and their corresponding status. +// Returns 503 if any Error status exists, 200 otherwise +func StatusHandler(w http.ResponseWriter, r *http.Request) { + if r.Method == "GET" { + checks := CheckStatus() + status := http.StatusOK + + // If there is an error, return 503 + if len(checks) != 0 { + status = http.StatusServiceUnavailable + } + + statusResponse(w, r, status, checks) + } else { + http.NotFound(w, r) + } +} + +// Handler returns a handler that will return 503 response code if the health +// checks have failed. If everything is okay with the health checks, the +// handler will pass through to the provided handler. Use this handler to +// disable a web application when the health checks fail. +func Handler(handler http.Handler) http.Handler { + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + checks := CheckStatus() + if len(checks) != 0 { + errcode.ServeJSON(w, errcode.ErrorCodeUnavailable. + WithDetail("health check failed: please see /debug/health")) + return + } + + handler.ServeHTTP(w, r) // pass through + }) +} + +// statusResponse completes the request with a response describing the health +// of the service. 
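+// The checks map is serialized to JSON and written out with the given HTTP
+// status code.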
+func statusResponse(w http.ResponseWriter, r *http.Request, status int, checks map[string]string) { + p, err := json.Marshal(checks) + if err != nil { + context.GetLogger(context.Background()).Errorf("error serializing health status: %v", err) + p, err = json.Marshal(struct { + ServerError string `json:"server_error"` + }{ + ServerError: "Could not parse error message", + }) + status = http.StatusInternalServerError + + if err != nil { + context.GetLogger(context.Background()).Errorf("error serializing health status failure message: %v", err) + return + } + } + + w.Header().Set("Content-Type", "application/json; charset=utf-8") + w.Header().Set("Content-Length", fmt.Sprint(len(p))) + w.WriteHeader(status) + if _, err := w.Write(p); err != nil { + context.GetLogger(context.Background()).Errorf("error writing health status response body: %v", err) + } +} + +// Registers global /debug/health api endpoint, creates default registry +func init() { + DefaultRegistry = NewRegistry() + http.HandleFunc("/debug/health", StatusHandler) +} diff --git a/vendor/github.com/docker/distribution/health/health_test.go b/vendor/github.com/docker/distribution/health/health_test.go new file mode 100644 index 000000000..766fe159f --- /dev/null +++ b/vendor/github.com/docker/distribution/health/health_test.go @@ -0,0 +1,107 @@ +package health + +import ( + "errors" + "fmt" + "net/http" + "net/http/httptest" + "testing" +) + +// TestReturns200IfThereAreNoChecks ensures that the result code of the health +// endpoint is 200 if there are not currently registered checks. +func TestReturns200IfThereAreNoChecks(t *testing.T) { + recorder := httptest.NewRecorder() + + req, err := http.NewRequest("GET", "https://fakeurl.com/debug/health", nil) + if err != nil { + t.Errorf("Failed to create request.") + } + + StatusHandler(recorder, req) + + if recorder.Code != 200 { + t.Errorf("Did not get a 200.") + } +} + +// TestReturns500IfThereAreErrorChecks ensures that the result code of the +// health endpoint is 500 if there are health checks with errors +func TestReturns503IfThereAreErrorChecks(t *testing.T) { + recorder := httptest.NewRecorder() + + req, err := http.NewRequest("GET", "https://fakeurl.com/debug/health", nil) + if err != nil { + t.Errorf("Failed to create request.") + } + + // Create a manual error + Register("some_check", CheckFunc(func() error { + return errors.New("This Check did not succeed") + })) + + StatusHandler(recorder, req) + + if recorder.Code != 503 { + t.Errorf("Did not get a 503.") + } +} + +// TestHealthHandler ensures that our handler implementation correct protects +// the web application when things aren't so healthy. +func TestHealthHandler(t *testing.T) { + // clear out existing checks. 
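+	// Replacing DefaultRegistry keeps this test isolated from checks
+	// registered elsewhere in the package.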
+ DefaultRegistry = NewRegistry() + + // protect an http server + handler := http.Handler(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(http.StatusNoContent) + })) + + // wrap it in our health handler + handler = Handler(handler) + + // use this swap check status + updater := NewStatusUpdater() + Register("test_check", updater) + + // now, create a test server + server := httptest.NewServer(handler) + + checkUp := func(t *testing.T, message string) { + resp, err := http.Get(server.URL) + if err != nil { + t.Fatalf("error getting success status: %v", err) + } + defer resp.Body.Close() + + if resp.StatusCode != http.StatusNoContent { + t.Fatalf("unexpected response code from server when %s: %d != %d", message, resp.StatusCode, http.StatusNoContent) + } + // NOTE(stevvooe): we really don't care about the body -- the format is + // not standardized or supported, yet. + } + + checkDown := func(t *testing.T, message string) { + resp, err := http.Get(server.URL) + if err != nil { + t.Fatalf("error getting down status: %v", err) + } + defer resp.Body.Close() + + if resp.StatusCode != http.StatusServiceUnavailable { + t.Fatalf("unexpected response code from server when %s: %d != %d", message, resp.StatusCode, http.StatusServiceUnavailable) + } + } + + // server should be up + checkUp(t, "initial health check") + + // now, we fail the health check + updater.Update(fmt.Errorf("the server is now out of commission")) + checkDown(t, "server should be down") // should be down + + // bring server back up + updater.Update(nil) + checkUp(t, "when server is back up") // now we should be back up. +} diff --git a/vendor/github.com/docker/distribution/manifest/doc.go b/vendor/github.com/docker/distribution/manifest/doc.go new file mode 100644 index 000000000..88367b0a0 --- /dev/null +++ b/vendor/github.com/docker/distribution/manifest/doc.go @@ -0,0 +1 @@ +package manifest diff --git a/vendor/github.com/docker/distribution/manifest/manifestlist/manifestlist.go b/vendor/github.com/docker/distribution/manifest/manifestlist/manifestlist.go new file mode 100644 index 000000000..a2082ec02 --- /dev/null +++ b/vendor/github.com/docker/distribution/manifest/manifestlist/manifestlist.go @@ -0,0 +1,155 @@ +package manifestlist + +import ( + "encoding/json" + "errors" + "fmt" + + "github.com/docker/distribution" + "github.com/docker/distribution/digest" + "github.com/docker/distribution/manifest" +) + +// MediaTypeManifestList specifies the mediaType for manifest lists. +const MediaTypeManifestList = "application/vnd.docker.distribution.manifest.list.v2+json" + +// SchemaVersion provides a pre-initialized version structure for this +// packages version of the manifest. +var SchemaVersion = manifest.Versioned{ + SchemaVersion: 2, + MediaType: MediaTypeManifestList, +} + +func init() { + manifestListFunc := func(b []byte) (distribution.Manifest, distribution.Descriptor, error) { + m := new(DeserializedManifestList) + err := m.UnmarshalJSON(b) + if err != nil { + return nil, distribution.Descriptor{}, err + } + + dgst := digest.FromBytes(b) + return m, distribution.Descriptor{Digest: dgst, Size: int64(len(b)), MediaType: MediaTypeManifestList}, err + } + err := distribution.RegisterManifestSchema(MediaTypeManifestList, manifestListFunc) + if err != nil { + panic(fmt.Sprintf("Unable to register manifest: %s", err)) + } +} + +// PlatformSpec specifies a platform where a particular image manifest is +// applicable. 
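+// It records the CPU architecture and operating system together with their
+// optional version, variant and feature lists.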
+type PlatformSpec struct { + // Architecture field specifies the CPU architecture, for example + // `amd64` or `ppc64`. + Architecture string `json:"architecture"` + + // OS specifies the operating system, for example `linux` or `windows`. + OS string `json:"os"` + + // OSVersion is an optional field specifying the operating system + // version, for example `10.0.10586`. + OSVersion string `json:"os.version,omitempty"` + + // OSFeatures is an optional field specifying an array of strings, + // each listing a required OS feature (for example on Windows `win32k`). + OSFeatures []string `json:"os.features,omitempty"` + + // Variant is an optional field specifying a variant of the CPU, for + // example `ppc64le` to specify a little-endian version of a PowerPC CPU. + Variant string `json:"variant,omitempty"` + + // Features is an optional field specifying an array of strings, each + // listing a required CPU feature (for example `sse4` or `aes`). + Features []string `json:"features,omitempty"` +} + +// A ManifestDescriptor references a platform-specific manifest. +type ManifestDescriptor struct { + distribution.Descriptor + + // Platform specifies which platform the manifest pointed to by the + // descriptor runs on. + Platform PlatformSpec `json:"platform"` +} + +// ManifestList references manifests for various platforms. +type ManifestList struct { + manifest.Versioned + + // Config references the image configuration as a blob. + Manifests []ManifestDescriptor `json:"manifests"` +} + +// References returnes the distribution descriptors for the referenced image +// manifests. +func (m ManifestList) References() []distribution.Descriptor { + dependencies := make([]distribution.Descriptor, len(m.Manifests)) + for i := range m.Manifests { + dependencies[i] = m.Manifests[i].Descriptor + } + + return dependencies +} + +// DeserializedManifestList wraps ManifestList with a copy of the original +// JSON. +type DeserializedManifestList struct { + ManifestList + + // canonical is the canonical byte representation of the Manifest. + canonical []byte +} + +// FromDescriptors takes a slice of descriptors, and returns a +// DeserializedManifestList which contains the resulting manifest list +// and its JSON representation. +func FromDescriptors(descriptors []ManifestDescriptor) (*DeserializedManifestList, error) { + m := ManifestList{ + Versioned: SchemaVersion, + } + + m.Manifests = make([]ManifestDescriptor, len(descriptors), len(descriptors)) + copy(m.Manifests, descriptors) + + deserialized := DeserializedManifestList{ + ManifestList: m, + } + + var err error + deserialized.canonical, err = json.MarshalIndent(&m, "", " ") + return &deserialized, err +} + +// UnmarshalJSON populates a new ManifestList struct from JSON data. +func (m *DeserializedManifestList) UnmarshalJSON(b []byte) error { + m.canonical = make([]byte, len(b), len(b)) + // store manifest list in canonical + copy(m.canonical, b) + + // Unmarshal canonical JSON into ManifestList object + var manifestList ManifestList + if err := json.Unmarshal(m.canonical, &manifestList); err != nil { + return err + } + + m.ManifestList = manifestList + + return nil +} + +// MarshalJSON returns the contents of canonical. If canonical is empty, +// marshals the inner contents. +func (m *DeserializedManifestList) MarshalJSON() ([]byte, error) { + if len(m.canonical) > 0 { + return m.canonical, nil + } + + return nil, errors.New("JSON representation not initialized in DeserializedManifestList") +} + +// Payload returns the raw content of the manifest list. 
The contents can be +// used to calculate the content identifier. +func (m DeserializedManifestList) Payload() (string, []byte, error) { + return m.MediaType, m.canonical, nil +} diff --git a/vendor/github.com/docker/distribution/manifest/manifestlist/manifestlist_test.go b/vendor/github.com/docker/distribution/manifest/manifestlist/manifestlist_test.go new file mode 100644 index 000000000..09e6ed1f0 --- /dev/null +++ b/vendor/github.com/docker/distribution/manifest/manifestlist/manifestlist_test.go @@ -0,0 +1,111 @@ +package manifestlist + +import ( + "bytes" + "encoding/json" + "reflect" + "testing" + + "github.com/docker/distribution" +) + +var expectedManifestListSerialization = []byte(`{ + "schemaVersion": 2, + "mediaType": "application/vnd.docker.distribution.manifest.list.v2+json", + "manifests": [ + { + "mediaType": "application/vnd.docker.distribution.manifest.v2+json", + "size": 985, + "digest": "sha256:1a9ec845ee94c202b2d5da74a24f0ed2058318bfa9879fa541efaecba272e86b", + "platform": { + "architecture": "amd64", + "os": "linux", + "features": [ + "sse4" + ] + } + }, + { + "mediaType": "application/vnd.docker.distribution.manifest.v2+json", + "size": 2392, + "digest": "sha256:6346340964309634683409684360934680934608934608934608934068934608", + "platform": { + "architecture": "sun4m", + "os": "sunos" + } + } + ] +}`) + +func TestManifestList(t *testing.T) { + manifestDescriptors := []ManifestDescriptor{ + { + Descriptor: distribution.Descriptor{ + Digest: "sha256:1a9ec845ee94c202b2d5da74a24f0ed2058318bfa9879fa541efaecba272e86b", + Size: 985, + MediaType: "application/vnd.docker.distribution.manifest.v2+json", + }, + Platform: PlatformSpec{ + Architecture: "amd64", + OS: "linux", + Features: []string{"sse4"}, + }, + }, + { + Descriptor: distribution.Descriptor{ + Digest: "sha256:6346340964309634683409684360934680934608934608934608934068934608", + Size: 2392, + MediaType: "application/vnd.docker.distribution.manifest.v2+json", + }, + Platform: PlatformSpec{ + Architecture: "sun4m", + OS: "sunos", + }, + }, + } + + deserialized, err := FromDescriptors(manifestDescriptors) + if err != nil { + t.Fatalf("error creating DeserializedManifestList: %v", err) + } + + mediaType, canonical, err := deserialized.Payload() + + if mediaType != MediaTypeManifestList { + t.Fatalf("unexpected media type: %s", mediaType) + } + + // Check that the canonical field is the same as json.MarshalIndent + // with these parameters. + p, err := json.MarshalIndent(&deserialized.ManifestList, "", " ") + if err != nil { + t.Fatalf("error marshaling manifest list: %v", err) + } + if !bytes.Equal(p, canonical) { + t.Fatalf("manifest bytes not equal: %q != %q", string(canonical), string(p)) + } + + // Check that the canonical field has the expected value. 
+ if !bytes.Equal(expectedManifestListSerialization, canonical) { + t.Fatalf("manifest bytes not equal: %q != %q", string(canonical), string(expectedManifestListSerialization)) + } + + var unmarshalled DeserializedManifestList + if err := json.Unmarshal(deserialized.canonical, &unmarshalled); err != nil { + t.Fatalf("error unmarshaling manifest: %v", err) + } + + if !reflect.DeepEqual(&unmarshalled, deserialized) { + t.Fatalf("manifests are different after unmarshaling: %v != %v", unmarshalled, *deserialized) + } + + references := deserialized.References() + if len(references) != 2 { + t.Fatalf("unexpected number of references: %d", len(references)) + } + for i := range references { + if !reflect.DeepEqual(references[i], manifestDescriptors[i].Descriptor) { + t.Fatalf("unexpected value %d returned by References: %v", i, references[i]) + } + } +} diff --git a/vendor/github.com/docker/distribution/manifest/schema1/config_builder.go b/vendor/github.com/docker/distribution/manifest/schema1/config_builder.go new file mode 100644 index 000000000..be0123731 --- /dev/null +++ b/vendor/github.com/docker/distribution/manifest/schema1/config_builder.go @@ -0,0 +1,282 @@ +package schema1 + +import ( + "crypto/sha512" + "encoding/json" + "errors" + "fmt" + "time" + + "github.com/docker/distribution" + "github.com/docker/distribution/context" + "github.com/docker/distribution/digest" + "github.com/docker/distribution/manifest" + "github.com/docker/distribution/reference" + "github.com/docker/libtrust" +) + +type diffID digest.Digest + +// gzippedEmptyTar is a gzip-compressed version of an empty tar file +// (1024 NULL bytes) +var gzippedEmptyTar = []byte{ + 31, 139, 8, 0, 0, 9, 110, 136, 0, 255, 98, 24, 5, 163, 96, 20, 140, 88, + 0, 8, 0, 0, 255, 255, 46, 175, 181, 239, 0, 4, 0, 0, +} + +// digestSHA256GzippedEmptyTar is the canonical sha256 digest of +// gzippedEmptyTar +const digestSHA256GzippedEmptyTar = digest.Digest("sha256:a3ed95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4") + +// configManifestBuilder is a type for constructing manifests from an image +// configuration and generic descriptors. +type configManifestBuilder struct { + // bs is a BlobService used to create empty layer tars in the + // blob store if necessary. + bs distribution.BlobService + // pk is the libtrust private key used to sign the final manifest. + pk libtrust.PrivateKey + // configJSON is configuration supplied when the ManifestBuilder was + // created. + configJSON []byte + // ref contains the name and optional tag provided to NewConfigManifestBuilder. + ref reference.Named + // descriptors is the set of descriptors referencing the layers. + descriptors []distribution.Descriptor + // emptyTarDigest is set to a valid digest if an empty tar has been + // put in the blob store; otherwise it is empty. + emptyTarDigest digest.Digest +} + +// NewConfigManifestBuilder is used to build new manifests for the current +// schema version from an image configuration and a set of descriptors. +// It takes a BlobService so that it can add an empty tar to the blob store +// if the resulting manifest needs empty layers. 
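+// History entries marked empty_layer in the image configuration are backed by
+// that shared empty tar instead of one of the supplied descriptors.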
+func NewConfigManifestBuilder(bs distribution.BlobService, pk libtrust.PrivateKey, ref reference.Named, configJSON []byte) distribution.ManifestBuilder { + return &configManifestBuilder{ + bs: bs, + pk: pk, + configJSON: configJSON, + ref: ref, + } +} + +// Build produces a final manifest from the given references +func (mb *configManifestBuilder) Build(ctx context.Context) (m distribution.Manifest, err error) { + type imageRootFS struct { + Type string `json:"type"` + DiffIDs []diffID `json:"diff_ids,omitempty"` + BaseLayer string `json:"base_layer,omitempty"` + } + + type imageHistory struct { + Created time.Time `json:"created"` + Author string `json:"author,omitempty"` + CreatedBy string `json:"created_by,omitempty"` + Comment string `json:"comment,omitempty"` + EmptyLayer bool `json:"empty_layer,omitempty"` + } + + type imageConfig struct { + RootFS *imageRootFS `json:"rootfs,omitempty"` + History []imageHistory `json:"history,omitempty"` + Architecture string `json:"architecture,omitempty"` + } + + var img imageConfig + + if err := json.Unmarshal(mb.configJSON, &img); err != nil { + return nil, err + } + + if len(img.History) == 0 { + return nil, errors.New("empty history when trying to create schema1 manifest") + } + + if len(img.RootFS.DiffIDs) != len(mb.descriptors) { + return nil, fmt.Errorf("number of descriptors and number of layers in rootfs must match: len(%v) != len(%v)", img.RootFS.DiffIDs, mb.descriptors) + } + + // Generate IDs for each layer + // For non-top-level layers, create fake V1Compatibility strings that + // fit the format and don't collide with anything else, but don't + // result in runnable images on their own. + type v1Compatibility struct { + ID string `json:"id"` + Parent string `json:"parent,omitempty"` + Comment string `json:"comment,omitempty"` + Created time.Time `json:"created"` + ContainerConfig struct { + Cmd []string + } `json:"container_config,omitempty"` + Author string `json:"author,omitempty"` + ThrowAway bool `json:"throwaway,omitempty"` + } + + fsLayerList := make([]FSLayer, len(img.History)) + history := make([]History, len(img.History)) + + parent := "" + layerCounter := 0 + for i, h := range img.History[:len(img.History)-1] { + var blobsum digest.Digest + if h.EmptyLayer { + if blobsum, err = mb.emptyTar(ctx); err != nil { + return nil, err + } + } else { + if len(img.RootFS.DiffIDs) <= layerCounter { + return nil, errors.New("too many non-empty layers in History section") + } + blobsum = mb.descriptors[layerCounter].Digest + layerCounter++ + } + + v1ID := digest.FromBytes([]byte(blobsum.Hex() + " " + parent)).Hex() + + if i == 0 && img.RootFS.BaseLayer != "" { + // windows-only baselayer setup + baseID := sha512.Sum384([]byte(img.RootFS.BaseLayer)) + parent = fmt.Sprintf("%x", baseID[:32]) + } + + v1Compatibility := v1Compatibility{ + ID: v1ID, + Parent: parent, + Comment: h.Comment, + Created: h.Created, + Author: h.Author, + } + v1Compatibility.ContainerConfig.Cmd = []string{img.History[i].CreatedBy} + if h.EmptyLayer { + v1Compatibility.ThrowAway = true + } + jsonBytes, err := json.Marshal(&v1Compatibility) + if err != nil { + return nil, err + } + + reversedIndex := len(img.History) - i - 1 + history[reversedIndex].V1Compatibility = string(jsonBytes) + fsLayerList[reversedIndex] = FSLayer{BlobSum: blobsum} + + parent = v1ID + } + + latestHistory := img.History[len(img.History)-1] + + var blobsum digest.Digest + if latestHistory.EmptyLayer { + if blobsum, err = mb.emptyTar(ctx); err != nil { + return nil, err + } + } else { + if 
len(img.RootFS.DiffIDs) <= layerCounter { + return nil, errors.New("too many non-empty layers in History section") + } + blobsum = mb.descriptors[layerCounter].Digest + } + + fsLayerList[0] = FSLayer{BlobSum: blobsum} + dgst := digest.FromBytes([]byte(blobsum.Hex() + " " + parent + " " + string(mb.configJSON))) + + // Top-level v1compatibility string should be a modified version of the + // image config. + transformedConfig, err := MakeV1ConfigFromConfig(mb.configJSON, dgst.Hex(), parent, latestHistory.EmptyLayer) + if err != nil { + return nil, err + } + + history[0].V1Compatibility = string(transformedConfig) + + tag := "" + if tagged, isTagged := mb.ref.(reference.Tagged); isTagged { + tag = tagged.Tag() + } + + mfst := Manifest{ + Versioned: manifest.Versioned{ + SchemaVersion: 1, + }, + Name: mb.ref.Name(), + Tag: tag, + Architecture: img.Architecture, + FSLayers: fsLayerList, + History: history, + } + + return Sign(&mfst, mb.pk) +} + +// emptyTar pushes a compressed empty tar to the blob store if one doesn't +// already exist, and returns its blobsum. +func (mb *configManifestBuilder) emptyTar(ctx context.Context) (digest.Digest, error) { + if mb.emptyTarDigest != "" { + // Already put an empty tar + return mb.emptyTarDigest, nil + } + + descriptor, err := mb.bs.Stat(ctx, digestSHA256GzippedEmptyTar) + switch err { + case nil: + mb.emptyTarDigest = descriptor.Digest + return descriptor.Digest, nil + case distribution.ErrBlobUnknown: + // nop + default: + return "", err + } + + // Add gzipped empty tar to the blob store + descriptor, err = mb.bs.Put(ctx, "", gzippedEmptyTar) + if err != nil { + return "", err + } + + mb.emptyTarDigest = descriptor.Digest + + return descriptor.Digest, nil +} + +// AppendReference adds a reference to the current ManifestBuilder +func (mb *configManifestBuilder) AppendReference(d distribution.Describable) error { + // todo: verification here? + mb.descriptors = append(mb.descriptors, d.Descriptor()) + return nil +} + +// References returns the current references added to this builder +func (mb *configManifestBuilder) References() []distribution.Descriptor { + return mb.descriptors +} + +// MakeV1ConfigFromConfig creates an legacy V1 image config from image config JSON +func MakeV1ConfigFromConfig(configJSON []byte, v1ID, parentV1ID string, throwaway bool) ([]byte, error) { + // Top-level v1compatibility string should be a modified version of the + // image config. 
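+	// The rootfs and history fields, which have no v1 equivalent, are stripped
+	// and the v1 id/parent/throwaway values are injected below.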
+ var configAsMap map[string]*json.RawMessage + if err := json.Unmarshal(configJSON, &configAsMap); err != nil { + return nil, err + } + + // Delete fields that didn't exist in old manifest + delete(configAsMap, "rootfs") + delete(configAsMap, "history") + configAsMap["id"] = rawJSON(v1ID) + if parentV1ID != "" { + configAsMap["parent"] = rawJSON(parentV1ID) + } + if throwaway { + configAsMap["throwaway"] = rawJSON(true) + } + + return json.Marshal(configAsMap) +} + +func rawJSON(value interface{}) *json.RawMessage { + jsonval, err := json.Marshal(value) + if err != nil { + return nil + } + return (*json.RawMessage)(&jsonval) +} diff --git a/vendor/github.com/docker/distribution/manifest/schema1/config_builder_test.go b/vendor/github.com/docker/distribution/manifest/schema1/config_builder_test.go new file mode 100644 index 000000000..5f9abaa9f --- /dev/null +++ b/vendor/github.com/docker/distribution/manifest/schema1/config_builder_test.go @@ -0,0 +1,272 @@ +package schema1 + +import ( + "bytes" + "compress/gzip" + "io" + "reflect" + "testing" + + "github.com/docker/distribution" + "github.com/docker/distribution/context" + "github.com/docker/distribution/digest" + "github.com/docker/distribution/reference" + "github.com/docker/libtrust" +) + +type mockBlobService struct { + descriptors map[digest.Digest]distribution.Descriptor +} + +func (bs *mockBlobService) Stat(ctx context.Context, dgst digest.Digest) (distribution.Descriptor, error) { + if descriptor, ok := bs.descriptors[dgst]; ok { + return descriptor, nil + } + return distribution.Descriptor{}, distribution.ErrBlobUnknown +} + +func (bs *mockBlobService) Get(ctx context.Context, dgst digest.Digest) ([]byte, error) { + panic("not implemented") +} + +func (bs *mockBlobService) Open(ctx context.Context, dgst digest.Digest) (distribution.ReadSeekCloser, error) { + panic("not implemented") +} + +func (bs *mockBlobService) Put(ctx context.Context, mediaType string, p []byte) (distribution.Descriptor, error) { + d := distribution.Descriptor{ + Digest: digest.FromBytes(p), + Size: int64(len(p)), + MediaType: mediaType, + } + bs.descriptors[d.Digest] = d + return d, nil +} + +func (bs *mockBlobService) Create(ctx context.Context, options ...distribution.BlobCreateOption) (distribution.BlobWriter, error) { + panic("not implemented") +} + +func (bs *mockBlobService) Resume(ctx context.Context, id string) (distribution.BlobWriter, error) { + panic("not implemented") +} + +func TestEmptyTar(t *testing.T) { + // Confirm that gzippedEmptyTar expands to 1024 NULL bytes. + var decompressed [2048]byte + gzipReader, err := gzip.NewReader(bytes.NewReader(gzippedEmptyTar)) + if err != nil { + t.Fatalf("NewReader returned error: %v", err) + } + n, err := gzipReader.Read(decompressed[:]) + if n != 1024 { + t.Fatalf("read returned %d bytes; expected 1024", n) + } + n, err = gzipReader.Read(decompressed[1024:]) + if n != 0 { + t.Fatalf("read returned %d bytes; expected 0", n) + } + if err != io.EOF { + t.Fatal("read did not return io.EOF") + } + gzipReader.Close() + for _, b := range decompressed[:1024] { + if b != 0 { + t.Fatal("nonzero byte in decompressed tar") + } + } + + // Confirm that digestSHA256EmptyTar is the digest of gzippedEmptyTar. 
+ dgst := digest.FromBytes(gzippedEmptyTar) + if dgst != digestSHA256GzippedEmptyTar { + t.Fatalf("digest mismatch for empty tar: expected %s got %s", digestSHA256GzippedEmptyTar, dgst) + } +} + +func TestConfigBuilder(t *testing.T) { + imgJSON := `{ + "architecture": "amd64", + "config": { + "AttachStderr": false, + "AttachStdin": false, + "AttachStdout": false, + "Cmd": [ + "/bin/sh", + "-c", + "echo hi" + ], + "Domainname": "", + "Entrypoint": null, + "Env": [ + "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin", + "derived=true", + "asdf=true" + ], + "Hostname": "23304fc829f9", + "Image": "sha256:4ab15c48b859c2920dd5224f92aabcd39a52794c5b3cf088fb3bbb438756c246", + "Labels": {}, + "OnBuild": [], + "OpenStdin": false, + "StdinOnce": false, + "Tty": false, + "User": "", + "Volumes": null, + "WorkingDir": "" + }, + "container": "e91032eb0403a61bfe085ff5a5a48e3659e5a6deae9f4d678daa2ae399d5a001", + "container_config": { + "AttachStderr": false, + "AttachStdin": false, + "AttachStdout": false, + "Cmd": [ + "/bin/sh", + "-c", + "#(nop) CMD [\"/bin/sh\" \"-c\" \"echo hi\"]" + ], + "Domainname": "", + "Entrypoint": null, + "Env": [ + "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin", + "derived=true", + "asdf=true" + ], + "Hostname": "23304fc829f9", + "Image": "sha256:4ab15c48b859c2920dd5224f92aabcd39a52794c5b3cf088fb3bbb438756c246", + "Labels": {}, + "OnBuild": [], + "OpenStdin": false, + "StdinOnce": false, + "Tty": false, + "User": "", + "Volumes": null, + "WorkingDir": "" + }, + "created": "2015-11-04T23:06:32.365666163Z", + "docker_version": "1.9.0-dev", + "history": [ + { + "created": "2015-10-31T22:22:54.690851953Z", + "created_by": "/bin/sh -c #(nop) ADD file:a3bc1e842b69636f9df5256c49c5374fb4eef1e281fe3f282c65fb853ee171c5 in /" + }, + { + "created": "2015-10-31T22:22:55.613815829Z", + "created_by": "/bin/sh -c #(nop) CMD [\"sh\"]" + }, + { + "created": "2015-11-04T23:06:30.934316144Z", + "created_by": "/bin/sh -c #(nop) ENV derived=true", + "empty_layer": true + }, + { + "created": "2015-11-04T23:06:31.192097572Z", + "created_by": "/bin/sh -c #(nop) ENV asdf=true", + "empty_layer": true + }, + { + "author": "Alyssa P. 
Hacker \u003calyspdev@example.com\u003e", + "created": "2015-11-04T23:06:32.083868454Z", + "created_by": "/bin/sh -c dd if=/dev/zero of=/file bs=1024 count=1024" + }, + { + "created": "2015-11-04T23:06:32.365666163Z", + "created_by": "/bin/sh -c #(nop) CMD [\"/bin/sh\" \"-c\" \"echo hi\"]", + "empty_layer": true + } + ], + "os": "linux", + "rootfs": { + "diff_ids": [ + "sha256:c6f988f4874bb0add23a778f753c65efe992244e148a1d2ec2a8b664fb66bbd1", + "sha256:5f70bf18a086007016e948b04aed3b82103a36bea41755b6cddfaf10ace3c6ef", + "sha256:13f53e08df5a220ab6d13c58b2bf83a59cbdc2e04d0a3f041ddf4b0ba4112d49" + ], + "type": "layers" + } +}` + + descriptors := []distribution.Descriptor{ + {Digest: digest.Digest("sha256:a3ed95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4")}, + {Digest: digest.Digest("sha256:86e0e091d0da6bde2456dbb48306f3956bbeb2eae1b5b9a43045843f69fe4aaa")}, + {Digest: digest.Digest("sha256:b4ed95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4")}, + } + + pk, err := libtrust.GenerateECP256PrivateKey() + if err != nil { + t.Fatalf("could not generate key for testing: %v", err) + } + + bs := &mockBlobService{descriptors: make(map[digest.Digest]distribution.Descriptor)} + + ref, err := reference.ParseNamed("testrepo:testtag") + if err != nil { + t.Fatalf("could not parse reference: %v", err) + } + + builder := NewConfigManifestBuilder(bs, pk, ref, []byte(imgJSON)) + + for _, d := range descriptors { + if err := builder.AppendReference(d); err != nil { + t.Fatalf("AppendReference returned error: %v", err) + } + } + + signed, err := builder.Build(context.Background()) + if err != nil { + t.Fatalf("Build returned error: %v", err) + } + + // Check that the gzipped empty layer tar was put in the blob store + _, err = bs.Stat(context.Background(), digestSHA256GzippedEmptyTar) + if err != nil { + t.Fatal("gzipped empty tar was not put in the blob store") + } + + manifest := signed.(*SignedManifest).Manifest + + if manifest.Versioned.SchemaVersion != 1 { + t.Fatal("SchemaVersion != 1") + } + if manifest.Name != "testrepo" { + t.Fatal("incorrect name in manifest") + } + if manifest.Tag != "testtag" { + t.Fatal("incorrect tag in manifest") + } + if manifest.Architecture != "amd64" { + t.Fatal("incorrect arch in manifest") + } + + expectedFSLayers := []FSLayer{ + {BlobSum: digest.Digest("sha256:a3ed95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4")}, + {BlobSum: digest.Digest("sha256:b4ed95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4")}, + {BlobSum: digest.Digest("sha256:a3ed95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4")}, + {BlobSum: digest.Digest("sha256:a3ed95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4")}, + {BlobSum: digest.Digest("sha256:86e0e091d0da6bde2456dbb48306f3956bbeb2eae1b5b9a43045843f69fe4aaa")}, + {BlobSum: digest.Digest("sha256:a3ed95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4")}, + } + + if len(manifest.FSLayers) != len(expectedFSLayers) { + t.Fatalf("wrong number of FSLayers: %d", len(manifest.FSLayers)) + } + if !reflect.DeepEqual(manifest.FSLayers, expectedFSLayers) { + t.Fatal("wrong FSLayers list") + } + + expectedV1Compatibility := []string{ + `{"architecture":"amd64","config":{"AttachStderr":false,"AttachStdin":false,"AttachStdout":false,"Cmd":["/bin/sh","-c","echo 
hi"],"Domainname":"","Entrypoint":null,"Env":["PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin","derived=true","asdf=true"],"Hostname":"23304fc829f9","Image":"sha256:4ab15c48b859c2920dd5224f92aabcd39a52794c5b3cf088fb3bbb438756c246","Labels":{},"OnBuild":[],"OpenStdin":false,"StdinOnce":false,"Tty":false,"User":"","Volumes":null,"WorkingDir":""},"container":"e91032eb0403a61bfe085ff5a5a48e3659e5a6deae9f4d678daa2ae399d5a001","container_config":{"AttachStderr":false,"AttachStdin":false,"AttachStdout":false,"Cmd":["/bin/sh","-c","#(nop) CMD [\"/bin/sh\" \"-c\" \"echo hi\"]"],"Domainname":"","Entrypoint":null,"Env":["PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin","derived=true","asdf=true"],"Hostname":"23304fc829f9","Image":"sha256:4ab15c48b859c2920dd5224f92aabcd39a52794c5b3cf088fb3bbb438756c246","Labels":{},"OnBuild":[],"OpenStdin":false,"StdinOnce":false,"Tty":false,"User":"","Volumes":null,"WorkingDir":""},"created":"2015-11-04T23:06:32.365666163Z","docker_version":"1.9.0-dev","id":"69e5c1bfadad697fdb6db59f6326648fa119e0c031a0eda33b8cfadcab54ba7f","os":"linux","parent":"74cf9c92699240efdba1903c2748ef57105d5bedc588084c4e88f3bb1c3ef0b0","throwaway":true}`, + `{"id":"74cf9c92699240efdba1903c2748ef57105d5bedc588084c4e88f3bb1c3ef0b0","parent":"178be37afc7c49e951abd75525dbe0871b62ad49402f037164ee6314f754599d","created":"2015-11-04T23:06:32.083868454Z","container_config":{"Cmd":["/bin/sh -c dd if=/dev/zero of=/file bs=1024 count=1024"]},"author":"Alyssa P. Hacker \u003calyspdev@example.com\u003e"}`, + `{"id":"178be37afc7c49e951abd75525dbe0871b62ad49402f037164ee6314f754599d","parent":"b449305a55a283538c4574856a8b701f2a3d5ec08ef8aec47f385f20339a4866","created":"2015-11-04T23:06:31.192097572Z","container_config":{"Cmd":["/bin/sh -c #(nop) ENV asdf=true"]},"throwaway":true}`, + `{"id":"b449305a55a283538c4574856a8b701f2a3d5ec08ef8aec47f385f20339a4866","parent":"9e3447ca24cb96d86ebd5960cb34d1299b07e0a0e03801d90b9969a2c187dd6e","created":"2015-11-04T23:06:30.934316144Z","container_config":{"Cmd":["/bin/sh -c #(nop) ENV derived=true"]},"throwaway":true}`, + `{"id":"9e3447ca24cb96d86ebd5960cb34d1299b07e0a0e03801d90b9969a2c187dd6e","parent":"3690474eb5b4b26fdfbd89c6e159e8cc376ca76ef48032a30fa6aafd56337880","created":"2015-10-31T22:22:55.613815829Z","container_config":{"Cmd":["/bin/sh -c #(nop) CMD [\"sh\"]"]}}`, + `{"id":"3690474eb5b4b26fdfbd89c6e159e8cc376ca76ef48032a30fa6aafd56337880","created":"2015-10-31T22:22:54.690851953Z","container_config":{"Cmd":["/bin/sh -c #(nop) ADD file:a3bc1e842b69636f9df5256c49c5374fb4eef1e281fe3f282c65fb853ee171c5 in /"]}}`, + } + + if len(manifest.History) != len(expectedV1Compatibility) { + t.Fatalf("wrong number of history entries: %d", len(manifest.History)) + } + for i := range expectedV1Compatibility { + if manifest.History[i].V1Compatibility != expectedV1Compatibility[i] { + t.Errorf("wrong V1Compatibility %d. 
expected:\n%s\ngot:\n%s", i, expectedV1Compatibility[i], manifest.History[i].V1Compatibility) + } + } +} diff --git a/vendor/github.com/docker/distribution/manifest/schema1/manifest.go b/vendor/github.com/docker/distribution/manifest/schema1/manifest.go new file mode 100644 index 000000000..bff47bde0 --- /dev/null +++ b/vendor/github.com/docker/distribution/manifest/schema1/manifest.go @@ -0,0 +1,184 @@ +package schema1 + +import ( + "encoding/json" + "fmt" + + "github.com/docker/distribution" + "github.com/docker/distribution/digest" + "github.com/docker/distribution/manifest" + "github.com/docker/libtrust" +) + +const ( + // MediaTypeManifest specifies the mediaType for the current version. Note + // that for schema version 1, the the media is optionally "application/json". + MediaTypeManifest = "application/vnd.docker.distribution.manifest.v1+json" + // MediaTypeSignedManifest specifies the mediatype for current SignedManifest version + MediaTypeSignedManifest = "application/vnd.docker.distribution.manifest.v1+prettyjws" + // MediaTypeManifestLayer specifies the media type for manifest layers + MediaTypeManifestLayer = "application/vnd.docker.container.image.rootfs.diff+x-gtar" +) + +var ( + // SchemaVersion provides a pre-initialized version structure for this + // packages version of the manifest. + SchemaVersion = manifest.Versioned{ + SchemaVersion: 1, + } +) + +func init() { + schema1Func := func(b []byte) (distribution.Manifest, distribution.Descriptor, error) { + sm := new(SignedManifest) + err := sm.UnmarshalJSON(b) + if err != nil { + return nil, distribution.Descriptor{}, err + } + + desc := distribution.Descriptor{ + Digest: digest.FromBytes(sm.Canonical), + Size: int64(len(sm.Canonical)), + MediaType: MediaTypeSignedManifest, + } + return sm, desc, err + } + err := distribution.RegisterManifestSchema(MediaTypeSignedManifest, schema1Func) + if err != nil { + panic(fmt.Sprintf("Unable to register manifest: %s", err)) + } + err = distribution.RegisterManifestSchema("", schema1Func) + if err != nil { + panic(fmt.Sprintf("Unable to register manifest: %s", err)) + } + err = distribution.RegisterManifestSchema("application/json", schema1Func) + if err != nil { + panic(fmt.Sprintf("Unable to register manifest: %s", err)) + } +} + +// FSLayer is a container struct for BlobSums defined in an image manifest +type FSLayer struct { + // BlobSum is the tarsum of the referenced filesystem image layer + BlobSum digest.Digest `json:"blobSum"` +} + +// History stores unstructured v1 compatibility information +type History struct { + // V1Compatibility is the raw v1 compatibility information + V1Compatibility string `json:"v1Compatibility"` +} + +// Manifest provides the base accessible fields for working with V2 image +// format in the registry. +type Manifest struct { + manifest.Versioned + + // Name is the name of the image's repository + Name string `json:"name"` + + // Tag is the tag of the image specified by this manifest + Tag string `json:"tag"` + + // Architecture is the host architecture on which this image is intended to + // run + Architecture string `json:"architecture"` + + // FSLayers is a list of filesystem layer blobSums contained in this image + FSLayers []FSLayer `json:"fsLayers"` + + // History is a list of unstructured historical data for v1 compatibility + History []History `json:"history"` +} + +// SignedManifest provides an envelope for a signed image manifest, including +// the format sensitive raw bytes. 
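+// The embedded Manifest carries the parsed fields, while Canonical and all
+// keep the exact byte representations needed for signature verification.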
+type SignedManifest struct { + Manifest + + // Canonical is the canonical byte representation of the ImageManifest, + // without any attached signatures. The manifest byte + // representation cannot change or it will have to be re-signed. + Canonical []byte `json:"-"` + + // all contains the byte representation of the Manifest including signatures + // and is returned by Payload() + all []byte +} + +// UnmarshalJSON populates a new SignedManifest struct from JSON data. +func (sm *SignedManifest) UnmarshalJSON(b []byte) error { + sm.all = make([]byte, len(b), len(b)) + // store manifest and signatures in all + copy(sm.all, b) + + jsig, err := libtrust.ParsePrettySignature(b, "signatures") + if err != nil { + return err + } + + // Resolve the payload in the manifest. + bytes, err := jsig.Payload() + if err != nil { + return err + } + + // sm.Canonical stores the canonical manifest JSON + sm.Canonical = make([]byte, len(bytes), len(bytes)) + copy(sm.Canonical, bytes) + + // Unmarshal canonical JSON into Manifest object + var manifest Manifest + if err := json.Unmarshal(sm.Canonical, &manifest); err != nil { + return err + } + + sm.Manifest = manifest + + return nil +} + +// References returnes the descriptors of this manifests references +func (sm SignedManifest) References() []distribution.Descriptor { + dependencies := make([]distribution.Descriptor, len(sm.FSLayers)) + for i, fsLayer := range sm.FSLayers { + dependencies[i] = distribution.Descriptor{ + MediaType: "application/vnd.docker.container.image.rootfs.diff+x-gtar", + Digest: fsLayer.BlobSum, + } + } + + return dependencies + +} + +// MarshalJSON returns the contents of raw. If Raw is nil, marshals the inner +// contents. Applications requiring a marshaled signed manifest should simply +// use Raw directly, since the the content produced by json.Marshal will be +// compacted and will fail signature checks. +func (sm *SignedManifest) MarshalJSON() ([]byte, error) { + if len(sm.all) > 0 { + return sm.all, nil + } + + // If the raw data is not available, just dump the inner content. + return json.Marshal(&sm.Manifest) +} + +// Payload returns the signed content of the signed manifest. +func (sm SignedManifest) Payload() (string, []byte, error) { + return MediaTypeSignedManifest, sm.all, nil +} + +// Signatures returns the signatures as provided by +// (*libtrust.JSONSignature).Signatures. The byte slices are opaque jws +// signatures. +func (sm *SignedManifest) Signatures() ([][]byte, error) { + jsig, err := libtrust.ParsePrettySignature(sm.all, "signatures") + if err != nil { + return nil, err + } + + // Resolve the payload in the manifest. + return jsig.Signatures() +} diff --git a/vendor/github.com/docker/distribution/manifest/schema1/manifest_test.go b/vendor/github.com/docker/distribution/manifest/schema1/manifest_test.go new file mode 100644 index 000000000..05bb8ec57 --- /dev/null +++ b/vendor/github.com/docker/distribution/manifest/schema1/manifest_test.go @@ -0,0 +1,136 @@ +package schema1 + +import ( + "bytes" + "encoding/json" + "reflect" + "testing" + + "github.com/docker/libtrust" +) + +type testEnv struct { + name, tag string + invalidSigned *SignedManifest + signed *SignedManifest + pk libtrust.PrivateKey +} + +func TestManifestMarshaling(t *testing.T) { + env := genEnv(t) + + // Check that the all field is the same as json.MarshalIndent with these + // parameters. 
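+	// The signed payload is stored pretty-printed, so re-indenting it with the
+	// same settings should reproduce it byte for byte.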
+ p, err := json.MarshalIndent(env.signed, "", " ") + if err != nil { + t.Fatalf("error marshaling manifest: %v", err) + } + + if !bytes.Equal(p, env.signed.all) { + t.Fatalf("manifest bytes not equal: %q != %q", string(env.signed.all), string(p)) + } +} + +func TestManifestUnmarshaling(t *testing.T) { + env := genEnv(t) + + var signed SignedManifest + if err := json.Unmarshal(env.signed.all, &signed); err != nil { + t.Fatalf("error unmarshaling signed manifest: %v", err) + } + + if !reflect.DeepEqual(&signed, env.signed) { + t.Fatalf("manifests are different after unmarshaling: %v != %v", signed, env.signed) + } + +} + +func TestManifestVerification(t *testing.T) { + env := genEnv(t) + + publicKeys, err := Verify(env.signed) + if err != nil { + t.Fatalf("error verifying manifest: %v", err) + } + + if len(publicKeys) == 0 { + t.Fatalf("no public keys found in signature") + } + + var found bool + publicKey := env.pk.PublicKey() + // ensure that one of the extracted public keys matches the private key. + for _, candidate := range publicKeys { + if candidate.KeyID() == publicKey.KeyID() { + found = true + break + } + } + + if !found { + t.Fatalf("expected public key, %v, not found in verified keys: %v", publicKey, publicKeys) + } + + // Check that an invalid manifest fails verification + _, err = Verify(env.invalidSigned) + if err != nil { + t.Fatalf("Invalid manifest should not pass Verify()") + } +} + +func genEnv(t *testing.T) *testEnv { + pk, err := libtrust.GenerateECP256PrivateKey() + if err != nil { + t.Fatalf("error generating test key: %v", err) + } + + name, tag := "foo/bar", "test" + + invalid := Manifest{ + Versioned: SchemaVersion, + Name: name, + Tag: tag, + FSLayers: []FSLayer{ + { + BlobSum: "asdf", + }, + { + BlobSum: "qwer", + }, + }, + } + + valid := Manifest{ + Versioned: SchemaVersion, + Name: name, + Tag: tag, + FSLayers: []FSLayer{ + { + BlobSum: "asdf", + }, + }, + History: []History{ + { + V1Compatibility: "", + }, + }, + } + + sm, err := Sign(&valid, pk) + if err != nil { + t.Fatalf("error signing manifest: %v", err) + } + + invalidSigned, err := Sign(&invalid, pk) + if err != nil { + t.Fatalf("error signing manifest: %v", err) + } + + return &testEnv{ + name: name, + tag: tag, + invalidSigned: invalidSigned, + signed: sm, + pk: pk, + } +} diff --git a/vendor/github.com/docker/distribution/manifest/schema1/reference_builder.go b/vendor/github.com/docker/distribution/manifest/schema1/reference_builder.go new file mode 100644 index 000000000..fc1045f9e --- /dev/null +++ b/vendor/github.com/docker/distribution/manifest/schema1/reference_builder.go @@ -0,0 +1,98 @@ +package schema1 + +import ( + "fmt" + + "errors" + "github.com/docker/distribution" + "github.com/docker/distribution/context" + "github.com/docker/distribution/digest" + "github.com/docker/distribution/manifest" + "github.com/docker/distribution/reference" + "github.com/docker/libtrust" +) + +// referenceManifestBuilder is a type for constructing manifests from schema1 +// dependencies. +type referenceManifestBuilder struct { + Manifest + pk libtrust.PrivateKey +} + +// NewReferenceManifestBuilder is used to build new manifests for the current +// schema version using schema1 dependencies. 
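+// Each appended Reference contributes an FSLayer/History pair, and Build signs
+// the assembled manifest with the supplied private key.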
+func NewReferenceManifestBuilder(pk libtrust.PrivateKey, ref reference.Named, architecture string) distribution.ManifestBuilder { + tag := "" + if tagged, isTagged := ref.(reference.Tagged); isTagged { + tag = tagged.Tag() + } + + return &referenceManifestBuilder{ + Manifest: Manifest{ + Versioned: manifest.Versioned{ + SchemaVersion: 1, + }, + Name: ref.Name(), + Tag: tag, + Architecture: architecture, + }, + pk: pk, + } +} + +func (mb *referenceManifestBuilder) Build(ctx context.Context) (distribution.Manifest, error) { + m := mb.Manifest + if len(m.FSLayers) == 0 { + return nil, errors.New("cannot build manifest with zero layers or history") + } + + m.FSLayers = make([]FSLayer, len(mb.Manifest.FSLayers)) + m.History = make([]History, len(mb.Manifest.History)) + copy(m.FSLayers, mb.Manifest.FSLayers) + copy(m.History, mb.Manifest.History) + + return Sign(&m, mb.pk) +} + +// AppendReference adds a reference to the current ManifestBuilder +func (mb *referenceManifestBuilder) AppendReference(d distribution.Describable) error { + r, ok := d.(Reference) + if !ok { + return fmt.Errorf("Unable to add non-reference type to v1 builder") + } + + // Entries need to be prepended + mb.Manifest.FSLayers = append([]FSLayer{{BlobSum: r.Digest}}, mb.Manifest.FSLayers...) + mb.Manifest.History = append([]History{r.History}, mb.Manifest.History...) + return nil + +} + +// References returns the current references added to this builder +func (mb *referenceManifestBuilder) References() []distribution.Descriptor { + refs := make([]distribution.Descriptor, len(mb.Manifest.FSLayers)) + for i := range mb.Manifest.FSLayers { + layerDigest := mb.Manifest.FSLayers[i].BlobSum + history := mb.Manifest.History[i] + ref := Reference{layerDigest, 0, history} + refs[i] = ref.Descriptor() + } + return refs +} + +// Reference describes a manifest v2, schema version 1 dependency. +// An FSLayer associated with a history entry. +type Reference struct { + Digest digest.Digest + Size int64 // if we know it, set it for the descriptor. 
+ History History +} + +// Descriptor describes a reference +func (r Reference) Descriptor() distribution.Descriptor { + return distribution.Descriptor{ + MediaType: MediaTypeManifestLayer, + Digest: r.Digest, + Size: r.Size, + } +} diff --git a/vendor/github.com/docker/distribution/manifest/schema1/reference_builder_test.go b/vendor/github.com/docker/distribution/manifest/schema1/reference_builder_test.go new file mode 100644 index 000000000..35db28e46 --- /dev/null +++ b/vendor/github.com/docker/distribution/manifest/schema1/reference_builder_test.go @@ -0,0 +1,108 @@ +package schema1 + +import ( + "testing" + + "github.com/docker/distribution/context" + "github.com/docker/distribution/digest" + "github.com/docker/distribution/manifest" + "github.com/docker/distribution/reference" + "github.com/docker/libtrust" +) + +func makeSignedManifest(t *testing.T, pk libtrust.PrivateKey, refs []Reference) *SignedManifest { + u := &Manifest{ + Versioned: manifest.Versioned{ + SchemaVersion: 1, + }, + Name: "foo/bar", + Tag: "latest", + Architecture: "amd64", + } + + for i := len(refs) - 1; i >= 0; i-- { + u.FSLayers = append(u.FSLayers, FSLayer{ + BlobSum: refs[i].Digest, + }) + u.History = append(u.History, History{ + V1Compatibility: refs[i].History.V1Compatibility, + }) + } + + signedManifest, err := Sign(u, pk) + if err != nil { + t.Fatalf("unexpected error signing manifest: %v", err) + } + return signedManifest +} + +func TestReferenceBuilder(t *testing.T) { + pk, err := libtrust.GenerateECP256PrivateKey() + if err != nil { + t.Fatalf("unexpected error generating private key: %v", err) + } + + r1 := Reference{ + Digest: "sha256:aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa", + Size: 1, + History: History{V1Compatibility: "{\"a\" : 1 }"}, + } + r2 := Reference{ + Digest: "sha256:bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb", + Size: 2, + History: History{V1Compatibility: "{\"\a\" : 2 }"}, + } + + handCrafted := makeSignedManifest(t, pk, []Reference{r1, r2}) + + ref, err := reference.ParseNamed(handCrafted.Manifest.Name) + if err != nil { + t.Fatalf("could not parse reference: %v", err) + } + ref, err = reference.WithTag(ref, handCrafted.Manifest.Tag) + if err != nil { + t.Fatalf("could not add tag: %v", err) + } + + b := NewReferenceManifestBuilder(pk, ref, handCrafted.Manifest.Architecture) + _, err = b.Build(context.Background()) + if err == nil { + t.Fatal("Expected error building zero length manifest") + } + + err = b.AppendReference(r1) + if err != nil { + t.Fatal(err) + } + + err = b.AppendReference(r2) + if err != nil { + t.Fatal(err) + } + + refs := b.References() + if len(refs) != 2 { + t.Fatalf("Unexpected reference count : %d != %d", 2, len(refs)) + } + + // Ensure ordering + if refs[0].Digest != r2.Digest { + t.Fatalf("Unexpected reference : %v", refs[0]) + } + + m, err := b.Build(context.Background()) + if err != nil { + t.Fatal(err) + } + + built, ok := m.(*SignedManifest) + if !ok { + t.Fatalf("unexpected type from Build() : %T", built) + } + + d1 := digest.FromBytes(built.Canonical) + d2 := digest.FromBytes(handCrafted.Canonical) + if d1 != d2 { + t.Errorf("mismatching canonical JSON") + } +} diff --git a/vendor/github.com/docker/distribution/manifest/schema1/sign.go b/vendor/github.com/docker/distribution/manifest/schema1/sign.go new file mode 100644 index 000000000..c862dd812 --- /dev/null +++ b/vendor/github.com/docker/distribution/manifest/schema1/sign.go @@ -0,0 +1,68 @@ +package schema1 + +import ( + "crypto/x509" + "encoding/json" + 
+ "github.com/docker/libtrust" +) + +// Sign signs the manifest with the provided private key, returning a +// SignedManifest. This typically won't be used within the registry, except +// for testing. +func Sign(m *Manifest, pk libtrust.PrivateKey) (*SignedManifest, error) { + p, err := json.MarshalIndent(m, "", " ") + if err != nil { + return nil, err + } + + js, err := libtrust.NewJSONSignature(p) + if err != nil { + return nil, err + } + + if err := js.Sign(pk); err != nil { + return nil, err + } + + pretty, err := js.PrettySignature("signatures") + if err != nil { + return nil, err + } + + return &SignedManifest{ + Manifest: *m, + all: pretty, + Canonical: p, + }, nil +} + +// SignWithChain signs the manifest with the given private key and x509 chain. +// The public key of the first element in the chain must be the public key +// corresponding with the sign key. +func SignWithChain(m *Manifest, key libtrust.PrivateKey, chain []*x509.Certificate) (*SignedManifest, error) { + p, err := json.MarshalIndent(m, "", " ") + if err != nil { + return nil, err + } + + js, err := libtrust.NewJSONSignature(p) + if err != nil { + return nil, err + } + + if err := js.SignWithChain(key, chain); err != nil { + return nil, err + } + + pretty, err := js.PrettySignature("signatures") + if err != nil { + return nil, err + } + + return &SignedManifest{ + Manifest: *m, + all: pretty, + Canonical: p, + }, nil +} diff --git a/vendor/github.com/docker/distribution/manifest/schema1/verify.go b/vendor/github.com/docker/distribution/manifest/schema1/verify.go new file mode 100644 index 000000000..fa8daa56f --- /dev/null +++ b/vendor/github.com/docker/distribution/manifest/schema1/verify.go @@ -0,0 +1,32 @@ +package schema1 + +import ( + "crypto/x509" + + "github.com/Sirupsen/logrus" + "github.com/docker/libtrust" +) + +// Verify verifies the signature of the signed manifest returning the public +// keys used during signing. +func Verify(sm *SignedManifest) ([]libtrust.PublicKey, error) { + js, err := libtrust.ParsePrettySignature(sm.all, "signatures") + if err != nil { + logrus.WithField("err", err).Debugf("(*SignedManifest).Verify") + return nil, err + } + + return js.Verify() +} + +// VerifyChains verifies the signature of the signed manifest against the +// certificate pool returning the list of verified chains. Signatures without +// an x509 chain are not checked. +func VerifyChains(sm *SignedManifest, ca *x509.CertPool) ([][]*x509.Certificate, error) { + js, err := libtrust.ParsePrettySignature(sm.all, "signatures") + if err != nil { + return nil, err + } + + return js.VerifyChains(ca) +} diff --git a/vendor/github.com/docker/distribution/manifest/schema2/builder.go b/vendor/github.com/docker/distribution/manifest/schema2/builder.go new file mode 100644 index 000000000..ec0bf858d --- /dev/null +++ b/vendor/github.com/docker/distribution/manifest/schema2/builder.go @@ -0,0 +1,80 @@ +package schema2 + +import ( + "github.com/docker/distribution" + "github.com/docker/distribution/context" + "github.com/docker/distribution/digest" +) + +// builder is a type for constructing manifests. +type builder struct { + // bs is a BlobService used to publish the configuration blob. + bs distribution.BlobService + + // configJSON references + configJSON []byte + + // layers is a list of layer descriptors that gets built by successive + // calls to AppendReference. + layers []distribution.Descriptor +} + +// NewManifestBuilder is used to build new manifests for the current schema +// version. 
It takes a BlobService so it can publish the configuration blob +// as part of the Build process. +func NewManifestBuilder(bs distribution.BlobService, configJSON []byte) distribution.ManifestBuilder { + mb := &builder{ + bs: bs, + configJSON: make([]byte, len(configJSON)), + } + copy(mb.configJSON, configJSON) + + return mb +} + +// Build produces a final manifest from the given references. +func (mb *builder) Build(ctx context.Context) (distribution.Manifest, error) { + m := Manifest{ + Versioned: SchemaVersion, + Layers: make([]distribution.Descriptor, len(mb.layers)), + } + copy(m.Layers, mb.layers) + + configDigest := digest.FromBytes(mb.configJSON) + + var err error + m.Config, err = mb.bs.Stat(ctx, configDigest) + switch err { + case nil: + // Override MediaType, since Put always replaces the specified media + // type with application/octet-stream in the descriptor it returns. + m.Config.MediaType = MediaTypeConfig + return FromStruct(m) + case distribution.ErrBlobUnknown: + // nop + default: + return nil, err + } + + // Add config to the blob store + m.Config, err = mb.bs.Put(ctx, MediaTypeConfig, mb.configJSON) + // Override MediaType, since Put always replaces the specified media + // type with application/octet-stream in the descriptor it returns. + m.Config.MediaType = MediaTypeConfig + if err != nil { + return nil, err + } + + return FromStruct(m) +} + +// AppendReference adds a reference to the current ManifestBuilder. +func (mb *builder) AppendReference(d distribution.Describable) error { + mb.layers = append(mb.layers, d.Descriptor()) + return nil +} + +// References returns the current references added to this builder. +func (mb *builder) References() []distribution.Descriptor { + return mb.layers +} diff --git a/vendor/github.com/docker/distribution/manifest/schema2/builder_test.go b/vendor/github.com/docker/distribution/manifest/schema2/builder_test.go new file mode 100644 index 000000000..851f917cb --- /dev/null +++ b/vendor/github.com/docker/distribution/manifest/schema2/builder_test.go @@ -0,0 +1,210 @@ +package schema2 + +import ( + "reflect" + "testing" + + "github.com/docker/distribution" + "github.com/docker/distribution/context" + "github.com/docker/distribution/digest" +) + +type mockBlobService struct { + descriptors map[digest.Digest]distribution.Descriptor +} + +func (bs *mockBlobService) Stat(ctx context.Context, dgst digest.Digest) (distribution.Descriptor, error) { + if descriptor, ok := bs.descriptors[dgst]; ok { + return descriptor, nil + } + return distribution.Descriptor{}, distribution.ErrBlobUnknown +} + +func (bs *mockBlobService) Get(ctx context.Context, dgst digest.Digest) ([]byte, error) { + panic("not implemented") +} + +func (bs *mockBlobService) Open(ctx context.Context, dgst digest.Digest) (distribution.ReadSeekCloser, error) { + panic("not implemented") +} + +func (bs *mockBlobService) Put(ctx context.Context, mediaType string, p []byte) (distribution.Descriptor, error) { + d := distribution.Descriptor{ + Digest: digest.FromBytes(p), + Size: int64(len(p)), + MediaType: "application/octet-stream", + } + bs.descriptors[d.Digest] = d + return d, nil +} + +func (bs *mockBlobService) Create(ctx context.Context, options ...distribution.BlobCreateOption) (distribution.BlobWriter, error) { + panic("not implemented") +} + +func (bs *mockBlobService) Resume(ctx context.Context, id string) (distribution.BlobWriter, error) { + panic("not implemented") +} + +func TestBuilder(t *testing.T) { + imgJSON := []byte(`{ + "architecture": "amd64", + "config": { 
+ "AttachStderr": false, + "AttachStdin": false, + "AttachStdout": false, + "Cmd": [ + "/bin/sh", + "-c", + "echo hi" + ], + "Domainname": "", + "Entrypoint": null, + "Env": [ + "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin", + "derived=true", + "asdf=true" + ], + "Hostname": "23304fc829f9", + "Image": "sha256:4ab15c48b859c2920dd5224f92aabcd39a52794c5b3cf088fb3bbb438756c246", + "Labels": {}, + "OnBuild": [], + "OpenStdin": false, + "StdinOnce": false, + "Tty": false, + "User": "", + "Volumes": null, + "WorkingDir": "" + }, + "container": "e91032eb0403a61bfe085ff5a5a48e3659e5a6deae9f4d678daa2ae399d5a001", + "container_config": { + "AttachStderr": false, + "AttachStdin": false, + "AttachStdout": false, + "Cmd": [ + "/bin/sh", + "-c", + "#(nop) CMD [\"/bin/sh\" \"-c\" \"echo hi\"]" + ], + "Domainname": "", + "Entrypoint": null, + "Env": [ + "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin", + "derived=true", + "asdf=true" + ], + "Hostname": "23304fc829f9", + "Image": "sha256:4ab15c48b859c2920dd5224f92aabcd39a52794c5b3cf088fb3bbb438756c246", + "Labels": {}, + "OnBuild": [], + "OpenStdin": false, + "StdinOnce": false, + "Tty": false, + "User": "", + "Volumes": null, + "WorkingDir": "" + }, + "created": "2015-11-04T23:06:32.365666163Z", + "docker_version": "1.9.0-dev", + "history": [ + { + "created": "2015-10-31T22:22:54.690851953Z", + "created_by": "/bin/sh -c #(nop) ADD file:a3bc1e842b69636f9df5256c49c5374fb4eef1e281fe3f282c65fb853ee171c5 in /" + }, + { + "created": "2015-10-31T22:22:55.613815829Z", + "created_by": "/bin/sh -c #(nop) CMD [\"sh\"]" + }, + { + "created": "2015-11-04T23:06:30.934316144Z", + "created_by": "/bin/sh -c #(nop) ENV derived=true", + "empty_layer": true + }, + { + "created": "2015-11-04T23:06:31.192097572Z", + "created_by": "/bin/sh -c #(nop) ENV asdf=true", + "empty_layer": true + }, + { + "created": "2015-11-04T23:06:32.083868454Z", + "created_by": "/bin/sh -c dd if=/dev/zero of=/file bs=1024 count=1024" + }, + { + "created": "2015-11-04T23:06:32.365666163Z", + "created_by": "/bin/sh -c #(nop) CMD [\"/bin/sh\" \"-c\" \"echo hi\"]", + "empty_layer": true + } + ], + "os": "linux", + "rootfs": { + "diff_ids": [ + "sha256:c6f988f4874bb0add23a778f753c65efe992244e148a1d2ec2a8b664fb66bbd1", + "sha256:5f70bf18a086007016e948b04aed3b82103a36bea41755b6cddfaf10ace3c6ef", + "sha256:13f53e08df5a220ab6d13c58b2bf83a59cbdc2e04d0a3f041ddf4b0ba4112d49" + ], + "type": "layers" + } +}`) + configDigest := digest.FromBytes(imgJSON) + + descriptors := []distribution.Descriptor{ + { + Digest: digest.Digest("sha256:a3ed95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4"), + Size: 5312, + MediaType: MediaTypeLayer, + }, + { + Digest: digest.Digest("sha256:86e0e091d0da6bde2456dbb48306f3956bbeb2eae1b5b9a43045843f69fe4aaa"), + Size: 235231, + MediaType: MediaTypeLayer, + }, + { + Digest: digest.Digest("sha256:b4ed95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4"), + Size: 639152, + MediaType: MediaTypeLayer, + }, + } + + bs := &mockBlobService{descriptors: make(map[digest.Digest]distribution.Descriptor)} + builder := NewManifestBuilder(bs, imgJSON) + + for _, d := range descriptors { + if err := builder.AppendReference(d); err != nil { + t.Fatalf("AppendReference returned error: %v", err) + } + } + + built, err := builder.Build(context.Background()) + if err != nil { + t.Fatalf("Build returned error: %v", err) + } + + // Check that the config was put in the blob store + _, err = bs.Stat(context.Background(), configDigest) + if err != nil 
{ + t.Fatal("config was not put in the blob store") + } + + manifest := built.(*DeserializedManifest).Manifest + + if manifest.Versioned.SchemaVersion != 2 { + t.Fatal("SchemaVersion != 2") + } + + target := manifest.Target() + if target.Digest != configDigest { + t.Fatalf("unexpected digest in target: %s", target.Digest.String()) + } + if target.MediaType != MediaTypeConfig { + t.Fatalf("unexpected media type in target: %s", target.MediaType) + } + if target.Size != 3153 { + t.Fatalf("unexpected size in target: %d", target.Size) + } + + references := manifest.References() + expected := append([]distribution.Descriptor{manifest.Target()}, descriptors...) + if !reflect.DeepEqual(references, expected) { + t.Fatal("References() does not match the descriptors added") + } +} diff --git a/vendor/github.com/docker/distribution/manifest/schema2/manifest.go b/vendor/github.com/docker/distribution/manifest/schema2/manifest.go new file mode 100644 index 000000000..741998d04 --- /dev/null +++ b/vendor/github.com/docker/distribution/manifest/schema2/manifest.go @@ -0,0 +1,134 @@ +package schema2 + +import ( + "encoding/json" + "errors" + "fmt" + + "github.com/docker/distribution" + "github.com/docker/distribution/digest" + "github.com/docker/distribution/manifest" +) + +const ( + // MediaTypeManifest specifies the mediaType for the current version. + MediaTypeManifest = "application/vnd.docker.distribution.manifest.v2+json" + + // MediaTypeConfig specifies the mediaType for the image configuration. + MediaTypeConfig = "application/vnd.docker.container.image.v1+json" + + // MediaTypePluginConfig specifies the mediaType for plugin configuration. + MediaTypePluginConfig = "application/vnd.docker.plugin.v1+json" + + // MediaTypeLayer is the mediaType used for layers referenced by the + // manifest. + MediaTypeLayer = "application/vnd.docker.image.rootfs.diff.tar.gzip" + + // MediaTypeForeignLayer is the mediaType used for layers that must be + // downloaded from foreign URLs. + MediaTypeForeignLayer = "application/vnd.docker.image.rootfs.foreign.diff.tar.gzip" +) + +var ( + // SchemaVersion provides a pre-initialized version structure for this + // packages version of the manifest. + SchemaVersion = manifest.Versioned{ + SchemaVersion: 2, + MediaType: MediaTypeManifest, + } +) + +func init() { + schema2Func := func(b []byte) (distribution.Manifest, distribution.Descriptor, error) { + m := new(DeserializedManifest) + err := m.UnmarshalJSON(b) + if err != nil { + return nil, distribution.Descriptor{}, err + } + + dgst := digest.FromBytes(b) + return m, distribution.Descriptor{Digest: dgst, Size: int64(len(b)), MediaType: MediaTypeManifest}, err + } + err := distribution.RegisterManifestSchema(MediaTypeManifest, schema2Func) + if err != nil { + panic(fmt.Sprintf("Unable to register manifest: %s", err)) + } +} + +// Manifest defines a schema2 manifest. +type Manifest struct { + manifest.Versioned + + // Config references the image configuration as a blob. + Config distribution.Descriptor `json:"config"` + + // Layers lists descriptors for the layers referenced by the + // configuration. + Layers []distribution.Descriptor `json:"layers"` +} + +// References returnes the descriptors of this manifests references. +func (m Manifest) References() []distribution.Descriptor { + references := make([]distribution.Descriptor, 0, 1+len(m.Layers)) + references = append(references, m.Config) + references = append(references, m.Layers...) + return references +} + +// Target returns the target of this signed manifest. 
+func (m Manifest) Target() distribution.Descriptor { + return m.Config +} + +// DeserializedManifest wraps Manifest with a copy of the original JSON. +// It satisfies the distribution.Manifest interface. +type DeserializedManifest struct { + Manifest + + // canonical is the canonical byte representation of the Manifest. + canonical []byte +} + +// FromStruct takes a Manifest structure, marshals it to JSON, and returns a +// DeserializedManifest which contains the manifest and its JSON representation. +func FromStruct(m Manifest) (*DeserializedManifest, error) { + var deserialized DeserializedManifest + deserialized.Manifest = m + + var err error + deserialized.canonical, err = json.MarshalIndent(&m, "", " ") + return &deserialized, err +} + +// UnmarshalJSON populates a new Manifest struct from JSON data. +func (m *DeserializedManifest) UnmarshalJSON(b []byte) error { + m.canonical = make([]byte, len(b), len(b)) + // store manifest in canonical + copy(m.canonical, b) + + // Unmarshal canonical JSON into Manifest object + var manifest Manifest + if err := json.Unmarshal(m.canonical, &manifest); err != nil { + return err + } + + m.Manifest = manifest + + return nil +} + +// MarshalJSON returns the contents of canonical. If canonical is empty, +// marshals the inner contents. +func (m *DeserializedManifest) MarshalJSON() ([]byte, error) { + if len(m.canonical) > 0 { + return m.canonical, nil + } + + return nil, errors.New("JSON representation not initialized in DeserializedManifest") +} + +// Payload returns the raw content of the manifest. The contents can be used to +// calculate the content identifier. +func (m DeserializedManifest) Payload() (string, []byte, error) { + return m.MediaType, m.canonical, nil +} diff --git a/vendor/github.com/docker/distribution/manifest/schema2/manifest_test.go b/vendor/github.com/docker/distribution/manifest/schema2/manifest_test.go new file mode 100644 index 000000000..f0003d18b --- /dev/null +++ b/vendor/github.com/docker/distribution/manifest/schema2/manifest_test.go @@ -0,0 +1,111 @@ +package schema2 + +import ( + "bytes" + "encoding/json" + "reflect" + "testing" + + "github.com/docker/distribution" +) + +var expectedManifestSerialization = []byte(`{ + "schemaVersion": 2, + "mediaType": "application/vnd.docker.distribution.manifest.v2+json", + "config": { + "mediaType": "application/vnd.docker.container.image.v1+json", + "size": 985, + "digest": "sha256:1a9ec845ee94c202b2d5da74a24f0ed2058318bfa9879fa541efaecba272e86b" + }, + "layers": [ + { + "mediaType": "application/vnd.docker.image.rootfs.diff.tar.gzip", + "size": 153263, + "digest": "sha256:62d8908bee94c202b2d35224a221aaa2058318bfa9879fa541efaecba272331b" + } + ] +}`) + +func TestManifest(t *testing.T) { + manifest := Manifest{ + Versioned: SchemaVersion, + Config: distribution.Descriptor{ + Digest: "sha256:1a9ec845ee94c202b2d5da74a24f0ed2058318bfa9879fa541efaecba272e86b", + Size: 985, + MediaType: MediaTypeConfig, + }, + Layers: []distribution.Descriptor{ + { + Digest: "sha256:62d8908bee94c202b2d35224a221aaa2058318bfa9879fa541efaecba272331b", + Size: 153263, + MediaType: MediaTypeLayer, + }, + }, + } + + deserialized, err := FromStruct(manifest) + if err != nil { + t.Fatalf("error creating DeserializedManifest: %v", err) + } + + mediaType, canonical, err := deserialized.Payload() + + if mediaType != MediaTypeManifest { + t.Fatalf("unexpected media type: %s", mediaType) + } + + // Check that the canonical field is the same as json.MarshalIndent + // with these parameters. 
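// Illustrative usage sketch (not part of the upstream sources; the digests and
// sizes below are invented): FromStruct fixes the canonical bytes, and hashing
// those bytes yields the digest that clients address the manifest by.
//
//    m := Manifest{
//        Versioned: SchemaVersion,
//        Config:    distribution.Descriptor{MediaType: MediaTypeConfig, Digest: "sha256:aaaa...", Size: 985},
//        Layers: []distribution.Descriptor{
//            {MediaType: MediaTypeLayer, Digest: "sha256:bbbb...", Size: 153263},
//        },
//    }
//    dm, err := FromStruct(m)
//    if err != nil {
//        // handle error
//    }
//    mediaType, canonical, _ := dm.Payload() // mediaType == MediaTypeManifest
//    dgst := digest.FromBytes(canonical)     // the manifest digest clients pull by
//    _ = dgst
//    // dm.References() lists the config descriptor first, followed by the layers.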
+ p, err := json.MarshalIndent(&manifest, "", " ") + if err != nil { + t.Fatalf("error marshaling manifest: %v", err) + } + if !bytes.Equal(p, canonical) { + t.Fatalf("manifest bytes not equal: %q != %q", string(canonical), string(p)) + } + + // Check that canonical field matches expected value. + if !bytes.Equal(expectedManifestSerialization, canonical) { + t.Fatalf("manifest bytes not equal: %q != %q", string(canonical), string(expectedManifestSerialization)) + } + + var unmarshalled DeserializedManifest + if err := json.Unmarshal(deserialized.canonical, &unmarshalled); err != nil { + t.Fatalf("error unmarshaling manifest: %v", err) + } + + if !reflect.DeepEqual(&unmarshalled, deserialized) { + t.Fatalf("manifests are different after unmarshaling: %v != %v", unmarshalled, *deserialized) + } + + target := deserialized.Target() + if target.Digest != "sha256:1a9ec845ee94c202b2d5da74a24f0ed2058318bfa9879fa541efaecba272e86b" { + t.Fatalf("unexpected digest in target: %s", target.Digest.String()) + } + if target.MediaType != MediaTypeConfig { + t.Fatalf("unexpected media type in target: %s", target.MediaType) + } + if target.Size != 985 { + t.Fatalf("unexpected size in target: %d", target.Size) + } + + references := deserialized.References() + if len(references) != 2 { + t.Fatalf("unexpected number of references: %d", len(references)) + } + + if !reflect.DeepEqual(references[0], target) { + t.Fatalf("first reference should be target: %v != %v", references[0], target) + } + + // Test the second reference + if references[1].Digest != "sha256:62d8908bee94c202b2d35224a221aaa2058318bfa9879fa541efaecba272331b" { + t.Fatalf("unexpected digest in reference: %s", references[0].Digest.String()) + } + if references[1].MediaType != MediaTypeLayer { + t.Fatalf("unexpected media type in reference: %s", references[0].MediaType) + } + if references[1].Size != 153263 { + t.Fatalf("unexpected size in reference: %d", references[0].Size) + } +} diff --git a/vendor/github.com/docker/distribution/manifest/versioned.go b/vendor/github.com/docker/distribution/manifest/versioned.go new file mode 100644 index 000000000..caa6b14e8 --- /dev/null +++ b/vendor/github.com/docker/distribution/manifest/versioned.go @@ -0,0 +1,12 @@ +package manifest + +// Versioned provides a struct with the manifest schemaVersion and mediaType. +// Incoming content with unknown schema version can be decoded against this +// struct to check the version. +type Versioned struct { + // SchemaVersion is the image manifest schema that this image follows + SchemaVersion int `json:"schemaVersion"` + + // MediaType is the media type of this schema. + MediaType string `json:"mediaType,omitempty"` +} diff --git a/vendor/github.com/docker/distribution/manifests.go b/vendor/github.com/docker/distribution/manifests.go new file mode 100644 index 000000000..c4fb63450 --- /dev/null +++ b/vendor/github.com/docker/distribution/manifests.go @@ -0,0 +1,125 @@ +package distribution + +import ( + "fmt" + "mime" + + "github.com/docker/distribution/context" + "github.com/docker/distribution/digest" +) + +// Manifest represents a registry object specifying a set of +// references and an optional target +type Manifest interface { + // References returns a list of objects which make up this manifest. + // A reference is anything which can be represented by a + // distribution.Descriptor. These can consist of layers, resources or other + // manifests. 
+ // + // While no particular order is required, implementations should return + // them from highest to lowest priority. For example, one might want to + // return the base layer before the top layer. + References() []Descriptor + + // Payload provides the serialized format of the manifest, in addition to + // the mediatype. + Payload() (mediatype string, payload []byte, err error) +} + +// ManifestBuilder creates a manifest allowing one to include dependencies. +// Instances can be obtained from a version-specific manifest package. Manifest +// specific data is passed into the function which creates the builder. +type ManifestBuilder interface { + // Build creates the manifest from his builder. + Build(ctx context.Context) (Manifest, error) + + // References returns a list of objects which have been added to this + // builder. The dependencies are returned in the order they were added, + // which should be from base to head. + References() []Descriptor + + // AppendReference includes the given object in the manifest after any + // existing dependencies. If the add fails, such as when adding an + // unsupported dependency, an error may be returned. + // + // The destination of the reference is dependent on the manifest type and + // the dependency type. + AppendReference(dependency Describable) error +} + +// ManifestService describes operations on image manifests. +type ManifestService interface { + // Exists returns true if the manifest exists. + Exists(ctx context.Context, dgst digest.Digest) (bool, error) + + // Get retrieves the manifest specified by the given digest + Get(ctx context.Context, dgst digest.Digest, options ...ManifestServiceOption) (Manifest, error) + + // Put creates or updates the given manifest returning the manifest digest + Put(ctx context.Context, manifest Manifest, options ...ManifestServiceOption) (digest.Digest, error) + + // Delete removes the manifest specified by the given digest. Deleting + // a manifest that doesn't exist will return ErrManifestNotFound + Delete(ctx context.Context, dgst digest.Digest) error +} + +// ManifestEnumerator enables iterating over manifests +type ManifestEnumerator interface { + // Enumerate calls ingester for each manifest. + Enumerate(ctx context.Context, ingester func(digest.Digest) error) error +} + +// Describable is an interface for descriptors +type Describable interface { + Descriptor() Descriptor +} + +// ManifestMediaTypes returns the supported media types for manifests. +func ManifestMediaTypes() (mediaTypes []string) { + for t := range mappings { + if t != "" { + mediaTypes = append(mediaTypes, t) + } + } + return +} + +// UnmarshalFunc implements manifest unmarshalling a given MediaType +type UnmarshalFunc func([]byte) (Manifest, Descriptor, error) + +var mappings = make(map[string]UnmarshalFunc, 0) + +// UnmarshalManifest looks up manifest unmarshal functions based on +// MediaType +func UnmarshalManifest(ctHeader string, p []byte) (Manifest, Descriptor, error) { + // Need to look up by the actual media type, not the raw contents of + // the header. Strip semicolons and anything following them. 
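// Illustrative usage sketch (not part of the upstream sources; resp and body
// are assumed to come from a registry HTTP client): callers select the
// unmarshal function via the Content-Type header. Importing a schema package
// (for example schema2, whose init calls RegisterManifestSchema) is what
// populates the mappings table.
//
//    import (
//        "github.com/docker/distribution"
//        _ "github.com/docker/distribution/manifest/schema2" // registers its UnmarshalFunc
//    )
//
//    m, desc, err := distribution.UnmarshalManifest(resp.Header.Get("Content-Type"), body)
//    if err != nil {
//        // unsupported or malformed manifest
//    }
//    for _, ref := range m.References() {
//        _ = ref.Digest // config/layer digests to fetch next
//    }
//    _ = desc // descriptor of the manifest itself (digest, size, media type)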
+ var mediatype string + if ctHeader != "" { + var err error + mediatype, _, err = mime.ParseMediaType(ctHeader) + if err != nil { + return nil, Descriptor{}, err + } + } + + unmarshalFunc, ok := mappings[mediatype] + if !ok { + unmarshalFunc, ok = mappings[""] + if !ok { + return nil, Descriptor{}, fmt.Errorf("unsupported manifest mediatype and no default available: %s", mediatype) + } + } + + return unmarshalFunc(p) +} + +// RegisterManifestSchema registers an UnmarshalFunc for a given schema type. This +// should be called from specific +func RegisterManifestSchema(mediatype string, u UnmarshalFunc) error { + if _, ok := mappings[mediatype]; ok { + return fmt.Errorf("manifest mediatype registration would overwrite existing: %s", mediatype) + } + mappings[mediatype] = u + return nil +} diff --git a/vendor/github.com/docker/distribution/notifications/bridge.go b/vendor/github.com/docker/distribution/notifications/bridge.go new file mode 100644 index 000000000..502288a40 --- /dev/null +++ b/vendor/github.com/docker/distribution/notifications/bridge.go @@ -0,0 +1,214 @@ +package notifications + +import ( + "net/http" + "time" + + "github.com/docker/distribution" + "github.com/docker/distribution/context" + "github.com/docker/distribution/digest" + "github.com/docker/distribution/reference" + "github.com/docker/distribution/uuid" +) + +type bridge struct { + ub URLBuilder + actor ActorRecord + source SourceRecord + request RequestRecord + sink Sink +} + +var _ Listener = &bridge{} + +// URLBuilder defines a subset of url builder to be used by the event listener. +type URLBuilder interface { + BuildManifestURL(name reference.Named) (string, error) + BuildBlobURL(ref reference.Canonical) (string, error) +} + +// NewBridge returns a notification listener that writes records to sink, +// using the actor and source. Any urls populated in the events created by +// this bridge will be created using the URLBuilder. +// TODO(stevvooe): Update this to simply take a context.Context object. +func NewBridge(ub URLBuilder, source SourceRecord, actor ActorRecord, request RequestRecord, sink Sink) Listener { + return &bridge{ + ub: ub, + actor: actor, + source: source, + request: request, + sink: sink, + } +} + +// NewRequestRecord builds a RequestRecord for use in NewBridge from an +// http.Request, associating it with a request id. 
+func NewRequestRecord(id string, r *http.Request) RequestRecord { + return RequestRecord{ + ID: id, + Addr: context.RemoteAddr(r), + Host: r.Host, + Method: r.Method, + UserAgent: r.UserAgent(), + } +} + +func (b *bridge) ManifestPushed(repo reference.Named, sm distribution.Manifest, options ...distribution.ManifestServiceOption) error { + manifestEvent, err := b.createManifestEvent(EventActionPush, repo, sm) + if err != nil { + return err + } + + for _, option := range options { + if opt, ok := option.(distribution.WithTagOption); ok { + manifestEvent.Target.Tag = opt.Tag + break + } + } + return b.sink.Write(*manifestEvent) +} + +func (b *bridge) ManifestPulled(repo reference.Named, sm distribution.Manifest, options ...distribution.ManifestServiceOption) error { + manifestEvent, err := b.createManifestEvent(EventActionPull, repo, sm) + if err != nil { + return err + } + + for _, option := range options { + if opt, ok := option.(distribution.WithTagOption); ok { + manifestEvent.Target.Tag = opt.Tag + break + } + } + return b.sink.Write(*manifestEvent) +} + +func (b *bridge) ManifestDeleted(repo reference.Named, dgst digest.Digest) error { + return b.createManifestDeleteEventAndWrite(EventActionDelete, repo, dgst) +} + +func (b *bridge) BlobPushed(repo reference.Named, desc distribution.Descriptor) error { + return b.createBlobEventAndWrite(EventActionPush, repo, desc) +} + +func (b *bridge) BlobPulled(repo reference.Named, desc distribution.Descriptor) error { + return b.createBlobEventAndWrite(EventActionPull, repo, desc) +} + +func (b *bridge) BlobMounted(repo reference.Named, desc distribution.Descriptor, fromRepo reference.Named) error { + event, err := b.createBlobEvent(EventActionMount, repo, desc) + if err != nil { + return err + } + event.Target.FromRepository = fromRepo.Name() + return b.sink.Write(*event) +} + +func (b *bridge) BlobDeleted(repo reference.Named, dgst digest.Digest) error { + return b.createBlobDeleteEventAndWrite(EventActionDelete, repo, dgst) +} + +func (b *bridge) createManifestEventAndWrite(action string, repo reference.Named, sm distribution.Manifest) error { + manifestEvent, err := b.createManifestEvent(action, repo, sm) + if err != nil { + return err + } + + return b.sink.Write(*manifestEvent) +} + +func (b *bridge) createManifestDeleteEventAndWrite(action string, repo reference.Named, dgst digest.Digest) error { + event := b.createEvent(action) + event.Target.Repository = repo.Name() + event.Target.Digest = dgst + + return b.sink.Write(*event) +} + +func (b *bridge) createManifestEvent(action string, repo reference.Named, sm distribution.Manifest) (*Event, error) { + event := b.createEvent(action) + event.Target.Repository = repo.Name() + + mt, p, err := sm.Payload() + if err != nil { + return nil, err + } + + // Ensure we have the canonical manifest descriptor here + _, desc, err := distribution.UnmarshalManifest(mt, p) + if err != nil { + return nil, err + } + + event.Target.MediaType = mt + event.Target.Length = desc.Size + event.Target.Size = desc.Size + event.Target.Digest = desc.Digest + + ref, err := reference.WithDigest(repo, event.Target.Digest) + if err != nil { + return nil, err + } + + event.Target.URL, err = b.ub.BuildManifestURL(ref) + if err != nil { + return nil, err + } + + return event, nil +} + +func (b *bridge) createBlobDeleteEventAndWrite(action string, repo reference.Named, dgst digest.Digest) error { + event := b.createEvent(action) + event.Target.Digest = dgst + event.Target.Repository = repo.Name() + + return b.sink.Write(*event) 
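// Illustrative usage sketch (not part of the upstream sources; the URL, records
// and tag are invented, and httpRequest, sink and signedManifest stand for
// values the caller already has): a bridge is typically constructed per request
// and then notified by the registry handlers, with WithTag carrying the tag
// through to the event target.
//
//    ub, _ := v2.NewURLBuilderFromString("https://registry.example.com/", false)
//    listener := NewBridge(ub, SourceRecord{Addr: "registry-01:5000"}, ActorRecord{Name: "ci"},
//        NewRequestRecord(uuid.Generate().String(), httpRequest), sink)
//    repoRef, _ := reference.ParseNamed("library/test")
//    _ = listener.ManifestPushed(repoRef, signedManifest, distribution.WithTag("latest"))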
+} + +func (b *bridge) createBlobEventAndWrite(action string, repo reference.Named, desc distribution.Descriptor) error { + event, err := b.createBlobEvent(action, repo, desc) + if err != nil { + return err + } + + return b.sink.Write(*event) +} + +func (b *bridge) createBlobEvent(action string, repo reference.Named, desc distribution.Descriptor) (*Event, error) { + event := b.createEvent(action) + event.Target.Descriptor = desc + event.Target.Length = desc.Size + event.Target.Repository = repo.Name() + + ref, err := reference.WithDigest(repo, desc.Digest) + if err != nil { + return nil, err + } + + event.Target.URL, err = b.ub.BuildBlobURL(ref) + if err != nil { + return nil, err + } + + return event, nil +} + +// createEvent creates an event with actor and source populated. +func (b *bridge) createEvent(action string) *Event { + event := createEvent(action) + event.Source = b.source + event.Actor = b.actor + event.Request = b.request + + return event +} + +// createEvent returns a new event, timestamped, with the specified action. +func createEvent(action string) *Event { + return &Event{ + ID: uuid.Generate().String(), + Timestamp: time.Now(), + Action: action, + } +} diff --git a/vendor/github.com/docker/distribution/notifications/bridge_test.go b/vendor/github.com/docker/distribution/notifications/bridge_test.go new file mode 100644 index 000000000..0f85791c8 --- /dev/null +++ b/vendor/github.com/docker/distribution/notifications/bridge_test.go @@ -0,0 +1,222 @@ +package notifications + +import ( + "testing" + + "github.com/docker/distribution" + "github.com/docker/distribution/digest" + "github.com/docker/distribution/manifest/schema1" + "github.com/docker/distribution/reference" + "github.com/docker/distribution/registry/api/v2" + "github.com/docker/distribution/uuid" + "github.com/docker/libtrust" +) + +var ( + // common environment for expected manifest events. + + repo = "test/repo" + source = SourceRecord{ + Addr: "remote.test", + InstanceID: uuid.Generate().String(), + } + ub = mustUB(v2.NewURLBuilderFromString("http://test.example.com/", false)) + + actor = ActorRecord{ + Name: "test", + } + request = RequestRecord{} + m = schema1.Manifest{ + Name: repo, + Tag: "latest", + } + + sm *schema1.SignedManifest + payload []byte + dgst digest.Digest +) + +func TestEventBridgeManifestPulled(t *testing.T) { + l := createTestEnv(t, testSinkFn(func(events ...Event) error { + checkCommonManifest(t, EventActionPull, events...) + + return nil + })) + + repoRef, _ := reference.ParseNamed(repo) + if err := l.ManifestPulled(repoRef, sm); err != nil { + t.Fatalf("unexpected error notifying manifest pull: %v", err) + } +} + +func TestEventBridgeManifestPushed(t *testing.T) { + l := createTestEnv(t, testSinkFn(func(events ...Event) error { + checkCommonManifest(t, EventActionPush, events...) + + return nil + })) + + repoRef, _ := reference.ParseNamed(repo) + if err := l.ManifestPushed(repoRef, sm); err != nil { + t.Fatalf("unexpected error notifying manifest pull: %v", err) + } +} + +func TestEventBridgeManifestPushedWithTag(t *testing.T) { + l := createTestEnv(t, testSinkFn(func(events ...Event) error { + checkCommonManifest(t, EventActionPush, events...) 
+ if events[0].Target.Tag != "latest" { + t.Fatalf("missing or unexpected tag: %#v", events[0].Target) + } + + return nil + })) + + repoRef, _ := reference.ParseNamed(repo) + if err := l.ManifestPushed(repoRef, sm, distribution.WithTag(m.Tag)); err != nil { + t.Fatalf("unexpected error notifying manifest pull: %v", err) + } +} + +func TestEventBridgeManifestPulledWithTag(t *testing.T) { + l := createTestEnv(t, testSinkFn(func(events ...Event) error { + checkCommonManifest(t, EventActionPull, events...) + if events[0].Target.Tag != "latest" { + t.Fatalf("missing or unexpected tag: %#v", events[0].Target) + } + + return nil + })) + + repoRef, _ := reference.ParseNamed(repo) + if err := l.ManifestPulled(repoRef, sm, distribution.WithTag(m.Tag)); err != nil { + t.Fatalf("unexpected error notifying manifest pull: %v", err) + } +} + +func TestEventBridgeManifestDeleted(t *testing.T) { + l := createTestEnv(t, testSinkFn(func(events ...Event) error { + checkDeleted(t, EventActionDelete, events...) + return nil + })) + + repoRef, _ := reference.ParseNamed(repo) + if err := l.ManifestDeleted(repoRef, dgst); err != nil { + t.Fatalf("unexpected error notifying manifest pull: %v", err) + } +} + +func createTestEnv(t *testing.T, fn testSinkFn) Listener { + pk, err := libtrust.GenerateECP256PrivateKey() + if err != nil { + t.Fatalf("error generating private key: %v", err) + } + + sm, err = schema1.Sign(&m, pk) + if err != nil { + t.Fatalf("error signing manifest: %v", err) + } + + payload = sm.Canonical + dgst = digest.FromBytes(payload) + + return NewBridge(ub, source, actor, request, fn) +} + +func checkDeleted(t *testing.T, action string, events ...Event) { + if len(events) != 1 { + t.Fatalf("unexpected number of events: %v != 1", len(events)) + } + + event := events[0] + + if event.Source != source { + t.Fatalf("source not equal: %#v != %#v", event.Source, source) + } + + if event.Request != request { + t.Fatalf("request not equal: %#v != %#v", event.Request, request) + } + + if event.Actor != actor { + t.Fatalf("request not equal: %#v != %#v", event.Actor, actor) + } + + if event.Target.Digest != dgst { + t.Fatalf("unexpected digest on event target: %q != %q", event.Target.Digest, dgst) + } + + if event.Target.Repository != repo { + t.Fatalf("unexpected repository: %q != %q", event.Target.Repository, repo) + } + +} + +func checkCommonManifest(t *testing.T, action string, events ...Event) { + checkCommon(t, events...) 
+ + event := events[0] + if event.Action != action { + t.Fatalf("unexpected event action: %q != %q", event.Action, action) + } + + repoRef, _ := reference.ParseNamed(repo) + ref, _ := reference.WithDigest(repoRef, dgst) + u, err := ub.BuildManifestURL(ref) + if err != nil { + t.Fatalf("error building expected url: %v", err) + } + + if event.Target.URL != u { + t.Fatalf("incorrect url passed: \n%q != \n%q", event.Target.URL, u) + } +} + +func checkCommon(t *testing.T, events ...Event) { + if len(events) != 1 { + t.Fatalf("unexpected number of events: %v != 1", len(events)) + } + + event := events[0] + + if event.Source != source { + t.Fatalf("source not equal: %#v != %#v", event.Source, source) + } + + if event.Request != request { + t.Fatalf("request not equal: %#v != %#v", event.Request, request) + } + + if event.Actor != actor { + t.Fatalf("request not equal: %#v != %#v", event.Actor, actor) + } + + if event.Target.Digest != dgst { + t.Fatalf("unexpected digest on event target: %q != %q", event.Target.Digest, dgst) + } + + if event.Target.Length != int64(len(payload)) { + t.Fatalf("unexpected target length: %v != %v", event.Target.Length, len(payload)) + } + + if event.Target.Repository != repo { + t.Fatalf("unexpected repository: %q != %q", event.Target.Repository, repo) + } + +} + +type testSinkFn func(events ...Event) error + +func (tsf testSinkFn) Write(events ...Event) error { + return tsf(events...) +} + +func (tsf testSinkFn) Close() error { return nil } + +func mustUB(ub *v2.URLBuilder, err error) *v2.URLBuilder { + if err != nil { + panic(err) + } + + return ub +} diff --git a/vendor/github.com/docker/distribution/notifications/endpoint.go b/vendor/github.com/docker/distribution/notifications/endpoint.go new file mode 100644 index 000000000..29a9e27b5 --- /dev/null +++ b/vendor/github.com/docker/distribution/notifications/endpoint.go @@ -0,0 +1,93 @@ +package notifications + +import ( + "net/http" + "time" +) + +// EndpointConfig covers the optional configuration parameters for an active +// endpoint. +type EndpointConfig struct { + Headers http.Header + Timeout time.Duration + Threshold int + Backoff time.Duration + IgnoredMediaTypes []string + Transport *http.Transport +} + +// defaults set any zero-valued fields to a reasonable default. +func (ec *EndpointConfig) defaults() { + if ec.Timeout <= 0 { + ec.Timeout = time.Second + } + + if ec.Threshold <= 0 { + ec.Threshold = 10 + } + + if ec.Backoff <= 0 { + ec.Backoff = time.Second + } + + if ec.Transport == nil { + ec.Transport = http.DefaultTransport.(*http.Transport) + } +} + +// Endpoint is a reliable, queued, thread-safe sink that notify external http +// services when events are written. Writes are non-blocking and always +// succeed for callers but events may be queued internally. +type Endpoint struct { + Sink + url string + name string + + EndpointConfig + + metrics *safeMetrics +} + +// NewEndpoint returns a running endpoint, ready to receive events. +func NewEndpoint(name, url string, config EndpointConfig) *Endpoint { + var endpoint Endpoint + endpoint.name = name + endpoint.url = url + endpoint.EndpointConfig = config + endpoint.defaults() + endpoint.metrics = newSafeMetrics() + + // Configures the inmemory queue, retry, http pipeline. 
+ endpoint.Sink = newHTTPSink( + endpoint.url, endpoint.Timeout, endpoint.Headers, + endpoint.Transport, endpoint.metrics.httpStatusListener()) + endpoint.Sink = newRetryingSink(endpoint.Sink, endpoint.Threshold, endpoint.Backoff) + endpoint.Sink = newEventQueue(endpoint.Sink, endpoint.metrics.eventQueueListener()) + endpoint.Sink = newIgnoredMediaTypesSink(endpoint.Sink, config.IgnoredMediaTypes) + + register(&endpoint) + return &endpoint +} + +// Name returns the name of the endpoint, generally used for debugging. +func (e *Endpoint) Name() string { + return e.name +} + +// URL returns the url of the endpoint. +func (e *Endpoint) URL() string { + return e.url +} + +// ReadMetrics populates em with metrics from the endpoint. +func (e *Endpoint) ReadMetrics(em *EndpointMetrics) { + e.metrics.Lock() + defer e.metrics.Unlock() + + *em = e.metrics.EndpointMetrics + // Map still need to copied in a threadsafe manner. + em.Statuses = make(map[string]int) + for k, v := range e.metrics.Statuses { + em.Statuses[k] = v + } +} diff --git a/vendor/github.com/docker/distribution/notifications/event.go b/vendor/github.com/docker/distribution/notifications/event.go new file mode 100644 index 000000000..b59a72bed --- /dev/null +++ b/vendor/github.com/docker/distribution/notifications/event.go @@ -0,0 +1,160 @@ +package notifications + +import ( + "fmt" + "time" + + "github.com/docker/distribution" +) + +// EventAction constants used in action field of Event. +const ( + EventActionPull = "pull" + EventActionPush = "push" + EventActionMount = "mount" + EventActionDelete = "delete" +) + +const ( + // EventsMediaType is the mediatype for the json event envelope. If the + // Event, ActorRecord, SourceRecord or Envelope structs change, the version + // number should be incremented. + EventsMediaType = "application/vnd.docker.distribution.events.v1+json" + // LayerMediaType is the media type for image rootfs diffs (aka "layers") + // used by Docker. We don't expect this to change for quite a while. + layerMediaType = "application/vnd.docker.container.image.rootfs.diff+x-gtar" +) + +// Envelope defines the fields of a json event envelope message that can hold +// one or more events. +type Envelope struct { + // Events make up the contents of the envelope. Events present in a single + // envelope are not necessarily related. + Events []Event `json:"events,omitempty"` +} + +// TODO(stevvooe): The event type should be separate from the json format. It +// should be defined as an interface. Leaving as is for now since we don't +// need that at this time. If we make this change, the struct below would be +// called "EventRecord". + +// Event provides the fields required to describe a registry event. +type Event struct { + // ID provides a unique identifier for the event. + ID string `json:"id,omitempty"` + + // Timestamp is the time at which the event occurred. + Timestamp time.Time `json:"timestamp,omitempty"` + + // Action indicates what action encompasses the provided event. + Action string `json:"action,omitempty"` + + // Target uniquely describes the target of the event. + Target struct { + // TODO(stevvooe): Use http.DetectContentType for layers, maybe. + + distribution.Descriptor + + // Length in bytes of content. Same as Size field in Descriptor. + // Provided for backwards compatibility. + Length int64 `json:"length,omitempty"` + + // Repository identifies the named repository. 
+ Repository string `json:"repository,omitempty"` + + // FromRepository identifies the named repository which a blob was mounted + // from if appropriate. + FromRepository string `json:"fromRepository,omitempty"` + + // URL provides a direct link to the content. + URL string `json:"url,omitempty"` + + // Tag provides the tag + Tag string `json:"tag,omitempty"` + } `json:"target,omitempty"` + + // Request covers the request that generated the event. + Request RequestRecord `json:"request,omitempty"` + + // Actor specifies the agent that initiated the event. For most + // situations, this could be from the authorizaton context of the request. + Actor ActorRecord `json:"actor,omitempty"` + + // Source identifies the registry node that generated the event. Put + // differently, while the actor "initiates" the event, the source + // "generates" it. + Source SourceRecord `json:"source,omitempty"` +} + +// ActorRecord specifies the agent that initiated the event. For most +// situations, this could be from the authorizaton context of the request. +// Data in this record can refer to both the initiating client and the +// generating request. +type ActorRecord struct { + // Name corresponds to the subject or username associated with the + // request context that generated the event. + Name string `json:"name,omitempty"` + + // TODO(stevvooe): Look into setting a session cookie to get this + // without docker daemon. + // SessionID + + // TODO(stevvooe): Push the "Docker-Command" header to replace cookie and + // get the actual command. + // Command +} + +// RequestRecord covers the request that generated the event. +type RequestRecord struct { + // ID uniquely identifies the request that initiated the event. + ID string `json:"id"` + + // Addr contains the ip or hostname and possibly port of the client + // connection that initiated the event. This is the RemoteAddr from + // the standard http request. + Addr string `json:"addr,omitempty"` + + // Host is the externally accessible host name of the registry instance, + // as specified by the http host header on incoming requests. + Host string `json:"host,omitempty"` + + // Method has the request method that generated the event. + Method string `json:"method"` + + // UserAgent contains the user agent header of the request. + UserAgent string `json:"useragent"` +} + +// SourceRecord identifies the registry node that generated the event. Put +// differently, while the actor "initiates" the event, the source "generates" +// it. +type SourceRecord struct { + // Addr contains the ip or hostname and the port of the registry node + // that generated the event. Generally, this will be resolved by + // os.Hostname() along with the running port. + Addr string `json:"addr,omitempty"` + + // InstanceID identifies a running instance of an application. Changes + // after each restart. + InstanceID string `json:"instanceID,omitempty"` +} + +var ( + // ErrSinkClosed is returned if a write is issued to a sink that has been + // closed. If encountered, the error should be considered terminal and + // retries will not be successful. + ErrSinkClosed = fmt.Errorf("sink: closed") +) + +// Sink accepts and sends events. +type Sink interface { + // Write writes one or more events to the sink. If no error is returned, + // the caller will assume that all events have been committed and will not + // try to send them again. If an error is received, the caller may retry + // sending the event. 
The caller should cede the slice of memory to the + // sink and not modify it after calling this method. + Write(events ...Event) error + + // Close the sink, possibly waiting for pending events to flush. + Close() error +} diff --git a/vendor/github.com/docker/distribution/notifications/event_test.go b/vendor/github.com/docker/distribution/notifications/event_test.go new file mode 100644 index 000000000..0981a7ad4 --- /dev/null +++ b/vendor/github.com/docker/distribution/notifications/event_test.go @@ -0,0 +1,157 @@ +package notifications + +import ( + "encoding/json" + "strings" + "testing" + "time" + + "github.com/docker/distribution/manifest/schema1" +) + +// TestEventJSONFormat provides silly test to detect if the event format or +// envelope has changed. If this code fails, the revision of the protocol may +// need to be incremented. +func TestEventEnvelopeJSONFormat(t *testing.T) { + var expected = strings.TrimSpace(` +{ + "events": [ + { + "id": "asdf-asdf-asdf-asdf-0", + "timestamp": "2006-01-02T15:04:05Z", + "action": "push", + "target": { + "mediaType": "application/vnd.docker.distribution.manifest.v1+prettyjws", + "size": 1, + "digest": "sha256:0123456789abcdef0", + "length": 1, + "repository": "library/test", + "url": "http://example.com/v2/library/test/manifests/latest" + }, + "request": { + "id": "asdfasdf", + "addr": "client.local", + "host": "registrycluster.local", + "method": "PUT", + "useragent": "test/0.1" + }, + "actor": { + "name": "test-actor" + }, + "source": { + "addr": "hostname.local:port" + } + }, + { + "id": "asdf-asdf-asdf-asdf-1", + "timestamp": "2006-01-02T15:04:05Z", + "action": "push", + "target": { + "mediaType": "application/vnd.docker.container.image.rootfs.diff+x-gtar", + "size": 2, + "digest": "sha256:3b3692957d439ac1928219a83fac91e7bf96c153725526874673ae1f2023f8d5", + "length": 2, + "repository": "library/test", + "url": "http://example.com/v2/library/test/manifests/latest" + }, + "request": { + "id": "asdfasdf", + "addr": "client.local", + "host": "registrycluster.local", + "method": "PUT", + "useragent": "test/0.1" + }, + "actor": { + "name": "test-actor" + }, + "source": { + "addr": "hostname.local:port" + } + }, + { + "id": "asdf-asdf-asdf-asdf-2", + "timestamp": "2006-01-02T15:04:05Z", + "action": "push", + "target": { + "mediaType": "application/vnd.docker.container.image.rootfs.diff+x-gtar", + "size": 3, + "digest": "sha256:3b3692957d439ac1928219a83fac91e7bf96c153725526874673ae1f2023f8d6", + "length": 3, + "repository": "library/test", + "url": "http://example.com/v2/library/test/manifests/latest" + }, + "request": { + "id": "asdfasdf", + "addr": "client.local", + "host": "registrycluster.local", + "method": "PUT", + "useragent": "test/0.1" + }, + "actor": { + "name": "test-actor" + }, + "source": { + "addr": "hostname.local:port" + } + } + ] +} + `) + + tm, err := time.Parse(time.RFC3339, time.RFC3339[:len(time.RFC3339)-5]) + if err != nil { + t.Fatalf("error creating time: %v", err) + } + + var prototype Event + prototype.Action = EventActionPush + prototype.Timestamp = tm + prototype.Actor.Name = "test-actor" + prototype.Request.ID = "asdfasdf" + prototype.Request.Addr = "client.local" + prototype.Request.Host = "registrycluster.local" + prototype.Request.Method = "PUT" + prototype.Request.UserAgent = "test/0.1" + prototype.Source.Addr = "hostname.local:port" + + var manifestPush Event + manifestPush = prototype + manifestPush.ID = "asdf-asdf-asdf-asdf-0" + manifestPush.Target.Digest = "sha256:0123456789abcdef0" + 
manifestPush.Target.Length = 1 + manifestPush.Target.Size = 1 + manifestPush.Target.MediaType = schema1.MediaTypeSignedManifest + manifestPush.Target.Repository = "library/test" + manifestPush.Target.URL = "http://example.com/v2/library/test/manifests/latest" + + var layerPush0 Event + layerPush0 = prototype + layerPush0.ID = "asdf-asdf-asdf-asdf-1" + layerPush0.Target.Digest = "sha256:3b3692957d439ac1928219a83fac91e7bf96c153725526874673ae1f2023f8d5" + layerPush0.Target.Length = 2 + layerPush0.Target.Size = 2 + layerPush0.Target.MediaType = layerMediaType + layerPush0.Target.Repository = "library/test" + layerPush0.Target.URL = "http://example.com/v2/library/test/manifests/latest" + + var layerPush1 Event + layerPush1 = prototype + layerPush1.ID = "asdf-asdf-asdf-asdf-2" + layerPush1.Target.Digest = "sha256:3b3692957d439ac1928219a83fac91e7bf96c153725526874673ae1f2023f8d6" + layerPush1.Target.Length = 3 + layerPush1.Target.Size = 3 + layerPush1.Target.MediaType = layerMediaType + layerPush1.Target.Repository = "library/test" + layerPush1.Target.URL = "http://example.com/v2/library/test/manifests/latest" + + var envelope Envelope + envelope.Events = append(envelope.Events, manifestPush, layerPush0, layerPush1) + + p, err := json.MarshalIndent(envelope, "", " ") + if err != nil { + t.Fatalf("unexpected error marshaling envelope: %v", err) + } + if string(p) != expected { + t.Fatalf("format has changed\n%s\n != \n%s", string(p), expected) + } +} diff --git a/vendor/github.com/docker/distribution/notifications/http.go b/vendor/github.com/docker/distribution/notifications/http.go new file mode 100644 index 000000000..15751619b --- /dev/null +++ b/vendor/github.com/docker/distribution/notifications/http.go @@ -0,0 +1,150 @@ +package notifications + +import ( + "bytes" + "encoding/json" + "fmt" + "net/http" + "sync" + "time" +) + +// httpSink implements a single-flight, http notification endpoint. This is +// very lightweight in that it only makes an attempt at an http request. +// Reliability should be provided by the caller. +type httpSink struct { + url string + + mu sync.Mutex + closed bool + client *http.Client + listeners []httpStatusListener + + // TODO(stevvooe): Allow one to configure the media type accepted by this + // sink and choose the serialization based on that. +} + +// newHTTPSink returns an unreliable, single-flight http sink. Wrap in other +// sinks for increased reliability. +func newHTTPSink(u string, timeout time.Duration, headers http.Header, transport *http.Transport, listeners ...httpStatusListener) *httpSink { + if transport == nil { + transport = http.DefaultTransport.(*http.Transport) + } + return &httpSink{ + url: u, + listeners: listeners, + client: &http.Client{ + Transport: &headerRoundTripper{ + Transport: transport, + headers: headers, + }, + Timeout: timeout, + }, + } +} + +// httpStatusListener is called on various outcomes of sending notifications. +type httpStatusListener interface { + success(status int, events ...Event) + failure(status int, events ...Event) + err(err error, events ...Event) +} + +// Accept makes an attempt to notify the endpoint, returning an error if it +// fails. It is the caller's responsibility to retry on error. The events are +// accepted or rejected as a group. 
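// Illustrative usage sketch (not part of the upstream sources; the endpoint
// name, URL and header are invented): the raw http sink is unexported, so
// callers normally go through NewEndpoint, which layers retry, queueing and
// media-type filtering on top of it and fills in the EndpointConfig defaults.
//
//    ep := NewEndpoint("webhook", "https://listener.example.com/events", EndpointConfig{
//        Timeout:   500 * time.Millisecond,
//        Threshold: 5,
//        Backoff:   time.Second,
//        Headers:   http.Header{"Authorization": []string{"Bearer <token>"}},
//    })
//    defer ep.Close()
//    _ = ep.Write(Event{ID: "example", Action: EventActionPush, Timestamp: time.Now()}) // queued; delivery retried on failure
//
//    var em EndpointMetrics
//    ep.ReadMetrics(&em) // per-status counters and other delivery metrics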
+func (hs *httpSink) Write(events ...Event) error { + hs.mu.Lock() + defer hs.mu.Unlock() + defer hs.client.Transport.(*headerRoundTripper).CloseIdleConnections() + + if hs.closed { + return ErrSinkClosed + } + + envelope := Envelope{ + Events: events, + } + + // TODO(stevvooe): It is not ideal to keep re-encoding the request body on + // retry but we are going to do it to keep the code simple. It is likely + // we could change the event struct to manage its own buffer. + + p, err := json.MarshalIndent(envelope, "", " ") + if err != nil { + for _, listener := range hs.listeners { + listener.err(err, events...) + } + return fmt.Errorf("%v: error marshaling event envelope: %v", hs, err) + } + + body := bytes.NewReader(p) + resp, err := hs.client.Post(hs.url, EventsMediaType, body) + if err != nil { + for _, listener := range hs.listeners { + listener.err(err, events...) + } + + return fmt.Errorf("%v: error posting: %v", hs, err) + } + defer resp.Body.Close() + + // The notifier will treat any 2xx or 3xx response as accepted by the + // endpoint. + switch { + case resp.StatusCode >= 200 && resp.StatusCode < 400: + for _, listener := range hs.listeners { + listener.success(resp.StatusCode, events...) + } + + // TODO(stevvooe): This is a little accepting: we may want to support + // unsupported media type responses with retries using the correct + // media type. There may also be cases that will never work. + + return nil + default: + for _, listener := range hs.listeners { + listener.failure(resp.StatusCode, events...) + } + return fmt.Errorf("%v: response status %v unaccepted", hs, resp.Status) + } +} + +// Close the endpoint +func (hs *httpSink) Close() error { + hs.mu.Lock() + defer hs.mu.Unlock() + + if hs.closed { + return fmt.Errorf("httpsink: already closed") + } + + hs.closed = true + return nil +} + +func (hs *httpSink) String() string { + return fmt.Sprintf("httpSink{%s}", hs.url) +} + +type headerRoundTripper struct { + *http.Transport // must be transport to support CancelRequest + headers http.Header +} + +func (hrt *headerRoundTripper) RoundTrip(req *http.Request) (*http.Response, error) { + var nreq http.Request + nreq = *req + nreq.Header = make(http.Header) + + merge := func(headers http.Header) { + for k, v := range headers { + nreq.Header[k] = append(nreq.Header[k], v...) + } + } + + merge(req.Header) + merge(hrt.headers) + + return hrt.Transport.RoundTrip(&nreq) +} diff --git a/vendor/github.com/docker/distribution/notifications/http_test.go b/vendor/github.com/docker/distribution/notifications/http_test.go new file mode 100644 index 000000000..e04693621 --- /dev/null +++ b/vendor/github.com/docker/distribution/notifications/http_test.go @@ -0,0 +1,185 @@ +package notifications + +import ( + "crypto/tls" + "encoding/json" + "fmt" + "mime" + "net/http" + "net/http/httptest" + "reflect" + "strconv" + "strings" + "testing" + + "github.com/docker/distribution/manifest/schema1" +) + +// TestHTTPSink mocks out an http endpoint and notifies it under a couple of +// conditions, ensuring correct behavior. 
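The headerRoundTripper defined above copies each outgoing request and merges a fixed set of headers into it before delegating to the wrapped transport. Before the TestHTTPSink test below, here is a stand-alone sketch of the same pattern using only net/http; the headerInjector name, the bearer token and the registry URL are illustrative assumptions.

package main

import (
	"net/http"
)

// headerInjector copies the request, merges static headers into the copy and
// delegates to the wrapped transport, mirroring the header round tripper
// used by the http sink above.
type headerInjector struct {
	base    http.RoundTripper
	headers http.Header
}

func (hi *headerInjector) RoundTrip(req *http.Request) (*http.Response, error) {
	// Shallow-copy the request and replace the header map so the caller's
	// request is never mutated.
	nreq := *req
	nreq.Header = make(http.Header, len(req.Header)+len(hi.headers))
	for k, v := range req.Header {
		nreq.Header[k] = append(nreq.Header[k], v...)
	}
	for k, v := range hi.headers {
		nreq.Header[k] = append(nreq.Header[k], v...)
	}
	return hi.base.RoundTrip(&nreq)
}

func main() {
	client := &http.Client{
		Transport: &headerInjector{
			base:    http.DefaultTransport,
			headers: http.Header{"Authorization": []string{"Bearer example-token"}},
		},
	}
	// Every request made through this client carries the injected header.
	_, _ = client.Get("http://registry.example.com/v2/")
}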
+func TestHTTPSink(t *testing.T) { + serverHandler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + defer r.Body.Close() + if r.Method != "POST" { + w.WriteHeader(http.StatusMethodNotAllowed) + t.Fatalf("unexpected request method: %v", r.Method) + return + } + + // Extract the content type and make sure it matches + contentType := r.Header.Get("Content-Type") + mediaType, _, err := mime.ParseMediaType(contentType) + if err != nil { + w.WriteHeader(http.StatusBadRequest) + t.Fatalf("error parsing media type: %v, contenttype=%q", err, contentType) + return + } + + if mediaType != EventsMediaType { + w.WriteHeader(http.StatusUnsupportedMediaType) + t.Fatalf("incorrect media type: %q != %q", mediaType, EventsMediaType) + return + } + + var envelope Envelope + dec := json.NewDecoder(r.Body) + if err := dec.Decode(&envelope); err != nil { + w.WriteHeader(http.StatusBadRequest) + t.Fatalf("error decoding request body: %v", err) + return + } + + // Let caller choose the status + status, err := strconv.Atoi(r.FormValue("status")) + if err != nil { + t.Logf("error parsing status: %v", err) + + // May just be empty, set status to 200 + status = http.StatusOK + } + + w.WriteHeader(status) + }) + server := httptest.NewTLSServer(serverHandler) + + metrics := newSafeMetrics() + sink := newHTTPSink(server.URL, 0, nil, nil, + &endpointMetricsHTTPStatusListener{safeMetrics: metrics}) + + // first make sure that the default transport gives x509 untrusted cert error + events := []Event{} + err := sink.Write(events...) + if !strings.Contains(err.Error(), "x509") { + t.Fatal("TLS server with default transport should give unknown CA error") + } + if err := sink.Close(); err != nil { + t.Fatalf("unexpected error closing http sink: %v", err) + } + + // make sure that passing in the transport no longer gives this error + tr := &http.Transport{ + TLSClientConfig: &tls.Config{InsecureSkipVerify: true}, + } + sink = newHTTPSink(server.URL, 0, nil, tr, + &endpointMetricsHTTPStatusListener{safeMetrics: metrics}) + err = sink.Write(events...) + if err != nil { + t.Fatalf("unexpected error writing events: %v", err) + } + + // reset server to standard http server and sink to a basic sink + server = httptest.NewServer(serverHandler) + sink = newHTTPSink(server.URL, 0, nil, nil, + &endpointMetricsHTTPStatusListener{safeMetrics: metrics}) + var expectedMetrics EndpointMetrics + expectedMetrics.Statuses = make(map[string]int) + + for _, tc := range []struct { + events []Event // events to send + url string + failure bool // true if there should be a failure. + statusCode int // if not set, no status code should be incremented. + }{ + { + statusCode: http.StatusOK, + events: []Event{ + createTestEvent("push", "library/test", schema1.MediaTypeSignedManifest)}, + }, + { + statusCode: http.StatusOK, + events: []Event{ + createTestEvent("push", "library/test", schema1.MediaTypeSignedManifest), + createTestEvent("push", "library/test", layerMediaType), + createTestEvent("push", "library/test", layerMediaType), + }, + }, + { + statusCode: http.StatusTemporaryRedirect, + }, + { + statusCode: http.StatusBadRequest, + failure: true, + }, + { + // Case where connection never goes through. 
+ url: "http://shoudlntresolve/", + failure: true, + }, + } { + + if tc.failure { + expectedMetrics.Failures += len(tc.events) + } else { + expectedMetrics.Successes += len(tc.events) + } + + if tc.statusCode > 0 { + expectedMetrics.Statuses[fmt.Sprintf("%d %s", tc.statusCode, http.StatusText(tc.statusCode))] += len(tc.events) + } + + url := tc.url + if url == "" { + url = server.URL + "/" + } + // setup endpoint to respond with expected status code. + url += fmt.Sprintf("?status=%v", tc.statusCode) + sink.url = url + + t.Logf("testcase: %v, fail=%v", url, tc.failure) + // Try a simple event emission. + err := sink.Write(tc.events...) + + if !tc.failure { + if err != nil { + t.Fatalf("unexpected error send event: %v", err) + } + } else { + if err == nil { + t.Fatalf("the endpoint should have rejected the request") + } + } + + if !reflect.DeepEqual(metrics.EndpointMetrics, expectedMetrics) { + t.Fatalf("metrics not as expected: %#v != %#v", metrics.EndpointMetrics, expectedMetrics) + } + } + + if err := sink.Close(); err != nil { + t.Fatalf("unexpected error closing http sink: %v", err) + } + + // double close returns error + if err := sink.Close(); err == nil { + t.Fatalf("second close should have returned error: %v", err) + } + +} + +func createTestEvent(action, repo, typ string) Event { + event := createEvent(action) + + event.Target.MediaType = typ + event.Target.Repository = repo + + return *event +} diff --git a/vendor/github.com/docker/distribution/notifications/listener.go b/vendor/github.com/docker/distribution/notifications/listener.go new file mode 100644 index 000000000..c968b98ad --- /dev/null +++ b/vendor/github.com/docker/distribution/notifications/listener.go @@ -0,0 +1,215 @@ +package notifications + +import ( + "net/http" + + "github.com/docker/distribution" + "github.com/docker/distribution/context" + "github.com/docker/distribution/digest" + "github.com/docker/distribution/reference" +) + +// ManifestListener describes a set of methods for listening to events related to manifests. +type ManifestListener interface { + ManifestPushed(repo reference.Named, sm distribution.Manifest, options ...distribution.ManifestServiceOption) error + ManifestPulled(repo reference.Named, sm distribution.Manifest, options ...distribution.ManifestServiceOption) error + ManifestDeleted(repo reference.Named, dgst digest.Digest) error +} + +// BlobListener describes a listener that can respond to layer related events. +type BlobListener interface { + BlobPushed(repo reference.Named, desc distribution.Descriptor) error + BlobPulled(repo reference.Named, desc distribution.Descriptor) error + BlobMounted(repo reference.Named, desc distribution.Descriptor, fromRepo reference.Named) error + BlobDeleted(repo reference.Named, desc digest.Digest) error +} + +// Listener combines all repository events into a single interface. +type Listener interface { + ManifestListener + BlobListener +} + +type repositoryListener struct { + distribution.Repository + listener Listener +} + +// Listen dispatches events on the repository to the listener. +func Listen(repo distribution.Repository, listener Listener) distribution.Repository { + return &repositoryListener{ + Repository: repo, + listener: listener, + } +} + +func (rl *repositoryListener) Manifests(ctx context.Context, options ...distribution.ManifestServiceOption) (distribution.ManifestService, error) { + manifests, err := rl.Repository.Manifests(ctx, options...) 
+ if err != nil { + return nil, err + } + return &manifestServiceListener{ + ManifestService: manifests, + parent: rl, + }, nil +} + +func (rl *repositoryListener) Blobs(ctx context.Context) distribution.BlobStore { + return &blobServiceListener{ + BlobStore: rl.Repository.Blobs(ctx), + parent: rl, + } +} + +type manifestServiceListener struct { + distribution.ManifestService + parent *repositoryListener +} + +func (msl *manifestServiceListener) Delete(ctx context.Context, dgst digest.Digest) error { + err := msl.ManifestService.Delete(ctx, dgst) + if err == nil { + if err := msl.parent.listener.ManifestDeleted(msl.parent.Repository.Named(), dgst); err != nil { + context.GetLogger(ctx).Errorf("error dispatching manifest delete to listener: %v", err) + } + } + + return err +} + +func (msl *manifestServiceListener) Get(ctx context.Context, dgst digest.Digest, options ...distribution.ManifestServiceOption) (distribution.Manifest, error) { + sm, err := msl.ManifestService.Get(ctx, dgst, options...) + if err == nil { + if err := msl.parent.listener.ManifestPulled(msl.parent.Repository.Named(), sm, options...); err != nil { + context.GetLogger(ctx).Errorf("error dispatching manifest pull to listener: %v", err) + } + } + + return sm, err +} + +func (msl *manifestServiceListener) Put(ctx context.Context, sm distribution.Manifest, options ...distribution.ManifestServiceOption) (digest.Digest, error) { + dgst, err := msl.ManifestService.Put(ctx, sm, options...) + + if err == nil { + if err := msl.parent.listener.ManifestPushed(msl.parent.Repository.Named(), sm, options...); err != nil { + context.GetLogger(ctx).Errorf("error dispatching manifest push to listener: %v", err) + } + } + + return dgst, err +} + +type blobServiceListener struct { + distribution.BlobStore + parent *repositoryListener +} + +var _ distribution.BlobStore = &blobServiceListener{} + +func (bsl *blobServiceListener) Get(ctx context.Context, dgst digest.Digest) ([]byte, error) { + p, err := bsl.BlobStore.Get(ctx, dgst) + if err == nil { + if desc, err := bsl.Stat(ctx, dgst); err != nil { + context.GetLogger(ctx).Errorf("error resolving descriptor in ServeBlob listener: %v", err) + } else { + if err := bsl.parent.listener.BlobPulled(bsl.parent.Repository.Named(), desc); err != nil { + context.GetLogger(ctx).Errorf("error dispatching layer pull to listener: %v", err) + } + } + } + + return p, err +} + +func (bsl *blobServiceListener) Open(ctx context.Context, dgst digest.Digest) (distribution.ReadSeekCloser, error) { + rc, err := bsl.BlobStore.Open(ctx, dgst) + if err == nil { + if desc, err := bsl.Stat(ctx, dgst); err != nil { + context.GetLogger(ctx).Errorf("error resolving descriptor in ServeBlob listener: %v", err) + } else { + if err := bsl.parent.listener.BlobPulled(bsl.parent.Repository.Named(), desc); err != nil { + context.GetLogger(ctx).Errorf("error dispatching layer pull to listener: %v", err) + } + } + } + + return rc, err +} + +func (bsl *blobServiceListener) ServeBlob(ctx context.Context, w http.ResponseWriter, r *http.Request, dgst digest.Digest) error { + err := bsl.BlobStore.ServeBlob(ctx, w, r, dgst) + if err == nil { + if desc, err := bsl.Stat(ctx, dgst); err != nil { + context.GetLogger(ctx).Errorf("error resolving descriptor in ServeBlob listener: %v", err) + } else { + if err := bsl.parent.listener.BlobPulled(bsl.parent.Repository.Named(), desc); err != nil { + context.GetLogger(ctx).Errorf("error dispatching layer pull to listener: %v", err) + } + } + } + + return err +} + +func (bsl *blobServiceListener) 
Put(ctx context.Context, mediaType string, p []byte) (distribution.Descriptor, error) { + desc, err := bsl.BlobStore.Put(ctx, mediaType, p) + if err == nil { + if err := bsl.parent.listener.BlobPushed(bsl.parent.Repository.Named(), desc); err != nil { + context.GetLogger(ctx).Errorf("error dispatching layer push to listener: %v", err) + } + } + + return desc, err +} + +func (bsl *blobServiceListener) Create(ctx context.Context, options ...distribution.BlobCreateOption) (distribution.BlobWriter, error) { + wr, err := bsl.BlobStore.Create(ctx, options...) + switch err := err.(type) { + case distribution.ErrBlobMounted: + if err := bsl.parent.listener.BlobMounted(bsl.parent.Repository.Named(), err.Descriptor, err.From); err != nil { + context.GetLogger(ctx).Errorf("error dispatching blob mount to listener: %v", err) + } + return nil, err + } + return bsl.decorateWriter(wr), err +} + +func (bsl *blobServiceListener) Delete(ctx context.Context, dgst digest.Digest) error { + err := bsl.BlobStore.Delete(ctx, dgst) + if err == nil { + if err := bsl.parent.listener.BlobDeleted(bsl.parent.Repository.Named(), dgst); err != nil { + context.GetLogger(ctx).Errorf("error dispatching layer delete to listener: %v", err) + } + } + + return err +} + +func (bsl *blobServiceListener) Resume(ctx context.Context, id string) (distribution.BlobWriter, error) { + wr, err := bsl.BlobStore.Resume(ctx, id) + return bsl.decorateWriter(wr), err +} + +func (bsl *blobServiceListener) decorateWriter(wr distribution.BlobWriter) distribution.BlobWriter { + return &blobWriterListener{ + BlobWriter: wr, + parent: bsl, + } +} + +type blobWriterListener struct { + distribution.BlobWriter + parent *blobServiceListener +} + +func (bwl *blobWriterListener) Commit(ctx context.Context, desc distribution.Descriptor) (distribution.Descriptor, error) { + committed, err := bwl.BlobWriter.Commit(ctx, desc) + if err == nil { + if err := bwl.parent.parent.listener.BlobPushed(bwl.parent.parent.Repository.Named(), committed); err != nil { + context.GetLogger(ctx).Errorf("error dispatching blob push to listener: %v", err) + } + } + + return committed, err +} diff --git a/vendor/github.com/docker/distribution/notifications/listener_test.go b/vendor/github.com/docker/distribution/notifications/listener_test.go new file mode 100644 index 000000000..c7db5944e --- /dev/null +++ b/vendor/github.com/docker/distribution/notifications/listener_test.go @@ -0,0 +1,205 @@ +package notifications + +import ( + "io" + "reflect" + "testing" + + "github.com/docker/distribution" + "github.com/docker/distribution/context" + "github.com/docker/distribution/digest" + "github.com/docker/distribution/manifest" + "github.com/docker/distribution/manifest/schema1" + "github.com/docker/distribution/reference" + "github.com/docker/distribution/registry/storage" + "github.com/docker/distribution/registry/storage/cache/memory" + "github.com/docker/distribution/registry/storage/driver/inmemory" + "github.com/docker/distribution/testutil" + "github.com/docker/libtrust" +) + +func TestListener(t *testing.T) { + ctx := context.Background() + k, err := libtrust.GenerateECP256PrivateKey() + if err != nil { + t.Fatal(err) + } + + registry, err := storage.NewRegistry(ctx, inmemory.New(), storage.BlobDescriptorCacheProvider(memory.NewInMemoryBlobDescriptorCacheProvider()), storage.EnableDelete, storage.EnableRedirect, storage.Schema1SigningKey(k)) + if err != nil { + t.Fatalf("error creating registry: %v", err) + } + tl := &testListener{ + ops: make(map[string]int), + } + + 
repoRef, _ := reference.ParseNamed("foo/bar") + repository, err := registry.Repository(ctx, repoRef) + if err != nil { + t.Fatalf("unexpected error getting repo: %v", err) + } + repository = Listen(repository, tl) + + // Now take the registry through a number of operations + checkExerciseRepository(t, repository) + + expectedOps := map[string]int{ + "manifest:push": 1, + "manifest:pull": 1, + "manifest:delete": 1, + "layer:push": 2, + "layer:pull": 2, + "layer:delete": 2, + } + + if !reflect.DeepEqual(tl.ops, expectedOps) { + t.Fatalf("counts do not match:\n%v\n !=\n%v", tl.ops, expectedOps) + } + +} + +type testListener struct { + ops map[string]int +} + +func (tl *testListener) ManifestPushed(repo reference.Named, m distribution.Manifest, options ...distribution.ManifestServiceOption) error { + tl.ops["manifest:push"]++ + + return nil +} + +func (tl *testListener) ManifestPulled(repo reference.Named, m distribution.Manifest, options ...distribution.ManifestServiceOption) error { + tl.ops["manifest:pull"]++ + return nil +} + +func (tl *testListener) ManifestDeleted(repo reference.Named, d digest.Digest) error { + tl.ops["manifest:delete"]++ + return nil +} + +func (tl *testListener) BlobPushed(repo reference.Named, desc distribution.Descriptor) error { + tl.ops["layer:push"]++ + return nil +} + +func (tl *testListener) BlobPulled(repo reference.Named, desc distribution.Descriptor) error { + tl.ops["layer:pull"]++ + return nil +} + +func (tl *testListener) BlobMounted(repo reference.Named, desc distribution.Descriptor, fromRepo reference.Named) error { + tl.ops["layer:mount"]++ + return nil +} + +func (tl *testListener) BlobDeleted(repo reference.Named, d digest.Digest) error { + tl.ops["layer:delete"]++ + return nil +} + +// checkExerciseRegistry takes the registry through all of its operations, +// carrying out generic checks. +func checkExerciseRepository(t *testing.T, repository distribution.Repository) { + // TODO(stevvooe): This would be a nice testutil function. Basically, it + // takes the registry through a common set of operations. This could be + // used to make cross-cutting updates by changing internals that affect + // update counts. Basically, it would make writing tests a lot easier. + + ctx := context.Background() + tag := "thetag" + // todo: change this to use Builder + + m := schema1.Manifest{ + Versioned: manifest.Versioned{ + SchemaVersion: 1, + }, + Name: repository.Named().Name(), + Tag: tag, + } + + var blobDigests []digest.Digest + blobs := repository.Blobs(ctx) + for i := 0; i < 2; i++ { + rs, ds, err := testutil.CreateRandomTarFile() + if err != nil { + t.Fatalf("error creating test layer: %v", err) + } + dgst := digest.Digest(ds) + blobDigests = append(blobDigests, dgst) + + wr, err := blobs.Create(ctx) + if err != nil { + t.Fatalf("error creating layer upload: %v", err) + } + + // Use the resumes, as well! 
+ wr, err = blobs.Resume(ctx, wr.ID()) + if err != nil { + t.Fatalf("error resuming layer upload: %v", err) + } + + io.Copy(wr, rs) + + if _, err := wr.Commit(ctx, distribution.Descriptor{Digest: dgst}); err != nil { + t.Fatalf("unexpected error finishing upload: %v", err) + } + + m.FSLayers = append(m.FSLayers, schema1.FSLayer{ + BlobSum: dgst, + }) + m.History = append(m.History, schema1.History{ + V1Compatibility: "", + }) + + // Then fetch the blobs + if rc, err := blobs.Open(ctx, dgst); err != nil { + t.Fatalf("error fetching layer: %v", err) + } else { + defer rc.Close() + } + } + + pk, err := libtrust.GenerateECP256PrivateKey() + if err != nil { + t.Fatalf("unexpected error generating key: %v", err) + } + + sm, err := schema1.Sign(&m, pk) + if err != nil { + t.Fatalf("unexpected error signing manifest: %v", err) + } + + manifests, err := repository.Manifests(ctx) + if err != nil { + t.Fatal(err.Error()) + } + + var digestPut digest.Digest + if digestPut, err = manifests.Put(ctx, sm); err != nil { + t.Fatalf("unexpected error putting the manifest: %v", err) + } + + dgst := digest.FromBytes(sm.Canonical) + if dgst != digestPut { + t.Fatalf("mismatching digest from payload and put") + } + + _, err = manifests.Get(ctx, dgst) + if err != nil { + t.Fatalf("unexpected error fetching manifest: %v", err) + } + + err = manifests.Delete(ctx, dgst) + if err != nil { + t.Fatalf("unexpected error deleting blob: %v", err) + } + + for _, d := range blobDigests { + err = blobs.Delete(ctx, d) + if err != nil { + t.Fatalf("unexpected error deleting blob: %v", err) + } + + } +} diff --git a/vendor/github.com/docker/distribution/notifications/metrics.go b/vendor/github.com/docker/distribution/notifications/metrics.go new file mode 100644 index 000000000..a20af1687 --- /dev/null +++ b/vendor/github.com/docker/distribution/notifications/metrics.go @@ -0,0 +1,152 @@ +package notifications + +import ( + "expvar" + "fmt" + "net/http" + "sync" +) + +// EndpointMetrics track various actions taken by the endpoint, typically by +// number of events. The goal of this to export it via expvar but we may find +// some other future solution to be better. +type EndpointMetrics struct { + Pending int // events pending in queue + Events int // total events incoming + Successes int // total events written successfully + Failures int // total events failed + Errors int // total events errored + Statuses map[string]int // status code histogram, per call event +} + +// safeMetrics guards the metrics implementation with a lock and provides a +// safe update function. +type safeMetrics struct { + EndpointMetrics + sync.Mutex // protects statuses map +} + +// newSafeMetrics returns safeMetrics with map allocated. +func newSafeMetrics() *safeMetrics { + var sm safeMetrics + sm.Statuses = make(map[string]int) + return &sm +} + +// httpStatusListener returns the listener for the http sink that updates the +// relevant counters. +func (sm *safeMetrics) httpStatusListener() httpStatusListener { + return &endpointMetricsHTTPStatusListener{ + safeMetrics: sm, + } +} + +// eventQueueListener returns a listener that maintains queue related counters. +func (sm *safeMetrics) eventQueueListener() eventQueueListener { + return &endpointMetricsEventQueueListener{ + safeMetrics: sm, + } +} + +// endpointMetricsHTTPStatusListener increments counters related to http sinks +// for the relevant events. 
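The listeners declared below update the EndpointMetrics counters under the safeMetrics lock, and the package's init function further down exposes a consistent snapshot of them through expvar. A stdlib-only sketch of that lock-and-snapshot pattern follows; the counters type, the variable name and the listen address are illustrative assumptions, not this package's API.

package main

import (
	"expvar"
	"net/http"
	"sync"
)

// counters is an illustrative, mutex-guarded metrics holder in the spirit of
// safeMetrics: writers update it under the lock, and expvar reads a
// consistent copy via the closure registered in main.
type counters struct {
	sync.Mutex
	Successes int
	Failures  int
	Statuses  map[string]int
}

func main() {
	c := &counters{Statuses: make(map[string]int)}

	expvar.Publish("notifications.example", expvar.Func(func() interface{} {
		c.Lock()
		defer c.Unlock()
		// Copy the map so the snapshot stays safe to serialize after unlock.
		statuses := make(map[string]int, len(c.Statuses))
		for k, v := range c.Statuses {
			statuses[k] = v
		}
		return map[string]interface{}{
			"successes": c.Successes,
			"failures":  c.Failures,
			"statuses":  statuses,
		}
	}))

	// expvar serves the published values on /debug/vars of the default mux.
	_ = http.ListenAndServe("127.0.0.1:8080", nil)
}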
+type endpointMetricsHTTPStatusListener struct { + *safeMetrics +} + +var _ httpStatusListener = &endpointMetricsHTTPStatusListener{} + +func (emsl *endpointMetricsHTTPStatusListener) success(status int, events ...Event) { + emsl.safeMetrics.Lock() + defer emsl.safeMetrics.Unlock() + emsl.Statuses[fmt.Sprintf("%d %s", status, http.StatusText(status))] += len(events) + emsl.Successes += len(events) +} + +func (emsl *endpointMetricsHTTPStatusListener) failure(status int, events ...Event) { + emsl.safeMetrics.Lock() + defer emsl.safeMetrics.Unlock() + emsl.Statuses[fmt.Sprintf("%d %s", status, http.StatusText(status))] += len(events) + emsl.Failures += len(events) +} + +func (emsl *endpointMetricsHTTPStatusListener) err(err error, events ...Event) { + emsl.safeMetrics.Lock() + defer emsl.safeMetrics.Unlock() + emsl.Errors += len(events) +} + +// endpointMetricsEventQueueListener maintains the incoming events counter and +// the queues pending count. +type endpointMetricsEventQueueListener struct { + *safeMetrics +} + +func (eqc *endpointMetricsEventQueueListener) ingress(events ...Event) { + eqc.Lock() + defer eqc.Unlock() + eqc.Events += len(events) + eqc.Pending += len(events) +} + +func (eqc *endpointMetricsEventQueueListener) egress(events ...Event) { + eqc.Lock() + defer eqc.Unlock() + eqc.Pending -= len(events) +} + +// endpoints is global registry of endpoints used to report metrics to expvar +var endpoints struct { + registered []*Endpoint + mu sync.Mutex +} + +// register places the endpoint into expvar so that stats are tracked. +func register(e *Endpoint) { + endpoints.mu.Lock() + defer endpoints.mu.Unlock() + + endpoints.registered = append(endpoints.registered, e) +} + +func init() { + // NOTE(stevvooe): Setup registry metrics structure to report to expvar. + // Ideally, we do more metrics through logging but we need some nice + // realtime metrics for queue state for now. + + registry := expvar.Get("registry") + + if registry == nil { + registry = expvar.NewMap("registry") + } + + var notifications expvar.Map + notifications.Init() + notifications.Set("endpoints", expvar.Func(func() interface{} { + endpoints.mu.Lock() + defer endpoints.mu.Unlock() + + var names []interface{} + for _, v := range endpoints.registered { + var epjson struct { + Name string `json:"name"` + URL string `json:"url"` + EndpointConfig + + Metrics EndpointMetrics + } + + epjson.Name = v.Name() + epjson.URL = v.URL() + epjson.EndpointConfig = v.EndpointConfig + + v.ReadMetrics(&epjson.Metrics) + + names = append(names, epjson) + } + + return names + })) + + registry.(*expvar.Map).Set("notifications", ¬ifications) +} diff --git a/vendor/github.com/docker/distribution/notifications/sinks.go b/vendor/github.com/docker/distribution/notifications/sinks.go new file mode 100644 index 000000000..549ba97e2 --- /dev/null +++ b/vendor/github.com/docker/distribution/notifications/sinks.go @@ -0,0 +1,375 @@ +package notifications + +import ( + "container/list" + "fmt" + "sync" + "time" + + "github.com/Sirupsen/logrus" +) + +// NOTE(stevvooe): This file contains definitions for several utility sinks. +// Typically, the broadcaster is the only sink that should be required +// externally, but others are suitable for export if the need arises. Albeit, +// the tight integration with endpoint metrics should be removed. + +// Broadcaster sends events to multiple, reliable Sinks. The goal of this +// component is to dispatch events to configured endpoints. Reliability can be +// provided by wrapping incoming sinks. 
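The Broadcaster declared next fans each block of events out to every configured sink from a single goroutine, and Close flushes before returning. A sketch of how a caller might wire it up through the exported NewBroadcaster constructor; the countingSink type is a hypothetical stand-in for a real endpoint sink.

package main

import (
	"fmt"
	"sync"

	"github.com/docker/distribution/notifications"
)

// countingSink is a hypothetical sink that only counts events, making the
// fan-out behavior of the Broadcaster visible.
type countingSink struct {
	mu    sync.Mutex
	count int
}

func (cs *countingSink) Write(events ...notifications.Event) error {
	cs.mu.Lock()
	defer cs.mu.Unlock()
	cs.count += len(events)
	return nil
}

func (cs *countingSink) Close() error { return nil }

func main() {
	a, b := &countingSink{}, &countingSink{}

	// Every block written to the broadcaster is dispatched to both sinks.
	broadcaster := notifications.NewBroadcaster(a, b)
	_ = broadcaster.Write(
		notifications.Event{Action: notifications.EventActionPush},
		notifications.Event{Action: notifications.EventActionPush},
	)
	_ = broadcaster.Close()

	fmt.Println(a.count, b.count) // both sinks saw the same two events
}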
+type Broadcaster struct { + sinks []Sink + events chan []Event + closed chan chan struct{} +} + +// NewBroadcaster ... +// Add appends one or more sinks to the list of sinks. The broadcaster +// behavior will be affected by the properties of the sink. Generally, the +// sink should accept all messages and deal with reliability on its own. Use +// of EventQueue and RetryingSink should be used here. +func NewBroadcaster(sinks ...Sink) *Broadcaster { + b := Broadcaster{ + sinks: sinks, + events: make(chan []Event), + closed: make(chan chan struct{}), + } + + // Start the broadcaster + go b.run() + + return &b +} + +// Write accepts a block of events to be dispatched to all sinks. This method +// will never fail and should never block (hopefully!). The caller cedes the +// slice memory to the broadcaster and should not modify it after calling +// write. +func (b *Broadcaster) Write(events ...Event) error { + select { + case b.events <- events: + case <-b.closed: + return ErrSinkClosed + } + return nil +} + +// Close the broadcaster, ensuring that all messages are flushed to the +// underlying sink before returning. +func (b *Broadcaster) Close() error { + logrus.Infof("broadcaster: closing") + select { + case <-b.closed: + // already closed + return fmt.Errorf("broadcaster: already closed") + default: + // do a little chan handoff dance to synchronize closing + closed := make(chan struct{}) + b.closed <- closed + close(b.closed) + <-closed + return nil + } +} + +// run is the main broadcast loop, started when the broadcaster is created. +// Under normal conditions, it waits for events on the event channel. After +// Close is called, this goroutine will exit. +func (b *Broadcaster) run() { + for { + select { + case block := <-b.events: + for _, sink := range b.sinks { + if err := sink.Write(block...); err != nil { + logrus.Errorf("broadcaster: error writing events to %v, these events will be lost: %v", sink, err) + } + } + case closing := <-b.closed: + + // close all the underlying sinks + for _, sink := range b.sinks { + if err := sink.Close(); err != nil { + logrus.Errorf("broadcaster: error closing sink %v: %v", sink, err) + } + } + closing <- struct{}{} + + logrus.Debugf("broadcaster: closed") + return + } + } +} + +// eventQueue accepts all messages into a queue for asynchronous consumption +// by a sink. It is unbounded and thread safe but the sink must be reliable or +// events will be dropped. +type eventQueue struct { + sink Sink + events *list.List + listeners []eventQueueListener + cond *sync.Cond + mu sync.Mutex + closed bool +} + +// eventQueueListener is called when various events happen on the queue. +type eventQueueListener interface { + ingress(events ...Event) + egress(events ...Event) +} + +// newEventQueue returns a queue to the provided sink. If the updater is non- +// nil, it will be called to update pending metrics on ingress and egress. +func newEventQueue(sink Sink, listeners ...eventQueueListener) *eventQueue { + eq := eventQueue{ + sink: sink, + events: list.New(), + listeners: listeners, + } + + eq.cond = sync.NewCond(&eq.mu) + go eq.run() + return &eq +} + +// Write accepts the events into the queue, only failing if the queue has +// beend closed. +func (eq *eventQueue) Write(events ...Event) error { + eq.mu.Lock() + defer eq.mu.Unlock() + + if eq.closed { + return ErrSinkClosed + } + + for _, listener := range eq.listeners { + listener.ingress(events...) 
+ } + eq.events.PushBack(events) + eq.cond.Signal() // signal waiters + + return nil +} + +// Close shutsdown the event queue, flushing +func (eq *eventQueue) Close() error { + eq.mu.Lock() + defer eq.mu.Unlock() + + if eq.closed { + return fmt.Errorf("eventqueue: already closed") + } + + // set closed flag + eq.closed = true + eq.cond.Signal() // signal flushes queue + eq.cond.Wait() // wait for signal from last flush + + return eq.sink.Close() +} + +// run is the main goroutine to flush events to the target sink. +func (eq *eventQueue) run() { + for { + block := eq.next() + + if block == nil { + return // nil block means event queue is closed. + } + + if err := eq.sink.Write(block...); err != nil { + logrus.Warnf("eventqueue: error writing events to %v, these events will be lost: %v", eq.sink, err) + } + + for _, listener := range eq.listeners { + listener.egress(block...) + } + } +} + +// next encompasses the critical section of the run loop. When the queue is +// empty, it will block on the condition. If new data arrives, it will wake +// and return a block. When closed, a nil slice will be returned. +func (eq *eventQueue) next() []Event { + eq.mu.Lock() + defer eq.mu.Unlock() + + for eq.events.Len() < 1 { + if eq.closed { + eq.cond.Broadcast() + return nil + } + + eq.cond.Wait() + } + + front := eq.events.Front() + block := front.Value.([]Event) + eq.events.Remove(front) + + return block +} + +// ignoredMediaTypesSink discards events with ignored target media types and +// passes the rest along. +type ignoredMediaTypesSink struct { + Sink + ignored map[string]bool +} + +func newIgnoredMediaTypesSink(sink Sink, ignored []string) Sink { + if len(ignored) == 0 { + return sink + } + + ignoredMap := make(map[string]bool) + for _, mediaType := range ignored { + ignoredMap[mediaType] = true + } + + return &ignoredMediaTypesSink{ + Sink: sink, + ignored: ignoredMap, + } +} + +// Write discards events with ignored target media types and passes the rest +// along. +func (imts *ignoredMediaTypesSink) Write(events ...Event) error { + var kept []Event + for _, e := range events { + if !imts.ignored[e.Target.MediaType] { + kept = append(kept, e) + } + } + if len(kept) == 0 { + return nil + } + return imts.Sink.Write(kept...) +} + +// retryingSink retries the write until success or an ErrSinkClosed is +// returned. Underlying sink must have p > 0 of succeeding or the sink will +// block. Internally, it is a circuit breaker retries to manage reset. +// Concurrent calls to a retrying sink are serialized through the sink, +// meaning that if one is in-flight, another will not proceed. +type retryingSink struct { + mu sync.Mutex + sink Sink + closed bool + + // circuit breaker heuristics + failures struct { + threshold int + recent int + last time.Time + backoff time.Duration // time after which we retry after failure. + } +} + +type retryingSinkListener interface { + active(events ...Event) + retry(events ...Event) +} + +// TODO(stevvooe): We are using circuit break here, which actually doesn't +// make a whole lot of sense for this use case, since we always retry. Move +// this to use bounded exponential backoff. + +// newRetryingSink returns a sink that will retry writes to a sink, backing +// off on failure. Parameters threshold and backoff adjust the behavior of the +// circuit breaker. 
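newRetryingSink below wires that heuristic into a Sink: proceed immediately while recent failures stay under the threshold, otherwise wait out the backoff before trying again, and reset the count on success. The constructor is unexported, so the following is only a stdlib sketch of the same retry heuristic with illustrative names, not a use of this package; like the original it retries indefinitely on persistent failure.

package main

import (
	"errors"
	"fmt"
	"time"
)

// retrier mirrors the retryingSink heuristic: retry immediately until
// `threshold` consecutive failures, then sleep `backoff` before each further
// attempt, and reset the failure count on success.
type retrier struct {
	threshold int
	backoff   time.Duration
	recent    int
	last      time.Time
}

func (r *retrier) do(op func() error) error {
	for {
		if r.recent >= r.threshold && !time.Now().After(r.last.Add(r.backoff)) {
			time.Sleep(r.backoff)
		}
		if err := op(); err != nil {
			r.recent++
			r.last = time.Now()
			continue
		}
		r.recent = 0
		r.last = time.Time{}
		return nil
	}
}

func main() {
	attempts := 0
	r := &retrier{threshold: 3, backoff: 10 * time.Millisecond}
	err := r.do(func() error {
		attempts++
		if attempts < 5 {
			return errors.New("transient failure")
		}
		return nil
	})
	fmt.Println(attempts, err) // 5 <nil>
}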
+func newRetryingSink(sink Sink, threshold int, backoff time.Duration) *retryingSink { + rs := &retryingSink{ + sink: sink, + } + rs.failures.threshold = threshold + rs.failures.backoff = backoff + + return rs +} + +// Write attempts to flush the events to the downstream sink until it succeeds +// or the sink is closed. +func (rs *retryingSink) Write(events ...Event) error { + rs.mu.Lock() + defer rs.mu.Unlock() + +retry: + + if rs.closed { + return ErrSinkClosed + } + + if !rs.proceed() { + logrus.Warnf("%v encountered too many errors, backing off", rs.sink) + rs.wait(rs.failures.backoff) + goto retry + } + + if err := rs.write(events...); err != nil { + if err == ErrSinkClosed { + // terminal! + return err + } + + logrus.Errorf("retryingsink: error writing events: %v, retrying", err) + goto retry + } + + return nil +} + +// Close closes the sink and the underlying sink. +func (rs *retryingSink) Close() error { + rs.mu.Lock() + defer rs.mu.Unlock() + + if rs.closed { + return fmt.Errorf("retryingsink: already closed") + } + + rs.closed = true + return rs.sink.Close() +} + +// write provides a helper that dispatches failure and success properly. Used +// by write as the single-flight write call. +func (rs *retryingSink) write(events ...Event) error { + if err := rs.sink.Write(events...); err != nil { + rs.failure() + return err + } + + rs.reset() + return nil +} + +// wait backoff time against the sink, unlocking so others can proceed. Should +// only be called by methods that currently have the mutex. +func (rs *retryingSink) wait(backoff time.Duration) { + rs.mu.Unlock() + defer rs.mu.Lock() + + // backoff here + time.Sleep(backoff) +} + +// reset marks a successful call. +func (rs *retryingSink) reset() { + rs.failures.recent = 0 + rs.failures.last = time.Time{} +} + +// failure records a failure. +func (rs *retryingSink) failure() { + rs.failures.recent++ + rs.failures.last = time.Now().UTC() +} + +// proceed returns true if the call should proceed based on circuit breaker +// heuristics. +func (rs *retryingSink) proceed() bool { + return rs.failures.recent < rs.failures.threshold || + time.Now().UTC().After(rs.failures.last.Add(rs.failures.backoff)) +} diff --git a/vendor/github.com/docker/distribution/notifications/sinks_test.go b/vendor/github.com/docker/distribution/notifications/sinks_test.go new file mode 100644 index 000000000..1bfa12c6b --- /dev/null +++ b/vendor/github.com/docker/distribution/notifications/sinks_test.go @@ -0,0 +1,256 @@ +package notifications + +import ( + "fmt" + "math/rand" + "reflect" + "sync" + "time" + + "github.com/Sirupsen/logrus" + + "testing" +) + +func TestBroadcaster(t *testing.T) { + const nEvents = 1000 + var sinks []Sink + + for i := 0; i < 10; i++ { + sinks = append(sinks, &testSink{}) + } + + b := NewBroadcaster(sinks...) + + var block []Event + var wg sync.WaitGroup + for i := 1; i <= nEvents; i++ { + block = append(block, createTestEvent("push", "library/test", "blob")) + + if i%10 == 0 && i > 0 { + wg.Add(1) + go func(block ...Event) { + if err := b.Write(block...); err != nil { + t.Fatalf("error writing block of length %d: %v", len(block), err) + } + wg.Done() + }(block...) + + block = nil + } + } + + wg.Wait() // Wait until writes complete + checkClose(t, b) + + // Iterate through the sinks and check that they all have the expected length. 
+ for _, sink := range sinks { + ts := sink.(*testSink) + ts.mu.Lock() + defer ts.mu.Unlock() + + if len(ts.events) != nEvents { + t.Fatalf("not all events ended up in testsink: len(testSink) == %d, not %d", len(ts.events), nEvents) + } + + if !ts.closed { + t.Fatalf("sink should have been closed") + } + } + +} + +func TestEventQueue(t *testing.T) { + const nevents = 1000 + var ts testSink + metrics := newSafeMetrics() + eq := newEventQueue( + // delayed sync simulates destination slower than channel comms + &delayedSink{ + Sink: &ts, + delay: time.Millisecond * 1, + }, metrics.eventQueueListener()) + + var wg sync.WaitGroup + var block []Event + for i := 1; i <= nevents; i++ { + block = append(block, createTestEvent("push", "library/test", "blob")) + if i%10 == 0 && i > 0 { + wg.Add(1) + go func(block ...Event) { + if err := eq.Write(block...); err != nil { + t.Fatalf("error writing event block: %v", err) + } + wg.Done() + }(block...) + + block = nil + } + } + + wg.Wait() + checkClose(t, eq) + + ts.mu.Lock() + defer ts.mu.Unlock() + metrics.Lock() + defer metrics.Unlock() + + if len(ts.events) != nevents { + t.Fatalf("events did not make it to the sink: %d != %d", len(ts.events), 1000) + } + + if !ts.closed { + t.Fatalf("sink should have been closed") + } + + if metrics.Events != nevents { + t.Fatalf("unexpected ingress count: %d != %d", metrics.Events, nevents) + } + + if metrics.Pending != 0 { + t.Fatalf("unexpected egress count: %d != %d", metrics.Pending, 0) + } +} + +func TestIgnoredMediaTypesSink(t *testing.T) { + blob := createTestEvent("push", "library/test", "blob") + manifest := createTestEvent("push", "library/test", "manifest") + + type testcase struct { + ignored []string + expected []Event + } + + cases := []testcase{ + {nil, []Event{blob, manifest}}, + {[]string{"other"}, []Event{blob, manifest}}, + {[]string{"blob"}, []Event{manifest}}, + {[]string{"blob", "manifest"}, nil}, + } + + for _, c := range cases { + ts := &testSink{} + s := newIgnoredMediaTypesSink(ts, c.ignored) + + if err := s.Write(blob, manifest); err != nil { + t.Fatalf("error writing event: %v", err) + } + + ts.mu.Lock() + if !reflect.DeepEqual(ts.events, c.expected) { + t.Fatalf("unexpected events: %#v != %#v", ts.events, c.expected) + } + ts.mu.Unlock() + } +} + +func TestRetryingSink(t *testing.T) { + + // Make a sync that fails most of the time, ensuring that all the events + // make it through. + var ts testSink + flaky := &flakySink{ + rate: 1.0, // start out always failing. + Sink: &ts, + } + s := newRetryingSink(flaky, 3, 10*time.Millisecond) + + var wg sync.WaitGroup + var block []Event + for i := 1; i <= 100; i++ { + block = append(block, createTestEvent("push", "library/test", "blob")) + + // Above 50, set the failure rate lower + if i > 50 { + s.mu.Lock() + flaky.rate = 0.90 + s.mu.Unlock() + } + + if i%10 == 0 && i > 0 { + wg.Add(1) + go func(block ...Event) { + defer wg.Done() + if err := s.Write(block...); err != nil { + t.Fatalf("error writing event block: %v", err) + } + }(block...) + + block = nil + } + } + + wg.Wait() + checkClose(t, s) + + ts.mu.Lock() + defer ts.mu.Unlock() + + if len(ts.events) != 100 { + t.Fatalf("events not propagated: %d != %d", len(ts.events), 100) + } +} + +type testSink struct { + events []Event + mu sync.Mutex + closed bool +} + +func (ts *testSink) Write(events ...Event) error { + ts.mu.Lock() + defer ts.mu.Unlock() + ts.events = append(ts.events, events...) 
+ return nil +} + +func (ts *testSink) Close() error { + ts.mu.Lock() + defer ts.mu.Unlock() + ts.closed = true + + logrus.Infof("closing testSink") + return nil +} + +type delayedSink struct { + Sink + delay time.Duration +} + +func (ds *delayedSink) Write(events ...Event) error { + time.Sleep(ds.delay) + return ds.Sink.Write(events...) +} + +type flakySink struct { + Sink + rate float64 +} + +func (fs *flakySink) Write(events ...Event) error { + if rand.Float64() < fs.rate { + return fmt.Errorf("error writing %d events", len(events)) + } + + return fs.Sink.Write(events...) +} + +func checkClose(t *testing.T, sink Sink) { + if err := sink.Close(); err != nil { + t.Fatalf("unexpected error closing: %v", err) + } + + // second close should not crash but should return an error. + if err := sink.Close(); err == nil { + t.Fatalf("no error on double close") + } + + // Write after closed should be an error + if err := sink.Write([]Event{}...); err == nil { + t.Fatalf("write after closed did not have an error") + } else if err != ErrSinkClosed { + t.Fatalf("error should be ErrSinkClosed") + } +} diff --git a/vendor/github.com/docker/distribution/project/dev-image/Dockerfile b/vendor/github.com/docker/distribution/project/dev-image/Dockerfile new file mode 100644 index 000000000..1e2a8471c --- /dev/null +++ b/vendor/github.com/docker/distribution/project/dev-image/Dockerfile @@ -0,0 +1,20 @@ +FROM ubuntu:14.04 + +ENV GOLANG_VERSION 1.4rc1 +ENV GOPATH /var/cache/drone +ENV GOROOT /usr/local/go +ENV PATH $PATH:$GOROOT/bin:$GOPATH/bin + +ENV LANG C +ENV LC_ALL C + +RUN apt-get update && apt-get install -y \ + wget ca-certificates git mercurial bzr \ + --no-install-recommends \ + && rm -rf /var/lib/apt/lists/* + +RUN wget https://golang.org/dl/go$GOLANG_VERSION.linux-amd64.tar.gz --quiet && \ + tar -C /usr/local -xzf go$GOLANG_VERSION.linux-amd64.tar.gz && \ + rm go${GOLANG_VERSION}.linux-amd64.tar.gz + +RUN go get github.com/axw/gocov/gocov github.com/mattn/goveralls github.com/golang/lint/golint diff --git a/vendor/github.com/docker/distribution/project/hooks/README.md b/vendor/github.com/docker/distribution/project/hooks/README.md new file mode 100644 index 000000000..eda886966 --- /dev/null +++ b/vendor/github.com/docker/distribution/project/hooks/README.md @@ -0,0 +1,6 @@ +Git Hooks +========= + +To enforce valid and properly-formatted code, there is CI in place which runs `gofmt`, `golint`, and `go vet` against code in the repository. + +As an aid to prevent committing invalid code in the first place, a git pre-commit hook has been added to the repository, found in [pre-commit](./pre-commit). As it is impossible to automatically add linked hooks to a git repository, this hook should be linked into your `.git/hooks/pre-commit`, which can be done by running the `configure-hooks.sh` script in this directory. This script is the preferred method of configuring hooks, as it will be updated as more are added. \ No newline at end of file diff --git a/vendor/github.com/docker/distribution/project/hooks/configure-hooks.sh b/vendor/github.com/docker/distribution/project/hooks/configure-hooks.sh new file mode 100755 index 000000000..6afea8a13 --- /dev/null +++ b/vendor/github.com/docker/distribution/project/hooks/configure-hooks.sh @@ -0,0 +1,18 @@ +#!/bin/sh + +cd $(dirname $0) + +REPO_ROOT=$(git rev-parse --show-toplevel) +RESOLVE_REPO_ROOT_STATUS=$? +if [ "$RESOLVE_REPO_ROOT_STATUS" -ne "0" ]; then + echo -e "Unable to resolve repository root. 
Error:\n$REPO_ROOT" > /dev/stderr + exit $RESOLVE_REPO_ROOT_STATUS +fi + +set -e +set -x + +# Just in case the directory doesn't exist +mkdir -p $REPO_ROOT/.git/hooks + +ln -f -s $(pwd)/pre-commit $REPO_ROOT/.git/hooks/pre-commit \ No newline at end of file diff --git a/vendor/github.com/docker/distribution/project/hooks/pre-commit b/vendor/github.com/docker/distribution/project/hooks/pre-commit new file mode 100755 index 000000000..3ee2e913f --- /dev/null +++ b/vendor/github.com/docker/distribution/project/hooks/pre-commit @@ -0,0 +1,29 @@ +#!/bin/sh + +REPO_ROOT=$(git rev-parse --show-toplevel) +RESOLVE_REPO_ROOT_STATUS=$? +if [ "$RESOLVE_REPO_ROOT_STATUS" -ne "0" ]; then + printf "Unable to resolve repository root. Error:\n%s\n" "$RESOLVE_REPO_ROOT_STATUS" > /dev/stderr + exit $RESOLVE_REPO_ROOT_STATUS +fi + +cd $REPO_ROOT + +GOFMT_ERRORS=$(gofmt -s -l . 2>&1) +if [ -n "$GOFMT_ERRORS" ]; then + printf 'gofmt failed for the following files:\n%s\n\nPlease run "gofmt -s -l ." in the root of your repository before committing\n' "$GOFMT_ERRORS" > /dev/stderr + exit 1 +fi + +GOLINT_ERRORS=$(golint ./... 2>&1) +if [ -n "$GOLINT_ERRORS" ]; then + printf "golint failed with the following errors:\n%s\n" "$GOLINT_ERRORS" > /dev/stderr + exit 1 +fi + +GOVET_ERRORS=$(go vet ./... 2>&1) +GOVET_STATUS=$? +if [ "$GOVET_STATUS" -ne "0" ]; then + printf "govet failed with the following errors:\n%s\n" "$GOVET_ERRORS" > /dev/stderr + exit $GOVET_STATUS +fi diff --git a/vendor/github.com/docker/distribution/reference/reference.go b/vendor/github.com/docker/distribution/reference/reference.go new file mode 100644 index 000000000..02786628e --- /dev/null +++ b/vendor/github.com/docker/distribution/reference/reference.go @@ -0,0 +1,370 @@ +// Package reference provides a general type to represent any way of referencing images within the registry. +// Its main purpose is to abstract tags and digests (content-addressable hash). +// +// Grammar +// +// reference := name [ ":" tag ] [ "@" digest ] +// name := [hostname '/'] component ['/' component]* +// hostname := hostcomponent ['.' hostcomponent]* [':' port-number] +// hostcomponent := /([a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9-]*[a-zA-Z0-9])/ +// port-number := /[0-9]+/ +// component := alpha-numeric [separator alpha-numeric]* +// alpha-numeric := /[a-z0-9]+/ +// separator := /[_.]|__|[-]*/ +// +// tag := /[\w][\w.-]{0,127}/ +// +// digest := digest-algorithm ":" digest-hex +// digest-algorithm := digest-algorithm-component [ digest-algorithm-separator digest-algorithm-component ] +// digest-algorithm-separator := /[+.-_]/ +// digest-algorithm-component := /[A-Za-z][A-Za-z0-9]*/ +// digest-hex := /[0-9a-fA-F]{32,}/ ; At least 128 bit digest value +package reference + +import ( + "errors" + "fmt" + "path" + "strings" + + "github.com/docker/distribution/digest" +) + +const ( + // NameTotalLengthMax is the maximum total number of characters in a repository name. + NameTotalLengthMax = 255 +) + +var ( + // ErrReferenceInvalidFormat represents an error while trying to parse a string as a reference. + ErrReferenceInvalidFormat = errors.New("invalid reference format") + + // ErrTagInvalidFormat represents an error while trying to parse a string as a tag. + ErrTagInvalidFormat = errors.New("invalid tag format") + + // ErrDigestInvalidFormat represents an error while trying to parse a string as a tag. + ErrDigestInvalidFormat = errors.New("invalid digest format") + + // ErrNameContainsUppercase is returned for invalid repository names that contain uppercase characters. 
+ ErrNameContainsUppercase = errors.New("repository name must be lowercase") + + // ErrNameEmpty is returned for empty, invalid repository names. + ErrNameEmpty = errors.New("repository name must have at least one component") + + // ErrNameTooLong is returned when a repository name is longer than NameTotalLengthMax. + ErrNameTooLong = fmt.Errorf("repository name must not be more than %v characters", NameTotalLengthMax) +) + +// Reference is an opaque object reference identifier that may include +// modifiers such as a hostname, name, tag, and digest. +type Reference interface { + // String returns the full reference + String() string +} + +// Field provides a wrapper type for resolving correct reference types when +// working with encoding. +type Field struct { + reference Reference +} + +// AsField wraps a reference in a Field for encoding. +func AsField(reference Reference) Field { + return Field{reference} +} + +// Reference unwraps the reference type from the field to +// return the Reference object. This object should be +// of the appropriate type to further check for different +// reference types. +func (f Field) Reference() Reference { + return f.reference +} + +// MarshalText serializes the field to byte text which +// is the string of the reference. +func (f Field) MarshalText() (p []byte, err error) { + return []byte(f.reference.String()), nil +} + +// UnmarshalText parses text bytes by invoking the +// reference parser to ensure the appropriately +// typed reference object is wrapped by field. +func (f *Field) UnmarshalText(p []byte) error { + r, err := Parse(string(p)) + if err != nil { + return err + } + + f.reference = r + return nil +} + +// Named is an object with a full name +type Named interface { + Reference + Name() string +} + +// Tagged is an object which has a tag +type Tagged interface { + Reference + Tag() string +} + +// NamedTagged is an object including a name and tag. +type NamedTagged interface { + Named + Tag() string +} + +// Digested is an object which has a digest +// in which it can be referenced by +type Digested interface { + Reference + Digest() digest.Digest +} + +// Canonical reference is an object with a fully unique +// name including a name with hostname and digest +type Canonical interface { + Named + Digest() digest.Digest +} + +// SplitHostname splits a named reference into a +// hostname and name string. If no valid hostname is +// found, the hostname is empty and the full value +// is returned as name +func SplitHostname(named Named) (string, string) { + name := named.Name() + match := anchoredNameRegexp.FindStringSubmatch(name) + if len(match) != 3 { + return "", name + } + return match[1], match[2] +} + +// Parse parses s and returns a syntactically valid Reference. +// If an error was encountered it is returned, along with a nil Reference. +// NOTE: Parse will not handle short digests. 
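Parse, defined next, accepts any string matching the grammar documented at the top of this file and returns the most specific reference type it can, which callers then narrow with type assertions. A small usage sketch against the exported API; the repository name and the all-f digest are made-up example values.

package main

import (
	"fmt"

	"github.com/docker/distribution/reference"
)

func main() {
	// A fully qualified reference: hostname with port, name, tag and digest.
	ref, err := reference.Parse("registry.example.com:5000/library/test:latest@sha256:ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff")
	if err != nil {
		panic(err)
	}

	if named, ok := ref.(reference.Named); ok {
		hostname, remainder := reference.SplitHostname(named)
		fmt.Println("hostname:", hostname) // registry.example.com:5000
		fmt.Println("name:", remainder)    // library/test
	}
	if tagged, ok := ref.(reference.Tagged); ok {
		fmt.Println("tag:", tagged.Tag()) // latest
	}
	if digested, ok := ref.(reference.Digested); ok {
		fmt.Println("digest:", digested.Digest())
	}
}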
+func Parse(s string) (Reference, error) { + matches := ReferenceRegexp.FindStringSubmatch(s) + if matches == nil { + if s == "" { + return nil, ErrNameEmpty + } + if ReferenceRegexp.FindStringSubmatch(strings.ToLower(s)) != nil { + return nil, ErrNameContainsUppercase + } + return nil, ErrReferenceInvalidFormat + } + + if len(matches[1]) > NameTotalLengthMax { + return nil, ErrNameTooLong + } + + ref := reference{ + name: matches[1], + tag: matches[2], + } + if matches[3] != "" { + var err error + ref.digest, err = digest.ParseDigest(matches[3]) + if err != nil { + return nil, err + } + } + + r := getBestReferenceType(ref) + if r == nil { + return nil, ErrNameEmpty + } + + return r, nil +} + +// ParseNamed parses s and returns a syntactically valid reference implementing +// the Named interface. The reference must have a name, otherwise an error is +// returned. +// If an error was encountered it is returned, along with a nil Reference. +// NOTE: ParseNamed will not handle short digests. +func ParseNamed(s string) (Named, error) { + ref, err := Parse(s) + if err != nil { + return nil, err + } + named, isNamed := ref.(Named) + if !isNamed { + return nil, fmt.Errorf("reference %s has no name", ref.String()) + } + return named, nil +} + +// WithName returns a named object representing the given string. If the input +// is invalid ErrReferenceInvalidFormat will be returned. +func WithName(name string) (Named, error) { + if len(name) > NameTotalLengthMax { + return nil, ErrNameTooLong + } + if !anchoredNameRegexp.MatchString(name) { + return nil, ErrReferenceInvalidFormat + } + return repository(name), nil +} + +// WithTag combines the name from "name" and the tag from "tag" to form a +// reference incorporating both the name and the tag. +func WithTag(name Named, tag string) (NamedTagged, error) { + if !anchoredTagRegexp.MatchString(tag) { + return nil, ErrTagInvalidFormat + } + if canonical, ok := name.(Canonical); ok { + return reference{ + name: name.Name(), + tag: tag, + digest: canonical.Digest(), + }, nil + } + return taggedReference{ + name: name.Name(), + tag: tag, + }, nil +} + +// WithDigest combines the name from "name" and the digest from "digest" to form +// a reference incorporating both the name and the digest. +func WithDigest(name Named, digest digest.Digest) (Canonical, error) { + if !anchoredDigestRegexp.MatchString(digest.String()) { + return nil, ErrDigestInvalidFormat + } + if tagged, ok := name.(Tagged); ok { + return reference{ + name: name.Name(), + tag: tagged.Tag(), + digest: digest, + }, nil + } + return canonicalReference{ + name: name.Name(), + digest: digest, + }, nil +} + +// Match reports whether ref matches the specified pattern. +// See https://godoc.org/path#Match for supported patterns. +func Match(pattern string, ref Reference) (bool, error) { + matched, err := path.Match(pattern, ref.String()) + if namedRef, isNamed := ref.(Named); isNamed && !matched { + matched, _ = path.Match(pattern, namedRef.Name()) + } + return matched, err +} + +// TrimNamed removes any tag or digest from the named reference. 
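TrimNamed, defined below, strips any tag or digest back down to a bare repository name. Together with WithName, WithTag and WithDigest above, references can also be built programmatically instead of parsed; a usage sketch with illustrative values follows.

package main

import (
	"fmt"

	"github.com/docker/distribution/digest"
	"github.com/docker/distribution/reference"
)

func main() {
	// Build a reference from its parts rather than parsing a string.
	named, err := reference.WithName("registry.example.com:5000/library/test")
	if err != nil {
		panic(err)
	}

	tagged, err := reference.WithTag(named, "latest")
	if err != nil {
		panic(err)
	}
	fmt.Println(tagged.String()) // registry.example.com:5000/library/test:latest

	canonical, err := reference.WithDigest(named, digest.Digest("sha256:ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff"))
	if err != nil {
		panic(err)
	}
	fmt.Println(canonical.String())

	// TrimNamed drops tag and digest, leaving just the repository name.
	fmt.Println(reference.TrimNamed(tagged).String()) // registry.example.com:5000/library/test
}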
+func TrimNamed(ref Named) Named { + return repository(ref.Name()) +} + +func getBestReferenceType(ref reference) Reference { + if ref.name == "" { + // Allow digest only references + if ref.digest != "" { + return digestReference(ref.digest) + } + return nil + } + if ref.tag == "" { + if ref.digest != "" { + return canonicalReference{ + name: ref.name, + digest: ref.digest, + } + } + return repository(ref.name) + } + if ref.digest == "" { + return taggedReference{ + name: ref.name, + tag: ref.tag, + } + } + + return ref +} + +type reference struct { + name string + tag string + digest digest.Digest +} + +func (r reference) String() string { + return r.name + ":" + r.tag + "@" + r.digest.String() +} + +func (r reference) Name() string { + return r.name +} + +func (r reference) Tag() string { + return r.tag +} + +func (r reference) Digest() digest.Digest { + return r.digest +} + +type repository string + +func (r repository) String() string { + return string(r) +} + +func (r repository) Name() string { + return string(r) +} + +type digestReference digest.Digest + +func (d digestReference) String() string { + return d.String() +} + +func (d digestReference) Digest() digest.Digest { + return digest.Digest(d) +} + +type taggedReference struct { + name string + tag string +} + +func (t taggedReference) String() string { + return t.name + ":" + t.tag +} + +func (t taggedReference) Name() string { + return t.name +} + +func (t taggedReference) Tag() string { + return t.tag +} + +type canonicalReference struct { + name string + digest digest.Digest +} + +func (c canonicalReference) String() string { + return c.name + "@" + c.digest.String() +} + +func (c canonicalReference) Name() string { + return c.name +} + +func (c canonicalReference) Digest() digest.Digest { + return c.digest +} diff --git a/vendor/github.com/docker/distribution/reference/reference_test.go b/vendor/github.com/docker/distribution/reference/reference_test.go new file mode 100644 index 000000000..405c47c05 --- /dev/null +++ b/vendor/github.com/docker/distribution/reference/reference_test.go @@ -0,0 +1,661 @@ +package reference + +import ( + "encoding/json" + "strconv" + "strings" + "testing" + + "github.com/docker/distribution/digest" +) + +func TestReferenceParse(t *testing.T) { + // referenceTestcases is a unified set of testcases for + // testing the parsing of references + referenceTestcases := []struct { + // input is the repository name or name component testcase + input string + // err is the error expected from Parse, or nil + err error + // repository is the string representation for the reference + repository string + // hostname is the hostname expected in the reference + hostname string + // tag is the tag for the reference + tag string + // digest is the digest for the reference (enforces digest reference) + digest string + }{ + { + input: "test_com", + repository: "test_com", + }, + { + input: "test.com:tag", + repository: "test.com", + tag: "tag", + }, + { + input: "test.com:5000", + repository: "test.com", + tag: "5000", + }, + { + input: "test.com/repo:tag", + hostname: "test.com", + repository: "test.com/repo", + tag: "tag", + }, + { + input: "test:5000/repo", + hostname: "test:5000", + repository: "test:5000/repo", + }, + { + input: "test:5000/repo:tag", + hostname: "test:5000", + repository: "test:5000/repo", + tag: "tag", + }, + { + input: "test:5000/repo@sha256:ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff", + hostname: "test:5000", + repository: "test:5000/repo", + digest: 
"sha256:ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff", + }, + { + input: "test:5000/repo:tag@sha256:ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff", + hostname: "test:5000", + repository: "test:5000/repo", + tag: "tag", + digest: "sha256:ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff", + }, + { + input: "test:5000/repo", + hostname: "test:5000", + repository: "test:5000/repo", + }, + { + input: "", + err: ErrNameEmpty, + }, + { + input: ":justtag", + err: ErrReferenceInvalidFormat, + }, + { + input: "@sha256:ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff", + err: ErrReferenceInvalidFormat, + }, + { + input: "repo@sha256:ffffffffffffffffffffffffffffffffff", + err: digest.ErrDigestInvalidLength, + }, + { + input: "validname@invaliddigest:ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff", + err: digest.ErrDigestUnsupported, + }, + { + input: "Uppercase:tag", + err: ErrNameContainsUppercase, + }, + // FIXME "Uppercase" is incorrectly handled as a domain-name here, therefore passes. + // See https://github.com/docker/distribution/pull/1778, and https://github.com/docker/docker/pull/20175 + //{ + // input: "Uppercase/lowercase:tag", + // err: ErrNameContainsUppercase, + //}, + { + input: "test:5000/Uppercase/lowercase:tag", + err: ErrNameContainsUppercase, + }, + { + input: "lowercase:Uppercase", + repository: "lowercase", + tag: "Uppercase", + }, + { + input: strings.Repeat("a/", 128) + "a:tag", + err: ErrNameTooLong, + }, + { + input: strings.Repeat("a/", 127) + "a:tag-puts-this-over-max", + hostname: "a", + repository: strings.Repeat("a/", 127) + "a", + tag: "tag-puts-this-over-max", + }, + { + input: "aa/asdf$$^/aa", + err: ErrReferenceInvalidFormat, + }, + { + input: "sub-dom1.foo.com/bar/baz/quux", + hostname: "sub-dom1.foo.com", + repository: "sub-dom1.foo.com/bar/baz/quux", + }, + { + input: "sub-dom1.foo.com/bar/baz/quux:some-long-tag", + hostname: "sub-dom1.foo.com", + repository: "sub-dom1.foo.com/bar/baz/quux", + tag: "some-long-tag", + }, + { + input: "b.gcr.io/test.example.com/my-app:test.example.com", + hostname: "b.gcr.io", + repository: "b.gcr.io/test.example.com/my-app", + tag: "test.example.com", + }, + { + input: "xn--n3h.com/myimage:xn--n3h.com", // ☃.com in punycode + hostname: "xn--n3h.com", + repository: "xn--n3h.com/myimage", + tag: "xn--n3h.com", + }, + { + input: "xn--7o8h.com/myimage:xn--7o8h.com@sha512:ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff", // 🐳.com in punycode + hostname: "xn--7o8h.com", + repository: "xn--7o8h.com/myimage", + tag: "xn--7o8h.com", + digest: "sha512:ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff", + }, + { + input: "foo_bar.com:8080", + repository: "foo_bar.com", + tag: "8080", + }, + { + input: "foo/foo_bar.com:8080", + hostname: "foo", + repository: "foo/foo_bar.com", + tag: "8080", + }, + } + for _, testcase := range referenceTestcases { + failf := func(format string, v ...interface{}) { + t.Logf(strconv.Quote(testcase.input)+": "+format, v...) 
+ t.Fail() + } + + repo, err := Parse(testcase.input) + if testcase.err != nil { + if err == nil { + failf("missing expected error: %v", testcase.err) + } else if testcase.err != err { + failf("mismatched error: got %v, expected %v", err, testcase.err) + } + continue + } else if err != nil { + failf("unexpected parse error: %v", err) + continue + } + if repo.String() != testcase.input { + failf("mismatched repo: got %q, expected %q", repo.String(), testcase.input) + } + + if named, ok := repo.(Named); ok { + if named.Name() != testcase.repository { + failf("unexpected repository: got %q, expected %q", named.Name(), testcase.repository) + } + hostname, _ := SplitHostname(named) + if hostname != testcase.hostname { + failf("unexpected hostname: got %q, expected %q", hostname, testcase.hostname) + } + } else if testcase.repository != "" || testcase.hostname != "" { + failf("expected named type, got %T", repo) + } + + tagged, ok := repo.(Tagged) + if testcase.tag != "" { + if ok { + if tagged.Tag() != testcase.tag { + failf("unexpected tag: got %q, expected %q", tagged.Tag(), testcase.tag) + } + } else { + failf("expected tagged type, got %T", repo) + } + } else if ok { + failf("unexpected tagged type") + } + + digested, ok := repo.(Digested) + if testcase.digest != "" { + if ok { + if digested.Digest().String() != testcase.digest { + failf("unexpected digest: got %q, expected %q", digested.Digest().String(), testcase.digest) + } + } else { + failf("expected digested type, got %T", repo) + } + } else if ok { + failf("unexpected digested type") + } + + } +} + +// TestWithNameFailure tests cases where WithName should fail. Cases where it +// should succeed are covered by TestSplitHostname, below. +func TestWithNameFailure(t *testing.T) { + testcases := []struct { + input string + err error + }{ + { + input: "", + err: ErrNameEmpty, + }, + { + input: ":justtag", + err: ErrReferenceInvalidFormat, + }, + { + input: "@sha256:ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff", + err: ErrReferenceInvalidFormat, + }, + { + input: "validname@invaliddigest:ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff", + err: ErrReferenceInvalidFormat, + }, + { + input: strings.Repeat("a/", 128) + "a:tag", + err: ErrNameTooLong, + }, + { + input: "aa/asdf$$^/aa", + err: ErrReferenceInvalidFormat, + }, + } + for _, testcase := range testcases { + failf := func(format string, v ...interface{}) { + t.Logf(strconv.Quote(testcase.input)+": "+format, v...) + t.Fail() + } + + _, err := WithName(testcase.input) + if err == nil { + failf("no error parsing name. expected: %s", testcase.err) + } + } +} + +func TestSplitHostname(t *testing.T) { + testcases := []struct { + input string + hostname string + name string + }{ + { + input: "test.com/foo", + hostname: "test.com", + name: "foo", + }, + { + input: "test_com/foo", + hostname: "", + name: "test_com/foo", + }, + { + input: "test:8080/foo", + hostname: "test:8080", + name: "foo", + }, + { + input: "test.com:8080/foo", + hostname: "test.com:8080", + name: "foo", + }, + { + input: "test-com:8080/foo", + hostname: "test-com:8080", + name: "foo", + }, + { + input: "xn--n3h.com:18080/foo", + hostname: "xn--n3h.com:18080", + name: "foo", + }, + } + for _, testcase := range testcases { + failf := func(format string, v ...interface{}) { + t.Logf(strconv.Quote(testcase.input)+": "+format, v...) 
+ t.Fail() + } + + named, err := WithName(testcase.input) + if err != nil { + failf("error parsing name: %s", err) + } + hostname, name := SplitHostname(named) + if hostname != testcase.hostname { + failf("unexpected hostname: got %q, expected %q", hostname, testcase.hostname) + } + if name != testcase.name { + failf("unexpected name: got %q, expected %q", name, testcase.name) + } + } +} + +type serializationType struct { + Description string + Field Field +} + +func TestSerialization(t *testing.T) { + testcases := []struct { + description string + input string + name string + tag string + digest string + err error + }{ + { + description: "empty value", + err: ErrNameEmpty, + }, + { + description: "just a name", + input: "example.com:8000/named", + name: "example.com:8000/named", + }, + { + description: "name with a tag", + input: "example.com:8000/named:tagged", + name: "example.com:8000/named", + tag: "tagged", + }, + { + description: "name with digest", + input: "other.com/named@sha256:1234567890098765432112345667890098765432112345667890098765432112", + name: "other.com/named", + digest: "sha256:1234567890098765432112345667890098765432112345667890098765432112", + }, + } + for _, testcase := range testcases { + failf := func(format string, v ...interface{}) { + t.Logf(strconv.Quote(testcase.input)+": "+format, v...) + t.Fail() + } + + m := map[string]string{ + "Description": testcase.description, + "Field": testcase.input, + } + b, err := json.Marshal(m) + if err != nil { + failf("error marshalling: %v", err) + } + t := serializationType{} + + if err := json.Unmarshal(b, &t); err != nil { + if testcase.err == nil { + failf("error unmarshalling: %v", err) + } + if err != testcase.err { + failf("wrong error, expected %v, got %v", testcase.err, err) + } + + continue + } else if testcase.err != nil { + failf("expected error unmarshalling: %v", testcase.err) + } + + if t.Description != testcase.description { + failf("wrong description, expected %q, got %q", testcase.description, t.Description) + } + + ref := t.Field.Reference() + + if named, ok := ref.(Named); ok { + if named.Name() != testcase.name { + failf("unexpected repository: got %q, expected %q", named.Name(), testcase.name) + } + } else if testcase.name != "" { + failf("expected named type, got %T", ref) + } + + tagged, ok := ref.(Tagged) + if testcase.tag != "" { + if ok { + if tagged.Tag() != testcase.tag { + failf("unexpected tag: got %q, expected %q", tagged.Tag(), testcase.tag) + } + } else { + failf("expected tagged type, got %T", ref) + } + } else if ok { + failf("unexpected tagged type") + } + + digested, ok := ref.(Digested) + if testcase.digest != "" { + if ok { + if digested.Digest().String() != testcase.digest { + failf("unexpected digest: got %q, expected %q", digested.Digest().String(), testcase.digest) + } + } else { + failf("expected digested type, got %T", ref) + } + } else if ok { + failf("unexpected digested type") + } + + t = serializationType{ + Description: testcase.description, + Field: AsField(ref), + } + + b2, err := json.Marshal(t) + if err != nil { + failf("error marshing serialization type: %v", err) + } + + if string(b) != string(b2) { + failf("unexpected serialized value: expected %q, got %q", string(b), string(b2)) + } + + // Ensure t.Field is not implementing "Reference" directly, getting + // around the Reference type system + var fieldInterface interface{} = t.Field + if _, ok := fieldInterface.(Reference); ok { + failf("field should not implement Reference interface") + } + + } +} + +func 
TestWithTag(t *testing.T) { + testcases := []struct { + name string + digest digest.Digest + tag string + combined string + }{ + { + name: "test.com/foo", + tag: "tag", + combined: "test.com/foo:tag", + }, + { + name: "foo", + tag: "tag2", + combined: "foo:tag2", + }, + { + name: "test.com:8000/foo", + tag: "tag4", + combined: "test.com:8000/foo:tag4", + }, + { + name: "test.com:8000/foo", + tag: "TAG5", + combined: "test.com:8000/foo:TAG5", + }, + { + name: "test.com:8000/foo", + digest: "sha256:1234567890098765432112345667890098765", + tag: "TAG5", + combined: "test.com:8000/foo:TAG5@sha256:1234567890098765432112345667890098765", + }, + } + for _, testcase := range testcases { + failf := func(format string, v ...interface{}) { + t.Logf(strconv.Quote(testcase.name)+": "+format, v...) + t.Fail() + } + + named, err := WithName(testcase.name) + if err != nil { + failf("error parsing name: %s", err) + } + if testcase.digest != "" { + canonical, err := WithDigest(named, testcase.digest) + if err != nil { + failf("error adding digest") + } + named = canonical + } + + tagged, err := WithTag(named, testcase.tag) + if err != nil { + failf("WithTag failed: %s", err) + } + if tagged.String() != testcase.combined { + failf("unexpected: got %q, expected %q", tagged.String(), testcase.combined) + } + } +} + +func TestWithDigest(t *testing.T) { + testcases := []struct { + name string + digest digest.Digest + tag string + combined string + }{ + { + name: "test.com/foo", + digest: "sha256:1234567890098765432112345667890098765", + combined: "test.com/foo@sha256:1234567890098765432112345667890098765", + }, + { + name: "foo", + digest: "sha256:1234567890098765432112345667890098765", + combined: "foo@sha256:1234567890098765432112345667890098765", + }, + { + name: "test.com:8000/foo", + digest: "sha256:1234567890098765432112345667890098765", + combined: "test.com:8000/foo@sha256:1234567890098765432112345667890098765", + }, + { + name: "test.com:8000/foo", + digest: "sha256:1234567890098765432112345667890098765", + tag: "latest", + combined: "test.com:8000/foo:latest@sha256:1234567890098765432112345667890098765", + }, + } + for _, testcase := range testcases { + failf := func(format string, v ...interface{}) { + t.Logf(strconv.Quote(testcase.name)+": "+format, v...) 
+ t.Fail() + } + + named, err := WithName(testcase.name) + if err != nil { + failf("error parsing name: %s", err) + } + if testcase.tag != "" { + tagged, err := WithTag(named, testcase.tag) + if err != nil { + failf("error adding tag") + } + named = tagged + } + digested, err := WithDigest(named, testcase.digest) + if err != nil { + failf("WithDigest failed: %s", err) + } + if digested.String() != testcase.combined { + failf("unexpected: got %q, expected %q", digested.String(), testcase.combined) + } + } +} + +func TestMatchError(t *testing.T) { + named, err := Parse("foo") + if err != nil { + t.Fatal(err) + } + _, err = Match("[-x]", named) + if err == nil { + t.Fatalf("expected an error, got nothing") + } +} + +func TestMatch(t *testing.T) { + matchCases := []struct { + reference string + pattern string + expected bool + }{ + { + reference: "foo", + pattern: "foo/**/ba[rz]", + expected: false, + }, + { + reference: "foo/any/bat", + pattern: "foo/**/ba[rz]", + expected: false, + }, + { + reference: "foo/a/bar", + pattern: "foo/**/ba[rz]", + expected: true, + }, + { + reference: "foo/b/baz", + pattern: "foo/**/ba[rz]", + expected: true, + }, + { + reference: "foo/c/baz:tag", + pattern: "foo/**/ba[rz]", + expected: true, + }, + { + reference: "foo/c/baz:tag", + pattern: "foo/*/baz:tag", + expected: true, + }, + { + reference: "foo/c/baz:tag", + pattern: "foo/c/baz:tag", + expected: true, + }, + { + reference: "example.com/foo/c/baz:tag", + pattern: "*/foo/c/baz", + expected: true, + }, + { + reference: "example.com/foo/c/baz:tag", + pattern: "example.com/foo/c/baz", + expected: true, + }, + } + for _, c := range matchCases { + named, err := Parse(c.reference) + if err != nil { + t.Fatal(err) + } + actual, err := Match(c.pattern, named) + if err != nil { + t.Fatal(err) + } + if actual != c.expected { + t.Fatalf("expected %s match %s to be %v, was %v", c.reference, c.pattern, c.expected, actual) + } + } +} diff --git a/vendor/github.com/docker/distribution/reference/regexp.go b/vendor/github.com/docker/distribution/reference/regexp.go new file mode 100644 index 000000000..9a7d366bc --- /dev/null +++ b/vendor/github.com/docker/distribution/reference/regexp.go @@ -0,0 +1,124 @@ +package reference + +import "regexp" + +var ( + // alphaNumericRegexp defines the alpha numeric atom, typically a + // component of names. This only allows lower case characters and digits. + alphaNumericRegexp = match(`[a-z0-9]+`) + + // separatorRegexp defines the separators allowed to be embedded in name + // components. This allow one period, one or two underscore and multiple + // dashes. + separatorRegexp = match(`(?:[._]|__|[-]*)`) + + // nameComponentRegexp restricts registry path component names to start + // with at least one letter or number, with following parts able to be + // separated by one period, one or two underscore and multiple dashes. + nameComponentRegexp = expression( + alphaNumericRegexp, + optional(repeated(separatorRegexp, alphaNumericRegexp))) + + // hostnameComponentRegexp restricts the registry hostname component of a + // repository name to start with a component as defined by hostnameRegexp + // and followed by an optional port. + hostnameComponentRegexp = match(`(?:[a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9-]*[a-zA-Z0-9])`) + + // hostnameRegexp defines the structure of potential hostname components + // that may be part of image names. This is purposely a subset of what is + // allowed by DNS to ensure backwards compatibility with Docker image + // names. 
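+	// For example (see TestHostRegexp in regexp_test.go below), "localhost:8080"
+	// and "xn--n3h.com" are accepted, while "-ab.com" and "test.com:http" are
+	// rejected: components may not start or end with a dash and ports must be
+	// numeric.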
+ hostnameRegexp = expression( + hostnameComponentRegexp, + optional(repeated(literal(`.`), hostnameComponentRegexp)), + optional(literal(`:`), match(`[0-9]+`))) + + // TagRegexp matches valid tag names. From docker/docker:graph/tags.go. + TagRegexp = match(`[\w][\w.-]{0,127}`) + + // anchoredTagRegexp matches valid tag names, anchored at the start and + // end of the matched string. + anchoredTagRegexp = anchored(TagRegexp) + + // DigestRegexp matches valid digests. + DigestRegexp = match(`[A-Za-z][A-Za-z0-9]*(?:[-_+.][A-Za-z][A-Za-z0-9]*)*[:][[:xdigit:]]{32,}`) + + // anchoredDigestRegexp matches valid digests, anchored at the start and + // end of the matched string. + anchoredDigestRegexp = anchored(DigestRegexp) + + // NameRegexp is the format for the name component of references. The + // regexp has capturing groups for the hostname and name part omitting + // the separating forward slash from either. + NameRegexp = expression( + optional(hostnameRegexp, literal(`/`)), + nameComponentRegexp, + optional(repeated(literal(`/`), nameComponentRegexp))) + + // anchoredNameRegexp is used to parse a name value, capturing the + // hostname and trailing components. + anchoredNameRegexp = anchored( + optional(capture(hostnameRegexp), literal(`/`)), + capture(nameComponentRegexp, + optional(repeated(literal(`/`), nameComponentRegexp)))) + + // ReferenceRegexp is the full supported format of a reference. The regexp + // is anchored and has capturing groups for name, tag, and digest + // components. + ReferenceRegexp = anchored(capture(NameRegexp), + optional(literal(":"), capture(TagRegexp)), + optional(literal("@"), capture(DigestRegexp))) +) + +// match compiles the string to a regular expression. +var match = regexp.MustCompile + +// literal compiles s into a literal regular expression, escaping any regexp +// reserved characters. +func literal(s string) *regexp.Regexp { + re := match(regexp.QuoteMeta(s)) + + if _, complete := re.LiteralPrefix(); !complete { + panic("must be a literal") + } + + return re +} + +// expression defines a full expression, where each regular expression must +// follow the previous. +func expression(res ...*regexp.Regexp) *regexp.Regexp { + var s string + for _, re := range res { + s += re.String() + } + + return match(s) +} + +// optional wraps the expression in a non-capturing group and makes the +// production optional. +func optional(res ...*regexp.Regexp) *regexp.Regexp { + return match(group(expression(res...)).String() + `?`) +} + +// repeated wraps the regexp in a non-capturing group to get one or more +// matches. +func repeated(res ...*regexp.Regexp) *regexp.Regexp { + return match(group(expression(res...)).String() + `+`) +} + +// group wraps the regexp in a non-capturing group. +func group(res ...*regexp.Regexp) *regexp.Regexp { + return match(`(?:` + expression(res...).String() + `)`) +} + +// capture wraps the expression in a capturing group. +func capture(res ...*regexp.Regexp) *regexp.Regexp { + return match(`(` + expression(res...).String() + `)`) +} + +// anchored anchors the regular expression by adding start and end delimiters. 
+func anchored(res ...*regexp.Regexp) *regexp.Regexp { + return match(`^` + expression(res...).String() + `$`) +} diff --git a/vendor/github.com/docker/distribution/reference/regexp_test.go b/vendor/github.com/docker/distribution/reference/regexp_test.go new file mode 100644 index 000000000..2ec39377a --- /dev/null +++ b/vendor/github.com/docker/distribution/reference/regexp_test.go @@ -0,0 +1,489 @@ +package reference + +import ( + "regexp" + "strings" + "testing" +) + +type regexpMatch struct { + input string + match bool + subs []string +} + +func checkRegexp(t *testing.T, r *regexp.Regexp, m regexpMatch) { + matches := r.FindStringSubmatch(m.input) + if m.match && matches != nil { + if len(matches) != (r.NumSubexp()+1) || matches[0] != m.input { + t.Fatalf("Bad match result %#v for %q", matches, m.input) + } + if len(matches) < (len(m.subs) + 1) { + t.Errorf("Expected %d sub matches, only have %d for %q", len(m.subs), len(matches)-1, m.input) + } + for i := range m.subs { + if m.subs[i] != matches[i+1] { + t.Errorf("Unexpected submatch %d: %q, expected %q for %q", i+1, matches[i+1], m.subs[i], m.input) + } + } + } else if m.match { + t.Errorf("Expected match for %q", m.input) + } else if matches != nil { + t.Errorf("Unexpected match for %q", m.input) + } +} + +func TestHostRegexp(t *testing.T) { + hostcases := []regexpMatch{ + { + input: "test.com", + match: true, + }, + { + input: "test.com:10304", + match: true, + }, + { + input: "test.com:http", + match: false, + }, + { + input: "localhost", + match: true, + }, + { + input: "localhost:8080", + match: true, + }, + { + input: "a", + match: true, + }, + { + input: "a.b", + match: true, + }, + { + input: "ab.cd.com", + match: true, + }, + { + input: "a-b.com", + match: true, + }, + { + input: "-ab.com", + match: false, + }, + { + input: "ab-.com", + match: false, + }, + { + input: "ab.c-om", + match: true, + }, + { + input: "ab.-com", + match: false, + }, + { + input: "ab.com-", + match: false, + }, + { + input: "0101.com", + match: true, // TODO(dmcgowan): valid if this should be allowed + }, + { + input: "001a.com", + match: true, + }, + { + input: "b.gbc.io:443", + match: true, + }, + { + input: "b.gbc.io", + match: true, + }, + { + input: "xn--n3h.com", // ☃.com in punycode + match: true, + }, + { + input: "Asdf.com", // uppercase character + match: true, + }, + } + r := regexp.MustCompile(`^` + hostnameRegexp.String() + `$`) + for i := range hostcases { + checkRegexp(t, r, hostcases[i]) + } +} + +func TestFullNameRegexp(t *testing.T) { + if anchoredNameRegexp.NumSubexp() != 2 { + t.Fatalf("anchored name regexp should have two submatches: %v, %v != 2", + anchoredNameRegexp, anchoredNameRegexp.NumSubexp()) + } + + testcases := []regexpMatch{ + { + input: "", + match: false, + }, + { + input: "short", + match: true, + subs: []string{"", "short"}, + }, + { + input: "simple/name", + match: true, + subs: []string{"simple", "name"}, + }, + { + input: "library/ubuntu", + match: true, + subs: []string{"library", "ubuntu"}, + }, + { + input: "docker/stevvooe/app", + match: true, + subs: []string{"docker", "stevvooe/app"}, + }, + { + input: "aa/aa/aa/aa/aa/aa/aa/aa/aa/bb/bb/bb/bb/bb/bb", + match: true, + subs: []string{"aa", "aa/aa/aa/aa/aa/aa/aa/aa/bb/bb/bb/bb/bb/bb"}, + }, + { + input: "aa/aa/bb/bb/bb", + match: true, + subs: []string{"aa", "aa/bb/bb/bb"}, + }, + { + input: "a/a/a/a", + match: true, + subs: []string{"a", "a/a/a"}, + }, + { + input: "a/a/a/a/", + match: false, + }, + { + input: "a//a/a", + match: false, + }, + { + input: 
"a", + match: true, + subs: []string{"", "a"}, + }, + { + input: "a/aa", + match: true, + subs: []string{"a", "aa"}, + }, + { + input: "a/aa/a", + match: true, + subs: []string{"a", "aa/a"}, + }, + { + input: "foo.com", + match: true, + subs: []string{"", "foo.com"}, + }, + { + input: "foo.com/", + match: false, + }, + { + input: "foo.com:8080/bar", + match: true, + subs: []string{"foo.com:8080", "bar"}, + }, + { + input: "foo.com:http/bar", + match: false, + }, + { + input: "foo.com/bar", + match: true, + subs: []string{"foo.com", "bar"}, + }, + { + input: "foo.com/bar/baz", + match: true, + subs: []string{"foo.com", "bar/baz"}, + }, + { + input: "localhost:8080/bar", + match: true, + subs: []string{"localhost:8080", "bar"}, + }, + { + input: "sub-dom1.foo.com/bar/baz/quux", + match: true, + subs: []string{"sub-dom1.foo.com", "bar/baz/quux"}, + }, + { + input: "blog.foo.com/bar/baz", + match: true, + subs: []string{"blog.foo.com", "bar/baz"}, + }, + { + input: "a^a", + match: false, + }, + { + input: "aa/asdf$$^/aa", + match: false, + }, + { + input: "asdf$$^/aa", + match: false, + }, + { + input: "aa-a/a", + match: true, + subs: []string{"aa-a", "a"}, + }, + { + input: strings.Repeat("a/", 128) + "a", + match: true, + subs: []string{"a", strings.Repeat("a/", 127) + "a"}, + }, + { + input: "a-/a/a/a", + match: false, + }, + { + input: "foo.com/a-/a/a", + match: false, + }, + { + input: "-foo/bar", + match: false, + }, + { + input: "foo/bar-", + match: false, + }, + { + input: "foo-/bar", + match: false, + }, + { + input: "foo/-bar", + match: false, + }, + { + input: "_foo/bar", + match: false, + }, + { + input: "foo_bar", + match: true, + subs: []string{"", "foo_bar"}, + }, + { + input: "foo_bar.com", + match: true, + subs: []string{"", "foo_bar.com"}, + }, + { + input: "foo_bar.com:8080", + match: false, + }, + { + input: "foo_bar.com:8080/app", + match: false, + }, + { + input: "foo.com/foo_bar", + match: true, + subs: []string{"foo.com", "foo_bar"}, + }, + { + input: "____/____", + match: false, + }, + { + input: "_docker/_docker", + match: false, + }, + { + input: "docker_/docker_", + match: false, + }, + { + input: "b.gcr.io/test.example.com/my-app", + match: true, + subs: []string{"b.gcr.io", "test.example.com/my-app"}, + }, + { + input: "xn--n3h.com/myimage", // ☃.com in punycode + match: true, + subs: []string{"xn--n3h.com", "myimage"}, + }, + { + input: "xn--7o8h.com/myimage", // 🐳.com in punycode + match: true, + subs: []string{"xn--7o8h.com", "myimage"}, + }, + { + input: "example.com/xn--7o8h.com/myimage", // 🐳.com in punycode + match: true, + subs: []string{"example.com", "xn--7o8h.com/myimage"}, + }, + { + input: "example.com/some_separator__underscore/myimage", + match: true, + subs: []string{"example.com", "some_separator__underscore/myimage"}, + }, + { + input: "example.com/__underscore/myimage", + match: false, + }, + { + input: "example.com/..dots/myimage", + match: false, + }, + { + input: "example.com/.dots/myimage", + match: false, + }, + { + input: "example.com/nodouble..dots/myimage", + match: false, + }, + { + input: "example.com/nodouble..dots/myimage", + match: false, + }, + { + input: "docker./docker", + match: false, + }, + { + input: ".docker/docker", + match: false, + }, + { + input: "docker-/docker", + match: false, + }, + { + input: "-docker/docker", + match: false, + }, + { + input: "do..cker/docker", + match: false, + }, + { + input: "do__cker:8080/docker", + match: false, + }, + { + input: "do__cker/docker", + match: true, + subs: []string{"", 
"do__cker/docker"}, + }, + { + input: "b.gcr.io/test.example.com/my-app", + match: true, + subs: []string{"b.gcr.io", "test.example.com/my-app"}, + }, + { + input: "registry.io/foo/project--id.module--name.ver---sion--name", + match: true, + subs: []string{"registry.io", "foo/project--id.module--name.ver---sion--name"}, + }, + { + input: "Asdf.com/foo/bar", // uppercase character in hostname + match: true, + }, + { + input: "Foo/FarB", // uppercase characters in remote name + match: false, + }, + } + for i := range testcases { + checkRegexp(t, anchoredNameRegexp, testcases[i]) + } +} + +func TestReferenceRegexp(t *testing.T) { + if ReferenceRegexp.NumSubexp() != 3 { + t.Fatalf("anchored name regexp should have three submatches: %v, %v != 3", + ReferenceRegexp, ReferenceRegexp.NumSubexp()) + } + + testcases := []regexpMatch{ + { + input: "registry.com:8080/myapp:tag", + match: true, + subs: []string{"registry.com:8080/myapp", "tag", ""}, + }, + { + input: "registry.com:8080/myapp@sha256:be178c0543eb17f5f3043021c9e5fcf30285e557a4fc309cce97ff9ca6182912", + match: true, + subs: []string{"registry.com:8080/myapp", "", "sha256:be178c0543eb17f5f3043021c9e5fcf30285e557a4fc309cce97ff9ca6182912"}, + }, + { + input: "registry.com:8080/myapp:tag2@sha256:be178c0543eb17f5f3043021c9e5fcf30285e557a4fc309cce97ff9ca6182912", + match: true, + subs: []string{"registry.com:8080/myapp", "tag2", "sha256:be178c0543eb17f5f3043021c9e5fcf30285e557a4fc309cce97ff9ca6182912"}, + }, + { + input: "registry.com:8080/myapp@sha256:badbadbadbad", + match: false, + }, + { + input: "registry.com:8080/myapp:invalid~tag", + match: false, + }, + { + input: "bad_hostname.com:8080/myapp:tag", + match: false, + }, + { + input:// localhost treated as name, missing tag with 8080 as tag + "localhost:8080@sha256:be178c0543eb17f5f3043021c9e5fcf30285e557a4fc309cce97ff9ca6182912", + match: true, + subs: []string{"localhost", "8080", "sha256:be178c0543eb17f5f3043021c9e5fcf30285e557a4fc309cce97ff9ca6182912"}, + }, + { + input: "localhost:8080/name@sha256:be178c0543eb17f5f3043021c9e5fcf30285e557a4fc309cce97ff9ca6182912", + match: true, + subs: []string{"localhost:8080/name", "", "sha256:be178c0543eb17f5f3043021c9e5fcf30285e557a4fc309cce97ff9ca6182912"}, + }, + { + input: "localhost:http/name@sha256:be178c0543eb17f5f3043021c9e5fcf30285e557a4fc309cce97ff9ca6182912", + match: false, + }, + { + // localhost will be treated as an image name without a host + input: "localhost@sha256:be178c0543eb17f5f3043021c9e5fcf30285e557a4fc309cce97ff9ca6182912", + match: true, + subs: []string{"localhost", "", "sha256:be178c0543eb17f5f3043021c9e5fcf30285e557a4fc309cce97ff9ca6182912"}, + }, + { + input: "registry.com:8080/myapp@bad", + match: false, + }, + { + input: "registry.com:8080/myapp@2bad", + match: false, // TODO(dmcgowan): Support this as valid + }, + } + + for i := range testcases { + checkRegexp(t, ReferenceRegexp, testcases[i]) + } + +} diff --git a/vendor/github.com/docker/distribution/registry.go b/vendor/github.com/docker/distribution/registry.go new file mode 100644 index 000000000..1ede31ebb --- /dev/null +++ b/vendor/github.com/docker/distribution/registry.go @@ -0,0 +1,97 @@ +package distribution + +import ( + "github.com/docker/distribution/context" + "github.com/docker/distribution/reference" +) + +// Scope defines the set of items that match a namespace. +type Scope interface { + // Contains returns true if the name belongs to the namespace. 
+ Contains(name string) bool +} + +type fullScope struct{} + +func (f fullScope) Contains(string) bool { + return true +} + +// GlobalScope represents the full namespace scope which contains +// all other scopes. +var GlobalScope = Scope(fullScope{}) + +// Namespace represents a collection of repositories, addressable by name. +// Generally, a namespace is backed by a set of one or more services, +// providing facilities such as registry access, trust, and indexing. +type Namespace interface { + // Scope describes the names that can be used with this Namespace. The + // global namespace will have a scope that matches all names. The scope + // effectively provides an identity for the namespace. + Scope() Scope + + // Repository should return a reference to the named repository. The + // registry may or may not have the repository but should always return a + // reference. + Repository(ctx context.Context, name reference.Named) (Repository, error) + + // Repositories fills 'repos' with a lexigraphically sorted catalog of repositories + // up to the size of 'repos' and returns the value 'n' for the number of entries + // which were filled. 'last' contains an offset in the catalog, and 'err' will be + // set to io.EOF if there are no more entries to obtain. + Repositories(ctx context.Context, repos []string, last string) (n int, err error) + + // Blobs returns a blob enumerator to access all blobs + Blobs() BlobEnumerator + + // BlobStatter returns a BlobStatter to control + BlobStatter() BlobStatter +} + +// RepositoryEnumerator describes an operation to enumerate repositories +type RepositoryEnumerator interface { + Enumerate(ctx context.Context, ingester func(string) error) error +} + +// ManifestServiceOption is a function argument for Manifest Service methods +type ManifestServiceOption interface { + Apply(ManifestService) error +} + +// WithTag allows a tag to be passed into Put +func WithTag(tag string) ManifestServiceOption { + return WithTagOption{tag} +} + +// WithTagOption holds a tag +type WithTagOption struct{ Tag string } + +// Apply conforms to the ManifestServiceOption interface +func (o WithTagOption) Apply(m ManifestService) error { + // no implementation + return nil +} + +// Repository is a named collection of manifests and layers. +type Repository interface { + // Named returns the name of the repository. + Named() reference.Named + + // Manifests returns a reference to this repository's manifest service. + // with the supplied options applied. + Manifests(ctx context.Context, options ...ManifestServiceOption) (ManifestService, error) + + // Blobs returns a reference to this repository's blob service. + Blobs(ctx context.Context) BlobStore + + // TODO(stevvooe): The above BlobStore return can probably be relaxed to + // be a BlobService for use with clients. This will allow such + // implementations to avoid implementing ServeBlob. + + // Tags returns a reference to this repositories tag service + Tags(ctx context.Context) TagService +} + +// TODO(stevvooe): Must add close methods to all these. May want to change the +// way instances are created to better reflect internal dependency +// relationships. 
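As a quick orientation to the reference API vendored above: Parse validates input against ReferenceRegexp and returns the narrowest type that fits (repository, tagged, canonical, or digest-only), and SplitHostname separates the registry host from the repository path. The sketch below is a minimal usage example built only from functions exercised in reference_test.go; the image name is an arbitrary illustration, not something from this change.

package main

import (
	"fmt"

	"github.com/docker/distribution/reference"
)

func main() {
	// Parse returns the most specific reference type for the input.
	ref, err := reference.Parse("registry.example.com:5000/team/app:1.2.3")
	if err != nil {
		panic(err)
	}

	if named, ok := ref.(reference.Named); ok {
		host, repo := reference.SplitHostname(named)
		fmt.Println(host, repo) // registry.example.com:5000 team/app
	}
	if tagged, ok := ref.(reference.Tagged); ok {
		fmt.Println(tagged.Tag()) // 1.2.3
	}
}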
diff --git a/vendor/github.com/docker/distribution/registry/api/errcode/errors.go b/vendor/github.com/docker/distribution/registry/api/errcode/errors.go new file mode 100644 index 000000000..6d9bb4b62 --- /dev/null +++ b/vendor/github.com/docker/distribution/registry/api/errcode/errors.go @@ -0,0 +1,267 @@ +package errcode + +import ( + "encoding/json" + "fmt" + "strings" +) + +// ErrorCoder is the base interface for ErrorCode and Error allowing +// users of each to just call ErrorCode to get the real ID of each +type ErrorCoder interface { + ErrorCode() ErrorCode +} + +// ErrorCode represents the error type. The errors are serialized via strings +// and the integer format may change and should *never* be exported. +type ErrorCode int + +var _ error = ErrorCode(0) + +// ErrorCode just returns itself +func (ec ErrorCode) ErrorCode() ErrorCode { + return ec +} + +// Error returns the ID/Value +func (ec ErrorCode) Error() string { + // NOTE(stevvooe): Cannot use message here since it may have unpopulated args. + return strings.ToLower(strings.Replace(ec.String(), "_", " ", -1)) +} + +// Descriptor returns the descriptor for the error code. +func (ec ErrorCode) Descriptor() ErrorDescriptor { + d, ok := errorCodeToDescriptors[ec] + + if !ok { + return ErrorCodeUnknown.Descriptor() + } + + return d +} + +// String returns the canonical identifier for this error code. +func (ec ErrorCode) String() string { + return ec.Descriptor().Value +} + +// Message returned the human-readable error message for this error code. +func (ec ErrorCode) Message() string { + return ec.Descriptor().Message +} + +// MarshalText encodes the receiver into UTF-8-encoded text and returns the +// result. +func (ec ErrorCode) MarshalText() (text []byte, err error) { + return []byte(ec.String()), nil +} + +// UnmarshalText decodes the form generated by MarshalText. +func (ec *ErrorCode) UnmarshalText(text []byte) error { + desc, ok := idToDescriptors[string(text)] + + if !ok { + desc = ErrorCodeUnknown.Descriptor() + } + + *ec = desc.Code + + return nil +} + +// WithMessage creates a new Error struct based on the passed-in info and +// overrides the Message property. +func (ec ErrorCode) WithMessage(message string) Error { + return Error{ + Code: ec, + Message: message, + } +} + +// WithDetail creates a new Error struct based on the passed-in info and +// set the Detail property appropriately +func (ec ErrorCode) WithDetail(detail interface{}) Error { + return Error{ + Code: ec, + Message: ec.Message(), + }.WithDetail(detail) +} + +// WithArgs creates a new Error struct and sets the Args slice +func (ec ErrorCode) WithArgs(args ...interface{}) Error { + return Error{ + Code: ec, + Message: ec.Message(), + }.WithArgs(args...) +} + +// Error provides a wrapper around ErrorCode with extra Details provided. +type Error struct { + Code ErrorCode `json:"code"` + Message string `json:"message"` + Detail interface{} `json:"detail,omitempty"` + + // TODO(duglin): See if we need an "args" property so we can do the + // variable substitution right before showing the message to the user +} + +var _ error = Error{} + +// ErrorCode returns the ID/Value of this Error +func (e Error) ErrorCode() ErrorCode { + return e.Code +} + +// Error returns a human readable representation of the error. 
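+// For example, ErrorCodeUnknown.WithMessage("boom").Error() returns
+// "unknown: boom": the lower-cased code value followed by the message.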
+func (e Error) Error() string { + return fmt.Sprintf("%s: %s", e.Code.Error(), e.Message) +} + +// WithDetail will return a new Error, based on the current one, but with +// some Detail info added +func (e Error) WithDetail(detail interface{}) Error { + return Error{ + Code: e.Code, + Message: e.Message, + Detail: detail, + } +} + +// WithArgs uses the passed-in list of interface{} as the substitution +// variables in the Error's Message string, but returns a new Error +func (e Error) WithArgs(args ...interface{}) Error { + return Error{ + Code: e.Code, + Message: fmt.Sprintf(e.Code.Message(), args...), + Detail: e.Detail, + } +} + +// ErrorDescriptor provides relevant information about a given error code. +type ErrorDescriptor struct { + // Code is the error code that this descriptor describes. + Code ErrorCode + + // Value provides a unique, string key, often captilized with + // underscores, to identify the error code. This value is used as the + // keyed value when serializing api errors. + Value string + + // Message is a short, human readable decription of the error condition + // included in API responses. + Message string + + // Description provides a complete account of the errors purpose, suitable + // for use in documentation. + Description string + + // HTTPStatusCode provides the http status code that is associated with + // this error condition. + HTTPStatusCode int +} + +// ParseErrorCode returns the value by the string error code. +// `ErrorCodeUnknown` will be returned if the error is not known. +func ParseErrorCode(value string) ErrorCode { + ed, ok := idToDescriptors[value] + if ok { + return ed.Code + } + + return ErrorCodeUnknown +} + +// Errors provides the envelope for multiple errors and a few sugar methods +// for use within the application. +type Errors []error + +var _ error = Errors{} + +func (errs Errors) Error() string { + switch len(errs) { + case 0: + return "" + case 1: + return errs[0].Error() + default: + msg := "errors:\n" + for _, err := range errs { + msg += err.Error() + "\n" + } + return msg + } +} + +// Len returns the current number of errors. 
+func (errs Errors) Len() int { + return len(errs) +} + +// MarshalJSON converts slice of error, ErrorCode or Error into a +// slice of Error - then serializes +func (errs Errors) MarshalJSON() ([]byte, error) { + var tmpErrs struct { + Errors []Error `json:"errors,omitempty"` + } + + for _, daErr := range errs { + var err Error + + switch daErr.(type) { + case ErrorCode: + err = daErr.(ErrorCode).WithDetail(nil) + case Error: + err = daErr.(Error) + default: + err = ErrorCodeUnknown.WithDetail(daErr) + + } + + // If the Error struct was setup and they forgot to set the + // Message field (meaning its "") then grab it from the ErrCode + msg := err.Message + if msg == "" { + msg = err.Code.Message() + } + + tmpErrs.Errors = append(tmpErrs.Errors, Error{ + Code: err.Code, + Message: msg, + Detail: err.Detail, + }) + } + + return json.Marshal(tmpErrs) +} + +// UnmarshalJSON deserializes []Error and then converts it into slice of +// Error or ErrorCode +func (errs *Errors) UnmarshalJSON(data []byte) error { + var tmpErrs struct { + Errors []Error + } + + if err := json.Unmarshal(data, &tmpErrs); err != nil { + return err + } + + var newErrs Errors + for _, daErr := range tmpErrs.Errors { + // If Message is empty or exactly matches the Code's message string + // then just use the Code, no need for a full Error struct + if daErr.Detail == nil && (daErr.Message == "" || daErr.Message == daErr.Code.Message()) { + // Error's w/o details get converted to ErrorCode + newErrs = append(newErrs, daErr.Code) + } else { + // Error's w/ details are untouched + newErrs = append(newErrs, Error{ + Code: daErr.Code, + Message: daErr.Message, + Detail: daErr.Detail, + }) + } + } + + *errs = newErrs + return nil +} diff --git a/vendor/github.com/docker/distribution/registry/api/errcode/errors_test.go b/vendor/github.com/docker/distribution/registry/api/errcode/errors_test.go new file mode 100644 index 000000000..54e7a736d --- /dev/null +++ b/vendor/github.com/docker/distribution/registry/api/errcode/errors_test.go @@ -0,0 +1,185 @@ +package errcode + +import ( + "encoding/json" + "net/http" + "reflect" + "strings" + "testing" +) + +// TestErrorsManagement does a quick check of the Errors type to ensure that +// members are properly pushed and marshaled. +var ErrorCodeTest1 = Register("test.errors", ErrorDescriptor{ + Value: "TEST1", + Message: "test error 1", + Description: `Just a test message #1.`, + HTTPStatusCode: http.StatusInternalServerError, +}) + +var ErrorCodeTest2 = Register("test.errors", ErrorDescriptor{ + Value: "TEST2", + Message: "test error 2", + Description: `Just a test message #2.`, + HTTPStatusCode: http.StatusNotFound, +}) + +var ErrorCodeTest3 = Register("test.errors", ErrorDescriptor{ + Value: "TEST3", + Message: "Sorry %q isn't valid", + Description: `Just a test message #3.`, + HTTPStatusCode: http.StatusNotFound, +}) + +// TestErrorCodes ensures that error code format, mappings and +// marshaling/unmarshaling. round trips are stable. 
+func TestErrorCodes(t *testing.T) { + if len(errorCodeToDescriptors) == 0 { + t.Fatal("errors aren't loaded!") + } + + for ec, desc := range errorCodeToDescriptors { + if ec != desc.Code { + t.Fatalf("error code in descriptor isn't correct, %q != %q", ec, desc.Code) + } + + if idToDescriptors[desc.Value].Code != ec { + t.Fatalf("error code in idToDesc isn't correct, %q != %q", idToDescriptors[desc.Value].Code, ec) + } + + if ec.Message() != desc.Message { + t.Fatalf("ec.Message doesn't mtach desc.Message: %q != %q", ec.Message(), desc.Message) + } + + // Test (de)serializing the ErrorCode + p, err := json.Marshal(ec) + if err != nil { + t.Fatalf("couldn't marshal ec %v: %v", ec, err) + } + + if len(p) <= 0 { + t.Fatalf("expected content in marshaled before for error code %v", ec) + } + + // First, unmarshal to interface and ensure we have a string. + var ecUnspecified interface{} + if err := json.Unmarshal(p, &ecUnspecified); err != nil { + t.Fatalf("error unmarshaling error code %v: %v", ec, err) + } + + if _, ok := ecUnspecified.(string); !ok { + t.Fatalf("expected a string for error code %v on unmarshal got a %T", ec, ecUnspecified) + } + + // Now, unmarshal with the error code type and ensure they are equal + var ecUnmarshaled ErrorCode + if err := json.Unmarshal(p, &ecUnmarshaled); err != nil { + t.Fatalf("error unmarshaling error code %v: %v", ec, err) + } + + if ecUnmarshaled != ec { + t.Fatalf("unexpected error code during error code marshal/unmarshal: %v != %v", ecUnmarshaled, ec) + } + + expectedErrorString := strings.ToLower(strings.Replace(ec.Descriptor().Value, "_", " ", -1)) + if ec.Error() != expectedErrorString { + t.Fatalf("unexpected return from %v.Error(): %q != %q", ec, ec.Error(), expectedErrorString) + } + } + +} + +func TestErrorsManagement(t *testing.T) { + var errs Errors + + errs = append(errs, ErrorCodeTest1) + errs = append(errs, ErrorCodeTest2.WithDetail( + map[string]interface{}{"digest": "sometestblobsumdoesntmatter"})) + errs = append(errs, ErrorCodeTest3.WithArgs("BOOGIE")) + errs = append(errs, ErrorCodeTest3.WithArgs("BOOGIE").WithDetail("data")) + + p, err := json.Marshal(errs) + + if err != nil { + t.Fatalf("error marashaling errors: %v", err) + } + + expectedJSON := `{"errors":[` + + `{"code":"TEST1","message":"test error 1"},` + + `{"code":"TEST2","message":"test error 2","detail":{"digest":"sometestblobsumdoesntmatter"}},` + + `{"code":"TEST3","message":"Sorry \"BOOGIE\" isn't valid"},` + + `{"code":"TEST3","message":"Sorry \"BOOGIE\" isn't valid","detail":"data"}` + + `]}` + + if string(p) != expectedJSON { + t.Fatalf("unexpected json:\ngot:\n%q\n\nexpected:\n%q", string(p), expectedJSON) + } + + // Now test the reverse + var unmarshaled Errors + if err := json.Unmarshal(p, &unmarshaled); err != nil { + t.Fatalf("unexpected error unmarshaling error envelope: %v", err) + } + + if !reflect.DeepEqual(unmarshaled, errs) { + t.Fatalf("errors not equal after round trip:\nunmarshaled:\n%#v\n\nerrs:\n%#v", unmarshaled, errs) + } + + // Test the arg substitution stuff + e1 := unmarshaled[3].(Error) + exp1 := `Sorry "BOOGIE" isn't valid` + if e1.Message != exp1 { + t.Fatalf("Wrong msg, got:\n%q\n\nexpected:\n%q", e1.Message, exp1) + } + + exp1 = "test3: " + exp1 + if e1.Error() != exp1 { + t.Fatalf("Error() didn't return the right string, got:%s\nexpected:%s", e1.Error(), exp1) + } + + // Test again with a single value this time + errs = Errors{ErrorCodeUnknown} + expectedJSON = "{\"errors\":[{\"code\":\"UNKNOWN\",\"message\":\"unknown error\"}]}" + p, err = 
json.Marshal(errs) + + if err != nil { + t.Fatalf("error marashaling errors: %v", err) + } + + if string(p) != expectedJSON { + t.Fatalf("unexpected json: %q != %q", string(p), expectedJSON) + } + + // Now test the reverse + unmarshaled = nil + if err := json.Unmarshal(p, &unmarshaled); err != nil { + t.Fatalf("unexpected error unmarshaling error envelope: %v", err) + } + + if !reflect.DeepEqual(unmarshaled, errs) { + t.Fatalf("errors not equal after round trip:\nunmarshaled:\n%#v\n\nerrs:\n%#v", unmarshaled, errs) + } + + // Verify that calling WithArgs() more than once does the right thing. + // Meaning creates a new Error and uses the ErrorCode Message + e1 = ErrorCodeTest3.WithArgs("test1") + e2 := e1.WithArgs("test2") + if &e1 == &e2 { + t.Fatalf("args: e2 and e1 should not be the same, but they are") + } + if e2.Message != `Sorry "test2" isn't valid` { + t.Fatalf("e2 had wrong message: %q", e2.Message) + } + + // Verify that calling WithDetail() more than once does the right thing. + // Meaning creates a new Error and overwrites the old detail field + e1 = ErrorCodeTest3.WithDetail("stuff1") + e2 = e1.WithDetail("stuff2") + if &e1 == &e2 { + t.Fatalf("detail: e2 and e1 should not be the same, but they are") + } + if e2.Detail != `stuff2` { + t.Fatalf("e2 had wrong detail: %q", e2.Detail) + } + +} diff --git a/vendor/github.com/docker/distribution/registry/api/errcode/handler.go b/vendor/github.com/docker/distribution/registry/api/errcode/handler.go new file mode 100644 index 000000000..49a64a86e --- /dev/null +++ b/vendor/github.com/docker/distribution/registry/api/errcode/handler.go @@ -0,0 +1,44 @@ +package errcode + +import ( + "encoding/json" + "net/http" +) + +// ServeJSON attempts to serve the errcode in a JSON envelope. It marshals err +// and sets the content-type header to 'application/json'. It will handle +// ErrorCoder and Errors, and if necessary will create an envelope. +func ServeJSON(w http.ResponseWriter, err error) error { + w.Header().Set("Content-Type", "application/json; charset=utf-8") + var sc int + + switch errs := err.(type) { + case Errors: + if len(errs) < 1 { + break + } + + if err, ok := errs[0].(ErrorCoder); ok { + sc = err.ErrorCode().Descriptor().HTTPStatusCode + } + case ErrorCoder: + sc = errs.ErrorCode().Descriptor().HTTPStatusCode + err = Errors{err} // create an envelope. + default: + // We just have an unhandled error type, so just place in an envelope + // and move along. 
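+		// (Errors.MarshalJSON later wraps any plain error in ErrorCodeUnknown,
+		// so the client still receives a well-formed envelope with an UNKNOWN
+		// code and, since sc stays 0 in this branch, an HTTP 500 status.)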
+ err = Errors{err} + } + + if sc == 0 { + sc = http.StatusInternalServerError + } + + w.WriteHeader(sc) + + if err := json.NewEncoder(w).Encode(err); err != nil { + return err + } + + return nil +} diff --git a/vendor/github.com/docker/distribution/registry/api/errcode/register.go b/vendor/github.com/docker/distribution/registry/api/errcode/register.go new file mode 100644 index 000000000..d1e8826c6 --- /dev/null +++ b/vendor/github.com/docker/distribution/registry/api/errcode/register.go @@ -0,0 +1,138 @@ +package errcode + +import ( + "fmt" + "net/http" + "sort" + "sync" +) + +var ( + errorCodeToDescriptors = map[ErrorCode]ErrorDescriptor{} + idToDescriptors = map[string]ErrorDescriptor{} + groupToDescriptors = map[string][]ErrorDescriptor{} +) + +var ( + // ErrorCodeUnknown is a generic error that can be used as a last + // resort if there is no situation-specific error message that can be used + ErrorCodeUnknown = Register("errcode", ErrorDescriptor{ + Value: "UNKNOWN", + Message: "unknown error", + Description: `Generic error returned when the error does not have an + API classification.`, + HTTPStatusCode: http.StatusInternalServerError, + }) + + // ErrorCodeUnsupported is returned when an operation is not supported. + ErrorCodeUnsupported = Register("errcode", ErrorDescriptor{ + Value: "UNSUPPORTED", + Message: "The operation is unsupported.", + Description: `The operation was unsupported due to a missing + implementation or invalid set of parameters.`, + HTTPStatusCode: http.StatusMethodNotAllowed, + }) + + // ErrorCodeUnauthorized is returned if a request requires + // authentication. + ErrorCodeUnauthorized = Register("errcode", ErrorDescriptor{ + Value: "UNAUTHORIZED", + Message: "authentication required", + Description: `The access controller was unable to authenticate + the client. Often this will be accompanied by a + Www-Authenticate HTTP response header indicating how to + authenticate.`, + HTTPStatusCode: http.StatusUnauthorized, + }) + + // ErrorCodeDenied is returned if a client does not have sufficient + // permission to perform an action. + ErrorCodeDenied = Register("errcode", ErrorDescriptor{ + Value: "DENIED", + Message: "requested access to the resource is denied", + Description: `The access controller denied access for the + operation on a resource.`, + HTTPStatusCode: http.StatusForbidden, + }) + + // ErrorCodeUnavailable provides a common error to report unavailability + // of a service or endpoint. + ErrorCodeUnavailable = Register("errcode", ErrorDescriptor{ + Value: "UNAVAILABLE", + Message: "service unavailable", + Description: "Returned when a service is not available", + HTTPStatusCode: http.StatusServiceUnavailable, + }) + + // ErrorCodeTooManyRequests is returned if a client attempts too many + // times to contact a service endpoint. 
+ ErrorCodeTooManyRequests = Register("errcode", ErrorDescriptor{ + Value: "TOOMANYREQUESTS", + Message: "too many requests", + Description: `Returned when a client attempts to contact a + service too many times`, + HTTPStatusCode: http.StatusTooManyRequests, + }) +) + +var nextCode = 1000 +var registerLock sync.Mutex + +// Register will make the passed-in error known to the environment and +// return a new ErrorCode +func Register(group string, descriptor ErrorDescriptor) ErrorCode { + registerLock.Lock() + defer registerLock.Unlock() + + descriptor.Code = ErrorCode(nextCode) + + if _, ok := idToDescriptors[descriptor.Value]; ok { + panic(fmt.Sprintf("ErrorValue %q is already registered", descriptor.Value)) + } + if _, ok := errorCodeToDescriptors[descriptor.Code]; ok { + panic(fmt.Sprintf("ErrorCode %v is already registered", descriptor.Code)) + } + + groupToDescriptors[group] = append(groupToDescriptors[group], descriptor) + errorCodeToDescriptors[descriptor.Code] = descriptor + idToDescriptors[descriptor.Value] = descriptor + + nextCode++ + return descriptor.Code +} + +type byValue []ErrorDescriptor + +func (a byValue) Len() int { return len(a) } +func (a byValue) Swap(i, j int) { a[i], a[j] = a[j], a[i] } +func (a byValue) Less(i, j int) bool { return a[i].Value < a[j].Value } + +// GetGroupNames returns the list of Error group names that are registered +func GetGroupNames() []string { + keys := []string{} + + for k := range groupToDescriptors { + keys = append(keys, k) + } + sort.Strings(keys) + return keys +} + +// GetErrorCodeGroup returns the named group of error descriptors +func GetErrorCodeGroup(name string) []ErrorDescriptor { + desc := groupToDescriptors[name] + sort.Sort(byValue(desc)) + return desc +} + +// GetErrorAllDescriptors returns a slice of all ErrorDescriptors that are +// registered, irrespective of what group they're in +func GetErrorAllDescriptors() []ErrorDescriptor { + result := []ErrorDescriptor{} + + for _, group := range GetGroupNames() { + result = append(result, GetErrorCodeGroup(group)...) + } + sort.Sort(byValue(result)) + return result +} diff --git a/vendor/github.com/docker/distribution/registry/api/v2/descriptors.go b/vendor/github.com/docker/distribution/registry/api/v2/descriptors.go new file mode 100644 index 000000000..9979abae6 --- /dev/null +++ b/vendor/github.com/docker/distribution/registry/api/v2/descriptors.go @@ -0,0 +1,1596 @@ +package v2 + +import ( + "net/http" + "regexp" + + "github.com/docker/distribution/digest" + "github.com/docker/distribution/reference" + "github.com/docker/distribution/registry/api/errcode" +) + +var ( + nameParameterDescriptor = ParameterDescriptor{ + Name: "name", + Type: "string", + Format: reference.NameRegexp.String(), + Required: true, + Description: `Name of the target repository.`, + } + + referenceParameterDescriptor = ParameterDescriptor{ + Name: "reference", + Type: "string", + Format: reference.TagRegexp.String(), + Required: true, + Description: `Tag or digest of the target manifest.`, + } + + uuidParameterDescriptor = ParameterDescriptor{ + Name: "uuid", + Type: "opaque", + Required: true, + Description: "A uuid identifying the upload. This field can accept characters that match `[a-zA-Z0-9-_.=]+`.", + } + + digestPathParameter = ParameterDescriptor{ + Name: "digest", + Type: "path", + Required: true, + Format: digest.DigestRegexp.String(), + Description: `Digest of desired blob.`, + } + + hostHeader = ParameterDescriptor{ + Name: "Host", + Type: "string", + Description: "Standard HTTP Host Header. 
Should be set to the registry host.", + Format: "", + Examples: []string{"registry-1.docker.io"}, + } + + authHeader = ParameterDescriptor{ + Name: "Authorization", + Type: "string", + Description: "An RFC7235 compliant authorization header.", + Format: " ", + Examples: []string{"Bearer dGhpcyBpcyBhIGZha2UgYmVhcmVyIHRva2VuIQ=="}, + } + + authChallengeHeader = ParameterDescriptor{ + Name: "WWW-Authenticate", + Type: "string", + Description: "An RFC7235 compliant authentication challenge header.", + Format: ` realm="", ..."`, + Examples: []string{ + `Bearer realm="https://auth.docker.com/", service="registry.docker.com", scopes="repository:library/ubuntu:pull"`, + }, + } + + contentLengthZeroHeader = ParameterDescriptor{ + Name: "Content-Length", + Description: "The `Content-Length` header must be zero and the body must be empty.", + Type: "integer", + Format: "0", + } + + dockerUploadUUIDHeader = ParameterDescriptor{ + Name: "Docker-Upload-UUID", + Description: "Identifies the docker upload uuid for the current request.", + Type: "uuid", + Format: "", + } + + digestHeader = ParameterDescriptor{ + Name: "Docker-Content-Digest", + Description: "Digest of the targeted content for the request.", + Type: "digest", + Format: "", + } + + linkHeader = ParameterDescriptor{ + Name: "Link", + Type: "link", + Description: "RFC5988 compliant rel='next' with URL to next result set, if available", + Format: `<?n=&last=>; rel="next"`, + } + + paginationParameters = []ParameterDescriptor{ + { + Name: "n", + Type: "integer", + Description: "Limit the number of entries in each response. It not present, all entries will be returned.", + Format: "", + Required: false, + }, + { + Name: "last", + Type: "string", + Description: "Result set will include values lexically after last.", + Format: "", + Required: false, + }, + } + + unauthorizedResponseDescriptor = ResponseDescriptor{ + Name: "Authentication Required", + StatusCode: http.StatusUnauthorized, + Description: "The client is not authenticated.", + Headers: []ParameterDescriptor{ + authChallengeHeader, + { + Name: "Content-Length", + Type: "integer", + Description: "Length of the JSON response body.", + Format: "", + }, + }, + Body: BodyDescriptor{ + ContentType: "application/json; charset=utf-8", + Format: errorsBody, + }, + ErrorCodes: []errcode.ErrorCode{ + errcode.ErrorCodeUnauthorized, + }, + } + + repositoryNotFoundResponseDescriptor = ResponseDescriptor{ + Name: "No Such Repository Error", + StatusCode: http.StatusNotFound, + Description: "The repository is not known to the registry.", + Headers: []ParameterDescriptor{ + { + Name: "Content-Length", + Type: "integer", + Description: "Length of the JSON response body.", + Format: "", + }, + }, + Body: BodyDescriptor{ + ContentType: "application/json; charset=utf-8", + Format: errorsBody, + }, + ErrorCodes: []errcode.ErrorCode{ + ErrorCodeNameUnknown, + }, + } + + deniedResponseDescriptor = ResponseDescriptor{ + Name: "Access Denied", + StatusCode: http.StatusForbidden, + Description: "The client does not have required access to the repository.", + Headers: []ParameterDescriptor{ + { + Name: "Content-Length", + Type: "integer", + Description: "Length of the JSON response body.", + Format: "", + }, + }, + Body: BodyDescriptor{ + ContentType: "application/json; charset=utf-8", + Format: errorsBody, + }, + ErrorCodes: []errcode.ErrorCode{ + errcode.ErrorCodeDenied, + }, + } + + tooManyRequestsDescriptor = ResponseDescriptor{ + Name: "Too Many Requests", + StatusCode: http.StatusTooManyRequests, + 
Description: "The client made too many requests within a time interval.", + Headers: []ParameterDescriptor{ + { + Name: "Content-Length", + Type: "integer", + Description: "Length of the JSON response body.", + Format: "", + }, + }, + Body: BodyDescriptor{ + ContentType: "application/json; charset=utf-8", + Format: errorsBody, + }, + ErrorCodes: []errcode.ErrorCode{ + errcode.ErrorCodeTooManyRequests, + }, + } +) + +const ( + manifestBody = `{ + "name": , + "tag": , + "fsLayers": [ + { + "blobSum": "" + }, + ... + ] + ], + "history": , + "signature": +}` + + errorsBody = `{ + "errors:" [ + { + "code": , + "message": "", + "detail": ... + }, + ... + ] +}` +) + +// APIDescriptor exports descriptions of the layout of the v2 registry API. +var APIDescriptor = struct { + // RouteDescriptors provides a list of the routes available in the API. + RouteDescriptors []RouteDescriptor +}{ + RouteDescriptors: routeDescriptors, +} + +// RouteDescriptor describes a route specified by name. +type RouteDescriptor struct { + // Name is the name of the route, as specified in RouteNameXXX exports. + // These names a should be considered a unique reference for a route. If + // the route is registered with gorilla, this is the name that will be + // used. + Name string + + // Path is a gorilla/mux-compatible regexp that can be used to match the + // route. For any incoming method and path, only one route descriptor + // should match. + Path string + + // Entity should be a short, human-readalbe description of the object + // targeted by the endpoint. + Entity string + + // Description should provide an accurate overview of the functionality + // provided by the route. + Description string + + // Methods should describe the various HTTP methods that may be used on + // this route, including request and response formats. + Methods []MethodDescriptor +} + +// MethodDescriptor provides a description of the requests that may be +// conducted with the target method. +type MethodDescriptor struct { + + // Method is an HTTP method, such as GET, PUT or POST. + Method string + + // Description should provide an overview of the functionality provided by + // the covered method, suitable for use in documentation. Use of markdown + // here is encouraged. + Description string + + // Requests is a slice of request descriptors enumerating how this + // endpoint may be used. + Requests []RequestDescriptor +} + +// RequestDescriptor covers a particular set of headers and parameters that +// can be carried out with the parent method. Its most helpful to have one +// RequestDescriptor per API use case. +type RequestDescriptor struct { + // Name provides a short identifier for the request, usable as a title or + // to provide quick context for the particular request. + Name string + + // Description should cover the requests purpose, covering any details for + // this particular use case. + Description string + + // Headers describes headers that must be used with the HTTP request. + Headers []ParameterDescriptor + + // PathParameters enumerate the parameterized path components for the + // given request, as defined in the route's regular expression. + PathParameters []ParameterDescriptor + + // QueryParameters provides a list of query parameters for the given + // request. + QueryParameters []ParameterDescriptor + + // Body describes the format of the request body. + Body BodyDescriptor + + // Successes enumerates the possible responses that are considered to be + // the result of a successful request. 
+ Successes []ResponseDescriptor + + // Failures covers the possible failures from this particular request. + Failures []ResponseDescriptor +} + +// ResponseDescriptor describes the components of an API response. +type ResponseDescriptor struct { + // Name provides a short identifier for the response, usable as a title or + // to provide quick context for the particular response. + Name string + + // Description should provide a brief overview of the role of the + // response. + Description string + + // StatusCode specifies the status received by this particular response. + StatusCode int + + // Headers covers any headers that may be returned from the response. + Headers []ParameterDescriptor + + // Fields describes any fields that may be present in the response. + Fields []ParameterDescriptor + + // ErrorCodes enumerates the error codes that may be returned along with + // the response. + ErrorCodes []errcode.ErrorCode + + // Body describes the body of the response, if any. + Body BodyDescriptor +} + +// BodyDescriptor describes a request body and its expected content type. For +// the most part, it should be example json or some placeholder for body +// data in documentation. +type BodyDescriptor struct { + ContentType string + Format string +} + +// ParameterDescriptor describes the format of a request parameter, which may +// be a header, path parameter or query parameter. +type ParameterDescriptor struct { + // Name is the name of the parameter, either of the path component or + // query parameter. + Name string + + // Type specifies the type of the parameter, such as string, integer, etc. + Type string + + // Description provides a human-readable description of the parameter. + Description string + + // Required means the field is required when set. + Required bool + + // Format is a specifying the string format accepted by this parameter. + Format string + + // Regexp is a compiled regular expression that can be used to validate + // the contents of the parameter. + Regexp *regexp.Regexp + + // Examples provides multiple examples for the values that might be valid + // for this parameter. + Examples []string +} + +var routeDescriptors = []RouteDescriptor{ + { + Name: RouteNameBase, + Path: "/v2/", + Entity: "Base", + Description: `Base V2 API route. 
Typically, this can be used for lightweight version checks and to validate registry authentication.`, + Methods: []MethodDescriptor{ + { + Method: "GET", + Description: "Check that the endpoint implements Docker Registry API V2.", + Requests: []RequestDescriptor{ + { + Headers: []ParameterDescriptor{ + hostHeader, + authHeader, + }, + Successes: []ResponseDescriptor{ + { + Description: "The API implements V2 protocol and is accessible.", + StatusCode: http.StatusOK, + }, + }, + Failures: []ResponseDescriptor{ + { + Description: "The registry does not implement the V2 API.", + StatusCode: http.StatusNotFound, + }, + unauthorizedResponseDescriptor, + tooManyRequestsDescriptor, + }, + }, + }, + }, + }, + }, + { + Name: RouteNameTags, + Path: "/v2/{name:" + reference.NameRegexp.String() + "}/tags/list", + Entity: "Tags", + Description: "Retrieve information about tags.", + Methods: []MethodDescriptor{ + { + Method: "GET", + Description: "Fetch the tags under the repository identified by `name`.", + Requests: []RequestDescriptor{ + { + Name: "Tags", + Description: "Return all tags for the repository", + Headers: []ParameterDescriptor{ + hostHeader, + authHeader, + }, + PathParameters: []ParameterDescriptor{ + nameParameterDescriptor, + }, + Successes: []ResponseDescriptor{ + { + StatusCode: http.StatusOK, + Description: "A list of tags for the named repository.", + Headers: []ParameterDescriptor{ + { + Name: "Content-Length", + Type: "integer", + Description: "Length of the JSON response body.", + Format: "", + }, + }, + Body: BodyDescriptor{ + ContentType: "application/json; charset=utf-8", + Format: `{ + "name": , + "tags": [ + , + ... + ] +}`, + }, + }, + }, + Failures: []ResponseDescriptor{ + unauthorizedResponseDescriptor, + repositoryNotFoundResponseDescriptor, + deniedResponseDescriptor, + tooManyRequestsDescriptor, + }, + }, + { + Name: "Tags Paginated", + Description: "Return a portion of the tags for the specified repository.", + PathParameters: []ParameterDescriptor{nameParameterDescriptor}, + QueryParameters: paginationParameters, + Successes: []ResponseDescriptor{ + { + StatusCode: http.StatusOK, + Description: "A list of tags for the named repository.", + Headers: []ParameterDescriptor{ + { + Name: "Content-Length", + Type: "integer", + Description: "Length of the JSON response body.", + Format: "", + }, + linkHeader, + }, + Body: BodyDescriptor{ + ContentType: "application/json; charset=utf-8", + Format: `{ + "name": , + "tags": [ + , + ... + ], +}`, + }, + }, + }, + Failures: []ResponseDescriptor{ + unauthorizedResponseDescriptor, + repositoryNotFoundResponseDescriptor, + deniedResponseDescriptor, + tooManyRequestsDescriptor, + }, + }, + }, + }, + }, + }, + { + Name: RouteNameManifest, + Path: "/v2/{name:" + reference.NameRegexp.String() + "}/manifests/{reference:" + reference.TagRegexp.String() + "|" + digest.DigestRegexp.String() + "}", + Entity: "Manifest", + Description: "Create, update, delete and retrieve manifests.", + Methods: []MethodDescriptor{ + { + Method: "GET", + Description: "Fetch the manifest identified by `name` and `reference` where `reference` can be a tag or digest. 
A `HEAD` request can also be issued to this endpoint to obtain resource information without receiving all data.", + Requests: []RequestDescriptor{ + { + Headers: []ParameterDescriptor{ + hostHeader, + authHeader, + }, + PathParameters: []ParameterDescriptor{ + nameParameterDescriptor, + referenceParameterDescriptor, + }, + Successes: []ResponseDescriptor{ + { + Description: "The manifest identified by `name` and `reference`. The contents can be used to identify and resolve resources required to run the specified image.", + StatusCode: http.StatusOK, + Headers: []ParameterDescriptor{ + digestHeader, + }, + Body: BodyDescriptor{ + ContentType: "", + Format: manifestBody, + }, + }, + }, + Failures: []ResponseDescriptor{ + { + Description: "The name or reference was invalid.", + StatusCode: http.StatusBadRequest, + ErrorCodes: []errcode.ErrorCode{ + ErrorCodeNameInvalid, + ErrorCodeTagInvalid, + }, + Body: BodyDescriptor{ + ContentType: "application/json; charset=utf-8", + Format: errorsBody, + }, + }, + unauthorizedResponseDescriptor, + repositoryNotFoundResponseDescriptor, + deniedResponseDescriptor, + tooManyRequestsDescriptor, + }, + }, + }, + }, + { + Method: "PUT", + Description: "Put the manifest identified by `name` and `reference` where `reference` can be a tag or digest.", + Requests: []RequestDescriptor{ + { + Headers: []ParameterDescriptor{ + hostHeader, + authHeader, + }, + PathParameters: []ParameterDescriptor{ + nameParameterDescriptor, + referenceParameterDescriptor, + }, + Body: BodyDescriptor{ + ContentType: "", + Format: manifestBody, + }, + Successes: []ResponseDescriptor{ + { + Description: "The manifest has been accepted by the registry and is stored under the specified `name` and `tag`.", + StatusCode: http.StatusCreated, + Headers: []ParameterDescriptor{ + { + Name: "Location", + Type: "url", + Description: "The canonical location url of the uploaded manifest.", + Format: "", + }, + contentLengthZeroHeader, + digestHeader, + }, + }, + }, + Failures: []ResponseDescriptor{ + { + Name: "Invalid Manifest", + Description: "The received manifest was invalid in some way, as described by the error codes. The client should resolve the issue and retry the request.", + StatusCode: http.StatusBadRequest, + Body: BodyDescriptor{ + ContentType: "application/json; charset=utf-8", + Format: errorsBody, + }, + ErrorCodes: []errcode.ErrorCode{ + ErrorCodeNameInvalid, + ErrorCodeTagInvalid, + ErrorCodeManifestInvalid, + ErrorCodeManifestUnverified, + ErrorCodeBlobUnknown, + }, + }, + unauthorizedResponseDescriptor, + repositoryNotFoundResponseDescriptor, + deniedResponseDescriptor, + tooManyRequestsDescriptor, + { + Name: "Missing Layer(s)", + Description: "One or more layers may be missing during a manifest upload. If so, the missing layers will be enumerated in the error response.", + StatusCode: http.StatusBadRequest, + ErrorCodes: []errcode.ErrorCode{ + ErrorCodeBlobUnknown, + }, + Body: BodyDescriptor{ + ContentType: "application/json; charset=utf-8", + Format: `{ + "errors:" [{ + "code": "BLOB_UNKNOWN", + "message": "blob unknown to registry", + "detail": { + "digest": "" + } + }, + ... 
+ ] +}`, + }, + }, + { + Name: "Not allowed", + Description: "Manifest put is not allowed because the registry is configured as a pull-through cache or for some other reason", + StatusCode: http.StatusMethodNotAllowed, + ErrorCodes: []errcode.ErrorCode{ + errcode.ErrorCodeUnsupported, + }, + }, + }, + }, + }, + }, + { + Method: "DELETE", + Description: "Delete the manifest identified by `name` and `reference`. Note that a manifest can _only_ be deleted by `digest`.", + Requests: []RequestDescriptor{ + { + Headers: []ParameterDescriptor{ + hostHeader, + authHeader, + }, + PathParameters: []ParameterDescriptor{ + nameParameterDescriptor, + referenceParameterDescriptor, + }, + Successes: []ResponseDescriptor{ + { + StatusCode: http.StatusAccepted, + }, + }, + Failures: []ResponseDescriptor{ + { + Name: "Invalid Name or Reference", + Description: "The specified `name` or `reference` were invalid and the delete was unable to proceed.", + StatusCode: http.StatusBadRequest, + ErrorCodes: []errcode.ErrorCode{ + ErrorCodeNameInvalid, + ErrorCodeTagInvalid, + }, + Body: BodyDescriptor{ + ContentType: "application/json; charset=utf-8", + Format: errorsBody, + }, + }, + unauthorizedResponseDescriptor, + repositoryNotFoundResponseDescriptor, + deniedResponseDescriptor, + tooManyRequestsDescriptor, + { + Name: "Unknown Manifest", + Description: "The specified `name` or `reference` are unknown to the registry and the delete was unable to proceed. Clients can assume the manifest was already deleted if this response is returned.", + StatusCode: http.StatusNotFound, + ErrorCodes: []errcode.ErrorCode{ + ErrorCodeNameUnknown, + ErrorCodeManifestUnknown, + }, + Body: BodyDescriptor{ + ContentType: "application/json; charset=utf-8", + Format: errorsBody, + }, + }, + { + Name: "Not allowed", + Description: "Manifest delete is not allowed because the registry is configured as a pull-through cache or `delete` has been disabled.", + StatusCode: http.StatusMethodNotAllowed, + ErrorCodes: []errcode.ErrorCode{ + errcode.ErrorCodeUnsupported, + }, + }, + }, + }, + }, + }, + }, + }, + + { + Name: RouteNameBlob, + Path: "/v2/{name:" + reference.NameRegexp.String() + "}/blobs/{digest:" + digest.DigestRegexp.String() + "}", + Entity: "Blob", + Description: "Operations on blobs identified by `name` and `digest`. Used to fetch or delete layers by digest.", + Methods: []MethodDescriptor{ + { + Method: "GET", + Description: "Retrieve the blob from the registry identified by `digest`. A `HEAD` request can also be issued to this endpoint to obtain resource information without receiving all data.", + Requests: []RequestDescriptor{ + { + Name: "Fetch Blob", + Headers: []ParameterDescriptor{ + hostHeader, + authHeader, + }, + PathParameters: []ParameterDescriptor{ + nameParameterDescriptor, + digestPathParameter, + }, + Successes: []ResponseDescriptor{ + { + Description: "The blob identified by `digest` is available. 
The blob content will be present in the body of the request.", + StatusCode: http.StatusOK, + Headers: []ParameterDescriptor{ + { + Name: "Content-Length", + Type: "integer", + Description: "The length of the requested blob content.", + Format: "", + }, + digestHeader, + }, + Body: BodyDescriptor{ + ContentType: "application/octet-stream", + Format: "", + }, + }, + { + Description: "The blob identified by `digest` is available at the provided location.", + StatusCode: http.StatusTemporaryRedirect, + Headers: []ParameterDescriptor{ + { + Name: "Location", + Type: "url", + Description: "The location where the layer should be accessible.", + Format: "", + }, + digestHeader, + }, + }, + }, + Failures: []ResponseDescriptor{ + { + Description: "There was a problem with the request that needs to be addressed by the client, such as an invalid `name` or `tag`.", + StatusCode: http.StatusBadRequest, + ErrorCodes: []errcode.ErrorCode{ + ErrorCodeNameInvalid, + ErrorCodeDigestInvalid, + }, + Body: BodyDescriptor{ + ContentType: "application/json; charset=utf-8", + Format: errorsBody, + }, + }, + { + Description: "The blob, identified by `name` and `digest`, is unknown to the registry.", + StatusCode: http.StatusNotFound, + Body: BodyDescriptor{ + ContentType: "application/json; charset=utf-8", + Format: errorsBody, + }, + ErrorCodes: []errcode.ErrorCode{ + ErrorCodeNameUnknown, + ErrorCodeBlobUnknown, + }, + }, + unauthorizedResponseDescriptor, + repositoryNotFoundResponseDescriptor, + deniedResponseDescriptor, + tooManyRequestsDescriptor, + }, + }, + { + Name: "Fetch Blob Part", + Description: "This endpoint may also support RFC7233 compliant range requests. Support can be detected by issuing a HEAD request. If the header `Accept-Range: bytes` is returned, range requests can be used to fetch partial content.", + Headers: []ParameterDescriptor{ + hostHeader, + authHeader, + { + Name: "Range", + Type: "string", + Description: "HTTP Range header specifying blob chunk.", + Format: "bytes=-", + }, + }, + PathParameters: []ParameterDescriptor{ + nameParameterDescriptor, + digestPathParameter, + }, + Successes: []ResponseDescriptor{ + { + Description: "The blob identified by `digest` is available. The specified chunk of blob content will be present in the body of the request.", + StatusCode: http.StatusPartialContent, + Headers: []ParameterDescriptor{ + { + Name: "Content-Length", + Type: "integer", + Description: "The length of the requested blob chunk.", + Format: "", + }, + { + Name: "Content-Range", + Type: "byte range", + Description: "Content range of blob chunk.", + Format: "bytes -/", + }, + }, + Body: BodyDescriptor{ + ContentType: "application/octet-stream", + Format: "", + }, + }, + }, + Failures: []ResponseDescriptor{ + { + Description: "There was a problem with the request that needs to be addressed by the client, such as an invalid `name` or `tag`.", + StatusCode: http.StatusBadRequest, + ErrorCodes: []errcode.ErrorCode{ + ErrorCodeNameInvalid, + ErrorCodeDigestInvalid, + }, + Body: BodyDescriptor{ + ContentType: "application/json; charset=utf-8", + Format: errorsBody, + }, + }, + { + StatusCode: http.StatusNotFound, + ErrorCodes: []errcode.ErrorCode{ + ErrorCodeNameUnknown, + ErrorCodeBlobUnknown, + }, + Body: BodyDescriptor{ + ContentType: "application/json; charset=utf-8", + Format: errorsBody, + }, + }, + { + Description: "The range specification cannot be satisfied for the requested content. 
This can happen when the range is not formatted correctly or if the range is outside of the valid size of the content.", + StatusCode: http.StatusRequestedRangeNotSatisfiable, + }, + unauthorizedResponseDescriptor, + repositoryNotFoundResponseDescriptor, + deniedResponseDescriptor, + tooManyRequestsDescriptor, + }, + }, + }, + }, + { + Method: "DELETE", + Description: "Delete the blob identified by `name` and `digest`", + Requests: []RequestDescriptor{ + { + Headers: []ParameterDescriptor{ + hostHeader, + authHeader, + }, + PathParameters: []ParameterDescriptor{ + nameParameterDescriptor, + digestPathParameter, + }, + Successes: []ResponseDescriptor{ + { + StatusCode: http.StatusAccepted, + Headers: []ParameterDescriptor{ + { + Name: "Content-Length", + Type: "integer", + Description: "0", + Format: "0", + }, + digestHeader, + }, + }, + }, + Failures: []ResponseDescriptor{ + { + Name: "Invalid Name or Digest", + StatusCode: http.StatusBadRequest, + ErrorCodes: []errcode.ErrorCode{ + ErrorCodeDigestInvalid, + ErrorCodeNameInvalid, + }, + }, + { + Description: "The blob, identified by `name` and `digest`, is unknown to the registry.", + StatusCode: http.StatusNotFound, + Body: BodyDescriptor{ + ContentType: "application/json; charset=utf-8", + Format: errorsBody, + }, + ErrorCodes: []errcode.ErrorCode{ + ErrorCodeNameUnknown, + ErrorCodeBlobUnknown, + }, + }, + { + Description: "Blob delete is not allowed because the registry is configured as a pull-through cache or `delete` has been disabled", + StatusCode: http.StatusMethodNotAllowed, + Body: BodyDescriptor{ + ContentType: "application/json; charset=utf-8", + Format: errorsBody, + }, + ErrorCodes: []errcode.ErrorCode{ + errcode.ErrorCodeUnsupported, + }, + }, + unauthorizedResponseDescriptor, + repositoryNotFoundResponseDescriptor, + deniedResponseDescriptor, + tooManyRequestsDescriptor, + }, + }, + }, + }, + + // TODO(stevvooe): We may want to add a PUT request here to + // kickoff an upload of a blob, integrated with the blob upload + // API. + }, + }, + + { + Name: RouteNameBlobUpload, + Path: "/v2/{name:" + reference.NameRegexp.String() + "}/blobs/uploads/", + Entity: "Initiate Blob Upload", + Description: "Initiate a blob upload. This endpoint can be used to create resumable uploads or monolithic uploads.", + Methods: []MethodDescriptor{ + { + Method: "POST", + Description: "Initiate a resumable blob upload. If successful, an upload location will be provided to complete the upload. Optionally, if the `digest` parameter is present, the request body will be used to complete the upload in a single request.", + Requests: []RequestDescriptor{ + { + Name: "Initiate Monolithic Blob Upload", + Description: "Upload a blob identified by the `digest` parameter in single request. This upload will not be resumable unless a recoverable error is returned.", + Headers: []ParameterDescriptor{ + hostHeader, + authHeader, + { + Name: "Content-Length", + Type: "integer", + Format: "", + }, + }, + PathParameters: []ParameterDescriptor{ + nameParameterDescriptor, + }, + QueryParameters: []ParameterDescriptor{ + { + Name: "digest", + Type: "query", + Format: "", + Regexp: digest.DigestRegexp, + Description: `Digest of uploaded blob. 
If present, the upload will be completed, in a single request, with contents of the request body as the resulting blob.`, + }, + }, + Body: BodyDescriptor{ + ContentType: "application/octect-stream", + Format: "", + }, + Successes: []ResponseDescriptor{ + { + Description: "The blob has been created in the registry and is available at the provided location.", + StatusCode: http.StatusCreated, + Headers: []ParameterDescriptor{ + { + Name: "Location", + Type: "url", + Format: "", + }, + contentLengthZeroHeader, + dockerUploadUUIDHeader, + }, + }, + }, + Failures: []ResponseDescriptor{ + { + Name: "Invalid Name or Digest", + StatusCode: http.StatusBadRequest, + ErrorCodes: []errcode.ErrorCode{ + ErrorCodeDigestInvalid, + ErrorCodeNameInvalid, + }, + }, + { + Name: "Not allowed", + Description: "Blob upload is not allowed because the registry is configured as a pull-through cache or for some other reason", + StatusCode: http.StatusMethodNotAllowed, + ErrorCodes: []errcode.ErrorCode{ + errcode.ErrorCodeUnsupported, + }, + }, + unauthorizedResponseDescriptor, + repositoryNotFoundResponseDescriptor, + deniedResponseDescriptor, + tooManyRequestsDescriptor, + }, + }, + { + Name: "Initiate Resumable Blob Upload", + Description: "Initiate a resumable blob upload with an empty request body.", + Headers: []ParameterDescriptor{ + hostHeader, + authHeader, + contentLengthZeroHeader, + }, + PathParameters: []ParameterDescriptor{ + nameParameterDescriptor, + }, + Successes: []ResponseDescriptor{ + { + Description: "The upload has been created. The `Location` header must be used to complete the upload. The response should be identical to a `GET` request on the contents of the returned `Location` header.", + StatusCode: http.StatusAccepted, + Headers: []ParameterDescriptor{ + contentLengthZeroHeader, + { + Name: "Location", + Type: "url", + Format: "/v2//blobs/uploads/", + Description: "The location of the created upload. Clients should use the contents verbatim to complete the upload, adding parameters where required.", + }, + { + Name: "Range", + Format: "0-0", + Description: "Range header indicating the progress of the upload. 
When starting an upload, it will return an empty range, since no content has been received.", + }, + dockerUploadUUIDHeader, + }, + }, + }, + Failures: []ResponseDescriptor{ + { + Name: "Invalid Name or Digest", + StatusCode: http.StatusBadRequest, + ErrorCodes: []errcode.ErrorCode{ + ErrorCodeDigestInvalid, + ErrorCodeNameInvalid, + }, + }, + unauthorizedResponseDescriptor, + repositoryNotFoundResponseDescriptor, + deniedResponseDescriptor, + tooManyRequestsDescriptor, + }, + }, + { + Name: "Mount Blob", + Description: "Mount a blob identified by the `mount` parameter from another repository.", + Headers: []ParameterDescriptor{ + hostHeader, + authHeader, + contentLengthZeroHeader, + }, + PathParameters: []ParameterDescriptor{ + nameParameterDescriptor, + }, + QueryParameters: []ParameterDescriptor{ + { + Name: "mount", + Type: "query", + Format: "", + Regexp: digest.DigestRegexp, + Description: `Digest of blob to mount from the source repository.`, + }, + { + Name: "from", + Type: "query", + Format: "", + Regexp: reference.NameRegexp, + Description: `Name of the source repository.`, + }, + }, + Successes: []ResponseDescriptor{ + { + Description: "The blob has been mounted in the repository and is available at the provided location.", + StatusCode: http.StatusCreated, + Headers: []ParameterDescriptor{ + { + Name: "Location", + Type: "url", + Format: "", + }, + contentLengthZeroHeader, + dockerUploadUUIDHeader, + }, + }, + }, + Failures: []ResponseDescriptor{ + { + Name: "Invalid Name or Digest", + StatusCode: http.StatusBadRequest, + ErrorCodes: []errcode.ErrorCode{ + ErrorCodeDigestInvalid, + ErrorCodeNameInvalid, + }, + }, + { + Name: "Not allowed", + Description: "Blob mount is not allowed because the registry is configured as a pull-through cache or for some other reason", + StatusCode: http.StatusMethodNotAllowed, + ErrorCodes: []errcode.ErrorCode{ + errcode.ErrorCodeUnsupported, + }, + }, + unauthorizedResponseDescriptor, + repositoryNotFoundResponseDescriptor, + deniedResponseDescriptor, + tooManyRequestsDescriptor, + }, + }, + }, + }, + }, + }, + + { + Name: RouteNameBlobUploadChunk, + Path: "/v2/{name:" + reference.NameRegexp.String() + "}/blobs/uploads/{uuid:[a-zA-Z0-9-_.=]+}", + Entity: "Blob Upload", + Description: "Interact with blob uploads. Clients should never assemble URLs for this endpoint and should only take it through the `Location` header on related API requests. The `Location` header and its parameters should be preserved by clients, using the latest value returned via upload related API calls.", + Methods: []MethodDescriptor{ + { + Method: "GET", + Description: "Retrieve status of upload identified by `uuid`. The primary purpose of this endpoint is to resolve the current status of a resumable upload.", + Requests: []RequestDescriptor{ + { + Description: "Retrieve the progress of the current upload, as reported by the `Range` header.", + Headers: []ParameterDescriptor{ + hostHeader, + authHeader, + }, + PathParameters: []ParameterDescriptor{ + nameParameterDescriptor, + uuidParameterDescriptor, + }, + Successes: []ResponseDescriptor{ + { + Name: "Upload Progress", + Description: "The upload is known and in progress. 
The last received offset is available in the `Range` header.", + StatusCode: http.StatusNoContent, + Headers: []ParameterDescriptor{ + { + Name: "Range", + Type: "header", + Format: "0-", + Description: "Range indicating the current progress of the upload.", + }, + contentLengthZeroHeader, + dockerUploadUUIDHeader, + }, + }, + }, + Failures: []ResponseDescriptor{ + { + Description: "There was an error processing the upload and it must be restarted.", + StatusCode: http.StatusBadRequest, + ErrorCodes: []errcode.ErrorCode{ + ErrorCodeDigestInvalid, + ErrorCodeNameInvalid, + ErrorCodeBlobUploadInvalid, + }, + Body: BodyDescriptor{ + ContentType: "application/json; charset=utf-8", + Format: errorsBody, + }, + }, + { + Description: "The upload is unknown to the registry. The upload must be restarted.", + StatusCode: http.StatusNotFound, + ErrorCodes: []errcode.ErrorCode{ + ErrorCodeBlobUploadUnknown, + }, + Body: BodyDescriptor{ + ContentType: "application/json; charset=utf-8", + Format: errorsBody, + }, + }, + unauthorizedResponseDescriptor, + repositoryNotFoundResponseDescriptor, + deniedResponseDescriptor, + tooManyRequestsDescriptor, + }, + }, + }, + }, + { + Method: "PATCH", + Description: "Upload a chunk of data for the specified upload.", + Requests: []RequestDescriptor{ + { + Name: "Stream upload", + Description: "Upload a stream of data to upload without completing the upload.", + PathParameters: []ParameterDescriptor{ + nameParameterDescriptor, + uuidParameterDescriptor, + }, + Headers: []ParameterDescriptor{ + hostHeader, + authHeader, + }, + Body: BodyDescriptor{ + ContentType: "application/octet-stream", + Format: "", + }, + Successes: []ResponseDescriptor{ + { + Name: "Data Accepted", + Description: "The stream of data has been accepted and the current progress is available in the range header. The updated upload location is available in the `Location` header.", + StatusCode: http.StatusNoContent, + Headers: []ParameterDescriptor{ + { + Name: "Location", + Type: "url", + Format: "/v2//blobs/uploads/", + Description: "The location of the upload. Clients should assume this changes after each request. Clients should use the contents verbatim to complete the upload, adding parameters where required.", + }, + { + Name: "Range", + Type: "header", + Format: "0-", + Description: "Range indicating the current progress of the upload.", + }, + contentLengthZeroHeader, + dockerUploadUUIDHeader, + }, + }, + }, + Failures: []ResponseDescriptor{ + { + Description: "There was an error processing the upload and it must be restarted.", + StatusCode: http.StatusBadRequest, + ErrorCodes: []errcode.ErrorCode{ + ErrorCodeDigestInvalid, + ErrorCodeNameInvalid, + ErrorCodeBlobUploadInvalid, + }, + Body: BodyDescriptor{ + ContentType: "application/json; charset=utf-8", + Format: errorsBody, + }, + }, + { + Description: "The upload is unknown to the registry. The upload must be restarted.", + StatusCode: http.StatusNotFound, + ErrorCodes: []errcode.ErrorCode{ + ErrorCodeBlobUploadUnknown, + }, + Body: BodyDescriptor{ + ContentType: "application/json; charset=utf-8", + Format: errorsBody, + }, + }, + unauthorizedResponseDescriptor, + repositoryNotFoundResponseDescriptor, + deniedResponseDescriptor, + tooManyRequestsDescriptor, + }, + }, + { + Name: "Chunked upload", + Description: "Upload a chunk of data to specified upload without completing the upload. 
The data will be uploaded to the specified Content Range.", + PathParameters: []ParameterDescriptor{ + nameParameterDescriptor, + uuidParameterDescriptor, + }, + Headers: []ParameterDescriptor{ + hostHeader, + authHeader, + { + Name: "Content-Range", + Type: "header", + Format: "-", + Required: true, + Description: "Range of bytes identifying the desired block of content represented by the body. Start must the end offset retrieved via status check plus one. Note that this is a non-standard use of the `Content-Range` header.", + }, + { + Name: "Content-Length", + Type: "integer", + Format: "", + Description: "Length of the chunk being uploaded, corresponding the length of the request body.", + }, + }, + Body: BodyDescriptor{ + ContentType: "application/octet-stream", + Format: "", + }, + Successes: []ResponseDescriptor{ + { + Name: "Chunk Accepted", + Description: "The chunk of data has been accepted and the current progress is available in the range header. The updated upload location is available in the `Location` header.", + StatusCode: http.StatusNoContent, + Headers: []ParameterDescriptor{ + { + Name: "Location", + Type: "url", + Format: "/v2//blobs/uploads/", + Description: "The location of the upload. Clients should assume this changes after each request. Clients should use the contents verbatim to complete the upload, adding parameters where required.", + }, + { + Name: "Range", + Type: "header", + Format: "0-", + Description: "Range indicating the current progress of the upload.", + }, + contentLengthZeroHeader, + dockerUploadUUIDHeader, + }, + }, + }, + Failures: []ResponseDescriptor{ + { + Description: "There was an error processing the upload and it must be restarted.", + StatusCode: http.StatusBadRequest, + ErrorCodes: []errcode.ErrorCode{ + ErrorCodeDigestInvalid, + ErrorCodeNameInvalid, + ErrorCodeBlobUploadInvalid, + }, + Body: BodyDescriptor{ + ContentType: "application/json; charset=utf-8", + Format: errorsBody, + }, + }, + { + Description: "The upload is unknown to the registry. The upload must be restarted.", + StatusCode: http.StatusNotFound, + ErrorCodes: []errcode.ErrorCode{ + ErrorCodeBlobUploadUnknown, + }, + Body: BodyDescriptor{ + ContentType: "application/json; charset=utf-8", + Format: errorsBody, + }, + }, + { + Description: "The `Content-Range` specification cannot be accepted, either because it does not overlap with the current progress or it is invalid.", + StatusCode: http.StatusRequestedRangeNotSatisfiable, + }, + unauthorizedResponseDescriptor, + repositoryNotFoundResponseDescriptor, + deniedResponseDescriptor, + tooManyRequestsDescriptor, + }, + }, + }, + }, + { + Method: "PUT", + Description: "Complete the upload specified by `uuid`, optionally appending the body as the final chunk.", + Requests: []RequestDescriptor{ + { + Description: "Complete the upload, providing all the data in the body, if necessary. A request without a body will just complete the upload with previously uploaded content.", + Headers: []ParameterDescriptor{ + hostHeader, + authHeader, + { + Name: "Content-Length", + Type: "integer", + Format: "", + Description: "Length of the data being uploaded, corresponding to the length of the request body. 
May be zero if no data is provided.", + }, + }, + PathParameters: []ParameterDescriptor{ + nameParameterDescriptor, + uuidParameterDescriptor, + }, + QueryParameters: []ParameterDescriptor{ + { + Name: "digest", + Type: "string", + Format: "", + Regexp: digest.DigestRegexp, + Required: true, + Description: `Digest of uploaded blob.`, + }, + }, + Body: BodyDescriptor{ + ContentType: "application/octet-stream", + Format: "", + }, + Successes: []ResponseDescriptor{ + { + Name: "Upload Complete", + Description: "The upload has been completed and accepted by the registry. The canonical location will be available in the `Location` header.", + StatusCode: http.StatusNoContent, + Headers: []ParameterDescriptor{ + { + Name: "Location", + Type: "url", + Format: "", + Description: "The canonical location of the blob for retrieval", + }, + { + Name: "Content-Range", + Type: "header", + Format: "-", + Description: "Range of bytes identifying the desired block of content represented by the body. Start must match the end of offset retrieved via status check. Note that this is a non-standard use of the `Content-Range` header.", + }, + contentLengthZeroHeader, + digestHeader, + }, + }, + }, + Failures: []ResponseDescriptor{ + { + Description: "There was an error processing the upload and it must be restarted.", + StatusCode: http.StatusBadRequest, + ErrorCodes: []errcode.ErrorCode{ + ErrorCodeDigestInvalid, + ErrorCodeNameInvalid, + ErrorCodeBlobUploadInvalid, + errcode.ErrorCodeUnsupported, + }, + Body: BodyDescriptor{ + ContentType: "application/json; charset=utf-8", + Format: errorsBody, + }, + }, + { + Description: "The upload is unknown to the registry. The upload must be restarted.", + StatusCode: http.StatusNotFound, + ErrorCodes: []errcode.ErrorCode{ + ErrorCodeBlobUploadUnknown, + }, + Body: BodyDescriptor{ + ContentType: "application/json; charset=utf-8", + Format: errorsBody, + }, + }, + unauthorizedResponseDescriptor, + repositoryNotFoundResponseDescriptor, + deniedResponseDescriptor, + tooManyRequestsDescriptor, + }, + }, + }, + }, + { + Method: "DELETE", + Description: "Cancel outstanding upload processes, releasing associated resources. If this is not called, the unfinished uploads will eventually timeout.", + Requests: []RequestDescriptor{ + { + Description: "Cancel the upload specified by `uuid`.", + PathParameters: []ParameterDescriptor{ + nameParameterDescriptor, + uuidParameterDescriptor, + }, + Headers: []ParameterDescriptor{ + hostHeader, + authHeader, + contentLengthZeroHeader, + }, + Successes: []ResponseDescriptor{ + { + Name: "Upload Deleted", + Description: "The upload has been successfully deleted.", + StatusCode: http.StatusNoContent, + Headers: []ParameterDescriptor{ + contentLengthZeroHeader, + }, + }, + }, + Failures: []ResponseDescriptor{ + { + Description: "An error was encountered processing the delete. The client may ignore this error.", + StatusCode: http.StatusBadRequest, + ErrorCodes: []errcode.ErrorCode{ + ErrorCodeNameInvalid, + ErrorCodeBlobUploadInvalid, + }, + Body: BodyDescriptor{ + ContentType: "application/json; charset=utf-8", + Format: errorsBody, + }, + }, + { + Description: "The upload is unknown to the registry. 
The client may ignore this error and assume the upload has been deleted.", + StatusCode: http.StatusNotFound, + ErrorCodes: []errcode.ErrorCode{ + ErrorCodeBlobUploadUnknown, + }, + Body: BodyDescriptor{ + ContentType: "application/json; charset=utf-8", + Format: errorsBody, + }, + }, + unauthorizedResponseDescriptor, + repositoryNotFoundResponseDescriptor, + deniedResponseDescriptor, + tooManyRequestsDescriptor, + }, + }, + }, + }, + }, + }, + { + Name: RouteNameCatalog, + Path: "/v2/_catalog", + Entity: "Catalog", + Description: "List a set of available repositories in the local registry cluster. Does not provide any indication of what may be available upstream. Applications can only determine if a repository is available but not if it is not available.", + Methods: []MethodDescriptor{ + { + Method: "GET", + Description: "Retrieve a sorted, json list of repositories available in the registry.", + Requests: []RequestDescriptor{ + { + Name: "Catalog Fetch", + Description: "Request an unabridged list of repositories available. The implementation may impose a maximum limit and return a partial set with pagination links.", + Successes: []ResponseDescriptor{ + { + Description: "Returns the unabridged list of repositories as a json response.", + StatusCode: http.StatusOK, + Headers: []ParameterDescriptor{ + { + Name: "Content-Length", + Type: "integer", + Description: "Length of the JSON response body.", + Format: "", + }, + }, + Body: BodyDescriptor{ + ContentType: "application/json; charset=utf-8", + Format: `{ + "repositories": [ + , + ... + ] +}`, + }, + }, + }, + }, + { + Name: "Catalog Fetch Paginated", + Description: "Return the specified portion of repositories.", + QueryParameters: paginationParameters, + Successes: []ResponseDescriptor{ + { + StatusCode: http.StatusOK, + Body: BodyDescriptor{ + ContentType: "application/json; charset=utf-8", + Format: `{ + "repositories": [ + , + ... + ] + "next": "?last=&n=" +}`, + }, + Headers: []ParameterDescriptor{ + { + Name: "Content-Length", + Type: "integer", + Description: "Length of the JSON response body.", + Format: "", + }, + linkHeader, + }, + }, + }, + }, + }, + }, + }, + }, +} + +var routeDescriptorsMap map[string]RouteDescriptor + +func init() { + routeDescriptorsMap = make(map[string]RouteDescriptor, len(routeDescriptors)) + + for _, descriptor := range routeDescriptors { + routeDescriptorsMap[descriptor.Name] = descriptor + } +} diff --git a/vendor/github.com/docker/distribution/registry/api/v2/doc.go b/vendor/github.com/docker/distribution/registry/api/v2/doc.go new file mode 100644 index 000000000..cde011959 --- /dev/null +++ b/vendor/github.com/docker/distribution/registry/api/v2/doc.go @@ -0,0 +1,9 @@ +// Package v2 describes routes, urls and the error codes used in the Docker +// Registry JSON HTTP API V2. In addition to declarations, descriptors are +// provided for routes and error codes that can be used for implementation and +// automatically generating documentation. +// +// Definitions here are considered to be locked down for the V2 registry api. +// Any changes must be considered carefully and should not proceed without a +// change proposal in docker core. 
+package v2 diff --git a/vendor/github.com/docker/distribution/registry/api/v2/errors.go b/vendor/github.com/docker/distribution/registry/api/v2/errors.go new file mode 100644 index 000000000..97d6923aa --- /dev/null +++ b/vendor/github.com/docker/distribution/registry/api/v2/errors.go @@ -0,0 +1,136 @@ +package v2 + +import ( + "net/http" + + "github.com/docker/distribution/registry/api/errcode" +) + +const errGroup = "registry.api.v2" + +var ( + // ErrorCodeDigestInvalid is returned when uploading a blob if the + // provided digest does not match the blob contents. + ErrorCodeDigestInvalid = errcode.Register(errGroup, errcode.ErrorDescriptor{ + Value: "DIGEST_INVALID", + Message: "provided digest did not match uploaded content", + Description: `When a blob is uploaded, the registry will check that + the content matches the digest provided by the client. The error may + include a detail structure with the key "digest", including the + invalid digest string. This error may also be returned when a manifest + includes an invalid layer digest.`, + HTTPStatusCode: http.StatusBadRequest, + }) + + // ErrorCodeSizeInvalid is returned when uploading a blob if the provided + ErrorCodeSizeInvalid = errcode.Register(errGroup, errcode.ErrorDescriptor{ + Value: "SIZE_INVALID", + Message: "provided length did not match content length", + Description: `When a layer is uploaded, the provided size will be + checked against the uploaded content. If they do not match, this error + will be returned.`, + HTTPStatusCode: http.StatusBadRequest, + }) + + // ErrorCodeNameInvalid is returned when the name in the manifest does not + // match the provided name. + ErrorCodeNameInvalid = errcode.Register(errGroup, errcode.ErrorDescriptor{ + Value: "NAME_INVALID", + Message: "invalid repository name", + Description: `Invalid repository name encountered either during + manifest validation or any API operation.`, + HTTPStatusCode: http.StatusBadRequest, + }) + + // ErrorCodeTagInvalid is returned when the tag in the manifest does not + // match the provided tag. + ErrorCodeTagInvalid = errcode.Register(errGroup, errcode.ErrorDescriptor{ + Value: "TAG_INVALID", + Message: "manifest tag did not match URI", + Description: `During a manifest upload, if the tag in the manifest + does not match the uri tag, this error will be returned.`, + HTTPStatusCode: http.StatusBadRequest, + }) + + // ErrorCodeNameUnknown when the repository name is not known. + ErrorCodeNameUnknown = errcode.Register(errGroup, errcode.ErrorDescriptor{ + Value: "NAME_UNKNOWN", + Message: "repository name not known to registry", + Description: `This is returned if the name used during an operation is + unknown to the registry.`, + HTTPStatusCode: http.StatusNotFound, + }) + + // ErrorCodeManifestUnknown returned when image manifest is unknown. + ErrorCodeManifestUnknown = errcode.Register(errGroup, errcode.ErrorDescriptor{ + Value: "MANIFEST_UNKNOWN", + Message: "manifest unknown", + Description: `This error is returned when the manifest, identified by + name and tag is unknown to the repository.`, + HTTPStatusCode: http.StatusNotFound, + }) + + // ErrorCodeManifestInvalid returned when an image manifest is invalid, + // typically during a PUT operation. This error encompasses all errors + // encountered during manifest validation that aren't signature errors. 
+ ErrorCodeManifestInvalid = errcode.Register(errGroup, errcode.ErrorDescriptor{ + Value: "MANIFEST_INVALID", + Message: "manifest invalid", + Description: `During upload, manifests undergo several checks ensuring + validity. If those checks fail, this error may be returned, unless a + more specific error is included. The detail will contain information + the failed validation.`, + HTTPStatusCode: http.StatusBadRequest, + }) + + // ErrorCodeManifestUnverified is returned when the manifest fails + // signature verification. + ErrorCodeManifestUnverified = errcode.Register(errGroup, errcode.ErrorDescriptor{ + Value: "MANIFEST_UNVERIFIED", + Message: "manifest failed signature verification", + Description: `During manifest upload, if the manifest fails signature + verification, this error will be returned.`, + HTTPStatusCode: http.StatusBadRequest, + }) + + // ErrorCodeManifestBlobUnknown is returned when a manifest blob is + // unknown to the registry. + ErrorCodeManifestBlobUnknown = errcode.Register(errGroup, errcode.ErrorDescriptor{ + Value: "MANIFEST_BLOB_UNKNOWN", + Message: "blob unknown to registry", + Description: `This error may be returned when a manifest blob is + unknown to the registry.`, + HTTPStatusCode: http.StatusBadRequest, + }) + + // ErrorCodeBlobUnknown is returned when a blob is unknown to the + // registry. This can happen when the manifest references a nonexistent + // layer or the result is not found by a blob fetch. + ErrorCodeBlobUnknown = errcode.Register(errGroup, errcode.ErrorDescriptor{ + Value: "BLOB_UNKNOWN", + Message: "blob unknown to registry", + Description: `This error may be returned when a blob is unknown to the + registry in a specified repository. This can be returned with a + standard get or if a manifest references an unknown layer during + upload.`, + HTTPStatusCode: http.StatusNotFound, + }) + + // ErrorCodeBlobUploadUnknown is returned when an upload is unknown. + ErrorCodeBlobUploadUnknown = errcode.Register(errGroup, errcode.ErrorDescriptor{ + Value: "BLOB_UPLOAD_UNKNOWN", + Message: "blob upload unknown to registry", + Description: `If a blob upload has been cancelled or was never + started, this error code may be returned.`, + HTTPStatusCode: http.StatusNotFound, + }) + + // ErrorCodeBlobUploadInvalid is returned when an upload is invalid. + ErrorCodeBlobUploadInvalid = errcode.Register(errGroup, errcode.ErrorDescriptor{ + Value: "BLOB_UPLOAD_INVALID", + Message: "blob upload invalid", + Description: `The blob upload encountered an error and can no + longer proceed.`, + HTTPStatusCode: http.StatusNotFound, + }) +) diff --git a/vendor/github.com/docker/distribution/registry/api/v2/headerparser.go b/vendor/github.com/docker/distribution/registry/api/v2/headerparser.go new file mode 100644 index 000000000..9bc41a3a6 --- /dev/null +++ b/vendor/github.com/docker/distribution/registry/api/v2/headerparser.go @@ -0,0 +1,161 @@ +package v2 + +import ( + "fmt" + "regexp" + "strings" + "unicode" +) + +var ( + // according to rfc7230 + reToken = regexp.MustCompile(`^[^"(),/:;<=>?@[\]{}[:space:][:cntrl:]]+`) + reQuotedValue = regexp.MustCompile(`^[^\\"]+`) + reEscapedCharacter = regexp.MustCompile(`^[[:blank:][:graph:]]`) +) + +// parseForwardedHeader is a benevolent parser of Forwarded header defined in rfc7239. The header contains +// a comma-separated list of forwarding key-value pairs. Each list element is set by single proxy. The +// function parses only the first element of the list, which is set by the very first proxy. 
It returns a map +// of corresponding key-value pairs and an unparsed slice of the input string. +// +// Examples of Forwarded header values: +// +// 1. Forwarded: For=192.0.2.43; Proto=https,For="[2001:db8:cafe::17]",For=unknown +// 2. Forwarded: for="192.0.2.43:443"; host="registry.example.org", for="10.10.05.40:80" +// +// The first will be parsed into {"for": "192.0.2.43", "proto": "https"} while the second into +// {"for": "192.0.2.43:443", "host": "registry.example.org"}. +func parseForwardedHeader(forwarded string) (map[string]string, string, error) { + // Following are states of forwarded header parser. Any state could transition to a failure. + const ( + // terminating state; can transition to Parameter + stateElement = iota + // terminating state; can transition to KeyValueDelimiter + stateParameter + // can transition to Value + stateKeyValueDelimiter + // can transition to one of { QuotedValue, PairEnd } + stateValue + // can transition to one of { EscapedCharacter, PairEnd } + stateQuotedValue + // can transition to one of { QuotedValue } + stateEscapedCharacter + // terminating state; can transition to one of { Parameter, Element } + statePairEnd + ) + + var ( + parameter string + value string + parse = forwarded[:] + res = map[string]string{} + state = stateElement + ) + +Loop: + for { + // skip spaces unless in quoted value + if state != stateQuotedValue && state != stateEscapedCharacter { + parse = strings.TrimLeftFunc(parse, unicode.IsSpace) + } + + if len(parse) == 0 { + if state != stateElement && state != statePairEnd && state != stateParameter { + return nil, parse, fmt.Errorf("unexpected end of input") + } + // terminating + break + } + + switch state { + // terminate at list element delimiter + case stateElement: + if parse[0] == ',' { + parse = parse[1:] + break Loop + } + state = stateParameter + + // parse parameter (the key of key-value pair) + case stateParameter: + match := reToken.FindString(parse) + if len(match) == 0 { + return nil, parse, fmt.Errorf("failed to parse token at position %d", len(forwarded)-len(parse)) + } + parameter = strings.ToLower(match) + parse = parse[len(match):] + state = stateKeyValueDelimiter + + // parse '=' + case stateKeyValueDelimiter: + if parse[0] != '=' { + return nil, parse, fmt.Errorf("expected '=', not '%c' at position %d", parse[0], len(forwarded)-len(parse)) + } + parse = parse[1:] + state = stateValue + + // parse value or quoted value + case stateValue: + if parse[0] == '"' { + parse = parse[1:] + state = stateQuotedValue + } else { + value = reToken.FindString(parse) + if len(value) == 0 { + return nil, parse, fmt.Errorf("failed to parse value at position %d", len(forwarded)-len(parse)) + } + if _, exists := res[parameter]; exists { + return nil, parse, fmt.Errorf("duplicate parameter %q at position %d", parameter, len(forwarded)-len(parse)) + } + res[parameter] = value + parse = parse[len(value):] + value = "" + state = statePairEnd + } + + // parse a part of quoted value until the first backslash + case stateQuotedValue: + match := reQuotedValue.FindString(parse) + value += match + parse = parse[len(match):] + switch { + case len(parse) == 0: + return nil, parse, fmt.Errorf("unterminated quoted string") + case parse[0] == '"': + res[parameter] = value + value = "" + parse = parse[1:] + state = statePairEnd + case parse[0] == '\\': + parse = parse[1:] + state = stateEscapedCharacter + } + + // parse escaped character in a quoted string, ignore the backslash + // transition back to QuotedValue state + case 
stateEscapedCharacter: + c := reEscapedCharacter.FindString(parse) + if len(c) == 0 { + return nil, parse, fmt.Errorf("invalid escape sequence at position %d", len(forwarded)-len(parse)-1) + } + value += c + parse = parse[1:] + state = stateQuotedValue + + // expect either a new key-value pair, new list or end of input + case statePairEnd: + switch parse[0] { + case ';': + parse = parse[1:] + state = stateParameter + case ',': + state = stateElement + default: + return nil, parse, fmt.Errorf("expected ',' or ';', not %c at position %d", parse[0], len(forwarded)-len(parse)) + } + } + } + + return res, parse, nil +} diff --git a/vendor/github.com/docker/distribution/registry/api/v2/headerparser_test.go b/vendor/github.com/docker/distribution/registry/api/v2/headerparser_test.go new file mode 100644 index 000000000..b8c37490d --- /dev/null +++ b/vendor/github.com/docker/distribution/registry/api/v2/headerparser_test.go @@ -0,0 +1,161 @@ +package v2 + +import ( + "testing" +) + +func TestParseForwardedHeader(t *testing.T) { + for _, tc := range []struct { + name string + raw string + expected map[string]string + expectedRest string + expectedError bool + }{ + { + name: "empty", + raw: "", + }, + { + name: "one pair", + raw: " key = value ", + expected: map[string]string{"key": "value"}, + }, + { + name: "two pairs", + raw: " key1 = value1; key2=value2", + expected: map[string]string{"key1": "value1", "key2": "value2"}, + }, + { + name: "uppercase parameter", + raw: "KeY=VaL", + expected: map[string]string{"key": "VaL"}, + }, + { + name: "missing key=value pair - be tolerant", + raw: "key=val;", + expected: map[string]string{"key": "val"}, + }, + { + name: "quoted values", + raw: `key="val";param = "[[ $((1 + 1)) == 3 ]] && echo panic!;" ; p=" abcd "`, + expected: map[string]string{"key": "val", "param": "[[ $((1 + 1)) == 3 ]] && echo panic!;", "p": " abcd "}, + }, + { + name: "empty quoted value", + raw: `key=""`, + expected: map[string]string{"key": ""}, + }, + { + name: "quoted double quotes", + raw: `key="\"value\""`, + expected: map[string]string{"key": `"value"`}, + }, + { + name: "quoted backslash", + raw: `key="\"\\\""`, + expected: map[string]string{"key": `"\"`}, + }, + { + name: "ignore subsequent elements", + raw: "key=a, param= b", + expected: map[string]string{"key": "a"}, + expectedRest: " param= b", + }, + { + name: "empty element - be tolerant", + raw: " , key=val", + expectedRest: " key=val", + }, + { + name: "obscure key", + raw: `ob₷C&r€ = value`, + expected: map[string]string{`ob₷c&r€`: "value"}, + }, + { + name: "duplicate parameter", + raw: "key=a; p=b; key=c", + expectedError: true, + }, + { + name: "empty parameter", + raw: "=value", + expectedError: true, + }, + { + name: "empty value", + raw: "key= ", + expectedError: true, + }, + { + name: "empty value before a new element ", + raw: "key=,", + expectedError: true, + }, + { + name: "empty value before a new pair", + raw: "key=;", + expectedError: true, + }, + { + name: "just parameter", + raw: "key", + expectedError: true, + }, + { + name: "missing key-value", + raw: "a=b;;", + expectedError: true, + }, + { + name: "unclosed quoted value", + raw: `key="value`, + expectedError: true, + }, + { + name: "escaped terminating dquote", + raw: `key="value\"`, + expectedError: true, + }, + { + name: "just a quoted value", + raw: `"key=val"`, + expectedError: true, + }, + { + name: "quoted key", + raw: `"key"=val`, + expectedError: true, + }, + } { + parsed, rest, err := parseForwardedHeader(tc.raw) + if err != nil && 
!tc.expectedError { + t.Errorf("[%s] got unexpected error: %v", tc.name, err) + } + if err == nil && tc.expectedError { + t.Errorf("[%s] got unexpected non-error", tc.name) + } + if err != nil || tc.expectedError { + continue + } + for key, value := range tc.expected { + v, exists := parsed[key] + if !exists { + t.Errorf("[%s] missing expected parameter %q", tc.name, key) + continue + } + if v != value { + t.Errorf("[%s] got unexpected value for parameter %q: %q != %q", tc.name, key, v, value) + } + } + for key, value := range parsed { + if _, exists := tc.expected[key]; !exists { + t.Errorf("[%s] got unexpected key/value pair: %q=%q", tc.name, key, value) + } + } + + if rest != tc.expectedRest { + t.Errorf("[%s] got unexpected unparsed string: %q != %q", tc.name, rest, tc.expectedRest) + } + } +} diff --git a/vendor/github.com/docker/distribution/registry/api/v2/routes.go b/vendor/github.com/docker/distribution/registry/api/v2/routes.go new file mode 100644 index 000000000..5b80d5be7 --- /dev/null +++ b/vendor/github.com/docker/distribution/registry/api/v2/routes.go @@ -0,0 +1,49 @@ +package v2 + +import "github.com/gorilla/mux" + +// The following are definitions of the name under which all V2 routes are +// registered. These symbols can be used to look up a route based on the name. +const ( + RouteNameBase = "base" + RouteNameManifest = "manifest" + RouteNameTags = "tags" + RouteNameBlob = "blob" + RouteNameBlobUpload = "blob-upload" + RouteNameBlobUploadChunk = "blob-upload-chunk" + RouteNameCatalog = "catalog" +) + +var allEndpoints = []string{ + RouteNameManifest, + RouteNameCatalog, + RouteNameTags, + RouteNameBlob, + RouteNameBlobUpload, + RouteNameBlobUploadChunk, +} + +// Router builds a gorilla router with named routes for the various API +// methods. This can be used directly by both server implementations and +// clients. +func Router() *mux.Router { + return RouterWithPrefix("") +} + +// RouterWithPrefix builds a gorilla router with a configured prefix +// on all routes. +func RouterWithPrefix(prefix string) *mux.Router { + rootRouter := mux.NewRouter() + router := rootRouter + if prefix != "" { + router = router.PathPrefix(prefix).Subrouter() + } + + router.StrictSlash(true) + + for _, descriptor := range routeDescriptors { + router.Path(descriptor.Path).Name(descriptor.Name) + } + + return rootRouter +} diff --git a/vendor/github.com/docker/distribution/registry/api/v2/routes_test.go b/vendor/github.com/docker/distribution/registry/api/v2/routes_test.go new file mode 100644 index 000000000..f632d981c --- /dev/null +++ b/vendor/github.com/docker/distribution/registry/api/v2/routes_test.go @@ -0,0 +1,355 @@ +package v2 + +import ( + "encoding/json" + "fmt" + "math/rand" + "net/http" + "net/http/httptest" + "reflect" + "strings" + "testing" + "time" + + "github.com/gorilla/mux" +) + +type routeTestCase struct { + RequestURI string + ExpectedURI string + Vars map[string]string + RouteName string + StatusCode int +} + +// TestRouter registers a test handler with all the routes and ensures that +// each route returns the expected path variables. Not method verification is +// present. This not meant to be exhaustive but as check to ensure that the +// expected variables are extracted. +// +// This may go away as the application structure comes together. 
+func TestRouter(t *testing.T) { + testCases := []routeTestCase{ + { + RouteName: RouteNameBase, + RequestURI: "/v2/", + Vars: map[string]string{}, + }, + { + RouteName: RouteNameManifest, + RequestURI: "/v2/foo/manifests/bar", + Vars: map[string]string{ + "name": "foo", + "reference": "bar", + }, + }, + { + RouteName: RouteNameManifest, + RequestURI: "/v2/foo/bar/manifests/tag", + Vars: map[string]string{ + "name": "foo/bar", + "reference": "tag", + }, + }, + { + RouteName: RouteNameManifest, + RequestURI: "/v2/foo/bar/manifests/sha256:abcdef01234567890", + Vars: map[string]string{ + "name": "foo/bar", + "reference": "sha256:abcdef01234567890", + }, + }, + { + RouteName: RouteNameTags, + RequestURI: "/v2/foo/bar/tags/list", + Vars: map[string]string{ + "name": "foo/bar", + }, + }, + { + RouteName: RouteNameTags, + RequestURI: "/v2/docker.com/foo/tags/list", + Vars: map[string]string{ + "name": "docker.com/foo", + }, + }, + { + RouteName: RouteNameTags, + RequestURI: "/v2/docker.com/foo/bar/tags/list", + Vars: map[string]string{ + "name": "docker.com/foo/bar", + }, + }, + { + RouteName: RouteNameTags, + RequestURI: "/v2/docker.com/foo/bar/baz/tags/list", + Vars: map[string]string{ + "name": "docker.com/foo/bar/baz", + }, + }, + { + RouteName: RouteNameBlob, + RequestURI: "/v2/foo/bar/blobs/sha256:abcdef0919234", + Vars: map[string]string{ + "name": "foo/bar", + "digest": "sha256:abcdef0919234", + }, + }, + { + RouteName: RouteNameBlobUpload, + RequestURI: "/v2/foo/bar/blobs/uploads/", + Vars: map[string]string{ + "name": "foo/bar", + }, + }, + { + RouteName: RouteNameBlobUploadChunk, + RequestURI: "/v2/foo/bar/blobs/uploads/uuid", + Vars: map[string]string{ + "name": "foo/bar", + "uuid": "uuid", + }, + }, + { + // support uuid proper + RouteName: RouteNameBlobUploadChunk, + RequestURI: "/v2/foo/bar/blobs/uploads/D95306FA-FAD3-4E36-8D41-CF1C93EF8286", + Vars: map[string]string{ + "name": "foo/bar", + "uuid": "D95306FA-FAD3-4E36-8D41-CF1C93EF8286", + }, + }, + { + RouteName: RouteNameBlobUploadChunk, + RequestURI: "/v2/foo/bar/blobs/uploads/RDk1MzA2RkEtRkFEMy00RTM2LThENDEtQ0YxQzkzRUY4Mjg2IA==", + Vars: map[string]string{ + "name": "foo/bar", + "uuid": "RDk1MzA2RkEtRkFEMy00RTM2LThENDEtQ0YxQzkzRUY4Mjg2IA==", + }, + }, + { + // supports urlsafe base64 + RouteName: RouteNameBlobUploadChunk, + RequestURI: "/v2/foo/bar/blobs/uploads/RDk1MzA2RkEtRkFEMy00RTM2LThENDEtQ0YxQzkzRUY4Mjg2IA_-==", + Vars: map[string]string{ + "name": "foo/bar", + "uuid": "RDk1MzA2RkEtRkFEMy00RTM2LThENDEtQ0YxQzkzRUY4Mjg2IA_-==", + }, + }, + { + // does not match + RouteName: RouteNameBlobUploadChunk, + RequestURI: "/v2/foo/bar/blobs/uploads/totalandcompletejunk++$$-==", + StatusCode: http.StatusNotFound, + }, + { + // Check ambiguity: ensure we can distinguish between tags for + // "foo/bar/image/image" and image for "foo/bar/image" with tag + // "tags" + RouteName: RouteNameManifest, + RequestURI: "/v2/foo/bar/manifests/manifests/tags", + Vars: map[string]string{ + "name": "foo/bar/manifests", + "reference": "tags", + }, + }, + { + // This case presents an ambiguity between foo/bar with tag="tags" + // and list tags for "foo/bar/manifest" + RouteName: RouteNameTags, + RequestURI: "/v2/foo/bar/manifests/tags/list", + Vars: map[string]string{ + "name": "foo/bar/manifests", + }, + }, + { + RouteName: RouteNameManifest, + RequestURI: "/v2/locahost:8080/foo/bar/baz/manifests/tag", + Vars: map[string]string{ + "name": "locahost:8080/foo/bar/baz", + "reference": "tag", + }, + }, + } + + checkTestRouter(t, testCases, "", true) + 
checkTestRouter(t, testCases, "/prefix/", true) +} + +func TestRouterWithPathTraversals(t *testing.T) { + testCases := []routeTestCase{ + { + RouteName: RouteNameBlobUploadChunk, + RequestURI: "/v2/foo/../../blob/uploads/D95306FA-FAD3-4E36-8D41-CF1C93EF8286", + ExpectedURI: "/blob/uploads/D95306FA-FAD3-4E36-8D41-CF1C93EF8286", + StatusCode: http.StatusNotFound, + }, + { + // Testing for path traversal attack handling + RouteName: RouteNameTags, + RequestURI: "/v2/foo/../bar/baz/tags/list", + ExpectedURI: "/v2/bar/baz/tags/list", + Vars: map[string]string{ + "name": "bar/baz", + }, + }, + } + checkTestRouter(t, testCases, "", false) +} + +func TestRouterWithBadCharacters(t *testing.T) { + if testing.Short() { + testCases := []routeTestCase{ + { + RouteName: RouteNameBlobUploadChunk, + RequestURI: "/v2/foo/blob/uploads/不95306FA-FAD3-4E36-8D41-CF1C93EF8286", + StatusCode: http.StatusNotFound, + }, + { + // Testing for path traversal attack handling + RouteName: RouteNameTags, + RequestURI: "/v2/foo/不bar/tags/list", + StatusCode: http.StatusNotFound, + }, + } + checkTestRouter(t, testCases, "", true) + } else { + // in the long version we're going to fuzz the router + // with random UTF8 characters not in the 128 bit ASCII range. + // These are not valid characters for the router and we expect + // 404s on every test. + rand.Seed(time.Now().UTC().UnixNano()) + testCases := make([]routeTestCase, 1000) + for idx := range testCases { + testCases[idx] = routeTestCase{ + RouteName: RouteNameTags, + RequestURI: fmt.Sprintf("/v2/%v/%v/tags/list", randomString(10), randomString(10)), + StatusCode: http.StatusNotFound, + } + } + checkTestRouter(t, testCases, "", true) + } +} + +func checkTestRouter(t *testing.T, testCases []routeTestCase, prefix string, deeplyEqual bool) { + router := RouterWithPrefix(prefix) + + testHandler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + testCase := routeTestCase{ + RequestURI: r.RequestURI, + Vars: mux.Vars(r), + RouteName: mux.CurrentRoute(r).GetName(), + } + + enc := json.NewEncoder(w) + + if err := enc.Encode(testCase); err != nil { + http.Error(w, err.Error(), http.StatusInternalServerError) + return + } + }) + + // Startup test server + server := httptest.NewServer(router) + + for _, testcase := range testCases { + testcase.RequestURI = strings.TrimSuffix(prefix, "/") + testcase.RequestURI + // Register the endpoint + route := router.GetRoute(testcase.RouteName) + if route == nil { + t.Fatalf("route for name %q not found", testcase.RouteName) + } + + route.Handler(testHandler) + + u := server.URL + testcase.RequestURI + + resp, err := http.Get(u) + + if err != nil { + t.Fatalf("error issuing get request: %v", err) + } + + if testcase.StatusCode == 0 { + // Override default, zero-value + testcase.StatusCode = http.StatusOK + } + if testcase.ExpectedURI == "" { + // Override default, zero-value + testcase.ExpectedURI = testcase.RequestURI + } + + if resp.StatusCode != testcase.StatusCode { + t.Fatalf("unexpected status for %s: %v %v", u, resp.Status, resp.StatusCode) + } + + if testcase.StatusCode != http.StatusOK { + resp.Body.Close() + // We don't care about json response. 
+ continue + } + + dec := json.NewDecoder(resp.Body) + + var actualRouteInfo routeTestCase + if err := dec.Decode(&actualRouteInfo); err != nil { + t.Fatalf("error reading json response: %v", err) + } + // Needs to be set out of band + actualRouteInfo.StatusCode = resp.StatusCode + + if actualRouteInfo.RequestURI != testcase.ExpectedURI { + t.Fatalf("URI %v incorrectly parsed, expected %v", actualRouteInfo.RequestURI, testcase.ExpectedURI) + } + + if actualRouteInfo.RouteName != testcase.RouteName { + t.Fatalf("incorrect route %q matched, expected %q", actualRouteInfo.RouteName, testcase.RouteName) + } + + // when testing deep equality, the actualRouteInfo has an empty ExpectedURI, we don't want + // that to make the comparison fail. We're otherwise done with the testcase so empty the + // testcase.ExpectedURI + testcase.ExpectedURI = "" + if deeplyEqual && !reflect.DeepEqual(actualRouteInfo, testcase) { + t.Fatalf("actual does not equal expected: %#v != %#v", actualRouteInfo, testcase) + } + + resp.Body.Close() + } + +} + +// -------------- START LICENSED CODE -------------- +// The following code is derivative of https://github.com/google/gofuzz +// gofuzz is licensed under the Apache License, Version 2.0, January 2004, +// a copy of which can be found in the LICENSE file at the root of this +// repository. + +// These functions allow us to generate strings containing only multibyte +// characters that are invalid in our URLs. They are used above for fuzzing +// to ensure we always get 404s on these invalid strings +type charRange struct { + first, last rune +} + +// choose returns a random unicode character from the given range, using the +// given randomness source. +func (r *charRange) choose() rune { + count := int64(r.last - r.first) + return r.first + rune(rand.Int63n(count)) +} + +var unicodeRanges = []charRange{ + {'\u00a0', '\u02af'}, // Multi-byte encoded characters + {'\u4e00', '\u9fff'}, // Common CJK (even longer encodings) +} + +func randomString(length int) string { + runes := make([]rune, length) + for i := range runes { + runes[i] = unicodeRanges[rand.Intn(len(unicodeRanges))].choose() + } + return string(runes) +} + +// -------------- END LICENSED CODE -------------- diff --git a/vendor/github.com/docker/distribution/registry/api/v2/urls.go b/vendor/github.com/docker/distribution/registry/api/v2/urls.go new file mode 100644 index 000000000..5e24ca9b5 --- /dev/null +++ b/vendor/github.com/docker/distribution/registry/api/v2/urls.go @@ -0,0 +1,263 @@ +package v2 + +import ( + "net/http" + "net/url" + "strings" + + "github.com/docker/distribution/reference" + "github.com/gorilla/mux" +) + +// URLBuilder creates registry API urls from a single base endpoint. It can be +// used to create urls for use in a registry client or server. +// +// All urls will be created from the given base, including the api version. +// For example, if a root of "/foo/" is provided, urls generated will be fall +// under "/foo/v2/...". Most application will only provide a schema, host and +// port, such as "https://localhost:5000/". +type URLBuilder struct { + root *url.URL // url root (ie http://localhost/) + router *mux.Router + relative bool +} + +// NewURLBuilder creates a URLBuilder with provided root url object. 
+func NewURLBuilder(root *url.URL, relative bool) *URLBuilder { + return &URLBuilder{ + root: root, + router: Router(), + relative: relative, + } +} + +// NewURLBuilderFromString workes identically to NewURLBuilder except it takes +// a string argument for the root, returning an error if it is not a valid +// url. +func NewURLBuilderFromString(root string, relative bool) (*URLBuilder, error) { + u, err := url.Parse(root) + if err != nil { + return nil, err + } + + return NewURLBuilder(u, relative), nil +} + +// NewURLBuilderFromRequest uses information from an *http.Request to +// construct the root url. +func NewURLBuilderFromRequest(r *http.Request, relative bool) *URLBuilder { + var ( + scheme = "http" + host = r.Host + ) + + if r.TLS != nil { + scheme = "https" + } else if len(r.URL.Scheme) > 0 { + scheme = r.URL.Scheme + } + + // Handle fowarded headers + // Prefer "Forwarded" header as defined by rfc7239 if given + // see https://tools.ietf.org/html/rfc7239 + if forwarded := r.Header.Get("Forwarded"); len(forwarded) > 0 { + forwardedHeader, _, err := parseForwardedHeader(forwarded) + if err == nil { + if fproto := forwardedHeader["proto"]; len(fproto) > 0 { + scheme = fproto + } + if fhost := forwardedHeader["host"]; len(fhost) > 0 { + host = fhost + } + } + } else { + if forwardedProto := r.Header.Get("X-Forwarded-Proto"); len(forwardedProto) > 0 { + scheme = forwardedProto + } + if forwardedHost := r.Header.Get("X-Forwarded-Host"); len(forwardedHost) > 0 { + // According to the Apache mod_proxy docs, X-Forwarded-Host can be a + // comma-separated list of hosts, to which each proxy appends the + // requested host. We want to grab the first from this comma-separated + // list. + hosts := strings.SplitN(forwardedHost, ",", 2) + host = strings.TrimSpace(hosts[0]) + } + } + + basePath := routeDescriptorsMap[RouteNameBase].Path + + requestPath := r.URL.Path + index := strings.Index(requestPath, basePath) + + u := &url.URL{ + Scheme: scheme, + Host: host, + } + + if index > 0 { + // N.B. index+1 is important because we want to include the trailing / + u.Path = requestPath[0 : index+1] + } + + return NewURLBuilder(u, relative) +} + +// BuildBaseURL constructs a base url for the API, typically just "/v2/". +func (ub *URLBuilder) BuildBaseURL() (string, error) { + route := ub.cloneRoute(RouteNameBase) + + baseURL, err := route.URL() + if err != nil { + return "", err + } + + return baseURL.String(), nil +} + +// BuildCatalogURL constructs a url get a catalog of repositories +func (ub *URLBuilder) BuildCatalogURL(values ...url.Values) (string, error) { + route := ub.cloneRoute(RouteNameCatalog) + + catalogURL, err := route.URL() + if err != nil { + return "", err + } + + return appendValuesURL(catalogURL, values...).String(), nil +} + +// BuildTagsURL constructs a url to list the tags in the named repository. +func (ub *URLBuilder) BuildTagsURL(name reference.Named) (string, error) { + route := ub.cloneRoute(RouteNameTags) + + tagsURL, err := route.URL("name", name.Name()) + if err != nil { + return "", err + } + + return tagsURL.String(), nil +} + +// BuildManifestURL constructs a url for the manifest identified by name and +// reference. The argument reference may be either a tag or digest. 
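// Editor's note: an illustrative sketch, not part of the vendored package. It
// shows the URL-building helpers above used against a fixed registry root; the
// registry host, repository name and tag are assumptions for the example.
package main

import (
    "fmt"
    "log"

    "github.com/docker/distribution/reference"
    v2 "github.com/docker/distribution/registry/api/v2"
)

func main() {
    ub, err := v2.NewURLBuilderFromString("https://registry.example.com/", false)
    if err != nil {
        log.Fatal(err)
    }
    named, _ := reference.ParseNamed("library/alpine")
    tagged, _ := reference.WithTag(named, "3.6")

    tagsURL, _ := ub.BuildTagsURL(named)          // https://registry.example.com/v2/library/alpine/tags/list
    manifestURL, _ := ub.BuildManifestURL(tagged) // https://registry.example.com/v2/library/alpine/manifests/3.6
    fmt.Println(tagsURL)
    fmt.Println(manifestURL)
}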
+func (ub *URLBuilder) BuildManifestURL(ref reference.Named) (string, error) { + route := ub.cloneRoute(RouteNameManifest) + + tagOrDigest := "" + switch v := ref.(type) { + case reference.Tagged: + tagOrDigest = v.Tag() + case reference.Digested: + tagOrDigest = v.Digest().String() + } + + manifestURL, err := route.URL("name", ref.Name(), "reference", tagOrDigest) + if err != nil { + return "", err + } + + return manifestURL.String(), nil +} + +// BuildBlobURL constructs the url for the blob identified by name and dgst. +func (ub *URLBuilder) BuildBlobURL(ref reference.Canonical) (string, error) { + route := ub.cloneRoute(RouteNameBlob) + + layerURL, err := route.URL("name", ref.Name(), "digest", ref.Digest().String()) + if err != nil { + return "", err + } + + return layerURL.String(), nil +} + +// BuildBlobUploadURL constructs a url to begin a blob upload in the +// repository identified by name. +func (ub *URLBuilder) BuildBlobUploadURL(name reference.Named, values ...url.Values) (string, error) { + route := ub.cloneRoute(RouteNameBlobUpload) + + uploadURL, err := route.URL("name", name.Name()) + if err != nil { + return "", err + } + + return appendValuesURL(uploadURL, values...).String(), nil +} + +// BuildBlobUploadChunkURL constructs a url for the upload identified by uuid, +// including any url values. This should generally not be used by clients, as +// this url is provided by server implementations during the blob upload +// process. +func (ub *URLBuilder) BuildBlobUploadChunkURL(name reference.Named, uuid string, values ...url.Values) (string, error) { + route := ub.cloneRoute(RouteNameBlobUploadChunk) + + uploadURL, err := route.URL("name", name.Name(), "uuid", uuid) + if err != nil { + return "", err + } + + return appendValuesURL(uploadURL, values...).String(), nil +} + +// clondedRoute returns a clone of the named route from the router. Routes +// must be cloned to avoid modifying them during url generation. +func (ub *URLBuilder) cloneRoute(name string) clonedRoute { + route := new(mux.Route) + root := new(url.URL) + + *route = *ub.router.GetRoute(name) // clone the route + *root = *ub.root + + return clonedRoute{Route: route, root: root, relative: ub.relative} +} + +type clonedRoute struct { + *mux.Route + root *url.URL + relative bool +} + +func (cr clonedRoute) URL(pairs ...string) (*url.URL, error) { + routeURL, err := cr.Route.URL(pairs...) + if err != nil { + return nil, err + } + + if cr.relative { + return routeURL, nil + } + + if routeURL.Scheme == "" && routeURL.User == nil && routeURL.Host == "" { + routeURL.Path = routeURL.Path[1:] + } + + url := cr.root.ResolveReference(routeURL) + url.Scheme = cr.root.Scheme + return url, nil +} + +// appendValuesURL appends the parameters to the url. +func appendValuesURL(u *url.URL, values ...url.Values) *url.URL { + merged := u.Query() + + for _, v := range values { + for k, vv := range v { + merged[k] = append(merged[k], vv...) + } + } + + u.RawQuery = merged.Encode() + return u +} + +// appendValues appends the parameters to the url. Panics if the string is not +// a url. 
+func appendValues(u string, values ...url.Values) string { + up, err := url.Parse(u) + + if err != nil { + panic(err) // should never happen + } + + return appendValuesURL(up, values...).String() +} diff --git a/vendor/github.com/docker/distribution/registry/api/v2/urls_test.go b/vendor/github.com/docker/distribution/registry/api/v2/urls_test.go new file mode 100644 index 000000000..6d8973fad --- /dev/null +++ b/vendor/github.com/docker/distribution/registry/api/v2/urls_test.go @@ -0,0 +1,484 @@ +package v2 + +import ( + "net/http" + "net/url" + "testing" + + "github.com/docker/distribution/reference" +) + +type urlBuilderTestCase struct { + description string + expectedPath string + build func() (string, error) +} + +func makeURLBuilderTestCases(urlBuilder *URLBuilder) []urlBuilderTestCase { + fooBarRef, _ := reference.ParseNamed("foo/bar") + return []urlBuilderTestCase{ + { + description: "test base url", + expectedPath: "/v2/", + build: urlBuilder.BuildBaseURL, + }, + { + description: "test tags url", + expectedPath: "/v2/foo/bar/tags/list", + build: func() (string, error) { + return urlBuilder.BuildTagsURL(fooBarRef) + }, + }, + { + description: "test manifest url", + expectedPath: "/v2/foo/bar/manifests/tag", + build: func() (string, error) { + ref, _ := reference.WithTag(fooBarRef, "tag") + return urlBuilder.BuildManifestURL(ref) + }, + }, + { + description: "build blob url", + expectedPath: "/v2/foo/bar/blobs/sha256:3b3692957d439ac1928219a83fac91e7bf96c153725526874673ae1f2023f8d5", + build: func() (string, error) { + ref, _ := reference.WithDigest(fooBarRef, "sha256:3b3692957d439ac1928219a83fac91e7bf96c153725526874673ae1f2023f8d5") + return urlBuilder.BuildBlobURL(ref) + }, + }, + { + description: "build blob upload url", + expectedPath: "/v2/foo/bar/blobs/uploads/", + build: func() (string, error) { + return urlBuilder.BuildBlobUploadURL(fooBarRef) + }, + }, + { + description: "build blob upload url with digest and size", + expectedPath: "/v2/foo/bar/blobs/uploads/?digest=sha256%3A3b3692957d439ac1928219a83fac91e7bf96c153725526874673ae1f2023f8d5&size=10000", + build: func() (string, error) { + return urlBuilder.BuildBlobUploadURL(fooBarRef, url.Values{ + "size": []string{"10000"}, + "digest": []string{"sha256:3b3692957d439ac1928219a83fac91e7bf96c153725526874673ae1f2023f8d5"}, + }) + }, + }, + { + description: "build blob upload chunk url", + expectedPath: "/v2/foo/bar/blobs/uploads/uuid-part", + build: func() (string, error) { + return urlBuilder.BuildBlobUploadChunkURL(fooBarRef, "uuid-part") + }, + }, + { + description: "build blob upload chunk url with digest and size", + expectedPath: "/v2/foo/bar/blobs/uploads/uuid-part?digest=sha256%3A3b3692957d439ac1928219a83fac91e7bf96c153725526874673ae1f2023f8d5&size=10000", + build: func() (string, error) { + return urlBuilder.BuildBlobUploadChunkURL(fooBarRef, "uuid-part", url.Values{ + "size": []string{"10000"}, + "digest": []string{"sha256:3b3692957d439ac1928219a83fac91e7bf96c153725526874673ae1f2023f8d5"}, + }) + }, + }, + } +} + +// TestURLBuilder tests the various url building functions, ensuring they are +// returning the expected values. 
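// Editor's note: an illustrative sketch, not part of the vendored package. It
// shows NewURLBuilderFromRequest (urls.go above) deriving the API root from an
// incoming request, which is how Forwarded/X-Forwarded-* headers set by a proxy
// are honoured; the handler and listen address are assumptions.
package main

import (
    "fmt"
    "log"
    "net/http"

    v2 "github.com/docker/distribution/registry/api/v2"
)

func main() {
    http.HandleFunc("/v2/", func(w http.ResponseWriter, r *http.Request) {
        ub := v2.NewURLBuilderFromRequest(r, false)
        base, err := ub.BuildBaseURL()
        if err != nil {
            http.Error(w, err.Error(), http.StatusInternalServerError)
            return
        }
        fmt.Fprintln(w, "API root:", base)
    })
    log.Fatal(http.ListenAndServe(":5000", nil))
}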
+func TestURLBuilder(t *testing.T) { + roots := []string{ + "http://example.com", + "https://example.com", + "http://localhost:5000", + "https://localhost:5443", + } + + doTest := func(relative bool) { + for _, root := range roots { + urlBuilder, err := NewURLBuilderFromString(root, relative) + if err != nil { + t.Fatalf("unexpected error creating urlbuilder: %v", err) + } + + for _, testCase := range makeURLBuilderTestCases(urlBuilder) { + url, err := testCase.build() + if err != nil { + t.Fatalf("%s: error building url: %v", testCase.description, err) + } + expectedURL := testCase.expectedPath + if !relative { + expectedURL = root + expectedURL + } + + if url != expectedURL { + t.Fatalf("%s: %q != %q", testCase.description, url, expectedURL) + } + } + } + } + doTest(true) + doTest(false) +} + +func TestURLBuilderWithPrefix(t *testing.T) { + roots := []string{ + "http://example.com/prefix/", + "https://example.com/prefix/", + "http://localhost:5000/prefix/", + "https://localhost:5443/prefix/", + } + + doTest := func(relative bool) { + for _, root := range roots { + urlBuilder, err := NewURLBuilderFromString(root, relative) + if err != nil { + t.Fatalf("unexpected error creating urlbuilder: %v", err) + } + + for _, testCase := range makeURLBuilderTestCases(urlBuilder) { + url, err := testCase.build() + if err != nil { + t.Fatalf("%s: error building url: %v", testCase.description, err) + } + + expectedURL := testCase.expectedPath + if !relative { + expectedURL = root[0:len(root)-1] + expectedURL + } + if url != expectedURL { + t.Fatalf("%s: %q != %q", testCase.description, url, expectedURL) + } + } + } + } + doTest(true) + doTest(false) +} + +type builderFromRequestTestCase struct { + request *http.Request + base string +} + +func TestBuilderFromRequest(t *testing.T) { + u, err := url.Parse("http://example.com") + if err != nil { + t.Fatal(err) + } + + testRequests := []struct { + name string + request *http.Request + base string + configHost url.URL + }{ + { + name: "no forwarded header", + request: &http.Request{URL: u, Host: u.Host}, + base: "http://example.com", + }, + { + name: "https protocol forwarded with a non-standard header", + request: &http.Request{URL: u, Host: u.Host, Header: http.Header{ + "X-Custom-Forwarded-Proto": []string{"https"}, + }}, + base: "http://example.com", + }, + { + name: "forwarded protocol is the same", + request: &http.Request{URL: u, Host: u.Host, Header: http.Header{ + "X-Forwarded-Proto": []string{"https"}, + }}, + base: "https://example.com", + }, + { + name: "forwarded host with a non-standard header", + request: &http.Request{URL: u, Host: u.Host, Header: http.Header{ + "X-Forwarded-Host": []string{"first.example.com"}, + }}, + base: "http://first.example.com", + }, + { + name: "forwarded multiple hosts a with non-standard header", + request: &http.Request{URL: u, Host: u.Host, Header: http.Header{ + "X-Forwarded-Host": []string{"first.example.com, proxy1.example.com"}, + }}, + base: "http://first.example.com", + }, + { + name: "host configured in config file takes priority", + request: &http.Request{URL: u, Host: u.Host, Header: http.Header{ + "X-Forwarded-Host": []string{"first.example.com, proxy1.example.com"}, + }}, + base: "https://third.example.com:5000", + configHost: url.URL{ + Scheme: "https", + Host: "third.example.com:5000", + }, + }, + { + name: "forwarded host and port with just one non-standard header", + request: &http.Request{URL: u, Host: u.Host, Header: http.Header{ + "X-Forwarded-Host": []string{"first.example.com:443"}, + }}, + 
base: "http://first.example.com:443", + }, + { + name: "forwarded port with a non-standard header", + request: &http.Request{URL: u, Host: u.Host, Header: http.Header{ + "X-Forwarded-Host": []string{"example.com:5000"}, + "X-Forwarded-Port": []string{"5000"}, + }}, + base: "http://example.com:5000", + }, + { + name: "forwarded multiple ports with a non-standard header", + request: &http.Request{URL: u, Host: u.Host, Header: http.Header{ + "X-Forwarded-Port": []string{"443 , 5001"}, + }}, + base: "http://example.com", + }, + { + name: "forwarded standard port with non-standard headers", + request: &http.Request{URL: u, Host: u.Host, Header: http.Header{ + "X-Forwarded-Proto": []string{"https"}, + "X-Forwarded-Host": []string{"example.com"}, + "X-Forwarded-Port": []string{"443"}, + }}, + base: "https://example.com", + }, + { + name: "forwarded standard port with non-standard headers and explicit port", + request: &http.Request{URL: u, Host: u.Host + ":443", Header: http.Header{ + "X-Forwarded-Proto": []string{"https"}, + "X-Forwarded-Host": []string{u.Host + ":443"}, + "X-Forwarded-Port": []string{"443"}, + }}, + base: "https://example.com:443", + }, + { + name: "several non-standard headers", + request: &http.Request{URL: u, Host: u.Host, Header: http.Header{ + "X-Forwarded-Proto": []string{"https"}, + "X-Forwarded-Host": []string{" first.example.com:12345 "}, + }}, + base: "https://first.example.com:12345", + }, + { + name: "forwarded host with port supplied takes priority", + request: &http.Request{URL: u, Host: u.Host, Header: http.Header{ + "X-Forwarded-Host": []string{"first.example.com:5000"}, + "X-Forwarded-Port": []string{"80"}, + }}, + base: "http://first.example.com:5000", + }, + { + name: "malformed forwarded port", + request: &http.Request{URL: u, Host: u.Host, Header: http.Header{ + "X-Forwarded-Host": []string{"first.example.com"}, + "X-Forwarded-Port": []string{"abcd"}, + }}, + base: "http://first.example.com", + }, + { + name: "forwarded protocol and addr using standard header", + request: &http.Request{URL: u, Host: u.Host, Header: http.Header{ + "Forwarded": []string{`proto=https;host="192.168.22.30:80"`}, + }}, + base: "https://192.168.22.30:80", + }, + { + name: "forwarded host takes priority over for", + request: &http.Request{URL: u, Host: u.Host, Header: http.Header{ + "Forwarded": []string{`host="reg.example.com:5000";for="192.168.22.30"`}, + }}, + base: "http://reg.example.com:5000", + }, + { + name: "forwarded host and protocol using standard header", + request: &http.Request{URL: u, Host: u.Host, Header: http.Header{ + "Forwarded": []string{`host=reg.example.com;proto=https`}, + }}, + base: "https://reg.example.com", + }, + { + name: "process just the first standard forwarded header", + request: &http.Request{URL: u, Host: u.Host, Header: http.Header{ + "Forwarded": []string{`host="reg.example.com:88";proto=http`, `host=reg.example.com;proto=https`}, + }}, + base: "http://reg.example.com:88", + }, + { + name: "process just the first list element of standard header", + request: &http.Request{URL: u, Host: u.Host, Header: http.Header{ + "Forwarded": []string{`host="reg.example.com:443";proto=https, host="reg.example.com:80";proto=http`}, + }}, + base: "https://reg.example.com:443", + }, + { + name: "IPv6 address use host", + request: &http.Request{URL: u, Host: u.Host, Header: http.Header{ + "Forwarded": []string{`for="2607:f0d0:1002:51::4";host="[2607:f0d0:1002:51::4]:5001"`}, + "X-Forwarded-Port": []string{"5002"}, + }}, + base: 
"http://[2607:f0d0:1002:51::4]:5001", + }, + { + name: "IPv6 address with port", + request: &http.Request{URL: u, Host: u.Host, Header: http.Header{ + "Forwarded": []string{`host="[2607:f0d0:1002:51::4]:4000"`}, + "X-Forwarded-Port": []string{"5001"}, + }}, + base: "http://[2607:f0d0:1002:51::4]:4000", + }, + { + name: "non-standard and standard forward headers", + request: &http.Request{URL: u, Host: u.Host, Header: http.Header{ + "X-Forwarded-Proto": []string{`https`}, + "X-Forwarded-Host": []string{`first.example.com`}, + "X-Forwarded-Port": []string{``}, + "Forwarded": []string{`host=first.example.com; proto=https`}, + }}, + base: "https://first.example.com", + }, + { + name: "standard header takes precedence over non-standard headers", + request: &http.Request{URL: u, Host: u.Host, Header: http.Header{ + "X-Forwarded-Proto": []string{`http`}, + "Forwarded": []string{`host=second.example.com; proto=https`}, + "X-Forwarded-Host": []string{`first.example.com`}, + "X-Forwarded-Port": []string{`4000`}, + }}, + base: "https://second.example.com", + }, + { + name: "incomplete standard header uses default", + request: &http.Request{URL: u, Host: u.Host, Header: http.Header{ + "X-Forwarded-Proto": []string{`https`}, + "Forwarded": []string{`for=127.0.0.1`}, + "X-Forwarded-Host": []string{`first.example.com`}, + "X-Forwarded-Port": []string{`4000`}, + }}, + base: "http://" + u.Host, + }, + { + name: "standard with just proto", + request: &http.Request{URL: u, Host: u.Host, Header: http.Header{ + "X-Forwarded-Proto": []string{`https`}, + "Forwarded": []string{`proto=https`}, + "X-Forwarded-Host": []string{`first.example.com`}, + "X-Forwarded-Port": []string{`4000`}, + }}, + base: "https://" + u.Host, + }, + } + + doTest := func(relative bool) { + for _, tr := range testRequests { + var builder *URLBuilder + if tr.configHost.Scheme != "" && tr.configHost.Host != "" { + builder = NewURLBuilder(&tr.configHost, relative) + } else { + builder = NewURLBuilderFromRequest(tr.request, relative) + } + + for _, testCase := range makeURLBuilderTestCases(builder) { + buildURL, err := testCase.build() + if err != nil { + t.Fatalf("[relative=%t, request=%q, case=%q]: error building url: %v", relative, tr.name, testCase.description, err) + } + + expectedURL := testCase.expectedPath + if !relative { + expectedURL = tr.base + expectedURL + } + + if buildURL != expectedURL { + t.Errorf("[relative=%t, request=%q, case=%q]: %q != %q", relative, tr.name, testCase.description, buildURL, expectedURL) + } + } + } + } + + doTest(true) + doTest(false) +} + +func TestBuilderFromRequestWithPrefix(t *testing.T) { + u, err := url.Parse("http://example.com/prefix/v2/") + if err != nil { + t.Fatal(err) + } + + forwardedProtoHeader := make(http.Header, 1) + forwardedProtoHeader.Set("X-Forwarded-Proto", "https") + + testRequests := []struct { + request *http.Request + base string + configHost url.URL + }{ + { + request: &http.Request{URL: u, Host: u.Host}, + base: "http://example.com/prefix/", + }, + + { + request: &http.Request{URL: u, Host: u.Host, Header: forwardedProtoHeader}, + base: "http://example.com/prefix/", + }, + { + request: &http.Request{URL: u, Host: u.Host, Header: forwardedProtoHeader}, + base: "https://example.com/prefix/", + }, + { + request: &http.Request{URL: u, Host: u.Host, Header: forwardedProtoHeader}, + base: "https://subdomain.example.com/prefix/", + configHost: url.URL{ + Scheme: "https", + Host: "subdomain.example.com", + Path: "/prefix/", + }, + }, + } + + var relative bool + for _, tr := range 
testRequests { + var builder *URLBuilder + if tr.configHost.Scheme != "" && tr.configHost.Host != "" { + builder = NewURLBuilder(&tr.configHost, false) + } else { + builder = NewURLBuilderFromRequest(tr.request, false) + } + + for _, testCase := range makeURLBuilderTestCases(builder) { + buildURL, err := testCase.build() + if err != nil { + t.Fatalf("%s: error building url: %v", testCase.description, err) + } + + var expectedURL string + proto, ok := tr.request.Header["X-Forwarded-Proto"] + if !ok { + expectedURL = testCase.expectedPath + if !relative { + expectedURL = tr.base[0:len(tr.base)-1] + expectedURL + } + } else { + urlBase, err := url.Parse(tr.base) + if err != nil { + t.Fatal(err) + } + urlBase.Scheme = proto[0] + expectedURL = testCase.expectedPath + if !relative { + expectedURL = urlBase.String()[0:len(urlBase.String())-1] + expectedURL + } + + } + + if buildURL != expectedURL { + t.Fatalf("%s: %q != %q", testCase.description, buildURL, expectedURL) + } + } + } +} diff --git a/vendor/github.com/docker/distribution/registry/auth/auth.go b/vendor/github.com/docker/distribution/registry/auth/auth.go new file mode 100644 index 000000000..1c9af8821 --- /dev/null +++ b/vendor/github.com/docker/distribution/registry/auth/auth.go @@ -0,0 +1,202 @@ +// Package auth defines a standard interface for request access controllers. +// +// An access controller has a simple interface with a single `Authorized` +// method which checks that a given request is authorized to perform one or +// more actions on one or more resources. This method should return a non-nil +// error if the request is not authorized. +// +// An implementation registers its access controller by name with a constructor +// which accepts an options map for configuring the access controller. +// +// options := map[string]interface{}{"sillySecret": "whysosilly?"} +// accessController, _ := auth.GetAccessController("silly", options) +// +// This `accessController` can then be used in a request handler like so: +// +// func updateOrder(w http.ResponseWriter, r *http.Request) { +// orderNumber := r.FormValue("orderNumber") +// resource := auth.Resource{Type: "customerOrder", Name: orderNumber} +// access := auth.Access{Resource: resource, Action: "update"} +// +// if ctx, err := accessController.Authorized(ctx, access); err != nil { +// if challenge, ok := err.(auth.Challenge) { +// // Let the challenge write the response. +// challenge.SetHeaders(w) +// w.WriteHeader(http.StatusUnauthorized) +// return +// } else { +// // Some other error. +// } +// } +// } +// +package auth + +import ( + "errors" + "fmt" + "net/http" + + "github.com/docker/distribution/context" +) + +const ( + // UserKey is used to get the user object from + // a user context + UserKey = "auth.user" + + // UserNameKey is used to get the user name from + // a user context + UserNameKey = "auth.user.name" +) + +var ( + // ErrInvalidCredential is returned when the auth token does not authenticate correctly. + ErrInvalidCredential = errors.New("invalid authorization credential") + + // ErrAuthenticationFailure returned when authentication fails. + ErrAuthenticationFailure = errors.New("authentication failure") +) + +// UserInfo carries information about +// an autenticated/authorized client. +type UserInfo struct { + Name string +} + +// Resource describes a resource by type and name. +type Resource struct { + Type string + Class string + Name string +} + +// Access describes a specific action that is +// requested or allowed for a given resource. 
+type Access struct { + Resource + Action string +} + +// Challenge is a special error type which is used for HTTP 401 Unauthorized +// responses and is able to write the response with WWW-Authenticate challenge +// header values based on the error. +type Challenge interface { + error + + // SetHeaders prepares the request to conduct a challenge response by + // adding the an HTTP challenge header on the response message. Callers + // are expected to set the appropriate HTTP status code (e.g. 401) + // themselves. + SetHeaders(w http.ResponseWriter) +} + +// AccessController controls access to registry resources based on a request +// and required access levels for a request. Implementations can support both +// complete denial and http authorization challenges. +type AccessController interface { + // Authorized returns a non-nil error if the context is granted access and + // returns a new authorized context. If one or more Access structs are + // provided, the requested access will be compared with what is available + // to the context. The given context will contain a "http.request" key with + // a `*http.Request` value. If the error is non-nil, access should always + // be denied. The error may be of type Challenge, in which case the caller + // may have the Challenge handle the request or choose what action to take + // based on the Challenge header or response status. The returned context + // object should have a "auth.user" value set to a UserInfo struct. + Authorized(ctx context.Context, access ...Access) (context.Context, error) +} + +// CredentialAuthenticator is an object which is able to authenticate credentials +type CredentialAuthenticator interface { + AuthenticateUser(username, password string) error +} + +// WithUser returns a context with the authorized user info. +func WithUser(ctx context.Context, user UserInfo) context.Context { + return userInfoContext{ + Context: ctx, + user: user, + } +} + +type userInfoContext struct { + context.Context + user UserInfo +} + +func (uic userInfoContext) Value(key interface{}) interface{} { + switch key { + case UserKey: + return uic.user + case UserNameKey: + return uic.user.Name + } + + return uic.Context.Value(key) +} + +// WithResources returns a context with the authorized resources. +func WithResources(ctx context.Context, resources []Resource) context.Context { + return resourceContext{ + Context: ctx, + resources: resources, + } +} + +type resourceContext struct { + context.Context + resources []Resource +} + +type resourceKey struct{} + +func (rc resourceContext) Value(key interface{}) interface{} { + if key == (resourceKey{}) { + return rc.resources + } + + return rc.Context.Value(key) +} + +// AuthorizedResources returns the list of resources which have +// been authorized for this request. +func AuthorizedResources(ctx context.Context) []Resource { + if resources, ok := ctx.Value(resourceKey{}).([]Resource); ok { + return resources + } + + return nil +} + +// InitFunc is the type of an AccessController factory function and is used +// to register the constructor for different AccesController backends. +type InitFunc func(options map[string]interface{}) (AccessController, error) + +var accessControllers map[string]InitFunc + +func init() { + accessControllers = make(map[string]InitFunc) +} + +// Register is used to register an InitFunc for +// an AccessController backend with the given name. 
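// Editor's note: an illustrative sketch, not part of the vendored package. It
// shows the intended calling pattern for a registered access controller: build
// it by name, authorize each request, and turn a Challenge error into a 401.
// The "silly" backend (vendored below), realm, service and repository name are
// assumptions for the example.
package main

import (
    "log"
    "net/http"

    "github.com/docker/distribution/context"
    "github.com/docker/distribution/registry/auth"
    _ "github.com/docker/distribution/registry/auth/silly" // registers the "silly" backend
)

func main() {
    ac, err := auth.GetAccessController("silly", map[string]interface{}{
        "realm":   "registry.example.com",
        "service": "registry.example.com",
    })
    if err != nil {
        log.Fatal(err)
    }

    http.HandleFunc("/v2/", func(w http.ResponseWriter, r *http.Request) {
        ctx := context.WithRequest(context.Background(), r)
        access := auth.Access{
            Resource: auth.Resource{Type: "repository", Name: "foo/bar"},
            Action:   "pull",
        }
        if _, err := ac.Authorized(ctx, access); err != nil {
            if challenge, ok := err.(auth.Challenge); ok {
                challenge.SetHeaders(w)
                w.WriteHeader(http.StatusUnauthorized)
                return
            }
            http.Error(w, err.Error(), http.StatusInternalServerError)
            return
        }
        w.WriteHeader(http.StatusNoContent)
    })
    log.Fatal(http.ListenAndServe(":5000", nil))
}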
+func Register(name string, initFunc InitFunc) error { + if _, exists := accessControllers[name]; exists { + return fmt.Errorf("name already registered: %s", name) + } + + accessControllers[name] = initFunc + + return nil +} + +// GetAccessController constructs an AccessController +// with the given options using the named backend. +func GetAccessController(name string, options map[string]interface{}) (AccessController, error) { + if initFunc, exists := accessControllers[name]; exists { + return initFunc(options) + } + + return nil, fmt.Errorf("no access controller registered with name: %s", name) +} diff --git a/vendor/github.com/docker/distribution/registry/auth/htpasswd/access.go b/vendor/github.com/docker/distribution/registry/auth/htpasswd/access.go new file mode 100644 index 000000000..819b09ca8 --- /dev/null +++ b/vendor/github.com/docker/distribution/registry/auth/htpasswd/access.go @@ -0,0 +1,115 @@ +// Package htpasswd provides a simple authentication scheme that checks for the +// user credential hash in an htpasswd formatted file in a configuration-determined +// location. +// +// This authentication method MUST be used under TLS, as simple token-replay attack is possible. +package htpasswd + +import ( + "fmt" + "net/http" + "os" + "sync" + "time" + + "github.com/docker/distribution/context" + "github.com/docker/distribution/registry/auth" +) + +type accessController struct { + realm string + path string + modtime time.Time + mu sync.Mutex + htpasswd *htpasswd +} + +var _ auth.AccessController = &accessController{} + +func newAccessController(options map[string]interface{}) (auth.AccessController, error) { + realm, present := options["realm"] + if _, ok := realm.(string); !present || !ok { + return nil, fmt.Errorf(`"realm" must be set for htpasswd access controller`) + } + + path, present := options["path"] + if _, ok := path.(string); !present || !ok { + return nil, fmt.Errorf(`"path" must be set for htpasswd access controller`) + } + + return &accessController{realm: realm.(string), path: path.(string)}, nil +} + +func (ac *accessController) Authorized(ctx context.Context, accessRecords ...auth.Access) (context.Context, error) { + req, err := context.GetRequest(ctx) + if err != nil { + return nil, err + } + + username, password, ok := req.BasicAuth() + if !ok { + return nil, &challenge{ + realm: ac.realm, + err: auth.ErrInvalidCredential, + } + } + + // Dynamically parsing the latest account list + fstat, err := os.Stat(ac.path) + if err != nil { + return nil, err + } + + lastModified := fstat.ModTime() + ac.mu.Lock() + if ac.htpasswd == nil || !ac.modtime.Equal(lastModified) { + ac.modtime = lastModified + + f, err := os.Open(ac.path) + if err != nil { + ac.mu.Unlock() + return nil, err + } + defer f.Close() + + h, err := newHTPasswd(f) + if err != nil { + ac.mu.Unlock() + return nil, err + } + ac.htpasswd = h + } + localHTPasswd := ac.htpasswd + ac.mu.Unlock() + + if err := localHTPasswd.authenticateUser(username, password); err != nil { + context.GetLogger(ctx).Errorf("error authenticating user %q: %v", username, err) + return nil, &challenge{ + realm: ac.realm, + err: auth.ErrAuthenticationFailure, + } + } + + return auth.WithUser(ctx, auth.UserInfo{Name: username}), nil +} + +// challenge implements the auth.Challenge interface. +type challenge struct { + realm string + err error +} + +var _ auth.Challenge = challenge{} + +// SetHeaders sets the basic challenge header on the response. 
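// Editor's note: an illustrative sketch, not part of the vendored package. Only
// bcrypt entries in the htpasswd file authenticate successfully with this
// backend (see htpasswd.go below), so entries are typically generated like this;
// the username and password are assumptions.
package main

import (
    "fmt"
    "log"

    "golang.org/x/crypto/bcrypt"
)

func main() {
    hash, err := bcrypt.GenerateFromPassword([]byte("baggins"), bcrypt.DefaultCost)
    if err != nil {
        log.Fatal(err)
    }
    // Append this line to the file referenced by the "path" option.
    fmt.Printf("frodo:%s\n", hash)
}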
+func (ch challenge) SetHeaders(w http.ResponseWriter) { + w.Header().Set("WWW-Authenticate", fmt.Sprintf("Basic realm=%q", ch.realm)) +} + +func (ch challenge) Error() string { + return fmt.Sprintf("basic authentication challenge for realm %q: %s", ch.realm, ch.err) +} + +func init() { + auth.Register("htpasswd", auth.InitFunc(newAccessController)) +} diff --git a/vendor/github.com/docker/distribution/registry/auth/htpasswd/access_test.go b/vendor/github.com/docker/distribution/registry/auth/htpasswd/access_test.go new file mode 100644 index 000000000..553f05cf9 --- /dev/null +++ b/vendor/github.com/docker/distribution/registry/auth/htpasswd/access_test.go @@ -0,0 +1,122 @@ +package htpasswd + +import ( + "io/ioutil" + "net/http" + "net/http/httptest" + "testing" + + "github.com/docker/distribution/context" + "github.com/docker/distribution/registry/auth" +) + +func TestBasicAccessController(t *testing.T) { + testRealm := "The-Shire" + testUsers := []string{"bilbo", "frodo", "MiShil", "DeokMan"} + testPasswords := []string{"baggins", "baggins", "새주", "공주님"} + testHtpasswdContent := `bilbo:{SHA}5siv5c0SHx681xU6GiSx9ZQryqs= + frodo:$2y$05$926C3y10Quzn/LnqQH86VOEVh/18T6RnLaS.khre96jLNL/7e.K5W + MiShil:$2y$05$0oHgwMehvoe8iAWS8I.7l.KoECXrwVaC16RPfaSCU5eVTFrATuMI2 + DeokMan:공주님` + + tempFile, err := ioutil.TempFile("", "htpasswd-test") + if err != nil { + t.Fatal("could not create temporary htpasswd file") + } + if _, err = tempFile.WriteString(testHtpasswdContent); err != nil { + t.Fatal("could not write temporary htpasswd file") + } + + options := map[string]interface{}{ + "realm": testRealm, + "path": tempFile.Name(), + } + ctx := context.Background() + + accessController, err := newAccessController(options) + if err != nil { + t.Fatal("error creating access controller") + } + + tempFile.Close() + + var userNumber = 0 + + server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + ctx := context.WithRequest(ctx, r) + authCtx, err := accessController.Authorized(ctx) + if err != nil { + switch err := err.(type) { + case auth.Challenge: + err.SetHeaders(w) + w.WriteHeader(http.StatusUnauthorized) + return + default: + t.Fatalf("unexpected error authorizing request: %v", err) + } + } + + userInfo, ok := authCtx.Value(auth.UserKey).(auth.UserInfo) + if !ok { + t.Fatal("basic accessController did not set auth.user context") + } + + if userInfo.Name != testUsers[userNumber] { + t.Fatalf("expected user name %q, got %q", testUsers[userNumber], userInfo.Name) + } + + w.WriteHeader(http.StatusNoContent) + })) + + client := &http.Client{ + CheckRedirect: nil, + } + + req, _ := http.NewRequest("GET", server.URL, nil) + resp, err := client.Do(req) + + if err != nil { + t.Fatalf("unexpected error during GET: %v", err) + } + defer resp.Body.Close() + + // Request should not be authorized + if resp.StatusCode != http.StatusUnauthorized { + t.Fatalf("unexpected non-fail response status: %v != %v", resp.StatusCode, http.StatusUnauthorized) + } + + nonbcrypt := map[string]struct{}{ + "bilbo": {}, + "DeokMan": {}, + } + + for i := 0; i < len(testUsers); i++ { + userNumber = i + req, err := http.NewRequest("GET", server.URL, nil) + if err != nil { + t.Fatalf("error allocating new request: %v", err) + } + + req.SetBasicAuth(testUsers[i], testPasswords[i]) + + resp, err = client.Do(req) + if err != nil { + t.Fatalf("unexpected error during GET: %v", err) + } + defer resp.Body.Close() + + if _, ok := nonbcrypt[testUsers[i]]; ok { + // these are not allowed. 
+ // Request should be authorized + if resp.StatusCode != http.StatusUnauthorized { + t.Fatalf("unexpected non-success response status: %v != %v for %s %s", resp.StatusCode, http.StatusUnauthorized, testUsers[i], testPasswords[i]) + } + } else { + // Request should be authorized + if resp.StatusCode != http.StatusNoContent { + t.Fatalf("unexpected non-success response status: %v != %v for %s %s", resp.StatusCode, http.StatusNoContent, testUsers[i], testPasswords[i]) + } + } + } + +} diff --git a/vendor/github.com/docker/distribution/registry/auth/htpasswd/htpasswd.go b/vendor/github.com/docker/distribution/registry/auth/htpasswd/htpasswd.go new file mode 100644 index 000000000..b10b256c7 --- /dev/null +++ b/vendor/github.com/docker/distribution/registry/auth/htpasswd/htpasswd.go @@ -0,0 +1,82 @@ +package htpasswd + +import ( + "bufio" + "fmt" + "io" + "strings" + + "github.com/docker/distribution/registry/auth" + + "golang.org/x/crypto/bcrypt" +) + +// htpasswd holds a path to a system .htpasswd file and the machinery to parse +// it. Only bcrypt hash entries are supported. +type htpasswd struct { + entries map[string][]byte // maps username to password byte slice. +} + +// newHTPasswd parses the reader and returns an htpasswd or an error. +func newHTPasswd(rd io.Reader) (*htpasswd, error) { + entries, err := parseHTPasswd(rd) + if err != nil { + return nil, err + } + + return &htpasswd{entries: entries}, nil +} + +// AuthenticateUser checks a given user:password credential against the +// receiving HTPasswd's file. If the check passes, nil is returned. +func (htpasswd *htpasswd) authenticateUser(username string, password string) error { + credentials, ok := htpasswd.entries[username] + if !ok { + // timing attack paranoia + bcrypt.CompareHashAndPassword([]byte{}, []byte(password)) + + return auth.ErrAuthenticationFailure + } + + err := bcrypt.CompareHashAndPassword([]byte(credentials), []byte(password)) + if err != nil { + return auth.ErrAuthenticationFailure + } + + return nil +} + +// parseHTPasswd parses the contents of htpasswd. This will read all the +// entries in the file, whether or not they are needed. An error is returned +// if a syntax errors are encountered or if the reader fails. +func parseHTPasswd(rd io.Reader) (map[string][]byte, error) { + entries := map[string][]byte{} + scanner := bufio.NewScanner(rd) + var line int + for scanner.Scan() { + line++ // 1-based line numbering + t := strings.TrimSpace(scanner.Text()) + + if len(t) < 1 { + continue + } + + // lines that *begin* with a '#' are considered comments + if t[0] == '#' { + continue + } + + i := strings.Index(t, ":") + if i < 0 || i >= len(t) { + return nil, fmt.Errorf("htpasswd: invalid entry at line %d: %q", line, scanner.Text()) + } + + entries[t[:i]] = []byte(t[i+1:]) + } + + if err := scanner.Err(); err != nil { + return nil, err + } + + return entries, nil +} diff --git a/vendor/github.com/docker/distribution/registry/auth/htpasswd/htpasswd_test.go b/vendor/github.com/docker/distribution/registry/auth/htpasswd/htpasswd_test.go new file mode 100644 index 000000000..309c359ad --- /dev/null +++ b/vendor/github.com/docker/distribution/registry/auth/htpasswd/htpasswd_test.go @@ -0,0 +1,85 @@ +package htpasswd + +import ( + "fmt" + "reflect" + "strings" + "testing" +) + +func TestParseHTPasswd(t *testing.T) { + + for _, tc := range []struct { + desc string + input string + err error + entries map[string][]byte + }{ + { + desc: "basic example", + input: ` +# This is a comment in a basic example. 
+bilbo:{SHA}5siv5c0SHx681xU6GiSx9ZQryqs= +frodo:$2y$05$926C3y10Quzn/LnqQH86VOEVh/18T6RnLaS.khre96jLNL/7e.K5W +MiShil:$2y$05$0oHgwMehvoe8iAWS8I.7l.KoECXrwVaC16RPfaSCU5eVTFrATuMI2 +DeokMan:공주님 +`, + entries: map[string][]byte{ + "bilbo": []byte("{SHA}5siv5c0SHx681xU6GiSx9ZQryqs="), + "frodo": []byte("$2y$05$926C3y10Quzn/LnqQH86VOEVh/18T6RnLaS.khre96jLNL/7e.K5W"), + "MiShil": []byte("$2y$05$0oHgwMehvoe8iAWS8I.7l.KoECXrwVaC16RPfaSCU5eVTFrATuMI2"), + "DeokMan": []byte("공주님"), + }, + }, + { + desc: "ensures comments are filtered", + input: ` +# asdf:asdf +`, + }, + { + desc: "ensure midline hash is not comment", + input: ` +asdf:as#df +`, + entries: map[string][]byte{ + "asdf": []byte("as#df"), + }, + }, + { + desc: "ensure midline hash is not comment", + input: ` +# A valid comment +valid:entry +asdf +`, + err: fmt.Errorf(`htpasswd: invalid entry at line 4: "asdf"`), + }, + } { + + entries, err := parseHTPasswd(strings.NewReader(tc.input)) + if err != tc.err { + if tc.err == nil { + t.Fatalf("%s: unexpected error: %v", tc.desc, err) + } else { + if err.Error() != tc.err.Error() { // use string equality here. + t.Fatalf("%s: expected error not returned: %v != %v", tc.desc, err, tc.err) + } + } + } + + if tc.err != nil { + continue // don't test output + } + + // allow empty and nil to be equal + if tc.entries == nil { + tc.entries = map[string][]byte{} + } + + if !reflect.DeepEqual(entries, tc.entries) { + t.Fatalf("%s: entries not parsed correctly: %v != %v", tc.desc, entries, tc.entries) + } + } + +} diff --git a/vendor/github.com/docker/distribution/registry/auth/silly/access.go b/vendor/github.com/docker/distribution/registry/auth/silly/access.go new file mode 100644 index 000000000..2b801d946 --- /dev/null +++ b/vendor/github.com/docker/distribution/registry/auth/silly/access.go @@ -0,0 +1,97 @@ +// Package silly provides a simple authentication scheme that checks for the +// existence of an Authorization header and issues access if is present and +// non-empty. +// +// This package is present as an example implementation of a minimal +// auth.AccessController and for testing. This is not suitable for any kind of +// production security. +package silly + +import ( + "fmt" + "net/http" + "strings" + + "github.com/docker/distribution/context" + "github.com/docker/distribution/registry/auth" +) + +// accessController provides a simple implementation of auth.AccessController +// that simply checks for a non-empty Authorization header. It is useful for +// demonstration and testing. +type accessController struct { + realm string + service string +} + +var _ auth.AccessController = &accessController{} + +func newAccessController(options map[string]interface{}) (auth.AccessController, error) { + realm, present := options["realm"] + if _, ok := realm.(string); !present || !ok { + return nil, fmt.Errorf(`"realm" must be set for silly access controller`) + } + + service, present := options["service"] + if _, ok := service.(string); !present || !ok { + return nil, fmt.Errorf(`"service" must be set for silly access controller`) + } + + return &accessController{realm: realm.(string), service: service.(string)}, nil +} + +// Authorized simply checks for the existence of the authorization header, +// responding with a bearer challenge if it doesn't exist. 
+func (ac *accessController) Authorized(ctx context.Context, accessRecords ...auth.Access) (context.Context, error) { + req, err := context.GetRequest(ctx) + if err != nil { + return nil, err + } + + if req.Header.Get("Authorization") == "" { + challenge := challenge{ + realm: ac.realm, + service: ac.service, + } + + if len(accessRecords) > 0 { + var scopes []string + for _, access := range accessRecords { + scopes = append(scopes, fmt.Sprintf("%s:%s:%s", access.Type, access.Resource.Name, access.Action)) + } + challenge.scope = strings.Join(scopes, " ") + } + + return nil, &challenge + } + + return auth.WithUser(ctx, auth.UserInfo{Name: "silly"}), nil +} + +type challenge struct { + realm string + service string + scope string +} + +var _ auth.Challenge = challenge{} + +// SetHeaders sets a simple bearer challenge on the response. +func (ch challenge) SetHeaders(w http.ResponseWriter) { + header := fmt.Sprintf("Bearer realm=%q,service=%q", ch.realm, ch.service) + + if ch.scope != "" { + header = fmt.Sprintf("%s,scope=%q", header, ch.scope) + } + + w.Header().Set("WWW-Authenticate", header) +} + +func (ch challenge) Error() string { + return fmt.Sprintf("silly authentication challenge: %#v", ch) +} + +// init registers the silly auth backend. +func init() { + auth.Register("silly", auth.InitFunc(newAccessController)) +} diff --git a/vendor/github.com/docker/distribution/registry/auth/silly/access_test.go b/vendor/github.com/docker/distribution/registry/auth/silly/access_test.go new file mode 100644 index 000000000..0a5103e6c --- /dev/null +++ b/vendor/github.com/docker/distribution/registry/auth/silly/access_test.go @@ -0,0 +1,71 @@ +package silly + +import ( + "net/http" + "net/http/httptest" + "testing" + + "github.com/docker/distribution/context" + "github.com/docker/distribution/registry/auth" +) + +func TestSillyAccessController(t *testing.T) { + ac := &accessController{ + realm: "test-realm", + service: "test-service", + } + + server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + ctx := context.WithRequest(context.Background(), r) + authCtx, err := ac.Authorized(ctx) + if err != nil { + switch err := err.(type) { + case auth.Challenge: + err.SetHeaders(w) + w.WriteHeader(http.StatusUnauthorized) + return + default: + t.Fatalf("unexpected error authorizing request: %v", err) + } + } + + userInfo, ok := authCtx.Value(auth.UserKey).(auth.UserInfo) + if !ok { + t.Fatal("silly accessController did not set auth.user context") + } + + if userInfo.Name != "silly" { + t.Fatalf("expected user name %q, got %q", "silly", userInfo.Name) + } + + w.WriteHeader(http.StatusNoContent) + })) + + resp, err := http.Get(server.URL) + if err != nil { + t.Fatalf("unexpected error during GET: %v", err) + } + defer resp.Body.Close() + + // Request should not be authorized + if resp.StatusCode != http.StatusUnauthorized { + t.Fatalf("unexpected response status: %v != %v", resp.StatusCode, http.StatusUnauthorized) + } + + req, err := http.NewRequest("GET", server.URL, nil) + if err != nil { + t.Fatalf("unexpected error creating new request: %v", err) + } + req.Header.Set("Authorization", "seriously, anything") + + resp, err = http.DefaultClient.Do(req) + if err != nil { + t.Fatalf("unexpected error during GET: %v", err) + } + defer resp.Body.Close() + + // Request should not be authorized + if resp.StatusCode != http.StatusNoContent { + t.Fatalf("unexpected response status: %v != %v", resp.StatusCode, http.StatusNoContent) + } +} diff --git 
a/vendor/github.com/docker/distribution/registry/auth/token/accesscontroller.go b/vendor/github.com/docker/distribution/registry/auth/token/accesscontroller.go new file mode 100644 index 000000000..4e8b7f1ce --- /dev/null +++ b/vendor/github.com/docker/distribution/registry/auth/token/accesscontroller.go @@ -0,0 +1,272 @@ +package token + +import ( + "crypto" + "crypto/x509" + "encoding/pem" + "errors" + "fmt" + "io/ioutil" + "net/http" + "os" + "strings" + + "github.com/docker/distribution/context" + "github.com/docker/distribution/registry/auth" + "github.com/docker/libtrust" +) + +// accessSet maps a typed, named resource to +// a set of actions requested or authorized. +type accessSet map[auth.Resource]actionSet + +// newAccessSet constructs an accessSet from +// a variable number of auth.Access items. +func newAccessSet(accessItems ...auth.Access) accessSet { + accessSet := make(accessSet, len(accessItems)) + + for _, access := range accessItems { + resource := auth.Resource{ + Type: access.Type, + Name: access.Name, + } + + set, exists := accessSet[resource] + if !exists { + set = newActionSet() + accessSet[resource] = set + } + + set.add(access.Action) + } + + return accessSet +} + +// contains returns whether or not the given access is in this accessSet. +func (s accessSet) contains(access auth.Access) bool { + actionSet, ok := s[access.Resource] + if ok { + return actionSet.contains(access.Action) + } + + return false +} + +// scopeParam returns a collection of scopes which can +// be used for a WWW-Authenticate challenge parameter. +// See https://tools.ietf.org/html/rfc6750#section-3 +func (s accessSet) scopeParam() string { + scopes := make([]string, 0, len(s)) + + for resource, actionSet := range s { + actions := strings.Join(actionSet.keys(), ",") + scopes = append(scopes, fmt.Sprintf("%s:%s:%s", resource.Type, resource.Name, actions)) + } + + return strings.Join(scopes, " ") +} + +// Errors used and exported by this package. +var ( + ErrInsufficientScope = errors.New("insufficient scope") + ErrTokenRequired = errors.New("authorization token required") +) + +// authChallenge implements the auth.Challenge interface. +type authChallenge struct { + err error + realm string + service string + accessSet accessSet +} + +var _ auth.Challenge = authChallenge{} + +// Error returns the internal error string for this authChallenge. +func (ac authChallenge) Error() string { + return ac.err.Error() +} + +// Status returns the HTTP Response Status Code for this authChallenge. +func (ac authChallenge) Status() int { + return http.StatusUnauthorized +} + +// challengeParams constructs the value to be used in +// the WWW-Authenticate response challenge header. +// See https://tools.ietf.org/html/rfc6750#section-3 +func (ac authChallenge) challengeParams() string { + str := fmt.Sprintf("Bearer realm=%q,service=%q", ac.realm, ac.service) + + if scope := ac.accessSet.scopeParam(); scope != "" { + str = fmt.Sprintf("%s,scope=%q", str, scope) + } + + if ac.err == ErrInvalidToken || ac.err == ErrMalformedToken { + str = fmt.Sprintf("%s,error=%q", str, "invalid_token") + } else if ac.err == ErrInsufficientScope { + str = fmt.Sprintf("%s,error=%q", str, "insufficient_scope") + } + + return str +} + +// SetChallenge sets the WWW-Authenticate value for the response. +func (ac authChallenge) SetHeaders(w http.ResponseWriter) { + w.Header().Add("WWW-Authenticate", ac.challengeParams()) +} + +// accessController implements the auth.AccessController interface. 
+type accessController struct { + realm string + issuer string + service string + rootCerts *x509.CertPool + trustedKeys map[string]libtrust.PublicKey +} + +// tokenAccessOptions is a convenience type for handling +// options to the contstructor of an accessController. +type tokenAccessOptions struct { + realm string + issuer string + service string + rootCertBundle string +} + +// checkOptions gathers the necessary options +// for an accessController from the given map. +func checkOptions(options map[string]interface{}) (tokenAccessOptions, error) { + var opts tokenAccessOptions + + keys := []string{"realm", "issuer", "service", "rootcertbundle"} + vals := make([]string, 0, len(keys)) + for _, key := range keys { + val, ok := options[key].(string) + if !ok { + return opts, fmt.Errorf("token auth requires a valid option string: %q", key) + } + vals = append(vals, val) + } + + opts.realm, opts.issuer, opts.service, opts.rootCertBundle = vals[0], vals[1], vals[2], vals[3] + + return opts, nil +} + +// newAccessController creates an accessController using the given options. +func newAccessController(options map[string]interface{}) (auth.AccessController, error) { + config, err := checkOptions(options) + if err != nil { + return nil, err + } + + fp, err := os.Open(config.rootCertBundle) + if err != nil { + return nil, fmt.Errorf("unable to open token auth root certificate bundle file %q: %s", config.rootCertBundle, err) + } + defer fp.Close() + + rawCertBundle, err := ioutil.ReadAll(fp) + if err != nil { + return nil, fmt.Errorf("unable to read token auth root certificate bundle file %q: %s", config.rootCertBundle, err) + } + + var rootCerts []*x509.Certificate + pemBlock, rawCertBundle := pem.Decode(rawCertBundle) + for pemBlock != nil { + if pemBlock.Type == "CERTIFICATE" { + cert, err := x509.ParseCertificate(pemBlock.Bytes) + if err != nil { + return nil, fmt.Errorf("unable to parse token auth root certificate: %s", err) + } + + rootCerts = append(rootCerts, cert) + } + + pemBlock, rawCertBundle = pem.Decode(rawCertBundle) + } + + if len(rootCerts) == 0 { + return nil, errors.New("token auth requires at least one token signing root certificate") + } + + rootPool := x509.NewCertPool() + trustedKeys := make(map[string]libtrust.PublicKey, len(rootCerts)) + for _, rootCert := range rootCerts { + rootPool.AddCert(rootCert) + pubKey, err := libtrust.FromCryptoPublicKey(crypto.PublicKey(rootCert.PublicKey)) + if err != nil { + return nil, fmt.Errorf("unable to get public key from token auth root certificate: %s", err) + } + trustedKeys[pubKey.KeyID()] = pubKey + } + + return &accessController{ + realm: config.realm, + issuer: config.issuer, + service: config.service, + rootCerts: rootPool, + trustedKeys: trustedKeys, + }, nil +} + +// Authorized handles checking whether the given request is authorized +// for actions on resources described by the given access items. 
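// Editor's note: an illustrative sketch, not part of the vendored package. It
// lists the four options checkOptions above requires when constructing the
// token access controller; the realm URL, issuer, service and bundle path are
// assumptions.
package main

import (
    "log"

    "github.com/docker/distribution/registry/auth"
    _ "github.com/docker/distribution/registry/auth/token" // registers the "token" backend
)

func main() {
    ac, err := auth.GetAccessController("token", map[string]interface{}{
        "realm":          "https://auth.example.com/token",
        "issuer":         "auth.example.com",
        "service":        "registry.example.com",
        "rootcertbundle": "/etc/registry/auth-root-bundle.crt",
    })
    if err != nil {
        log.Fatal(err)
    }
    log.Printf("token access controller ready: %T", ac)
}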
+func (ac *accessController) Authorized(ctx context.Context, accessItems ...auth.Access) (context.Context, error) { + challenge := &authChallenge{ + realm: ac.realm, + service: ac.service, + accessSet: newAccessSet(accessItems...), + } + + req, err := context.GetRequest(ctx) + if err != nil { + return nil, err + } + + parts := strings.Split(req.Header.Get("Authorization"), " ") + + if len(parts) != 2 || strings.ToLower(parts[0]) != "bearer" { + challenge.err = ErrTokenRequired + return nil, challenge + } + + rawToken := parts[1] + + token, err := NewToken(rawToken) + if err != nil { + challenge.err = err + return nil, challenge + } + + verifyOpts := VerifyOptions{ + TrustedIssuers: []string{ac.issuer}, + AcceptedAudiences: []string{ac.service}, + Roots: ac.rootCerts, + TrustedKeys: ac.trustedKeys, + } + + if err = token.Verify(verifyOpts); err != nil { + challenge.err = err + return nil, challenge + } + + accessSet := token.accessSet() + for _, access := range accessItems { + if !accessSet.contains(access) { + challenge.err = ErrInsufficientScope + return nil, challenge + } + } + + ctx = auth.WithResources(ctx, token.resources()) + + return auth.WithUser(ctx, auth.UserInfo{Name: token.Claims.Subject}), nil +} + +// init handles registering the token auth backend. +func init() { + auth.Register("token", auth.InitFunc(newAccessController)) +} diff --git a/vendor/github.com/docker/distribution/registry/auth/token/stringset.go b/vendor/github.com/docker/distribution/registry/auth/token/stringset.go new file mode 100644 index 000000000..1d04f104c --- /dev/null +++ b/vendor/github.com/docker/distribution/registry/auth/token/stringset.go @@ -0,0 +1,35 @@ +package token + +// StringSet is a useful type for looking up strings. +type stringSet map[string]struct{} + +// NewStringSet creates a new StringSet with the given strings. +func newStringSet(keys ...string) stringSet { + ss := make(stringSet, len(keys)) + ss.add(keys...) + return ss +} + +// Add inserts the given keys into this StringSet. +func (ss stringSet) add(keys ...string) { + for _, key := range keys { + ss[key] = struct{}{} + } +} + +// Contains returns whether the given key is in this StringSet. +func (ss stringSet) contains(key string) bool { + _, ok := ss[key] + return ok +} + +// Keys returns a slice of all keys in this StringSet. +func (ss stringSet) keys() []string { + keys := make([]string, 0, len(ss)) + + for key := range ss { + keys = append(keys, key) + } + + return keys +} diff --git a/vendor/github.com/docker/distribution/registry/auth/token/token.go b/vendor/github.com/docker/distribution/registry/auth/token/token.go new file mode 100644 index 000000000..850f5813f --- /dev/null +++ b/vendor/github.com/docker/distribution/registry/auth/token/token.go @@ -0,0 +1,378 @@ +package token + +import ( + "crypto" + "crypto/x509" + "encoding/base64" + "encoding/json" + "errors" + "fmt" + "strings" + "time" + + log "github.com/Sirupsen/logrus" + "github.com/docker/libtrust" + + "github.com/docker/distribution/registry/auth" +) + +const ( + // TokenSeparator is the value which separates the header, claims, and + // signature in the compact serialization of a JSON Web Token. + TokenSeparator = "." + // Leeway is the Duration that will be added to NBF and EXP claim + // checks to account for clock skew as per https://tools.ietf.org/html/rfc7519#section-4.1.5 + Leeway = 60 * time.Second +) + +// Errors used by token parsing and verification. 
+var ( + ErrMalformedToken = errors.New("malformed token") + ErrInvalidToken = errors.New("invalid token") +) + +// ResourceActions stores allowed actions on a named and typed resource. +type ResourceActions struct { + Type string `json:"type"` + Class string `json:"class,omitempty"` + Name string `json:"name"` + Actions []string `json:"actions"` +} + +// ClaimSet describes the main section of a JSON Web Token. +type ClaimSet struct { + // Public claims + Issuer string `json:"iss"` + Subject string `json:"sub"` + Audience string `json:"aud"` + Expiration int64 `json:"exp"` + NotBefore int64 `json:"nbf"` + IssuedAt int64 `json:"iat"` + JWTID string `json:"jti"` + + // Private claims + Access []*ResourceActions `json:"access"` +} + +// Header describes the header section of a JSON Web Token. +type Header struct { + Type string `json:"typ"` + SigningAlg string `json:"alg"` + KeyID string `json:"kid,omitempty"` + X5c []string `json:"x5c,omitempty"` + RawJWK *json.RawMessage `json:"jwk,omitempty"` +} + +// Token describes a JSON Web Token. +type Token struct { + Raw string + Header *Header + Claims *ClaimSet + Signature []byte +} + +// VerifyOptions is used to specify +// options when verifying a JSON Web Token. +type VerifyOptions struct { + TrustedIssuers []string + AcceptedAudiences []string + Roots *x509.CertPool + TrustedKeys map[string]libtrust.PublicKey +} + +// NewToken parses the given raw token string +// and constructs an unverified JSON Web Token. +func NewToken(rawToken string) (*Token, error) { + parts := strings.Split(rawToken, TokenSeparator) + if len(parts) != 3 { + return nil, ErrMalformedToken + } + + var ( + rawHeader, rawClaims = parts[0], parts[1] + headerJSON, claimsJSON []byte + err error + ) + + defer func() { + if err != nil { + log.Infof("error while unmarshalling raw token: %s", err) + } + }() + + if headerJSON, err = joseBase64UrlDecode(rawHeader); err != nil { + err = fmt.Errorf("unable to decode header: %s", err) + return nil, ErrMalformedToken + } + + if claimsJSON, err = joseBase64UrlDecode(rawClaims); err != nil { + err = fmt.Errorf("unable to decode claims: %s", err) + return nil, ErrMalformedToken + } + + token := new(Token) + token.Header = new(Header) + token.Claims = new(ClaimSet) + + token.Raw = strings.Join(parts[:2], TokenSeparator) + if token.Signature, err = joseBase64UrlDecode(parts[2]); err != nil { + err = fmt.Errorf("unable to decode signature: %s", err) + return nil, ErrMalformedToken + } + + if err = json.Unmarshal(headerJSON, token.Header); err != nil { + return nil, ErrMalformedToken + } + + if err = json.Unmarshal(claimsJSON, token.Claims); err != nil { + return nil, ErrMalformedToken + } + + return token, nil +} + +// Verify attempts to verify this token using the given options. +// Returns a nil error if the token is valid. +func (t *Token) Verify(verifyOpts VerifyOptions) error { + // Verify that the Issuer claim is a trusted authority. + if !contains(verifyOpts.TrustedIssuers, t.Claims.Issuer) { + log.Infof("token from untrusted issuer: %q", t.Claims.Issuer) + return ErrInvalidToken + } + + // Verify that the Audience claim is allowed. + if !contains(verifyOpts.AcceptedAudiences, t.Claims.Audience) { + log.Infof("token intended for another audience: %q", t.Claims.Audience) + return ErrInvalidToken + } + + // Verify that the token is currently usable and not expired. 
+ currentTime := time.Now() + + ExpWithLeeway := time.Unix(t.Claims.Expiration, 0).Add(Leeway) + if currentTime.After(ExpWithLeeway) { + log.Infof("token not to be used after %s - currently %s", ExpWithLeeway, currentTime) + return ErrInvalidToken + } + + NotBeforeWithLeeway := time.Unix(t.Claims.NotBefore, 0).Add(-Leeway) + if currentTime.Before(NotBeforeWithLeeway) { + log.Infof("token not to be used before %s - currently %s", NotBeforeWithLeeway, currentTime) + return ErrInvalidToken + } + + // Verify the token signature. + if len(t.Signature) == 0 { + log.Info("token has no signature") + return ErrInvalidToken + } + + // Verify that the signing key is trusted. + signingKey, err := t.VerifySigningKey(verifyOpts) + if err != nil { + log.Info(err) + return ErrInvalidToken + } + + // Finally, verify the signature of the token using the key which signed it. + if err := signingKey.Verify(strings.NewReader(t.Raw), t.Header.SigningAlg, t.Signature); err != nil { + log.Infof("unable to verify token signature: %s", err) + return ErrInvalidToken + } + + return nil +} + +// VerifySigningKey attempts to get the key which was used to sign this token. +// The token header should contain either of these 3 fields: +// `x5c` - The x509 certificate chain for the signing key. Needs to be +// verified. +// `jwk` - The JSON Web Key representation of the signing key. +// May contain its own `x5c` field which needs to be verified. +// `kid` - The unique identifier for the key. This library interprets it +// as a libtrust fingerprint. The key itself can be looked up in +// the trustedKeys field of the given verify options. +// Each of these methods are tried in that order of preference until the +// signing key is found or an error is returned. +func (t *Token) VerifySigningKey(verifyOpts VerifyOptions) (signingKey libtrust.PublicKey, err error) { + // First attempt to get an x509 certificate chain from the header. + var ( + x5c = t.Header.X5c + rawJWK = t.Header.RawJWK + keyID = t.Header.KeyID + ) + + switch { + case len(x5c) > 0: + signingKey, err = parseAndVerifyCertChain(x5c, verifyOpts.Roots) + case rawJWK != nil: + signingKey, err = parseAndVerifyRawJWK(rawJWK, verifyOpts) + case len(keyID) > 0: + signingKey = verifyOpts.TrustedKeys[keyID] + if signingKey == nil { + err = fmt.Errorf("token signed by untrusted key with ID: %q", keyID) + } + default: + err = errors.New("unable to get token signing key") + } + + return +} + +func parseAndVerifyCertChain(x5c []string, roots *x509.CertPool) (leafKey libtrust.PublicKey, err error) { + if len(x5c) == 0 { + return nil, errors.New("empty x509 certificate chain") + } + + // Ensure the first element is encoded correctly. + leafCertDer, err := base64.StdEncoding.DecodeString(x5c[0]) + if err != nil { + return nil, fmt.Errorf("unable to decode leaf certificate: %s", err) + } + + // And that it is a valid x509 certificate. + leafCert, err := x509.ParseCertificate(leafCertDer) + if err != nil { + return nil, fmt.Errorf("unable to parse leaf certificate: %s", err) + } + + // The rest of the certificate chain are intermediate certificates. 
+ intermediates := x509.NewCertPool() + for i := 1; i < len(x5c); i++ { + intermediateCertDer, err := base64.StdEncoding.DecodeString(x5c[i]) + if err != nil { + return nil, fmt.Errorf("unable to decode intermediate certificate: %s", err) + } + + intermediateCert, err := x509.ParseCertificate(intermediateCertDer) + if err != nil { + return nil, fmt.Errorf("unable to parse intermediate certificate: %s", err) + } + + intermediates.AddCert(intermediateCert) + } + + verifyOpts := x509.VerifyOptions{ + Intermediates: intermediates, + Roots: roots, + KeyUsages: []x509.ExtKeyUsage{x509.ExtKeyUsageAny}, + } + + // TODO: this call returns certificate chains which we ignore for now, but + // we should check them for revocations if we have the ability later. + if _, err = leafCert.Verify(verifyOpts); err != nil { + return nil, fmt.Errorf("unable to verify certificate chain: %s", err) + } + + // Get the public key from the leaf certificate. + leafCryptoKey, ok := leafCert.PublicKey.(crypto.PublicKey) + if !ok { + return nil, errors.New("unable to get leaf cert public key value") + } + + leafKey, err = libtrust.FromCryptoPublicKey(leafCryptoKey) + if err != nil { + return nil, fmt.Errorf("unable to make libtrust public key from leaf certificate: %s", err) + } + + return +} + +func parseAndVerifyRawJWK(rawJWK *json.RawMessage, verifyOpts VerifyOptions) (pubKey libtrust.PublicKey, err error) { + pubKey, err = libtrust.UnmarshalPublicKeyJWK([]byte(*rawJWK)) + if err != nil { + return nil, fmt.Errorf("unable to decode raw JWK value: %s", err) + } + + // Check to see if the key includes a certificate chain. + x5cVal, ok := pubKey.GetExtendedField("x5c").([]interface{}) + if !ok { + // The JWK should be one of the trusted root keys. + if _, trusted := verifyOpts.TrustedKeys[pubKey.KeyID()]; !trusted { + return nil, errors.New("untrusted JWK with no certificate chain") + } + + // The JWK is one of the trusted keys. + return + } + + // Ensure each item in the chain is of the correct type. + x5c := make([]string, len(x5cVal)) + for i, val := range x5cVal { + certString, ok := val.(string) + if !ok || len(certString) == 0 { + return nil, errors.New("malformed certificate chain") + } + x5c[i] = certString + } + + // Ensure that the x509 certificate chain can + // be verified up to one of our trusted roots. + leafKey, err := parseAndVerifyCertChain(x5c, verifyOpts.Roots) + if err != nil { + return nil, fmt.Errorf("could not verify JWK certificate chain: %s", err) + } + + // Verify that the public key in the leaf cert *is* the signing key. + if pubKey.KeyID() != leafKey.KeyID() { + return nil, errors.New("leaf certificate public key ID does not match JWK key ID") + } + + return +} + +// accessSet returns a set of actions available for the resource +// actions listed in the `access` section of this token. 
+func (t *Token) accessSet() accessSet { + if t.Claims == nil { + return nil + } + + accessSet := make(accessSet, len(t.Claims.Access)) + + for _, resourceActions := range t.Claims.Access { + resource := auth.Resource{ + Type: resourceActions.Type, + Name: resourceActions.Name, + } + + set, exists := accessSet[resource] + if !exists { + set = newActionSet() + accessSet[resource] = set + } + + for _, action := range resourceActions.Actions { + set.add(action) + } + } + + return accessSet +} + +func (t *Token) resources() []auth.Resource { + if t.Claims == nil { + return nil + } + + resourceSet := map[auth.Resource]struct{}{} + for _, resourceActions := range t.Claims.Access { + resource := auth.Resource{ + Type: resourceActions.Type, + Class: resourceActions.Class, + Name: resourceActions.Name, + } + resourceSet[resource] = struct{}{} + } + + resources := make([]auth.Resource, 0, len(resourceSet)) + for resource := range resourceSet { + resources = append(resources, resource) + } + + return resources +} + +func (t *Token) compactRaw() string { + return fmt.Sprintf("%s.%s", t.Raw, joseBase64UrlEncode(t.Signature)) +} diff --git a/vendor/github.com/docker/distribution/registry/auth/token/token_test.go b/vendor/github.com/docker/distribution/registry/auth/token/token_test.go new file mode 100644 index 000000000..cbfe2a6b4 --- /dev/null +++ b/vendor/github.com/docker/distribution/registry/auth/token/token_test.go @@ -0,0 +1,510 @@ +package token + +import ( + "crypto" + "crypto/rand" + "crypto/x509" + "encoding/base64" + "encoding/json" + "encoding/pem" + "fmt" + "io/ioutil" + "net/http" + "os" + "strings" + "testing" + "time" + + "github.com/docker/distribution/context" + "github.com/docker/distribution/registry/auth" + "github.com/docker/libtrust" +) + +func makeRootKeys(numKeys int) ([]libtrust.PrivateKey, error) { + keys := make([]libtrust.PrivateKey, 0, numKeys) + + for i := 0; i < numKeys; i++ { + key, err := libtrust.GenerateECP256PrivateKey() + if err != nil { + return nil, err + } + keys = append(keys, key) + } + + return keys, nil +} + +func makeSigningKeyWithChain(rootKey libtrust.PrivateKey, depth int) (libtrust.PrivateKey, error) { + if depth == 0 { + // Don't need to build a chain. 
+ return rootKey, nil + } + + var ( + x5c = make([]string, depth) + parentKey = rootKey + key libtrust.PrivateKey + cert *x509.Certificate + err error + ) + + for depth > 0 { + if key, err = libtrust.GenerateECP256PrivateKey(); err != nil { + return nil, err + } + + if cert, err = libtrust.GenerateCACert(parentKey, key); err != nil { + return nil, err + } + + depth-- + x5c[depth] = base64.StdEncoding.EncodeToString(cert.Raw) + parentKey = key + } + + key.AddExtendedField("x5c", x5c) + + return key, nil +} + +func makeRootCerts(rootKeys []libtrust.PrivateKey) ([]*x509.Certificate, error) { + certs := make([]*x509.Certificate, 0, len(rootKeys)) + + for _, key := range rootKeys { + cert, err := libtrust.GenerateCACert(key, key) + if err != nil { + return nil, err + } + certs = append(certs, cert) + } + + return certs, nil +} + +func makeTrustedKeyMap(rootKeys []libtrust.PrivateKey) map[string]libtrust.PublicKey { + trustedKeys := make(map[string]libtrust.PublicKey, len(rootKeys)) + + for _, key := range rootKeys { + trustedKeys[key.KeyID()] = key.PublicKey() + } + + return trustedKeys +} + +func makeTestToken(issuer, audience string, access []*ResourceActions, rootKey libtrust.PrivateKey, depth int, now time.Time, exp time.Time) (*Token, error) { + signingKey, err := makeSigningKeyWithChain(rootKey, depth) + if err != nil { + return nil, fmt.Errorf("unable to make signing key with chain: %s", err) + } + + var rawJWK json.RawMessage + rawJWK, err = signingKey.PublicKey().MarshalJSON() + if err != nil { + return nil, fmt.Errorf("unable to marshal signing key to JSON: %s", err) + } + + joseHeader := &Header{ + Type: "JWT", + SigningAlg: "ES256", + RawJWK: &rawJWK, + } + + randomBytes := make([]byte, 15) + if _, err = rand.Read(randomBytes); err != nil { + return nil, fmt.Errorf("unable to read random bytes for jwt id: %s", err) + } + + claimSet := &ClaimSet{ + Issuer: issuer, + Subject: "foo", + Audience: audience, + Expiration: exp.Unix(), + NotBefore: now.Unix(), + IssuedAt: now.Unix(), + JWTID: base64.URLEncoding.EncodeToString(randomBytes), + Access: access, + } + + var joseHeaderBytes, claimSetBytes []byte + + if joseHeaderBytes, err = json.Marshal(joseHeader); err != nil { + return nil, fmt.Errorf("unable to marshal jose header: %s", err) + } + if claimSetBytes, err = json.Marshal(claimSet); err != nil { + return nil, fmt.Errorf("unable to marshal claim set: %s", err) + } + + encodedJoseHeader := joseBase64UrlEncode(joseHeaderBytes) + encodedClaimSet := joseBase64UrlEncode(claimSetBytes) + encodingToSign := fmt.Sprintf("%s.%s", encodedJoseHeader, encodedClaimSet) + + var signatureBytes []byte + if signatureBytes, _, err = signingKey.Sign(strings.NewReader(encodingToSign), crypto.SHA256); err != nil { + return nil, fmt.Errorf("unable to sign jwt payload: %s", err) + } + + signature := joseBase64UrlEncode(signatureBytes) + tokenString := fmt.Sprintf("%s.%s", encodingToSign, signature) + + return NewToken(tokenString) +} + +// This test makes 4 tokens with a varying number of intermediate +// certificates ranging from no intermediate chain to a length of 3 +// intermediates. 
+func TestTokenVerify(t *testing.T) { + var ( + numTokens = 4 + issuer = "test-issuer" + audience = "test-audience" + access = []*ResourceActions{ + { + Type: "repository", + Name: "foo/bar", + Actions: []string{"pull", "push"}, + }, + } + ) + + rootKeys, err := makeRootKeys(numTokens) + if err != nil { + t.Fatal(err) + } + + rootCerts, err := makeRootCerts(rootKeys) + if err != nil { + t.Fatal(err) + } + + rootPool := x509.NewCertPool() + for _, rootCert := range rootCerts { + rootPool.AddCert(rootCert) + } + + trustedKeys := makeTrustedKeyMap(rootKeys) + + tokens := make([]*Token, 0, numTokens) + + for i := 0; i < numTokens; i++ { + token, err := makeTestToken(issuer, audience, access, rootKeys[i], i, time.Now(), time.Now().Add(5*time.Minute)) + if err != nil { + t.Fatal(err) + } + tokens = append(tokens, token) + } + + verifyOps := VerifyOptions{ + TrustedIssuers: []string{issuer}, + AcceptedAudiences: []string{audience}, + Roots: rootPool, + TrustedKeys: trustedKeys, + } + + for _, token := range tokens { + if err := token.Verify(verifyOps); err != nil { + t.Fatal(err) + } + } +} + +// This tests that we don't fail tokens with nbf within +// the defined leeway in seconds +func TestLeeway(t *testing.T) { + var ( + issuer = "test-issuer" + audience = "test-audience" + access = []*ResourceActions{ + { + Type: "repository", + Name: "foo/bar", + Actions: []string{"pull", "push"}, + }, + } + ) + + rootKeys, err := makeRootKeys(1) + if err != nil { + t.Fatal(err) + } + + trustedKeys := makeTrustedKeyMap(rootKeys) + + verifyOps := VerifyOptions{ + TrustedIssuers: []string{issuer}, + AcceptedAudiences: []string{audience}, + Roots: nil, + TrustedKeys: trustedKeys, + } + + // nbf verification should pass within leeway + futureNow := time.Now().Add(time.Duration(5) * time.Second) + token, err := makeTestToken(issuer, audience, access, rootKeys[0], 0, futureNow, futureNow.Add(5*time.Minute)) + if err != nil { + t.Fatal(err) + } + + if err := token.Verify(verifyOps); err != nil { + t.Fatal(err) + } + + // nbf verification should fail with a skew larger than leeway + futureNow = time.Now().Add(time.Duration(61) * time.Second) + token, err = makeTestToken(issuer, audience, access, rootKeys[0], 0, futureNow, futureNow.Add(5*time.Minute)) + if err != nil { + t.Fatal(err) + } + + if err = token.Verify(verifyOps); err == nil { + t.Fatal("Verification should fail for token with nbf in the future outside leeway") + } + + // exp verification should pass within leeway + token, err = makeTestToken(issuer, audience, access, rootKeys[0], 0, time.Now(), time.Now().Add(-59*time.Second)) + if err != nil { + t.Fatal(err) + } + + if err = token.Verify(verifyOps); err != nil { + t.Fatal(err) + } + + // exp verification should fail with a skew larger than leeway + token, err = makeTestToken(issuer, audience, access, rootKeys[0], 0, time.Now(), time.Now().Add(-60*time.Second)) + if err != nil { + t.Fatal(err) + } + + if err = token.Verify(verifyOps); err == nil { + t.Fatal("Verification should fail for token with exp in the future outside leeway") + } +} + +func writeTempRootCerts(rootKeys []libtrust.PrivateKey) (filename string, err error) { + rootCerts, err := makeRootCerts(rootKeys) + if err != nil { + return "", err + } + + tempFile, err := ioutil.TempFile("", "rootCertBundle") + if err != nil { + return "", err + } + defer tempFile.Close() + + for _, cert := range rootCerts { + if err = pem.Encode(tempFile, &pem.Block{ + Type: "CERTIFICATE", + Bytes: cert.Raw, + }); err != nil { + os.Remove(tempFile.Name()) + 
return "", err + } + } + + return tempFile.Name(), nil +} + +// TestAccessController tests complete integration of the token auth package. +// It starts by mocking the options for a token auth accessController which +// it creates. It then tries a few mock requests: +// - don't supply a token; should error with challenge +// - supply an invalid token; should error with challenge +// - supply a token with insufficient access; should error with challenge +// - supply a valid token; should not error +func TestAccessController(t *testing.T) { + // Make 2 keys; only the first is to be a trusted root key. + rootKeys, err := makeRootKeys(2) + if err != nil { + t.Fatal(err) + } + + rootCertBundleFilename, err := writeTempRootCerts(rootKeys[:1]) + if err != nil { + t.Fatal(err) + } + defer os.Remove(rootCertBundleFilename) + + realm := "https://auth.example.com/token/" + issuer := "test-issuer.example.com" + service := "test-service.example.com" + + options := map[string]interface{}{ + "realm": realm, + "issuer": issuer, + "service": service, + "rootcertbundle": rootCertBundleFilename, + } + + accessController, err := newAccessController(options) + if err != nil { + t.Fatal(err) + } + + // 1. Make a mock http.Request with no token. + req, err := http.NewRequest("GET", "http://example.com/foo", nil) + if err != nil { + t.Fatal(err) + } + + testAccess := auth.Access{ + Resource: auth.Resource{ + Type: "foo", + Name: "bar", + }, + Action: "baz", + } + + ctx := context.WithRequest(context.Background(), req) + authCtx, err := accessController.Authorized(ctx, testAccess) + challenge, ok := err.(auth.Challenge) + if !ok { + t.Fatal("accessController did not return a challenge") + } + + if challenge.Error() != ErrTokenRequired.Error() { + t.Fatalf("accessControler did not get expected error - got %s - expected %s", challenge, ErrTokenRequired) + } + + if authCtx != nil { + t.Fatalf("expected nil auth context but got %s", authCtx) + } + + // 2. Supply an invalid token. + token, err := makeTestToken( + issuer, service, + []*ResourceActions{{ + Type: testAccess.Type, + Name: testAccess.Name, + Actions: []string{testAccess.Action}, + }}, + rootKeys[1], 1, time.Now(), time.Now().Add(5*time.Minute), // Everything is valid except the key which signed it. + ) + if err != nil { + t.Fatal(err) + } + + req.Header.Set("Authorization", fmt.Sprintf("Bearer %s", token.compactRaw())) + + authCtx, err = accessController.Authorized(ctx, testAccess) + challenge, ok = err.(auth.Challenge) + if !ok { + t.Fatal("accessController did not return a challenge") + } + + if challenge.Error() != ErrInvalidToken.Error() { + t.Fatalf("accessControler did not get expected error - got %s - expected %s", challenge, ErrTokenRequired) + } + + if authCtx != nil { + t.Fatalf("expected nil auth context but got %s", authCtx) + } + + // 3. Supply a token with insufficient access. + token, err = makeTestToken( + issuer, service, + []*ResourceActions{}, // No access specified. 
+ rootKeys[0], 1, time.Now(), time.Now().Add(5*time.Minute), + ) + if err != nil { + t.Fatal(err) + } + + req.Header.Set("Authorization", fmt.Sprintf("Bearer %s", token.compactRaw())) + + authCtx, err = accessController.Authorized(ctx, testAccess) + challenge, ok = err.(auth.Challenge) + if !ok { + t.Fatal("accessController did not return a challenge") + } + + if challenge.Error() != ErrInsufficientScope.Error() { + t.Fatalf("accessControler did not get expected error - got %s - expected %s", challenge, ErrInsufficientScope) + } + + if authCtx != nil { + t.Fatalf("expected nil auth context but got %s", authCtx) + } + + // 4. Supply the token we need, or deserve, or whatever. + token, err = makeTestToken( + issuer, service, + []*ResourceActions{{ + Type: testAccess.Type, + Name: testAccess.Name, + Actions: []string{testAccess.Action}, + }}, + rootKeys[0], 1, time.Now(), time.Now().Add(5*time.Minute), + ) + if err != nil { + t.Fatal(err) + } + + req.Header.Set("Authorization", fmt.Sprintf("Bearer %s", token.compactRaw())) + + authCtx, err = accessController.Authorized(ctx, testAccess) + if err != nil { + t.Fatalf("accessController returned unexpected error: %s", err) + } + + userInfo, ok := authCtx.Value(auth.UserKey).(auth.UserInfo) + if !ok { + t.Fatal("token accessController did not set auth.user context") + } + + if userInfo.Name != "foo" { + t.Fatalf("expected user name %q, got %q", "foo", userInfo.Name) + } +} + +// This tests that newAccessController can handle PEM blocks in the certificate +// file other than certificates, for example a private key. +func TestNewAccessControllerPemBlock(t *testing.T) { + rootKeys, err := makeRootKeys(2) + if err != nil { + t.Fatal(err) + } + + rootCertBundleFilename, err := writeTempRootCerts(rootKeys) + if err != nil { + t.Fatal(err) + } + defer os.Remove(rootCertBundleFilename) + + // Add something other than a certificate to the rootcertbundle + file, err := os.OpenFile(rootCertBundleFilename, os.O_WRONLY|os.O_APPEND, 0666) + if err != nil { + t.Fatal(err) + } + keyBlock, err := rootKeys[0].PEMBlock() + if err != nil { + t.Fatal(err) + } + err = pem.Encode(file, keyBlock) + if err != nil { + t.Fatal(err) + } + err = file.Close() + if err != nil { + t.Fatal(err) + } + + realm := "https://auth.example.com/token/" + issuer := "test-issuer.example.com" + service := "test-service.example.com" + + options := map[string]interface{}{ + "realm": realm, + "issuer": issuer, + "service": service, + "rootcertbundle": rootCertBundleFilename, + } + + ac, err := newAccessController(options) + if err != nil { + t.Fatal(err) + } + + if len(ac.(*accessController).rootCerts.Subjects()) != 2 { + t.Fatal("accessController has the wrong number of certificates") + } +} diff --git a/vendor/github.com/docker/distribution/registry/auth/token/util.go b/vendor/github.com/docker/distribution/registry/auth/token/util.go new file mode 100644 index 000000000..d7f95be42 --- /dev/null +++ b/vendor/github.com/docker/distribution/registry/auth/token/util.go @@ -0,0 +1,58 @@ +package token + +import ( + "encoding/base64" + "errors" + "strings" +) + +// joseBase64UrlEncode encodes the given data using the standard base64 url +// encoding format but with all trailing '=' characters omitted in accordance +// with the jose specification. 
+// http://tools.ietf.org/html/draft-ietf-jose-json-web-signature-31#section-2 +func joseBase64UrlEncode(b []byte) string { + return strings.TrimRight(base64.URLEncoding.EncodeToString(b), "=") +} + +// joseBase64UrlDecode decodes the given string using the standard base64 url +// decoder but first adds the appropriate number of trailing '=' characters in +// accordance with the jose specification. +// http://tools.ietf.org/html/draft-ietf-jose-json-web-signature-31#section-2 +func joseBase64UrlDecode(s string) ([]byte, error) { + switch len(s) % 4 { + case 0: + case 2: + s += "==" + case 3: + s += "=" + default: + return nil, errors.New("illegal base64url string") + } + return base64.URLEncoding.DecodeString(s) +} + +// actionSet is a special type of stringSet. +type actionSet struct { + stringSet +} + +func newActionSet(actions ...string) actionSet { + return actionSet{newStringSet(actions...)} +} + +// Contains calls StringSet.Contains() for +// either "*" or the given action string. +func (s actionSet) contains(action string) bool { + return s.stringSet.contains("*") || s.stringSet.contains(action) +} + +// contains returns true if q is found in ss. +func contains(ss []string, q string) bool { + for _, s := range ss { + if s == q { + return true + } + } + + return false +} diff --git a/vendor/github.com/docker/distribution/registry/client/auth/api_version.go b/vendor/github.com/docker/distribution/registry/client/auth/api_version.go new file mode 100644 index 000000000..7d8f1d957 --- /dev/null +++ b/vendor/github.com/docker/distribution/registry/client/auth/api_version.go @@ -0,0 +1,58 @@ +package auth + +import ( + "net/http" + "strings" +) + +// APIVersion represents a version of an API including its +// type and version number. +type APIVersion struct { + // Type refers to the name of a specific API specification + // such as "registry" + Type string + + // Version is the version of the API specification implemented, + // This may omit the revision number and only include + // the major and minor version, such as "2.0" + Version string +} + +// String returns the string formatted API Version +func (v APIVersion) String() string { + return v.Type + "/" + v.Version +} + +// APIVersions gets the API versions out of an HTTP response using the provided +// version header as the key for the HTTP header. +func APIVersions(resp *http.Response, versionHeader string) []APIVersion { + versions := []APIVersion{} + if versionHeader != "" { + for _, supportedVersions := range resp.Header[http.CanonicalHeaderKey(versionHeader)] { + for _, version := range strings.Fields(supportedVersions) { + versions = append(versions, ParseAPIVersion(version)) + } + } + } + return versions +} + +// ParseAPIVersion parses an API version string into an APIVersion +// Format (Expected, not enforced): +// API version string = '/' +// API type = [a-z][a-z0-9]* +// API version = [0-9]+(\.[0-9]+)? 
+// TODO(dmcgowan): Enforce format, add error condition, remove unknown type +func ParseAPIVersion(versionStr string) APIVersion { + idx := strings.IndexRune(versionStr, '/') + if idx == -1 { + return APIVersion{ + Type: "unknown", + Version: versionStr, + } + } + return APIVersion{ + Type: strings.ToLower(versionStr[:idx]), + Version: versionStr[idx+1:], + } +} diff --git a/vendor/github.com/docker/distribution/registry/client/auth/challenge/addr.go b/vendor/github.com/docker/distribution/registry/client/auth/challenge/addr.go new file mode 100644 index 000000000..2c3ebe165 --- /dev/null +++ b/vendor/github.com/docker/distribution/registry/client/auth/challenge/addr.go @@ -0,0 +1,27 @@ +package challenge + +import ( + "net/url" + "strings" +) + +// FROM: https://golang.org/src/net/http/http.go +// Given a string of the form "host", "host:port", or "[ipv6::address]:port", +// return true if the string includes a port. +func hasPort(s string) bool { return strings.LastIndex(s, ":") > strings.LastIndex(s, "]") } + +// FROM: http://golang.org/src/net/http/transport.go +var portMap = map[string]string{ + "http": "80", + "https": "443", +} + +// canonicalAddr returns url.Host but always with a ":port" suffix +// FROM: http://golang.org/src/net/http/transport.go +func canonicalAddr(url *url.URL) string { + addr := url.Host + if !hasPort(addr) { + return addr + ":" + portMap[url.Scheme] + } + return addr +} diff --git a/vendor/github.com/docker/distribution/registry/client/auth/challenge/authchallenge.go b/vendor/github.com/docker/distribution/registry/client/auth/challenge/authchallenge.go new file mode 100644 index 000000000..c9bdfc355 --- /dev/null +++ b/vendor/github.com/docker/distribution/registry/client/auth/challenge/authchallenge.go @@ -0,0 +1,237 @@ +package challenge + +import ( + "fmt" + "net/http" + "net/url" + "strings" + "sync" +) + +// Challenge carries information from a WWW-Authenticate response header. +// See RFC 2617. +type Challenge struct { + // Scheme is the auth-scheme according to RFC 2617 + Scheme string + + // Parameters are the auth-params according to RFC 2617 + Parameters map[string]string +} + +// Manager manages the challenges for endpoints. +// The challenges are pulled out of HTTP responses. Only +// responses which expect challenges should be added to +// the manager, since a non-unauthorized request will be +// viewed as not requiring challenges. +type Manager interface { + // GetChallenges returns the challenges for the given + // endpoint URL. + GetChallenges(endpoint url.URL) ([]Challenge, error) + + // AddResponse adds the response to the challenge + // manager. The challenges will be parsed out of + // the WWW-Authenicate headers and added to the + // URL which was produced the response. If the + // response was authorized, any challenges for the + // endpoint will be cleared. + AddResponse(resp *http.Response) error +} + +// NewSimpleManager returns an instance of +// Manger which only maps endpoints to challenges +// based on the responses which have been added the +// manager. The simple manager will make no attempt to +// perform requests on the endpoints or cache the responses +// to a backend. 
+func NewSimpleManager() Manager { + return &simpleManager{ + Challanges: make(map[string][]Challenge), + } +} + +type simpleManager struct { + sync.RWMutex + Challanges map[string][]Challenge +} + +func normalizeURL(endpoint *url.URL) { + endpoint.Host = strings.ToLower(endpoint.Host) + endpoint.Host = canonicalAddr(endpoint) +} + +func (m *simpleManager) GetChallenges(endpoint url.URL) ([]Challenge, error) { + normalizeURL(&endpoint) + + m.RLock() + defer m.RUnlock() + challenges := m.Challanges[endpoint.String()] + return challenges, nil +} + +func (m *simpleManager) AddResponse(resp *http.Response) error { + challenges := ResponseChallenges(resp) + if resp.Request == nil { + return fmt.Errorf("missing request reference") + } + urlCopy := url.URL{ + Path: resp.Request.URL.Path, + Host: resp.Request.URL.Host, + Scheme: resp.Request.URL.Scheme, + } + normalizeURL(&urlCopy) + + m.Lock() + defer m.Unlock() + m.Challanges[urlCopy.String()] = challenges + return nil +} + +// Octet types from RFC 2616. +type octetType byte + +var octetTypes [256]octetType + +const ( + isToken octetType = 1 << iota + isSpace +) + +func init() { + // OCTET = + // CHAR = + // CTL = + // CR = + // LF = + // SP = + // HT = + // <"> = + // CRLF = CR LF + // LWS = [CRLF] 1*( SP | HT ) + // TEXT = + // separators = "(" | ")" | "<" | ">" | "@" | "," | ";" | ":" | "\" | <"> + // | "/" | "[" | "]" | "?" | "=" | "{" | "}" | SP | HT + // token = 1* + // qdtext = > + + for c := 0; c < 256; c++ { + var t octetType + isCtl := c <= 31 || c == 127 + isChar := 0 <= c && c <= 127 + isSeparator := strings.IndexRune(" \t\"(),/:;<=>?@[]\\{}", rune(c)) >= 0 + if strings.IndexRune(" \t\r\n", rune(c)) >= 0 { + t |= isSpace + } + if isChar && !isCtl && !isSeparator { + t |= isToken + } + octetTypes[c] = t + } +} + +// ResponseChallenges returns a list of authorization challenges +// for the given http Response. Challenges are only checked if +// the response status code was a 401. +func ResponseChallenges(resp *http.Response) []Challenge { + if resp.StatusCode == http.StatusUnauthorized { + // Parse the WWW-Authenticate Header and store the challenges + // on this endpoint object. 
+ return parseAuthHeader(resp.Header) + } + + return nil +} + +func parseAuthHeader(header http.Header) []Challenge { + challenges := []Challenge{} + for _, h := range header[http.CanonicalHeaderKey("WWW-Authenticate")] { + v, p := parseValueAndParams(h) + if v != "" { + challenges = append(challenges, Challenge{Scheme: v, Parameters: p}) + } + } + return challenges +} + +func parseValueAndParams(header string) (value string, params map[string]string) { + params = make(map[string]string) + value, s := expectToken(header) + if value == "" { + return + } + value = strings.ToLower(value) + s = "," + skipSpace(s) + for strings.HasPrefix(s, ",") { + var pkey string + pkey, s = expectToken(skipSpace(s[1:])) + if pkey == "" { + return + } + if !strings.HasPrefix(s, "=") { + return + } + var pvalue string + pvalue, s = expectTokenOrQuoted(s[1:]) + if pvalue == "" { + return + } + pkey = strings.ToLower(pkey) + params[pkey] = pvalue + s = skipSpace(s) + } + return +} + +func skipSpace(s string) (rest string) { + i := 0 + for ; i < len(s); i++ { + if octetTypes[s[i]]&isSpace == 0 { + break + } + } + return s[i:] +} + +func expectToken(s string) (token, rest string) { + i := 0 + for ; i < len(s); i++ { + if octetTypes[s[i]]&isToken == 0 { + break + } + } + return s[:i], s[i:] +} + +func expectTokenOrQuoted(s string) (value string, rest string) { + if !strings.HasPrefix(s, "\"") { + return expectToken(s) + } + s = s[1:] + for i := 0; i < len(s); i++ { + switch s[i] { + case '"': + return s[:i], s[i+1:] + case '\\': + p := make([]byte, len(s)-1) + j := copy(p, s[:i]) + escape := true + for i = i + 1; i < len(s); i++ { + b := s[i] + switch { + case escape: + escape = false + p[j] = b + j++ + case b == '\\': + escape = true + case b == '"': + return string(p[:j]), s[i+1:] + default: + p[j] = b + j++ + } + } + return "", "" + } + } + return "", "" +} diff --git a/vendor/github.com/docker/distribution/registry/client/auth/challenge/authchallenge_test.go b/vendor/github.com/docker/distribution/registry/client/auth/challenge/authchallenge_test.go new file mode 100644 index 000000000..d4986b39e --- /dev/null +++ b/vendor/github.com/docker/distribution/registry/client/auth/challenge/authchallenge_test.go @@ -0,0 +1,127 @@ +package challenge + +import ( + "fmt" + "net/http" + "net/url" + "strings" + "sync" + "testing" +) + +func TestAuthChallengeParse(t *testing.T) { + header := http.Header{} + header.Add("WWW-Authenticate", `Bearer realm="https://auth.example.com/token",service="registry.example.com",other=fun,slashed="he\"\l\lo"`) + + challenges := parseAuthHeader(header) + if len(challenges) != 1 { + t.Fatalf("Unexpected number of auth challenges: %d, expected 1", len(challenges)) + } + challenge := challenges[0] + + if expected := "bearer"; challenge.Scheme != expected { + t.Fatalf("Unexpected scheme: %s, expected: %s", challenge.Scheme, expected) + } + + if expected := "https://auth.example.com/token"; challenge.Parameters["realm"] != expected { + t.Fatalf("Unexpected param: %s, expected: %s", challenge.Parameters["realm"], expected) + } + + if expected := "registry.example.com"; challenge.Parameters["service"] != expected { + t.Fatalf("Unexpected param: %s, expected: %s", challenge.Parameters["service"], expected) + } + + if expected := "fun"; challenge.Parameters["other"] != expected { + t.Fatalf("Unexpected param: %s, expected: %s", challenge.Parameters["other"], expected) + } + + if expected := "he\"llo"; challenge.Parameters["slashed"] != expected { + t.Fatalf("Unexpected param: %s, expected: %s", 
challenge.Parameters["slashed"], expected) + } + +} + +func TestAuthChallengeNormalization(t *testing.T) { + testAuthChallengeNormalization(t, "reg.EXAMPLE.com") + testAuthChallengeNormalization(t, "bɿɒʜɔiɿ-ɿɘƚƨim-ƚol-ɒ-ƨʞnɒʜƚ.com") + testAuthChallengeNormalization(t, "reg.example.com:80") + testAuthChallengeConcurrent(t, "reg.EXAMPLE.com") +} + +func testAuthChallengeNormalization(t *testing.T, host string) { + + scm := NewSimpleManager() + + url, err := url.Parse(fmt.Sprintf("http://%s/v2/", host)) + if err != nil { + t.Fatal(err) + } + + resp := &http.Response{ + Request: &http.Request{ + URL: url, + }, + Header: make(http.Header), + StatusCode: http.StatusUnauthorized, + } + resp.Header.Add("WWW-Authenticate", fmt.Sprintf("Bearer realm=\"https://%s/token\",service=\"registry.example.com\"", host)) + + err = scm.AddResponse(resp) + if err != nil { + t.Fatal(err) + } + + lowered := *url + lowered.Host = strings.ToLower(lowered.Host) + lowered.Host = canonicalAddr(&lowered) + c, err := scm.GetChallenges(lowered) + if err != nil { + t.Fatal(err) + } + + if len(c) == 0 { + t.Fatal("Expected challenge for lower-cased-host URL") + } +} + +func testAuthChallengeConcurrent(t *testing.T, host string) { + + scm := NewSimpleManager() + + url, err := url.Parse(fmt.Sprintf("http://%s/v2/", host)) + if err != nil { + t.Fatal(err) + } + + resp := &http.Response{ + Request: &http.Request{ + URL: url, + }, + Header: make(http.Header), + StatusCode: http.StatusUnauthorized, + } + resp.Header.Add("WWW-Authenticate", fmt.Sprintf("Bearer realm=\"https://%s/token\",service=\"registry.example.com\"", host)) + var s sync.WaitGroup + s.Add(2) + go func() { + defer s.Done() + for i := 0; i < 200; i++ { + err = scm.AddResponse(resp) + if err != nil { + t.Error(err) + } + } + }() + go func() { + defer s.Done() + lowered := *url + lowered.Host = strings.ToLower(lowered.Host) + for k := 0; k < 200; k++ { + _, err := scm.GetChallenges(lowered) + if err != nil { + t.Error(err) + } + } + }() + s.Wait() +} diff --git a/vendor/github.com/docker/distribution/registry/client/auth/session.go b/vendor/github.com/docker/distribution/registry/client/auth/session.go new file mode 100644 index 000000000..d6d884ffd --- /dev/null +++ b/vendor/github.com/docker/distribution/registry/client/auth/session.go @@ -0,0 +1,503 @@ +package auth + +import ( + "encoding/json" + "errors" + "fmt" + "net/http" + "net/url" + "strings" + "sync" + "time" + + "github.com/Sirupsen/logrus" + "github.com/docker/distribution/registry/client" + "github.com/docker/distribution/registry/client/auth/challenge" + "github.com/docker/distribution/registry/client/transport" +) + +var ( + // ErrNoBasicAuthCredentials is returned if a request can't be authorized with + // basic auth due to lack of credentials. + ErrNoBasicAuthCredentials = errors.New("no basic auth credentials") + + // ErrNoToken is returned if a request is successful but the body does not + // contain an authorization token. + ErrNoToken = errors.New("authorization server did not include a token in the response") +) + +const defaultClientID = "registry-client" + +// AuthenticationHandler is an interface for authorizing a request from +// params from a "WWW-Authenicate" header for a single scheme. +type AuthenticationHandler interface { + // Scheme returns the scheme as expected from the "WWW-Authenicate" header. + Scheme() string + + // AuthorizeRequest adds the authorization header to a request (if needed) + // using the parameters from "WWW-Authenticate" method. 
The parameters + // values depend on the scheme. + AuthorizeRequest(req *http.Request, params map[string]string) error +} + +// CredentialStore is an interface for getting credentials for +// a given URL +type CredentialStore interface { + // Basic returns basic auth for the given URL + Basic(*url.URL) (string, string) + + // RefreshToken returns a refresh token for the + // given URL and service + RefreshToken(*url.URL, string) string + + // SetRefreshToken sets the refresh token if none + // is provided for the given url and service + SetRefreshToken(realm *url.URL, service, token string) +} + +// NewAuthorizer creates an authorizer which can handle multiple authentication +// schemes. The handlers are tried in order, the higher priority authentication +// methods should be first. The challengeMap holds a list of challenges for +// a given root API endpoint (for example "https://registry-1.docker.io/v2/"). +func NewAuthorizer(manager challenge.Manager, handlers ...AuthenticationHandler) transport.RequestModifier { + return &endpointAuthorizer{ + challenges: manager, + handlers: handlers, + } +} + +type endpointAuthorizer struct { + challenges challenge.Manager + handlers []AuthenticationHandler + transport http.RoundTripper +} + +func (ea *endpointAuthorizer) ModifyRequest(req *http.Request) error { + pingPath := req.URL.Path + if v2Root := strings.Index(req.URL.Path, "/v2/"); v2Root != -1 { + pingPath = pingPath[:v2Root+4] + } else if v1Root := strings.Index(req.URL.Path, "/v1/"); v1Root != -1 { + pingPath = pingPath[:v1Root] + "/v2/" + } else { + return nil + } + + ping := url.URL{ + Host: req.URL.Host, + Scheme: req.URL.Scheme, + Path: pingPath, + } + + challenges, err := ea.challenges.GetChallenges(ping) + if err != nil { + return err + } + + if len(challenges) > 0 { + for _, handler := range ea.handlers { + for _, c := range challenges { + if c.Scheme != handler.Scheme() { + continue + } + if err := handler.AuthorizeRequest(req, c.Parameters); err != nil { + return err + } + } + } + } + + return nil +} + +// This is the minimum duration a token can last (in seconds). +// A token must not live less than 60 seconds because older versions +// of the Docker client didn't read their expiration from the token +// response and assumed 60 seconds. So to remain compatible with +// those implementations, a token must live at least this long. +const minimumTokenLifetimeSeconds = 60 + +// Private interface for time used by this package to enable tests to provide their own implementation. +type clock interface { + Now() time.Time +} + +type tokenHandler struct { + header http.Header + creds CredentialStore + transport http.RoundTripper + clock clock + + offlineAccess bool + forceOAuth bool + clientID string + scopes []Scope + + tokenLock sync.Mutex + tokenCache string + tokenExpiration time.Time +} + +// Scope is a type which is serializable to a string +// using the allow scope grammar. +type Scope interface { + String() string +} + +// RepositoryScope represents a token scope for access +// to a repository. 
+type RepositoryScope struct { + Repository string + Class string + Actions []string +} + +// String returns the string representation of the repository +// using the scope grammar +func (rs RepositoryScope) String() string { + repoType := "repository" + if rs.Class != "" { + repoType = fmt.Sprintf("%s(%s)", repoType, rs.Class) + } + return fmt.Sprintf("%s:%s:%s", repoType, rs.Repository, strings.Join(rs.Actions, ",")) +} + +// RegistryScope represents a token scope for access +// to resources in the registry. +type RegistryScope struct { + Name string + Actions []string +} + +// String returns the string representation of the user +// using the scope grammar +func (rs RegistryScope) String() string { + return fmt.Sprintf("registry:%s:%s", rs.Name, strings.Join(rs.Actions, ",")) +} + +// TokenHandlerOptions is used to configure a new token handler +type TokenHandlerOptions struct { + Transport http.RoundTripper + Credentials CredentialStore + + OfflineAccess bool + ForceOAuth bool + ClientID string + Scopes []Scope +} + +// An implementation of clock for providing real time data. +type realClock struct{} + +// Now implements clock +func (realClock) Now() time.Time { return time.Now() } + +// NewTokenHandler creates a new AuthenicationHandler which supports +// fetching tokens from a remote token server. +func NewTokenHandler(transport http.RoundTripper, creds CredentialStore, scope string, actions ...string) AuthenticationHandler { + // Create options... + return NewTokenHandlerWithOptions(TokenHandlerOptions{ + Transport: transport, + Credentials: creds, + Scopes: []Scope{ + RepositoryScope{ + Repository: scope, + Actions: actions, + }, + }, + }) +} + +// NewTokenHandlerWithOptions creates a new token handler using the provided +// options structure. +func NewTokenHandlerWithOptions(options TokenHandlerOptions) AuthenticationHandler { + handler := &tokenHandler{ + transport: options.Transport, + creds: options.Credentials, + offlineAccess: options.OfflineAccess, + forceOAuth: options.ForceOAuth, + clientID: options.ClientID, + scopes: options.Scopes, + clock: realClock{}, + } + + return handler +} + +func (th *tokenHandler) client() *http.Client { + return &http.Client{ + Transport: th.transport, + Timeout: 15 * time.Second, + } +} + +func (th *tokenHandler) Scheme() string { + return "bearer" +} + +func (th *tokenHandler) AuthorizeRequest(req *http.Request, params map[string]string) error { + var additionalScopes []string + if fromParam := req.URL.Query().Get("from"); fromParam != "" { + additionalScopes = append(additionalScopes, RepositoryScope{ + Repository: fromParam, + Actions: []string{"pull"}, + }.String()) + } + + token, err := th.getToken(params, additionalScopes...) 
+ if err != nil { + return err + } + + req.Header.Set("Authorization", fmt.Sprintf("Bearer %s", token)) + + return nil +} + +func (th *tokenHandler) getToken(params map[string]string, additionalScopes ...string) (string, error) { + th.tokenLock.Lock() + defer th.tokenLock.Unlock() + scopes := make([]string, 0, len(th.scopes)+len(additionalScopes)) + for _, scope := range th.scopes { + scopes = append(scopes, scope.String()) + } + var addedScopes bool + for _, scope := range additionalScopes { + scopes = append(scopes, scope) + addedScopes = true + } + + now := th.clock.Now() + if now.After(th.tokenExpiration) || addedScopes { + token, expiration, err := th.fetchToken(params, scopes) + if err != nil { + return "", err + } + + // do not update cache for added scope tokens + if !addedScopes { + th.tokenCache = token + th.tokenExpiration = expiration + } + + return token, nil + } + + return th.tokenCache, nil +} + +type postTokenResponse struct { + AccessToken string `json:"access_token"` + RefreshToken string `json:"refresh_token"` + ExpiresIn int `json:"expires_in"` + IssuedAt time.Time `json:"issued_at"` + Scope string `json:"scope"` +} + +func (th *tokenHandler) fetchTokenWithOAuth(realm *url.URL, refreshToken, service string, scopes []string) (token string, expiration time.Time, err error) { + form := url.Values{} + form.Set("scope", strings.Join(scopes, " ")) + form.Set("service", service) + + clientID := th.clientID + if clientID == "" { + // Use default client, this is a required field + clientID = defaultClientID + } + form.Set("client_id", clientID) + + if refreshToken != "" { + form.Set("grant_type", "refresh_token") + form.Set("refresh_token", refreshToken) + } else if th.creds != nil { + form.Set("grant_type", "password") + username, password := th.creds.Basic(realm) + form.Set("username", username) + form.Set("password", password) + + // attempt to get a refresh token + form.Set("access_type", "offline") + } else { + // refuse to do oauth without a grant type + return "", time.Time{}, fmt.Errorf("no supported grant type") + } + + resp, err := th.client().PostForm(realm.String(), form) + if err != nil { + return "", time.Time{}, err + } + defer resp.Body.Close() + + if !client.SuccessStatus(resp.StatusCode) { + err := client.HandleErrorResponse(resp) + return "", time.Time{}, err + } + + decoder := json.NewDecoder(resp.Body) + + var tr postTokenResponse + if err = decoder.Decode(&tr); err != nil { + return "", time.Time{}, fmt.Errorf("unable to decode token response: %s", err) + } + + if tr.RefreshToken != "" && tr.RefreshToken != refreshToken { + th.creds.SetRefreshToken(realm, service, tr.RefreshToken) + } + + if tr.ExpiresIn < minimumTokenLifetimeSeconds { + // The default/minimum lifetime. + tr.ExpiresIn = minimumTokenLifetimeSeconds + logrus.Debugf("Increasing token expiration to: %d seconds", tr.ExpiresIn) + } + + if tr.IssuedAt.IsZero() { + // issued_at is optional in the token response. 
+ tr.IssuedAt = th.clock.Now().UTC() + } + + return tr.AccessToken, tr.IssuedAt.Add(time.Duration(tr.ExpiresIn) * time.Second), nil +} + +type getTokenResponse struct { + Token string `json:"token"` + AccessToken string `json:"access_token"` + ExpiresIn int `json:"expires_in"` + IssuedAt time.Time `json:"issued_at"` + RefreshToken string `json:"refresh_token"` +} + +func (th *tokenHandler) fetchTokenWithBasicAuth(realm *url.URL, service string, scopes []string) (token string, expiration time.Time, err error) { + + req, err := http.NewRequest("GET", realm.String(), nil) + if err != nil { + return "", time.Time{}, err + } + + reqParams := req.URL.Query() + + if service != "" { + reqParams.Add("service", service) + } + + for _, scope := range scopes { + reqParams.Add("scope", scope) + } + + if th.offlineAccess { + reqParams.Add("offline_token", "true") + clientID := th.clientID + if clientID == "" { + clientID = defaultClientID + } + reqParams.Add("client_id", clientID) + } + + if th.creds != nil { + username, password := th.creds.Basic(realm) + if username != "" && password != "" { + reqParams.Add("account", username) + req.SetBasicAuth(username, password) + } + } + + req.URL.RawQuery = reqParams.Encode() + + resp, err := th.client().Do(req) + if err != nil { + return "", time.Time{}, err + } + defer resp.Body.Close() + + if !client.SuccessStatus(resp.StatusCode) { + err := client.HandleErrorResponse(resp) + return "", time.Time{}, err + } + + decoder := json.NewDecoder(resp.Body) + + var tr getTokenResponse + if err = decoder.Decode(&tr); err != nil { + return "", time.Time{}, fmt.Errorf("unable to decode token response: %s", err) + } + + if tr.RefreshToken != "" && th.creds != nil { + th.creds.SetRefreshToken(realm, service, tr.RefreshToken) + } + + // `access_token` is equivalent to `token` and if both are specified + // the choice is undefined. Canonicalize `access_token` by sticking + // things in `token`. + if tr.AccessToken != "" { + tr.Token = tr.AccessToken + } + + if tr.Token == "" { + return "", time.Time{}, ErrNoToken + } + + if tr.ExpiresIn < minimumTokenLifetimeSeconds { + // The default/minimum lifetime. + tr.ExpiresIn = minimumTokenLifetimeSeconds + logrus.Debugf("Increasing token expiration to: %d seconds", tr.ExpiresIn) + } + + if tr.IssuedAt.IsZero() { + // issued_at is optional in the token response. + tr.IssuedAt = th.clock.Now().UTC() + } + + return tr.Token, tr.IssuedAt.Add(time.Duration(tr.ExpiresIn) * time.Second), nil +} + +func (th *tokenHandler) fetchToken(params map[string]string, scopes []string) (token string, expiration time.Time, err error) { + realm, ok := params["realm"] + if !ok { + return "", time.Time{}, errors.New("no realm specified for token auth challenge") + } + + // TODO(dmcgowan): Handle empty scheme and relative realm + realmURL, err := url.Parse(realm) + if err != nil { + return "", time.Time{}, fmt.Errorf("invalid token auth challenge realm: %s", err) + } + + service := params["service"] + + var refreshToken string + + if th.creds != nil { + refreshToken = th.creds.RefreshToken(realmURL, service) + } + + if refreshToken != "" || th.forceOAuth { + return th.fetchTokenWithOAuth(realmURL, refreshToken, service, scopes) + } + + return th.fetchTokenWithBasicAuth(realmURL, service, scopes) +} + +type basicHandler struct { + creds CredentialStore +} + +// NewBasicHandler creaters a new authentiation handler which adds +// basic authentication credentials to a request. 
+func NewBasicHandler(creds CredentialStore) AuthenticationHandler { + return &basicHandler{ + creds: creds, + } +} + +func (*basicHandler) Scheme() string { + return "basic" +} + +func (bh *basicHandler) AuthorizeRequest(req *http.Request, params map[string]string) error { + if bh.creds != nil { + username, password := bh.creds.Basic(req.URL) + if username != "" && password != "" { + req.SetBasicAuth(username, password) + return nil + } + } + return ErrNoBasicAuthCredentials +} diff --git a/vendor/github.com/docker/distribution/registry/client/auth/session_test.go b/vendor/github.com/docker/distribution/registry/client/auth/session_test.go new file mode 100644 index 000000000..4f54c75cc --- /dev/null +++ b/vendor/github.com/docker/distribution/registry/client/auth/session_test.go @@ -0,0 +1,866 @@ +package auth + +import ( + "encoding/base64" + "fmt" + "net/http" + "net/http/httptest" + "net/url" + "testing" + "time" + + "github.com/docker/distribution/registry/client/auth/challenge" + "github.com/docker/distribution/registry/client/transport" + "github.com/docker/distribution/testutil" +) + +// An implementation of clock for providing fake time data. +type fakeClock struct { + current time.Time +} + +// Now implements clock +func (fc *fakeClock) Now() time.Time { return fc.current } + +func testServer(rrm testutil.RequestResponseMap) (string, func()) { + h := testutil.NewHandler(rrm) + s := httptest.NewServer(h) + return s.URL, s.Close +} + +type testAuthenticationWrapper struct { + headers http.Header + authCheck func(string) bool + next http.Handler +} + +func (w *testAuthenticationWrapper) ServeHTTP(rw http.ResponseWriter, r *http.Request) { + auth := r.Header.Get("Authorization") + if auth == "" || !w.authCheck(auth) { + h := rw.Header() + for k, values := range w.headers { + h[k] = values + } + rw.WriteHeader(http.StatusUnauthorized) + return + } + w.next.ServeHTTP(rw, r) +} + +func testServerWithAuth(rrm testutil.RequestResponseMap, authenticate string, authCheck func(string) bool) (string, func()) { + h := testutil.NewHandler(rrm) + wrapper := &testAuthenticationWrapper{ + + headers: http.Header(map[string][]string{ + "X-API-Version": {"registry/2.0"}, + "X-Multi-API-Version": {"registry/2.0", "registry/2.1", "trust/1.0"}, + "WWW-Authenticate": {authenticate}, + }), + authCheck: authCheck, + next: h, + } + + s := httptest.NewServer(wrapper) + return s.URL, s.Close +} + +// ping pings the provided endpoint to determine its required authorization challenges. +// If a version header is provided, the versions will be returned. 
+func ping(manager challenge.Manager, endpoint, versionHeader string) ([]APIVersion, error) { + resp, err := http.Get(endpoint) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + if err := manager.AddResponse(resp); err != nil { + return nil, err + } + + return APIVersions(resp, versionHeader), err +} + +type testCredentialStore struct { + username string + password string + refreshTokens map[string]string +} + +func (tcs *testCredentialStore) Basic(*url.URL) (string, string) { + return tcs.username, tcs.password +} + +func (tcs *testCredentialStore) RefreshToken(u *url.URL, service string) string { + return tcs.refreshTokens[service] +} + +func (tcs *testCredentialStore) SetRefreshToken(u *url.URL, service string, token string) { + if tcs.refreshTokens != nil { + tcs.refreshTokens[service] = token + } +} + +func TestEndpointAuthorizeToken(t *testing.T) { + service := "localhost.localdomain" + repo1 := "some/registry" + repo2 := "other/registry" + scope1 := fmt.Sprintf("repository:%s:pull,push", repo1) + scope2 := fmt.Sprintf("repository:%s:pull,push", repo2) + tokenMap := testutil.RequestResponseMap([]testutil.RequestResponseMapping{ + { + Request: testutil.Request{ + Method: "GET", + Route: fmt.Sprintf("/token?scope=%s&service=%s", url.QueryEscape(scope1), service), + }, + Response: testutil.Response{ + StatusCode: http.StatusOK, + Body: []byte(`{"token":"statictoken"}`), + }, + }, + { + Request: testutil.Request{ + Method: "GET", + Route: fmt.Sprintf("/token?scope=%s&service=%s", url.QueryEscape(scope2), service), + }, + Response: testutil.Response{ + StatusCode: http.StatusOK, + Body: []byte(`{"token":"badtoken"}`), + }, + }, + }) + te, tc := testServer(tokenMap) + defer tc() + + m := testutil.RequestResponseMap([]testutil.RequestResponseMapping{ + { + Request: testutil.Request{ + Method: "GET", + Route: "/v2/hello", + }, + Response: testutil.Response{ + StatusCode: http.StatusAccepted, + }, + }, + }) + + authenicate := fmt.Sprintf("Bearer realm=%q,service=%q", te+"/token", service) + validCheck := func(a string) bool { + return a == "Bearer statictoken" + } + e, c := testServerWithAuth(m, authenicate, validCheck) + defer c() + + challengeManager1 := challenge.NewSimpleManager() + versions, err := ping(challengeManager1, e+"/v2/", "x-api-version") + if err != nil { + t.Fatal(err) + } + if len(versions) != 1 { + t.Fatalf("Unexpected version count: %d, expected 1", len(versions)) + } + if check := (APIVersion{Type: "registry", Version: "2.0"}); versions[0] != check { + t.Fatalf("Unexpected api version: %#v, expected %#v", versions[0], check) + } + transport1 := transport.NewTransport(nil, NewAuthorizer(challengeManager1, NewTokenHandler(nil, nil, repo1, "pull", "push"))) + client := &http.Client{Transport: transport1} + + req, _ := http.NewRequest("GET", e+"/v2/hello", nil) + resp, err := client.Do(req) + if err != nil { + t.Fatalf("Error sending get request: %s", err) + } + + if resp.StatusCode != http.StatusAccepted { + t.Fatalf("Unexpected status code: %d, expected %d", resp.StatusCode, http.StatusAccepted) + } + + e2, c2 := testServerWithAuth(m, authenicate, validCheck) + defer c2() + + challengeManager2 := challenge.NewSimpleManager() + versions, err = ping(challengeManager2, e2+"/v2/", "x-multi-api-version") + if err != nil { + t.Fatal(err) + } + if len(versions) != 3 { + t.Fatalf("Unexpected version count: %d, expected 3", len(versions)) + } + if check := (APIVersion{Type: "registry", Version: "2.0"}); versions[0] != check { + t.Fatalf("Unexpected api version: 
%#v, expected %#v", versions[0], check) + } + if check := (APIVersion{Type: "registry", Version: "2.1"}); versions[1] != check { + t.Fatalf("Unexpected api version: %#v, expected %#v", versions[1], check) + } + if check := (APIVersion{Type: "trust", Version: "1.0"}); versions[2] != check { + t.Fatalf("Unexpected api version: %#v, expected %#v", versions[2], check) + } + transport2 := transport.NewTransport(nil, NewAuthorizer(challengeManager2, NewTokenHandler(nil, nil, repo2, "pull", "push"))) + client2 := &http.Client{Transport: transport2} + + req, _ = http.NewRequest("GET", e2+"/v2/hello", nil) + resp, err = client2.Do(req) + if err != nil { + t.Fatalf("Error sending get request: %s", err) + } + + if resp.StatusCode != http.StatusUnauthorized { + t.Fatalf("Unexpected status code: %d, expected %d", resp.StatusCode, http.StatusUnauthorized) + } +} + +func TestEndpointAuthorizeRefreshToken(t *testing.T) { + service := "localhost.localdomain" + repo1 := "some/registry" + repo2 := "other/registry" + scope1 := fmt.Sprintf("repository:%s:pull,push", repo1) + scope2 := fmt.Sprintf("repository:%s:pull,push", repo2) + refreshToken1 := "0123456790abcdef" + refreshToken2 := "0123456790fedcba" + tokenMap := testutil.RequestResponseMap([]testutil.RequestResponseMapping{ + { + Request: testutil.Request{ + Method: "POST", + Route: "/token", + Body: []byte(fmt.Sprintf("client_id=registry-client&grant_type=refresh_token&refresh_token=%s&scope=%s&service=%s", refreshToken1, url.QueryEscape(scope1), service)), + }, + Response: testutil.Response{ + StatusCode: http.StatusOK, + Body: []byte(fmt.Sprintf(`{"access_token":"statictoken","refresh_token":"%s"}`, refreshToken1)), + }, + }, + { + // In the future this test may fail and require using basic auth to get a different refresh token + Request: testutil.Request{ + Method: "POST", + Route: "/token", + Body: []byte(fmt.Sprintf("client_id=registry-client&grant_type=refresh_token&refresh_token=%s&scope=%s&service=%s", refreshToken1, url.QueryEscape(scope2), service)), + }, + Response: testutil.Response{ + StatusCode: http.StatusOK, + Body: []byte(fmt.Sprintf(`{"access_token":"statictoken","refresh_token":"%s"}`, refreshToken2)), + }, + }, + { + Request: testutil.Request{ + Method: "POST", + Route: "/token", + Body: []byte(fmt.Sprintf("client_id=registry-client&grant_type=refresh_token&refresh_token=%s&scope=%s&service=%s", refreshToken2, url.QueryEscape(scope2), service)), + }, + Response: testutil.Response{ + StatusCode: http.StatusOK, + Body: []byte(`{"access_token":"badtoken","refresh_token":"%s"}`), + }, + }, + }) + te, tc := testServer(tokenMap) + defer tc() + + m := testutil.RequestResponseMap([]testutil.RequestResponseMapping{ + { + Request: testutil.Request{ + Method: "GET", + Route: "/v2/hello", + }, + Response: testutil.Response{ + StatusCode: http.StatusAccepted, + }, + }, + }) + + authenicate := fmt.Sprintf("Bearer realm=%q,service=%q", te+"/token", service) + validCheck := func(a string) bool { + return a == "Bearer statictoken" + } + e, c := testServerWithAuth(m, authenicate, validCheck) + defer c() + + challengeManager1 := challenge.NewSimpleManager() + versions, err := ping(challengeManager1, e+"/v2/", "x-api-version") + if err != nil { + t.Fatal(err) + } + if len(versions) != 1 { + t.Fatalf("Unexpected version count: %d, expected 1", len(versions)) + } + if check := (APIVersion{Type: "registry", Version: "2.0"}); versions[0] != check { + t.Fatalf("Unexpected api version: %#v, expected %#v", versions[0], check) + } + creds := 
&testCredentialStore{ + refreshTokens: map[string]string{ + service: refreshToken1, + }, + } + transport1 := transport.NewTransport(nil, NewAuthorizer(challengeManager1, NewTokenHandler(nil, creds, repo1, "pull", "push"))) + client := &http.Client{Transport: transport1} + + req, _ := http.NewRequest("GET", e+"/v2/hello", nil) + resp, err := client.Do(req) + if err != nil { + t.Fatalf("Error sending get request: %s", err) + } + + if resp.StatusCode != http.StatusAccepted { + t.Fatalf("Unexpected status code: %d, expected %d", resp.StatusCode, http.StatusAccepted) + } + + // Try with refresh token setting + e2, c2 := testServerWithAuth(m, authenicate, validCheck) + defer c2() + + challengeManager2 := challenge.NewSimpleManager() + versions, err = ping(challengeManager2, e2+"/v2/", "x-api-version") + if err != nil { + t.Fatal(err) + } + if len(versions) != 1 { + t.Fatalf("Unexpected version count: %d, expected 1", len(versions)) + } + if check := (APIVersion{Type: "registry", Version: "2.0"}); versions[0] != check { + t.Fatalf("Unexpected api version: %#v, expected %#v", versions[0], check) + } + + transport2 := transport.NewTransport(nil, NewAuthorizer(challengeManager2, NewTokenHandler(nil, creds, repo2, "pull", "push"))) + client2 := &http.Client{Transport: transport2} + + req, _ = http.NewRequest("GET", e2+"/v2/hello", nil) + resp, err = client2.Do(req) + if err != nil { + t.Fatalf("Error sending get request: %s", err) + } + + if resp.StatusCode != http.StatusAccepted { + t.Fatalf("Unexpected status code: %d, expected %d", resp.StatusCode, http.StatusUnauthorized) + } + + if creds.refreshTokens[service] != refreshToken2 { + t.Fatalf("Refresh token not set after change") + } + + // Try with bad token + e3, c3 := testServerWithAuth(m, authenicate, validCheck) + defer c3() + + challengeManager3 := challenge.NewSimpleManager() + versions, err = ping(challengeManager3, e3+"/v2/", "x-api-version") + if err != nil { + t.Fatal(err) + } + if check := (APIVersion{Type: "registry", Version: "2.0"}); versions[0] != check { + t.Fatalf("Unexpected api version: %#v, expected %#v", versions[0], check) + } + + transport3 := transport.NewTransport(nil, NewAuthorizer(challengeManager3, NewTokenHandler(nil, creds, repo2, "pull", "push"))) + client3 := &http.Client{Transport: transport3} + + req, _ = http.NewRequest("GET", e3+"/v2/hello", nil) + resp, err = client3.Do(req) + if err != nil { + t.Fatalf("Error sending get request: %s", err) + } + + if resp.StatusCode != http.StatusUnauthorized { + t.Fatalf("Unexpected status code: %d, expected %d", resp.StatusCode, http.StatusUnauthorized) + } +} + +func TestEndpointAuthorizeV2RefreshToken(t *testing.T) { + service := "localhost.localdomain" + scope1 := "registry:catalog:search" + refreshToken1 := "0123456790abcdef" + tokenMap := testutil.RequestResponseMap([]testutil.RequestResponseMapping{ + { + Request: testutil.Request{ + Method: "POST", + Route: "/token", + Body: []byte(fmt.Sprintf("client_id=registry-client&grant_type=refresh_token&refresh_token=%s&scope=%s&service=%s", refreshToken1, url.QueryEscape(scope1), service)), + }, + Response: testutil.Response{ + StatusCode: http.StatusOK, + Body: []byte(fmt.Sprintf(`{"access_token":"statictoken","refresh_token":"%s"}`, refreshToken1)), + }, + }, + }) + te, tc := testServer(tokenMap) + defer tc() + + m := testutil.RequestResponseMap([]testutil.RequestResponseMapping{ + { + Request: testutil.Request{ + Method: "GET", + Route: "/v1/search", + }, + Response: testutil.Response{ + StatusCode: http.StatusAccepted, + 
}, + }, + }) + + authenicate := fmt.Sprintf("Bearer realm=%q,service=%q", te+"/token", service) + validCheck := func(a string) bool { + return a == "Bearer statictoken" + } + e, c := testServerWithAuth(m, authenicate, validCheck) + defer c() + + challengeManager1 := challenge.NewSimpleManager() + versions, err := ping(challengeManager1, e+"/v2/", "x-api-version") + if err != nil { + t.Fatal(err) + } + if len(versions) != 1 { + t.Fatalf("Unexpected version count: %d, expected 1", len(versions)) + } + if check := (APIVersion{Type: "registry", Version: "2.0"}); versions[0] != check { + t.Fatalf("Unexpected api version: %#v, expected %#v", versions[0], check) + } + tho := TokenHandlerOptions{ + Credentials: &testCredentialStore{ + refreshTokens: map[string]string{ + service: refreshToken1, + }, + }, + Scopes: []Scope{ + RegistryScope{ + Name: "catalog", + Actions: []string{"search"}, + }, + }, + } + + transport1 := transport.NewTransport(nil, NewAuthorizer(challengeManager1, NewTokenHandlerWithOptions(tho))) + client := &http.Client{Transport: transport1} + + req, _ := http.NewRequest("GET", e+"/v1/search", nil) + resp, err := client.Do(req) + if err != nil { + t.Fatalf("Error sending get request: %s", err) + } + + if resp.StatusCode != http.StatusAccepted { + t.Fatalf("Unexpected status code: %d, expected %d", resp.StatusCode, http.StatusAccepted) + } +} + +func basicAuth(username, password string) string { + auth := username + ":" + password + return base64.StdEncoding.EncodeToString([]byte(auth)) +} + +func TestEndpointAuthorizeTokenBasic(t *testing.T) { + service := "localhost.localdomain" + repo := "some/fun/registry" + scope := fmt.Sprintf("repository:%s:pull,push", repo) + username := "tokenuser" + password := "superSecretPa$$word" + + tokenMap := testutil.RequestResponseMap([]testutil.RequestResponseMapping{ + { + Request: testutil.Request{ + Method: "GET", + Route: fmt.Sprintf("/token?account=%s&scope=%s&service=%s", username, url.QueryEscape(scope), service), + }, + Response: testutil.Response{ + StatusCode: http.StatusOK, + Body: []byte(`{"access_token":"statictoken"}`), + }, + }, + }) + + authenicate1 := fmt.Sprintf("Basic realm=localhost") + basicCheck := func(a string) bool { + return a == fmt.Sprintf("Basic %s", basicAuth(username, password)) + } + te, tc := testServerWithAuth(tokenMap, authenicate1, basicCheck) + defer tc() + + m := testutil.RequestResponseMap([]testutil.RequestResponseMapping{ + { + Request: testutil.Request{ + Method: "GET", + Route: "/v2/hello", + }, + Response: testutil.Response{ + StatusCode: http.StatusAccepted, + }, + }, + }) + + authenicate2 := fmt.Sprintf("Bearer realm=%q,service=%q", te+"/token", service) + bearerCheck := func(a string) bool { + return a == "Bearer statictoken" + } + e, c := testServerWithAuth(m, authenicate2, bearerCheck) + defer c() + + creds := &testCredentialStore{ + username: username, + password: password, + } + + challengeManager := challenge.NewSimpleManager() + _, err := ping(challengeManager, e+"/v2/", "") + if err != nil { + t.Fatal(err) + } + transport1 := transport.NewTransport(nil, NewAuthorizer(challengeManager, NewTokenHandler(nil, creds, repo, "pull", "push"), NewBasicHandler(creds))) + client := &http.Client{Transport: transport1} + + req, _ := http.NewRequest("GET", e+"/v2/hello", nil) + resp, err := client.Do(req) + if err != nil { + t.Fatalf("Error sending get request: %s", err) + } + + if resp.StatusCode != http.StatusAccepted { + t.Fatalf("Unexpected status code: %d, expected %d", resp.StatusCode, 
http.StatusAccepted) + } +} + +func TestEndpointAuthorizeTokenBasicWithExpiresIn(t *testing.T) { + service := "localhost.localdomain" + repo := "some/fun/registry" + scope := fmt.Sprintf("repository:%s:pull,push", repo) + username := "tokenuser" + password := "superSecretPa$$word" + + tokenMap := testutil.RequestResponseMap([]testutil.RequestResponseMapping{ + { + Request: testutil.Request{ + Method: "GET", + Route: fmt.Sprintf("/token?account=%s&scope=%s&service=%s", username, url.QueryEscape(scope), service), + }, + Response: testutil.Response{ + StatusCode: http.StatusOK, + Body: []byte(`{"token":"statictoken", "expires_in": 3001}`), + }, + }, + { + Request: testutil.Request{ + Method: "GET", + Route: fmt.Sprintf("/token?account=%s&scope=%s&service=%s", username, url.QueryEscape(scope), service), + }, + Response: testutil.Response{ + StatusCode: http.StatusOK, + Body: []byte(`{"access_token":"statictoken", "expires_in": 3001}`), + }, + }, + }) + + authenicate1 := fmt.Sprintf("Basic realm=localhost") + tokenExchanges := 0 + basicCheck := func(a string) bool { + tokenExchanges = tokenExchanges + 1 + return a == fmt.Sprintf("Basic %s", basicAuth(username, password)) + } + te, tc := testServerWithAuth(tokenMap, authenicate1, basicCheck) + defer tc() + + m := testutil.RequestResponseMap([]testutil.RequestResponseMapping{ + { + Request: testutil.Request{ + Method: "GET", + Route: "/v2/hello", + }, + Response: testutil.Response{ + StatusCode: http.StatusAccepted, + }, + }, + { + Request: testutil.Request{ + Method: "GET", + Route: "/v2/hello", + }, + Response: testutil.Response{ + StatusCode: http.StatusAccepted, + }, + }, + { + Request: testutil.Request{ + Method: "GET", + Route: "/v2/hello", + }, + Response: testutil.Response{ + StatusCode: http.StatusAccepted, + }, + }, + { + Request: testutil.Request{ + Method: "GET", + Route: "/v2/hello", + }, + Response: testutil.Response{ + StatusCode: http.StatusAccepted, + }, + }, + { + Request: testutil.Request{ + Method: "GET", + Route: "/v2/hello", + }, + Response: testutil.Response{ + StatusCode: http.StatusAccepted, + }, + }, + }) + + authenicate2 := fmt.Sprintf("Bearer realm=%q,service=%q", te+"/token", service) + bearerCheck := func(a string) bool { + return a == "Bearer statictoken" + } + e, c := testServerWithAuth(m, authenicate2, bearerCheck) + defer c() + + creds := &testCredentialStore{ + username: username, + password: password, + } + + challengeManager := challenge.NewSimpleManager() + _, err := ping(challengeManager, e+"/v2/", "") + if err != nil { + t.Fatal(err) + } + clock := &fakeClock{current: time.Now()} + options := TokenHandlerOptions{ + Transport: nil, + Credentials: creds, + Scopes: []Scope{ + RepositoryScope{ + Repository: repo, + Actions: []string{"pull", "push"}, + }, + }, + } + tHandler := NewTokenHandlerWithOptions(options) + tHandler.(*tokenHandler).clock = clock + transport1 := transport.NewTransport(nil, NewAuthorizer(challengeManager, tHandler, NewBasicHandler(creds))) + client := &http.Client{Transport: transport1} + + // First call should result in a token exchange + // Subsequent calls should recycle the token from the first request, until the expiration has lapsed. 
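+	// With expires_in of 3001 seconds and the fake clock advanced by 1000
+	// seconds per iteration, the requests at 0s, 1000s, 2000s and 3000s all
+	// reuse the first token; only the request issued after the loop, at
+	// 4000s, triggers a new exchange.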
+ timeIncrement := 1000 * time.Second + for i := 0; i < 4; i++ { + req, _ := http.NewRequest("GET", e+"/v2/hello", nil) + resp, err := client.Do(req) + if err != nil { + t.Fatalf("Error sending get request: %s", err) + } + if resp.StatusCode != http.StatusAccepted { + t.Fatalf("Unexpected status code: %d, expected %d", resp.StatusCode, http.StatusAccepted) + } + if tokenExchanges != 1 { + t.Fatalf("Unexpected number of token exchanges, want: 1, got %d (iteration: %d)", tokenExchanges, i) + } + clock.current = clock.current.Add(timeIncrement) + } + + // After we've exceeded the expiration, we should see a second token exchange. + req, _ := http.NewRequest("GET", e+"/v2/hello", nil) + resp, err := client.Do(req) + if err != nil { + t.Fatalf("Error sending get request: %s", err) + } + if resp.StatusCode != http.StatusAccepted { + t.Fatalf("Unexpected status code: %d, expected %d", resp.StatusCode, http.StatusAccepted) + } + if tokenExchanges != 2 { + t.Fatalf("Unexpected number of token exchanges, want: 2, got %d", tokenExchanges) + } +} + +func TestEndpointAuthorizeTokenBasicWithExpiresInAndIssuedAt(t *testing.T) { + service := "localhost.localdomain" + repo := "some/fun/registry" + scope := fmt.Sprintf("repository:%s:pull,push", repo) + username := "tokenuser" + password := "superSecretPa$$word" + + // This test sets things up such that the token was issued one increment + // earlier than its sibling in TestEndpointAuthorizeTokenBasicWithExpiresIn. + // This will mean that the token expires after 3 increments instead of 4. + clock := &fakeClock{current: time.Now()} + timeIncrement := 1000 * time.Second + firstIssuedAt := clock.Now() + clock.current = clock.current.Add(timeIncrement) + secondIssuedAt := clock.current.Add(2 * timeIncrement) + tokenMap := testutil.RequestResponseMap([]testutil.RequestResponseMapping{ + { + Request: testutil.Request{ + Method: "GET", + Route: fmt.Sprintf("/token?account=%s&scope=%s&service=%s", username, url.QueryEscape(scope), service), + }, + Response: testutil.Response{ + StatusCode: http.StatusOK, + Body: []byte(`{"token":"statictoken", "issued_at": "` + firstIssuedAt.Format(time.RFC3339Nano) + `", "expires_in": 3001}`), + }, + }, + { + Request: testutil.Request{ + Method: "GET", + Route: fmt.Sprintf("/token?account=%s&scope=%s&service=%s", username, url.QueryEscape(scope), service), + }, + Response: testutil.Response{ + StatusCode: http.StatusOK, + Body: []byte(`{"access_token":"statictoken", "issued_at": "` + secondIssuedAt.Format(time.RFC3339Nano) + `", "expires_in": 3001}`), + }, + }, + }) + + authenicate1 := fmt.Sprintf("Basic realm=localhost") + tokenExchanges := 0 + basicCheck := func(a string) bool { + tokenExchanges = tokenExchanges + 1 + return a == fmt.Sprintf("Basic %s", basicAuth(username, password)) + } + te, tc := testServerWithAuth(tokenMap, authenicate1, basicCheck) + defer tc() + + m := testutil.RequestResponseMap([]testutil.RequestResponseMapping{ + { + Request: testutil.Request{ + Method: "GET", + Route: "/v2/hello", + }, + Response: testutil.Response{ + StatusCode: http.StatusAccepted, + }, + }, + { + Request: testutil.Request{ + Method: "GET", + Route: "/v2/hello", + }, + Response: testutil.Response{ + StatusCode: http.StatusAccepted, + }, + }, + { + Request: testutil.Request{ + Method: "GET", + Route: "/v2/hello", + }, + Response: testutil.Response{ + StatusCode: http.StatusAccepted, + }, + }, + { + Request: testutil.Request{ + Method: "GET", + Route: "/v2/hello", + }, + Response: testutil.Response{ + StatusCode: http.StatusAccepted, 
+ }, + }, + }) + + authenicate2 := fmt.Sprintf("Bearer realm=%q,service=%q", te+"/token", service) + bearerCheck := func(a string) bool { + return a == "Bearer statictoken" + } + e, c := testServerWithAuth(m, authenicate2, bearerCheck) + defer c() + + creds := &testCredentialStore{ + username: username, + password: password, + } + + challengeManager := challenge.NewSimpleManager() + _, err := ping(challengeManager, e+"/v2/", "") + if err != nil { + t.Fatal(err) + } + + options := TokenHandlerOptions{ + Transport: nil, + Credentials: creds, + Scopes: []Scope{ + RepositoryScope{ + Repository: repo, + Actions: []string{"pull", "push"}, + }, + }, + } + tHandler := NewTokenHandlerWithOptions(options) + tHandler.(*tokenHandler).clock = clock + transport1 := transport.NewTransport(nil, NewAuthorizer(challengeManager, tHandler, NewBasicHandler(creds))) + client := &http.Client{Transport: transport1} + + // First call should result in a token exchange + // Subsequent calls should recycle the token from the first request, until the expiration has lapsed. + // We shaved one increment off of the equivalent logic in TestEndpointAuthorizeTokenBasicWithExpiresIn + // so this loop should have one fewer iteration. + for i := 0; i < 3; i++ { + req, _ := http.NewRequest("GET", e+"/v2/hello", nil) + resp, err := client.Do(req) + if err != nil { + t.Fatalf("Error sending get request: %s", err) + } + if resp.StatusCode != http.StatusAccepted { + t.Fatalf("Unexpected status code: %d, expected %d", resp.StatusCode, http.StatusAccepted) + } + if tokenExchanges != 1 { + t.Fatalf("Unexpected number of token exchanges, want: 1, got %d (iteration: %d)", tokenExchanges, i) + } + clock.current = clock.current.Add(timeIncrement) + } + + // After we've exceeded the expiration, we should see a second token exchange. 
+ req, _ := http.NewRequest("GET", e+"/v2/hello", nil) + resp, err := client.Do(req) + if err != nil { + t.Fatalf("Error sending get request: %s", err) + } + if resp.StatusCode != http.StatusAccepted { + t.Fatalf("Unexpected status code: %d, expected %d", resp.StatusCode, http.StatusAccepted) + } + if tokenExchanges != 2 { + t.Fatalf("Unexpected number of token exchanges, want: 2, got %d", tokenExchanges) + } +} + +func TestEndpointAuthorizeBasic(t *testing.T) { + m := testutil.RequestResponseMap([]testutil.RequestResponseMapping{ + { + Request: testutil.Request{ + Method: "GET", + Route: "/v2/hello", + }, + Response: testutil.Response{ + StatusCode: http.StatusAccepted, + }, + }, + }) + + username := "user1" + password := "funSecretPa$$word" + authenicate := fmt.Sprintf("Basic realm=localhost") + validCheck := func(a string) bool { + return a == fmt.Sprintf("Basic %s", basicAuth(username, password)) + } + e, c := testServerWithAuth(m, authenicate, validCheck) + defer c() + creds := &testCredentialStore{ + username: username, + password: password, + } + + challengeManager := challenge.NewSimpleManager() + _, err := ping(challengeManager, e+"/v2/", "") + if err != nil { + t.Fatal(err) + } + transport1 := transport.NewTransport(nil, NewAuthorizer(challengeManager, NewBasicHandler(creds))) + client := &http.Client{Transport: transport1} + + req, _ := http.NewRequest("GET", e+"/v2/hello", nil) + resp, err := client.Do(req) + if err != nil { + t.Fatalf("Error sending get request: %s", err) + } + + if resp.StatusCode != http.StatusAccepted { + t.Fatalf("Unexpected status code: %d, expected %d", resp.StatusCode, http.StatusAccepted) + } +} diff --git a/vendor/github.com/docker/distribution/registry/client/blob_writer.go b/vendor/github.com/docker/distribution/registry/client/blob_writer.go new file mode 100644 index 000000000..e3ffcb00f --- /dev/null +++ b/vendor/github.com/docker/distribution/registry/client/blob_writer.go @@ -0,0 +1,162 @@ +package client + +import ( + "bytes" + "fmt" + "io" + "io/ioutil" + "net/http" + "time" + + "github.com/docker/distribution" + "github.com/docker/distribution/context" +) + +type httpBlobUpload struct { + statter distribution.BlobStatter + client *http.Client + + uuid string + startedAt time.Time + + location string // always the last value of the location header. 
+ offset int64 + closed bool +} + +func (hbu *httpBlobUpload) Reader() (io.ReadCloser, error) { + panic("Not implemented") +} + +func (hbu *httpBlobUpload) handleErrorResponse(resp *http.Response) error { + if resp.StatusCode == http.StatusNotFound { + return distribution.ErrBlobUploadUnknown + } + return HandleErrorResponse(resp) +} + +func (hbu *httpBlobUpload) ReadFrom(r io.Reader) (n int64, err error) { + req, err := http.NewRequest("PATCH", hbu.location, ioutil.NopCloser(r)) + if err != nil { + return 0, err + } + defer req.Body.Close() + + resp, err := hbu.client.Do(req) + if err != nil { + return 0, err + } + + if !SuccessStatus(resp.StatusCode) { + return 0, hbu.handleErrorResponse(resp) + } + + hbu.uuid = resp.Header.Get("Docker-Upload-UUID") + hbu.location, err = sanitizeLocation(resp.Header.Get("Location"), hbu.location) + if err != nil { + return 0, err + } + rng := resp.Header.Get("Range") + var start, end int64 + if n, err := fmt.Sscanf(rng, "%d-%d", &start, &end); err != nil { + return 0, err + } else if n != 2 || end < start { + return 0, fmt.Errorf("bad range format: %s", rng) + } + + return (end - start + 1), nil + +} + +func (hbu *httpBlobUpload) Write(p []byte) (n int, err error) { + req, err := http.NewRequest("PATCH", hbu.location, bytes.NewReader(p)) + if err != nil { + return 0, err + } + req.Header.Set("Content-Range", fmt.Sprintf("%d-%d", hbu.offset, hbu.offset+int64(len(p)-1))) + req.Header.Set("Content-Length", fmt.Sprintf("%d", len(p))) + req.Header.Set("Content-Type", "application/octet-stream") + + resp, err := hbu.client.Do(req) + if err != nil { + return 0, err + } + + if !SuccessStatus(resp.StatusCode) { + return 0, hbu.handleErrorResponse(resp) + } + + hbu.uuid = resp.Header.Get("Docker-Upload-UUID") + hbu.location, err = sanitizeLocation(resp.Header.Get("Location"), hbu.location) + if err != nil { + return 0, err + } + rng := resp.Header.Get("Range") + var start, end int + if n, err := fmt.Sscanf(rng, "%d-%d", &start, &end); err != nil { + return 0, err + } else if n != 2 || end < start { + return 0, fmt.Errorf("bad range format: %s", rng) + } + + return (end - start + 1), nil + +} + +func (hbu *httpBlobUpload) Size() int64 { + return hbu.offset +} + +func (hbu *httpBlobUpload) ID() string { + return hbu.uuid +} + +func (hbu *httpBlobUpload) StartedAt() time.Time { + return hbu.startedAt +} + +func (hbu *httpBlobUpload) Commit(ctx context.Context, desc distribution.Descriptor) (distribution.Descriptor, error) { + // TODO(dmcgowan): Check if already finished, if so just fetch + req, err := http.NewRequest("PUT", hbu.location, nil) + if err != nil { + return distribution.Descriptor{}, err + } + + values := req.URL.Query() + values.Set("digest", desc.Digest.String()) + req.URL.RawQuery = values.Encode() + + resp, err := hbu.client.Do(req) + if err != nil { + return distribution.Descriptor{}, err + } + defer resp.Body.Close() + + if !SuccessStatus(resp.StatusCode) { + return distribution.Descriptor{}, hbu.handleErrorResponse(resp) + } + + return hbu.statter.Stat(ctx, desc.Digest) +} + +func (hbu *httpBlobUpload) Cancel(ctx context.Context) error { + req, err := http.NewRequest("DELETE", hbu.location, nil) + if err != nil { + return err + } + resp, err := hbu.client.Do(req) + if err != nil { + return err + } + defer resp.Body.Close() + + if resp.StatusCode == http.StatusNotFound || SuccessStatus(resp.StatusCode) { + return nil + } + return hbu.handleErrorResponse(resp) +} + +func (hbu *httpBlobUpload) Close() error { + hbu.closed = true + return nil +} diff 
--git a/vendor/github.com/docker/distribution/registry/client/blob_writer_test.go b/vendor/github.com/docker/distribution/registry/client/blob_writer_test.go new file mode 100644 index 000000000..099dca4f0 --- /dev/null +++ b/vendor/github.com/docker/distribution/registry/client/blob_writer_test.go @@ -0,0 +1,211 @@ +package client + +import ( + "bytes" + "fmt" + "net/http" + "testing" + + "github.com/docker/distribution" + "github.com/docker/distribution/registry/api/errcode" + "github.com/docker/distribution/registry/api/v2" + "github.com/docker/distribution/testutil" +) + +// Test implements distribution.BlobWriter +var _ distribution.BlobWriter = &httpBlobUpload{} + +func TestUploadReadFrom(t *testing.T) { + _, b := newRandomBlob(64) + repo := "test/upload/readfrom" + locationPath := fmt.Sprintf("/v2/%s/uploads/testid", repo) + + m := testutil.RequestResponseMap([]testutil.RequestResponseMapping{ + { + Request: testutil.Request{ + Method: "GET", + Route: "/v2/", + }, + Response: testutil.Response{ + StatusCode: http.StatusOK, + Headers: http.Header(map[string][]string{ + "Docker-Distribution-API-Version": {"registry/2.0"}, + }), + }, + }, + // Test Valid case + { + Request: testutil.Request{ + Method: "PATCH", + Route: locationPath, + Body: b, + }, + Response: testutil.Response{ + StatusCode: http.StatusAccepted, + Headers: http.Header(map[string][]string{ + "Docker-Upload-UUID": {"46603072-7a1b-4b41-98f9-fd8a7da89f9b"}, + "Location": {locationPath}, + "Range": {"0-63"}, + }), + }, + }, + // Test invalid range + { + Request: testutil.Request{ + Method: "PATCH", + Route: locationPath, + Body: b, + }, + Response: testutil.Response{ + StatusCode: http.StatusAccepted, + Headers: http.Header(map[string][]string{ + "Docker-Upload-UUID": {"46603072-7a1b-4b41-98f9-fd8a7da89f9b"}, + "Location": {locationPath}, + "Range": {""}, + }), + }, + }, + // Test 404 + { + Request: testutil.Request{ + Method: "PATCH", + Route: locationPath, + Body: b, + }, + Response: testutil.Response{ + StatusCode: http.StatusNotFound, + }, + }, + // Test 400 valid json + { + Request: testutil.Request{ + Method: "PATCH", + Route: locationPath, + Body: b, + }, + Response: testutil.Response{ + StatusCode: http.StatusBadRequest, + Body: []byte(` + { "errors": + [ + { + "code": "BLOB_UPLOAD_INVALID", + "message": "blob upload invalid", + "detail": "more detail" + } + ] + } `), + }, + }, + // Test 400 invalid json + { + Request: testutil.Request{ + Method: "PATCH", + Route: locationPath, + Body: b, + }, + Response: testutil.Response{ + StatusCode: http.StatusBadRequest, + Body: []byte("something bad happened"), + }, + }, + // Test 500 + { + Request: testutil.Request{ + Method: "PATCH", + Route: locationPath, + Body: b, + }, + Response: testutil.Response{ + StatusCode: http.StatusInternalServerError, + }, + }, + }) + + e, c := testServer(m) + defer c() + + blobUpload := &httpBlobUpload{ + client: &http.Client{}, + } + + // Valid case + blobUpload.location = e + locationPath + n, err := blobUpload.ReadFrom(bytes.NewReader(b)) + if err != nil { + t.Fatalf("Error calling ReadFrom: %s", err) + } + if n != 64 { + t.Fatalf("Wrong length returned from ReadFrom: %d, expected 64", n) + } + + // Bad range + blobUpload.location = e + locationPath + _, err = blobUpload.ReadFrom(bytes.NewReader(b)) + if err == nil { + t.Fatalf("Expected error when bad range received") + } + + // 404 + blobUpload.location = e + locationPath + _, err = blobUpload.ReadFrom(bytes.NewReader(b)) + if err == nil { + t.Fatalf("Expected error when not found") + } 
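+	// handleErrorResponse maps a 404 from the upload location to
+	// distribution.ErrBlobUploadUnknown, so that sentinel error is expected here.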
+ if err != distribution.ErrBlobUploadUnknown { + t.Fatalf("Wrong error thrown: %s, expected %s", err, distribution.ErrBlobUploadUnknown) + } + + // 400 valid json + blobUpload.location = e + locationPath + _, err = blobUpload.ReadFrom(bytes.NewReader(b)) + if err == nil { + t.Fatalf("Expected error when not found") + } + if uploadErr, ok := err.(errcode.Errors); !ok { + t.Fatalf("Wrong error type %T: %s", err, err) + } else if len(uploadErr) != 1 { + t.Fatalf("Unexpected number of errors: %d, expected 1", len(uploadErr)) + } else { + v2Err, ok := uploadErr[0].(errcode.Error) + if !ok { + t.Fatalf("Not an 'Error' type: %#v", uploadErr[0]) + } + if v2Err.Code != v2.ErrorCodeBlobUploadInvalid { + t.Fatalf("Unexpected error code: %s, expected %d", v2Err.Code.String(), v2.ErrorCodeBlobUploadInvalid) + } + if expected := "blob upload invalid"; v2Err.Message != expected { + t.Fatalf("Unexpected error message: %q, expected %q", v2Err.Message, expected) + } + if expected := "more detail"; v2Err.Detail.(string) != expected { + t.Fatalf("Unexpected error message: %q, expected %q", v2Err.Detail.(string), expected) + } + } + + // 400 invalid json + blobUpload.location = e + locationPath + _, err = blobUpload.ReadFrom(bytes.NewReader(b)) + if err == nil { + t.Fatalf("Expected error when not found") + } + if uploadErr, ok := err.(*UnexpectedHTTPResponseError); !ok { + t.Fatalf("Wrong error type %T: %s", err, err) + } else { + respStr := string(uploadErr.Response) + if expected := "something bad happened"; respStr != expected { + t.Fatalf("Unexpected response string: %s, expected: %s", respStr, expected) + } + } + + // 500 + blobUpload.location = e + locationPath + _, err = blobUpload.ReadFrom(bytes.NewReader(b)) + if err == nil { + t.Fatalf("Expected error when not found") + } + if uploadErr, ok := err.(*UnexpectedHTTPStatusError); !ok { + t.Fatalf("Wrong error type %T: %s", err, err) + } else if expected := "500 " + http.StatusText(http.StatusInternalServerError); uploadErr.Status != expected { + t.Fatalf("Unexpected response status: %s, expected %s", uploadErr.Status, expected) + } +} diff --git a/vendor/github.com/docker/distribution/registry/client/errors.go b/vendor/github.com/docker/distribution/registry/client/errors.go new file mode 100644 index 000000000..52d49d5d2 --- /dev/null +++ b/vendor/github.com/docker/distribution/registry/client/errors.go @@ -0,0 +1,139 @@ +package client + +import ( + "encoding/json" + "errors" + "fmt" + "io" + "io/ioutil" + "net/http" + + "github.com/docker/distribution/registry/api/errcode" + "github.com/docker/distribution/registry/client/auth/challenge" +) + +// ErrNoErrorsInBody is returned when an HTTP response body parses to an empty +// errcode.Errors slice. +var ErrNoErrorsInBody = errors.New("no error details found in HTTP response body") + +// UnexpectedHTTPStatusError is returned when an unexpected HTTP status is +// returned when making a registry api call. +type UnexpectedHTTPStatusError struct { + Status string +} + +func (e *UnexpectedHTTPStatusError) Error() string { + return fmt.Sprintf("received unexpected HTTP status: %s", e.Status) +} + +// UnexpectedHTTPResponseError is returned when an expected HTTP status code +// is returned, but the content was unexpected and failed to be parsed. 
+type UnexpectedHTTPResponseError struct { + ParseErr error + StatusCode int + Response []byte +} + +func (e *UnexpectedHTTPResponseError) Error() string { + return fmt.Sprintf("error parsing HTTP %d response body: %s: %q", e.StatusCode, e.ParseErr.Error(), string(e.Response)) +} + +func parseHTTPErrorResponse(statusCode int, r io.Reader) error { + var errors errcode.Errors + body, err := ioutil.ReadAll(r) + if err != nil { + return err + } + + // For backward compatibility, handle irregularly formatted + // messages that contain a "details" field. + var detailsErr struct { + Details string `json:"details"` + } + err = json.Unmarshal(body, &detailsErr) + if err == nil && detailsErr.Details != "" { + switch statusCode { + case http.StatusUnauthorized: + return errcode.ErrorCodeUnauthorized.WithMessage(detailsErr.Details) + case http.StatusTooManyRequests: + return errcode.ErrorCodeTooManyRequests.WithMessage(detailsErr.Details) + default: + return errcode.ErrorCodeUnknown.WithMessage(detailsErr.Details) + } + } + + if err := json.Unmarshal(body, &errors); err != nil { + return &UnexpectedHTTPResponseError{ + ParseErr: err, + StatusCode: statusCode, + Response: body, + } + } + + if len(errors) == 0 { + // If there was no error specified in the body, return + // UnexpectedHTTPResponseError. + return &UnexpectedHTTPResponseError{ + ParseErr: ErrNoErrorsInBody, + StatusCode: statusCode, + Response: body, + } + } + + return errors +} + +func makeErrorList(err error) []error { + if errL, ok := err.(errcode.Errors); ok { + return []error(errL) + } + return []error{err} +} + +func mergeErrors(err1, err2 error) error { + return errcode.Errors(append(makeErrorList(err1), makeErrorList(err2)...)) +} + +// HandleErrorResponse returns error parsed from HTTP response for an +// unsuccessful HTTP response code (in the range 400 - 499 inclusive). An +// UnexpectedHTTPStatusError returned for response code outside of expected +// range. +func HandleErrorResponse(resp *http.Response) error { + if resp.StatusCode >= 400 && resp.StatusCode < 500 { + // Check for OAuth errors within the `WWW-Authenticate` header first + // See https://tools.ietf.org/html/rfc6750#section-3 + for _, c := range challenge.ResponseChallenges(resp) { + if c.Scheme == "bearer" { + var err errcode.Error + // codes defined at https://tools.ietf.org/html/rfc6750#section-3.1 + switch c.Parameters["error"] { + case "invalid_token": + err.Code = errcode.ErrorCodeUnauthorized + case "insufficient_scope": + err.Code = errcode.ErrorCodeDenied + default: + continue + } + if description := c.Parameters["error_description"]; description != "" { + err.Message = description + } else { + err.Message = err.Code.Message() + } + + return mergeErrors(err, parseHTTPErrorResponse(resp.StatusCode, resp.Body)) + } + } + err := parseHTTPErrorResponse(resp.StatusCode, resp.Body) + if uErr, ok := err.(*UnexpectedHTTPResponseError); ok && resp.StatusCode == 401 { + return errcode.ErrorCodeUnauthorized.WithDetail(uErr.Response) + } + return err + } + return &UnexpectedHTTPStatusError{Status: resp.Status} +} + +// SuccessStatus returns true if the argument is a successful HTTP response +// code (in the range 200 - 399 inclusive). 
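+// For example, SuccessStatus(http.StatusNotModified) is true and
+// SuccessStatus(http.StatusNotFound) is false.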
+func SuccessStatus(status int) bool { + return status >= 200 && status <= 399 +} diff --git a/vendor/github.com/docker/distribution/registry/client/errors_test.go b/vendor/github.com/docker/distribution/registry/client/errors_test.go new file mode 100644 index 000000000..ca9dddd10 --- /dev/null +++ b/vendor/github.com/docker/distribution/registry/client/errors_test.go @@ -0,0 +1,104 @@ +package client + +import ( + "bytes" + "io" + "net/http" + "strings" + "testing" +) + +type nopCloser struct { + io.Reader +} + +func (nopCloser) Close() error { return nil } + +func TestHandleErrorResponse401ValidBody(t *testing.T) { + json := "{\"errors\":[{\"code\":\"UNAUTHORIZED\",\"message\":\"action requires authentication\"}]}" + response := &http.Response{ + Status: "401 Unauthorized", + StatusCode: 401, + Body: nopCloser{bytes.NewBufferString(json)}, + } + err := HandleErrorResponse(response) + + expectedMsg := "unauthorized: action requires authentication" + if !strings.Contains(err.Error(), expectedMsg) { + t.Errorf("Expected \"%s\", got: \"%s\"", expectedMsg, err.Error()) + } +} + +func TestHandleErrorResponse401WithInvalidBody(t *testing.T) { + json := "{invalid json}" + response := &http.Response{ + Status: "401 Unauthorized", + StatusCode: 401, + Body: nopCloser{bytes.NewBufferString(json)}, + } + err := HandleErrorResponse(response) + + expectedMsg := "unauthorized: authentication required" + if !strings.Contains(err.Error(), expectedMsg) { + t.Errorf("Expected \"%s\", got: \"%s\"", expectedMsg, err.Error()) + } +} + +func TestHandleErrorResponseExpectedStatusCode400ValidBody(t *testing.T) { + json := "{\"errors\":[{\"code\":\"DIGEST_INVALID\",\"message\":\"provided digest does not match\"}]}" + response := &http.Response{ + Status: "400 Bad Request", + StatusCode: 400, + Body: nopCloser{bytes.NewBufferString(json)}, + } + err := HandleErrorResponse(response) + + expectedMsg := "digest invalid: provided digest does not match" + if !strings.Contains(err.Error(), expectedMsg) { + t.Errorf("Expected \"%s\", got: \"%s\"", expectedMsg, err.Error()) + } +} + +func TestHandleErrorResponseExpectedStatusCode404EmptyErrorSlice(t *testing.T) { + json := `{"randomkey": "randomvalue"}` + response := &http.Response{ + Status: "404 Not Found", + StatusCode: 404, + Body: nopCloser{bytes.NewBufferString(json)}, + } + err := HandleErrorResponse(response) + + expectedMsg := `error parsing HTTP 404 response body: no error details found in HTTP response body: "{\"randomkey\": \"randomvalue\"}"` + if !strings.Contains(err.Error(), expectedMsg) { + t.Errorf("Expected \"%s\", got: \"%s\"", expectedMsg, err.Error()) + } +} + +func TestHandleErrorResponseExpectedStatusCode404InvalidBody(t *testing.T) { + json := "{invalid json}" + response := &http.Response{ + Status: "404 Not Found", + StatusCode: 404, + Body: nopCloser{bytes.NewBufferString(json)}, + } + err := HandleErrorResponse(response) + + expectedMsg := "error parsing HTTP 404 response body: invalid character 'i' looking for beginning of object key string: \"{invalid json}\"" + if !strings.Contains(err.Error(), expectedMsg) { + t.Errorf("Expected \"%s\", got: \"%s\"", expectedMsg, err.Error()) + } +} + +func TestHandleErrorResponseUnexpectedStatusCode501(t *testing.T) { + response := &http.Response{ + Status: "501 Not Implemented", + StatusCode: 501, + Body: nopCloser{bytes.NewBufferString("{\"Error Encountered\" : \"Function not implemented.\"}")}, + } + err := HandleErrorResponse(response) + + expectedMsg := "received unexpected HTTP status: 501 Not 
Implemented" + if !strings.Contains(err.Error(), expectedMsg) { + t.Errorf("Expected \"%s\", got: \"%s\"", expectedMsg, err.Error()) + } +} diff --git a/vendor/github.com/docker/distribution/registry/client/repository.go b/vendor/github.com/docker/distribution/registry/client/repository.go new file mode 100644 index 000000000..1ebd0b183 --- /dev/null +++ b/vendor/github.com/docker/distribution/registry/client/repository.go @@ -0,0 +1,853 @@ +package client + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "io/ioutil" + "net/http" + "net/url" + "strconv" + "strings" + "time" + + "github.com/docker/distribution" + "github.com/docker/distribution/context" + "github.com/docker/distribution/digest" + "github.com/docker/distribution/reference" + "github.com/docker/distribution/registry/api/v2" + "github.com/docker/distribution/registry/client/transport" + "github.com/docker/distribution/registry/storage/cache" + "github.com/docker/distribution/registry/storage/cache/memory" +) + +// Registry provides an interface for calling Repositories, which returns a catalog of repositories. +type Registry interface { + Repositories(ctx context.Context, repos []string, last string) (n int, err error) +} + +// checkHTTPRedirect is a callback that can manipulate redirected HTTP +// requests. It is used to preserve Accept and Range headers. +func checkHTTPRedirect(req *http.Request, via []*http.Request) error { + if len(via) >= 10 { + return errors.New("stopped after 10 redirects") + } + + if len(via) > 0 { + for headerName, headerVals := range via[0].Header { + if headerName != "Accept" && headerName != "Range" { + continue + } + for _, val := range headerVals { + // Don't add to redirected request if redirected + // request already has a header with the same + // name and value. + hasValue := false + for _, existingVal := range req.Header[headerName] { + if existingVal == val { + hasValue = true + break + } + } + if !hasValue { + req.Header.Add(headerName, val) + } + } + } + } + + return nil +} + +// NewRegistry creates a registry namespace which can be used to get a listing of repositories +func NewRegistry(ctx context.Context, baseURL string, transport http.RoundTripper) (Registry, error) { + ub, err := v2.NewURLBuilderFromString(baseURL, false) + if err != nil { + return nil, err + } + + client := &http.Client{ + Transport: transport, + Timeout: 1 * time.Minute, + CheckRedirect: checkHTTPRedirect, + } + + return ®istry{ + client: client, + ub: ub, + context: ctx, + }, nil +} + +type registry struct { + client *http.Client + ub *v2.URLBuilder + context context.Context +} + +// Repositories returns a lexigraphically sorted catalog given a base URL. The 'entries' slice will be filled up to the size +// of the slice, starting at the value provided in 'last'. 
The number of entries will be returned along with io.EOF if there +// are no more entries +func (r *registry) Repositories(ctx context.Context, entries []string, last string) (int, error) { + var numFilled int + var returnErr error + + values := buildCatalogValues(len(entries), last) + u, err := r.ub.BuildCatalogURL(values) + if err != nil { + return 0, err + } + + resp, err := r.client.Get(u) + if err != nil { + return 0, err + } + defer resp.Body.Close() + + if SuccessStatus(resp.StatusCode) { + var ctlg struct { + Repositories []string `json:"repositories"` + } + decoder := json.NewDecoder(resp.Body) + + if err := decoder.Decode(&ctlg); err != nil { + return 0, err + } + + for cnt := range ctlg.Repositories { + entries[cnt] = ctlg.Repositories[cnt] + } + numFilled = len(ctlg.Repositories) + + link := resp.Header.Get("Link") + if link == "" { + returnErr = io.EOF + } + } else { + return 0, HandleErrorResponse(resp) + } + + return numFilled, returnErr +} + +// NewRepository creates a new Repository for the given repository name and base URL. +func NewRepository(ctx context.Context, name reference.Named, baseURL string, transport http.RoundTripper) (distribution.Repository, error) { + ub, err := v2.NewURLBuilderFromString(baseURL, false) + if err != nil { + return nil, err + } + + client := &http.Client{ + Transport: transport, + CheckRedirect: checkHTTPRedirect, + // TODO(dmcgowan): create cookie jar + } + + return &repository{ + client: client, + ub: ub, + name: name, + context: ctx, + }, nil +} + +type repository struct { + client *http.Client + ub *v2.URLBuilder + context context.Context + name reference.Named +} + +func (r *repository) Named() reference.Named { + return r.name +} + +func (r *repository) Blobs(ctx context.Context) distribution.BlobStore { + statter := &blobStatter{ + name: r.name, + ub: r.ub, + client: r.client, + } + return &blobs{ + name: r.name, + ub: r.ub, + client: r.client, + statter: cache.NewCachedBlobStatter(memory.NewInMemoryBlobDescriptorCacheProvider(), statter), + } +} + +func (r *repository) Manifests(ctx context.Context, options ...distribution.ManifestServiceOption) (distribution.ManifestService, error) { + // todo(richardscothern): options should be sent over the wire + return &manifests{ + name: r.name, + ub: r.ub, + client: r.client, + etags: make(map[string]string), + }, nil +} + +func (r *repository) Tags(ctx context.Context) distribution.TagService { + return &tags{ + client: r.client, + ub: r.ub, + context: r.context, + name: r.Named(), + } +} + +// tags implements remote tagging operations. +type tags struct { + client *http.Client + ub *v2.URLBuilder + context context.Context + name reference.Named +} + +// All returns all tags +func (t *tags) All(ctx context.Context) ([]string, error) { + var tags []string + + u, err := t.ub.BuildTagsURL(t.name) + if err != nil { + return tags, err + } + + for { + resp, err := t.client.Get(u) + if err != nil { + return tags, err + } + defer resp.Body.Close() + + if SuccessStatus(resp.StatusCode) { + b, err := ioutil.ReadAll(resp.Body) + if err != nil { + return tags, err + } + + tagsResponse := struct { + Tags []string `json:"tags"` + }{} + if err := json.Unmarshal(b, &tagsResponse); err != nil { + return tags, err + } + tags = append(tags, tagsResponse.Tags...) 
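+			// Tag listings may be paginated: follow the URL carried in the
+			// Link header until the registry stops returning one.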
+ if link := resp.Header.Get("Link"); link != "" { + u = strings.Trim(strings.Split(link, ";")[0], "<>") + } else { + return tags, nil + } + } else { + return tags, HandleErrorResponse(resp) + } + } +} + +func descriptorFromResponse(response *http.Response) (distribution.Descriptor, error) { + desc := distribution.Descriptor{} + headers := response.Header + + ctHeader := headers.Get("Content-Type") + if ctHeader == "" { + return distribution.Descriptor{}, errors.New("missing or empty Content-Type header") + } + desc.MediaType = ctHeader + + digestHeader := headers.Get("Docker-Content-Digest") + if digestHeader == "" { + bytes, err := ioutil.ReadAll(response.Body) + if err != nil { + return distribution.Descriptor{}, err + } + _, desc, err := distribution.UnmarshalManifest(ctHeader, bytes) + if err != nil { + return distribution.Descriptor{}, err + } + return desc, nil + } + + dgst, err := digest.ParseDigest(digestHeader) + if err != nil { + return distribution.Descriptor{}, err + } + desc.Digest = dgst + + lengthHeader := headers.Get("Content-Length") + if lengthHeader == "" { + return distribution.Descriptor{}, errors.New("missing or empty Content-Length header") + } + length, err := strconv.ParseInt(lengthHeader, 10, 64) + if err != nil { + return distribution.Descriptor{}, err + } + desc.Size = length + + return desc, nil + +} + +// Get issues a HEAD request for a Manifest against its named endpoint in order +// to construct a descriptor for the tag. If the registry doesn't support HEADing +// a manifest, fallback to GET. +func (t *tags) Get(ctx context.Context, tag string) (distribution.Descriptor, error) { + ref, err := reference.WithTag(t.name, tag) + if err != nil { + return distribution.Descriptor{}, err + } + u, err := t.ub.BuildManifestURL(ref) + if err != nil { + return distribution.Descriptor{}, err + } + + newRequest := func(method string) (*http.Response, error) { + req, err := http.NewRequest(method, u, nil) + if err != nil { + return nil, err + } + + for _, t := range distribution.ManifestMediaTypes() { + req.Header.Add("Accept", t) + } + resp, err := t.client.Do(req) + return resp, err + } + + resp, err := newRequest("HEAD") + if err != nil { + return distribution.Descriptor{}, err + } + defer resp.Body.Close() + + switch { + case resp.StatusCode >= 200 && resp.StatusCode < 400: + return descriptorFromResponse(resp) + default: + // if the response is an error - there will be no body to decode. 
+ // Issue a GET request: + // - for data from a server that does not handle HEAD + // - to get error details in case of a failure + resp, err = newRequest("GET") + if err != nil { + return distribution.Descriptor{}, err + } + defer resp.Body.Close() + + if resp.StatusCode >= 200 && resp.StatusCode < 400 { + return descriptorFromResponse(resp) + } + return distribution.Descriptor{}, HandleErrorResponse(resp) + } +} + +func (t *tags) Lookup(ctx context.Context, digest distribution.Descriptor) ([]string, error) { + panic("not implemented") +} + +func (t *tags) Tag(ctx context.Context, tag string, desc distribution.Descriptor) error { + panic("not implemented") +} + +func (t *tags) Untag(ctx context.Context, tag string) error { + panic("not implemented") +} + +type manifests struct { + name reference.Named + ub *v2.URLBuilder + client *http.Client + etags map[string]string +} + +func (ms *manifests) Exists(ctx context.Context, dgst digest.Digest) (bool, error) { + ref, err := reference.WithDigest(ms.name, dgst) + if err != nil { + return false, err + } + u, err := ms.ub.BuildManifestURL(ref) + if err != nil { + return false, err + } + + resp, err := ms.client.Head(u) + if err != nil { + return false, err + } + + if SuccessStatus(resp.StatusCode) { + return true, nil + } else if resp.StatusCode == http.StatusNotFound { + return false, nil + } + return false, HandleErrorResponse(resp) +} + +// AddEtagToTag allows a client to supply an eTag to Get which will be +// used for a conditional HTTP request. If the eTag matches, a nil manifest +// and ErrManifestNotModified error will be returned. etag is automatically +// quoted when added to this map. +func AddEtagToTag(tag, etag string) distribution.ManifestServiceOption { + return etagOption{tag, etag} +} + +type etagOption struct{ tag, etag string } + +func (o etagOption) Apply(ms distribution.ManifestService) error { + if ms, ok := ms.(*manifests); ok { + ms.etags[o.tag] = fmt.Sprintf(`"%s"`, o.etag) + return nil + } + return fmt.Errorf("etag options is a client-only option") +} + +// ReturnContentDigest allows a client to set a the content digest on +// a successful request from the 'Docker-Content-Digest' header. This +// returned digest is represents the digest which the registry uses +// to refer to the content and can be used to delete the content. 
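+//
+// A sketch of typical use (manifestService, ctx and "latest" are placeholders):
+//
+//	var dgst digest.Digest
+//	m, err := manifestService.Get(ctx, "", distribution.WithTag("latest"), ReturnContentDigest(&dgst))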
+func ReturnContentDigest(dgst *digest.Digest) distribution.ManifestServiceOption { + return contentDigestOption{dgst} +} + +type contentDigestOption struct{ digest *digest.Digest } + +func (o contentDigestOption) Apply(ms distribution.ManifestService) error { + return nil +} + +func (ms *manifests) Get(ctx context.Context, dgst digest.Digest, options ...distribution.ManifestServiceOption) (distribution.Manifest, error) { + var ( + digestOrTag string + ref reference.Named + err error + contentDgst *digest.Digest + ) + + for _, option := range options { + if opt, ok := option.(distribution.WithTagOption); ok { + digestOrTag = opt.Tag + ref, err = reference.WithTag(ms.name, opt.Tag) + if err != nil { + return nil, err + } + } else if opt, ok := option.(contentDigestOption); ok { + contentDgst = opt.digest + } else { + err := option.Apply(ms) + if err != nil { + return nil, err + } + } + } + + if digestOrTag == "" { + digestOrTag = dgst.String() + ref, err = reference.WithDigest(ms.name, dgst) + if err != nil { + return nil, err + } + } + + u, err := ms.ub.BuildManifestURL(ref) + if err != nil { + return nil, err + } + + req, err := http.NewRequest("GET", u, nil) + if err != nil { + return nil, err + } + + for _, t := range distribution.ManifestMediaTypes() { + req.Header.Add("Accept", t) + } + + if _, ok := ms.etags[digestOrTag]; ok { + req.Header.Set("If-None-Match", ms.etags[digestOrTag]) + } + + resp, err := ms.client.Do(req) + if err != nil { + return nil, err + } + defer resp.Body.Close() + if resp.StatusCode == http.StatusNotModified { + return nil, distribution.ErrManifestNotModified + } else if SuccessStatus(resp.StatusCode) { + if contentDgst != nil { + dgst, err := digest.ParseDigest(resp.Header.Get("Docker-Content-Digest")) + if err == nil { + *contentDgst = dgst + } + } + mt := resp.Header.Get("Content-Type") + body, err := ioutil.ReadAll(resp.Body) + + if err != nil { + return nil, err + } + m, _, err := distribution.UnmarshalManifest(mt, body) + if err != nil { + return nil, err + } + return m, nil + } + return nil, HandleErrorResponse(resp) +} + +// Put puts a manifest. A tag can be specified using an options parameter which uses some shared state to hold the +// tag name in order to build the correct upload URL. 
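+// When no tag option is supplied, the manifest is pushed by its canonical
+// digest, computed from the manifest payload.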
+func (ms *manifests) Put(ctx context.Context, m distribution.Manifest, options ...distribution.ManifestServiceOption) (digest.Digest, error) { + ref := ms.name + var tagged bool + + for _, option := range options { + if opt, ok := option.(distribution.WithTagOption); ok { + var err error + ref, err = reference.WithTag(ref, opt.Tag) + if err != nil { + return "", err + } + tagged = true + } else { + err := option.Apply(ms) + if err != nil { + return "", err + } + } + } + mediaType, p, err := m.Payload() + if err != nil { + return "", err + } + + if !tagged { + // generate a canonical digest and Put by digest + _, d, err := distribution.UnmarshalManifest(mediaType, p) + if err != nil { + return "", err + } + ref, err = reference.WithDigest(ref, d.Digest) + if err != nil { + return "", err + } + } + + manifestURL, err := ms.ub.BuildManifestURL(ref) + if err != nil { + return "", err + } + + putRequest, err := http.NewRequest("PUT", manifestURL, bytes.NewReader(p)) + if err != nil { + return "", err + } + + putRequest.Header.Set("Content-Type", mediaType) + + resp, err := ms.client.Do(putRequest) + if err != nil { + return "", err + } + defer resp.Body.Close() + + if SuccessStatus(resp.StatusCode) { + dgstHeader := resp.Header.Get("Docker-Content-Digest") + dgst, err := digest.ParseDigest(dgstHeader) + if err != nil { + return "", err + } + + return dgst, nil + } + + return "", HandleErrorResponse(resp) +} + +func (ms *manifests) Delete(ctx context.Context, dgst digest.Digest) error { + ref, err := reference.WithDigest(ms.name, dgst) + if err != nil { + return err + } + u, err := ms.ub.BuildManifestURL(ref) + if err != nil { + return err + } + req, err := http.NewRequest("DELETE", u, nil) + if err != nil { + return err + } + + resp, err := ms.client.Do(req) + if err != nil { + return err + } + defer resp.Body.Close() + + if SuccessStatus(resp.StatusCode) { + return nil + } + return HandleErrorResponse(resp) +} + +// todo(richardscothern): Restore interface and implementation with merge of #1050 +/*func (ms *manifests) Enumerate(ctx context.Context, manifests []distribution.Manifest, last distribution.Manifest) (n int, err error) { + panic("not supported") +}*/ + +type blobs struct { + name reference.Named + ub *v2.URLBuilder + client *http.Client + + statter distribution.BlobDescriptorService + distribution.BlobDeleter +} + +func sanitizeLocation(location, base string) (string, error) { + baseURL, err := url.Parse(base) + if err != nil { + return "", err + } + + locationURL, err := url.Parse(location) + if err != nil { + return "", err + } + + return baseURL.ResolveReference(locationURL).String(), nil +} + +func (bs *blobs) Stat(ctx context.Context, dgst digest.Digest) (distribution.Descriptor, error) { + return bs.statter.Stat(ctx, dgst) + +} + +func (bs *blobs) Get(ctx context.Context, dgst digest.Digest) ([]byte, error) { + reader, err := bs.Open(ctx, dgst) + if err != nil { + return nil, err + } + defer reader.Close() + + return ioutil.ReadAll(reader) +} + +func (bs *blobs) Open(ctx context.Context, dgst digest.Digest) (distribution.ReadSeekCloser, error) { + ref, err := reference.WithDigest(bs.name, dgst) + if err != nil { + return nil, err + } + blobURL, err := bs.ub.BuildBlobURL(ref) + if err != nil { + return nil, err + } + + return transport.NewHTTPReadSeeker(bs.client, blobURL, + func(resp *http.Response) error { + if resp.StatusCode == http.StatusNotFound { + return distribution.ErrBlobUnknown + } + return HandleErrorResponse(resp) + }), nil +} + +func (bs *blobs) ServeBlob(ctx 
context.Context, w http.ResponseWriter, r *http.Request, dgst digest.Digest) error { + panic("not implemented") +} + +func (bs *blobs) Put(ctx context.Context, mediaType string, p []byte) (distribution.Descriptor, error) { + writer, err := bs.Create(ctx) + if err != nil { + return distribution.Descriptor{}, err + } + dgstr := digest.Canonical.New() + n, err := io.Copy(writer, io.TeeReader(bytes.NewReader(p), dgstr.Hash())) + if err != nil { + return distribution.Descriptor{}, err + } + if n < int64(len(p)) { + return distribution.Descriptor{}, fmt.Errorf("short copy: wrote %d of %d", n, len(p)) + } + + desc := distribution.Descriptor{ + MediaType: mediaType, + Size: int64(len(p)), + Digest: dgstr.Digest(), + } + + return writer.Commit(ctx, desc) +} + +type optionFunc func(interface{}) error + +func (f optionFunc) Apply(v interface{}) error { + return f(v) +} + +// WithMountFrom returns a BlobCreateOption which designates that the blob should be +// mounted from the given canonical reference. +func WithMountFrom(ref reference.Canonical) distribution.BlobCreateOption { + return optionFunc(func(v interface{}) error { + opts, ok := v.(*distribution.CreateOptions) + if !ok { + return fmt.Errorf("unexpected options type: %T", v) + } + + opts.Mount.ShouldMount = true + opts.Mount.From = ref + + return nil + }) +} + +func (bs *blobs) Create(ctx context.Context, options ...distribution.BlobCreateOption) (distribution.BlobWriter, error) { + var opts distribution.CreateOptions + + for _, option := range options { + err := option.Apply(&opts) + if err != nil { + return nil, err + } + } + + var values []url.Values + + if opts.Mount.ShouldMount { + values = append(values, url.Values{"from": {opts.Mount.From.Name()}, "mount": {opts.Mount.From.Digest().String()}}) + } + + u, err := bs.ub.BuildBlobUploadURL(bs.name, values...) 
+ if err != nil { + return nil, err + } + + resp, err := bs.client.Post(u, "", nil) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + switch resp.StatusCode { + case http.StatusCreated: + desc, err := bs.statter.Stat(ctx, opts.Mount.From.Digest()) + if err != nil { + return nil, err + } + return nil, distribution.ErrBlobMounted{From: opts.Mount.From, Descriptor: desc} + case http.StatusAccepted: + // TODO(dmcgowan): Check for invalid UUID + uuid := resp.Header.Get("Docker-Upload-UUID") + location, err := sanitizeLocation(resp.Header.Get("Location"), u) + if err != nil { + return nil, err + } + + return &httpBlobUpload{ + statter: bs.statter, + client: bs.client, + uuid: uuid, + startedAt: time.Now(), + location: location, + }, nil + default: + return nil, HandleErrorResponse(resp) + } +} + +func (bs *blobs) Resume(ctx context.Context, id string) (distribution.BlobWriter, error) { + panic("not implemented") +} + +func (bs *blobs) Delete(ctx context.Context, dgst digest.Digest) error { + return bs.statter.Clear(ctx, dgst) +} + +type blobStatter struct { + name reference.Named + ub *v2.URLBuilder + client *http.Client +} + +func (bs *blobStatter) Stat(ctx context.Context, dgst digest.Digest) (distribution.Descriptor, error) { + ref, err := reference.WithDigest(bs.name, dgst) + if err != nil { + return distribution.Descriptor{}, err + } + u, err := bs.ub.BuildBlobURL(ref) + if err != nil { + return distribution.Descriptor{}, err + } + + resp, err := bs.client.Head(u) + if err != nil { + return distribution.Descriptor{}, err + } + defer resp.Body.Close() + + if SuccessStatus(resp.StatusCode) { + lengthHeader := resp.Header.Get("Content-Length") + if lengthHeader == "" { + return distribution.Descriptor{}, fmt.Errorf("missing content-length header for request: %s", u) + } + + length, err := strconv.ParseInt(lengthHeader, 10, 64) + if err != nil { + return distribution.Descriptor{}, fmt.Errorf("error parsing content-length: %v", err) + } + + return distribution.Descriptor{ + MediaType: resp.Header.Get("Content-Type"), + Size: length, + Digest: dgst, + }, nil + } else if resp.StatusCode == http.StatusNotFound { + return distribution.Descriptor{}, distribution.ErrBlobUnknown + } + return distribution.Descriptor{}, HandleErrorResponse(resp) +} + +func buildCatalogValues(maxEntries int, last string) url.Values { + values := url.Values{} + + if maxEntries > 0 { + values.Add("n", strconv.Itoa(maxEntries)) + } + + if last != "" { + values.Add("last", last) + } + + return values +} + +func (bs *blobStatter) Clear(ctx context.Context, dgst digest.Digest) error { + ref, err := reference.WithDigest(bs.name, dgst) + if err != nil { + return err + } + blobURL, err := bs.ub.BuildBlobURL(ref) + if err != nil { + return err + } + + req, err := http.NewRequest("DELETE", blobURL, nil) + if err != nil { + return err + } + + resp, err := bs.client.Do(req) + if err != nil { + return err + } + defer resp.Body.Close() + + if SuccessStatus(resp.StatusCode) { + return nil + } + return HandleErrorResponse(resp) +} + +func (bs *blobStatter) SetDescriptor(ctx context.Context, dgst digest.Digest, desc distribution.Descriptor) error { + return nil +} diff --git a/vendor/github.com/docker/distribution/registry/client/repository_test.go b/vendor/github.com/docker/distribution/registry/client/repository_test.go new file mode 100644 index 000000000..a232e03ec --- /dev/null +++ b/vendor/github.com/docker/distribution/registry/client/repository_test.go @@ -0,0 +1,1226 @@ +package client + +import ( + "bytes" + 
"crypto/rand" + "encoding/json" + "fmt" + "io" + "log" + "net/http" + "net/http/httptest" + "strconv" + "strings" + "testing" + "time" + + "github.com/docker/distribution" + "github.com/docker/distribution/context" + "github.com/docker/distribution/digest" + "github.com/docker/distribution/manifest" + "github.com/docker/distribution/manifest/schema1" + "github.com/docker/distribution/reference" + "github.com/docker/distribution/registry/api/errcode" + "github.com/docker/distribution/registry/api/v2" + "github.com/docker/distribution/testutil" + "github.com/docker/distribution/uuid" + "github.com/docker/libtrust" +) + +func testServer(rrm testutil.RequestResponseMap) (string, func()) { + h := testutil.NewHandler(rrm) + s := httptest.NewServer(h) + return s.URL, s.Close +} + +func newRandomBlob(size int) (digest.Digest, []byte) { + b := make([]byte, size) + if n, err := rand.Read(b); err != nil { + panic(err) + } else if n != size { + panic("unable to read enough bytes") + } + + return digest.FromBytes(b), b +} + +func addTestFetch(repo string, dgst digest.Digest, content []byte, m *testutil.RequestResponseMap) { + *m = append(*m, testutil.RequestResponseMapping{ + Request: testutil.Request{ + Method: "GET", + Route: "/v2/" + repo + "/blobs/" + dgst.String(), + }, + Response: testutil.Response{ + StatusCode: http.StatusOK, + Body: content, + Headers: http.Header(map[string][]string{ + "Content-Length": {fmt.Sprint(len(content))}, + "Last-Modified": {time.Now().Add(-1 * time.Second).Format(time.ANSIC)}, + }), + }, + }) + + *m = append(*m, testutil.RequestResponseMapping{ + Request: testutil.Request{ + Method: "HEAD", + Route: "/v2/" + repo + "/blobs/" + dgst.String(), + }, + Response: testutil.Response{ + StatusCode: http.StatusOK, + Headers: http.Header(map[string][]string{ + "Content-Length": {fmt.Sprint(len(content))}, + "Last-Modified": {time.Now().Add(-1 * time.Second).Format(time.ANSIC)}, + }), + }, + }) +} + +func addTestCatalog(route string, content []byte, link string, m *testutil.RequestResponseMap) { + headers := map[string][]string{ + "Content-Length": {strconv.Itoa(len(content))}, + "Content-Type": {"application/json; charset=utf-8"}, + } + if link != "" { + headers["Link"] = append(headers["Link"], link) + } + + *m = append(*m, testutil.RequestResponseMapping{ + Request: testutil.Request{ + Method: "GET", + Route: route, + }, + Response: testutil.Response{ + StatusCode: http.StatusOK, + Body: content, + Headers: http.Header(headers), + }, + }) +} + +func TestBlobDelete(t *testing.T) { + dgst, _ := newRandomBlob(1024) + var m testutil.RequestResponseMap + repo, _ := reference.ParseNamed("test.example.com/repo1") + m = append(m, testutil.RequestResponseMapping{ + Request: testutil.Request{ + Method: "DELETE", + Route: "/v2/" + repo.Name() + "/blobs/" + dgst.String(), + }, + Response: testutil.Response{ + StatusCode: http.StatusAccepted, + Headers: http.Header(map[string][]string{ + "Content-Length": {"0"}, + }), + }, + }) + + e, c := testServer(m) + defer c() + + ctx := context.Background() + r, err := NewRepository(ctx, repo, e, nil) + if err != nil { + t.Fatal(err) + } + l := r.Blobs(ctx) + err = l.Delete(ctx, dgst) + if err != nil { + t.Errorf("Error deleting blob: %s", err.Error()) + } + +} + +func TestBlobFetch(t *testing.T) { + d1, b1 := newRandomBlob(1024) + var m testutil.RequestResponseMap + addTestFetch("test.example.com/repo1", d1, b1, &m) + + e, c := testServer(m) + defer c() + + ctx := context.Background() + repo, _ := reference.ParseNamed("test.example.com/repo1") + 
r, err := NewRepository(ctx, repo, e, nil) + if err != nil { + t.Fatal(err) + } + l := r.Blobs(ctx) + + b, err := l.Get(ctx, d1) + if err != nil { + t.Fatal(err) + } + if bytes.Compare(b, b1) != 0 { + t.Fatalf("Wrong bytes values fetched: [%d]byte != [%d]byte", len(b), len(b1)) + } + + // TODO(dmcgowan): Test for unknown blob case +} + +func TestBlobExistsNoContentLength(t *testing.T) { + var m testutil.RequestResponseMap + + repo, _ := reference.ParseNamed("biff") + dgst, content := newRandomBlob(1024) + m = append(m, testutil.RequestResponseMapping{ + Request: testutil.Request{ + Method: "GET", + Route: "/v2/" + repo.Name() + "/blobs/" + dgst.String(), + }, + Response: testutil.Response{ + StatusCode: http.StatusOK, + Body: content, + Headers: http.Header(map[string][]string{ + // "Content-Length": {fmt.Sprint(len(content))}, + "Last-Modified": {time.Now().Add(-1 * time.Second).Format(time.ANSIC)}, + }), + }, + }) + + m = append(m, testutil.RequestResponseMapping{ + Request: testutil.Request{ + Method: "HEAD", + Route: "/v2/" + repo.Name() + "/blobs/" + dgst.String(), + }, + Response: testutil.Response{ + StatusCode: http.StatusOK, + Headers: http.Header(map[string][]string{ + // "Content-Length": {fmt.Sprint(len(content))}, + "Last-Modified": {time.Now().Add(-1 * time.Second).Format(time.ANSIC)}, + }), + }, + }) + e, c := testServer(m) + defer c() + + ctx := context.Background() + r, err := NewRepository(ctx, repo, e, nil) + if err != nil { + t.Fatal(err) + } + l := r.Blobs(ctx) + + _, err = l.Stat(ctx, dgst) + if err == nil { + t.Fatal(err) + } + if !strings.Contains(err.Error(), "missing content-length heade") { + t.Fatalf("Expected missing content-length error message") + } + +} + +func TestBlobExists(t *testing.T) { + d1, b1 := newRandomBlob(1024) + var m testutil.RequestResponseMap + addTestFetch("test.example.com/repo1", d1, b1, &m) + + e, c := testServer(m) + defer c() + + ctx := context.Background() + repo, _ := reference.ParseNamed("test.example.com/repo1") + r, err := NewRepository(ctx, repo, e, nil) + if err != nil { + t.Fatal(err) + } + l := r.Blobs(ctx) + + stat, err := l.Stat(ctx, d1) + if err != nil { + t.Fatal(err) + } + + if stat.Digest != d1 { + t.Fatalf("Unexpected digest: %s, expected %s", stat.Digest, d1) + } + + if stat.Size != int64(len(b1)) { + t.Fatalf("Unexpected length: %d, expected %d", stat.Size, len(b1)) + } + + // TODO(dmcgowan): Test error cases and ErrBlobUnknown case +} + +func TestBlobUploadChunked(t *testing.T) { + dgst, b1 := newRandomBlob(1024) + var m testutil.RequestResponseMap + chunks := [][]byte{ + b1[0:256], + b1[256:512], + b1[512:513], + b1[513:1024], + } + repo, _ := reference.ParseNamed("test.example.com/uploadrepo") + uuids := []string{uuid.Generate().String()} + m = append(m, testutil.RequestResponseMapping{ + Request: testutil.Request{ + Method: "POST", + Route: "/v2/" + repo.Name() + "/blobs/uploads/", + }, + Response: testutil.Response{ + StatusCode: http.StatusAccepted, + Headers: http.Header(map[string][]string{ + "Content-Length": {"0"}, + "Location": {"/v2/" + repo.Name() + "/blobs/uploads/" + uuids[0]}, + "Docker-Upload-UUID": {uuids[0]}, + "Range": {"0-0"}, + }), + }, + }) + offset := 0 + for i, chunk := range chunks { + uuids = append(uuids, uuid.Generate().String()) + newOffset := offset + len(chunk) + m = append(m, testutil.RequestResponseMapping{ + Request: testutil.Request{ + Method: "PATCH", + Route: "/v2/" + repo.Name() + "/blobs/uploads/" + uuids[i], + Body: chunk, + }, + Response: testutil.Response{ + StatusCode: 
http.StatusAccepted, + Headers: http.Header(map[string][]string{ + "Content-Length": {"0"}, + "Location": {"/v2/" + repo.Name() + "/blobs/uploads/" + uuids[i+1]}, + "Docker-Upload-UUID": {uuids[i+1]}, + "Range": {fmt.Sprintf("%d-%d", offset, newOffset-1)}, + }), + }, + }) + offset = newOffset + } + m = append(m, testutil.RequestResponseMapping{ + Request: testutil.Request{ + Method: "PUT", + Route: "/v2/" + repo.Name() + "/blobs/uploads/" + uuids[len(uuids)-1], + QueryParams: map[string][]string{ + "digest": {dgst.String()}, + }, + }, + Response: testutil.Response{ + StatusCode: http.StatusCreated, + Headers: http.Header(map[string][]string{ + "Content-Length": {"0"}, + "Docker-Content-Digest": {dgst.String()}, + "Content-Range": {fmt.Sprintf("0-%d", offset-1)}, + }), + }, + }) + m = append(m, testutil.RequestResponseMapping{ + Request: testutil.Request{ + Method: "HEAD", + Route: "/v2/" + repo.Name() + "/blobs/" + dgst.String(), + }, + Response: testutil.Response{ + StatusCode: http.StatusOK, + Headers: http.Header(map[string][]string{ + "Content-Length": {fmt.Sprint(offset)}, + "Last-Modified": {time.Now().Add(-1 * time.Second).Format(time.ANSIC)}, + }), + }, + }) + + e, c := testServer(m) + defer c() + + ctx := context.Background() + r, err := NewRepository(ctx, repo, e, nil) + if err != nil { + t.Fatal(err) + } + l := r.Blobs(ctx) + + upload, err := l.Create(ctx) + if err != nil { + t.Fatal(err) + } + + if upload.ID() != uuids[0] { + log.Fatalf("Unexpected UUID %s; expected %s", upload.ID(), uuids[0]) + } + + for _, chunk := range chunks { + n, err := upload.Write(chunk) + if err != nil { + t.Fatal(err) + } + if n != len(chunk) { + t.Fatalf("Unexpected length returned from write: %d; expected: %d", n, len(chunk)) + } + } + + blob, err := upload.Commit(ctx, distribution.Descriptor{ + Digest: dgst, + Size: int64(len(b1)), + }) + if err != nil { + t.Fatal(err) + } + + if blob.Size != int64(len(b1)) { + t.Fatalf("Unexpected blob size: %d; expected: %d", blob.Size, len(b1)) + } +} + +func TestBlobUploadMonolithic(t *testing.T) { + dgst, b1 := newRandomBlob(1024) + var m testutil.RequestResponseMap + repo, _ := reference.ParseNamed("test.example.com/uploadrepo") + uploadID := uuid.Generate().String() + m = append(m, testutil.RequestResponseMapping{ + Request: testutil.Request{ + Method: "POST", + Route: "/v2/" + repo.Name() + "/blobs/uploads/", + }, + Response: testutil.Response{ + StatusCode: http.StatusAccepted, + Headers: http.Header(map[string][]string{ + "Content-Length": {"0"}, + "Location": {"/v2/" + repo.Name() + "/blobs/uploads/" + uploadID}, + "Docker-Upload-UUID": {uploadID}, + "Range": {"0-0"}, + }), + }, + }) + m = append(m, testutil.RequestResponseMapping{ + Request: testutil.Request{ + Method: "PATCH", + Route: "/v2/" + repo.Name() + "/blobs/uploads/" + uploadID, + Body: b1, + }, + Response: testutil.Response{ + StatusCode: http.StatusAccepted, + Headers: http.Header(map[string][]string{ + "Location": {"/v2/" + repo.Name() + "/blobs/uploads/" + uploadID}, + "Docker-Upload-UUID": {uploadID}, + "Content-Length": {"0"}, + "Docker-Content-Digest": {dgst.String()}, + "Range": {fmt.Sprintf("0-%d", len(b1)-1)}, + }), + }, + }) + m = append(m, testutil.RequestResponseMapping{ + Request: testutil.Request{ + Method: "PUT", + Route: "/v2/" + repo.Name() + "/blobs/uploads/" + uploadID, + QueryParams: map[string][]string{ + "digest": {dgst.String()}, + }, + }, + Response: testutil.Response{ + StatusCode: http.StatusCreated, + Headers: http.Header(map[string][]string{ + "Content-Length": 
{"0"}, + "Docker-Content-Digest": {dgst.String()}, + "Content-Range": {fmt.Sprintf("0-%d", len(b1)-1)}, + }), + }, + }) + m = append(m, testutil.RequestResponseMapping{ + Request: testutil.Request{ + Method: "HEAD", + Route: "/v2/" + repo.Name() + "/blobs/" + dgst.String(), + }, + Response: testutil.Response{ + StatusCode: http.StatusOK, + Headers: http.Header(map[string][]string{ + "Content-Length": {fmt.Sprint(len(b1))}, + "Last-Modified": {time.Now().Add(-1 * time.Second).Format(time.ANSIC)}, + }), + }, + }) + + e, c := testServer(m) + defer c() + + ctx := context.Background() + r, err := NewRepository(ctx, repo, e, nil) + if err != nil { + t.Fatal(err) + } + l := r.Blobs(ctx) + + upload, err := l.Create(ctx) + if err != nil { + t.Fatal(err) + } + + if upload.ID() != uploadID { + log.Fatalf("Unexpected UUID %s; expected %s", upload.ID(), uploadID) + } + + n, err := upload.ReadFrom(bytes.NewReader(b1)) + if err != nil { + t.Fatal(err) + } + if n != int64(len(b1)) { + t.Fatalf("Unexpected ReadFrom length: %d; expected: %d", n, len(b1)) + } + + blob, err := upload.Commit(ctx, distribution.Descriptor{ + Digest: dgst, + Size: int64(len(b1)), + }) + if err != nil { + t.Fatal(err) + } + + if blob.Size != int64(len(b1)) { + t.Fatalf("Unexpected blob size: %d; expected: %d", blob.Size, len(b1)) + } +} + +func TestBlobMount(t *testing.T) { + dgst, content := newRandomBlob(1024) + var m testutil.RequestResponseMap + repo, _ := reference.ParseNamed("test.example.com/uploadrepo") + + sourceRepo, _ := reference.ParseNamed("test.example.com/sourcerepo") + canonicalRef, _ := reference.WithDigest(sourceRepo, dgst) + + m = append(m, testutil.RequestResponseMapping{ + Request: testutil.Request{ + Method: "POST", + Route: "/v2/" + repo.Name() + "/blobs/uploads/", + QueryParams: map[string][]string{"from": {sourceRepo.Name()}, "mount": {dgst.String()}}, + }, + Response: testutil.Response{ + StatusCode: http.StatusCreated, + Headers: http.Header(map[string][]string{ + "Content-Length": {"0"}, + "Location": {"/v2/" + repo.Name() + "/blobs/" + dgst.String()}, + "Docker-Content-Digest": {dgst.String()}, + }), + }, + }) + m = append(m, testutil.RequestResponseMapping{ + Request: testutil.Request{ + Method: "HEAD", + Route: "/v2/" + repo.Name() + "/blobs/" + dgst.String(), + }, + Response: testutil.Response{ + StatusCode: http.StatusOK, + Headers: http.Header(map[string][]string{ + "Content-Length": {fmt.Sprint(len(content))}, + "Last-Modified": {time.Now().Add(-1 * time.Second).Format(time.ANSIC)}, + }), + }, + }) + + e, c := testServer(m) + defer c() + + ctx := context.Background() + r, err := NewRepository(ctx, repo, e, nil) + if err != nil { + t.Fatal(err) + } + + l := r.Blobs(ctx) + + bw, err := l.Create(ctx, WithMountFrom(canonicalRef)) + if bw != nil { + t.Fatalf("Expected blob writer to be nil, was %v", bw) + } + + if ebm, ok := err.(distribution.ErrBlobMounted); ok { + if ebm.From.Digest() != dgst { + t.Fatalf("Unexpected digest: %s, expected %s", ebm.From.Digest(), dgst) + } + if ebm.From.Name() != sourceRepo.Name() { + t.Fatalf("Unexpected from: %s, expected %s", ebm.From.Name(), sourceRepo) + } + } else { + t.Fatalf("Unexpected error: %v, expected an ErrBlobMounted", err) + } +} + +func newRandomSchemaV1Manifest(name reference.Named, tag string, blobCount int) (*schema1.SignedManifest, digest.Digest, []byte) { + blobs := make([]schema1.FSLayer, blobCount) + history := make([]schema1.History, blobCount) + + for i := 0; i < blobCount; i++ { + dgst, blob := newRandomBlob((i % 5) * 16) + + blobs[i] = 
schema1.FSLayer{BlobSum: dgst} + history[i] = schema1.History{V1Compatibility: fmt.Sprintf("{\"Hex\": \"%x\"}", blob)} + } + + m := schema1.Manifest{ + Name: name.String(), + Tag: tag, + Architecture: "x86", + FSLayers: blobs, + History: history, + Versioned: manifest.Versioned{ + SchemaVersion: 1, + }, + } + + pk, err := libtrust.GenerateECP256PrivateKey() + if err != nil { + panic(err) + } + + sm, err := schema1.Sign(&m, pk) + if err != nil { + panic(err) + } + + return sm, digest.FromBytes(sm.Canonical), sm.Canonical +} + +func addTestManifestWithEtag(repo reference.Named, reference string, content []byte, m *testutil.RequestResponseMap, dgst string) { + actualDigest := digest.FromBytes(content) + getReqWithEtag := testutil.Request{ + Method: "GET", + Route: "/v2/" + repo.Name() + "/manifests/" + reference, + Headers: http.Header(map[string][]string{ + "If-None-Match": {fmt.Sprintf(`"%s"`, dgst)}, + }), + } + + var getRespWithEtag testutil.Response + if actualDigest.String() == dgst { + getRespWithEtag = testutil.Response{ + StatusCode: http.StatusNotModified, + Body: []byte{}, + Headers: http.Header(map[string][]string{ + "Content-Length": {"0"}, + "Last-Modified": {time.Now().Add(-1 * time.Second).Format(time.ANSIC)}, + "Content-Type": {schema1.MediaTypeSignedManifest}, + }), + } + } else { + getRespWithEtag = testutil.Response{ + StatusCode: http.StatusOK, + Body: content, + Headers: http.Header(map[string][]string{ + "Content-Length": {fmt.Sprint(len(content))}, + "Last-Modified": {time.Now().Add(-1 * time.Second).Format(time.ANSIC)}, + "Content-Type": {schema1.MediaTypeSignedManifest}, + }), + } + + } + *m = append(*m, testutil.RequestResponseMapping{Request: getReqWithEtag, Response: getRespWithEtag}) +} + +func contentDigestString(mediatype string, content []byte) string { + if mediatype == schema1.MediaTypeSignedManifest { + m, _, _ := distribution.UnmarshalManifest(mediatype, content) + content = m.(*schema1.SignedManifest).Canonical + } + return digest.Canonical.FromBytes(content).String() +} + +func addTestManifest(repo reference.Named, reference string, mediatype string, content []byte, m *testutil.RequestResponseMap) { + *m = append(*m, testutil.RequestResponseMapping{ + Request: testutil.Request{ + Method: "GET", + Route: "/v2/" + repo.Name() + "/manifests/" + reference, + }, + Response: testutil.Response{ + StatusCode: http.StatusOK, + Body: content, + Headers: http.Header(map[string][]string{ + "Content-Length": {fmt.Sprint(len(content))}, + "Last-Modified": {time.Now().Add(-1 * time.Second).Format(time.ANSIC)}, + "Content-Type": {mediatype}, + "Docker-Content-Digest": {contentDigestString(mediatype, content)}, + }), + }, + }) + *m = append(*m, testutil.RequestResponseMapping{ + Request: testutil.Request{ + Method: "HEAD", + Route: "/v2/" + repo.Name() + "/manifests/" + reference, + }, + Response: testutil.Response{ + StatusCode: http.StatusOK, + Headers: http.Header(map[string][]string{ + "Content-Length": {fmt.Sprint(len(content))}, + "Last-Modified": {time.Now().Add(-1 * time.Second).Format(time.ANSIC)}, + "Content-Type": {mediatype}, + "Docker-Content-Digest": {digest.Canonical.FromBytes(content).String()}, + }), + }, + }) + +} + +func checkEqualManifest(m1, m2 *schema1.SignedManifest) error { + if m1.Name != m2.Name { + return fmt.Errorf("name does not match %q != %q", m1.Name, m2.Name) + } + if m1.Tag != m2.Tag { + return fmt.Errorf("tag does not match %q != %q", m1.Tag, m2.Tag) + } + if len(m1.FSLayers) != len(m2.FSLayers) { + return fmt.Errorf("fs blob length 
does not match %d != %d", len(m1.FSLayers), len(m2.FSLayers)) + } + for i := range m1.FSLayers { + if m1.FSLayers[i].BlobSum != m2.FSLayers[i].BlobSum { + return fmt.Errorf("blobsum does not match %q != %q", m1.FSLayers[i].BlobSum, m2.FSLayers[i].BlobSum) + } + } + if len(m1.History) != len(m2.History) { + return fmt.Errorf("history length does not match %d != %d", len(m1.History), len(m2.History)) + } + for i := range m1.History { + if m1.History[i].V1Compatibility != m2.History[i].V1Compatibility { + return fmt.Errorf("blobsum does not match %q != %q", m1.History[i].V1Compatibility, m2.History[i].V1Compatibility) + } + } + return nil +} + +func TestV1ManifestFetch(t *testing.T) { + ctx := context.Background() + repo, _ := reference.ParseNamed("test.example.com/repo") + m1, dgst, _ := newRandomSchemaV1Manifest(repo, "latest", 6) + var m testutil.RequestResponseMap + _, pl, err := m1.Payload() + if err != nil { + t.Fatal(err) + } + addTestManifest(repo, dgst.String(), schema1.MediaTypeSignedManifest, pl, &m) + addTestManifest(repo, "latest", schema1.MediaTypeSignedManifest, pl, &m) + addTestManifest(repo, "badcontenttype", "text/html", pl, &m) + + e, c := testServer(m) + defer c() + + r, err := NewRepository(context.Background(), repo, e, nil) + if err != nil { + t.Fatal(err) + } + ms, err := r.Manifests(ctx) + if err != nil { + t.Fatal(err) + } + + ok, err := ms.Exists(ctx, dgst) + if err != nil { + t.Fatal(err) + } + if !ok { + t.Fatal("Manifest does not exist") + } + + manifest, err := ms.Get(ctx, dgst) + if err != nil { + t.Fatal(err) + } + v1manifest, ok := manifest.(*schema1.SignedManifest) + if !ok { + t.Fatalf("Unexpected manifest type from Get: %T", manifest) + } + + if err := checkEqualManifest(v1manifest, m1); err != nil { + t.Fatal(err) + } + + var contentDigest digest.Digest + manifest, err = ms.Get(ctx, dgst, distribution.WithTag("latest"), ReturnContentDigest(&contentDigest)) + if err != nil { + t.Fatal(err) + } + v1manifest, ok = manifest.(*schema1.SignedManifest) + if !ok { + t.Fatalf("Unexpected manifest type from Get: %T", manifest) + } + + if err = checkEqualManifest(v1manifest, m1); err != nil { + t.Fatal(err) + } + + if contentDigest != dgst { + t.Fatalf("Unexpected returned content digest %v, expected %v", contentDigest, dgst) + } + + manifest, err = ms.Get(ctx, dgst, distribution.WithTag("badcontenttype")) + if err != nil { + t.Fatal(err) + } + v1manifest, ok = manifest.(*schema1.SignedManifest) + if !ok { + t.Fatalf("Unexpected manifest type from Get: %T", manifest) + } + + if err = checkEqualManifest(v1manifest, m1); err != nil { + t.Fatal(err) + } +} + +func TestManifestFetchWithEtag(t *testing.T) { + repo, _ := reference.ParseNamed("test.example.com/repo/by/tag") + _, d1, p1 := newRandomSchemaV1Manifest(repo, "latest", 6) + var m testutil.RequestResponseMap + addTestManifestWithEtag(repo, "latest", p1, &m, d1.String()) + + e, c := testServer(m) + defer c() + + ctx := context.Background() + r, err := NewRepository(ctx, repo, e, nil) + if err != nil { + t.Fatal(err) + } + + ms, err := r.Manifests(ctx) + if err != nil { + t.Fatal(err) + } + + clientManifestService, ok := ms.(*manifests) + if !ok { + panic("wrong type for client manifest service") + } + _, err = clientManifestService.Get(ctx, d1, distribution.WithTag("latest"), AddEtagToTag("latest", d1.String())) + if err != distribution.ErrManifestNotModified { + t.Fatal(err) + } +} + +func TestManifestDelete(t *testing.T) { + repo, _ := reference.ParseNamed("test.example.com/repo/delete") + _, dgst1, _ := 
newRandomSchemaV1Manifest(repo, "latest", 6) + _, dgst2, _ := newRandomSchemaV1Manifest(repo, "latest", 6) + var m testutil.RequestResponseMap + m = append(m, testutil.RequestResponseMapping{ + Request: testutil.Request{ + Method: "DELETE", + Route: "/v2/" + repo.Name() + "/manifests/" + dgst1.String(), + }, + Response: testutil.Response{ + StatusCode: http.StatusAccepted, + Headers: http.Header(map[string][]string{ + "Content-Length": {"0"}, + }), + }, + }) + + e, c := testServer(m) + defer c() + + r, err := NewRepository(context.Background(), repo, e, nil) + if err != nil { + t.Fatal(err) + } + ctx := context.Background() + ms, err := r.Manifests(ctx) + if err != nil { + t.Fatal(err) + } + + if err := ms.Delete(ctx, dgst1); err != nil { + t.Fatal(err) + } + if err := ms.Delete(ctx, dgst2); err == nil { + t.Fatal("Expected error deleting unknown manifest") + } + // TODO(dmcgowan): Check for specific unknown error +} + +func TestManifestPut(t *testing.T) { + repo, _ := reference.ParseNamed("test.example.com/repo/delete") + m1, dgst, _ := newRandomSchemaV1Manifest(repo, "other", 6) + + _, payload, err := m1.Payload() + if err != nil { + t.Fatal(err) + } + + var m testutil.RequestResponseMap + m = append(m, testutil.RequestResponseMapping{ + Request: testutil.Request{ + Method: "PUT", + Route: "/v2/" + repo.Name() + "/manifests/other", + Body: payload, + }, + Response: testutil.Response{ + StatusCode: http.StatusAccepted, + Headers: http.Header(map[string][]string{ + "Content-Length": {"0"}, + "Docker-Content-Digest": {dgst.String()}, + }), + }, + }) + + putDgst := digest.FromBytes(m1.Canonical) + m = append(m, testutil.RequestResponseMapping{ + Request: testutil.Request{ + Method: "PUT", + Route: "/v2/" + repo.Name() + "/manifests/" + putDgst.String(), + Body: payload, + }, + Response: testutil.Response{ + StatusCode: http.StatusAccepted, + Headers: http.Header(map[string][]string{ + "Content-Length": {"0"}, + "Docker-Content-Digest": {putDgst.String()}, + }), + }, + }) + + e, c := testServer(m) + defer c() + + r, err := NewRepository(context.Background(), repo, e, nil) + if err != nil { + t.Fatal(err) + } + ctx := context.Background() + ms, err := r.Manifests(ctx) + if err != nil { + t.Fatal(err) + } + + if _, err := ms.Put(ctx, m1, distribution.WithTag(m1.Tag)); err != nil { + t.Fatal(err) + } + + if _, err := ms.Put(ctx, m1); err != nil { + t.Fatal(err) + } + + // TODO(dmcgowan): Check for invalid input error +} + +func TestManifestTags(t *testing.T) { + repo, _ := reference.ParseNamed("test.example.com/repo/tags/list") + tagsList := []byte(strings.TrimSpace(` +{ + "name": "test.example.com/repo/tags/list", + "tags": [ + "tag1", + "tag2", + "funtag" + ] +} + `)) + var m testutil.RequestResponseMap + for i := 0; i < 3; i++ { + m = append(m, testutil.RequestResponseMapping{ + Request: testutil.Request{ + Method: "GET", + Route: "/v2/" + repo.Name() + "/tags/list", + }, + Response: testutil.Response{ + StatusCode: http.StatusOK, + Body: tagsList, + Headers: http.Header(map[string][]string{ + "Content-Length": {fmt.Sprint(len(tagsList))}, + "Last-Modified": {time.Now().Add(-1 * time.Second).Format(time.ANSIC)}, + }), + }, + }) + } + e, c := testServer(m) + defer c() + + r, err := NewRepository(context.Background(), repo, e, nil) + if err != nil { + t.Fatal(err) + } + + ctx := context.Background() + tagService := r.Tags(ctx) + + tags, err := tagService.All(ctx) + if err != nil { + t.Fatal(err) + } + if len(tags) != 3 { + t.Fatalf("Wrong number of tags returned: %d, expected 3", len(tags)) + } 
+ + expected := map[string]struct{}{ + "tag1": {}, + "tag2": {}, + "funtag": {}, + } + for _, t := range tags { + delete(expected, t) + } + if len(expected) != 0 { + t.Fatalf("unexpected tags returned: %v", expected) + } + // TODO(dmcgowan): Check for error cases +} + +func TestObtainsErrorForMissingTag(t *testing.T) { + repo, _ := reference.ParseNamed("test.example.com/repo") + + var m testutil.RequestResponseMap + var errors errcode.Errors + errors = append(errors, v2.ErrorCodeManifestUnknown.WithDetail("unknown manifest")) + errBytes, err := json.Marshal(errors) + if err != nil { + t.Fatal(err) + } + m = append(m, testutil.RequestResponseMapping{ + Request: testutil.Request{ + Method: "GET", + Route: "/v2/" + repo.Name() + "/manifests/1.0.0", + }, + Response: testutil.Response{ + StatusCode: http.StatusNotFound, + Body: errBytes, + Headers: http.Header(map[string][]string{ + "Content-Type": {"application/json; charset=utf-8"}, + }), + }, + }) + e, c := testServer(m) + defer c() + + ctx := context.Background() + r, err := NewRepository(ctx, repo, e, nil) + if err != nil { + t.Fatal(err) + } + + tagService := r.Tags(ctx) + + _, err = tagService.Get(ctx, "1.0.0") + if err == nil { + t.Fatalf("Expected an error") + } + if !strings.Contains(err.Error(), "manifest unknown") { + t.Fatalf("Expected unknown manifest error message") + } +} + +func TestManifestTagsPaginated(t *testing.T) { + s := httptest.NewServer(http.NotFoundHandler()) + defer s.Close() + + repo, _ := reference.ParseNamed("test.example.com/repo/tags/list") + tagsList := []string{"tag1", "tag2", "funtag"} + var m testutil.RequestResponseMap + for i := 0; i < 3; i++ { + body, err := json.Marshal(map[string]interface{}{ + "name": "test.example.com/repo/tags/list", + "tags": []string{tagsList[i]}, + }) + if err != nil { + t.Fatal(err) + } + queryParams := make(map[string][]string) + if i > 0 { + queryParams["n"] = []string{"1"} + queryParams["last"] = []string{tagsList[i-1]} + } + headers := http.Header(map[string][]string{ + "Content-Length": {fmt.Sprint(len(body))}, + "Last-Modified": {time.Now().Add(-1 * time.Second).Format(time.ANSIC)}, + }) + if i < 2 { + headers.Set("Link", "<"+s.URL+"/v2/"+repo.Name()+"/tags/list?n=1&last="+tagsList[i]+`>; rel="next"`) + } + m = append(m, testutil.RequestResponseMapping{ + Request: testutil.Request{ + Method: "GET", + Route: "/v2/" + repo.Name() + "/tags/list", + QueryParams: queryParams, + }, + Response: testutil.Response{ + StatusCode: http.StatusOK, + Body: body, + Headers: headers, + }, + }) + } + + s.Config.Handler = testutil.NewHandler(m) + + r, err := NewRepository(context.Background(), repo, s.URL, nil) + if err != nil { + t.Fatal(err) + } + + ctx := context.Background() + tagService := r.Tags(ctx) + + tags, err := tagService.All(ctx) + if err != nil { + t.Fatal(tags, err) + } + if len(tags) != 3 { + t.Fatalf("Wrong number of tags returned: %d, expected 3", len(tags)) + } + + expected := map[string]struct{}{ + "tag1": {}, + "tag2": {}, + "funtag": {}, + } + for _, t := range tags { + delete(expected, t) + } + if len(expected) != 0 { + t.Fatalf("unexpected tags returned: %v", expected) + } +} + +func TestManifestUnauthorized(t *testing.T) { + repo, _ := reference.ParseNamed("test.example.com/repo") + _, dgst, _ := newRandomSchemaV1Manifest(repo, "latest", 6) + var m testutil.RequestResponseMap + + m = append(m, testutil.RequestResponseMapping{ + Request: testutil.Request{ + Method: "GET", + Route: "/v2/" + repo.Name() + "/manifests/" + dgst.String(), + }, + Response: 
testutil.Response{ + StatusCode: http.StatusUnauthorized, + Body: []byte("garbage"), + }, + }) + + e, c := testServer(m) + defer c() + + r, err := NewRepository(context.Background(), repo, e, nil) + if err != nil { + t.Fatal(err) + } + ctx := context.Background() + ms, err := r.Manifests(ctx) + if err != nil { + t.Fatal(err) + } + + _, err = ms.Get(ctx, dgst) + if err == nil { + t.Fatal("Expected error fetching manifest") + } + v2Err, ok := err.(errcode.Error) + if !ok { + t.Fatalf("Unexpected error type: %#v", err) + } + if v2Err.Code != errcode.ErrorCodeUnauthorized { + t.Fatalf("Unexpected error code: %s", v2Err.Code.String()) + } + if expected := errcode.ErrorCodeUnauthorized.Message(); v2Err.Message != expected { + t.Fatalf("Unexpected message value: %q, expected %q", v2Err.Message, expected) + } +} + +func TestCatalog(t *testing.T) { + var m testutil.RequestResponseMap + addTestCatalog( + "/v2/_catalog?n=5", + []byte("{\"repositories\":[\"foo\", \"bar\", \"baz\"]}"), "", &m) + + e, c := testServer(m) + defer c() + + entries := make([]string, 5) + + r, err := NewRegistry(context.Background(), e, nil) + if err != nil { + t.Fatal(err) + } + + ctx := context.Background() + numFilled, err := r.Repositories(ctx, entries, "") + if err != io.EOF { + t.Fatal(err) + } + + if numFilled != 3 { + t.Fatalf("Got wrong number of repos") + } +} + +func TestCatalogInParts(t *testing.T) { + var m testutil.RequestResponseMap + addTestCatalog( + "/v2/_catalog?n=2", + []byte("{\"repositories\":[\"bar\", \"baz\"]}"), + "", &m) + addTestCatalog( + "/v2/_catalog?last=baz&n=2", + []byte("{\"repositories\":[\"foo\"]}"), + "", &m) + + e, c := testServer(m) + defer c() + + entries := make([]string, 2) + + r, err := NewRegistry(context.Background(), e, nil) + if err != nil { + t.Fatal(err) + } + + ctx := context.Background() + numFilled, err := r.Repositories(ctx, entries, "") + if err != nil { + t.Fatal(err) + } + + if numFilled != 2 { + t.Fatalf("Got wrong number of repos") + } + + numFilled, err = r.Repositories(ctx, entries, "baz") + if err != io.EOF { + t.Fatal(err) + } + + if numFilled != 1 { + t.Fatalf("Got wrong number of repos") + } +} + +func TestSanitizeLocation(t *testing.T) { + for _, testcase := range []struct { + description string + location string + source string + expected string + err error + }{ + { + description: "ensure relative location correctly resolved", + location: "/v2/foo/baasdf", + source: "http://blahalaja.com/v1", + expected: "http://blahalaja.com/v2/foo/baasdf", + }, + { + description: "ensure parameters are preserved", + location: "/v2/foo/baasdf?_state=asdfasfdasdfasdf&digest=foo", + source: "http://blahalaja.com/v1", + expected: "http://blahalaja.com/v2/foo/baasdf?_state=asdfasfdasdfasdf&digest=foo", + }, + { + description: "ensure new hostname overidden", + location: "https://mwhahaha.com/v2/foo/baasdf?_state=asdfasfdasdfasdf", + source: "http://blahalaja.com/v1", + expected: "https://mwhahaha.com/v2/foo/baasdf?_state=asdfasfdasdfasdf", + }, + } { + fatalf := func(format string, args ...interface{}) { + t.Fatalf(testcase.description+": "+format, args...) 
+ } + + s, err := sanitizeLocation(testcase.location, testcase.source) + if err != testcase.err { + if testcase.err != nil { + fatalf("expected error: %v != %v", err, testcase) + } else { + fatalf("unexpected error sanitizing: %v", err) + } + } + + if s != testcase.expected { + fatalf("bad sanitize: %q != %q", s, testcase.expected) + } + } +} diff --git a/vendor/github.com/docker/distribution/registry/client/transport/http_reader.go b/vendor/github.com/docker/distribution/registry/client/transport/http_reader.go new file mode 100644 index 000000000..e5ff09d75 --- /dev/null +++ b/vendor/github.com/docker/distribution/registry/client/transport/http_reader.go @@ -0,0 +1,251 @@ +package transport + +import ( + "errors" + "fmt" + "io" + "net/http" + "os" + "regexp" + "strconv" +) + +var ( + contentRangeRegexp = regexp.MustCompile(`bytes ([0-9]+)-([0-9]+)/([0-9]+|\\*)`) + + // ErrWrongCodeForByteRange is returned if the client sends a request + // with a Range header but the server returns a 2xx or 3xx code other + // than 206 Partial Content. + ErrWrongCodeForByteRange = errors.New("expected HTTP 206 from byte range request") +) + +// ReadSeekCloser combines io.ReadSeeker with io.Closer. +type ReadSeekCloser interface { + io.ReadSeeker + io.Closer +} + +// NewHTTPReadSeeker handles reading from an HTTP endpoint using a GET +// request. When seeking and starting a read from a non-zero offset +// the a "Range" header will be added which sets the offset. +// TODO(dmcgowan): Move this into a separate utility package +func NewHTTPReadSeeker(client *http.Client, url string, errorHandler func(*http.Response) error) ReadSeekCloser { + return &httpReadSeeker{ + client: client, + url: url, + errorHandler: errorHandler, + } +} + +type httpReadSeeker struct { + client *http.Client + url string + + // errorHandler creates an error from an unsuccessful HTTP response. + // This allows the error to be created with the HTTP response body + // without leaking the body through a returned error. + errorHandler func(*http.Response) error + + size int64 + + // rc is the remote read closer. + rc io.ReadCloser + // readerOffset tracks the offset as of the last read. + readerOffset int64 + // seekOffset allows Seek to override the offset. Seek changes + // seekOffset instead of changing readOffset directly so that + // connection resets can be delayed and possibly avoided if the + // seek is undone (i.e. seeking to the end and then back to the + // beginning). + seekOffset int64 + err error +} + +func (hrs *httpReadSeeker) Read(p []byte) (n int, err error) { + if hrs.err != nil { + return 0, hrs.err + } + + // If we sought to a different position, we need to reset the + // connection. This logic is here instead of Seek so that if + // a seek is undone before the next read, the connection doesn't + // need to be closed and reopened. A common example of this is + // seeking to the end to determine the length, and then seeking + // back to the original position. 
+ if hrs.readerOffset != hrs.seekOffset { + hrs.reset() + } + + hrs.readerOffset = hrs.seekOffset + + rd, err := hrs.reader() + if err != nil { + return 0, err + } + + n, err = rd.Read(p) + hrs.seekOffset += int64(n) + hrs.readerOffset += int64(n) + + return n, err +} + +func (hrs *httpReadSeeker) Seek(offset int64, whence int) (int64, error) { + if hrs.err != nil { + return 0, hrs.err + } + + lastReaderOffset := hrs.readerOffset + + if whence == os.SEEK_SET && hrs.rc == nil { + // If no request has been made yet, and we are seeking to an + // absolute position, set the read offset as well to avoid an + // unnecessary request. + hrs.readerOffset = offset + } + + _, err := hrs.reader() + if err != nil { + hrs.readerOffset = lastReaderOffset + return 0, err + } + + newOffset := hrs.seekOffset + + switch whence { + case os.SEEK_CUR: + newOffset += offset + case os.SEEK_END: + if hrs.size < 0 { + return 0, errors.New("content length not known") + } + newOffset = hrs.size + offset + case os.SEEK_SET: + newOffset = offset + } + + if newOffset < 0 { + err = errors.New("cannot seek to negative position") + } else { + hrs.seekOffset = newOffset + } + + return hrs.seekOffset, err +} + +func (hrs *httpReadSeeker) Close() error { + if hrs.err != nil { + return hrs.err + } + + // close and release reader chain + if hrs.rc != nil { + hrs.rc.Close() + } + + hrs.rc = nil + + hrs.err = errors.New("httpLayer: closed") + + return nil +} + +func (hrs *httpReadSeeker) reset() { + if hrs.err != nil { + return + } + if hrs.rc != nil { + hrs.rc.Close() + hrs.rc = nil + } +} + +func (hrs *httpReadSeeker) reader() (io.Reader, error) { + if hrs.err != nil { + return nil, hrs.err + } + + if hrs.rc != nil { + return hrs.rc, nil + } + + req, err := http.NewRequest("GET", hrs.url, nil) + if err != nil { + return nil, err + } + + if hrs.readerOffset > 0 { + // If we are at different offset, issue a range request from there. 
+ req.Header.Add("Range", fmt.Sprintf("bytes=%d-", hrs.readerOffset)) + // TODO: get context in here + // context.GetLogger(hrs.context).Infof("Range: %s", req.Header.Get("Range")) + } + + req.Header.Add("Accept-Encoding", "identity") + resp, err := hrs.client.Do(req) + if err != nil { + return nil, err + } + + // Normally would use client.SuccessStatus, but that would be a cyclic + // import + if resp.StatusCode >= 200 && resp.StatusCode <= 399 { + if hrs.readerOffset > 0 { + if resp.StatusCode != http.StatusPartialContent { + return nil, ErrWrongCodeForByteRange + } + + contentRange := resp.Header.Get("Content-Range") + if contentRange == "" { + return nil, errors.New("no Content-Range header found in HTTP 206 response") + } + + submatches := contentRangeRegexp.FindStringSubmatch(contentRange) + if len(submatches) < 4 { + return nil, fmt.Errorf("could not parse Content-Range header: %s", contentRange) + } + + startByte, err := strconv.ParseUint(submatches[1], 10, 64) + if err != nil { + return nil, fmt.Errorf("could not parse start of range in Content-Range header: %s", contentRange) + } + + if startByte != uint64(hrs.readerOffset) { + return nil, fmt.Errorf("received Content-Range starting at offset %d instead of requested %d", startByte, hrs.readerOffset) + } + + endByte, err := strconv.ParseUint(submatches[2], 10, 64) + if err != nil { + return nil, fmt.Errorf("could not parse end of range in Content-Range header: %s", contentRange) + } + + if submatches[3] == "*" { + hrs.size = -1 + } else { + size, err := strconv.ParseUint(submatches[3], 10, 64) + if err != nil { + return nil, fmt.Errorf("could not parse total size in Content-Range header: %s", contentRange) + } + + if endByte+1 != size { + return nil, fmt.Errorf("range in Content-Range stops before the end of the content: %s", contentRange) + } + + hrs.size = int64(size) + } + } else if resp.StatusCode == http.StatusOK { + hrs.size = resp.ContentLength + } else { + hrs.size = -1 + } + hrs.rc = resp.Body + } else { + defer resp.Body.Close() + if hrs.errorHandler != nil { + return nil, hrs.errorHandler(resp) + } + return nil, fmt.Errorf("unexpected status resolving reader: %v", resp.Status) + } + + return hrs.rc, nil +} diff --git a/vendor/github.com/docker/distribution/registry/client/transport/transport.go b/vendor/github.com/docker/distribution/registry/client/transport/transport.go new file mode 100644 index 000000000..30e45fab0 --- /dev/null +++ b/vendor/github.com/docker/distribution/registry/client/transport/transport.go @@ -0,0 +1,147 @@ +package transport + +import ( + "io" + "net/http" + "sync" +) + +// RequestModifier represents an object which will do an inplace +// modification of an HTTP request. +type RequestModifier interface { + ModifyRequest(*http.Request) error +} + +type headerModifier http.Header + +// NewHeaderRequestModifier returns a new RequestModifier which will +// add the given headers to a request. +func NewHeaderRequestModifier(header http.Header) RequestModifier { + return headerModifier(header) +} + +func (h headerModifier) ModifyRequest(req *http.Request) error { + for k, s := range http.Header(h) { + req.Header[k] = append(req.Header[k], s...) + } + + return nil +} + +// NewTransport creates a new transport which will apply modifiers to +// the request on a RoundTrip call. 
+func NewTransport(base http.RoundTripper, modifiers ...RequestModifier) http.RoundTripper { + return &transport{ + Modifiers: modifiers, + Base: base, + } +} + +// transport is an http.RoundTripper that makes HTTP requests after +// copying and modifying the request +type transport struct { + Modifiers []RequestModifier + Base http.RoundTripper + + mu sync.Mutex // guards modReq + modReq map[*http.Request]*http.Request // original -> modified +} + +// RoundTrip authorizes and authenticates the request with an +// access token. If no token exists or token is expired, +// tries to refresh/fetch a new token. +func (t *transport) RoundTrip(req *http.Request) (*http.Response, error) { + req2 := cloneRequest(req) + for _, modifier := range t.Modifiers { + if err := modifier.ModifyRequest(req2); err != nil { + return nil, err + } + } + + t.setModReq(req, req2) + res, err := t.base().RoundTrip(req2) + if err != nil { + t.setModReq(req, nil) + return nil, err + } + res.Body = &onEOFReader{ + rc: res.Body, + fn: func() { t.setModReq(req, nil) }, + } + return res, nil +} + +// CancelRequest cancels an in-flight request by closing its connection. +func (t *transport) CancelRequest(req *http.Request) { + type canceler interface { + CancelRequest(*http.Request) + } + if cr, ok := t.base().(canceler); ok { + t.mu.Lock() + modReq := t.modReq[req] + delete(t.modReq, req) + t.mu.Unlock() + cr.CancelRequest(modReq) + } +} + +func (t *transport) base() http.RoundTripper { + if t.Base != nil { + return t.Base + } + return http.DefaultTransport +} + +func (t *transport) setModReq(orig, mod *http.Request) { + t.mu.Lock() + defer t.mu.Unlock() + if t.modReq == nil { + t.modReq = make(map[*http.Request]*http.Request) + } + if mod == nil { + delete(t.modReq, orig) + } else { + t.modReq[orig] = mod + } +} + +// cloneRequest returns a clone of the provided *http.Request. +// The clone is a shallow copy of the struct and its Header map. +func cloneRequest(r *http.Request) *http.Request { + // shallow copy of the struct + r2 := new(http.Request) + *r2 = *r + // deep copy of the Header + r2.Header = make(http.Header, len(r.Header)) + for k, s := range r.Header { + r2.Header[k] = append([]string(nil), s...) + } + + return r2 +} + +type onEOFReader struct { + rc io.ReadCloser + fn func() +} + +func (r *onEOFReader) Read(p []byte) (n int, err error) { + n, err = r.rc.Read(p) + if err == io.EOF { + r.runFunc() + } + return +} + +func (r *onEOFReader) Close() error { + err := r.rc.Close() + r.runFunc() + return err +} + +func (r *onEOFReader) runFunc() { + if fn := r.fn; fn != nil { + fn() + r.fn = nil + } +} diff --git a/vendor/github.com/docker/distribution/registry/doc.go b/vendor/github.com/docker/distribution/registry/doc.go new file mode 100644 index 000000000..a1ba7f3ab --- /dev/null +++ b/vendor/github.com/docker/distribution/registry/doc.go @@ -0,0 +1,2 @@ +// Package registry provides the main entrypoints for running a registry. 
+package registry diff --git a/vendor/github.com/docker/distribution/registry/handlers/api_test.go b/vendor/github.com/docker/distribution/registry/handlers/api_test.go new file mode 100644 index 000000000..9d64fbbf4 --- /dev/null +++ b/vendor/github.com/docker/distribution/registry/handlers/api_test.go @@ -0,0 +1,2513 @@ +package handlers + +import ( + "bytes" + "encoding/json" + "fmt" + "io" + "io/ioutil" + "net/http" + "net/http/httptest" + "net/http/httputil" + "net/url" + "os" + "path" + "reflect" + "regexp" + "strconv" + "strings" + "testing" + + "github.com/docker/distribution" + "github.com/docker/distribution/configuration" + "github.com/docker/distribution/context" + "github.com/docker/distribution/digest" + "github.com/docker/distribution/manifest" + "github.com/docker/distribution/manifest/manifestlist" + "github.com/docker/distribution/manifest/schema1" + "github.com/docker/distribution/manifest/schema2" + "github.com/docker/distribution/reference" + "github.com/docker/distribution/registry/api/errcode" + "github.com/docker/distribution/registry/api/v2" + _ "github.com/docker/distribution/registry/storage/driver/testdriver" + "github.com/docker/distribution/testutil" + "github.com/docker/libtrust" + "github.com/gorilla/handlers" +) + +var headerConfig = http.Header{ + "X-Content-Type-Options": []string{"nosniff"}, +} + +// TestCheckAPI hits the base endpoint (/v2/) ensures we return the specified +// 200 OK response. +func TestCheckAPI(t *testing.T) { + env := newTestEnv(t, false) + defer env.Shutdown() + baseURL, err := env.builder.BuildBaseURL() + if err != nil { + t.Fatalf("unexpected error building base url: %v", err) + } + + resp, err := http.Get(baseURL) + if err != nil { + t.Fatalf("unexpected error issuing request: %v", err) + } + defer resp.Body.Close() + + checkResponse(t, "issuing api base check", resp, http.StatusOK) + checkHeaders(t, resp, http.Header{ + "Content-Type": []string{"application/json; charset=utf-8"}, + "Content-Length": []string{"2"}, + }) + + p, err := ioutil.ReadAll(resp.Body) + if err != nil { + t.Fatalf("unexpected error reading response body: %v", err) + } + + if string(p) != "{}" { + t.Fatalf("unexpected response body: %v", string(p)) + } +} + +// TestCatalogAPI tests the /v2/_catalog endpoint +func TestCatalogAPI(t *testing.T) { + chunkLen := 2 + env := newTestEnv(t, false) + defer env.Shutdown() + + values := url.Values{ + "last": []string{""}, + "n": []string{strconv.Itoa(chunkLen)}} + + catalogURL, err := env.builder.BuildCatalogURL(values) + if err != nil { + t.Fatalf("unexpected error building catalog url: %v", err) + } + + // ----------------------------------- + // try to get an empty catalog + resp, err := http.Get(catalogURL) + if err != nil { + t.Fatalf("unexpected error issuing request: %v", err) + } + defer resp.Body.Close() + + checkResponse(t, "issuing catalog api check", resp, http.StatusOK) + + var ctlg struct { + Repositories []string `json:"repositories"` + } + + dec := json.NewDecoder(resp.Body) + if err := dec.Decode(&ctlg); err != nil { + t.Fatalf("error decoding fetched manifest: %v", err) + } + + // we haven't pushed anything to the registry yet + if len(ctlg.Repositories) != 0 { + t.Fatalf("repositories has unexpected values") + } + + if resp.Header.Get("Link") != "" { + t.Fatalf("repositories has more data when none expected") + } + + // ----------------------------------- + // push something to the registry and try again + images := []string{"foo/aaaa", "foo/bbbb", "foo/cccc"} + + for _, image := range images { + 
createRepository(env, t, image, "sometag") + } + + resp, err = http.Get(catalogURL) + if err != nil { + t.Fatalf("unexpected error issuing request: %v", err) + } + defer resp.Body.Close() + + checkResponse(t, "issuing catalog api check", resp, http.StatusOK) + + dec = json.NewDecoder(resp.Body) + if err = dec.Decode(&ctlg); err != nil { + t.Fatalf("error decoding fetched manifest: %v", err) + } + + if len(ctlg.Repositories) != chunkLen { + t.Fatalf("repositories has unexpected values") + } + + for _, image := range images[:chunkLen] { + if !contains(ctlg.Repositories, image) { + t.Fatalf("didn't find our repository '%s' in the catalog", image) + } + } + + link := resp.Header.Get("Link") + if link == "" { + t.Fatalf("repositories has less data than expected") + } + + newValues := checkLink(t, link, chunkLen, ctlg.Repositories[len(ctlg.Repositories)-1]) + + // ----------------------------------- + // get the last chunk of data + + catalogURL, err = env.builder.BuildCatalogURL(newValues) + if err != nil { + t.Fatalf("unexpected error building catalog url: %v", err) + } + + resp, err = http.Get(catalogURL) + if err != nil { + t.Fatalf("unexpected error issuing request: %v", err) + } + defer resp.Body.Close() + + checkResponse(t, "issuing catalog api check", resp, http.StatusOK) + + dec = json.NewDecoder(resp.Body) + if err = dec.Decode(&ctlg); err != nil { + t.Fatalf("error decoding fetched manifest: %v", err) + } + + if len(ctlg.Repositories) != 1 { + t.Fatalf("repositories has unexpected values") + } + + lastImage := images[len(images)-1] + if !contains(ctlg.Repositories, lastImage) { + t.Fatalf("didn't find our repository '%s' in the catalog", lastImage) + } + + link = resp.Header.Get("Link") + if link != "" { + t.Fatalf("catalog has unexpected data") + } +} + +func checkLink(t *testing.T, urlStr string, numEntries int, last string) url.Values { + re := regexp.MustCompile("<(/v2/_catalog.*)>; rel=\"next\"") + matches := re.FindStringSubmatch(urlStr) + + if len(matches) != 2 { + t.Fatalf("Catalog link address response was incorrect") + } + linkURL, _ := url.Parse(matches[1]) + urlValues := linkURL.Query() + + if urlValues.Get("n") != strconv.Itoa(numEntries) { + t.Fatalf("Catalog link entry size is incorrect") + } + + if urlValues.Get("last") != last { + t.Fatal("Catalog link last entry is incorrect") + } + + return urlValues +} + +func contains(elems []string, e string) bool { + for _, elem := range elems { + if elem == e { + return true + } + } + return false +} + +func TestURLPrefix(t *testing.T) { + config := configuration.Configuration{ + Storage: configuration.Storage{ + "testdriver": configuration.Parameters{}, + "maintenance": configuration.Parameters{"uploadpurging": map[interface{}]interface{}{ + "enabled": false, + }}, + }, + } + config.HTTP.Prefix = "/test/" + config.HTTP.Headers = headerConfig + + env := newTestEnvWithConfig(t, &config) + defer env.Shutdown() + + baseURL, err := env.builder.BuildBaseURL() + if err != nil { + t.Fatalf("unexpected error building base url: %v", err) + } + + parsed, _ := url.Parse(baseURL) + if !strings.HasPrefix(parsed.Path, config.HTTP.Prefix) { + t.Fatalf("Prefix %v not included in test url %v", config.HTTP.Prefix, baseURL) + } + + resp, err := http.Get(baseURL) + if err != nil { + t.Fatalf("unexpected error issuing request: %v", err) + } + defer resp.Body.Close() + + checkResponse(t, "issuing api base check", resp, http.StatusOK) + checkHeaders(t, resp, http.Header{ + "Content-Type": []string{"application/json; charset=utf-8"}, + 
"Content-Length": []string{"2"}, + }) +} + +type blobArgs struct { + imageName reference.Named + layerFile io.ReadSeeker + layerDigest digest.Digest +} + +func makeBlobArgs(t *testing.T) blobArgs { + layerFile, layerDigest, err := testutil.CreateRandomTarFile() + if err != nil { + t.Fatalf("error creating random layer file: %v", err) + } + + args := blobArgs{ + layerFile: layerFile, + layerDigest: layerDigest, + } + args.imageName, _ = reference.ParseNamed("foo/bar") + return args +} + +// TestBlobAPI conducts a full test of the of the blob api. +func TestBlobAPI(t *testing.T) { + deleteEnabled := false + env1 := newTestEnv(t, deleteEnabled) + defer env1.Shutdown() + args := makeBlobArgs(t) + testBlobAPI(t, env1, args) + + deleteEnabled = true + env2 := newTestEnv(t, deleteEnabled) + defer env2.Shutdown() + args = makeBlobArgs(t) + testBlobAPI(t, env2, args) + +} + +func TestBlobDelete(t *testing.T) { + deleteEnabled := true + env := newTestEnv(t, deleteEnabled) + defer env.Shutdown() + + args := makeBlobArgs(t) + env = testBlobAPI(t, env, args) + testBlobDelete(t, env, args) +} + +func TestRelativeURL(t *testing.T) { + config := configuration.Configuration{ + Storage: configuration.Storage{ + "testdriver": configuration.Parameters{}, + "maintenance": configuration.Parameters{"uploadpurging": map[interface{}]interface{}{ + "enabled": false, + }}, + }, + } + config.HTTP.Headers = headerConfig + config.HTTP.RelativeURLs = false + env := newTestEnvWithConfig(t, &config) + defer env.Shutdown() + ref, _ := reference.WithName("foo/bar") + uploadURLBaseAbs, _ := startPushLayer(t, env, ref) + + u, err := url.Parse(uploadURLBaseAbs) + if err != nil { + t.Fatal(err) + } + if !u.IsAbs() { + t.Fatal("Relative URL returned from blob upload chunk with non-relative configuration") + } + + args := makeBlobArgs(t) + resp, err := doPushLayer(t, env.builder, ref, args.layerDigest, uploadURLBaseAbs, args.layerFile) + if err != nil { + t.Fatalf("unexpected error doing layer push relative url: %v", err) + } + checkResponse(t, "relativeurl blob upload", resp, http.StatusCreated) + u, err = url.Parse(resp.Header.Get("Location")) + if err != nil { + t.Fatal(err) + } + if !u.IsAbs() { + t.Fatal("Relative URL returned from blob upload with non-relative configuration") + } + + config.HTTP.RelativeURLs = true + args = makeBlobArgs(t) + uploadURLBaseRelative, _ := startPushLayer(t, env, ref) + u, err = url.Parse(uploadURLBaseRelative) + if err != nil { + t.Fatal(err) + } + if u.IsAbs() { + t.Fatal("Absolute URL returned from blob upload chunk with relative configuration") + } + + // Start a new upload in absolute mode to get a valid base URL + config.HTTP.RelativeURLs = false + uploadURLBaseAbs, _ = startPushLayer(t, env, ref) + u, err = url.Parse(uploadURLBaseAbs) + if err != nil { + t.Fatal(err) + } + if !u.IsAbs() { + t.Fatal("Relative URL returned from blob upload chunk with non-relative configuration") + } + + // Complete upload with relative URLs enabled to ensure the final location is relative + config.HTTP.RelativeURLs = true + resp, err = doPushLayer(t, env.builder, ref, args.layerDigest, uploadURLBaseAbs, args.layerFile) + if err != nil { + t.Fatalf("unexpected error doing layer push relative url: %v", err) + } + + checkResponse(t, "relativeurl blob upload", resp, http.StatusCreated) + u, err = url.Parse(resp.Header.Get("Location")) + if err != nil { + t.Fatal(err) + } + if u.IsAbs() { + t.Fatal("Relative URL returned from blob upload with non-relative configuration") + } +} + +func TestBlobDeleteDisabled(t 
*testing.T) { + deleteEnabled := false + env := newTestEnv(t, deleteEnabled) + defer env.Shutdown() + args := makeBlobArgs(t) + + imageName := args.imageName + layerDigest := args.layerDigest + ref, _ := reference.WithDigest(imageName, layerDigest) + layerURL, err := env.builder.BuildBlobURL(ref) + if err != nil { + t.Fatalf("error building url: %v", err) + } + + resp, err := httpDelete(layerURL) + if err != nil { + t.Fatalf("unexpected error deleting when disabled: %v", err) + } + + checkResponse(t, "status of disabled delete", resp, http.StatusMethodNotAllowed) +} + +func testBlobAPI(t *testing.T, env *testEnv, args blobArgs) *testEnv { + // TODO(stevvooe): This test code is complete junk but it should cover the + // complete flow. This must be broken down and checked against the + // specification *before* we submit the final to docker core. + imageName := args.imageName + layerFile := args.layerFile + layerDigest := args.layerDigest + + // ----------------------------------- + // Test fetch for non-existent content + ref, _ := reference.WithDigest(imageName, layerDigest) + layerURL, err := env.builder.BuildBlobURL(ref) + if err != nil { + t.Fatalf("error building url: %v", err) + } + + resp, err := http.Get(layerURL) + if err != nil { + t.Fatalf("unexpected error fetching non-existent layer: %v", err) + } + + checkResponse(t, "fetching non-existent content", resp, http.StatusNotFound) + + // ------------------------------------------ + // Test head request for non-existent content + resp, err = http.Head(layerURL) + if err != nil { + t.Fatalf("unexpected error checking head on non-existent layer: %v", err) + } + + checkResponse(t, "checking head on non-existent layer", resp, http.StatusNotFound) + + // ------------------------------------------ + // Start an upload, check the status then cancel + uploadURLBase, uploadUUID := startPushLayer(t, env, imageName) + + // A status check should work + resp, err = http.Get(uploadURLBase) + if err != nil { + t.Fatalf("unexpected error getting upload status: %v", err) + } + checkResponse(t, "status of deleted upload", resp, http.StatusNoContent) + checkHeaders(t, resp, http.Header{ + "Location": []string{"*"}, + "Range": []string{"0-0"}, + "Docker-Upload-UUID": []string{uploadUUID}, + }) + + req, err := http.NewRequest("DELETE", uploadURLBase, nil) + if err != nil { + t.Fatalf("unexpected error creating delete request: %v", err) + } + + resp, err = http.DefaultClient.Do(req) + if err != nil { + t.Fatalf("unexpected error sending delete request: %v", err) + } + + checkResponse(t, "deleting upload", resp, http.StatusNoContent) + + // A status check should result in 404 + resp, err = http.Get(uploadURLBase) + if err != nil { + t.Fatalf("unexpected error getting upload status: %v", err) + } + checkResponse(t, "status of deleted upload", resp, http.StatusNotFound) + + // ----------------------------------------- + // Do layer push with an empty body and different digest + uploadURLBase, uploadUUID = startPushLayer(t, env, imageName) + resp, err = doPushLayer(t, env.builder, imageName, layerDigest, uploadURLBase, bytes.NewReader([]byte{})) + if err != nil { + t.Fatalf("unexpected error doing bad layer push: %v", err) + } + + checkResponse(t, "bad layer push", resp, http.StatusBadRequest) + checkBodyHasErrorCodes(t, "bad layer push", resp, v2.ErrorCodeDigestInvalid) + + // ----------------------------------------- + // Do layer push with an empty body and correct digest + zeroDigest, err := digest.FromReader(bytes.NewReader([]byte{})) + if err != nil { 
+ t.Fatalf("unexpected error digesting empty buffer: %v", err) + } + + uploadURLBase, uploadUUID = startPushLayer(t, env, imageName) + pushLayer(t, env.builder, imageName, zeroDigest, uploadURLBase, bytes.NewReader([]byte{})) + + // ----------------------------------------- + // Do layer push with an empty body and correct digest + + // This is a valid but empty tarfile! + emptyTar := bytes.Repeat([]byte("\x00"), 1024) + emptyDigest, err := digest.FromReader(bytes.NewReader(emptyTar)) + if err != nil { + t.Fatalf("unexpected error digesting empty tar: %v", err) + } + + uploadURLBase, uploadUUID = startPushLayer(t, env, imageName) + pushLayer(t, env.builder, imageName, emptyDigest, uploadURLBase, bytes.NewReader(emptyTar)) + + // ------------------------------------------ + // Now, actually do successful upload. + layerLength, _ := layerFile.Seek(0, os.SEEK_END) + layerFile.Seek(0, os.SEEK_SET) + + uploadURLBase, uploadUUID = startPushLayer(t, env, imageName) + pushLayer(t, env.builder, imageName, layerDigest, uploadURLBase, layerFile) + + // ------------------------------------------ + // Now, push just a chunk + layerFile.Seek(0, 0) + + canonicalDigester := digest.Canonical.New() + if _, err := io.Copy(canonicalDigester.Hash(), layerFile); err != nil { + t.Fatalf("error copying to digest: %v", err) + } + canonicalDigest := canonicalDigester.Digest() + + layerFile.Seek(0, 0) + uploadURLBase, uploadUUID = startPushLayer(t, env, imageName) + uploadURLBase, dgst := pushChunk(t, env.builder, imageName, uploadURLBase, layerFile, layerLength) + finishUpload(t, env.builder, imageName, uploadURLBase, dgst) + + // ------------------------ + // Use a head request to see if the layer exists. + resp, err = http.Head(layerURL) + if err != nil { + t.Fatalf("unexpected error checking head on existing layer: %v", err) + } + + checkResponse(t, "checking head on existing layer", resp, http.StatusOK) + checkHeaders(t, resp, http.Header{ + "Content-Length": []string{fmt.Sprint(layerLength)}, + "Docker-Content-Digest": []string{canonicalDigest.String()}, + }) + + // ---------------- + // Fetch the layer! 
+ resp, err = http.Get(layerURL) + if err != nil { + t.Fatalf("unexpected error fetching layer: %v", err) + } + + checkResponse(t, "fetching layer", resp, http.StatusOK) + checkHeaders(t, resp, http.Header{ + "Content-Length": []string{fmt.Sprint(layerLength)}, + "Docker-Content-Digest": []string{canonicalDigest.String()}, + }) + + // Verify the body + verifier, err := digest.NewDigestVerifier(layerDigest) + if err != nil { + t.Fatalf("unexpected error getting digest verifier: %s", err) + } + io.Copy(verifier, resp.Body) + + if !verifier.Verified() { + t.Fatalf("response body did not pass verification") + } + + // ---------------- + // Fetch the layer with an invalid digest + badURL := strings.Replace(layerURL, "sha256", "sha257", 1) + resp, err = http.Get(badURL) + if err != nil { + t.Fatalf("unexpected error fetching layer: %v", err) + } + + checkResponse(t, "fetching layer bad digest", resp, http.StatusBadRequest) + + // Cache headers + resp, err = http.Get(layerURL) + if err != nil { + t.Fatalf("unexpected error fetching layer: %v", err) + } + + checkResponse(t, "fetching layer", resp, http.StatusOK) + checkHeaders(t, resp, http.Header{ + "Content-Length": []string{fmt.Sprint(layerLength)}, + "Docker-Content-Digest": []string{canonicalDigest.String()}, + "ETag": []string{fmt.Sprintf(`"%s"`, canonicalDigest)}, + "Cache-Control": []string{"max-age=31536000"}, + }) + + // Matching etag, gives 304 + etag := resp.Header.Get("Etag") + req, err = http.NewRequest("GET", layerURL, nil) + if err != nil { + t.Fatalf("Error constructing request: %s", err) + } + req.Header.Set("If-None-Match", etag) + + resp, err = http.DefaultClient.Do(req) + if err != nil { + t.Fatalf("Error constructing request: %s", err) + } + + checkResponse(t, "fetching layer with etag", resp, http.StatusNotModified) + + // Non-matching etag, gives 200 + req, err = http.NewRequest("GET", layerURL, nil) + if err != nil { + t.Fatalf("Error constructing request: %s", err) + } + req.Header.Set("If-None-Match", "") + resp, err = http.DefaultClient.Do(req) + if err != nil { + t.Fatalf("unexpected error sending request: %v", err) + } + checkResponse(t, "fetching layer with invalid etag", resp, http.StatusOK) + + // Missing tests: + // - Upload the same tar file under a different repository and + // ensure the content remains uncorrupted. + return env +} + +func testBlobDelete(t *testing.T, env *testEnv, args blobArgs) { + // Upload a layer + imageName := args.imageName + layerFile := args.layerFile + layerDigest := args.layerDigest + + ref, _ := reference.WithDigest(imageName, layerDigest) + layerURL, err := env.builder.BuildBlobURL(ref) + if err != nil { + t.Fatal(err) + } + // --------------- + // Delete a layer + resp, err := httpDelete(layerURL) + if err != nil { + t.Fatalf("unexpected error deleting layer: %v", err) + } + + checkResponse(t, "deleting layer", resp, http.StatusAccepted) + checkHeaders(t, resp, http.Header{ + "Content-Length": []string{"0"}, + }) + + // --------------- + // Try and get it back + // Use a head request to see if the layer exists. 
+ resp, err = http.Head(layerURL) + if err != nil { + t.Fatalf("unexpected error checking head on existing layer: %v", err) + } + + checkResponse(t, "checking existence of deleted layer", resp, http.StatusNotFound) + + // Delete already deleted layer + resp, err = httpDelete(layerURL) + if err != nil { + t.Fatalf("unexpected error deleting layer: %v", err) + } + + checkResponse(t, "deleting layer", resp, http.StatusNotFound) + + // ---------------- + // Attempt to delete a layer with an invalid digest + badURL := strings.Replace(layerURL, "sha256", "sha257", 1) + resp, err = httpDelete(badURL) + if err != nil { + t.Fatalf("unexpected error fetching layer: %v", err) + } + + checkResponse(t, "deleting layer bad digest", resp, http.StatusBadRequest) + + // ---------------- + // Reupload previously deleted blob + layerFile.Seek(0, os.SEEK_SET) + + uploadURLBase, _ := startPushLayer(t, env, imageName) + pushLayer(t, env.builder, imageName, layerDigest, uploadURLBase, layerFile) + + layerFile.Seek(0, os.SEEK_SET) + canonicalDigester := digest.Canonical.New() + if _, err := io.Copy(canonicalDigester.Hash(), layerFile); err != nil { + t.Fatalf("error copying to digest: %v", err) + } + canonicalDigest := canonicalDigester.Digest() + + // ------------------------ + // Use a head request to see if it exists + resp, err = http.Head(layerURL) + if err != nil { + t.Fatalf("unexpected error checking head on existing layer: %v", err) + } + + layerLength, _ := layerFile.Seek(0, os.SEEK_END) + checkResponse(t, "checking head on reuploaded layer", resp, http.StatusOK) + checkHeaders(t, resp, http.Header{ + "Content-Length": []string{fmt.Sprint(layerLength)}, + "Docker-Content-Digest": []string{canonicalDigest.String()}, + }) +} + +func TestDeleteDisabled(t *testing.T) { + env := newTestEnv(t, false) + defer env.Shutdown() + + imageName, _ := reference.ParseNamed("foo/bar") + // "build" our layer file + layerFile, layerDigest, err := testutil.CreateRandomTarFile() + if err != nil { + t.Fatalf("error creating random layer file: %v", err) + } + + ref, _ := reference.WithDigest(imageName, layerDigest) + layerURL, err := env.builder.BuildBlobURL(ref) + if err != nil { + t.Fatalf("Error building blob URL") + } + uploadURLBase, _ := startPushLayer(t, env, imageName) + pushLayer(t, env.builder, imageName, layerDigest, uploadURLBase, layerFile) + + resp, err := httpDelete(layerURL) + if err != nil { + t.Fatalf("unexpected error deleting layer: %v", err) + } + + checkResponse(t, "deleting layer with delete disabled", resp, http.StatusMethodNotAllowed) +} + +func TestDeleteReadOnly(t *testing.T) { + env := newTestEnv(t, true) + defer env.Shutdown() + + imageName, _ := reference.ParseNamed("foo/bar") + // "build" our layer file + layerFile, layerDigest, err := testutil.CreateRandomTarFile() + if err != nil { + t.Fatalf("error creating random layer file: %v", err) + } + + ref, _ := reference.WithDigest(imageName, layerDigest) + layerURL, err := env.builder.BuildBlobURL(ref) + if err != nil { + t.Fatalf("Error building blob URL") + } + uploadURLBase, _ := startPushLayer(t, env, imageName) + pushLayer(t, env.builder, imageName, layerDigest, uploadURLBase, layerFile) + + env.app.readOnly = true + + resp, err := httpDelete(layerURL) + if err != nil { + t.Fatalf("unexpected error deleting layer: %v", err) + } + + checkResponse(t, "deleting layer in read-only mode", resp, http.StatusMethodNotAllowed) +} + +func TestStartPushReadOnly(t *testing.T) { + env := newTestEnv(t, true) + defer env.Shutdown() + env.app.readOnly = true + 
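+ // With the registry switched to read-only mode, starting a new blob upload below should be rejected with 405 Method Not Allowed.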
+ imageName, _ := reference.ParseNamed("foo/bar") + + layerUploadURL, err := env.builder.BuildBlobUploadURL(imageName) + if err != nil { + t.Fatalf("unexpected error building layer upload url: %v", err) + } + + resp, err := http.Post(layerUploadURL, "", nil) + if err != nil { + t.Fatalf("unexpected error starting layer push: %v", err) + } + defer resp.Body.Close() + + checkResponse(t, "starting push in read-only mode", resp, http.StatusMethodNotAllowed) +} + +func httpDelete(url string) (*http.Response, error) { + req, err := http.NewRequest("DELETE", url, nil) + if err != nil { + return nil, err + } + + resp, err := http.DefaultClient.Do(req) + if err != nil { + return nil, err + } + // defer resp.Body.Close() + return resp, err +} + +type manifestArgs struct { + imageName reference.Named + mediaType string + manifest distribution.Manifest + dgst digest.Digest +} + +func TestManifestAPI(t *testing.T) { + schema1Repo, _ := reference.ParseNamed("foo/schema1") + schema2Repo, _ := reference.ParseNamed("foo/schema2") + + deleteEnabled := false + env1 := newTestEnv(t, deleteEnabled) + defer env1.Shutdown() + testManifestAPISchema1(t, env1, schema1Repo) + schema2Args := testManifestAPISchema2(t, env1, schema2Repo) + testManifestAPIManifestList(t, env1, schema2Args) + + deleteEnabled = true + env2 := newTestEnv(t, deleteEnabled) + defer env2.Shutdown() + testManifestAPISchema1(t, env2, schema1Repo) + schema2Args = testManifestAPISchema2(t, env2, schema2Repo) + testManifestAPIManifestList(t, env2, schema2Args) +} + +func TestManifestDelete(t *testing.T) { + schema1Repo, _ := reference.ParseNamed("foo/schema1") + schema2Repo, _ := reference.ParseNamed("foo/schema2") + + deleteEnabled := true + env := newTestEnv(t, deleteEnabled) + defer env.Shutdown() + schema1Args := testManifestAPISchema1(t, env, schema1Repo) + testManifestDelete(t, env, schema1Args) + schema2Args := testManifestAPISchema2(t, env, schema2Repo) + testManifestDelete(t, env, schema2Args) +} + +func TestManifestDeleteDisabled(t *testing.T) { + schema1Repo, _ := reference.ParseNamed("foo/schema1") + deleteEnabled := false + env := newTestEnv(t, deleteEnabled) + defer env.Shutdown() + testManifestDeleteDisabled(t, env, schema1Repo) +} + +func testManifestDeleteDisabled(t *testing.T, env *testEnv, imageName reference.Named) { + ref, _ := reference.WithDigest(imageName, digest.DigestSha256EmptyTar) + manifestURL, err := env.builder.BuildManifestURL(ref) + if err != nil { + t.Fatalf("unexpected error getting manifest url: %v", err) + } + + resp, err := httpDelete(manifestURL) + if err != nil { + t.Fatalf("unexpected error deleting manifest %v", err) + } + defer resp.Body.Close() + + checkResponse(t, "status of disabled delete of manifest", resp, http.StatusMethodNotAllowed) +} + +func testManifestAPISchema1(t *testing.T, env *testEnv, imageName reference.Named) manifestArgs { + tag := "thetag" + args := manifestArgs{imageName: imageName} + + tagRef, _ := reference.WithTag(imageName, tag) + manifestURL, err := env.builder.BuildManifestURL(tagRef) + if err != nil { + t.Fatalf("unexpected error getting manifest url: %v", err) + } + + // ----------------------------- + // Attempt to fetch the manifest + resp, err := http.Get(manifestURL) + if err != nil { + t.Fatalf("unexpected error getting manifest: %v", err) + } + defer resp.Body.Close() + + checkResponse(t, "getting non-existent manifest", resp, http.StatusNotFound) + checkBodyHasErrorCodes(t, "getting non-existent manifest", resp, v2.ErrorCodeManifestUnknown) + + tagsURL, err := 
env.builder.BuildTagsURL(imageName) + if err != nil { + t.Fatalf("unexpected error building tags url: %v", err) + } + + resp, err = http.Get(tagsURL) + if err != nil { + t.Fatalf("unexpected error getting unknown tags: %v", err) + } + defer resp.Body.Close() + + // Check that we get an unknown repository error when asking for tags + checkResponse(t, "getting unknown manifest tags", resp, http.StatusNotFound) + checkBodyHasErrorCodes(t, "getting unknown manifest tags", resp, v2.ErrorCodeNameUnknown) + + // -------------------------------- + // Attempt to push unsigned manifest with missing layers + unsignedManifest := &schema1.Manifest{ + Versioned: manifest.Versioned{ + SchemaVersion: 1, + }, + Name: imageName.Name(), + Tag: tag, + FSLayers: []schema1.FSLayer{ + { + BlobSum: "asdf", + }, + { + BlobSum: "qwer", + }, + }, + History: []schema1.History{ + { + V1Compatibility: "", + }, + { + V1Compatibility: "", + }, + }, + } + + resp = putManifest(t, "putting unsigned manifest", manifestURL, "", unsignedManifest) + defer resp.Body.Close() + checkResponse(t, "putting unsigned manifest", resp, http.StatusBadRequest) + _, p, counts := checkBodyHasErrorCodes(t, "putting unsigned manifest", resp, v2.ErrorCodeManifestInvalid) + + expectedCounts := map[errcode.ErrorCode]int{ + v2.ErrorCodeManifestInvalid: 1, + } + + if !reflect.DeepEqual(counts, expectedCounts) { + t.Fatalf("unexpected number of error codes encountered: %v\n!=\n%v\n---\n%s", counts, expectedCounts, string(p)) + } + + // sign the manifest and still get some interesting errors. + sm, err := schema1.Sign(unsignedManifest, env.pk) + if err != nil { + t.Fatalf("error signing manifest: %v", err) + } + + resp = putManifest(t, "putting signed manifest with errors", manifestURL, "", sm) + defer resp.Body.Close() + checkResponse(t, "putting signed manifest with errors", resp, http.StatusBadRequest) + _, p, counts = checkBodyHasErrorCodes(t, "putting signed manifest with errors", resp, + v2.ErrorCodeManifestBlobUnknown, v2.ErrorCodeDigestInvalid) + + expectedCounts = map[errcode.ErrorCode]int{ + v2.ErrorCodeManifestBlobUnknown: 2, + v2.ErrorCodeDigestInvalid: 2, + } + + if !reflect.DeepEqual(counts, expectedCounts) { + t.Fatalf("unexpected number of error codes encountered: %v\n!=\n%v\n---\n%s", counts, expectedCounts, string(p)) + } + + // TODO(stevvooe): Add a test case where we take a mostly valid registry, + // tamper with the content and ensure that we get an unverified manifest + // error. + + // Push 2 random layers + expectedLayers := make(map[digest.Digest]io.ReadSeeker) + + for i := range unsignedManifest.FSLayers { + rs, dgstStr, err := testutil.CreateRandomTarFile() + + if err != nil { + t.Fatalf("error creating random layer %d: %v", i, err) + } + dgst := digest.Digest(dgstStr) + + expectedLayers[dgst] = rs + unsignedManifest.FSLayers[i].BlobSum = dgst + + uploadURLBase, _ := startPushLayer(t, env, imageName) + pushLayer(t, env.builder, imageName, dgst, uploadURLBase, rs) + } + + // ------------------- + // Push the signed manifest with all layers pushed. 
+ signedManifest, err := schema1.Sign(unsignedManifest, env.pk) + if err != nil { + t.Fatalf("unexpected error signing manifest: %v", err) + } + + dgst := digest.FromBytes(signedManifest.Canonical) + args.manifest = signedManifest + args.dgst = dgst + + digestRef, _ := reference.WithDigest(imageName, dgst) + manifestDigestURL, err := env.builder.BuildManifestURL(digestRef) + checkErr(t, err, "building manifest url") + + resp = putManifest(t, "putting signed manifest no error", manifestURL, "", signedManifest) + checkResponse(t, "putting signed manifest no error", resp, http.StatusCreated) + checkHeaders(t, resp, http.Header{ + "Location": []string{manifestDigestURL}, + "Docker-Content-Digest": []string{dgst.String()}, + }) + + // -------------------- + // Push by digest -- should get same result + resp = putManifest(t, "putting signed manifest", manifestDigestURL, "", signedManifest) + checkResponse(t, "putting signed manifest", resp, http.StatusCreated) + checkHeaders(t, resp, http.Header{ + "Location": []string{manifestDigestURL}, + "Docker-Content-Digest": []string{dgst.String()}, + }) + + // ------------------ + // Fetch by tag name + resp, err = http.Get(manifestURL) + if err != nil { + t.Fatalf("unexpected error fetching manifest: %v", err) + } + defer resp.Body.Close() + + checkResponse(t, "fetching uploaded manifest", resp, http.StatusOK) + checkHeaders(t, resp, http.Header{ + "Docker-Content-Digest": []string{dgst.String()}, + "ETag": []string{fmt.Sprintf(`"%s"`, dgst)}, + }) + + var fetchedManifest schema1.SignedManifest + dec := json.NewDecoder(resp.Body) + + if err := dec.Decode(&fetchedManifest); err != nil { + t.Fatalf("error decoding fetched manifest: %v", err) + } + + if !bytes.Equal(fetchedManifest.Canonical, signedManifest.Canonical) { + t.Fatalf("manifests do not match") + } + + // --------------- + // Fetch by digest + resp, err = http.Get(manifestDigestURL) + checkErr(t, err, "fetching manifest by digest") + defer resp.Body.Close() + + checkResponse(t, "fetching uploaded manifest", resp, http.StatusOK) + checkHeaders(t, resp, http.Header{ + "Docker-Content-Digest": []string{dgst.String()}, + "ETag": []string{fmt.Sprintf(`"%s"`, dgst)}, + }) + + var fetchedManifestByDigest schema1.SignedManifest + dec = json.NewDecoder(resp.Body) + if err := dec.Decode(&fetchedManifestByDigest); err != nil { + t.Fatalf("error decoding fetched manifest: %v", err) + } + + if !bytes.Equal(fetchedManifestByDigest.Canonical, signedManifest.Canonical) { + t.Fatalf("manifests do not match") + } + + // check signature was roundtripped + signatures, err := fetchedManifestByDigest.Signatures() + if err != nil { + t.Fatal(err) + } + + if len(signatures) != 1 { + t.Fatalf("expected 1 signature from manifest, got: %d", len(signatures)) + } + + // Re-sign, push and pull the same digest + sm2, err := schema1.Sign(&fetchedManifestByDigest.Manifest, env.pk) + if err != nil { + t.Fatal(err) + + } + + // Re-push with a few different Content-Types. The official schema1 + // content type should work, as should application/json with/without a + // charset. 
+ resp = putManifest(t, "re-putting signed manifest", manifestDigestURL, schema1.MediaTypeSignedManifest, sm2) + checkResponse(t, "re-putting signed manifest", resp, http.StatusCreated) + resp = putManifest(t, "re-putting signed manifest", manifestDigestURL, "application/json; charset=utf-8", sm2) + checkResponse(t, "re-putting signed manifest", resp, http.StatusCreated) + resp = putManifest(t, "re-putting signed manifest", manifestDigestURL, "application/json", sm2) + checkResponse(t, "re-putting signed manifest", resp, http.StatusCreated) + + resp, err = http.Get(manifestDigestURL) + checkErr(t, err, "re-fetching manifest by digest") + defer resp.Body.Close() + + checkResponse(t, "re-fetching uploaded manifest", resp, http.StatusOK) + checkHeaders(t, resp, http.Header{ + "Docker-Content-Digest": []string{dgst.String()}, + "ETag": []string{fmt.Sprintf(`"%s"`, dgst)}, + }) + + dec = json.NewDecoder(resp.Body) + if err := dec.Decode(&fetchedManifestByDigest); err != nil { + t.Fatalf("error decoding fetched manifest: %v", err) + } + + // check only 1 signature is returned + signatures, err = fetchedManifestByDigest.Signatures() + if err != nil { + t.Fatal(err) + } + + if len(signatures) != 1 { + t.Fatalf("expected 2 signature from manifest, got: %d", len(signatures)) + } + + // Get by name with etag, gives 304 + etag := resp.Header.Get("Etag") + req, err := http.NewRequest("GET", manifestURL, nil) + if err != nil { + t.Fatalf("Error constructing request: %s", err) + } + req.Header.Set("If-None-Match", etag) + resp, err = http.DefaultClient.Do(req) + if err != nil { + t.Fatalf("Error constructing request: %s", err) + } + + checkResponse(t, "fetching manifest by name with etag", resp, http.StatusNotModified) + + // Get by digest with etag, gives 304 + req, err = http.NewRequest("GET", manifestDigestURL, nil) + if err != nil { + t.Fatalf("Error constructing request: %s", err) + } + req.Header.Set("If-None-Match", etag) + resp, err = http.DefaultClient.Do(req) + if err != nil { + t.Fatalf("Error constructing request: %s", err) + } + + checkResponse(t, "fetching manifest by dgst with etag", resp, http.StatusNotModified) + + // Ensure that the tag is listed. 
+ resp, err = http.Get(tagsURL) + if err != nil { + t.Fatalf("unexpected error getting unknown tags: %v", err) + } + defer resp.Body.Close() + + checkResponse(t, "getting tags", resp, http.StatusOK) + dec = json.NewDecoder(resp.Body) + + var tagsResponse tagsAPIResponse + + if err := dec.Decode(&tagsResponse); err != nil { + t.Fatalf("unexpected error decoding error response: %v", err) + } + + if tagsResponse.Name != imageName.Name() { + t.Fatalf("tags name should match image name: %v != %v", tagsResponse.Name, imageName.Name()) + } + + if len(tagsResponse.Tags) != 1 { + t.Fatalf("expected some tags in response: %v", tagsResponse.Tags) + } + + if tagsResponse.Tags[0] != tag { + t.Fatalf("tag not as expected: %q != %q", tagsResponse.Tags[0], tag) + } + + // Attempt to put a manifest with mismatching FSLayer and History array cardinalities + + unsignedManifest.History = append(unsignedManifest.History, schema1.History{ + V1Compatibility: "", + }) + invalidSigned, err := schema1.Sign(unsignedManifest, env.pk) + if err != nil { + t.Fatalf("error signing manifest") + } + + resp = putManifest(t, "putting invalid signed manifest", manifestDigestURL, "", invalidSigned) + checkResponse(t, "putting invalid signed manifest", resp, http.StatusBadRequest) + + return args +} + +func testManifestAPISchema2(t *testing.T, env *testEnv, imageName reference.Named) manifestArgs { + tag := "schema2tag" + args := manifestArgs{ + imageName: imageName, + mediaType: schema2.MediaTypeManifest, + } + + tagRef, _ := reference.WithTag(imageName, tag) + manifestURL, err := env.builder.BuildManifestURL(tagRef) + if err != nil { + t.Fatalf("unexpected error getting manifest url: %v", err) + } + + // ----------------------------- + // Attempt to fetch the manifest + resp, err := http.Get(manifestURL) + if err != nil { + t.Fatalf("unexpected error getting manifest: %v", err) + } + defer resp.Body.Close() + + checkResponse(t, "getting non-existent manifest", resp, http.StatusNotFound) + checkBodyHasErrorCodes(t, "getting non-existent manifest", resp, v2.ErrorCodeManifestUnknown) + + tagsURL, err := env.builder.BuildTagsURL(imageName) + if err != nil { + t.Fatalf("unexpected error building tags url: %v", err) + } + + resp, err = http.Get(tagsURL) + if err != nil { + t.Fatalf("unexpected error getting unknown tags: %v", err) + } + defer resp.Body.Close() + + // Check that we get an unknown repository error when asking for tags + checkResponse(t, "getting unknown manifest tags", resp, http.StatusNotFound) + checkBodyHasErrorCodes(t, "getting unknown manifest tags", resp, v2.ErrorCodeNameUnknown) + + // -------------------------------- + // Attempt to push manifest with missing config and missing layers + manifest := &schema2.Manifest{ + Versioned: manifest.Versioned{ + SchemaVersion: 2, + MediaType: schema2.MediaTypeManifest, + }, + Config: distribution.Descriptor{ + Digest: "sha256:1a9ec845ee94c202b2d5da74a24f0ed2058318bfa9879fa541efaecba272e86b", + Size: 3253, + MediaType: schema2.MediaTypeConfig, + }, + Layers: []distribution.Descriptor{ + { + Digest: "sha256:463434349086340864309863409683460843608348608934092322395278926a", + Size: 6323, + MediaType: schema2.MediaTypeLayer, + }, + { + Digest: "sha256:630923423623623423352523525237238023652897356239852383652aaaaaaa", + Size: 6863, + MediaType: schema2.MediaTypeLayer, + }, + }, + } + + resp = putManifest(t, "putting missing config manifest", manifestURL, schema2.MediaTypeManifest, manifest) + defer resp.Body.Close() + checkResponse(t, "putting missing config manifest", resp, 
http.StatusBadRequest) + _, p, counts := checkBodyHasErrorCodes(t, "putting missing config manifest", resp, v2.ErrorCodeManifestBlobUnknown) + + expectedCounts := map[errcode.ErrorCode]int{ + v2.ErrorCodeManifestBlobUnknown: 3, + } + + if !reflect.DeepEqual(counts, expectedCounts) { + t.Fatalf("unexpected number of error codes encountered: %v\n!=\n%v\n---\n%s", counts, expectedCounts, string(p)) + } + + // Push a config, and reference it in the manifest + sampleConfig := []byte(`{ + "architecture": "amd64", + "history": [ + { + "created": "2015-10-31T22:22:54.690851953Z", + "created_by": "/bin/sh -c #(nop) ADD file:a3bc1e842b69636f9df5256c49c5374fb4eef1e281fe3f282c65fb853ee171c5 in /" + }, + { + "created": "2015-10-31T22:22:55.613815829Z", + "created_by": "/bin/sh -c #(nop) CMD [\"sh\"]" + } + ], + "rootfs": { + "diff_ids": [ + "sha256:c6f988f4874bb0add23a778f753c65efe992244e148a1d2ec2a8b664fb66bbd1", + "sha256:5f70bf18a086007016e948b04aed3b82103a36bea41755b6cddfaf10ace3c6ef" + ], + "type": "layers" + } + }`) + sampleConfigDigest := digest.FromBytes(sampleConfig) + + uploadURLBase, _ := startPushLayer(t, env, imageName) + pushLayer(t, env.builder, imageName, sampleConfigDigest, uploadURLBase, bytes.NewReader(sampleConfig)) + manifest.Config.Digest = sampleConfigDigest + manifest.Config.Size = int64(len(sampleConfig)) + + // The manifest should still be invalid, because its layer doesn't exist + resp = putManifest(t, "putting missing layer manifest", manifestURL, schema2.MediaTypeManifest, manifest) + defer resp.Body.Close() + checkResponse(t, "putting missing layer manifest", resp, http.StatusBadRequest) + _, p, counts = checkBodyHasErrorCodes(t, "getting unknown manifest tags", resp, v2.ErrorCodeManifestBlobUnknown) + + expectedCounts = map[errcode.ErrorCode]int{ + v2.ErrorCodeManifestBlobUnknown: 2, + } + + if !reflect.DeepEqual(counts, expectedCounts) { + t.Fatalf("unexpected number of error codes encountered: %v\n!=\n%v\n---\n%s", counts, expectedCounts, string(p)) + } + + // Push 2 random layers + expectedLayers := make(map[digest.Digest]io.ReadSeeker) + + for i := range manifest.Layers { + rs, dgstStr, err := testutil.CreateRandomTarFile() + + if err != nil { + t.Fatalf("error creating random layer %d: %v", i, err) + } + dgst := digest.Digest(dgstStr) + + expectedLayers[dgst] = rs + manifest.Layers[i].Digest = dgst + + uploadURLBase, _ := startPushLayer(t, env, imageName) + pushLayer(t, env.builder, imageName, dgst, uploadURLBase, rs) + } + + // ------------------- + // Push the manifest with all layers pushed. 
+ deserializedManifest, err := schema2.FromStruct(*manifest) + if err != nil { + t.Fatalf("could not create DeserializedManifest: %v", err) + } + _, canonical, err := deserializedManifest.Payload() + if err != nil { + t.Fatalf("could not get manifest payload: %v", err) + } + dgst := digest.FromBytes(canonical) + args.dgst = dgst + args.manifest = deserializedManifest + + digestRef, _ := reference.WithDigest(imageName, dgst) + manifestDigestURL, err := env.builder.BuildManifestURL(digestRef) + checkErr(t, err, "building manifest url") + + resp = putManifest(t, "putting manifest no error", manifestURL, schema2.MediaTypeManifest, manifest) + checkResponse(t, "putting manifest no error", resp, http.StatusCreated) + checkHeaders(t, resp, http.Header{ + "Location": []string{manifestDigestURL}, + "Docker-Content-Digest": []string{dgst.String()}, + }) + + // -------------------- + // Push by digest -- should get same result + resp = putManifest(t, "putting manifest by digest", manifestDigestURL, schema2.MediaTypeManifest, manifest) + checkResponse(t, "putting manifest by digest", resp, http.StatusCreated) + checkHeaders(t, resp, http.Header{ + "Location": []string{manifestDigestURL}, + "Docker-Content-Digest": []string{dgst.String()}, + }) + + // ------------------ + // Fetch by tag name + req, err := http.NewRequest("GET", manifestURL, nil) + if err != nil { + t.Fatalf("Error constructing request: %s", err) + } + req.Header.Set("Accept", schema2.MediaTypeManifest) + resp, err = http.DefaultClient.Do(req) + if err != nil { + t.Fatalf("unexpected error fetching manifest: %v", err) + } + defer resp.Body.Close() + + checkResponse(t, "fetching uploaded manifest", resp, http.StatusOK) + checkHeaders(t, resp, http.Header{ + "Docker-Content-Digest": []string{dgst.String()}, + "ETag": []string{fmt.Sprintf(`"%s"`, dgst)}, + }) + + var fetchedManifest schema2.DeserializedManifest + dec := json.NewDecoder(resp.Body) + + if err := dec.Decode(&fetchedManifest); err != nil { + t.Fatalf("error decoding fetched manifest: %v", err) + } + + _, fetchedCanonical, err := fetchedManifest.Payload() + if err != nil { + t.Fatalf("error getting manifest payload: %v", err) + } + + if !bytes.Equal(fetchedCanonical, canonical) { + t.Fatalf("manifests do not match") + } + + // --------------- + // Fetch by digest + req, err = http.NewRequest("GET", manifestDigestURL, nil) + if err != nil { + t.Fatalf("Error constructing request: %s", err) + } + req.Header.Set("Accept", schema2.MediaTypeManifest) + resp, err = http.DefaultClient.Do(req) + checkErr(t, err, "fetching manifest by digest") + defer resp.Body.Close() + + checkResponse(t, "fetching uploaded manifest", resp, http.StatusOK) + checkHeaders(t, resp, http.Header{ + "Docker-Content-Digest": []string{dgst.String()}, + "ETag": []string{fmt.Sprintf(`"%s"`, dgst)}, + }) + + var fetchedManifestByDigest schema2.DeserializedManifest + dec = json.NewDecoder(resp.Body) + if err := dec.Decode(&fetchedManifestByDigest); err != nil { + t.Fatalf("error decoding fetched manifest: %v", err) + } + + _, fetchedCanonical, err = fetchedManifestByDigest.Payload() + if err != nil { + t.Fatalf("error getting manifest payload: %v", err) + } + + if !bytes.Equal(fetchedCanonical, canonical) { + t.Fatalf("manifests do not match") + } + + // Get by name with etag, gives 304 + etag := resp.Header.Get("Etag") + req, err = http.NewRequest("GET", manifestURL, nil) + if err != nil { + t.Fatalf("Error constructing request: %s", err) + } + req.Header.Set("If-None-Match", etag) + resp, err = 
http.DefaultClient.Do(req) + if err != nil { + t.Fatalf("Error constructing request: %s", err) + } + + checkResponse(t, "fetching manifest by name with etag", resp, http.StatusNotModified) + + // Get by digest with etag, gives 304 + req, err = http.NewRequest("GET", manifestDigestURL, nil) + if err != nil { + t.Fatalf("Error constructing request: %s", err) + } + req.Header.Set("If-None-Match", etag) + resp, err = http.DefaultClient.Do(req) + if err != nil { + t.Fatalf("Error constructing request: %s", err) + } + + checkResponse(t, "fetching manifest by dgst with etag", resp, http.StatusNotModified) + + // Ensure that the tag is listed. + resp, err = http.Get(tagsURL) + if err != nil { + t.Fatalf("unexpected error getting unknown tags: %v", err) + } + defer resp.Body.Close() + + checkResponse(t, "getting unknown manifest tags", resp, http.StatusOK) + dec = json.NewDecoder(resp.Body) + + var tagsResponse tagsAPIResponse + + if err := dec.Decode(&tagsResponse); err != nil { + t.Fatalf("unexpected error decoding error response: %v", err) + } + + if tagsResponse.Name != imageName.Name() { + t.Fatalf("tags name should match image name: %v != %v", tagsResponse.Name, imageName) + } + + if len(tagsResponse.Tags) != 1 { + t.Fatalf("expected some tags in response: %v", tagsResponse.Tags) + } + + if tagsResponse.Tags[0] != tag { + t.Fatalf("tag not as expected: %q != %q", tagsResponse.Tags[0], tag) + } + + // ------------------ + // Fetch as a schema1 manifest + resp, err = http.Get(manifestURL) + if err != nil { + t.Fatalf("unexpected error fetching manifest as schema1: %v", err) + } + defer resp.Body.Close() + + manifestBytes, err := ioutil.ReadAll(resp.Body) + if err != nil { + t.Fatalf("error reading response body: %v", err) + } + + checkResponse(t, "fetching uploaded manifest as schema1", resp, http.StatusOK) + + m, desc, err := distribution.UnmarshalManifest(schema1.MediaTypeManifest, manifestBytes) + if err != nil { + t.Fatalf("unexpected error unmarshalling manifest: %v", err) + } + + fetchedSchema1Manifest, ok := m.(*schema1.SignedManifest) + if !ok { + t.Fatalf("expecting schema1 manifest") + } + + checkHeaders(t, resp, http.Header{ + "Docker-Content-Digest": []string{desc.Digest.String()}, + "ETag": []string{fmt.Sprintf(`"%s"`, desc.Digest)}, + }) + + if fetchedSchema1Manifest.Manifest.SchemaVersion != 1 { + t.Fatal("wrong schema version") + } + if fetchedSchema1Manifest.Architecture != "amd64" { + t.Fatal("wrong architecture") + } + if fetchedSchema1Manifest.Name != imageName.Name() { + t.Fatal("wrong image name") + } + if fetchedSchema1Manifest.Tag != tag { + t.Fatal("wrong tag") + } + if len(fetchedSchema1Manifest.FSLayers) != 2 { + t.Fatal("wrong number of FSLayers") + } + for i := range manifest.Layers { + if fetchedSchema1Manifest.FSLayers[i].BlobSum != manifest.Layers[len(manifest.Layers)-i-1].Digest { + t.Fatalf("blob digest mismatch in schema1 manifest for layer %d", i) + } + } + if len(fetchedSchema1Manifest.History) != 2 { + t.Fatal("wrong number of History entries") + } + + // Don't check V1Compatibility fields because we're using randomly-generated + // layers. 
+ + return args +} + +func testManifestAPIManifestList(t *testing.T, env *testEnv, args manifestArgs) { + imageName := args.imageName + tag := "manifestlisttag" + + tagRef, _ := reference.WithTag(imageName, tag) + manifestURL, err := env.builder.BuildManifestURL(tagRef) + if err != nil { + t.Fatalf("unexpected error getting manifest url: %v", err) + } + + // -------------------------------- + // Attempt to push manifest list that refers to an unknown manifest + manifestList := &manifestlist.ManifestList{ + Versioned: manifest.Versioned{ + SchemaVersion: 2, + MediaType: manifestlist.MediaTypeManifestList, + }, + Manifests: []manifestlist.ManifestDescriptor{ + { + Descriptor: distribution.Descriptor{ + Digest: "sha256:1a9ec845ee94c202b2d5da74a24f0ed2058318bfa9879fa541efaecba272e86b", + Size: 3253, + MediaType: schema2.MediaTypeManifest, + }, + Platform: manifestlist.PlatformSpec{ + Architecture: "amd64", + OS: "linux", + }, + }, + }, + } + + resp := putManifest(t, "putting missing manifest manifestlist", manifestURL, manifestlist.MediaTypeManifestList, manifestList) + defer resp.Body.Close() + checkResponse(t, "putting missing manifest manifestlist", resp, http.StatusBadRequest) + _, p, counts := checkBodyHasErrorCodes(t, "putting missing manifest manifestlist", resp, v2.ErrorCodeManifestBlobUnknown) + + expectedCounts := map[errcode.ErrorCode]int{ + v2.ErrorCodeManifestBlobUnknown: 1, + } + + if !reflect.DeepEqual(counts, expectedCounts) { + t.Fatalf("unexpected number of error codes encountered: %v\n!=\n%v\n---\n%s", counts, expectedCounts, string(p)) + } + + // ------------------- + // Push a manifest list that references an actual manifest + manifestList.Manifests[0].Digest = args.dgst + deserializedManifestList, err := manifestlist.FromDescriptors(manifestList.Manifests) + if err != nil { + t.Fatalf("could not create DeserializedManifestList: %v", err) + } + _, canonical, err := deserializedManifestList.Payload() + if err != nil { + t.Fatalf("could not get manifest list payload: %v", err) + } + dgst := digest.FromBytes(canonical) + + digestRef, _ := reference.WithDigest(imageName, dgst) + manifestDigestURL, err := env.builder.BuildManifestURL(digestRef) + checkErr(t, err, "building manifest url") + + resp = putManifest(t, "putting manifest list no error", manifestURL, manifestlist.MediaTypeManifestList, deserializedManifestList) + checkResponse(t, "putting manifest list no error", resp, http.StatusCreated) + checkHeaders(t, resp, http.Header{ + "Location": []string{manifestDigestURL}, + "Docker-Content-Digest": []string{dgst.String()}, + }) + + // -------------------- + // Push by digest -- should get same result + resp = putManifest(t, "putting manifest list by digest", manifestDigestURL, manifestlist.MediaTypeManifestList, deserializedManifestList) + checkResponse(t, "putting manifest list by digest", resp, http.StatusCreated) + checkHeaders(t, resp, http.Header{ + "Location": []string{manifestDigestURL}, + "Docker-Content-Digest": []string{dgst.String()}, + }) + + // ------------------ + // Fetch by tag name + req, err := http.NewRequest("GET", manifestURL, nil) + if err != nil { + t.Fatalf("Error constructing request: %s", err) + } + // multiple headers in mixed list format to ensure we parse correctly server-side + req.Header.Set("Accept", fmt.Sprintf(` %s ; q=0.8 , %s ; q=0.5 `, manifestlist.MediaTypeManifestList, schema1.MediaTypeSignedManifest)) + req.Header.Add("Accept", schema2.MediaTypeManifest) + resp, err = http.DefaultClient.Do(req) + if err != nil { + t.Fatalf("unexpected 
error fetching manifest list: %v", err) + } + defer resp.Body.Close() + + checkResponse(t, "fetching uploaded manifest list", resp, http.StatusOK) + checkHeaders(t, resp, http.Header{ + "Docker-Content-Digest": []string{dgst.String()}, + "ETag": []string{fmt.Sprintf(`"%s"`, dgst)}, + }) + + var fetchedManifestList manifestlist.DeserializedManifestList + dec := json.NewDecoder(resp.Body) + + if err := dec.Decode(&fetchedManifestList); err != nil { + t.Fatalf("error decoding fetched manifest list: %v", err) + } + + _, fetchedCanonical, err := fetchedManifestList.Payload() + if err != nil { + t.Fatalf("error getting manifest list payload: %v", err) + } + + if !bytes.Equal(fetchedCanonical, canonical) { + t.Fatalf("manifest lists do not match") + } + + // --------------- + // Fetch by digest + req, err = http.NewRequest("GET", manifestDigestURL, nil) + if err != nil { + t.Fatalf("Error constructing request: %s", err) + } + req.Header.Set("Accept", manifestlist.MediaTypeManifestList) + resp, err = http.DefaultClient.Do(req) + checkErr(t, err, "fetching manifest list by digest") + defer resp.Body.Close() + + checkResponse(t, "fetching uploaded manifest list", resp, http.StatusOK) + checkHeaders(t, resp, http.Header{ + "Docker-Content-Digest": []string{dgst.String()}, + "ETag": []string{fmt.Sprintf(`"%s"`, dgst)}, + }) + + var fetchedManifestListByDigest manifestlist.DeserializedManifestList + dec = json.NewDecoder(resp.Body) + if err := dec.Decode(&fetchedManifestListByDigest); err != nil { + t.Fatalf("error decoding fetched manifest: %v", err) + } + + _, fetchedCanonical, err = fetchedManifestListByDigest.Payload() + if err != nil { + t.Fatalf("error getting manifest list payload: %v", err) + } + + if !bytes.Equal(fetchedCanonical, canonical) { + t.Fatalf("manifests do not match") + } + + // Get by name with etag, gives 304 + etag := resp.Header.Get("Etag") + req, err = http.NewRequest("GET", manifestURL, nil) + if err != nil { + t.Fatalf("Error constructing request: %s", err) + } + req.Header.Set("If-None-Match", etag) + resp, err = http.DefaultClient.Do(req) + if err != nil { + t.Fatalf("Error constructing request: %s", err) + } + + checkResponse(t, "fetching manifest by name with etag", resp, http.StatusNotModified) + + // Get by digest with etag, gives 304 + req, err = http.NewRequest("GET", manifestDigestURL, nil) + if err != nil { + t.Fatalf("Error constructing request: %s", err) + } + req.Header.Set("If-None-Match", etag) + resp, err = http.DefaultClient.Do(req) + if err != nil { + t.Fatalf("Error constructing request: %s", err) + } + + checkResponse(t, "fetching manifest by dgst with etag", resp, http.StatusNotModified) + + // ------------------ + // Fetch as a schema1 manifest + resp, err = http.Get(manifestURL) + if err != nil { + t.Fatalf("unexpected error fetching manifest list as schema1: %v", err) + } + defer resp.Body.Close() + + manifestBytes, err := ioutil.ReadAll(resp.Body) + if err != nil { + t.Fatalf("error reading response body: %v", err) + } + + checkResponse(t, "fetching uploaded manifest list as schema1", resp, http.StatusOK) + + m, desc, err := distribution.UnmarshalManifest(schema1.MediaTypeManifest, manifestBytes) + if err != nil { + t.Fatalf("unexpected error unmarshalling manifest: %v", err) + } + + fetchedSchema1Manifest, ok := m.(*schema1.SignedManifest) + if !ok { + t.Fatalf("expecting schema1 manifest") + } + + checkHeaders(t, resp, http.Header{ + "Docker-Content-Digest": []string{desc.Digest.String()}, + "ETag": []string{fmt.Sprintf(`"%s"`, desc.Digest)}, + }) 
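+ // Sanity-check the schema1 view generated from the manifest list's referenced schema2 manifest: name, tag and architecture should carry over, and the FSLayers should mirror the schema2 layers in reverse order.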
+ + if fetchedSchema1Manifest.Manifest.SchemaVersion != 1 { + t.Fatal("wrong schema version") + } + if fetchedSchema1Manifest.Architecture != "amd64" { + t.Fatal("wrong architecture") + } + if fetchedSchema1Manifest.Name != imageName.Name() { + t.Fatal("wrong image name") + } + if fetchedSchema1Manifest.Tag != tag { + t.Fatal("wrong tag") + } + if len(fetchedSchema1Manifest.FSLayers) != 2 { + t.Fatal("wrong number of FSLayers") + } + layers := args.manifest.(*schema2.DeserializedManifest).Layers + for i := range layers { + if fetchedSchema1Manifest.FSLayers[i].BlobSum != layers[len(layers)-i-1].Digest { + t.Fatalf("blob digest mismatch in schema1 manifest for layer %d", i) + } + } + if len(fetchedSchema1Manifest.History) != 2 { + t.Fatal("wrong number of History entries") + } + + // Don't check V1Compatibility fields because we're using randomly-generated + // layers. +} + +func testManifestDelete(t *testing.T, env *testEnv, args manifestArgs) { + imageName := args.imageName + dgst := args.dgst + manifest := args.manifest + + ref, _ := reference.WithDigest(imageName, dgst) + manifestDigestURL, err := env.builder.BuildManifestURL(ref) + checkErr(t, err, "building manifest url") + // --------------- + // Delete by digest + resp, err := httpDelete(manifestDigestURL) + checkErr(t, err, "deleting manifest by digest") + + checkResponse(t, "deleting manifest", resp, http.StatusAccepted) + checkHeaders(t, resp, http.Header{ + "Content-Length": []string{"0"}, + }) + + // --------------- + // Attempt to fetch deleted manifest + resp, err = http.Get(manifestDigestURL) + checkErr(t, err, "fetching deleted manifest by digest") + defer resp.Body.Close() + + checkResponse(t, "fetching deleted manifest", resp, http.StatusNotFound) + + // --------------- + // Delete already deleted manifest by digest + resp, err = httpDelete(manifestDigestURL) + checkErr(t, err, "re-deleting manifest by digest") + + checkResponse(t, "re-deleting manifest", resp, http.StatusNotFound) + + // -------------------- + // Re-upload manifest by digest + resp = putManifest(t, "putting manifest", manifestDigestURL, args.mediaType, manifest) + checkResponse(t, "putting manifest", resp, http.StatusCreated) + checkHeaders(t, resp, http.Header{ + "Location": []string{manifestDigestURL}, + "Docker-Content-Digest": []string{dgst.String()}, + }) + + // --------------- + // Attempt to fetch re-uploaded deleted digest + resp, err = http.Get(manifestDigestURL) + checkErr(t, err, "fetching re-uploaded manifest by digest") + defer resp.Body.Close() + + checkResponse(t, "fetching re-uploaded manifest", resp, http.StatusOK) + checkHeaders(t, resp, http.Header{ + "Docker-Content-Digest": []string{dgst.String()}, + }) + + // --------------- + // Attempt to delete an unknown manifest + unknownDigest := digest.Digest("sha256:aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa") + unknownRef, _ := reference.WithDigest(imageName, unknownDigest) + unknownManifestDigestURL, err := env.builder.BuildManifestURL(unknownRef) + checkErr(t, err, "building unknown manifest url") + + resp, err = httpDelete(unknownManifestDigestURL) + checkErr(t, err, "deleting unknown manifest by digest") + checkResponse(t, "deleting unknown manifest", resp, http.StatusNotFound) + + // -------------------- + // Upload manifest by tag + tag := "atag" + tagRef, _ := reference.WithTag(imageName, tag) + manifestTagURL, err := env.builder.BuildManifestURL(tagRef) + checkErr(t, err, "building manifest url by tag") + resp = putManifest(t, "putting manifest by tag", manifestTagURL, args.mediaType, manifest) + checkResponse(t, "putting manifest by tag", resp, 
http.StatusCreated) + checkHeaders(t, resp, http.Header{ + "Location": []string{manifestDigestURL}, + "Docker-Content-Digest": []string{dgst.String()}, + }) + + tagsURL, err := env.builder.BuildTagsURL(imageName) + if err != nil { + t.Fatalf("unexpected error building tags url: %v", err) + } + + // Ensure that the tag is listed. + resp, err = http.Get(tagsURL) + if err != nil { + t.Fatalf("unexpected error getting unknown tags: %v", err) + } + defer resp.Body.Close() + + dec := json.NewDecoder(resp.Body) + var tagsResponse tagsAPIResponse + if err := dec.Decode(&tagsResponse); err != nil { + t.Fatalf("unexpected error decoding error response: %v", err) + } + + if tagsResponse.Name != imageName.Name() { + t.Fatalf("tags name should match image name: %v != %v", tagsResponse.Name, imageName) + } + + if len(tagsResponse.Tags) != 1 { + t.Fatalf("expected some tags in response: %v", tagsResponse.Tags) + } + + if tagsResponse.Tags[0] != tag { + t.Fatalf("tag not as expected: %q != %q", tagsResponse.Tags[0], tag) + } + + // --------------- + // Delete by digest + resp, err = httpDelete(manifestDigestURL) + checkErr(t, err, "deleting manifest by digest") + + checkResponse(t, "deleting manifest with tag", resp, http.StatusAccepted) + checkHeaders(t, resp, http.Header{ + "Content-Length": []string{"0"}, + }) + + // Ensure that the tag is not listed. + resp, err = http.Get(tagsURL) + if err != nil { + t.Fatalf("unexpected error getting unknown tags: %v", err) + } + defer resp.Body.Close() + + dec = json.NewDecoder(resp.Body) + if err := dec.Decode(&tagsResponse); err != nil { + t.Fatalf("unexpected error decoding error response: %v", err) + } + + if tagsResponse.Name != imageName.Name() { + t.Fatalf("tags name should match image name: %v != %v", tagsResponse.Name, imageName) + } + + if len(tagsResponse.Tags) != 0 { + t.Fatalf("expected 0 tags in response: %v", tagsResponse.Tags) + } + +} + +type testEnv struct { + pk libtrust.PrivateKey + ctx context.Context + config configuration.Configuration + app *App + server *httptest.Server + builder *v2.URLBuilder +} + +func newTestEnvMirror(t *testing.T, deleteEnabled bool) *testEnv { + config := configuration.Configuration{ + Storage: configuration.Storage{ + "testdriver": configuration.Parameters{}, + "delete": configuration.Parameters{"enabled": deleteEnabled}, + "maintenance": configuration.Parameters{"uploadpurging": map[interface{}]interface{}{ + "enabled": false, + }}, + }, + Proxy: configuration.Proxy{ + RemoteURL: "http://example.com", + }, + } + + return newTestEnvWithConfig(t, &config) + +} + +func newTestEnv(t *testing.T, deleteEnabled bool) *testEnv { + config := configuration.Configuration{ + Storage: configuration.Storage{ + "testdriver": configuration.Parameters{}, + "delete": configuration.Parameters{"enabled": deleteEnabled}, + "maintenance": configuration.Parameters{"uploadpurging": map[interface{}]interface{}{ + "enabled": false, + }}, + }, + } + + config.HTTP.Headers = headerConfig + + return newTestEnvWithConfig(t, &config) +} + +func newTestEnvWithConfig(t *testing.T, config *configuration.Configuration) *testEnv { + ctx := context.Background() + + app := NewApp(ctx, config) + server := httptest.NewServer(handlers.CombinedLoggingHandler(os.Stderr, app)) + builder, err := v2.NewURLBuilderFromString(server.URL+config.HTTP.Prefix, false) + + if err != nil { + t.Fatalf("error creating url builder: %v", err) + } + + pk, err := libtrust.GenerateECP256PrivateKey() + if err != nil { + t.Fatalf("unexpected error generating private key: %v", err) 
+ } + + return &testEnv{ + pk: pk, + ctx: ctx, + config: *config, + app: app, + server: server, + builder: builder, + } +} + +func (t *testEnv) Shutdown() { + t.server.CloseClientConnections() + t.server.Close() +} + +func putManifest(t *testing.T, msg, url, contentType string, v interface{}) *http.Response { + var body []byte + + switch m := v.(type) { + case *schema1.SignedManifest: + _, pl, err := m.Payload() + if err != nil { + t.Fatalf("error getting payload: %v", err) + } + body = pl + case *manifestlist.DeserializedManifestList: + _, pl, err := m.Payload() + if err != nil { + t.Fatalf("error getting payload: %v", err) + } + body = pl + default: + var err error + body, err = json.MarshalIndent(v, "", " ") + if err != nil { + t.Fatalf("unexpected error marshaling %v: %v", v, err) + } + } + + req, err := http.NewRequest("PUT", url, bytes.NewReader(body)) + if err != nil { + t.Fatalf("error creating request for %s: %v", msg, err) + } + + if contentType != "" { + req.Header.Set("Content-Type", contentType) + } + + resp, err := http.DefaultClient.Do(req) + if err != nil { + t.Fatalf("error doing put request while %s: %v", msg, err) + } + + return resp +} + +func startPushLayer(t *testing.T, env *testEnv, name reference.Named) (location string, uuid string) { + layerUploadURL, err := env.builder.BuildBlobUploadURL(name) + if err != nil { + t.Fatalf("unexpected error building layer upload url: %v", err) + } + + u, err := url.Parse(layerUploadURL) + if err != nil { + t.Fatalf("error parsing layer upload URL: %v", err) + } + + base, err := url.Parse(env.server.URL) + if err != nil { + t.Fatalf("error parsing server URL: %v", err) + } + + layerUploadURL = base.ResolveReference(u).String() + resp, err := http.Post(layerUploadURL, "", nil) + if err != nil { + t.Fatalf("unexpected error starting layer push: %v", err) + } + + defer resp.Body.Close() + + checkResponse(t, fmt.Sprintf("pushing starting layer push %v", name.String()), resp, http.StatusAccepted) + + u, err = url.Parse(resp.Header.Get("Location")) + if err != nil { + t.Fatalf("error parsing location header: %v", err) + } + + uuid = path.Base(u.Path) + checkHeaders(t, resp, http.Header{ + "Location": []string{"*"}, + "Content-Length": []string{"0"}, + "Docker-Upload-UUID": []string{uuid}, + }) + + return resp.Header.Get("Location"), uuid +} + +// doPushLayer pushes the layer content returning the url on success returning +// the response. If you're only expecting a successful response, use pushLayer. +func doPushLayer(t *testing.T, ub *v2.URLBuilder, name reference.Named, dgst digest.Digest, uploadURLBase string, body io.Reader) (*http.Response, error) { + u, err := url.Parse(uploadURLBase) + if err != nil { + t.Fatalf("unexpected error parsing pushLayer url: %v", err) + } + + u.RawQuery = url.Values{ + "_state": u.Query()["_state"], + "digest": []string{dgst.String()}, + }.Encode() + + uploadURL := u.String() + + // Just do a monolithic upload + req, err := http.NewRequest("PUT", uploadURL, body) + if err != nil { + t.Fatalf("unexpected error creating new request: %v", err) + } + + return http.DefaultClient.Do(req) +} + +// pushLayer pushes the layer content returning the url on success. 
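+// It asserts a 201 Created response and checks the Location, Content-Length and Docker-Content-Digest headers; use doPushLayer directly when a non-success response is expected.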
+func pushLayer(t *testing.T, ub *v2.URLBuilder, name reference.Named, dgst digest.Digest, uploadURLBase string, body io.Reader) string { + digester := digest.Canonical.New() + + resp, err := doPushLayer(t, ub, name, dgst, uploadURLBase, io.TeeReader(body, digester.Hash())) + if err != nil { + t.Fatalf("unexpected error doing push layer request: %v", err) + } + defer resp.Body.Close() + + checkResponse(t, "putting monolithic chunk", resp, http.StatusCreated) + + if err != nil { + t.Fatalf("error generating sha256 digest of body") + } + + sha256Dgst := digester.Digest() + + ref, _ := reference.WithDigest(name, sha256Dgst) + expectedLayerURL, err := ub.BuildBlobURL(ref) + if err != nil { + t.Fatalf("error building expected layer url: %v", err) + } + + checkHeaders(t, resp, http.Header{ + "Location": []string{expectedLayerURL}, + "Content-Length": []string{"0"}, + "Docker-Content-Digest": []string{sha256Dgst.String()}, + }) + + return resp.Header.Get("Location") +} + +func finishUpload(t *testing.T, ub *v2.URLBuilder, name reference.Named, uploadURLBase string, dgst digest.Digest) string { + resp, err := doPushLayer(t, ub, name, dgst, uploadURLBase, nil) + if err != nil { + t.Fatalf("unexpected error doing push layer request: %v", err) + } + defer resp.Body.Close() + + checkResponse(t, "putting monolithic chunk", resp, http.StatusCreated) + + ref, _ := reference.WithDigest(name, dgst) + expectedLayerURL, err := ub.BuildBlobURL(ref) + if err != nil { + t.Fatalf("error building expected layer url: %v", err) + } + + checkHeaders(t, resp, http.Header{ + "Location": []string{expectedLayerURL}, + "Content-Length": []string{"0"}, + "Docker-Content-Digest": []string{dgst.String()}, + }) + + return resp.Header.Get("Location") +} + +func doPushChunk(t *testing.T, uploadURLBase string, body io.Reader) (*http.Response, digest.Digest, error) { + u, err := url.Parse(uploadURLBase) + if err != nil { + t.Fatalf("unexpected error parsing pushLayer url: %v", err) + } + + u.RawQuery = url.Values{ + "_state": u.Query()["_state"], + }.Encode() + + uploadURL := u.String() + + digester := digest.Canonical.New() + + req, err := http.NewRequest("PATCH", uploadURL, io.TeeReader(body, digester.Hash())) + if err != nil { + t.Fatalf("unexpected error creating new request: %v", err) + } + req.Header.Set("Content-Type", "application/octet-stream") + + resp, err := http.DefaultClient.Do(req) + + return resp, digester.Digest(), err +} + +func pushChunk(t *testing.T, ub *v2.URLBuilder, name reference.Named, uploadURLBase string, body io.Reader, length int64) (string, digest.Digest) { + resp, dgst, err := doPushChunk(t, uploadURLBase, body) + if err != nil { + t.Fatalf("unexpected error doing push layer request: %v", err) + } + defer resp.Body.Close() + + checkResponse(t, "putting chunk", resp, http.StatusAccepted) + + if err != nil { + t.Fatalf("error generating sha256 digest of body") + } + + checkHeaders(t, resp, http.Header{ + "Range": []string{fmt.Sprintf("0-%d", length-1)}, + "Content-Length": []string{"0"}, + }) + + return resp.Header.Get("Location"), dgst +} + +func checkResponse(t *testing.T, msg string, resp *http.Response, expectedStatus int) { + if resp.StatusCode != expectedStatus { + t.Logf("unexpected status %s: %v != %v", msg, resp.StatusCode, expectedStatus) + maybeDumpResponse(t, resp) + + t.FailNow() + } + + // We expect the headers included in the configuration, unless the + // status code is 405 (Method Not Allowed), which means the handler + // doesn't even get called. 
+ if resp.StatusCode != 405 && !reflect.DeepEqual(resp.Header["X-Content-Type-Options"], []string{"nosniff"}) { + t.Logf("missing or incorrect header X-Content-Type-Options %s", msg) + maybeDumpResponse(t, resp) + + t.FailNow() + } +} + +// checkBodyHasErrorCodes ensures the body is an error body and has the +// expected error codes, returning the error structure, the json slice and a +// count of the errors by code. +func checkBodyHasErrorCodes(t *testing.T, msg string, resp *http.Response, errorCodes ...errcode.ErrorCode) (errcode.Errors, []byte, map[errcode.ErrorCode]int) { + p, err := ioutil.ReadAll(resp.Body) + if err != nil { + t.Fatalf("unexpected error reading body %s: %v", msg, err) + } + + var errs errcode.Errors + if err := json.Unmarshal(p, &errs); err != nil { + t.Fatalf("unexpected error decoding error response: %v", err) + } + + if len(errs) == 0 { + t.Fatalf("expected errors in response") + } + + // TODO(stevvooe): Shoot. The error setup is not working out. The content- + // type headers are being set after writing the status code. + // if resp.Header.Get("Content-Type") != "application/json; charset=utf-8" { + // t.Fatalf("unexpected content type: %v != 'application/json'", + // resp.Header.Get("Content-Type")) + // } + + expected := map[errcode.ErrorCode]struct{}{} + counts := map[errcode.ErrorCode]int{} + + // Initialize map with zeros for expected + for _, code := range errorCodes { + expected[code] = struct{}{} + counts[code] = 0 + } + + for _, e := range errs { + err, ok := e.(errcode.ErrorCoder) + if !ok { + t.Fatalf("not an ErrorCoder: %#v", e) + } + if _, ok := expected[err.ErrorCode()]; !ok { + t.Fatalf("unexpected error code %v encountered during %s: %s ", err.ErrorCode(), msg, string(p)) + } + counts[err.ErrorCode()]++ + } + + // Ensure that counts of expected errors were all non-zero + for code := range expected { + if counts[code] == 0 { + t.Fatalf("expected error code %v not encounterd during %s: %s", code, msg, string(p)) + } + } + + return errs, p, counts +} + +func maybeDumpResponse(t *testing.T, resp *http.Response) { + if d, err := httputil.DumpResponse(resp, true); err != nil { + t.Logf("error dumping response: %v", err) + } else { + t.Logf("response:\n%s", string(d)) + } +} + +// matchHeaders checks that the response has at least the headers. If not, the +// test will fail. If a passed in header value is "*", any non-zero value will +// suffice as a match. +func checkHeaders(t *testing.T, resp *http.Response, headers http.Header) { + for k, vs := range headers { + if resp.Header.Get(k) == "" { + t.Fatalf("response missing header %q", k) + } + + for _, v := range vs { + if v == "*" { + // Just ensure there is some value. 
+ if len(resp.Header[http.CanonicalHeaderKey(k)]) > 0 { + continue + } + } + + for _, hv := range resp.Header[http.CanonicalHeaderKey(k)] { + if hv != v { + t.Fatalf("%+v %v header value not matched in response: %q != %q", resp.Header, k, hv, v) + } + } + } + } +} + +func checkErr(t *testing.T, err error, msg string) { + if err != nil { + t.Fatalf("unexpected error %s: %v", msg, err) + } +} + +func createRepository(env *testEnv, t *testing.T, imageName string, tag string) digest.Digest { + imageNameRef, err := reference.ParseNamed(imageName) + if err != nil { + t.Fatalf("unable to parse reference: %v", err) + } + + unsignedManifest := &schema1.Manifest{ + Versioned: manifest.Versioned{ + SchemaVersion: 1, + }, + Name: imageName, + Tag: tag, + FSLayers: []schema1.FSLayer{ + { + BlobSum: "asdf", + }, + }, + History: []schema1.History{ + { + V1Compatibility: "", + }, + }, + } + + // Push 2 random layers + expectedLayers := make(map[digest.Digest]io.ReadSeeker) + + for i := range unsignedManifest.FSLayers { + rs, dgstStr, err := testutil.CreateRandomTarFile() + if err != nil { + t.Fatalf("error creating random layer %d: %v", i, err) + } + dgst := digest.Digest(dgstStr) + + expectedLayers[dgst] = rs + unsignedManifest.FSLayers[i].BlobSum = dgst + uploadURLBase, _ := startPushLayer(t, env, imageNameRef) + pushLayer(t, env.builder, imageNameRef, dgst, uploadURLBase, rs) + } + + signedManifest, err := schema1.Sign(unsignedManifest, env.pk) + if err != nil { + t.Fatalf("unexpected error signing manifest: %v", err) + } + + dgst := digest.FromBytes(signedManifest.Canonical) + + // Create this repository by tag to ensure the tag mapping is made in the registry + tagRef, _ := reference.WithTag(imageNameRef, tag) + manifestDigestURL, err := env.builder.BuildManifestURL(tagRef) + checkErr(t, err, "building manifest url") + + digestRef, _ := reference.WithDigest(imageNameRef, dgst) + location, err := env.builder.BuildManifestURL(digestRef) + checkErr(t, err, "building location URL") + + resp := putManifest(t, "putting signed manifest", manifestDigestURL, "", signedManifest) + checkResponse(t, "putting signed manifest", resp, http.StatusCreated) + checkHeaders(t, resp, http.Header{ + "Location": []string{location}, + "Docker-Content-Digest": []string{dgst.String()}, + }) + return dgst +} + +// Test mutation operations on a registry configured as a cache. Ensure that they return +// appropriate errors. 
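Worth spelling out before the next test: a registry becomes a pull-through cache purely through configuration. When Proxy.RemoteURL is set, NewApp (further down in this patch) wraps the storage-backed registry in a proxying namespace, and the mutating endpoints start answering with ErrorCodeUnsupported, which is exactly what TestRegistryAsCacheMutationAPIs asserts. A minimal sketch of such a configuration, reusing the in-process test driver these tests rely on; the remote URL is a placeholder.

package main

import (
	"fmt"

	"github.com/docker/distribution/configuration"
)

func main() {
	config := configuration.Configuration{
		Storage: configuration.Storage{
			"testdriver": configuration.Parameters{},
		},
		Proxy: configuration.Proxy{
			// Placeholder for the upstream registry this cache would mirror.
			RemoteURL: "https://registry.example.com",
		},
	}

	// NewApp treats a non-empty RemoteURL as "run as a pull-through cache".
	fmt.Println("pull-through cache:", config.Proxy.RemoteURL != "")
}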
+func TestRegistryAsCacheMutationAPIs(t *testing.T) { + deleteEnabled := true + env := newTestEnvMirror(t, deleteEnabled) + defer env.Shutdown() + + imageName, _ := reference.ParseNamed("foo/bar") + tag := "latest" + tagRef, _ := reference.WithTag(imageName, tag) + manifestURL, err := env.builder.BuildManifestURL(tagRef) + if err != nil { + t.Fatalf("unexpected error building base url: %v", err) + } + + // Manifest upload + m := &schema1.Manifest{ + Versioned: manifest.Versioned{ + SchemaVersion: 1, + }, + Name: imageName.Name(), + Tag: tag, + FSLayers: []schema1.FSLayer{}, + History: []schema1.History{}, + } + + sm, err := schema1.Sign(m, env.pk) + if err != nil { + t.Fatalf("error signing manifest: %v", err) + } + + resp := putManifest(t, "putting unsigned manifest", manifestURL, "", sm) + checkResponse(t, "putting signed manifest to cache", resp, errcode.ErrorCodeUnsupported.Descriptor().HTTPStatusCode) + + // Manifest Delete + resp, err = httpDelete(manifestURL) + checkResponse(t, "deleting signed manifest from cache", resp, errcode.ErrorCodeUnsupported.Descriptor().HTTPStatusCode) + + // Blob upload initialization + layerUploadURL, err := env.builder.BuildBlobUploadURL(imageName) + if err != nil { + t.Fatalf("unexpected error building layer upload url: %v", err) + } + + resp, err = http.Post(layerUploadURL, "", nil) + if err != nil { + t.Fatalf("unexpected error starting layer push: %v", err) + } + defer resp.Body.Close() + + checkResponse(t, fmt.Sprintf("starting layer push to cache %v", imageName), resp, errcode.ErrorCodeUnsupported.Descriptor().HTTPStatusCode) + + // Blob Delete + ref, _ := reference.WithDigest(imageName, digest.DigestSha256EmptyTar) + blobURL, err := env.builder.BuildBlobURL(ref) + resp, err = httpDelete(blobURL) + checkResponse(t, "deleting blob from cache", resp, errcode.ErrorCodeUnsupported.Descriptor().HTTPStatusCode) + +} + +// TestCheckContextNotifier makes sure the API endpoints get a ResponseWriter +// that implements http.ContextNotifier. 
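A small clarification of the comment above: there is no http.ContextNotifier in the standard library; the interface the test actually asserts on is http.CloseNotifier, which the instrumented ResponseWriter must keep exposing so long-running handlers can notice client disconnects. A hedged sketch of a handler using it, with the route, timing and messages invented for illustration:

package main

import (
	"fmt"
	"log"
	"net/http"
	"time"
)

func main() {
	http.HandleFunc("/slow", func(w http.ResponseWriter, r *http.Request) {
		cn, ok := w.(http.CloseNotifier)
		if !ok {
			http.Error(w, "ResponseWriter does not support CloseNotifier", http.StatusInternalServerError)
			return
		}
		select {
		case <-time.After(2 * time.Second): // simulated slow work
			fmt.Fprintln(w, "done")
		case <-cn.CloseNotify():
			// The client hung up; stop working on its behalf.
			log.Println("client disconnected before the work finished")
		}
	})
	log.Fatal(http.ListenAndServe(":8080", nil))
}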
+func TestCheckContextNotifier(t *testing.T) { + env := newTestEnv(t, false) + defer env.Shutdown() + + // Register a new endpoint for testing + env.app.router.Handle("/unittest/{name}/", env.app.dispatcher(func(ctx *Context, r *http.Request) http.Handler { + return handlers.MethodHandler{ + "GET": http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + if _, ok := w.(http.CloseNotifier); !ok { + t.Fatal("could not cast ResponseWriter to CloseNotifier") + } + w.WriteHeader(200) + }), + } + })) + + resp, err := http.Get(env.server.URL + "/unittest/reponame/") + if err != nil { + t.Fatalf("unexpected error issuing request: %v", err) + } + defer resp.Body.Close() + + if resp.StatusCode != 200 { + t.Fatalf("wrong status code - expected 200, got %d", resp.StatusCode) + } +} + +func TestProxyManifestGetByTag(t *testing.T) { + truthConfig := configuration.Configuration{ + Storage: configuration.Storage{ + "testdriver": configuration.Parameters{}, + "maintenance": configuration.Parameters{"uploadpurging": map[interface{}]interface{}{ + "enabled": false, + }}, + }, + } + truthConfig.HTTP.Headers = headerConfig + + imageName, _ := reference.ParseNamed("foo/bar") + tag := "latest" + + truthEnv := newTestEnvWithConfig(t, &truthConfig) + defer truthEnv.Shutdown() + // create a repository in the truth registry + dgst := createRepository(truthEnv, t, imageName.Name(), tag) + + proxyConfig := configuration.Configuration{ + Storage: configuration.Storage{ + "testdriver": configuration.Parameters{}, + }, + Proxy: configuration.Proxy{ + RemoteURL: truthEnv.server.URL, + }, + } + proxyConfig.HTTP.Headers = headerConfig + + proxyEnv := newTestEnvWithConfig(t, &proxyConfig) + defer proxyEnv.Shutdown() + + digestRef, _ := reference.WithDigest(imageName, dgst) + manifestDigestURL, err := proxyEnv.builder.BuildManifestURL(digestRef) + checkErr(t, err, "building manifest url") + + resp, err := http.Get(manifestDigestURL) + checkErr(t, err, "fetching manifest from proxy by digest") + defer resp.Body.Close() + + tagRef, _ := reference.WithTag(imageName, tag) + manifestTagURL, err := proxyEnv.builder.BuildManifestURL(tagRef) + checkErr(t, err, "building manifest url") + + resp, err = http.Get(manifestTagURL) + checkErr(t, err, "fetching manifest from proxy by tag") + defer resp.Body.Close() + checkResponse(t, "fetching manifest from proxy by tag", resp, http.StatusOK) + checkHeaders(t, resp, http.Header{ + "Docker-Content-Digest": []string{dgst.String()}, + }) + + // Create another manifest in the remote with the same image/tag pair + newDigest := createRepository(truthEnv, t, imageName.Name(), tag) + if dgst == newDigest { + t.Fatalf("non-random test data") + } + + // fetch it with the same proxy URL as before. 
Ensure the updated content is at the same tag + resp, err = http.Get(manifestTagURL) + checkErr(t, err, "fetching manifest from proxy by tag") + defer resp.Body.Close() + checkResponse(t, "fetching manifest from proxy by tag", resp, http.StatusOK) + checkHeaders(t, resp, http.Header{ + "Docker-Content-Digest": []string{newDigest.String()}, + }) +} diff --git a/vendor/github.com/docker/distribution/registry/handlers/app.go b/vendor/github.com/docker/distribution/registry/handlers/app.go new file mode 100644 index 000000000..0f30603f0 --- /dev/null +++ b/vendor/github.com/docker/distribution/registry/handlers/app.go @@ -0,0 +1,1046 @@ +package handlers + +import ( + cryptorand "crypto/rand" + "expvar" + "fmt" + "math/rand" + "net" + "net/http" + "net/url" + "os" + "regexp" + "runtime" + "strings" + "time" + + log "github.com/Sirupsen/logrus" + "github.com/docker/distribution" + "github.com/docker/distribution/configuration" + ctxu "github.com/docker/distribution/context" + "github.com/docker/distribution/health" + "github.com/docker/distribution/health/checks" + "github.com/docker/distribution/notifications" + "github.com/docker/distribution/reference" + "github.com/docker/distribution/registry/api/errcode" + "github.com/docker/distribution/registry/api/v2" + "github.com/docker/distribution/registry/auth" + registrymiddleware "github.com/docker/distribution/registry/middleware/registry" + repositorymiddleware "github.com/docker/distribution/registry/middleware/repository" + "github.com/docker/distribution/registry/proxy" + "github.com/docker/distribution/registry/storage" + memorycache "github.com/docker/distribution/registry/storage/cache/memory" + rediscache "github.com/docker/distribution/registry/storage/cache/redis" + storagedriver "github.com/docker/distribution/registry/storage/driver" + "github.com/docker/distribution/registry/storage/driver/factory" + storagemiddleware "github.com/docker/distribution/registry/storage/driver/middleware" + "github.com/docker/distribution/version" + "github.com/docker/libtrust" + "github.com/garyburd/redigo/redis" + "github.com/gorilla/mux" + "golang.org/x/net/context" +) + +// randomSecretSize is the number of random bytes to generate if no secret +// was specified. +const randomSecretSize = 32 + +// defaultCheckInterval is the default time in between health checks +const defaultCheckInterval = 10 * time.Second + +// App is a global registry application object. Shared resources can be placed +// on this object that will be accessible from all requests. Any writable +// fields should be protected. +type App struct { + context.Context + + Config *configuration.Configuration + + router *mux.Router // main application router, configured with dispatchers + driver storagedriver.StorageDriver // driver maintains the app global storage driver instance. + registry distribution.Namespace // registry is the primary registry backend for the app instance. + accessController auth.AccessController // main access controller for application + + // httpHost is a parsed representation of the http.host parameter from + // the configuration. Only the Scheme and Host fields are used. + httpHost url.URL + + // events contains notification related configuration. + events struct { + sink notifications.Sink + source notifications.SourceRecord + } + + redis *redis.Pool + + // trustKey is a deprecated key used to sign manifests converted to + // schema1 for backward compatibility. It should not be used for any + // other purposes. 
+ trustKey libtrust.PrivateKey + + // isCache is true if this registry is configured as a pull through cache + isCache bool + + // readOnly is true if the registry is in a read-only maintenance mode + readOnly bool +} + +// NewApp takes a configuration and returns a configured app, ready to serve +// requests. The app only implements ServeHTTP and can be wrapped in other +// handlers accordingly. +func NewApp(ctx context.Context, config *configuration.Configuration) *App { + app := &App{ + Config: config, + Context: ctx, + router: v2.RouterWithPrefix(config.HTTP.Prefix), + isCache: config.Proxy.RemoteURL != "", + } + + // Register the handler dispatchers. + app.register(v2.RouteNameBase, func(ctx *Context, r *http.Request) http.Handler { + return http.HandlerFunc(apiBase) + }) + app.register(v2.RouteNameManifest, imageManifestDispatcher) + app.register(v2.RouteNameCatalog, catalogDispatcher) + app.register(v2.RouteNameTags, tagsDispatcher) + app.register(v2.RouteNameBlob, blobDispatcher) + app.register(v2.RouteNameBlobUpload, blobUploadDispatcher) + app.register(v2.RouteNameBlobUploadChunk, blobUploadDispatcher) + + // override the storage driver's UA string for registry outbound HTTP requests + storageParams := config.Storage.Parameters() + if storageParams == nil { + storageParams = make(configuration.Parameters) + } + storageParams["useragent"] = fmt.Sprintf("docker-distribution/%s %s", version.Version, runtime.Version()) + + var err error + app.driver, err = factory.Create(config.Storage.Type(), storageParams) + if err != nil { + // TODO(stevvooe): Move the creation of a service into a protected + // method, where this is created lazily. Its status can be queried via + // a health check. + panic(err) + } + + purgeConfig := uploadPurgeDefaultConfig() + if mc, ok := config.Storage["maintenance"]; ok { + if v, ok := mc["uploadpurging"]; ok { + purgeConfig, ok = v.(map[interface{}]interface{}) + if !ok { + panic("uploadpurging config key must contain additional keys") + } + } + if v, ok := mc["readonly"]; ok { + readOnly, ok := v.(map[interface{}]interface{}) + if !ok { + panic("readonly config key must contain additional keys") + } + if readOnlyEnabled, ok := readOnly["enabled"]; ok { + app.readOnly, ok = readOnlyEnabled.(bool) + if !ok { + panic("readonly's enabled config key must have a boolean value") + } + } + } + } + + startUploadPurger(app, app.driver, ctxu.GetLogger(app), purgeConfig) + + app.driver, err = applyStorageMiddleware(app.driver, config.Middleware["storage"]) + if err != nil { + panic(err) + } + + app.configureSecret(config) + app.configureEvents(config) + app.configureRedis(config) + app.configureLogHook(config) + + options := registrymiddleware.GetRegistryOptions() + if config.Compatibility.Schema1.TrustKey != "" { + app.trustKey, err = libtrust.LoadKeyFile(config.Compatibility.Schema1.TrustKey) + if err != nil { + panic(fmt.Sprintf(`could not load schema1 "signingkey" parameter: %v`, err)) + } + } else { + // Generate an ephemeral key to be used for signing converted manifests + // for clients that don't support schema2. 
+ app.trustKey, err = libtrust.GenerateECP256PrivateKey() + if err != nil { + panic(err) + } + } + + options = append(options, storage.Schema1SigningKey(app.trustKey)) + + if config.HTTP.Host != "" { + u, err := url.Parse(config.HTTP.Host) + if err != nil { + panic(fmt.Sprintf(`could not parse http "host" parameter: %v`, err)) + } + app.httpHost = *u + } + + if app.isCache { + options = append(options, storage.DisableDigestResumption) + } + + // configure deletion + if d, ok := config.Storage["delete"]; ok { + e, ok := d["enabled"] + if ok { + if deleteEnabled, ok := e.(bool); ok && deleteEnabled { + options = append(options, storage.EnableDelete) + } + } + } + + // configure redirects + var redirectDisabled bool + if redirectConfig, ok := config.Storage["redirect"]; ok { + v := redirectConfig["disable"] + switch v := v.(type) { + case bool: + redirectDisabled = v + default: + panic(fmt.Sprintf("invalid type for redirect config: %#v", redirectConfig)) + } + } + if redirectDisabled { + ctxu.GetLogger(app).Infof("backend redirection disabled") + } else { + options = append(options, storage.EnableRedirect) + } + + // configure validation + if config.Validation.Enabled { + if len(config.Validation.Manifests.URLs.Allow) == 0 && len(config.Validation.Manifests.URLs.Deny) == 0 { + // If Allow and Deny are empty, allow nothing. + options = append(options, storage.ManifestURLsAllowRegexp(regexp.MustCompile("^$"))) + } else { + if len(config.Validation.Manifests.URLs.Allow) > 0 { + for i, s := range config.Validation.Manifests.URLs.Allow { + // Validate via compilation. + if _, err := regexp.Compile(s); err != nil { + panic(fmt.Sprintf("validation.manifests.urls.allow: %s", err)) + } + // Wrap with non-capturing group. + config.Validation.Manifests.URLs.Allow[i] = fmt.Sprintf("(?:%s)", s) + } + re := regexp.MustCompile(strings.Join(config.Validation.Manifests.URLs.Allow, "|")) + options = append(options, storage.ManifestURLsAllowRegexp(re)) + } + if len(config.Validation.Manifests.URLs.Deny) > 0 { + for i, s := range config.Validation.Manifests.URLs.Deny { + // Validate via compilation. + if _, err := regexp.Compile(s); err != nil { + panic(fmt.Sprintf("validation.manifests.urls.deny: %s", err)) + } + // Wrap with non-capturing group. + config.Validation.Manifests.URLs.Deny[i] = fmt.Sprintf("(?:%s)", s) + } + re := regexp.MustCompile(strings.Join(config.Validation.Manifests.URLs.Deny, "|")) + options = append(options, storage.ManifestURLsDenyRegexp(re)) + } + } + } + + // configure storage caches + if cc, ok := config.Storage["cache"]; ok { + v, ok := cc["blobdescriptor"] + if !ok { + // Backwards compatible: "layerinfo" == "blobdescriptor" + v = cc["layerinfo"] + } + + switch v { + case "redis": + if app.redis == nil { + panic("redis configuration required to use for layerinfo cache") + } + cacheProvider := rediscache.NewRedisBlobDescriptorCacheProvider(app.redis) + localOptions := append(options, storage.BlobDescriptorCacheProvider(cacheProvider)) + app.registry, err = storage.NewRegistry(app, app.driver, localOptions...) + if err != nil { + panic("could not create registry: " + err.Error()) + } + ctxu.GetLogger(app).Infof("using redis blob descriptor cache") + case "inmemory": + cacheProvider := memorycache.NewInMemoryBlobDescriptorCacheProvider() + localOptions := append(options, storage.BlobDescriptorCacheProvider(cacheProvider)) + app.registry, err = storage.NewRegistry(app, app.driver, localOptions...) 
+ if err != nil { + panic("could not create registry: " + err.Error()) + } + ctxu.GetLogger(app).Infof("using inmemory blob descriptor cache") + default: + if v != "" { + ctxu.GetLogger(app).Warnf("unknown cache type %q, caching disabled", config.Storage["cache"]) + } + } + } + + if app.registry == nil { + // configure the registry if no cache section is available. + app.registry, err = storage.NewRegistry(app.Context, app.driver, options...) + if err != nil { + panic("could not create registry: " + err.Error()) + } + } + + app.registry, err = applyRegistryMiddleware(app, app.registry, config.Middleware["registry"]) + if err != nil { + panic(err) + } + + authType := config.Auth.Type() + + if authType != "" { + accessController, err := auth.GetAccessController(config.Auth.Type(), config.Auth.Parameters()) + if err != nil { + panic(fmt.Sprintf("unable to configure authorization (%s): %v", authType, err)) + } + app.accessController = accessController + ctxu.GetLogger(app).Debugf("configured %q access controller", authType) + } + + // configure as a pull through cache + if config.Proxy.RemoteURL != "" { + app.registry, err = proxy.NewRegistryPullThroughCache(ctx, app.registry, app.driver, config.Proxy) + if err != nil { + panic(err.Error()) + } + app.isCache = true + ctxu.GetLogger(app).Info("Registry configured as a proxy cache to ", config.Proxy.RemoteURL) + } + + return app +} + +// RegisterHealthChecks is an awful hack to defer health check registration +// control to callers. This should only ever be called once per registry +// process, typically in a main function. The correct way would be register +// health checks outside of app, since multiple apps may exist in the same +// process. Because the configuration and app are tightly coupled, +// implementing this properly will require a refactor. This method may panic +// if called twice in the same process. 
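To close out NewApp before the health-check registration below: the *App it returns implements http.Handler, so standing up a registry is essentially one call once a Configuration is in hand. A minimal sketch, under the assumption that the test storage driver used by this package registers itself under the name "testdriver" when imported; a real deployment would pick a production driver and proper server plumbing instead.

package main

import (
	"log"
	"net/http"

	"github.com/docker/distribution/configuration"
	"github.com/docker/distribution/context"
	"github.com/docker/distribution/registry/handlers"
	_ "github.com/docker/distribution/registry/storage/driver/testdriver"
)

func main() {
	config := &configuration.Configuration{
		Storage: configuration.Storage{
			"testdriver": configuration.Parameters{},
			"maintenance": configuration.Parameters{"uploadpurging": map[interface{}]interface{}{
				"enabled": false,
			}},
		},
	}

	app := handlers.NewApp(context.Background(), config)

	// *App implements http.Handler, so it can be served directly.
	log.Fatal(http.ListenAndServe(":5000", app))
}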
+func (app *App) RegisterHealthChecks(healthRegistries ...*health.Registry) { + if len(healthRegistries) > 1 { + panic("RegisterHealthChecks called with more than one registry") + } + healthRegistry := health.DefaultRegistry + if len(healthRegistries) == 1 { + healthRegistry = healthRegistries[0] + } + + if app.Config.Health.StorageDriver.Enabled { + interval := app.Config.Health.StorageDriver.Interval + if interval == 0 { + interval = defaultCheckInterval + } + + storageDriverCheck := func() error { + _, err := app.driver.Stat(app, "/") // "/" should always exist + return err // any error will be treated as failure + } + + if app.Config.Health.StorageDriver.Threshold != 0 { + healthRegistry.RegisterPeriodicThresholdFunc("storagedriver_"+app.Config.Storage.Type(), interval, app.Config.Health.StorageDriver.Threshold, storageDriverCheck) + } else { + healthRegistry.RegisterPeriodicFunc("storagedriver_"+app.Config.Storage.Type(), interval, storageDriverCheck) + } + } + + for _, fileChecker := range app.Config.Health.FileCheckers { + interval := fileChecker.Interval + if interval == 0 { + interval = defaultCheckInterval + } + ctxu.GetLogger(app).Infof("configuring file health check path=%s, interval=%d", fileChecker.File, interval/time.Second) + healthRegistry.Register(fileChecker.File, health.PeriodicChecker(checks.FileChecker(fileChecker.File), interval)) + } + + for _, httpChecker := range app.Config.Health.HTTPCheckers { + interval := httpChecker.Interval + if interval == 0 { + interval = defaultCheckInterval + } + + statusCode := httpChecker.StatusCode + if statusCode == 0 { + statusCode = 200 + } + + checker := checks.HTTPChecker(httpChecker.URI, statusCode, httpChecker.Timeout, httpChecker.Headers) + + if httpChecker.Threshold != 0 { + ctxu.GetLogger(app).Infof("configuring HTTP health check uri=%s, interval=%d, threshold=%d", httpChecker.URI, interval/time.Second, httpChecker.Threshold) + healthRegistry.Register(httpChecker.URI, health.PeriodicThresholdChecker(checker, interval, httpChecker.Threshold)) + } else { + ctxu.GetLogger(app).Infof("configuring HTTP health check uri=%s, interval=%d", httpChecker.URI, interval/time.Second) + healthRegistry.Register(httpChecker.URI, health.PeriodicChecker(checker, interval)) + } + } + + for _, tcpChecker := range app.Config.Health.TCPCheckers { + interval := tcpChecker.Interval + if interval == 0 { + interval = defaultCheckInterval + } + + checker := checks.TCPChecker(tcpChecker.Addr, tcpChecker.Timeout) + + if tcpChecker.Threshold != 0 { + ctxu.GetLogger(app).Infof("configuring TCP health check addr=%s, interval=%d, threshold=%d", tcpChecker.Addr, interval/time.Second, tcpChecker.Threshold) + healthRegistry.Register(tcpChecker.Addr, health.PeriodicThresholdChecker(checker, interval, tcpChecker.Threshold)) + } else { + ctxu.GetLogger(app).Infof("configuring TCP health check addr=%s, interval=%d", tcpChecker.Addr, interval/time.Second) + healthRegistry.Register(tcpChecker.Addr, health.PeriodicChecker(checker, interval)) + } + } +} + +// register a handler with the application, by route name. The handler will be +// passed through the application filters and context will be constructed at +// request time. +func (app *App) register(routeName string, dispatch dispatchFunc) { + + // TODO(stevvooe): This odd dispatcher/route registration is by-product of + // some limitations in the gorilla/mux router. 
We are using it to keep + // routing consistent between the client and server, but we may want to + // replace it with manual routing and structure-based dispatch for better + // control over the request execution. + + app.router.GetRoute(routeName).Handler(app.dispatcher(dispatch)) +} + +// configureEvents prepares the event sink for action. +func (app *App) configureEvents(configuration *configuration.Configuration) { + // Configure all of the endpoint sinks. + var sinks []notifications.Sink + for _, endpoint := range configuration.Notifications.Endpoints { + if endpoint.Disabled { + ctxu.GetLogger(app).Infof("endpoint %s disabled, skipping", endpoint.Name) + continue + } + + ctxu.GetLogger(app).Infof("configuring endpoint %v (%v), timeout=%s, headers=%v", endpoint.Name, endpoint.URL, endpoint.Timeout, endpoint.Headers) + endpoint := notifications.NewEndpoint(endpoint.Name, endpoint.URL, notifications.EndpointConfig{ + Timeout: endpoint.Timeout, + Threshold: endpoint.Threshold, + Backoff: endpoint.Backoff, + Headers: endpoint.Headers, + IgnoredMediaTypes: endpoint.IgnoredMediaTypes, + }) + + sinks = append(sinks, endpoint) + } + + // NOTE(stevvooe): Moving to a new queuing implementation is as easy as + // replacing broadcaster with a rabbitmq implementation. It's recommended + // that the registry instances also act as the workers to keep deployment + // simple. + app.events.sink = notifications.NewBroadcaster(sinks...) + + // Populate registry event source + hostname, err := os.Hostname() + if err != nil { + hostname = configuration.HTTP.Addr + } else { + // try to pick the port off the config + _, port, err := net.SplitHostPort(configuration.HTTP.Addr) + if err == nil { + hostname = net.JoinHostPort(hostname, port) + } + } + + app.events.source = notifications.SourceRecord{ + Addr: hostname, + InstanceID: ctxu.GetStringValue(app, "instance.id"), + } +} + +type redisStartAtKey struct{} + +func (app *App) configureRedis(configuration *configuration.Configuration) { + if configuration.Redis.Addr == "" { + ctxu.GetLogger(app).Infof("redis not configured") + return + } + + pool := &redis.Pool{ + Dial: func() (redis.Conn, error) { + // TODO(stevvooe): Yet another use case for contextual timing. 
+ ctx := context.WithValue(app, redisStartAtKey{}, time.Now()) + + done := func(err error) { + logger := ctxu.GetLoggerWithField(ctx, "redis.connect.duration", + ctxu.Since(ctx, redisStartAtKey{})) + if err != nil { + logger.Errorf("redis: error connecting: %v", err) + } else { + logger.Infof("redis: connect %v", configuration.Redis.Addr) + } + } + + conn, err := redis.DialTimeout("tcp", + configuration.Redis.Addr, + configuration.Redis.DialTimeout, + configuration.Redis.ReadTimeout, + configuration.Redis.WriteTimeout) + if err != nil { + ctxu.GetLogger(app).Errorf("error connecting to redis instance %s: %v", + configuration.Redis.Addr, err) + done(err) + return nil, err + } + + // authorize the connection + if configuration.Redis.Password != "" { + if _, err = conn.Do("AUTH", configuration.Redis.Password); err != nil { + defer conn.Close() + done(err) + return nil, err + } + } + + // select the database to use + if configuration.Redis.DB != 0 { + if _, err = conn.Do("SELECT", configuration.Redis.DB); err != nil { + defer conn.Close() + done(err) + return nil, err + } + } + + done(nil) + return conn, nil + }, + MaxIdle: configuration.Redis.Pool.MaxIdle, + MaxActive: configuration.Redis.Pool.MaxActive, + IdleTimeout: configuration.Redis.Pool.IdleTimeout, + TestOnBorrow: func(c redis.Conn, t time.Time) error { + // TODO(stevvooe): We can probably do something more interesting + // here with the health package. + _, err := c.Do("PING") + return err + }, + Wait: false, // if a connection is not avialable, proceed without cache. + } + + app.redis = pool + + // setup expvar + registry := expvar.Get("registry") + if registry == nil { + registry = expvar.NewMap("registry") + } + + registry.(*expvar.Map).Set("redis", expvar.Func(func() interface{} { + return map[string]interface{}{ + "Config": configuration.Redis, + "Active": app.redis.ActiveCount(), + } + })) +} + +// configureLogHook prepares logging hook parameters. +func (app *App) configureLogHook(configuration *configuration.Configuration) { + entry, ok := ctxu.GetLogger(app).(*log.Entry) + if !ok { + // somehow, we are not using logrus + return + } + + logger := entry.Logger + + for _, configHook := range configuration.Log.Hooks { + if !configHook.Disabled { + switch configHook.Type { + case "mail": + hook := &logHook{} + hook.LevelsParam = configHook.Levels + hook.Mail = &mailer{ + Addr: configHook.MailOptions.SMTP.Addr, + Username: configHook.MailOptions.SMTP.Username, + Password: configHook.MailOptions.SMTP.Password, + Insecure: configHook.MailOptions.SMTP.Insecure, + From: configHook.MailOptions.From, + To: configHook.MailOptions.To, + } + logger.Hooks.Add(hook) + default: + } + } + } +} + +// configureSecret creates a random secret if a secret wasn't included in the +// configuration. +func (app *App) configureSecret(configuration *configuration.Configuration) { + if configuration.HTTP.Secret == "" { + var secretBytes [randomSecretSize]byte + if _, err := cryptorand.Read(secretBytes[:]); err != nil { + panic(fmt.Sprintf("could not generate random bytes for HTTP secret: %v", err)) + } + configuration.HTTP.Secret = string(secretBytes[:]) + ctxu.GetLogger(app).Warn("No HTTP secret provided - generated random secret. This may cause problems with uploads if multiple registries are behind a load-balancer. 
To provide a shared secret, fill in http.secret in the configuration file or set the REGISTRY_HTTP_SECRET environment variable.") + } +} + +func (app *App) ServeHTTP(w http.ResponseWriter, r *http.Request) { + defer r.Body.Close() // ensure that request body is always closed. + + // Instantiate an http context here so we can track the error codes + // returned by the request router. + ctx := defaultContextManager.context(app, w, r) + + defer func() { + status, ok := ctx.Value("http.response.status").(int) + if ok && status >= 200 && status <= 399 { + ctxu.GetResponseLogger(ctx).Infof("response completed") + } + }() + defer defaultContextManager.release(ctx) + + // NOTE(stevvooe): Total hack to get instrumented responsewriter from context. + var err error + w, err = ctxu.GetResponseWriter(ctx) + if err != nil { + ctxu.GetLogger(ctx).Warnf("response writer not found in context") + } + + // Set a header with the Docker Distribution API Version for all responses. + w.Header().Add("Docker-Distribution-API-Version", "registry/2.0") + app.router.ServeHTTP(w, r) +} + +// dispatchFunc takes a context and request and returns a constructed handler +// for the route. The dispatcher will use this to dynamically create request +// specific handlers for each endpoint without creating a new router for each +// request. +type dispatchFunc func(ctx *Context, r *http.Request) http.Handler + +// TODO(stevvooe): dispatchers should probably have some validation error +// chain with proper error reporting. + +// dispatcher returns a handler that constructs a request specific context and +// handler, using the dispatch factory function. +func (app *App) dispatcher(dispatch dispatchFunc) http.Handler { + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + for headerName, headerValues := range app.Config.HTTP.Headers { + for _, value := range headerValues { + w.Header().Add(headerName, value) + } + } + + context := app.context(w, r) + + if err := app.authorized(w, r, context); err != nil { + ctxu.GetLogger(context).Warnf("error authorizing context: %v", err) + return + } + + // Add username to request logging + context.Context = ctxu.WithLogger(context.Context, ctxu.GetLogger(context.Context, auth.UserNameKey)) + + if app.nameRequired(r) { + nameRef, err := reference.ParseNamed(getName(context)) + if err != nil { + ctxu.GetLogger(context).Errorf("error parsing reference from context: %v", err) + context.Errors = append(context.Errors, distribution.ErrRepositoryNameInvalid{ + Name: getName(context), + Reason: err, + }) + if err := errcode.ServeJSON(w, context.Errors); err != nil { + ctxu.GetLogger(context).Errorf("error serving error json: %v (from %v)", err, context.Errors) + } + return + } + repository, err := app.registry.Repository(context, nameRef) + + if err != nil { + ctxu.GetLogger(context).Errorf("error resolving repository: %v", err) + + switch err := err.(type) { + case distribution.ErrRepositoryUnknown: + context.Errors = append(context.Errors, v2.ErrorCodeNameUnknown.WithDetail(err)) + case distribution.ErrRepositoryNameInvalid: + context.Errors = append(context.Errors, v2.ErrorCodeNameInvalid.WithDetail(err)) + case errcode.Error: + context.Errors = append(context.Errors, err) + } + + if err := errcode.ServeJSON(w, context.Errors); err != nil { + ctxu.GetLogger(context).Errorf("error serving error json: %v (from %v)", err, context.Errors) + } + return + } + + // assign and decorate the authorized repository with an event bridge. 
+ context.Repository = notifications.Listen( + repository, + app.eventBridge(context, r)) + + context.Repository, err = applyRepoMiddleware(app, context.Repository, app.Config.Middleware["repository"]) + if err != nil { + ctxu.GetLogger(context).Errorf("error initializing repository middleware: %v", err) + context.Errors = append(context.Errors, errcode.ErrorCodeUnknown.WithDetail(err)) + + if err := errcode.ServeJSON(w, context.Errors); err != nil { + ctxu.GetLogger(context).Errorf("error serving error json: %v (from %v)", err, context.Errors) + } + return + } + } + + dispatch(context, r).ServeHTTP(w, r) + // Automated error response handling here. Handlers may return their + // own errors if they need different behavior (such as range errors + // for layer upload). + if context.Errors.Len() > 0 { + if err := errcode.ServeJSON(w, context.Errors); err != nil { + ctxu.GetLogger(context).Errorf("error serving error json: %v (from %v)", err, context.Errors) + } + + app.logError(context, context.Errors) + } + }) +} + +type errCodeKey struct{} + +func (errCodeKey) String() string { return "err.code" } + +type errMessageKey struct{} + +func (errMessageKey) String() string { return "err.message" } + +type errDetailKey struct{} + +func (errDetailKey) String() string { return "err.detail" } + +func (app *App) logError(context context.Context, errors errcode.Errors) { + for _, e1 := range errors { + var c ctxu.Context + + switch e1.(type) { + case errcode.Error: + e, _ := e1.(errcode.Error) + c = ctxu.WithValue(context, errCodeKey{}, e.Code) + c = ctxu.WithValue(c, errMessageKey{}, e.Code.Message()) + c = ctxu.WithValue(c, errDetailKey{}, e.Detail) + case errcode.ErrorCode: + e, _ := e1.(errcode.ErrorCode) + c = ctxu.WithValue(context, errCodeKey{}, e) + c = ctxu.WithValue(c, errMessageKey{}, e.Message()) + default: + // just normal go 'error' + c = ctxu.WithValue(context, errCodeKey{}, errcode.ErrorCodeUnknown) + c = ctxu.WithValue(c, errMessageKey{}, e1.Error()) + } + + c = ctxu.WithLogger(c, ctxu.GetLogger(c, + errCodeKey{}, + errMessageKey{}, + errDetailKey{})) + ctxu.GetResponseLogger(c).Errorf("response completed with error") + } +} + +// context constructs the context object for the application. This only be +// called once per request. +func (app *App) context(w http.ResponseWriter, r *http.Request) *Context { + ctx := defaultContextManager.context(app, w, r) + ctx = ctxu.WithVars(ctx, r) + ctx = ctxu.WithLogger(ctx, ctxu.GetLogger(ctx, + "vars.name", + "vars.reference", + "vars.digest", + "vars.uuid")) + + context := &Context{ + App: app, + Context: ctx, + } + + if app.httpHost.Scheme != "" && app.httpHost.Host != "" { + // A "host" item in the configuration takes precedence over + // X-Forwarded-Proto and X-Forwarded-Host headers, and the + // hostname in the request. + context.urlBuilder = v2.NewURLBuilder(&app.httpHost, false) + } else { + context.urlBuilder = v2.NewURLBuilderFromRequest(r, app.Config.HTTP.RelativeURLs) + } + + return context +} + +// authorized checks if the request can proceed with access to the requested +// repository. If it succeeds, the context may access the requested +// repository. An error will be returned if access is not available. +func (app *App) authorized(w http.ResponseWriter, r *http.Request, context *Context) error { + ctxu.GetLogger(context).Debug("authorizing request") + repo := getName(context) + + if app.accessController == nil { + return nil // access controller is not enabled. 
+ } + + var accessRecords []auth.Access + + if repo != "" { + accessRecords = appendAccessRecords(accessRecords, r.Method, repo) + if fromRepo := r.FormValue("from"); fromRepo != "" { + // mounting a blob from one repository to another requires pull (GET) + // access to the source repository. + accessRecords = appendAccessRecords(accessRecords, "GET", fromRepo) + } + } else { + // Only allow the name not to be set on the base route. + if app.nameRequired(r) { + // For this to be properly secured, repo must always be set for a + // resource that may make a modification. The only condition under + // which name is not set and we still allow access is when the + // base route is accessed. This section prevents us from making + // that mistake elsewhere in the code, allowing any operation to + // proceed. + if err := errcode.ServeJSON(w, errcode.ErrorCodeUnauthorized); err != nil { + ctxu.GetLogger(context).Errorf("error serving error json: %v (from %v)", err, context.Errors) + } + return fmt.Errorf("forbidden: no repository name") + } + accessRecords = appendCatalogAccessRecord(accessRecords, r) + } + + ctx, err := app.accessController.Authorized(context.Context, accessRecords...) + if err != nil { + switch err := err.(type) { + case auth.Challenge: + // Add the appropriate WWW-Auth header + err.SetHeaders(w) + + if err := errcode.ServeJSON(w, errcode.ErrorCodeUnauthorized.WithDetail(accessRecords)); err != nil { + ctxu.GetLogger(context).Errorf("error serving error json: %v (from %v)", err, context.Errors) + } + default: + // This condition is a potential security problem either in + // the configuration or whatever is backing the access + // controller. Just return a bad request with no information + // to avoid exposure. The request should not proceed. + ctxu.GetLogger(context).Errorf("error checking authorization: %v", err) + w.WriteHeader(http.StatusBadRequest) + } + + return err + } + + // TODO(stevvooe): This pattern needs to be cleaned up a bit. One context + // should be replaced by another, rather than replacing the context on a + // mutable object. + context.Context = ctx + return nil +} + +// eventBridge returns a bridge for the current request, configured with the +// correct actor and source. +func (app *App) eventBridge(ctx *Context, r *http.Request) notifications.Listener { + actor := notifications.ActorRecord{ + Name: getUserName(ctx, r), + } + request := notifications.NewRequestRecord(ctxu.GetRequestID(ctx), r) + + return notifications.NewBridge(ctx.urlBuilder, app.events.source, actor, request, app.events.sink) +} + +// nameRequired returns true if the route requires a name. +func (app *App) nameRequired(r *http.Request) bool { + route := mux.CurrentRoute(r) + routeName := route.GetName() + return route == nil || (routeName != v2.RouteNameBase && routeName != v2.RouteNameCatalog) +} + +// apiBase implements a simple yes-man for doing overall checks against the +// api. This can support auth roundtrips to support docker login. +func apiBase(w http.ResponseWriter, r *http.Request) { + const emptyJSON = "{}" + // Provide a simple /v2/ 200 OK response with empty json response. + w.Header().Set("Content-Type", "application/json; charset=utf-8") + w.Header().Set("Content-Length", fmt.Sprint(len(emptyJSON))) + + fmt.Fprint(w, emptyJSON) +} + +// appendAccessRecords checks the method and adds the appropriate Access records to the records list. 
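appendAccessRecords, defined next, is conceptually just a table from HTTP verb to the token actions required on the repository: GET and HEAD need pull; POST, PUT and PATCH need pull plus push; DELETE needs the catch-all "*". TestAppendAccessRecords at the bottom of app_test.go pins down exactly this mapping. An illustrative standalone copy of the rule, not the code the registry runs:

package main

import "fmt"

// requiredActions mirrors the verb-to-action rule appendAccessRecords encodes.
func requiredActions(method string) []string {
	switch method {
	case "GET", "HEAD":
		return []string{"pull"}
	case "POST", "PUT", "PATCH":
		return []string{"pull", "push"}
	case "DELETE":
		return []string{"*"}
	}
	return nil
}

func main() {
	for _, m := range []string{"GET", "HEAD", "POST", "PUT", "PATCH", "DELETE"} {
		fmt.Println(m, "->", requiredActions(m))
	}
}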
+func appendAccessRecords(records []auth.Access, method string, repo string) []auth.Access { + resource := auth.Resource{ + Type: "repository", + Name: repo, + } + + switch method { + case "GET", "HEAD": + records = append(records, + auth.Access{ + Resource: resource, + Action: "pull", + }) + case "POST", "PUT", "PATCH": + records = append(records, + auth.Access{ + Resource: resource, + Action: "pull", + }, + auth.Access{ + Resource: resource, + Action: "push", + }) + case "DELETE": + // DELETE access requires full admin rights, which is represented + // as "*". This may not be ideal. + records = append(records, + auth.Access{ + Resource: resource, + Action: "*", + }) + } + return records +} + +// Add the access record for the catalog if it's our current route +func appendCatalogAccessRecord(accessRecords []auth.Access, r *http.Request) []auth.Access { + route := mux.CurrentRoute(r) + routeName := route.GetName() + + if routeName == v2.RouteNameCatalog { + resource := auth.Resource{ + Type: "registry", + Name: "catalog", + } + + accessRecords = append(accessRecords, + auth.Access{ + Resource: resource, + Action: "*", + }) + } + return accessRecords +} + +// applyRegistryMiddleware wraps a registry instance with the configured middlewares +func applyRegistryMiddleware(ctx context.Context, registry distribution.Namespace, middlewares []configuration.Middleware) (distribution.Namespace, error) { + for _, mw := range middlewares { + rmw, err := registrymiddleware.Get(ctx, mw.Name, mw.Options, registry) + if err != nil { + return nil, fmt.Errorf("unable to configure registry middleware (%s): %s", mw.Name, err) + } + registry = rmw + } + return registry, nil + +} + +// applyRepoMiddleware wraps a repository with the configured middlewares +func applyRepoMiddleware(ctx context.Context, repository distribution.Repository, middlewares []configuration.Middleware) (distribution.Repository, error) { + for _, mw := range middlewares { + rmw, err := repositorymiddleware.Get(ctx, mw.Name, mw.Options, repository) + if err != nil { + return nil, err + } + repository = rmw + } + return repository, nil +} + +// applyStorageMiddleware wraps a storage driver with the configured middlewares +func applyStorageMiddleware(driver storagedriver.StorageDriver, middlewares []configuration.Middleware) (storagedriver.StorageDriver, error) { + for _, mw := range middlewares { + smw, err := storagemiddleware.Get(mw.Name, mw.Options, driver) + if err != nil { + return nil, fmt.Errorf("unable to configure storage middleware (%s): %v", mw.Name, err) + } + driver = smw + } + return driver, nil +} + +// uploadPurgeDefaultConfig provides a default configuration for upload +// purging to be used in the absence of configuration in the +// confifuration file +func uploadPurgeDefaultConfig() map[interface{}]interface{} { + config := map[interface{}]interface{}{} + config["enabled"] = true + config["age"] = "168h" + config["interval"] = "24h" + config["dryrun"] = false + return config +} + +func badPurgeUploadConfig(reason string) { + panic(fmt.Sprintf("Unable to parse upload purge configuration: %s", reason)) +} + +// startUploadPurger schedules a goroutine which will periodically +// check upload directories for old files and delete them +func startUploadPurger(ctx context.Context, storageDriver storagedriver.StorageDriver, log ctxu.Logger, config map[interface{}]interface{}) { + if config["enabled"] == false { + return + } + + var purgeAgeDuration time.Duration + var err error + purgeAge, ok := config["age"] + if ok { + ageStr, 
ok := purgeAge.(string) + if !ok { + badPurgeUploadConfig("age is not a string") + } + purgeAgeDuration, err = time.ParseDuration(ageStr) + if err != nil { + badPurgeUploadConfig(fmt.Sprintf("Cannot parse duration: %s", err.Error())) + } + } else { + badPurgeUploadConfig("age missing") + } + + var intervalDuration time.Duration + interval, ok := config["interval"] + if ok { + intervalStr, ok := interval.(string) + if !ok { + badPurgeUploadConfig("interval is not a string") + } + + intervalDuration, err = time.ParseDuration(intervalStr) + if err != nil { + badPurgeUploadConfig(fmt.Sprintf("Cannot parse interval: %s", err.Error())) + } + } else { + badPurgeUploadConfig("interval missing") + } + + var dryRunBool bool + dryRun, ok := config["dryrun"] + if ok { + dryRunBool, ok = dryRun.(bool) + if !ok { + badPurgeUploadConfig("cannot parse dryrun") + } + } else { + badPurgeUploadConfig("dryrun missing") + } + + go func() { + rand.Seed(time.Now().Unix()) + jitter := time.Duration(rand.Int()%60) * time.Minute + log.Infof("Starting upload purge in %s", jitter) + time.Sleep(jitter) + + for { + storage.PurgeUploads(ctx, storageDriver, time.Now().Add(-purgeAgeDuration), !dryRunBool) + log.Infof("Starting upload purge in %s", intervalDuration) + time.Sleep(intervalDuration) + } + }() +} diff --git a/vendor/github.com/docker/distribution/registry/handlers/app_test.go b/vendor/github.com/docker/distribution/registry/handlers/app_test.go new file mode 100644 index 000000000..385fa4c6b --- /dev/null +++ b/vendor/github.com/docker/distribution/registry/handlers/app_test.go @@ -0,0 +1,279 @@ +package handlers + +import ( + "encoding/json" + "net/http" + "net/http/httptest" + "net/url" + "reflect" + "testing" + + "github.com/docker/distribution/configuration" + "github.com/docker/distribution/context" + "github.com/docker/distribution/registry/api/errcode" + "github.com/docker/distribution/registry/api/v2" + "github.com/docker/distribution/registry/auth" + _ "github.com/docker/distribution/registry/auth/silly" + "github.com/docker/distribution/registry/storage" + memorycache "github.com/docker/distribution/registry/storage/cache/memory" + "github.com/docker/distribution/registry/storage/driver/testdriver" +) + +// TestAppDispatcher builds an application with a test dispatcher and ensures +// that requests are properly dispatched and the handlers are constructed. +// This only tests the dispatch mechanism. The underlying dispatchers must be +// tested individually. 
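One note before the handler tests that follow: startUploadPurger above takes its settings as the loosely typed map produced by the YAML configuration parser, with the keys shown in uploadPurgeDefaultConfig. A sketch of overriding those defaults, with durations chosen purely as examples:

package main

import "fmt"

func main() {
	// Keys and value types startUploadPurger expects; the durations here are
	// arbitrary examples, not recommended settings.
	purgeConfig := map[interface{}]interface{}{
		"enabled":  true,
		"age":      "72h", // uploads older than this get purged
		"interval": "6h",  // how often the purger scans
		"dryrun":   false, // true skips the actual deletes
	}
	fmt.Println(purgeConfig)
}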
+func TestAppDispatcher(t *testing.T) { + driver := testdriver.New() + ctx := context.Background() + registry, err := storage.NewRegistry(ctx, driver, storage.BlobDescriptorCacheProvider(memorycache.NewInMemoryBlobDescriptorCacheProvider()), storage.EnableDelete, storage.EnableRedirect) + if err != nil { + t.Fatalf("error creating registry: %v", err) + } + app := &App{ + Config: &configuration.Configuration{}, + Context: ctx, + router: v2.Router(), + driver: driver, + registry: registry, + } + server := httptest.NewServer(app) + defer server.Close() + router := v2.Router() + + serverURL, err := url.Parse(server.URL) + if err != nil { + t.Fatalf("error parsing server url: %v", err) + } + + varCheckingDispatcher := func(expectedVars map[string]string) dispatchFunc { + return func(ctx *Context, r *http.Request) http.Handler { + // Always checks the same name context + if ctx.Repository.Named().Name() != getName(ctx) { + t.Fatalf("unexpected name: %q != %q", ctx.Repository.Named().Name(), "foo/bar") + } + + // Check that we have all that is expected + for expectedK, expectedV := range expectedVars { + if ctx.Value(expectedK) != expectedV { + t.Fatalf("unexpected %s in context vars: %q != %q", expectedK, ctx.Value(expectedK), expectedV) + } + } + + // Check that we only have variables that are expected + for k, v := range ctx.Value("vars").(map[string]string) { + _, ok := expectedVars[k] + + if !ok { // name is checked on context + // We have an unexpected key, fail + t.Fatalf("unexpected key %q in vars with value %q", k, v) + } + } + + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(http.StatusOK) + }) + } + } + + // unflatten a list of variables, suitable for gorilla/mux, to a map[string]string + unflatten := func(vars []string) map[string]string { + m := make(map[string]string) + for i := 0; i < len(vars)-1; i = i + 2 { + m[vars[i]] = vars[i+1] + } + + return m + } + + for _, testcase := range []struct { + endpoint string + vars []string + }{ + { + endpoint: v2.RouteNameManifest, + vars: []string{ + "name", "foo/bar", + "reference", "sometag", + }, + }, + { + endpoint: v2.RouteNameTags, + vars: []string{ + "name", "foo/bar", + }, + }, + { + endpoint: v2.RouteNameBlobUpload, + vars: []string{ + "name", "foo/bar", + }, + }, + { + endpoint: v2.RouteNameBlobUploadChunk, + vars: []string{ + "name", "foo/bar", + "uuid", "theuuid", + }, + }, + } { + app.register(testcase.endpoint, varCheckingDispatcher(unflatten(testcase.vars))) + route := router.GetRoute(testcase.endpoint).Host(serverURL.Host) + u, err := route.URL(testcase.vars...) + + if err != nil { + t.Fatal(err) + } + + resp, err := http.Get(u.String()) + + if err != nil { + t.Fatal(err) + } + + if resp.StatusCode != http.StatusOK { + t.Fatalf("unexpected status code: %v != %v", resp.StatusCode, http.StatusOK) + } + } +} + +// TestNewApp covers the creation of an application via NewApp with a +// configuration. +func TestNewApp(t *testing.T) { + ctx := context.Background() + config := configuration.Configuration{ + Storage: configuration.Storage{ + "testdriver": nil, + "maintenance": configuration.Parameters{"uploadpurging": map[interface{}]interface{}{ + "enabled": false, + }}, + }, + Auth: configuration.Auth{ + // For now, we simply test that new auth results in a viable + // application. + "silly": { + "realm": "realm-test", + "service": "service-test", + }, + }, + } + + // Mostly, with this test, given a sane configuration, we are simply + // ensuring that NewApp doesn't panic. 
We might want to tweak this + // behavior. + app := NewApp(ctx, &config) + + server := httptest.NewServer(app) + defer server.Close() + builder, err := v2.NewURLBuilderFromString(server.URL, false) + if err != nil { + t.Fatalf("error creating urlbuilder: %v", err) + } + + baseURL, err := builder.BuildBaseURL() + if err != nil { + t.Fatalf("error creating baseURL: %v", err) + } + + // TODO(stevvooe): The rest of this test might belong in the API tests. + + // Just hit the app and make sure we get a 401 Unauthorized error. + req, err := http.Get(baseURL) + if err != nil { + t.Fatalf("unexpected error during GET: %v", err) + } + defer req.Body.Close() + + if req.StatusCode != http.StatusUnauthorized { + t.Fatalf("unexpected status code during request: %v", err) + } + + if req.Header.Get("Content-Type") != "application/json; charset=utf-8" { + t.Fatalf("unexpected content-type: %v != %v", req.Header.Get("Content-Type"), "application/json; charset=utf-8") + } + + expectedAuthHeader := "Bearer realm=\"realm-test\",service=\"service-test\"" + if e, a := expectedAuthHeader, req.Header.Get("WWW-Authenticate"); e != a { + t.Fatalf("unexpected WWW-Authenticate header: %q != %q", e, a) + } + + var errs errcode.Errors + dec := json.NewDecoder(req.Body) + if err := dec.Decode(&errs); err != nil { + t.Fatalf("error decoding error response: %v", err) + } + + err2, ok := errs[0].(errcode.ErrorCoder) + if !ok { + t.Fatalf("not an ErrorCoder: %#v", errs[0]) + } + if err2.ErrorCode() != errcode.ErrorCodeUnauthorized { + t.Fatalf("unexpected error code: %v != %v", err2.ErrorCode(), errcode.ErrorCodeUnauthorized) + } +} + +// Test the access record accumulator +func TestAppendAccessRecords(t *testing.T) { + repo := "testRepo" + + expectedResource := auth.Resource{ + Type: "repository", + Name: repo, + } + + expectedPullRecord := auth.Access{ + Resource: expectedResource, + Action: "pull", + } + expectedPushRecord := auth.Access{ + Resource: expectedResource, + Action: "push", + } + expectedAllRecord := auth.Access{ + Resource: expectedResource, + Action: "*", + } + + records := []auth.Access{} + result := appendAccessRecords(records, "GET", repo) + expectedResult := []auth.Access{expectedPullRecord} + if ok := reflect.DeepEqual(result, expectedResult); !ok { + t.Fatalf("Actual access record differs from expected") + } + + records = []auth.Access{} + result = appendAccessRecords(records, "HEAD", repo) + expectedResult = []auth.Access{expectedPullRecord} + if ok := reflect.DeepEqual(result, expectedResult); !ok { + t.Fatalf("Actual access record differs from expected") + } + + records = []auth.Access{} + result = appendAccessRecords(records, "POST", repo) + expectedResult = []auth.Access{expectedPullRecord, expectedPushRecord} + if ok := reflect.DeepEqual(result, expectedResult); !ok { + t.Fatalf("Actual access record differs from expected") + } + + records = []auth.Access{} + result = appendAccessRecords(records, "PUT", repo) + expectedResult = []auth.Access{expectedPullRecord, expectedPushRecord} + if ok := reflect.DeepEqual(result, expectedResult); !ok { + t.Fatalf("Actual access record differs from expected") + } + + records = []auth.Access{} + result = appendAccessRecords(records, "PATCH", repo) + expectedResult = []auth.Access{expectedPullRecord, expectedPushRecord} + if ok := reflect.DeepEqual(result, expectedResult); !ok { + t.Fatalf("Actual access record differs from expected") + } + + records = []auth.Access{} + result = appendAccessRecords(records, "DELETE", repo) + expectedResult = 
[]auth.Access{expectedAllRecord} + if ok := reflect.DeepEqual(result, expectedResult); !ok { + t.Fatalf("Actual access record differs from expected") + } + +} diff --git a/vendor/github.com/docker/distribution/registry/handlers/basicauth.go b/vendor/github.com/docker/distribution/registry/handlers/basicauth.go new file mode 100644 index 000000000..8727a3cd1 --- /dev/null +++ b/vendor/github.com/docker/distribution/registry/handlers/basicauth.go @@ -0,0 +1,11 @@ +// +build go1.4 + +package handlers + +import ( + "net/http" +) + +func basicAuth(r *http.Request) (username, password string, ok bool) { + return r.BasicAuth() +} diff --git a/vendor/github.com/docker/distribution/registry/handlers/basicauth_prego14.go b/vendor/github.com/docker/distribution/registry/handlers/basicauth_prego14.go new file mode 100644 index 000000000..6cf10a25e --- /dev/null +++ b/vendor/github.com/docker/distribution/registry/handlers/basicauth_prego14.go @@ -0,0 +1,41 @@ +// +build !go1.4 + +package handlers + +import ( + "encoding/base64" + "net/http" + "strings" +) + +// NOTE(stevvooe): This is basic auth support from go1.4 present to ensure we +// can compile on go1.3 and earlier. + +// BasicAuth returns the username and password provided in the request's +// Authorization header, if the request uses HTTP Basic Authentication. +// See RFC 2617, Section 2. +func basicAuth(r *http.Request) (username, password string, ok bool) { + auth := r.Header.Get("Authorization") + if auth == "" { + return + } + return parseBasicAuth(auth) +} + +// parseBasicAuth parses an HTTP Basic Authentication string. +// "Basic QWxhZGRpbjpvcGVuIHNlc2FtZQ==" returns ("Aladdin", "open sesame", true). +func parseBasicAuth(auth string) (username, password string, ok bool) { + if !strings.HasPrefix(auth, "Basic ") { + return + } + c, err := base64.StdEncoding.DecodeString(strings.TrimPrefix(auth, "Basic ")) + if err != nil { + return + } + cs := string(c) + s := strings.IndexByte(cs, ':') + if s < 0 { + return + } + return cs[:s], cs[s+1:], true +} diff --git a/vendor/github.com/docker/distribution/registry/handlers/blob.go b/vendor/github.com/docker/distribution/registry/handlers/blob.go new file mode 100644 index 000000000..fb250acd2 --- /dev/null +++ b/vendor/github.com/docker/distribution/registry/handlers/blob.go @@ -0,0 +1,99 @@ +package handlers + +import ( + "net/http" + + "github.com/docker/distribution" + "github.com/docker/distribution/context" + "github.com/docker/distribution/digest" + "github.com/docker/distribution/registry/api/errcode" + "github.com/docker/distribution/registry/api/v2" + "github.com/gorilla/handlers" +) + +// blobDispatcher uses the request context to build a blobHandler. +func blobDispatcher(ctx *Context, r *http.Request) http.Handler { + dgst, err := getDigest(ctx) + if err != nil { + + if err == errDigestNotAvailable { + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + ctx.Errors = append(ctx.Errors, v2.ErrorCodeDigestInvalid.WithDetail(err)) + }) + } + + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + ctx.Errors = append(ctx.Errors, v2.ErrorCodeDigestInvalid.WithDetail(err)) + }) + } + + blobHandler := &blobHandler{ + Context: ctx, + Digest: dgst, + } + + mhandler := handlers.MethodHandler{ + "GET": http.HandlerFunc(blobHandler.GetBlob), + "HEAD": http.HandlerFunc(blobHandler.GetBlob), + } + + if !ctx.readOnly { + mhandler["DELETE"] = http.HandlerFunc(blobHandler.DeleteBlob) + } + + return mhandler +} + +// blobHandler serves http blob requests. 
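blobDispatcher above illustrates the per-route pattern used throughout this package: build a gorilla handlers.MethodHandler keyed by verb, and only register the mutating verbs when the registry is not read-only, letting the map answer everything else with 405 Method Not Allowed. The same idea in a tiny standalone server; the route and responses are made up for illustration.

package main

import (
	"fmt"
	"log"
	"net/http"

	"github.com/gorilla/handlers"
)

func main() {
	readOnly := true // e.g. a maintenance window

	h := handlers.MethodHandler{
		"GET": http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
			fmt.Fprintln(w, "read-only view")
		}),
		"HEAD": http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {}),
	}
	if !readOnly {
		// Mutating verbs are only wired up when writes are allowed.
		h["DELETE"] = http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
			w.WriteHeader(http.StatusAccepted)
		})
	}

	// Verbs missing from the map get 405 Method Not Allowed automatically.
	http.Handle("/resource", h)
	log.Fatal(http.ListenAndServe(":8080", nil))
}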
+type blobHandler struct { + *Context + + Digest digest.Digest +} + +// GetBlob fetches the binary data from backend storage returns it in the +// response. +func (bh *blobHandler) GetBlob(w http.ResponseWriter, r *http.Request) { + context.GetLogger(bh).Debug("GetBlob") + blobs := bh.Repository.Blobs(bh) + desc, err := blobs.Stat(bh, bh.Digest) + if err != nil { + if err == distribution.ErrBlobUnknown { + bh.Errors = append(bh.Errors, v2.ErrorCodeBlobUnknown.WithDetail(bh.Digest)) + } else { + bh.Errors = append(bh.Errors, errcode.ErrorCodeUnknown.WithDetail(err)) + } + return + } + + if err := blobs.ServeBlob(bh, w, r, desc.Digest); err != nil { + context.GetLogger(bh).Debugf("unexpected error getting blob HTTP handler: %v", err) + bh.Errors = append(bh.Errors, errcode.ErrorCodeUnknown.WithDetail(err)) + return + } +} + +// DeleteBlob deletes a layer blob +func (bh *blobHandler) DeleteBlob(w http.ResponseWriter, r *http.Request) { + context.GetLogger(bh).Debug("DeleteBlob") + + blobs := bh.Repository.Blobs(bh) + err := blobs.Delete(bh, bh.Digest) + if err != nil { + switch err { + case distribution.ErrUnsupported: + bh.Errors = append(bh.Errors, errcode.ErrorCodeUnsupported) + return + case distribution.ErrBlobUnknown: + bh.Errors = append(bh.Errors, v2.ErrorCodeBlobUnknown) + return + default: + bh.Errors = append(bh.Errors, err) + context.GetLogger(bh).Errorf("Unknown error deleting blob: %s", err.Error()) + return + } + } + + w.Header().Set("Content-Length", "0") + w.WriteHeader(http.StatusAccepted) +} diff --git a/vendor/github.com/docker/distribution/registry/handlers/blobupload.go b/vendor/github.com/docker/distribution/registry/handlers/blobupload.go new file mode 100644 index 000000000..3afb47398 --- /dev/null +++ b/vendor/github.com/docker/distribution/registry/handlers/blobupload.go @@ -0,0 +1,368 @@ +package handlers + +import ( + "fmt" + "net/http" + "net/url" + + "github.com/docker/distribution" + ctxu "github.com/docker/distribution/context" + "github.com/docker/distribution/digest" + "github.com/docker/distribution/reference" + "github.com/docker/distribution/registry/api/errcode" + "github.com/docker/distribution/registry/api/v2" + "github.com/docker/distribution/registry/storage" + "github.com/gorilla/handlers" +) + +// blobUploadDispatcher constructs and returns the blob upload handler for the +// given request context. 
+func blobUploadDispatcher(ctx *Context, r *http.Request) http.Handler { + buh := &blobUploadHandler{ + Context: ctx, + UUID: getUploadUUID(ctx), + } + + handler := handlers.MethodHandler{ + "GET": http.HandlerFunc(buh.GetUploadStatus), + "HEAD": http.HandlerFunc(buh.GetUploadStatus), + } + + if !ctx.readOnly { + handler["POST"] = http.HandlerFunc(buh.StartBlobUpload) + handler["PATCH"] = http.HandlerFunc(buh.PatchBlobData) + handler["PUT"] = http.HandlerFunc(buh.PutBlobUploadComplete) + handler["DELETE"] = http.HandlerFunc(buh.CancelBlobUpload) + } + + if buh.UUID != "" { + state, err := hmacKey(ctx.Config.HTTP.Secret).unpackUploadState(r.FormValue("_state")) + if err != nil { + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + ctxu.GetLogger(ctx).Infof("error resolving upload: %v", err) + buh.Errors = append(buh.Errors, v2.ErrorCodeBlobUploadInvalid.WithDetail(err)) + }) + } + buh.State = state + + if state.Name != ctx.Repository.Named().Name() { + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + ctxu.GetLogger(ctx).Infof("mismatched repository name in upload state: %q != %q", state.Name, buh.Repository.Named().Name()) + buh.Errors = append(buh.Errors, v2.ErrorCodeBlobUploadInvalid.WithDetail(err)) + }) + } + + if state.UUID != buh.UUID { + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + ctxu.GetLogger(ctx).Infof("mismatched uuid in upload state: %q != %q", state.UUID, buh.UUID) + buh.Errors = append(buh.Errors, v2.ErrorCodeBlobUploadInvalid.WithDetail(err)) + }) + } + + blobs := ctx.Repository.Blobs(buh) + upload, err := blobs.Resume(buh, buh.UUID) + if err != nil { + ctxu.GetLogger(ctx).Errorf("error resolving upload: %v", err) + if err == distribution.ErrBlobUploadUnknown { + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + buh.Errors = append(buh.Errors, v2.ErrorCodeBlobUploadUnknown.WithDetail(err)) + }) + } + + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + buh.Errors = append(buh.Errors, errcode.ErrorCodeUnknown.WithDetail(err)) + }) + } + buh.Upload = upload + + if size := upload.Size(); size != buh.State.Offset { + defer upload.Close() + ctxu.GetLogger(ctx).Errorf("upload resumed at wrong offest: %d != %d", size, buh.State.Offset) + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + buh.Errors = append(buh.Errors, v2.ErrorCodeBlobUploadInvalid.WithDetail(err)) + upload.Cancel(buh) + }) + } + return closeResources(handler, buh.Upload) + } + + return handler +} + +// blobUploadHandler handles the http blob upload process. +type blobUploadHandler struct { + *Context + + // UUID identifies the upload instance for the current request. Using UUID + // to key blob writers since this implementation uses UUIDs. + UUID string + + Upload distribution.BlobWriter + + State blobUploadState +} + +// StartBlobUpload begins the blob upload process and allocates a server-side +// blob writer session, optionally mounting the blob from a separate repository. +func (buh *blobUploadHandler) StartBlobUpload(w http.ResponseWriter, r *http.Request) { + var options []distribution.BlobCreateOption + + fromRepo := r.FormValue("from") + mountDigest := r.FormValue("mount") + + if mountDigest != "" && fromRepo != "" { + opt, err := buh.createBlobMountOption(fromRepo, mountDigest) + if opt != nil && err == nil { + options = append(options, opt) + } + } + + blobs := buh.Repository.Blobs(buh) + upload, err := blobs.Create(buh, options...) 
+ + if err != nil { + if ebm, ok := err.(distribution.ErrBlobMounted); ok { + if err := buh.writeBlobCreatedHeaders(w, ebm.Descriptor); err != nil { + buh.Errors = append(buh.Errors, errcode.ErrorCodeUnknown.WithDetail(err)) + } + } else if err == distribution.ErrUnsupported { + buh.Errors = append(buh.Errors, errcode.ErrorCodeUnsupported) + } else { + buh.Errors = append(buh.Errors, errcode.ErrorCodeUnknown.WithDetail(err)) + } + return + } + + buh.Upload = upload + + if err := buh.blobUploadResponse(w, r, true); err != nil { + buh.Errors = append(buh.Errors, errcode.ErrorCodeUnknown.WithDetail(err)) + return + } + + w.Header().Set("Docker-Upload-UUID", buh.Upload.ID()) + w.WriteHeader(http.StatusAccepted) +} + +// GetUploadStatus returns the status of a given upload, identified by id. +func (buh *blobUploadHandler) GetUploadStatus(w http.ResponseWriter, r *http.Request) { + if buh.Upload == nil { + buh.Errors = append(buh.Errors, v2.ErrorCodeBlobUploadUnknown) + return + } + + // TODO(dmcgowan): Set last argument to false in blobUploadResponse when + // resumable upload is supported. This will enable returning a non-zero + // range for clients to begin uploading at an offset. + if err := buh.blobUploadResponse(w, r, true); err != nil { + buh.Errors = append(buh.Errors, errcode.ErrorCodeUnknown.WithDetail(err)) + return + } + + w.Header().Set("Docker-Upload-UUID", buh.UUID) + w.WriteHeader(http.StatusNoContent) +} + +// PatchBlobData writes data to an upload. +func (buh *blobUploadHandler) PatchBlobData(w http.ResponseWriter, r *http.Request) { + if buh.Upload == nil { + buh.Errors = append(buh.Errors, v2.ErrorCodeBlobUploadUnknown) + return + } + + ct := r.Header.Get("Content-Type") + if ct != "" && ct != "application/octet-stream" { + buh.Errors = append(buh.Errors, errcode.ErrorCodeUnknown.WithDetail(fmt.Errorf("Bad Content-Type"))) + // TODO(dmcgowan): encode error + return + } + + // TODO(dmcgowan): support Content-Range header to seek and write range + + if err := copyFullPayload(w, r, buh.Upload, buh, "blob PATCH", &buh.Errors); err != nil { + // copyFullPayload reports the error if necessary + return + } + + if err := buh.blobUploadResponse(w, r, false); err != nil { + buh.Errors = append(buh.Errors, errcode.ErrorCodeUnknown.WithDetail(err)) + return + } + + w.WriteHeader(http.StatusAccepted) +} + +// PutBlobUploadComplete takes the final request of a blob upload. The +// request may include all the blob data or no blob data. Any data +// provided is received and verified. If successful, the blob is linked +// into the blob store and 201 Created is returned with the canonical +// url of the blob. +func (buh *blobUploadHandler) PutBlobUploadComplete(w http.ResponseWriter, r *http.Request) { + if buh.Upload == nil { + buh.Errors = append(buh.Errors, v2.ErrorCodeBlobUploadUnknown) + return + } + + dgstStr := r.FormValue("digest") // TODO(stevvooe): Support multiple digest parameters! + + if dgstStr == "" { + // no digest? return error, but allow retry. + buh.Errors = append(buh.Errors, v2.ErrorCodeDigestInvalid.WithDetail("digest missing")) + return + } + + dgst, err := digest.ParseDigest(dgstStr) + if err != nil { + // no digest? return error, but allow retry. 
+ buh.Errors = append(buh.Errors, v2.ErrorCodeDigestInvalid.WithDetail("digest parsing failed")) + return + } + + if err := copyFullPayload(w, r, buh.Upload, buh, "blob PUT", &buh.Errors); err != nil { + // copyFullPayload reports the error if necessary + return + } + + desc, err := buh.Upload.Commit(buh, distribution.Descriptor{ + Digest: dgst, + + // TODO(stevvooe): This isn't wildly important yet, but we should + // really set the mediatype. For now, we can let the backend take care + // of this. + }) + + if err != nil { + switch err := err.(type) { + case distribution.ErrBlobInvalidDigest: + buh.Errors = append(buh.Errors, v2.ErrorCodeDigestInvalid.WithDetail(err)) + case errcode.Error: + buh.Errors = append(buh.Errors, err) + default: + switch err { + case distribution.ErrAccessDenied: + buh.Errors = append(buh.Errors, errcode.ErrorCodeDenied) + case distribution.ErrUnsupported: + buh.Errors = append(buh.Errors, errcode.ErrorCodeUnsupported) + case distribution.ErrBlobInvalidLength, distribution.ErrBlobDigestUnsupported: + buh.Errors = append(buh.Errors, v2.ErrorCodeBlobUploadInvalid.WithDetail(err)) + default: + ctxu.GetLogger(buh).Errorf("unknown error completing upload: %v", err) + buh.Errors = append(buh.Errors, errcode.ErrorCodeUnknown.WithDetail(err)) + } + + } + + // Clean up the backend blob data if there was an error. + if err := buh.Upload.Cancel(buh); err != nil { + // If the cleanup fails, all we can do is observe and report. + ctxu.GetLogger(buh).Errorf("error canceling upload after error: %v", err) + } + + return + } + if err := buh.writeBlobCreatedHeaders(w, desc); err != nil { + buh.Errors = append(buh.Errors, errcode.ErrorCodeUnknown.WithDetail(err)) + return + } +} + +// CancelBlobUpload cancels an in-progress upload of a blob. +func (buh *blobUploadHandler) CancelBlobUpload(w http.ResponseWriter, r *http.Request) { + if buh.Upload == nil { + buh.Errors = append(buh.Errors, v2.ErrorCodeBlobUploadUnknown) + return + } + + w.Header().Set("Docker-Upload-UUID", buh.UUID) + if err := buh.Upload.Cancel(buh); err != nil { + ctxu.GetLogger(buh).Errorf("error encountered canceling upload: %v", err) + buh.Errors = append(buh.Errors, errcode.ErrorCodeUnknown.WithDetail(err)) + } + + w.WriteHeader(http.StatusNoContent) +} + +// blobUploadResponse provides a standard request for uploading blobs and +// chunk responses. This sets the correct headers but the response status is +// left to the caller. The fresh argument is used to ensure that new blob +// uploads always start at a 0 offset. This allows disabling resumable push by +// always returning a 0 offset on check status. +func (buh *blobUploadHandler) blobUploadResponse(w http.ResponseWriter, r *http.Request, fresh bool) error { + // TODO(stevvooe): Need a better way to manage the upload state automatically. 
+ buh.State.Name = buh.Repository.Named().Name() + buh.State.UUID = buh.Upload.ID() + buh.Upload.Close() + buh.State.Offset = buh.Upload.Size() + buh.State.StartedAt = buh.Upload.StartedAt() + + token, err := hmacKey(buh.Config.HTTP.Secret).packUploadState(buh.State) + if err != nil { + ctxu.GetLogger(buh).Infof("error building upload state token: %s", err) + return err + } + + uploadURL, err := buh.urlBuilder.BuildBlobUploadChunkURL( + buh.Repository.Named(), buh.Upload.ID(), + url.Values{ + "_state": []string{token}, + }) + if err != nil { + ctxu.GetLogger(buh).Infof("error building upload url: %s", err) + return err + } + + endRange := buh.Upload.Size() + if endRange > 0 { + endRange = endRange - 1 + } + + w.Header().Set("Docker-Upload-UUID", buh.UUID) + w.Header().Set("Location", uploadURL) + + w.Header().Set("Content-Length", "0") + w.Header().Set("Range", fmt.Sprintf("0-%d", endRange)) + + return nil +} + +// mountBlob attempts to mount a blob from another repository by its digest. If +// successful, the blob is linked into the blob store and 201 Created is +// returned with the canonical url of the blob. +func (buh *blobUploadHandler) createBlobMountOption(fromRepo, mountDigest string) (distribution.BlobCreateOption, error) { + dgst, err := digest.ParseDigest(mountDigest) + if err != nil { + return nil, err + } + + ref, err := reference.ParseNamed(fromRepo) + if err != nil { + return nil, err + } + + canonical, err := reference.WithDigest(ref, dgst) + if err != nil { + return nil, err + } + + return storage.WithMountFrom(canonical), nil +} + +// writeBlobCreatedHeaders writes the standard headers describing a newly +// created blob. A 201 Created is written as well as the canonical URL and +// blob digest. +func (buh *blobUploadHandler) writeBlobCreatedHeaders(w http.ResponseWriter, desc distribution.Descriptor) error { + ref, err := reference.WithDigest(buh.Repository.Named(), desc.Digest) + if err != nil { + return err + } + blobURL, err := buh.urlBuilder.BuildBlobURL(ref) + if err != nil { + return err + } + + w.Header().Set("Location", blobURL) + w.Header().Set("Content-Length", "0") + w.Header().Set("Docker-Content-Digest", desc.Digest.String()) + w.WriteHeader(http.StatusCreated) + return nil +} diff --git a/vendor/github.com/docker/distribution/registry/handlers/catalog.go b/vendor/github.com/docker/distribution/registry/handlers/catalog.go new file mode 100644 index 000000000..eca984686 --- /dev/null +++ b/vendor/github.com/docker/distribution/registry/handlers/catalog.go @@ -0,0 +1,98 @@ +package handlers + +import ( + "encoding/json" + "fmt" + "io" + "net/http" + "net/url" + "strconv" + + "github.com/docker/distribution/registry/api/errcode" + "github.com/docker/distribution/registry/storage/driver" + "github.com/gorilla/handlers" +) + +const maximumReturnedEntries = 100 + +func catalogDispatcher(ctx *Context, r *http.Request) http.Handler { + catalogHandler := &catalogHandler{ + Context: ctx, + } + + return handlers.MethodHandler{ + "GET": http.HandlerFunc(catalogHandler.GetCatalog), + } +} + +type catalogHandler struct { + *Context +} + +type catalogAPIResponse struct { + Repositories []string `json:"repositories"` +} + +func (ch *catalogHandler) GetCatalog(w http.ResponseWriter, r *http.Request) { + var moreEntries = true + + q := r.URL.Query() + lastEntry := q.Get("last") + maxEntries, err := strconv.Atoi(q.Get("n")) + if err != nil || maxEntries < 0 { + maxEntries = maximumReturnedEntries + } + + repos := make([]string, maxEntries) + + filled, err := 
ch.App.registry.Repositories(ch.Context, repos, lastEntry) + _, pathNotFound := err.(driver.PathNotFoundError) + + if err == io.EOF || pathNotFound { + moreEntries = false + } else if err != nil { + ch.Errors = append(ch.Errors, errcode.ErrorCodeUnknown.WithDetail(err)) + return + } + + w.Header().Set("Content-Type", "application/json; charset=utf-8") + + // Add a link header if there are more entries to retrieve + if moreEntries { + lastEntry = repos[len(repos)-1] + urlStr, err := createLinkEntry(r.URL.String(), maxEntries, lastEntry) + if err != nil { + ch.Errors = append(ch.Errors, errcode.ErrorCodeUnknown.WithDetail(err)) + return + } + w.Header().Set("Link", urlStr) + } + + enc := json.NewEncoder(w) + if err := enc.Encode(catalogAPIResponse{ + Repositories: repos[0:filled], + }); err != nil { + ch.Errors = append(ch.Errors, errcode.ErrorCodeUnknown.WithDetail(err)) + return + } +} + +// Use the original URL from the request to create a new URL for +// the link header +func createLinkEntry(origURL string, maxEntries int, lastEntry string) (string, error) { + calledURL, err := url.Parse(origURL) + if err != nil { + return "", err + } + + v := url.Values{} + v.Add("n", strconv.Itoa(maxEntries)) + v.Add("last", lastEntry) + + calledURL.RawQuery = v.Encode() + + calledURL.Fragment = "" + urlStr := fmt.Sprintf("<%s>; rel=\"next\"", calledURL.String()) + + return urlStr, nil +} diff --git a/vendor/github.com/docker/distribution/registry/handlers/context.go b/vendor/github.com/docker/distribution/registry/handlers/context.go new file mode 100644 index 000000000..552db2df6 --- /dev/null +++ b/vendor/github.com/docker/distribution/registry/handlers/context.go @@ -0,0 +1,152 @@ +package handlers + +import ( + "fmt" + "net/http" + "sync" + + "github.com/docker/distribution" + ctxu "github.com/docker/distribution/context" + "github.com/docker/distribution/digest" + "github.com/docker/distribution/registry/api/errcode" + "github.com/docker/distribution/registry/api/v2" + "github.com/docker/distribution/registry/auth" + "golang.org/x/net/context" +) + +// Context should contain the request specific context for use in across +// handlers. Resources that don't need to be shared across handlers should not +// be on this object. +type Context struct { + // App points to the application structure that created this context. + *App + context.Context + + // Repository is the repository for the current request. All requests + // should be scoped to a single repository. This field may be nil. + Repository distribution.Repository + + // Errors is a collection of errors encountered during the request to be + // returned to the client API. If errors are added to the collection, the + // handler *must not* start the response via http.ResponseWriter. + Errors errcode.Errors + + urlBuilder *v2.URLBuilder + + // TODO(stevvooe): The goal is too completely factor this context and + // dispatching out of the web application. Ideally, we should lean on + // context.Context for injection of these resources. +} + +// Value overrides context.Context.Value to ensure that calls are routed to +// correct context. 
+func (ctx *Context) Value(key interface{}) interface{} { + return ctx.Context.Value(key) +} + +func getName(ctx context.Context) (name string) { + return ctxu.GetStringValue(ctx, "vars.name") +} + +func getReference(ctx context.Context) (reference string) { + return ctxu.GetStringValue(ctx, "vars.reference") +} + +var errDigestNotAvailable = fmt.Errorf("digest not available in context") + +func getDigest(ctx context.Context) (dgst digest.Digest, err error) { + dgstStr := ctxu.GetStringValue(ctx, "vars.digest") + + if dgstStr == "" { + ctxu.GetLogger(ctx).Errorf("digest not available") + return "", errDigestNotAvailable + } + + d, err := digest.ParseDigest(dgstStr) + if err != nil { + ctxu.GetLogger(ctx).Errorf("error parsing digest=%q: %v", dgstStr, err) + return "", err + } + + return d, nil +} + +func getUploadUUID(ctx context.Context) (uuid string) { + return ctxu.GetStringValue(ctx, "vars.uuid") +} + +// getUserName attempts to resolve a username from the context and request. If +// a username cannot be resolved, the empty string is returned. +func getUserName(ctx context.Context, r *http.Request) string { + username := ctxu.GetStringValue(ctx, auth.UserNameKey) + + // Fallback to request user with basic auth + if username == "" { + var ok bool + uname, _, ok := basicAuth(r) + if ok { + username = uname + } + } + + return username +} + +// contextManager allows us to associate net/context.Context instances with a +// request, based on the memory identity of http.Request. This prepares http- +// level context, which is not application specific. If this is called, +// (*contextManager).release must be called on the context when the request is +// completed. +// +// Providing this circumvents a lot of necessity for dispatchers with the +// benefit of instantiating the request context much earlier. +// +// TODO(stevvooe): Consider making this facility a part of the context package. +type contextManager struct { + contexts map[*http.Request]context.Context + mu sync.Mutex +} + +// defaultContextManager is just a global instance to register request contexts. +var defaultContextManager = newContextManager() + +func newContextManager() *contextManager { + return &contextManager{ + contexts: make(map[*http.Request]context.Context), + } +} + +// context either returns a new context or looks it up in the manager. +func (cm *contextManager) context(parent context.Context, w http.ResponseWriter, r *http.Request) context.Context { + cm.mu.Lock() + defer cm.mu.Unlock() + + ctx, ok := cm.contexts[r] + if ok { + return ctx + } + + if parent == nil { + parent = ctxu.Background() + } + + ctx = ctxu.WithRequest(parent, r) + ctx, w = ctxu.WithResponseWriter(ctx, w) + ctx = ctxu.WithLogger(ctx, ctxu.GetRequestLogger(ctx)) + cm.contexts[r] = ctx + + return ctx +} + +// releases frees any associated with resources from request. 
+func (cm *contextManager) release(ctx context.Context) { + cm.mu.Lock() + defer cm.mu.Unlock() + + r, err := ctxu.GetRequest(ctx) + if err != nil { + ctxu.GetLogger(ctx).Errorf("no request found in context during release") + return + } + delete(cm.contexts, r) +} diff --git a/vendor/github.com/docker/distribution/registry/handlers/health_test.go b/vendor/github.com/docker/distribution/registry/handlers/health_test.go new file mode 100644 index 000000000..0f38bd1cd --- /dev/null +++ b/vendor/github.com/docker/distribution/registry/handlers/health_test.go @@ -0,0 +1,210 @@ +package handlers + +import ( + "io/ioutil" + "net" + "net/http" + "net/http/httptest" + "os" + "testing" + "time" + + "github.com/docker/distribution/configuration" + "github.com/docker/distribution/context" + "github.com/docker/distribution/health" +) + +func TestFileHealthCheck(t *testing.T) { + interval := time.Second + + tmpfile, err := ioutil.TempFile(os.TempDir(), "healthcheck") + if err != nil { + t.Fatalf("could not create temporary file: %v", err) + } + defer tmpfile.Close() + + config := &configuration.Configuration{ + Storage: configuration.Storage{ + "inmemory": configuration.Parameters{}, + "maintenance": configuration.Parameters{"uploadpurging": map[interface{}]interface{}{ + "enabled": false, + }}, + }, + Health: configuration.Health{ + FileCheckers: []configuration.FileChecker{ + { + Interval: interval, + File: tmpfile.Name(), + }, + }, + }, + } + + ctx := context.Background() + + app := NewApp(ctx, config) + healthRegistry := health.NewRegistry() + app.RegisterHealthChecks(healthRegistry) + + // Wait for health check to happen + <-time.After(2 * interval) + + status := healthRegistry.CheckStatus() + if len(status) != 1 { + t.Fatal("expected 1 item in health check results") + } + if status[tmpfile.Name()] != "file exists" { + t.Fatal(`did not get "file exists" result for health check`) + } + + os.Remove(tmpfile.Name()) + + <-time.After(2 * interval) + if len(healthRegistry.CheckStatus()) != 0 { + t.Fatal("expected 0 items in health check results") + } +} + +func TestTCPHealthCheck(t *testing.T) { + interval := time.Second + + ln, err := net.Listen("tcp", "127.0.0.1:0") + if err != nil { + t.Fatalf("could not create listener: %v", err) + } + addrStr := ln.Addr().String() + + // Start accepting + go func() { + for { + conn, err := ln.Accept() + if err != nil { + // listener was closed + return + } + defer conn.Close() + } + }() + + config := &configuration.Configuration{ + Storage: configuration.Storage{ + "inmemory": configuration.Parameters{}, + "maintenance": configuration.Parameters{"uploadpurging": map[interface{}]interface{}{ + "enabled": false, + }}, + }, + Health: configuration.Health{ + TCPCheckers: []configuration.TCPChecker{ + { + Interval: interval, + Addr: addrStr, + Timeout: 500 * time.Millisecond, + }, + }, + }, + } + + ctx := context.Background() + + app := NewApp(ctx, config) + healthRegistry := health.NewRegistry() + app.RegisterHealthChecks(healthRegistry) + + // Wait for health check to happen + <-time.After(2 * interval) + + if len(healthRegistry.CheckStatus()) != 0 { + t.Fatal("expected 0 items in health check results") + } + + ln.Close() + <-time.After(2 * interval) + + // Health check should now fail + status := healthRegistry.CheckStatus() + if len(status) != 1 { + t.Fatal("expected 1 item in health check results") + } + if status[addrStr] != "connection to "+addrStr+" failed" { + t.Fatal(`did not get "connection failed" result for health check`) + } +} + +func TestHTTPHealthCheck(t 
*testing.T) { + interval := time.Second + threshold := 3 + + stopFailing := make(chan struct{}) + + checkedServer := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + if r.Method != "HEAD" { + t.Fatalf("expected HEAD request, got %s", r.Method) + } + select { + case <-stopFailing: + w.WriteHeader(http.StatusOK) + default: + w.WriteHeader(http.StatusInternalServerError) + } + })) + + config := &configuration.Configuration{ + Storage: configuration.Storage{ + "inmemory": configuration.Parameters{}, + "maintenance": configuration.Parameters{"uploadpurging": map[interface{}]interface{}{ + "enabled": false, + }}, + }, + Health: configuration.Health{ + HTTPCheckers: []configuration.HTTPChecker{ + { + Interval: interval, + URI: checkedServer.URL, + Threshold: threshold, + }, + }, + }, + } + + ctx := context.Background() + + app := NewApp(ctx, config) + healthRegistry := health.NewRegistry() + app.RegisterHealthChecks(healthRegistry) + + for i := 0; ; i++ { + <-time.After(interval) + + status := healthRegistry.CheckStatus() + + if i < threshold-1 { + // definitely shouldn't have hit the threshold yet + if len(status) != 0 { + t.Fatal("expected 1 item in health check results") + } + continue + } + if i < threshold+1 { + // right on the threshold - don't expect a failure yet + continue + } + + if len(status) != 1 { + t.Fatal("expected 1 item in health check results") + } + if status[checkedServer.URL] != "downstream service returned unexpected status: 500" { + t.Fatal("did not get expected result for health check") + } + + break + } + + // Signal HTTP handler to start returning 200 + close(stopFailing) + + <-time.After(2 * interval) + + if len(healthRegistry.CheckStatus()) != 0 { + t.Fatal("expected 0 items in health check results") + } +} diff --git a/vendor/github.com/docker/distribution/registry/handlers/helpers.go b/vendor/github.com/docker/distribution/registry/handlers/helpers.go new file mode 100644 index 000000000..dac4f7a85 --- /dev/null +++ b/vendor/github.com/docker/distribution/registry/handlers/helpers.go @@ -0,0 +1,66 @@ +package handlers + +import ( + "errors" + "io" + "net/http" + + ctxu "github.com/docker/distribution/context" + "github.com/docker/distribution/registry/api/errcode" +) + +// closeResources closes all the provided resources after running the target +// handler. +func closeResources(handler http.Handler, closers ...io.Closer) http.Handler { + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + for _, closer := range closers { + defer closer.Close() + } + handler.ServeHTTP(w, r) + }) +} + +// copyFullPayload copies the payload of an HTTP request to destWriter. If it +// receives less content than expected, and the client disconnected during the +// upload, it avoids sending a 400 error to keep the logs cleaner. +func copyFullPayload(responseWriter http.ResponseWriter, r *http.Request, destWriter io.Writer, context ctxu.Context, action string, errSlice *errcode.Errors) error { + // Get a channel that tells us if the client disconnects + var clientClosed <-chan bool + if notifier, ok := responseWriter.(http.CloseNotifier); ok { + clientClosed = notifier.CloseNotify() + } else { + ctxu.GetLogger(context).Warnf("the ResponseWriter does not implement CloseNotifier (type: %T)", responseWriter) + } + + // Read in the data, if any. + copied, err := io.Copy(destWriter, r.Body) + if clientClosed != nil && (err != nil || (r.ContentLength > 0 && copied < r.ContentLength)) { + // Didn't receive as much content as expected. 
Did the client + // disconnect during the request? If so, avoid returning a 400 + // error to keep the logs cleaner. + select { + case <-clientClosed: + // Set the response code to "499 Client Closed Request" + // Even though the connection has already been closed, + // this causes the logger to pick up a 499 error + // instead of showing 0 for the HTTP status. + responseWriter.WriteHeader(499) + + ctxu.GetLoggerWithFields(context, map[interface{}]interface{}{ + "error": err, + "copied": copied, + "contentLength": r.ContentLength, + }, "error", "copied", "contentLength").Error("client disconnected during " + action) + return errors.New("client disconnected") + default: + } + } + + if err != nil { + ctxu.GetLogger(context).Errorf("unknown error reading request payload: %v", err) + *errSlice = append(*errSlice, errcode.ErrorCodeUnknown.WithDetail(err)) + return err + } + + return nil +} diff --git a/vendor/github.com/docker/distribution/registry/handlers/hmac.go b/vendor/github.com/docker/distribution/registry/handlers/hmac.go new file mode 100644 index 000000000..94ed9fda5 --- /dev/null +++ b/vendor/github.com/docker/distribution/registry/handlers/hmac.go @@ -0,0 +1,74 @@ +package handlers + +import ( + "crypto/hmac" + "crypto/sha256" + "encoding/base64" + "encoding/json" + "fmt" + "time" +) + +// blobUploadState captures the state serializable state of the blob upload. +type blobUploadState struct { + // name is the primary repository under which the blob will be linked. + Name string + + // UUID identifies the upload. + UUID string + + // offset contains the current progress of the upload. + Offset int64 + + // StartedAt is the original start time of the upload. + StartedAt time.Time +} + +type hmacKey string + +var errInvalidSecret = fmt.Errorf("invalid secret") + +// unpackUploadState unpacks and validates the blob upload state from the +// token, using the hmacKey secret. +func (secret hmacKey) unpackUploadState(token string) (blobUploadState, error) { + var state blobUploadState + + tokenBytes, err := base64.URLEncoding.DecodeString(token) + if err != nil { + return state, err + } + mac := hmac.New(sha256.New, []byte(secret)) + + if len(tokenBytes) < mac.Size() { + return state, errInvalidSecret + } + + macBytes := tokenBytes[:mac.Size()] + messageBytes := tokenBytes[mac.Size():] + + mac.Write(messageBytes) + if !hmac.Equal(mac.Sum(nil), macBytes) { + return state, errInvalidSecret + } + + if err := json.Unmarshal(messageBytes, &state); err != nil { + return state, err + } + + return state, nil +} + +// packUploadState packs the upload state signed with and hmac digest using +// the hmacKey secret, encoding to url safe base64. The resulting token can be +// used to share data with minimized risk of external tampering. 
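The packUploadState method that follows is the inverse of unpackUploadState above: the token is URL-safe base64 of hmac-sha256(secret, json(state)) prepended to json(state) itself. A minimal, self-contained sketch of that round trip, with a placeholder secret and placeholder state values (none of these names come from the vendored code):

package main

import (
	"crypto/hmac"
	"crypto/sha256"
	"encoding/base64"
	"encoding/json"
	"fmt"
)

type uploadState struct {
	Name   string
	UUID   string
	Offset int64
}

// pack builds base64url( MAC || payload ), mirroring packUploadState.
func pack(secret []byte, s uploadState) (string, error) {
	p, err := json.Marshal(s)
	if err != nil {
		return "", err
	}
	mac := hmac.New(sha256.New, secret)
	mac.Write(p)
	return base64.URLEncoding.EncodeToString(append(mac.Sum(nil), p...)), nil
}

// unpack splits the leading MAC back off and verifies it, mirroring unpackUploadState.
func unpack(secret []byte, token string) (uploadState, error) {
	var s uploadState
	raw, err := base64.URLEncoding.DecodeString(token)
	if err != nil {
		return s, err
	}
	mac := hmac.New(sha256.New, secret)
	if len(raw) < mac.Size() {
		return s, fmt.Errorf("token too short")
	}
	sig, payload := raw[:mac.Size()], raw[mac.Size():]
	mac.Write(payload)
	if !hmac.Equal(mac.Sum(nil), sig) {
		return s, fmt.Errorf("invalid signature")
	}
	if err := json.Unmarshal(payload, &s); err != nil {
		return s, err
	}
	return s, nil
}

func main() {
	secret := []byte("supersecret") // placeholder secret
	token, _ := pack(secret, uploadState{Name: "hello", UUID: "abcd-1234", Offset: 1337})
	state, err := unpack(secret, token)
	fmt.Println(state, err)
}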
+func (secret hmacKey) packUploadState(lus blobUploadState) (string, error) { + mac := hmac.New(sha256.New, []byte(secret)) + p, err := json.Marshal(lus) + if err != nil { + return "", err + } + + mac.Write(p) + + return base64.URLEncoding.EncodeToString(append(mac.Sum(nil), p...)), nil +} diff --git a/vendor/github.com/docker/distribution/registry/handlers/hmac_test.go b/vendor/github.com/docker/distribution/registry/handlers/hmac_test.go new file mode 100644 index 000000000..366c7279e --- /dev/null +++ b/vendor/github.com/docker/distribution/registry/handlers/hmac_test.go @@ -0,0 +1,117 @@ +package handlers + +import "testing" + +var blobUploadStates = []blobUploadState{ + { + Name: "hello", + UUID: "abcd-1234-qwer-0987", + Offset: 0, + }, + { + Name: "hello-world", + UUID: "abcd-1234-qwer-0987", + Offset: 0, + }, + { + Name: "h3ll0_w0rld", + UUID: "abcd-1234-qwer-0987", + Offset: 1337, + }, + { + Name: "ABCDEFG", + UUID: "ABCD-1234-QWER-0987", + Offset: 1234567890, + }, + { + Name: "this-is-A-sort-of-Long-name-for-Testing", + UUID: "dead-1234-beef-0987", + Offset: 8675309, + }, +} + +var secrets = []string{ + "supersecret", + "12345", + "a", + "SuperSecret", + "Sup3r... S3cr3t!", + "This is a reasonably long secret key that is used for the purpose of testing.", + "\u2603+\u2744", // snowman+snowflake +} + +// TestLayerUploadTokens constructs stateTokens from LayerUploadStates and +// validates that the tokens can be used to reconstruct the proper upload state. +func TestLayerUploadTokens(t *testing.T) { + secret := hmacKey("supersecret") + + for _, testcase := range blobUploadStates { + token, err := secret.packUploadState(testcase) + if err != nil { + t.Fatal(err) + } + + lus, err := secret.unpackUploadState(token) + if err != nil { + t.Fatal(err) + } + + assertBlobUploadStateEquals(t, testcase, lus) + } +} + +// TestHMACValidate ensures that any HMAC token providers are compatible if and +// only if they share the same secret. 
+func TestHMACValidation(t *testing.T) { + for _, secret := range secrets { + secret1 := hmacKey(secret) + secret2 := hmacKey(secret) + badSecret := hmacKey("DifferentSecret") + + for _, testcase := range blobUploadStates { + token, err := secret1.packUploadState(testcase) + if err != nil { + t.Fatal(err) + } + + lus, err := secret2.unpackUploadState(token) + if err != nil { + t.Fatal(err) + } + + assertBlobUploadStateEquals(t, testcase, lus) + + _, err = badSecret.unpackUploadState(token) + if err == nil { + t.Fatalf("Expected token provider to fail at retrieving state from token: %s", token) + } + + badToken, err := badSecret.packUploadState(lus) + if err != nil { + t.Fatal(err) + } + + _, err = secret1.unpackUploadState(badToken) + if err == nil { + t.Fatalf("Expected token provider to fail at retrieving state from token: %s", badToken) + } + + _, err = secret2.unpackUploadState(badToken) + if err == nil { + t.Fatalf("Expected token provider to fail at retrieving state from token: %s", badToken) + } + } + } +} + +func assertBlobUploadStateEquals(t *testing.T, expected blobUploadState, received blobUploadState) { + if expected.Name != received.Name { + t.Fatalf("Expected Name=%q, Received Name=%q", expected.Name, received.Name) + } + if expected.UUID != received.UUID { + t.Fatalf("Expected UUID=%q, Received UUID=%q", expected.UUID, received.UUID) + } + if expected.Offset != received.Offset { + t.Fatalf("Expected Offset=%d, Received Offset=%d", expected.Offset, received.Offset) + } +} diff --git a/vendor/github.com/docker/distribution/registry/handlers/hooks.go b/vendor/github.com/docker/distribution/registry/handlers/hooks.go new file mode 100644 index 000000000..7bbab4f8a --- /dev/null +++ b/vendor/github.com/docker/distribution/registry/handlers/hooks.go @@ -0,0 +1,53 @@ +package handlers + +import ( + "bytes" + "errors" + "fmt" + "strings" + "text/template" + + "github.com/Sirupsen/logrus" +) + +// logHook is for hooking Panic in web application +type logHook struct { + LevelsParam []string + Mail *mailer +} + +// Fire forwards an error to LogHook +func (hook *logHook) Fire(entry *logrus.Entry) error { + addr := strings.Split(hook.Mail.Addr, ":") + if len(addr) != 2 { + return errors.New("Invalid Mail Address") + } + host := addr[0] + subject := fmt.Sprintf("[%s] %s: %s", entry.Level, host, entry.Message) + + html := ` + {{.Message}} + + {{range $key, $value := .Data}} + {{$key}}: {{$value}} + {{end}} + ` + b := bytes.NewBuffer(make([]byte, 0)) + t := template.Must(template.New("mail body").Parse(html)) + if err := t.Execute(b, entry); err != nil { + return err + } + body := fmt.Sprintf("%s", b) + + return hook.Mail.sendMail(subject, body) +} + +// Levels contains hook levels to be catched +func (hook *logHook) Levels() []logrus.Level { + levels := []logrus.Level{} + for _, v := range hook.LevelsParam { + lv, _ := logrus.ParseLevel(v) + levels = append(levels, lv) + } + return levels +} diff --git a/vendor/github.com/docker/distribution/registry/handlers/images.go b/vendor/github.com/docker/distribution/registry/handlers/images.go new file mode 100644 index 000000000..9518f6855 --- /dev/null +++ b/vendor/github.com/docker/distribution/registry/handlers/images.go @@ -0,0 +1,460 @@ +package handlers + +import ( + "bytes" + "fmt" + "net/http" + "strings" + + "github.com/docker/distribution" + ctxu "github.com/docker/distribution/context" + "github.com/docker/distribution/digest" + "github.com/docker/distribution/manifest/manifestlist" + "github.com/docker/distribution/manifest/schema1" + 
"github.com/docker/distribution/manifest/schema2" + "github.com/docker/distribution/reference" + "github.com/docker/distribution/registry/api/errcode" + "github.com/docker/distribution/registry/api/v2" + "github.com/docker/distribution/registry/auth" + "github.com/gorilla/handlers" +) + +// These constants determine which architecture and OS to choose from a +// manifest list when downconverting it to a schema1 manifest. +const ( + defaultArch = "amd64" + defaultOS = "linux" +) + +// imageManifestDispatcher takes the request context and builds the +// appropriate handler for handling image manifest requests. +func imageManifestDispatcher(ctx *Context, r *http.Request) http.Handler { + imageManifestHandler := &imageManifestHandler{ + Context: ctx, + } + reference := getReference(ctx) + dgst, err := digest.ParseDigest(reference) + if err != nil { + // We just have a tag + imageManifestHandler.Tag = reference + } else { + imageManifestHandler.Digest = dgst + } + + mhandler := handlers.MethodHandler{ + "GET": http.HandlerFunc(imageManifestHandler.GetImageManifest), + "HEAD": http.HandlerFunc(imageManifestHandler.GetImageManifest), + } + + if !ctx.readOnly { + mhandler["PUT"] = http.HandlerFunc(imageManifestHandler.PutImageManifest) + mhandler["DELETE"] = http.HandlerFunc(imageManifestHandler.DeleteImageManifest) + } + + return mhandler +} + +// imageManifestHandler handles http operations on image manifests. +type imageManifestHandler struct { + *Context + + // One of tag or digest gets set, depending on what is present in context. + Tag string + Digest digest.Digest +} + +// GetImageManifest fetches the image manifest from the storage backend, if it exists. +func (imh *imageManifestHandler) GetImageManifest(w http.ResponseWriter, r *http.Request) { + ctxu.GetLogger(imh).Debug("GetImageManifest") + manifests, err := imh.Repository.Manifests(imh) + if err != nil { + imh.Errors = append(imh.Errors, err) + return + } + + var manifest distribution.Manifest + if imh.Tag != "" { + tags := imh.Repository.Tags(imh) + desc, err := tags.Get(imh, imh.Tag) + if err != nil { + imh.Errors = append(imh.Errors, v2.ErrorCodeManifestUnknown.WithDetail(err)) + return + } + imh.Digest = desc.Digest + } + + if etagMatch(r, imh.Digest.String()) { + w.WriteHeader(http.StatusNotModified) + return + } + + var options []distribution.ManifestServiceOption + if imh.Tag != "" { + options = append(options, distribution.WithTag(imh.Tag)) + } + manifest, err = manifests.Get(imh, imh.Digest, options...) + if err != nil { + imh.Errors = append(imh.Errors, v2.ErrorCodeManifestUnknown.WithDetail(err)) + return + } + + supportsSchema2 := false + supportsManifestList := false + // this parsing of Accept headers is not quite as full-featured as godoc.org's parser, but we don't care about "q=" values + // https://github.com/golang/gddo/blob/e91d4165076d7474d20abda83f92d15c7ebc3e81/httputil/header/header.go#L165-L202 + for _, acceptHeader := range r.Header["Accept"] { + // r.Header[...] is a slice in case the request contains the same header more than once + // if the header isn't set, we'll get the zero value, which "range" will handle gracefully + + // we need to split each header value on "," to get the full list of "Accept" values (per RFC 2616) + // https://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.1 + for _, mediaType := range strings.Split(acceptHeader, ",") { + // remove "; q=..." 
if present + if i := strings.Index(mediaType, ";"); i >= 0 { + mediaType = mediaType[:i] + } + + // it's common (but not required) for Accept values to be space separated ("a/b, c/d, e/f") + mediaType = strings.TrimSpace(mediaType) + + if mediaType == schema2.MediaTypeManifest { + supportsSchema2 = true + } + if mediaType == manifestlist.MediaTypeManifestList { + supportsManifestList = true + } + } + } + + schema2Manifest, isSchema2 := manifest.(*schema2.DeserializedManifest) + manifestList, isManifestList := manifest.(*manifestlist.DeserializedManifestList) + + // Only rewrite schema2 manifests when they are being fetched by tag. + // If they are being fetched by digest, we can't return something not + // matching the digest. + if imh.Tag != "" && isSchema2 && !supportsSchema2 { + // Rewrite manifest in schema1 format + ctxu.GetLogger(imh).Infof("rewriting manifest %s in schema1 format to support old client", imh.Digest.String()) + + manifest, err = imh.convertSchema2Manifest(schema2Manifest) + if err != nil { + return + } + } else if imh.Tag != "" && isManifestList && !supportsManifestList { + // Rewrite manifest in schema1 format + ctxu.GetLogger(imh).Infof("rewriting manifest list %s in schema1 format to support old client", imh.Digest.String()) + + // Find the image manifest corresponding to the default + // platform + var manifestDigest digest.Digest + for _, manifestDescriptor := range manifestList.Manifests { + if manifestDescriptor.Platform.Architecture == defaultArch && manifestDescriptor.Platform.OS == defaultOS { + manifestDigest = manifestDescriptor.Digest + break + } + } + + if manifestDigest == "" { + imh.Errors = append(imh.Errors, v2.ErrorCodeManifestUnknown) + return + } + + manifest, err = manifests.Get(imh, manifestDigest) + if err != nil { + imh.Errors = append(imh.Errors, v2.ErrorCodeManifestUnknown.WithDetail(err)) + return + } + + // If necessary, convert the image manifest + if schema2Manifest, isSchema2 := manifest.(*schema2.DeserializedManifest); isSchema2 && !supportsSchema2 { + manifest, err = imh.convertSchema2Manifest(schema2Manifest) + if err != nil { + return + } + } + } + + ct, p, err := manifest.Payload() + if err != nil { + return + } + + w.Header().Set("Content-Type", ct) + w.Header().Set("Content-Length", fmt.Sprint(len(p))) + w.Header().Set("Docker-Content-Digest", imh.Digest.String()) + w.Header().Set("Etag", fmt.Sprintf(`"%s"`, imh.Digest)) + w.Write(p) +} + +func (imh *imageManifestHandler) convertSchema2Manifest(schema2Manifest *schema2.DeserializedManifest) (distribution.Manifest, error) { + targetDescriptor := schema2Manifest.Target() + blobs := imh.Repository.Blobs(imh) + configJSON, err := blobs.Get(imh, targetDescriptor.Digest) + if err != nil { + imh.Errors = append(imh.Errors, v2.ErrorCodeManifestInvalid.WithDetail(err)) + return nil, err + } + + ref := imh.Repository.Named() + + if imh.Tag != "" { + ref, err = reference.WithTag(ref, imh.Tag) + if err != nil { + imh.Errors = append(imh.Errors, v2.ErrorCodeTagInvalid.WithDetail(err)) + return nil, err + } + } + + builder := schema1.NewConfigManifestBuilder(imh.Repository.Blobs(imh), imh.Context.App.trustKey, ref, configJSON) + for _, d := range schema2Manifest.Layers { + if err := builder.AppendReference(d); err != nil { + imh.Errors = append(imh.Errors, v2.ErrorCodeManifestInvalid.WithDetail(err)) + return nil, err + } + } + manifest, err := builder.Build(imh) + if err != nil { + imh.Errors = append(imh.Errors, v2.ErrorCodeManifestInvalid.WithDetail(err)) + return nil, err + } + imh.Digest 
= digest.FromBytes(manifest.(*schema1.SignedManifest).Canonical) + + return manifest, nil +} + +func etagMatch(r *http.Request, etag string) bool { + for _, headerVal := range r.Header["If-None-Match"] { + if headerVal == etag || headerVal == fmt.Sprintf(`"%s"`, etag) { // allow quoted or unquoted + return true + } + } + return false +} + +// PutImageManifest validates and stores an image in the registry. +func (imh *imageManifestHandler) PutImageManifest(w http.ResponseWriter, r *http.Request) { + ctxu.GetLogger(imh).Debug("PutImageManifest") + manifests, err := imh.Repository.Manifests(imh) + if err != nil { + imh.Errors = append(imh.Errors, err) + return + } + + var jsonBuf bytes.Buffer + if err := copyFullPayload(w, r, &jsonBuf, imh, "image manifest PUT", &imh.Errors); err != nil { + // copyFullPayload reports the error if necessary + return + } + + mediaType := r.Header.Get("Content-Type") + manifest, desc, err := distribution.UnmarshalManifest(mediaType, jsonBuf.Bytes()) + if err != nil { + imh.Errors = append(imh.Errors, v2.ErrorCodeManifestInvalid.WithDetail(err)) + return + } + + if imh.Digest != "" { + if desc.Digest != imh.Digest { + ctxu.GetLogger(imh).Errorf("payload digest does match: %q != %q", desc.Digest, imh.Digest) + imh.Errors = append(imh.Errors, v2.ErrorCodeDigestInvalid) + return + } + } else if imh.Tag != "" { + imh.Digest = desc.Digest + } else { + imh.Errors = append(imh.Errors, v2.ErrorCodeTagInvalid.WithDetail("no tag or digest specified")) + return + } + + var options []distribution.ManifestServiceOption + if imh.Tag != "" { + options = append(options, distribution.WithTag(imh.Tag)) + } + + if err := imh.applyResourcePolicy(manifest); err != nil { + imh.Errors = append(imh.Errors, err) + return + } + + _, err = manifests.Put(imh, manifest, options...) + if err != nil { + // TODO(stevvooe): These error handling switches really need to be + // handled by an app global mapper. + if err == distribution.ErrUnsupported { + imh.Errors = append(imh.Errors, errcode.ErrorCodeUnsupported) + return + } + if err == distribution.ErrAccessDenied { + imh.Errors = append(imh.Errors, errcode.ErrorCodeDenied) + return + } + switch err := err.(type) { + case distribution.ErrManifestVerification: + for _, verificationError := range err { + switch verificationError := verificationError.(type) { + case distribution.ErrManifestBlobUnknown: + imh.Errors = append(imh.Errors, v2.ErrorCodeManifestBlobUnknown.WithDetail(verificationError.Digest)) + case distribution.ErrManifestNameInvalid: + imh.Errors = append(imh.Errors, v2.ErrorCodeNameInvalid.WithDetail(err)) + case distribution.ErrManifestUnverified: + imh.Errors = append(imh.Errors, v2.ErrorCodeManifestUnverified) + default: + if verificationError == digest.ErrDigestInvalidFormat { + imh.Errors = append(imh.Errors, v2.ErrorCodeDigestInvalid) + } else { + imh.Errors = append(imh.Errors, errcode.ErrorCodeUnknown, verificationError) + } + } + } + case errcode.Error: + imh.Errors = append(imh.Errors, err) + default: + imh.Errors = append(imh.Errors, errcode.ErrorCodeUnknown.WithDetail(err)) + } + + return + } + + // Tag this manifest + if imh.Tag != "" { + tags := imh.Repository.Tags(imh) + err = tags.Tag(imh, imh.Tag, desc) + if err != nil { + imh.Errors = append(imh.Errors, errcode.ErrorCodeUnknown.WithDetail(err)) + return + } + + } + + // Construct a canonical url for the uploaded manifest. 
+ ref, err := reference.WithDigest(imh.Repository.Named(), imh.Digest) + if err != nil { + imh.Errors = append(imh.Errors, errcode.ErrorCodeUnknown.WithDetail(err)) + return + } + + location, err := imh.urlBuilder.BuildManifestURL(ref) + if err != nil { + // NOTE(stevvooe): Given the behavior above, this absurdly unlikely to + // happen. We'll log the error here but proceed as if it worked. Worst + // case, we set an empty location header. + ctxu.GetLogger(imh).Errorf("error building manifest url from digest: %v", err) + } + + w.Header().Set("Location", location) + w.Header().Set("Docker-Content-Digest", imh.Digest.String()) + w.WriteHeader(http.StatusCreated) +} + +// applyResourcePolicy checks whether the resource class matches what has +// been authorized and allowed by the policy configuration. +func (imh *imageManifestHandler) applyResourcePolicy(manifest distribution.Manifest) error { + allowedClasses := imh.App.Config.Policy.Repository.Classes + if len(allowedClasses) == 0 { + return nil + } + + var class string + switch m := manifest.(type) { + case *schema1.SignedManifest: + class = "image" + case *schema2.DeserializedManifest: + switch m.Config.MediaType { + case schema2.MediaTypeConfig: + class = "image" + case schema2.MediaTypePluginConfig: + class = "plugin" + default: + message := fmt.Sprintf("unknown manifest class for %s", m.Config.MediaType) + return errcode.ErrorCodeDenied.WithMessage(message) + } + } + + if class == "" { + return nil + } + + // Check to see if class is allowed in registry + var allowedClass bool + for _, c := range allowedClasses { + if class == c { + allowedClass = true + break + } + } + if !allowedClass { + message := fmt.Sprintf("registry does not allow %s manifest", class) + return errcode.ErrorCodeDenied.WithMessage(message) + } + + resources := auth.AuthorizedResources(imh) + n := imh.Repository.Named().Name() + + var foundResource bool + for _, r := range resources { + if r.Name == n { + if r.Class == "" { + r.Class = "image" + } + if r.Class == class { + return nil + } + foundResource = true + } + } + + // resource was found but no matching class was found + if foundResource { + message := fmt.Sprintf("repository not authorized for %s manifest", class) + return errcode.ErrorCodeDenied.WithMessage(message) + } + + return nil + +} + +// DeleteImageManifest removes the manifest with the given digest from the registry. 
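As the Accept-header handling in GetImageManifest above implies, a client only receives schema2 or manifest-list payloads for tag requests when it advertises those media types; otherwise the manifest is rewritten to schema1. A rough client-side sketch of that negotiation; the registry URL, repository and tag are placeholders, and the media type strings are the ones the handler checks for:

package main

import (
	"fmt"
	"io/ioutil"
	"net/http"
)

func main() {
	// Placeholder endpoint: GET /v2/<name>/manifests/<reference>
	req, err := http.NewRequest("GET", "http://localhost:5000/v2/myrepo/manifests/latest", nil)
	if err != nil {
		panic(err)
	}
	// Advertise schema2 and manifest lists; without these the registry
	// falls back to rewriting the manifest in schema1 format.
	req.Header.Add("Accept", "application/vnd.docker.distribution.manifest.v2+json")
	req.Header.Add("Accept", "application/vnd.docker.distribution.manifest.list.v2+json")

	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	body, _ := ioutil.ReadAll(resp.Body)
	fmt.Println(resp.Header.Get("Content-Type"), resp.Header.Get("Docker-Content-Digest"))
	fmt.Println(len(body), "bytes")
}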
+func (imh *imageManifestHandler) DeleteImageManifest(w http.ResponseWriter, r *http.Request) { + ctxu.GetLogger(imh).Debug("DeleteImageManifest") + + manifests, err := imh.Repository.Manifests(imh) + if err != nil { + imh.Errors = append(imh.Errors, err) + return + } + + err = manifests.Delete(imh, imh.Digest) + if err != nil { + switch err { + case digest.ErrDigestUnsupported: + case digest.ErrDigestInvalidFormat: + imh.Errors = append(imh.Errors, v2.ErrorCodeDigestInvalid) + return + case distribution.ErrBlobUnknown: + imh.Errors = append(imh.Errors, v2.ErrorCodeManifestUnknown) + return + case distribution.ErrUnsupported: + imh.Errors = append(imh.Errors, errcode.ErrorCodeUnsupported) + return + default: + imh.Errors = append(imh.Errors, errcode.ErrorCodeUnknown) + return + } + } + + tagService := imh.Repository.Tags(imh) + referencedTags, err := tagService.Lookup(imh, distribution.Descriptor{Digest: imh.Digest}) + if err != nil { + imh.Errors = append(imh.Errors, err) + return + } + + for _, tag := range referencedTags { + if err := tagService.Untag(imh, tag); err != nil { + imh.Errors = append(imh.Errors, err) + return + } + } + + w.WriteHeader(http.StatusAccepted) +} diff --git a/vendor/github.com/docker/distribution/registry/handlers/mail.go b/vendor/github.com/docker/distribution/registry/handlers/mail.go new file mode 100644 index 000000000..39244909d --- /dev/null +++ b/vendor/github.com/docker/distribution/registry/handlers/mail.go @@ -0,0 +1,45 @@ +package handlers + +import ( + "errors" + "net/smtp" + "strings" +) + +// mailer provides fields of email configuration for sending. +type mailer struct { + Addr, Username, Password, From string + Insecure bool + To []string +} + +// sendMail allows users to send email, only if mail parameters is configured correctly. +func (mail *mailer) sendMail(subject, message string) error { + addr := strings.Split(mail.Addr, ":") + if len(addr) != 2 { + return errors.New("Invalid Mail Address") + } + host := addr[0] + msg := []byte("To:" + strings.Join(mail.To, ";") + + "\r\nFrom: " + mail.From + + "\r\nSubject: " + subject + + "\r\nContent-Type: text/plain\r\n\r\n" + + message) + auth := smtp.PlainAuth( + "", + mail.Username, + mail.Password, + host, + ) + err := smtp.SendMail( + mail.Addr, + auth, + mail.From, + mail.To, + []byte(msg), + ) + if err != nil { + return err + } + return nil +} diff --git a/vendor/github.com/docker/distribution/registry/handlers/tags.go b/vendor/github.com/docker/distribution/registry/handlers/tags.go new file mode 100644 index 000000000..91f1031e3 --- /dev/null +++ b/vendor/github.com/docker/distribution/registry/handlers/tags.go @@ -0,0 +1,62 @@ +package handlers + +import ( + "encoding/json" + "net/http" + + "github.com/docker/distribution" + "github.com/docker/distribution/registry/api/errcode" + "github.com/docker/distribution/registry/api/v2" + "github.com/gorilla/handlers" +) + +// tagsDispatcher constructs the tags handler api endpoint. +func tagsDispatcher(ctx *Context, r *http.Request) http.Handler { + tagsHandler := &tagsHandler{ + Context: ctx, + } + + return handlers.MethodHandler{ + "GET": http.HandlerFunc(tagsHandler.GetTags), + } +} + +// tagsHandler handles requests for lists of tags under a repository name. +type tagsHandler struct { + *Context +} + +type tagsAPIResponse struct { + Name string `json:"name"` + Tags []string `json:"tags"` +} + +// GetTags returns a json list of tags for a specific image name. 
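The GetTags handler defined next serializes the tagsAPIResponse above, e.g. {"name": "myrepo", "tags": ["latest", "1.0"]}. A small client-side sketch that decodes it; the endpoint URL and repository name are placeholders:

package main

import (
	"encoding/json"
	"fmt"
	"net/http"
)

type tagsResponse struct {
	Name string   `json:"name"`
	Tags []string `json:"tags"`
}

func main() {
	// Placeholder endpoint: GET /v2/<name>/tags/list
	resp, err := http.Get("http://localhost:5000/v2/myrepo/tags/list")
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	var tr tagsResponse
	if err := json.NewDecoder(resp.Body).Decode(&tr); err != nil {
		panic(err)
	}
	fmt.Printf("%s has %d tags: %v\n", tr.Name, len(tr.Tags), tr.Tags)
}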
+func (th *tagsHandler) GetTags(w http.ResponseWriter, r *http.Request) { + defer r.Body.Close() + + tagService := th.Repository.Tags(th) + tags, err := tagService.All(th) + if err != nil { + switch err := err.(type) { + case distribution.ErrRepositoryUnknown: + th.Errors = append(th.Errors, v2.ErrorCodeNameUnknown.WithDetail(map[string]string{"name": th.Repository.Named().Name()})) + case errcode.Error: + th.Errors = append(th.Errors, err) + default: + th.Errors = append(th.Errors, errcode.ErrorCodeUnknown.WithDetail(err)) + } + return + } + + w.Header().Set("Content-Type", "application/json; charset=utf-8") + + enc := json.NewEncoder(w) + if err := enc.Encode(tagsAPIResponse{ + Name: th.Repository.Named().Name(), + Tags: tags, + }); err != nil { + th.Errors = append(th.Errors, errcode.ErrorCodeUnknown.WithDetail(err)) + return + } +} diff --git a/vendor/github.com/docker/distribution/registry/listener/listener.go b/vendor/github.com/docker/distribution/registry/listener/listener.go new file mode 100644 index 000000000..b93a7a63f --- /dev/null +++ b/vendor/github.com/docker/distribution/registry/listener/listener.go @@ -0,0 +1,74 @@ +package listener + +import ( + "fmt" + "net" + "os" + "time" +) + +// tcpKeepAliveListener sets TCP keep-alive timeouts on accepted +// connections. It's used by ListenAndServe and ListenAndServeTLS so +// dead TCP connections (e.g. closing laptop mid-download) eventually +// go away. +// it is a plain copy-paste from net/http/server.go +type tcpKeepAliveListener struct { + *net.TCPListener +} + +func (ln tcpKeepAliveListener) Accept() (c net.Conn, err error) { + tc, err := ln.AcceptTCP() + if err != nil { + return + } + tc.SetKeepAlive(true) + tc.SetKeepAlivePeriod(3 * time.Minute) + return tc, nil +} + +// NewListener announces on laddr and net. Accepted values of the net are +// 'unix' and 'tcp' +func NewListener(net, laddr string) (net.Listener, error) { + switch net { + case "unix": + return newUnixListener(laddr) + case "tcp", "": // an empty net means tcp + return newTCPListener(laddr) + default: + return nil, fmt.Errorf("unknown address type %s", net) + } +} + +func newUnixListener(laddr string) (net.Listener, error) { + fi, err := os.Stat(laddr) + if err == nil { + // the file exists. + // try to remove it if it's a socket + if !isSocket(fi.Mode()) { + return nil, fmt.Errorf("file %s exists and is not a socket", laddr) + } + + if err := os.Remove(laddr); err != nil { + return nil, err + } + } else if !os.IsNotExist(err) { + // we can't do stat on the file. 
+ // it means we can not remove it + return nil, err + } + + return net.Listen("unix", laddr) +} + +func isSocket(m os.FileMode) bool { + return m&os.ModeSocket != 0 +} + +func newTCPListener(laddr string) (net.Listener, error) { + ln, err := net.Listen("tcp", laddr) + if err != nil { + return nil, err + } + + return tcpKeepAliveListener{ln.(*net.TCPListener)}, nil +} diff --git a/vendor/github.com/docker/distribution/registry/middleware/registry/middleware.go b/vendor/github.com/docker/distribution/registry/middleware/registry/middleware.go new file mode 100644 index 000000000..3e6e5cc71 --- /dev/null +++ b/vendor/github.com/docker/distribution/registry/middleware/registry/middleware.go @@ -0,0 +1,54 @@ +package middleware + +import ( + "fmt" + + "github.com/docker/distribution" + "github.com/docker/distribution/context" + "github.com/docker/distribution/registry/storage" +) + +// InitFunc is the type of a RegistryMiddleware factory function and is +// used to register the constructor for different RegistryMiddleware backends. +type InitFunc func(ctx context.Context, registry distribution.Namespace, options map[string]interface{}) (distribution.Namespace, error) + +var middlewares map[string]InitFunc +var registryoptions []storage.RegistryOption + +// Register is used to register an InitFunc for +// a RegistryMiddleware backend with the given name. +func Register(name string, initFunc InitFunc) error { + if middlewares == nil { + middlewares = make(map[string]InitFunc) + } + if _, exists := middlewares[name]; exists { + return fmt.Errorf("name already registered: %s", name) + } + + middlewares[name] = initFunc + + return nil +} + +// Get constructs a RegistryMiddleware with the given options using the named backend. +func Get(ctx context.Context, name string, options map[string]interface{}, registry distribution.Namespace) (distribution.Namespace, error) { + if middlewares != nil { + if initFunc, exists := middlewares[name]; exists { + return initFunc(ctx, registry, options) + } + } + + return nil, fmt.Errorf("no registry middleware registered with name: %s", name) +} + +// RegisterOptions adds more options to RegistryOption list. Options get applied before +// any other configuration-based options. +func RegisterOptions(options ...storage.RegistryOption) error { + registryoptions = append(registryoptions, options...) + return nil +} + +// GetRegistryOptions returns list of RegistryOption. +func GetRegistryOptions() []storage.RegistryOption { + return registryoptions +} diff --git a/vendor/github.com/docker/distribution/registry/middleware/repository/middleware.go b/vendor/github.com/docker/distribution/registry/middleware/repository/middleware.go new file mode 100644 index 000000000..27b42aecf --- /dev/null +++ b/vendor/github.com/docker/distribution/registry/middleware/repository/middleware.go @@ -0,0 +1,40 @@ +package middleware + +import ( + "fmt" + + "github.com/docker/distribution" + "github.com/docker/distribution/context" +) + +// InitFunc is the type of a RepositoryMiddleware factory function and is +// used to register the constructor for different RepositoryMiddleware backends. +type InitFunc func(ctx context.Context, repository distribution.Repository, options map[string]interface{}) (distribution.Repository, error) + +var middlewares map[string]InitFunc + +// Register is used to register an InitFunc for +// a RepositoryMiddleware backend with the given name. 
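Register, defined next, is normally called once per backend from a package init function, so importing the middleware package for its side effects is enough to make it selectable by name in the registry configuration. A hedged sketch of such a registration; the package, the "audit" name and the wrapper type are illustrative and not part of this diff:

package mymiddleware

import (
	"github.com/docker/distribution"
	"github.com/docker/distribution/context"
	repositorymiddleware "github.com/docker/distribution/registry/middleware/repository"
)

// auditedRepository is a hypothetical wrapper that could intercept repository calls.
type auditedRepository struct {
	distribution.Repository
}

// newAudited satisfies the repository middleware InitFunc signature; options come
// straight from the middleware section of the registry configuration.
func newAudited(ctx context.Context, repo distribution.Repository, options map[string]interface{}) (distribution.Repository, error) {
	return &auditedRepository{Repository: repo}, nil
}

func init() {
	// Panicking on a duplicate name keeps misconfiguration loud at startup.
	if err := repositorymiddleware.Register("audit", newAudited); err != nil {
		panic(err)
	}
}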
+func Register(name string, initFunc InitFunc) error { + if middlewares == nil { + middlewares = make(map[string]InitFunc) + } + if _, exists := middlewares[name]; exists { + return fmt.Errorf("name already registered: %s", name) + } + + middlewares[name] = initFunc + + return nil +} + +// Get constructs a RepositoryMiddleware with the given options using the named backend. +func Get(ctx context.Context, name string, options map[string]interface{}, repository distribution.Repository) (distribution.Repository, error) { + if middlewares != nil { + if initFunc, exists := middlewares[name]; exists { + return initFunc(ctx, repository, options) + } + } + + return nil, fmt.Errorf("no repository middleware registered with name: %s", name) +} diff --git a/vendor/github.com/docker/distribution/registry/proxy/proxyauth.go b/vendor/github.com/docker/distribution/registry/proxy/proxyauth.go new file mode 100644 index 000000000..7b405afcf --- /dev/null +++ b/vendor/github.com/docker/distribution/registry/proxy/proxyauth.go @@ -0,0 +1,87 @@ +package proxy + +import ( + "net/http" + "net/url" + "strings" + + "github.com/docker/distribution/context" + "github.com/docker/distribution/registry/client/auth" + "github.com/docker/distribution/registry/client/auth/challenge" +) + +const challengeHeader = "Docker-Distribution-Api-Version" + +type userpass struct { + username string + password string +} + +type credentials struct { + creds map[string]userpass +} + +func (c credentials) Basic(u *url.URL) (string, string) { + up := c.creds[u.String()] + + return up.username, up.password +} + +func (c credentials) RefreshToken(u *url.URL, service string) string { + return "" +} + +func (c credentials) SetRefreshToken(u *url.URL, service, token string) { +} + +// configureAuth stores credentials for challenge responses +func configureAuth(username, password, remoteURL string) (auth.CredentialStore, error) { + creds := map[string]userpass{} + + authURLs, err := getAuthURLs(remoteURL) + if err != nil { + return nil, err + } + + for _, url := range authURLs { + context.GetLogger(context.Background()).Infof("Discovered token authentication URL: %s", url) + creds[url] = userpass{ + username: username, + password: password, + } + } + + return credentials{creds: creds}, nil +} + +func getAuthURLs(remoteURL string) ([]string, error) { + authURLs := []string{} + + resp, err := http.Get(remoteURL + "/v2/") + if err != nil { + return nil, err + } + defer resp.Body.Close() + + for _, c := range challenge.ResponseChallenges(resp) { + if strings.EqualFold(c.Scheme, "bearer") { + authURLs = append(authURLs, c.Parameters["realm"]) + } + } + + return authURLs, nil +} + +func ping(manager challenge.Manager, endpoint, versionHeader string) error { + resp, err := http.Get(endpoint) + if err != nil { + return err + } + defer resp.Body.Close() + + if err := manager.AddResponse(resp); err != nil { + return err + } + + return nil +} diff --git a/vendor/github.com/docker/distribution/registry/proxy/proxyblobstore.go b/vendor/github.com/docker/distribution/registry/proxy/proxyblobstore.go new file mode 100644 index 000000000..6cd572133 --- /dev/null +++ b/vendor/github.com/docker/distribution/registry/proxy/proxyblobstore.go @@ -0,0 +1,224 @@ +package proxy + +import ( + "io" + "net/http" + "strconv" + "sync" + "time" + + "github.com/docker/distribution" + "github.com/docker/distribution/context" + "github.com/docker/distribution/digest" + "github.com/docker/distribution/reference" + "github.com/docker/distribution/registry/proxy/scheduler" +) 
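A minimal, self-contained sketch (not part of the vendored patch) of the challenge discovery that getAuthURLs above performs: probe a remote registry's /v2/ endpoint and collect the bearer token realms from its WWW-Authenticate challenges. The registry URL is a placeholder assumed for illustration; the only library call relied on is the challenge package already imported by the vendored proxyauth.go above.

package main

import (
	"fmt"
	"net/http"
	"strings"

	"github.com/docker/distribution/registry/client/auth/challenge"
)

func main() {
	// An unauthenticated probe; a token-protected registry answers 401 with
	// one or more WWW-Authenticate challenges.
	resp, err := http.Get("https://registry.example.com/v2/") // hypothetical remote
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	// ResponseChallenges parses the WWW-Authenticate headers of the response,
	// mirroring the loop in getAuthURLs.
	for _, c := range challenge.ResponseChallenges(resp) {
		if strings.EqualFold(c.Scheme, "bearer") {
			fmt.Println("token realm:", c.Parameters["realm"])
		}
	}
}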
+ +// todo(richardscothern): from cache control header or config file +const blobTTL = time.Duration(24 * 7 * time.Hour) + +type proxyBlobStore struct { + localStore distribution.BlobStore + remoteStore distribution.BlobService + scheduler *scheduler.TTLExpirationScheduler + repositoryName reference.Named + authChallenger authChallenger +} + +var _ distribution.BlobStore = &proxyBlobStore{} + +// inflight tracks currently downloading blobs +var inflight = make(map[digest.Digest]struct{}) + +// mu protects inflight +var mu sync.Mutex + +func setResponseHeaders(w http.ResponseWriter, length int64, mediaType string, digest digest.Digest) { + w.Header().Set("Content-Length", strconv.FormatInt(length, 10)) + w.Header().Set("Content-Type", mediaType) + w.Header().Set("Docker-Content-Digest", digest.String()) + w.Header().Set("Etag", digest.String()) +} + +func (pbs *proxyBlobStore) copyContent(ctx context.Context, dgst digest.Digest, writer io.Writer) (distribution.Descriptor, error) { + desc, err := pbs.remoteStore.Stat(ctx, dgst) + if err != nil { + return distribution.Descriptor{}, err + } + + if w, ok := writer.(http.ResponseWriter); ok { + setResponseHeaders(w, desc.Size, desc.MediaType, dgst) + } + + remoteReader, err := pbs.remoteStore.Open(ctx, dgst) + if err != nil { + return distribution.Descriptor{}, err + } + + defer remoteReader.Close() + + _, err = io.CopyN(writer, remoteReader, desc.Size) + if err != nil { + return distribution.Descriptor{}, err + } + + proxyMetrics.BlobPush(uint64(desc.Size)) + + return desc, nil +} + +func (pbs *proxyBlobStore) serveLocal(ctx context.Context, w http.ResponseWriter, r *http.Request, dgst digest.Digest) (bool, error) { + localDesc, err := pbs.localStore.Stat(ctx, dgst) + if err != nil { + // Stat can report a zero sized file here if it's checked between creation + // and population. 
Return nil error, and continue + return false, nil + } + + if err == nil { + proxyMetrics.BlobPush(uint64(localDesc.Size)) + return true, pbs.localStore.ServeBlob(ctx, w, r, dgst) + } + + return false, nil + +} + +func (pbs *proxyBlobStore) storeLocal(ctx context.Context, dgst digest.Digest) error { + defer func() { + mu.Lock() + delete(inflight, dgst) + mu.Unlock() + }() + + var desc distribution.Descriptor + var err error + var bw distribution.BlobWriter + + bw, err = pbs.localStore.Create(ctx) + if err != nil { + return err + } + + desc, err = pbs.copyContent(ctx, dgst, bw) + if err != nil { + return err + } + + _, err = bw.Commit(ctx, desc) + if err != nil { + return err + } + + return nil +} + +func (pbs *proxyBlobStore) ServeBlob(ctx context.Context, w http.ResponseWriter, r *http.Request, dgst digest.Digest) error { + served, err := pbs.serveLocal(ctx, w, r, dgst) + if err != nil { + context.GetLogger(ctx).Errorf("Error serving blob from local storage: %s", err.Error()) + return err + } + + if served { + return nil + } + + if err := pbs.authChallenger.tryEstablishChallenges(ctx); err != nil { + return err + } + + mu.Lock() + _, ok := inflight[dgst] + if ok { + mu.Unlock() + _, err := pbs.copyContent(ctx, dgst, w) + return err + } + inflight[dgst] = struct{}{} + mu.Unlock() + + go func(dgst digest.Digest) { + if err := pbs.storeLocal(ctx, dgst); err != nil { + context.GetLogger(ctx).Errorf("Error committing to storage: %s", err.Error()) + } + + blobRef, err := reference.WithDigest(pbs.repositoryName, dgst) + if err != nil { + context.GetLogger(ctx).Errorf("Error creating reference: %s", err) + return + } + + pbs.scheduler.AddBlob(blobRef, repositoryTTL) + }(dgst) + + _, err = pbs.copyContent(ctx, dgst, w) + if err != nil { + return err + } + return nil +} + +func (pbs *proxyBlobStore) Stat(ctx context.Context, dgst digest.Digest) (distribution.Descriptor, error) { + desc, err := pbs.localStore.Stat(ctx, dgst) + if err == nil { + return desc, err + } + + if err != distribution.ErrBlobUnknown { + return distribution.Descriptor{}, err + } + + if err := pbs.authChallenger.tryEstablishChallenges(ctx); err != nil { + return distribution.Descriptor{}, err + } + + return pbs.remoteStore.Stat(ctx, dgst) +} + +func (pbs *proxyBlobStore) Get(ctx context.Context, dgst digest.Digest) ([]byte, error) { + blob, err := pbs.localStore.Get(ctx, dgst) + if err == nil { + return blob, nil + } + + if err := pbs.authChallenger.tryEstablishChallenges(ctx); err != nil { + return []byte{}, err + } + + blob, err = pbs.remoteStore.Get(ctx, dgst) + if err != nil { + return []byte{}, err + } + + _, err = pbs.localStore.Put(ctx, "", blob) + if err != nil { + return []byte{}, err + } + return blob, nil +} + +// Unsupported functions +func (pbs *proxyBlobStore) Put(ctx context.Context, mediaType string, p []byte) (distribution.Descriptor, error) { + return distribution.Descriptor{}, distribution.ErrUnsupported +} + +func (pbs *proxyBlobStore) Create(ctx context.Context, options ...distribution.BlobCreateOption) (distribution.BlobWriter, error) { + return nil, distribution.ErrUnsupported +} + +func (pbs *proxyBlobStore) Resume(ctx context.Context, id string) (distribution.BlobWriter, error) { + return nil, distribution.ErrUnsupported +} + +func (pbs *proxyBlobStore) Mount(ctx context.Context, sourceRepo reference.Named, dgst digest.Digest) (distribution.Descriptor, error) { + return distribution.Descriptor{}, distribution.ErrUnsupported +} + +func (pbs *proxyBlobStore) Open(ctx context.Context, dgst digest.Digest) 
(distribution.ReadSeekCloser, error) { + return nil, distribution.ErrUnsupported +} + +func (pbs *proxyBlobStore) Delete(ctx context.Context, dgst digest.Digest) error { + return distribution.ErrUnsupported +} diff --git a/vendor/github.com/docker/distribution/registry/proxy/proxyblobstore_test.go b/vendor/github.com/docker/distribution/registry/proxy/proxyblobstore_test.go new file mode 100644 index 000000000..8e3a06920 --- /dev/null +++ b/vendor/github.com/docker/distribution/registry/proxy/proxyblobstore_test.go @@ -0,0 +1,416 @@ +package proxy + +import ( + "io/ioutil" + "math/rand" + "net/http" + "net/http/httptest" + "sync" + "testing" + "time" + + "github.com/docker/distribution" + "github.com/docker/distribution/context" + "github.com/docker/distribution/digest" + "github.com/docker/distribution/reference" + "github.com/docker/distribution/registry/proxy/scheduler" + "github.com/docker/distribution/registry/storage" + "github.com/docker/distribution/registry/storage/cache/memory" + "github.com/docker/distribution/registry/storage/driver/filesystem" + "github.com/docker/distribution/registry/storage/driver/inmemory" +) + +var sbsMu sync.Mutex + +type statsBlobStore struct { + stats map[string]int + blobs distribution.BlobStore +} + +func (sbs statsBlobStore) Put(ctx context.Context, mediaType string, p []byte) (distribution.Descriptor, error) { + sbsMu.Lock() + sbs.stats["put"]++ + sbsMu.Unlock() + + return sbs.blobs.Put(ctx, mediaType, p) +} + +func (sbs statsBlobStore) Get(ctx context.Context, dgst digest.Digest) ([]byte, error) { + sbsMu.Lock() + sbs.stats["get"]++ + sbsMu.Unlock() + + return sbs.blobs.Get(ctx, dgst) +} + +func (sbs statsBlobStore) Create(ctx context.Context, options ...distribution.BlobCreateOption) (distribution.BlobWriter, error) { + sbsMu.Lock() + sbs.stats["create"]++ + sbsMu.Unlock() + + return sbs.blobs.Create(ctx, options...) 
+} + +func (sbs statsBlobStore) Resume(ctx context.Context, id string) (distribution.BlobWriter, error) { + sbsMu.Lock() + sbs.stats["resume"]++ + sbsMu.Unlock() + + return sbs.blobs.Resume(ctx, id) +} + +func (sbs statsBlobStore) Open(ctx context.Context, dgst digest.Digest) (distribution.ReadSeekCloser, error) { + sbsMu.Lock() + sbs.stats["open"]++ + sbsMu.Unlock() + + return sbs.blobs.Open(ctx, dgst) +} + +func (sbs statsBlobStore) ServeBlob(ctx context.Context, w http.ResponseWriter, r *http.Request, dgst digest.Digest) error { + sbsMu.Lock() + sbs.stats["serveblob"]++ + sbsMu.Unlock() + + return sbs.blobs.ServeBlob(ctx, w, r, dgst) +} + +func (sbs statsBlobStore) Stat(ctx context.Context, dgst digest.Digest) (distribution.Descriptor, error) { + + sbsMu.Lock() + sbs.stats["stat"]++ + sbsMu.Unlock() + + return sbs.blobs.Stat(ctx, dgst) +} + +func (sbs statsBlobStore) Delete(ctx context.Context, dgst digest.Digest) error { + sbsMu.Lock() + sbs.stats["delete"]++ + sbsMu.Unlock() + + return sbs.blobs.Delete(ctx, dgst) +} + +type testEnv struct { + numUnique int + inRemote []distribution.Descriptor + store proxyBlobStore + ctx context.Context +} + +func (te *testEnv) LocalStats() *map[string]int { + sbsMu.Lock() + ls := te.store.localStore.(statsBlobStore).stats + sbsMu.Unlock() + return &ls +} + +func (te *testEnv) RemoteStats() *map[string]int { + sbsMu.Lock() + rs := te.store.remoteStore.(statsBlobStore).stats + sbsMu.Unlock() + return &rs +} + +// Populate remote store and record the digests +func makeTestEnv(t *testing.T, name string) *testEnv { + nameRef, err := reference.ParseNamed(name) + if err != nil { + t.Fatalf("unable to parse reference: %s", err) + } + + ctx := context.Background() + + truthDir, err := ioutil.TempDir("", "truth") + if err != nil { + t.Fatalf("unable to create tempdir: %s", err) + } + + cacheDir, err := ioutil.TempDir("", "cache") + if err != nil { + t.Fatalf("unable to create tempdir: %s", err) + } + + localDriver, err := filesystem.FromParameters(map[string]interface{}{ + "rootdirectory": truthDir, + }) + if err != nil { + t.Fatalf("unable to create filesystem driver: %s", err) + } + + // todo: create a tempfile area here + localRegistry, err := storage.NewRegistry(ctx, localDriver, storage.BlobDescriptorCacheProvider(memory.NewInMemoryBlobDescriptorCacheProvider()), storage.EnableRedirect, storage.DisableDigestResumption) + if err != nil { + t.Fatalf("error creating registry: %v", err) + } + localRepo, err := localRegistry.Repository(ctx, nameRef) + if err != nil { + t.Fatalf("unexpected error getting repo: %v", err) + } + + cacheDriver, err := filesystem.FromParameters(map[string]interface{}{ + "rootdirectory": cacheDir, + }) + if err != nil { + t.Fatalf("unable to create filesystem driver: %s", err) + } + + truthRegistry, err := storage.NewRegistry(ctx, cacheDriver, storage.BlobDescriptorCacheProvider(memory.NewInMemoryBlobDescriptorCacheProvider())) + if err != nil { + t.Fatalf("error creating registry: %v", err) + } + truthRepo, err := truthRegistry.Repository(ctx, nameRef) + if err != nil { + t.Fatalf("unexpected error getting repo: %v", err) + } + + truthBlobs := statsBlobStore{ + stats: make(map[string]int), + blobs: truthRepo.Blobs(ctx), + } + + localBlobs := statsBlobStore{ + stats: make(map[string]int), + blobs: localRepo.Blobs(ctx), + } + + s := scheduler.New(ctx, inmemory.New(), "/scheduler-state.json") + + proxyBlobStore := proxyBlobStore{ + repositoryName: nameRef, + remoteStore: truthBlobs, + localStore: localBlobs, + scheduler: s, + 
authChallenger: &mockChallenger{}, + } + + te := &testEnv{ + store: proxyBlobStore, + ctx: ctx, + } + return te +} + +func makeBlob(size int) []byte { + blob := make([]byte, size, size) + for i := 0; i < size; i++ { + blob[i] = byte('A' + rand.Int()%48) + } + return blob +} + +func init() { + rand.Seed(42) +} + +func perm(m []distribution.Descriptor) []distribution.Descriptor { + for i := 0; i < len(m); i++ { + j := rand.Intn(i + 1) + tmp := m[i] + m[i] = m[j] + m[j] = tmp + } + return m +} + +func populate(t *testing.T, te *testEnv, blobCount, size, numUnique int) { + var inRemote []distribution.Descriptor + + for i := 0; i < numUnique; i++ { + bytes := makeBlob(size) + for j := 0; j < blobCount/numUnique; j++ { + desc, err := te.store.remoteStore.Put(te.ctx, "", bytes) + if err != nil { + t.Fatalf("Put in store") + } + + inRemote = append(inRemote, desc) + } + } + + te.inRemote = inRemote + te.numUnique = numUnique +} +func TestProxyStoreGet(t *testing.T) { + te := makeTestEnv(t, "foo/bar") + + localStats := te.LocalStats() + remoteStats := te.RemoteStats() + + populate(t, te, 1, 10, 1) + _, err := te.store.Get(te.ctx, te.inRemote[0].Digest) + if err != nil { + t.Fatal(err) + } + + if (*localStats)["get"] != 1 && (*localStats)["put"] != 1 { + t.Errorf("Unexpected local counts") + } + + if (*remoteStats)["get"] != 1 { + t.Errorf("Unexpected remote get count") + } + + _, err = te.store.Get(te.ctx, te.inRemote[0].Digest) + if err != nil { + t.Fatal(err) + } + + if (*localStats)["get"] != 2 && (*localStats)["put"] != 1 { + t.Errorf("Unexpected local counts") + } + + if (*remoteStats)["get"] != 1 { + t.Errorf("Unexpected remote get count") + } + +} + +func TestProxyStoreStat(t *testing.T) { + te := makeTestEnv(t, "foo/bar") + + remoteBlobCount := 1 + populate(t, te, remoteBlobCount, 10, 1) + + localStats := te.LocalStats() + remoteStats := te.RemoteStats() + + // Stat - touches both stores + for _, d := range te.inRemote { + _, err := te.store.Stat(te.ctx, d.Digest) + if err != nil { + t.Fatalf("Error stating proxy store") + } + } + + if (*localStats)["stat"] != remoteBlobCount { + t.Errorf("Unexpected local stat count") + } + + if (*remoteStats)["stat"] != remoteBlobCount { + t.Errorf("Unexpected remote stat count") + } + + if te.store.authChallenger.(*mockChallenger).count != len(te.inRemote) { + t.Fatalf("Unexpected auth challenge count, got %#v", te.store.authChallenger) + } + +} + +func TestProxyStoreServeHighConcurrency(t *testing.T) { + te := makeTestEnv(t, "foo/bar") + blobSize := 200 + blobCount := 10 + numUnique := 1 + populate(t, te, blobCount, blobSize, numUnique) + + numClients := 16 + testProxyStoreServe(t, te, numClients) +} + +func TestProxyStoreServeMany(t *testing.T) { + te := makeTestEnv(t, "foo/bar") + blobSize := 200 + blobCount := 10 + numUnique := 4 + populate(t, te, blobCount, blobSize, numUnique) + + numClients := 4 + testProxyStoreServe(t, te, numClients) +} + +// todo(richardscothern): blobCount must be smaller than num clients +func TestProxyStoreServeBig(t *testing.T) { + te := makeTestEnv(t, "foo/bar") + + blobSize := 2 << 20 + blobCount := 4 + numUnique := 2 + populate(t, te, blobCount, blobSize, numUnique) + + numClients := 4 + testProxyStoreServe(t, te, numClients) +} + +// testProxyStoreServe will create clients to consume all blobs +// populated in the truth store +func testProxyStoreServe(t *testing.T, te *testEnv, numClients int) { + localStats := te.LocalStats() + remoteStats := te.RemoteStats() + + var wg sync.WaitGroup + + for i := 0; i < numClients; 
i++ { + // Serveblob - pulls through blobs + wg.Add(1) + go func() { + defer wg.Done() + for _, remoteBlob := range te.inRemote { + w := httptest.NewRecorder() + r, err := http.NewRequest("GET", "", nil) + if err != nil { + t.Fatal(err) + } + + err = te.store.ServeBlob(te.ctx, w, r, remoteBlob.Digest) + if err != nil { + t.Fatalf(err.Error()) + } + + bodyBytes := w.Body.Bytes() + localDigest := digest.FromBytes(bodyBytes) + if localDigest != remoteBlob.Digest { + t.Fatalf("Mismatching blob fetch from proxy") + } + } + }() + } + + wg.Wait() + + remoteBlobCount := len(te.inRemote) + sbsMu.Lock() + if (*localStats)["stat"] != remoteBlobCount*numClients && (*localStats)["create"] != te.numUnique { + sbsMu.Unlock() + t.Fatal("Expected: stat:", remoteBlobCount*numClients, "create:", remoteBlobCount) + } + sbsMu.Unlock() + + // Wait for any async storage goroutines to finish + time.Sleep(3 * time.Second) + + sbsMu.Lock() + remoteStatCount := (*remoteStats)["stat"] + remoteOpenCount := (*remoteStats)["open"] + sbsMu.Unlock() + + // Serveblob - blobs come from local + for _, dr := range te.inRemote { + w := httptest.NewRecorder() + r, err := http.NewRequest("GET", "", nil) + if err != nil { + t.Fatal(err) + } + + err = te.store.ServeBlob(te.ctx, w, r, dr.Digest) + if err != nil { + t.Fatalf(err.Error()) + } + + dl := digest.FromBytes(w.Body.Bytes()) + if dl != dr.Digest { + t.Errorf("Mismatching blob fetch from proxy") + } + } + + localStats = te.LocalStats() + remoteStats = te.RemoteStats() + + // Ensure remote unchanged + sbsMu.Lock() + defer sbsMu.Unlock() + if (*remoteStats)["stat"] != remoteStatCount && (*remoteStats)["open"] != remoteOpenCount { + t.Fatalf("unexpected remote stats: %#v", remoteStats) + } +} diff --git a/vendor/github.com/docker/distribution/registry/proxy/proxymanifeststore.go b/vendor/github.com/docker/distribution/registry/proxy/proxymanifeststore.go new file mode 100644 index 000000000..f08e285db --- /dev/null +++ b/vendor/github.com/docker/distribution/registry/proxy/proxymanifeststore.go @@ -0,0 +1,95 @@ +package proxy + +import ( + "time" + + "github.com/docker/distribution" + "github.com/docker/distribution/context" + "github.com/docker/distribution/digest" + "github.com/docker/distribution/reference" + "github.com/docker/distribution/registry/proxy/scheduler" +) + +// todo(richardscothern): from cache control header or config +const repositoryTTL = time.Duration(24 * 7 * time.Hour) + +type proxyManifestStore struct { + ctx context.Context + localManifests distribution.ManifestService + remoteManifests distribution.ManifestService + repositoryName reference.Named + scheduler *scheduler.TTLExpirationScheduler + authChallenger authChallenger +} + +var _ distribution.ManifestService = &proxyManifestStore{} + +func (pms proxyManifestStore) Exists(ctx context.Context, dgst digest.Digest) (bool, error) { + exists, err := pms.localManifests.Exists(ctx, dgst) + if err != nil { + return false, err + } + if exists { + return true, nil + } + if err := pms.authChallenger.tryEstablishChallenges(ctx); err != nil { + return false, err + } + return pms.remoteManifests.Exists(ctx, dgst) +} + +func (pms proxyManifestStore) Get(ctx context.Context, dgst digest.Digest, options ...distribution.ManifestServiceOption) (distribution.Manifest, error) { + // At this point `dgst` was either specified explicitly, or returned by the + // tagstore with the most recent association. + var fromRemote bool + manifest, err := pms.localManifests.Get(ctx, dgst, options...) 
+ if err != nil { + if err := pms.authChallenger.tryEstablishChallenges(ctx); err != nil { + return nil, err + } + + manifest, err = pms.remoteManifests.Get(ctx, dgst, options...) + if err != nil { + return nil, err + } + fromRemote = true + } + + _, payload, err := manifest.Payload() + if err != nil { + return nil, err + } + + proxyMetrics.ManifestPush(uint64(len(payload))) + if fromRemote { + proxyMetrics.ManifestPull(uint64(len(payload))) + + _, err = pms.localManifests.Put(ctx, manifest) + if err != nil { + return nil, err + } + + // Schedule the manifest blob for removal + repoBlob, err := reference.WithDigest(pms.repositoryName, dgst) + if err != nil { + context.GetLogger(ctx).Errorf("Error creating reference: %s", err) + return nil, err + } + + pms.scheduler.AddManifest(repoBlob, repositoryTTL) + // Ensure the manifest blob is cleaned up + //pms.scheduler.AddBlob(blobRef, repositoryTTL) + + } + + return manifest, err +} + +func (pms proxyManifestStore) Put(ctx context.Context, manifest distribution.Manifest, options ...distribution.ManifestServiceOption) (digest.Digest, error) { + var d digest.Digest + return d, distribution.ErrUnsupported +} + +func (pms proxyManifestStore) Delete(ctx context.Context, dgst digest.Digest) error { + return distribution.ErrUnsupported +} diff --git a/vendor/github.com/docker/distribution/registry/proxy/proxymanifeststore_test.go b/vendor/github.com/docker/distribution/registry/proxy/proxymanifeststore_test.go new file mode 100644 index 000000000..067e845a2 --- /dev/null +++ b/vendor/github.com/docker/distribution/registry/proxy/proxymanifeststore_test.go @@ -0,0 +1,275 @@ +package proxy + +import ( + "io" + "sync" + "testing" + + "github.com/docker/distribution" + "github.com/docker/distribution/context" + "github.com/docker/distribution/digest" + "github.com/docker/distribution/manifest" + "github.com/docker/distribution/manifest/schema1" + "github.com/docker/distribution/reference" + "github.com/docker/distribution/registry/client/auth" + "github.com/docker/distribution/registry/client/auth/challenge" + "github.com/docker/distribution/registry/proxy/scheduler" + "github.com/docker/distribution/registry/storage" + "github.com/docker/distribution/registry/storage/cache/memory" + "github.com/docker/distribution/registry/storage/driver/inmemory" + "github.com/docker/distribution/testutil" + "github.com/docker/libtrust" +) + +type statsManifest struct { + manifests distribution.ManifestService + stats map[string]int +} + +type manifestStoreTestEnv struct { + manifestDigest digest.Digest // digest of the signed manifest in the local storage + manifests proxyManifestStore +} + +func (te manifestStoreTestEnv) LocalStats() *map[string]int { + ls := te.manifests.localManifests.(statsManifest).stats + return &ls +} + +func (te manifestStoreTestEnv) RemoteStats() *map[string]int { + rs := te.manifests.remoteManifests.(statsManifest).stats + return &rs +} + +func (sm statsManifest) Delete(ctx context.Context, dgst digest.Digest) error { + sm.stats["delete"]++ + return sm.manifests.Delete(ctx, dgst) +} + +func (sm statsManifest) Exists(ctx context.Context, dgst digest.Digest) (bool, error) { + sm.stats["exists"]++ + return sm.manifests.Exists(ctx, dgst) +} + +func (sm statsManifest) Get(ctx context.Context, dgst digest.Digest, options ...distribution.ManifestServiceOption) (distribution.Manifest, error) { + sm.stats["get"]++ + return sm.manifests.Get(ctx, dgst) +} + +func (sm statsManifest) Put(ctx context.Context, manifest distribution.Manifest, options 
...distribution.ManifestServiceOption) (digest.Digest, error) { + sm.stats["put"]++ + return sm.manifests.Put(ctx, manifest) +} + +type mockChallenger struct { + sync.Mutex + count int +} + +// Called for remote operations only +func (m *mockChallenger) tryEstablishChallenges(context.Context) error { + m.Lock() + defer m.Unlock() + m.count++ + return nil +} + +func (m *mockChallenger) credentialStore() auth.CredentialStore { + return nil +} + +func (m *mockChallenger) challengeManager() challenge.Manager { + return nil +} + +func newManifestStoreTestEnv(t *testing.T, name, tag string) *manifestStoreTestEnv { + nameRef, err := reference.ParseNamed(name) + if err != nil { + t.Fatalf("unable to parse reference: %s", err) + } + k, err := libtrust.GenerateECP256PrivateKey() + if err != nil { + t.Fatal(err) + } + + ctx := context.Background() + truthRegistry, err := storage.NewRegistry(ctx, inmemory.New(), + storage.BlobDescriptorCacheProvider(memory.NewInMemoryBlobDescriptorCacheProvider()), + storage.Schema1SigningKey(k)) + if err != nil { + t.Fatalf("error creating registry: %v", err) + } + truthRepo, err := truthRegistry.Repository(ctx, nameRef) + if err != nil { + t.Fatalf("unexpected error getting repo: %v", err) + } + tr, err := truthRepo.Manifests(ctx) + if err != nil { + t.Fatal(err.Error()) + } + truthManifests := statsManifest{ + manifests: tr, + stats: make(map[string]int), + } + + manifestDigest, err := populateRepo(ctx, t, truthRepo, name, tag) + if err != nil { + t.Fatalf(err.Error()) + } + + localRegistry, err := storage.NewRegistry(ctx, inmemory.New(), storage.BlobDescriptorCacheProvider(memory.NewInMemoryBlobDescriptorCacheProvider()), storage.EnableRedirect, storage.DisableDigestResumption, storage.Schema1SigningKey(k)) + if err != nil { + t.Fatalf("error creating registry: %v", err) + } + localRepo, err := localRegistry.Repository(ctx, nameRef) + if err != nil { + t.Fatalf("unexpected error getting repo: %v", err) + } + lr, err := localRepo.Manifests(ctx) + if err != nil { + t.Fatal(err.Error()) + } + + localManifests := statsManifest{ + manifests: lr, + stats: make(map[string]int), + } + + s := scheduler.New(ctx, inmemory.New(), "/scheduler-state.json") + return &manifestStoreTestEnv{ + manifestDigest: manifestDigest, + manifests: proxyManifestStore{ + ctx: ctx, + localManifests: localManifests, + remoteManifests: truthManifests, + scheduler: s, + repositoryName: nameRef, + authChallenger: &mockChallenger{}, + }, + } +} + +func populateRepo(ctx context.Context, t *testing.T, repository distribution.Repository, name, tag string) (digest.Digest, error) { + m := schema1.Manifest{ + Versioned: manifest.Versioned{ + SchemaVersion: 1, + }, + Name: name, + Tag: tag, + } + + for i := 0; i < 2; i++ { + wr, err := repository.Blobs(ctx).Create(ctx) + if err != nil { + t.Fatalf("unexpected error creating test upload: %v", err) + } + + rs, ts, err := testutil.CreateRandomTarFile() + if err != nil { + t.Fatalf("unexpected error generating test layer file") + } + dgst := digest.Digest(ts) + if _, err := io.Copy(wr, rs); err != nil { + t.Fatalf("unexpected error copying to upload: %v", err) + } + + if _, err := wr.Commit(ctx, distribution.Descriptor{Digest: dgst}); err != nil { + t.Fatalf("unexpected error finishing upload: %v", err) + } + } + + pk, err := libtrust.GenerateECP256PrivateKey() + if err != nil { + t.Fatalf("unexpected error generating private key: %v", err) + } + + sm, err := schema1.Sign(&m, pk) + if err != nil { + t.Fatalf("error signing manifest: %v", err) + } + + ms, err 
:= repository.Manifests(ctx) + if err != nil { + t.Fatalf(err.Error()) + } + dgst, err := ms.Put(ctx, sm) + if err != nil { + t.Fatalf("unexpected errors putting manifest: %v", err) + } + + return dgst, nil +} + +// TestProxyManifests contains basic acceptance tests +// for the pull-through behavior +func TestProxyManifests(t *testing.T) { + name := "foo/bar" + env := newManifestStoreTestEnv(t, name, "latest") + + localStats := env.LocalStats() + remoteStats := env.RemoteStats() + + ctx := context.Background() + // Stat - must check local and remote + exists, err := env.manifests.Exists(ctx, env.manifestDigest) + if err != nil { + t.Fatalf("Error checking existence") + } + if !exists { + t.Errorf("Unexpected non-existant manifest") + } + + if (*localStats)["exists"] != 1 && (*remoteStats)["exists"] != 1 { + t.Errorf("Unexpected exists count : \n%v \n%v", localStats, remoteStats) + } + + if env.manifests.authChallenger.(*mockChallenger).count != 1 { + t.Fatalf("Expected 1 auth challenge, got %#v", env.manifests.authChallenger) + } + + // Get - should succeed and pull manifest into local + _, err = env.manifests.Get(ctx, env.manifestDigest) + if err != nil { + t.Fatal(err) + } + + if (*localStats)["get"] != 1 && (*remoteStats)["get"] != 1 { + t.Errorf("Unexpected get count") + } + + if (*localStats)["put"] != 1 { + t.Errorf("Expected local put") + } + + if env.manifests.authChallenger.(*mockChallenger).count != 2 { + t.Fatalf("Expected 2 auth challenges, got %#v", env.manifests.authChallenger) + } + + // Stat - should only go to local + exists, err = env.manifests.Exists(ctx, env.manifestDigest) + if err != nil { + t.Fatal(err) + } + if !exists { + t.Errorf("Unexpected non-existant manifest") + } + + if (*localStats)["exists"] != 2 && (*remoteStats)["exists"] != 1 { + t.Errorf("Unexpected exists count") + } + + if env.manifests.authChallenger.(*mockChallenger).count != 2 { + t.Fatalf("Expected 2 auth challenges, got %#v", env.manifests.authChallenger) + } + + // Get proxied - won't require another authchallenge + _, err = env.manifests.Get(ctx, env.manifestDigest) + if err != nil { + t.Fatal(err) + } + + if env.manifests.authChallenger.(*mockChallenger).count != 2 { + t.Fatalf("Expected 2 auth challenges, got %#v", env.manifests.authChallenger) + } + +} diff --git a/vendor/github.com/docker/distribution/registry/proxy/proxymetrics.go b/vendor/github.com/docker/distribution/registry/proxy/proxymetrics.go new file mode 100644 index 000000000..d3d84d786 --- /dev/null +++ b/vendor/github.com/docker/distribution/registry/proxy/proxymetrics.go @@ -0,0 +1,74 @@ +package proxy + +import ( + "expvar" + "sync/atomic" +) + +// Metrics is used to hold metric counters +// related to the proxy +type Metrics struct { + Requests uint64 + Hits uint64 + Misses uint64 + BytesPulled uint64 + BytesPushed uint64 +} + +type proxyMetricsCollector struct { + blobMetrics Metrics + manifestMetrics Metrics +} + +// BlobPull tracks metrics about blobs pulled into the cache +func (pmc *proxyMetricsCollector) BlobPull(bytesPulled uint64) { + atomic.AddUint64(&pmc.blobMetrics.Misses, 1) + atomic.AddUint64(&pmc.blobMetrics.BytesPulled, bytesPulled) +} + +// BlobPush tracks metrics about blobs pushed to clients +func (pmc *proxyMetricsCollector) BlobPush(bytesPushed uint64) { + atomic.AddUint64(&pmc.blobMetrics.Requests, 1) + atomic.AddUint64(&pmc.blobMetrics.Hits, 1) + atomic.AddUint64(&pmc.blobMetrics.BytesPushed, bytesPushed) +} + +// ManifestPull tracks metrics related to Manifests pulled into the cache +func (pmc 
*proxyMetricsCollector) ManifestPull(bytesPulled uint64) { + atomic.AddUint64(&pmc.manifestMetrics.Misses, 1) + atomic.AddUint64(&pmc.manifestMetrics.BytesPulled, bytesPulled) +} + +// ManifestPush tracks metrics about manifests pushed to clients +func (pmc *proxyMetricsCollector) ManifestPush(bytesPushed uint64) { + atomic.AddUint64(&pmc.manifestMetrics.Requests, 1) + atomic.AddUint64(&pmc.manifestMetrics.Hits, 1) + atomic.AddUint64(&pmc.manifestMetrics.BytesPushed, bytesPushed) +} + +// proxyMetrics tracks metrics about the proxy cache. This is +// kept globally and made available via expvar. +var proxyMetrics = &proxyMetricsCollector{} + +func init() { + registry := expvar.Get("registry") + if registry == nil { + registry = expvar.NewMap("registry") + } + + pm := registry.(*expvar.Map).Get("proxy") + if pm == nil { + pm = &expvar.Map{} + pm.(*expvar.Map).Init() + registry.(*expvar.Map).Set("proxy", pm) + } + + pm.(*expvar.Map).Set("blobs", expvar.Func(func() interface{} { + return proxyMetrics.blobMetrics + })) + + pm.(*expvar.Map).Set("manifests", expvar.Func(func() interface{} { + return proxyMetrics.manifestMetrics + })) + +} diff --git a/vendor/github.com/docker/distribution/registry/proxy/proxyregistry.go b/vendor/github.com/docker/distribution/registry/proxy/proxyregistry.go new file mode 100644 index 000000000..d64dcbb95 --- /dev/null +++ b/vendor/github.com/docker/distribution/registry/proxy/proxyregistry.go @@ -0,0 +1,249 @@ +package proxy + +import ( + "fmt" + "net/http" + "net/url" + "sync" + + "github.com/docker/distribution" + "github.com/docker/distribution/configuration" + "github.com/docker/distribution/context" + "github.com/docker/distribution/reference" + "github.com/docker/distribution/registry/client" + "github.com/docker/distribution/registry/client/auth" + "github.com/docker/distribution/registry/client/auth/challenge" + "github.com/docker/distribution/registry/client/transport" + "github.com/docker/distribution/registry/proxy/scheduler" + "github.com/docker/distribution/registry/storage" + "github.com/docker/distribution/registry/storage/driver" +) + +// proxyingRegistry fetches content from a remote registry and caches it locally +type proxyingRegistry struct { + embedded distribution.Namespace // provides local registry functionality + scheduler *scheduler.TTLExpirationScheduler + remoteURL url.URL + authChallenger authChallenger +} + +// NewRegistryPullThroughCache creates a registry acting as a pull through cache +func NewRegistryPullThroughCache(ctx context.Context, registry distribution.Namespace, driver driver.StorageDriver, config configuration.Proxy) (distribution.Namespace, error) { + remoteURL, err := url.Parse(config.RemoteURL) + if err != nil { + return nil, err + } + + v := storage.NewVacuum(ctx, driver) + s := scheduler.New(ctx, driver, "/scheduler-state.json") + s.OnBlobExpire(func(ref reference.Reference) error { + var r reference.Canonical + var ok bool + if r, ok = ref.(reference.Canonical); !ok { + return fmt.Errorf("unexpected reference type : %T", ref) + } + + repo, err := registry.Repository(ctx, r) + if err != nil { + return err + } + + blobs := repo.Blobs(ctx) + + // Clear the repository reference and descriptor caches + err = blobs.Delete(ctx, r.Digest()) + if err != nil { + return err + } + + err = v.RemoveBlob(r.Digest().String()) + if err != nil { + return err + } + + return nil + }) + + s.OnManifestExpire(func(ref reference.Reference) error { + var r reference.Canonical + var ok bool + if r, ok = ref.(reference.Canonical); !ok { + 
return fmt.Errorf("unexpected reference type : %T", ref) + } + + repo, err := registry.Repository(ctx, r) + if err != nil { + return err + } + + manifests, err := repo.Manifests(ctx) + if err != nil { + return err + } + err = manifests.Delete(ctx, r.Digest()) + if err != nil { + return err + } + return nil + }) + + err = s.Start() + if err != nil { + return nil, err + } + + cs, err := configureAuth(config.Username, config.Password, config.RemoteURL) + if err != nil { + return nil, err + } + + return &proxyingRegistry{ + embedded: registry, + scheduler: s, + remoteURL: *remoteURL, + authChallenger: &remoteAuthChallenger{ + remoteURL: *remoteURL, + cm: challenge.NewSimpleManager(), + cs: cs, + }, + }, nil +} + +func (pr *proxyingRegistry) Scope() distribution.Scope { + return distribution.GlobalScope +} + +func (pr *proxyingRegistry) Repositories(ctx context.Context, repos []string, last string) (n int, err error) { + return pr.embedded.Repositories(ctx, repos, last) +} + +func (pr *proxyingRegistry) Repository(ctx context.Context, name reference.Named) (distribution.Repository, error) { + c := pr.authChallenger + + tr := transport.NewTransport(http.DefaultTransport, + auth.NewAuthorizer(c.challengeManager(), auth.NewTokenHandler(http.DefaultTransport, c.credentialStore(), name.Name(), "pull"))) + + localRepo, err := pr.embedded.Repository(ctx, name) + if err != nil { + return nil, err + } + localManifests, err := localRepo.Manifests(ctx, storage.SkipLayerVerification()) + if err != nil { + return nil, err + } + + remoteRepo, err := client.NewRepository(ctx, name, pr.remoteURL.String(), tr) + if err != nil { + return nil, err + } + + remoteManifests, err := remoteRepo.Manifests(ctx) + if err != nil { + return nil, err + } + + return &proxiedRepository{ + blobStore: &proxyBlobStore{ + localStore: localRepo.Blobs(ctx), + remoteStore: remoteRepo.Blobs(ctx), + scheduler: pr.scheduler, + repositoryName: name, + authChallenger: pr.authChallenger, + }, + manifests: &proxyManifestStore{ + repositoryName: name, + localManifests: localManifests, // Options? 
+ remoteManifests: remoteManifests, + ctx: ctx, + scheduler: pr.scheduler, + authChallenger: pr.authChallenger, + }, + name: name, + tags: &proxyTagService{ + localTags: localRepo.Tags(ctx), + remoteTags: remoteRepo.Tags(ctx), + authChallenger: pr.authChallenger, + }, + }, nil +} + +func (pr *proxyingRegistry) Blobs() distribution.BlobEnumerator { + return pr.embedded.Blobs() +} + +func (pr *proxyingRegistry) BlobStatter() distribution.BlobStatter { + return pr.embedded.BlobStatter() +} + +// authChallenger encapsulates a request to the upstream to establish credential challenges +type authChallenger interface { + tryEstablishChallenges(context.Context) error + challengeManager() challenge.Manager + credentialStore() auth.CredentialStore +} + +type remoteAuthChallenger struct { + remoteURL url.URL + sync.Mutex + cm challenge.Manager + cs auth.CredentialStore +} + +func (r *remoteAuthChallenger) credentialStore() auth.CredentialStore { + return r.cs +} + +func (r *remoteAuthChallenger) challengeManager() challenge.Manager { + return r.cm +} + +// tryEstablishChallenges will attempt to get a challenge type for the upstream if none currently exist +func (r *remoteAuthChallenger) tryEstablishChallenges(ctx context.Context) error { + r.Lock() + defer r.Unlock() + + remoteURL := r.remoteURL + remoteURL.Path = "/v2/" + challenges, err := r.cm.GetChallenges(remoteURL) + if err != nil { + return err + } + + if len(challenges) > 0 { + return nil + } + + // establish challenge type with upstream + if err := ping(r.cm, remoteURL.String(), challengeHeader); err != nil { + return err + } + + context.GetLogger(ctx).Infof("Challenge established with upstream : %s %s", remoteURL, r.cm) + return nil +} + +// proxiedRepository uses proxying blob and manifest services to serve content +// locally, or pulling it through from a remote and caching it locally if it doesn't +// already exist +type proxiedRepository struct { + blobStore distribution.BlobStore + manifests distribution.ManifestService + name reference.Named + tags distribution.TagService +} + +func (pr *proxiedRepository) Manifests(ctx context.Context, options ...distribution.ManifestServiceOption) (distribution.ManifestService, error) { + return pr.manifests, nil +} + +func (pr *proxiedRepository) Blobs(ctx context.Context) distribution.BlobStore { + return pr.blobStore +} + +func (pr *proxiedRepository) Named() reference.Named { + return pr.name +} + +func (pr *proxiedRepository) Tags(ctx context.Context) distribution.TagService { + return pr.tags +} diff --git a/vendor/github.com/docker/distribution/registry/proxy/proxytagservice.go b/vendor/github.com/docker/distribution/registry/proxy/proxytagservice.go new file mode 100644 index 000000000..a8273030d --- /dev/null +++ b/vendor/github.com/docker/distribution/registry/proxy/proxytagservice.go @@ -0,0 +1,65 @@ +package proxy + +import ( + "github.com/docker/distribution" + "github.com/docker/distribution/context" +) + +// proxyTagService supports local and remote lookup of tags. +type proxyTagService struct { + localTags distribution.TagService + remoteTags distribution.TagService + authChallenger authChallenger +} + +var _ distribution.TagService = proxyTagService{} + +// Get attempts to get the most recent digest for the tag by checking the remote +// tag service first and then caching it locally. 
If the remote is unavailable +// the local association is returned +func (pt proxyTagService) Get(ctx context.Context, tag string) (distribution.Descriptor, error) { + err := pt.authChallenger.tryEstablishChallenges(ctx) + if err == nil { + desc, err := pt.remoteTags.Get(ctx, tag) + if err == nil { + err := pt.localTags.Tag(ctx, tag, desc) + if err != nil { + return distribution.Descriptor{}, err + } + return desc, nil + } + } + + desc, err := pt.localTags.Get(ctx, tag) + if err != nil { + return distribution.Descriptor{}, err + } + return desc, nil +} + +func (pt proxyTagService) Tag(ctx context.Context, tag string, desc distribution.Descriptor) error { + return distribution.ErrUnsupported +} + +func (pt proxyTagService) Untag(ctx context.Context, tag string) error { + err := pt.localTags.Untag(ctx, tag) + if err != nil { + return err + } + return nil +} + +func (pt proxyTagService) All(ctx context.Context) ([]string, error) { + err := pt.authChallenger.tryEstablishChallenges(ctx) + if err == nil { + tags, err := pt.remoteTags.All(ctx) + if err == nil { + return tags, err + } + } + return pt.localTags.All(ctx) +} + +func (pt proxyTagService) Lookup(ctx context.Context, digest distribution.Descriptor) ([]string, error) { + return []string{}, distribution.ErrUnsupported +} diff --git a/vendor/github.com/docker/distribution/registry/proxy/proxytagservice_test.go b/vendor/github.com/docker/distribution/registry/proxy/proxytagservice_test.go new file mode 100644 index 000000000..ce0fe78ba --- /dev/null +++ b/vendor/github.com/docker/distribution/registry/proxy/proxytagservice_test.go @@ -0,0 +1,182 @@ +package proxy + +import ( + "reflect" + "sort" + "sync" + "testing" + + "github.com/docker/distribution" + "github.com/docker/distribution/context" +) + +type mockTagStore struct { + mapping map[string]distribution.Descriptor + sync.Mutex +} + +var _ distribution.TagService = &mockTagStore{} + +func (m *mockTagStore) Get(ctx context.Context, tag string) (distribution.Descriptor, error) { + m.Lock() + defer m.Unlock() + + if d, ok := m.mapping[tag]; ok { + return d, nil + } + return distribution.Descriptor{}, distribution.ErrTagUnknown{} +} + +func (m *mockTagStore) Tag(ctx context.Context, tag string, desc distribution.Descriptor) error { + m.Lock() + defer m.Unlock() + + m.mapping[tag] = desc + return nil +} + +func (m *mockTagStore) Untag(ctx context.Context, tag string) error { + m.Lock() + defer m.Unlock() + + if _, ok := m.mapping[tag]; ok { + delete(m.mapping, tag) + return nil + } + return distribution.ErrTagUnknown{} +} + +func (m *mockTagStore) All(ctx context.Context) ([]string, error) { + m.Lock() + defer m.Unlock() + + var tags []string + for tag := range m.mapping { + tags = append(tags, tag) + } + + return tags, nil +} + +func (m *mockTagStore) Lookup(ctx context.Context, digest distribution.Descriptor) ([]string, error) { + panic("not implemented") +} + +func testProxyTagService(local, remote map[string]distribution.Descriptor) *proxyTagService { + if local == nil { + local = make(map[string]distribution.Descriptor) + } + if remote == nil { + remote = make(map[string]distribution.Descriptor) + } + return &proxyTagService{ + localTags: &mockTagStore{mapping: local}, + remoteTags: &mockTagStore{mapping: remote}, + authChallenger: &mockChallenger{}, + } +} + +func TestGet(t *testing.T) { + remoteDesc := distribution.Descriptor{Size: 42} + remoteTag := "remote" + proxyTags := testProxyTagService(map[string]distribution.Descriptor{remoteTag: remoteDesc}, nil) + + ctx := 
context.Background() + + // Get pre-loaded tag + d, err := proxyTags.Get(ctx, remoteTag) + if err != nil { + t.Fatal(err) + } + + if proxyTags.authChallenger.(*mockChallenger).count != 1 { + t.Fatalf("Expected 1 auth challenge call, got %#v", proxyTags.authChallenger) + } + + if !reflect.DeepEqual(d, remoteDesc) { + t.Fatal("unable to get put tag") + } + + local, err := proxyTags.localTags.Get(ctx, remoteTag) + if err != nil { + t.Fatal("remote tag not pulled into store") + } + + if !reflect.DeepEqual(local, remoteDesc) { + t.Fatalf("unexpected descriptor pulled through") + } + + // Manually overwrite remote tag + newRemoteDesc := distribution.Descriptor{Size: 43} + err = proxyTags.remoteTags.Tag(ctx, remoteTag, newRemoteDesc) + if err != nil { + t.Fatal(err) + } + + d, err = proxyTags.Get(ctx, remoteTag) + if err != nil { + t.Fatal(err) + } + + if proxyTags.authChallenger.(*mockChallenger).count != 2 { + t.Fatalf("Expected 2 auth challenge calls, got %#v", proxyTags.authChallenger) + } + + if !reflect.DeepEqual(d, newRemoteDesc) { + t.Fatal("unable to get put tag") + } + + _, err = proxyTags.localTags.Get(ctx, remoteTag) + if err != nil { + t.Fatal("remote tag not pulled into store") + } + + // untag, ensure it's removed locally, but present in remote + err = proxyTags.Untag(ctx, remoteTag) + if err != nil { + t.Fatal(err) + } + + _, err = proxyTags.localTags.Get(ctx, remoteTag) + if err == nil { + t.Fatalf("Expected error getting Untag'd tag") + } + + _, err = proxyTags.remoteTags.Get(ctx, remoteTag) + if err != nil { + t.Fatalf("remote tag should not be untagged with proxyTag.Untag") + } + + _, err = proxyTags.Get(ctx, remoteTag) + if err != nil { + t.Fatal("untagged tag should be pulled through") + } + + if proxyTags.authChallenger.(*mockChallenger).count != 3 { + t.Fatalf("Expected 3 auth challenge calls, got %#v", proxyTags.authChallenger) + } + + // Add another tag. 
Ensure both tags appear in 'All' + err = proxyTags.remoteTags.Tag(ctx, "funtag", distribution.Descriptor{Size: 42}) + if err != nil { + t.Fatal(err) + } + + all, err := proxyTags.All(ctx) + if err != nil { + t.Fatal(err) + } + + if len(all) != 2 { + t.Fatalf("Unexpected tag length returned from All() : %d ", len(all)) + } + + sort.Strings(all) + if all[0] != "funtag" && all[1] != "remote" { + t.Fatalf("Unexpected tags returned from All() : %v ", all) + } + + if proxyTags.authChallenger.(*mockChallenger).count != 4 { + t.Fatalf("Expected 4 auth challenge calls, got %#v", proxyTags.authChallenger) + } +} diff --git a/vendor/github.com/docker/distribution/registry/proxy/scheduler/scheduler.go b/vendor/github.com/docker/distribution/registry/proxy/scheduler/scheduler.go new file mode 100644 index 000000000..bde946576 --- /dev/null +++ b/vendor/github.com/docker/distribution/registry/proxy/scheduler/scheduler.go @@ -0,0 +1,259 @@ +package scheduler + +import ( + "encoding/json" + "fmt" + "sync" + "time" + + "github.com/docker/distribution/context" + "github.com/docker/distribution/reference" + "github.com/docker/distribution/registry/storage/driver" +) + +// onTTLExpiryFunc is called when a repository's TTL expires +type expiryFunc func(reference.Reference) error + +const ( + entryTypeBlob = iota + entryTypeManifest + indexSaveFrequency = 5 * time.Second +) + +// schedulerEntry represents an entry in the scheduler +// fields are exported for serialization +type schedulerEntry struct { + Key string `json:"Key"` + Expiry time.Time `json:"ExpiryData"` + EntryType int `json:"EntryType"` + + timer *time.Timer +} + +// New returns a new instance of the scheduler +func New(ctx context.Context, driver driver.StorageDriver, path string) *TTLExpirationScheduler { + return &TTLExpirationScheduler{ + entries: make(map[string]*schedulerEntry), + driver: driver, + pathToStateFile: path, + ctx: ctx, + stopped: true, + doneChan: make(chan struct{}), + saveTimer: time.NewTicker(indexSaveFrequency), + } +} + +// TTLExpirationScheduler is a scheduler used to perform actions +// when TTLs expire +type TTLExpirationScheduler struct { + sync.Mutex + + entries map[string]*schedulerEntry + + driver driver.StorageDriver + ctx context.Context + pathToStateFile string + + stopped bool + + onBlobExpire expiryFunc + onManifestExpire expiryFunc + + indexDirty bool + saveTimer *time.Ticker + doneChan chan struct{} +} + +// OnBlobExpire is called when a scheduled blob's TTL expires +func (ttles *TTLExpirationScheduler) OnBlobExpire(f expiryFunc) { + ttles.Lock() + defer ttles.Unlock() + + ttles.onBlobExpire = f +} + +// OnManifestExpire is called when a scheduled manifest's TTL expires +func (ttles *TTLExpirationScheduler) OnManifestExpire(f expiryFunc) { + ttles.Lock() + defer ttles.Unlock() + + ttles.onManifestExpire = f +} + +// AddBlob schedules a blob cleanup after ttl expires +func (ttles *TTLExpirationScheduler) AddBlob(blobRef reference.Canonical, ttl time.Duration) error { + ttles.Lock() + defer ttles.Unlock() + + if ttles.stopped { + return fmt.Errorf("scheduler not started") + } + + ttles.add(blobRef, ttl, entryTypeBlob) + return nil +} + +// AddManifest schedules a manifest cleanup after ttl expires +func (ttles *TTLExpirationScheduler) AddManifest(manifestRef reference.Canonical, ttl time.Duration) error { + ttles.Lock() + defer ttles.Unlock() + + if ttles.stopped { + return fmt.Errorf("scheduler not started") + } + + ttles.add(manifestRef, ttl, entryTypeManifest) + return nil +} + +// Start starts the scheduler 
+func (ttles *TTLExpirationScheduler) Start() error { + ttles.Lock() + defer ttles.Unlock() + + err := ttles.readState() + if err != nil { + return err + } + + if !ttles.stopped { + return fmt.Errorf("Scheduler already started") + } + + context.GetLogger(ttles.ctx).Infof("Starting cached object TTL expiration scheduler...") + ttles.stopped = false + + // Start timer for each deserialized entry + for _, entry := range ttles.entries { + entry.timer = ttles.startTimer(entry, entry.Expiry.Sub(time.Now())) + } + + // Start a ticker to periodically save the entries index + + go func() { + for { + select { + case <-ttles.saveTimer.C: + ttles.Lock() + if !ttles.indexDirty { + ttles.Unlock() + continue + } + + err := ttles.writeState() + if err != nil { + context.GetLogger(ttles.ctx).Errorf("Error writing scheduler state: %s", err) + } else { + ttles.indexDirty = false + } + ttles.Unlock() + + case <-ttles.doneChan: + return + } + } + }() + + return nil +} + +func (ttles *TTLExpirationScheduler) add(r reference.Reference, ttl time.Duration, eType int) { + entry := &schedulerEntry{ + Key: r.String(), + Expiry: time.Now().Add(ttl), + EntryType: eType, + } + context.GetLogger(ttles.ctx).Infof("Adding new scheduler entry for %s with ttl=%s", entry.Key, entry.Expiry.Sub(time.Now())) + if oldEntry, present := ttles.entries[entry.Key]; present && oldEntry.timer != nil { + oldEntry.timer.Stop() + } + ttles.entries[entry.Key] = entry + entry.timer = ttles.startTimer(entry, ttl) + ttles.indexDirty = true +} + +func (ttles *TTLExpirationScheduler) startTimer(entry *schedulerEntry, ttl time.Duration) *time.Timer { + return time.AfterFunc(ttl, func() { + ttles.Lock() + defer ttles.Unlock() + + var f expiryFunc + + switch entry.EntryType { + case entryTypeBlob: + f = ttles.onBlobExpire + case entryTypeManifest: + f = ttles.onManifestExpire + default: + f = func(reference.Reference) error { + return fmt.Errorf("scheduler entry type") + } + } + + ref, err := reference.Parse(entry.Key) + if err == nil { + if err := f(ref); err != nil { + context.GetLogger(ttles.ctx).Errorf("Scheduler error returned from OnExpire(%s): %s", entry.Key, err) + } + } else { + context.GetLogger(ttles.ctx).Errorf("Error unpacking reference: %s", err) + } + + delete(ttles.entries, entry.Key) + ttles.indexDirty = true + }) +} + +// Stop stops the scheduler. 
+func (ttles *TTLExpirationScheduler) Stop() { + ttles.Lock() + defer ttles.Unlock() + + if err := ttles.writeState(); err != nil { + context.GetLogger(ttles.ctx).Errorf("Error writing scheduler state: %s", err) + } + + for _, entry := range ttles.entries { + entry.timer.Stop() + } + + close(ttles.doneChan) + ttles.saveTimer.Stop() + ttles.stopped = true +} + +func (ttles *TTLExpirationScheduler) writeState() error { + jsonBytes, err := json.Marshal(ttles.entries) + if err != nil { + return err + } + + err = ttles.driver.PutContent(ttles.ctx, ttles.pathToStateFile, jsonBytes) + if err != nil { + return err + } + + return nil +} + +func (ttles *TTLExpirationScheduler) readState() error { + if _, err := ttles.driver.Stat(ttles.ctx, ttles.pathToStateFile); err != nil { + switch err := err.(type) { + case driver.PathNotFoundError: + return nil + default: + return err + } + } + + bytes, err := ttles.driver.GetContent(ttles.ctx, ttles.pathToStateFile) + if err != nil { + return err + } + + err = json.Unmarshal(bytes, &ttles.entries) + if err != nil { + return err + } + return nil +} diff --git a/vendor/github.com/docker/distribution/registry/proxy/scheduler/scheduler_test.go b/vendor/github.com/docker/distribution/registry/proxy/scheduler/scheduler_test.go new file mode 100644 index 000000000..4d69d5b55 --- /dev/null +++ b/vendor/github.com/docker/distribution/registry/proxy/scheduler/scheduler_test.go @@ -0,0 +1,211 @@ +package scheduler + +import ( + "encoding/json" + "sync" + "testing" + "time" + + "github.com/docker/distribution/context" + "github.com/docker/distribution/reference" + "github.com/docker/distribution/registry/storage/driver/inmemory" +) + +func testRefs(t *testing.T) (reference.Reference, reference.Reference, reference.Reference) { + ref1, err := reference.Parse("testrepo@sha256:aaaaeaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa") + if err != nil { + t.Fatalf("could not parse reference: %v", err) + } + + ref2, err := reference.Parse("testrepo@sha256:bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb") + if err != nil { + t.Fatalf("could not parse reference: %v", err) + } + + ref3, err := reference.Parse("testrepo@sha256:cccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccc") + if err != nil { + t.Fatalf("could not parse reference: %v", err) + } + + return ref1, ref2, ref3 +} + +func TestSchedule(t *testing.T) { + ref1, ref2, ref3 := testRefs(t) + timeUnit := time.Millisecond + remainingRepos := map[string]bool{ + ref1.String(): true, + ref2.String(): true, + ref3.String(): true, + } + + var mu sync.Mutex + s := New(context.Background(), inmemory.New(), "/ttl") + deleteFunc := func(repoName reference.Reference) error { + if len(remainingRepos) == 0 { + t.Fatalf("Incorrect expiry count") + } + _, ok := remainingRepos[repoName.String()] + if !ok { + t.Fatalf("Trying to remove nonexistent repo: %s", repoName) + } + t.Log("removing", repoName) + mu.Lock() + delete(remainingRepos, repoName.String()) + mu.Unlock() + + return nil + } + s.onBlobExpire = deleteFunc + err := s.Start() + if err != nil { + t.Fatalf("Error starting ttlExpirationScheduler: %s", err) + } + + s.add(ref1, 3*timeUnit, entryTypeBlob) + s.add(ref2, 1*timeUnit, entryTypeBlob) + + func() { + s.Lock() + s.add(ref3, 1*timeUnit, entryTypeBlob) + s.Unlock() + + }() + + // Ensure all repos are deleted + <-time.After(50 * timeUnit) + + mu.Lock() + defer mu.Unlock() + if len(remainingRepos) != 0 { + t.Fatalf("Repositories remaining: %#v", remainingRepos) + } +} + +func 
TestRestoreOld(t *testing.T) { + ref1, ref2, _ := testRefs(t) + remainingRepos := map[string]bool{ + ref1.String(): true, + ref2.String(): true, + } + + var wg sync.WaitGroup + wg.Add(len(remainingRepos)) + var mu sync.Mutex + deleteFunc := func(r reference.Reference) error { + mu.Lock() + defer mu.Unlock() + if r.String() == ref1.String() && len(remainingRepos) == 2 { + t.Errorf("ref1 should not be removed first") + } + _, ok := remainingRepos[r.String()] + if !ok { + t.Fatalf("Trying to remove nonexistent repo: %s", r) + } + delete(remainingRepos, r.String()) + wg.Done() + return nil + } + + timeUnit := time.Millisecond + serialized, err := json.Marshal(&map[string]schedulerEntry{ + ref1.String(): { + Expiry: time.Now().Add(10 * timeUnit), + Key: ref1.String(), + EntryType: 0, + }, + ref2.String(): { + Expiry: time.Now().Add(-3 * timeUnit), // TTL passed, should be removed first + Key: ref2.String(), + EntryType: 0, + }, + }) + if err != nil { + t.Fatalf("Error serializing test data: %s", err.Error()) + } + + ctx := context.Background() + pathToStatFile := "/ttl" + fs := inmemory.New() + err = fs.PutContent(ctx, pathToStatFile, serialized) + if err != nil { + t.Fatal("Unable to write serialized data to fs") + } + s := New(context.Background(), fs, "/ttl") + s.OnBlobExpire(deleteFunc) + err = s.Start() + if err != nil { + t.Fatalf("Error starting ttlExpirationScheduler: %s", err) + } + defer s.Stop() + + wg.Wait() + mu.Lock() + defer mu.Unlock() + if len(remainingRepos) != 0 { + t.Fatalf("Repositories remaining: %#v", remainingRepos) + } +} + +func TestStopRestore(t *testing.T) { + ref1, ref2, _ := testRefs(t) + + timeUnit := time.Millisecond + remainingRepos := map[string]bool{ + ref1.String(): true, + ref2.String(): true, + } + + var mu sync.Mutex + deleteFunc := func(r reference.Reference) error { + mu.Lock() + delete(remainingRepos, r.String()) + mu.Unlock() + return nil + } + + fs := inmemory.New() + pathToStateFile := "/ttl" + s := New(context.Background(), fs, pathToStateFile) + s.onBlobExpire = deleteFunc + + err := s.Start() + if err != nil { + t.Fatalf(err.Error()) + } + s.add(ref1, 300*timeUnit, entryTypeBlob) + s.add(ref2, 100*timeUnit, entryTypeBlob) + + // Start and stop before all operations complete + // state will be written to fs + s.Stop() + time.Sleep(10 * time.Millisecond) + + // v2 will restore state from fs + s2 := New(context.Background(), fs, pathToStateFile) + s2.onBlobExpire = deleteFunc + err = s2.Start() + if err != nil { + t.Fatalf("Error starting v2: %s", err.Error()) + } + + <-time.After(500 * timeUnit) + mu.Lock() + defer mu.Unlock() + if len(remainingRepos) != 0 { + t.Fatalf("Repositories remaining: %#v", remainingRepos) + } + +} + +func TestDoubleStart(t *testing.T) { + s := New(context.Background(), inmemory.New(), "/ttl") + err := s.Start() + if err != nil { + t.Fatalf("Unable to start scheduler") + } + err = s.Start() + if err == nil { + t.Fatalf("Scheduler started twice without error") + } +} diff --git a/vendor/github.com/docker/distribution/registry/registry.go b/vendor/github.com/docker/distribution/registry/registry.go new file mode 100644 index 000000000..2adcb1e3e --- /dev/null +++ b/vendor/github.com/docker/distribution/registry/registry.go @@ -0,0 +1,356 @@ +package registry + +import ( + "crypto/tls" + "crypto/x509" + "fmt" + "io/ioutil" + "net/http" + "os" + "time" + + "rsc.io/letsencrypt" + + log "github.com/Sirupsen/logrus" + "github.com/Sirupsen/logrus/formatters/logstash" + "github.com/bugsnag/bugsnag-go" + 
"github.com/docker/distribution/configuration" + "github.com/docker/distribution/context" + "github.com/docker/distribution/health" + "github.com/docker/distribution/registry/handlers" + "github.com/docker/distribution/registry/listener" + "github.com/docker/distribution/uuid" + "github.com/docker/distribution/version" + gorhandlers "github.com/gorilla/handlers" + "github.com/spf13/cobra" + "github.com/yvasiyarov/gorelic" +) + +// ServeCmd is a cobra command for running the registry. +var ServeCmd = &cobra.Command{ + Use: "serve ", + Short: "`serve` stores and distributes Docker images", + Long: "`serve` stores and distributes Docker images.", + Run: func(cmd *cobra.Command, args []string) { + + // setup context + ctx := context.WithVersion(context.Background(), version.Version) + + config, err := resolveConfiguration(args) + if err != nil { + fmt.Fprintf(os.Stderr, "configuration error: %v\n", err) + cmd.Usage() + os.Exit(1) + } + + if config.HTTP.Debug.Addr != "" { + go func(addr string) { + log.Infof("debug server listening %v", addr) + if err := http.ListenAndServe(addr, nil); err != nil { + log.Fatalf("error listening on debug interface: %v", err) + } + }(config.HTTP.Debug.Addr) + } + + registry, err := NewRegistry(ctx, config) + if err != nil { + log.Fatalln(err) + } + + if err = registry.ListenAndServe(); err != nil { + log.Fatalln(err) + } + }, +} + +// A Registry represents a complete instance of the registry. +// TODO(aaronl): It might make sense for Registry to become an interface. +type Registry struct { + config *configuration.Configuration + app *handlers.App + server *http.Server +} + +// NewRegistry creates a new registry from a context and configuration struct. +func NewRegistry(ctx context.Context, config *configuration.Configuration) (*Registry, error) { + var err error + ctx, err = configureLogging(ctx, config) + if err != nil { + return nil, fmt.Errorf("error configuring logger: %v", err) + } + + // inject a logger into the uuid library. warns us if there is a problem + // with uuid generation under low entropy. + uuid.Loggerf = context.GetLogger(ctx).Warnf + + app := handlers.NewApp(ctx, config) + // TODO(aaronl): The global scope of the health checks means NewRegistry + // can only be called once per process. + app.RegisterHealthChecks() + handler := configureReporting(app) + handler = alive("/", handler) + handler = health.Handler(handler) + handler = panicHandler(handler) + if !config.Log.AccessLog.Disabled { + handler = gorhandlers.CombinedLoggingHandler(os.Stdout, handler) + } + + server := &http.Server{ + Handler: handler, + } + + return &Registry{ + app: app, + config: config, + server: server, + }, nil +} + +// ListenAndServe runs the registry's HTTP server. 
+func (registry *Registry) ListenAndServe() error { + config := registry.config + + ln, err := listener.NewListener(config.HTTP.Net, config.HTTP.Addr) + if err != nil { + return err + } + + if config.HTTP.TLS.Certificate != "" || config.HTTP.TLS.LetsEncrypt.CacheFile != "" { + tlsConf := &tls.Config{ + ClientAuth: tls.NoClientCert, + NextProtos: nextProtos(config), + MinVersion: tls.VersionTLS10, + PreferServerCipherSuites: true, + CipherSuites: []uint16{ + tls.TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256, + tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256, + tls.TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA, + tls.TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA, + tls.TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA, + tls.TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA, + tls.TLS_RSA_WITH_AES_128_CBC_SHA, + tls.TLS_RSA_WITH_AES_256_CBC_SHA, + }, + } + + if config.HTTP.TLS.LetsEncrypt.CacheFile != "" { + if config.HTTP.TLS.Certificate != "" { + return fmt.Errorf("cannot specify both certificate and Let's Encrypt") + } + var m letsencrypt.Manager + if err := m.CacheFile(config.HTTP.TLS.LetsEncrypt.CacheFile); err != nil { + return err + } + if !m.Registered() { + if err := m.Register(config.HTTP.TLS.LetsEncrypt.Email, nil); err != nil { + return err + } + } + tlsConf.GetCertificate = m.GetCertificate + } else { + tlsConf.Certificates = make([]tls.Certificate, 1) + tlsConf.Certificates[0], err = tls.LoadX509KeyPair(config.HTTP.TLS.Certificate, config.HTTP.TLS.Key) + if err != nil { + return err + } + } + + if len(config.HTTP.TLS.ClientCAs) != 0 { + pool := x509.NewCertPool() + + for _, ca := range config.HTTP.TLS.ClientCAs { + caPem, err := ioutil.ReadFile(ca) + if err != nil { + return err + } + + if ok := pool.AppendCertsFromPEM(caPem); !ok { + return fmt.Errorf("Could not add CA to pool") + } + } + + for _, subj := range pool.Subjects() { + context.GetLogger(registry.app).Debugf("CA Subject: %s", string(subj)) + } + + tlsConf.ClientAuth = tls.RequireAndVerifyClientCert + tlsConf.ClientCAs = pool + } + + ln = tls.NewListener(ln, tlsConf) + context.GetLogger(registry.app).Infof("listening on %v, tls", ln.Addr()) + } else { + context.GetLogger(registry.app).Infof("listening on %v", ln.Addr()) + } + + return registry.server.Serve(ln) +} + +func configureReporting(app *handlers.App) http.Handler { + var handler http.Handler = app + + if app.Config.Reporting.Bugsnag.APIKey != "" { + bugsnagConfig := bugsnag.Configuration{ + APIKey: app.Config.Reporting.Bugsnag.APIKey, + // TODO(brianbland): provide the registry version here + // AppVersion: "2.0", + } + if app.Config.Reporting.Bugsnag.ReleaseStage != "" { + bugsnagConfig.ReleaseStage = app.Config.Reporting.Bugsnag.ReleaseStage + } + if app.Config.Reporting.Bugsnag.Endpoint != "" { + bugsnagConfig.Endpoint = app.Config.Reporting.Bugsnag.Endpoint + } + bugsnag.Configure(bugsnagConfig) + + handler = bugsnag.Handler(handler) + } + + if app.Config.Reporting.NewRelic.LicenseKey != "" { + agent := gorelic.NewAgent() + agent.NewrelicLicense = app.Config.Reporting.NewRelic.LicenseKey + if app.Config.Reporting.NewRelic.Name != "" { + agent.NewrelicName = app.Config.Reporting.NewRelic.Name + } + agent.CollectHTTPStat = true + agent.Verbose = app.Config.Reporting.NewRelic.Verbose + agent.Run() + + handler = agent.WrapHTTPHandler(handler) + } + + return handler +} + +// configureLogging prepares the context with a logger using the +// configuration. 
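+// For illustration, a log section along these lines selects the JSON
+// formatter at debug level with one static field (field names follow the
+// Log configuration consumed below; the exact YAML layout is an assumption):
+//
+//	log:
+//	  level: debug
+//	  formatter: json
+//	  fields:
+//	    service: registry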
+func configureLogging(ctx context.Context, config *configuration.Configuration) (context.Context, error) { + if config.Log.Level == "" && config.Log.Formatter == "" { + // If no config for logging is set, fallback to deprecated "Loglevel". + log.SetLevel(logLevel(config.Loglevel)) + ctx = context.WithLogger(ctx, context.GetLogger(ctx)) + return ctx, nil + } + + log.SetLevel(logLevel(config.Log.Level)) + + formatter := config.Log.Formatter + if formatter == "" { + formatter = "text" // default formatter + } + + switch formatter { + case "json": + log.SetFormatter(&log.JSONFormatter{ + TimestampFormat: time.RFC3339Nano, + }) + case "text": + log.SetFormatter(&log.TextFormatter{ + TimestampFormat: time.RFC3339Nano, + }) + case "logstash": + log.SetFormatter(&logstash.LogstashFormatter{ + TimestampFormat: time.RFC3339Nano, + }) + default: + // just let the library use default on empty string. + if config.Log.Formatter != "" { + return ctx, fmt.Errorf("unsupported logging formatter: %q", config.Log.Formatter) + } + } + + if config.Log.Formatter != "" { + log.Debugf("using %q logging formatter", config.Log.Formatter) + } + + if len(config.Log.Fields) > 0 { + // build up the static fields, if present. + var fields []interface{} + for k := range config.Log.Fields { + fields = append(fields, k) + } + + ctx = context.WithValues(ctx, config.Log.Fields) + ctx = context.WithLogger(ctx, context.GetLogger(ctx, fields...)) + } + + return ctx, nil +} + +func logLevel(level configuration.Loglevel) log.Level { + l, err := log.ParseLevel(string(level)) + if err != nil { + l = log.InfoLevel + log.Warnf("error parsing level %q: %v, using %q ", level, err, l) + } + + return l +} + +// panicHandler add an HTTP handler to web app. The handler recover the happening +// panic. logrus.Panic transmits panic message to pre-config log hooks, which is +// defined in config.yml. +func panicHandler(handler http.Handler) http.Handler { + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + defer func() { + if err := recover(); err != nil { + log.Panic(fmt.Sprintf("%v", err)) + } + }() + handler.ServeHTTP(w, r) + }) +} + +// alive simply wraps the handler with a route that always returns an http 200 +// response when the path is matched. If the path is not matched, the request +// is passed to the provided handler. There is no guarantee of anything but +// that the server is up. Wrap with other handlers (such as health.Handler) +// for greater affect. 
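+// Illustrative use, as wired up in NewRegistry above: the application handler
+// is wrapped so that a request for "/" answers 200 immediately, then the
+// health handler is layered on top:
+//
+//	handler = alive("/", handler)
+//	handler = health.Handler(handler)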
+func alive(path string, handler http.Handler) http.Handler { + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + if r.URL.Path == path { + w.Header().Set("Cache-Control", "no-cache") + w.WriteHeader(http.StatusOK) + return + } + + handler.ServeHTTP(w, r) + }) +} + +func resolveConfiguration(args []string) (*configuration.Configuration, error) { + var configurationPath string + + if len(args) > 0 { + configurationPath = args[0] + } else if os.Getenv("REGISTRY_CONFIGURATION_PATH") != "" { + configurationPath = os.Getenv("REGISTRY_CONFIGURATION_PATH") + } + + if configurationPath == "" { + return nil, fmt.Errorf("configuration path unspecified") + } + + fp, err := os.Open(configurationPath) + if err != nil { + return nil, err + } + + defer fp.Close() + + config, err := configuration.Parse(fp) + if err != nil { + return nil, fmt.Errorf("error parsing %s: %v", configurationPath, err) + } + + return config, nil +} + +func nextProtos(config *configuration.Configuration) []string { + switch config.HTTP.HTTP2.Disabled { + case true: + return []string{"http/1.1"} + default: + return []string{"h2", "http/1.1"} + } +} diff --git a/vendor/github.com/docker/distribution/registry/registry_test.go b/vendor/github.com/docker/distribution/registry/registry_test.go new file mode 100644 index 000000000..346731179 --- /dev/null +++ b/vendor/github.com/docker/distribution/registry/registry_test.go @@ -0,0 +1,30 @@ +package registry + +import ( + "reflect" + "testing" + + "github.com/docker/distribution/configuration" +) + +// Tests to ensure nextProtos returns the correct protocols when: +// * config.HTTP.HTTP2.Disabled is not explicitly set => [h2 http/1.1] +// * config.HTTP.HTTP2.Disabled is explicitly set to false [h2 http/1.1] +// * config.HTTP.HTTP2.Disabled is explicitly set to true [http/1.1] +func TestNextProtos(t *testing.T) { + config := &configuration.Configuration{} + protos := nextProtos(config) + if !reflect.DeepEqual(protos, []string{"h2", "http/1.1"}) { + t.Fatalf("expected protos to equal [h2 http/1.1], got %s", protos) + } + config.HTTP.HTTP2.Disabled = false + protos = nextProtos(config) + if !reflect.DeepEqual(protos, []string{"h2", "http/1.1"}) { + t.Fatalf("expected protos to equal [h2 http/1.1], got %s", protos) + } + config.HTTP.HTTP2.Disabled = true + protos = nextProtos(config) + if !reflect.DeepEqual(protos, []string{"http/1.1"}) { + t.Fatalf("expected protos to equal [http/1.1], got %s", protos) + } +} diff --git a/vendor/github.com/docker/distribution/registry/root.go b/vendor/github.com/docker/distribution/registry/root.go new file mode 100644 index 000000000..5d3005c26 --- /dev/null +++ b/vendor/github.com/docker/distribution/registry/root.go @@ -0,0 +1,84 @@ +package registry + +import ( + "fmt" + "os" + + "github.com/docker/distribution/context" + "github.com/docker/distribution/registry/storage" + "github.com/docker/distribution/registry/storage/driver/factory" + "github.com/docker/distribution/version" + "github.com/docker/libtrust" + "github.com/spf13/cobra" +) + +var showVersion bool + +func init() { + RootCmd.AddCommand(ServeCmd) + RootCmd.AddCommand(GCCmd) + GCCmd.Flags().BoolVarP(&dryRun, "dry-run", "d", false, "do everything except remove the blobs") + RootCmd.Flags().BoolVarP(&showVersion, "version", "v", false, "show the version and exit") +} + +// RootCmd is the main command for the 'registry' binary. 
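+// Illustrative invocations (the configuration path below is only an example;
+// any path accepted by resolveConfiguration, or REGISTRY_CONFIGURATION_PATH,
+// works the same way):
+//
+//	registry serve /etc/docker/registry/config.yml
+//	registry garbage-collect --dry-run /etc/docker/registry/config.yml
+//	registry --version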
+var RootCmd = &cobra.Command{ + Use: "registry", + Short: "`registry`", + Long: "`registry`", + Run: func(cmd *cobra.Command, args []string) { + if showVersion { + version.PrintVersion() + return + } + cmd.Usage() + }, +} + +var dryRun bool + +// GCCmd is the cobra command that corresponds to the garbage-collect subcommand +var GCCmd = &cobra.Command{ + Use: "garbage-collect ", + Short: "`garbage-collect` deletes layers not referenced by any manifests", + Long: "`garbage-collect` deletes layers not referenced by any manifests", + Run: func(cmd *cobra.Command, args []string) { + config, err := resolveConfiguration(args) + if err != nil { + fmt.Fprintf(os.Stderr, "configuration error: %v\n", err) + cmd.Usage() + os.Exit(1) + } + + driver, err := factory.Create(config.Storage.Type(), config.Storage.Parameters()) + if err != nil { + fmt.Fprintf(os.Stderr, "failed to construct %s driver: %v", config.Storage.Type(), err) + os.Exit(1) + } + + ctx := context.Background() + ctx, err = configureLogging(ctx, config) + if err != nil { + fmt.Fprintf(os.Stderr, "unable to configure logging with config: %s", err) + os.Exit(1) + } + + k, err := libtrust.GenerateECP256PrivateKey() + if err != nil { + fmt.Fprint(os.Stderr, err) + os.Exit(1) + } + + registry, err := storage.NewRegistry(ctx, driver, storage.Schema1SigningKey(k)) + if err != nil { + fmt.Fprintf(os.Stderr, "failed to construct registry: %v", err) + os.Exit(1) + } + + err = storage.MarkAndSweep(ctx, driver, registry, dryRun) + if err != nil { + fmt.Fprintf(os.Stderr, "failed to garbage collect: %v", err) + os.Exit(1) + } + }, +} diff --git a/vendor/github.com/docker/distribution/registry/storage/blob_test.go b/vendor/github.com/docker/distribution/registry/storage/blob_test.go new file mode 100644 index 000000000..767526bb2 --- /dev/null +++ b/vendor/github.com/docker/distribution/registry/storage/blob_test.go @@ -0,0 +1,614 @@ +package storage + +import ( + "bytes" + "crypto/sha256" + "fmt" + "io" + "io/ioutil" + "os" + "path" + "reflect" + "testing" + + "github.com/docker/distribution" + "github.com/docker/distribution/context" + "github.com/docker/distribution/digest" + "github.com/docker/distribution/reference" + "github.com/docker/distribution/registry/storage/cache/memory" + "github.com/docker/distribution/registry/storage/driver/testdriver" + "github.com/docker/distribution/testutil" +) + +// TestWriteSeek tests that the current file size can be +// obtained using Seek +func TestWriteSeek(t *testing.T) { + ctx := context.Background() + imageName, _ := reference.ParseNamed("foo/bar") + driver := testdriver.New() + registry, err := NewRegistry(ctx, driver, BlobDescriptorCacheProvider(memory.NewInMemoryBlobDescriptorCacheProvider()), EnableDelete, EnableRedirect) + if err != nil { + t.Fatalf("error creating registry: %v", err) + } + repository, err := registry.Repository(ctx, imageName) + if err != nil { + t.Fatalf("unexpected error getting repo: %v", err) + } + bs := repository.Blobs(ctx) + + blobUpload, err := bs.Create(ctx) + + if err != nil { + t.Fatalf("unexpected error starting layer upload: %s", err) + } + contents := []byte{1, 2, 3} + blobUpload.Write(contents) + blobUpload.Close() + offset := blobUpload.Size() + if offset != int64(len(contents)) { + t.Fatalf("unexpected value for blobUpload offset: %v != %v", offset, len(contents)) + } + +} + +// TestSimpleBlobUpload covers the blob upload process, exercising common +// error paths that might be seen during an upload. 
+func TestSimpleBlobUpload(t *testing.T) { + randomDataReader, dgst, err := testutil.CreateRandomTarFile() + if err != nil { + t.Fatalf("error creating random reader: %v", err) + } + + ctx := context.Background() + imageName, _ := reference.ParseNamed("foo/bar") + driver := testdriver.New() + registry, err := NewRegistry(ctx, driver, BlobDescriptorCacheProvider(memory.NewInMemoryBlobDescriptorCacheProvider()), EnableDelete, EnableRedirect) + if err != nil { + t.Fatalf("error creating registry: %v", err) + } + repository, err := registry.Repository(ctx, imageName) + if err != nil { + t.Fatalf("unexpected error getting repo: %v", err) + } + bs := repository.Blobs(ctx) + + h := sha256.New() + rd := io.TeeReader(randomDataReader, h) + + blobUpload, err := bs.Create(ctx) + + if err != nil { + t.Fatalf("unexpected error starting layer upload: %s", err) + } + + // Cancel the upload then restart it + if err := blobUpload.Cancel(ctx); err != nil { + t.Fatalf("unexpected error during upload cancellation: %v", err) + } + + // get the enclosing directory + uploadPath := path.Dir(blobUpload.(*blobWriter).path) + + // ensure state was cleaned up + _, err = driver.List(ctx, uploadPath) + if err == nil { + t.Fatal("files in upload path after cleanup") + } + + // Do a resume, get unknown upload + blobUpload, err = bs.Resume(ctx, blobUpload.ID()) + if err != distribution.ErrBlobUploadUnknown { + t.Fatalf("unexpected error resuming upload, should be unknown: %v", err) + } + + // Restart! + blobUpload, err = bs.Create(ctx) + if err != nil { + t.Fatalf("unexpected error starting layer upload: %s", err) + } + + // Get the size of our random tarfile + randomDataSize, err := seekerSize(randomDataReader) + if err != nil { + t.Fatalf("error getting seeker size of random data: %v", err) + } + + nn, err := io.Copy(blobUpload, rd) + if err != nil { + t.Fatalf("unexpected error uploading layer data: %v", err) + } + + if nn != randomDataSize { + t.Fatalf("layer data write incomplete") + } + + blobUpload.Close() + + offset := blobUpload.Size() + if offset != nn { + t.Fatalf("blobUpload not updated with correct offset: %v != %v", offset, nn) + } + + // Do a resume, for good fun + blobUpload, err = bs.Resume(ctx, blobUpload.ID()) + if err != nil { + t.Fatalf("unexpected error resuming upload: %v", err) + } + + sha256Digest := digest.NewDigest("sha256", h) + desc, err := blobUpload.Commit(ctx, distribution.Descriptor{Digest: dgst}) + if err != nil { + t.Fatalf("unexpected error finishing layer upload: %v", err) + } + + // ensure state was cleaned up + uploadPath = path.Dir(blobUpload.(*blobWriter).path) + _, err = driver.List(ctx, uploadPath) + if err == nil { + t.Fatal("files in upload path after commit") + } + + // After finishing an upload, it should no longer exist. + if _, err := bs.Resume(ctx, blobUpload.ID()); err != distribution.ErrBlobUploadUnknown { + t.Fatalf("expected layer upload to be unknown, got %v", err) + } + + // Test for existence. 
+ statDesc, err := bs.Stat(ctx, desc.Digest) + if err != nil { + t.Fatalf("unexpected error checking for existence: %v, %#v", err, bs) + } + + if !reflect.DeepEqual(statDesc, desc) { + t.Fatalf("descriptors not equal: %v != %v", statDesc, desc) + } + + rc, err := bs.Open(ctx, desc.Digest) + if err != nil { + t.Fatalf("unexpected error opening blob for read: %v", err) + } + defer rc.Close() + + h.Reset() + nn, err = io.Copy(h, rc) + if err != nil { + t.Fatalf("error reading layer: %v", err) + } + + if nn != randomDataSize { + t.Fatalf("incorrect read length") + } + + if digest.NewDigest("sha256", h) != sha256Digest { + t.Fatalf("unexpected digest from uploaded layer: %q != %q", digest.NewDigest("sha256", h), sha256Digest) + } + + // Delete a blob + err = bs.Delete(ctx, desc.Digest) + if err != nil { + t.Fatalf("Unexpected error deleting blob") + } + + d, err := bs.Stat(ctx, desc.Digest) + if err == nil { + t.Fatalf("unexpected non-error stating deleted blob: %v", d) + } + + switch err { + case distribution.ErrBlobUnknown: + break + default: + t.Errorf("Unexpected error type stat-ing deleted manifest: %#v", err) + } + + _, err = bs.Open(ctx, desc.Digest) + if err == nil { + t.Fatalf("unexpected success opening deleted blob for read") + } + + switch err { + case distribution.ErrBlobUnknown: + break + default: + t.Errorf("Unexpected error type getting deleted manifest: %#v", err) + } + + // Re-upload the blob + randomBlob, err := ioutil.ReadAll(randomDataReader) + if err != nil { + t.Fatalf("Error reading all of blob %s", err.Error()) + } + expectedDigest := digest.FromBytes(randomBlob) + simpleUpload(t, bs, randomBlob, expectedDigest) + + d, err = bs.Stat(ctx, expectedDigest) + if err != nil { + t.Errorf("unexpected error stat-ing blob") + } + if d.Digest != expectedDigest { + t.Errorf("Mismatching digest with restored blob") + } + + _, err = bs.Open(ctx, expectedDigest) + if err != nil { + t.Errorf("Unexpected error opening blob") + } + + // Reuse state to test delete with a delete-disabled registry + registry, err = NewRegistry(ctx, driver, BlobDescriptorCacheProvider(memory.NewInMemoryBlobDescriptorCacheProvider()), EnableRedirect) + if err != nil { + t.Fatalf("error creating registry: %v", err) + } + repository, err = registry.Repository(ctx, imageName) + if err != nil { + t.Fatalf("unexpected error getting repo: %v", err) + } + bs = repository.Blobs(ctx) + err = bs.Delete(ctx, desc.Digest) + if err == nil { + t.Errorf("Unexpected success deleting while disabled") + } +} + +// TestSimpleBlobRead just creates a simple blob file and ensures that basic +// open, read, seek, read works. More specific edge cases should be covered in +// other tests. +func TestSimpleBlobRead(t *testing.T) { + ctx := context.Background() + imageName, _ := reference.ParseNamed("foo/bar") + driver := testdriver.New() + registry, err := NewRegistry(ctx, driver, BlobDescriptorCacheProvider(memory.NewInMemoryBlobDescriptorCacheProvider()), EnableDelete, EnableRedirect) + if err != nil { + t.Fatalf("error creating registry: %v", err) + } + repository, err := registry.Repository(ctx, imageName) + if err != nil { + t.Fatalf("unexpected error getting repo: %v", err) + } + bs := repository.Blobs(ctx) + + randomLayerReader, dgst, err := testutil.CreateRandomTarFile() // TODO(stevvooe): Consider using just a random string. + if err != nil { + t.Fatalf("error creating random data: %v", err) + } + + // Test for existence. 
+ desc, err := bs.Stat(ctx, dgst) + if err != distribution.ErrBlobUnknown { + t.Fatalf("expected not found error when testing for existence: %v", err) + } + + rc, err := bs.Open(ctx, dgst) + if err != distribution.ErrBlobUnknown { + t.Fatalf("expected not found error when opening non-existent blob: %v", err) + } + + randomLayerSize, err := seekerSize(randomLayerReader) + if err != nil { + t.Fatalf("error getting seeker size for random layer: %v", err) + } + + descBefore := distribution.Descriptor{Digest: dgst, MediaType: "application/octet-stream", Size: randomLayerSize} + t.Logf("desc: %v", descBefore) + + desc, err = addBlob(ctx, bs, descBefore, randomLayerReader) + if err != nil { + t.Fatalf("error adding blob to blobservice: %v", err) + } + + if desc.Size != randomLayerSize { + t.Fatalf("committed blob has incorrect length: %v != %v", desc.Size, randomLayerSize) + } + + rc, err = bs.Open(ctx, desc.Digest) // note that we are opening with original digest. + if err != nil { + t.Fatalf("error opening blob with %v: %v", dgst, err) + } + defer rc.Close() + + // Now check the sha digest and ensure its the same + h := sha256.New() + nn, err := io.Copy(h, rc) + if err != nil { + t.Fatalf("unexpected error copying to hash: %v", err) + } + + if nn != randomLayerSize { + t.Fatalf("stored incorrect number of bytes in blob: %d != %d", nn, randomLayerSize) + } + + sha256Digest := digest.NewDigest("sha256", h) + if sha256Digest != desc.Digest { + t.Fatalf("fetched digest does not match: %q != %q", sha256Digest, desc.Digest) + } + + // Now seek back the blob, read the whole thing and check against randomLayerData + offset, err := rc.Seek(0, os.SEEK_SET) + if err != nil { + t.Fatalf("error seeking blob: %v", err) + } + + if offset != 0 { + t.Fatalf("seek failed: expected 0 offset, got %d", offset) + } + + p, err := ioutil.ReadAll(rc) + if err != nil { + t.Fatalf("error reading all of blob: %v", err) + } + + if len(p) != int(randomLayerSize) { + t.Fatalf("blob data read has different length: %v != %v", len(p), randomLayerSize) + } + + // Reset the randomLayerReader and read back the buffer + _, err = randomLayerReader.Seek(0, os.SEEK_SET) + if err != nil { + t.Fatalf("error resetting layer reader: %v", err) + } + + randomLayerData, err := ioutil.ReadAll(randomLayerReader) + if err != nil { + t.Fatalf("random layer read failed: %v", err) + } + + if !bytes.Equal(p, randomLayerData) { + t.Fatalf("layer data not equal") + } +} + +// TestBlobMount covers the blob mount process, exercising common +// error paths that might be seen during a mount. 
+func TestBlobMount(t *testing.T) { + randomDataReader, dgst, err := testutil.CreateRandomTarFile() + if err != nil { + t.Fatalf("error creating random reader: %v", err) + } + + ctx := context.Background() + imageName, _ := reference.ParseNamed("foo/bar") + sourceImageName, _ := reference.ParseNamed("foo/source") + driver := testdriver.New() + registry, err := NewRegistry(ctx, driver, BlobDescriptorCacheProvider(memory.NewInMemoryBlobDescriptorCacheProvider()), EnableDelete, EnableRedirect) + if err != nil { + t.Fatalf("error creating registry: %v", err) + } + + repository, err := registry.Repository(ctx, imageName) + if err != nil { + t.Fatalf("unexpected error getting repo: %v", err) + } + sourceRepository, err := registry.Repository(ctx, sourceImageName) + if err != nil { + t.Fatalf("unexpected error getting repo: %v", err) + } + + sbs := sourceRepository.Blobs(ctx) + + blobUpload, err := sbs.Create(ctx) + + if err != nil { + t.Fatalf("unexpected error starting layer upload: %s", err) + } + + // Get the size of our random tarfile + randomDataSize, err := seekerSize(randomDataReader) + if err != nil { + t.Fatalf("error getting seeker size of random data: %v", err) + } + + nn, err := io.Copy(blobUpload, randomDataReader) + if err != nil { + t.Fatalf("unexpected error uploading layer data: %v", err) + } + + desc, err := blobUpload.Commit(ctx, distribution.Descriptor{Digest: dgst}) + if err != nil { + t.Fatalf("unexpected error finishing layer upload: %v", err) + } + + // Test for existence. + statDesc, err := sbs.Stat(ctx, desc.Digest) + if err != nil { + t.Fatalf("unexpected error checking for existence: %v, %#v", err, sbs) + } + + if !reflect.DeepEqual(statDesc, desc) { + t.Fatalf("descriptors not equal: %v != %v", statDesc, desc) + } + + bs := repository.Blobs(ctx) + // Test destination for existence. + statDesc, err = bs.Stat(ctx, desc.Digest) + if err == nil { + t.Fatalf("unexpected non-error stating unmounted blob: %v", desc) + } + + canonicalRef, err := reference.WithDigest(sourceRepository.Named(), desc.Digest) + if err != nil { + t.Fatal(err) + } + + bw, err := bs.Create(ctx, WithMountFrom(canonicalRef)) + if bw != nil { + t.Fatal("unexpected blobwriter returned from Create call, should mount instead") + } + + ebm, ok := err.(distribution.ErrBlobMounted) + if !ok { + t.Fatalf("unexpected error mounting layer: %v", err) + } + + if !reflect.DeepEqual(ebm.Descriptor, desc) { + t.Fatalf("descriptors not equal: %v != %v", ebm.Descriptor, desc) + } + + // Test for existence. 
+ statDesc, err = bs.Stat(ctx, desc.Digest) + if err != nil { + t.Fatalf("unexpected error checking for existence: %v, %#v", err, bs) + } + + if !reflect.DeepEqual(statDesc, desc) { + t.Fatalf("descriptors not equal: %v != %v", statDesc, desc) + } + + rc, err := bs.Open(ctx, desc.Digest) + if err != nil { + t.Fatalf("unexpected error opening blob for read: %v", err) + } + defer rc.Close() + + h := sha256.New() + nn, err = io.Copy(h, rc) + if err != nil { + t.Fatalf("error reading layer: %v", err) + } + + if nn != randomDataSize { + t.Fatalf("incorrect read length") + } + + if digest.NewDigest("sha256", h) != dgst { + t.Fatalf("unexpected digest from uploaded layer: %q != %q", digest.NewDigest("sha256", h), dgst) + } + + // Delete the blob from the source repo + err = sbs.Delete(ctx, desc.Digest) + if err != nil { + t.Fatalf("Unexpected error deleting blob") + } + + d, err := bs.Stat(ctx, desc.Digest) + if err != nil { + t.Fatalf("unexpected error stating blob deleted from source repository: %v", err) + } + + d, err = sbs.Stat(ctx, desc.Digest) + if err == nil { + t.Fatalf("unexpected non-error stating deleted blob: %v", d) + } + + switch err { + case distribution.ErrBlobUnknown: + break + default: + t.Errorf("Unexpected error type stat-ing deleted manifest: %#v", err) + } + + // Delete the blob from the dest repo + err = bs.Delete(ctx, desc.Digest) + if err != nil { + t.Fatalf("Unexpected error deleting blob") + } + + d, err = bs.Stat(ctx, desc.Digest) + if err == nil { + t.Fatalf("unexpected non-error stating deleted blob: %v", d) + } + + switch err { + case distribution.ErrBlobUnknown: + break + default: + t.Errorf("Unexpected error type stat-ing deleted manifest: %#v", err) + } +} + +// TestLayerUploadZeroLength uploads zero-length +func TestLayerUploadZeroLength(t *testing.T) { + ctx := context.Background() + imageName, _ := reference.ParseNamed("foo/bar") + driver := testdriver.New() + registry, err := NewRegistry(ctx, driver, BlobDescriptorCacheProvider(memory.NewInMemoryBlobDescriptorCacheProvider()), EnableDelete, EnableRedirect) + if err != nil { + t.Fatalf("error creating registry: %v", err) + } + repository, err := registry.Repository(ctx, imageName) + if err != nil { + t.Fatalf("unexpected error getting repo: %v", err) + } + bs := repository.Blobs(ctx) + + simpleUpload(t, bs, []byte{}, digest.DigestSha256EmptyTar) +} + +func simpleUpload(t *testing.T, bs distribution.BlobIngester, blob []byte, expectedDigest digest.Digest) { + ctx := context.Background() + wr, err := bs.Create(ctx) + if err != nil { + t.Fatalf("unexpected error starting upload: %v", err) + } + + nn, err := io.Copy(wr, bytes.NewReader(blob)) + if err != nil { + t.Fatalf("error copying into blob writer: %v", err) + } + + if nn != 0 { + t.Fatalf("unexpected number of bytes copied: %v > 0", nn) + } + + dgst, err := digest.FromReader(bytes.NewReader(blob)) + if err != nil { + t.Fatalf("error getting digest: %v", err) + } + + if dgst != expectedDigest { + // sanity check on zero digest + t.Fatalf("digest not as expected: %v != %v", dgst, expectedDigest) + } + + desc, err := wr.Commit(ctx, distribution.Descriptor{Digest: dgst}) + if err != nil { + t.Fatalf("unexpected error committing write: %v", err) + } + + if desc.Digest != dgst { + t.Fatalf("unexpected digest: %v != %v", desc.Digest, dgst) + } +} + +// seekerSize seeks to the end of seeker, checks the size and returns it to +// the original state, returning the size. The state of the seeker should be +// treated as unknown if an error is returned. 
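+// Minimal illustration: for an in-memory reader the reported size is the
+// buffer length and the read offset is left untouched (sketch only):
+//
+//	r := bytes.NewReader(data)
+//	size, err := seekerSize(r) // size == int64(len(data)), offset unchanged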
+func seekerSize(seeker io.ReadSeeker) (int64, error) { + current, err := seeker.Seek(0, os.SEEK_CUR) + if err != nil { + return 0, err + } + + end, err := seeker.Seek(0, os.SEEK_END) + if err != nil { + return 0, err + } + + resumed, err := seeker.Seek(current, os.SEEK_SET) + if err != nil { + return 0, err + } + + if resumed != current { + return 0, fmt.Errorf("error returning seeker to original state, could not seek back to original location") + } + + return end, nil +} + +// addBlob simply consumes the reader and inserts into the blob service, +// returning a descriptor on success. +func addBlob(ctx context.Context, bs distribution.BlobIngester, desc distribution.Descriptor, rd io.Reader) (distribution.Descriptor, error) { + wr, err := bs.Create(ctx) + if err != nil { + return distribution.Descriptor{}, err + } + defer wr.Cancel(ctx) + + if nn, err := io.Copy(wr, rd); err != nil { + return distribution.Descriptor{}, err + } else if nn != desc.Size { + return distribution.Descriptor{}, fmt.Errorf("incorrect number of bytes copied: %v != %v", nn, desc.Size) + } + + return wr.Commit(ctx, desc) +} diff --git a/vendor/github.com/docker/distribution/registry/storage/blobcachemetrics.go b/vendor/github.com/docker/distribution/registry/storage/blobcachemetrics.go new file mode 100644 index 000000000..fad0a77a1 --- /dev/null +++ b/vendor/github.com/docker/distribution/registry/storage/blobcachemetrics.go @@ -0,0 +1,60 @@ +package storage + +import ( + "expvar" + "sync/atomic" + + "github.com/docker/distribution/registry/storage/cache" +) + +type blobStatCollector struct { + metrics cache.Metrics +} + +func (bsc *blobStatCollector) Hit() { + atomic.AddUint64(&bsc.metrics.Requests, 1) + atomic.AddUint64(&bsc.metrics.Hits, 1) +} + +func (bsc *blobStatCollector) Miss() { + atomic.AddUint64(&bsc.metrics.Requests, 1) + atomic.AddUint64(&bsc.metrics.Misses, 1) +} + +func (bsc *blobStatCollector) Metrics() cache.Metrics { + return bsc.metrics +} + +// blobStatterCacheMetrics keeps track of cache metrics for blob descriptor +// cache requests. Note this is kept globally and made available via expvar. +// For more detailed metrics, its recommend to instrument a particular cache +// implementation. +var blobStatterCacheMetrics cache.MetricsTracker = &blobStatCollector{} + +func init() { + registry := expvar.Get("registry") + if registry == nil { + registry = expvar.NewMap("registry") + } + + cache := registry.(*expvar.Map).Get("cache") + if cache == nil { + cache = &expvar.Map{} + cache.(*expvar.Map).Init() + registry.(*expvar.Map).Set("cache", cache) + } + + storage := cache.(*expvar.Map).Get("storage") + if storage == nil { + storage = &expvar.Map{} + storage.(*expvar.Map).Init() + cache.(*expvar.Map).Set("storage", storage) + } + + storage.(*expvar.Map).Set("blobdescriptor", expvar.Func(func() interface{} { + // no need for synchronous access: the increments are atomic and + // during reading, we don't care if the data is up to date. The + // numbers will always *eventually* be reported correctly. 
+ return blobStatterCacheMetrics + })) +} diff --git a/vendor/github.com/docker/distribution/registry/storage/blobserver.go b/vendor/github.com/docker/distribution/registry/storage/blobserver.go new file mode 100644 index 000000000..2655e0113 --- /dev/null +++ b/vendor/github.com/docker/distribution/registry/storage/blobserver.go @@ -0,0 +1,78 @@ +package storage + +import ( + "fmt" + "net/http" + "time" + + "github.com/docker/distribution" + "github.com/docker/distribution/context" + "github.com/docker/distribution/digest" + "github.com/docker/distribution/registry/storage/driver" +) + +// TODO(stevvooe): This should configurable in the future. +const blobCacheControlMaxAge = 365 * 24 * time.Hour + +// blobServer simply serves blobs from a driver instance using a path function +// to identify paths and a descriptor service to fill in metadata. +type blobServer struct { + driver driver.StorageDriver + statter distribution.BlobStatter + pathFn func(dgst digest.Digest) (string, error) + redirect bool // allows disabling URLFor redirects +} + +func (bs *blobServer) ServeBlob(ctx context.Context, w http.ResponseWriter, r *http.Request, dgst digest.Digest) error { + desc, err := bs.statter.Stat(ctx, dgst) + if err != nil { + return err + } + + path, err := bs.pathFn(desc.Digest) + if err != nil { + return err + } + + if bs.redirect { + redirectURL, err := bs.driver.URLFor(ctx, path, map[string]interface{}{"method": r.Method}) + switch err.(type) { + case nil: + // Redirect to storage URL. + http.Redirect(w, r, redirectURL, http.StatusTemporaryRedirect) + return err + + case driver.ErrUnsupportedMethod: + // Fallback to serving the content directly. + default: + // Some unexpected error. + return err + } + } + + br, err := newFileReader(ctx, bs.driver, path, desc.Size) + if err != nil { + return err + } + defer br.Close() + + w.Header().Set("ETag", fmt.Sprintf(`"%s"`, desc.Digest)) // If-None-Match handled by ServeContent + w.Header().Set("Cache-Control", fmt.Sprintf("max-age=%.f", blobCacheControlMaxAge.Seconds())) + + if w.Header().Get("Docker-Content-Digest") == "" { + w.Header().Set("Docker-Content-Digest", desc.Digest.String()) + } + + if w.Header().Get("Content-Type") == "" { + // Set the content type if not already set. + w.Header().Set("Content-Type", desc.MediaType) + } + + if w.Header().Get("Content-Length") == "" { + // Set the content length if not already set. + w.Header().Set("Content-Length", fmt.Sprint(desc.Size)) + } + + http.ServeContent(w, r, desc.Digest.String(), time.Time{}, br) + return nil +} diff --git a/vendor/github.com/docker/distribution/registry/storage/blobstore.go b/vendor/github.com/docker/distribution/registry/storage/blobstore.go new file mode 100644 index 000000000..4274cc9e8 --- /dev/null +++ b/vendor/github.com/docker/distribution/registry/storage/blobstore.go @@ -0,0 +1,223 @@ +package storage + +import ( + "path" + + "github.com/docker/distribution" + "github.com/docker/distribution/context" + "github.com/docker/distribution/digest" + "github.com/docker/distribution/registry/storage/driver" +) + +// blobStore implements the read side of the blob store interface over a +// driver without enforcing per-repository membership. This object is +// intentionally a leaky abstraction, providing utility methods that support +// creating and traversing backend links. +type blobStore struct { + driver driver.StorageDriver + statter distribution.BlobStatter +} + +var _ distribution.BlobProvider = &blobStore{} + +// Get implements the BlobReadService.Get call. 
+func (bs *blobStore) Get(ctx context.Context, dgst digest.Digest) ([]byte, error) { + bp, err := bs.path(dgst) + if err != nil { + return nil, err + } + + p, err := bs.driver.GetContent(ctx, bp) + if err != nil { + switch err.(type) { + case driver.PathNotFoundError: + return nil, distribution.ErrBlobUnknown + } + + return nil, err + } + + return p, err +} + +func (bs *blobStore) Open(ctx context.Context, dgst digest.Digest) (distribution.ReadSeekCloser, error) { + desc, err := bs.statter.Stat(ctx, dgst) + if err != nil { + return nil, err + } + + path, err := bs.path(desc.Digest) + if err != nil { + return nil, err + } + + return newFileReader(ctx, bs.driver, path, desc.Size) +} + +// Put stores the content p in the blob store, calculating the digest. If the +// content is already present, only the digest will be returned. This should +// only be used for small objects, such as manifests. This implemented as a convenience for other Put implementations +func (bs *blobStore) Put(ctx context.Context, mediaType string, p []byte) (distribution.Descriptor, error) { + dgst := digest.FromBytes(p) + desc, err := bs.statter.Stat(ctx, dgst) + if err == nil { + // content already present + return desc, nil + } else if err != distribution.ErrBlobUnknown { + context.GetLogger(ctx).Errorf("blobStore: error stating content (%v): %v", dgst, err) + // real error, return it + return distribution.Descriptor{}, err + } + + bp, err := bs.path(dgst) + if err != nil { + return distribution.Descriptor{}, err + } + + // TODO(stevvooe): Write out mediatype here, as well. + return distribution.Descriptor{ + Size: int64(len(p)), + + // NOTE(stevvooe): The central blob store firewalls media types from + // other users. The caller should look this up and override the value + // for the specific repository. + MediaType: "application/octet-stream", + Digest: dgst, + }, bs.driver.PutContent(ctx, bp, p) +} + +func (bs *blobStore) Enumerate(ctx context.Context, ingester func(dgst digest.Digest) error) error { + + specPath, err := pathFor(blobsPathSpec{}) + if err != nil { + return err + } + + err = Walk(ctx, bs.driver, specPath, func(fileInfo driver.FileInfo) error { + // skip directories + if fileInfo.IsDir() { + return nil + } + + currentPath := fileInfo.Path() + // we only want to parse paths that end with /data + _, fileName := path.Split(currentPath) + if fileName != "data" { + return nil + } + + digest, err := digestFromPath(currentPath) + if err != nil { + return err + } + + return ingester(digest) + }) + return err +} + +// path returns the canonical path for the blob identified by digest. The blob +// may or may not exist. +func (bs *blobStore) path(dgst digest.Digest) (string, error) { + bp, err := pathFor(blobDataPathSpec{ + digest: dgst, + }) + + if err != nil { + return "", err + } + + return bp, nil +} + +// link links the path to the provided digest by writing the digest into the +// target file. Caller must ensure that the blob actually exists. +func (bs *blobStore) link(ctx context.Context, path string, dgst digest.Digest) error { + // The contents of the "link" file are the exact string contents of the + // digest, which is specified in that package. + return bs.driver.PutContent(ctx, path, []byte(dgst)) +} + +// readlink returns the linked digest at path. 
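+// Illustration: a link file written by link above holds nothing but the
+// digest string itself, e.g. "sha256:e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
+// which readlink reads back and parses into a digest.Digest.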
+func (bs *blobStore) readlink(ctx context.Context, path string) (digest.Digest, error) { + content, err := bs.driver.GetContent(ctx, path) + if err != nil { + return "", err + } + + linked, err := digest.ParseDigest(string(content)) + if err != nil { + return "", err + } + + return linked, nil +} + +// resolve reads the digest link at path and returns the blob store path. +func (bs *blobStore) resolve(ctx context.Context, path string) (string, error) { + dgst, err := bs.readlink(ctx, path) + if err != nil { + return "", err + } + + return bs.path(dgst) +} + +type blobStatter struct { + driver driver.StorageDriver +} + +var _ distribution.BlobDescriptorService = &blobStatter{} + +// Stat implements BlobStatter.Stat by returning the descriptor for the blob +// in the main blob store. If this method returns successfully, there is +// strong guarantee that the blob exists and is available. +func (bs *blobStatter) Stat(ctx context.Context, dgst digest.Digest) (distribution.Descriptor, error) { + path, err := pathFor(blobDataPathSpec{ + digest: dgst, + }) + + if err != nil { + return distribution.Descriptor{}, err + } + + fi, err := bs.driver.Stat(ctx, path) + if err != nil { + switch err := err.(type) { + case driver.PathNotFoundError: + return distribution.Descriptor{}, distribution.ErrBlobUnknown + default: + return distribution.Descriptor{}, err + } + } + + if fi.IsDir() { + // NOTE(stevvooe): This represents a corruption situation. Somehow, we + // calculated a blob path and then detected a directory. We log the + // error and then error on the side of not knowing about the blob. + context.GetLogger(ctx).Warnf("blob path should not be a directory: %q", path) + return distribution.Descriptor{}, distribution.ErrBlobUnknown + } + + // TODO(stevvooe): Add method to resolve the mediatype. We can store and + // cache a "global" media type for the blob, even if a specific repo has a + // mediatype that overrides the main one. + + return distribution.Descriptor{ + Size: fi.Size(), + + // NOTE(stevvooe): The central blob store firewalls media types from + // other users. The caller should look this up and override the value + // for the specific repository. + MediaType: "application/octet-stream", + Digest: dgst, + }, nil +} + +func (bs *blobStatter) Clear(ctx context.Context, dgst digest.Digest) error { + return distribution.ErrUnsupported +} + +func (bs *blobStatter) SetDescriptor(ctx context.Context, dgst digest.Digest, desc distribution.Descriptor) error { + return distribution.ErrUnsupported +} diff --git a/vendor/github.com/docker/distribution/registry/storage/blobwriter.go b/vendor/github.com/docker/distribution/registry/storage/blobwriter.go new file mode 100644 index 000000000..668a6fc9b --- /dev/null +++ b/vendor/github.com/docker/distribution/registry/storage/blobwriter.go @@ -0,0 +1,399 @@ +package storage + +import ( + "errors" + "fmt" + "io" + "path" + "time" + + "github.com/Sirupsen/logrus" + "github.com/docker/distribution" + "github.com/docker/distribution/context" + "github.com/docker/distribution/digest" + storagedriver "github.com/docker/distribution/registry/storage/driver" +) + +var ( + errResumableDigestNotAvailable = errors.New("resumable digest not available") +) + +// blobWriter is used to control the various aspects of resumable +// blob upload. 
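+// Illustrative lifecycle, as exercised by the storage tests (sketch only;
+// error handling elided):
+//
+//	bw, _ := blobs.Create(ctx)  // bw satisfies distribution.BlobWriter
+//	_, _ = io.Copy(bw, content) // Write/ReadFrom feed both the file and the digester
+//	desc, _ := bw.Commit(ctx, distribution.Descriptor{Digest: dgst})
+//	// ...or bw.Cancel(ctx) to abandon the upload and release its resources.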
+type blobWriter struct { + ctx context.Context + blobStore *linkedBlobStore + + id string + startedAt time.Time + digester digest.Digester + written int64 // track the contiguous write + + fileWriter storagedriver.FileWriter + driver storagedriver.StorageDriver + path string + + resumableDigestEnabled bool + committed bool +} + +var _ distribution.BlobWriter = &blobWriter{} + +// ID returns the identifier for this upload. +func (bw *blobWriter) ID() string { + return bw.id +} + +func (bw *blobWriter) StartedAt() time.Time { + return bw.startedAt +} + +// Commit marks the upload as completed, returning a valid descriptor. The +// final size and digest are checked against the first descriptor provided. +func (bw *blobWriter) Commit(ctx context.Context, desc distribution.Descriptor) (distribution.Descriptor, error) { + context.GetLogger(ctx).Debug("(*blobWriter).Commit") + + if err := bw.fileWriter.Commit(); err != nil { + return distribution.Descriptor{}, err + } + + bw.Close() + desc.Size = bw.Size() + + canonical, err := bw.validateBlob(ctx, desc) + if err != nil { + return distribution.Descriptor{}, err + } + + if err := bw.moveBlob(ctx, canonical); err != nil { + return distribution.Descriptor{}, err + } + + if err := bw.blobStore.linkBlob(ctx, canonical, desc.Digest); err != nil { + return distribution.Descriptor{}, err + } + + if err := bw.removeResources(ctx); err != nil { + return distribution.Descriptor{}, err + } + + err = bw.blobStore.blobAccessController.SetDescriptor(ctx, canonical.Digest, canonical) + if err != nil { + return distribution.Descriptor{}, err + } + + bw.committed = true + return canonical, nil +} + +// Cancel the blob upload process, releasing any resources associated with +// the writer and canceling the operation. +func (bw *blobWriter) Cancel(ctx context.Context) error { + context.GetLogger(ctx).Debug("(*blobWriter).Cancel") + if err := bw.fileWriter.Cancel(); err != nil { + return err + } + + if err := bw.Close(); err != nil { + context.GetLogger(ctx).Errorf("error closing blobwriter: %s", err) + } + + if err := bw.removeResources(ctx); err != nil { + return err + } + + return nil +} + +func (bw *blobWriter) Size() int64 { + return bw.fileWriter.Size() +} + +func (bw *blobWriter) Write(p []byte) (int, error) { + // Ensure that the current write offset matches how many bytes have been + // written to the digester. If not, we need to update the digest state to + // match the current write position. + if err := bw.resumeDigest(bw.blobStore.ctx); err != nil && err != errResumableDigestNotAvailable { + return 0, err + } + + n, err := io.MultiWriter(bw.fileWriter, bw.digester.Hash()).Write(p) + bw.written += int64(n) + + return n, err +} + +func (bw *blobWriter) ReadFrom(r io.Reader) (n int64, err error) { + // Ensure that the current write offset matches how many bytes have been + // written to the digester. If not, we need to update the digest state to + // match the current write position. 
+ if err := bw.resumeDigest(bw.blobStore.ctx); err != nil && err != errResumableDigestNotAvailable { + return 0, err + } + + nn, err := io.Copy(io.MultiWriter(bw.fileWriter, bw.digester.Hash()), r) + bw.written += nn + + return nn, err +} + +func (bw *blobWriter) Close() error { + if bw.committed { + return errors.New("blobwriter close after commit") + } + + if err := bw.storeHashState(bw.blobStore.ctx); err != nil && err != errResumableDigestNotAvailable { + return err + } + + return bw.fileWriter.Close() +} + +// validateBlob checks the data against the digest, returning an error if it +// does not match. The canonical descriptor is returned. +func (bw *blobWriter) validateBlob(ctx context.Context, desc distribution.Descriptor) (distribution.Descriptor, error) { + var ( + verified, fullHash bool + canonical digest.Digest + ) + + if desc.Digest == "" { + // if no descriptors are provided, we have nothing to validate + // against. We don't really want to support this for the registry. + return distribution.Descriptor{}, distribution.ErrBlobInvalidDigest{ + Reason: fmt.Errorf("cannot validate against empty digest"), + } + } + + var size int64 + + // Stat the on disk file + if fi, err := bw.driver.Stat(ctx, bw.path); err != nil { + switch err := err.(type) { + case storagedriver.PathNotFoundError: + // NOTE(stevvooe): We really don't care if the file is + // not actually present for the reader. We now assume + // that the desc length is zero. + desc.Size = 0 + default: + // Any other error we want propagated up the stack. + return distribution.Descriptor{}, err + } + } else { + if fi.IsDir() { + return distribution.Descriptor{}, fmt.Errorf("unexpected directory at upload location %q", bw.path) + } + + size = fi.Size() + } + + if desc.Size > 0 { + if desc.Size != size { + return distribution.Descriptor{}, distribution.ErrBlobInvalidLength + } + } else { + // if provided 0 or negative length, we can assume caller doesn't know or + // care about length. + desc.Size = size + } + + // TODO(stevvooe): This section is very meandering. Need to be broken down + // to be a lot more clear. + + if err := bw.resumeDigest(ctx); err == nil { + canonical = bw.digester.Digest() + + if canonical.Algorithm() == desc.Digest.Algorithm() { + // Common case: client and server prefer the same canonical digest + // algorithm - currently SHA256. + verified = desc.Digest == canonical + } else { + // The client wants to use a different digest algorithm. They'll just + // have to be patient and wait for us to download and re-hash the + // uploaded content using that digest algorithm. + fullHash = true + } + } else if err == errResumableDigestNotAvailable { + // Not using resumable digests, so we need to hash the entire layer. + fullHash = true + } else { + return distribution.Descriptor{}, err + } + + if fullHash { + // a fantastic optimization: if the the written data and the size are + // the same, we don't need to read the data from the backend. This is + // because we've written the entire file in the lifecycle of the + // current instance. + if bw.written == size && digest.Canonical == desc.Digest.Algorithm() { + canonical = bw.digester.Digest() + verified = desc.Digest == canonical + } + + // If the check based on size fails, we fall back to the slowest of + // paths. We may be able to make the size-based check a stronger + // guarantee, so this may be defensive. 
+ if !verified { + digester := digest.Canonical.New() + + digestVerifier, err := digest.NewDigestVerifier(desc.Digest) + if err != nil { + return distribution.Descriptor{}, err + } + + // Read the file from the backend driver and validate it. + fr, err := newFileReader(ctx, bw.driver, bw.path, desc.Size) + if err != nil { + return distribution.Descriptor{}, err + } + defer fr.Close() + + tr := io.TeeReader(fr, digester.Hash()) + + if _, err := io.Copy(digestVerifier, tr); err != nil { + return distribution.Descriptor{}, err + } + + canonical = digester.Digest() + verified = digestVerifier.Verified() + } + } + + if !verified { + context.GetLoggerWithFields(ctx, + map[interface{}]interface{}{ + "canonical": canonical, + "provided": desc.Digest, + }, "canonical", "provided"). + Errorf("canonical digest does match provided digest") + return distribution.Descriptor{}, distribution.ErrBlobInvalidDigest{ + Digest: desc.Digest, + Reason: fmt.Errorf("content does not match digest"), + } + } + + // update desc with canonical hash + desc.Digest = canonical + + if desc.MediaType == "" { + desc.MediaType = "application/octet-stream" + } + + return desc, nil +} + +// moveBlob moves the data into its final, hash-qualified destination, +// identified by dgst. The layer should be validated before commencing the +// move. +func (bw *blobWriter) moveBlob(ctx context.Context, desc distribution.Descriptor) error { + blobPath, err := pathFor(blobDataPathSpec{ + digest: desc.Digest, + }) + + if err != nil { + return err + } + + // Check for existence + if _, err := bw.blobStore.driver.Stat(ctx, blobPath); err != nil { + switch err := err.(type) { + case storagedriver.PathNotFoundError: + break // ensure that it doesn't exist. + default: + return err + } + } else { + // If the path exists, we can assume that the content has already + // been uploaded, since the blob storage is content-addressable. + // While it may be corrupted, detection of such corruption belongs + // elsewhere. + return nil + } + + // If no data was received, we may not actually have a file on disk. Check + // the size here and write a zero-length file to blobPath if this is the + // case. For the most part, this should only ever happen with zero-length + // tars. + if _, err := bw.blobStore.driver.Stat(ctx, bw.path); err != nil { + switch err := err.(type) { + case storagedriver.PathNotFoundError: + // HACK(stevvooe): This is slightly dangerous: if we verify above, + // get a hash, then the underlying file is deleted, we risk moving + // a zero-length blob into a nonzero-length blob location. To + // prevent this horrid thing, we employ the hack of only allowing + // to this happen for the digest of an empty tar. + if desc.Digest == digest.DigestSha256EmptyTar { + return bw.blobStore.driver.PutContent(ctx, blobPath, []byte{}) + } + + // We let this fail during the move below. + logrus. + WithField("upload.id", bw.ID()). + WithField("digest", desc.Digest).Warnf("attempted to move zero-length content with non-zero digest") + default: + return err // unrelated error + } + } + + // TODO(stevvooe): We should also write the mediatype when executing this move. + + return bw.blobStore.driver.Move(ctx, bw.path, blobPath) +} + +// removeResources should clean up all resources associated with the upload +// instance. An error will be returned if the clean up cannot proceed. If the +// resources are already not present, no error will be returned. 
+func (bw *blobWriter) removeResources(ctx context.Context) error { + dataPath, err := pathFor(uploadDataPathSpec{ + name: bw.blobStore.repository.Named().Name(), + id: bw.id, + }) + + if err != nil { + return err + } + + // Resolve and delete the containing directory, which should include any + // upload related files. + dirPath := path.Dir(dataPath) + if err := bw.blobStore.driver.Delete(ctx, dirPath); err != nil { + switch err := err.(type) { + case storagedriver.PathNotFoundError: + break // already gone! + default: + // This should be uncommon enough such that returning an error + // should be okay. At this point, the upload should be mostly + // complete, but perhaps the backend became unaccessible. + context.GetLogger(ctx).Errorf("unable to delete layer upload resources %q: %v", dirPath, err) + return err + } + } + + return nil +} + +func (bw *blobWriter) Reader() (io.ReadCloser, error) { + // todo(richardscothern): Change to exponential backoff, i=0.5, e=2, n=4 + try := 1 + for try <= 5 { + _, err := bw.driver.Stat(bw.ctx, bw.path) + if err == nil { + break + } + switch err.(type) { + case storagedriver.PathNotFoundError: + context.GetLogger(bw.ctx).Debugf("Nothing found on try %d, sleeping...", try) + time.Sleep(1 * time.Second) + try++ + default: + return nil, err + } + } + + readCloser, err := bw.driver.Reader(bw.ctx, bw.path, 0) + if err != nil { + return nil, err + } + + return readCloser, nil +} diff --git a/vendor/github.com/docker/distribution/registry/storage/blobwriter_nonresumable.go b/vendor/github.com/docker/distribution/registry/storage/blobwriter_nonresumable.go new file mode 100644 index 000000000..32f130974 --- /dev/null +++ b/vendor/github.com/docker/distribution/registry/storage/blobwriter_nonresumable.go @@ -0,0 +1,17 @@ +// +build noresumabledigest + +package storage + +import ( + "github.com/docker/distribution/context" +) + +// resumeHashAt is a noop when resumable digest support is disabled. +func (bw *blobWriter) resumeDigest(ctx context.Context) error { + return errResumableDigestNotAvailable +} + +// storeHashState is a noop when resumable digest support is disabled. +func (bw *blobWriter) storeHashState(ctx context.Context) error { + return errResumableDigestNotAvailable +} diff --git a/vendor/github.com/docker/distribution/registry/storage/blobwriter_resumable.go b/vendor/github.com/docker/distribution/registry/storage/blobwriter_resumable.go new file mode 100644 index 000000000..ff5482c3f --- /dev/null +++ b/vendor/github.com/docker/distribution/registry/storage/blobwriter_resumable.go @@ -0,0 +1,145 @@ +// +build !noresumabledigest + +package storage + +import ( + "fmt" + "path" + "strconv" + + "github.com/Sirupsen/logrus" + "github.com/docker/distribution/context" + storagedriver "github.com/docker/distribution/registry/storage/driver" + "github.com/stevvooe/resumable" + + // register resumable hashes with import + _ "github.com/stevvooe/resumable/sha256" + _ "github.com/stevvooe/resumable/sha512" +) + +// resumeDigest attempts to restore the state of the internal hash function +// by loading the most recent saved hash state equal to the current size of the blob. +func (bw *blobWriter) resumeDigest(ctx context.Context) error { + if !bw.resumableDigestEnabled { + return errResumableDigestNotAvailable + } + + h, ok := bw.digester.Hash().(resumable.Hash) + if !ok { + return errResumableDigestNotAvailable + } + offset := bw.fileWriter.Size() + if offset == int64(h.Len()) { + // State of digester is already at the requested offset. 
+ return nil + } + + // List hash states from storage backend. + var hashStateMatch hashStateEntry + hashStates, err := bw.getStoredHashStates(ctx) + if err != nil { + return fmt.Errorf("unable to get stored hash states with offset %d: %s", offset, err) + } + + // Find the highest stored hashState with offset equal to + // the requested offset. + for _, hashState := range hashStates { + if hashState.offset == offset { + hashStateMatch = hashState + break // Found an exact offset match. + } + } + + if hashStateMatch.offset == 0 { + // No need to load any state, just reset the hasher. + h.Reset() + } else { + storedState, err := bw.driver.GetContent(ctx, hashStateMatch.path) + if err != nil { + return err + } + + if err = h.Restore(storedState); err != nil { + return err + } + } + + // Mind the gap. + if gapLen := offset - int64(h.Len()); gapLen > 0 { + return errResumableDigestNotAvailable + } + + return nil +} + +type hashStateEntry struct { + offset int64 + path string +} + +// getStoredHashStates returns a slice of hashStateEntries for this upload. +func (bw *blobWriter) getStoredHashStates(ctx context.Context) ([]hashStateEntry, error) { + uploadHashStatePathPrefix, err := pathFor(uploadHashStatePathSpec{ + name: bw.blobStore.repository.Named().String(), + id: bw.id, + alg: bw.digester.Digest().Algorithm(), + list: true, + }) + + if err != nil { + return nil, err + } + + paths, err := bw.blobStore.driver.List(ctx, uploadHashStatePathPrefix) + if err != nil { + if _, ok := err.(storagedriver.PathNotFoundError); !ok { + return nil, err + } + // Treat PathNotFoundError as no entries. + paths = nil + } + + hashStateEntries := make([]hashStateEntry, 0, len(paths)) + + for _, p := range paths { + pathSuffix := path.Base(p) + // The suffix should be the offset. + offset, err := strconv.ParseInt(pathSuffix, 0, 64) + if err != nil { + logrus.Errorf("unable to parse offset from upload state path %q: %s", p, err) + } + + hashStateEntries = append(hashStateEntries, hashStateEntry{offset: offset, path: p}) + } + + return hashStateEntries, nil +} + +func (bw *blobWriter) storeHashState(ctx context.Context) error { + if !bw.resumableDigestEnabled { + return errResumableDigestNotAvailable + } + + h, ok := bw.digester.Hash().(resumable.Hash) + if !ok { + return errResumableDigestNotAvailable + } + + uploadHashStatePath, err := pathFor(uploadHashStatePathSpec{ + name: bw.blobStore.repository.Named().String(), + id: bw.id, + alg: bw.digester.Digest().Algorithm(), + offset: int64(h.Len()), + }) + + if err != nil { + return err + } + + hashState, err := h.State() + if err != nil { + return err + } + + return bw.driver.PutContent(ctx, uploadHashStatePath, hashState) +} diff --git a/vendor/github.com/docker/distribution/registry/storage/cache/cache.go b/vendor/github.com/docker/distribution/registry/storage/cache/cache.go new file mode 100644 index 000000000..10a390919 --- /dev/null +++ b/vendor/github.com/docker/distribution/registry/storage/cache/cache.go @@ -0,0 +1,35 @@ +// Package cache provides facilities to speed up access to the storage +// backend. +package cache + +import ( + "fmt" + + "github.com/docker/distribution" +) + +// BlobDescriptorCacheProvider provides repository scoped +// BlobDescriptorService cache instances and a global descriptor cache. 
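The resumable-digest helpers above persist a serialized hash state under a path whose final component is the byte offset it corresponds to, which is why getStoredHashStates can recover the offset with strconv.ParseInt on the path suffix. A tiny sketch of that parsing step; the path layout shown is illustrative, the real paths come from pathFor and uploadHashStatePathSpec:

package main

import (
	"fmt"
	"path"
	"strconv"
)

func main() {
	p := "/v2/repositories/foo/bar/_uploads/1234/hashstates/sha256/4096"
	offset, err := strconv.ParseInt(path.Base(p), 0, 64)
	if err != nil {
		panic(err)
	}
	fmt.Println(offset) // 4096: resume the sha256 state saved at this byte offset
}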
+type BlobDescriptorCacheProvider interface { + distribution.BlobDescriptorService + + RepositoryScoped(repo string) (distribution.BlobDescriptorService, error) +} + +// ValidateDescriptor provides a helper function to ensure that caches have +// common criteria for admitting descriptors. +func ValidateDescriptor(desc distribution.Descriptor) error { + if err := desc.Digest.Validate(); err != nil { + return err + } + + if desc.Size < 0 { + return fmt.Errorf("cache: invalid length in descriptor: %v < 0", desc.Size) + } + + if desc.MediaType == "" { + return fmt.Errorf("cache: empty mediatype on descriptor: %v", desc) + } + + return nil +} diff --git a/vendor/github.com/docker/distribution/registry/storage/cache/cachecheck/suite.go b/vendor/github.com/docker/distribution/registry/storage/cache/cachecheck/suite.go new file mode 100644 index 000000000..a563c02a1 --- /dev/null +++ b/vendor/github.com/docker/distribution/registry/storage/cache/cachecheck/suite.go @@ -0,0 +1,180 @@ +package cachecheck + +import ( + "reflect" + "testing" + + "github.com/docker/distribution" + "github.com/docker/distribution/context" + "github.com/docker/distribution/digest" + "github.com/docker/distribution/registry/storage/cache" +) + +// CheckBlobDescriptorCache takes a cache implementation through a common set +// of operations. If adding new tests, please add them here so new +// implementations get the benefit. This should be used for unit tests. +func CheckBlobDescriptorCache(t *testing.T, provider cache.BlobDescriptorCacheProvider) { + ctx := context.Background() + + checkBlobDescriptorCacheEmptyRepository(ctx, t, provider) + checkBlobDescriptorCacheSetAndRead(ctx, t, provider) + checkBlobDescriptorCacheClear(ctx, t, provider) +} + +func checkBlobDescriptorCacheEmptyRepository(ctx context.Context, t *testing.T, provider cache.BlobDescriptorCacheProvider) { + if _, err := provider.Stat(ctx, "sha384:abc111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111"); err != distribution.ErrBlobUnknown { + t.Fatalf("expected unknown blob error with empty store: %v", err) + } + + cache, err := provider.RepositoryScoped("") + if err == nil { + t.Fatalf("expected an error when asking for invalid repo") + } + + cache, err = provider.RepositoryScoped("foo/bar") + if err != nil { + t.Fatalf("unexpected error getting repository: %v", err) + } + + if err := cache.SetDescriptor(ctx, "", distribution.Descriptor{ + Digest: "sha384:abc", + Size: 10, + MediaType: "application/octet-stream"}); err != digest.ErrDigestInvalidFormat { + t.Fatalf("expected error with invalid digest: %v", err) + } + + if err := cache.SetDescriptor(ctx, "sha384:abc111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111", distribution.Descriptor{ + Digest: "", + Size: 10, + MediaType: "application/octet-stream"}); err == nil { + t.Fatalf("expected error setting value on invalid descriptor") + } + + if _, err := cache.Stat(ctx, ""); err != digest.ErrDigestInvalidFormat { + t.Fatalf("expected error checking for cache item with empty digest: %v", err) + } + + if _, err := cache.Stat(ctx, "sha384:abc111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111"); err != distribution.ErrBlobUnknown { + t.Fatalf("expected unknown blob error with empty repo: %v", err) + } +} + +func checkBlobDescriptorCacheSetAndRead(ctx context.Context, t *testing.T, provider cache.BlobDescriptorCacheProvider) { + localDigest := 
digest.Digest("sha384:abc111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111") + expected := distribution.Descriptor{ + Digest: "sha256:abc1111111111111111111111111111111111111111111111111111111111111", + Size: 10, + MediaType: "application/octet-stream"} + + cache, err := provider.RepositoryScoped("foo/bar") + if err != nil { + t.Fatalf("unexpected error getting scoped cache: %v", err) + } + + if err := cache.SetDescriptor(ctx, localDigest, expected); err != nil { + t.Fatalf("error setting descriptor: %v", err) + } + + desc, err := cache.Stat(ctx, localDigest) + if err != nil { + t.Fatalf("unexpected error statting fake2:abc: %v", err) + } + + if !reflect.DeepEqual(expected, desc) { + t.Fatalf("unexpected descriptor: %#v != %#v", expected, desc) + } + + // also check that we set the canonical key ("fake:abc") + desc, err = cache.Stat(ctx, localDigest) + if err != nil { + t.Fatalf("descriptor not returned for canonical key: %v", err) + } + + if !reflect.DeepEqual(expected, desc) { + t.Fatalf("unexpected descriptor: %#v != %#v", expected, desc) + } + + // ensure that global gets extra descriptor mapping + desc, err = provider.Stat(ctx, localDigest) + if err != nil { + t.Fatalf("expected blob unknown in global cache: %v, %v", err, desc) + } + + if !reflect.DeepEqual(desc, expected) { + t.Fatalf("unexpected descriptor: %#v != %#v", expected, desc) + } + + // get at it through canonical descriptor + desc, err = provider.Stat(ctx, expected.Digest) + if err != nil { + t.Fatalf("unexpected error checking glboal descriptor: %v", err) + } + + if !reflect.DeepEqual(desc, expected) { + t.Fatalf("unexpected descriptor: %#v != %#v", expected, desc) + } + + // now, we set the repo local mediatype to something else and ensure it + // doesn't get changed in the provider cache. 
+ expected.MediaType = "application/json" + + if err := cache.SetDescriptor(ctx, localDigest, expected); err != nil { + t.Fatalf("unexpected error setting descriptor: %v", err) + } + + desc, err = cache.Stat(ctx, localDigest) + if err != nil { + t.Fatalf("unexpected error getting descriptor: %v", err) + } + + if !reflect.DeepEqual(desc, expected) { + t.Fatalf("unexpected descriptor: %#v != %#v", desc, expected) + } + + desc, err = provider.Stat(ctx, localDigest) + if err != nil { + t.Fatalf("unexpected error getting global descriptor: %v", err) + } + + expected.MediaType = "application/octet-stream" // expect original mediatype in global + + if !reflect.DeepEqual(desc, expected) { + t.Fatalf("unexpected descriptor: %#v != %#v", desc, expected) + } +} + +func checkBlobDescriptorCacheClear(ctx context.Context, t *testing.T, provider cache.BlobDescriptorCacheProvider) { + localDigest := digest.Digest("sha384:def111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111") + expected := distribution.Descriptor{ + Digest: "sha256:def1111111111111111111111111111111111111111111111111111111111111", + Size: 10, + MediaType: "application/octet-stream"} + + cache, err := provider.RepositoryScoped("foo/bar") + if err != nil { + t.Fatalf("unexpected error getting scoped cache: %v", err) + } + + if err := cache.SetDescriptor(ctx, localDigest, expected); err != nil { + t.Fatalf("error setting descriptor: %v", err) + } + + desc, err := cache.Stat(ctx, localDigest) + if err != nil { + t.Fatalf("unexpected error statting fake2:abc: %v", err) + } + + if !reflect.DeepEqual(expected, desc) { + t.Fatalf("unexpected descriptor: %#v != %#v", expected, desc) + } + + err = cache.Clear(ctx, localDigest) + if err != nil { + t.Error(err) + } + + desc, err = cache.Stat(ctx, localDigest) + if err == nil { + t.Fatalf("expected error statting deleted blob: %v", err) + } +} diff --git a/vendor/github.com/docker/distribution/registry/storage/cache/cachedblobdescriptorstore.go b/vendor/github.com/docker/distribution/registry/storage/cache/cachedblobdescriptorstore.go new file mode 100644 index 000000000..94ca8a90c --- /dev/null +++ b/vendor/github.com/docker/distribution/registry/storage/cache/cachedblobdescriptorstore.go @@ -0,0 +1,101 @@ +package cache + +import ( + "github.com/docker/distribution/context" + "github.com/docker/distribution/digest" + + "github.com/docker/distribution" +) + +// Metrics is used to hold metric counters +// related to the number of times a cache was +// hit or missed. +type Metrics struct { + Requests uint64 + Hits uint64 + Misses uint64 +} + +// MetricsTracker represents a metric tracker +// which simply counts the number of hits and misses. +type MetricsTracker interface { + Hit() + Miss() + Metrics() Metrics +} + +type cachedBlobStatter struct { + cache distribution.BlobDescriptorService + backend distribution.BlobDescriptorService + tracker MetricsTracker +} + +// NewCachedBlobStatter creates a new statter which prefers a cache and +// falls back to a backend. +func NewCachedBlobStatter(cache distribution.BlobDescriptorService, backend distribution.BlobDescriptorService) distribution.BlobDescriptorService { + return &cachedBlobStatter{ + cache: cache, + backend: backend, + } +} + +// NewCachedBlobStatterWithMetrics creates a new statter which prefers a cache and +// falls back to a backend. Hits and misses will send to the tracker. 
+func NewCachedBlobStatterWithMetrics(cache distribution.BlobDescriptorService, backend distribution.BlobDescriptorService, tracker MetricsTracker) distribution.BlobStatter { + return &cachedBlobStatter{ + cache: cache, + backend: backend, + tracker: tracker, + } +} + +func (cbds *cachedBlobStatter) Stat(ctx context.Context, dgst digest.Digest) (distribution.Descriptor, error) { + desc, err := cbds.cache.Stat(ctx, dgst) + if err != nil { + if err != distribution.ErrBlobUnknown { + context.GetLogger(ctx).Errorf("error retrieving descriptor from cache: %v", err) + } + + goto fallback + } + + if cbds.tracker != nil { + cbds.tracker.Hit() + } + return desc, nil +fallback: + if cbds.tracker != nil { + cbds.tracker.Miss() + } + desc, err = cbds.backend.Stat(ctx, dgst) + if err != nil { + return desc, err + } + + if err := cbds.cache.SetDescriptor(ctx, dgst, desc); err != nil { + context.GetLogger(ctx).Errorf("error adding descriptor %v to cache: %v", desc.Digest, err) + } + + return desc, err + +} + +func (cbds *cachedBlobStatter) Clear(ctx context.Context, dgst digest.Digest) error { + err := cbds.cache.Clear(ctx, dgst) + if err != nil { + return err + } + + err = cbds.backend.Clear(ctx, dgst) + if err != nil { + return err + } + return nil +} + +func (cbds *cachedBlobStatter) SetDescriptor(ctx context.Context, dgst digest.Digest, desc distribution.Descriptor) error { + if err := cbds.cache.SetDescriptor(ctx, dgst, desc); err != nil { + context.GetLogger(ctx).Errorf("error adding descriptor %v to cache: %v", desc.Digest, err) + } + return nil +} diff --git a/vendor/github.com/docker/distribution/registry/storage/cache/memory/memory.go b/vendor/github.com/docker/distribution/registry/storage/cache/memory/memory.go new file mode 100644 index 000000000..cf125e187 --- /dev/null +++ b/vendor/github.com/docker/distribution/registry/storage/cache/memory/memory.go @@ -0,0 +1,179 @@ +package memory + +import ( + "sync" + + "github.com/docker/distribution" + "github.com/docker/distribution/context" + "github.com/docker/distribution/digest" + "github.com/docker/distribution/reference" + "github.com/docker/distribution/registry/storage/cache" +) + +type inMemoryBlobDescriptorCacheProvider struct { + global *mapBlobDescriptorCache + repositories map[string]*mapBlobDescriptorCache + mu sync.RWMutex +} + +// NewInMemoryBlobDescriptorCacheProvider returns a new mapped-based cache for +// storing blob descriptor data. 
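cachedBlobStatter.Stat above is a read-through cache: try the cache first, fall back to the backend on a miss (or on a cache error, which is only logged), then best-effort populate the cache. A generic sketch of the same flow with illustrative types standing in for the blob descriptor services:

package main

import (
	"errors"
	"fmt"
)

var errUnknown = errors.New("unknown key")

type store interface {
	Get(key string) (string, error)
	Set(key, value string)
}

// readThrough prefers the cache and falls back to the backend, caching the result.
func readThrough(cache, backend store, key string) (string, error) {
	if v, err := cache.Get(key); err == nil {
		return v, nil // cache hit
	}
	v, err := backend.Get(key)
	if err != nil {
		return "", err
	}
	cache.Set(key, v) // best effort; a failure here would only be logged
	return v, nil
}

type mapStore map[string]string

func (m mapStore) Get(k string) (string, error) {
	if v, ok := m[k]; ok {
		return v, nil
	}
	return "", errUnknown
}
func (m mapStore) Set(k, v string) { m[k] = v }

func main() {
	cache, backend := mapStore{}, mapStore{"sha256:abc": "10 bytes"}
	v, _ := readThrough(cache, backend, "sha256:abc")
	fmt.Println(v)                         // fetched from the backend
	fmt.Println(cache.Get("sha256:abc"))   // second lookup now hits the cache
}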
+func NewInMemoryBlobDescriptorCacheProvider() cache.BlobDescriptorCacheProvider { + return &inMemoryBlobDescriptorCacheProvider{ + global: newMapBlobDescriptorCache(), + repositories: make(map[string]*mapBlobDescriptorCache), + } +} + +func (imbdcp *inMemoryBlobDescriptorCacheProvider) RepositoryScoped(repo string) (distribution.BlobDescriptorService, error) { + if _, err := reference.ParseNamed(repo); err != nil { + return nil, err + } + + imbdcp.mu.RLock() + defer imbdcp.mu.RUnlock() + + return &repositoryScopedInMemoryBlobDescriptorCache{ + repo: repo, + parent: imbdcp, + repository: imbdcp.repositories[repo], + }, nil +} + +func (imbdcp *inMemoryBlobDescriptorCacheProvider) Stat(ctx context.Context, dgst digest.Digest) (distribution.Descriptor, error) { + return imbdcp.global.Stat(ctx, dgst) +} + +func (imbdcp *inMemoryBlobDescriptorCacheProvider) Clear(ctx context.Context, dgst digest.Digest) error { + return imbdcp.global.Clear(ctx, dgst) +} + +func (imbdcp *inMemoryBlobDescriptorCacheProvider) SetDescriptor(ctx context.Context, dgst digest.Digest, desc distribution.Descriptor) error { + _, err := imbdcp.Stat(ctx, dgst) + if err == distribution.ErrBlobUnknown { + + if dgst.Algorithm() != desc.Digest.Algorithm() && dgst != desc.Digest { + // if the digests differ, set the other canonical mapping + if err := imbdcp.global.SetDescriptor(ctx, desc.Digest, desc); err != nil { + return err + } + } + + // unknown, just set it + return imbdcp.global.SetDescriptor(ctx, dgst, desc) + } + + // we already know it, do nothing + return err +} + +// repositoryScopedInMemoryBlobDescriptorCache provides the request scoped +// repository cache. Instances are not thread-safe but the delegated +// operations are. +type repositoryScopedInMemoryBlobDescriptorCache struct { + repo string + parent *inMemoryBlobDescriptorCacheProvider // allows lazy allocation of repo's map + repository *mapBlobDescriptorCache +} + +func (rsimbdcp *repositoryScopedInMemoryBlobDescriptorCache) Stat(ctx context.Context, dgst digest.Digest) (distribution.Descriptor, error) { + rsimbdcp.parent.mu.Lock() + repo := rsimbdcp.repository + rsimbdcp.parent.mu.Unlock() + + if repo == nil { + return distribution.Descriptor{}, distribution.ErrBlobUnknown + } + + return repo.Stat(ctx, dgst) +} + +func (rsimbdcp *repositoryScopedInMemoryBlobDescriptorCache) Clear(ctx context.Context, dgst digest.Digest) error { + rsimbdcp.parent.mu.Lock() + repo := rsimbdcp.repository + rsimbdcp.parent.mu.Unlock() + + if repo == nil { + return distribution.ErrBlobUnknown + } + + return repo.Clear(ctx, dgst) +} + +func (rsimbdcp *repositoryScopedInMemoryBlobDescriptorCache) SetDescriptor(ctx context.Context, dgst digest.Digest, desc distribution.Descriptor) error { + rsimbdcp.parent.mu.Lock() + repo := rsimbdcp.repository + if repo == nil { + // allocate map since we are setting it now. + var ok bool + // have to read back value since we may have allocated elsewhere. + repo, ok = rsimbdcp.parent.repositories[rsimbdcp.repo] + if !ok { + repo = newMapBlobDescriptorCache() + rsimbdcp.parent.repositories[rsimbdcp.repo] = repo + } + rsimbdcp.repository = repo + } + rsimbdcp.parent.mu.Unlock() + + if err := repo.SetDescriptor(ctx, dgst, desc); err != nil { + return err + } + + return rsimbdcp.parent.SetDescriptor(ctx, dgst, desc) +} + +// mapBlobDescriptorCache provides a simple map-based implementation of the +// descriptor cache. 
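The repository-scoped SetDescriptor above allocates the per-repository map lazily on first write, re-reading the parent's map after taking the lock in case another goroutine allocated it first. A generic "get or create under a mutex" sketch of that pattern; the types and names are illustrative, and unlike the real code the inner map here has no lock of its own:

package main

import (
	"fmt"
	"sync"
)

type registryCaches struct {
	mu    sync.Mutex
	repos map[string]map[string]string
}

// repo returns the per-repository map, creating it on first use.
func (rc *registryCaches) repo(name string) map[string]string {
	rc.mu.Lock()
	defer rc.mu.Unlock()
	// Re-check under the lock: another goroutine may have allocated it already.
	m, ok := rc.repos[name]
	if !ok {
		m = make(map[string]string)
		rc.repos[name] = m
	}
	return m
}

func main() {
	rc := &registryCaches{repos: make(map[string]map[string]string)}
	rc.repo("foo/bar")["sha256:abc"] = "application/octet-stream"
	fmt.Println(rc.repo("foo/bar")["sha256:abc"])
}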
+type mapBlobDescriptorCache struct { + descriptors map[digest.Digest]distribution.Descriptor + mu sync.RWMutex +} + +var _ distribution.BlobDescriptorService = &mapBlobDescriptorCache{} + +func newMapBlobDescriptorCache() *mapBlobDescriptorCache { + return &mapBlobDescriptorCache{ + descriptors: make(map[digest.Digest]distribution.Descriptor), + } +} + +func (mbdc *mapBlobDescriptorCache) Stat(ctx context.Context, dgst digest.Digest) (distribution.Descriptor, error) { + if err := dgst.Validate(); err != nil { + return distribution.Descriptor{}, err + } + + mbdc.mu.RLock() + defer mbdc.mu.RUnlock() + + desc, ok := mbdc.descriptors[dgst] + if !ok { + return distribution.Descriptor{}, distribution.ErrBlobUnknown + } + + return desc, nil +} + +func (mbdc *mapBlobDescriptorCache) Clear(ctx context.Context, dgst digest.Digest) error { + mbdc.mu.Lock() + defer mbdc.mu.Unlock() + + delete(mbdc.descriptors, dgst) + return nil +} + +func (mbdc *mapBlobDescriptorCache) SetDescriptor(ctx context.Context, dgst digest.Digest, desc distribution.Descriptor) error { + if err := dgst.Validate(); err != nil { + return err + } + + if err := cache.ValidateDescriptor(desc); err != nil { + return err + } + + mbdc.mu.Lock() + defer mbdc.mu.Unlock() + + mbdc.descriptors[dgst] = desc + return nil +} diff --git a/vendor/github.com/docker/distribution/registry/storage/cache/memory/memory_test.go b/vendor/github.com/docker/distribution/registry/storage/cache/memory/memory_test.go new file mode 100644 index 000000000..49c2b5c39 --- /dev/null +++ b/vendor/github.com/docker/distribution/registry/storage/cache/memory/memory_test.go @@ -0,0 +1,13 @@ +package memory + +import ( + "testing" + + "github.com/docker/distribution/registry/storage/cache/cachecheck" +) + +// TestInMemoryBlobInfoCache checks the in memory implementation is working +// correctly. +func TestInMemoryBlobInfoCache(t *testing.T) { + cachecheck.CheckBlobDescriptorCache(t, NewInMemoryBlobDescriptorCacheProvider()) +} diff --git a/vendor/github.com/docker/distribution/registry/storage/cache/redis/redis.go b/vendor/github.com/docker/distribution/registry/storage/cache/redis/redis.go new file mode 100644 index 000000000..cb264b098 --- /dev/null +++ b/vendor/github.com/docker/distribution/registry/storage/cache/redis/redis.go @@ -0,0 +1,268 @@ +package redis + +import ( + "fmt" + + "github.com/docker/distribution" + "github.com/docker/distribution/context" + "github.com/docker/distribution/digest" + "github.com/docker/distribution/reference" + "github.com/docker/distribution/registry/storage/cache" + "github.com/garyburd/redigo/redis" +) + +// redisBlobStatService provides an implementation of +// BlobDescriptorCacheProvider based on redis. Blob descriptors are stored in +// two parts. The first provide fast access to repository membership through a +// redis set for each repo. The second is a redis hash keyed by the digest of +// the layer, providing path, length and mediatype information. There is also +// a per-repository redis hash of the blob descriptor, allowing override of +// data. This is currently used to override the mediatype on a per-repository +// basis. +// +// Note that there is no implied relationship between these two caches. The +// layer may exist in one, both or none and the code must be written this way. +type redisBlobDescriptorService struct { + pool *redis.Pool + + // TODO(stevvooe): We use a pool because we don't have great control over + // the cache lifecycle to manage connections. 
A new connection if fetched + // for each operation. Once we have better lifecycle management of the + // request objects, we can change this to a connection. +} + +// NewRedisBlobDescriptorCacheProvider returns a new redis-based +// BlobDescriptorCacheProvider using the provided redis connection pool. +func NewRedisBlobDescriptorCacheProvider(pool *redis.Pool) cache.BlobDescriptorCacheProvider { + return &redisBlobDescriptorService{ + pool: pool, + } +} + +// RepositoryScoped returns the scoped cache. +func (rbds *redisBlobDescriptorService) RepositoryScoped(repo string) (distribution.BlobDescriptorService, error) { + if _, err := reference.ParseNamed(repo); err != nil { + return nil, err + } + + return &repositoryScopedRedisBlobDescriptorService{ + repo: repo, + upstream: rbds, + }, nil +} + +// Stat retrieves the descriptor data from the redis hash entry. +func (rbds *redisBlobDescriptorService) Stat(ctx context.Context, dgst digest.Digest) (distribution.Descriptor, error) { + if err := dgst.Validate(); err != nil { + return distribution.Descriptor{}, err + } + + conn := rbds.pool.Get() + defer conn.Close() + + return rbds.stat(ctx, conn, dgst) +} + +func (rbds *redisBlobDescriptorService) Clear(ctx context.Context, dgst digest.Digest) error { + if err := dgst.Validate(); err != nil { + return err + } + + conn := rbds.pool.Get() + defer conn.Close() + + // Not atomic in redis <= 2.3 + reply, err := conn.Do("HDEL", rbds.blobDescriptorHashKey(dgst), "digest", "length", "mediatype") + if err != nil { + return err + } + + if reply == 0 { + return distribution.ErrBlobUnknown + } + + return nil +} + +// stat provides an internal stat call that takes a connection parameter. This +// allows some internal management of the connection scope. +func (rbds *redisBlobDescriptorService) stat(ctx context.Context, conn redis.Conn, dgst digest.Digest) (distribution.Descriptor, error) { + reply, err := redis.Values(conn.Do("HMGET", rbds.blobDescriptorHashKey(dgst), "digest", "size", "mediatype")) + if err != nil { + return distribution.Descriptor{}, err + } + + // NOTE(stevvooe): The "size" field used to be "length". We treat a + // missing "size" field here as an unknown blob, which causes a cache + // miss, effectively migrating the field. + if len(reply) < 3 || reply[0] == nil || reply[1] == nil { // don't care if mediatype is nil + return distribution.Descriptor{}, distribution.ErrBlobUnknown + } + + var desc distribution.Descriptor + if _, err := redis.Scan(reply, &desc.Digest, &desc.Size, &desc.MediaType); err != nil { + return distribution.Descriptor{}, err + } + + return desc, nil +} + +// SetDescriptor sets the descriptor data for the given digest using a redis +// hash. A hash is used here since we may store unrelated fields about a layer +// in the future. +func (rbds *redisBlobDescriptorService) SetDescriptor(ctx context.Context, dgst digest.Digest, desc distribution.Descriptor) error { + if err := dgst.Validate(); err != nil { + return err + } + + if err := cache.ValidateDescriptor(desc); err != nil { + return err + } + + conn := rbds.pool.Get() + defer conn.Close() + + return rbds.setDescriptor(ctx, conn, dgst, desc) +} + +func (rbds *redisBlobDescriptorService) setDescriptor(ctx context.Context, conn redis.Conn, dgst digest.Digest, desc distribution.Descriptor) error { + if _, err := conn.Do("HMSET", rbds.blobDescriptorHashKey(dgst), + "digest", desc.Digest, + "size", desc.Size); err != nil { + return err + } + + // Only set mediatype if not already set. 
+ if _, err := conn.Do("HSETNX", rbds.blobDescriptorHashKey(dgst), + "mediatype", desc.MediaType); err != nil { + return err + } + + return nil +} + +func (rbds *redisBlobDescriptorService) blobDescriptorHashKey(dgst digest.Digest) string { + return "blobs::" + dgst.String() +} + +type repositoryScopedRedisBlobDescriptorService struct { + repo string + upstream *redisBlobDescriptorService +} + +var _ distribution.BlobDescriptorService = &repositoryScopedRedisBlobDescriptorService{} + +// Stat ensures that the digest is a member of the specified repository and +// forwards the descriptor request to the global blob store. If the media type +// differs for the repository, we override it. +func (rsrbds *repositoryScopedRedisBlobDescriptorService) Stat(ctx context.Context, dgst digest.Digest) (distribution.Descriptor, error) { + if err := dgst.Validate(); err != nil { + return distribution.Descriptor{}, err + } + + conn := rsrbds.upstream.pool.Get() + defer conn.Close() + + // Check membership to repository first + member, err := redis.Bool(conn.Do("SISMEMBER", rsrbds.repositoryBlobSetKey(rsrbds.repo), dgst)) + if err != nil { + return distribution.Descriptor{}, err + } + + if !member { + return distribution.Descriptor{}, distribution.ErrBlobUnknown + } + + upstream, err := rsrbds.upstream.stat(ctx, conn, dgst) + if err != nil { + return distribution.Descriptor{}, err + } + + // We allow a per repository mediatype, let's look it up here. + mediatype, err := redis.String(conn.Do("HGET", rsrbds.blobDescriptorHashKey(dgst), "mediatype")) + if err != nil { + return distribution.Descriptor{}, err + } + + if mediatype != "" { + upstream.MediaType = mediatype + } + + return upstream, nil +} + +// Clear removes the descriptor from the cache and forwards to the upstream descriptor store +func (rsrbds *repositoryScopedRedisBlobDescriptorService) Clear(ctx context.Context, dgst digest.Digest) error { + if err := dgst.Validate(); err != nil { + return err + } + + conn := rsrbds.upstream.pool.Get() + defer conn.Close() + + // Check membership to repository first + member, err := redis.Bool(conn.Do("SISMEMBER", rsrbds.repositoryBlobSetKey(rsrbds.repo), dgst)) + if err != nil { + return err + } + + if !member { + return distribution.ErrBlobUnknown + } + + return rsrbds.upstream.Clear(ctx, dgst) +} + +func (rsrbds *repositoryScopedRedisBlobDescriptorService) SetDescriptor(ctx context.Context, dgst digest.Digest, desc distribution.Descriptor) error { + if err := dgst.Validate(); err != nil { + return err + } + + if err := cache.ValidateDescriptor(desc); err != nil { + return err + } + + if dgst != desc.Digest { + if dgst.Algorithm() == desc.Digest.Algorithm() { + return fmt.Errorf("redis cache: digest for descriptors differ but algorthim does not: %q != %q", dgst, desc.Digest) + } + } + + conn := rsrbds.upstream.pool.Get() + defer conn.Close() + + return rsrbds.setDescriptor(ctx, conn, dgst, desc) +} + +func (rsrbds *repositoryScopedRedisBlobDescriptorService) setDescriptor(ctx context.Context, conn redis.Conn, dgst digest.Digest, desc distribution.Descriptor) error { + if _, err := conn.Do("SADD", rsrbds.repositoryBlobSetKey(rsrbds.repo), dgst); err != nil { + return err + } + + if err := rsrbds.upstream.setDescriptor(ctx, conn, dgst, desc); err != nil { + return err + } + + // Override repository mediatype. 
+ if _, err := conn.Do("HSET", rsrbds.blobDescriptorHashKey(dgst), "mediatype", desc.MediaType); err != nil { + return err + } + + // Also set the values for the primary descriptor, if they differ by + // algorithm (ie sha256 vs sha512). + if desc.Digest != "" && dgst != desc.Digest && dgst.Algorithm() != desc.Digest.Algorithm() { + if err := rsrbds.setDescriptor(ctx, conn, desc.Digest, desc); err != nil { + return err + } + } + + return nil +} + +func (rsrbds *repositoryScopedRedisBlobDescriptorService) blobDescriptorHashKey(dgst digest.Digest) string { + return "repository::" + rsrbds.repo + "::blobs::" + dgst.String() +} + +func (rsrbds *repositoryScopedRedisBlobDescriptorService) repositoryBlobSetKey(repo string) string { + return "repository::" + rsrbds.repo + "::blobs" +} diff --git a/vendor/github.com/docker/distribution/registry/storage/cache/redis/redis_test.go b/vendor/github.com/docker/distribution/registry/storage/cache/redis/redis_test.go new file mode 100644 index 000000000..d324842d3 --- /dev/null +++ b/vendor/github.com/docker/distribution/registry/storage/cache/redis/redis_test.go @@ -0,0 +1,53 @@ +package redis + +import ( + "flag" + "os" + "testing" + "time" + + "github.com/docker/distribution/registry/storage/cache/cachecheck" + "github.com/garyburd/redigo/redis" +) + +var redisAddr string + +func init() { + flag.StringVar(&redisAddr, "test.registry.storage.cache.redis.addr", "", "configure the address of a test instance of redis") +} + +// TestRedisLayerInfoCache exercises a live redis instance using the cache +// implementation. +func TestRedisBlobDescriptorCacheProvider(t *testing.T) { + if redisAddr == "" { + // fallback to an environement variable + redisAddr = os.Getenv("TEST_REGISTRY_STORAGE_CACHE_REDIS_ADDR") + } + + if redisAddr == "" { + // skip if still not set + t.Skip("please set -test.registry.storage.cache.redis.addr to test layer info cache against redis") + } + + pool := &redis.Pool{ + Dial: func() (redis.Conn, error) { + return redis.Dial("tcp", redisAddr) + }, + MaxIdle: 1, + MaxActive: 2, + TestOnBorrow: func(c redis.Conn, t time.Time) error { + _, err := c.Do("PING") + return err + }, + Wait: false, // if a connection is not avialable, proceed without cache. + } + + // Clear the database + conn := pool.Get() + if _, err := conn.Do("FLUSHDB"); err != nil { + t.Fatalf("unexpected error flushing redis db: %v", err) + } + conn.Close() + + cachecheck.CheckBlobDescriptorCache(t, NewRedisBlobDescriptorCacheProvider(pool)) +} diff --git a/vendor/github.com/docker/distribution/registry/storage/catalog.go b/vendor/github.com/docker/distribution/registry/storage/catalog.go new file mode 100644 index 000000000..0b59a39ac --- /dev/null +++ b/vendor/github.com/docker/distribution/registry/storage/catalog.go @@ -0,0 +1,153 @@ +package storage + +import ( + "errors" + "io" + "path" + "strings" + + "github.com/docker/distribution/context" + "github.com/docker/distribution/registry/storage/driver" +) + +// errFinishedWalk signals an early exit to the walk when the current query +// is satisfied. +var errFinishedWalk = errors.New("finished walk") + +// Returns a list, or partial list, of repositories in the registry. +// Because it's a quite expensive operation, it should only be used when building up +// an initial set of repositories. 
+func (reg *registry) Repositories(ctx context.Context, repos []string, last string) (n int, err error) { + var foundRepos []string + + if len(repos) == 0 { + return 0, errors.New("no space in slice") + } + + root, err := pathFor(repositoriesRootPathSpec{}) + if err != nil { + return 0, err + } + + err = Walk(ctx, reg.blobStore.driver, root, func(fileInfo driver.FileInfo) error { + err := handleRepository(fileInfo, root, last, func(repoPath string) error { + foundRepos = append(foundRepos, repoPath) + return nil + }) + if err != nil { + return err + } + + // if we've filled our array, no need to walk any further + if len(foundRepos) == len(repos) { + return errFinishedWalk + } + + return nil + }) + + n = copy(repos, foundRepos) + + switch err { + case nil: + // nil means that we completed walk and didn't fill buffer. No more + // records are available. + err = io.EOF + case errFinishedWalk: + // more records are available. + err = nil + } + + return n, err +} + +// Enumerate applies ingester to each repository +func (reg *registry) Enumerate(ctx context.Context, ingester func(string) error) error { + root, err := pathFor(repositoriesRootPathSpec{}) + if err != nil { + return err + } + + err = Walk(ctx, reg.blobStore.driver, root, func(fileInfo driver.FileInfo) error { + return handleRepository(fileInfo, root, "", ingester) + }) + + return err +} + +// lessPath returns true if one path a is less than path b. +// +// A component-wise comparison is done, rather than the lexical comparison of +// strings. +func lessPath(a, b string) bool { + // we provide this behavior by making separator always sort first. + return compareReplaceInline(a, b, '/', '\x00') < 0 +} + +// compareReplaceInline modifies runtime.cmpstring to replace old with new +// during a byte-wise comparison. +func compareReplaceInline(s1, s2 string, old, new byte) int { + // TODO(stevvooe): We are missing an optimization when the s1 and s2 have + // the exact same slice header. It will make the code unsafe but can + // provide some extra performance. + + l := len(s1) + if len(s2) < l { + l = len(s2) + } + + for i := 0; i < l; i++ { + c1, c2 := s1[i], s2[i] + if c1 == old { + c1 = new + } + + if c2 == old { + c2 = new + } + + if c1 < c2 { + return -1 + } + + if c1 > c2 { + return +1 + } + } + + if len(s1) < len(s2) { + return -1 + } + + if len(s1) > len(s2) { + return +1 + } + + return 0 +} + +// handleRepository calls function fn with a repository path if fileInfo +// has a path of a repository under root and that it is lexographically +// after last. Otherwise, it will return ErrSkipDir. This should be used +// with Walk to do handling with repositories in a storage. 
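lessPath and compareReplaceInline above exist so that the catalog sorts component-wise rather than byte-wise: the path separator is treated as the lowest byte, which keeps "foo/..." entries grouped ahead of "foo-bar/...". A short demonstration of the difference; the ReplaceAll version is a simplified, allocating stand-in for the inline comparison:

package main

import (
	"fmt"
	"strings"
)

// lessPath is a simplified equivalent of the component-wise comparison above.
func lessPath(a, b string) bool {
	return strings.ReplaceAll(a, "/", "\x00") < strings.ReplaceAll(b, "/", "\x00")
}

func main() {
	fmt.Println("foo/b" < "foo-bar/a")          // false: byte-wise, '/' sorts after '-'
	fmt.Println(lessPath("foo/b", "foo-bar/a")) // true: separators sort first, matching the catalog test's expected order
}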
+func handleRepository(fileInfo driver.FileInfo, root, last string, fn func(repoPath string) error) error { + filePath := fileInfo.Path() + + // lop the base path off + repo := filePath[len(root)+1:] + + _, file := path.Split(repo) + if file == "_layers" { + repo = strings.TrimSuffix(repo, "/_layers") + if lessPath(last, repo) { + if err := fn(repo); err != nil { + return err + } + } + return ErrSkipDir + } else if strings.HasPrefix(file, "_") { + return ErrSkipDir + } + + return nil +} diff --git a/vendor/github.com/docker/distribution/registry/storage/catalog_test.go b/vendor/github.com/docker/distribution/registry/storage/catalog_test.go new file mode 100644 index 000000000..c69c0cdc1 --- /dev/null +++ b/vendor/github.com/docker/distribution/registry/storage/catalog_test.go @@ -0,0 +1,324 @@ +package storage + +import ( + "fmt" + "io" + "math/rand" + "testing" + + "github.com/docker/distribution" + "github.com/docker/distribution/context" + "github.com/docker/distribution/digest" + "github.com/docker/distribution/reference" + "github.com/docker/distribution/registry/storage/cache/memory" + "github.com/docker/distribution/registry/storage/driver" + "github.com/docker/distribution/registry/storage/driver/inmemory" + "github.com/docker/distribution/testutil" +) + +type setupEnv struct { + ctx context.Context + driver driver.StorageDriver + expected []string + registry distribution.Namespace +} + +func setupFS(t *testing.T) *setupEnv { + d := inmemory.New() + ctx := context.Background() + registry, err := NewRegistry(ctx, d, BlobDescriptorCacheProvider(memory.NewInMemoryBlobDescriptorCacheProvider()), EnableRedirect) + if err != nil { + t.Fatalf("error creating registry: %v", err) + } + + repos := []string{ + "foo/a", + "foo/b", + "foo-bar/a", + "bar/c", + "bar/d", + "bar/e", + "foo/d/in", + "foo-bar/b", + "test", + } + + for _, repo := range repos { + makeRepo(ctx, t, repo, registry) + } + + expected := []string{ + "bar/c", + "bar/d", + "bar/e", + "foo/a", + "foo/b", + "foo/d/in", + "foo-bar/a", + "foo-bar/b", + "test", + } + + return &setupEnv{ + ctx: ctx, + driver: d, + expected: expected, + registry: registry, + } +} + +func makeRepo(ctx context.Context, t *testing.T, name string, reg distribution.Namespace) { + named, err := reference.ParseNamed(name) + if err != nil { + t.Fatal(err) + } + + repo, _ := reg.Repository(ctx, named) + manifests, _ := repo.Manifests(ctx) + + layers, err := testutil.CreateRandomLayers(1) + if err != nil { + t.Fatal(err) + } + + err = testutil.UploadBlobs(repo, layers) + if err != nil { + t.Fatalf("failed to upload layers: %v", err) + } + + getKeys := func(digests map[digest.Digest]io.ReadSeeker) (ds []digest.Digest) { + for d := range digests { + ds = append(ds, d) + } + return + } + + manifest, err := testutil.MakeSchema1Manifest(getKeys(layers)) + if err != nil { + t.Fatal(err) + } + + _, err = manifests.Put(ctx, manifest) + if err != nil { + t.Fatalf("manifest upload failed: %v", err) + } + +} + +func TestCatalog(t *testing.T) { + env := setupFS(t) + + p := make([]string, 50) + + numFilled, err := env.registry.Repositories(env.ctx, p, "") + if numFilled != len(env.expected) { + t.Errorf("missing items in catalog") + } + + if !testEq(p, env.expected, len(env.expected)) { + t.Errorf("Expected catalog repos err") + } + + if err != io.EOF { + t.Errorf("Catalog has more values which we aren't expecting") + } +} + +func TestCatalogInParts(t *testing.T) { + env := setupFS(t) + + chunkLen := 3 + p := make([]string, chunkLen) + + numFilled, err := 
env.registry.Repositories(env.ctx, p, "") + if err == io.EOF || numFilled != len(p) { + t.Errorf("Expected more values in catalog") + } + + if !testEq(p, env.expected[0:chunkLen], numFilled) { + t.Errorf("Expected catalog first chunk err") + } + + lastRepo := p[len(p)-1] + numFilled, err = env.registry.Repositories(env.ctx, p, lastRepo) + + if err == io.EOF || numFilled != len(p) { + t.Errorf("Expected more values in catalog") + } + + if !testEq(p, env.expected[chunkLen:chunkLen*2], numFilled) { + t.Errorf("Expected catalog second chunk err") + } + + lastRepo = p[len(p)-1] + numFilled, err = env.registry.Repositories(env.ctx, p, lastRepo) + + if err != io.EOF || numFilled != len(p) { + t.Errorf("Expected end of catalog") + } + + if !testEq(p, env.expected[chunkLen*2:chunkLen*3], numFilled) { + t.Errorf("Expected catalog third chunk err") + } + + lastRepo = p[len(p)-1] + numFilled, err = env.registry.Repositories(env.ctx, p, lastRepo) + + if err != io.EOF { + t.Errorf("Catalog has more values which we aren't expecting") + } + + if numFilled != 0 { + t.Errorf("Expected catalog fourth chunk err") + } +} + +func TestCatalogEnumerate(t *testing.T) { + env := setupFS(t) + + var repos []string + repositoryEnumerator := env.registry.(distribution.RepositoryEnumerator) + err := repositoryEnumerator.Enumerate(env.ctx, func(repoName string) error { + repos = append(repos, repoName) + return nil + }) + if err != nil { + t.Errorf("Expected catalog enumerate err") + } + + if len(repos) != len(env.expected) { + t.Errorf("Expected catalog enumerate doesn't have correct number of values") + } + + if !testEq(repos, env.expected, len(env.expected)) { + t.Errorf("Expected catalog enumerate not over all values") + } +} + +func testEq(a, b []string, size int) bool { + for cnt := 0; cnt < size-1; cnt++ { + if a[cnt] != b[cnt] { + return false + } + } + return true +} + +func setupBadWalkEnv(t *testing.T) *setupEnv { + d := newBadListDriver() + ctx := context.Background() + registry, err := NewRegistry(ctx, d, BlobDescriptorCacheProvider(memory.NewInMemoryBlobDescriptorCacheProvider()), EnableRedirect) + if err != nil { + t.Fatalf("error creating registry: %v", err) + } + + return &setupEnv{ + ctx: ctx, + driver: d, + registry: registry, + } +} + +type badListDriver struct { + driver.StorageDriver +} + +var _ driver.StorageDriver = &badListDriver{} + +func newBadListDriver() *badListDriver { + return &badListDriver{StorageDriver: inmemory.New()} +} + +func (d *badListDriver) List(ctx context.Context, path string) ([]string, error) { + return nil, fmt.Errorf("List error") +} + +func TestCatalogWalkError(t *testing.T) { + env := setupBadWalkEnv(t) + p := make([]string, 1) + + _, err := env.registry.Repositories(env.ctx, p, "") + if err == io.EOF { + t.Errorf("Expected catalog driver list error") + } +} + +func BenchmarkPathCompareEqual(B *testing.B) { + B.StopTimer() + pp := randomPath(100) + // make a real copy + ppb := append([]byte{}, []byte(pp)...) 
+ a, b := pp, string(ppb) + + B.StartTimer() + for i := 0; i < B.N; i++ { + lessPath(a, b) + } +} + +func BenchmarkPathCompareNotEqual(B *testing.B) { + B.StopTimer() + a, b := randomPath(100), randomPath(100) + B.StartTimer() + + for i := 0; i < B.N; i++ { + lessPath(a, b) + } +} + +func BenchmarkPathCompareNative(B *testing.B) { + B.StopTimer() + a, b := randomPath(100), randomPath(100) + B.StartTimer() + + for i := 0; i < B.N; i++ { + c := a < b + c = c && false + } +} + +func BenchmarkPathCompareNativeEqual(B *testing.B) { + B.StopTimer() + pp := randomPath(100) + a, b := pp, pp + B.StartTimer() + + for i := 0; i < B.N; i++ { + c := a < b + c = c && false + } +} + +var filenameChars = []byte("abcdefghijklmnopqrstuvwxyz0123456789") +var separatorChars = []byte("._-") + +func randomPath(length int64) string { + path := "/" + for int64(len(path)) < length { + chunkLength := rand.Int63n(length-int64(len(path))) + 1 + chunk := randomFilename(chunkLength) + path += chunk + remaining := length - int64(len(path)) + if remaining == 1 { + path += randomFilename(1) + } else if remaining > 1 { + path += "/" + } + } + return path +} + +func randomFilename(length int64) string { + b := make([]byte, length) + wasSeparator := true + for i := range b { + if !wasSeparator && i < len(b)-1 && rand.Intn(4) == 0 { + b[i] = separatorChars[rand.Intn(len(separatorChars))] + wasSeparator = true + } else { + b[i] = filenameChars[rand.Intn(len(filenameChars))] + wasSeparator = false + } + } + return string(b) +} diff --git a/vendor/github.com/docker/distribution/registry/storage/doc.go b/vendor/github.com/docker/distribution/registry/storage/doc.go new file mode 100644 index 000000000..387d92348 --- /dev/null +++ b/vendor/github.com/docker/distribution/registry/storage/doc.go @@ -0,0 +1,3 @@ +// Package storage contains storage services for use in the registry +// application. It should be considered an internal package, as of Go 1.4. +package storage diff --git a/vendor/github.com/docker/distribution/registry/storage/driver/azure/azure.go b/vendor/github.com/docker/distribution/registry/storage/driver/azure/azure.go new file mode 100644 index 000000000..930b76a17 --- /dev/null +++ b/vendor/github.com/docker/distribution/registry/storage/driver/azure/azure.go @@ -0,0 +1,483 @@ +// Package azure provides a storagedriver.StorageDriver implementation to +// store blobs in Microsoft Azure Blob Storage Service. +package azure + +import ( + "bufio" + "bytes" + "fmt" + "io" + "io/ioutil" + "net/http" + "strings" + "time" + + "github.com/docker/distribution/context" + storagedriver "github.com/docker/distribution/registry/storage/driver" + "github.com/docker/distribution/registry/storage/driver/base" + "github.com/docker/distribution/registry/storage/driver/factory" + + azure "github.com/Azure/azure-sdk-for-go/storage" +) + +const driverName = "azure" + +const ( + paramAccountName = "accountname" + paramAccountKey = "accountkey" + paramContainer = "container" + paramRealm = "realm" + maxChunkSize = 4 * 1024 * 1024 +) + +type driver struct { + client azure.BlobStorageClient + container string +} + +type baseEmbed struct{ base.Base } + +// Driver is a storagedriver.StorageDriver implementation backed by +// Microsoft Azure Blob Storage Service. 
+type Driver struct{ baseEmbed } + +func init() { + factory.Register(driverName, &azureDriverFactory{}) +} + +type azureDriverFactory struct{} + +func (factory *azureDriverFactory) Create(parameters map[string]interface{}) (storagedriver.StorageDriver, error) { + return FromParameters(parameters) +} + +// FromParameters constructs a new Driver with a given parameters map. +func FromParameters(parameters map[string]interface{}) (*Driver, error) { + accountName, ok := parameters[paramAccountName] + if !ok || fmt.Sprint(accountName) == "" { + return nil, fmt.Errorf("No %s parameter provided", paramAccountName) + } + + accountKey, ok := parameters[paramAccountKey] + if !ok || fmt.Sprint(accountKey) == "" { + return nil, fmt.Errorf("No %s parameter provided", paramAccountKey) + } + + container, ok := parameters[paramContainer] + if !ok || fmt.Sprint(container) == "" { + return nil, fmt.Errorf("No %s parameter provided", paramContainer) + } + + realm, ok := parameters[paramRealm] + if !ok || fmt.Sprint(realm) == "" { + realm = azure.DefaultBaseURL + } + + return New(fmt.Sprint(accountName), fmt.Sprint(accountKey), fmt.Sprint(container), fmt.Sprint(realm)) +} + +// New constructs a new Driver with the given Azure Storage Account credentials +func New(accountName, accountKey, container, realm string) (*Driver, error) { + api, err := azure.NewClient(accountName, accountKey, realm, azure.DefaultAPIVersion, true) + if err != nil { + return nil, err + } + + blobClient := api.GetBlobService() + + // Create registry container + if _, err = blobClient.CreateContainerIfNotExists(container, azure.ContainerAccessTypePrivate); err != nil { + return nil, err + } + + d := &driver{ + client: blobClient, + container: container} + return &Driver{baseEmbed: baseEmbed{Base: base.Base{StorageDriver: d}}}, nil +} + +// Implement the storagedriver.StorageDriver interface. +func (d *driver) Name() string { + return driverName +} + +// GetContent retrieves the content stored at "path" as a []byte. +func (d *driver) GetContent(ctx context.Context, path string) ([]byte, error) { + blob, err := d.client.GetBlob(d.container, path) + if err != nil { + if is404(err) { + return nil, storagedriver.PathNotFoundError{Path: path} + } + return nil, err + } + + defer blob.Close() + return ioutil.ReadAll(blob) +} + +// PutContent stores the []byte content at a location designated by "path". +func (d *driver) PutContent(ctx context.Context, path string, contents []byte) error { + if _, err := d.client.DeleteBlobIfExists(d.container, path, nil); err != nil { + return err + } + writer, err := d.Writer(ctx, path, false) + if err != nil { + return err + } + defer writer.Close() + _, err = writer.Write(contents) + if err != nil { + return err + } + return writer.Commit() +} + +// Reader retrieves an io.ReadCloser for the content stored at "path" with a +// given byte offset. 
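A hedged example of wiring up this driver through FromParameters, using the parameter keys defined above; the credential values are placeholders and a real account is needed for the container-creation call in New to succeed:

package main

import (
	"fmt"

	azure "github.com/docker/distribution/registry/storage/driver/azure"
)

func main() {
	d, err := azure.FromParameters(map[string]interface{}{
		"accountname": "myaccount", // placeholder
		"accountkey":  "bXkta2V5",  // placeholder base64 account key
		"container":   "registry",
		// "realm" is optional and defaults to the public Azure endpoint.
	})
	if err != nil {
		fmt.Println("driver init failed:", err)
		return
	}
	fmt.Printf("using %s driver\n", d.Name())
}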
+func (d *driver) Reader(ctx context.Context, path string, offset int64) (io.ReadCloser, error) { + if ok, err := d.client.BlobExists(d.container, path); err != nil { + return nil, err + } else if !ok { + return nil, storagedriver.PathNotFoundError{Path: path} + } + + info, err := d.client.GetBlobProperties(d.container, path) + if err != nil { + return nil, err + } + + size := int64(info.ContentLength) + if offset >= size { + return ioutil.NopCloser(bytes.NewReader(nil)), nil + } + + bytesRange := fmt.Sprintf("%v-", offset) + resp, err := d.client.GetBlobRange(d.container, path, bytesRange, nil) + if err != nil { + return nil, err + } + return resp, nil +} + +// Writer returns a FileWriter which will store the content written to it +// at the location designated by "path" after the call to Commit. +func (d *driver) Writer(ctx context.Context, path string, append bool) (storagedriver.FileWriter, error) { + blobExists, err := d.client.BlobExists(d.container, path) + if err != nil { + return nil, err + } + var size int64 + if blobExists { + if append { + blobProperties, err := d.client.GetBlobProperties(d.container, path) + if err != nil { + return nil, err + } + size = blobProperties.ContentLength + } else { + err := d.client.DeleteBlob(d.container, path, nil) + if err != nil { + return nil, err + } + } + } else { + if append { + return nil, storagedriver.PathNotFoundError{Path: path} + } + err := d.client.PutAppendBlob(d.container, path, nil) + if err != nil { + return nil, err + } + } + + return d.newWriter(path, size), nil +} + +// Stat retrieves the FileInfo for the given path, including the current size +// in bytes and the creation time. +func (d *driver) Stat(ctx context.Context, path string) (storagedriver.FileInfo, error) { + // Check if the path is a blob + if ok, err := d.client.BlobExists(d.container, path); err != nil { + return nil, err + } else if ok { + blob, err := d.client.GetBlobProperties(d.container, path) + if err != nil { + return nil, err + } + + mtim, err := time.Parse(http.TimeFormat, blob.LastModified) + if err != nil { + return nil, err + } + + return storagedriver.FileInfoInternal{FileInfoFields: storagedriver.FileInfoFields{ + Path: path, + Size: int64(blob.ContentLength), + ModTime: mtim, + IsDir: false, + }}, nil + } + + // Check if path is a virtual container + virtContainerPath := path + if !strings.HasSuffix(virtContainerPath, "/") { + virtContainerPath += "/" + } + blobs, err := d.client.ListBlobs(d.container, azure.ListBlobsParameters{ + Prefix: virtContainerPath, + MaxResults: 1, + }) + if err != nil { + return nil, err + } + if len(blobs.Blobs) > 0 { + // path is a virtual container + return storagedriver.FileInfoInternal{FileInfoFields: storagedriver.FileInfoFields{ + Path: path, + IsDir: true, + }}, nil + } + + // path is not a blob or virtual container + return nil, storagedriver.PathNotFoundError{Path: path} +} + +// List returns a list of the objects that are direct descendants of the given +// path. +func (d *driver) List(ctx context.Context, path string) ([]string, error) { + if path == "/" { + path = "" + } + + blobs, err := d.listBlobs(d.container, path) + if err != nil { + return blobs, err + } + + list := directDescendants(blobs, path) + if path != "" && len(list) == 0 { + return nil, storagedriver.PathNotFoundError{Path: path} + } + return list, nil +} + +// Move moves an object stored at sourcePath to destPath, removing the original +// object. 
+func (d *driver) Move(ctx context.Context, sourcePath string, destPath string) error { + sourceBlobURL := d.client.GetBlobURL(d.container, sourcePath) + err := d.client.CopyBlob(d.container, destPath, sourceBlobURL) + if err != nil { + if is404(err) { + return storagedriver.PathNotFoundError{Path: sourcePath} + } + return err + } + + return d.client.DeleteBlob(d.container, sourcePath, nil) +} + +// Delete recursively deletes all objects stored at "path" and its subpaths. +func (d *driver) Delete(ctx context.Context, path string) error { + ok, err := d.client.DeleteBlobIfExists(d.container, path, nil) + if err != nil { + return err + } + if ok { + return nil // was a blob and deleted, return + } + + // Not a blob, see if path is a virtual container with blobs + blobs, err := d.listBlobs(d.container, path) + if err != nil { + return err + } + + for _, b := range blobs { + if err = d.client.DeleteBlob(d.container, b, nil); err != nil { + return err + } + } + + if len(blobs) == 0 { + return storagedriver.PathNotFoundError{Path: path} + } + return nil +} + +// URLFor returns a publicly accessible URL for the blob stored at given path +// for specified duration by making use of Azure Storage Shared Access Signatures (SAS). +// See https://msdn.microsoft.com/en-us/library/azure/ee395415.aspx for more info. +func (d *driver) URLFor(ctx context.Context, path string, options map[string]interface{}) (string, error) { + expiresTime := time.Now().UTC().Add(20 * time.Minute) // default expiration + expires, ok := options["expiry"] + if ok { + t, ok := expires.(time.Time) + if ok { + expiresTime = t + } + } + return d.client.GetBlobSASURI(d.container, path, expiresTime, "r") +} + +// directDescendants will find direct descendants (blobs or virtual containers) +// of from list of blob paths and will return their full paths. 
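URLFor above reads an optional "expiry" entry (a time.Time) from the options map and otherwise falls back to a 20-minute default before building the SAS URL. A hedged sketch of requesting a signed URL through the generic StorageDriver interface; the helper name and one-hour expiry are illustrative:

package example

import (
	"time"

	"github.com/docker/distribution/context"
	storagedriver "github.com/docker/distribution/registry/storage/driver"
)

// signedURL asks the driver for a temporary, publicly accessible URL for path.
// Drivers without URL support report ErrUnsupportedMethod instead.
func signedURL(ctx context.Context, d storagedriver.StorageDriver, path string) (string, error) {
	return d.URLFor(ctx, path, map[string]interface{}{
		"expiry": time.Now().Add(1 * time.Hour),
	})
}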
Elements in blobs +// list must be prefixed with a "/" and +// +// Example: direct descendants of "/" in {"/foo", "/bar/1", "/bar/2"} is +// {"/foo", "/bar"} and direct descendants of "bar" is {"/bar/1", "/bar/2"} +func directDescendants(blobs []string, prefix string) []string { + if !strings.HasPrefix(prefix, "/") { // add trailing '/' + prefix = "/" + prefix + } + if !strings.HasSuffix(prefix, "/") { // containerify the path + prefix += "/" + } + + out := make(map[string]bool) + for _, b := range blobs { + if strings.HasPrefix(b, prefix) { + rel := b[len(prefix):] + c := strings.Count(rel, "/") + if c == 0 { + out[b] = true + } else { + out[prefix+rel[:strings.Index(rel, "/")]] = true + } + } + } + + var keys []string + for k := range out { + keys = append(keys, k) + } + return keys +} + +func (d *driver) listBlobs(container, virtPath string) ([]string, error) { + if virtPath != "" && !strings.HasSuffix(virtPath, "/") { // containerify the path + virtPath += "/" + } + + out := []string{} + marker := "" + for { + resp, err := d.client.ListBlobs(d.container, azure.ListBlobsParameters{ + Marker: marker, + Prefix: virtPath, + }) + + if err != nil { + return out, err + } + + for _, b := range resp.Blobs { + out = append(out, b.Name) + } + + if len(resp.Blobs) == 0 || resp.NextMarker == "" { + break + } + marker = resp.NextMarker + } + return out, nil +} + +func is404(err error) bool { + statusCodeErr, ok := err.(azure.AzureStorageServiceError) + return ok && statusCodeErr.StatusCode == http.StatusNotFound +} + +type writer struct { + driver *driver + path string + size int64 + bw *bufio.Writer + closed bool + committed bool + cancelled bool +} + +func (d *driver) newWriter(path string, size int64) storagedriver.FileWriter { + return &writer{ + driver: d, + path: path, + size: size, + bw: bufio.NewWriterSize(&blockWriter{ + client: d.client, + container: d.container, + path: path, + }, maxChunkSize), + } +} + +func (w *writer) Write(p []byte) (int, error) { + if w.closed { + return 0, fmt.Errorf("already closed") + } else if w.committed { + return 0, fmt.Errorf("already committed") + } else if w.cancelled { + return 0, fmt.Errorf("already cancelled") + } + + n, err := w.bw.Write(p) + w.size += int64(n) + return n, err +} + +func (w *writer) Size() int64 { + return w.size +} + +func (w *writer) Close() error { + if w.closed { + return fmt.Errorf("already closed") + } + w.closed = true + return w.bw.Flush() +} + +func (w *writer) Cancel() error { + if w.closed { + return fmt.Errorf("already closed") + } else if w.committed { + return fmt.Errorf("already committed") + } + w.cancelled = true + return w.driver.client.DeleteBlob(w.driver.container, w.path, nil) +} + +func (w *writer) Commit() error { + if w.closed { + return fmt.Errorf("already closed") + } else if w.committed { + return fmt.Errorf("already committed") + } else if w.cancelled { + return fmt.Errorf("already cancelled") + } + w.committed = true + return w.bw.Flush() +} + +type blockWriter struct { + client azure.BlobStorageClient + container string + path string +} + +func (bw *blockWriter) Write(p []byte) (int, error) { + n := 0 + for offset := 0; offset < len(p); offset += maxChunkSize { + chunkSize := maxChunkSize + if offset+chunkSize > len(p) { + chunkSize = len(p) - offset + } + err := bw.client.AppendBlock(bw.container, bw.path, p[offset:offset+chunkSize], nil) + if err != nil { + return n, err + } + + n += chunkSize + } + + return n, nil +} diff --git 
a/vendor/github.com/docker/distribution/registry/storage/driver/azure/azure_test.go b/vendor/github.com/docker/distribution/registry/storage/driver/azure/azure_test.go new file mode 100644 index 000000000..4a0661b3e --- /dev/null +++ b/vendor/github.com/docker/distribution/registry/storage/driver/azure/azure_test.go @@ -0,0 +1,63 @@ +package azure + +import ( + "fmt" + "os" + "strings" + "testing" + + storagedriver "github.com/docker/distribution/registry/storage/driver" + "github.com/docker/distribution/registry/storage/driver/testsuites" + . "gopkg.in/check.v1" +) + +const ( + envAccountName = "AZURE_STORAGE_ACCOUNT_NAME" + envAccountKey = "AZURE_STORAGE_ACCOUNT_KEY" + envContainer = "AZURE_STORAGE_CONTAINER" + envRealm = "AZURE_STORAGE_REALM" +) + +// Hook up gocheck into the "go test" runner. +func Test(t *testing.T) { TestingT(t) } + +func init() { + var ( + accountName string + accountKey string + container string + realm string + ) + + config := []struct { + env string + value *string + }{ + {envAccountName, &accountName}, + {envAccountKey, &accountKey}, + {envContainer, &container}, + {envRealm, &realm}, + } + + missing := []string{} + for _, v := range config { + *v.value = os.Getenv(v.env) + if *v.value == "" { + missing = append(missing, v.env) + } + } + + azureDriverConstructor := func() (storagedriver.StorageDriver, error) { + return New(accountName, accountKey, container, realm) + } + + // Skip Azure storage driver tests if environment variable parameters are not provided + skipCheck := func() string { + if len(missing) > 0 { + return fmt.Sprintf("Must set %s environment variables to run Azure tests", strings.Join(missing, ", ")) + } + return "" + } + + testsuites.RegisterSuite(azureDriverConstructor, skipCheck) +} diff --git a/vendor/github.com/docker/distribution/registry/storage/driver/base/base.go b/vendor/github.com/docker/distribution/registry/storage/driver/base/base.go new file mode 100644 index 000000000..e14f7edb1 --- /dev/null +++ b/vendor/github.com/docker/distribution/registry/storage/driver/base/base.go @@ -0,0 +1,198 @@ +// Package base provides a base implementation of the storage driver that can +// be used to implement common checks. The goal is to increase the amount of +// code sharing. +// +// The canonical approach to use this class is to embed in the exported driver +// struct such that calls are proxied through this implementation. First, +// declare the internal driver, as follows: +// +// type driver struct { ... internal ...} +// +// The resulting type should implement StorageDriver such that it can be the +// target of a Base struct. The exported type can then be declared as follows: +// +// type Driver struct { +// Base +// } +// +// Because Driver embeds Base, it effectively implements Base. If the driver +// needs to intercept a call, before going to base, Driver should implement +// that method. Effectively, Driver can intercept calls before coming in and +// driver implements the actual logic. +// +// To further shield the embed from other packages, it is recommended to +// employ a private embed struct: +// +// type baseEmbed struct { +// base.Base +// } +// +// Then, declare driver to embed baseEmbed, rather than Base directly: +// +// type Driver struct { +// baseEmbed +// } +// +// The type now implements StorageDriver, proxying through Base, without +// exporting an unnecessary field. 
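The package comment below describes the recommended embedding pattern; a compilable skeleton of it, with a hypothetical "mydriver" package (the azure New constructor earlier shows the same wiring with real state):

package mydriver

import "github.com/docker/distribution/registry/storage/driver/base"

// driver would hold the backend-specific state and implement
// storagedriver.StorageDriver (methods omitted here).
type driver struct{}

// baseEmbed shields the Base embed from other packages.
type baseEmbed struct{ base.Base }

// Driver proxies every call through base.Base, which performs the common path
// and offset checks before delegating to driver. It would be constructed as
//   &Driver{baseEmbed{base.Base{StorageDriver: d}}}
// once driver satisfies the StorageDriver interface.
type Driver struct{ baseEmbed }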
+package base + +import ( + "io" + + "github.com/docker/distribution/context" + storagedriver "github.com/docker/distribution/registry/storage/driver" +) + +// Base provides a wrapper around a storagedriver implementation that provides +// common path and bounds checking. +type Base struct { + storagedriver.StorageDriver +} + +// Format errors received from the storage driver +func (base *Base) setDriverName(e error) error { + switch actual := e.(type) { + case nil: + return nil + case storagedriver.ErrUnsupportedMethod: + actual.DriverName = base.StorageDriver.Name() + return actual + case storagedriver.PathNotFoundError: + actual.DriverName = base.StorageDriver.Name() + return actual + case storagedriver.InvalidPathError: + actual.DriverName = base.StorageDriver.Name() + return actual + case storagedriver.InvalidOffsetError: + actual.DriverName = base.StorageDriver.Name() + return actual + default: + storageError := storagedriver.Error{ + DriverName: base.StorageDriver.Name(), + Enclosed: e, + } + + return storageError + } +} + +// GetContent wraps GetContent of underlying storage driver. +func (base *Base) GetContent(ctx context.Context, path string) ([]byte, error) { + ctx, done := context.WithTrace(ctx) + defer done("%s.GetContent(%q)", base.Name(), path) + + if !storagedriver.PathRegexp.MatchString(path) { + return nil, storagedriver.InvalidPathError{Path: path, DriverName: base.StorageDriver.Name()} + } + + b, e := base.StorageDriver.GetContent(ctx, path) + return b, base.setDriverName(e) +} + +// PutContent wraps PutContent of underlying storage driver. +func (base *Base) PutContent(ctx context.Context, path string, content []byte) error { + ctx, done := context.WithTrace(ctx) + defer done("%s.PutContent(%q)", base.Name(), path) + + if !storagedriver.PathRegexp.MatchString(path) { + return storagedriver.InvalidPathError{Path: path, DriverName: base.StorageDriver.Name()} + } + + return base.setDriverName(base.StorageDriver.PutContent(ctx, path, content)) +} + +// Reader wraps Reader of underlying storage driver. +func (base *Base) Reader(ctx context.Context, path string, offset int64) (io.ReadCloser, error) { + ctx, done := context.WithTrace(ctx) + defer done("%s.Reader(%q, %d)", base.Name(), path, offset) + + if offset < 0 { + return nil, storagedriver.InvalidOffsetError{Path: path, Offset: offset, DriverName: base.StorageDriver.Name()} + } + + if !storagedriver.PathRegexp.MatchString(path) { + return nil, storagedriver.InvalidPathError{Path: path, DriverName: base.StorageDriver.Name()} + } + + rc, e := base.StorageDriver.Reader(ctx, path, offset) + return rc, base.setDriverName(e) +} + +// Writer wraps Writer of underlying storage driver. +func (base *Base) Writer(ctx context.Context, path string, append bool) (storagedriver.FileWriter, error) { + ctx, done := context.WithTrace(ctx) + defer done("%s.Writer(%q, %v)", base.Name(), path, append) + + if !storagedriver.PathRegexp.MatchString(path) { + return nil, storagedriver.InvalidPathError{Path: path, DriverName: base.StorageDriver.Name()} + } + + writer, e := base.StorageDriver.Writer(ctx, path, append) + return writer, base.setDriverName(e) +} + +// Stat wraps Stat of underlying storage driver. 
+func (base *Base) Stat(ctx context.Context, path string) (storagedriver.FileInfo, error) { + ctx, done := context.WithTrace(ctx) + defer done("%s.Stat(%q)", base.Name(), path) + + if !storagedriver.PathRegexp.MatchString(path) && path != "/" { + return nil, storagedriver.InvalidPathError{Path: path, DriverName: base.StorageDriver.Name()} + } + + fi, e := base.StorageDriver.Stat(ctx, path) + return fi, base.setDriverName(e) +} + +// List wraps List of underlying storage driver. +func (base *Base) List(ctx context.Context, path string) ([]string, error) { + ctx, done := context.WithTrace(ctx) + defer done("%s.List(%q)", base.Name(), path) + + if !storagedriver.PathRegexp.MatchString(path) && path != "/" { + return nil, storagedriver.InvalidPathError{Path: path, DriverName: base.StorageDriver.Name()} + } + + str, e := base.StorageDriver.List(ctx, path) + return str, base.setDriverName(e) +} + +// Move wraps Move of underlying storage driver. +func (base *Base) Move(ctx context.Context, sourcePath string, destPath string) error { + ctx, done := context.WithTrace(ctx) + defer done("%s.Move(%q, %q", base.Name(), sourcePath, destPath) + + if !storagedriver.PathRegexp.MatchString(sourcePath) { + return storagedriver.InvalidPathError{Path: sourcePath, DriverName: base.StorageDriver.Name()} + } else if !storagedriver.PathRegexp.MatchString(destPath) { + return storagedriver.InvalidPathError{Path: destPath, DriverName: base.StorageDriver.Name()} + } + + return base.setDriverName(base.StorageDriver.Move(ctx, sourcePath, destPath)) +} + +// Delete wraps Delete of underlying storage driver. +func (base *Base) Delete(ctx context.Context, path string) error { + ctx, done := context.WithTrace(ctx) + defer done("%s.Delete(%q)", base.Name(), path) + + if !storagedriver.PathRegexp.MatchString(path) { + return storagedriver.InvalidPathError{Path: path, DriverName: base.StorageDriver.Name()} + } + + return base.setDriverName(base.StorageDriver.Delete(ctx, path)) +} + +// URLFor wraps URLFor of underlying storage driver. +func (base *Base) URLFor(ctx context.Context, path string, options map[string]interface{}) (string, error) { + ctx, done := context.WithTrace(ctx) + defer done("%s.URLFor(%q)", base.Name(), path) + + if !storagedriver.PathRegexp.MatchString(path) { + return "", storagedriver.InvalidPathError{Path: path, DriverName: base.StorageDriver.Name()} + } + + str, e := base.StorageDriver.URLFor(ctx, path, options) + return str, base.setDriverName(e) +} diff --git a/vendor/github.com/docker/distribution/registry/storage/driver/base/regulator.go b/vendor/github.com/docker/distribution/registry/storage/driver/base/regulator.go new file mode 100644 index 000000000..185160a4b --- /dev/null +++ b/vendor/github.com/docker/distribution/registry/storage/driver/base/regulator.go @@ -0,0 +1,145 @@ +package base + +import ( + "io" + "sync" + + "github.com/docker/distribution/context" + storagedriver "github.com/docker/distribution/registry/storage/driver" +) + +type regulator struct { + storagedriver.StorageDriver + *sync.Cond + + available uint64 +} + +// NewRegulator wraps the given driver and is used to regulate concurrent calls +// to the given storage driver to a maximum of the given limit. This is useful +// for storage drivers that would otherwise create an unbounded number of OS +// threads if allowed to be called unregulated. 
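+//
+// For example, the filesystem driver in this tree wraps its internal driver
+// with a regulator sized by its maxthreads parameter (sketch; fsDriver and
+// the limit of 100 are illustrative):
+//
+//	regulated := NewRegulator(fsDriver, 100) // at most 100 concurrent calls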
+func NewRegulator(driver storagedriver.StorageDriver, limit uint64) storagedriver.StorageDriver { + return ®ulator{ + StorageDriver: driver, + Cond: sync.NewCond(&sync.Mutex{}), + available: limit, + } +} + +func (r *regulator) enter() { + r.L.Lock() + for r.available == 0 { + r.Wait() + } + r.available-- + r.L.Unlock() +} + +func (r *regulator) exit() { + r.L.Lock() + // We only need to signal to a waiting FS operation if we're already at the + // limit of threads used + if r.available == 0 { + r.Signal() + } + r.available++ + r.L.Unlock() +} + +// Name returns the human-readable "name" of the driver, useful in error +// messages and logging. By convention, this will just be the registration +// name, but drivers may provide other information here. +func (r *regulator) Name() string { + r.enter() + defer r.exit() + + return r.StorageDriver.Name() +} + +// GetContent retrieves the content stored at "path" as a []byte. +// This should primarily be used for small objects. +func (r *regulator) GetContent(ctx context.Context, path string) ([]byte, error) { + r.enter() + defer r.exit() + + return r.StorageDriver.GetContent(ctx, path) +} + +// PutContent stores the []byte content at a location designated by "path". +// This should primarily be used for small objects. +func (r *regulator) PutContent(ctx context.Context, path string, content []byte) error { + r.enter() + defer r.exit() + + return r.StorageDriver.PutContent(ctx, path, content) +} + +// Reader retrieves an io.ReadCloser for the content stored at "path" +// with a given byte offset. +// May be used to resume reading a stream by providing a nonzero offset. +func (r *regulator) Reader(ctx context.Context, path string, offset int64) (io.ReadCloser, error) { + r.enter() + defer r.exit() + + return r.StorageDriver.Reader(ctx, path, offset) +} + +// Writer stores the contents of the provided io.ReadCloser at a +// location designated by the given path. +// May be used to resume writing a stream by providing a nonzero offset. +// The offset must be no larger than the CurrentSize for this path. +func (r *regulator) Writer(ctx context.Context, path string, append bool) (storagedriver.FileWriter, error) { + r.enter() + defer r.exit() + + return r.StorageDriver.Writer(ctx, path, append) +} + +// Stat retrieves the FileInfo for the given path, including the current +// size in bytes and the creation time. +func (r *regulator) Stat(ctx context.Context, path string) (storagedriver.FileInfo, error) { + r.enter() + defer r.exit() + + return r.StorageDriver.Stat(ctx, path) +} + +// List returns a list of the objects that are direct descendants of the +//given path. +func (r *regulator) List(ctx context.Context, path string) ([]string, error) { + r.enter() + defer r.exit() + + return r.StorageDriver.List(ctx, path) +} + +// Move moves an object stored at sourcePath to destPath, removing the +// original object. +// Note: This may be no more efficient than a copy followed by a delete for +// many implementations. +func (r *regulator) Move(ctx context.Context, sourcePath string, destPath string) error { + r.enter() + defer r.exit() + + return r.StorageDriver.Move(ctx, sourcePath, destPath) +} + +// Delete recursively deletes all objects stored at "path" and its subpaths. +func (r *regulator) Delete(ctx context.Context, path string) error { + r.enter() + defer r.exit() + + return r.StorageDriver.Delete(ctx, path) +} + +// URLFor returns a URL which may be used to retrieve the content stored at +// the given path, possibly using the given options. 
+// May return an ErrUnsupportedMethod in certain StorageDriver +// implementations. +func (r *regulator) URLFor(ctx context.Context, path string, options map[string]interface{}) (string, error) { + r.enter() + defer r.exit() + + return r.StorageDriver.URLFor(ctx, path, options) +} diff --git a/vendor/github.com/docker/distribution/registry/storage/driver/factory/factory.go b/vendor/github.com/docker/distribution/registry/storage/driver/factory/factory.go new file mode 100644 index 000000000..a9c04ec59 --- /dev/null +++ b/vendor/github.com/docker/distribution/registry/storage/driver/factory/factory.go @@ -0,0 +1,64 @@ +package factory + +import ( + "fmt" + + storagedriver "github.com/docker/distribution/registry/storage/driver" +) + +// driverFactories stores an internal mapping between storage driver names and their respective +// factories +var driverFactories = make(map[string]StorageDriverFactory) + +// StorageDriverFactory is a factory interface for creating storagedriver.StorageDriver interfaces +// Storage drivers should call Register() with a factory to make the driver available by name. +// Individual StorageDriver implementations generally register with the factory via the Register +// func (below) in their init() funcs, and as such they should be imported anonymously before use. +// See below for an example of how to register and get a StorageDriver for S3 +// +// import _ "github.com/docker/distribution/registry/storage/driver/s3-aws" +// s3Driver, err = factory.Create("s3", storageParams) +// // assuming no error, s3Driver is the StorageDriver that communicates with S3 according to storageParams +type StorageDriverFactory interface { + // Create returns a new storagedriver.StorageDriver with the given parameters + // Parameters will vary by driver and may be ignored + // Each parameter key must only consist of lowercase letters and numbers + Create(parameters map[string]interface{}) (storagedriver.StorageDriver, error) +} + +// Register makes a storage driver available by the provided name. +// If Register is called twice with the same name or if driver factory is nil, it panics. +// Additionally, it is not concurrency safe. Most Storage Drivers call this function +// in their init() functions. See the documentation for StorageDriverFactory for more. +func Register(name string, factory StorageDriverFactory) { + if factory == nil { + panic("Must not provide nil StorageDriverFactory") + } + _, registered := driverFactories[name] + if registered { + panic(fmt.Sprintf("StorageDriverFactory named %s already registered", name)) + } + + driverFactories[name] = factory +} + +// Create a new storagedriver.StorageDriver with the given name and +// parameters. To use a driver, the StorageDriverFactory must first be +// registered with the given name. 
If no drivers are found, an +// InvalidStorageDriverError is returned +func Create(name string, parameters map[string]interface{}) (storagedriver.StorageDriver, error) { + driverFactory, ok := driverFactories[name] + if !ok { + return nil, InvalidStorageDriverError{name} + } + return driverFactory.Create(parameters) +} + +// InvalidStorageDriverError records an attempt to construct an unregistered storage driver +type InvalidStorageDriverError struct { + Name string +} + +func (err InvalidStorageDriverError) Error() string { + return fmt.Sprintf("StorageDriver not registered: %s", err.Name) +} diff --git a/vendor/github.com/docker/distribution/registry/storage/driver/fileinfo.go b/vendor/github.com/docker/distribution/registry/storage/driver/fileinfo.go new file mode 100644 index 000000000..e5064029a --- /dev/null +++ b/vendor/github.com/docker/distribution/registry/storage/driver/fileinfo.go @@ -0,0 +1,79 @@ +package driver + +import "time" + +// FileInfo returns information about a given path. Inspired by os.FileInfo, +// it elides the base name method for a full path instead. +type FileInfo interface { + // Path provides the full path of the target of this file info. + Path() string + + // Size returns current length in bytes of the file. The return value can + // be used to write to the end of the file at path. The value is + // meaningless if IsDir returns true. + Size() int64 + + // ModTime returns the modification time for the file. For backends that + // don't have a modification time, the creation time should be returned. + ModTime() time.Time + + // IsDir returns true if the path is a directory. + IsDir() bool +} + +// NOTE(stevvooe): The next two types, FileInfoFields and FileInfoInternal +// should only be used by storagedriver implementations. They should moved to +// a "driver" package, similar to database/sql. + +// FileInfoFields provides the exported fields for implementing FileInfo +// interface in storagedriver implementations. It should be used with +// InternalFileInfo. +type FileInfoFields struct { + // Path provides the full path of the target of this file info. + Path string + + // Size is current length in bytes of the file. The value of this field + // can be used to write to the end of the file at path. The value is + // meaningless if IsDir is set to true. + Size int64 + + // ModTime returns the modification time for the file. For backends that + // don't have a modification time, the creation time should be returned. + ModTime time.Time + + // IsDir returns true if the path is a directory. + IsDir bool +} + +// FileInfoInternal implements the FileInfo interface. This should only be +// used by storagedriver implementations that don't have a specialized +// FileInfo type. +type FileInfoInternal struct { + FileInfoFields +} + +var _ FileInfo = FileInfoInternal{} +var _ FileInfo = &FileInfoInternal{} + +// Path provides the full path of the target of this file info. +func (fi FileInfoInternal) Path() string { + return fi.FileInfoFields.Path +} + +// Size returns current length in bytes of the file. The return value can +// be used to write to the end of the file at path. The value is +// meaningless if IsDir returns true. +func (fi FileInfoInternal) Size() int64 { + return fi.FileInfoFields.Size +} + +// ModTime returns the modification time for the file. For backends that +// don't have a modification time, the creation time should be returned. 
+func (fi FileInfoInternal) ModTime() time.Time { + return fi.FileInfoFields.ModTime +} + +// IsDir returns true if the path is a directory. +func (fi FileInfoInternal) IsDir() bool { + return fi.FileInfoFields.IsDir +} diff --git a/vendor/github.com/docker/distribution/registry/storage/driver/filesystem/driver.go b/vendor/github.com/docker/distribution/registry/storage/driver/filesystem/driver.go new file mode 100644 index 000000000..649e2bc23 --- /dev/null +++ b/vendor/github.com/docker/distribution/registry/storage/driver/filesystem/driver.go @@ -0,0 +1,440 @@ +package filesystem + +import ( + "bufio" + "bytes" + "fmt" + "io" + "io/ioutil" + "os" + "path" + "reflect" + "strconv" + "time" + + "github.com/docker/distribution/context" + storagedriver "github.com/docker/distribution/registry/storage/driver" + "github.com/docker/distribution/registry/storage/driver/base" + "github.com/docker/distribution/registry/storage/driver/factory" +) + +const ( + driverName = "filesystem" + defaultRootDirectory = "/var/lib/registry" + defaultMaxThreads = uint64(100) + + // minThreads is the minimum value for the maxthreads configuration + // parameter. If the driver's parameters are less than this we set + // the parameters to minThreads + minThreads = uint64(25) +) + +// DriverParameters represents all configuration options available for the +// filesystem driver +type DriverParameters struct { + RootDirectory string + MaxThreads uint64 +} + +func init() { + factory.Register(driverName, &filesystemDriverFactory{}) +} + +// filesystemDriverFactory implements the factory.StorageDriverFactory interface +type filesystemDriverFactory struct{} + +func (factory *filesystemDriverFactory) Create(parameters map[string]interface{}) (storagedriver.StorageDriver, error) { + return FromParameters(parameters) +} + +type driver struct { + rootDirectory string +} + +type baseEmbed struct { + base.Base +} + +// Driver is a storagedriver.StorageDriver implementation backed by a local +// filesystem. All provided paths will be subpaths of the RootDirectory. +type Driver struct { + baseEmbed +} + +// FromParameters constructs a new Driver with a given parameters map +// Optional Parameters: +// - rootdirectory +// - maxthreads +func FromParameters(parameters map[string]interface{}) (*Driver, error) { + params, err := fromParametersImpl(parameters) + if err != nil || params == nil { + return nil, err + } + return New(*params), nil +} + +func fromParametersImpl(parameters map[string]interface{}) (*DriverParameters, error) { + var ( + err error + maxThreads = defaultMaxThreads + rootDirectory = defaultRootDirectory + ) + + if parameters != nil { + if rootDir, ok := parameters["rootdirectory"]; ok { + rootDirectory = fmt.Sprint(rootDir) + } + + // Get maximum number of threads for blocking filesystem operations, + // if specified + threads := parameters["maxthreads"] + switch v := threads.(type) { + case string: + if maxThreads, err = strconv.ParseUint(v, 0, 64); err != nil { + return nil, fmt.Errorf("maxthreads parameter must be an integer, %v invalid", threads) + } + case uint64: + maxThreads = v + case int, int32, int64: + val := reflect.ValueOf(v).Convert(reflect.TypeOf(threads)).Int() + // If threads is negative casting to uint64 will wrap around and + // give you the hugest thread limit ever. 
Let's be sensible, here + if val > 0 { + maxThreads = uint64(val) + } + case uint, uint32: + maxThreads = reflect.ValueOf(v).Convert(reflect.TypeOf(threads)).Uint() + case nil: + // do nothing + default: + return nil, fmt.Errorf("invalid value for maxthreads: %#v", threads) + } + + if maxThreads < minThreads { + maxThreads = minThreads + } + } + + params := &DriverParameters{ + RootDirectory: rootDirectory, + MaxThreads: maxThreads, + } + return params, nil +} + +// New constructs a new Driver with a given rootDirectory +func New(params DriverParameters) *Driver { + fsDriver := &driver{rootDirectory: params.RootDirectory} + + return &Driver{ + baseEmbed: baseEmbed{ + Base: base.Base{ + StorageDriver: base.NewRegulator(fsDriver, params.MaxThreads), + }, + }, + } +} + +// Implement the storagedriver.StorageDriver interface + +func (d *driver) Name() string { + return driverName +} + +// GetContent retrieves the content stored at "path" as a []byte. +func (d *driver) GetContent(ctx context.Context, path string) ([]byte, error) { + rc, err := d.Reader(ctx, path, 0) + if err != nil { + return nil, err + } + defer rc.Close() + + p, err := ioutil.ReadAll(rc) + if err != nil { + return nil, err + } + + return p, nil +} + +// PutContent stores the []byte content at a location designated by "path". +func (d *driver) PutContent(ctx context.Context, subPath string, contents []byte) error { + writer, err := d.Writer(ctx, subPath, false) + if err != nil { + return err + } + defer writer.Close() + _, err = io.Copy(writer, bytes.NewReader(contents)) + if err != nil { + writer.Cancel() + return err + } + return writer.Commit() +} + +// Reader retrieves an io.ReadCloser for the content stored at "path" with a +// given byte offset. +func (d *driver) Reader(ctx context.Context, path string, offset int64) (io.ReadCloser, error) { + file, err := os.OpenFile(d.fullPath(path), os.O_RDONLY, 0644) + if err != nil { + if os.IsNotExist(err) { + return nil, storagedriver.PathNotFoundError{Path: path} + } + + return nil, err + } + + seekPos, err := file.Seek(int64(offset), os.SEEK_SET) + if err != nil { + file.Close() + return nil, err + } else if seekPos < int64(offset) { + file.Close() + return nil, storagedriver.InvalidOffsetError{Path: path, Offset: offset} + } + + return file, nil +} + +func (d *driver) Writer(ctx context.Context, subPath string, append bool) (storagedriver.FileWriter, error) { + fullPath := d.fullPath(subPath) + parentDir := path.Dir(fullPath) + if err := os.MkdirAll(parentDir, 0777); err != nil { + return nil, err + } + + fp, err := os.OpenFile(fullPath, os.O_WRONLY|os.O_CREATE, 0666) + if err != nil { + return nil, err + } + + var offset int64 + + if !append { + err := fp.Truncate(0) + if err != nil { + fp.Close() + return nil, err + } + } else { + n, err := fp.Seek(0, os.SEEK_END) + if err != nil { + fp.Close() + return nil, err + } + offset = int64(n) + } + + return newFileWriter(fp, offset), nil +} + +// Stat retrieves the FileInfo for the given path, including the current size +// in bytes and the creation time. +func (d *driver) Stat(ctx context.Context, subPath string) (storagedriver.FileInfo, error) { + fullPath := d.fullPath(subPath) + + fi, err := os.Stat(fullPath) + if err != nil { + if os.IsNotExist(err) { + return nil, storagedriver.PathNotFoundError{Path: subPath} + } + + return nil, err + } + + return fileInfo{ + path: subPath, + FileInfo: fi, + }, nil +} + +// List returns a list of the objects that are direct descendants of the given +// path. 
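+//
+// For example, listing "/a" when the keys "/a/1" and "/a/b/2" exist returns
+// {"/a/1", "/a/b"} (illustrative paths only).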
+func (d *driver) List(ctx context.Context, subPath string) ([]string, error) { + fullPath := d.fullPath(subPath) + + dir, err := os.Open(fullPath) + if err != nil { + if os.IsNotExist(err) { + return nil, storagedriver.PathNotFoundError{Path: subPath} + } + return nil, err + } + + defer dir.Close() + + fileNames, err := dir.Readdirnames(0) + if err != nil { + return nil, err + } + + keys := make([]string, 0, len(fileNames)) + for _, fileName := range fileNames { + keys = append(keys, path.Join(subPath, fileName)) + } + + return keys, nil +} + +// Move moves an object stored at sourcePath to destPath, removing the original +// object. +func (d *driver) Move(ctx context.Context, sourcePath string, destPath string) error { + source := d.fullPath(sourcePath) + dest := d.fullPath(destPath) + + if _, err := os.Stat(source); os.IsNotExist(err) { + return storagedriver.PathNotFoundError{Path: sourcePath} + } + + if err := os.MkdirAll(path.Dir(dest), 0755); err != nil { + return err + } + + err := os.Rename(source, dest) + return err +} + +// Delete recursively deletes all objects stored at "path" and its subpaths. +func (d *driver) Delete(ctx context.Context, subPath string) error { + fullPath := d.fullPath(subPath) + + _, err := os.Stat(fullPath) + if err != nil && !os.IsNotExist(err) { + return err + } else if err != nil { + return storagedriver.PathNotFoundError{Path: subPath} + } + + err = os.RemoveAll(fullPath) + return err +} + +// URLFor returns a URL which may be used to retrieve the content stored at the given path. +// May return an UnsupportedMethodErr in certain StorageDriver implementations. +func (d *driver) URLFor(ctx context.Context, path string, options map[string]interface{}) (string, error) { + return "", storagedriver.ErrUnsupportedMethod{} +} + +// fullPath returns the absolute path of a key within the Driver's storage. +func (d *driver) fullPath(subPath string) string { + return path.Join(d.rootDirectory, subPath) +} + +type fileInfo struct { + os.FileInfo + path string +} + +var _ storagedriver.FileInfo = fileInfo{} + +// Path provides the full path of the target of this file info. +func (fi fileInfo) Path() string { + return fi.path +} + +// Size returns current length in bytes of the file. The return value can +// be used to write to the end of the file at path. The value is +// meaningless if IsDir returns true. +func (fi fileInfo) Size() int64 { + if fi.IsDir() { + return 0 + } + + return fi.FileInfo.Size() +} + +// ModTime returns the modification time for the file. For backends that +// don't have a modification time, the creation time should be returned. +func (fi fileInfo) ModTime() time.Time { + return fi.FileInfo.ModTime() +} + +// IsDir returns true if the path is a directory. 
+func (fi fileInfo) IsDir() bool { + return fi.FileInfo.IsDir() +} + +type fileWriter struct { + file *os.File + size int64 + bw *bufio.Writer + closed bool + committed bool + cancelled bool +} + +func newFileWriter(file *os.File, size int64) *fileWriter { + return &fileWriter{ + file: file, + size: size, + bw: bufio.NewWriter(file), + } +} + +func (fw *fileWriter) Write(p []byte) (int, error) { + if fw.closed { + return 0, fmt.Errorf("already closed") + } else if fw.committed { + return 0, fmt.Errorf("already committed") + } else if fw.cancelled { + return 0, fmt.Errorf("already cancelled") + } + n, err := fw.bw.Write(p) + fw.size += int64(n) + return n, err +} + +func (fw *fileWriter) Size() int64 { + return fw.size +} + +func (fw *fileWriter) Close() error { + if fw.closed { + return fmt.Errorf("already closed") + } + + if err := fw.bw.Flush(); err != nil { + return err + } + + if err := fw.file.Sync(); err != nil { + return err + } + + if err := fw.file.Close(); err != nil { + return err + } + fw.closed = true + return nil +} + +func (fw *fileWriter) Cancel() error { + if fw.closed { + return fmt.Errorf("already closed") + } + + fw.cancelled = true + fw.file.Close() + return os.Remove(fw.file.Name()) +} + +func (fw *fileWriter) Commit() error { + if fw.closed { + return fmt.Errorf("already closed") + } else if fw.committed { + return fmt.Errorf("already committed") + } else if fw.cancelled { + return fmt.Errorf("already cancelled") + } + + if err := fw.bw.Flush(); err != nil { + return err + } + + if err := fw.file.Sync(); err != nil { + return err + } + + fw.committed = true + return nil +} diff --git a/vendor/github.com/docker/distribution/registry/storage/driver/filesystem/driver_test.go b/vendor/github.com/docker/distribution/registry/storage/driver/filesystem/driver_test.go new file mode 100644 index 000000000..3be859239 --- /dev/null +++ b/vendor/github.com/docker/distribution/registry/storage/driver/filesystem/driver_test.go @@ -0,0 +1,113 @@ +package filesystem + +import ( + "io/ioutil" + "os" + "reflect" + "testing" + + storagedriver "github.com/docker/distribution/registry/storage/driver" + "github.com/docker/distribution/registry/storage/driver/testsuites" + . "gopkg.in/check.v1" +) + +// Hook up gocheck into the "go test" runner. 
+func Test(t *testing.T) { TestingT(t) } + +func init() { + root, err := ioutil.TempDir("", "driver-") + if err != nil { + panic(err) + } + defer os.Remove(root) + + driver, err := FromParameters(map[string]interface{}{ + "rootdirectory": root, + }) + if err != nil { + panic(err) + } + + testsuites.RegisterSuite(func() (storagedriver.StorageDriver, error) { + return driver, nil + }, testsuites.NeverSkip) +} + +func TestFromParametersImpl(t *testing.T) { + + tests := []struct { + params map[string]interface{} // techincally the yaml can contain anything + expected DriverParameters + pass bool + }{ + // check we use default threads and root dirs + { + params: map[string]interface{}{}, + expected: DriverParameters{ + RootDirectory: defaultRootDirectory, + MaxThreads: defaultMaxThreads, + }, + pass: true, + }, + // Testing initiation with a string maxThreads which can't be parsed + { + params: map[string]interface{}{ + "maxthreads": "fail", + }, + expected: DriverParameters{}, + pass: false, + }, + { + params: map[string]interface{}{ + "maxthreads": "100", + }, + expected: DriverParameters{ + RootDirectory: defaultRootDirectory, + MaxThreads: uint64(100), + }, + pass: true, + }, + { + params: map[string]interface{}{ + "maxthreads": 100, + }, + expected: DriverParameters{ + RootDirectory: defaultRootDirectory, + MaxThreads: uint64(100), + }, + pass: true, + }, + // check that we use minimum thread counts + { + params: map[string]interface{}{ + "maxthreads": 1, + }, + expected: DriverParameters{ + RootDirectory: defaultRootDirectory, + MaxThreads: minThreads, + }, + pass: true, + }, + } + + for _, item := range tests { + params, err := fromParametersImpl(item.params) + + if !item.pass { + // We only need to assert that expected failures have an error + if err == nil { + t.Fatalf("expected error configuring filesystem driver with invalid param: %+v", item.params) + } + continue + } + + if err != nil { + t.Fatalf("unexpected error creating filesystem driver: %s", err) + } + // Note that we get a pointer to params back + if !reflect.DeepEqual(*params, item.expected) { + t.Fatalf("unexpected params from filesystem driver. expected %+v, got %+v", item.expected, params) + } + } + +} diff --git a/vendor/github.com/docker/distribution/registry/storage/driver/gcs/doc.go b/vendor/github.com/docker/distribution/registry/storage/driver/gcs/doc.go new file mode 100644 index 000000000..0f23ea785 --- /dev/null +++ b/vendor/github.com/docker/distribution/registry/storage/driver/gcs/doc.go @@ -0,0 +1,3 @@ +// Package gcs implements the Google Cloud Storage driver backend. Support can be +// enabled by including the "include_gcs" build tag. +package gcs diff --git a/vendor/github.com/docker/distribution/registry/storage/driver/gcs/gcs.go b/vendor/github.com/docker/distribution/registry/storage/driver/gcs/gcs.go new file mode 100644 index 000000000..1369c280a --- /dev/null +++ b/vendor/github.com/docker/distribution/registry/storage/driver/gcs/gcs.go @@ -0,0 +1,873 @@ +// Package gcs provides a storagedriver.StorageDriver implementation to +// store blobs in Google cloud storage. +// +// This package leverages the google.golang.org/cloud/storage client library +//for interfacing with gcs. 
+// +// Because gcs is a key, value store the Stat call does not support last modification +// time for directories (directories are an abstraction for key, value stores) +// +// Note that the contents of incomplete uploads are not accessible even though +// Stat returns their length +// +// +build include_gcs + +package gcs + +import ( + "bytes" + "fmt" + "io" + "io/ioutil" + "math/rand" + "net/http" + "net/url" + "reflect" + "regexp" + "sort" + "strconv" + "strings" + "time" + + "golang.org/x/net/context" + "golang.org/x/oauth2" + "golang.org/x/oauth2/google" + "golang.org/x/oauth2/jwt" + "google.golang.org/api/googleapi" + "google.golang.org/cloud" + "google.golang.org/cloud/storage" + + "github.com/Sirupsen/logrus" + + ctx "github.com/docker/distribution/context" + storagedriver "github.com/docker/distribution/registry/storage/driver" + "github.com/docker/distribution/registry/storage/driver/base" + "github.com/docker/distribution/registry/storage/driver/factory" +) + +const ( + driverName = "gcs" + dummyProjectID = "" + + uploadSessionContentType = "application/x-docker-upload-session" + minChunkSize = 256 * 1024 + defaultChunkSize = 20 * minChunkSize + + maxTries = 5 +) + +var rangeHeader = regexp.MustCompile(`^bytes=([0-9])+-([0-9]+)$`) + +// driverParameters is a struct that encapsulates all of the driver parameters after all values have been set +type driverParameters struct { + bucket string + config *jwt.Config + email string + privateKey []byte + client *http.Client + rootDirectory string + chunkSize int +} + +func init() { + factory.Register(driverName, &gcsDriverFactory{}) +} + +// gcsDriverFactory implements the factory.StorageDriverFactory interface +type gcsDriverFactory struct{} + +// Create StorageDriver from parameters +func (factory *gcsDriverFactory) Create(parameters map[string]interface{}) (storagedriver.StorageDriver, error) { + return FromParameters(parameters) +} + +// driver is a storagedriver.StorageDriver implementation backed by GCS +// Objects are stored at absolute keys in the provided bucket. 
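+//
+// For example, with rootDirectory "reg" the path "/a/b" is stored at the
+// object key "reg/a/b"; with an empty rootDirectory it is stored at "a/b"
+// (illustrative values only).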
+type driver struct { + client *http.Client + bucket string + email string + privateKey []byte + rootDirectory string + chunkSize int +} + +// FromParameters constructs a new Driver with a given parameters map +// Required parameters: +// - bucket +func FromParameters(parameters map[string]interface{}) (storagedriver.StorageDriver, error) { + bucket, ok := parameters["bucket"] + if !ok || fmt.Sprint(bucket) == "" { + return nil, fmt.Errorf("No bucket parameter provided") + } + + rootDirectory, ok := parameters["rootdirectory"] + if !ok { + rootDirectory = "" + } + + chunkSize := defaultChunkSize + chunkSizeParam, ok := parameters["chunksize"] + if ok { + switch v := chunkSizeParam.(type) { + case string: + vv, err := strconv.Atoi(v) + if err != nil { + return nil, fmt.Errorf("chunksize parameter must be an integer, %v invalid", chunkSizeParam) + } + chunkSize = vv + case int, uint, int32, uint32, uint64, int64: + chunkSize = int(reflect.ValueOf(v).Convert(reflect.TypeOf(chunkSize)).Int()) + default: + return nil, fmt.Errorf("invalid valud for chunksize: %#v", chunkSizeParam) + } + + if chunkSize < minChunkSize { + return nil, fmt.Errorf("The chunksize %#v parameter should be a number that is larger than or equal to %d", chunkSize, minChunkSize) + } + + if chunkSize%minChunkSize != 0 { + return nil, fmt.Errorf("chunksize should be a multiple of %d", minChunkSize) + } + } + + var ts oauth2.TokenSource + jwtConf := new(jwt.Config) + if keyfile, ok := parameters["keyfile"]; ok { + jsonKey, err := ioutil.ReadFile(fmt.Sprint(keyfile)) + if err != nil { + return nil, err + } + jwtConf, err = google.JWTConfigFromJSON(jsonKey, storage.ScopeFullControl) + if err != nil { + return nil, err + } + ts = jwtConf.TokenSource(context.Background()) + } else { + var err error + ts, err = google.DefaultTokenSource(context.Background(), storage.ScopeFullControl) + if err != nil { + return nil, err + } + } + + params := driverParameters{ + bucket: fmt.Sprint(bucket), + rootDirectory: fmt.Sprint(rootDirectory), + email: jwtConf.Email, + privateKey: jwtConf.PrivateKey, + client: oauth2.NewClient(context.Background(), ts), + chunkSize: chunkSize, + } + + return New(params) +} + +// New constructs a new driver +func New(params driverParameters) (storagedriver.StorageDriver, error) { + rootDirectory := strings.Trim(params.rootDirectory, "/") + if rootDirectory != "" { + rootDirectory += "/" + } + if params.chunkSize <= 0 || params.chunkSize%minChunkSize != 0 { + return nil, fmt.Errorf("Invalid chunksize: %d is not a positive multiple of %d", params.chunkSize, minChunkSize) + } + d := &driver{ + bucket: params.bucket, + rootDirectory: rootDirectory, + email: params.email, + privateKey: params.privateKey, + client: params.client, + chunkSize: params.chunkSize, + } + + return &base.Base{ + StorageDriver: d, + }, nil +} + +// Implement the storagedriver.StorageDriver interface + +func (d *driver) Name() string { + return driverName +} + +// GetContent retrieves the content stored at "path" as a []byte. +// This should primarily be used for small objects. 
+func (d *driver) GetContent(context ctx.Context, path string) ([]byte, error) { + gcsContext := d.context(context) + name := d.pathToKey(path) + var rc io.ReadCloser + err := retry(func() error { + var err error + rc, err = storage.NewReader(gcsContext, d.bucket, name) + return err + }) + if err == storage.ErrObjectNotExist { + return nil, storagedriver.PathNotFoundError{Path: path} + } + if err != nil { + return nil, err + } + defer rc.Close() + + p, err := ioutil.ReadAll(rc) + if err != nil { + return nil, err + } + return p, nil +} + +// PutContent stores the []byte content at a location designated by "path". +// This should primarily be used for small objects. +func (d *driver) PutContent(context ctx.Context, path string, contents []byte) error { + return retry(func() error { + wc := storage.NewWriter(d.context(context), d.bucket, d.pathToKey(path)) + wc.ContentType = "application/octet-stream" + return putContentsClose(wc, contents) + }) +} + +// Reader retrieves an io.ReadCloser for the content stored at "path" +// with a given byte offset. +// May be used to resume reading a stream by providing a nonzero offset. +func (d *driver) Reader(context ctx.Context, path string, offset int64) (io.ReadCloser, error) { + res, err := getObject(d.client, d.bucket, d.pathToKey(path), offset) + if err != nil { + if res != nil { + if res.StatusCode == http.StatusNotFound { + res.Body.Close() + return nil, storagedriver.PathNotFoundError{Path: path} + } + + if res.StatusCode == http.StatusRequestedRangeNotSatisfiable { + res.Body.Close() + obj, err := storageStatObject(d.context(context), d.bucket, d.pathToKey(path)) + if err != nil { + return nil, err + } + if offset == int64(obj.Size) { + return ioutil.NopCloser(bytes.NewReader([]byte{})), nil + } + return nil, storagedriver.InvalidOffsetError{Path: path, Offset: offset} + } + } + return nil, err + } + if res.Header.Get("Content-Type") == uploadSessionContentType { + defer res.Body.Close() + return nil, storagedriver.PathNotFoundError{Path: path} + } + return res.Body, nil +} + +func getObject(client *http.Client, bucket string, name string, offset int64) (*http.Response, error) { + // copied from google.golang.org/cloud/storage#NewReader : + // to set the additional "Range" header + u := &url.URL{ + Scheme: "https", + Host: "storage.googleapis.com", + Path: fmt.Sprintf("/%s/%s", bucket, name), + } + req, err := http.NewRequest("GET", u.String(), nil) + if err != nil { + return nil, err + } + if offset > 0 { + req.Header.Set("Range", fmt.Sprintf("bytes=%v-", offset)) + } + var res *http.Response + err = retry(func() error { + var err error + res, err = client.Do(req) + return err + }) + if err != nil { + return nil, err + } + return res, googleapi.CheckMediaResponse(res) +} + +// Writer returns a FileWriter which will store the content written to it +// at the location designated by "path" after the call to Commit. +func (d *driver) Writer(context ctx.Context, path string, append bool) (storagedriver.FileWriter, error) { + writer := &writer{ + client: d.client, + bucket: d.bucket, + name: d.pathToKey(path), + buffer: make([]byte, d.chunkSize), + } + + if append { + err := writer.init(path) + if err != nil { + return nil, err + } + } + return writer, nil +} + +type writer struct { + client *http.Client + bucket string + name string + size int64 + offset int64 + closed bool + sessionURI string + buffer []byte + buffSize int +} + +// Cancel removes any written content from this FileWriter. 
+func (w *writer) Cancel() error { + w.closed = true + err := storageDeleteObject(cloud.NewContext(dummyProjectID, w.client), w.bucket, w.name) + if err != nil { + if status, ok := err.(*googleapi.Error); ok { + if status.Code == http.StatusNotFound { + err = nil + } + } + } + return err +} + +func (w *writer) Close() error { + if w.closed { + return nil + } + w.closed = true + + err := w.writeChunk() + if err != nil { + return err + } + + // Copy the remaining bytes from the buffer to the upload session + // Normally buffSize will be smaller than minChunkSize. However, in the + // unlikely event that the upload session failed to start, this number could be higher. + // In this case we can safely clip the remaining bytes to the minChunkSize + if w.buffSize > minChunkSize { + w.buffSize = minChunkSize + } + + // commit the writes by updating the upload session + err = retry(func() error { + wc := storage.NewWriter(cloud.NewContext(dummyProjectID, w.client), w.bucket, w.name) + wc.ContentType = uploadSessionContentType + wc.Metadata = map[string]string{ + "Session-URI": w.sessionURI, + "Offset": strconv.FormatInt(w.offset, 10), + } + return putContentsClose(wc, w.buffer[0:w.buffSize]) + }) + if err != nil { + return err + } + w.size = w.offset + int64(w.buffSize) + w.buffSize = 0 + return nil +} + +func putContentsClose(wc *storage.Writer, contents []byte) error { + size := len(contents) + var nn int + var err error + for nn < size { + n, err := wc.Write(contents[nn:size]) + nn += n + if err != nil { + break + } + } + if err != nil { + wc.CloseWithError(err) + return err + } + return wc.Close() +} + +// Commit flushes all content written to this FileWriter and makes it +// available for future calls to StorageDriver.GetContent and +// StorageDriver.Reader. 
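+//
+// Typical use, mirroring the driver tests in this tree (sketch; error
+// handling elided):
+//
+//	fw, _ := d.Writer(ctx, path, false)
+//	fw.Write(contents)
+//	fw.Commit()
+//	fw.Close()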
+func (w *writer) Commit() error { + + if err := w.checkClosed(); err != nil { + return err + } + w.closed = true + + // no session started yet just perform a simple upload + if w.sessionURI == "" { + err := retry(func() error { + wc := storage.NewWriter(cloud.NewContext(dummyProjectID, w.client), w.bucket, w.name) + wc.ContentType = "application/octet-stream" + return putContentsClose(wc, w.buffer[0:w.buffSize]) + }) + if err != nil { + return err + } + w.size = w.offset + int64(w.buffSize) + w.buffSize = 0 + return nil + } + size := w.offset + int64(w.buffSize) + var nn int + // loop must be performed at least once to ensure the file is committed even when + // the buffer is empty + for { + n, err := putChunk(w.client, w.sessionURI, w.buffer[nn:w.buffSize], w.offset, size) + nn += int(n) + w.offset += n + w.size = w.offset + if err != nil { + w.buffSize = copy(w.buffer, w.buffer[nn:w.buffSize]) + return err + } + if nn == w.buffSize { + break + } + } + w.buffSize = 0 + return nil +} + +func (w *writer) checkClosed() error { + if w.closed { + return fmt.Errorf("Writer already closed") + } + return nil +} + +func (w *writer) writeChunk() error { + var err error + // chunks can be uploaded only in multiples of minChunkSize + // chunkSize is a multiple of minChunkSize less than or equal to buffSize + chunkSize := w.buffSize - (w.buffSize % minChunkSize) + if chunkSize == 0 { + return nil + } + // if their is no sessionURI yet, obtain one by starting the session + if w.sessionURI == "" { + w.sessionURI, err = startSession(w.client, w.bucket, w.name) + } + if err != nil { + return err + } + nn, err := putChunk(w.client, w.sessionURI, w.buffer[0:chunkSize], w.offset, -1) + w.offset += nn + if w.offset > w.size { + w.size = w.offset + } + // shift the remaining bytes to the start of the buffer + w.buffSize = copy(w.buffer, w.buffer[int(nn):w.buffSize]) + + return err +} + +func (w *writer) Write(p []byte) (int, error) { + err := w.checkClosed() + if err != nil { + return 0, err + } + + var nn int + for nn < len(p) { + n := copy(w.buffer[w.buffSize:], p[nn:]) + w.buffSize += n + if w.buffSize == cap(w.buffer) { + err = w.writeChunk() + if err != nil { + break + } + } + nn += n + } + return nn, err +} + +// Size returns the number of bytes written to this FileWriter. 
+func (w *writer) Size() int64 { + return w.size +} + +func (w *writer) init(path string) error { + res, err := getObject(w.client, w.bucket, w.name, 0) + if err != nil { + return err + } + defer res.Body.Close() + if res.Header.Get("Content-Type") != uploadSessionContentType { + return storagedriver.PathNotFoundError{Path: path} + } + offset, err := strconv.ParseInt(res.Header.Get("X-Goog-Meta-Offset"), 10, 64) + if err != nil { + return err + } + buffer, err := ioutil.ReadAll(res.Body) + if err != nil { + return err + } + w.sessionURI = res.Header.Get("X-Goog-Meta-Session-URI") + w.buffSize = copy(w.buffer, buffer) + w.offset = offset + w.size = offset + int64(w.buffSize) + return nil +} + +type request func() error + +func retry(req request) error { + backoff := time.Second + var err error + for i := 0; i < maxTries; i++ { + err = req() + if err == nil { + return nil + } + + status, ok := err.(*googleapi.Error) + if !ok || (status.Code != 429 && status.Code < http.StatusInternalServerError) { + return err + } + + time.Sleep(backoff - time.Second + (time.Duration(rand.Int31n(1000)) * time.Millisecond)) + if i <= 4 { + backoff = backoff * 2 + } + } + return err +} + +// Stat retrieves the FileInfo for the given path, including the current +// size in bytes and the creation time. +func (d *driver) Stat(context ctx.Context, path string) (storagedriver.FileInfo, error) { + var fi storagedriver.FileInfoFields + //try to get as file + gcsContext := d.context(context) + obj, err := storageStatObject(gcsContext, d.bucket, d.pathToKey(path)) + if err == nil { + if obj.ContentType == uploadSessionContentType { + return nil, storagedriver.PathNotFoundError{Path: path} + } + fi = storagedriver.FileInfoFields{ + Path: path, + Size: obj.Size, + ModTime: obj.Updated, + IsDir: false, + } + return storagedriver.FileInfoInternal{FileInfoFields: fi}, nil + } + //try to get as folder + dirpath := d.pathToDirKey(path) + + var query *storage.Query + query = &storage.Query{} + query.Prefix = dirpath + query.MaxResults = 1 + + objects, err := storageListObjects(gcsContext, d.bucket, query) + if err != nil { + return nil, err + } + if len(objects.Results) < 1 { + return nil, storagedriver.PathNotFoundError{Path: path} + } + fi = storagedriver.FileInfoFields{ + Path: path, + IsDir: true, + } + obj = objects.Results[0] + if obj.Name == dirpath { + fi.Size = obj.Size + fi.ModTime = obj.Updated + } + return storagedriver.FileInfoInternal{FileInfoFields: fi}, nil +} + +// List returns a list of the objects that are direct descendants of the +//given path. +func (d *driver) List(context ctx.Context, path string) ([]string, error) { + var query *storage.Query + query = &storage.Query{} + query.Delimiter = "/" + query.Prefix = d.pathToDirKey(path) + list := make([]string, 0, 64) + for { + objects, err := storageListObjects(d.context(context), d.bucket, query) + if err != nil { + return nil, err + } + for _, object := range objects.Results { + // GCS does not guarantee strong consistency between + // DELETE and LIST operations. 
Check that the object is not deleted, + // and filter out any objects with a non-zero time-deleted + if object.Deleted.IsZero() && object.ContentType != uploadSessionContentType { + list = append(list, d.keyToPath(object.Name)) + } + } + for _, subpath := range objects.Prefixes { + subpath = d.keyToPath(subpath) + list = append(list, subpath) + } + query = objects.Next + if query == nil { + break + } + } + if path != "/" && len(list) == 0 { + // Treat empty response as missing directory, since we don't actually + // have directories in Google Cloud Storage. + return nil, storagedriver.PathNotFoundError{Path: path} + } + return list, nil +} + +// Move moves an object stored at sourcePath to destPath, removing the +// original object. +func (d *driver) Move(context ctx.Context, sourcePath string, destPath string) error { + gcsContext := d.context(context) + _, err := storageCopyObject(gcsContext, d.bucket, d.pathToKey(sourcePath), d.bucket, d.pathToKey(destPath), nil) + if err != nil { + if status, ok := err.(*googleapi.Error); ok { + if status.Code == http.StatusNotFound { + return storagedriver.PathNotFoundError{Path: sourcePath} + } + } + return err + } + err = storageDeleteObject(gcsContext, d.bucket, d.pathToKey(sourcePath)) + // if deleting the file fails, log the error, but do not fail; the file was successfully copied, + // and the original should eventually be cleaned when purging the uploads folder. + if err != nil { + logrus.Infof("error deleting file: %v due to %v", sourcePath, err) + } + return nil +} + +// listAll recursively lists all names of objects stored at "prefix" and its subpaths. +func (d *driver) listAll(context context.Context, prefix string) ([]string, error) { + list := make([]string, 0, 64) + query := &storage.Query{} + query.Prefix = prefix + query.Versions = false + for { + objects, err := storageListObjects(d.context(context), d.bucket, query) + if err != nil { + return nil, err + } + for _, obj := range objects.Results { + // GCS does not guarantee strong consistency between + // DELETE and LIST operations. Check that the object is not deleted, + // and filter out any objects with a non-zero time-deleted + if obj.Deleted.IsZero() { + list = append(list, obj.Name) + } + } + query = objects.Next + if query == nil { + break + } + } + return list, nil +} + +// Delete recursively deletes all objects stored at "path" and its subpaths. +func (d *driver) Delete(context ctx.Context, path string) error { + prefix := d.pathToDirKey(path) + gcsContext := d.context(context) + keys, err := d.listAll(gcsContext, prefix) + if err != nil { + return err + } + if len(keys) > 0 { + sort.Sort(sort.Reverse(sort.StringSlice(keys))) + for _, key := range keys { + err := storageDeleteObject(gcsContext, d.bucket, key) + // GCS only guarantees eventual consistency, so listAll might return + // paths that no longer exist. 
If this happens, just ignore any not + // found error + if status, ok := err.(*googleapi.Error); ok { + if status.Code == http.StatusNotFound { + err = nil + } + } + if err != nil { + return err + } + } + return nil + } + err = storageDeleteObject(gcsContext, d.bucket, d.pathToKey(path)) + if err != nil { + if status, ok := err.(*googleapi.Error); ok { + if status.Code == http.StatusNotFound { + return storagedriver.PathNotFoundError{Path: path} + } + } + } + return err +} + +func storageDeleteObject(context context.Context, bucket string, name string) error { + return retry(func() error { + return storage.DeleteObject(context, bucket, name) + }) +} + +func storageStatObject(context context.Context, bucket string, name string) (*storage.Object, error) { + var obj *storage.Object + err := retry(func() error { + var err error + obj, err = storage.StatObject(context, bucket, name) + return err + }) + return obj, err +} + +func storageListObjects(context context.Context, bucket string, q *storage.Query) (*storage.Objects, error) { + var objs *storage.Objects + err := retry(func() error { + var err error + objs, err = storage.ListObjects(context, bucket, q) + return err + }) + return objs, err +} + +func storageCopyObject(context context.Context, srcBucket, srcName string, destBucket, destName string, attrs *storage.ObjectAttrs) (*storage.Object, error) { + var obj *storage.Object + err := retry(func() error { + var err error + obj, err = storage.CopyObject(context, srcBucket, srcName, destBucket, destName, attrs) + return err + }) + return obj, err +} + +// URLFor returns a URL which may be used to retrieve the content stored at +// the given path, possibly using the given options. +// Returns ErrUnsupportedMethod if this driver has no privateKey +func (d *driver) URLFor(context ctx.Context, path string, options map[string]interface{}) (string, error) { + if d.privateKey == nil { + return "", storagedriver.ErrUnsupportedMethod{} + } + + name := d.pathToKey(path) + methodString := "GET" + method, ok := options["method"] + if ok { + methodString, ok = method.(string) + if !ok || (methodString != "GET" && methodString != "HEAD") { + return "", storagedriver.ErrUnsupportedMethod{} + } + } + + expiresTime := time.Now().Add(20 * time.Minute) + expires, ok := options["expiry"] + if ok { + et, ok := expires.(time.Time) + if ok { + expiresTime = et + } + } + + opts := &storage.SignedURLOptions{ + GoogleAccessID: d.email, + PrivateKey: d.privateKey, + Method: methodString, + Expires: expiresTime, + } + return storage.SignedURL(d.bucket, name, opts) +} + +func startSession(client *http.Client, bucket string, name string) (uri string, err error) { + u := &url.URL{ + Scheme: "https", + Host: "www.googleapis.com", + Path: fmt.Sprintf("/upload/storage/v1/b/%v/o", bucket), + RawQuery: fmt.Sprintf("uploadType=resumable&name=%v", name), + } + err = retry(func() error { + req, err := http.NewRequest("POST", u.String(), nil) + if err != nil { + return err + } + req.Header.Set("X-Upload-Content-Type", "application/octet-stream") + req.Header.Set("Content-Length", "0") + resp, err := client.Do(req) + if err != nil { + return err + } + defer resp.Body.Close() + err = googleapi.CheckMediaResponse(resp) + if err != nil { + return err + } + uri = resp.Header.Get("Location") + return nil + }) + return uri, err +} + +func putChunk(client *http.Client, sessionURI string, chunk []byte, from int64, totalSize int64) (int64, error) { + bytesPut := int64(0) + err := retry(func() error { + req, err := http.NewRequest("PUT", 
sessionURI, bytes.NewReader(chunk)) + if err != nil { + return err + } + length := int64(len(chunk)) + to := from + length - 1 + size := "*" + if totalSize >= 0 { + size = strconv.FormatInt(totalSize, 10) + } + req.Header.Set("Content-Type", "application/octet-stream") + if from == to+1 { + req.Header.Set("Content-Range", fmt.Sprintf("bytes */%v", size)) + } else { + req.Header.Set("Content-Range", fmt.Sprintf("bytes %v-%v/%v", from, to, size)) + } + req.Header.Set("Content-Length", strconv.FormatInt(length, 10)) + + resp, err := client.Do(req) + if err != nil { + return err + } + defer resp.Body.Close() + if totalSize < 0 && resp.StatusCode == 308 { + groups := rangeHeader.FindStringSubmatch(resp.Header.Get("Range")) + end, err := strconv.ParseInt(groups[2], 10, 64) + if err != nil { + return err + } + bytesPut = end - from + 1 + return nil + } + err = googleapi.CheckMediaResponse(resp) + if err != nil { + return err + } + bytesPut = to - from + 1 + return nil + }) + return bytesPut, err +} + +func (d *driver) context(context ctx.Context) context.Context { + return cloud.WithContext(context, dummyProjectID, d.client) +} + +func (d *driver) pathToKey(path string) string { + return strings.TrimRight(d.rootDirectory+strings.TrimLeft(path, "/"), "/") +} + +func (d *driver) pathToDirKey(path string) string { + return d.pathToKey(path) + "/" +} + +func (d *driver) keyToPath(key string) string { + return "/" + strings.Trim(strings.TrimPrefix(key, d.rootDirectory), "/") +} diff --git a/vendor/github.com/docker/distribution/registry/storage/driver/gcs/gcs_test.go b/vendor/github.com/docker/distribution/registry/storage/driver/gcs/gcs_test.go new file mode 100644 index 000000000..f2808d5fc --- /dev/null +++ b/vendor/github.com/docker/distribution/registry/storage/driver/gcs/gcs_test.go @@ -0,0 +1,311 @@ +// +build include_gcs + +package gcs + +import ( + "io/ioutil" + "os" + "testing" + + "fmt" + ctx "github.com/docker/distribution/context" + storagedriver "github.com/docker/distribution/registry/storage/driver" + "github.com/docker/distribution/registry/storage/driver/testsuites" + "golang.org/x/oauth2" + "golang.org/x/oauth2/google" + "google.golang.org/api/googleapi" + "google.golang.org/cloud/storage" + "gopkg.in/check.v1" +) + +// Hook up gocheck into the "go test" runner. 
+func Test(t *testing.T) { check.TestingT(t) } + +var gcsDriverConstructor func(rootDirectory string) (storagedriver.StorageDriver, error) +var skipGCS func() string + +func init() { + bucket := os.Getenv("REGISTRY_STORAGE_GCS_BUCKET") + credentials := os.Getenv("GOOGLE_APPLICATION_CREDENTIALS") + + // Skip GCS storage driver tests if environment variable parameters are not provided + skipGCS = func() string { + if bucket == "" || credentials == "" { + return "The following environment variables must be set to enable these tests: REGISTRY_STORAGE_GCS_BUCKET, GOOGLE_APPLICATION_CREDENTIALS" + } + return "" + } + + if skipGCS() != "" { + return + } + + root, err := ioutil.TempDir("", "driver-") + if err != nil { + panic(err) + } + defer os.Remove(root) + var ts oauth2.TokenSource + var email string + var privateKey []byte + + ts, err = google.DefaultTokenSource(ctx.Background(), storage.ScopeFullControl) + if err != nil { + // Assume that the file contents are within the environment variable since it exists + // but does not contain a valid file path + jwtConfig, err := google.JWTConfigFromJSON([]byte(credentials), storage.ScopeFullControl) + if err != nil { + panic(fmt.Sprintf("Error reading JWT config : %s", err)) + } + email = jwtConfig.Email + privateKey = []byte(jwtConfig.PrivateKey) + if len(privateKey) == 0 { + panic("Error reading JWT config : missing private_key property") + } + if email == "" { + panic("Error reading JWT config : missing client_email property") + } + ts = jwtConfig.TokenSource(ctx.Background()) + } + + gcsDriverConstructor = func(rootDirectory string) (storagedriver.StorageDriver, error) { + parameters := driverParameters{ + bucket: bucket, + rootDirectory: root, + email: email, + privateKey: privateKey, + client: oauth2.NewClient(ctx.Background(), ts), + chunkSize: defaultChunkSize, + } + + return New(parameters) + } + + testsuites.RegisterSuite(func() (storagedriver.StorageDriver, error) { + return gcsDriverConstructor(root) + }, skipGCS) +} + +// Test Committing a FileWriter without having called Write +func TestCommitEmpty(t *testing.T) { + if skipGCS() != "" { + t.Skip(skipGCS()) + } + + validRoot, err := ioutil.TempDir("", "driver-") + if err != nil { + t.Fatalf("unexpected error creating temporary directory: %v", err) + } + defer os.Remove(validRoot) + + driver, err := gcsDriverConstructor(validRoot) + if err != nil { + t.Fatalf("unexpected error creating rooted driver: %v", err) + } + + filename := "/test" + ctx := ctx.Background() + + writer, err := driver.Writer(ctx, filename, false) + defer driver.Delete(ctx, filename) + if err != nil { + t.Fatalf("driver.Writer: unexpected error: %v", err) + } + err = writer.Commit() + if err != nil { + t.Fatalf("writer.Commit: unexpected error: %v", err) + } + err = writer.Close() + if err != nil { + t.Fatalf("writer.Close: unexpected error: %v", err) + } + if writer.Size() != 0 { + t.Fatalf("writer.Size: %d != 0", writer.Size()) + } + readContents, err := driver.GetContent(ctx, filename) + if err != nil { + t.Fatalf("driver.GetContent: unexpected error: %v", err) + } + if len(readContents) != 0 { + t.Fatalf("len(driver.GetContent(..)): %d != 0", len(readContents)) + } +} + +// Test Committing a FileWriter after having written exactly +// defaultChunksize bytes. 
+func TestCommit(t *testing.T) { + if skipGCS() != "" { + t.Skip(skipGCS()) + } + + validRoot, err := ioutil.TempDir("", "driver-") + if err != nil { + t.Fatalf("unexpected error creating temporary directory: %v", err) + } + defer os.Remove(validRoot) + + driver, err := gcsDriverConstructor(validRoot) + if err != nil { + t.Fatalf("unexpected error creating rooted driver: %v", err) + } + + filename := "/test" + ctx := ctx.Background() + + contents := make([]byte, defaultChunkSize) + writer, err := driver.Writer(ctx, filename, false) + defer driver.Delete(ctx, filename) + if err != nil { + t.Fatalf("driver.Writer: unexpected error: %v", err) + } + _, err = writer.Write(contents) + if err != nil { + t.Fatalf("writer.Write: unexpected error: %v", err) + } + err = writer.Commit() + if err != nil { + t.Fatalf("writer.Commit: unexpected error: %v", err) + } + err = writer.Close() + if err != nil { + t.Fatalf("writer.Close: unexpected error: %v", err) + } + if writer.Size() != int64(len(contents)) { + t.Fatalf("writer.Size: %d != %d", writer.Size(), len(contents)) + } + readContents, err := driver.GetContent(ctx, filename) + if err != nil { + t.Fatalf("driver.GetContent: unexpected error: %v", err) + } + if len(readContents) != len(contents) { + t.Fatalf("len(driver.GetContent(..)): %d != %d", len(readContents), len(contents)) + } +} + +func TestRetry(t *testing.T) { + if skipGCS() != "" { + t.Skip(skipGCS()) + } + + assertError := func(expected string, observed error) { + observedMsg := "" + if observed != nil { + observedMsg = observed.Error() + } + if observedMsg != expected { + t.Fatalf("expected %v, observed %v\n", expected, observedMsg) + } + } + + err := retry(func() error { + return &googleapi.Error{ + Code: 503, + Message: "google api error", + } + }) + assertError("googleapi: Error 503: google api error", err) + + err = retry(func() error { + return &googleapi.Error{ + Code: 404, + Message: "google api error", + } + }) + assertError("googleapi: Error 404: google api error", err) + + err = retry(func() error { + return fmt.Errorf("error") + }) + assertError("error", err) +} + +func TestEmptyRootList(t *testing.T) { + if skipGCS() != "" { + t.Skip(skipGCS()) + } + + validRoot, err := ioutil.TempDir("", "driver-") + if err != nil { + t.Fatalf("unexpected error creating temporary directory: %v", err) + } + defer os.Remove(validRoot) + + rootedDriver, err := gcsDriverConstructor(validRoot) + if err != nil { + t.Fatalf("unexpected error creating rooted driver: %v", err) + } + + emptyRootDriver, err := gcsDriverConstructor("") + if err != nil { + t.Fatalf("unexpected error creating empty root driver: %v", err) + } + + slashRootDriver, err := gcsDriverConstructor("/") + if err != nil { + t.Fatalf("unexpected error creating slash root driver: %v", err) + } + + filename := "/test" + contents := []byte("contents") + ctx := ctx.Background() + err = rootedDriver.PutContent(ctx, filename, contents) + if err != nil { + t.Fatalf("unexpected error creating content: %v", err) + } + defer func() { + err := rootedDriver.Delete(ctx, filename) + if err != nil { + t.Fatalf("failed to remove %v due to %v\n", filename, err) + } + }() + keys, err := emptyRootDriver.List(ctx, "/") + for _, path := range keys { + if !storagedriver.PathRegexp.MatchString(path) { + t.Fatalf("unexpected string in path: %q != %q", path, storagedriver.PathRegexp) + } + } + + keys, err = slashRootDriver.List(ctx, "/") + for _, path := range keys { + if !storagedriver.PathRegexp.MatchString(path) { + t.Fatalf("unexpected string in path: 
%q != %q", path, storagedriver.PathRegexp) + } + } +} + +// TestMoveDirectory checks that moving a directory returns an error. +func TestMoveDirectory(t *testing.T) { + if skipGCS() != "" { + t.Skip(skipGCS()) + } + + validRoot, err := ioutil.TempDir("", "driver-") + if err != nil { + t.Fatalf("unexpected error creating temporary directory: %v", err) + } + defer os.Remove(validRoot) + + driver, err := gcsDriverConstructor(validRoot) + if err != nil { + t.Fatalf("unexpected error creating rooted driver: %v", err) + } + + ctx := ctx.Background() + contents := []byte("contents") + // Create a regular file. + err = driver.PutContent(ctx, "/parent/dir/foo", contents) + if err != nil { + t.Fatalf("unexpected error creating content: %v", err) + } + defer func() { + err := driver.Delete(ctx, "/parent") + if err != nil { + t.Fatalf("failed to remove /parent due to %v\n", err) + } + }() + + err = driver.Move(ctx, "/parent/dir", "/parent/other") + if err == nil { + t.Fatalf("Moving directory /parent/dir /parent/other should have return a non-nil error\n") + } +} diff --git a/vendor/github.com/docker/distribution/registry/storage/driver/inmemory/driver.go b/vendor/github.com/docker/distribution/registry/storage/driver/inmemory/driver.go new file mode 100644 index 000000000..eb2fd1cf4 --- /dev/null +++ b/vendor/github.com/docker/distribution/registry/storage/driver/inmemory/driver.go @@ -0,0 +1,312 @@ +package inmemory + +import ( + "fmt" + "io" + "io/ioutil" + "sync" + "time" + + "github.com/docker/distribution/context" + storagedriver "github.com/docker/distribution/registry/storage/driver" + "github.com/docker/distribution/registry/storage/driver/base" + "github.com/docker/distribution/registry/storage/driver/factory" +) + +const driverName = "inmemory" + +func init() { + factory.Register(driverName, &inMemoryDriverFactory{}) +} + +// inMemoryDriverFacotry implements the factory.StorageDriverFactory interface. +type inMemoryDriverFactory struct{} + +func (factory *inMemoryDriverFactory) Create(parameters map[string]interface{}) (storagedriver.StorageDriver, error) { + return New(), nil +} + +type driver struct { + root *dir + mutex sync.RWMutex +} + +// baseEmbed allows us to hide the Base embed. +type baseEmbed struct { + base.Base +} + +// Driver is a storagedriver.StorageDriver implementation backed by a local map. +// Intended solely for example and testing purposes. +type Driver struct { + baseEmbed // embedded, hidden base driver. +} + +var _ storagedriver.StorageDriver = &Driver{} + +// New constructs a new Driver. +func New() *Driver { + return &Driver{ + baseEmbed: baseEmbed{ + Base: base.Base{ + StorageDriver: &driver{ + root: &dir{ + common: common{ + p: "/", + mod: time.Now(), + }, + }, + }, + }, + }, + } +} + +// Implement the storagedriver.StorageDriver interface. + +func (d *driver) Name() string { + return driverName +} + +// GetContent retrieves the content stored at "path" as a []byte. +func (d *driver) GetContent(ctx context.Context, path string) ([]byte, error) { + d.mutex.RLock() + defer d.mutex.RUnlock() + + rc, err := d.Reader(ctx, path, 0) + if err != nil { + return nil, err + } + defer rc.Close() + + return ioutil.ReadAll(rc) +} + +// PutContent stores the []byte content at a location designated by "path". 
+func (d *driver) PutContent(ctx context.Context, p string, contents []byte) error { + d.mutex.Lock() + defer d.mutex.Unlock() + + normalized := normalize(p) + + f, err := d.root.mkfile(normalized) + if err != nil { + // TODO(stevvooe): Again, we need to clarify when this is not a + // directory in StorageDriver API. + return fmt.Errorf("not a file") + } + + f.truncate() + f.WriteAt(contents, 0) + + return nil +} + +// Reader retrieves an io.ReadCloser for the content stored at "path" with a +// given byte offset. +func (d *driver) Reader(ctx context.Context, path string, offset int64) (io.ReadCloser, error) { + d.mutex.RLock() + defer d.mutex.RUnlock() + + if offset < 0 { + return nil, storagedriver.InvalidOffsetError{Path: path, Offset: offset} + } + + normalized := normalize(path) + found := d.root.find(normalized) + + if found.path() != normalized { + return nil, storagedriver.PathNotFoundError{Path: path} + } + + if found.isdir() { + return nil, fmt.Errorf("%q is a directory", path) + } + + return ioutil.NopCloser(found.(*file).sectionReader(offset)), nil +} + +// Writer returns a FileWriter which will store the content written to it +// at the location designated by "path" after the call to Commit. +func (d *driver) Writer(ctx context.Context, path string, append bool) (storagedriver.FileWriter, error) { + d.mutex.Lock() + defer d.mutex.Unlock() + + normalized := normalize(path) + + f, err := d.root.mkfile(normalized) + if err != nil { + return nil, fmt.Errorf("not a file") + } + + if !append { + f.truncate() + } + + return d.newWriter(f), nil +} + +// Stat returns info about the provided path. +func (d *driver) Stat(ctx context.Context, path string) (storagedriver.FileInfo, error) { + d.mutex.RLock() + defer d.mutex.RUnlock() + + normalized := normalize(path) + found := d.root.find(normalized) + + if found.path() != normalized { + return nil, storagedriver.PathNotFoundError{Path: path} + } + + fi := storagedriver.FileInfoFields{ + Path: path, + IsDir: found.isdir(), + ModTime: found.modtime(), + } + + if !fi.IsDir { + fi.Size = int64(len(found.(*file).data)) + } + + return storagedriver.FileInfoInternal{FileInfoFields: fi}, nil +} + +// List returns a list of the objects that are direct descendants of the given +// path. +func (d *driver) List(ctx context.Context, path string) ([]string, error) { + d.mutex.RLock() + defer d.mutex.RUnlock() + + normalized := normalize(path) + + found := d.root.find(normalized) + + if !found.isdir() { + return nil, fmt.Errorf("not a directory") // TODO(stevvooe): Need error type for this... + } + + entries, err := found.(*dir).list(normalized) + + if err != nil { + switch err { + case errNotExists: + return nil, storagedriver.PathNotFoundError{Path: path} + case errIsNotDir: + return nil, fmt.Errorf("not a directory") + default: + return nil, err + } + } + + return entries, nil +} + +// Move moves an object stored at sourcePath to destPath, removing the original +// object. +func (d *driver) Move(ctx context.Context, sourcePath string, destPath string) error { + d.mutex.Lock() + defer d.mutex.Unlock() + + normalizedSrc, normalizedDst := normalize(sourcePath), normalize(destPath) + + err := d.root.move(normalizedSrc, normalizedDst) + switch err { + case errNotExists: + return storagedriver.PathNotFoundError{Path: destPath} + default: + return err + } +} + +// Delete recursively deletes all objects stored at "path" and its subpaths. 
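The in-memory driver above exposes the full storagedriver.StorageDriver surface without any external dependencies, so it is the quickest way to see the content methods in action. A minimal, hedged usage sketch (paths and contents are arbitrary):

package main

import (
	"fmt"

	"github.com/docker/distribution/context"
	"github.com/docker/distribution/registry/storage/driver/inmemory"
)

func main() {
	ctx := context.Background()
	d := inmemory.New()

	// Store a blob, read it back, and list its parent directory.
	if err := d.PutContent(ctx, "/repo/blobs/data", []byte("hello")); err != nil {
		panic(err)
	}
	contents, err := d.GetContent(ctx, "/repo/blobs/data")
	if err != nil {
		panic(err)
	}
	fmt.Printf("%s\n", contents)

	entries, err := d.List(ctx, "/repo/blobs")
	if err != nil {
		panic(err)
	}
	fmt.Println(entries) // [/repo/blobs/data]
}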
+func (d *driver) Delete(ctx context.Context, path string) error { + d.mutex.Lock() + defer d.mutex.Unlock() + + normalized := normalize(path) + + err := d.root.delete(normalized) + switch err { + case errNotExists: + return storagedriver.PathNotFoundError{Path: path} + default: + return err + } +} + +// URLFor returns a URL which may be used to retrieve the content stored at the given path. +// May return an UnsupportedMethodErr in certain StorageDriver implementations. +func (d *driver) URLFor(ctx context.Context, path string, options map[string]interface{}) (string, error) { + return "", storagedriver.ErrUnsupportedMethod{} +} + +type writer struct { + d *driver + f *file + closed bool + committed bool + cancelled bool +} + +func (d *driver) newWriter(f *file) storagedriver.FileWriter { + return &writer{ + d: d, + f: f, + } +} + +func (w *writer) Write(p []byte) (int, error) { + if w.closed { + return 0, fmt.Errorf("already closed") + } else if w.committed { + return 0, fmt.Errorf("already committed") + } else if w.cancelled { + return 0, fmt.Errorf("already cancelled") + } + + w.d.mutex.Lock() + defer w.d.mutex.Unlock() + + return w.f.WriteAt(p, int64(len(w.f.data))) +} + +func (w *writer) Size() int64 { + w.d.mutex.RLock() + defer w.d.mutex.RUnlock() + + return int64(len(w.f.data)) +} + +func (w *writer) Close() error { + if w.closed { + return fmt.Errorf("already closed") + } + w.closed = true + return nil +} + +func (w *writer) Cancel() error { + if w.closed { + return fmt.Errorf("already closed") + } else if w.committed { + return fmt.Errorf("already committed") + } + w.cancelled = true + + w.d.mutex.Lock() + defer w.d.mutex.Unlock() + + return w.d.root.delete(w.f.path()) +} + +func (w *writer) Commit() error { + if w.closed { + return fmt.Errorf("already closed") + } else if w.committed { + return fmt.Errorf("already committed") + } else if w.cancelled { + return fmt.Errorf("already cancelled") + } + w.committed = true + return nil +} diff --git a/vendor/github.com/docker/distribution/registry/storage/driver/inmemory/driver_test.go b/vendor/github.com/docker/distribution/registry/storage/driver/inmemory/driver_test.go new file mode 100644 index 000000000..dbc1916f9 --- /dev/null +++ b/vendor/github.com/docker/distribution/registry/storage/driver/inmemory/driver_test.go @@ -0,0 +1,19 @@ +package inmemory + +import ( + "testing" + + storagedriver "github.com/docker/distribution/registry/storage/driver" + "github.com/docker/distribution/registry/storage/driver/testsuites" + "gopkg.in/check.v1" +) + +// Hook up gocheck into the "go test" runner. 
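The writer type above follows the same FileWriter lifecycle that the GCS tests earlier in this diff exercise: obtain a writer, write, Commit, then Close, with Size reporting the bytes accepted so far. A small, hedged sketch of that lifecycle against the in-memory driver (the path and payload are arbitrary):

package main

import (
	"fmt"

	"github.com/docker/distribution/context"
	"github.com/docker/distribution/registry/storage/driver/inmemory"
)

func main() {
	ctx := context.Background()
	d := inmemory.New()

	// append=false starts a fresh write; append=true would continue an existing file.
	fw, err := d.Writer(ctx, "/uploads/blob", false)
	if err != nil {
		panic(err)
	}
	if _, err := fw.Write([]byte("payload")); err != nil {
		panic(err)
	}
	// Commit marks the content as complete; Close must still be called afterwards.
	if err := fw.Commit(); err != nil {
		panic(err)
	}
	if err := fw.Close(); err != nil {
		panic(err)
	}
	fmt.Println(fw.Size()) // 7
}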
+func Test(t *testing.T) { check.TestingT(t) } + +func init() { + inmemoryDriverConstructor := func() (storagedriver.StorageDriver, error) { + return New(), nil + } + testsuites.RegisterSuite(inmemoryDriverConstructor, testsuites.NeverSkip) +} diff --git a/vendor/github.com/docker/distribution/registry/storage/driver/inmemory/mfs.go b/vendor/github.com/docker/distribution/registry/storage/driver/inmemory/mfs.go new file mode 100644 index 000000000..cdefacfd8 --- /dev/null +++ b/vendor/github.com/docker/distribution/registry/storage/driver/inmemory/mfs.go @@ -0,0 +1,338 @@ +package inmemory + +import ( + "fmt" + "io" + "path" + "sort" + "strings" + "time" +) + +var ( + errExists = fmt.Errorf("exists") + errNotExists = fmt.Errorf("notexists") + errIsNotDir = fmt.Errorf("notdir") + errIsDir = fmt.Errorf("isdir") +) + +type node interface { + name() string + path() string + isdir() bool + modtime() time.Time +} + +// dir is the central type for the memory-based storagedriver. All operations +// are dispatched from a root dir. +type dir struct { + common + + // TODO(stevvooe): Use sorted slice + search. + children map[string]node +} + +var _ node = &dir{} + +func (d *dir) isdir() bool { + return true +} + +// add places the node n into dir d. +func (d *dir) add(n node) { + if d.children == nil { + d.children = make(map[string]node) + } + + d.children[n.name()] = n + d.mod = time.Now() +} + +// find searches for the node, given path q in dir. If the node is found, it +// will be returned. If the node is not found, the closet existing parent. If +// the node is found, the returned (node).path() will match q. +func (d *dir) find(q string) node { + q = strings.Trim(q, "/") + i := strings.Index(q, "/") + + if q == "" { + return d + } + + if i == 0 { + panic("shouldn't happen, no root paths") + } + + var component string + if i < 0 { + // No more path components + component = q + } else { + component = q[:i] + } + + child, ok := d.children[component] + if !ok { + // Node was not found. Return p and the current node. + return d + } + + if child.isdir() { + // traverse down! + q = q[i+1:] + return child.(*dir).find(q) + } + + return child +} + +func (d *dir) list(p string) ([]string, error) { + n := d.find(p) + + if n.path() != p { + return nil, errNotExists + } + + if !n.isdir() { + return nil, errIsNotDir + } + + var children []string + for _, child := range n.(*dir).children { + children = append(children, child.path()) + } + + sort.Strings(children) + return children, nil +} + +// mkfile or return the existing one. returns an error if it exists and is a +// directory. Essentially, this is open or create. +func (d *dir) mkfile(p string) (*file, error) { + n := d.find(p) + if n.path() == p { + if n.isdir() { + return nil, errIsDir + } + + return n.(*file), nil + } + + dirpath, filename := path.Split(p) + // Make any non-existent directories + n, err := d.mkdirs(dirpath) + if err != nil { + return nil, err + } + + dd := n.(*dir) + n = &file{ + common: common{ + p: path.Join(dd.path(), filename), + mod: time.Now(), + }, + } + + dd.add(n) + return n.(*file), nil +} + +// mkdirs creates any missing directory entries in p and returns the result. 
+func (d *dir) mkdirs(p string) (*dir, error) { + p = normalize(p) + + n := d.find(p) + + if !n.isdir() { + // Found something there + return nil, errIsNotDir + } + + if n.path() == p { + return n.(*dir), nil + } + + dd := n.(*dir) + + relative := strings.Trim(strings.TrimPrefix(p, n.path()), "/") + + if relative == "" { + return dd, nil + } + + components := strings.Split(relative, "/") + for _, component := range components { + d, err := dd.mkdir(component) + + if err != nil { + // This should actually never happen, since there are no children. + return nil, err + } + dd = d + } + + return dd, nil +} + +// mkdir creates a child directory under d with the given name. +func (d *dir) mkdir(name string) (*dir, error) { + if name == "" { + return nil, fmt.Errorf("invalid dirname") + } + + _, ok := d.children[name] + if ok { + return nil, errExists + } + + child := &dir{ + common: common{ + p: path.Join(d.path(), name), + mod: time.Now(), + }, + } + d.add(child) + d.mod = time.Now() + + return child, nil +} + +func (d *dir) move(src, dst string) error { + dstDirname, _ := path.Split(dst) + + dp, err := d.mkdirs(dstDirname) + if err != nil { + return err + } + + srcDirname, srcFilename := path.Split(src) + sp := d.find(srcDirname) + + if normalize(srcDirname) != normalize(sp.path()) { + return errNotExists + } + + spd, ok := sp.(*dir) + if !ok { + return errIsNotDir // paranoid. + } + + s, ok := spd.children[srcFilename] + if !ok { + return errNotExists + } + + delete(spd.children, srcFilename) + + switch n := s.(type) { + case *dir: + n.p = dst + case *file: + n.p = dst + } + + dp.add(s) + + return nil +} + +func (d *dir) delete(p string) error { + dirname, filename := path.Split(p) + parent := d.find(dirname) + + if normalize(dirname) != normalize(parent.path()) { + return errNotExists + } + + if _, ok := parent.(*dir).children[filename]; !ok { + return errNotExists + } + + delete(parent.(*dir).children, filename) + return nil +} + +// dump outputs a primitive directory structure to stdout. +func (d *dir) dump(indent string) { + fmt.Println(indent, d.name()+"/") + + for _, child := range d.children { + if child.isdir() { + child.(*dir).dump(indent + "\t") + } else { + fmt.Println(indent, child.name()) + } + + } +} + +func (d *dir) String() string { + return fmt.Sprintf("&dir{path: %v, children: %v}", d.p, d.children) +} + +// file stores actual data in the fs tree. It acts like an open, seekable file +// where operations are conducted through ReadAt and WriteAt. Use it with +// SectionReader for the best effect. +type file struct { + common + data []byte +} + +var _ node = &file{} + +func (f *file) isdir() bool { + return false +} + +func (f *file) truncate() { + f.data = f.data[:0] +} + +func (f *file) sectionReader(offset int64) io.Reader { + return io.NewSectionReader(f, offset, int64(len(f.data))-offset) +} + +func (f *file) ReadAt(p []byte, offset int64) (n int, err error) { + return copy(p, f.data[offset:]), nil +} + +func (f *file) WriteAt(p []byte, offset int64) (n int, err error) { + off := int(offset) + if cap(f.data) < off+len(p) { + data := make([]byte, len(f.data), off+len(p)) + copy(data, f.data) + f.data = data + } + + f.mod = time.Now() + f.data = f.data[:off+len(p)] + + return copy(f.data[off:off+len(p)], p), nil +} + +func (f *file) String() string { + return fmt.Sprintf("&file{path: %q}", f.p) +} + +// common provides shared fields and methods for node implementations. 
+type common struct { + p string + mod time.Time +} + +func (c *common) name() string { + _, name := path.Split(c.p) + return name +} + +func (c *common) path() string { + return c.p +} + +func (c *common) modtime() time.Time { + return c.mod +} + +func normalize(p string) string { + return "/" + strings.Trim(p, "/") +} diff --git a/vendor/github.com/docker/distribution/registry/storage/driver/middleware/cloudfront/middleware.go b/vendor/github.com/docker/distribution/registry/storage/driver/middleware/cloudfront/middleware.go new file mode 100644 index 000000000..b0618d1aa --- /dev/null +++ b/vendor/github.com/docker/distribution/registry/storage/driver/middleware/cloudfront/middleware.go @@ -0,0 +1,136 @@ +// Package middleware - cloudfront wrapper for storage libs +// N.B. currently only works with S3, not arbitrary sites +// +package middleware + +import ( + "crypto/x509" + "encoding/pem" + "fmt" + "io/ioutil" + "net/url" + "strings" + "time" + + "github.com/aws/aws-sdk-go/service/cloudfront/sign" + "github.com/docker/distribution/context" + storagedriver "github.com/docker/distribution/registry/storage/driver" + storagemiddleware "github.com/docker/distribution/registry/storage/driver/middleware" +) + +// cloudFrontStorageMiddleware provides a simple implementation of layerHandler that +// constructs temporary signed CloudFront URLs from the storagedriver layer URL, +// then issues HTTP Temporary Redirects to this CloudFront content URL. +type cloudFrontStorageMiddleware struct { + storagedriver.StorageDriver + urlSigner *sign.URLSigner + baseURL string + duration time.Duration +} + +var _ storagedriver.StorageDriver = &cloudFrontStorageMiddleware{} + +// newCloudFrontLayerHandler constructs and returns a new CloudFront +// LayerHandler implementation. 
+// Required options: baseurl, privatekey, keypairid +func newCloudFrontStorageMiddleware(storageDriver storagedriver.StorageDriver, options map[string]interface{}) (storagedriver.StorageDriver, error) { + base, ok := options["baseurl"] + if !ok { + return nil, fmt.Errorf("no baseurl provided") + } + baseURL, ok := base.(string) + if !ok { + return nil, fmt.Errorf("baseurl must be a string") + } + if !strings.Contains(baseURL, "://") { + baseURL = "https://" + baseURL + } + if !strings.HasSuffix(baseURL, "/") { + baseURL += "/" + } + if _, err := url.Parse(baseURL); err != nil { + return nil, fmt.Errorf("invalid baseurl: %v", err) + } + pk, ok := options["privatekey"] + if !ok { + return nil, fmt.Errorf("no privatekey provided") + } + pkPath, ok := pk.(string) + if !ok { + return nil, fmt.Errorf("privatekey must be a string") + } + kpid, ok := options["keypairid"] + if !ok { + return nil, fmt.Errorf("no keypairid provided") + } + keypairID, ok := kpid.(string) + if !ok { + return nil, fmt.Errorf("keypairid must be a string") + } + + pkBytes, err := ioutil.ReadFile(pkPath) + if err != nil { + return nil, fmt.Errorf("failed to read privatekey file: %s", err) + } + + block, _ := pem.Decode([]byte(pkBytes)) + if block == nil { + return nil, fmt.Errorf("failed to decode private key as an rsa private key") + } + privateKey, err := x509.ParsePKCS1PrivateKey(block.Bytes) + if err != nil { + return nil, err + } + + urlSigner := sign.NewURLSigner(keypairID, privateKey) + + duration := 20 * time.Minute + d, ok := options["duration"] + if ok { + switch d := d.(type) { + case time.Duration: + duration = d + case string: + dur, err := time.ParseDuration(d) + if err != nil { + return nil, fmt.Errorf("invalid duration: %s", err) + } + duration = dur + } + } + + return &cloudFrontStorageMiddleware{ + StorageDriver: storageDriver, + urlSigner: urlSigner, + baseURL: baseURL, + duration: duration, + }, nil +} + +// S3BucketKeyer is any type that is capable of returning the S3 bucket key +// which should be cached by AWS CloudFront. +type S3BucketKeyer interface { + S3BucketKey(path string) string +} + +// Resolve returns an http.Handler which can serve the contents of the given +// Layer, or an error if not supported by the storagedriver. +func (lh *cloudFrontStorageMiddleware) URLFor(ctx context.Context, path string, options map[string]interface{}) (string, error) { + // TODO(endophage): currently only supports S3 + keyer, ok := lh.StorageDriver.(S3BucketKeyer) + if !ok { + context.GetLogger(ctx).Warn("the CloudFront middleware does not support this backend storage driver") + return lh.StorageDriver.URLFor(ctx, path, options) + } + + cfURL, err := lh.urlSigner.Sign(lh.baseURL+keyer.S3BucketKey(path), time.Now().Add(lh.duration)) + if err != nil { + return "", err + } + return cfURL, nil +} + +// init registers the cloudfront layerHandler backend. 
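Middleware constructors like the CloudFront one above are not called directly; the registry looks them up by name through the storagemiddleware registry (added later in this diff) and passes in the options map from its configuration. A hedged sketch of that wiring, using the simpler "redirect" middleware and the in-memory driver so it needs no credentials (the baseurl value is a placeholder):

package main

import (
	"fmt"

	"github.com/docker/distribution/context"
	"github.com/docker/distribution/registry/storage/driver/inmemory"
	storagemiddleware "github.com/docker/distribution/registry/storage/driver/middleware"
	_ "github.com/docker/distribution/registry/storage/driver/middleware/redirect" // registers "redirect"
)

func main() {
	base := inmemory.New()

	// Options would normally come from the registry configuration file.
	options := map[string]interface{}{"baseurl": "https://cdn.example.com"}

	wrapped, err := storagemiddleware.Get("redirect", options, base)
	if err != nil {
		panic(err)
	}

	// URLFor is the only method the redirect middleware overrides; everything
	// else falls through to the embedded driver.
	u, err := wrapped.URLFor(context.Background(), "/some/blob", nil)
	if err != nil {
		panic(err)
	}
	fmt.Println(u) // https://cdn.example.com/some/blob
}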
+func init() { + storagemiddleware.Register("cloudfront", storagemiddleware.InitFunc(newCloudFrontStorageMiddleware)) +} diff --git a/vendor/github.com/docker/distribution/registry/storage/driver/middleware/redirect/middleware.go b/vendor/github.com/docker/distribution/registry/storage/driver/middleware/redirect/middleware.go new file mode 100644 index 000000000..20cd7daa7 --- /dev/null +++ b/vendor/github.com/docker/distribution/registry/storage/driver/middleware/redirect/middleware.go @@ -0,0 +1,50 @@ +package middleware + +import ( + "fmt" + "net/url" + + "github.com/docker/distribution/context" + storagedriver "github.com/docker/distribution/registry/storage/driver" + storagemiddleware "github.com/docker/distribution/registry/storage/driver/middleware" +) + +type redirectStorageMiddleware struct { + storagedriver.StorageDriver + scheme string + host string +} + +var _ storagedriver.StorageDriver = &redirectStorageMiddleware{} + +func newRedirectStorageMiddleware(sd storagedriver.StorageDriver, options map[string]interface{}) (storagedriver.StorageDriver, error) { + o, ok := options["baseurl"] + if !ok { + return nil, fmt.Errorf("no baseurl provided") + } + b, ok := o.(string) + if !ok { + return nil, fmt.Errorf("baseurl must be a string") + } + u, err := url.Parse(b) + if err != nil { + return nil, fmt.Errorf("unable to parse redirect baseurl: %s", b) + } + if u.Scheme == "" { + return nil, fmt.Errorf("no scheme specified for redirect baseurl") + } + if u.Host == "" { + return nil, fmt.Errorf("no host specified for redirect baseurl") + } + + return &redirectStorageMiddleware{StorageDriver: sd, scheme: u.Scheme, host: u.Host}, nil +} + +func (r *redirectStorageMiddleware) URLFor(ctx context.Context, path string, options map[string]interface{}) (string, error) { + u := &url.URL{Scheme: r.scheme, Host: r.host, Path: path} + return u.String(), nil +} + +func init() { + storagemiddleware.Register("redirect", storagemiddleware.InitFunc(newRedirectStorageMiddleware)) +} diff --git a/vendor/github.com/docker/distribution/registry/storage/driver/middleware/redirect/middleware_test.go b/vendor/github.com/docker/distribution/registry/storage/driver/middleware/redirect/middleware_test.go new file mode 100644 index 000000000..1eb6309f8 --- /dev/null +++ b/vendor/github.com/docker/distribution/registry/storage/driver/middleware/redirect/middleware_test.go @@ -0,0 +1,58 @@ +package middleware + +import ( + "testing" + + check "gopkg.in/check.v1" +) + +func Test(t *testing.T) { check.TestingT(t) } + +type MiddlewareSuite struct{} + +var _ = check.Suite(&MiddlewareSuite{}) + +func (s *MiddlewareSuite) TestNoConfig(c *check.C) { + options := make(map[string]interface{}) + _, err := newRedirectStorageMiddleware(nil, options) + c.Assert(err, check.ErrorMatches, "no baseurl provided") +} + +func (s *MiddlewareSuite) TestMissingScheme(c *check.C) { + options := make(map[string]interface{}) + options["baseurl"] = "example.com" + _, err := newRedirectStorageMiddleware(nil, options) + c.Assert(err, check.ErrorMatches, "no scheme specified for redirect baseurl") +} + +func (s *MiddlewareSuite) TestHttpsPort(c *check.C) { + options := make(map[string]interface{}) + options["baseurl"] = "https://example.com:5443" + middleware, err := newRedirectStorageMiddleware(nil, options) + c.Assert(err, check.Equals, nil) + + m, ok := middleware.(*redirectStorageMiddleware) + c.Assert(ok, check.Equals, true) + c.Assert(m.scheme, check.Equals, "https") + c.Assert(m.host, check.Equals, "example.com:5443") + + url, err := 
middleware.URLFor(nil, "/rick/data", nil) + c.Assert(err, check.Equals, nil) + c.Assert(url, check.Equals, "https://example.com:5443/rick/data") +} + +func (s *MiddlewareSuite) TestHTTP(c *check.C) { + options := make(map[string]interface{}) + options["baseurl"] = "http://example.com" + middleware, err := newRedirectStorageMiddleware(nil, options) + c.Assert(err, check.Equals, nil) + + m, ok := middleware.(*redirectStorageMiddleware) + c.Assert(ok, check.Equals, true) + c.Assert(m.scheme, check.Equals, "http") + c.Assert(m.host, check.Equals, "example.com") + + url, err := middleware.URLFor(nil, "morty/data", nil) + c.Assert(err, check.Equals, nil) + c.Assert(url, check.Equals, "http://example.com/morty/data") +} diff --git a/vendor/github.com/docker/distribution/registry/storage/driver/middleware/storagemiddleware.go b/vendor/github.com/docker/distribution/registry/storage/driver/middleware/storagemiddleware.go new file mode 100644 index 000000000..7e40a8dd9 --- /dev/null +++ b/vendor/github.com/docker/distribution/registry/storage/driver/middleware/storagemiddleware.go @@ -0,0 +1,39 @@ +package storagemiddleware + +import ( + "fmt" + + storagedriver "github.com/docker/distribution/registry/storage/driver" +) + +// InitFunc is the type of a StorageMiddleware factory function and is +// used to register the constructor for different StorageMiddleware backends. +type InitFunc func(storageDriver storagedriver.StorageDriver, options map[string]interface{}) (storagedriver.StorageDriver, error) + +var storageMiddlewares map[string]InitFunc + +// Register is used to register an InitFunc for +// a StorageMiddleware backend with the given name. +func Register(name string, initFunc InitFunc) error { + if storageMiddlewares == nil { + storageMiddlewares = make(map[string]InitFunc) + } + if _, exists := storageMiddlewares[name]; exists { + return fmt.Errorf("name already registered: %s", name) + } + + storageMiddlewares[name] = initFunc + + return nil +} + +// Get constructs a StorageMiddleware with the given options using the named backend. +func Get(name string, options map[string]interface{}, storageDriver storagedriver.StorageDriver) (storagedriver.StorageDriver, error) { + if storageMiddlewares != nil { + if initFunc, exists := storageMiddlewares[name]; exists { + return initFunc(storageDriver, options) + } + } + + return nil, fmt.Errorf("no storage middleware registered with name: %s", name) +} diff --git a/vendor/github.com/docker/distribution/registry/storage/driver/oss/doc.go b/vendor/github.com/docker/distribution/registry/storage/driver/oss/doc.go new file mode 100644 index 000000000..d1bc932f8 --- /dev/null +++ b/vendor/github.com/docker/distribution/registry/storage/driver/oss/doc.go @@ -0,0 +1,3 @@ +// Package oss implements the Aliyun OSS Storage driver backend. Support can be +// enabled by including the "include_oss" build tag. +package oss diff --git a/vendor/github.com/docker/distribution/registry/storage/driver/oss/oss.go b/vendor/github.com/docker/distribution/registry/storage/driver/oss/oss.go new file mode 100644 index 000000000..4d215928b --- /dev/null +++ b/vendor/github.com/docker/distribution/registry/storage/driver/oss/oss.go @@ -0,0 +1,683 @@ +// Package oss provides a storagedriver.StorageDriver implementation to +// store blobs in Aliyun OSS cloud storage. +// +// This package leverages the denverdino/aliyungo client library for interfacing with +// oss. 
+// +// Because OSS is a key, value store the Stat call does not support last modification +// time for directories (directories are an abstraction for key, value stores) +// +// +build include_oss + +package oss + +import ( + "bytes" + "fmt" + "io" + "io/ioutil" + "net/http" + "reflect" + "strconv" + "strings" + "time" + + "github.com/docker/distribution/context" + + "github.com/Sirupsen/logrus" + "github.com/denverdino/aliyungo/oss" + storagedriver "github.com/docker/distribution/registry/storage/driver" + "github.com/docker/distribution/registry/storage/driver/base" + "github.com/docker/distribution/registry/storage/driver/factory" +) + +const driverName = "oss" + +// minChunkSize defines the minimum multipart upload chunk size +// OSS API requires multipart upload chunks to be at least 5MB +const minChunkSize = 5 << 20 + +const defaultChunkSize = 2 * minChunkSize +const defaultTimeout = 2 * time.Minute // 2 minute timeout per chunk + +// listMax is the largest amount of objects you can request from OSS in a list call +const listMax = 1000 + +//DriverParameters A struct that encapsulates all of the driver parameters after all values have been set +type DriverParameters struct { + AccessKeyID string + AccessKeySecret string + Bucket string + Region oss.Region + Internal bool + Encrypt bool + Secure bool + ChunkSize int64 + RootDirectory string + Endpoint string +} + +func init() { + factory.Register(driverName, &ossDriverFactory{}) +} + +// ossDriverFactory implements the factory.StorageDriverFactory interface +type ossDriverFactory struct{} + +func (factory *ossDriverFactory) Create(parameters map[string]interface{}) (storagedriver.StorageDriver, error) { + return FromParameters(parameters) +} + +type driver struct { + Client *oss.Client + Bucket *oss.Bucket + ChunkSize int64 + Encrypt bool + RootDirectory string +} + +type baseEmbed struct { + base.Base +} + +// Driver is a storagedriver.StorageDriver implementation backed by Aliyun OSS +// Objects are stored at absolute keys in the provided bucket. 
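Like the other cloud backends, the OSS driver is normally instantiated by name from a parameters map rather than by filling in DriverParameters directly. A hedged sketch of that path, assuming the factory.Create helper from the same factory package (not shown in this diff); all values are placeholders, the driver must be built with the include_oss tag, and New validates the credentials with a bucket listing, so real credentials are needed for it to succeed:

package main

import (
	"fmt"

	"github.com/docker/distribution/registry/storage/driver/factory"
	_ "github.com/docker/distribution/registry/storage/driver/oss" // registers the "oss" driver
)

func main() {
	// These are the keys FromParameters (just after this aside) looks for;
	// chunksize, endpoint, internal, encrypt and secure are optional.
	params := map[string]interface{}{
		"accesskeyid":     "my-access-key-id",
		"accesskeysecret": "my-access-key-secret",
		"region":          "oss-cn-hangzhou",
		"bucket":          "my-registry-bucket",
		"rootdirectory":   "/registry",
	}

	d, err := factory.Create("oss", params)
	if err != nil {
		panic(err)
	}
	fmt.Println(d.Name()) // oss
}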
+type Driver struct { + baseEmbed +} + +// FromParameters constructs a new Driver with a given parameters map +// Required parameters: +// - accesskey +// - secretkey +// - region +// - bucket +// - encrypt +func FromParameters(parameters map[string]interface{}) (*Driver, error) { + // Providing no values for these is valid in case the user is authenticating + + accessKey, ok := parameters["accesskeyid"] + if !ok { + return nil, fmt.Errorf("No accesskeyid parameter provided") + } + secretKey, ok := parameters["accesskeysecret"] + if !ok { + return nil, fmt.Errorf("No accesskeysecret parameter provided") + } + + regionName, ok := parameters["region"] + if !ok || fmt.Sprint(regionName) == "" { + return nil, fmt.Errorf("No region parameter provided") + } + + bucket, ok := parameters["bucket"] + if !ok || fmt.Sprint(bucket) == "" { + return nil, fmt.Errorf("No bucket parameter provided") + } + + internalBool := false + internal, ok := parameters["internal"] + if ok { + internalBool, ok = internal.(bool) + if !ok { + return nil, fmt.Errorf("The internal parameter should be a boolean") + } + } + + encryptBool := false + encrypt, ok := parameters["encrypt"] + if ok { + encryptBool, ok = encrypt.(bool) + if !ok { + return nil, fmt.Errorf("The encrypt parameter should be a boolean") + } + } + + secureBool := true + secure, ok := parameters["secure"] + if ok { + secureBool, ok = secure.(bool) + if !ok { + return nil, fmt.Errorf("The secure parameter should be a boolean") + } + } + + chunkSize := int64(defaultChunkSize) + chunkSizeParam, ok := parameters["chunksize"] + if ok { + switch v := chunkSizeParam.(type) { + case string: + vv, err := strconv.ParseInt(v, 0, 64) + if err != nil { + return nil, fmt.Errorf("chunksize parameter must be an integer, %v invalid", chunkSizeParam) + } + chunkSize = vv + case int64: + chunkSize = v + case int, uint, int32, uint32, uint64: + chunkSize = reflect.ValueOf(v).Convert(reflect.TypeOf(chunkSize)).Int() + default: + return nil, fmt.Errorf("invalid valud for chunksize: %#v", chunkSizeParam) + } + + if chunkSize < minChunkSize { + return nil, fmt.Errorf("The chunksize %#v parameter should be a number that is larger than or equal to %d", chunkSize, minChunkSize) + } + } + + rootDirectory, ok := parameters["rootdirectory"] + if !ok { + rootDirectory = "" + } + + endpoint, ok := parameters["endpoint"] + if !ok { + endpoint = "" + } + + params := DriverParameters{ + AccessKeyID: fmt.Sprint(accessKey), + AccessKeySecret: fmt.Sprint(secretKey), + Bucket: fmt.Sprint(bucket), + Region: oss.Region(fmt.Sprint(regionName)), + ChunkSize: chunkSize, + RootDirectory: fmt.Sprint(rootDirectory), + Encrypt: encryptBool, + Secure: secureBool, + Internal: internalBool, + Endpoint: fmt.Sprint(endpoint), + } + + return New(params) +} + +// New constructs a new Driver with the given Aliyun credentials, region, encryption flag, and +// bucketName +func New(params DriverParameters) (*Driver, error) { + + client := oss.NewOSSClient(params.Region, params.Internal, params.AccessKeyID, params.AccessKeySecret, params.Secure) + client.SetEndpoint(params.Endpoint) + bucket := client.Bucket(params.Bucket) + client.SetDebug(false) + + // Validate that the given credentials have at least read permissions in the + // given bucket scope. 
+ if _, err := bucket.List(strings.TrimRight(params.RootDirectory, "/"), "", "", 1); err != nil { + return nil, err + } + + // TODO(tg123): Currently multipart uploads have no timestamps, so this would be unwise + // if you initiated a new OSS client while another one is running on the same bucket. + + d := &driver{ + Client: client, + Bucket: bucket, + ChunkSize: params.ChunkSize, + Encrypt: params.Encrypt, + RootDirectory: params.RootDirectory, + } + + return &Driver{ + baseEmbed: baseEmbed{ + Base: base.Base{ + StorageDriver: d, + }, + }, + }, nil +} + +// Implement the storagedriver.StorageDriver interface + +func (d *driver) Name() string { + return driverName +} + +// GetContent retrieves the content stored at "path" as a []byte. +func (d *driver) GetContent(ctx context.Context, path string) ([]byte, error) { + content, err := d.Bucket.Get(d.ossPath(path)) + if err != nil { + return nil, parseError(path, err) + } + return content, nil +} + +// PutContent stores the []byte content at a location designated by "path". +func (d *driver) PutContent(ctx context.Context, path string, contents []byte) error { + return parseError(path, d.Bucket.Put(d.ossPath(path), contents, d.getContentType(), getPermissions(), d.getOptions())) +} + +// Reader retrieves an io.ReadCloser for the content stored at "path" with a +// given byte offset. +func (d *driver) Reader(ctx context.Context, path string, offset int64) (io.ReadCloser, error) { + headers := make(http.Header) + headers.Add("Range", "bytes="+strconv.FormatInt(offset, 10)+"-") + + resp, err := d.Bucket.GetResponseWithHeaders(d.ossPath(path), headers) + if err != nil { + return nil, parseError(path, err) + } + + // Due to Aliyun OSS API, status 200 and whole object will be return instead of an + // InvalidRange error when range is invalid. + // + // OSS sever will always return http.StatusPartialContent if range is acceptable. + if resp.StatusCode != http.StatusPartialContent { + resp.Body.Close() + return ioutil.NopCloser(bytes.NewReader(nil)), nil + } + + return resp.Body, nil +} + +// Writer returns a FileWriter which will store the content written to it +// at the location designated by "path" after the call to Commit. +func (d *driver) Writer(ctx context.Context, path string, append bool) (storagedriver.FileWriter, error) { + key := d.ossPath(path) + if !append { + // TODO (brianbland): cancel other uploads at this path + multi, err := d.Bucket.InitMulti(key, d.getContentType(), getPermissions(), d.getOptions()) + if err != nil { + return nil, err + } + return d.newWriter(key, multi, nil), nil + } + multis, _, err := d.Bucket.ListMulti(key, "") + if err != nil { + return nil, parseError(path, err) + } + for _, multi := range multis { + if key != multi.Key { + continue + } + parts, err := multi.ListParts() + if err != nil { + return nil, parseError(path, err) + } + var multiSize int64 + for _, part := range parts { + multiSize += part.Size + } + return d.newWriter(key, multi, parts), nil + } + return nil, storagedriver.PathNotFoundError{Path: path} +} + +// Stat retrieves the FileInfo for the given path, including the current size +// in bytes and the creation time. 
+func (d *driver) Stat(ctx context.Context, path string) (storagedriver.FileInfo, error) { + listResponse, err := d.Bucket.List(d.ossPath(path), "", "", 1) + if err != nil { + return nil, err + } + + fi := storagedriver.FileInfoFields{ + Path: path, + } + + if len(listResponse.Contents) == 1 { + if listResponse.Contents[0].Key != d.ossPath(path) { + fi.IsDir = true + } else { + fi.IsDir = false + fi.Size = listResponse.Contents[0].Size + + timestamp, err := time.Parse(time.RFC3339Nano, listResponse.Contents[0].LastModified) + if err != nil { + return nil, err + } + fi.ModTime = timestamp + } + } else if len(listResponse.CommonPrefixes) == 1 { + fi.IsDir = true + } else { + return nil, storagedriver.PathNotFoundError{Path: path} + } + + return storagedriver.FileInfoInternal{FileInfoFields: fi}, nil +} + +// List returns a list of the objects that are direct descendants of the given path. +func (d *driver) List(ctx context.Context, opath string) ([]string, error) { + path := opath + if path != "/" && opath[len(path)-1] != '/' { + path = path + "/" + } + + // This is to cover for the cases when the rootDirectory of the driver is either "" or "/". + // In those cases, there is no root prefix to replace and we must actually add a "/" to all + // results in order to keep them as valid paths as recognized by storagedriver.PathRegexp + prefix := "" + if d.ossPath("") == "" { + prefix = "/" + } + + listResponse, err := d.Bucket.List(d.ossPath(path), "/", "", listMax) + if err != nil { + return nil, parseError(opath, err) + } + + files := []string{} + directories := []string{} + + for { + for _, key := range listResponse.Contents { + files = append(files, strings.Replace(key.Key, d.ossPath(""), prefix, 1)) + } + + for _, commonPrefix := range listResponse.CommonPrefixes { + directories = append(directories, strings.Replace(commonPrefix[0:len(commonPrefix)-1], d.ossPath(""), prefix, 1)) + } + + if listResponse.IsTruncated { + listResponse, err = d.Bucket.List(d.ossPath(path), "/", listResponse.NextMarker, listMax) + if err != nil { + return nil, err + } + } else { + break + } + } + + if opath != "/" { + if len(files) == 0 && len(directories) == 0 { + // Treat empty response as missing directory, since we don't actually + // have directories in s3. + return nil, storagedriver.PathNotFoundError{Path: opath} + } + } + + return append(files, directories...), nil +} + +const maxConcurrency = 10 + +// Move moves an object stored at sourcePath to destPath, removing the original +// object. +func (d *driver) Move(ctx context.Context, sourcePath string, destPath string) error { + logrus.Infof("Move from %s to %s", d.ossPath(sourcePath), d.ossPath(destPath)) + err := d.Bucket.CopyLargeFileInParallel(d.ossPath(sourcePath), d.ossPath(destPath), + d.getContentType(), + getPermissions(), + oss.Options{}, + maxConcurrency) + if err != nil { + logrus.Errorf("Failed for move from %s to %s: %v", d.ossPath(sourcePath), d.ossPath(destPath), err) + return parseError(sourcePath, err) + } + + return d.Delete(ctx, sourcePath) +} + +// Delete recursively deletes all objects stored at "path" and its subpaths. 
+func (d *driver) Delete(ctx context.Context, path string) error { + ossPath := d.ossPath(path) + listResponse, err := d.Bucket.List(ossPath, "", "", listMax) + if err != nil || len(listResponse.Contents) == 0 { + return storagedriver.PathNotFoundError{Path: path} + } + + ossObjects := make([]oss.Object, listMax) + + for len(listResponse.Contents) > 0 { + numOssObjects := len(listResponse.Contents) + for index, key := range listResponse.Contents { + // Stop if we encounter a key that is not a subpath (so that deleting "/a" does not delete "/ab"). + if len(key.Key) > len(ossPath) && (key.Key)[len(ossPath)] != '/' { + numOssObjects = index + break + } + ossObjects[index].Key = key.Key + } + + err := d.Bucket.DelMulti(oss.Delete{Quiet: false, Objects: ossObjects[0:numOssObjects]}) + if err != nil { + return nil + } + + if numOssObjects < len(listResponse.Contents) { + return nil + } + + listResponse, err = d.Bucket.List(d.ossPath(path), "", "", listMax) + if err != nil { + return err + } + } + + return nil +} + +// URLFor returns a URL which may be used to retrieve the content stored at the given path. +// May return an UnsupportedMethodErr in certain StorageDriver implementations. +func (d *driver) URLFor(ctx context.Context, path string, options map[string]interface{}) (string, error) { + methodString := "GET" + method, ok := options["method"] + if ok { + methodString, ok = method.(string) + if !ok || (methodString != "GET") { + return "", storagedriver.ErrUnsupportedMethod{} + } + } + + expiresTime := time.Now().Add(20 * time.Minute) + + expires, ok := options["expiry"] + if ok { + et, ok := expires.(time.Time) + if ok { + expiresTime = et + } + } + logrus.Infof("methodString: %s, expiresTime: %v", methodString, expiresTime) + signedURL := d.Bucket.SignedURLWithMethod(methodString, d.ossPath(path), expiresTime, nil, nil) + logrus.Infof("signed URL: %s", signedURL) + return signedURL, nil +} + +func (d *driver) ossPath(path string) string { + return strings.TrimLeft(strings.TrimRight(d.RootDirectory, "/")+path, "/") +} + +func parseError(path string, err error) error { + if ossErr, ok := err.(*oss.Error); ok && ossErr.StatusCode == http.StatusNotFound && (ossErr.Code == "NoSuchKey" || ossErr.Code == "") { + return storagedriver.PathNotFoundError{Path: path} + } + + return err +} + +func hasCode(err error, code string) bool { + ossErr, ok := err.(*oss.Error) + return ok && ossErr.Code == code +} + +func (d *driver) getOptions() oss.Options { + return oss.Options{ServerSideEncryption: d.Encrypt} +} + +func getPermissions() oss.ACL { + return oss.Private +} + +func (d *driver) getContentType() string { + return "application/octet-stream" +} + +// writer attempts to upload parts to S3 in a buffered fashion where the last +// part is at least as large as the chunksize, so the multipart upload could be +// cleanly resumed in the future. This is violated if Close is called after less +// than a full chunk is written. 
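// Editorial aside (not in the upstream source): the following walk-through of
// the buffering described above is illustrative only, assuming ChunkSize = 8 MB.
//
//   Write(12 MB) -> readyPart fills to 8 MB, pendingPart holds the remaining
//                   4 MB; nothing is uploaded yet because pendingPart is not full.
//   Write(6 MB)  -> pendingPart reaches 8 MB, so flushPart uploads the 8 MB
//                   readyPart as a part and promotes pendingPart to readyPart;
//                   the last 2 MB of the write become the new pendingPart.
//   Commit()     -> flushPart sees a pending part smaller than ChunkSize, folds
//                   it into readyPart, uploads the final 10 MB part, and
//                   Complete finishes the multipart upload.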
+type writer struct { + driver *driver + key string + multi *oss.Multi + parts []oss.Part + size int64 + readyPart []byte + pendingPart []byte + closed bool + committed bool + cancelled bool +} + +func (d *driver) newWriter(key string, multi *oss.Multi, parts []oss.Part) storagedriver.FileWriter { + var size int64 + for _, part := range parts { + size += part.Size + } + return &writer{ + driver: d, + key: key, + multi: multi, + parts: parts, + size: size, + } +} + +func (w *writer) Write(p []byte) (int, error) { + if w.closed { + return 0, fmt.Errorf("already closed") + } else if w.committed { + return 0, fmt.Errorf("already committed") + } else if w.cancelled { + return 0, fmt.Errorf("already cancelled") + } + + // If the last written part is smaller than minChunkSize, we need to make a + // new multipart upload :sadface: + if len(w.parts) > 0 && int(w.parts[len(w.parts)-1].Size) < minChunkSize { + err := w.multi.Complete(w.parts) + if err != nil { + w.multi.Abort() + return 0, err + } + + multi, err := w.driver.Bucket.InitMulti(w.key, w.driver.getContentType(), getPermissions(), w.driver.getOptions()) + if err != nil { + return 0, err + } + w.multi = multi + + // If the entire written file is smaller than minChunkSize, we need to make + // a new part from scratch :double sad face: + if w.size < minChunkSize { + contents, err := w.driver.Bucket.Get(w.key) + if err != nil { + return 0, err + } + w.parts = nil + w.readyPart = contents + } else { + // Otherwise we can use the old file as the new first part + _, part, err := multi.PutPartCopy(1, oss.CopyOptions{}, w.driver.Bucket.Name+"/"+w.key) + if err != nil { + return 0, err + } + w.parts = []oss.Part{part} + } + } + + var n int + + for len(p) > 0 { + // If no parts are ready to write, fill up the first part + if neededBytes := int(w.driver.ChunkSize) - len(w.readyPart); neededBytes > 0 { + if len(p) >= neededBytes { + w.readyPart = append(w.readyPart, p[:neededBytes]...) + n += neededBytes + p = p[neededBytes:] + } else { + w.readyPart = append(w.readyPart, p...) + n += len(p) + p = nil + } + } + + if neededBytes := int(w.driver.ChunkSize) - len(w.pendingPart); neededBytes > 0 { + if len(p) >= neededBytes { + w.pendingPart = append(w.pendingPart, p[:neededBytes]...) + n += neededBytes + p = p[neededBytes:] + err := w.flushPart() + if err != nil { + w.size += int64(n) + return n, err + } + } else { + w.pendingPart = append(w.pendingPart, p...) + n += len(p) + p = nil + } + } + } + w.size += int64(n) + return n, nil +} + +func (w *writer) Size() int64 { + return w.size +} + +func (w *writer) Close() error { + if w.closed { + return fmt.Errorf("already closed") + } + w.closed = true + return w.flushPart() +} + +func (w *writer) Cancel() error { + if w.closed { + return fmt.Errorf("already closed") + } else if w.committed { + return fmt.Errorf("already committed") + } + w.cancelled = true + err := w.multi.Abort() + return err +} + +func (w *writer) Commit() error { + if w.closed { + return fmt.Errorf("already closed") + } else if w.committed { + return fmt.Errorf("already committed") + } else if w.cancelled { + return fmt.Errorf("already cancelled") + } + err := w.flushPart() + if err != nil { + return err + } + w.committed = true + err = w.multi.Complete(w.parts) + if err != nil { + w.multi.Abort() + return err + } + return nil +} + +// flushPart flushes buffers to write a part to S3. 
+// Only called by Write (with both buffers full) and Close/Commit (always) +func (w *writer) flushPart() error { + if len(w.readyPart) == 0 && len(w.pendingPart) == 0 { + // nothing to write + return nil + } + if len(w.pendingPart) < int(w.driver.ChunkSize) { + // closing with a small pending part + // combine ready and pending to avoid writing a small part + w.readyPart = append(w.readyPart, w.pendingPart...) + w.pendingPart = nil + } + + part, err := w.multi.PutPart(len(w.parts)+1, bytes.NewReader(w.readyPart)) + if err != nil { + return err + } + w.parts = append(w.parts, part) + w.readyPart = w.pendingPart + w.pendingPart = nil + return nil +} diff --git a/vendor/github.com/docker/distribution/registry/storage/driver/oss/oss_test.go b/vendor/github.com/docker/distribution/registry/storage/driver/oss/oss_test.go new file mode 100644 index 000000000..fbae5d9ca --- /dev/null +++ b/vendor/github.com/docker/distribution/registry/storage/driver/oss/oss_test.go @@ -0,0 +1,144 @@ +// +build include_oss + +package oss + +import ( + "io/ioutil" + + alioss "github.com/denverdino/aliyungo/oss" + "github.com/docker/distribution/context" + storagedriver "github.com/docker/distribution/registry/storage/driver" + "github.com/docker/distribution/registry/storage/driver/testsuites" + //"log" + "os" + "strconv" + "testing" + + "gopkg.in/check.v1" +) + +// Hook up gocheck into the "go test" runner. +func Test(t *testing.T) { check.TestingT(t) } + +var ossDriverConstructor func(rootDirectory string) (*Driver, error) + +var skipCheck func() string + +func init() { + accessKey := os.Getenv("ALIYUN_ACCESS_KEY_ID") + secretKey := os.Getenv("ALIYUN_ACCESS_KEY_SECRET") + bucket := os.Getenv("OSS_BUCKET") + region := os.Getenv("OSS_REGION") + internal := os.Getenv("OSS_INTERNAL") + encrypt := os.Getenv("OSS_ENCRYPT") + secure := os.Getenv("OSS_SECURE") + endpoint := os.Getenv("OSS_ENDPOINT") + root, err := ioutil.TempDir("", "driver-") + if err != nil { + panic(err) + } + defer os.Remove(root) + + ossDriverConstructor = func(rootDirectory string) (*Driver, error) { + encryptBool := false + if encrypt != "" { + encryptBool, err = strconv.ParseBool(encrypt) + if err != nil { + return nil, err + } + } + + secureBool := false + if secure != "" { + secureBool, err = strconv.ParseBool(secure) + if err != nil { + return nil, err + } + } + + internalBool := false + if internal != "" { + internalBool, err = strconv.ParseBool(internal) + if err != nil { + return nil, err + } + } + + parameters := DriverParameters{ + AccessKeyID: accessKey, + AccessKeySecret: secretKey, + Bucket: bucket, + Region: alioss.Region(region), + Internal: internalBool, + ChunkSize: minChunkSize, + RootDirectory: rootDirectory, + Encrypt: encryptBool, + Secure: secureBool, + Endpoint: endpoint, + } + + return New(parameters) + } + + // Skip OSS storage driver tests if environment variable parameters are not provided + skipCheck = func() string { + if accessKey == "" || secretKey == "" || region == "" || bucket == "" || encrypt == "" { + return "Must set ALIYUN_ACCESS_KEY_ID, ALIYUN_ACCESS_KEY_SECRET, OSS_REGION, OSS_BUCKET, and OSS_ENCRYPT to run OSS tests" + } + return "" + } + + testsuites.RegisterSuite(func() (storagedriver.StorageDriver, error) { + return ossDriverConstructor(root) + }, skipCheck) +} + +func TestEmptyRootList(t *testing.T) { + if skipCheck() != "" { + t.Skip(skipCheck()) + } + + validRoot, err := ioutil.TempDir("", "driver-") + if err != nil { + t.Fatalf("unexpected error creating temporary directory: %v", err) + } + defer 
os.Remove(validRoot) + + rootedDriver, err := ossDriverConstructor(validRoot) + if err != nil { + t.Fatalf("unexpected error creating rooted driver: %v", err) + } + + emptyRootDriver, err := ossDriverConstructor("") + if err != nil { + t.Fatalf("unexpected error creating empty root driver: %v", err) + } + + slashRootDriver, err := ossDriverConstructor("/") + if err != nil { + t.Fatalf("unexpected error creating slash root driver: %v", err) + } + + filename := "/test" + contents := []byte("contents") + ctx := context.Background() + err = rootedDriver.PutContent(ctx, filename, contents) + if err != nil { + t.Fatalf("unexpected error creating content: %v", err) + } + defer rootedDriver.Delete(ctx, filename) + + keys, err := emptyRootDriver.List(ctx, "/") + for _, path := range keys { + if !storagedriver.PathRegexp.MatchString(path) { + t.Fatalf("unexpected string in path: %q != %q", path, storagedriver.PathRegexp) + } + } + + keys, err = slashRootDriver.List(ctx, "/") + for _, path := range keys { + if !storagedriver.PathRegexp.MatchString(path) { + t.Fatalf("unexpected string in path: %q != %q", path, storagedriver.PathRegexp) + } + } +} diff --git a/vendor/github.com/docker/distribution/registry/storage/driver/s3-aws/s3.go b/vendor/github.com/docker/distribution/registry/storage/driver/s3-aws/s3.go new file mode 100644 index 000000000..c9d19c46c --- /dev/null +++ b/vendor/github.com/docker/distribution/registry/storage/driver/s3-aws/s3.go @@ -0,0 +1,1189 @@ +// Package s3 provides a storagedriver.StorageDriver implementation to +// store blobs in Amazon S3 cloud storage. +// +// This package leverages the official aws client library for interfacing with +// S3. +// +// Because S3 is a key, value store the Stat call does not support last modification +// time for directories (directories are an abstraction for key, value stores) +// +// Keep in mind that S3 guarantees only read-after-write consistency for new +// objects, but no read-after-update or list-after-write consistency. +package s3 + +import ( + "bytes" + "fmt" + "io" + "io/ioutil" + "math" + "net/http" + "reflect" + "sort" + "strconv" + "strings" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/aws/credentials" + "github.com/aws/aws-sdk-go/aws/credentials/ec2rolecreds" + "github.com/aws/aws-sdk-go/aws/ec2metadata" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/aws/session" + "github.com/aws/aws-sdk-go/service/s3" + + "github.com/docker/distribution/context" + "github.com/docker/distribution/registry/client/transport" + storagedriver "github.com/docker/distribution/registry/storage/driver" + "github.com/docker/distribution/registry/storage/driver/base" + "github.com/docker/distribution/registry/storage/driver/factory" +) + +const driverName = "s3aws" + +// minChunkSize defines the minimum multipart upload chunk size +// S3 API requires multipart upload chunks to be at least 5MB +const minChunkSize = 5 << 20 + +// maxChunkSize defines the maximum multipart upload chunk size allowed by S3. +const maxChunkSize = 5 << 30 + +const defaultChunkSize = 2 * minChunkSize + +const ( + // defaultMultipartCopyChunkSize defines the default chunk size for all + // but the last Upload Part - Copy operation of a multipart copy. + // Empirically, 32 MB is optimal. + defaultMultipartCopyChunkSize = 32 << 20 + + // defaultMultipartCopyMaxConcurrency defines the default maximum number + // of concurrent Upload Part - Copy operations for a multipart copy. 
+ defaultMultipartCopyMaxConcurrency = 100 + + // defaultMultipartCopyThresholdSize defines the default object size + // above which multipart copy will be used. (PUT Object - Copy is used + // for objects at or below this size.) Empirically, 32 MB is optimal. + defaultMultipartCopyThresholdSize = 32 << 20 +) + +// listMax is the largest amount of objects you can request from S3 in a list call +const listMax = 1000 + +// noStorageClass defines the value to be used if storage class is not supported by the S3 endpoint +const noStorageClass = "NONE" + +// validRegions maps known s3 region identifiers to region descriptors +var validRegions = map[string]struct{}{} + +// validObjectACLs contains known s3 object Acls +var validObjectACLs = map[string]struct{}{} + +//DriverParameters A struct that encapsulates all of the driver parameters after all values have been set +type DriverParameters struct { + AccessKey string + SecretKey string + Bucket string + Region string + RegionEndpoint string + Encrypt bool + KeyID string + Secure bool + V4Auth bool + ChunkSize int64 + MultipartCopyChunkSize int64 + MultipartCopyMaxConcurrency int64 + MultipartCopyThresholdSize int64 + RootDirectory string + StorageClass string + UserAgent string + ObjectACL string +} + +func init() { + for _, region := range []string{ + "us-east-1", + "us-east-2", + "us-west-1", + "us-west-2", + "eu-west-1", + "eu-central-1", + "ap-southeast-1", + "ap-southeast-2", + "ap-northeast-1", + "ap-northeast-2", + "sa-east-1", + "cn-north-1", + "us-gov-west-1", + } { + validRegions[region] = struct{}{} + } + + for _, objectACL := range []string{ + s3.ObjectCannedACLPrivate, + s3.ObjectCannedACLPublicRead, + s3.ObjectCannedACLPublicReadWrite, + s3.ObjectCannedACLAuthenticatedRead, + s3.ObjectCannedACLAwsExecRead, + s3.ObjectCannedACLBucketOwnerRead, + s3.ObjectCannedACLBucketOwnerFullControl, + } { + validObjectACLs[objectACL] = struct{}{} + } + + // Register this as the default s3 driver in addition to s3aws + factory.Register("s3", &s3DriverFactory{}) + factory.Register(driverName, &s3DriverFactory{}) +} + +// s3DriverFactory implements the factory.StorageDriverFactory interface +type s3DriverFactory struct{} + +func (factory *s3DriverFactory) Create(parameters map[string]interface{}) (storagedriver.StorageDriver, error) { + return FromParameters(parameters) +} + +type driver struct { + S3 *s3.S3 + Bucket string + ChunkSize int64 + Encrypt bool + KeyID string + MultipartCopyChunkSize int64 + MultipartCopyMaxConcurrency int64 + MultipartCopyThresholdSize int64 + RootDirectory string + StorageClass string + ObjectACL string +} + +type baseEmbed struct { + base.Base +} + +// Driver is a storagedriver.StorageDriver implementation backed by Amazon S3 +// Objects are stored at absolute keys in the provided bucket. 
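The S3 driver follows the same configuration pattern: FromParameters (below) turns a parameters map into a validated DriverParameters value and hands it to New. A hedged sketch of calling it directly; every value is a placeholder, and regionendpoint, storageclass, objectacl and the multipart-copy knobs are optional, taking the defaults above when omitted:

package main

import (
	"fmt"

	s3driver "github.com/docker/distribution/registry/storage/driver/s3-aws"
)

func main() {
	params := map[string]interface{}{
		"accesskey":     "my-access-key",      // may be empty when using IAM instance credentials
		"secretkey":     "my-secret-key",      // may be empty when using IAM instance credentials
		"region":        "us-east-1",          // must be one of the validRegions above unless regionendpoint is set
		"bucket":        "my-registry-bucket",
		"rootdirectory": "/registry",
	}

	d, err := s3driver.FromParameters(params)
	if err != nil {
		panic(err)
	}
	fmt.Println(d.Name()) // s3aws
}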
+type Driver struct { + baseEmbed +} + +// FromParameters constructs a new Driver with a given parameters map +// Required parameters: +// - accesskey +// - secretkey +// - region +// - bucket +// - encrypt +func FromParameters(parameters map[string]interface{}) (*Driver, error) { + // Providing no values for these is valid in case the user is authenticating + // with an IAM on an ec2 instance (in which case the instance credentials will + // be summoned when GetAuth is called) + accessKey := parameters["accesskey"] + if accessKey == nil { + accessKey = "" + } + secretKey := parameters["secretkey"] + if secretKey == nil { + secretKey = "" + } + + regionEndpoint := parameters["regionendpoint"] + if regionEndpoint == nil { + regionEndpoint = "" + } + + regionName, ok := parameters["region"] + if regionName == nil || fmt.Sprint(regionName) == "" { + return nil, fmt.Errorf("No region parameter provided") + } + region := fmt.Sprint(regionName) + // Don't check the region value if a custom endpoint is provided. + if regionEndpoint == "" { + if _, ok = validRegions[region]; !ok { + return nil, fmt.Errorf("Invalid region provided: %v", region) + } + } + + bucket := parameters["bucket"] + if bucket == nil || fmt.Sprint(bucket) == "" { + return nil, fmt.Errorf("No bucket parameter provided") + } + + encryptBool := false + encrypt := parameters["encrypt"] + switch encrypt := encrypt.(type) { + case string: + b, err := strconv.ParseBool(encrypt) + if err != nil { + return nil, fmt.Errorf("The encrypt parameter should be a boolean") + } + encryptBool = b + case bool: + encryptBool = encrypt + case nil: + // do nothing + default: + return nil, fmt.Errorf("The encrypt parameter should be a boolean") + } + + secureBool := true + secure := parameters["secure"] + switch secure := secure.(type) { + case string: + b, err := strconv.ParseBool(secure) + if err != nil { + return nil, fmt.Errorf("The secure parameter should be a boolean") + } + secureBool = b + case bool: + secureBool = secure + case nil: + // do nothing + default: + return nil, fmt.Errorf("The secure parameter should be a boolean") + } + + v4Bool := true + v4auth := parameters["v4auth"] + switch v4auth := v4auth.(type) { + case string: + b, err := strconv.ParseBool(v4auth) + if err != nil { + return nil, fmt.Errorf("The v4auth parameter should be a boolean") + } + v4Bool = b + case bool: + v4Bool = v4auth + case nil: + // do nothing + default: + return nil, fmt.Errorf("The v4auth parameter should be a boolean") + } + + keyID := parameters["keyid"] + if keyID == nil { + keyID = "" + } + + chunkSize, err := getParameterAsInt64(parameters, "chunksize", defaultChunkSize, minChunkSize, maxChunkSize) + if err != nil { + return nil, err + } + + multipartCopyChunkSize, err := getParameterAsInt64(parameters, "multipartcopychunksize", defaultMultipartCopyChunkSize, minChunkSize, maxChunkSize) + if err != nil { + return nil, err + } + + multipartCopyMaxConcurrency, err := getParameterAsInt64(parameters, "multipartcopymaxconcurrency", defaultMultipartCopyMaxConcurrency, 1, math.MaxInt64) + if err != nil { + return nil, err + } + + multipartCopyThresholdSize, err := getParameterAsInt64(parameters, "multipartcopythresholdsize", defaultMultipartCopyThresholdSize, 0, maxChunkSize) + if err != nil { + return nil, err + } + + rootDirectory := parameters["rootdirectory"] + if rootDirectory == nil { + rootDirectory = "" + } + + storageClass := s3.StorageClassStandard + storageClassParam := parameters["storageclass"] + if storageClassParam != nil { + 
storageClassString, ok := storageClassParam.(string) + if !ok { + return nil, fmt.Errorf("The storageclass parameter must be one of %v, %v invalid", + []string{s3.StorageClassStandard, s3.StorageClassReducedRedundancy}, storageClassParam) + } + // All valid storage class parameters are UPPERCASE, so be a bit more flexible here + storageClassString = strings.ToUpper(storageClassString) + if storageClassString != noStorageClass && + storageClassString != s3.StorageClassStandard && + storageClassString != s3.StorageClassReducedRedundancy { + return nil, fmt.Errorf("The storageclass parameter must be one of %v, %v invalid", + []string{noStorageClass, s3.StorageClassStandard, s3.StorageClassReducedRedundancy}, storageClassParam) + } + storageClass = storageClassString + } + + userAgent := parameters["useragent"] + if userAgent == nil { + userAgent = "" + } + + objectACL := s3.ObjectCannedACLPrivate + objectACLParam := parameters["objectacl"] + if objectACLParam != nil { + objectACLString, ok := objectACLParam.(string) + if !ok { + return nil, fmt.Errorf("Invalid value for objectacl parameter: %v", objectACLParam) + } + + if _, ok = validObjectACLs[objectACLString]; !ok { + return nil, fmt.Errorf("Invalid value for objectacl parameter: %v", objectACLParam) + } + objectACL = objectACLString + } + + params := DriverParameters{ + fmt.Sprint(accessKey), + fmt.Sprint(secretKey), + fmt.Sprint(bucket), + region, + fmt.Sprint(regionEndpoint), + encryptBool, + fmt.Sprint(keyID), + secureBool, + v4Bool, + chunkSize, + multipartCopyChunkSize, + multipartCopyMaxConcurrency, + multipartCopyThresholdSize, + fmt.Sprint(rootDirectory), + storageClass, + fmt.Sprint(userAgent), + objectACL, + } + + return New(params) +} + +// getParameterAsInt64 converts paramaters[name] to an int64 value (using +// defaultt if nil), verifies it is no smaller than min, and returns it. 
+func getParameterAsInt64(parameters map[string]interface{}, name string, defaultt int64, min int64, max int64) (int64, error) { + rv := defaultt + param := parameters[name] + switch v := param.(type) { + case string: + vv, err := strconv.ParseInt(v, 0, 64) + if err != nil { + return 0, fmt.Errorf("%s parameter must be an integer, %v invalid", name, param) + } + rv = vv + case int64: + rv = v + case int, uint, int32, uint32, uint64: + rv = reflect.ValueOf(v).Convert(reflect.TypeOf(rv)).Int() + case nil: + // do nothing + default: + return 0, fmt.Errorf("invalid value for %s: %#v", name, param) + } + + if rv < min || rv > max { + return 0, fmt.Errorf("The %s %#v parameter should be a number between %d and %d (inclusive)", name, rv, min, max) + } + + return rv, nil +} + +// New constructs a new Driver with the given AWS credentials, region, encryption flag, and +// bucketName +func New(params DriverParameters) (*Driver, error) { + if !params.V4Auth && + (params.RegionEndpoint == "" || + strings.Contains(params.RegionEndpoint, "s3.amazonaws.com")) { + return nil, fmt.Errorf("On Amazon S3 this storage driver can only be used with v4 authentication") + } + + awsConfig := aws.NewConfig() + creds := credentials.NewChainCredentials([]credentials.Provider{ + &credentials.StaticProvider{ + Value: credentials.Value{ + AccessKeyID: params.AccessKey, + SecretAccessKey: params.SecretKey, + }, + }, + &credentials.EnvProvider{}, + &credentials.SharedCredentialsProvider{}, + &ec2rolecreds.EC2RoleProvider{Client: ec2metadata.New(session.New())}, + }) + + if params.RegionEndpoint != "" { + awsConfig.WithS3ForcePathStyle(true) + awsConfig.WithEndpoint(params.RegionEndpoint) + } + + awsConfig.WithCredentials(creds) + awsConfig.WithRegion(params.Region) + awsConfig.WithDisableSSL(!params.Secure) + + if params.UserAgent != "" { + awsConfig.WithHTTPClient(&http.Client{ + Transport: transport.NewTransport(http.DefaultTransport, transport.NewHeaderRequestModifier(http.Header{http.CanonicalHeaderKey("User-Agent"): []string{params.UserAgent}})), + }) + } + + s3obj := s3.New(session.New(awsConfig)) + + // enable S3 compatible signature v2 signing instead + if !params.V4Auth { + setv2Handlers(s3obj) + } + + // TODO Currently multipart uploads have no timestamps, so this would be unwise + // if you initiated a new s3driver while another one is running on the same bucket. + // multis, _, err := bucket.ListMulti("", "") + // if err != nil { + // return nil, err + // } + + // for _, multi := range multis { + // err := multi.Abort() + // //TODO appropriate to do this error checking? + // if err != nil { + // return nil, err + // } + // } + + d := &driver{ + S3: s3obj, + Bucket: params.Bucket, + ChunkSize: params.ChunkSize, + Encrypt: params.Encrypt, + KeyID: params.KeyID, + MultipartCopyChunkSize: params.MultipartCopyChunkSize, + MultipartCopyMaxConcurrency: params.MultipartCopyMaxConcurrency, + MultipartCopyThresholdSize: params.MultipartCopyThresholdSize, + RootDirectory: params.RootDirectory, + StorageClass: params.StorageClass, + ObjectACL: params.ObjectACL, + } + + return &Driver{ + baseEmbed: baseEmbed{ + Base: base.Base{ + StorageDriver: d, + }, + }, + }, nil +} + +// Implement the storagedriver.StorageDriver interface + +func (d *driver) Name() string { + return driverName +} + +// GetContent retrieves the content stored at "path" as a []byte. 
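getParameterAsInt64 above tolerates string, signed and unsigned integer values before range-checking them, and the content methods that follow (GetContent, PutContent, Reader, Writer) are normally reached through the generic storagedriver.StorageDriver interface. A minimal round-trip sketch, not part of the vendored code and using a hypothetical bucket and blob path:

package main

import (
	"fmt"
	"log"

	"github.com/docker/distribution/context"
	storagedriver "github.com/docker/distribution/registry/storage/driver"
	"github.com/docker/distribution/registry/storage/driver/factory"
	_ "github.com/docker/distribution/registry/storage/driver/s3-aws" // registers "s3aws" via init()
)

func roundTrip(d storagedriver.StorageDriver) error {
	ctx := context.Background()
	path := "/docker/registry/v2/blobs/example" // hypothetical blob key

	if err := d.PutContent(ctx, path, []byte("hello")); err != nil {
		return err
	}
	contents, err := d.GetContent(ctx, path)
	if err != nil {
		return err
	}
	fmt.Printf("read back %d bytes\n", len(contents))
	return d.Delete(ctx, path)
}

func main() {
	d, err := factory.Create("s3aws", map[string]interface{}{
		"region": "us-east-1",               // hypothetical
		"bucket": "example-registry-bucket", // hypothetical
	})
	if err != nil {
		log.Fatal(err)
	}
	if err := roundTrip(d); err != nil {
		log.Fatal(err)
	}
}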
+func (d *driver) GetContent(ctx context.Context, path string) ([]byte, error) { + reader, err := d.Reader(ctx, path, 0) + if err != nil { + return nil, err + } + return ioutil.ReadAll(reader) +} + +// PutContent stores the []byte content at a location designated by "path". +func (d *driver) PutContent(ctx context.Context, path string, contents []byte) error { + _, err := d.S3.PutObject(&s3.PutObjectInput{ + Bucket: aws.String(d.Bucket), + Key: aws.String(d.s3Path(path)), + ContentType: d.getContentType(), + ACL: d.getACL(), + ServerSideEncryption: d.getEncryptionMode(), + SSEKMSKeyId: d.getSSEKMSKeyID(), + StorageClass: d.getStorageClass(), + Body: bytes.NewReader(contents), + }) + return parseError(path, err) +} + +// Reader retrieves an io.ReadCloser for the content stored at "path" with a +// given byte offset. +func (d *driver) Reader(ctx context.Context, path string, offset int64) (io.ReadCloser, error) { + resp, err := d.S3.GetObject(&s3.GetObjectInput{ + Bucket: aws.String(d.Bucket), + Key: aws.String(d.s3Path(path)), + Range: aws.String("bytes=" + strconv.FormatInt(offset, 10) + "-"), + }) + + if err != nil { + if s3Err, ok := err.(awserr.Error); ok && s3Err.Code() == "InvalidRange" { + return ioutil.NopCloser(bytes.NewReader(nil)), nil + } + + return nil, parseError(path, err) + } + return resp.Body, nil +} + +// Writer returns a FileWriter which will store the content written to it +// at the location designated by "path" after the call to Commit. +func (d *driver) Writer(ctx context.Context, path string, append bool) (storagedriver.FileWriter, error) { + key := d.s3Path(path) + if !append { + // TODO (brianbland): cancel other uploads at this path + resp, err := d.S3.CreateMultipartUpload(&s3.CreateMultipartUploadInput{ + Bucket: aws.String(d.Bucket), + Key: aws.String(key), + ContentType: d.getContentType(), + ACL: d.getACL(), + ServerSideEncryption: d.getEncryptionMode(), + SSEKMSKeyId: d.getSSEKMSKeyID(), + StorageClass: d.getStorageClass(), + }) + if err != nil { + return nil, err + } + return d.newWriter(key, *resp.UploadId, nil), nil + } + resp, err := d.S3.ListMultipartUploads(&s3.ListMultipartUploadsInput{ + Bucket: aws.String(d.Bucket), + Prefix: aws.String(key), + }) + if err != nil { + return nil, parseError(path, err) + } + + for _, multi := range resp.Uploads { + if key != *multi.Key { + continue + } + resp, err := d.S3.ListParts(&s3.ListPartsInput{ + Bucket: aws.String(d.Bucket), + Key: aws.String(key), + UploadId: multi.UploadId, + }) + if err != nil { + return nil, parseError(path, err) + } + var multiSize int64 + for _, part := range resp.Parts { + multiSize += *part.Size + } + return d.newWriter(key, *multi.UploadId, resp.Parts), nil + } + return nil, storagedriver.PathNotFoundError{Path: path} +} + +// Stat retrieves the FileInfo for the given path, including the current size +// in bytes and the creation time. 
+func (d *driver) Stat(ctx context.Context, path string) (storagedriver.FileInfo, error) { + resp, err := d.S3.ListObjects(&s3.ListObjectsInput{ + Bucket: aws.String(d.Bucket), + Prefix: aws.String(d.s3Path(path)), + MaxKeys: aws.Int64(1), + }) + if err != nil { + return nil, err + } + + fi := storagedriver.FileInfoFields{ + Path: path, + } + + if len(resp.Contents) == 1 { + if *resp.Contents[0].Key != d.s3Path(path) { + fi.IsDir = true + } else { + fi.IsDir = false + fi.Size = *resp.Contents[0].Size + fi.ModTime = *resp.Contents[0].LastModified + } + } else if len(resp.CommonPrefixes) == 1 { + fi.IsDir = true + } else { + return nil, storagedriver.PathNotFoundError{Path: path} + } + + return storagedriver.FileInfoInternal{FileInfoFields: fi}, nil +} + +// List returns a list of the objects that are direct descendants of the given path. +func (d *driver) List(ctx context.Context, opath string) ([]string, error) { + path := opath + if path != "/" && path[len(path)-1] != '/' { + path = path + "/" + } + + // This is to cover for the cases when the rootDirectory of the driver is either "" or "/". + // In those cases, there is no root prefix to replace and we must actually add a "/" to all + // results in order to keep them as valid paths as recognized by storagedriver.PathRegexp + prefix := "" + if d.s3Path("") == "" { + prefix = "/" + } + + resp, err := d.S3.ListObjects(&s3.ListObjectsInput{ + Bucket: aws.String(d.Bucket), + Prefix: aws.String(d.s3Path(path)), + Delimiter: aws.String("/"), + MaxKeys: aws.Int64(listMax), + }) + if err != nil { + return nil, parseError(opath, err) + } + + files := []string{} + directories := []string{} + + for { + for _, key := range resp.Contents { + files = append(files, strings.Replace(*key.Key, d.s3Path(""), prefix, 1)) + } + + for _, commonPrefix := range resp.CommonPrefixes { + commonPrefix := *commonPrefix.Prefix + directories = append(directories, strings.Replace(commonPrefix[0:len(commonPrefix)-1], d.s3Path(""), prefix, 1)) + } + + if *resp.IsTruncated { + resp, err = d.S3.ListObjects(&s3.ListObjectsInput{ + Bucket: aws.String(d.Bucket), + Prefix: aws.String(d.s3Path(path)), + Delimiter: aws.String("/"), + MaxKeys: aws.Int64(listMax), + Marker: resp.NextMarker, + }) + if err != nil { + return nil, err + } + } else { + break + } + } + + if opath != "/" { + if len(files) == 0 && len(directories) == 0 { + // Treat empty response as missing directory, since we don't actually + // have directories in s3. + return nil, storagedriver.PathNotFoundError{Path: opath} + } + } + + return append(files, directories...), nil +} + +// Move moves an object stored at sourcePath to destPath, removing the original +// object. +func (d *driver) Move(ctx context.Context, sourcePath string, destPath string) error { + /* This is terrible, but aws doesn't have an actual move. */ + if err := d.copy(ctx, sourcePath, destPath); err != nil { + return err + } + return d.Delete(ctx, sourcePath) +} + +// copy copies an object stored at sourcePath to destPath. +func (d *driver) copy(ctx context.Context, sourcePath string, destPath string) error { + // S3 can copy objects up to 5 GB in size with a single PUT Object - Copy + // operation. For larger objects, the multipart upload API must be used. + // + // Empirically, multipart copy is fastest with 32 MB parts and is faster + // than PUT Object - Copy for objects larger than 32 MB. 
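As a worked example of the arithmetic used in the multipart branch just below (illustrative only, not part of the vendored code): with the default 32 MB chunk size, a 100 MB source object is split into four ranged Upload Part - Copy requests, the last one shortened to the remaining bytes.

package main

import "fmt"

func main() {
	size := int64(100 << 20)               // 100 MB source object (hypothetical)
	chunk := int64(32 << 20)               // defaultMultipartCopyChunkSize
	numParts := (size + chunk - 1) / chunk // ceiling division: 4 parts

	for i := int64(0); i < numParts; i++ {
		first := i * chunk
		last := first + chunk - 1
		if last >= size {
			last = size - 1
		}
		fmt.Printf("part %d copies bytes=%d-%d\n", i+1, first, last)
	}
}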
+ + fileInfo, err := d.Stat(ctx, sourcePath) + if err != nil { + return parseError(sourcePath, err) + } + + if fileInfo.Size() <= d.MultipartCopyThresholdSize { + _, err := d.S3.CopyObject(&s3.CopyObjectInput{ + Bucket: aws.String(d.Bucket), + Key: aws.String(d.s3Path(destPath)), + ContentType: d.getContentType(), + ACL: d.getACL(), + ServerSideEncryption: d.getEncryptionMode(), + SSEKMSKeyId: d.getSSEKMSKeyID(), + StorageClass: d.getStorageClass(), + CopySource: aws.String(d.Bucket + "/" + d.s3Path(sourcePath)), + }) + if err != nil { + return parseError(sourcePath, err) + } + return nil + } + + // Even in the worst case, a multipart copy should take no more + // than a few minutes, so 30 minutes is very conservative. + expires := time.Now().Add(time.Duration(30) * time.Minute) + createResp, err := d.S3.CreateMultipartUpload(&s3.CreateMultipartUploadInput{ + Bucket: aws.String(d.Bucket), + Key: aws.String(d.s3Path(destPath)), + ContentType: d.getContentType(), + ACL: d.getACL(), + Expires: aws.Time(expires), + SSEKMSKeyId: d.getSSEKMSKeyID(), + ServerSideEncryption: d.getEncryptionMode(), + StorageClass: d.getStorageClass(), + }) + if err != nil { + return err + } + + numParts := (fileInfo.Size() + d.MultipartCopyChunkSize - 1) / d.MultipartCopyChunkSize + completedParts := make([]*s3.CompletedPart, numParts) + errChan := make(chan error, numParts) + limiter := make(chan struct{}, d.MultipartCopyMaxConcurrency) + + for i := range completedParts { + i := int64(i) + go func() { + limiter <- struct{}{} + firstByte := i * d.MultipartCopyChunkSize + lastByte := firstByte + d.MultipartCopyChunkSize - 1 + if lastByte >= fileInfo.Size() { + lastByte = fileInfo.Size() - 1 + } + uploadResp, err := d.S3.UploadPartCopy(&s3.UploadPartCopyInput{ + Bucket: aws.String(d.Bucket), + CopySource: aws.String(d.Bucket + "/" + d.s3Path(sourcePath)), + Key: aws.String(d.s3Path(destPath)), + PartNumber: aws.Int64(i + 1), + UploadId: createResp.UploadId, + CopySourceRange: aws.String(fmt.Sprintf("bytes=%d-%d", firstByte, lastByte)), + }) + if err == nil { + completedParts[i] = &s3.CompletedPart{ + ETag: uploadResp.CopyPartResult.ETag, + PartNumber: aws.Int64(i + 1), + } + } + errChan <- err + <-limiter + }() + } + + for range completedParts { + err := <-errChan + if err != nil { + return err + } + } + + _, err = d.S3.CompleteMultipartUpload(&s3.CompleteMultipartUploadInput{ + Bucket: aws.String(d.Bucket), + Key: aws.String(d.s3Path(destPath)), + UploadId: createResp.UploadId, + MultipartUpload: &s3.CompletedMultipartUpload{Parts: completedParts}, + }) + return err +} + +func min(a, b int) int { + if a < b { + return a + } + return b +} + +// Delete recursively deletes all objects stored at "path" and its subpaths. 
+// We must be careful since S3 does not guarantee read after delete consistency +func (d *driver) Delete(ctx context.Context, path string) error { + s3Objects := make([]*s3.ObjectIdentifier, 0, listMax) + s3Path := d.s3Path(path) + listObjectsInput := &s3.ListObjectsInput{ + Bucket: aws.String(d.Bucket), + Prefix: aws.String(s3Path), + } +ListLoop: + for { + // list all the objects + resp, err := d.S3.ListObjects(listObjectsInput) + + // resp.Contents can only be empty on the first call + // if there were no more results to return after the first call, resp.IsTruncated would have been false + // and the loop would be exited without recalling ListObjects + if err != nil || len(resp.Contents) == 0 { + return storagedriver.PathNotFoundError{Path: path} + } + + for _, key := range resp.Contents { + // Stop if we encounter a key that is not a subpath (so that deleting "/a" does not delete "/ab"). + if len(*key.Key) > len(s3Path) && (*key.Key)[len(s3Path)] != '/' { + break ListLoop + } + s3Objects = append(s3Objects, &s3.ObjectIdentifier{ + Key: key.Key, + }) + } + + // resp.Contents must have at least one element or we would have returned not found + listObjectsInput.Marker = resp.Contents[len(resp.Contents)-1].Key + + // from the s3 api docs, IsTruncated "specifies whether (true) or not (false) all of the results were returned" + // if everything has been returned, break + if resp.IsTruncated == nil || !*resp.IsTruncated { + break + } + } + + // need to chunk objects into groups of 1000 per s3 restrictions + total := len(s3Objects) + for i := 0; i < total; i += 1000 { + _, err := d.S3.DeleteObjects(&s3.DeleteObjectsInput{ + Bucket: aws.String(d.Bucket), + Delete: &s3.Delete{ + Objects: s3Objects[i:min(i+1000, total)], + Quiet: aws.Bool(false), + }, + }) + if err != nil { + return err + } + } + return nil +} + +// URLFor returns a URL which may be used to retrieve the content stored at the given path. +// May return an UnsupportedMethodErr in certain StorageDriver implementations. +func (d *driver) URLFor(ctx context.Context, path string, options map[string]interface{}) (string, error) { + methodString := "GET" + method, ok := options["method"] + if ok { + methodString, ok = method.(string) + if !ok || (methodString != "GET" && methodString != "HEAD") { + return "", storagedriver.ErrUnsupportedMethod{} + } + } + + expiresIn := 20 * time.Minute + expires, ok := options["expiry"] + if ok { + et, ok := expires.(time.Time) + if ok { + expiresIn = et.Sub(time.Now()) + } + } + + var req *request.Request + + switch methodString { + case "GET": + req, _ = d.S3.GetObjectRequest(&s3.GetObjectInput{ + Bucket: aws.String(d.Bucket), + Key: aws.String(d.s3Path(path)), + }) + case "HEAD": + req, _ = d.S3.HeadObjectRequest(&s3.HeadObjectInput{ + Bucket: aws.String(d.Bucket), + Key: aws.String(d.s3Path(path)), + }) + default: + panic("unreachable") + } + + return req.Presign(expiresIn) +} + +func (d *driver) s3Path(path string) string { + return strings.TrimLeft(strings.TrimRight(d.RootDirectory, "/")+path, "/") +} + +// S3BucketKey returns the s3 bucket key for the given storage driver path. 
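The Delete implementation above batches the collected keys because DeleteObjects accepts at most 1000 identifiers per request. A standalone sketch of that batching pattern (hypothetical keys, not part of the vendored code):

package main

import "fmt"

func main() {
	// Stand-in for the collected object identifiers in Delete above (hypothetical keys).
	keys := make([]string, 2350)
	for i := range keys {
		keys[i] = fmt.Sprintf("/thousandfiletest/file%d", i)
	}

	const batch = 1000 // DeleteObjects accepts at most 1000 keys per request
	for i := 0; i < len(keys); i += batch {
		end := i + batch
		if end > len(keys) {
			end = len(keys)
		}
		fmt.Printf("DeleteObjects call with %d keys\n", len(keys[i:end]))
	}
	// Prints batches of 1000, 1000 and 350 keys.
}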
+func (d *Driver) S3BucketKey(path string) string { + return d.StorageDriver.(*driver).s3Path(path) +} + +func parseError(path string, err error) error { + if s3Err, ok := err.(awserr.Error); ok && s3Err.Code() == "NoSuchKey" { + return storagedriver.PathNotFoundError{Path: path} + } + + return err +} + +func (d *driver) getEncryptionMode() *string { + if !d.Encrypt { + return nil + } + if d.KeyID == "" { + return aws.String("AES256") + } + return aws.String("aws:kms") +} + +func (d *driver) getSSEKMSKeyID() *string { + if d.KeyID != "" { + return aws.String(d.KeyID) + } + return nil +} + +func (d *driver) getContentType() *string { + return aws.String("application/octet-stream") +} + +func (d *driver) getACL() *string { + return aws.String(d.ObjectACL) +} + +func (d *driver) getStorageClass() *string { + if d.StorageClass == noStorageClass { + return nil + } + return aws.String(d.StorageClass) +} + +// writer attempts to upload parts to S3 in a buffered fashion where the last +// part is at least as large as the chunksize, so the multipart upload could be +// cleanly resumed in the future. This is violated if Close is called after less +// than a full chunk is written. +type writer struct { + driver *driver + key string + uploadID string + parts []*s3.Part + size int64 + readyPart []byte + pendingPart []byte + closed bool + committed bool + cancelled bool +} + +func (d *driver) newWriter(key, uploadID string, parts []*s3.Part) storagedriver.FileWriter { + var size int64 + for _, part := range parts { + size += *part.Size + } + return &writer{ + driver: d, + key: key, + uploadID: uploadID, + parts: parts, + size: size, + } +} + +type completedParts []*s3.CompletedPart + +func (a completedParts) Len() int { return len(a) } +func (a completedParts) Swap(i, j int) { a[i], a[j] = a[j], a[i] } +func (a completedParts) Less(i, j int) bool { return *a[i].PartNumber < *a[j].PartNumber } + +func (w *writer) Write(p []byte) (int, error) { + if w.closed { + return 0, fmt.Errorf("already closed") + } else if w.committed { + return 0, fmt.Errorf("already committed") + } else if w.cancelled { + return 0, fmt.Errorf("already cancelled") + } + + // If the last written part is smaller than minChunkSize, we need to make a + // new multipart upload :sadface: + if len(w.parts) > 0 && int(*w.parts[len(w.parts)-1].Size) < minChunkSize { + var completedUploadedParts completedParts + for _, part := range w.parts { + completedUploadedParts = append(completedUploadedParts, &s3.CompletedPart{ + ETag: part.ETag, + PartNumber: part.PartNumber, + }) + } + + sort.Sort(completedUploadedParts) + + _, err := w.driver.S3.CompleteMultipartUpload(&s3.CompleteMultipartUploadInput{ + Bucket: aws.String(w.driver.Bucket), + Key: aws.String(w.key), + UploadId: aws.String(w.uploadID), + MultipartUpload: &s3.CompletedMultipartUpload{ + Parts: completedUploadedParts, + }, + }) + if err != nil { + w.driver.S3.AbortMultipartUpload(&s3.AbortMultipartUploadInput{ + Bucket: aws.String(w.driver.Bucket), + Key: aws.String(w.key), + UploadId: aws.String(w.uploadID), + }) + return 0, err + } + + resp, err := w.driver.S3.CreateMultipartUpload(&s3.CreateMultipartUploadInput{ + Bucket: aws.String(w.driver.Bucket), + Key: aws.String(w.key), + ContentType: w.driver.getContentType(), + ACL: w.driver.getACL(), + ServerSideEncryption: w.driver.getEncryptionMode(), + StorageClass: w.driver.getStorageClass(), + }) + if err != nil { + return 0, err + } + w.uploadID = *resp.UploadId + + // If the entire written file is smaller than minChunkSize, we need 
to make + // a new part from scratch :double sad face: + if w.size < minChunkSize { + resp, err := w.driver.S3.GetObject(&s3.GetObjectInput{ + Bucket: aws.String(w.driver.Bucket), + Key: aws.String(w.key), + }) + defer resp.Body.Close() + if err != nil { + return 0, err + } + w.parts = nil + w.readyPart, err = ioutil.ReadAll(resp.Body) + if err != nil { + return 0, err + } + } else { + // Otherwise we can use the old file as the new first part + copyPartResp, err := w.driver.S3.UploadPartCopy(&s3.UploadPartCopyInput{ + Bucket: aws.String(w.driver.Bucket), + CopySource: aws.String(w.driver.Bucket + "/" + w.key), + Key: aws.String(w.key), + PartNumber: aws.Int64(1), + UploadId: resp.UploadId, + }) + if err != nil { + return 0, err + } + w.parts = []*s3.Part{ + { + ETag: copyPartResp.CopyPartResult.ETag, + PartNumber: aws.Int64(1), + Size: aws.Int64(w.size), + }, + } + } + } + + var n int + + for len(p) > 0 { + // If no parts are ready to write, fill up the first part + if neededBytes := int(w.driver.ChunkSize) - len(w.readyPart); neededBytes > 0 { + if len(p) >= neededBytes { + w.readyPart = append(w.readyPart, p[:neededBytes]...) + n += neededBytes + p = p[neededBytes:] + } else { + w.readyPart = append(w.readyPart, p...) + n += len(p) + p = nil + } + } + + if neededBytes := int(w.driver.ChunkSize) - len(w.pendingPart); neededBytes > 0 { + if len(p) >= neededBytes { + w.pendingPart = append(w.pendingPart, p[:neededBytes]...) + n += neededBytes + p = p[neededBytes:] + err := w.flushPart() + if err != nil { + w.size += int64(n) + return n, err + } + } else { + w.pendingPart = append(w.pendingPart, p...) + n += len(p) + p = nil + } + } + } + w.size += int64(n) + return n, nil +} + +func (w *writer) Size() int64 { + return w.size +} + +func (w *writer) Close() error { + if w.closed { + return fmt.Errorf("already closed") + } + w.closed = true + return w.flushPart() +} + +func (w *writer) Cancel() error { + if w.closed { + return fmt.Errorf("already closed") + } else if w.committed { + return fmt.Errorf("already committed") + } + w.cancelled = true + _, err := w.driver.S3.AbortMultipartUpload(&s3.AbortMultipartUploadInput{ + Bucket: aws.String(w.driver.Bucket), + Key: aws.String(w.key), + UploadId: aws.String(w.uploadID), + }) + return err +} + +func (w *writer) Commit() error { + if w.closed { + return fmt.Errorf("already closed") + } else if w.committed { + return fmt.Errorf("already committed") + } else if w.cancelled { + return fmt.Errorf("already cancelled") + } + err := w.flushPart() + if err != nil { + return err + } + w.committed = true + + var completedUploadedParts completedParts + for _, part := range w.parts { + completedUploadedParts = append(completedUploadedParts, &s3.CompletedPart{ + ETag: part.ETag, + PartNumber: part.PartNumber, + }) + } + + sort.Sort(completedUploadedParts) + + _, err = w.driver.S3.CompleteMultipartUpload(&s3.CompleteMultipartUploadInput{ + Bucket: aws.String(w.driver.Bucket), + Key: aws.String(w.key), + UploadId: aws.String(w.uploadID), + MultipartUpload: &s3.CompletedMultipartUpload{ + Parts: completedUploadedParts, + }, + }) + if err != nil { + w.driver.S3.AbortMultipartUpload(&s3.AbortMultipartUploadInput{ + Bucket: aws.String(w.driver.Bucket), + Key: aws.String(w.key), + UploadId: aws.String(w.uploadID), + }) + return err + } + return nil +} + +// flushPart flushes buffers to write a part to S3. 
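Callers normally drive the writer above through the storagedriver.FileWriter interface: Commit completes the multipart upload, Cancel aborts it, and Close alone leaves it resumable for a later Writer call with append=true. A rough usage sketch under those assumptions (not part of the vendored code):

package example

import (
	"github.com/docker/distribution/context"
	storagedriver "github.com/docker/distribution/registry/storage/driver"
)

// upload sketches the FileWriter life cycle implemented above: append=false
// starts a fresh multipart upload, Commit completes it, and Cancel aborts it
// if writing fails part-way through.
func upload(d storagedriver.StorageDriver, path string, payload []byte) error {
	ctx := context.Background()

	fw, err := d.Writer(ctx, path, false) // false: start a new upload rather than resuming
	if err != nil {
		return err
	}

	if _, err := fw.Write(payload); err != nil {
		fw.Cancel() // abort the multipart upload so no orphaned parts remain
		return err
	}

	if err := fw.Commit(); err != nil {
		return err
	}
	return fw.Close()
}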
+// Only called by Write (with both buffers full) and Close/Commit (always) +func (w *writer) flushPart() error { + if len(w.readyPart) == 0 && len(w.pendingPart) == 0 { + // nothing to write + return nil + } + if len(w.pendingPart) < int(w.driver.ChunkSize) { + // closing with a small pending part + // combine ready and pending to avoid writing a small part + w.readyPart = append(w.readyPart, w.pendingPart...) + w.pendingPart = nil + } + + partNumber := aws.Int64(int64(len(w.parts) + 1)) + resp, err := w.driver.S3.UploadPart(&s3.UploadPartInput{ + Bucket: aws.String(w.driver.Bucket), + Key: aws.String(w.key), + PartNumber: partNumber, + UploadId: aws.String(w.uploadID), + Body: bytes.NewReader(w.readyPart), + }) + if err != nil { + return err + } + w.parts = append(w.parts, &s3.Part{ + ETag: resp.ETag, + PartNumber: partNumber, + Size: aws.Int64(int64(len(w.readyPart))), + }) + w.readyPart = w.pendingPart + w.pendingPart = nil + return nil +} diff --git a/vendor/github.com/docker/distribution/registry/storage/driver/s3-aws/s3_test.go b/vendor/github.com/docker/distribution/registry/storage/driver/s3-aws/s3_test.go new file mode 100644 index 000000000..eb7ee5195 --- /dev/null +++ b/vendor/github.com/docker/distribution/registry/storage/driver/s3-aws/s3_test.go @@ -0,0 +1,313 @@ +package s3 + +import ( + "bytes" + "io/ioutil" + "math/rand" + "os" + "strconv" + "testing" + + "gopkg.in/check.v1" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/service/s3" + + "github.com/docker/distribution/context" + storagedriver "github.com/docker/distribution/registry/storage/driver" + "github.com/docker/distribution/registry/storage/driver/testsuites" +) + +// Hook up gocheck into the "go test" runner. +func Test(t *testing.T) { check.TestingT(t) } + +var s3DriverConstructor func(rootDirectory, storageClass string) (*Driver, error) +var skipS3 func() string + +func init() { + accessKey := os.Getenv("AWS_ACCESS_KEY") + secretKey := os.Getenv("AWS_SECRET_KEY") + bucket := os.Getenv("S3_BUCKET") + encrypt := os.Getenv("S3_ENCRYPT") + keyID := os.Getenv("S3_KEY_ID") + secure := os.Getenv("S3_SECURE") + v4Auth := os.Getenv("S3_V4_AUTH") + region := os.Getenv("AWS_REGION") + objectACL := os.Getenv("S3_OBJECT_ACL") + root, err := ioutil.TempDir("", "driver-") + regionEndpoint := os.Getenv("REGION_ENDPOINT") + if err != nil { + panic(err) + } + defer os.Remove(root) + + s3DriverConstructor = func(rootDirectory, storageClass string) (*Driver, error) { + encryptBool := false + if encrypt != "" { + encryptBool, err = strconv.ParseBool(encrypt) + if err != nil { + return nil, err + } + } + + secureBool := true + if secure != "" { + secureBool, err = strconv.ParseBool(secure) + if err != nil { + return nil, err + } + } + + v4Bool := true + if v4Auth != "" { + v4Bool, err = strconv.ParseBool(v4Auth) + if err != nil { + return nil, err + } + } + + parameters := DriverParameters{ + accessKey, + secretKey, + bucket, + region, + regionEndpoint, + encryptBool, + keyID, + secureBool, + v4Bool, + minChunkSize, + defaultMultipartCopyChunkSize, + defaultMultipartCopyMaxConcurrency, + defaultMultipartCopyThresholdSize, + rootDirectory, + storageClass, + driverName + "-test", + objectACL, + } + + return New(parameters) + } + + // Skip S3 storage driver tests if environment variable parameters are not provided + skipS3 = func() string { + if accessKey == "" || secretKey == "" || region == "" || bucket == "" || encrypt == "" { + return "Must set AWS_ACCESS_KEY, AWS_SECRET_KEY, AWS_REGION, S3_BUCKET, and S3_ENCRYPT 
to run S3 tests" + } + return "" + } + + testsuites.RegisterSuite(func() (storagedriver.StorageDriver, error) { + return s3DriverConstructor(root, s3.StorageClassStandard) + }, skipS3) +} + +func TestEmptyRootList(t *testing.T) { + if skipS3() != "" { + t.Skip(skipS3()) + } + + validRoot, err := ioutil.TempDir("", "driver-") + if err != nil { + t.Fatalf("unexpected error creating temporary directory: %v", err) + } + defer os.Remove(validRoot) + + rootedDriver, err := s3DriverConstructor(validRoot, s3.StorageClassStandard) + if err != nil { + t.Fatalf("unexpected error creating rooted driver: %v", err) + } + + emptyRootDriver, err := s3DriverConstructor("", s3.StorageClassStandard) + if err != nil { + t.Fatalf("unexpected error creating empty root driver: %v", err) + } + + slashRootDriver, err := s3DriverConstructor("/", s3.StorageClassStandard) + if err != nil { + t.Fatalf("unexpected error creating slash root driver: %v", err) + } + + filename := "/test" + contents := []byte("contents") + ctx := context.Background() + err = rootedDriver.PutContent(ctx, filename, contents) + if err != nil { + t.Fatalf("unexpected error creating content: %v", err) + } + defer rootedDriver.Delete(ctx, filename) + + keys, err := emptyRootDriver.List(ctx, "/") + for _, path := range keys { + if !storagedriver.PathRegexp.MatchString(path) { + t.Fatalf("unexpected string in path: %q != %q", path, storagedriver.PathRegexp) + } + } + + keys, err = slashRootDriver.List(ctx, "/") + for _, path := range keys { + if !storagedriver.PathRegexp.MatchString(path) { + t.Fatalf("unexpected string in path: %q != %q", path, storagedriver.PathRegexp) + } + } +} + +func TestStorageClass(t *testing.T) { + if skipS3() != "" { + t.Skip(skipS3()) + } + + rootDir, err := ioutil.TempDir("", "driver-") + if err != nil { + t.Fatalf("unexpected error creating temporary directory: %v", err) + } + defer os.Remove(rootDir) + + standardDriver, err := s3DriverConstructor(rootDir, s3.StorageClassStandard) + if err != nil { + t.Fatalf("unexpected error creating driver with standard storage: %v", err) + } + + rrDriver, err := s3DriverConstructor(rootDir, s3.StorageClassReducedRedundancy) + if err != nil { + t.Fatalf("unexpected error creating driver with reduced redundancy storage: %v", err) + } + + if _, err = s3DriverConstructor(rootDir, noStorageClass); err != nil { + t.Fatalf("unexpected error creating driver without storage class: %v", err) + } + + standardFilename := "/test-standard" + rrFilename := "/test-rr" + contents := []byte("contents") + ctx := context.Background() + + err = standardDriver.PutContent(ctx, standardFilename, contents) + if err != nil { + t.Fatalf("unexpected error creating content: %v", err) + } + defer standardDriver.Delete(ctx, standardFilename) + + err = rrDriver.PutContent(ctx, rrFilename, contents) + if err != nil { + t.Fatalf("unexpected error creating content: %v", err) + } + defer rrDriver.Delete(ctx, rrFilename) + + standardDriverUnwrapped := standardDriver.Base.StorageDriver.(*driver) + resp, err := standardDriverUnwrapped.S3.GetObject(&s3.GetObjectInput{ + Bucket: aws.String(standardDriverUnwrapped.Bucket), + Key: aws.String(standardDriverUnwrapped.s3Path(standardFilename)), + }) + if err != nil { + t.Fatalf("unexpected error retrieving standard storage file: %v", err) + } + defer resp.Body.Close() + // Amazon only populates this header value for non-standard storage classes + if resp.StorageClass != nil { + t.Fatalf("unexpected storage class for standard file: %v", resp.StorageClass) + } + + 
rrDriverUnwrapped := rrDriver.Base.StorageDriver.(*driver) + resp, err = rrDriverUnwrapped.S3.GetObject(&s3.GetObjectInput{ + Bucket: aws.String(rrDriverUnwrapped.Bucket), + Key: aws.String(rrDriverUnwrapped.s3Path(rrFilename)), + }) + if err != nil { + t.Fatalf("unexpected error retrieving reduced-redundancy storage file: %v", err) + } + defer resp.Body.Close() + if resp.StorageClass == nil { + t.Fatalf("unexpected storage class for reduced-redundancy file: %v", s3.StorageClassStandard) + } else if *resp.StorageClass != s3.StorageClassReducedRedundancy { + t.Fatalf("unexpected storage class for reduced-redundancy file: %v", *resp.StorageClass) + } + +} + +func TestOverThousandBlobs(t *testing.T) { + if skipS3() != "" { + t.Skip(skipS3()) + } + + rootDir, err := ioutil.TempDir("", "driver-") + if err != nil { + t.Fatalf("unexpected error creating temporary directory: %v", err) + } + defer os.Remove(rootDir) + + standardDriver, err := s3DriverConstructor(rootDir, s3.StorageClassStandard) + if err != nil { + t.Fatalf("unexpected error creating driver with standard storage: %v", err) + } + + ctx := context.Background() + for i := 0; i < 1005; i++ { + filename := "/thousandfiletest/file" + strconv.Itoa(i) + contents := []byte("contents") + err = standardDriver.PutContent(ctx, filename, contents) + if err != nil { + t.Fatalf("unexpected error creating content: %v", err) + } + } + + // cant actually verify deletion because read-after-delete is inconsistent, but can ensure no errors + err = standardDriver.Delete(ctx, "/thousandfiletest") + if err != nil { + t.Fatalf("unexpected error deleting thousand files: %v", err) + } +} + +func TestMoveWithMultipartCopy(t *testing.T) { + if skipS3() != "" { + t.Skip(skipS3()) + } + + rootDir, err := ioutil.TempDir("", "driver-") + if err != nil { + t.Fatalf("unexpected error creating temporary directory: %v", err) + } + defer os.Remove(rootDir) + + d, err := s3DriverConstructor(rootDir, s3.StorageClassStandard) + if err != nil { + t.Fatalf("unexpected error creating driver: %v", err) + } + + ctx := context.Background() + sourcePath := "/source" + destPath := "/dest" + + defer d.Delete(ctx, sourcePath) + defer d.Delete(ctx, destPath) + + // An object larger than d's MultipartCopyThresholdSize will cause d.Move() to perform a multipart copy. 
+ multipartCopyThresholdSize := d.baseEmbed.Base.StorageDriver.(*driver).MultipartCopyThresholdSize + contents := make([]byte, 2*multipartCopyThresholdSize) + rand.Read(contents) + + err = d.PutContent(ctx, sourcePath, contents) + if err != nil { + t.Fatalf("unexpected error creating content: %v", err) + } + + err = d.Move(ctx, sourcePath, destPath) + if err != nil { + t.Fatalf("unexpected error moving file: %v", err) + } + + received, err := d.GetContent(ctx, destPath) + if err != nil { + t.Fatalf("unexpected error getting content: %v", err) + } + if !bytes.Equal(contents, received) { + t.Fatal("content differs") + } + + _, err = d.GetContent(ctx, sourcePath) + switch err.(type) { + case storagedriver.PathNotFoundError: + default: + t.Fatalf("unexpected error getting content: %v", err) + } +} diff --git a/vendor/github.com/docker/distribution/registry/storage/driver/s3-aws/s3_v2_signer.go b/vendor/github.com/docker/distribution/registry/storage/driver/s3-aws/s3_v2_signer.go new file mode 100644 index 000000000..7cabe07e2 --- /dev/null +++ b/vendor/github.com/docker/distribution/registry/storage/driver/s3-aws/s3_v2_signer.go @@ -0,0 +1,219 @@ +package s3 + +// Source: https://github.com/pivotal-golang/s3cli + +// Copyright (c) 2013 Damien Le Berrigaud and Nick Wade + +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: + +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. + +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. 
+ +import ( + "crypto/hmac" + "crypto/sha1" + "encoding/base64" + "net/http" + "net/url" + "sort" + "strings" + "time" + + log "github.com/Sirupsen/logrus" + "github.com/aws/aws-sdk-go/aws/corehandlers" + "github.com/aws/aws-sdk-go/aws/credentials" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/service/s3" +) + +const ( + signatureVersion = "2" + signatureMethod = "HmacSHA1" + timeFormat = "2006-01-02T15:04:05Z" +) + +type signer struct { + // Values that must be populated from the request + Request *http.Request + Time time.Time + Credentials *credentials.Credentials + Query url.Values + stringToSign string + signature string +} + +var s3ParamsToSign = map[string]bool{ + "acl": true, + "location": true, + "logging": true, + "notification": true, + "partNumber": true, + "policy": true, + "requestPayment": true, + "torrent": true, + "uploadId": true, + "uploads": true, + "versionId": true, + "versioning": true, + "versions": true, + "response-content-type": true, + "response-content-language": true, + "response-expires": true, + "response-cache-control": true, + "response-content-disposition": true, + "response-content-encoding": true, + "website": true, + "delete": true, +} + +// setv2Handlers will setup v2 signature signing on the S3 driver +func setv2Handlers(svc *s3.S3) { + svc.Handlers.Build.PushBack(func(r *request.Request) { + parsedURL, err := url.Parse(r.HTTPRequest.URL.String()) + if err != nil { + log.Fatalf("Failed to parse URL: %v", err) + } + r.HTTPRequest.URL.Opaque = parsedURL.Path + }) + + svc.Handlers.Sign.Clear() + svc.Handlers.Sign.PushBack(Sign) + svc.Handlers.Sign.PushBackNamed(corehandlers.BuildContentLengthHandler) +} + +// Sign requests with signature version 2. +// +// Will sign the requests with the service config's Credentials object +// Signing is skipped if the credentials is the credentials.AnonymousCredentials +// object. +func Sign(req *request.Request) { + // If the request does not need to be signed ignore the signing of the + // request if the AnonymousCredentials object is used. 
+ if req.Config.Credentials == credentials.AnonymousCredentials { + return + } + + v2 := signer{ + Request: req.HTTPRequest, + Time: req.Time, + Credentials: req.Config.Credentials, + } + v2.Sign() +} + +func (v2 *signer) Sign() error { + credValue, err := v2.Credentials.Get() + if err != nil { + return err + } + accessKey := credValue.AccessKeyID + var ( + md5, ctype, date, xamz string + xamzDate bool + sarray []string + smap map[string]string + sharray []string + ) + + headers := v2.Request.Header + params := v2.Request.URL.Query() + parsedURL, err := url.Parse(v2.Request.URL.String()) + if err != nil { + return err + } + host, canonicalPath := parsedURL.Host, parsedURL.Path + v2.Request.Header["Host"] = []string{host} + v2.Request.Header["date"] = []string{v2.Time.In(time.UTC).Format(time.RFC1123)} + + smap = make(map[string]string) + for k, v := range headers { + k = strings.ToLower(k) + switch k { + case "content-md5": + md5 = v[0] + case "content-type": + ctype = v[0] + case "date": + if !xamzDate { + date = v[0] + } + default: + if strings.HasPrefix(k, "x-amz-") { + vall := strings.Join(v, ",") + smap[k] = k + ":" + vall + if k == "x-amz-date" { + xamzDate = true + date = "" + } + sharray = append(sharray, k) + } + } + } + if len(sharray) > 0 { + sort.StringSlice(sharray).Sort() + for _, h := range sharray { + sarray = append(sarray, smap[h]) + } + xamz = strings.Join(sarray, "\n") + "\n" + } + + expires := false + if v, ok := params["Expires"]; ok { + expires = true + date = v[0] + params["AWSAccessKeyId"] = []string{accessKey} + } + + sarray = sarray[0:0] + for k, v := range params { + if s3ParamsToSign[k] { + for _, vi := range v { + if vi == "" { + sarray = append(sarray, k) + } else { + sarray = append(sarray, k+"="+vi) + } + } + } + } + if len(sarray) > 0 { + sort.StringSlice(sarray).Sort() + canonicalPath = canonicalPath + "?" + strings.Join(sarray, "&") + } + + v2.stringToSign = strings.Join([]string{ + v2.Request.Method, + md5, + ctype, + date, + xamz + canonicalPath, + }, "\n") + hash := hmac.New(sha1.New, []byte(credValue.SecretAccessKey)) + hash.Write([]byte(v2.stringToSign)) + v2.signature = base64.StdEncoding.EncodeToString(hash.Sum(nil)) + + if expires { + params["Signature"] = []string{string(v2.signature)} + } else { + headers["Authorization"] = []string{"AWS " + accessKey + ":" + string(v2.signature)} + } + + log.WithFields(log.Fields{ + "string-to-sign": v2.stringToSign, + "signature": v2.signature, + }).Debugln("request signature") + return nil +} diff --git a/vendor/github.com/docker/distribution/registry/storage/driver/s3-goamz/s3.go b/vendor/github.com/docker/distribution/registry/storage/driver/s3-goamz/s3.go new file mode 100644 index 000000000..33751c168 --- /dev/null +++ b/vendor/github.com/docker/distribution/registry/storage/driver/s3-goamz/s3.go @@ -0,0 +1,757 @@ +// Package s3 provides a storagedriver.StorageDriver implementation to +// store blobs in Amazon S3 cloud storage. +// +// This package leverages the docker/goamz client library for interfacing with +// S3. It is intended to be deprecated in favor of the s3-aws driver +// implementation. +// +// Because S3 is a key, value store the Stat call does not support last modification +// time for directories (directories are an abstraction for key, value stores) +// +// Keep in mind that S3 guarantees only read-after-write consistency for new +// objects, but no read-after-update or list-after-write consistency. 
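The Sign method above boils down to an HMAC-SHA1 of the canonical string-to-sign, base64-encoded into the Authorization header or the Signature query parameter. A stripped-down sketch of that core step, with made-up credentials and resource (not part of the vendored code):

package main

import (
	"crypto/hmac"
	"crypto/sha1"
	"encoding/base64"
	"fmt"
	"strings"
)

func main() {
	secretKey := "EXAMPLESECRETKEY" // hypothetical secret access key

	// Method, Content-MD5, Content-Type, Date and the canonicalized resource,
	// joined by newlines, mirror the stringToSign assembled by Sign above.
	stringToSign := strings.Join([]string{
		"GET",
		"", // Content-MD5
		"", // Content-Type
		"Tue, 27 Mar 2007 19:36:42 +0000",
		"/example-bucket/example-object",
	}, "\n")

	mac := hmac.New(sha1.New, []byte(secretKey))
	mac.Write([]byte(stringToSign))
	signature := base64.StdEncoding.EncodeToString(mac.Sum(nil))

	fmt.Println("Authorization: AWS AKIAEXAMPLE:" + signature)
}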
+package s3 + +import ( + "bytes" + "fmt" + "io" + "io/ioutil" + "net/http" + "reflect" + "strconv" + "strings" + "time" + + "github.com/docker/goamz/aws" + "github.com/docker/goamz/s3" + + "github.com/docker/distribution/context" + "github.com/docker/distribution/registry/client/transport" + storagedriver "github.com/docker/distribution/registry/storage/driver" + "github.com/docker/distribution/registry/storage/driver/base" + "github.com/docker/distribution/registry/storage/driver/factory" +) + +const driverName = "s3goamz" + +// minChunkSize defines the minimum multipart upload chunk size +// S3 API requires multipart upload chunks to be at least 5MB +const minChunkSize = 5 << 20 + +const defaultChunkSize = 2 * minChunkSize + +// listMax is the largest amount of objects you can request from S3 in a list call +const listMax = 1000 + +//DriverParameters A struct that encapsulates all of the driver parameters after all values have been set +type DriverParameters struct { + AccessKey string + SecretKey string + Bucket string + Region aws.Region + Encrypt bool + Secure bool + V4Auth bool + ChunkSize int64 + RootDirectory string + StorageClass s3.StorageClass + UserAgent string +} + +func init() { + factory.Register(driverName, &s3DriverFactory{}) +} + +// s3DriverFactory implements the factory.StorageDriverFactory interface +type s3DriverFactory struct{} + +func (factory *s3DriverFactory) Create(parameters map[string]interface{}) (storagedriver.StorageDriver, error) { + return FromParameters(parameters) +} + +type driver struct { + S3 *s3.S3 + Bucket *s3.Bucket + ChunkSize int64 + Encrypt bool + RootDirectory string + StorageClass s3.StorageClass +} + +type baseEmbed struct { + base.Base +} + +// Driver is a storagedriver.StorageDriver implementation backed by Amazon S3 +// Objects are stored at absolute keys in the provided bucket. 
+type Driver struct { + baseEmbed +} + +// FromParameters constructs a new Driver with a given parameters map +// Required parameters: +// - accesskey +// - secretkey +// - region +// - bucket +// - encrypt +func FromParameters(parameters map[string]interface{}) (*Driver, error) { + // Providing no values for these is valid in case the user is authenticating + // with an IAM on an ec2 instance (in which case the instance credentials will + // be summoned when GetAuth is called) + accessKey := parameters["accesskey"] + if accessKey == nil { + accessKey = "" + } + + secretKey := parameters["secretkey"] + if secretKey == nil { + secretKey = "" + } + + regionName := parameters["region"] + if regionName == nil || fmt.Sprint(regionName) == "" { + return nil, fmt.Errorf("No region parameter provided") + } + region := aws.GetRegion(fmt.Sprint(regionName)) + if region.Name == "" { + return nil, fmt.Errorf("Invalid region provided: %v", region) + } + + bucket := parameters["bucket"] + if bucket == nil || fmt.Sprint(bucket) == "" { + return nil, fmt.Errorf("No bucket parameter provided") + } + + encryptBool := false + encrypt := parameters["encrypt"] + switch encrypt := encrypt.(type) { + case string: + b, err := strconv.ParseBool(encrypt) + if err != nil { + return nil, fmt.Errorf("The encrypt parameter should be a boolean") + } + encryptBool = b + case bool: + encryptBool = encrypt + case nil: + // do nothing + default: + return nil, fmt.Errorf("The encrypt parameter should be a boolean") + } + + secureBool := true + secure := parameters["secure"] + switch secure := secure.(type) { + case string: + b, err := strconv.ParseBool(secure) + if err != nil { + return nil, fmt.Errorf("The secure parameter should be a boolean") + } + secureBool = b + case bool: + secureBool = secure + case nil: + // do nothing + default: + return nil, fmt.Errorf("The secure parameter should be a boolean") + } + + v4AuthBool := false + v4Auth := parameters["v4auth"] + switch v4Auth := v4Auth.(type) { + case string: + b, err := strconv.ParseBool(v4Auth) + if err != nil { + return nil, fmt.Errorf("The v4auth parameter should be a boolean") + } + v4AuthBool = b + case bool: + v4AuthBool = v4Auth + case nil: + // do nothing + default: + return nil, fmt.Errorf("The v4auth parameter should be a boolean") + } + + chunkSize := int64(defaultChunkSize) + chunkSizeParam := parameters["chunksize"] + switch v := chunkSizeParam.(type) { + case string: + vv, err := strconv.ParseInt(v, 0, 64) + if err != nil { + return nil, fmt.Errorf("chunksize parameter must be an integer, %v invalid", chunkSizeParam) + } + chunkSize = vv + case int64: + chunkSize = v + case int, uint, int32, uint32, uint64: + chunkSize = reflect.ValueOf(v).Convert(reflect.TypeOf(chunkSize)).Int() + case nil: + // do nothing + default: + return nil, fmt.Errorf("invalid value for chunksize: %#v", chunkSizeParam) + } + + if chunkSize < minChunkSize { + return nil, fmt.Errorf("The chunksize %#v parameter should be a number that is larger than or equal to %d", chunkSize, minChunkSize) + } + + rootDirectory := parameters["rootdirectory"] + if rootDirectory == nil { + rootDirectory = "" + } + + storageClass := s3.StandardStorage + storageClassParam := parameters["storageclass"] + if storageClassParam != nil { + storageClassString, ok := storageClassParam.(string) + if !ok { + return nil, fmt.Errorf("The storageclass parameter must be one of %v, %v invalid", []s3.StorageClass{s3.StandardStorage, s3.ReducedRedundancy}, storageClassParam) + } + // All valid storage class 
parameters are UPPERCASE, so be a bit more flexible here + storageClassCasted := s3.StorageClass(strings.ToUpper(storageClassString)) + if storageClassCasted != s3.StandardStorage && storageClassCasted != s3.ReducedRedundancy { + return nil, fmt.Errorf("The storageclass parameter must be one of %v, %v invalid", []s3.StorageClass{s3.StandardStorage, s3.ReducedRedundancy}, storageClassParam) + } + storageClass = storageClassCasted + } + + userAgent := parameters["useragent"] + if userAgent == nil { + userAgent = "" + } + + params := DriverParameters{ + fmt.Sprint(accessKey), + fmt.Sprint(secretKey), + fmt.Sprint(bucket), + region, + encryptBool, + secureBool, + v4AuthBool, + chunkSize, + fmt.Sprint(rootDirectory), + storageClass, + fmt.Sprint(userAgent), + } + + return New(params) +} + +// New constructs a new Driver with the given AWS credentials, region, encryption flag, and +// bucketName +func New(params DriverParameters) (*Driver, error) { + auth, err := aws.GetAuth(params.AccessKey, params.SecretKey, "", time.Time{}) + if err != nil { + return nil, fmt.Errorf("unable to resolve aws credentials, please ensure that 'accesskey' and 'secretkey' are properly set or the credentials are available in $HOME/.aws/credentials: %v", err) + } + + if !params.Secure { + params.Region.S3Endpoint = strings.Replace(params.Region.S3Endpoint, "https", "http", 1) + } + + s3obj := s3.New(auth, params.Region) + + if params.UserAgent != "" { + s3obj.Client = &http.Client{ + Transport: transport.NewTransport(http.DefaultTransport, + transport.NewHeaderRequestModifier(http.Header{ + http.CanonicalHeaderKey("User-Agent"): []string{params.UserAgent}, + }), + ), + } + } + + if params.V4Auth { + s3obj.Signature = aws.V4Signature + } else { + if params.Region.Name == "eu-central-1" { + return nil, fmt.Errorf("The eu-central-1 region only works with v4 authentication") + } + } + + bucket := s3obj.Bucket(params.Bucket) + + // TODO Currently multipart uploads have no timestamps, so this would be unwise + // if you initiated a new s3driver while another one is running on the same bucket. + // multis, _, err := bucket.ListMulti("", "") + // if err != nil { + // return nil, err + // } + + // for _, multi := range multis { + // err := multi.Abort() + // //TODO appropriate to do this error checking? + // if err != nil { + // return nil, err + // } + // } + + d := &driver{ + S3: s3obj, + Bucket: bucket, + ChunkSize: params.ChunkSize, + Encrypt: params.Encrypt, + RootDirectory: params.RootDirectory, + StorageClass: params.StorageClass, + } + + return &Driver{ + baseEmbed: baseEmbed{ + Base: base.Base{ + StorageDriver: d, + }, + }, + }, nil +} + +// Implement the storagedriver.StorageDriver interface + +func (d *driver) Name() string { + return driverName +} + +// GetContent retrieves the content stored at "path" as a []byte. +func (d *driver) GetContent(ctx context.Context, path string) ([]byte, error) { + content, err := d.Bucket.Get(d.s3Path(path)) + if err != nil { + return nil, parseError(path, err) + } + return content, nil +} + +// PutContent stores the []byte content at a location designated by "path". +func (d *driver) PutContent(ctx context.Context, path string, contents []byte) error { + return parseError(path, d.Bucket.Put(d.s3Path(path), contents, d.getContentType(), getPermissions(), d.getOptions())) +} + +// Reader retrieves an io.ReadCloser for the content stored at "path" with a +// given byte offset. 
+func (d *driver) Reader(ctx context.Context, path string, offset int64) (io.ReadCloser, error) { + headers := make(http.Header) + headers.Add("Range", "bytes="+strconv.FormatInt(offset, 10)+"-") + + resp, err := d.Bucket.GetResponseWithHeaders(d.s3Path(path), headers) + if err != nil { + if s3Err, ok := err.(*s3.Error); ok && s3Err.Code == "InvalidRange" { + return ioutil.NopCloser(bytes.NewReader(nil)), nil + } + + return nil, parseError(path, err) + } + return resp.Body, nil +} + +// Writer returns a FileWriter which will store the content written to it +// at the location designated by "path" after the call to Commit. +func (d *driver) Writer(ctx context.Context, path string, append bool) (storagedriver.FileWriter, error) { + key := d.s3Path(path) + if !append { + // TODO (brianbland): cancel other uploads at this path + multi, err := d.Bucket.InitMulti(key, d.getContentType(), getPermissions(), d.getOptions()) + if err != nil { + return nil, err + } + return d.newWriter(key, multi, nil), nil + } + multis, _, err := d.Bucket.ListMulti(key, "") + if err != nil { + return nil, parseError(path, err) + } + for _, multi := range multis { + if key != multi.Key { + continue + } + parts, err := multi.ListParts() + if err != nil { + return nil, parseError(path, err) + } + var multiSize int64 + for _, part := range parts { + multiSize += part.Size + } + return d.newWriter(key, multi, parts), nil + } + return nil, storagedriver.PathNotFoundError{Path: path} +} + +// Stat retrieves the FileInfo for the given path, including the current size +// in bytes and the creation time. +func (d *driver) Stat(ctx context.Context, path string) (storagedriver.FileInfo, error) { + listResponse, err := d.Bucket.List(d.s3Path(path), "", "", 1) + if err != nil { + return nil, err + } + + fi := storagedriver.FileInfoFields{ + Path: path, + } + + if len(listResponse.Contents) == 1 { + if listResponse.Contents[0].Key != d.s3Path(path) { + fi.IsDir = true + } else { + fi.IsDir = false + fi.Size = listResponse.Contents[0].Size + + timestamp, err := time.Parse(time.RFC3339Nano, listResponse.Contents[0].LastModified) + if err != nil { + return nil, err + } + fi.ModTime = timestamp + } + } else if len(listResponse.CommonPrefixes) == 1 { + fi.IsDir = true + } else { + return nil, storagedriver.PathNotFoundError{Path: path} + } + + return storagedriver.FileInfoInternal{FileInfoFields: fi}, nil +} + +// List returns a list of the objects that are direct descendants of the given path. +func (d *driver) List(ctx context.Context, opath string) ([]string, error) { + path := opath + if path != "/" && path[len(path)-1] != '/' { + path = path + "/" + } + + // This is to cover for the cases when the rootDirectory of the driver is either "" or "/". 
+ // In those cases, there is no root prefix to replace and we must actually add a "/" to all + // results in order to keep them as valid paths as recognized by storagedriver.PathRegexp + prefix := "" + if d.s3Path("") == "" { + prefix = "/" + } + + listResponse, err := d.Bucket.List(d.s3Path(path), "/", "", listMax) + if err != nil { + return nil, parseError(opath, err) + } + + files := []string{} + directories := []string{} + + for { + for _, key := range listResponse.Contents { + files = append(files, strings.Replace(key.Key, d.s3Path(""), prefix, 1)) + } + + for _, commonPrefix := range listResponse.CommonPrefixes { + directories = append(directories, strings.Replace(commonPrefix[0:len(commonPrefix)-1], d.s3Path(""), prefix, 1)) + } + + if listResponse.IsTruncated { + listResponse, err = d.Bucket.List(d.s3Path(path), "/", listResponse.NextMarker, listMax) + if err != nil { + return nil, err + } + } else { + break + } + } + + if opath != "/" { + if len(files) == 0 && len(directories) == 0 { + // Treat empty response as missing directory, since we don't actually + // have directories in s3. + return nil, storagedriver.PathNotFoundError{Path: opath} + } + } + + return append(files, directories...), nil +} + +// Move moves an object stored at sourcePath to destPath, removing the original +// object. +func (d *driver) Move(ctx context.Context, sourcePath string, destPath string) error { + /* This is terrible, but aws doesn't have an actual move. */ + _, err := d.Bucket.PutCopy(d.s3Path(destPath), getPermissions(), + s3.CopyOptions{Options: d.getOptions(), ContentType: d.getContentType()}, d.Bucket.Name+"/"+d.s3Path(sourcePath)) + if err != nil { + return parseError(sourcePath, err) + } + + return d.Delete(ctx, sourcePath) +} + +// Delete recursively deletes all objects stored at "path" and its subpaths. +func (d *driver) Delete(ctx context.Context, path string) error { + s3Path := d.s3Path(path) + listResponse, err := d.Bucket.List(s3Path, "", "", listMax) + if err != nil || len(listResponse.Contents) == 0 { + return storagedriver.PathNotFoundError{Path: path} + } + + s3Objects := make([]s3.Object, listMax) + + for len(listResponse.Contents) > 0 { + numS3Objects := len(listResponse.Contents) + for index, key := range listResponse.Contents { + // Stop if we encounter a key that is not a subpath (so that deleting "/a" does not delete "/ab"). + if len(key.Key) > len(s3Path) && (key.Key)[len(s3Path)] != '/' { + numS3Objects = index + break + } + s3Objects[index].Key = key.Key + } + + err := d.Bucket.DelMulti(s3.Delete{Quiet: false, Objects: s3Objects[0:numS3Objects]}) + if err != nil { + return nil + } + + if numS3Objects < len(listResponse.Contents) { + return nil + } + + listResponse, err = d.Bucket.List(d.s3Path(path), "", "", listMax) + if err != nil { + return err + } + } + + return nil +} + +// URLFor returns a URL which may be used to retrieve the content stored at the given path. +// May return an UnsupportedMethodErr in certain StorageDriver implementations. 
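Both URLFor implementations in this diff accept the same option keys: "method" (GET or HEAD) and "expiry" (a time.Time). A small sketch of requesting a temporary GET URL through the generic interface (hypothetical path, not part of the vendored code):

package example

import (
	"time"

	"github.com/docker/distribution/context"
	storagedriver "github.com/docker/distribution/registry/storage/driver"
)

// presignedGet asks a driver for a short-lived GET URL for path, using the
// "method" and "expiry" option keys handled by the URLFor implementations here.
func presignedGet(d storagedriver.StorageDriver, path string) (string, error) {
	ctx := context.Background()
	return d.URLFor(ctx, path, map[string]interface{}{
		"method": "GET",
		"expiry": time.Now().Add(15 * time.Minute),
	})
}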
+func (d *driver) URLFor(ctx context.Context, path string, options map[string]interface{}) (string, error) { + methodString := "GET" + method, ok := options["method"] + if ok { + methodString, ok = method.(string) + if !ok || (methodString != "GET" && methodString != "HEAD") { + return "", storagedriver.ErrUnsupportedMethod{} + } + } + + expiresTime := time.Now().Add(20 * time.Minute) + expires, ok := options["expiry"] + if ok { + et, ok := expires.(time.Time) + if ok { + expiresTime = et + } + } + + return d.Bucket.SignedURLWithMethod(methodString, d.s3Path(path), expiresTime, nil, nil), nil +} + +func (d *driver) s3Path(path string) string { + return strings.TrimLeft(strings.TrimRight(d.RootDirectory, "/")+path, "/") +} + +// S3BucketKey returns the s3 bucket key for the given storage driver path. +func (d *Driver) S3BucketKey(path string) string { + return d.StorageDriver.(*driver).s3Path(path) +} + +func parseError(path string, err error) error { + if s3Err, ok := err.(*s3.Error); ok && s3Err.Code == "NoSuchKey" { + return storagedriver.PathNotFoundError{Path: path} + } + + return err +} + +func hasCode(err error, code string) bool { + s3err, ok := err.(*aws.Error) + return ok && s3err.Code == code +} + +func (d *driver) getOptions() s3.Options { + return s3.Options{ + SSE: d.Encrypt, + StorageClass: d.StorageClass, + } +} + +func getPermissions() s3.ACL { + return s3.Private +} + +func (d *driver) getContentType() string { + return "application/octet-stream" +} + +// writer attempts to upload parts to S3 in a buffered fashion where the last +// part is at least as large as the chunksize, so the multipart upload could be +// cleanly resumed in the future. This is violated if Close is called after less +// than a full chunk is written. +type writer struct { + driver *driver + key string + multi *s3.Multi + parts []s3.Part + size int64 + readyPart []byte + pendingPart []byte + closed bool + committed bool + cancelled bool +} + +func (d *driver) newWriter(key string, multi *s3.Multi, parts []s3.Part) storagedriver.FileWriter { + var size int64 + for _, part := range parts { + size += part.Size + } + return &writer{ + driver: d, + key: key, + multi: multi, + parts: parts, + size: size, + } +} + +func (w *writer) Write(p []byte) (int, error) { + if w.closed { + return 0, fmt.Errorf("already closed") + } else if w.committed { + return 0, fmt.Errorf("already committed") + } else if w.cancelled { + return 0, fmt.Errorf("already cancelled") + } + + // If the last written part is smaller than minChunkSize, we need to make a + // new multipart upload :sadface: + if len(w.parts) > 0 && int(w.parts[len(w.parts)-1].Size) < minChunkSize { + err := w.multi.Complete(w.parts) + if err != nil { + w.multi.Abort() + return 0, err + } + + multi, err := w.driver.Bucket.InitMulti(w.key, w.driver.getContentType(), getPermissions(), w.driver.getOptions()) + if err != nil { + return 0, err + } + w.multi = multi + + // If the entire written file is smaller than minChunkSize, we need to make + // a new part from scratch :double sad face: + if w.size < minChunkSize { + contents, err := w.driver.Bucket.Get(w.key) + if err != nil { + return 0, err + } + w.parts = nil + w.readyPart = contents + } else { + // Otherwise we can use the old file as the new first part + _, part, err := multi.PutPartCopy(1, s3.CopyOptions{}, w.driver.Bucket.Name+"/"+w.key) + if err != nil { + return 0, err + } + w.parts = []s3.Part{part} + } + } + + var n int + + for len(p) > 0 { + // If no parts are ready to write, fill up the first part + 
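+		// (readyPart is the chunk that will be uploaded next; pendingPart
+		// buffers the chunk after it. flushPart is only triggered from here
+		// once pendingPart is also full, so every part sent to S3 is exactly
+		// ChunkSize bytes, except possibly the final part written out by
+		// Close or Commit.)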
if neededBytes := int(w.driver.ChunkSize) - len(w.readyPart); neededBytes > 0 { + if len(p) >= neededBytes { + w.readyPart = append(w.readyPart, p[:neededBytes]...) + n += neededBytes + p = p[neededBytes:] + } else { + w.readyPart = append(w.readyPart, p...) + n += len(p) + p = nil + } + } + + if neededBytes := int(w.driver.ChunkSize) - len(w.pendingPart); neededBytes > 0 { + if len(p) >= neededBytes { + w.pendingPart = append(w.pendingPart, p[:neededBytes]...) + n += neededBytes + p = p[neededBytes:] + err := w.flushPart() + if err != nil { + w.size += int64(n) + return n, err + } + } else { + w.pendingPart = append(w.pendingPart, p...) + n += len(p) + p = nil + } + } + } + w.size += int64(n) + return n, nil +} + +func (w *writer) Size() int64 { + return w.size +} + +func (w *writer) Close() error { + if w.closed { + return fmt.Errorf("already closed") + } + w.closed = true + return w.flushPart() +} + +func (w *writer) Cancel() error { + if w.closed { + return fmt.Errorf("already closed") + } else if w.committed { + return fmt.Errorf("already committed") + } + w.cancelled = true + err := w.multi.Abort() + return err +} + +func (w *writer) Commit() error { + if w.closed { + return fmt.Errorf("already closed") + } else if w.committed { + return fmt.Errorf("already committed") + } else if w.cancelled { + return fmt.Errorf("already cancelled") + } + err := w.flushPart() + if err != nil { + return err + } + w.committed = true + err = w.multi.Complete(w.parts) + if err != nil { + w.multi.Abort() + return err + } + return nil +} + +// flushPart flushes buffers to write a part to S3. +// Only called by Write (with both buffers full) and Close/Commit (always) +func (w *writer) flushPart() error { + if len(w.readyPart) == 0 && len(w.pendingPart) == 0 { + // nothing to write + return nil + } + if len(w.pendingPart) < int(w.driver.ChunkSize) { + // closing with a small pending part + // combine ready and pending to avoid writing a small part + w.readyPart = append(w.readyPart, w.pendingPart...) + w.pendingPart = nil + } + + part, err := w.multi.PutPart(len(w.parts)+1, bytes.NewReader(w.readyPart)) + if err != nil { + return err + } + w.parts = append(w.parts, part) + w.readyPart = w.pendingPart + w.pendingPart = nil + return nil +} diff --git a/vendor/github.com/docker/distribution/registry/storage/driver/s3-goamz/s3_test.go b/vendor/github.com/docker/distribution/registry/storage/driver/s3-goamz/s3_test.go new file mode 100644 index 000000000..352ec3f5c --- /dev/null +++ b/vendor/github.com/docker/distribution/registry/storage/driver/s3-goamz/s3_test.go @@ -0,0 +1,201 @@ +package s3 + +import ( + "io/ioutil" + "os" + "strconv" + "testing" + + "github.com/docker/distribution/context" + storagedriver "github.com/docker/distribution/registry/storage/driver" + "github.com/docker/distribution/registry/storage/driver/testsuites" + "github.com/docker/goamz/aws" + "github.com/docker/goamz/s3" + + "gopkg.in/check.v1" +) + +// Hook up gocheck into the "go test" runner. 
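+//
+// A hedged example of running this suite against a real bucket; these are the
+// environment variables read in init below, with placeholder values:
+//
+//	AWS_ACCESS_KEY=... AWS_SECRET_KEY=... AWS_REGION=us-east-1 \
+//	S3_BUCKET=my-test-bucket S3_ENCRYPT=true go test
+//
+// If any of them is missing, skipS3 causes the registered suite (and the two
+// tests below) to be skipped.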
+func Test(t *testing.T) { check.TestingT(t) } + +var s3DriverConstructor func(rootDirectory string, storageClass s3.StorageClass) (*Driver, error) +var skipS3 func() string + +func init() { + accessKey := os.Getenv("AWS_ACCESS_KEY") + secretKey := os.Getenv("AWS_SECRET_KEY") + bucket := os.Getenv("S3_BUCKET") + encrypt := os.Getenv("S3_ENCRYPT") + secure := os.Getenv("S3_SECURE") + v4auth := os.Getenv("S3_USE_V4_AUTH") + region := os.Getenv("AWS_REGION") + root, err := ioutil.TempDir("", "driver-") + if err != nil { + panic(err) + } + defer os.Remove(root) + + s3DriverConstructor = func(rootDirectory string, storageClass s3.StorageClass) (*Driver, error) { + encryptBool := false + if encrypt != "" { + encryptBool, err = strconv.ParseBool(encrypt) + if err != nil { + return nil, err + } + } + + secureBool := true + if secure != "" { + secureBool, err = strconv.ParseBool(secure) + if err != nil { + return nil, err + } + } + + v4AuthBool := false + if v4auth != "" { + v4AuthBool, err = strconv.ParseBool(v4auth) + if err != nil { + return nil, err + } + } + + parameters := DriverParameters{ + accessKey, + secretKey, + bucket, + aws.GetRegion(region), + encryptBool, + secureBool, + v4AuthBool, + minChunkSize, + rootDirectory, + storageClass, + driverName + "-test", + } + + return New(parameters) + } + + // Skip S3 storage driver tests if environment variable parameters are not provided + skipS3 = func() string { + if accessKey == "" || secretKey == "" || region == "" || bucket == "" || encrypt == "" { + return "Must set AWS_ACCESS_KEY, AWS_SECRET_KEY, AWS_REGION, S3_BUCKET, and S3_ENCRYPT to run S3 tests" + } + return "" + } + + testsuites.RegisterSuite(func() (storagedriver.StorageDriver, error) { + return s3DriverConstructor(root, s3.StandardStorage) + }, skipS3) +} + +func TestEmptyRootList(t *testing.T) { + if skipS3() != "" { + t.Skip(skipS3()) + } + + validRoot, err := ioutil.TempDir("", "driver-") + if err != nil { + t.Fatalf("unexpected error creating temporary directory: %v", err) + } + defer os.Remove(validRoot) + + rootedDriver, err := s3DriverConstructor(validRoot, s3.StandardStorage) + if err != nil { + t.Fatalf("unexpected error creating rooted driver: %v", err) + } + + emptyRootDriver, err := s3DriverConstructor("", s3.StandardStorage) + if err != nil { + t.Fatalf("unexpected error creating empty root driver: %v", err) + } + + slashRootDriver, err := s3DriverConstructor("/", s3.StandardStorage) + if err != nil { + t.Fatalf("unexpected error creating slash root driver: %v", err) + } + + filename := "/test" + contents := []byte("contents") + ctx := context.Background() + err = rootedDriver.PutContent(ctx, filename, contents) + if err != nil { + t.Fatalf("unexpected error creating content: %v", err) + } + defer rootedDriver.Delete(ctx, filename) + + keys, err := emptyRootDriver.List(ctx, "/") + for _, path := range keys { + if !storagedriver.PathRegexp.MatchString(path) { + t.Fatalf("unexpected string in path: %q != %q", path, storagedriver.PathRegexp) + } + } + + keys, err = slashRootDriver.List(ctx, "/") + for _, path := range keys { + if !storagedriver.PathRegexp.MatchString(path) { + t.Fatalf("unexpected string in path: %q != %q", path, storagedriver.PathRegexp) + } + } +} + +func TestStorageClass(t *testing.T) { + if skipS3() != "" { + t.Skip(skipS3()) + } + + rootDir, err := ioutil.TempDir("", "driver-") + if err != nil { + t.Fatalf("unexpected error creating temporary directory: %v", err) + } + defer os.Remove(rootDir) + + standardDriver, err := s3DriverConstructor(rootDir, 
s3.StandardStorage) + if err != nil { + t.Fatalf("unexpected error creating driver with standard storage: %v", err) + } + + rrDriver, err := s3DriverConstructor(rootDir, s3.ReducedRedundancy) + if err != nil { + t.Fatalf("unexpected error creating driver with reduced redundancy storage: %v", err) + } + + standardFilename := "/test-standard" + rrFilename := "/test-rr" + contents := []byte("contents") + ctx := context.Background() + + err = standardDriver.PutContent(ctx, standardFilename, contents) + if err != nil { + t.Fatalf("unexpected error creating content: %v", err) + } + defer standardDriver.Delete(ctx, standardFilename) + + err = rrDriver.PutContent(ctx, rrFilename, contents) + if err != nil { + t.Fatalf("unexpected error creating content: %v", err) + } + defer rrDriver.Delete(ctx, rrFilename) + + standardDriverUnwrapped := standardDriver.Base.StorageDriver.(*driver) + resp, err := standardDriverUnwrapped.Bucket.GetResponse(standardDriverUnwrapped.s3Path(standardFilename)) + if err != nil { + t.Fatalf("unexpected error retrieving standard storage file: %v", err) + } + defer resp.Body.Close() + // Amazon only populates this header value for non-standard storage classes + if storageClass := resp.Header.Get("x-amz-storage-class"); storageClass != "" { + t.Fatalf("unexpected storage class for standard file: %v", storageClass) + } + + rrDriverUnwrapped := rrDriver.Base.StorageDriver.(*driver) + resp, err = rrDriverUnwrapped.Bucket.GetResponse(rrDriverUnwrapped.s3Path(rrFilename)) + if err != nil { + t.Fatalf("unexpected error retrieving reduced-redundancy storage file: %v", err) + } + defer resp.Body.Close() + if storageClass := resp.Header.Get("x-amz-storage-class"); storageClass != string(s3.ReducedRedundancy) { + t.Fatalf("unexpected storage class for reduced-redundancy file: %v", storageClass) + } +} diff --git a/vendor/github.com/docker/distribution/registry/storage/driver/storagedriver.go b/vendor/github.com/docker/distribution/registry/storage/driver/storagedriver.go new file mode 100644 index 000000000..548a17d84 --- /dev/null +++ b/vendor/github.com/docker/distribution/registry/storage/driver/storagedriver.go @@ -0,0 +1,165 @@ +package driver + +import ( + "fmt" + "io" + "regexp" + "strconv" + "strings" + + "github.com/docker/distribution/context" +) + +// Version is a string representing the storage driver version, of the form +// Major.Minor. +// The registry must accept storage drivers with equal major version and greater +// minor version, but may not be compatible with older storage driver versions. +type Version string + +// Major returns the major (primary) component of a version. +func (version Version) Major() uint { + majorPart := strings.Split(string(version), ".")[0] + major, _ := strconv.ParseUint(majorPart, 10, 0) + return uint(major) +} + +// Minor returns the minor (secondary) component of a version. +func (version Version) Minor() uint { + minorPart := strings.Split(string(version), ".")[1] + minor, _ := strconv.ParseUint(minorPart, 10, 0) + return uint(minor) +} + +// CurrentVersion is the current storage driver Version. +const CurrentVersion Version = "0.1" + +// StorageDriver defines methods that a Storage Driver must implement for a +// filesystem-like key/value object storage. Storage Drivers are automatically +// registered via an internal registration mechanism, and generally created +// via the StorageDriverFactory interface (https://godoc.org/github.com/docker/distribution/registry/storage/driver/factory). 
+// Please see the aforementioned factory package for example code showing how to get an instance +// of a StorageDriver +type StorageDriver interface { + // Name returns the human-readable "name" of the driver, useful in error + // messages and logging. By convention, this will just be the registration + // name, but drivers may provide other information here. + Name() string + + // GetContent retrieves the content stored at "path" as a []byte. + // This should primarily be used for small objects. + GetContent(ctx context.Context, path string) ([]byte, error) + + // PutContent stores the []byte content at a location designated by "path". + // This should primarily be used for small objects. + PutContent(ctx context.Context, path string, content []byte) error + + // Reader retrieves an io.ReadCloser for the content stored at "path" + // with a given byte offset. + // May be used to resume reading a stream by providing a nonzero offset. + Reader(ctx context.Context, path string, offset int64) (io.ReadCloser, error) + + // Writer returns a FileWriter which will store the content written to it + // at the location designated by "path" after the call to Commit. + Writer(ctx context.Context, path string, append bool) (FileWriter, error) + + // Stat retrieves the FileInfo for the given path, including the current + // size in bytes and the creation time. + Stat(ctx context.Context, path string) (FileInfo, error) + + // List returns a list of the objects that are direct descendants of the + //given path. + List(ctx context.Context, path string) ([]string, error) + + // Move moves an object stored at sourcePath to destPath, removing the + // original object. + // Note: This may be no more efficient than a copy followed by a delete for + // many implementations. + Move(ctx context.Context, sourcePath string, destPath string) error + + // Delete recursively deletes all objects stored at "path" and its subpaths. + Delete(ctx context.Context, path string) error + + // URLFor returns a URL which may be used to retrieve the content stored at + // the given path, possibly using the given options. + // May return an ErrUnsupportedMethod in certain StorageDriver + // implementations. + URLFor(ctx context.Context, path string, options map[string]interface{}) (string, error) +} + +// FileWriter provides an abstraction for an opened writable file-like object in +// the storage backend. The FileWriter must flush all content written to it on +// the call to Close, but is only required to make its content readable on a +// call to Commit. +type FileWriter interface { + io.WriteCloser + + // Size returns the number of bytes written to this FileWriter. + Size() int64 + + // Cancel removes any written content from this FileWriter. + Cancel() error + + // Commit flushes all content written to this FileWriter and makes it + // available for future calls to StorageDriver.GetContent and + // StorageDriver.Reader. + Commit() error +} + +// PathRegexp is the regular expression which each file path must match. A +// file path is absolute, beginning with a slash and containing a positive +// number of path components separated by slashes, where each component is +// restricted to alphanumeric characters or a period, underscore, or +// hyphen. +var PathRegexp = regexp.MustCompile(`^(/[A-Za-z0-9._-]+)+$`) + +// ErrUnsupportedMethod may be returned in the case where a StorageDriver implementation does not support an optional method. 
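+//
+// Callers usually distinguish driver failures by concrete type rather than by
+// message; a minimal sketch:
+//
+//	switch err.(type) {
+//	case ErrUnsupportedMethod:
+//		// e.g. fall back to serving the content directly
+//	case PathNotFoundError:
+//		// treat as a missing object
+//	}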
+type ErrUnsupportedMethod struct { + DriverName string +} + +func (err ErrUnsupportedMethod) Error() string { + return fmt.Sprintf("%s: unsupported method", err.DriverName) +} + +// PathNotFoundError is returned when operating on a nonexistent path. +type PathNotFoundError struct { + Path string + DriverName string +} + +func (err PathNotFoundError) Error() string { + return fmt.Sprintf("%s: Path not found: %s", err.DriverName, err.Path) +} + +// InvalidPathError is returned when the provided path is malformed. +type InvalidPathError struct { + Path string + DriverName string +} + +func (err InvalidPathError) Error() string { + return fmt.Sprintf("%s: invalid path: %s", err.DriverName, err.Path) +} + +// InvalidOffsetError is returned when attempting to read or write from an +// invalid offset. +type InvalidOffsetError struct { + Path string + Offset int64 + DriverName string +} + +func (err InvalidOffsetError) Error() string { + return fmt.Sprintf("%s: invalid offset: %d for path: %s", err.DriverName, err.Offset, err.Path) +} + +// Error is a catch-all error type which captures an error string and +// the driver type on which it occurred. +type Error struct { + DriverName string + Enclosed error +} + +func (err Error) Error() string { + return fmt.Sprintf("%s: %s", err.DriverName, err.Enclosed) +} diff --git a/vendor/github.com/docker/distribution/registry/storage/driver/swift/swift.go b/vendor/github.com/docker/distribution/registry/storage/driver/swift/swift.go new file mode 100644 index 000000000..4b7aa4e9f --- /dev/null +++ b/vendor/github.com/docker/distribution/registry/storage/driver/swift/swift.go @@ -0,0 +1,915 @@ +// Package swift provides a storagedriver.StorageDriver implementation to +// store blobs in Openstack Swift object storage. +// +// This package leverages the ncw/swift client library for interfacing with +// Swift. +// +// It supports both TempAuth authentication and Keystone authentication +// (up to version 3). +// +// As Swift has a limit on the size of a single uploaded object (by default +// this is 5GB), the driver makes use of the Swift Large Object Support +// (http://docs.openstack.org/developer/swift/overview_large_objects.html). +// Only one container is used for both manifests and data objects. Manifests +// are stored in the 'files' pseudo directory, data objects are stored under +// 'segments'. 
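+//
+// A hedged construction sketch (the values are placeholders, not defaults):
+//
+//	d, err := swift.FromParameters(map[string]interface{}{
+//		"username":  "registry",
+//		"password":  "secret",
+//		"authurl":   "https://keystone.example.com/v3",
+//		"container": "docker-registry",
+//	})
+//
+// username, password, authurl and container are required; every other
+// parameter falls back to the defaults defined below.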
+package swift + +import ( + "bufio" + "bytes" + "crypto/rand" + "crypto/sha1" + "crypto/tls" + "encoding/hex" + "fmt" + "io" + "io/ioutil" + "net/http" + "net/url" + "strconv" + "strings" + "time" + + "github.com/mitchellh/mapstructure" + "github.com/ncw/swift" + + "github.com/docker/distribution/context" + storagedriver "github.com/docker/distribution/registry/storage/driver" + "github.com/docker/distribution/registry/storage/driver/base" + "github.com/docker/distribution/registry/storage/driver/factory" + "github.com/docker/distribution/version" +) + +const driverName = "swift" + +// defaultChunkSize defines the default size of a segment +const defaultChunkSize = 20 * 1024 * 1024 + +// minChunkSize defines the minimum size of a segment +const minChunkSize = 1 << 20 + +// contentType defines the Content-Type header associated with stored segments +const contentType = "application/octet-stream" + +// readAfterWriteTimeout defines the time we wait before an object appears after having been uploaded +var readAfterWriteTimeout = 15 * time.Second + +// readAfterWriteWait defines the time to sleep between two retries +var readAfterWriteWait = 200 * time.Millisecond + +// Parameters A struct that encapsulates all of the driver parameters after all values have been set +type Parameters struct { + Username string + Password string + AuthURL string + Tenant string + TenantID string + Domain string + DomainID string + TenantDomain string + TenantDomainID string + TrustID string + Region string + AuthVersion int + Container string + Prefix string + EndpointType string + InsecureSkipVerify bool + ChunkSize int + SecretKey string + AccessKey string + TempURLContainerKey bool + TempURLMethods []string +} + +// swiftInfo maps the JSON structure returned by Swift /info endpoint +type swiftInfo struct { + Swift struct { + Version string `mapstructure:"version"` + } + Tempurl struct { + Methods []string `mapstructure:"methods"` + } + BulkDelete struct { + MaxDeletesPerRequest int `mapstructure:"max_deletes_per_request"` + } `mapstructure:"bulk_delete"` +} + +func init() { + factory.Register(driverName, &swiftDriverFactory{}) +} + +// swiftDriverFactory implements the factory.StorageDriverFactory interface +type swiftDriverFactory struct{} + +func (factory *swiftDriverFactory) Create(parameters map[string]interface{}) (storagedriver.StorageDriver, error) { + return FromParameters(parameters) +} + +type driver struct { + Conn *swift.Connection + Container string + Prefix string + BulkDeleteSupport bool + BulkDeleteMaxDeletes int + ChunkSize int + SecretKey string + AccessKey string + TempURLContainerKey bool + TempURLMethods []string +} + +type baseEmbed struct { + base.Base +} + +// Driver is a storagedriver.StorageDriver implementation backed by Openstack Swift +// Objects are stored at absolute keys in the provided container. 
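+// Calls reach the swift-specific implementation in the unexported driver type
+// below only after passing through the embedded base.Base wrapper from the
+// driver/base package.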
+type Driver struct { + baseEmbed +} + +// FromParameters constructs a new Driver with a given parameters map +// Required parameters: +// - username +// - password +// - authurl +// - container +func FromParameters(parameters map[string]interface{}) (*Driver, error) { + params := Parameters{ + ChunkSize: defaultChunkSize, + InsecureSkipVerify: false, + } + + if err := mapstructure.Decode(parameters, ¶ms); err != nil { + return nil, err + } + + if params.Username == "" { + return nil, fmt.Errorf("No username parameter provided") + } + + if params.Password == "" { + return nil, fmt.Errorf("No password parameter provided") + } + + if params.AuthURL == "" { + return nil, fmt.Errorf("No authurl parameter provided") + } + + if params.Container == "" { + return nil, fmt.Errorf("No container parameter provided") + } + + if params.ChunkSize < minChunkSize { + return nil, fmt.Errorf("The chunksize %#v parameter should be a number that is larger than or equal to %d", params.ChunkSize, minChunkSize) + } + + return New(params) +} + +// New constructs a new Driver with the given Openstack Swift credentials and container name +func New(params Parameters) (*Driver, error) { + transport := &http.Transport{ + Proxy: http.ProxyFromEnvironment, + MaxIdleConnsPerHost: 2048, + TLSClientConfig: &tls.Config{InsecureSkipVerify: params.InsecureSkipVerify}, + } + + ct := &swift.Connection{ + UserName: params.Username, + ApiKey: params.Password, + AuthUrl: params.AuthURL, + Region: params.Region, + AuthVersion: params.AuthVersion, + UserAgent: "distribution/" + version.Version, + Tenant: params.Tenant, + TenantId: params.TenantID, + Domain: params.Domain, + DomainId: params.DomainID, + TenantDomain: params.TenantDomain, + TenantDomainId: params.TenantDomainID, + TrustId: params.TrustID, + EndpointType: swift.EndpointType(params.EndpointType), + Transport: transport, + ConnectTimeout: 60 * time.Second, + Timeout: 15 * 60 * time.Second, + } + err := ct.Authenticate() + if err != nil { + return nil, fmt.Errorf("Swift authentication failed: %s", err) + } + + if _, _, err := ct.Container(params.Container); err == swift.ContainerNotFound { + if err := ct.ContainerCreate(params.Container, nil); err != nil { + return nil, fmt.Errorf("Failed to create container %s (%s)", params.Container, err) + } + } else if err != nil { + return nil, fmt.Errorf("Failed to retrieve info about container %s (%s)", params.Container, err) + } + + d := &driver{ + Conn: ct, + Container: params.Container, + Prefix: params.Prefix, + ChunkSize: params.ChunkSize, + TempURLMethods: make([]string, 0), + AccessKey: params.AccessKey, + } + + info := swiftInfo{} + if config, err := d.Conn.QueryInfo(); err == nil { + _, d.BulkDeleteSupport = config["bulk_delete"] + + if err := mapstructure.Decode(config, &info); err == nil { + d.TempURLContainerKey = info.Swift.Version >= "2.3.0" + d.TempURLMethods = info.Tempurl.Methods + if d.BulkDeleteSupport { + d.BulkDeleteMaxDeletes = info.BulkDelete.MaxDeletesPerRequest + } + } + } else { + d.TempURLContainerKey = params.TempURLContainerKey + d.TempURLMethods = params.TempURLMethods + } + + if len(d.TempURLMethods) > 0 { + secretKey := params.SecretKey + if secretKey == "" { + secretKey, _ = generateSecret() + } + + // Since Swift 2.2.2, we can now set secret keys on containers + // in addition to the account secret keys. Use them in preference. 
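+		// (TempURLContainerKey is true when the /info endpoint reported Swift
+		// >= 2.3.0, or when the operator set it explicitly in the parameters;
+		// otherwise the account-level Temp-URL key is used. An explicitly
+		// configured secretkey replaces a mismatching stored key.)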
+ if d.TempURLContainerKey { + _, containerHeaders, err := d.Conn.Container(d.Container) + if err != nil { + return nil, fmt.Errorf("Failed to fetch container info %s (%s)", d.Container, err) + } + + d.SecretKey = containerHeaders["X-Container-Meta-Temp-Url-Key"] + if d.SecretKey == "" || (params.SecretKey != "" && d.SecretKey != params.SecretKey) { + m := swift.Metadata{} + m["temp-url-key"] = secretKey + if d.Conn.ContainerUpdate(d.Container, m.ContainerHeaders()); err == nil { + d.SecretKey = secretKey + } + } + } else { + // Use the account secret key + _, accountHeaders, err := d.Conn.Account() + if err != nil { + return nil, fmt.Errorf("Failed to fetch account info (%s)", err) + } + + d.SecretKey = accountHeaders["X-Account-Meta-Temp-Url-Key"] + if d.SecretKey == "" || (params.SecretKey != "" && d.SecretKey != params.SecretKey) { + m := swift.Metadata{} + m["temp-url-key"] = secretKey + if err := d.Conn.AccountUpdate(m.AccountHeaders()); err == nil { + d.SecretKey = secretKey + } + } + } + } + + return &Driver{ + baseEmbed: baseEmbed{ + Base: base.Base{ + StorageDriver: d, + }, + }, + }, nil +} + +// Implement the storagedriver.StorageDriver interface + +func (d *driver) Name() string { + return driverName +} + +// GetContent retrieves the content stored at "path" as a []byte. +func (d *driver) GetContent(ctx context.Context, path string) ([]byte, error) { + content, err := d.Conn.ObjectGetBytes(d.Container, d.swiftPath(path)) + if err == swift.ObjectNotFound { + return nil, storagedriver.PathNotFoundError{Path: path} + } + return content, err +} + +// PutContent stores the []byte content at a location designated by "path". +func (d *driver) PutContent(ctx context.Context, path string, contents []byte) error { + err := d.Conn.ObjectPutBytes(d.Container, d.swiftPath(path), contents, contentType) + if err == swift.ObjectNotFound { + return storagedriver.PathNotFoundError{Path: path} + } + return err +} + +// Reader retrieves an io.ReadCloser for the content stored at "path" with a +// given byte offset. +func (d *driver) Reader(ctx context.Context, path string, offset int64) (io.ReadCloser, error) { + headers := make(swift.Headers) + headers["Range"] = "bytes=" + strconv.FormatInt(offset, 10) + "-" + + waitingTime := readAfterWriteWait + endTime := time.Now().Add(readAfterWriteTimeout) + + for { + file, headers, err := d.Conn.ObjectOpen(d.Container, d.swiftPath(path), false, headers) + if err != nil { + if err == swift.ObjectNotFound { + return nil, storagedriver.PathNotFoundError{Path: path} + } + if swiftErr, ok := err.(*swift.Error); ok && swiftErr.StatusCode == http.StatusRequestedRangeNotSatisfiable { + return ioutil.NopCloser(bytes.NewReader(nil)), nil + } + return file, err + } + + //if this is a DLO and it is clear that segments are still missing, + //wait until they show up + _, isDLO := headers["X-Object-Manifest"] + size, err := file.Length() + if err != nil { + return file, err + } + if isDLO && size == 0 { + if time.Now().Add(waitingTime).After(endTime) { + return nil, fmt.Errorf("Timeout expired while waiting for segments of %s to show up", path) + } + time.Sleep(waitingTime) + waitingTime *= 2 + continue + } + + //if not, then this reader will be fine + return file, nil + } +} + +// Writer returns a FileWriter which will store the content written to it +// at the location designated by "path" after the call to Commit. 
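+//
+// When append is true the existing object is reused: if it already carries an
+// X-Object-Manifest header its segments are listed and writing continues
+// after them; otherwise the object itself is moved under the segments prefix
+// and becomes the first segment of a new large object.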
+func (d *driver) Writer(ctx context.Context, path string, append bool) (storagedriver.FileWriter, error) { + var ( + segments []swift.Object + segmentsPath string + err error + ) + + if !append { + segmentsPath, err = d.swiftSegmentPath(path) + if err != nil { + return nil, err + } + } else { + info, headers, err := d.Conn.Object(d.Container, d.swiftPath(path)) + if err == swift.ObjectNotFound { + return nil, storagedriver.PathNotFoundError{Path: path} + } else if err != nil { + return nil, err + } + manifest, ok := headers["X-Object-Manifest"] + if !ok { + segmentsPath, err = d.swiftSegmentPath(path) + if err != nil { + return nil, err + } + if err := d.Conn.ObjectMove(d.Container, d.swiftPath(path), d.Container, getSegmentPath(segmentsPath, len(segments))); err != nil { + return nil, err + } + segments = []swift.Object{info} + } else { + _, segmentsPath = parseManifest(manifest) + if segments, err = d.getAllSegments(segmentsPath); err != nil { + return nil, err + } + } + } + + return d.newWriter(path, segmentsPath, segments), nil +} + +// Stat retrieves the FileInfo for the given path, including the current size +// in bytes and the creation time. +func (d *driver) Stat(ctx context.Context, path string) (storagedriver.FileInfo, error) { + swiftPath := d.swiftPath(path) + opts := &swift.ObjectsOpts{ + Prefix: swiftPath, + Delimiter: '/', + } + + objects, err := d.Conn.ObjectsAll(d.Container, opts) + if err != nil { + if err == swift.ContainerNotFound { + return nil, storagedriver.PathNotFoundError{Path: path} + } + return nil, err + } + + fi := storagedriver.FileInfoFields{ + Path: strings.TrimPrefix(strings.TrimSuffix(swiftPath, "/"), d.swiftPath("/")), + } + + for _, obj := range objects { + if obj.PseudoDirectory && obj.Name == swiftPath+"/" { + fi.IsDir = true + return storagedriver.FileInfoInternal{FileInfoFields: fi}, nil + } else if obj.Name == swiftPath { + // The file exists. But on Swift 1.12, the 'bytes' field is always 0 so + // we need to do a separate HEAD request. + break + } + } + + //Don't trust an empty `objects` slice. A container listing can be + //outdated. For files, we can make a HEAD request on the object which + //reports existence (at least) much more reliably. + waitingTime := readAfterWriteWait + endTime := time.Now().Add(readAfterWriteTimeout) + + for { + info, headers, err := d.Conn.Object(d.Container, swiftPath) + if err != nil { + if err == swift.ObjectNotFound { + return nil, storagedriver.PathNotFoundError{Path: path} + } + return nil, err + } + + //if this is a DLO and it is clear that segments are still missing, + //wait until they show up + _, isDLO := headers["X-Object-Manifest"] + if isDLO && info.Bytes == 0 { + if time.Now().Add(waitingTime).After(endTime) { + return nil, fmt.Errorf("Timeout expired while waiting for segments of %s to show up", path) + } + time.Sleep(waitingTime) + waitingTime *= 2 + continue + } + + //otherwise, accept the result + fi.IsDir = false + fi.Size = info.Bytes + fi.ModTime = info.LastModified + return storagedriver.FileInfoInternal{FileInfoFields: fi}, nil + } +} + +// List returns a list of the objects that are direct descendants of the given path. 
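+// A trailing "/" is appended to the prefix and "/" is used as the delimiter,
+// so only direct children are returned. Because Swift has no real
+// directories, an empty listing for any path other than "/" is reported as a
+// PathNotFoundError.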
+func (d *driver) List(ctx context.Context, path string) ([]string, error) { + var files []string + + prefix := d.swiftPath(path) + if prefix != "" { + prefix += "/" + } + + opts := &swift.ObjectsOpts{ + Prefix: prefix, + Delimiter: '/', + } + + objects, err := d.Conn.ObjectsAll(d.Container, opts) + for _, obj := range objects { + files = append(files, strings.TrimPrefix(strings.TrimSuffix(obj.Name, "/"), d.swiftPath("/"))) + } + + if err == swift.ContainerNotFound || (len(objects) == 0 && path != "/") { + return files, storagedriver.PathNotFoundError{Path: path} + } + return files, err +} + +// Move moves an object stored at sourcePath to destPath, removing the original +// object. +func (d *driver) Move(ctx context.Context, sourcePath string, destPath string) error { + _, headers, err := d.Conn.Object(d.Container, d.swiftPath(sourcePath)) + if err == nil { + if manifest, ok := headers["X-Object-Manifest"]; ok { + if err = d.createManifest(destPath, manifest); err != nil { + return err + } + err = d.Conn.ObjectDelete(d.Container, d.swiftPath(sourcePath)) + } else { + err = d.Conn.ObjectMove(d.Container, d.swiftPath(sourcePath), d.Container, d.swiftPath(destPath)) + } + } + if err == swift.ObjectNotFound { + return storagedriver.PathNotFoundError{Path: sourcePath} + } + return err +} + +// Delete recursively deletes all objects stored at "path" and its subpaths. +func (d *driver) Delete(ctx context.Context, path string) error { + opts := swift.ObjectsOpts{ + Prefix: d.swiftPath(path) + "/", + } + + objects, err := d.Conn.ObjectsAll(d.Container, &opts) + if err != nil { + if err == swift.ContainerNotFound { + return storagedriver.PathNotFoundError{Path: path} + } + return err + } + + for _, obj := range objects { + if obj.PseudoDirectory { + continue + } + if _, headers, err := d.Conn.Object(d.Container, obj.Name); err == nil { + manifest, ok := headers["X-Object-Manifest"] + if ok { + _, prefix := parseManifest(manifest) + segments, err := d.getAllSegments(prefix) + if err != nil { + return err + } + objects = append(objects, segments...) + } + } else { + if err == swift.ObjectNotFound { + return storagedriver.PathNotFoundError{Path: obj.Name} + } + return err + } + } + + if d.BulkDeleteSupport && len(objects) > 0 && d.BulkDeleteMaxDeletes > 0 { + filenames := make([]string, len(objects)) + for i, obj := range objects { + filenames[i] = obj.Name + } + + chunks, err := chunkFilenames(filenames, d.BulkDeleteMaxDeletes) + if err != nil { + return err + } + for _, chunk := range chunks { + _, err := d.Conn.BulkDelete(d.Container, chunk) + // Don't fail on ObjectNotFound because eventual consistency + // makes this situation normal. 
+ if err != nil && err != swift.Forbidden && err != swift.ObjectNotFound { + if err == swift.ContainerNotFound { + return storagedriver.PathNotFoundError{Path: path} + } + return err + } + } + } else { + for _, obj := range objects { + if err := d.Conn.ObjectDelete(d.Container, obj.Name); err != nil { + if err == swift.ObjectNotFound { + return storagedriver.PathNotFoundError{Path: obj.Name} + } + return err + } + } + } + + _, _, err = d.Conn.Object(d.Container, d.swiftPath(path)) + if err == nil { + if err := d.Conn.ObjectDelete(d.Container, d.swiftPath(path)); err != nil { + if err == swift.ObjectNotFound { + return storagedriver.PathNotFoundError{Path: path} + } + return err + } + } else if err == swift.ObjectNotFound { + if len(objects) == 0 { + return storagedriver.PathNotFoundError{Path: path} + } + } else { + return err + } + return nil +} + +// URLFor returns a URL which may be used to retrieve the content stored at the given path. +func (d *driver) URLFor(ctx context.Context, path string, options map[string]interface{}) (string, error) { + if d.SecretKey == "" { + return "", storagedriver.ErrUnsupportedMethod{} + } + + methodString := "GET" + method, ok := options["method"] + if ok { + if methodString, ok = method.(string); !ok { + return "", storagedriver.ErrUnsupportedMethod{} + } + } + + if methodString == "HEAD" { + // A "HEAD" request on a temporary URL is allowed if the + // signature was generated with "GET", "POST" or "PUT" + methodString = "GET" + } + + supported := false + for _, method := range d.TempURLMethods { + if method == methodString { + supported = true + break + } + } + + if !supported { + return "", storagedriver.ErrUnsupportedMethod{} + } + + expiresTime := time.Now().Add(20 * time.Minute) + expires, ok := options["expiry"] + if ok { + et, ok := expires.(time.Time) + if ok { + expiresTime = et + } + } + + tempURL := d.Conn.ObjectTempUrl(d.Container, d.swiftPath(path), d.SecretKey, methodString, expiresTime) + + if d.AccessKey != "" { + // On HP Cloud, the signature must be in the form of tenant_id:access_key:signature + url, _ := url.Parse(tempURL) + query := url.Query() + query.Set("temp_url_sig", fmt.Sprintf("%s:%s:%s", d.Conn.TenantId, d.AccessKey, query.Get("temp_url_sig"))) + url.RawQuery = query.Encode() + tempURL = url.String() + } + + return tempURL, nil +} + +func (d *driver) swiftPath(path string) string { + return strings.TrimLeft(strings.TrimRight(d.Prefix+"/files"+path, "/"), "/") +} + +func (d *driver) swiftSegmentPath(path string) (string, error) { + checksum := sha1.New() + random := make([]byte, 32) + if _, err := rand.Read(random); err != nil { + return "", err + } + path = hex.EncodeToString(checksum.Sum(append([]byte(path), random...))) + return strings.TrimLeft(strings.TrimRight(d.Prefix+"/segments/"+path[0:3]+"/"+path[3:], "/"), "/"), nil +} + +func (d *driver) getAllSegments(path string) ([]swift.Object, error) { + //a simple container listing works 99.9% of the time + segments, err := d.Conn.ObjectsAll(d.Container, &swift.ObjectsOpts{Prefix: path}) + if err != nil { + if err == swift.ContainerNotFound { + return nil, storagedriver.PathNotFoundError{Path: path} + } + return nil, err + } + + //build a lookup table by object name + hasObjectName := make(map[string]struct{}) + for _, segment := range segments { + hasObjectName[segment.Name] = struct{}{} + } + + //The container listing might be outdated (i.e. not contain all existing + //segment objects yet) because of temporary inconsistency (Swift is only + //eventually consistent!). 
Check its completeness. + segmentNumber := 0 + for { + segmentNumber++ + segmentPath := getSegmentPath(path, segmentNumber) + + if _, seen := hasObjectName[segmentPath]; seen { + continue + } + + //This segment is missing in the container listing. Use a more reliable + //request to check its existence. (HEAD requests on segments are + //guaranteed to return the correct metadata, except for the pathological + //case of an outage of large parts of the Swift cluster or its network, + //since every segment is only written once.) + segment, _, err := d.Conn.Object(d.Container, segmentPath) + switch err { + case nil: + //found new segment -> keep going, more might be missing + segments = append(segments, segment) + continue + case swift.ObjectNotFound: + //This segment is missing. Since we upload segments sequentially, + //there won't be any more segments after it. + return segments, nil + default: + return nil, err //unexpected error + } + } +} + +func (d *driver) createManifest(path string, segments string) error { + headers := make(swift.Headers) + headers["X-Object-Manifest"] = segments + manifest, err := d.Conn.ObjectCreate(d.Container, d.swiftPath(path), false, "", contentType, headers) + if err != nil { + if err == swift.ObjectNotFound { + return storagedriver.PathNotFoundError{Path: path} + } + return err + } + if err := manifest.Close(); err != nil { + if err == swift.ObjectNotFound { + return storagedriver.PathNotFoundError{Path: path} + } + return err + } + return nil +} + +func chunkFilenames(slice []string, maxSize int) (chunks [][]string, err error) { + if maxSize > 0 { + for offset := 0; offset < len(slice); offset += maxSize { + chunkSize := maxSize + if offset+chunkSize > len(slice) { + chunkSize = len(slice) - offset + } + chunks = append(chunks, slice[offset:offset+chunkSize]) + } + } else { + return nil, fmt.Errorf("Max chunk size must be > 0") + } + return +} + +func parseManifest(manifest string) (container string, prefix string) { + components := strings.SplitN(manifest, "/", 2) + container = components[0] + if len(components) > 1 { + prefix = components[1] + } + return container, prefix +} + +func generateSecret() (string, error) { + var secretBytes [32]byte + if _, err := rand.Read(secretBytes[:]); err != nil { + return "", fmt.Errorf("could not generate random bytes for Swift secret key: %v", err) + } + return hex.EncodeToString(secretBytes[:]), nil +} + +func getSegmentPath(segmentsPath string, partNumber int) string { + return fmt.Sprintf("%s/%016d", segmentsPath, partNumber) +} + +type writer struct { + driver *driver + path string + segmentsPath string + size int64 + bw *bufio.Writer + closed bool + committed bool + cancelled bool +} + +func (d *driver) newWriter(path, segmentsPath string, segments []swift.Object) storagedriver.FileWriter { + var size int64 + for _, segment := range segments { + size += segment.Bytes + } + return &writer{ + driver: d, + path: path, + segmentsPath: segmentsPath, + size: size, + bw: bufio.NewWriterSize(&segmentWriter{ + conn: d.Conn, + container: d.Container, + segmentsPath: segmentsPath, + segmentNumber: len(segments) + 1, + maxChunkSize: d.ChunkSize, + }, d.ChunkSize), + } +} + +func (w *writer) Write(p []byte) (int, error) { + if w.closed { + return 0, fmt.Errorf("already closed") + } else if w.committed { + return 0, fmt.Errorf("already committed") + } else if w.cancelled { + return 0, fmt.Errorf("already cancelled") + } + + n, err := w.bw.Write(p) + w.size += int64(n) + return n, err +} + +func (w *writer) Size() int64 { + return 
w.size +} + +func (w *writer) Close() error { + if w.closed { + return fmt.Errorf("already closed") + } + + if err := w.bw.Flush(); err != nil { + return err + } + + if !w.committed && !w.cancelled { + if err := w.driver.createManifest(w.path, w.driver.Container+"/"+w.segmentsPath); err != nil { + return err + } + if err := w.waitForSegmentsToShowUp(); err != nil { + return err + } + } + w.closed = true + + return nil +} + +func (w *writer) Cancel() error { + if w.closed { + return fmt.Errorf("already closed") + } else if w.committed { + return fmt.Errorf("already committed") + } + w.cancelled = true + return w.driver.Delete(context.Background(), w.path) +} + +func (w *writer) Commit() error { + if w.closed { + return fmt.Errorf("already closed") + } else if w.committed { + return fmt.Errorf("already committed") + } else if w.cancelled { + return fmt.Errorf("already cancelled") + } + + if err := w.bw.Flush(); err != nil { + return err + } + + if err := w.driver.createManifest(w.path, w.driver.Container+"/"+w.segmentsPath); err != nil { + return err + } + + w.committed = true + return w.waitForSegmentsToShowUp() +} + +func (w *writer) waitForSegmentsToShowUp() error { + var err error + waitingTime := readAfterWriteWait + endTime := time.Now().Add(readAfterWriteTimeout) + + for { + var info swift.Object + if info, _, err = w.driver.Conn.Object(w.driver.Container, w.driver.swiftPath(w.path)); err == nil { + if info.Bytes == w.size { + break + } + err = fmt.Errorf("Timeout expired while waiting for segments of %s to show up", w.path) + } + if time.Now().Add(waitingTime).After(endTime) { + break + } + time.Sleep(waitingTime) + waitingTime *= 2 + } + + return err +} + +type segmentWriter struct { + conn *swift.Connection + container string + segmentsPath string + segmentNumber int + maxChunkSize int +} + +func (sw *segmentWriter) Write(p []byte) (int, error) { + n := 0 + for offset := 0; offset < len(p); offset += sw.maxChunkSize { + chunkSize := sw.maxChunkSize + if offset+chunkSize > len(p) { + chunkSize = len(p) - offset + } + _, err := sw.conn.ObjectPut(sw.container, getSegmentPath(sw.segmentsPath, sw.segmentNumber), bytes.NewReader(p[offset:offset+chunkSize]), false, "", contentType, nil) + if err != nil { + return n, err + } + + sw.segmentNumber++ + n += chunkSize + } + + return n, nil +} diff --git a/vendor/github.com/docker/distribution/registry/storage/driver/swift/swift_test.go b/vendor/github.com/docker/distribution/registry/storage/driver/swift/swift_test.go new file mode 100644 index 000000000..dcd5e4ff8 --- /dev/null +++ b/vendor/github.com/docker/distribution/registry/storage/driver/swift/swift_test.go @@ -0,0 +1,245 @@ +package swift + +import ( + "io/ioutil" + "os" + "reflect" + "strconv" + "strings" + "testing" + + "github.com/ncw/swift/swifttest" + + "github.com/docker/distribution/context" + storagedriver "github.com/docker/distribution/registry/storage/driver" + "github.com/docker/distribution/registry/storage/driver/testsuites" + + "gopkg.in/check.v1" +) + +// Hook up gocheck into the "go test" runner. 
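+//
+// If SWIFT_USERNAME, SWIFT_PASSWORD, SWIFT_AUTH_URL and SWIFT_CONTAINER_NAME
+// are not all set, init below starts an in-process swifttest server, so this
+// suite can also run without real Swift credentials.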
+func Test(t *testing.T) { check.TestingT(t) } + +var swiftDriverConstructor func(prefix string) (*Driver, error) + +func init() { + var ( + username string + password string + authURL string + tenant string + tenantID string + domain string + domainID string + tenantDomain string + tenantDomainID string + trustID string + container string + region string + AuthVersion int + endpointType string + insecureSkipVerify bool + secretKey string + accessKey string + containerKey bool + tempURLMethods []string + + swiftServer *swifttest.SwiftServer + err error + ) + username = os.Getenv("SWIFT_USERNAME") + password = os.Getenv("SWIFT_PASSWORD") + authURL = os.Getenv("SWIFT_AUTH_URL") + tenant = os.Getenv("SWIFT_TENANT_NAME") + tenantID = os.Getenv("SWIFT_TENANT_ID") + domain = os.Getenv("SWIFT_DOMAIN_NAME") + domainID = os.Getenv("SWIFT_DOMAIN_ID") + tenantDomain = os.Getenv("SWIFT_DOMAIN_NAME") + tenantDomainID = os.Getenv("SWIFT_DOMAIN_ID") + trustID = os.Getenv("SWIFT_TRUST_ID") + container = os.Getenv("SWIFT_CONTAINER_NAME") + region = os.Getenv("SWIFT_REGION_NAME") + AuthVersion, _ = strconv.Atoi(os.Getenv("SWIFT_AUTH_VERSION")) + endpointType = os.Getenv("SWIFT_ENDPOINT_TYPE") + insecureSkipVerify, _ = strconv.ParseBool(os.Getenv("SWIFT_INSECURESKIPVERIFY")) + secretKey = os.Getenv("SWIFT_SECRET_KEY") + accessKey = os.Getenv("SWIFT_ACCESS_KEY") + containerKey, _ = strconv.ParseBool(os.Getenv("SWIFT_TEMPURL_CONTAINERKEY")) + tempURLMethods = strings.Split(os.Getenv("SWIFT_TEMPURL_METHODS"), ",") + + if username == "" || password == "" || authURL == "" || container == "" { + if swiftServer, err = swifttest.NewSwiftServer("localhost"); err != nil { + panic(err) + } + username = "swifttest" + password = "swifttest" + authURL = swiftServer.AuthURL + container = "test" + } + + prefix, err := ioutil.TempDir("", "driver-") + if err != nil { + panic(err) + } + defer os.Remove(prefix) + + swiftDriverConstructor = func(root string) (*Driver, error) { + parameters := Parameters{ + username, + password, + authURL, + tenant, + tenantID, + domain, + domainID, + tenantDomain, + tenantDomainID, + trustID, + region, + AuthVersion, + container, + root, + endpointType, + insecureSkipVerify, + defaultChunkSize, + secretKey, + accessKey, + containerKey, + tempURLMethods, + } + + return New(parameters) + } + + driverConstructor := func() (storagedriver.StorageDriver, error) { + return swiftDriverConstructor(prefix) + } + + testsuites.RegisterSuite(driverConstructor, testsuites.NeverSkip) +} + +func TestEmptyRootList(t *testing.T) { + validRoot, err := ioutil.TempDir("", "driver-") + if err != nil { + t.Fatalf("unexpected error creating temporary directory: %v", err) + } + defer os.Remove(validRoot) + + rootedDriver, err := swiftDriverConstructor(validRoot) + if err != nil { + t.Fatalf("unexpected error creating rooted driver: %v", err) + } + + emptyRootDriver, err := swiftDriverConstructor("") + if err != nil { + t.Fatalf("unexpected error creating empty root driver: %v", err) + } + + slashRootDriver, err := swiftDriverConstructor("/") + if err != nil { + t.Fatalf("unexpected error creating slash root driver: %v", err) + } + + filename := "/test" + contents := []byte("contents") + ctx := context.Background() + err = rootedDriver.PutContent(ctx, filename, contents) + if err != nil { + t.Fatalf("unexpected error creating content: %v", err) + } + + keys, err := emptyRootDriver.List(ctx, "/") + for _, path := range keys { + if !storagedriver.PathRegexp.MatchString(path) { + t.Fatalf("unexpected string in path: %q != 
%q", path, storagedriver.PathRegexp) + } + } + + keys, err = slashRootDriver.List(ctx, "/") + for _, path := range keys { + if !storagedriver.PathRegexp.MatchString(path) { + t.Fatalf("unexpected string in path: %q != %q", path, storagedriver.PathRegexp) + } + } + + // Create an object with a path nested under the existing object + err = rootedDriver.PutContent(ctx, filename+"/file1", contents) + if err != nil { + t.Fatalf("unexpected error creating content: %v", err) + } + + err = rootedDriver.Delete(ctx, filename) + if err != nil { + t.Fatalf("failed to delete: %v", err) + } + + keys, err = rootedDriver.List(ctx, "/") + if err != nil { + t.Fatalf("failed to list objects after deletion: %v", err) + } + + if len(keys) != 0 { + t.Fatal("delete did not remove nested objects") + } +} + +func TestFilenameChunking(t *testing.T) { + // Test valid input and sizes + input := []string{"a", "b", "c", "d", "e"} + expecteds := [][][]string{ + { + {"a"}, + {"b"}, + {"c"}, + {"d"}, + {"e"}, + }, + { + {"a", "b"}, + {"c", "d"}, + {"e"}, + }, + { + {"a", "b", "c"}, + {"d", "e"}, + }, + { + {"a", "b", "c", "d"}, + {"e"}, + }, + { + {"a", "b", "c", "d", "e"}, + }, + { + {"a", "b", "c", "d", "e"}, + }, + } + for i, expected := range expecteds { + actual, err := chunkFilenames(input, i+1) + if !reflect.DeepEqual(actual, expected) { + t.Fatalf("chunk %v didn't match expected value %v", actual, expected) + } + if err != nil { + t.Fatalf("unexpected error chunking filenames: %v", err) + } + } + + // Test nil input + actual, err := chunkFilenames(nil, 5) + if len(actual) != 0 { + t.Fatal("chunks were returned when passed nil") + } + if err != nil { + t.Fatalf("unexpected error chunking filenames: %v", err) + } + + // Test 0 and < 0 sizes + actual, err = chunkFilenames(nil, 0) + if err == nil { + t.Fatal("expected error for size = 0") + } + actual, err = chunkFilenames(nil, -1) + if err == nil { + t.Fatal("expected error for size = -1") + } +} diff --git a/vendor/github.com/docker/distribution/registry/storage/driver/testdriver/testdriver.go b/vendor/github.com/docker/distribution/registry/storage/driver/testdriver/testdriver.go new file mode 100644 index 000000000..988e5d33b --- /dev/null +++ b/vendor/github.com/docker/distribution/registry/storage/driver/testdriver/testdriver.go @@ -0,0 +1,71 @@ +package testdriver + +import ( + "github.com/docker/distribution/context" + storagedriver "github.com/docker/distribution/registry/storage/driver" + "github.com/docker/distribution/registry/storage/driver/factory" + "github.com/docker/distribution/registry/storage/driver/inmemory" +) + +const driverName = "testdriver" + +func init() { + factory.Register(driverName, &testDriverFactory{}) +} + +// testDriverFactory implements the factory.StorageDriverFactory interface. +type testDriverFactory struct{} + +func (factory *testDriverFactory) Create(parameters map[string]interface{}) (storagedriver.StorageDriver, error) { + return New(), nil +} + +// TestDriver is a StorageDriver for testing purposes. The Writer returned by this driver +// simulates the case where Write operations are buffered. This causes the value returned by Size to lag +// behind until Close (or Commit, or Cancel) is called. +type TestDriver struct { + storagedriver.StorageDriver +} + +type testFileWriter struct { + storagedriver.FileWriter + prevchunk []byte +} + +var _ storagedriver.StorageDriver = &TestDriver{} + +// New constructs a new StorageDriver for testing purposes. 
The Writer returned by this driver +// simulates the case where Write operations are buffered. This causes the value returned by Size to lag +// behind until Close (or Commit, or Cancel) is called. +func New() *TestDriver { + return &TestDriver{StorageDriver: inmemory.New()} +} + +// Writer returns a FileWriter which will store the content written to it +// at the location designated by "path" after the call to Commit. +func (td *TestDriver) Writer(ctx context.Context, path string, append bool) (storagedriver.FileWriter, error) { + fw, err := td.StorageDriver.Writer(ctx, path, append) + return &testFileWriter{FileWriter: fw}, err +} + +func (tfw *testFileWriter) Write(p []byte) (int, error) { + _, err := tfw.FileWriter.Write(tfw.prevchunk) + tfw.prevchunk = make([]byte, len(p)) + copy(tfw.prevchunk, p) + return len(p), err +} + +func (tfw *testFileWriter) Close() error { + tfw.Write(nil) + return tfw.FileWriter.Close() +} + +func (tfw *testFileWriter) Cancel() error { + tfw.Write(nil) + return tfw.FileWriter.Cancel() +} + +func (tfw *testFileWriter) Commit() error { + tfw.Write(nil) + return tfw.FileWriter.Commit() +} diff --git a/vendor/github.com/docker/distribution/registry/storage/driver/testsuites/testsuites.go b/vendor/github.com/docker/distribution/registry/storage/driver/testsuites/testsuites.go new file mode 100644 index 000000000..d8afe0c85 --- /dev/null +++ b/vendor/github.com/docker/distribution/registry/storage/driver/testsuites/testsuites.go @@ -0,0 +1,1273 @@ +package testsuites + +import ( + "bytes" + "crypto/sha1" + "io" + "io/ioutil" + "math/rand" + "net/http" + "os" + "path" + "sort" + "strings" + "sync" + "testing" + "time" + + "gopkg.in/check.v1" + + "github.com/docker/distribution/context" + storagedriver "github.com/docker/distribution/registry/storage/driver" +) + +// Test hooks up gocheck into the "go test" runner. +func Test(t *testing.T) { check.TestingT(t) } + +// RegisterSuite registers an in-process storage driver test suite with +// the go test runner. +func RegisterSuite(driverConstructor DriverConstructor, skipCheck SkipCheck) { + check.Suite(&DriverSuite{ + Constructor: driverConstructor, + SkipCheck: skipCheck, + ctx: context.Background(), + }) +} + +// SkipCheck is a function used to determine if a test suite should be skipped. +// If a SkipCheck returns a non-empty skip reason, the suite is skipped with +// the given reason. +type SkipCheck func() (reason string) + +// NeverSkip is a default SkipCheck which never skips the suite. +var NeverSkip SkipCheck = func() string { return "" } + +// DriverConstructor is a function which returns a new +// storagedriver.StorageDriver. +type DriverConstructor func() (storagedriver.StorageDriver, error) + +// DriverTeardown is a function which cleans up a suite's +// storagedriver.StorageDriver. +type DriverTeardown func() error + +// DriverSuite is a gocheck test suite designed to test a +// storagedriver.StorageDriver. The intended way to create a DriverSuite is +// with RegisterSuite. +type DriverSuite struct { + Constructor DriverConstructor + Teardown DriverTeardown + SkipCheck + storagedriver.StorageDriver + ctx context.Context +} + +// SetUpSuite sets up the gocheck test suite. +func (suite *DriverSuite) SetUpSuite(c *check.C) { + if reason := suite.SkipCheck(); reason != "" { + c.Skip(reason) + } + d, err := suite.Constructor() + c.Assert(err, check.IsNil) + suite.StorageDriver = d +} + +// TearDownSuite tears down the gocheck test suite. 
+func (suite *DriverSuite) TearDownSuite(c *check.C) { + if suite.Teardown != nil { + err := suite.Teardown() + c.Assert(err, check.IsNil) + } +} + +// TearDownTest tears down the gocheck test. +// This causes the suite to abort if any files are left around in the storage +// driver. +func (suite *DriverSuite) TearDownTest(c *check.C) { + files, _ := suite.StorageDriver.List(suite.ctx, "/") + if len(files) > 0 { + c.Fatalf("Storage driver did not clean up properly. Offending files: %#v", files) + } +} + +// TestRootExists ensures that all storage drivers have a root path by default. +func (suite *DriverSuite) TestRootExists(c *check.C) { + _, err := suite.StorageDriver.List(suite.ctx, "/") + if err != nil { + c.Fatalf(`the root path "/" should always exist: %v`, err) + } +} + +// TestValidPaths checks that various valid file paths are accepted by the +// storage driver. +func (suite *DriverSuite) TestValidPaths(c *check.C) { + contents := randomContents(64) + validFiles := []string{ + "/a", + "/2", + "/aa", + "/a.a", + "/0-9/abcdefg", + "/abcdefg/z.75", + "/abc/1.2.3.4.5-6_zyx/123.z/4", + "/docker/docker-registry", + "/123.abc", + "/abc./abc", + "/.abc", + "/a--b", + "/a-.b", + "/_.abc", + "/Docker/docker-registry", + "/Abc/Cba"} + + for _, filename := range validFiles { + err := suite.StorageDriver.PutContent(suite.ctx, filename, contents) + defer suite.deletePath(c, firstPart(filename)) + c.Assert(err, check.IsNil) + + received, err := suite.StorageDriver.GetContent(suite.ctx, filename) + c.Assert(err, check.IsNil) + c.Assert(received, check.DeepEquals, contents) + } +} + +func (suite *DriverSuite) deletePath(c *check.C, path string) { + for tries := 2; tries > 0; tries-- { + err := suite.StorageDriver.Delete(suite.ctx, path) + if _, ok := err.(storagedriver.PathNotFoundError); ok { + err = nil + } + c.Assert(err, check.IsNil) + paths, err := suite.StorageDriver.List(suite.ctx, path) + if len(paths) == 0 { + break + } + time.Sleep(time.Second * 2) + } +} + +// TestInvalidPaths checks that various invalid file paths are rejected by the +// storage driver. +func (suite *DriverSuite) TestInvalidPaths(c *check.C) { + contents := randomContents(64) + invalidFiles := []string{ + "", + "/", + "abc", + "123.abc", + "//bcd", + "/abc_123/"} + + for _, filename := range invalidFiles { + err := suite.StorageDriver.PutContent(suite.ctx, filename, contents) + // only delete if file was successfully written + if err == nil { + defer suite.deletePath(c, firstPart(filename)) + } + c.Assert(err, check.NotNil) + c.Assert(err, check.FitsTypeOf, storagedriver.InvalidPathError{}) + c.Assert(strings.Contains(err.Error(), suite.Name()), check.Equals, true) + + _, err = suite.StorageDriver.GetContent(suite.ctx, filename) + c.Assert(err, check.NotNil) + c.Assert(err, check.FitsTypeOf, storagedriver.InvalidPathError{}) + c.Assert(strings.Contains(err.Error(), suite.Name()), check.Equals, true) + } +} + +// TestWriteRead1 tests a simple write-read workflow. +func (suite *DriverSuite) TestWriteRead1(c *check.C) { + filename := randomPath(32) + contents := []byte("a") + suite.writeReadCompare(c, filename, contents) +} + +// TestWriteRead2 tests a simple write-read workflow with unicode data. +func (suite *DriverSuite) TestWriteRead2(c *check.C) { + filename := randomPath(32) + contents := []byte("\xc3\x9f") + suite.writeReadCompare(c, filename, contents) +} + +// TestWriteRead3 tests a simple write-read workflow with a small string. 
+func (suite *DriverSuite) TestWriteRead3(c *check.C) { + filename := randomPath(32) + contents := randomContents(32) + suite.writeReadCompare(c, filename, contents) +} + +// TestWriteRead4 tests a simple write-read workflow with 1MB of data. +func (suite *DriverSuite) TestWriteRead4(c *check.C) { + filename := randomPath(32) + contents := randomContents(1024 * 1024) + suite.writeReadCompare(c, filename, contents) +} + +// TestWriteReadNonUTF8 tests that non-utf8 data may be written to the storage +// driver safely. +func (suite *DriverSuite) TestWriteReadNonUTF8(c *check.C) { + filename := randomPath(32) + contents := []byte{0x80, 0x80, 0x80, 0x80} + suite.writeReadCompare(c, filename, contents) +} + +// TestTruncate tests that putting smaller contents than an original file does +// remove the excess contents. +func (suite *DriverSuite) TestTruncate(c *check.C) { + filename := randomPath(32) + contents := randomContents(1024 * 1024) + suite.writeReadCompare(c, filename, contents) + + contents = randomContents(1024) + suite.writeReadCompare(c, filename, contents) +} + +// TestReadNonexistent tests reading content from an empty path. +func (suite *DriverSuite) TestReadNonexistent(c *check.C) { + filename := randomPath(32) + _, err := suite.StorageDriver.GetContent(suite.ctx, filename) + c.Assert(err, check.NotNil) + c.Assert(err, check.FitsTypeOf, storagedriver.PathNotFoundError{}) + c.Assert(strings.Contains(err.Error(), suite.Name()), check.Equals, true) +} + +// TestWriteReadStreams1 tests a simple write-read streaming workflow. +func (suite *DriverSuite) TestWriteReadStreams1(c *check.C) { + filename := randomPath(32) + contents := []byte("a") + suite.writeReadCompareStreams(c, filename, contents) +} + +// TestWriteReadStreams2 tests a simple write-read streaming workflow with +// unicode data. +func (suite *DriverSuite) TestWriteReadStreams2(c *check.C) { + filename := randomPath(32) + contents := []byte("\xc3\x9f") + suite.writeReadCompareStreams(c, filename, contents) +} + +// TestWriteReadStreams3 tests a simple write-read streaming workflow with a +// small amount of data. +func (suite *DriverSuite) TestWriteReadStreams3(c *check.C) { + filename := randomPath(32) + contents := randomContents(32) + suite.writeReadCompareStreams(c, filename, contents) +} + +// TestWriteReadStreams4 tests a simple write-read streaming workflow with 1MB +// of data. +func (suite *DriverSuite) TestWriteReadStreams4(c *check.C) { + filename := randomPath(32) + contents := randomContents(1024 * 1024) + suite.writeReadCompareStreams(c, filename, contents) +} + +// TestWriteReadStreamsNonUTF8 tests that non-utf8 data may be written to the +// storage driver safely. +func (suite *DriverSuite) TestWriteReadStreamsNonUTF8(c *check.C) { + filename := randomPath(32) + contents := []byte{0x80, 0x80, 0x80, 0x80} + suite.writeReadCompareStreams(c, filename, contents) +} + +// TestWriteReadLargeStreams tests that a 5GB file may be written to the storage +// driver safely. 
+func (suite *DriverSuite) TestWriteReadLargeStreams(c *check.C) { + if testing.Short() { + c.Skip("Skipping test in short mode") + } + + filename := randomPath(32) + defer suite.deletePath(c, firstPart(filename)) + + checksum := sha1.New() + var fileSize int64 = 5 * 1024 * 1024 * 1024 + + contents := newRandReader(fileSize) + + writer, err := suite.StorageDriver.Writer(suite.ctx, filename, false) + c.Assert(err, check.IsNil) + written, err := io.Copy(writer, io.TeeReader(contents, checksum)) + c.Assert(err, check.IsNil) + c.Assert(written, check.Equals, fileSize) + + err = writer.Commit() + c.Assert(err, check.IsNil) + err = writer.Close() + c.Assert(err, check.IsNil) + + reader, err := suite.StorageDriver.Reader(suite.ctx, filename, 0) + c.Assert(err, check.IsNil) + defer reader.Close() + + writtenChecksum := sha1.New() + io.Copy(writtenChecksum, reader) + + c.Assert(writtenChecksum.Sum(nil), check.DeepEquals, checksum.Sum(nil)) +} + +// TestReaderWithOffset tests that the appropriate data is streamed when +// reading with a given offset. +func (suite *DriverSuite) TestReaderWithOffset(c *check.C) { + filename := randomPath(32) + defer suite.deletePath(c, firstPart(filename)) + + chunkSize := int64(32) + + contentsChunk1 := randomContents(chunkSize) + contentsChunk2 := randomContents(chunkSize) + contentsChunk3 := randomContents(chunkSize) + + err := suite.StorageDriver.PutContent(suite.ctx, filename, append(append(contentsChunk1, contentsChunk2...), contentsChunk3...)) + c.Assert(err, check.IsNil) + + reader, err := suite.StorageDriver.Reader(suite.ctx, filename, 0) + c.Assert(err, check.IsNil) + defer reader.Close() + + readContents, err := ioutil.ReadAll(reader) + c.Assert(err, check.IsNil) + + c.Assert(readContents, check.DeepEquals, append(append(contentsChunk1, contentsChunk2...), contentsChunk3...)) + + reader, err = suite.StorageDriver.Reader(suite.ctx, filename, chunkSize) + c.Assert(err, check.IsNil) + defer reader.Close() + + readContents, err = ioutil.ReadAll(reader) + c.Assert(err, check.IsNil) + + c.Assert(readContents, check.DeepEquals, append(contentsChunk2, contentsChunk3...)) + + reader, err = suite.StorageDriver.Reader(suite.ctx, filename, chunkSize*2) + c.Assert(err, check.IsNil) + defer reader.Close() + + readContents, err = ioutil.ReadAll(reader) + c.Assert(err, check.IsNil) + c.Assert(readContents, check.DeepEquals, contentsChunk3) + + // Ensure we get invalid offest for negative offsets. + reader, err = suite.StorageDriver.Reader(suite.ctx, filename, -1) + c.Assert(err, check.FitsTypeOf, storagedriver.InvalidOffsetError{}) + c.Assert(err.(storagedriver.InvalidOffsetError).Offset, check.Equals, int64(-1)) + c.Assert(err.(storagedriver.InvalidOffsetError).Path, check.Equals, filename) + c.Assert(reader, check.IsNil) + c.Assert(strings.Contains(err.Error(), suite.Name()), check.Equals, true) + + // Read past the end of the content and make sure we get a reader that + // returns 0 bytes and io.EOF + reader, err = suite.StorageDriver.Reader(suite.ctx, filename, chunkSize*3) + c.Assert(err, check.IsNil) + defer reader.Close() + + buf := make([]byte, chunkSize) + n, err := reader.Read(buf) + c.Assert(err, check.Equals, io.EOF) + c.Assert(n, check.Equals, 0) + + // Check the N-1 boundary condition, ensuring we get 1 byte then io.EOF. 
+ reader, err = suite.StorageDriver.Reader(suite.ctx, filename, chunkSize*3-1) + c.Assert(err, check.IsNil) + defer reader.Close() + + n, err = reader.Read(buf) + c.Assert(n, check.Equals, 1) + + // We don't care whether the io.EOF comes on the this read or the first + // zero read, but the only error acceptable here is io.EOF. + if err != nil { + c.Assert(err, check.Equals, io.EOF) + } + + // Any more reads should result in zero bytes and io.EOF + n, err = reader.Read(buf) + c.Assert(n, check.Equals, 0) + c.Assert(err, check.Equals, io.EOF) +} + +// TestContinueStreamAppendLarge tests that a stream write can be appended to without +// corrupting the data with a large chunk size. +func (suite *DriverSuite) TestContinueStreamAppendLarge(c *check.C) { + suite.testContinueStreamAppend(c, int64(10*1024*1024)) +} + +// TestContinueStreamAppendSmall is the same as TestContinueStreamAppendLarge, but only +// with a tiny chunk size in order to test corner cases for some cloud storage drivers. +func (suite *DriverSuite) TestContinueStreamAppendSmall(c *check.C) { + suite.testContinueStreamAppend(c, int64(32)) +} + +func (suite *DriverSuite) testContinueStreamAppend(c *check.C, chunkSize int64) { + filename := randomPath(32) + defer suite.deletePath(c, firstPart(filename)) + + contentsChunk1 := randomContents(chunkSize) + contentsChunk2 := randomContents(chunkSize) + contentsChunk3 := randomContents(chunkSize) + + fullContents := append(append(contentsChunk1, contentsChunk2...), contentsChunk3...) + + writer, err := suite.StorageDriver.Writer(suite.ctx, filename, false) + c.Assert(err, check.IsNil) + nn, err := io.Copy(writer, bytes.NewReader(contentsChunk1)) + c.Assert(err, check.IsNil) + c.Assert(nn, check.Equals, int64(len(contentsChunk1))) + + err = writer.Close() + c.Assert(err, check.IsNil) + + curSize := writer.Size() + c.Assert(curSize, check.Equals, int64(len(contentsChunk1))) + + writer, err = suite.StorageDriver.Writer(suite.ctx, filename, true) + c.Assert(err, check.IsNil) + c.Assert(writer.Size(), check.Equals, curSize) + + nn, err = io.Copy(writer, bytes.NewReader(contentsChunk2)) + c.Assert(err, check.IsNil) + c.Assert(nn, check.Equals, int64(len(contentsChunk2))) + + err = writer.Close() + c.Assert(err, check.IsNil) + + curSize = writer.Size() + c.Assert(curSize, check.Equals, 2*chunkSize) + + writer, err = suite.StorageDriver.Writer(suite.ctx, filename, true) + c.Assert(err, check.IsNil) + c.Assert(writer.Size(), check.Equals, curSize) + + nn, err = io.Copy(writer, bytes.NewReader(fullContents[curSize:])) + c.Assert(err, check.IsNil) + c.Assert(nn, check.Equals, int64(len(fullContents[curSize:]))) + + err = writer.Commit() + c.Assert(err, check.IsNil) + err = writer.Close() + c.Assert(err, check.IsNil) + + received, err := suite.StorageDriver.GetContent(suite.ctx, filename) + c.Assert(err, check.IsNil) + c.Assert(received, check.DeepEquals, fullContents) +} + +// TestReadNonexistentStream tests that reading a stream for a nonexistent path +// fails. 
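testContinueStreamAppend above exercises the resumable-write contract: closing a FileWriter without committing leaves the partial upload in place, and reopening the same path with append set to true continues from the recorded Size. A compressed sketch of that flow under the same assumptions, with appendTwice as a hypothetical helper:

package example

import (
	"github.com/docker/distribution/context"
	storagedriver "github.com/docker/distribution/registry/storage/driver"
)

// appendTwice writes a then b to path p in two sessions, committing only at the end.
func appendTwice(ctx context.Context, d storagedriver.StorageDriver, p string, a, b []byte) error {
	w, err := d.Writer(ctx, p, false) // first session: fresh write
	if err != nil {
		return err
	}
	if _, err := w.Write(a); err != nil {
		return err
	}
	if err := w.Close(); err != nil { // Close without Commit keeps the upload resumable
		return err
	}

	w, err = d.Writer(ctx, p, true) // second session: append from the recorded Size
	if err != nil {
		return err
	}
	if _, err := w.Write(b); err != nil {
		return err
	}
	if err := w.Commit(); err != nil { // make the content visible to GetContent/Reader
		return err
	}
	return w.Close()
}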
+func (suite *DriverSuite) TestReadNonexistentStream(c *check.C) { + filename := randomPath(32) + + _, err := suite.StorageDriver.Reader(suite.ctx, filename, 0) + c.Assert(err, check.NotNil) + c.Assert(err, check.FitsTypeOf, storagedriver.PathNotFoundError{}) + c.Assert(strings.Contains(err.Error(), suite.Name()), check.Equals, true) + + _, err = suite.StorageDriver.Reader(suite.ctx, filename, 64) + c.Assert(err, check.NotNil) + c.Assert(err, check.FitsTypeOf, storagedriver.PathNotFoundError{}) + c.Assert(strings.Contains(err.Error(), suite.Name()), check.Equals, true) +} + +// TestList checks the returned list of keys after populating a directory tree. +func (suite *DriverSuite) TestList(c *check.C) { + rootDirectory := "/" + randomFilename(int64(8+rand.Intn(8))) + defer suite.deletePath(c, rootDirectory) + + doesnotexist := path.Join(rootDirectory, "nonexistent") + _, err := suite.StorageDriver.List(suite.ctx, doesnotexist) + c.Assert(err, check.Equals, storagedriver.PathNotFoundError{ + Path: doesnotexist, + DriverName: suite.StorageDriver.Name(), + }) + + parentDirectory := rootDirectory + "/" + randomFilename(int64(8+rand.Intn(8))) + childFiles := make([]string, 50) + for i := 0; i < len(childFiles); i++ { + childFile := parentDirectory + "/" + randomFilename(int64(8+rand.Intn(8))) + childFiles[i] = childFile + err := suite.StorageDriver.PutContent(suite.ctx, childFile, randomContents(32)) + c.Assert(err, check.IsNil) + } + sort.Strings(childFiles) + + keys, err := suite.StorageDriver.List(suite.ctx, "/") + c.Assert(err, check.IsNil) + c.Assert(keys, check.DeepEquals, []string{rootDirectory}) + + keys, err = suite.StorageDriver.List(suite.ctx, rootDirectory) + c.Assert(err, check.IsNil) + c.Assert(keys, check.DeepEquals, []string{parentDirectory}) + + keys, err = suite.StorageDriver.List(suite.ctx, parentDirectory) + c.Assert(err, check.IsNil) + + sort.Strings(keys) + c.Assert(keys, check.DeepEquals, childFiles) + + // A few checks to add here (check out #819 for more discussion on this): + // 1. Ensure that all paths are absolute. + // 2. Ensure that listings only include direct children. + // 3. Ensure that we only respond to directory listings that end with a slash (maybe?). +} + +// TestMove checks that a moved object no longer exists at the source path and +// does exist at the destination. +func (suite *DriverSuite) TestMove(c *check.C) { + contents := randomContents(32) + sourcePath := randomPath(32) + destPath := randomPath(32) + + defer suite.deletePath(c, firstPart(sourcePath)) + defer suite.deletePath(c, firstPart(destPath)) + + err := suite.StorageDriver.PutContent(suite.ctx, sourcePath, contents) + c.Assert(err, check.IsNil) + + err = suite.StorageDriver.Move(suite.ctx, sourcePath, destPath) + c.Assert(err, check.IsNil) + + received, err := suite.StorageDriver.GetContent(suite.ctx, destPath) + c.Assert(err, check.IsNil) + c.Assert(received, check.DeepEquals, contents) + + _, err = suite.StorageDriver.GetContent(suite.ctx, sourcePath) + c.Assert(err, check.NotNil) + c.Assert(err, check.FitsTypeOf, storagedriver.PathNotFoundError{}) + c.Assert(strings.Contains(err.Error(), suite.Name()), check.Equals, true) +} + +// TestMoveOverwrite checks that a moved object no longer exists at the source +// path and overwrites the contents at the destination. 
+func (suite *DriverSuite) TestMoveOverwrite(c *check.C) { + sourcePath := randomPath(32) + destPath := randomPath(32) + sourceContents := randomContents(32) + destContents := randomContents(64) + + defer suite.deletePath(c, firstPart(sourcePath)) + defer suite.deletePath(c, firstPart(destPath)) + + err := suite.StorageDriver.PutContent(suite.ctx, sourcePath, sourceContents) + c.Assert(err, check.IsNil) + + err = suite.StorageDriver.PutContent(suite.ctx, destPath, destContents) + c.Assert(err, check.IsNil) + + err = suite.StorageDriver.Move(suite.ctx, sourcePath, destPath) + c.Assert(err, check.IsNil) + + received, err := suite.StorageDriver.GetContent(suite.ctx, destPath) + c.Assert(err, check.IsNil) + c.Assert(received, check.DeepEquals, sourceContents) + + _, err = suite.StorageDriver.GetContent(suite.ctx, sourcePath) + c.Assert(err, check.NotNil) + c.Assert(err, check.FitsTypeOf, storagedriver.PathNotFoundError{}) + c.Assert(strings.Contains(err.Error(), suite.Name()), check.Equals, true) +} + +// TestMoveNonexistent checks that moving a nonexistent key fails and does not +// delete the data at the destination path. +func (suite *DriverSuite) TestMoveNonexistent(c *check.C) { + contents := randomContents(32) + sourcePath := randomPath(32) + destPath := randomPath(32) + + defer suite.deletePath(c, firstPart(destPath)) + + err := suite.StorageDriver.PutContent(suite.ctx, destPath, contents) + c.Assert(err, check.IsNil) + + err = suite.StorageDriver.Move(suite.ctx, sourcePath, destPath) + c.Assert(err, check.NotNil) + c.Assert(err, check.FitsTypeOf, storagedriver.PathNotFoundError{}) + c.Assert(strings.Contains(err.Error(), suite.Name()), check.Equals, true) + + received, err := suite.StorageDriver.GetContent(suite.ctx, destPath) + c.Assert(err, check.IsNil) + c.Assert(received, check.DeepEquals, contents) +} + +// TestMoveInvalid provides various checks for invalid moves. +func (suite *DriverSuite) TestMoveInvalid(c *check.C) { + contents := randomContents(32) + + // Create a regular file. + err := suite.StorageDriver.PutContent(suite.ctx, "/notadir", contents) + c.Assert(err, check.IsNil) + defer suite.deletePath(c, "/notadir") + + // Now try to move a non-existent file under it. 
+ err = suite.StorageDriver.Move(suite.ctx, "/notadir/foo", "/notadir/bar") + c.Assert(err, check.NotNil) // non-nil error +} + +// TestDelete checks that the delete operation removes data from the storage +// driver +func (suite *DriverSuite) TestDelete(c *check.C) { + filename := randomPath(32) + contents := randomContents(32) + + defer suite.deletePath(c, firstPart(filename)) + + err := suite.StorageDriver.PutContent(suite.ctx, filename, contents) + c.Assert(err, check.IsNil) + + err = suite.StorageDriver.Delete(suite.ctx, filename) + c.Assert(err, check.IsNil) + + _, err = suite.StorageDriver.GetContent(suite.ctx, filename) + c.Assert(err, check.NotNil) + c.Assert(err, check.FitsTypeOf, storagedriver.PathNotFoundError{}) + c.Assert(strings.Contains(err.Error(), suite.Name()), check.Equals, true) +} + +// TestURLFor checks that the URLFor method functions properly, but only if it +// is implemented +func (suite *DriverSuite) TestURLFor(c *check.C) { + filename := randomPath(32) + contents := randomContents(32) + + defer suite.deletePath(c, firstPart(filename)) + + err := suite.StorageDriver.PutContent(suite.ctx, filename, contents) + c.Assert(err, check.IsNil) + + url, err := suite.StorageDriver.URLFor(suite.ctx, filename, nil) + if _, ok := err.(storagedriver.ErrUnsupportedMethod); ok { + return + } + c.Assert(err, check.IsNil) + + response, err := http.Get(url) + c.Assert(err, check.IsNil) + defer response.Body.Close() + + read, err := ioutil.ReadAll(response.Body) + c.Assert(err, check.IsNil) + c.Assert(read, check.DeepEquals, contents) + + url, err = suite.StorageDriver.URLFor(suite.ctx, filename, map[string]interface{}{"method": "HEAD"}) + if _, ok := err.(storagedriver.ErrUnsupportedMethod); ok { + return + } + c.Assert(err, check.IsNil) + + response, err = http.Head(url) + c.Assert(response.StatusCode, check.Equals, 200) + c.Assert(response.ContentLength, check.Equals, int64(32)) +} + +// TestDeleteNonexistent checks that removing a nonexistent key fails. +func (suite *DriverSuite) TestDeleteNonexistent(c *check.C) { + filename := randomPath(32) + err := suite.StorageDriver.Delete(suite.ctx, filename) + c.Assert(err, check.NotNil) + c.Assert(err, check.FitsTypeOf, storagedriver.PathNotFoundError{}) + c.Assert(strings.Contains(err.Error(), suite.Name()), check.Equals, true) +} + +// TestDeleteFolder checks that deleting a folder removes all child elements. 
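TestURLFor above treats direct URLs as an optional capability: a driver that cannot mint them returns ErrUnsupportedMethod, and the caller is expected to stream the content itself. A hedged sketch of that fallback (redirectOrStream is illustrative):

package example

import (
	"github.com/docker/distribution/context"
	storagedriver "github.com/docker/distribution/registry/storage/driver"
)

// redirectOrStream returns a direct URL when the driver supports URLFor and an
// empty string (meaning "stream it through the app") when it does not.
func redirectOrStream(ctx context.Context, d storagedriver.StorageDriver, path string) (string, error) {
	url, err := d.URLFor(ctx, path, map[string]interface{}{"method": "GET"})
	if err != nil {
		if _, ok := err.(storagedriver.ErrUnsupportedMethod); ok {
			return "", nil // no direct URL; caller should fall back to d.Reader
		}
		return "", err
	}
	return url, nil
}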
+func (suite *DriverSuite) TestDeleteFolder(c *check.C) { + dirname := randomPath(32) + filename1 := randomPath(32) + filename2 := randomPath(32) + filename3 := randomPath(32) + contents := randomContents(32) + + defer suite.deletePath(c, firstPart(dirname)) + + err := suite.StorageDriver.PutContent(suite.ctx, path.Join(dirname, filename1), contents) + c.Assert(err, check.IsNil) + + err = suite.StorageDriver.PutContent(suite.ctx, path.Join(dirname, filename2), contents) + c.Assert(err, check.IsNil) + + err = suite.StorageDriver.PutContent(suite.ctx, path.Join(dirname, filename3), contents) + c.Assert(err, check.IsNil) + + err = suite.StorageDriver.Delete(suite.ctx, path.Join(dirname, filename1)) + c.Assert(err, check.IsNil) + + _, err = suite.StorageDriver.GetContent(suite.ctx, path.Join(dirname, filename1)) + c.Assert(err, check.NotNil) + c.Assert(err, check.FitsTypeOf, storagedriver.PathNotFoundError{}) + c.Assert(strings.Contains(err.Error(), suite.Name()), check.Equals, true) + + _, err = suite.StorageDriver.GetContent(suite.ctx, path.Join(dirname, filename2)) + c.Assert(err, check.IsNil) + + _, err = suite.StorageDriver.GetContent(suite.ctx, path.Join(dirname, filename3)) + c.Assert(err, check.IsNil) + + err = suite.StorageDriver.Delete(suite.ctx, dirname) + c.Assert(err, check.IsNil) + + _, err = suite.StorageDriver.GetContent(suite.ctx, path.Join(dirname, filename1)) + c.Assert(err, check.NotNil) + c.Assert(err, check.FitsTypeOf, storagedriver.PathNotFoundError{}) + c.Assert(strings.Contains(err.Error(), suite.Name()), check.Equals, true) + + _, err = suite.StorageDriver.GetContent(suite.ctx, path.Join(dirname, filename2)) + c.Assert(err, check.NotNil) + c.Assert(err, check.FitsTypeOf, storagedriver.PathNotFoundError{}) + c.Assert(strings.Contains(err.Error(), suite.Name()), check.Equals, true) + + _, err = suite.StorageDriver.GetContent(suite.ctx, path.Join(dirname, filename3)) + c.Assert(err, check.NotNil) + c.Assert(err, check.FitsTypeOf, storagedriver.PathNotFoundError{}) + c.Assert(strings.Contains(err.Error(), suite.Name()), check.Equals, true) +} + +// TestDeleteOnlyDeletesSubpaths checks that deleting path A does not +// delete path B when A is a prefix of B but B is not a subpath of A (so that +// deleting "/a" does not delete "/ab"). This matters for services like S3 that +// do not implement directories. 
+func (suite *DriverSuite) TestDeleteOnlyDeletesSubpaths(c *check.C) { + dirname := randomPath(32) + filename := randomPath(32) + contents := randomContents(32) + + defer suite.deletePath(c, firstPart(dirname)) + + err := suite.StorageDriver.PutContent(suite.ctx, path.Join(dirname, filename), contents) + c.Assert(err, check.IsNil) + + err = suite.StorageDriver.PutContent(suite.ctx, path.Join(dirname, filename+"suffix"), contents) + c.Assert(err, check.IsNil) + + err = suite.StorageDriver.PutContent(suite.ctx, path.Join(dirname, dirname, filename), contents) + c.Assert(err, check.IsNil) + + err = suite.StorageDriver.PutContent(suite.ctx, path.Join(dirname, dirname+"suffix", filename), contents) + c.Assert(err, check.IsNil) + + err = suite.StorageDriver.Delete(suite.ctx, path.Join(dirname, filename)) + c.Assert(err, check.IsNil) + + _, err = suite.StorageDriver.GetContent(suite.ctx, path.Join(dirname, filename)) + c.Assert(err, check.NotNil) + c.Assert(err, check.FitsTypeOf, storagedriver.PathNotFoundError{}) + c.Assert(strings.Contains(err.Error(), suite.Name()), check.Equals, true) + + _, err = suite.StorageDriver.GetContent(suite.ctx, path.Join(dirname, filename+"suffix")) + c.Assert(err, check.IsNil) + + err = suite.StorageDriver.Delete(suite.ctx, path.Join(dirname, dirname)) + c.Assert(err, check.IsNil) + + _, err = suite.StorageDriver.GetContent(suite.ctx, path.Join(dirname, dirname, filename)) + c.Assert(err, check.NotNil) + c.Assert(err, check.FitsTypeOf, storagedriver.PathNotFoundError{}) + c.Assert(strings.Contains(err.Error(), suite.Name()), check.Equals, true) + + _, err = suite.StorageDriver.GetContent(suite.ctx, path.Join(dirname, dirname+"suffix", filename)) + c.Assert(err, check.IsNil) +} + +// TestStatCall runs verifies the implementation of the storagedriver's Stat call. +func (suite *DriverSuite) TestStatCall(c *check.C) { + content := randomContents(4096) + dirPath := randomPath(32) + fileName := randomFilename(32) + filePath := path.Join(dirPath, fileName) + + defer suite.deletePath(c, firstPart(dirPath)) + + // Call on non-existent file/dir, check error. + fi, err := suite.StorageDriver.Stat(suite.ctx, dirPath) + c.Assert(err, check.NotNil) + c.Assert(err, check.FitsTypeOf, storagedriver.PathNotFoundError{}) + c.Assert(strings.Contains(err.Error(), suite.Name()), check.Equals, true) + c.Assert(fi, check.IsNil) + + fi, err = suite.StorageDriver.Stat(suite.ctx, filePath) + c.Assert(err, check.NotNil) + c.Assert(err, check.FitsTypeOf, storagedriver.PathNotFoundError{}) + c.Assert(strings.Contains(err.Error(), suite.Name()), check.Equals, true) + c.Assert(fi, check.IsNil) + + err = suite.StorageDriver.PutContent(suite.ctx, filePath, content) + c.Assert(err, check.IsNil) + + // Call on regular file, check results + fi, err = suite.StorageDriver.Stat(suite.ctx, filePath) + c.Assert(err, check.IsNil) + c.Assert(fi, check.NotNil) + c.Assert(fi.Path(), check.Equals, filePath) + c.Assert(fi.Size(), check.Equals, int64(len(content))) + c.Assert(fi.IsDir(), check.Equals, false) + createdTime := fi.ModTime() + + // Sleep and modify the file + time.Sleep(time.Second * 10) + content = randomContents(4096) + err = suite.StorageDriver.PutContent(suite.ctx, filePath, content) + c.Assert(err, check.IsNil) + fi, err = suite.StorageDriver.Stat(suite.ctx, filePath) + c.Assert(err, check.IsNil) + c.Assert(fi, check.NotNil) + time.Sleep(time.Second * 5) // allow changes to propagate (eventual consistency) + + // Check if the modification time is after the creation time. 
+ // In case of cloud storage services, storage frontend nodes might have + // time drift between them, however that should be solved with sleeping + // before update. + modTime := fi.ModTime() + if !modTime.After(createdTime) { + c.Errorf("modtime (%s) is before the creation time (%s)", modTime, createdTime) + } + + // Call on directory (do not check ModTime as dirs don't need to support it) + fi, err = suite.StorageDriver.Stat(suite.ctx, dirPath) + c.Assert(err, check.IsNil) + c.Assert(fi, check.NotNil) + c.Assert(fi.Path(), check.Equals, dirPath) + c.Assert(fi.Size(), check.Equals, int64(0)) + c.Assert(fi.IsDir(), check.Equals, true) +} + +// TestPutContentMultipleTimes checks that if storage driver can overwrite the content +// in the subsequent puts. Validates that PutContent does not have to work +// with an offset like Writer does and overwrites the file entirely +// rather than writing the data to the [0,len(data)) of the file. +func (suite *DriverSuite) TestPutContentMultipleTimes(c *check.C) { + filename := randomPath(32) + contents := randomContents(4096) + + defer suite.deletePath(c, firstPart(filename)) + err := suite.StorageDriver.PutContent(suite.ctx, filename, contents) + c.Assert(err, check.IsNil) + + contents = randomContents(2048) // upload a different, smaller file + err = suite.StorageDriver.PutContent(suite.ctx, filename, contents) + c.Assert(err, check.IsNil) + + readContents, err := suite.StorageDriver.GetContent(suite.ctx, filename) + c.Assert(err, check.IsNil) + c.Assert(readContents, check.DeepEquals, contents) +} + +// TestConcurrentStreamReads checks that multiple clients can safely read from +// the same file simultaneously with various offsets. +func (suite *DriverSuite) TestConcurrentStreamReads(c *check.C) { + var filesize int64 = 128 * 1024 * 1024 + + if testing.Short() { + filesize = 10 * 1024 * 1024 + c.Log("Reducing file size to 10MB for short mode") + } + + filename := randomPath(32) + contents := randomContents(filesize) + + defer suite.deletePath(c, firstPart(filename)) + + err := suite.StorageDriver.PutContent(suite.ctx, filename, contents) + c.Assert(err, check.IsNil) + + var wg sync.WaitGroup + + readContents := func() { + defer wg.Done() + offset := rand.Int63n(int64(len(contents))) + reader, err := suite.StorageDriver.Reader(suite.ctx, filename, offset) + c.Assert(err, check.IsNil) + + readContents, err := ioutil.ReadAll(reader) + c.Assert(err, check.IsNil) + c.Assert(readContents, check.DeepEquals, contents[offset:]) + } + + wg.Add(10) + for i := 0; i < 10; i++ { + go readContents() + } + wg.Wait() +} + +// TestConcurrentFileStreams checks that multiple *os.File objects can be passed +// in to Writer concurrently without hanging. 
+func (suite *DriverSuite) TestConcurrentFileStreams(c *check.C) { + numStreams := 32 + + if testing.Short() { + numStreams = 8 + c.Log("Reducing number of streams to 8 for short mode") + } + + var wg sync.WaitGroup + + testStream := func(size int64) { + defer wg.Done() + suite.testFileStreams(c, size) + } + + wg.Add(numStreams) + for i := numStreams; i > 0; i-- { + go testStream(int64(numStreams) * 1024 * 1024) + } + + wg.Wait() +} + +// TODO (brianbland): evaluate the relevancy of this test +// TestEventualConsistency checks that if stat says that a file is a certain size, then +// you can freely read from the file (this is the only guarantee that the driver needs to provide) +// func (suite *DriverSuite) TestEventualConsistency(c *check.C) { +// if testing.Short() { +// c.Skip("Skipping test in short mode") +// } +// +// filename := randomPath(32) +// defer suite.deletePath(c, firstPart(filename)) +// +// var offset int64 +// var misswrites int +// var chunkSize int64 = 32 +// +// for i := 0; i < 1024; i++ { +// contents := randomContents(chunkSize) +// read, err := suite.StorageDriver.Writer(suite.ctx, filename, offset, bytes.NewReader(contents)) +// c.Assert(err, check.IsNil) +// +// fi, err := suite.StorageDriver.Stat(suite.ctx, filename) +// c.Assert(err, check.IsNil) +// +// // We are most concerned with being able to read data as soon as Stat declares +// // it is uploaded. This is the strongest guarantee that some drivers (that guarantee +// // at best eventual consistency) absolutely need to provide. +// if fi.Size() == offset+chunkSize { +// reader, err := suite.StorageDriver.Reader(suite.ctx, filename, offset) +// c.Assert(err, check.IsNil) +// +// readContents, err := ioutil.ReadAll(reader) +// c.Assert(err, check.IsNil) +// +// c.Assert(readContents, check.DeepEquals, contents) +// +// reader.Close() +// offset += read +// } else { +// misswrites++ +// } +// } +// +// if misswrites > 0 { +// c.Log("There were " + string(misswrites) + " occurrences of a write not being instantly available.") +// } +// +// c.Assert(misswrites, check.Not(check.Equals), 1024) +// } + +// BenchmarkPutGetEmptyFiles benchmarks PutContent/GetContent for 0B files +func (suite *DriverSuite) BenchmarkPutGetEmptyFiles(c *check.C) { + suite.benchmarkPutGetFiles(c, 0) +} + +// BenchmarkPutGet1KBFiles benchmarks PutContent/GetContent for 1KB files +func (suite *DriverSuite) BenchmarkPutGet1KBFiles(c *check.C) { + suite.benchmarkPutGetFiles(c, 1024) +} + +// BenchmarkPutGet1MBFiles benchmarks PutContent/GetContent for 1MB files +func (suite *DriverSuite) BenchmarkPutGet1MBFiles(c *check.C) { + suite.benchmarkPutGetFiles(c, 1024*1024) +} + +// BenchmarkPutGet1GBFiles benchmarks PutContent/GetContent for 1GB files +func (suite *DriverSuite) BenchmarkPutGet1GBFiles(c *check.C) { + suite.benchmarkPutGetFiles(c, 1024*1024*1024) +} + +func (suite *DriverSuite) benchmarkPutGetFiles(c *check.C, size int64) { + c.SetBytes(size) + parentDir := randomPath(8) + defer func() { + c.StopTimer() + suite.StorageDriver.Delete(suite.ctx, firstPart(parentDir)) + }() + + for i := 0; i < c.N; i++ { + filename := path.Join(parentDir, randomPath(32)) + err := suite.StorageDriver.PutContent(suite.ctx, filename, randomContents(size)) + c.Assert(err, check.IsNil) + + _, err = suite.StorageDriver.GetContent(suite.ctx, filename) + c.Assert(err, check.IsNil) + } +} + +// BenchmarkStreamEmptyFiles benchmarks Writer/Reader for 0B files +func (suite *DriverSuite) BenchmarkStreamEmptyFiles(c *check.C) { + suite.benchmarkStreamFiles(c, 0) +} 
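The benchmarks above complete the suite; what stays implicit is how a concrete driver opts in. A sketch of the registration a driver's own test file is expected to perform, shown here against the in-memory driver (package and test names are illustrative):

package inmemory_test

import (
	"testing"

	storagedriver "github.com/docker/distribution/registry/storage/driver"
	"github.com/docker/distribution/registry/storage/driver/inmemory"
	"github.com/docker/distribution/registry/storage/driver/testsuites"
)

// Hand the test binary over to gocheck, which runs every registered suite.
func Test(t *testing.T) { testsuites.Test(t) }

func init() {
	// Register the full DriverSuite against a fresh in-memory driver; NeverSkip
	// means the suite always runs.
	constructor := func() (storagedriver.StorageDriver, error) {
		return inmemory.New(), nil
	}
	testsuites.RegisterSuite(constructor, testsuites.NeverSkip)
}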
+ +// BenchmarkStream1KBFiles benchmarks Writer/Reader for 1KB files +func (suite *DriverSuite) BenchmarkStream1KBFiles(c *check.C) { + suite.benchmarkStreamFiles(c, 1024) +} + +// BenchmarkStream1MBFiles benchmarks Writer/Reader for 1MB files +func (suite *DriverSuite) BenchmarkStream1MBFiles(c *check.C) { + suite.benchmarkStreamFiles(c, 1024*1024) +} + +// BenchmarkStream1GBFiles benchmarks Writer/Reader for 1GB files +func (suite *DriverSuite) BenchmarkStream1GBFiles(c *check.C) { + suite.benchmarkStreamFiles(c, 1024*1024*1024) +} + +func (suite *DriverSuite) benchmarkStreamFiles(c *check.C, size int64) { + c.SetBytes(size) + parentDir := randomPath(8) + defer func() { + c.StopTimer() + suite.StorageDriver.Delete(suite.ctx, firstPart(parentDir)) + }() + + for i := 0; i < c.N; i++ { + filename := path.Join(parentDir, randomPath(32)) + writer, err := suite.StorageDriver.Writer(suite.ctx, filename, false) + c.Assert(err, check.IsNil) + written, err := io.Copy(writer, bytes.NewReader(randomContents(size))) + c.Assert(err, check.IsNil) + c.Assert(written, check.Equals, size) + + err = writer.Commit() + c.Assert(err, check.IsNil) + err = writer.Close() + c.Assert(err, check.IsNil) + + rc, err := suite.StorageDriver.Reader(suite.ctx, filename, 0) + c.Assert(err, check.IsNil) + rc.Close() + } +} + +// BenchmarkList5Files benchmarks List for 5 small files +func (suite *DriverSuite) BenchmarkList5Files(c *check.C) { + suite.benchmarkListFiles(c, 5) +} + +// BenchmarkList50Files benchmarks List for 50 small files +func (suite *DriverSuite) BenchmarkList50Files(c *check.C) { + suite.benchmarkListFiles(c, 50) +} + +func (suite *DriverSuite) benchmarkListFiles(c *check.C, numFiles int64) { + parentDir := randomPath(8) + defer func() { + c.StopTimer() + suite.StorageDriver.Delete(suite.ctx, firstPart(parentDir)) + }() + + for i := int64(0); i < numFiles; i++ { + err := suite.StorageDriver.PutContent(suite.ctx, path.Join(parentDir, randomPath(32)), nil) + c.Assert(err, check.IsNil) + } + + c.ResetTimer() + for i := 0; i < c.N; i++ { + files, err := suite.StorageDriver.List(suite.ctx, parentDir) + c.Assert(err, check.IsNil) + c.Assert(int64(len(files)), check.Equals, numFiles) + } +} + +// BenchmarkDelete5Files benchmarks Delete for 5 small files +func (suite *DriverSuite) BenchmarkDelete5Files(c *check.C) { + suite.benchmarkDeleteFiles(c, 5) +} + +// BenchmarkDelete50Files benchmarks Delete for 50 small files +func (suite *DriverSuite) BenchmarkDelete50Files(c *check.C) { + suite.benchmarkDeleteFiles(c, 50) +} + +func (suite *DriverSuite) benchmarkDeleteFiles(c *check.C, numFiles int64) { + for i := 0; i < c.N; i++ { + parentDir := randomPath(8) + defer suite.deletePath(c, firstPart(parentDir)) + + c.StopTimer() + for j := int64(0); j < numFiles; j++ { + err := suite.StorageDriver.PutContent(suite.ctx, path.Join(parentDir, randomPath(32)), nil) + c.Assert(err, check.IsNil) + } + c.StartTimer() + + // This is the operation we're benchmarking + err := suite.StorageDriver.Delete(suite.ctx, firstPart(parentDir)) + c.Assert(err, check.IsNil) + } +} + +func (suite *DriverSuite) testFileStreams(c *check.C, size int64) { + tf, err := ioutil.TempFile("", "tf") + c.Assert(err, check.IsNil) + defer os.Remove(tf.Name()) + defer tf.Close() + + filename := randomPath(32) + defer suite.deletePath(c, firstPart(filename)) + + contents := randomContents(size) + + _, err = tf.Write(contents) + c.Assert(err, check.IsNil) + + tf.Sync() + tf.Seek(0, os.SEEK_SET) + + writer, err := suite.StorageDriver.Writer(suite.ctx, 
filename, false) + c.Assert(err, check.IsNil) + nn, err := io.Copy(writer, tf) + c.Assert(err, check.IsNil) + c.Assert(nn, check.Equals, size) + + err = writer.Commit() + c.Assert(err, check.IsNil) + err = writer.Close() + c.Assert(err, check.IsNil) + + reader, err := suite.StorageDriver.Reader(suite.ctx, filename, 0) + c.Assert(err, check.IsNil) + defer reader.Close() + + readContents, err := ioutil.ReadAll(reader) + c.Assert(err, check.IsNil) + + c.Assert(readContents, check.DeepEquals, contents) +} + +func (suite *DriverSuite) writeReadCompare(c *check.C, filename string, contents []byte) { + defer suite.deletePath(c, firstPart(filename)) + + err := suite.StorageDriver.PutContent(suite.ctx, filename, contents) + c.Assert(err, check.IsNil) + + readContents, err := suite.StorageDriver.GetContent(suite.ctx, filename) + c.Assert(err, check.IsNil) + + c.Assert(readContents, check.DeepEquals, contents) +} + +func (suite *DriverSuite) writeReadCompareStreams(c *check.C, filename string, contents []byte) { + defer suite.deletePath(c, firstPart(filename)) + + writer, err := suite.StorageDriver.Writer(suite.ctx, filename, false) + c.Assert(err, check.IsNil) + nn, err := io.Copy(writer, bytes.NewReader(contents)) + c.Assert(err, check.IsNil) + c.Assert(nn, check.Equals, int64(len(contents))) + + err = writer.Commit() + c.Assert(err, check.IsNil) + err = writer.Close() + c.Assert(err, check.IsNil) + + reader, err := suite.StorageDriver.Reader(suite.ctx, filename, 0) + c.Assert(err, check.IsNil) + defer reader.Close() + + readContents, err := ioutil.ReadAll(reader) + c.Assert(err, check.IsNil) + + c.Assert(readContents, check.DeepEquals, contents) +} + +var filenameChars = []byte("abcdefghijklmnopqrstuvwxyz0123456789") +var separatorChars = []byte("._-") + +func randomPath(length int64) string { + path := "/" + for int64(len(path)) < length { + chunkLength := rand.Int63n(length-int64(len(path))) + 1 + chunk := randomFilename(chunkLength) + path += chunk + remaining := length - int64(len(path)) + if remaining == 1 { + path += randomFilename(1) + } else if remaining > 1 { + path += "/" + } + } + return path +} + +func randomFilename(length int64) string { + b := make([]byte, length) + wasSeparator := true + for i := range b { + if !wasSeparator && i < len(b)-1 && rand.Intn(4) == 0 { + b[i] = separatorChars[rand.Intn(len(separatorChars))] + wasSeparator = true + } else { + b[i] = filenameChars[rand.Intn(len(filenameChars))] + wasSeparator = false + } + } + return string(b) +} + +// randomBytes pre-allocates all of the memory sizes needed for the test. If +// anything panics while accessing randomBytes, just make this number bigger. 
+var randomBytes = make([]byte, 128<<20) + +func init() { + _, _ = rand.Read(randomBytes) // always returns len(randomBytes) and nil error +} + +func randomContents(length int64) []byte { + return randomBytes[:length] +} + +type randReader struct { + r int64 + m sync.Mutex +} + +func (rr *randReader) Read(p []byte) (n int, err error) { + rr.m.Lock() + defer rr.m.Unlock() + + toread := int64(len(p)) + if toread > rr.r { + toread = rr.r + } + n = copy(p, randomContents(toread)) + rr.r -= int64(n) + + if rr.r <= 0 { + err = io.EOF + } + + return +} + +func newRandReader(n int64) *randReader { + return &randReader{r: n} +} + +func firstPart(filePath string) string { + if filePath == "" { + return "/" + } + for { + if filePath[len(filePath)-1] == '/' { + filePath = filePath[:len(filePath)-1] + } + + dir, file := path.Split(filePath) + if dir == "" && file == "" { + return "/" + } + if dir == "/" || dir == "" { + return "/" + file + } + if file == "" { + return dir + } + filePath = dir + } +} diff --git a/vendor/github.com/docker/distribution/registry/storage/filereader.go b/vendor/github.com/docker/distribution/registry/storage/filereader.go new file mode 100644 index 000000000..3b06c8179 --- /dev/null +++ b/vendor/github.com/docker/distribution/registry/storage/filereader.go @@ -0,0 +1,177 @@ +package storage + +import ( + "bufio" + "bytes" + "fmt" + "io" + "io/ioutil" + "os" + + "github.com/docker/distribution/context" + storagedriver "github.com/docker/distribution/registry/storage/driver" +) + +// TODO(stevvooe): Set an optimal buffer size here. We'll have to +// understand the latency characteristics of the underlying network to +// set this correctly, so we may want to leave it to the driver. For +// out of process drivers, we'll have to optimize this buffer size for +// local communication. +const fileReaderBufferSize = 4 << 20 + +// remoteFileReader provides a read seeker interface to files stored in +// storagedriver. Used to implement part of layer interface and will be used +// to implement read side of LayerUpload. +type fileReader struct { + driver storagedriver.StorageDriver + + ctx context.Context + + // identifying fields + path string + size int64 // size is the total size, must be set. + + // mutable fields + rc io.ReadCloser // remote read closer + brd *bufio.Reader // internal buffered io + offset int64 // offset is the current read offset + err error // terminal error, if set, reader is closed +} + +// newFileReader initializes a file reader for the remote file. The reader +// takes on the size and path that must be determined externally with a stat +// call. The reader operates optimistically, assuming that the file is already +// there. +func newFileReader(ctx context.Context, driver storagedriver.StorageDriver, path string, size int64) (*fileReader, error) { + return &fileReader{ + ctx: ctx, + driver: driver, + path: path, + size: size, + }, nil +} + +func (fr *fileReader) Read(p []byte) (n int, err error) { + if fr.err != nil { + return 0, fr.err + } + + rd, err := fr.reader() + if err != nil { + return 0, err + } + + n, err = rd.Read(p) + fr.offset += int64(n) + + // Simulate io.EOR error if we reach filesize. 
+ if err == nil && fr.offset >= fr.size { + err = io.EOF + } + + return n, err +} + +func (fr *fileReader) Seek(offset int64, whence int) (int64, error) { + if fr.err != nil { + return 0, fr.err + } + + var err error + newOffset := fr.offset + + switch whence { + case os.SEEK_CUR: + newOffset += int64(offset) + case os.SEEK_END: + newOffset = fr.size + int64(offset) + case os.SEEK_SET: + newOffset = int64(offset) + } + + if newOffset < 0 { + err = fmt.Errorf("cannot seek to negative position") + } else { + if fr.offset != newOffset { + fr.reset() + } + + // No problems, set the offset. + fr.offset = newOffset + } + + return fr.offset, err +} + +func (fr *fileReader) Close() error { + return fr.closeWithErr(fmt.Errorf("fileReader: closed")) +} + +// reader prepares the current reader at the lrs offset, ensuring its buffered +// and ready to go. +func (fr *fileReader) reader() (io.Reader, error) { + if fr.err != nil { + return nil, fr.err + } + + if fr.rc != nil { + return fr.brd, nil + } + + // If we don't have a reader, open one up. + rc, err := fr.driver.Reader(fr.ctx, fr.path, fr.offset) + if err != nil { + switch err := err.(type) { + case storagedriver.PathNotFoundError: + // NOTE(stevvooe): If the path is not found, we simply return a + // reader that returns io.EOF. However, we do not set fr.rc, + // allowing future attempts at getting a reader to possibly + // succeed if the file turns up later. + return ioutil.NopCloser(bytes.NewReader([]byte{})), nil + default: + return nil, err + } + } + + fr.rc = rc + + if fr.brd == nil { + fr.brd = bufio.NewReaderSize(fr.rc, fileReaderBufferSize) + } else { + fr.brd.Reset(fr.rc) + } + + return fr.brd, nil +} + +// resetReader resets the reader, forcing the read method to open up a new +// connection and rebuild the buffered reader. This should be called when the +// offset and the reader will become out of sync, such as during a seek +// operation. 
+func (fr *fileReader) reset() { + if fr.err != nil { + return + } + if fr.rc != nil { + fr.rc.Close() + fr.rc = nil + } +} + +func (fr *fileReader) closeWithErr(err error) error { + if fr.err != nil { + return fr.err + } + + fr.err = err + + // close and release reader chain + if fr.rc != nil { + fr.rc.Close() + } + + fr.rc = nil + fr.brd = nil + + return fr.err +} diff --git a/vendor/github.com/docker/distribution/registry/storage/filereader_test.go b/vendor/github.com/docker/distribution/registry/storage/filereader_test.go new file mode 100644 index 000000000..5926020cc --- /dev/null +++ b/vendor/github.com/docker/distribution/registry/storage/filereader_test.go @@ -0,0 +1,198 @@ +package storage + +import ( + "bytes" + "io" + mrand "math/rand" + "os" + "testing" + + "github.com/docker/distribution/context" + "github.com/docker/distribution/digest" + "github.com/docker/distribution/registry/storage/driver/inmemory" +) + +func TestSimpleRead(t *testing.T) { + ctx := context.Background() + content := make([]byte, 1<<20) + n, err := mrand.Read(content) + if err != nil { + t.Fatalf("unexpected error building random data: %v", err) + } + + if n != len(content) { + t.Fatalf("random read didn't fill buffer") + } + + dgst, err := digest.FromReader(bytes.NewReader(content)) + if err != nil { + t.Fatalf("unexpected error digesting random content: %v", err) + } + + driver := inmemory.New() + path := "/random" + + if err := driver.PutContent(ctx, path, content); err != nil { + t.Fatalf("error putting patterned content: %v", err) + } + + fr, err := newFileReader(ctx, driver, path, int64(len(content))) + if err != nil { + t.Fatalf("error allocating file reader: %v", err) + } + + verifier, err := digest.NewDigestVerifier(dgst) + if err != nil { + t.Fatalf("error getting digest verifier: %s", err) + } + + io.Copy(verifier, fr) + + if !verifier.Verified() { + t.Fatalf("unable to verify read data") + } +} + +func TestFileReaderSeek(t *testing.T) { + driver := inmemory.New() + pattern := "01234567890ab" // prime length block + repititions := 1024 + path := "/patterned" + content := bytes.Repeat([]byte(pattern), repititions) + ctx := context.Background() + + if err := driver.PutContent(ctx, path, content); err != nil { + t.Fatalf("error putting patterned content: %v", err) + } + + fr, err := newFileReader(ctx, driver, path, int64(len(content))) + + if err != nil { + t.Fatalf("unexpected error creating file reader: %v", err) + } + + // Seek all over the place, in blocks of pattern size and make sure we get + // the right data. 
+ for _, repitition := range mrand.Perm(repititions - 1) { + targetOffset := int64(len(pattern) * repitition) + // Seek to a multiple of pattern size and read pattern size bytes + offset, err := fr.Seek(targetOffset, os.SEEK_SET) + if err != nil { + t.Fatalf("unexpected error seeking: %v", err) + } + + if offset != targetOffset { + t.Fatalf("did not seek to correct offset: %d != %d", offset, targetOffset) + } + + p := make([]byte, len(pattern)) + + n, err := fr.Read(p) + if err != nil { + t.Fatalf("error reading pattern: %v", err) + } + + if n != len(pattern) { + t.Fatalf("incorrect read length: %d != %d", n, len(pattern)) + } + + if string(p) != pattern { + t.Fatalf("incorrect read content: %q != %q", p, pattern) + } + + // Check offset + current, err := fr.Seek(0, os.SEEK_CUR) + if err != nil { + t.Fatalf("error checking current offset: %v", err) + } + + if current != targetOffset+int64(len(pattern)) { + t.Fatalf("unexpected offset after read: %v", err) + } + } + + start, err := fr.Seek(0, os.SEEK_SET) + if err != nil { + t.Fatalf("error seeking to start: %v", err) + } + + if start != 0 { + t.Fatalf("expected to seek to start: %v != 0", start) + } + + end, err := fr.Seek(0, os.SEEK_END) + if err != nil { + t.Fatalf("error checking current offset: %v", err) + } + + if end != int64(len(content)) { + t.Fatalf("expected to seek to end: %v != %v", end, len(content)) + } + + // 4. Seek before start, ensure error. + + // seek before start + before, err := fr.Seek(-1, os.SEEK_SET) + if err == nil { + t.Fatalf("error expected, returned offset=%v", before) + } + + // 5. Seek after end, + after, err := fr.Seek(1, os.SEEK_END) + if err != nil { + t.Fatalf("unexpected error expected, returned offset=%v", after) + } + + p := make([]byte, 16) + n, err := fr.Read(p) + + if n != 0 { + t.Fatalf("bytes reads %d != %d", n, 0) + } + + if err != io.EOF { + t.Fatalf("expected io.EOF, got %v", err) + } +} + +// TestFileReaderNonExistentFile ensures the reader behaves as expected with a +// missing or zero-length remote file. While the file may not exist, the +// reader should not error out on creation and should return 0-bytes from the +// read method, with an io.EOF error. +func TestFileReaderNonExistentFile(t *testing.T) { + driver := inmemory.New() + fr, err := newFileReader(context.Background(), driver, "/doesnotexist", 10) + if err != nil { + t.Fatalf("unexpected error initializing reader: %v", err) + } + + var buf [1024]byte + + n, err := fr.Read(buf[:]) + if n != 0 { + t.Fatalf("non-zero byte read reported: %d != 0", n) + } + + if err != io.EOF { + t.Fatalf("read on missing file should return io.EOF, got %v", err) + } +} + +// TestLayerReadErrors covers the various error return type for different +// conditions that can arise when reading a layer. +func TestFileReaderErrors(t *testing.T) { + // TODO(stevvooe): We need to cover error return types, driven by the + // errors returned via the HTTP API. For now, here is an incomplete list: + // + // 1. Layer Not Found: returned when layer is not found or access is + // denied. + // 2. Layer Unavailable: returned when link references are unresolved, + // but layer is known to the registry. + // 3. Layer Invalid: This may more split into more errors, but should be + // returned when name or tarsum does not reference a valid error. We + // may also need something to communication layer verification errors + // for the inline tarsum check. + // 4. Timeout: timeouts to backend. 
Need to better understand these + // failure cases and how the storage driver propagates these errors + // up the stack. +} diff --git a/vendor/github.com/docker/distribution/registry/storage/garbagecollect.go b/vendor/github.com/docker/distribution/registry/storage/garbagecollect.go new file mode 100644 index 000000000..7cf0298e1 --- /dev/null +++ b/vendor/github.com/docker/distribution/registry/storage/garbagecollect.go @@ -0,0 +1,114 @@ +package storage + +import ( + "fmt" + + "github.com/docker/distribution" + "github.com/docker/distribution/context" + "github.com/docker/distribution/digest" + "github.com/docker/distribution/reference" + "github.com/docker/distribution/registry/storage/driver" +) + +func emit(format string, a ...interface{}) { + fmt.Printf(format+"\n", a...) +} + +// MarkAndSweep performs a mark and sweep of registry data +func MarkAndSweep(ctx context.Context, storageDriver driver.StorageDriver, registry distribution.Namespace, dryRun bool) error { + repositoryEnumerator, ok := registry.(distribution.RepositoryEnumerator) + if !ok { + return fmt.Errorf("unable to convert Namespace to RepositoryEnumerator") + } + + // mark + markSet := make(map[digest.Digest]struct{}) + err := repositoryEnumerator.Enumerate(ctx, func(repoName string) error { + emit(repoName) + + var err error + named, err := reference.ParseNamed(repoName) + if err != nil { + return fmt.Errorf("failed to parse repo name %s: %v", repoName, err) + } + repository, err := registry.Repository(ctx, named) + if err != nil { + return fmt.Errorf("failed to construct repository: %v", err) + } + + manifestService, err := repository.Manifests(ctx) + if err != nil { + return fmt.Errorf("failed to construct manifest service: %v", err) + } + + manifestEnumerator, ok := manifestService.(distribution.ManifestEnumerator) + if !ok { + return fmt.Errorf("unable to convert ManifestService into ManifestEnumerator") + } + + err = manifestEnumerator.Enumerate(ctx, func(dgst digest.Digest) error { + // Mark the manifest's blob + emit("%s: marking manifest %s ", repoName, dgst) + markSet[dgst] = struct{}{} + + manifest, err := manifestService.Get(ctx, dgst) + if err != nil { + return fmt.Errorf("failed to retrieve manifest for digest %v: %v", dgst, err) + } + + descriptors := manifest.References() + for _, descriptor := range descriptors { + markSet[descriptor.Digest] = struct{}{} + emit("%s: marking blob %s", repoName, descriptor.Digest) + } + + return nil + }) + + if err != nil { + // In certain situations such as unfinished uploads, deleting all + // tags in S3 or removing the _manifests folder manually, this + // error may be of type PathNotFound. + // + // In these cases we can continue marking other manifests safely. + if _, ok := err.(driver.PathNotFoundError); ok { + return nil + } + } + + return err + }) + + if err != nil { + return fmt.Errorf("failed to mark: %v", err) + } + + // sweep + blobService := registry.Blobs() + deleteSet := make(map[digest.Digest]struct{}) + err = blobService.Enumerate(ctx, func(dgst digest.Digest) error { + // check if digest is in markSet. If not, delete it! 
+ if _, ok := markSet[dgst]; !ok { + deleteSet[dgst] = struct{}{} + } + return nil + }) + if err != nil { + return fmt.Errorf("error enumerating blobs: %v", err) + } + emit("\n%d blobs marked, %d blobs eligible for deletion", len(markSet), len(deleteSet)) + // Construct vacuum + vacuum := NewVacuum(ctx, storageDriver) + for dgst := range deleteSet { + emit("blob eligible for deletion: %s", dgst) + if dryRun { + continue + } + err = vacuum.RemoveBlob(string(dgst)) + if err != nil { + return fmt.Errorf("failed to delete blob %s: %v", dgst, err) + } + } + + return err +} diff --git a/vendor/github.com/docker/distribution/registry/storage/garbagecollect_test.go b/vendor/github.com/docker/distribution/registry/storage/garbagecollect_test.go new file mode 100644 index 000000000..925f1a10f --- /dev/null +++ b/vendor/github.com/docker/distribution/registry/storage/garbagecollect_test.go @@ -0,0 +1,377 @@ +package storage + +import ( + "io" + "path" + "testing" + + "github.com/docker/distribution" + "github.com/docker/distribution/context" + "github.com/docker/distribution/digest" + "github.com/docker/distribution/reference" + "github.com/docker/distribution/registry/storage/driver" + "github.com/docker/distribution/registry/storage/driver/inmemory" + "github.com/docker/distribution/testutil" + "github.com/docker/libtrust" +) + +type image struct { + manifest distribution.Manifest + manifestDigest digest.Digest + layers map[digest.Digest]io.ReadSeeker +} + +func createRegistry(t *testing.T, driver driver.StorageDriver, options ...RegistryOption) distribution.Namespace { + ctx := context.Background() + k, err := libtrust.GenerateECP256PrivateKey() + if err != nil { + t.Fatal(err) + } + options = append([]RegistryOption{EnableDelete, Schema1SigningKey(k)}, options...) + registry, err := NewRegistry(ctx, driver, options...) 
+ if err != nil { + t.Fatalf("Failed to construct namespace") + } + return registry +} + +func makeRepository(t *testing.T, registry distribution.Namespace, name string) distribution.Repository { + ctx := context.Background() + + // Initialize a dummy repository + named, err := reference.ParseNamed(name) + if err != nil { + t.Fatalf("Failed to parse name %s: %v", name, err) + } + + repo, err := registry.Repository(ctx, named) + if err != nil { + t.Fatalf("Failed to construct repository: %v", err) + } + return repo +} + +func makeManifestService(t *testing.T, repository distribution.Repository) distribution.ManifestService { + ctx := context.Background() + + manifestService, err := repository.Manifests(ctx) + if err != nil { + t.Fatalf("Failed to construct manifest store: %v", err) + } + return manifestService +} + +func allBlobs(t *testing.T, registry distribution.Namespace) map[digest.Digest]struct{} { + ctx := context.Background() + blobService := registry.Blobs() + allBlobsMap := make(map[digest.Digest]struct{}) + err := blobService.Enumerate(ctx, func(dgst digest.Digest) error { + allBlobsMap[dgst] = struct{}{} + return nil + }) + if err != nil { + t.Fatalf("Error getting all blobs: %v", err) + } + return allBlobsMap +} + +func uploadImage(t *testing.T, repository distribution.Repository, im image) digest.Digest { + // upload layers + err := testutil.UploadBlobs(repository, im.layers) + if err != nil { + t.Fatalf("layer upload failed: %v", err) + } + + // upload manifest + ctx := context.Background() + manifestService := makeManifestService(t, repository) + manifestDigest, err := manifestService.Put(ctx, im.manifest) + if err != nil { + t.Fatalf("manifest upload failed: %v", err) + } + + return manifestDigest +} + +func uploadRandomSchema1Image(t *testing.T, repository distribution.Repository) image { + randomLayers, err := testutil.CreateRandomLayers(2) + if err != nil { + t.Fatalf("%v", err) + } + + digests := []digest.Digest{} + for digest := range randomLayers { + digests = append(digests, digest) + } + + manifest, err := testutil.MakeSchema1Manifest(digests) + if err != nil { + t.Fatalf("%v", err) + } + + manifestDigest := uploadImage(t, repository, image{manifest: manifest, layers: randomLayers}) + return image{ + manifest: manifest, + manifestDigest: manifestDigest, + layers: randomLayers, + } +} + +func uploadRandomSchema2Image(t *testing.T, repository distribution.Repository) image { + randomLayers, err := testutil.CreateRandomLayers(2) + if err != nil { + t.Fatalf("%v", err) + } + + digests := []digest.Digest{} + for digest := range randomLayers { + digests = append(digests, digest) + } + + manifest, err := testutil.MakeSchema2Manifest(repository, digests) + if err != nil { + t.Fatalf("%v", err) + } + + manifestDigest := uploadImage(t, repository, image{manifest: manifest, layers: randomLayers}) + return image{ + manifest: manifest, + manifestDigest: manifestDigest, + layers: randomLayers, + } +} + +func TestNoDeletionNoEffect(t *testing.T) { + ctx := context.Background() + inmemoryDriver := inmemory.New() + + registry := createRegistry(t, inmemoryDriver) + repo := makeRepository(t, registry, "palailogos") + manifestService, err := repo.Manifests(ctx) + + image1 := uploadRandomSchema1Image(t, repo) + image2 := uploadRandomSchema1Image(t, repo) + uploadRandomSchema2Image(t, repo) + + // construct manifestlist for fun. 
+ blobstatter := registry.BlobStatter() + manifestList, err := testutil.MakeManifestList(blobstatter, []digest.Digest{ + image1.manifestDigest, image2.manifestDigest}) + if err != nil { + t.Fatalf("Failed to make manifest list: %v", err) + } + + _, err = manifestService.Put(ctx, manifestList) + if err != nil { + t.Fatalf("Failed to add manifest list: %v", err) + } + + before := allBlobs(t, registry) + + // Run GC + err = MarkAndSweep(context.Background(), inmemoryDriver, registry, false) + if err != nil { + t.Fatalf("Failed mark and sweep: %v", err) + } + + after := allBlobs(t, registry) + if len(before) != len(after) { + t.Fatalf("Garbage collection affected storage: %d != %d", len(before), len(after)) + } +} + +func TestGCWithMissingManifests(t *testing.T) { + ctx := context.Background() + d := inmemory.New() + + registry := createRegistry(t, d) + repo := makeRepository(t, registry, "testrepo") + uploadRandomSchema1Image(t, repo) + + // Simulate a missing _manifests directory + revPath, err := pathFor(manifestRevisionsPathSpec{"testrepo"}) + if err != nil { + t.Fatal(err) + } + + _manifestsPath := path.Dir(revPath) + err = d.Delete(ctx, _manifestsPath) + if err != nil { + t.Fatal(err) + } + + err = MarkAndSweep(context.Background(), d, registry, false) + if err != nil { + t.Fatalf("Failed mark and sweep: %v", err) + } + + blobs := allBlobs(t, registry) + if len(blobs) > 0 { + t.Errorf("unexpected blobs after gc") + } +} + +func TestDeletionHasEffect(t *testing.T) { + ctx := context.Background() + inmemoryDriver := inmemory.New() + + registry := createRegistry(t, inmemoryDriver) + repo := makeRepository(t, registry, "komnenos") + manifests, err := repo.Manifests(ctx) + + image1 := uploadRandomSchema1Image(t, repo) + image2 := uploadRandomSchema1Image(t, repo) + image3 := uploadRandomSchema2Image(t, repo) + + manifests.Delete(ctx, image2.manifestDigest) + manifests.Delete(ctx, image3.manifestDigest) + + // Run GC + err = MarkAndSweep(context.Background(), inmemoryDriver, registry, false) + if err != nil { + t.Fatalf("Failed mark and sweep: %v", err) + } + + blobs := allBlobs(t, registry) + + // check that the image1 manifest and all the layers are still in blobs + if _, ok := blobs[image1.manifestDigest]; !ok { + t.Fatalf("First manifest is missing") + } + + for layer := range image1.layers { + if _, ok := blobs[layer]; !ok { + t.Fatalf("manifest 1 layer is missing: %v", layer) + } + } + + // check that image2 and image3 layers are not still around + for layer := range image2.layers { + if _, ok := blobs[layer]; ok { + t.Fatalf("manifest 2 layer is present: %v", layer) + } + } + + for layer := range image3.layers { + if _, ok := blobs[layer]; ok { + t.Fatalf("manifest 3 layer is present: %v", layer) + } + } +} + +func getAnyKey(digests map[digest.Digest]io.ReadSeeker) (d digest.Digest) { + for d = range digests { + break + } + return +} + +func getKeys(digests map[digest.Digest]io.ReadSeeker) (ds []digest.Digest) { + for d := range digests { + ds = append(ds, d) + } + return +} + +func TestDeletionWithSharedLayer(t *testing.T) { + ctx := context.Background() + inmemoryDriver := inmemory.New() + + registry := createRegistry(t, inmemoryDriver) + repo := makeRepository(t, registry, "tzimiskes") + + // Create random layers + randomLayers1, err := testutil.CreateRandomLayers(3) + if err != nil { + t.Fatalf("failed to make layers: %v", err) + } + + randomLayers2, err := testutil.CreateRandomLayers(3) + if err != nil { + t.Fatalf("failed to make layers: %v", err) + } + + // Upload all layers + 
err = testutil.UploadBlobs(repo, randomLayers1) + if err != nil { + t.Fatalf("failed to upload layers: %v", err) + } + + err = testutil.UploadBlobs(repo, randomLayers2) + if err != nil { + t.Fatalf("failed to upload layers: %v", err) + } + + // Construct manifests + manifest1, err := testutil.MakeSchema1Manifest(getKeys(randomLayers1)) + if err != nil { + t.Fatalf("failed to make manifest: %v", err) + } + + sharedKey := getAnyKey(randomLayers1) + manifest2, err := testutil.MakeSchema2Manifest(repo, append(getKeys(randomLayers2), sharedKey)) + if err != nil { + t.Fatalf("failed to make manifest: %v", err) + } + + manifestService := makeManifestService(t, repo) + + // Upload manifests + _, err = manifestService.Put(ctx, manifest1) + if err != nil { + t.Fatalf("manifest upload failed: %v", err) + } + + manifestDigest2, err := manifestService.Put(ctx, manifest2) + if err != nil { + t.Fatalf("manifest upload failed: %v", err) + } + + // delete + err = manifestService.Delete(ctx, manifestDigest2) + if err != nil { + t.Fatalf("manifest deletion failed: %v", err) + } + + // check that all of the layers in layer 1 are still there + blobs := allBlobs(t, registry) + for dgst := range randomLayers1 { + if _, ok := blobs[dgst]; !ok { + t.Fatalf("random layer 1 blob missing: %v", dgst) + } + } +} + +func TestOrphanBlobDeleted(t *testing.T) { + inmemoryDriver := inmemory.New() + + registry := createRegistry(t, inmemoryDriver) + repo := makeRepository(t, registry, "michael_z_doukas") + + digests, err := testutil.CreateRandomLayers(1) + if err != nil { + t.Fatalf("Failed to create random digest: %v", err) + } + + if err = testutil.UploadBlobs(repo, digests); err != nil { + t.Fatalf("Failed to upload blob: %v", err) + } + + // formality to create the necessary directories + uploadRandomSchema2Image(t, repo) + + // Run GC + err = MarkAndSweep(context.Background(), inmemoryDriver, registry, false) + if err != nil { + t.Fatalf("Failed mark and sweep: %v", err) + } + + blobs := allBlobs(t, registry) + + // check that orphan blob layers are not still around + for dgst := range digests { + if _, ok := blobs[dgst]; ok { + t.Fatalf("Orphan layer is present: %v", dgst) + } + } +} diff --git a/vendor/github.com/docker/distribution/registry/storage/linkedblobstore.go b/vendor/github.com/docker/distribution/registry/storage/linkedblobstore.go new file mode 100644 index 000000000..6a5e8d033 --- /dev/null +++ b/vendor/github.com/docker/distribution/registry/storage/linkedblobstore.go @@ -0,0 +1,470 @@ +package storage + +import ( + "fmt" + "net/http" + "path" + "time" + + "github.com/docker/distribution" + "github.com/docker/distribution/context" + "github.com/docker/distribution/digest" + "github.com/docker/distribution/reference" + "github.com/docker/distribution/registry/storage/driver" + "github.com/docker/distribution/uuid" +) + +// linkPathFunc describes a function that can resolve a link based on the +// repository name and digest. +type linkPathFunc func(name string, dgst digest.Digest) (string, error) + +// linkedBlobStore provides a full BlobService that namespaces the blobs to a +// given repository. Effectively, it manages the links in a given repository +// that grant access to the global blob store. 
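+// In addition to serving blobs, it supports resumable uploads and enumeration
+// of the repository's blob links.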
+type linkedBlobStore struct { + *blobStore + registry *registry + blobServer distribution.BlobServer + blobAccessController distribution.BlobDescriptorService + repository distribution.Repository + ctx context.Context // only to be used where context can't come through method args + deleteEnabled bool + resumableDigestEnabled bool + + // linkPathFns specifies one or more path functions allowing one to + // control the repository blob link set to which the blob store + // dispatches. This is required because manifest and layer blobs have not + // yet been fully merged. At some point, this functionality should be + // removed the blob links folder should be merged. The first entry is + // treated as the "canonical" link location and will be used for writes. + linkPathFns []linkPathFunc + + // linkDirectoryPathSpec locates the root directories in which one might find links + linkDirectoryPathSpec pathSpec +} + +var _ distribution.BlobStore = &linkedBlobStore{} + +func (lbs *linkedBlobStore) Stat(ctx context.Context, dgst digest.Digest) (distribution.Descriptor, error) { + return lbs.blobAccessController.Stat(ctx, dgst) +} + +func (lbs *linkedBlobStore) Get(ctx context.Context, dgst digest.Digest) ([]byte, error) { + canonical, err := lbs.Stat(ctx, dgst) // access check + if err != nil { + return nil, err + } + + return lbs.blobStore.Get(ctx, canonical.Digest) +} + +func (lbs *linkedBlobStore) Open(ctx context.Context, dgst digest.Digest) (distribution.ReadSeekCloser, error) { + canonical, err := lbs.Stat(ctx, dgst) // access check + if err != nil { + return nil, err + } + + return lbs.blobStore.Open(ctx, canonical.Digest) +} + +func (lbs *linkedBlobStore) ServeBlob(ctx context.Context, w http.ResponseWriter, r *http.Request, dgst digest.Digest) error { + canonical, err := lbs.Stat(ctx, dgst) // access check + if err != nil { + return err + } + + if canonical.MediaType != "" { + // Set the repository local content type. + w.Header().Set("Content-Type", canonical.MediaType) + } + + return lbs.blobServer.ServeBlob(ctx, w, r, canonical.Digest) +} + +func (lbs *linkedBlobStore) Put(ctx context.Context, mediaType string, p []byte) (distribution.Descriptor, error) { + dgst := digest.FromBytes(p) + // Place the data in the blob store first. + desc, err := lbs.blobStore.Put(ctx, mediaType, p) + if err != nil { + context.GetLogger(ctx).Errorf("error putting into main store: %v", err) + return distribution.Descriptor{}, err + } + + if err := lbs.blobAccessController.SetDescriptor(ctx, dgst, desc); err != nil { + return distribution.Descriptor{}, err + } + + // TODO(stevvooe): Write out mediatype if incoming differs from what is + // returned by Put above. Note that we should allow updates for a given + // repository. + + return desc, lbs.linkBlob(ctx, desc) +} + +type optionFunc func(interface{}) error + +func (f optionFunc) Apply(v interface{}) error { + return f(v) +} + +// WithMountFrom returns a BlobCreateOption which designates that the blob should be +// mounted from the given canonical reference. +func WithMountFrom(ref reference.Canonical) distribution.BlobCreateOption { + return optionFunc(func(v interface{}) error { + opts, ok := v.(*distribution.CreateOptions) + if !ok { + return fmt.Errorf("unexpected options type: %T", v) + } + + opts.Mount.ShouldMount = true + opts.Mount.From = ref + + return nil + }) +} + +// Writer begins a blob write session, returning a handle. 
+func (lbs *linkedBlobStore) Create(ctx context.Context, options ...distribution.BlobCreateOption) (distribution.BlobWriter, error) { + context.GetLogger(ctx).Debug("(*linkedBlobStore).Writer") + + var opts distribution.CreateOptions + + for _, option := range options { + err := option.Apply(&opts) + if err != nil { + return nil, err + } + } + + if opts.Mount.ShouldMount { + desc, err := lbs.mount(ctx, opts.Mount.From, opts.Mount.From.Digest(), opts.Mount.Stat) + if err == nil { + // Mount successful, no need to initiate an upload session + return nil, distribution.ErrBlobMounted{From: opts.Mount.From, Descriptor: desc} + } + } + + uuid := uuid.Generate().String() + startedAt := time.Now().UTC() + + path, err := pathFor(uploadDataPathSpec{ + name: lbs.repository.Named().Name(), + id: uuid, + }) + + if err != nil { + return nil, err + } + + startedAtPath, err := pathFor(uploadStartedAtPathSpec{ + name: lbs.repository.Named().Name(), + id: uuid, + }) + + if err != nil { + return nil, err + } + + // Write a startedat file for this upload + if err := lbs.blobStore.driver.PutContent(ctx, startedAtPath, []byte(startedAt.Format(time.RFC3339))); err != nil { + return nil, err + } + + return lbs.newBlobUpload(ctx, uuid, path, startedAt, false) +} + +func (lbs *linkedBlobStore) Resume(ctx context.Context, id string) (distribution.BlobWriter, error) { + context.GetLogger(ctx).Debug("(*linkedBlobStore).Resume") + + startedAtPath, err := pathFor(uploadStartedAtPathSpec{ + name: lbs.repository.Named().Name(), + id: id, + }) + + if err != nil { + return nil, err + } + + startedAtBytes, err := lbs.blobStore.driver.GetContent(ctx, startedAtPath) + if err != nil { + switch err := err.(type) { + case driver.PathNotFoundError: + return nil, distribution.ErrBlobUploadUnknown + default: + return nil, err + } + } + + startedAt, err := time.Parse(time.RFC3339, string(startedAtBytes)) + if err != nil { + return nil, err + } + + path, err := pathFor(uploadDataPathSpec{ + name: lbs.repository.Named().Name(), + id: id, + }) + + if err != nil { + return nil, err + } + + return lbs.newBlobUpload(ctx, id, path, startedAt, true) +} + +func (lbs *linkedBlobStore) Delete(ctx context.Context, dgst digest.Digest) error { + if !lbs.deleteEnabled { + return distribution.ErrUnsupported + } + + // Ensure the blob is available for deletion + _, err := lbs.blobAccessController.Stat(ctx, dgst) + if err != nil { + return err + } + + err = lbs.blobAccessController.Clear(ctx, dgst) + if err != nil { + return err + } + + return nil +} + +func (lbs *linkedBlobStore) Enumerate(ctx context.Context, ingestor func(digest.Digest) error) error { + rootPath, err := pathFor(lbs.linkDirectoryPathSpec) + if err != nil { + return err + } + err = Walk(ctx, lbs.blobStore.driver, rootPath, func(fileInfo driver.FileInfo) error { + // exit early if directory... 
+ if fileInfo.IsDir() { + return nil + } + filePath := fileInfo.Path() + + // check if it's a link + _, fileName := path.Split(filePath) + if fileName != "link" { + return nil + } + + // read the digest found in link + digest, err := lbs.blobStore.readlink(ctx, filePath) + if err != nil { + return err + } + + // ensure this conforms to the linkPathFns + _, err = lbs.Stat(ctx, digest) + if err != nil { + // we expect this error to occur so we move on + if err == distribution.ErrBlobUnknown { + return nil + } + return err + } + + err = ingestor(digest) + if err != nil { + return err + } + + return nil + }) + + if err != nil { + return err + } + + return nil +} + +func (lbs *linkedBlobStore) mount(ctx context.Context, sourceRepo reference.Named, dgst digest.Digest, sourceStat *distribution.Descriptor) (distribution.Descriptor, error) { + var stat distribution.Descriptor + if sourceStat == nil { + // look up the blob info from the sourceRepo if not already provided + repo, err := lbs.registry.Repository(ctx, sourceRepo) + if err != nil { + return distribution.Descriptor{}, err + } + stat, err = repo.Blobs(ctx).Stat(ctx, dgst) + if err != nil { + return distribution.Descriptor{}, err + } + } else { + // use the provided blob info + stat = *sourceStat + } + + desc := distribution.Descriptor{ + Size: stat.Size, + + // NOTE(stevvooe): The central blob store firewalls media types from + // other users. The caller should look this up and override the value + // for the specific repository. + MediaType: "application/octet-stream", + Digest: dgst, + } + return desc, lbs.linkBlob(ctx, desc) +} + +// newBlobUpload allocates a new upload controller with the given state. +func (lbs *linkedBlobStore) newBlobUpload(ctx context.Context, uuid, path string, startedAt time.Time, append bool) (distribution.BlobWriter, error) { + fw, err := lbs.driver.Writer(ctx, path, append) + if err != nil { + return nil, err + } + + bw := &blobWriter{ + ctx: ctx, + blobStore: lbs, + id: uuid, + startedAt: startedAt, + digester: digest.Canonical.New(), + fileWriter: fw, + driver: lbs.driver, + path: path, + resumableDigestEnabled: lbs.resumableDigestEnabled, + } + + return bw, nil +} + +// linkBlob links a valid, written blob into the registry under the named +// repository for the upload controller. +func (lbs *linkedBlobStore) linkBlob(ctx context.Context, canonical distribution.Descriptor, aliases ...digest.Digest) error { + dgsts := append([]digest.Digest{canonical.Digest}, aliases...) + + // TODO(stevvooe): Need to write out mediatype for only canonical hash + // since we don't care about the aliases. They are generally unused except + // for tarsum but those versions don't care about mediatype. + + // Don't make duplicate links. + seenDigests := make(map[digest.Digest]struct{}, len(dgsts)) + + // only use the first link + linkPathFn := lbs.linkPathFns[0] + + for _, dgst := range dgsts { + if _, seen := seenDigests[dgst]; seen { + continue + } + seenDigests[dgst] = struct{}{} + + blobLinkPath, err := linkPathFn(lbs.repository.Named().Name(), dgst) + if err != nil { + return err + } + + if err := lbs.blobStore.link(ctx, blobLinkPath, canonical.Digest); err != nil { + return err + } + } + + return nil +} + +type linkedBlobStatter struct { + *blobStore + repository distribution.Repository + + // linkPathFns specifies one or more path functions allowing one to + // control the repository blob link set to which the blob store + // dispatches. 
This is required because manifest and layer blobs have not + // yet been fully merged. At some point, this functionality should be + // removed an the blob links folder should be merged. The first entry is + // treated as the "canonical" link location and will be used for writes. + linkPathFns []linkPathFunc +} + +var _ distribution.BlobDescriptorService = &linkedBlobStatter{} + +func (lbs *linkedBlobStatter) Stat(ctx context.Context, dgst digest.Digest) (distribution.Descriptor, error) { + var ( + found bool + target digest.Digest + ) + + // try the many link path functions until we get success or an error that + // is not PathNotFoundError. + for _, linkPathFn := range lbs.linkPathFns { + var err error + target, err = lbs.resolveWithLinkFunc(ctx, dgst, linkPathFn) + + if err == nil { + found = true + break // success! + } + + switch err := err.(type) { + case driver.PathNotFoundError: + // do nothing, just move to the next linkPathFn + default: + return distribution.Descriptor{}, err + } + } + + if !found { + return distribution.Descriptor{}, distribution.ErrBlobUnknown + } + + if target != dgst { + // Track when we are doing cross-digest domain lookups. ie, sha512 to sha256. + context.GetLogger(ctx).Warnf("looking up blob with canonical target: %v -> %v", dgst, target) + } + + // TODO(stevvooe): Look up repository local mediatype and replace that on + // the returned descriptor. + + return lbs.blobStore.statter.Stat(ctx, target) +} + +func (lbs *linkedBlobStatter) Clear(ctx context.Context, dgst digest.Digest) (err error) { + // clear any possible existence of a link described in linkPathFns + for _, linkPathFn := range lbs.linkPathFns { + blobLinkPath, err := linkPathFn(lbs.repository.Named().Name(), dgst) + if err != nil { + return err + } + + err = lbs.blobStore.driver.Delete(ctx, blobLinkPath) + if err != nil { + switch err := err.(type) { + case driver.PathNotFoundError: + continue // just ignore this error and continue + default: + return err + } + } + } + + return nil +} + +// resolveTargetWithFunc allows us to read a link to a resource with different +// linkPathFuncs to let us try a few different paths before returning not +// found. +func (lbs *linkedBlobStatter) resolveWithLinkFunc(ctx context.Context, dgst digest.Digest, linkPathFn linkPathFunc) (digest.Digest, error) { + blobLinkPath, err := linkPathFn(lbs.repository.Named().Name(), dgst) + if err != nil { + return "", err + } + + return lbs.blobStore.readlink(ctx, blobLinkPath) +} + +func (lbs *linkedBlobStatter) SetDescriptor(ctx context.Context, dgst digest.Digest, desc distribution.Descriptor) error { + // The canonical descriptor for a blob is set at the commit phase of upload + return nil +} + +// blobLinkPath provides the path to the blob link, also known as layers. +func blobLinkPath(name string, dgst digest.Digest) (string, error) { + return pathFor(layerLinkPathSpec{name: name, digest: dgst}) +} + +// manifestRevisionLinkPath provides the path to the manifest revision link. 
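+// For example: /docker/registry/v2/repositories/<name>/_manifests/revisions/sha256/<hex digest>/link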
+func manifestRevisionLinkPath(name string, dgst digest.Digest) (string, error) { + return pathFor(manifestRevisionLinkPathSpec{name: name, revision: dgst}) +} diff --git a/vendor/github.com/docker/distribution/registry/storage/linkedblobstore_test.go b/vendor/github.com/docker/distribution/registry/storage/linkedblobstore_test.go new file mode 100644 index 000000000..f0f63d87b --- /dev/null +++ b/vendor/github.com/docker/distribution/registry/storage/linkedblobstore_test.go @@ -0,0 +1,217 @@ +package storage + +import ( + "fmt" + "io" + "reflect" + "strconv" + "testing" + + "github.com/docker/distribution" + "github.com/docker/distribution/context" + "github.com/docker/distribution/digest" + + "github.com/docker/distribution/reference" + "github.com/docker/distribution/testutil" +) + +func TestLinkedBlobStoreCreateWithMountFrom(t *testing.T) { + fooRepoName, _ := reference.ParseNamed("nm/foo") + fooEnv := newManifestStoreTestEnv(t, fooRepoName, "thetag") + ctx := context.Background() + stats, err := mockRegistry(t, fooEnv.registry) + if err != nil { + t.Fatal(err) + } + + // Build up some test layers and add them to the manifest, saving the + // readseekers for upload later. + testLayers := map[digest.Digest]io.ReadSeeker{} + for i := 0; i < 2; i++ { + rs, ds, err := testutil.CreateRandomTarFile() + if err != nil { + t.Fatalf("unexpected error generating test layer file") + } + dgst := digest.Digest(ds) + + testLayers[digest.Digest(dgst)] = rs + } + + // upload the layers to foo/bar + for dgst, rs := range testLayers { + wr, err := fooEnv.repository.Blobs(fooEnv.ctx).Create(fooEnv.ctx) + if err != nil { + t.Fatalf("unexpected error creating test upload: %v", err) + } + + if _, err := io.Copy(wr, rs); err != nil { + t.Fatalf("unexpected error copying to upload: %v", err) + } + + if _, err := wr.Commit(fooEnv.ctx, distribution.Descriptor{Digest: dgst}); err != nil { + t.Fatalf("unexpected error finishing upload: %v", err) + } + } + + // create another repository nm/bar + barRepoName, _ := reference.ParseNamed("nm/bar") + barRepo, err := fooEnv.registry.Repository(ctx, barRepoName) + if err != nil { + t.Fatalf("unexpected error getting repo: %v", err) + } + + // cross-repo mount the test layers into a nm/bar + for dgst := range testLayers { + fooCanonical, _ := reference.WithDigest(fooRepoName, dgst) + option := WithMountFrom(fooCanonical) + // ensure we can instrospect it + createOpts := distribution.CreateOptions{} + if err := option.Apply(&createOpts); err != nil { + t.Fatalf("failed to apply MountFrom option: %v", err) + } + if !createOpts.Mount.ShouldMount || createOpts.Mount.From.String() != fooCanonical.String() { + t.Fatalf("unexpected create options: %#+v", createOpts.Mount) + } + + _, err := barRepo.Blobs(ctx).Create(ctx, WithMountFrom(fooCanonical)) + if err == nil { + t.Fatalf("unexpected non-error while mounting from %q: %v", fooRepoName.String(), err) + } + if _, ok := err.(distribution.ErrBlobMounted); !ok { + t.Fatalf("expected ErrMountFrom error, not %T: %v", err, err) + } + } + for dgst := range testLayers { + fooCanonical, _ := reference.WithDigest(fooRepoName, dgst) + count, exists := stats[fooCanonical.String()] + if !exists { + t.Errorf("expected entry %q not found among handled stat calls", fooCanonical.String()) + } else if count != 1 { + t.Errorf("expected exactly one stat call for entry %q, not %d", fooCanonical.String(), count) + } + } + + clearStats(stats) + + // create yet another repository nm/baz + bazRepoName, _ := reference.ParseNamed("nm/baz") + bazRepo, err 
:= fooEnv.registry.Repository(ctx, bazRepoName) + if err != nil { + t.Fatalf("unexpected error getting repo: %v", err) + } + + // cross-repo mount them into a nm/baz and provide a prepopulated blob descriptor + for dgst := range testLayers { + fooCanonical, _ := reference.WithDigest(fooRepoName, dgst) + size, err := strconv.ParseInt("0x"+dgst.Hex()[:8], 0, 64) + if err != nil { + t.Fatal(err) + } + prepolutatedDescriptor := distribution.Descriptor{ + Digest: dgst, + Size: size, + MediaType: "application/octet-stream", + } + _, err = bazRepo.Blobs(ctx).Create(ctx, WithMountFrom(fooCanonical), &statCrossMountCreateOption{ + desc: prepolutatedDescriptor, + }) + blobMounted, ok := err.(distribution.ErrBlobMounted) + if !ok { + t.Errorf("expected ErrMountFrom error, not %T: %v", err, err) + continue + } + if !reflect.DeepEqual(blobMounted.Descriptor, prepolutatedDescriptor) { + t.Errorf("unexpected descriptor: %#+v != %#+v", blobMounted.Descriptor, prepolutatedDescriptor) + } + } + // this time no stat calls will be made + if len(stats) != 0 { + t.Errorf("unexpected number of stats made: %d != %d", len(stats), len(testLayers)) + } +} + +func clearStats(stats map[string]int) { + for k := range stats { + delete(stats, k) + } +} + +// mockRegistry sets a mock blob descriptor service factory that overrides +// statter's Stat method to note each attempt to stat a blob in any repository. +// Returned stats map contains canonical references to blobs with a number of +// attempts. +func mockRegistry(t *testing.T, nm distribution.Namespace) (map[string]int, error) { + registry, ok := nm.(*registry) + if !ok { + return nil, fmt.Errorf("not an expected type of registry: %T", nm) + } + stats := make(map[string]int) + + registry.blobDescriptorServiceFactory = &mockBlobDescriptorServiceFactory{ + t: t, + stats: stats, + } + + return stats, nil +} + +type mockBlobDescriptorServiceFactory struct { + t *testing.T + stats map[string]int +} + +func (f *mockBlobDescriptorServiceFactory) BlobAccessController(svc distribution.BlobDescriptorService) distribution.BlobDescriptorService { + return &mockBlobDescriptorService{ + BlobDescriptorService: svc, + t: f.t, + stats: f.stats, + } +} + +type mockBlobDescriptorService struct { + distribution.BlobDescriptorService + t *testing.T + stats map[string]int +} + +var _ distribution.BlobDescriptorService = &mockBlobDescriptorService{} + +func (bs *mockBlobDescriptorService) Stat(ctx context.Context, dgst digest.Digest) (distribution.Descriptor, error) { + statter, ok := bs.BlobDescriptorService.(*linkedBlobStatter) + if !ok { + return distribution.Descriptor{}, fmt.Errorf("unexpected blob descriptor service: %T", bs.BlobDescriptorService) + } + + name := statter.repository.Named() + canonical, err := reference.WithDigest(name, dgst) + if err != nil { + return distribution.Descriptor{}, fmt.Errorf("failed to make canonical reference: %v", err) + } + + bs.stats[canonical.String()]++ + bs.t.Logf("calling Stat on %s", canonical.String()) + + return bs.BlobDescriptorService.Stat(ctx, dgst) +} + +// statCrossMountCreateOptions ensures the expected options type is passed, and optionally pre-fills the cross-mount stat info +type statCrossMountCreateOption struct { + desc distribution.Descriptor +} + +var _ distribution.BlobCreateOption = statCrossMountCreateOption{} + +func (f statCrossMountCreateOption) Apply(v interface{}) error { + opts, ok := v.(*distribution.CreateOptions) + if !ok { + return fmt.Errorf("Unexpected create options: %#v", v) + } + + if !opts.Mount.ShouldMount { 
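+		// no cross-repo mount was requested, so there is no stat info to pre-fill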
+ return nil + } + + opts.Mount.Stat = &f.desc + + return nil +} diff --git a/vendor/github.com/docker/distribution/registry/storage/manifestlisthandler.go b/vendor/github.com/docker/distribution/registry/storage/manifestlisthandler.go new file mode 100644 index 000000000..e24062cd0 --- /dev/null +++ b/vendor/github.com/docker/distribution/registry/storage/manifestlisthandler.go @@ -0,0 +1,92 @@ +package storage + +import ( + "fmt" + + "encoding/json" + "github.com/docker/distribution" + "github.com/docker/distribution/context" + "github.com/docker/distribution/digest" + "github.com/docker/distribution/manifest/manifestlist" +) + +// manifestListHandler is a ManifestHandler that covers schema2 manifest lists. +type manifestListHandler struct { + repository distribution.Repository + blobStore distribution.BlobStore + ctx context.Context +} + +var _ ManifestHandler = &manifestListHandler{} + +func (ms *manifestListHandler) Unmarshal(ctx context.Context, dgst digest.Digest, content []byte) (distribution.Manifest, error) { + context.GetLogger(ms.ctx).Debug("(*manifestListHandler).Unmarshal") + + var m manifestlist.DeserializedManifestList + if err := json.Unmarshal(content, &m); err != nil { + return nil, err + } + + return &m, nil +} + +func (ms *manifestListHandler) Put(ctx context.Context, manifestList distribution.Manifest, skipDependencyVerification bool) (digest.Digest, error) { + context.GetLogger(ms.ctx).Debug("(*manifestListHandler).Put") + + m, ok := manifestList.(*manifestlist.DeserializedManifestList) + if !ok { + return "", fmt.Errorf("wrong type put to manifestListHandler: %T", manifestList) + } + + if err := ms.verifyManifest(ms.ctx, *m, skipDependencyVerification); err != nil { + return "", err + } + + mt, payload, err := m.Payload() + if err != nil { + return "", err + } + + revision, err := ms.blobStore.Put(ctx, mt, payload) + if err != nil { + context.GetLogger(ctx).Errorf("error putting payload into blobstore: %v", err) + return "", err + } + + return revision.Digest, nil +} + +// verifyManifest ensures that the manifest content is valid from the +// perspective of the registry. As a policy, the registry only tries to +// store valid content, leaving trust policies of that content up to +// consumers. +func (ms *manifestListHandler) verifyManifest(ctx context.Context, mnfst manifestlist.DeserializedManifestList, skipDependencyVerification bool) error { + var errs distribution.ErrManifestVerification + + if !skipDependencyVerification { + // This manifest service is different from the blob service + // returned by Blob. It uses a linked blob store to ensure that + // only manifests are accessible. + + manifestService, err := ms.repository.Manifests(ctx) + if err != nil { + return err + } + + for _, manifestDescriptor := range mnfst.References() { + exists, err := manifestService.Exists(ctx, manifestDescriptor.Digest) + if err != nil && err != distribution.ErrBlobUnknown { + errs = append(errs, err) + } + if err != nil || !exists { + // On error here, we always append unknown blob errors. 
+ errs = append(errs, distribution.ErrManifestBlobUnknown{Digest: manifestDescriptor.Digest}) + } + } + } + if len(errs) != 0 { + return errs + } + + return nil +} diff --git a/vendor/github.com/docker/distribution/registry/storage/manifeststore.go b/vendor/github.com/docker/distribution/registry/storage/manifeststore.go new file mode 100644 index 000000000..9e8065bb7 --- /dev/null +++ b/vendor/github.com/docker/distribution/registry/storage/manifeststore.go @@ -0,0 +1,141 @@ +package storage + +import ( + "fmt" + + "encoding/json" + "github.com/docker/distribution" + "github.com/docker/distribution/context" + "github.com/docker/distribution/digest" + "github.com/docker/distribution/manifest" + "github.com/docker/distribution/manifest/manifestlist" + "github.com/docker/distribution/manifest/schema1" + "github.com/docker/distribution/manifest/schema2" +) + +// A ManifestHandler gets and puts manifests of a particular type. +type ManifestHandler interface { + // Unmarshal unmarshals the manifest from a byte slice. + Unmarshal(ctx context.Context, dgst digest.Digest, content []byte) (distribution.Manifest, error) + + // Put creates or updates the given manifest returning the manifest digest. + Put(ctx context.Context, manifest distribution.Manifest, skipDependencyVerification bool) (digest.Digest, error) +} + +// SkipLayerVerification allows a manifest to be Put before its +// layers are on the filesystem +func SkipLayerVerification() distribution.ManifestServiceOption { + return skipLayerOption{} +} + +type skipLayerOption struct{} + +func (o skipLayerOption) Apply(m distribution.ManifestService) error { + if ms, ok := m.(*manifestStore); ok { + ms.skipDependencyVerification = true + return nil + } + return fmt.Errorf("skip layer verification only valid for manifestStore") +} + +type manifestStore struct { + repository *repository + blobStore *linkedBlobStore + ctx context.Context + + skipDependencyVerification bool + + schema1Handler ManifestHandler + schema2Handler ManifestHandler + manifestListHandler ManifestHandler +} + +var _ distribution.ManifestService = &manifestStore{} + +func (ms *manifestStore) Exists(ctx context.Context, dgst digest.Digest) (bool, error) { + context.GetLogger(ms.ctx).Debug("(*manifestStore).Exists") + + _, err := ms.blobStore.Stat(ms.ctx, dgst) + if err != nil { + if err == distribution.ErrBlobUnknown { + return false, nil + } + + return false, err + } + + return true, nil +} + +func (ms *manifestStore) Get(ctx context.Context, dgst digest.Digest, options ...distribution.ManifestServiceOption) (distribution.Manifest, error) { + context.GetLogger(ms.ctx).Debug("(*manifestStore).Get") + + // TODO(stevvooe): Need to check descriptor from above to ensure that the + // mediatype is as we expect for the manifest store. 
+ + content, err := ms.blobStore.Get(ctx, dgst) + if err != nil { + if err == distribution.ErrBlobUnknown { + return nil, distribution.ErrManifestUnknownRevision{ + Name: ms.repository.Named().Name(), + Revision: dgst, + } + } + + return nil, err + } + + var versioned manifest.Versioned + if err = json.Unmarshal(content, &versioned); err != nil { + return nil, err + } + + switch versioned.SchemaVersion { + case 1: + return ms.schema1Handler.Unmarshal(ctx, dgst, content) + case 2: + // This can be an image manifest or a manifest list + switch versioned.MediaType { + case schema2.MediaTypeManifest: + return ms.schema2Handler.Unmarshal(ctx, dgst, content) + case manifestlist.MediaTypeManifestList: + return ms.manifestListHandler.Unmarshal(ctx, dgst, content) + default: + return nil, distribution.ErrManifestVerification{fmt.Errorf("unrecognized manifest content type %s", versioned.MediaType)} + } + } + + return nil, fmt.Errorf("unrecognized manifest schema version %d", versioned.SchemaVersion) +} + +func (ms *manifestStore) Put(ctx context.Context, manifest distribution.Manifest, options ...distribution.ManifestServiceOption) (digest.Digest, error) { + context.GetLogger(ms.ctx).Debug("(*manifestStore).Put") + + switch manifest.(type) { + case *schema1.SignedManifest: + return ms.schema1Handler.Put(ctx, manifest, ms.skipDependencyVerification) + case *schema2.DeserializedManifest: + return ms.schema2Handler.Put(ctx, manifest, ms.skipDependencyVerification) + case *manifestlist.DeserializedManifestList: + return ms.manifestListHandler.Put(ctx, manifest, ms.skipDependencyVerification) + } + + return "", fmt.Errorf("unrecognized manifest type %T", manifest) +} + +// Delete removes the revision of the specified manifest. +func (ms *manifestStore) Delete(ctx context.Context, dgst digest.Digest) error { + context.GetLogger(ms.ctx).Debug("(*manifestStore).Delete") + return ms.blobStore.Delete(ctx, dgst) +} + +func (ms *manifestStore) Enumerate(ctx context.Context, ingester func(digest.Digest) error) error { + err := ms.blobStore.Enumerate(ctx, func(dgst digest.Digest) error { + err := ingester(dgst) + if err != nil { + return err + } + return nil + }) + return err +} diff --git a/vendor/github.com/docker/distribution/registry/storage/manifeststore_test.go b/vendor/github.com/docker/distribution/registry/storage/manifeststore_test.go new file mode 100644 index 000000000..cbd30c044 --- /dev/null +++ b/vendor/github.com/docker/distribution/registry/storage/manifeststore_test.go @@ -0,0 +1,391 @@ +package storage + +import ( + "bytes" + "io" + "reflect" + "testing" + + "github.com/docker/distribution" + "github.com/docker/distribution/context" + "github.com/docker/distribution/digest" + "github.com/docker/distribution/manifest" + "github.com/docker/distribution/manifest/schema1" + "github.com/docker/distribution/reference" + "github.com/docker/distribution/registry/storage/cache/memory" + "github.com/docker/distribution/registry/storage/driver" + "github.com/docker/distribution/registry/storage/driver/inmemory" + "github.com/docker/distribution/testutil" + "github.com/docker/libtrust" +) + +type manifestStoreTestEnv struct { + ctx context.Context + driver driver.StorageDriver + registry distribution.Namespace + repository distribution.Repository + name reference.Named + tag string +} + +func newManifestStoreTestEnv(t *testing.T, name reference.Named, tag string, options ...RegistryOption) *manifestStoreTestEnv { + ctx := context.Background() + driver := inmemory.New() + registry, err := NewRegistry(ctx, 
driver, options...) + if err != nil { + t.Fatalf("error creating registry: %v", err) + } + + repo, err := registry.Repository(ctx, name) + if err != nil { + t.Fatalf("unexpected error getting repo: %v", err) + } + + return &manifestStoreTestEnv{ + ctx: ctx, + driver: driver, + registry: registry, + repository: repo, + name: name, + tag: tag, + } +} + +func TestManifestStorage(t *testing.T) { + k, err := libtrust.GenerateECP256PrivateKey() + if err != nil { + t.Fatal(err) + } + testManifestStorage(t, BlobDescriptorCacheProvider(memory.NewInMemoryBlobDescriptorCacheProvider()), EnableDelete, EnableRedirect, Schema1SigningKey(k)) +} + +func testManifestStorage(t *testing.T, options ...RegistryOption) { + repoName, _ := reference.ParseNamed("foo/bar") + env := newManifestStoreTestEnv(t, repoName, "thetag", options...) + ctx := context.Background() + ms, err := env.repository.Manifests(ctx) + if err != nil { + t.Fatal(err) + } + + m := schema1.Manifest{ + Versioned: manifest.Versioned{ + SchemaVersion: 1, + }, + Name: env.name.Name(), + Tag: env.tag, + } + + // Build up some test layers and add them to the manifest, saving the + // readseekers for upload later. + testLayers := map[digest.Digest]io.ReadSeeker{} + for i := 0; i < 2; i++ { + rs, ds, err := testutil.CreateRandomTarFile() + if err != nil { + t.Fatalf("unexpected error generating test layer file") + } + dgst := digest.Digest(ds) + + testLayers[digest.Digest(dgst)] = rs + m.FSLayers = append(m.FSLayers, schema1.FSLayer{ + BlobSum: dgst, + }) + m.History = append(m.History, schema1.History{ + V1Compatibility: "", + }) + + } + + pk, err := libtrust.GenerateECP256PrivateKey() + if err != nil { + t.Fatalf("unexpected error generating private key: %v", err) + } + + sm, merr := schema1.Sign(&m, pk) + if merr != nil { + t.Fatalf("error signing manifest: %v", err) + } + + _, err = ms.Put(ctx, sm) + if err == nil { + t.Fatalf("expected errors putting manifest with full verification") + } + + switch err := err.(type) { + case distribution.ErrManifestVerification: + if len(err) != 2 { + t.Fatalf("expected 2 verification errors: %#v", err) + } + + for _, err := range err { + if _, ok := err.(distribution.ErrManifestBlobUnknown); !ok { + t.Fatalf("unexpected error type: %v", err) + } + } + default: + t.Fatalf("unexpected error verifying manifest: %v", err) + } + + // Now, upload the layers that were missing! 
+ for dgst, rs := range testLayers { + wr, err := env.repository.Blobs(env.ctx).Create(env.ctx) + if err != nil { + t.Fatalf("unexpected error creating test upload: %v", err) + } + + if _, err := io.Copy(wr, rs); err != nil { + t.Fatalf("unexpected error copying to upload: %v", err) + } + + if _, err := wr.Commit(env.ctx, distribution.Descriptor{Digest: dgst}); err != nil { + t.Fatalf("unexpected error finishing upload: %v", err) + } + } + + var manifestDigest digest.Digest + if manifestDigest, err = ms.Put(ctx, sm); err != nil { + t.Fatalf("unexpected error putting manifest: %v", err) + } + + exists, err := ms.Exists(ctx, manifestDigest) + if err != nil { + t.Fatalf("unexpected error checking manifest existence: %#v", err) + } + + if !exists { + t.Fatalf("manifest should exist") + } + + fromStore, err := ms.Get(ctx, manifestDigest) + if err != nil { + t.Fatalf("unexpected error fetching manifest: %v", err) + } + + fetchedManifest, ok := fromStore.(*schema1.SignedManifest) + if !ok { + t.Fatalf("unexpected manifest type from signedstore") + } + + if !bytes.Equal(fetchedManifest.Canonical, sm.Canonical) { + t.Fatalf("fetched payload does not match original payload: %q != %q", fetchedManifest.Canonical, sm.Canonical) + } + + _, pl, err := fetchedManifest.Payload() + if err != nil { + t.Fatalf("error getting payload %#v", err) + } + + fetchedJWS, err := libtrust.ParsePrettySignature(pl, "signatures") + if err != nil { + t.Fatalf("unexpected error parsing jws: %v", err) + } + + payload, err := fetchedJWS.Payload() + if err != nil { + t.Fatalf("unexpected error extracting payload: %v", err) + } + + // Now that we have a payload, take a moment to check that the manifest is + // return by the payload digest. + + dgst := digest.FromBytes(payload) + exists, err = ms.Exists(ctx, dgst) + if err != nil { + t.Fatalf("error checking manifest existence by digest: %v", err) + } + + if !exists { + t.Fatalf("manifest %s should exist", dgst) + } + + fetchedByDigest, err := ms.Get(ctx, dgst) + if err != nil { + t.Fatalf("unexpected error fetching manifest by digest: %v", err) + } + + byDigestManifest, ok := fetchedByDigest.(*schema1.SignedManifest) + if !ok { + t.Fatalf("unexpected manifest type from signedstore") + } + + if !bytes.Equal(byDigestManifest.Canonical, fetchedManifest.Canonical) { + t.Fatalf("fetched manifest not equal: %q != %q", byDigestManifest.Canonical, fetchedManifest.Canonical) + } + + sigs, err := fetchedJWS.Signatures() + if err != nil { + t.Fatalf("unable to extract signatures: %v", err) + } + + if len(sigs) != 1 { + t.Fatalf("unexpected number of signatures: %d != %d", len(sigs), 1) + } + + // Now, push the same manifest with a different key + pk2, err := libtrust.GenerateECP256PrivateKey() + if err != nil { + t.Fatalf("unexpected error generating private key: %v", err) + } + + sm2, err := schema1.Sign(&m, pk2) + if err != nil { + t.Fatalf("unexpected error signing manifest: %v", err) + } + _, pl, err = sm2.Payload() + if err != nil { + t.Fatalf("error getting payload %#v", err) + } + + jws2, err := libtrust.ParsePrettySignature(pl, "signatures") + if err != nil { + t.Fatalf("error parsing signature: %v", err) + } + + sigs2, err := jws2.Signatures() + if err != nil { + t.Fatalf("unable to extract signatures: %v", err) + } + + if len(sigs2) != 1 { + t.Fatalf("unexpected number of signatures: %d != %d", len(sigs2), 1) + } + + if manifestDigest, err = ms.Put(ctx, sm2); err != nil { + t.Fatalf("unexpected error putting manifest: %v", err) + } + + fromStore, err = ms.Get(ctx, 
manifestDigest) + if err != nil { + t.Fatalf("unexpected error fetching manifest: %v", err) + } + + fetched, ok := fromStore.(*schema1.SignedManifest) + if !ok { + t.Fatalf("unexpected type from signed manifeststore : %T", fetched) + } + + if _, err := schema1.Verify(fetched); err != nil { + t.Fatalf("unexpected error verifying manifest: %v", err) + } + + _, pl, err = fetched.Payload() + if err != nil { + t.Fatalf("error getting payload %#v", err) + } + + receivedJWS, err := libtrust.ParsePrettySignature(pl, "signatures") + if err != nil { + t.Fatalf("unexpected error parsing jws: %v", err) + } + + receivedPayload, err := receivedJWS.Payload() + if err != nil { + t.Fatalf("unexpected error extracting received payload: %v", err) + } + + if !bytes.Equal(receivedPayload, payload) { + t.Fatalf("payloads are not equal") + } + + // Test deleting manifests + err = ms.Delete(ctx, dgst) + if err != nil { + t.Fatalf("unexpected an error deleting manifest by digest: %v", err) + } + + exists, err = ms.Exists(ctx, dgst) + if err != nil { + t.Fatalf("Error querying manifest existence") + } + if exists { + t.Errorf("Deleted manifest should not exist") + } + + deletedManifest, err := ms.Get(ctx, dgst) + if err == nil { + t.Errorf("Unexpected success getting deleted manifest") + } + switch err.(type) { + case distribution.ErrManifestUnknownRevision: + break + default: + t.Errorf("Unexpected error getting deleted manifest: %s", reflect.ValueOf(err).Type()) + } + + if deletedManifest != nil { + t.Errorf("Deleted manifest get returned non-nil") + } + + // Re-upload should restore manifest to a good state + _, err = ms.Put(ctx, sm) + if err != nil { + t.Errorf("Error re-uploading deleted manifest") + } + + exists, err = ms.Exists(ctx, dgst) + if err != nil { + t.Fatalf("Error querying manifest existence") + } + if !exists { + t.Errorf("Restored manifest should exist") + } + + deletedManifest, err = ms.Get(ctx, dgst) + if err != nil { + t.Errorf("Unexpected error getting manifest") + } + if deletedManifest == nil { + t.Errorf("Deleted manifest get returned non-nil") + } + + r, err := NewRegistry(ctx, env.driver, BlobDescriptorCacheProvider(memory.NewInMemoryBlobDescriptorCacheProvider()), EnableRedirect) + if err != nil { + t.Fatalf("error creating registry: %v", err) + } + repo, err := r.Repository(ctx, env.name) + if err != nil { + t.Fatalf("unexpected error getting repo: %v", err) + } + ms, err = repo.Manifests(ctx) + if err != nil { + t.Fatal(err) + } + err = ms.Delete(ctx, dgst) + if err == nil { + t.Errorf("Unexpected success deleting while disabled") + } +} + +// TestLinkPathFuncs ensures that the link path functions behavior are locked +// down and implemented as expected. 
+func TestLinkPathFuncs(t *testing.T) { + for _, testcase := range []struct { + repo string + digest digest.Digest + linkPathFn linkPathFunc + expected string + }{ + { + repo: "foo/bar", + digest: "sha256:deadbeaf98fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", + linkPathFn: blobLinkPath, + expected: "/docker/registry/v2/repositories/foo/bar/_layers/sha256/deadbeaf98fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855/link", + }, + { + repo: "foo/bar", + digest: "sha256:deadbeaf98fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", + linkPathFn: manifestRevisionLinkPath, + expected: "/docker/registry/v2/repositories/foo/bar/_manifests/revisions/sha256/deadbeaf98fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855/link", + }, + } { + p, err := testcase.linkPathFn(testcase.repo, testcase.digest) + if err != nil { + t.Fatalf("unexpected error calling linkPathFn(pm, %q, %q): %v", testcase.repo, testcase.digest, err) + } + + if p != testcase.expected { + t.Fatalf("incorrect path returned: %q != %q", p, testcase.expected) + } + } + +} diff --git a/vendor/github.com/docker/distribution/registry/storage/paths.go b/vendor/github.com/docker/distribution/registry/storage/paths.go new file mode 100644 index 000000000..1b142b88f --- /dev/null +++ b/vendor/github.com/docker/distribution/registry/storage/paths.go @@ -0,0 +1,490 @@ +package storage + +import ( + "fmt" + "path" + "strings" + + "github.com/docker/distribution/digest" +) + +const ( + storagePathVersion = "v2" // fixed storage layout version + storagePathRoot = "/docker/registry/" // all driver paths have a prefix + + // TODO(stevvooe): Get rid of the "storagePathRoot". Initially, we though + // storage path root would configurable for all drivers through this + // package. In reality, we've found it simpler to do this on a per driver + // basis. +) + +// pathFor maps paths based on "object names" and their ids. The "object +// names" mapped by are internal to the storage system. +// +// The path layout in the storage backend is roughly as follows: +// +// /v2 +// -> repositories/ +// ->/ +// -> _manifests/ +// revisions +// -> +// -> link +// tags/ +// -> current/link +// -> index +// -> //link +// -> _layers/ +// +// -> _uploads/ +// data +// startedat +// hashstates// +// -> blob/ +// +// +// The storage backend layout is broken up into a content-addressable blob +// store and repositories. The content-addressable blob store holds most data +// throughout the backend, keyed by algorithm and digests of the underlying +// content. Access to the blob store is controlled through links from the +// repository to blobstore. +// +// A repository is made up of layers, manifests and tags. The layers component +// is just a directory of layers which are "linked" into a repository. A layer +// can only be accessed through a qualified repository name if it is linked in +// the repository. Uploads of layers are managed in the uploads directory, +// which is key by upload id. When all data for an upload is received, the +// data is moved into the blob store and the upload directory is deleted. +// Abandoned uploads can be garbage collected by reading the startedat file +// and removing uploads that have been active for longer than a certain time. +// +// The third component of the repository directory is the manifests store, +// which is made up of a revision store and tag store. Manifests are stored in +// the blob store and linked into the revision store. 
+// While the registry can save all revisions of a manifest, no relationship is +// implied as to the ordering of changes to a manifest. The tag store provides +// support for name, tag lookups of manifests, using "current/link" under a +// named tag directory. An index is maintained to support deletions of all +// revisions of a given manifest tag. +// +// We cover the path formats implemented by this path mapper below. +// +// Manifests: +// +// manifestRevisionsPathSpec: /v2/repositories//_manifests/revisions/ +// manifestRevisionPathSpec: /v2/repositories//_manifests/revisions/// +// manifestRevisionLinkPathSpec: /v2/repositories//_manifests/revisions///link +// +// Tags: +// +// manifestTagsPathSpec: /v2/repositories//_manifests/tags/ +// manifestTagPathSpec: /v2/repositories//_manifests/tags// +// manifestTagCurrentPathSpec: /v2/repositories//_manifests/tags//current/link +// manifestTagIndexPathSpec: /v2/repositories//_manifests/tags//index/ +// manifestTagIndexEntryPathSpec: /v2/repositories//_manifests/tags//index/// +// manifestTagIndexEntryLinkPathSpec: /v2/repositories//_manifests/tags//index///link +// +// Blobs: +// +// layerLinkPathSpec: /v2/repositories//_layers///link +// +// Uploads: +// +// uploadDataPathSpec: /v2/repositories//_uploads//data +// uploadStartedAtPathSpec: /v2/repositories//_uploads//startedat +// uploadHashStatePathSpec: /v2/repositories//_uploads//hashstates// +// +// Blob Store: +// +// blobsPathSpec: /v2/blobs/ +// blobPathSpec: /v2/blobs/// +// blobDataPathSpec: /v2/blobs////data +// blobMediaTypePathSpec: /v2/blobs////data +// +// For more information on the semantic meaning of each path and their +// contents, please see the path spec documentation. +func pathFor(spec pathSpec) (string, error) { + + // Switch on the path object type and return the appropriate path. At + // first glance, one may wonder why we don't use an interface to + // accomplish this. By keep the formatting separate from the pathSpec, we + // keep separate the path generation componentized. These specs could be + // passed to a completely different mapper implementation and generate a + // different set of paths. + // + // For example, imagine migrating from one backend to the other: one could + // build a filesystem walker that converts a string path in one version, + // to an intermediate path object, than can be consumed and mapped by the + // other version. 
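+	// Every path returned below is rooted at storagePathRoot joined with
+	// storagePathVersion ("/docker/registry/v2"); repository-scoped specs add
+	// the "repositories" component on top of that.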
+ + rootPrefix := []string{storagePathRoot, storagePathVersion} + repoPrefix := append(rootPrefix, "repositories") + + switch v := spec.(type) { + + case manifestRevisionsPathSpec: + return path.Join(append(repoPrefix, v.name, "_manifests", "revisions")...), nil + + case manifestRevisionPathSpec: + components, err := digestPathComponents(v.revision, false) + if err != nil { + return "", err + } + + return path.Join(append(append(repoPrefix, v.name, "_manifests", "revisions"), components...)...), nil + case manifestRevisionLinkPathSpec: + root, err := pathFor(manifestRevisionPathSpec{ + name: v.name, + revision: v.revision, + }) + + if err != nil { + return "", err + } + + return path.Join(root, "link"), nil + case manifestTagsPathSpec: + return path.Join(append(repoPrefix, v.name, "_manifests", "tags")...), nil + case manifestTagPathSpec: + root, err := pathFor(manifestTagsPathSpec{ + name: v.name, + }) + + if err != nil { + return "", err + } + + return path.Join(root, v.tag), nil + case manifestTagCurrentPathSpec: + root, err := pathFor(manifestTagPathSpec{ + name: v.name, + tag: v.tag, + }) + + if err != nil { + return "", err + } + + return path.Join(root, "current", "link"), nil + case manifestTagIndexPathSpec: + root, err := pathFor(manifestTagPathSpec{ + name: v.name, + tag: v.tag, + }) + + if err != nil { + return "", err + } + + return path.Join(root, "index"), nil + case manifestTagIndexEntryLinkPathSpec: + root, err := pathFor(manifestTagIndexEntryPathSpec{ + name: v.name, + tag: v.tag, + revision: v.revision, + }) + + if err != nil { + return "", err + } + + return path.Join(root, "link"), nil + case manifestTagIndexEntryPathSpec: + root, err := pathFor(manifestTagIndexPathSpec{ + name: v.name, + tag: v.tag, + }) + + if err != nil { + return "", err + } + + components, err := digestPathComponents(v.revision, false) + if err != nil { + return "", err + } + + return path.Join(root, path.Join(components...)), nil + case layerLinkPathSpec: + components, err := digestPathComponents(v.digest, false) + if err != nil { + return "", err + } + + // TODO(stevvooe): Right now, all blobs are linked under "_layers". If + // we have future migrations, we may want to rename this to "_blobs". + // A migration strategy would simply leave existing items in place and + // write the new paths, commit a file then delete the old files. + + blobLinkPathComponents := append(repoPrefix, v.name, "_layers") + + return path.Join(path.Join(append(blobLinkPathComponents, components...)...), "link"), nil + case blobsPathSpec: + blobsPathPrefix := append(rootPrefix, "blobs") + return path.Join(blobsPathPrefix...), nil + case blobPathSpec: + components, err := digestPathComponents(v.digest, true) + if err != nil { + return "", err + } + + blobPathPrefix := append(rootPrefix, "blobs") + return path.Join(append(blobPathPrefix, components...)...), nil + case blobDataPathSpec: + components, err := digestPathComponents(v.digest, true) + if err != nil { + return "", err + } + + components = append(components, "data") + blobPathPrefix := append(rootPrefix, "blobs") + return path.Join(append(blobPathPrefix, components...)...), nil + + case uploadDataPathSpec: + return path.Join(append(repoPrefix, v.name, "_uploads", v.id, "data")...), nil + case uploadStartedAtPathSpec: + return path.Join(append(repoPrefix, v.name, "_uploads", v.id, "startedat")...), nil + case uploadHashStatePathSpec: + offset := fmt.Sprintf("%d", v.offset) + if v.list { + offset = "" // Limit to the prefix for listing offsets. 
+ } + return path.Join(append(repoPrefix, v.name, "_uploads", v.id, "hashstates", string(v.alg), offset)...), nil + case repositoriesRootPathSpec: + return path.Join(repoPrefix...), nil + default: + // TODO(sday): This is an internal error. Ensure it doesn't escape (panic?). + return "", fmt.Errorf("unknown path spec: %#v", v) + } +} + +// pathSpec is a type to mark structs as path specs. There is no +// implementation because we'd like to keep the specs and the mappers +// decoupled. +type pathSpec interface { + pathSpec() +} + +// manifestRevisionsPathSpec describes the directory path for +// a manifest revision. +type manifestRevisionsPathSpec struct { + name string +} + +func (manifestRevisionsPathSpec) pathSpec() {} + +// manifestRevisionPathSpec describes the components of the directory path for +// a manifest revision. +type manifestRevisionPathSpec struct { + name string + revision digest.Digest +} + +func (manifestRevisionPathSpec) pathSpec() {} + +// manifestRevisionLinkPathSpec describes the path components required to look +// up the data link for a revision of a manifest. If this file is not present, +// the manifest blob is not available in the given repo. The contents of this +// file should just be the digest. +type manifestRevisionLinkPathSpec struct { + name string + revision digest.Digest +} + +func (manifestRevisionLinkPathSpec) pathSpec() {} + +// manifestTagsPathSpec describes the path elements required to point to the +// manifest tags directory. +type manifestTagsPathSpec struct { + name string +} + +func (manifestTagsPathSpec) pathSpec() {} + +// manifestTagPathSpec describes the path elements required to point to the +// manifest tag links files under a repository. These contain a blob id that +// can be used to look up the data and signatures. +type manifestTagPathSpec struct { + name string + tag string +} + +func (manifestTagPathSpec) pathSpec() {} + +// manifestTagCurrentPathSpec describes the link to the current revision for a +// given tag. +type manifestTagCurrentPathSpec struct { + name string + tag string +} + +func (manifestTagCurrentPathSpec) pathSpec() {} + +// manifestTagCurrentPathSpec describes the link to the index of revisions +// with the given tag. +type manifestTagIndexPathSpec struct { + name string + tag string +} + +func (manifestTagIndexPathSpec) pathSpec() {} + +// manifestTagIndexEntryPathSpec contains the entries of the index by revision. +type manifestTagIndexEntryPathSpec struct { + name string + tag string + revision digest.Digest +} + +func (manifestTagIndexEntryPathSpec) pathSpec() {} + +// manifestTagIndexEntryLinkPathSpec describes the link to a revisions of a +// manifest with given tag within the index. +type manifestTagIndexEntryLinkPathSpec struct { + name string + tag string + revision digest.Digest +} + +func (manifestTagIndexEntryLinkPathSpec) pathSpec() {} + +// blobLinkPathSpec specifies a path for a blob link, which is a file with a +// blob id. The blob link will contain a content addressable blob id reference +// into the blob store. The format of the contents is as follows: +// +// : +// +// The following example of the file contents is more illustrative: +// +// sha256:96443a84ce518ac22acb2e985eda402b58ac19ce6f91980bde63726a79d80b36 +// +// This indicates that there is a blob with the id/digest, calculated via +// sha256 that can be fetched from the blob store. 
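+// The type below is named layerLinkPathSpec because repository blob links are
+// currently stored under the _layers directory.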
+type layerLinkPathSpec struct { + name string + digest digest.Digest +} + +func (layerLinkPathSpec) pathSpec() {} + +// blobAlgorithmReplacer does some very simple path sanitization for user +// input. Paths should be "safe" before getting this far due to strict digest +// requirements but we can add further path conversion here, if needed. +var blobAlgorithmReplacer = strings.NewReplacer( + "+", "/", + ".", "/", + ";", "/", +) + +// blobsPathSpec contains the path for the blobs directory +type blobsPathSpec struct{} + +func (blobsPathSpec) pathSpec() {} + +// blobPathSpec contains the path for the registry global blob store. +type blobPathSpec struct { + digest digest.Digest +} + +func (blobPathSpec) pathSpec() {} + +// blobDataPathSpec contains the path for the registry global blob store. For +// now, this contains layer data, exclusively. +type blobDataPathSpec struct { + digest digest.Digest +} + +func (blobDataPathSpec) pathSpec() {} + +// uploadDataPathSpec defines the path parameters of the data file for +// uploads. +type uploadDataPathSpec struct { + name string + id string +} + +func (uploadDataPathSpec) pathSpec() {} + +// uploadDataPathSpec defines the path parameters for the file that stores the +// start time of an uploads. If it is missing, the upload is considered +// unknown. Admittedly, the presence of this file is an ugly hack to make sure +// we have a way to cleanup old or stalled uploads that doesn't rely on driver +// FileInfo behavior. If we come up with a more clever way to do this, we +// should remove this file immediately and rely on the startetAt field from +// the client to enforce time out policies. +type uploadStartedAtPathSpec struct { + name string + id string +} + +func (uploadStartedAtPathSpec) pathSpec() {} + +// uploadHashStatePathSpec defines the path parameters for the file that stores +// the hash function state of an upload at a specific byte offset. If `list` is +// set, then the path mapper will generate a list prefix for all hash state +// offsets for the upload identified by the name, id, and alg. +type uploadHashStatePathSpec struct { + name string + id string + alg digest.Algorithm + offset int64 + list bool +} + +func (uploadHashStatePathSpec) pathSpec() {} + +// repositoriesRootPathSpec returns the root of repositories +type repositoriesRootPathSpec struct { +} + +func (repositoriesRootPathSpec) pathSpec() {} + +// digestPathComponents provides a consistent path breakdown for a given +// digest. For a generic digest, it will be as follows: +// +// / +// +// If multilevel is true, the first two bytes of the digest will separate +// groups of digest folder. 
It will be as follows: +// +// // +// +func digestPathComponents(dgst digest.Digest, multilevel bool) ([]string, error) { + if err := dgst.Validate(); err != nil { + return nil, err + } + + algorithm := blobAlgorithmReplacer.Replace(string(dgst.Algorithm())) + hex := dgst.Hex() + prefix := []string{algorithm} + + var suffix []string + + if multilevel { + suffix = append(suffix, hex[:2]) + } + + suffix = append(suffix, hex) + + return append(prefix, suffix...), nil +} + +// Reconstructs a digest from a path +func digestFromPath(digestPath string) (digest.Digest, error) { + + digestPath = strings.TrimSuffix(digestPath, "/data") + dir, hex := path.Split(digestPath) + dir = path.Dir(dir) + dir, next := path.Split(dir) + + // next is either the algorithm OR the first two characters in the hex string + var algo string + if next == hex[:2] { + algo = path.Base(dir) + } else { + algo = next + } + + dgst := digest.NewDigestFromHex(algo, hex) + return dgst, dgst.Validate() +} diff --git a/vendor/github.com/docker/distribution/registry/storage/paths_test.go b/vendor/github.com/docker/distribution/registry/storage/paths_test.go new file mode 100644 index 000000000..f739552aa --- /dev/null +++ b/vendor/github.com/docker/distribution/registry/storage/paths_test.go @@ -0,0 +1,135 @@ +package storage + +import ( + "testing" + + "github.com/docker/distribution/digest" +) + +func TestPathMapper(t *testing.T) { + for _, testcase := range []struct { + spec pathSpec + expected string + err error + }{ + { + spec: manifestRevisionPathSpec{ + name: "foo/bar", + revision: "sha256:abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789", + }, + expected: "/docker/registry/v2/repositories/foo/bar/_manifests/revisions/sha256/abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789", + }, + { + spec: manifestRevisionLinkPathSpec{ + name: "foo/bar", + revision: "sha256:abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789", + }, + expected: "/docker/registry/v2/repositories/foo/bar/_manifests/revisions/sha256/abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789/link", + }, + { + spec: manifestTagsPathSpec{ + name: "foo/bar", + }, + expected: "/docker/registry/v2/repositories/foo/bar/_manifests/tags", + }, + { + spec: manifestTagPathSpec{ + name: "foo/bar", + tag: "thetag", + }, + expected: "/docker/registry/v2/repositories/foo/bar/_manifests/tags/thetag", + }, + { + spec: manifestTagCurrentPathSpec{ + name: "foo/bar", + tag: "thetag", + }, + expected: "/docker/registry/v2/repositories/foo/bar/_manifests/tags/thetag/current/link", + }, + { + spec: manifestTagIndexPathSpec{ + name: "foo/bar", + tag: "thetag", + }, + expected: "/docker/registry/v2/repositories/foo/bar/_manifests/tags/thetag/index", + }, + { + spec: manifestTagIndexEntryPathSpec{ + name: "foo/bar", + tag: "thetag", + revision: "sha256:abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789", + }, + expected: "/docker/registry/v2/repositories/foo/bar/_manifests/tags/thetag/index/sha256/abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789", + }, + { + spec: manifestTagIndexEntryLinkPathSpec{ + name: "foo/bar", + tag: "thetag", + revision: "sha256:abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789", + }, + expected: "/docker/registry/v2/repositories/foo/bar/_manifests/tags/thetag/index/sha256/abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789/link", + }, + + { + spec: uploadDataPathSpec{ + name: "foo/bar", + id: "asdf-asdf-asdf-adsf", + }, + expected: 
"/docker/registry/v2/repositories/foo/bar/_uploads/asdf-asdf-asdf-adsf/data", + }, + { + spec: uploadStartedAtPathSpec{ + name: "foo/bar", + id: "asdf-asdf-asdf-adsf", + }, + expected: "/docker/registry/v2/repositories/foo/bar/_uploads/asdf-asdf-asdf-adsf/startedat", + }, + } { + p, err := pathFor(testcase.spec) + if err != nil { + t.Fatalf("unexpected generating path (%T): %v", testcase.spec, err) + } + + if p != testcase.expected { + t.Fatalf("unexpected path generated (%T): %q != %q", testcase.spec, p, testcase.expected) + } + } + + // Add a few test cases to ensure we cover some errors + + // Specify a path that requires a revision and get a digest validation error. + badpath, err := pathFor(manifestRevisionPathSpec{ + name: "foo/bar", + }) + + if err == nil { + t.Fatalf("expected an error when mapping an invalid revision: %s", badpath) + } + +} + +func TestDigestFromPath(t *testing.T) { + for _, testcase := range []struct { + path string + expected digest.Digest + multilevel bool + err error + }{ + { + path: "/docker/registry/v2/blobs/sha256/99/9943fffae777400c0344c58869c4c2619c329ca3ad4df540feda74d291dd7c86/data", + multilevel: true, + expected: "sha256:9943fffae777400c0344c58869c4c2619c329ca3ad4df540feda74d291dd7c86", + err: nil, + }, + } { + result, err := digestFromPath(testcase.path) + if err != testcase.err { + t.Fatalf("Unexpected error value %v when we wanted %v", err, testcase.err) + } + + if result != testcase.expected { + t.Fatalf("Unexpected result value %v when we wanted %v", result, testcase.expected) + + } + } +} diff --git a/vendor/github.com/docker/distribution/registry/storage/purgeuploads.go b/vendor/github.com/docker/distribution/registry/storage/purgeuploads.go new file mode 100644 index 000000000..7576b189c --- /dev/null +++ b/vendor/github.com/docker/distribution/registry/storage/purgeuploads.go @@ -0,0 +1,139 @@ +package storage + +import ( + "path" + "strings" + "time" + + log "github.com/Sirupsen/logrus" + "github.com/docker/distribution/context" + storageDriver "github.com/docker/distribution/registry/storage/driver" + "github.com/docker/distribution/uuid" +) + +// uploadData stored the location of temporary files created during a layer upload +// along with the date the upload was started +type uploadData struct { + containingDir string + startedAt time.Time +} + +func newUploadData() uploadData { + return uploadData{ + containingDir: "", + // default to far in future to protect against missing startedat + startedAt: time.Now().Add(time.Duration(10000 * time.Hour)), + } +} + +// PurgeUploads deletes files from the upload directory +// created before olderThan. The list of files deleted and errors +// encountered are returned +func PurgeUploads(ctx context.Context, driver storageDriver.StorageDriver, olderThan time.Time, actuallyDelete bool) ([]string, []error) { + log.Infof("PurgeUploads starting: olderThan=%s, actuallyDelete=%t", olderThan, actuallyDelete) + uploadData, errors := getOutstandingUploads(ctx, driver) + var deleted []string + for _, uploadData := range uploadData { + if uploadData.startedAt.Before(olderThan) { + var err error + log.Infof("Upload files in %s have older date (%s) than purge date (%s). Removing upload directory.", + uploadData.containingDir, uploadData.startedAt, olderThan) + if actuallyDelete { + err = driver.Delete(ctx, uploadData.containingDir) + } + if err == nil { + deleted = append(deleted, uploadData.containingDir) + } else { + errors = append(errors, err) + } + } + } + + log.Infof("Purge uploads finished. 
Num deleted=%d, num errors=%d", len(deleted), len(errors)) + return deleted, errors +} + +// getOutstandingUploads walks the upload directory, collecting files +// which could be eligible for deletion. The only reliable way to +// classify the age of a file is with the date stored in the startedAt +// file, so gather files by UUID with a date from startedAt. +func getOutstandingUploads(ctx context.Context, driver storageDriver.StorageDriver) (map[string]uploadData, []error) { + var errors []error + uploads := make(map[string]uploadData, 0) + + inUploadDir := false + root, err := pathFor(repositoriesRootPathSpec{}) + if err != nil { + return uploads, append(errors, err) + } + + err = Walk(ctx, driver, root, func(fileInfo storageDriver.FileInfo) error { + filePath := fileInfo.Path() + _, file := path.Split(filePath) + if file[0] == '_' { + // Reserved directory + inUploadDir = (file == "_uploads") + + if fileInfo.IsDir() && !inUploadDir { + return ErrSkipDir + } + + } + + uuid, isContainingDir := uUIDFromPath(filePath) + if uuid == "" { + // Cannot reliably delete + return nil + } + ud, ok := uploads[uuid] + if !ok { + ud = newUploadData() + } + if isContainingDir { + ud.containingDir = filePath + } + if file == "startedat" { + if t, err := readStartedAtFile(driver, filePath); err == nil { + ud.startedAt = t + } else { + errors = pushError(errors, filePath, err) + } + + } + + uploads[uuid] = ud + return nil + }) + + if err != nil { + errors = pushError(errors, root, err) + } + return uploads, errors +} + +// uUIDFromPath extracts the upload UUID from a given path +// If the UUID is the last path component, this is the containing +// directory for all upload files +func uUIDFromPath(path string) (string, bool) { + components := strings.Split(path, "/") + for i := len(components) - 1; i >= 0; i-- { + if u, err := uuid.Parse(components[i]); err == nil { + return u.String(), i == len(components)-1 + } + } + return "", false +} + +// readStartedAtFile reads the date from an upload's startedAtFile +func readStartedAtFile(driver storageDriver.StorageDriver, path string) (time.Time, error) { + // todo:(richardscothern) - pass in a context + startedAtBytes, err := driver.GetContent(context.Background(), path) + if err != nil { + return time.Now(), err + } + startedAt, err := time.Parse(time.RFC3339, string(startedAtBytes)) + if err != nil { + return time.Now(), err + } + return startedAt, nil +} diff --git a/vendor/github.com/docker/distribution/registry/storage/purgeuploads_test.go b/vendor/github.com/docker/distribution/registry/storage/purgeuploads_test.go new file mode 100644 index 000000000..3b70f7234 --- /dev/null +++ b/vendor/github.com/docker/distribution/registry/storage/purgeuploads_test.go @@ -0,0 +1,166 @@ +package storage + +import ( + "path" + "strings" + "testing" + "time" + + "github.com/docker/distribution/context" + "github.com/docker/distribution/registry/storage/driver" + "github.com/docker/distribution/registry/storage/driver/inmemory" + "github.com/docker/distribution/uuid" +) + +func testUploadFS(t *testing.T, numUploads int, repoName string, startedAt time.Time) (driver.StorageDriver, context.Context) { + d := inmemory.New() + ctx := context.Background() + for i := 0; i < numUploads; i++ { + addUploads(ctx, t, d, uuid.Generate().String(), repoName, startedAt) + } + return d, ctx +} + +func addUploads(ctx context.Context, t *testing.T, d driver.StorageDriver, uploadID, repo string, startedAt time.Time) { + dataPath, err := pathFor(uploadDataPathSpec{name: repo, id: uploadID}) + 
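PurgeUploads above takes an olderThan cutoff and an actuallyDelete flag, so a caller can do a dry run before removing anything. A minimal sketch of that pattern, not part of the vendored file; the in-memory driver and the one-week retention window are assumptions for illustration:

package main

import (
	"fmt"
	"time"

	"github.com/docker/distribution/context"
	"github.com/docker/distribution/registry/storage"
	"github.com/docker/distribution/registry/storage/driver/inmemory"
)

func main() {
	ctx := context.Background()
	drv := inmemory.New()
	cutoff := time.Now().Add(-7 * 24 * time.Hour) // assumed retention window

	// Dry run: only report which upload directories would be removed.
	wouldDelete, errs := storage.PurgeUploads(ctx, drv, cutoff, false)
	fmt.Printf("would purge %d upload dirs, %d errors\n", len(wouldDelete), len(errs))

	// Second pass with actuallyDelete=true removes them.
	deleted, errs := storage.PurgeUploads(ctx, drv, cutoff, true)
	fmt.Printf("purged %d upload dirs, %d errors\n", len(deleted), len(errs))
}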
if err != nil { + t.Fatalf("Unable to resolve path") + } + if err := d.PutContent(ctx, dataPath, []byte("")); err != nil { + t.Fatalf("Unable to write data file") + } + + startedAtPath, err := pathFor(uploadStartedAtPathSpec{name: repo, id: uploadID}) + if err != nil { + t.Fatalf("Unable to resolve path") + } + + if d.PutContent(ctx, startedAtPath, []byte(startedAt.Format(time.RFC3339))); err != nil { + t.Fatalf("Unable to write startedAt file") + } + +} + +func TestPurgeGather(t *testing.T) { + uploadCount := 5 + fs, ctx := testUploadFS(t, uploadCount, "test-repo", time.Now()) + uploadData, errs := getOutstandingUploads(ctx, fs) + if len(errs) != 0 { + t.Errorf("Unexepected errors: %q", errs) + } + if len(uploadData) != uploadCount { + t.Errorf("Unexpected upload file count: %d != %d", uploadCount, len(uploadData)) + } +} + +func TestPurgeNone(t *testing.T) { + fs, ctx := testUploadFS(t, 10, "test-repo", time.Now()) + oneHourAgo := time.Now().Add(-1 * time.Hour) + deleted, errs := PurgeUploads(ctx, fs, oneHourAgo, true) + if len(errs) != 0 { + t.Error("Unexpected errors", errs) + } + if len(deleted) != 0 { + t.Errorf("Unexpectedly deleted files for time: %s", oneHourAgo) + } +} + +func TestPurgeAll(t *testing.T) { + uploadCount := 10 + oneHourAgo := time.Now().Add(-1 * time.Hour) + fs, ctx := testUploadFS(t, uploadCount, "test-repo", oneHourAgo) + + // Ensure > 1 repos are purged + addUploads(ctx, t, fs, uuid.Generate().String(), "test-repo2", oneHourAgo) + uploadCount++ + + deleted, errs := PurgeUploads(ctx, fs, time.Now(), true) + if len(errs) != 0 { + t.Error("Unexpected errors:", errs) + } + fileCount := uploadCount + if len(deleted) != fileCount { + t.Errorf("Unexpectedly deleted file count %d != %d", + len(deleted), fileCount) + } +} + +func TestPurgeSome(t *testing.T) { + oldUploadCount := 5 + oneHourAgo := time.Now().Add(-1 * time.Hour) + fs, ctx := testUploadFS(t, oldUploadCount, "library/test-repo", oneHourAgo) + + newUploadCount := 4 + + for i := 0; i < newUploadCount; i++ { + addUploads(ctx, t, fs, uuid.Generate().String(), "test-repo", time.Now().Add(1*time.Hour)) + } + + deleted, errs := PurgeUploads(ctx, fs, time.Now(), true) + if len(errs) != 0 { + t.Error("Unexpected errors:", errs) + } + if len(deleted) != oldUploadCount { + t.Errorf("Unexpectedly deleted file count %d != %d", + len(deleted), oldUploadCount) + } +} + +func TestPurgeOnlyUploads(t *testing.T) { + oldUploadCount := 5 + oneHourAgo := time.Now().Add(-1 * time.Hour) + fs, ctx := testUploadFS(t, oldUploadCount, "test-repo", oneHourAgo) + + // Create a directory tree outside _uploads and ensure + // these files aren't deleted. 
+ dataPath, err := pathFor(uploadDataPathSpec{name: "test-repo", id: uuid.Generate().String()}) + if err != nil { + t.Fatalf(err.Error()) + } + nonUploadPath := strings.Replace(dataPath, "_upload", "_important", -1) + if strings.Index(nonUploadPath, "_upload") != -1 { + t.Fatalf("Non-upload path not created correctly") + } + + nonUploadFile := path.Join(nonUploadPath, "file") + if err = fs.PutContent(ctx, nonUploadFile, []byte("")); err != nil { + t.Fatalf("Unable to write data file") + } + + deleted, errs := PurgeUploads(ctx, fs, time.Now(), true) + if len(errs) != 0 { + t.Error("Unexpected errors", errs) + } + for _, file := range deleted { + if strings.Index(file, "_upload") == -1 { + t.Errorf("Non-upload file deleted") + } + } +} + +func TestPurgeMissingStartedAt(t *testing.T) { + oneHourAgo := time.Now().Add(-1 * time.Hour) + fs, ctx := testUploadFS(t, 1, "test-repo", oneHourAgo) + + err := Walk(ctx, fs, "/", func(fileInfo driver.FileInfo) error { + filePath := fileInfo.Path() + _, file := path.Split(filePath) + + if file == "startedat" { + if err := fs.Delete(ctx, filePath); err != nil { + t.Fatalf("Unable to delete startedat file: %s", filePath) + } + } + return nil + }) + if err != nil { + t.Fatalf("Unexpected error during Walk: %s ", err.Error()) + } + deleted, errs := PurgeUploads(ctx, fs, time.Now(), true) + if len(errs) > 0 { + t.Errorf("Unexpected errors") + } + if len(deleted) > 0 { + t.Errorf("Files unexpectedly deleted: %s", deleted) + } +} diff --git a/vendor/github.com/docker/distribution/registry/storage/registry.go b/vendor/github.com/docker/distribution/registry/storage/registry.go new file mode 100644 index 000000000..20525ffb3 --- /dev/null +++ b/vendor/github.com/docker/distribution/registry/storage/registry.go @@ -0,0 +1,306 @@ +package storage + +import ( + "regexp" + + "github.com/docker/distribution" + "github.com/docker/distribution/context" + "github.com/docker/distribution/reference" + "github.com/docker/distribution/registry/storage/cache" + storagedriver "github.com/docker/distribution/registry/storage/driver" + "github.com/docker/libtrust" +) + +// registry is the top-level implementation of Registry for use in the storage +// package. All instances should descend from this object. +type registry struct { + blobStore *blobStore + blobServer *blobServer + statter *blobStatter // global statter service. + blobDescriptorCacheProvider cache.BlobDescriptorCacheProvider + deleteEnabled bool + resumableDigestEnabled bool + schema1SigningKey libtrust.PrivateKey + blobDescriptorServiceFactory distribution.BlobDescriptorServiceFactory + manifestURLs manifestURLs +} + +// manifestURLs holds regular expressions for controlling manifest URL whitelisting +type manifestURLs struct { + allow *regexp.Regexp + deny *regexp.Regexp +} + +// RegistryOption is the type used for functional options for NewRegistry. +type RegistryOption func(*registry) error + +// EnableRedirect is a functional option for NewRegistry. It causes the backend +// blob server to attempt using (StorageDriver).URLFor to serve all blobs. +func EnableRedirect(registry *registry) error { + registry.blobServer.redirect = true + return nil +} + +// EnableDelete is a functional option for NewRegistry. It enables deletion on +// the registry. +func EnableDelete(registry *registry) error { + registry.deleteEnabled = true + return nil +} + +// DisableDigestResumption is a functional option for NewRegistry. It should be +// used if the registry is acting as a caching proxy. 
+func DisableDigestResumption(registry *registry) error { + registry.resumableDigestEnabled = false + return nil +} + +// ManifestURLsAllowRegexp is a functional option for NewRegistry. +func ManifestURLsAllowRegexp(r *regexp.Regexp) RegistryOption { + return func(registry *registry) error { + registry.manifestURLs.allow = r + return nil + } +} + +// ManifestURLsDenyRegexp is a functional option for NewRegistry. +func ManifestURLsDenyRegexp(r *regexp.Regexp) RegistryOption { + return func(registry *registry) error { + registry.manifestURLs.deny = r + return nil + } +} + +// Schema1SigningKey returns a functional option for NewRegistry. It sets the +// key for signing all schema1 manifests. +func Schema1SigningKey(key libtrust.PrivateKey) RegistryOption { + return func(registry *registry) error { + registry.schema1SigningKey = key + return nil + } +} + +// BlobDescriptorServiceFactory returns a functional option for NewRegistry. It sets the +// factory to create BlobDescriptorServiceFactory middleware. +func BlobDescriptorServiceFactory(factory distribution.BlobDescriptorServiceFactory) RegistryOption { + return func(registry *registry) error { + registry.blobDescriptorServiceFactory = factory + return nil + } +} + +// BlobDescriptorCacheProvider returns a functional option for +// NewRegistry. It creates a cached blob statter for use by the +// registry. +func BlobDescriptorCacheProvider(blobDescriptorCacheProvider cache.BlobDescriptorCacheProvider) RegistryOption { + // TODO(aaronl): The duplication of statter across several objects is + // ugly, and prevents us from using interface types in the registry + // struct. Ideally, blobStore and blobServer should be lazily + // initialized, and use the current value of + // blobDescriptorCacheProvider. + return func(registry *registry) error { + if blobDescriptorCacheProvider != nil { + statter := cache.NewCachedBlobStatter(blobDescriptorCacheProvider, registry.statter) + registry.blobStore.statter = statter + registry.blobServer.statter = statter + registry.blobDescriptorCacheProvider = blobDescriptorCacheProvider + } + return nil + } +} + +// NewRegistry creates a new registry instance from the provided driver. The +// resulting registry may be shared by multiple goroutines but is cheap to +// allocate. If the Redirect option is specified, the backend blob server will +// attempt to use (StorageDriver).URLFor to serve all blobs. +func NewRegistry(ctx context.Context, driver storagedriver.StorageDriver, options ...RegistryOption) (distribution.Namespace, error) { + // create global statter + statter := &blobStatter{ + driver: driver, + } + + bs := &blobStore{ + driver: driver, + statter: statter, + } + + registry := ®istry{ + blobStore: bs, + blobServer: &blobServer{ + driver: driver, + statter: statter, + pathFn: bs.path, + }, + statter: statter, + resumableDigestEnabled: true, + } + + for _, option := range options { + if err := option(registry); err != nil { + return nil, err + } + } + + return registry, nil +} + +// Scope returns the namespace scope for a registry. The registry +// will only serve repositories contained within this scope. +func (reg *registry) Scope() distribution.Scope { + return distribution.GlobalScope +} + +// Repository returns an instance of the repository tied to the registry. +// Instances should not be shared between goroutines but are cheap to +// allocate. In general, they should be request scoped. 
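NewRegistry above is configured through functional options: any RegistryOption value (EnableDelete, EnableRedirect, the ManifestURLs regexps, and so on) is applied to the registry before it is returned. A small usage sketch, separate from the vendored code; the in-memory driver, allow regexp, and repository name are illustrative choices:

package main

import (
	"regexp"

	"github.com/docker/distribution/context"
	"github.com/docker/distribution/reference"
	"github.com/docker/distribution/registry/storage"
	"github.com/docker/distribution/registry/storage/driver/inmemory"
)

func main() {
	ctx := context.Background()

	// Build a registry namespace with deletion enabled and a manifest URL allow list.
	ns, err := storage.NewRegistry(ctx, inmemory.New(),
		storage.EnableDelete,
		storage.EnableRedirect,
		storage.ManifestURLsAllowRegexp(regexp.MustCompile(`^https?://`)),
	)
	if err != nil {
		panic(err)
	}

	// Repositories are resolved per request from the namespace.
	named, err := reference.ParseNamed("library/example")
	if err != nil {
		panic(err)
	}
	repo, err := ns.Repository(ctx, named)
	if err != nil {
		panic(err)
	}
	_ = repo
}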
+func (reg *registry) Repository(ctx context.Context, canonicalName reference.Named) (distribution.Repository, error) { + var descriptorCache distribution.BlobDescriptorService + if reg.blobDescriptorCacheProvider != nil { + var err error + descriptorCache, err = reg.blobDescriptorCacheProvider.RepositoryScoped(canonicalName.Name()) + if err != nil { + return nil, err + } + } + + return &repository{ + ctx: ctx, + registry: reg, + name: canonicalName, + descriptorCache: descriptorCache, + }, nil +} + +func (reg *registry) Blobs() distribution.BlobEnumerator { + return reg.blobStore +} + +func (reg *registry) BlobStatter() distribution.BlobStatter { + return reg.statter +} + +// repository provides name-scoped access to various services. +type repository struct { + *registry + ctx context.Context + name reference.Named + descriptorCache distribution.BlobDescriptorService +} + +// Name returns the name of the repository. +func (repo *repository) Named() reference.Named { + return repo.name +} + +func (repo *repository) Tags(ctx context.Context) distribution.TagService { + tags := &tagStore{ + repository: repo, + blobStore: repo.registry.blobStore, + } + + return tags +} + +// Manifests returns an instance of ManifestService. Instantiation is cheap and +// may be context sensitive in the future. The instance should be used similar +// to a request local. +func (repo *repository) Manifests(ctx context.Context, options ...distribution.ManifestServiceOption) (distribution.ManifestService, error) { + manifestLinkPathFns := []linkPathFunc{ + // NOTE(stevvooe): Need to search through multiple locations since + // 2.1.0 unintentionally linked into _layers. + manifestRevisionLinkPath, + blobLinkPath, + } + + manifestDirectoryPathSpec := manifestRevisionsPathSpec{name: repo.name.Name()} + + var statter distribution.BlobDescriptorService = &linkedBlobStatter{ + blobStore: repo.blobStore, + repository: repo, + linkPathFns: manifestLinkPathFns, + } + + if repo.registry.blobDescriptorServiceFactory != nil { + statter = repo.registry.blobDescriptorServiceFactory.BlobAccessController(statter) + } + + blobStore := &linkedBlobStore{ + ctx: ctx, + blobStore: repo.blobStore, + repository: repo, + deleteEnabled: repo.registry.deleteEnabled, + blobAccessController: statter, + + // TODO(stevvooe): linkPath limits this blob store to only + // manifests. This instance cannot be used for blob checks. + linkPathFns: manifestLinkPathFns, + linkDirectoryPathSpec: manifestDirectoryPathSpec, + } + + ms := &manifestStore{ + ctx: ctx, + repository: repo, + blobStore: blobStore, + schema1Handler: &signedManifestHandler{ + ctx: ctx, + schema1SigningKey: repo.schema1SigningKey, + repository: repo, + blobStore: blobStore, + }, + schema2Handler: &schema2ManifestHandler{ + ctx: ctx, + repository: repo, + blobStore: blobStore, + manifestURLs: repo.registry.manifestURLs, + }, + manifestListHandler: &manifestListHandler{ + ctx: ctx, + repository: repo, + blobStore: blobStore, + }, + } + + // Apply options + for _, option := range options { + err := option.Apply(ms) + if err != nil { + return nil, err + } + } + + return ms, nil +} + +// Blobs returns an instance of the BlobStore. Instantiation is cheap and +// may be context sensitive in the future. The instance should be used similar +// to a request local. 
+func (repo *repository) Blobs(ctx context.Context) distribution.BlobStore { + var statter distribution.BlobDescriptorService = &linkedBlobStatter{ + blobStore: repo.blobStore, + repository: repo, + linkPathFns: []linkPathFunc{blobLinkPath}, + } + + if repo.descriptorCache != nil { + statter = cache.NewCachedBlobStatter(repo.descriptorCache, statter) + } + + if repo.registry.blobDescriptorServiceFactory != nil { + statter = repo.registry.blobDescriptorServiceFactory.BlobAccessController(statter) + } + + return &linkedBlobStore{ + registry: repo.registry, + blobStore: repo.blobStore, + blobServer: repo.blobServer, + blobAccessController: statter, + repository: repo, + ctx: ctx, + + // TODO(stevvooe): linkPath limits this blob store to only layers. + // This instance cannot be used for manifest checks. + linkPathFns: []linkPathFunc{blobLinkPath}, + deleteEnabled: repo.registry.deleteEnabled, + resumableDigestEnabled: repo.resumableDigestEnabled, + } +} diff --git a/vendor/github.com/docker/distribution/registry/storage/schema2manifesthandler.go b/vendor/github.com/docker/distribution/registry/storage/schema2manifesthandler.go new file mode 100644 index 000000000..9fe71bb4d --- /dev/null +++ b/vendor/github.com/docker/distribution/registry/storage/schema2manifesthandler.go @@ -0,0 +1,136 @@ +package storage + +import ( + "encoding/json" + "errors" + "fmt" + "net/url" + + "github.com/docker/distribution" + "github.com/docker/distribution/context" + "github.com/docker/distribution/digest" + "github.com/docker/distribution/manifest/schema1" + "github.com/docker/distribution/manifest/schema2" +) + +var ( + errUnexpectedURL = errors.New("unexpected URL on layer") + errMissingURL = errors.New("missing URL on layer") + errInvalidURL = errors.New("invalid URL on layer") +) + +//schema2ManifestHandler is a ManifestHandler that covers schema2 manifests. +type schema2ManifestHandler struct { + repository distribution.Repository + blobStore distribution.BlobStore + ctx context.Context + manifestURLs manifestURLs +} + +var _ ManifestHandler = &schema2ManifestHandler{} + +func (ms *schema2ManifestHandler) Unmarshal(ctx context.Context, dgst digest.Digest, content []byte) (distribution.Manifest, error) { + context.GetLogger(ms.ctx).Debug("(*schema2ManifestHandler).Unmarshal") + + var m schema2.DeserializedManifest + if err := json.Unmarshal(content, &m); err != nil { + return nil, err + } + + return &m, nil +} + +func (ms *schema2ManifestHandler) Put(ctx context.Context, manifest distribution.Manifest, skipDependencyVerification bool) (digest.Digest, error) { + context.GetLogger(ms.ctx).Debug("(*schema2ManifestHandler).Put") + + m, ok := manifest.(*schema2.DeserializedManifest) + if !ok { + return "", fmt.Errorf("non-schema2 manifest put to schema2ManifestHandler: %T", manifest) + } + + if err := ms.verifyManifest(ms.ctx, *m, skipDependencyVerification); err != nil { + return "", err + } + + mt, payload, err := m.Payload() + if err != nil { + return "", err + } + + revision, err := ms.blobStore.Put(ctx, mt, payload) + if err != nil { + context.GetLogger(ctx).Errorf("error putting payload into blobstore: %v", err) + return "", err + } + + return revision.Digest, nil +} + +// verifyManifest ensures that the manifest content is valid from the +// perspective of the registry. As a policy, the registry only tries to store +// valid content, leaving trust policies of that content up to consumers. 
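verifyManifest below only accepts a foreign-layer URL if it parses, uses http or https, carries no fragment, matches the configured allow regexp (when set), and does not match the deny regexp (when set). A standalone sketch of that per-URL check, equivalent in spirit but not the vendored code itself; the regexps in main mirror the values used by the handler tests:

package main

import (
	"errors"
	"fmt"
	"net/url"
	"regexp"
)

// checkForeignLayerURL mirrors the per-URL rules applied to
// MediaTypeForeignLayer descriptors: parseable, http/https only, no
// fragment, optional allow/deny regexps.
func checkForeignLayerURL(raw string, allow, deny *regexp.Regexp) error {
	pu, err := url.Parse(raw)
	if err != nil {
		return err
	}
	if (pu.Scheme != "http" && pu.Scheme != "https") || pu.Fragment != "" {
		return errors.New("invalid URL on layer")
	}
	if allow != nil && !allow.MatchString(raw) {
		return errors.New("invalid URL on layer")
	}
	if deny != nil && deny.MatchString(raw) {
		return errors.New("invalid URL on layer")
	}
	return nil
}

func main() {
	allow := regexp.MustCompile(`^https?://foo`)
	deny := regexp.MustCompile(`^https?://foo/nope`)
	fmt.Println(checkForeignLayerURL("https://foo/bar", allow, deny))    // accepted
	fmt.Println(checkForeignLayerURL("http://foo/bar#baz", allow, deny)) // rejected: fragment
}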
+func (ms *schema2ManifestHandler) verifyManifest(ctx context.Context, mnfst schema2.DeserializedManifest, skipDependencyVerification bool) error { + var errs distribution.ErrManifestVerification + + if skipDependencyVerification { + return nil + } + + manifestService, err := ms.repository.Manifests(ctx) + if err != nil { + return err + } + + blobsService := ms.repository.Blobs(ctx) + + for _, descriptor := range mnfst.References() { + var err error + + switch descriptor.MediaType { + case schema2.MediaTypeForeignLayer: + // Clients download this layer from an external URL, so do not check for + // its presense. + if len(descriptor.URLs) == 0 { + err = errMissingURL + } + allow := ms.manifestURLs.allow + deny := ms.manifestURLs.deny + for _, u := range descriptor.URLs { + var pu *url.URL + pu, err = url.Parse(u) + if err != nil || (pu.Scheme != "http" && pu.Scheme != "https") || pu.Fragment != "" || (allow != nil && !allow.MatchString(u)) || (deny != nil && deny.MatchString(u)) { + err = errInvalidURL + break + } + } + case schema2.MediaTypeManifest, schema1.MediaTypeManifest: + var exists bool + exists, err = manifestService.Exists(ctx, descriptor.Digest) + if err != nil || !exists { + err = distribution.ErrBlobUnknown // just coerce to unknown. + } + + fallthrough // double check the blob store. + default: + // forward all else to blob storage + if len(descriptor.URLs) == 0 { + _, err = blobsService.Stat(ctx, descriptor.Digest) + } + } + + if err != nil { + if err != distribution.ErrBlobUnknown { + errs = append(errs, err) + } + + // On error here, we always append unknown blob errors. + errs = append(errs, distribution.ErrManifestBlobUnknown{Digest: descriptor.Digest}) + } + } + + if len(errs) != 0 { + return errs + } + + return nil +} diff --git a/vendor/github.com/docker/distribution/registry/storage/schema2manifesthandler_test.go b/vendor/github.com/docker/distribution/registry/storage/schema2manifesthandler_test.go new file mode 100644 index 000000000..5051fa3de --- /dev/null +++ b/vendor/github.com/docker/distribution/registry/storage/schema2manifesthandler_test.go @@ -0,0 +1,136 @@ +package storage + +import ( + "regexp" + "testing" + + "github.com/docker/distribution" + "github.com/docker/distribution/context" + "github.com/docker/distribution/manifest" + "github.com/docker/distribution/manifest/schema2" + "github.com/docker/distribution/registry/storage/driver/inmemory" +) + +func TestVerifyManifestForeignLayer(t *testing.T) { + ctx := context.Background() + inmemoryDriver := inmemory.New() + registry := createRegistry(t, inmemoryDriver, + ManifestURLsAllowRegexp(regexp.MustCompile("^https?://foo")), + ManifestURLsDenyRegexp(regexp.MustCompile("^https?://foo/nope"))) + repo := makeRepository(t, registry, "test") + manifestService := makeManifestService(t, repo) + + config, err := repo.Blobs(ctx).Put(ctx, schema2.MediaTypeConfig, nil) + if err != nil { + t.Fatal(err) + } + + layer, err := repo.Blobs(ctx).Put(ctx, schema2.MediaTypeLayer, nil) + if err != nil { + t.Fatal(err) + } + + foreignLayer := distribution.Descriptor{ + Digest: "sha256:463435349086340864309863409683460843608348608934092322395278926a", + Size: 6323, + MediaType: schema2.MediaTypeForeignLayer, + } + + template := schema2.Manifest{ + Versioned: manifest.Versioned{ + SchemaVersion: 2, + MediaType: schema2.MediaTypeManifest, + }, + Config: config, + } + + type testcase struct { + BaseLayer distribution.Descriptor + URLs []string + Err error + } + + cases := []testcase{ + { + foreignLayer, + nil, + errMissingURL, + 
}, + { + // regular layers may have foreign urls + layer, + []string{"http://foo/bar"}, + nil, + }, + { + foreignLayer, + []string{"file:///local/file"}, + errInvalidURL, + }, + { + foreignLayer, + []string{"http://foo/bar#baz"}, + errInvalidURL, + }, + { + foreignLayer, + []string{""}, + errInvalidURL, + }, + { + foreignLayer, + []string{"https://foo/bar", ""}, + errInvalidURL, + }, + { + foreignLayer, + []string{"", "https://foo/bar"}, + errInvalidURL, + }, + { + foreignLayer, + []string{"http://nope/bar"}, + errInvalidURL, + }, + { + foreignLayer, + []string{"http://foo/nope"}, + errInvalidURL, + }, + { + foreignLayer, + []string{"http://foo/bar"}, + nil, + }, + { + foreignLayer, + []string{"https://foo/bar"}, + nil, + }, + } + + for _, c := range cases { + m := template + l := c.BaseLayer + l.URLs = c.URLs + m.Layers = []distribution.Descriptor{l} + dm, err := schema2.FromStruct(m) + if err != nil { + t.Error(err) + continue + } + + _, err = manifestService.Put(ctx, dm) + if verr, ok := err.(distribution.ErrManifestVerification); ok { + // Extract the first error + if len(verr) == 2 { + if _, ok = verr[1].(distribution.ErrManifestBlobUnknown); ok { + err = verr[0] + } + } + } + if err != c.Err { + t.Errorf("%#v: expected %v, got %v", l, c.Err, err) + } + } +} diff --git a/vendor/github.com/docker/distribution/registry/storage/signedmanifesthandler.go b/vendor/github.com/docker/distribution/registry/storage/signedmanifesthandler.go new file mode 100644 index 000000000..30d330824 --- /dev/null +++ b/vendor/github.com/docker/distribution/registry/storage/signedmanifesthandler.go @@ -0,0 +1,141 @@ +package storage + +import ( + "encoding/json" + "fmt" + + "github.com/docker/distribution" + "github.com/docker/distribution/context" + "github.com/docker/distribution/digest" + "github.com/docker/distribution/manifest/schema1" + "github.com/docker/distribution/reference" + "github.com/docker/libtrust" +) + +// signedManifestHandler is a ManifestHandler that covers schema1 manifests. It +// can unmarshal and put schema1 manifests that have been signed by libtrust. +type signedManifestHandler struct { + repository distribution.Repository + schema1SigningKey libtrust.PrivateKey + blobStore distribution.BlobStore + ctx context.Context +} + +var _ ManifestHandler = &signedManifestHandler{} + +func (ms *signedManifestHandler) Unmarshal(ctx context.Context, dgst digest.Digest, content []byte) (distribution.Manifest, error) { + context.GetLogger(ms.ctx).Debug("(*signedManifestHandler).Unmarshal") + + var ( + signatures [][]byte + err error + ) + + jsig, err := libtrust.NewJSONSignature(content, signatures...) 
+ if err != nil { + return nil, err + } + + if ms.schema1SigningKey != nil { + if err := jsig.Sign(ms.schema1SigningKey); err != nil { + return nil, err + } + } + + // Extract the pretty JWS + raw, err := jsig.PrettySignature("signatures") + if err != nil { + return nil, err + } + + var sm schema1.SignedManifest + if err := json.Unmarshal(raw, &sm); err != nil { + return nil, err + } + return &sm, nil +} + +func (ms *signedManifestHandler) Put(ctx context.Context, manifest distribution.Manifest, skipDependencyVerification bool) (digest.Digest, error) { + context.GetLogger(ms.ctx).Debug("(*signedManifestHandler).Put") + + sm, ok := manifest.(*schema1.SignedManifest) + if !ok { + return "", fmt.Errorf("non-schema1 manifest put to signedManifestHandler: %T", manifest) + } + + if err := ms.verifyManifest(ms.ctx, *sm, skipDependencyVerification); err != nil { + return "", err + } + + mt := schema1.MediaTypeManifest + payload := sm.Canonical + + revision, err := ms.blobStore.Put(ctx, mt, payload) + if err != nil { + context.GetLogger(ctx).Errorf("error putting payload into blobstore: %v", err) + return "", err + } + + return revision.Digest, nil +} + +// verifyManifest ensures that the manifest content is valid from the +// perspective of the registry. It ensures that the signature is valid for the +// enclosed payload. As a policy, the registry only tries to store valid +// content, leaving trust policies of that content up to consumers. +func (ms *signedManifestHandler) verifyManifest(ctx context.Context, mnfst schema1.SignedManifest, skipDependencyVerification bool) error { + var errs distribution.ErrManifestVerification + + if len(mnfst.Name) > reference.NameTotalLengthMax { + errs = append(errs, + distribution.ErrManifestNameInvalid{ + Name: mnfst.Name, + Reason: fmt.Errorf("manifest name must not be more than %v characters", reference.NameTotalLengthMax), + }) + } + + if !reference.NameRegexp.MatchString(mnfst.Name) { + errs = append(errs, + distribution.ErrManifestNameInvalid{ + Name: mnfst.Name, + Reason: fmt.Errorf("invalid manifest name format"), + }) + } + + if len(mnfst.History) != len(mnfst.FSLayers) { + errs = append(errs, fmt.Errorf("mismatched history and fslayer cardinality %d != %d", + len(mnfst.History), len(mnfst.FSLayers))) + } + + if _, err := schema1.Verify(&mnfst); err != nil { + switch err { + case libtrust.ErrMissingSignatureKey, libtrust.ErrInvalidJSONContent, libtrust.ErrMissingSignatureKey: + errs = append(errs, distribution.ErrManifestUnverified{}) + default: + if err.Error() == "invalid signature" { // TODO(stevvooe): This should be exported by libtrust + errs = append(errs, distribution.ErrManifestUnverified{}) + } else { + errs = append(errs, err) + } + } + } + + if !skipDependencyVerification { + for _, fsLayer := range mnfst.References() { + _, err := ms.repository.Blobs(ctx).Stat(ctx, fsLayer.Digest) + if err != nil { + if err != distribution.ErrBlobUnknown { + errs = append(errs, err) + } + + // On error here, we always append unknown blob errors. 
+ errs = append(errs, distribution.ErrManifestBlobUnknown{Digest: fsLayer.Digest}) + } + } + } + if len(errs) != 0 { + return errs + } + + return nil +} diff --git a/vendor/github.com/docker/distribution/registry/storage/tagstore.go b/vendor/github.com/docker/distribution/registry/storage/tagstore.go new file mode 100644 index 000000000..4386ffcac --- /dev/null +++ b/vendor/github.com/docker/distribution/registry/storage/tagstore.go @@ -0,0 +1,191 @@ +package storage + +import ( + "path" + + "github.com/docker/distribution" + "github.com/docker/distribution/context" + "github.com/docker/distribution/digest" + storagedriver "github.com/docker/distribution/registry/storage/driver" +) + +var _ distribution.TagService = &tagStore{} + +// tagStore provides methods to manage manifest tags in a backend storage driver. +// This implementation uses the same on-disk layout as the (now deleted) tag +// store. This provides backward compatibility with current registry deployments +// which only makes use of the Digest field of the returned distribution.Descriptor +// but does not enable full roundtripping of Descriptor objects +type tagStore struct { + repository *repository + blobStore *blobStore +} + +// All returns all tags +func (ts *tagStore) All(ctx context.Context) ([]string, error) { + var tags []string + + pathSpec, err := pathFor(manifestTagPathSpec{ + name: ts.repository.Named().Name(), + }) + if err != nil { + return tags, err + } + + entries, err := ts.blobStore.driver.List(ctx, pathSpec) + if err != nil { + switch err := err.(type) { + case storagedriver.PathNotFoundError: + return tags, distribution.ErrRepositoryUnknown{Name: ts.repository.Named().Name()} + default: + return tags, err + } + } + + for _, entry := range entries { + _, filename := path.Split(entry) + tags = append(tags, filename) + } + + return tags, nil +} + +// exists returns true if the specified manifest tag exists in the repository. +func (ts *tagStore) exists(ctx context.Context, tag string) (bool, error) { + tagPath, err := pathFor(manifestTagCurrentPathSpec{ + name: ts.repository.Named().Name(), + tag: tag, + }) + + if err != nil { + return false, err + } + + exists, err := exists(ctx, ts.blobStore.driver, tagPath) + if err != nil { + return false, err + } + + return exists, nil +} + +// Tag tags the digest with the given tag, updating the the store to point at +// the current tag. The digest must point to a manifest. +func (ts *tagStore) Tag(ctx context.Context, tag string, desc distribution.Descriptor) error { + currentPath, err := pathFor(manifestTagCurrentPathSpec{ + name: ts.repository.Named().Name(), + tag: tag, + }) + + if err != nil { + return err + } + + lbs := ts.linkedBlobStore(ctx, tag) + + // Link into the index + if err := lbs.linkBlob(ctx, desc); err != nil { + return err + } + + // Overwrite the current link + return ts.blobStore.link(ctx, currentPath, desc.Digest) +} + +// resolve the current revision for name and tag. 
+func (ts *tagStore) Get(ctx context.Context, tag string) (distribution.Descriptor, error) { + currentPath, err := pathFor(manifestTagCurrentPathSpec{ + name: ts.repository.Named().Name(), + tag: tag, + }) + + if err != nil { + return distribution.Descriptor{}, err + } + + revision, err := ts.blobStore.readlink(ctx, currentPath) + if err != nil { + switch err.(type) { + case storagedriver.PathNotFoundError: + return distribution.Descriptor{}, distribution.ErrTagUnknown{Tag: tag} + } + + return distribution.Descriptor{}, err + } + + return distribution.Descriptor{Digest: revision}, nil +} + +// Untag removes the tag association +func (ts *tagStore) Untag(ctx context.Context, tag string) error { + tagPath, err := pathFor(manifestTagPathSpec{ + name: ts.repository.Named().Name(), + tag: tag, + }) + + switch err.(type) { + case storagedriver.PathNotFoundError: + return distribution.ErrTagUnknown{Tag: tag} + case nil: + break + default: + return err + } + + return ts.blobStore.driver.Delete(ctx, tagPath) +} + +// linkedBlobStore returns the linkedBlobStore for the named tag, allowing one +// to index manifest blobs by tag name. While the tag store doesn't map +// precisely to the linked blob store, using this ensures the links are +// managed via the same code path. +func (ts *tagStore) linkedBlobStore(ctx context.Context, tag string) *linkedBlobStore { + return &linkedBlobStore{ + blobStore: ts.blobStore, + repository: ts.repository, + ctx: ctx, + linkPathFns: []linkPathFunc{func(name string, dgst digest.Digest) (string, error) { + return pathFor(manifestTagIndexEntryLinkPathSpec{ + name: name, + tag: tag, + revision: dgst, + }) + + }}, + } +} + +// Lookup recovers a list of tags which refer to this digest. When a manifest is deleted by +// digest, tag entries which point to it need to be recovered to avoid dangling tags. 
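The tag store is reached through Repository.Tags and driven via the distribution.TagService interface (Tag, Get, Untag, All, Lookup). A small usage sketch, not part of the vendored file, mirroring the tagstore tests below; the repository name and digest value are placeholders:

package main

import (
	"fmt"

	"github.com/docker/distribution"
	"github.com/docker/distribution/context"
	"github.com/docker/distribution/reference"
	"github.com/docker/distribution/registry/storage"
	"github.com/docker/distribution/registry/storage/driver/inmemory"
)

func main() {
	ctx := context.Background()
	ns, _ := storage.NewRegistry(ctx, inmemory.New())
	named, _ := reference.ParseNamed("library/example")
	repo, _ := ns.Repository(ctx, named)

	tags := repo.Tags(ctx)

	// Point "latest" at a (placeholder) manifest digest, then read it back.
	desc := distribution.Descriptor{Digest: "sha256:aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"}
	if err := tags.Tag(ctx, "latest", desc); err != nil {
		panic(err)
	}
	got, err := tags.Get(ctx, "latest")
	if err != nil {
		panic(err)
	}
	fmt.Println("latest ->", got.Digest)

	// Lookup answers the reverse question: which tags reference this digest?
	refs, _ := tags.Lookup(ctx, desc)
	fmt.Println("tags for digest:", refs)
}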
+func (ts *tagStore) Lookup(ctx context.Context, desc distribution.Descriptor) ([]string, error) { + allTags, err := ts.All(ctx) + switch err.(type) { + case distribution.ErrRepositoryUnknown: + // This tag store has been initialized but not yet populated + break + case nil: + break + default: + return nil, err + } + + var tags []string + for _, tag := range allTags { + tagLinkPathSpec := manifestTagCurrentPathSpec{ + name: ts.repository.Named().Name(), + tag: tag, + } + + tagLinkPath, err := pathFor(tagLinkPathSpec) + tagDigest, err := ts.blobStore.readlink(ctx, tagLinkPath) + if err != nil { + return nil, err + } + + if tagDigest == desc.Digest { + tags = append(tags, tag) + } + } + + return tags, nil +} diff --git a/vendor/github.com/docker/distribution/registry/storage/tagstore_test.go b/vendor/github.com/docker/distribution/registry/storage/tagstore_test.go new file mode 100644 index 000000000..554a46bf7 --- /dev/null +++ b/vendor/github.com/docker/distribution/registry/storage/tagstore_test.go @@ -0,0 +1,209 @@ +package storage + +import ( + "testing" + + "github.com/docker/distribution" + "github.com/docker/distribution/context" + "github.com/docker/distribution/reference" + "github.com/docker/distribution/registry/storage/driver/inmemory" +) + +type tagsTestEnv struct { + ts distribution.TagService + ctx context.Context +} + +func testTagStore(t *testing.T) *tagsTestEnv { + ctx := context.Background() + d := inmemory.New() + reg, err := NewRegistry(ctx, d) + if err != nil { + t.Fatal(err) + } + + repoRef, _ := reference.ParseNamed("a/b") + repo, err := reg.Repository(ctx, repoRef) + if err != nil { + t.Fatal(err) + } + + return &tagsTestEnv{ + ctx: ctx, + ts: repo.Tags(ctx), + } +} + +func TestTagStoreTag(t *testing.T) { + env := testTagStore(t) + tags := env.ts + ctx := env.ctx + + d := distribution.Descriptor{} + err := tags.Tag(ctx, "latest", d) + if err == nil { + t.Errorf("unexpected error putting malformed descriptor : %s", err) + } + + d.Digest = "sha256:aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa" + err = tags.Tag(ctx, "latest", d) + if err != nil { + t.Error(err) + } + + d1, err := tags.Get(ctx, "latest") + if err != nil { + t.Error(err) + } + + if d1.Digest != d.Digest { + t.Error("put and get digest differ") + } + + // Overwrite existing + d.Digest = "sha256:bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb" + err = tags.Tag(ctx, "latest", d) + if err != nil { + t.Error(err) + } + + d1, err = tags.Get(ctx, "latest") + if err != nil { + t.Error(err) + } + + if d1.Digest != d.Digest { + t.Error("put and get digest differ") + } +} + +func TestTagStoreUnTag(t *testing.T) { + env := testTagStore(t) + tags := env.ts + ctx := env.ctx + desc := distribution.Descriptor{Digest: "sha256:bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb"} + + err := tags.Untag(ctx, "latest") + if err == nil { + t.Errorf("Expected error untagging non-existant tag") + } + + err = tags.Tag(ctx, "latest", desc) + if err != nil { + t.Error(err) + } + + err = tags.Untag(ctx, "latest") + if err != nil { + t.Error(err) + } + + errExpect := distribution.ErrTagUnknown{Tag: "latest"}.Error() + _, err = tags.Get(ctx, "latest") + if err == nil || err.Error() != errExpect { + t.Error("Expected error getting untagged tag") + } +} + +func TestTagStoreAll(t *testing.T) { + env := testTagStore(t) + tagStore := env.ts + ctx := env.ctx + + alpha := "abcdefghijklmnopqrstuvwxyz" + for i := 0; i < len(alpha); i++ { + tag := alpha[i] + desc := 
distribution.Descriptor{Digest: "sha256:eeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeee"} + err := tagStore.Tag(ctx, string(tag), desc) + if err != nil { + t.Error(err) + } + } + + all, err := tagStore.All(ctx) + if err != nil { + t.Error(err) + } + if len(all) != len(alpha) { + t.Errorf("Unexpected count returned from enumerate") + } + + for i, c := range all { + if c != string(alpha[i]) { + t.Errorf("unexpected tag in enumerate %s", c) + } + } + + removed := "a" + err = tagStore.Untag(ctx, removed) + if err != nil { + t.Error(err) + } + + all, err = tagStore.All(ctx) + if err != nil { + t.Error(err) + } + for _, tag := range all { + if tag == removed { + t.Errorf("unexpected tag in enumerate %s", removed) + } + } + +} + +func TestTagLookup(t *testing.T) { + env := testTagStore(t) + tagStore := env.ts + ctx := env.ctx + + descA := distribution.Descriptor{Digest: "sha256:aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"} + desc0 := distribution.Descriptor{Digest: "sha256:0000000000000000000000000000000000000000000000000000000000000000"} + + tags, err := tagStore.Lookup(ctx, descA) + if err != nil { + t.Fatal(err) + } + if len(tags) != 0 { + t.Fatalf("Lookup returned > 0 tags from empty store") + } + + err = tagStore.Tag(ctx, "a", descA) + if err != nil { + t.Fatal(err) + } + + err = tagStore.Tag(ctx, "b", descA) + if err != nil { + t.Fatal(err) + } + + err = tagStore.Tag(ctx, "0", desc0) + if err != nil { + t.Fatal(err) + } + + err = tagStore.Tag(ctx, "1", desc0) + if err != nil { + t.Fatal(err) + } + + tags, err = tagStore.Lookup(ctx, descA) + if err != nil { + t.Fatal(err) + } + + if len(tags) != 2 { + t.Errorf("Lookup of descA returned %d tags, expected 2", len(tags)) + } + + tags, err = tagStore.Lookup(ctx, desc0) + if err != nil { + t.Fatal(err) + } + + if len(tags) != 2 { + t.Errorf("Lookup of descB returned %d tags, expected 2", len(tags)) + } + +} diff --git a/vendor/github.com/docker/distribution/registry/storage/util.go b/vendor/github.com/docker/distribution/registry/storage/util.go new file mode 100644 index 000000000..773d7ba0b --- /dev/null +++ b/vendor/github.com/docker/distribution/registry/storage/util.go @@ -0,0 +1,21 @@ +package storage + +import ( + "github.com/docker/distribution/context" + "github.com/docker/distribution/registry/storage/driver" +) + +// Exists provides a utility method to test whether or not a path exists in +// the given driver. +func exists(ctx context.Context, drv driver.StorageDriver, path string) (bool, error) { + if _, err := drv.Stat(ctx, path); err != nil { + switch err := err.(type) { + case driver.PathNotFoundError: + return false, nil + default: + return false, err + } + } + + return true, nil +} diff --git a/vendor/github.com/docker/distribution/registry/storage/vacuum.go b/vendor/github.com/docker/distribution/registry/storage/vacuum.go new file mode 100644 index 000000000..3bdfebf27 --- /dev/null +++ b/vendor/github.com/docker/distribution/registry/storage/vacuum.go @@ -0,0 +1,67 @@ +package storage + +import ( + "path" + + "github.com/docker/distribution/context" + "github.com/docker/distribution/digest" + "github.com/docker/distribution/registry/storage/driver" +) + +// vacuum contains functions for cleaning up repositories and blobs +// These functions will only reliably work on strongly consistent +// storage systems. 
+// https://en.wikipedia.org/wiki/Consistency_model + +// NewVacuum creates a new Vacuum +func NewVacuum(ctx context.Context, driver driver.StorageDriver) Vacuum { + return Vacuum{ + ctx: ctx, + driver: driver, + } +} + +// Vacuum removes content from the filesystem +type Vacuum struct { + driver driver.StorageDriver + ctx context.Context +} + +// RemoveBlob removes a blob from the filesystem +func (v Vacuum) RemoveBlob(dgst string) error { + d, err := digest.ParseDigest(dgst) + if err != nil { + return err + } + + blobPath, err := pathFor(blobPathSpec{digest: d}) + if err != nil { + return err + } + + context.GetLogger(v.ctx).Infof("Deleting blob: %s", blobPath) + + err = v.driver.Delete(v.ctx, blobPath) + if err != nil { + return err + } + + return nil +} + +// RemoveRepository removes a repository directory from the +// filesystem +func (v Vacuum) RemoveRepository(repoName string) error { + rootForRepository, err := pathFor(repositoriesRootPathSpec{}) + if err != nil { + return err + } + repoDir := path.Join(rootForRepository, repoName) + context.GetLogger(v.ctx).Infof("Deleting repo: %s", repoDir) + err = v.driver.Delete(v.ctx, repoDir) + if err != nil { + return err + } + + return nil +} diff --git a/vendor/github.com/docker/distribution/registry/storage/walk.go b/vendor/github.com/docker/distribution/registry/storage/walk.go new file mode 100644 index 000000000..d979796eb --- /dev/null +++ b/vendor/github.com/docker/distribution/registry/storage/walk.go @@ -0,0 +1,59 @@ +package storage + +import ( + "errors" + "fmt" + "sort" + + "github.com/docker/distribution/context" + storageDriver "github.com/docker/distribution/registry/storage/driver" +) + +// ErrSkipDir is used as a return value from onFileFunc to indicate that +// the directory named in the call is to be skipped. It is not returned +// as an error by any function. +var ErrSkipDir = errors.New("skip this directory") + +// WalkFn is called once per file by Walk +// If the returned error is ErrSkipDir and fileInfo refers +// to a directory, the directory will not be entered and Walk +// will continue the traversal. Otherwise Walk will return +type WalkFn func(fileInfo storageDriver.FileInfo) error + +// Walk traverses a filesystem defined within driver, starting +// from the given path, calling f on each file +func Walk(ctx context.Context, driver storageDriver.StorageDriver, from string, f WalkFn) error { + children, err := driver.List(ctx, from) + if err != nil { + return err + } + sort.Stable(sort.StringSlice(children)) + for _, child := range children { + // TODO(stevvooe): Calling driver.Stat for every entry is quite + // expensive when running against backends with a slow Stat + // implementation, such as s3. This is very likely a serious + // performance bottleneck. 
+ fileInfo, err := driver.Stat(ctx, child) + if err != nil { + return err + } + err = f(fileInfo) + skipDir := (err == ErrSkipDir) + if err != nil && !skipDir { + return err + } + + if fileInfo.IsDir() && !skipDir { + if err := Walk(ctx, driver, child, f); err != nil { + return err + } + } + } + return nil +} + +// pushError formats an error type given a path and an error +// and pushes it to a slice of errors +func pushError(errors []error, path string, err error) []error { + return append(errors, fmt.Errorf("%s: %s", path, err)) +} diff --git a/vendor/github.com/docker/distribution/registry/storage/walk_test.go b/vendor/github.com/docker/distribution/registry/storage/walk_test.go new file mode 100644 index 000000000..3d7a4b1b6 --- /dev/null +++ b/vendor/github.com/docker/distribution/registry/storage/walk_test.go @@ -0,0 +1,152 @@ +package storage + +import ( + "fmt" + "sort" + "testing" + + "github.com/docker/distribution/context" + "github.com/docker/distribution/registry/storage/driver" + "github.com/docker/distribution/registry/storage/driver/inmemory" +) + +func testFS(t *testing.T) (driver.StorageDriver, map[string]string, context.Context) { + d := inmemory.New() + ctx := context.Background() + + expected := map[string]string{ + "/a": "dir", + "/a/b": "dir", + "/a/b/c": "dir", + "/a/b/c/d": "file", + "/a/b/c/e": "file", + "/a/b/f": "dir", + "/a/b/f/g": "file", + "/a/b/f/h": "file", + "/a/b/f/i": "file", + "/z": "dir", + "/z/y": "file", + } + + for p, typ := range expected { + if typ != "file" { + continue + } + + if err := d.PutContent(ctx, p, []byte(p)); err != nil { + t.Fatalf("unable to put content into fixture: %v", err) + } + } + + return d, expected, ctx +} + +func TestWalkErrors(t *testing.T) { + d, expected, ctx := testFS(t) + fileCount := len(expected) + err := Walk(ctx, d, "", func(fileInfo driver.FileInfo) error { + return nil + }) + if err == nil { + t.Error("Expected invalid root err") + } + + errEarlyExpected := fmt.Errorf("Early termination") + + err = Walk(ctx, d, "/", func(fileInfo driver.FileInfo) error { + // error on the 2nd file + if fileInfo.Path() == "/a/b" { + return errEarlyExpected + } + + delete(expected, fileInfo.Path()) + return nil + }) + if len(expected) != fileCount-1 { + t.Error("Walk failed to terminate with error") + } + if err != errEarlyExpected { + if err == nil { + t.Fatalf("expected an error due to early termination") + } else { + t.Error(err.Error()) + } + } + + err = Walk(ctx, d, "/nonexistent", func(fileInfo driver.FileInfo) error { + return nil + }) + if err == nil { + t.Errorf("Expected missing file err") + } + +} + +func TestWalk(t *testing.T) { + d, expected, ctx := testFS(t) + var traversed []string + err := Walk(ctx, d, "/", func(fileInfo driver.FileInfo) error { + filePath := fileInfo.Path() + filetype, ok := expected[filePath] + if !ok { + t.Fatalf("Unexpected file in walk: %q", filePath) + } + + if fileInfo.IsDir() { + if filetype != "dir" { + t.Errorf("Unexpected file type: %q", filePath) + } + } else { + if filetype != "file" { + t.Errorf("Unexpected file type: %q", filePath) + } + + // each file has its own path as the contents. If the length + // doesn't match the path length, fail. 
+ if fileInfo.Size() != int64(len(fileInfo.Path())) { + t.Fatalf("unexpected size for %q: %v != %v", + fileInfo.Path(), fileInfo.Size(), len(fileInfo.Path())) + } + } + delete(expected, filePath) + traversed = append(traversed, filePath) + return nil + }) + if len(expected) > 0 { + t.Errorf("Missed files in walk: %q", expected) + } + + if !sort.StringsAreSorted(traversed) { + t.Errorf("result should be sorted: %v", traversed) + } + + if err != nil { + t.Fatalf(err.Error()) + } +} + +func TestWalkSkipDir(t *testing.T) { + d, expected, ctx := testFS(t) + err := Walk(ctx, d, "/", func(fileInfo driver.FileInfo) error { + filePath := fileInfo.Path() + if filePath == "/a/b" { + // skip processing /a/b/c and /a/b/c/d + return ErrSkipDir + } + delete(expected, filePath) + return nil + }) + if err != nil { + t.Fatalf(err.Error()) + } + if _, ok := expected["/a/b/c"]; !ok { + t.Errorf("/a/b/c not skipped") + } + if _, ok := expected["/a/b/c/d"]; !ok { + t.Errorf("/a/b/c/d not skipped") + } + if _, ok := expected["/a/b/c/e"]; !ok { + t.Errorf("/a/b/c/e not skipped") + } + +} diff --git a/vendor/github.com/docker/distribution/tags.go b/vendor/github.com/docker/distribution/tags.go new file mode 100644 index 000000000..503056596 --- /dev/null +++ b/vendor/github.com/docker/distribution/tags.go @@ -0,0 +1,27 @@ +package distribution + +import ( + "github.com/docker/distribution/context" +) + +// TagService provides access to information about tagged objects. +type TagService interface { + // Get retrieves the descriptor identified by the tag. Some + // implementations may differentiate between "trusted" tags and + // "untrusted" tags. If a tag is "untrusted", the mapping will be returned + // as an ErrTagUntrusted error, with the target descriptor. + Get(ctx context.Context, tag string) (Descriptor, error) + + // Tag associates the tag with the provided descriptor, updating the + // current association, if needed. + Tag(ctx context.Context, tag string, desc Descriptor) error + + // Untag removes the given tag association + Untag(ctx context.Context, tag string) error + + // All returns the set of tags managed by this tag service + All(ctx context.Context) ([]string, error) + + // Lookup returns the set of tags referencing the given digest. 
+ Lookup(ctx context.Context, digest Descriptor) ([]string, error) +} diff --git a/vendor/github.com/docker/distribution/testutil/handler.go b/vendor/github.com/docker/distribution/testutil/handler.go new file mode 100644 index 000000000..00cd8a6ac --- /dev/null +++ b/vendor/github.com/docker/distribution/testutil/handler.go @@ -0,0 +1,148 @@ +package testutil + +import ( + "bytes" + "fmt" + "io" + "io/ioutil" + "net/http" + "net/url" + "sort" + "strings" +) + +// RequestResponseMap is an ordered mapping from Requests to Responses +type RequestResponseMap []RequestResponseMapping + +// RequestResponseMapping defines a Response to be sent in response to a given +// Request +type RequestResponseMapping struct { + Request Request + Response Response +} + +// Request is a simplified http.Request object +type Request struct { + // Method is the http method of the request, for example GET + Method string + + // Route is the http route of this request + Route string + + // QueryParams are the query parameters of this request + QueryParams map[string][]string + + // Body is the byte contents of the http request + Body []byte + + // Headers are the header for this request + Headers http.Header +} + +func (r Request) String() string { + queryString := "" + if len(r.QueryParams) > 0 { + keys := make([]string, 0, len(r.QueryParams)) + queryParts := make([]string, 0, len(r.QueryParams)) + for k := range r.QueryParams { + keys = append(keys, k) + } + sort.Strings(keys) + for _, k := range keys { + for _, val := range r.QueryParams[k] { + queryParts = append(queryParts, fmt.Sprintf("%s=%s", k, url.QueryEscape(val))) + } + } + queryString = "?" + strings.Join(queryParts, "&") + } + var headers []string + if len(r.Headers) > 0 { + var headerKeys []string + for k := range r.Headers { + headerKeys = append(headerKeys, k) + } + sort.Strings(headerKeys) + + for _, k := range headerKeys { + for _, val := range r.Headers[k] { + headers = append(headers, fmt.Sprintf("%s:%s", k, val)) + } + } + + } + return fmt.Sprintf("%s %s%s\n%s\n%s", r.Method, r.Route, queryString, headers, r.Body) +} + +// Response is a simplified http.Response object +type Response struct { + // Statuscode is the http status code of the Response + StatusCode int + + // Headers are the http headers of this Response + Headers http.Header + + // Body is the response body + Body []byte +} + +// testHandler is an http.Handler with a defined mapping from Request to an +// ordered list of Response objects +type testHandler struct { + responseMap map[string][]Response +} + +// NewHandler returns a new test handler that responds to defined requests +// with specified responses +// Each time a Request is received, the next Response is returned in the +// mapping, until no Responses are defined, at which point a 404 is sent back +func NewHandler(requestResponseMap RequestResponseMap) http.Handler { + responseMap := make(map[string][]Response) + for _, mapping := range requestResponseMap { + responses, ok := responseMap[mapping.Request.String()] + if ok { + responseMap[mapping.Request.String()] = append(responses, mapping.Response) + } else { + responseMap[mapping.Request.String()] = []Response{mapping.Response} + } + } + return &testHandler{responseMap: responseMap} +} + +func (app *testHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { + defer r.Body.Close() + + requestBody, _ := ioutil.ReadAll(r.Body) + request := Request{ + Method: r.Method, + Route: r.URL.Path, + QueryParams: r.URL.Query(), + Body: requestBody, + Headers: 
make(map[string][]string), + } + + // Add headers of interest here + for k, v := range r.Header { + if k == "If-None-Match" { + request.Headers[k] = v + } + } + + responses, ok := app.responseMap[request.String()] + + if !ok || len(responses) == 0 { + http.NotFound(w, r) + return + } + + response := responses[0] + app.responseMap[request.String()] = responses[1:] + + responseHeader := w.Header() + for k, v := range response.Headers { + responseHeader[k] = v + } + + w.WriteHeader(response.StatusCode) + + io.Copy(w, bytes.NewReader(response.Body)) +} diff --git a/vendor/github.com/docker/distribution/testutil/manifests.go b/vendor/github.com/docker/distribution/testutil/manifests.go new file mode 100644 index 000000000..c4f9fef53 --- /dev/null +++ b/vendor/github.com/docker/distribution/testutil/manifests.go @@ -0,0 +1,87 @@ +package testutil + +import ( + "fmt" + + "github.com/docker/distribution" + "github.com/docker/distribution/context" + "github.com/docker/distribution/digest" + "github.com/docker/distribution/manifest" + "github.com/docker/distribution/manifest/manifestlist" + "github.com/docker/distribution/manifest/schema1" + "github.com/docker/distribution/manifest/schema2" + "github.com/docker/libtrust" +) + +// MakeManifestList constructs a manifest list out of a list of manifest digests +func MakeManifestList(blobstatter distribution.BlobStatter, manifestDigests []digest.Digest) (*manifestlist.DeserializedManifestList, error) { + ctx := context.Background() + + var manifestDescriptors []manifestlist.ManifestDescriptor + for _, manifestDigest := range manifestDigests { + descriptor, err := blobstatter.Stat(ctx, manifestDigest) + if err != nil { + return nil, err + } + platformSpec := manifestlist.PlatformSpec{ + Architecture: "atari2600", + OS: "CP/M", + Variant: "ternary", + Features: []string{"VLIW", "superscalaroutoforderdevnull"}, + } + manifestDescriptor := manifestlist.ManifestDescriptor{ + Descriptor: descriptor, + Platform: platformSpec, + } + manifestDescriptors = append(manifestDescriptors, manifestDescriptor) + } + + return manifestlist.FromDescriptors(manifestDescriptors) +} + +// MakeSchema1Manifest constructs a schema 1 manifest from a given list of digests and returns +// the digest of the manifest +func MakeSchema1Manifest(digests []digest.Digest) (distribution.Manifest, error) { + manifest := schema1.Manifest{ + Versioned: manifest.Versioned{ + SchemaVersion: 1, + }, + Name: "who", + Tag: "cares", + } + + for _, digest := range digests { + manifest.FSLayers = append(manifest.FSLayers, schema1.FSLayer{BlobSum: digest}) + manifest.History = append(manifest.History, schema1.History{V1Compatibility: ""}) + } + + pk, err := libtrust.GenerateECP256PrivateKey() + if err != nil { + return nil, fmt.Errorf("unexpected error generating private key: %v", err) + } + + signedManifest, err := schema1.Sign(&manifest, pk) + if err != nil { + return nil, fmt.Errorf("error signing manifest: %v", err) + } + + return signedManifest, nil +} + +// MakeSchema2Manifest constructs a schema 2 manifest from a given list of digests and returns +// the digest of the manifest +func MakeSchema2Manifest(repository distribution.Repository, digests []digest.Digest) (distribution.Manifest, error) { + ctx := context.Background() + blobStore := repository.Blobs(ctx) + builder := schema2.NewManifestBuilder(blobStore, []byte{}) + for _, digest := range digests { + builder.AppendReference(distribution.Descriptor{Digest: digest}) + } + + manifest, err := builder.Build(ctx) + if err != nil { + return nil, 
fmt.Errorf("unexpected error generating manifest: %v", err) + } + + return manifest, nil +} diff --git a/vendor/github.com/docker/distribution/testutil/tarfile.go b/vendor/github.com/docker/distribution/testutil/tarfile.go new file mode 100644 index 000000000..a8ba01553 --- /dev/null +++ b/vendor/github.com/docker/distribution/testutil/tarfile.go @@ -0,0 +1,115 @@ +package testutil + +import ( + "archive/tar" + "bytes" + "fmt" + "io" + mrand "math/rand" + "time" + + "github.com/docker/distribution" + "github.com/docker/distribution/context" + "github.com/docker/distribution/digest" +) + +// CreateRandomTarFile creates a random tarfile, returning it as an +// io.ReadSeeker along with its digest. An error is returned if there is a +// problem generating valid content. +func CreateRandomTarFile() (rs io.ReadSeeker, dgst digest.Digest, err error) { + nFiles := mrand.Intn(10) + 10 + target := &bytes.Buffer{} + wr := tar.NewWriter(target) + + // Perturb this on each iteration of the loop below. + header := &tar.Header{ + Mode: 0644, + ModTime: time.Now(), + Typeflag: tar.TypeReg, + Uname: "randocalrissian", + Gname: "cloudcity", + AccessTime: time.Now(), + ChangeTime: time.Now(), + } + + for fileNumber := 0; fileNumber < nFiles; fileNumber++ { + fileSize := mrand.Int63n(1<<20) + 1<<20 + + header.Name = fmt.Sprint(fileNumber) + header.Size = fileSize + + if err := wr.WriteHeader(header); err != nil { + return nil, "", err + } + + randomData := make([]byte, fileSize) + + // Fill up the buffer with some random data. + n, err := mrand.Read(randomData) + + if n != len(randomData) { + return nil, "", fmt.Errorf("short read creating random reader: %v bytes != %v bytes", n, len(randomData)) + } + + if err != nil { + return nil, "", err + } + + nn, err := io.Copy(wr, bytes.NewReader(randomData)) + if nn != fileSize { + return nil, "", fmt.Errorf("short copy writing random file to tar") + } + + if err != nil { + return nil, "", err + } + + if err := wr.Flush(); err != nil { + return nil, "", err + } + } + + if err := wr.Close(); err != nil { + return nil, "", err + } + + dgst = digest.FromBytes(target.Bytes()) + + return bytes.NewReader(target.Bytes()), dgst, nil +} + +// CreateRandomLayers returns a map of n digests. We don't particularly care +// about the order of said digests (since they're all random anyway). 
+func CreateRandomLayers(n int) (map[digest.Digest]io.ReadSeeker, error) { + digestMap := map[digest.Digest]io.ReadSeeker{} + for i := 0; i < n; i++ { + rs, ds, err := CreateRandomTarFile() + if err != nil { + return nil, fmt.Errorf("unexpected error generating test layer file: %v", err) + } + + dgst := digest.Digest(ds) + digestMap[dgst] = rs + } + return digestMap, nil +} + +// UploadBlobs lets you upload blobs to a repository +func UploadBlobs(repository distribution.Repository, layers map[digest.Digest]io.ReadSeeker) error { + ctx := context.Background() + for digest, rs := range layers { + wr, err := repository.Blobs(ctx).Create(ctx) + if err != nil { + return fmt.Errorf("unexpected error creating upload: %v", err) + } + + if _, err := io.Copy(wr, rs); err != nil { + return fmt.Errorf("unexpected error copying to upload: %v", err) + } + + if _, err := wr.Commit(ctx, distribution.Descriptor{Digest: digest}); err != nil { + return fmt.Errorf("unexpected error committinng upload: %v", err) + } + } + return nil +} diff --git a/vendor/github.com/docker/distribution/uuid/uuid.go b/vendor/github.com/docker/distribution/uuid/uuid.go new file mode 100644 index 000000000..d433ccaf5 --- /dev/null +++ b/vendor/github.com/docker/distribution/uuid/uuid.go @@ -0,0 +1,126 @@ +// Package uuid provides simple UUID generation. Only version 4 style UUIDs +// can be generated. +// +// Please see http://tools.ietf.org/html/rfc4122 for details on UUIDs. +package uuid + +import ( + "crypto/rand" + "fmt" + "io" + "os" + "syscall" + "time" +) + +const ( + // Bits is the number of bits in a UUID + Bits = 128 + + // Size is the number of bytes in a UUID + Size = Bits / 8 + + format = "%08x-%04x-%04x-%04x-%012x" +) + +var ( + // ErrUUIDInvalid indicates a parsed string is not a valid uuid. + ErrUUIDInvalid = fmt.Errorf("invalid uuid") + + // Loggerf can be used to override the default logging destination. Such + // log messages in this library should be logged at warning or higher. + Loggerf = func(format string, args ...interface{}) {} +) + +// UUID represents a UUID value. UUIDs can be compared and set to other values +// and accessed by byte. +type UUID [Size]byte + +// Generate creates a new, version 4 uuid. +func Generate() (u UUID) { + const ( + // ensures we backoff for less than 450ms total. Use the following to + // select new value, in units of 10ms: + // n*(n+1)/2 = d -> n^2 + n - 2d -> n = (sqrt(8d + 1) - 1)/2 + maxretries = 9 + backoff = time.Millisecond * 10 + ) + + var ( + totalBackoff time.Duration + count int + retries int + ) + + for { + // This should never block but the read may fail. Because of this, + // we just try to read the random number generator until we get + // something. This is a very rare condition but may happen. + b := time.Duration(retries) * backoff + time.Sleep(b) + totalBackoff += b + + n, err := io.ReadFull(rand.Reader, u[count:]) + if err != nil { + if retryOnError(err) && retries < maxretries { + count += n + retries++ + Loggerf("error generating version 4 uuid, retrying: %v", err) + continue + } + + // Any other errors represent a system problem. What did someone + // do to /dev/urandom? + panic(fmt.Errorf("error reading random number generator, retried for %v: %v", totalBackoff.String(), err)) + } + + break + } + + u[6] = (u[6] & 0x0f) | 0x40 // set version byte + u[8] = (u[8] & 0x3f) | 0x80 // set high order byte 0b10{8,9,a,b} + + return u +} + +// Parse attempts to extract a uuid from the string or returns an error. 
+func Parse(s string) (u UUID, err error) { + if len(s) != 36 { + return UUID{}, ErrUUIDInvalid + } + + // create stack addresses for each section of the uuid. + p := make([][]byte, 5) + + if _, err := fmt.Sscanf(s, format, &p[0], &p[1], &p[2], &p[3], &p[4]); err != nil { + return u, err + } + + copy(u[0:4], p[0]) + copy(u[4:6], p[1]) + copy(u[6:8], p[2]) + copy(u[8:10], p[3]) + copy(u[10:16], p[4]) + + return +} + +func (u UUID) String() string { + return fmt.Sprintf(format, u[:4], u[4:6], u[6:8], u[8:10], u[10:]) +} + +// retryOnError tries to detect whether or not retrying would be fruitful. +func retryOnError(err error) bool { + switch err := err.(type) { + case *os.PathError: + return retryOnError(err.Err) // unpack the target error + case syscall.Errno: + if err == syscall.EPERM { + // EPERM represents an entropy pool exhaustion, a condition under + // which we backoff and retry. + return true + } + } + + return false +} diff --git a/vendor/github.com/docker/distribution/uuid/uuid_test.go b/vendor/github.com/docker/distribution/uuid/uuid_test.go new file mode 100644 index 000000000..09c3a7bb4 --- /dev/null +++ b/vendor/github.com/docker/distribution/uuid/uuid_test.go @@ -0,0 +1,48 @@ +package uuid + +import ( + "testing" +) + +const iterations = 1000 + +func TestUUID4Generation(t *testing.T) { + for i := 0; i < iterations; i++ { + u := Generate() + + if u[6]&0xf0 != 0x40 { + t.Fatalf("version byte not correctly set: %v, %08b %08b", u, u[6], u[6]&0xf0) + } + + if u[8]&0xc0 != 0x80 { + t.Fatalf("top order 8th byte not correctly set: %v, %b", u, u[8]) + } + } +} + +func TestParseAndEquality(t *testing.T) { + for i := 0; i < iterations; i++ { + u := Generate() + + parsed, err := Parse(u.String()) + if err != nil { + t.Fatalf("error parsing uuid %v: %v", u, err) + } + + if parsed != u { + t.Fatalf("parsing round trip failed: %v != %v", parsed, u) + } + } + + for _, c := range []string{ + "bad", + "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa", // correct length, incorrect format + " 20cc7775-2671-43c7-8742-51d1cfa23258", // leading space + "20cc7775-2671-43c7-8742-51d1cfa23258 ", // trailing space + "00000000-0000-0000-0000-x00000000000", // out of range character + } { + if _, err := Parse(c); err == nil { + t.Fatalf("parsing %q should have failed", c) + } + } +} diff --git a/vendor/github.com/docker/distribution/vendor/github.com/Azure/azure-sdk-for-go/LICENSE b/vendor/github.com/docker/distribution/vendor/github.com/Azure/azure-sdk-for-go/LICENSE new file mode 100644 index 000000000..af39a91e7 --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/github.com/Azure/azure-sdk-for-go/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. 
For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright 2016 Microsoft Corporation + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
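Editor's note: the vendored testutil and uuid packages above belong to the github.com/docker/distribution tree that this change pulls in; they are support code, not part of Keel's own API. As a rough sketch only, using nothing beyond the signatures visible in those files (a hypothetical standalone program; the import paths assume the upstream package layout rather than the vendor directory), the helpers can be exercised like this:

	// Sketch: round-trips a version 4 UUID and builds digest-keyed random
	// layers, using only the vendored docker/distribution helpers shown above.
	package main

	import (
		"fmt"
		"log"

		"github.com/docker/distribution/testutil"
		"github.com/docker/distribution/uuid"
	)

	func main() {
		// Generate a version 4 UUID and check it survives a parse round trip,
		// mirroring what uuid_test.go asserts.
		u := uuid.Generate()
		parsed, err := uuid.Parse(u.String())
		if err != nil || parsed != u {
			log.Fatalf("uuid round trip failed: %v", err)
		}
		fmt.Println("generated uuid:", u)

		// CreateRandomLayers returns the digest-keyed map of io.ReadSeekers
		// that UploadBlobs expects when seeding a test repository.
		layers, err := testutil.CreateRandomLayers(2)
		if err != nil {
			log.Fatalf("creating random layers: %v", err)
		}
		for dgst := range layers {
			fmt.Println("layer digest:", dgst)
		}
	}

This is illustrative only; whether and how Keel's registry code exercises these helpers is not shown in this hunk.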
diff --git a/vendor/github.com/docker/distribution/vendor/github.com/Azure/azure-sdk-for-go/storage/README.md b/vendor/github.com/docker/distribution/vendor/github.com/Azure/azure-sdk-for-go/storage/README.md new file mode 100644 index 000000000..0ab099848 --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/github.com/Azure/azure-sdk-for-go/storage/README.md @@ -0,0 +1,5 @@ +# Azure Storage SDK for Go + +The `github.com/Azure/azure-sdk-for-go/storage` package is used to perform operations in Azure Storage Service. To manage your storage accounts (Azure Resource Manager / ARM), use the [github.com/Azure/azure-sdk-for-go/arm/storage](../arm/storage) package. For your classic storage accounts (Azure Service Management / ASM), use [github.com/Azure/azure-sdk-for-go/management/storageservice](../management/storageservice) package. + +This package includes support for [Azure Storage Emulator](https://azure.microsoft.com/documentation/articles/storage-use-emulator/) \ No newline at end of file diff --git a/vendor/github.com/docker/distribution/vendor/github.com/Azure/azure-sdk-for-go/storage/blob.go b/vendor/github.com/docker/distribution/vendor/github.com/Azure/azure-sdk-for-go/storage/blob.go new file mode 100644 index 000000000..f3c9ba6c8 --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/github.com/Azure/azure-sdk-for-go/storage/blob.go @@ -0,0 +1,1280 @@ +package storage + +import ( + "bytes" + "encoding/xml" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "strconv" + "strings" + "time" +) + +// BlobStorageClient contains operations for Microsoft Azure Blob Storage +// Service. +type BlobStorageClient struct { + client Client +} + +// A Container is an entry in ContainerListResponse. +type Container struct { + Name string `xml:"Name"` + Properties ContainerProperties `xml:"Properties"` + // TODO (ahmetalpbalkan) Metadata +} + +// ContainerProperties contains various properties of a container returned from +// various endpoints like ListContainers. +type ContainerProperties struct { + LastModified string `xml:"Last-Modified"` + Etag string `xml:"Etag"` + LeaseStatus string `xml:"LeaseStatus"` + LeaseState string `xml:"LeaseState"` + LeaseDuration string `xml:"LeaseDuration"` + // TODO (ahmetalpbalkan) remaining fields +} + +// ContainerListResponse contains the response fields from +// ListContainers call. +// +// See https://msdn.microsoft.com/en-us/library/azure/dd179352.aspx +type ContainerListResponse struct { + XMLName xml.Name `xml:"EnumerationResults"` + Xmlns string `xml:"xmlns,attr"` + Prefix string `xml:"Prefix"` + Marker string `xml:"Marker"` + NextMarker string `xml:"NextMarker"` + MaxResults int64 `xml:"MaxResults"` + Containers []Container `xml:"Containers>Container"` +} + +// A Blob is an entry in BlobListResponse. +type Blob struct { + Name string `xml:"Name"` + Properties BlobProperties `xml:"Properties"` + Metadata BlobMetadata `xml:"Metadata"` +} + +// BlobMetadata is a set of custom name/value pairs. 
+// +// See https://msdn.microsoft.com/en-us/library/azure/dd179404.aspx +type BlobMetadata map[string]string + +type blobMetadataEntries struct { + Entries []blobMetadataEntry `xml:",any"` +} +type blobMetadataEntry struct { + XMLName xml.Name + Value string `xml:",chardata"` +} + +// UnmarshalXML converts the xml:Metadata into Metadata map +func (bm *BlobMetadata) UnmarshalXML(d *xml.Decoder, start xml.StartElement) error { + var entries blobMetadataEntries + if err := d.DecodeElement(&entries, &start); err != nil { + return err + } + for _, entry := range entries.Entries { + if *bm == nil { + *bm = make(BlobMetadata) + } + (*bm)[strings.ToLower(entry.XMLName.Local)] = entry.Value + } + return nil +} + +// MarshalXML implements the xml.Marshaler interface. It encodes +// metadata name/value pairs as they would appear in an Azure +// ListBlobs response. +func (bm BlobMetadata) MarshalXML(enc *xml.Encoder, start xml.StartElement) error { + entries := make([]blobMetadataEntry, 0, len(bm)) + for k, v := range bm { + entries = append(entries, blobMetadataEntry{ + XMLName: xml.Name{Local: http.CanonicalHeaderKey(k)}, + Value: v, + }) + } + return enc.EncodeElement(blobMetadataEntries{ + Entries: entries, + }, start) +} + +// BlobProperties contains various properties of a blob +// returned in various endpoints like ListBlobs or GetBlobProperties. +type BlobProperties struct { + LastModified string `xml:"Last-Modified"` + Etag string `xml:"Etag"` + ContentMD5 string `xml:"Content-MD5"` + ContentLength int64 `xml:"Content-Length"` + ContentType string `xml:"Content-Type"` + ContentEncoding string `xml:"Content-Encoding"` + CacheControl string `xml:"Cache-Control"` + ContentLanguage string `xml:"Cache-Language"` + BlobType BlobType `xml:"x-ms-blob-blob-type"` + SequenceNumber int64 `xml:"x-ms-blob-sequence-number"` + CopyID string `xml:"CopyId"` + CopyStatus string `xml:"CopyStatus"` + CopySource string `xml:"CopySource"` + CopyProgress string `xml:"CopyProgress"` + CopyCompletionTime string `xml:"CopyCompletionTime"` + CopyStatusDescription string `xml:"CopyStatusDescription"` + LeaseStatus string `xml:"LeaseStatus"` +} + +// BlobHeaders contains various properties of a blob and is an entry +// in SetBlobProperties +type BlobHeaders struct { + ContentMD5 string `header:"x-ms-blob-content-md5"` + ContentLanguage string `header:"x-ms-blob-content-language"` + ContentEncoding string `header:"x-ms-blob-content-encoding"` + ContentType string `header:"x-ms-blob-content-type"` + CacheControl string `header:"x-ms-blob-cache-control"` +} + +// BlobListResponse contains the response fields from ListBlobs call. +// +// See https://msdn.microsoft.com/en-us/library/azure/dd135734.aspx +type BlobListResponse struct { + XMLName xml.Name `xml:"EnumerationResults"` + Xmlns string `xml:"xmlns,attr"` + Prefix string `xml:"Prefix"` + Marker string `xml:"Marker"` + NextMarker string `xml:"NextMarker"` + MaxResults int64 `xml:"MaxResults"` + Blobs []Blob `xml:"Blobs>Blob"` + + // BlobPrefix is used to traverse blobs as if it were a file system. + // It is returned if ListBlobsParameters.Delimiter is specified. + // The list here can be thought of as "folders" that may contain + // other folders or blobs. + BlobPrefixes []string `xml:"Blobs>BlobPrefix>Name"` + + // Delimiter is used to traverse blobs as if it were a file system. + // It is returned if ListBlobsParameters.Delimiter is specified. 
+ Delimiter string `xml:"Delimiter"` +} + +// ListContainersParameters defines the set of customizable parameters to make a +// List Containers call. +// +// See https://msdn.microsoft.com/en-us/library/azure/dd179352.aspx +type ListContainersParameters struct { + Prefix string + Marker string + Include string + MaxResults uint + Timeout uint +} + +func (p ListContainersParameters) getParameters() url.Values { + out := url.Values{} + + if p.Prefix != "" { + out.Set("prefix", p.Prefix) + } + if p.Marker != "" { + out.Set("marker", p.Marker) + } + if p.Include != "" { + out.Set("include", p.Include) + } + if p.MaxResults != 0 { + out.Set("maxresults", fmt.Sprintf("%v", p.MaxResults)) + } + if p.Timeout != 0 { + out.Set("timeout", fmt.Sprintf("%v", p.Timeout)) + } + + return out +} + +// ListBlobsParameters defines the set of customizable +// parameters to make a List Blobs call. +// +// See https://msdn.microsoft.com/en-us/library/azure/dd135734.aspx +type ListBlobsParameters struct { + Prefix string + Delimiter string + Marker string + Include string + MaxResults uint + Timeout uint +} + +func (p ListBlobsParameters) getParameters() url.Values { + out := url.Values{} + + if p.Prefix != "" { + out.Set("prefix", p.Prefix) + } + if p.Delimiter != "" { + out.Set("delimiter", p.Delimiter) + } + if p.Marker != "" { + out.Set("marker", p.Marker) + } + if p.Include != "" { + out.Set("include", p.Include) + } + if p.MaxResults != 0 { + out.Set("maxresults", fmt.Sprintf("%v", p.MaxResults)) + } + if p.Timeout != 0 { + out.Set("timeout", fmt.Sprintf("%v", p.Timeout)) + } + + return out +} + +// BlobType defines the type of the Azure Blob. +type BlobType string + +// Types of page blobs +const ( + BlobTypeBlock BlobType = "BlockBlob" + BlobTypePage BlobType = "PageBlob" + BlobTypeAppend BlobType = "AppendBlob" +) + +// PageWriteType defines the type updates that are going to be +// done on the page blob. +type PageWriteType string + +// Types of operations on page blobs +const ( + PageWriteTypeUpdate PageWriteType = "update" + PageWriteTypeClear PageWriteType = "clear" +) + +const ( + blobCopyStatusPending = "pending" + blobCopyStatusSuccess = "success" + blobCopyStatusAborted = "aborted" + blobCopyStatusFailed = "failed" +) + +// lease constants. +const ( + leaseHeaderPrefix = "x-ms-lease-" + leaseID = "x-ms-lease-id" + leaseAction = "x-ms-lease-action" + leaseBreakPeriod = "x-ms-lease-break-period" + leaseDuration = "x-ms-lease-duration" + leaseProposedID = "x-ms-proposed-lease-id" + leaseTime = "x-ms-lease-time" + + acquireLease = "acquire" + renewLease = "renew" + changeLease = "change" + releaseLease = "release" + breakLease = "break" +) + +// BlockListType is used to filter out types of blocks in a Get Blocks List call +// for a block blob. +// +// See https://msdn.microsoft.com/en-us/library/azure/dd179400.aspx for all +// block types. +type BlockListType string + +// Filters for listing blocks in block blobs +const ( + BlockListTypeAll BlockListType = "all" + BlockListTypeCommitted BlockListType = "committed" + BlockListTypeUncommitted BlockListType = "uncommitted" +) + +// ContainerAccessType defines the access level to the container from a public +// request. +// +// See https://msdn.microsoft.com/en-us/library/azure/dd179468.aspx and "x-ms- +// blob-public-access" header. 
+type ContainerAccessType string + +// Access options for containers +const ( + ContainerAccessTypePrivate ContainerAccessType = "" + ContainerAccessTypeBlob ContainerAccessType = "blob" + ContainerAccessTypeContainer ContainerAccessType = "container" +) + +// Maximum sizes (per REST API) for various concepts +const ( + MaxBlobBlockSize = 4 * 1024 * 1024 + MaxBlobPageSize = 4 * 1024 * 1024 +) + +// BlockStatus defines states a block for a block blob can +// be in. +type BlockStatus string + +// List of statuses that can be used to refer to a block in a block list +const ( + BlockStatusUncommitted BlockStatus = "Uncommitted" + BlockStatusCommitted BlockStatus = "Committed" + BlockStatusLatest BlockStatus = "Latest" +) + +// Block is used to create Block entities for Put Block List +// call. +type Block struct { + ID string + Status BlockStatus +} + +// BlockListResponse contains the response fields from Get Block List call. +// +// See https://msdn.microsoft.com/en-us/library/azure/dd179400.aspx +type BlockListResponse struct { + XMLName xml.Name `xml:"BlockList"` + CommittedBlocks []BlockResponse `xml:"CommittedBlocks>Block"` + UncommittedBlocks []BlockResponse `xml:"UncommittedBlocks>Block"` +} + +// BlockResponse contains the block information returned +// in the GetBlockListCall. +type BlockResponse struct { + Name string `xml:"Name"` + Size int64 `xml:"Size"` +} + +// GetPageRangesResponse contains the reponse fields from +// Get Page Ranges call. +// +// See https://msdn.microsoft.com/en-us/library/azure/ee691973.aspx +type GetPageRangesResponse struct { + XMLName xml.Name `xml:"PageList"` + PageList []PageRange `xml:"PageRange"` +} + +// PageRange contains information about a page of a page blob from +// Get Pages Range call. +// +// See https://msdn.microsoft.com/en-us/library/azure/ee691973.aspx +type PageRange struct { + Start int64 `xml:"Start"` + End int64 `xml:"End"` +} + +var ( + errBlobCopyAborted = errors.New("storage: blob copy is aborted") + errBlobCopyIDMismatch = errors.New("storage: blob copy id is a mismatch") +) + +// ListContainers returns the list of containers in a storage account along with +// pagination token and other response details. +// +// See https://msdn.microsoft.com/en-us/library/azure/dd179352.aspx +func (b BlobStorageClient) ListContainers(params ListContainersParameters) (ContainerListResponse, error) { + q := mergeParams(params.getParameters(), url.Values{"comp": {"list"}}) + uri := b.client.getEndpoint(blobServiceName, "", q) + headers := b.client.getStandardHeaders() + + var out ContainerListResponse + resp, err := b.client.exec("GET", uri, headers, nil) + if err != nil { + return out, err + } + defer resp.body.Close() + + err = xmlUnmarshal(resp.body, &out) + return out, err +} + +// CreateContainer creates a blob container within the storage account +// with given name and access level. Returns error if container already exists. +// +// See https://msdn.microsoft.com/en-us/library/azure/dd179468.aspx +func (b BlobStorageClient) CreateContainer(name string, access ContainerAccessType) error { + resp, err := b.createContainer(name, access) + if err != nil { + return err + } + defer resp.body.Close() + return checkRespCode(resp.statusCode, []int{http.StatusCreated}) +} + +// CreateContainerIfNotExists creates a blob container if it does not exist. Returns +// true if container is newly created or false if container already exists. 
+func (b BlobStorageClient) CreateContainerIfNotExists(name string, access ContainerAccessType) (bool, error) { + resp, err := b.createContainer(name, access) + if resp != nil { + defer resp.body.Close() + if resp.statusCode == http.StatusCreated || resp.statusCode == http.StatusConflict { + return resp.statusCode == http.StatusCreated, nil + } + } + return false, err +} + +func (b BlobStorageClient) createContainer(name string, access ContainerAccessType) (*storageResponse, error) { + verb := "PUT" + uri := b.client.getEndpoint(blobServiceName, pathForContainer(name), url.Values{"restype": {"container"}}) + + headers := b.client.getStandardHeaders() + if access != "" { + headers["x-ms-blob-public-access"] = string(access) + } + return b.client.exec(verb, uri, headers, nil) +} + +// ContainerExists returns true if a container with given name exists +// on the storage account, otherwise returns false. +func (b BlobStorageClient) ContainerExists(name string) (bool, error) { + verb := "HEAD" + uri := b.client.getEndpoint(blobServiceName, pathForContainer(name), url.Values{"restype": {"container"}}) + headers := b.client.getStandardHeaders() + + resp, err := b.client.exec(verb, uri, headers, nil) + if resp != nil { + defer resp.body.Close() + if resp.statusCode == http.StatusOK || resp.statusCode == http.StatusNotFound { + return resp.statusCode == http.StatusOK, nil + } + } + return false, err +} + +// DeleteContainer deletes the container with given name on the storage +// account. If the container does not exist returns error. +// +// See https://msdn.microsoft.com/en-us/library/azure/dd179408.aspx +func (b BlobStorageClient) DeleteContainer(name string) error { + resp, err := b.deleteContainer(name) + if err != nil { + return err + } + defer resp.body.Close() + return checkRespCode(resp.statusCode, []int{http.StatusAccepted}) +} + +// DeleteContainerIfExists deletes the container with given name on the storage +// account if it exists. Returns true if container is deleted with this call, or +// false if the container did not exist at the time of the Delete Container +// operation. +// +// See https://msdn.microsoft.com/en-us/library/azure/dd179408.aspx +func (b BlobStorageClient) DeleteContainerIfExists(name string) (bool, error) { + resp, err := b.deleteContainer(name) + if resp != nil { + defer resp.body.Close() + if resp.statusCode == http.StatusAccepted || resp.statusCode == http.StatusNotFound { + return resp.statusCode == http.StatusAccepted, nil + } + } + return false, err +} + +func (b BlobStorageClient) deleteContainer(name string) (*storageResponse, error) { + verb := "DELETE" + uri := b.client.getEndpoint(blobServiceName, pathForContainer(name), url.Values{"restype": {"container"}}) + + headers := b.client.getStandardHeaders() + return b.client.exec(verb, uri, headers, nil) +} + +// ListBlobs returns an object that contains list of blobs in the container, +// pagination token and other information in the response of List Blobs call. 
+// +// See https://msdn.microsoft.com/en-us/library/azure/dd135734.aspx +func (b BlobStorageClient) ListBlobs(container string, params ListBlobsParameters) (BlobListResponse, error) { + q := mergeParams(params.getParameters(), url.Values{ + "restype": {"container"}, + "comp": {"list"}}) + uri := b.client.getEndpoint(blobServiceName, pathForContainer(container), q) + headers := b.client.getStandardHeaders() + + var out BlobListResponse + resp, err := b.client.exec("GET", uri, headers, nil) + if err != nil { + return out, err + } + defer resp.body.Close() + + err = xmlUnmarshal(resp.body, &out) + return out, err +} + +// BlobExists returns true if a blob with given name exists on the specified +// container of the storage account. +func (b BlobStorageClient) BlobExists(container, name string) (bool, error) { + verb := "HEAD" + uri := b.client.getEndpoint(blobServiceName, pathForBlob(container, name), url.Values{}) + headers := b.client.getStandardHeaders() + resp, err := b.client.exec(verb, uri, headers, nil) + if resp != nil { + defer resp.body.Close() + if resp.statusCode == http.StatusOK || resp.statusCode == http.StatusNotFound { + return resp.statusCode == http.StatusOK, nil + } + } + return false, err +} + +// GetBlobURL gets the canonical URL to the blob with the specified name in the +// specified container. This method does not create a publicly accessible URL if +// the blob or container is private and this method does not check if the blob +// exists. +func (b BlobStorageClient) GetBlobURL(container, name string) string { + if container == "" { + container = "$root" + } + return b.client.getEndpoint(blobServiceName, pathForBlob(container, name), url.Values{}) +} + +// GetBlob returns a stream to read the blob. Caller must call Close() the +// reader to close on the underlying connection. +// +// See https://msdn.microsoft.com/en-us/library/azure/dd179440.aspx +func (b BlobStorageClient) GetBlob(container, name string) (io.ReadCloser, error) { + resp, err := b.getBlobRange(container, name, "", nil) + if err != nil { + return nil, err + } + + if err := checkRespCode(resp.statusCode, []int{http.StatusOK}); err != nil { + return nil, err + } + return resp.body, nil +} + +// GetBlobRange reads the specified range of a blob to a stream. The bytesRange +// string must be in a format like "0-", "10-100" as defined in HTTP 1.1 spec. +// +// See https://msdn.microsoft.com/en-us/library/azure/dd179440.aspx +func (b BlobStorageClient) GetBlobRange(container, name, bytesRange string, extraHeaders map[string]string) (io.ReadCloser, error) { + resp, err := b.getBlobRange(container, name, bytesRange, extraHeaders) + if err != nil { + return nil, err + } + + if err := checkRespCode(resp.statusCode, []int{http.StatusPartialContent}); err != nil { + return nil, err + } + return resp.body, nil +} + +func (b BlobStorageClient) getBlobRange(container, name, bytesRange string, extraHeaders map[string]string) (*storageResponse, error) { + verb := "GET" + uri := b.client.getEndpoint(blobServiceName, pathForBlob(container, name), url.Values{}) + + headers := b.client.getStandardHeaders() + if bytesRange != "" { + headers["Range"] = fmt.Sprintf("bytes=%s", bytesRange) + } + + for k, v := range extraHeaders { + headers[k] = v + } + + resp, err := b.client.exec(verb, uri, headers, nil) + if err != nil { + return nil, err + } + return resp, err +} + +// leasePut is common PUT code for the various aquire/release/break etc functions. 
+func (b BlobStorageClient) leaseCommonPut(container string, name string, headers map[string]string, expectedStatus int) (http.Header, error) { + params := url.Values{"comp": {"lease"}} + uri := b.client.getEndpoint(blobServiceName, pathForBlob(container, name), params) + + resp, err := b.client.exec("PUT", uri, headers, nil) + if err != nil { + return nil, err + } + defer resp.body.Close() + + if err := checkRespCode(resp.statusCode, []int{expectedStatus}); err != nil { + return nil, err + } + + return resp.headers, nil +} + +// AcquireLease creates a lease for a blob as per https://msdn.microsoft.com/en-us/library/azure/ee691972.aspx +// returns leaseID acquired +func (b BlobStorageClient) AcquireLease(container string, name string, leaseTimeInSeconds int, proposedLeaseID string) (returnedLeaseID string, err error) { + headers := b.client.getStandardHeaders() + headers[leaseAction] = acquireLease + headers[leaseProposedID] = proposedLeaseID + headers[leaseDuration] = strconv.Itoa(leaseTimeInSeconds) + + respHeaders, err := b.leaseCommonPut(container, name, headers, http.StatusCreated) + if err != nil { + return "", err + } + + returnedLeaseID = respHeaders.Get(http.CanonicalHeaderKey(leaseID)) + + if returnedLeaseID != "" { + return returnedLeaseID, nil + } + + // what should we return in case of HTTP 201 but no lease ID? + // or it just cant happen? (brave words) + return "", errors.New("LeaseID not returned") +} + +// BreakLease breaks the lease for a blob as per https://msdn.microsoft.com/en-us/library/azure/ee691972.aspx +// Returns the timeout remaining in the lease in seconds +func (b BlobStorageClient) BreakLease(container string, name string) (breakTimeout int, err error) { + headers := b.client.getStandardHeaders() + headers[leaseAction] = breakLease + return b.breakLeaseCommon(container, name, headers) +} + +// BreakLeaseWithBreakPeriod breaks the lease for a blob as per https://msdn.microsoft.com/en-us/library/azure/ee691972.aspx +// breakPeriodInSeconds is used to determine how long until new lease can be created. 
+// Returns the timeout remaining in the lease in seconds +func (b BlobStorageClient) BreakLeaseWithBreakPeriod(container string, name string, breakPeriodInSeconds int) (breakTimeout int, err error) { + headers := b.client.getStandardHeaders() + headers[leaseAction] = breakLease + headers[leaseBreakPeriod] = strconv.Itoa(breakPeriodInSeconds) + return b.breakLeaseCommon(container, name, headers) +} + +// breakLeaseCommon is common code for both version of BreakLease (with and without break period) +func (b BlobStorageClient) breakLeaseCommon(container string, name string, headers map[string]string) (breakTimeout int, err error) { + + respHeaders, err := b.leaseCommonPut(container, name, headers, http.StatusAccepted) + if err != nil { + return 0, err + } + + breakTimeoutStr := respHeaders.Get(http.CanonicalHeaderKey(leaseTime)) + if breakTimeoutStr != "" { + breakTimeout, err = strconv.Atoi(breakTimeoutStr) + if err != nil { + return 0, err + } + } + + return breakTimeout, nil +} + +// ChangeLease changes a lease ID for a blob as per https://msdn.microsoft.com/en-us/library/azure/ee691972.aspx +// Returns the new LeaseID acquired +func (b BlobStorageClient) ChangeLease(container string, name string, currentLeaseID string, proposedLeaseID string) (newLeaseID string, err error) { + headers := b.client.getStandardHeaders() + headers[leaseAction] = changeLease + headers[leaseID] = currentLeaseID + headers[leaseProposedID] = proposedLeaseID + + respHeaders, err := b.leaseCommonPut(container, name, headers, http.StatusOK) + if err != nil { + return "", err + } + + newLeaseID = respHeaders.Get(http.CanonicalHeaderKey(leaseID)) + if newLeaseID != "" { + return newLeaseID, nil + } + + return "", errors.New("LeaseID not returned") +} + +// ReleaseLease releases the lease for a blob as per https://msdn.microsoft.com/en-us/library/azure/ee691972.aspx +func (b BlobStorageClient) ReleaseLease(container string, name string, currentLeaseID string) error { + headers := b.client.getStandardHeaders() + headers[leaseAction] = releaseLease + headers[leaseID] = currentLeaseID + + _, err := b.leaseCommonPut(container, name, headers, http.StatusOK) + if err != nil { + return err + } + + return nil +} + +// RenewLease renews the lease for a blob as per https://msdn.microsoft.com/en-us/library/azure/ee691972.aspx +func (b BlobStorageClient) RenewLease(container string, name string, currentLeaseID string) error { + headers := b.client.getStandardHeaders() + headers[leaseAction] = renewLease + headers[leaseID] = currentLeaseID + + _, err := b.leaseCommonPut(container, name, headers, http.StatusOK) + if err != nil { + return err + } + + return nil +} + +// GetBlobProperties provides various information about the specified +// blob. 
See https://msdn.microsoft.com/en-us/library/azure/dd179394.aspx +func (b BlobStorageClient) GetBlobProperties(container, name string) (*BlobProperties, error) { + verb := "HEAD" + uri := b.client.getEndpoint(blobServiceName, pathForBlob(container, name), url.Values{}) + + headers := b.client.getStandardHeaders() + resp, err := b.client.exec(verb, uri, headers, nil) + if err != nil { + return nil, err + } + defer resp.body.Close() + + if err := checkRespCode(resp.statusCode, []int{http.StatusOK}); err != nil { + return nil, err + } + + var contentLength int64 + contentLengthStr := resp.headers.Get("Content-Length") + if contentLengthStr != "" { + contentLength, err = strconv.ParseInt(contentLengthStr, 0, 64) + if err != nil { + return nil, err + } + } + + var sequenceNum int64 + sequenceNumStr := resp.headers.Get("x-ms-blob-sequence-number") + if sequenceNumStr != "" { + sequenceNum, err = strconv.ParseInt(sequenceNumStr, 0, 64) + if err != nil { + return nil, err + } + } + + return &BlobProperties{ + LastModified: resp.headers.Get("Last-Modified"), + Etag: resp.headers.Get("Etag"), + ContentMD5: resp.headers.Get("Content-MD5"), + ContentLength: contentLength, + ContentEncoding: resp.headers.Get("Content-Encoding"), + ContentType: resp.headers.Get("Content-Type"), + CacheControl: resp.headers.Get("Cache-Control"), + ContentLanguage: resp.headers.Get("Content-Language"), + SequenceNumber: sequenceNum, + CopyCompletionTime: resp.headers.Get("x-ms-copy-completion-time"), + CopyStatusDescription: resp.headers.Get("x-ms-copy-status-description"), + CopyID: resp.headers.Get("x-ms-copy-id"), + CopyProgress: resp.headers.Get("x-ms-copy-progress"), + CopySource: resp.headers.Get("x-ms-copy-source"), + CopyStatus: resp.headers.Get("x-ms-copy-status"), + BlobType: BlobType(resp.headers.Get("x-ms-blob-type")), + LeaseStatus: resp.headers.Get("x-ms-lease-status"), + }, nil +} + +// SetBlobProperties replaces the BlobHeaders for the specified blob. +// +// Some keys may be converted to Camel-Case before sending. All keys +// are returned in lower case by GetBlobProperties. HTTP header names +// are case-insensitive so case munging should not matter to other +// applications either. +// +// See https://msdn.microsoft.com/en-us/library/azure/ee691966.aspx +func (b BlobStorageClient) SetBlobProperties(container, name string, blobHeaders BlobHeaders) error { + params := url.Values{"comp": {"properties"}} + uri := b.client.getEndpoint(blobServiceName, pathForBlob(container, name), params) + headers := b.client.getStandardHeaders() + + extraHeaders := headersFromStruct(blobHeaders) + + for k, v := range extraHeaders { + headers[k] = v + } + + resp, err := b.client.exec("PUT", uri, headers, nil) + if err != nil { + return err + } + defer resp.body.Close() + + return checkRespCode(resp.statusCode, []int{http.StatusOK}) +} + +// SetBlobMetadata replaces the metadata for the specified blob. +// +// Some keys may be converted to Camel-Case before sending. All keys +// are returned in lower case by GetBlobMetadata. HTTP header names +// are case-insensitive so case munging should not matter to other +// applications either. 
+// +// See https://msdn.microsoft.com/en-us/library/azure/dd179414.aspx +func (b BlobStorageClient) SetBlobMetadata(container, name string, metadata map[string]string, extraHeaders map[string]string) error { + params := url.Values{"comp": {"metadata"}} + uri := b.client.getEndpoint(blobServiceName, pathForBlob(container, name), params) + headers := b.client.getStandardHeaders() + for k, v := range metadata { + headers[userDefinedMetadataHeaderPrefix+k] = v + } + + for k, v := range extraHeaders { + headers[k] = v + } + + resp, err := b.client.exec("PUT", uri, headers, nil) + if err != nil { + return err + } + defer resp.body.Close() + + return checkRespCode(resp.statusCode, []int{http.StatusOK}) +} + +// GetBlobMetadata returns all user-defined metadata for the specified blob. +// +// All metadata keys will be returned in lower case. (HTTP header +// names are case-insensitive.) +// +// See https://msdn.microsoft.com/en-us/library/azure/dd179414.aspx +func (b BlobStorageClient) GetBlobMetadata(container, name string) (map[string]string, error) { + params := url.Values{"comp": {"metadata"}} + uri := b.client.getEndpoint(blobServiceName, pathForBlob(container, name), params) + headers := b.client.getStandardHeaders() + + resp, err := b.client.exec("GET", uri, headers, nil) + if err != nil { + return nil, err + } + defer resp.body.Close() + + if err := checkRespCode(resp.statusCode, []int{http.StatusOK}); err != nil { + return nil, err + } + + metadata := make(map[string]string) + for k, v := range resp.headers { + // Can't trust CanonicalHeaderKey() to munge case + // reliably. "_" is allowed in identifiers: + // https://msdn.microsoft.com/en-us/library/azure/dd179414.aspx + // https://msdn.microsoft.com/library/aa664670(VS.71).aspx + // http://tools.ietf.org/html/rfc7230#section-3.2 + // ...but "_" is considered invalid by + // CanonicalMIMEHeaderKey in + // https://golang.org/src/net/textproto/reader.go?s=14615:14659#L542 + // so k can be "X-Ms-Meta-Foo" or "x-ms-meta-foo_bar". + k = strings.ToLower(k) + if len(v) == 0 || !strings.HasPrefix(k, strings.ToLower(userDefinedMetadataHeaderPrefix)) { + continue + } + // metadata["foo"] = content of the last X-Ms-Meta-Foo header + k = k[len(userDefinedMetadataHeaderPrefix):] + metadata[k] = v[len(v)-1] + } + return metadata, nil +} + +// CreateBlockBlob initializes an empty block blob with no blocks. +// +// See https://msdn.microsoft.com/en-us/library/azure/dd179451.aspx +func (b BlobStorageClient) CreateBlockBlob(container, name string) error { + return b.CreateBlockBlobFromReader(container, name, 0, nil, nil) +} + +// CreateBlockBlobFromReader initializes a block blob using data from +// reader. Size must be the number of bytes read from reader. To +// create an empty blob, use size==0 and reader==nil. +// +// The API rejects requests with size > 64 MiB (but this limit is not +// checked by the SDK). To write a larger blob, use CreateBlockBlob, +// PutBlock, and PutBlockList. 
+// +// See https://msdn.microsoft.com/en-us/library/azure/dd179451.aspx +func (b BlobStorageClient) CreateBlockBlobFromReader(container, name string, size uint64, blob io.Reader, extraHeaders map[string]string) error { + path := fmt.Sprintf("%s/%s", container, name) + uri := b.client.getEndpoint(blobServiceName, path, url.Values{}) + headers := b.client.getStandardHeaders() + headers["x-ms-blob-type"] = string(BlobTypeBlock) + headers["Content-Length"] = fmt.Sprintf("%d", size) + + for k, v := range extraHeaders { + headers[k] = v + } + + resp, err := b.client.exec("PUT", uri, headers, blob) + if err != nil { + return err + } + defer resp.body.Close() + return checkRespCode(resp.statusCode, []int{http.StatusCreated}) +} + +// PutBlock saves the given data chunk to the specified block blob with +// given ID. +// +// The API rejects chunks larger than 4 MiB (but this limit is not +// checked by the SDK). +// +// See https://msdn.microsoft.com/en-us/library/azure/dd135726.aspx +func (b BlobStorageClient) PutBlock(container, name, blockID string, chunk []byte) error { + return b.PutBlockWithLength(container, name, blockID, uint64(len(chunk)), bytes.NewReader(chunk), nil) +} + +// PutBlockWithLength saves the given data stream of exactly specified size to +// the block blob with given ID. It is an alternative to PutBlocks where data +// comes as stream but the length is known in advance. +// +// The API rejects requests with size > 4 MiB (but this limit is not +// checked by the SDK). +// +// See https://msdn.microsoft.com/en-us/library/azure/dd135726.aspx +func (b BlobStorageClient) PutBlockWithLength(container, name, blockID string, size uint64, blob io.Reader, extraHeaders map[string]string) error { + uri := b.client.getEndpoint(blobServiceName, pathForBlob(container, name), url.Values{"comp": {"block"}, "blockid": {blockID}}) + headers := b.client.getStandardHeaders() + headers["x-ms-blob-type"] = string(BlobTypeBlock) + headers["Content-Length"] = fmt.Sprintf("%v", size) + + for k, v := range extraHeaders { + headers[k] = v + } + + resp, err := b.client.exec("PUT", uri, headers, blob) + if err != nil { + return err + } + + defer resp.body.Close() + return checkRespCode(resp.statusCode, []int{http.StatusCreated}) +} + +// PutBlockList saves list of blocks to the specified block blob. +// +// See https://msdn.microsoft.com/en-us/library/azure/dd179467.aspx +func (b BlobStorageClient) PutBlockList(container, name string, blocks []Block) error { + blockListXML := prepareBlockListRequest(blocks) + + uri := b.client.getEndpoint(blobServiceName, pathForBlob(container, name), url.Values{"comp": {"blocklist"}}) + headers := b.client.getStandardHeaders() + headers["Content-Length"] = fmt.Sprintf("%v", len(blockListXML)) + + resp, err := b.client.exec("PUT", uri, headers, strings.NewReader(blockListXML)) + if err != nil { + return err + } + defer resp.body.Close() + return checkRespCode(resp.statusCode, []int{http.StatusCreated}) +} + +// GetBlockList retrieves list of blocks in the specified block blob. 
+// +// See https://msdn.microsoft.com/en-us/library/azure/dd179400.aspx +func (b BlobStorageClient) GetBlockList(container, name string, blockType BlockListType) (BlockListResponse, error) { + params := url.Values{"comp": {"blocklist"}, "blocklisttype": {string(blockType)}} + uri := b.client.getEndpoint(blobServiceName, pathForBlob(container, name), params) + headers := b.client.getStandardHeaders() + + var out BlockListResponse + resp, err := b.client.exec("GET", uri, headers, nil) + if err != nil { + return out, err + } + defer resp.body.Close() + + err = xmlUnmarshal(resp.body, &out) + return out, err +} + +// PutPageBlob initializes an empty page blob with specified name and maximum +// size in bytes (size must be aligned to a 512-byte boundary). A page blob must +// be created using this method before writing pages. +// +// See https://msdn.microsoft.com/en-us/library/azure/dd179451.aspx +func (b BlobStorageClient) PutPageBlob(container, name string, size int64, extraHeaders map[string]string) error { + path := fmt.Sprintf("%s/%s", container, name) + uri := b.client.getEndpoint(blobServiceName, path, url.Values{}) + headers := b.client.getStandardHeaders() + headers["x-ms-blob-type"] = string(BlobTypePage) + headers["x-ms-blob-content-length"] = fmt.Sprintf("%v", size) + + for k, v := range extraHeaders { + headers[k] = v + } + + resp, err := b.client.exec("PUT", uri, headers, nil) + if err != nil { + return err + } + defer resp.body.Close() + + return checkRespCode(resp.statusCode, []int{http.StatusCreated}) +} + +// PutPage writes a range of pages to a page blob or clears the given range. +// In case of 'clear' writes, given chunk is discarded. Ranges must be aligned +// with 512-byte boundaries and chunk must be of size multiplies by 512. +// +// See https://msdn.microsoft.com/en-us/library/ee691975.aspx +func (b BlobStorageClient) PutPage(container, name string, startByte, endByte int64, writeType PageWriteType, chunk []byte, extraHeaders map[string]string) error { + path := fmt.Sprintf("%s/%s", container, name) + uri := b.client.getEndpoint(blobServiceName, path, url.Values{"comp": {"page"}}) + headers := b.client.getStandardHeaders() + headers["x-ms-blob-type"] = string(BlobTypePage) + headers["x-ms-page-write"] = string(writeType) + headers["x-ms-range"] = fmt.Sprintf("bytes=%v-%v", startByte, endByte) + for k, v := range extraHeaders { + headers[k] = v + } + var contentLength int64 + var data io.Reader + if writeType == PageWriteTypeClear { + contentLength = 0 + data = bytes.NewReader([]byte{}) + } else { + contentLength = int64(len(chunk)) + data = bytes.NewReader(chunk) + } + headers["Content-Length"] = fmt.Sprintf("%v", contentLength) + + resp, err := b.client.exec("PUT", uri, headers, data) + if err != nil { + return err + } + defer resp.body.Close() + + return checkRespCode(resp.statusCode, []int{http.StatusCreated}) +} + +// GetPageRanges returns the list of valid page ranges for a page blob. 
+// +// See https://msdn.microsoft.com/en-us/library/azure/ee691973.aspx +func (b BlobStorageClient) GetPageRanges(container, name string) (GetPageRangesResponse, error) { + path := fmt.Sprintf("%s/%s", container, name) + uri := b.client.getEndpoint(blobServiceName, path, url.Values{"comp": {"pagelist"}}) + headers := b.client.getStandardHeaders() + + var out GetPageRangesResponse + resp, err := b.client.exec("GET", uri, headers, nil) + if err != nil { + return out, err + } + defer resp.body.Close() + + if err := checkRespCode(resp.statusCode, []int{http.StatusOK}); err != nil { + return out, err + } + err = xmlUnmarshal(resp.body, &out) + return out, err +} + +// PutAppendBlob initializes an empty append blob with specified name. An +// append blob must be created using this method before appending blocks. +// +// See https://msdn.microsoft.com/en-us/library/azure/dd179451.aspx +func (b BlobStorageClient) PutAppendBlob(container, name string, extraHeaders map[string]string) error { + path := fmt.Sprintf("%s/%s", container, name) + uri := b.client.getEndpoint(blobServiceName, path, url.Values{}) + headers := b.client.getStandardHeaders() + headers["x-ms-blob-type"] = string(BlobTypeAppend) + + for k, v := range extraHeaders { + headers[k] = v + } + + resp, err := b.client.exec("PUT", uri, headers, nil) + if err != nil { + return err + } + defer resp.body.Close() + + return checkRespCode(resp.statusCode, []int{http.StatusCreated}) +} + +// AppendBlock appends a block to an append blob. +// +// See https://msdn.microsoft.com/en-us/library/azure/mt427365.aspx +func (b BlobStorageClient) AppendBlock(container, name string, chunk []byte, extraHeaders map[string]string) error { + path := fmt.Sprintf("%s/%s", container, name) + uri := b.client.getEndpoint(blobServiceName, path, url.Values{"comp": {"appendblock"}}) + headers := b.client.getStandardHeaders() + headers["x-ms-blob-type"] = string(BlobTypeAppend) + headers["Content-Length"] = fmt.Sprintf("%v", len(chunk)) + + for k, v := range extraHeaders { + headers[k] = v + } + + resp, err := b.client.exec("PUT", uri, headers, bytes.NewReader(chunk)) + if err != nil { + return err + } + defer resp.body.Close() + + return checkRespCode(resp.statusCode, []int{http.StatusCreated}) +} + +// CopyBlob starts a blob copy operation and waits for the operation to +// complete. sourceBlob parameter must be a canonical URL to the blob (can be +// obtained using GetBlobURL method.) There is no SLA on blob copy and therefore +// this helper method works faster on smaller files. 
+// +// See https://msdn.microsoft.com/en-us/library/azure/dd894037.aspx +func (b BlobStorageClient) CopyBlob(container, name, sourceBlob string) error { + copyID, err := b.startBlobCopy(container, name, sourceBlob) + if err != nil { + return err + } + + return b.waitForBlobCopy(container, name, copyID) +} + +func (b BlobStorageClient) startBlobCopy(container, name, sourceBlob string) (string, error) { + uri := b.client.getEndpoint(blobServiceName, pathForBlob(container, name), url.Values{}) + + headers := b.client.getStandardHeaders() + headers["x-ms-copy-source"] = sourceBlob + + resp, err := b.client.exec("PUT", uri, headers, nil) + if err != nil { + return "", err + } + defer resp.body.Close() + + if err := checkRespCode(resp.statusCode, []int{http.StatusAccepted, http.StatusCreated}); err != nil { + return "", err + } + + copyID := resp.headers.Get("x-ms-copy-id") + if copyID == "" { + return "", errors.New("Got empty copy id header") + } + return copyID, nil +} + +func (b BlobStorageClient) waitForBlobCopy(container, name, copyID string) error { + for { + props, err := b.GetBlobProperties(container, name) + if err != nil { + return err + } + + if props.CopyID != copyID { + return errBlobCopyIDMismatch + } + + switch props.CopyStatus { + case blobCopyStatusSuccess: + return nil + case blobCopyStatusPending: + continue + case blobCopyStatusAborted: + return errBlobCopyAborted + case blobCopyStatusFailed: + return fmt.Errorf("storage: blob copy failed. Id=%s Description=%s", props.CopyID, props.CopyStatusDescription) + default: + return fmt.Errorf("storage: unhandled blob copy status: '%s'", props.CopyStatus) + } + } +} + +// DeleteBlob deletes the given blob from the specified container. +// If the blob does not exists at the time of the Delete Blob operation, it +// returns error. See https://msdn.microsoft.com/en-us/library/azure/dd179413.aspx +func (b BlobStorageClient) DeleteBlob(container, name string, extraHeaders map[string]string) error { + resp, err := b.deleteBlob(container, name, extraHeaders) + if err != nil { + return err + } + defer resp.body.Close() + return checkRespCode(resp.statusCode, []int{http.StatusAccepted}) +} + +// DeleteBlobIfExists deletes the given blob from the specified container If the +// blob is deleted with this call, returns true. Otherwise returns false. 
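CopyBlob above starts a server-side copy and then polls GetBlobProperties until the service reports a terminal copy status. A hedged usage sketch, with illustrative container and blob names, assuming blobCli is a storage.BlobStorageClient:

func moveBlob(blobCli storage.BlobStorageClient) error {
	// CopyBlob needs the canonical source URL, e.g. from GetBlobURL.
	src := blobCli.GetBlobURL("snapshots", "2017-06-01.tar.gz")

	if err := blobCli.CopyBlob("archive", "2017-06-01.tar.gz", src); err != nil {
		return err
	}
	// Remove the source once the copy has completed.
	return blobCli.DeleteBlob("snapshots", "2017-06-01.tar.gz", nil)
}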
+// +// See https://msdn.microsoft.com/en-us/library/azure/dd179413.aspx +func (b BlobStorageClient) DeleteBlobIfExists(container, name string, extraHeaders map[string]string) (bool, error) { + resp, err := b.deleteBlob(container, name, extraHeaders) + if resp != nil { + defer resp.body.Close() + if resp.statusCode == http.StatusAccepted || resp.statusCode == http.StatusNotFound { + return resp.statusCode == http.StatusAccepted, nil + } + } + return false, err +} + +func (b BlobStorageClient) deleteBlob(container, name string, extraHeaders map[string]string) (*storageResponse, error) { + verb := "DELETE" + uri := b.client.getEndpoint(blobServiceName, pathForBlob(container, name), url.Values{}) + headers := b.client.getStandardHeaders() + for k, v := range extraHeaders { + headers[k] = v + } + + return b.client.exec(verb, uri, headers, nil) +} + +// helper method to construct the path to a container given its name +func pathForContainer(name string) string { + return fmt.Sprintf("/%s", name) +} + +// helper method to construct the path to a blob given its container and blob +// name +func pathForBlob(container, name string) string { + return fmt.Sprintf("/%s/%s", container, name) +} + +// GetBlobSASURI creates an URL to the specified blob which contains the Shared +// Access Signature with specified permissions and expiration time. +// +// See https://msdn.microsoft.com/en-us/library/azure/ee395415.aspx +func (b BlobStorageClient) GetBlobSASURI(container, name string, expiry time.Time, permissions string) (string, error) { + var ( + signedPermissions = permissions + blobURL = b.GetBlobURL(container, name) + ) + canonicalizedResource, err := b.client.buildCanonicalizedResource(blobURL) + + if err != nil { + return "", err + } + + // "The canonicalizedresouce portion of the string is a canonical path to the signed resource. + // It must include the service name (blob, table, queue or file) for version 2015-02-21 or + // later, the storage account name, and the resource name, and must be URL-decoded. + // -- https://msdn.microsoft.com/en-us/library/azure/dn140255.aspx + + // We need to replace + with %2b first to avoid being treated as a space (which is correct for query strings, but not the path component). 
+ canonicalizedResource = strings.Replace(canonicalizedResource, "+", "%2b", -1) + + canonicalizedResource, err = url.QueryUnescape(canonicalizedResource) + if err != nil { + return "", err + } + + signedExpiry := expiry.UTC().Format(time.RFC3339) + signedResource := "b" + + stringToSign, err := blobSASStringToSign(b.client.apiVersion, canonicalizedResource, signedExpiry, signedPermissions) + if err != nil { + return "", err + } + + sig := b.client.computeHmac256(stringToSign) + sasParams := url.Values{ + "sv": {b.client.apiVersion}, + "se": {signedExpiry}, + "sr": {signedResource}, + "sp": {signedPermissions}, + "sig": {sig}, + } + + sasURL, err := url.Parse(blobURL) + if err != nil { + return "", err + } + sasURL.RawQuery = sasParams.Encode() + return sasURL.String(), nil +} + +func blobSASStringToSign(signedVersion, canonicalizedResource, signedExpiry, signedPermissions string) (string, error) { + var signedStart, signedIdentifier, rscc, rscd, rsce, rscl, rsct string + + if signedVersion >= "2015-02-21" { + canonicalizedResource = "/blob" + canonicalizedResource + } + + // reference: http://msdn.microsoft.com/en-us/library/azure/dn140255.aspx + if signedVersion >= "2013-08-15" { + return fmt.Sprintf("%s\n%s\n%s\n%s\n%s\n%s\n%s\n%s\n%s\n%s\n%s", signedPermissions, signedStart, signedExpiry, canonicalizedResource, signedIdentifier, signedVersion, rscc, rscd, rsce, rscl, rsct), nil + } + return "", errors.New("storage: not implemented SAS for versions earlier than 2013-08-15") +} diff --git a/vendor/github.com/docker/distribution/vendor/github.com/Azure/azure-sdk-for-go/storage/client.go b/vendor/github.com/docker/distribution/vendor/github.com/Azure/azure-sdk-for-go/storage/client.go new file mode 100644 index 000000000..2816e03ec --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/github.com/Azure/azure-sdk-for-go/storage/client.go @@ -0,0 +1,551 @@ +// Package storage provides clients for Microsoft Azure Storage Services. +package storage + +import ( + "bytes" + "encoding/base64" + "encoding/json" + "encoding/xml" + "errors" + "fmt" + "io" + "io/ioutil" + "net/http" + "net/url" + "regexp" + "sort" + "strconv" + "strings" +) + +const ( + // DefaultBaseURL is the domain name used for storage requests when a + // default client is created. + DefaultBaseURL = "core.windows.net" + + // DefaultAPIVersion is the Azure Storage API version string used when a + // basic client is created. + DefaultAPIVersion = "2015-02-21" + + defaultUseHTTPS = true + + // StorageEmulatorAccountName is the fixed storage account used by Azure Storage Emulator + StorageEmulatorAccountName = "devstoreaccount1" + + // StorageEmulatorAccountKey is the the fixed storage account used by Azure Storage Emulator + StorageEmulatorAccountKey = "Eby8vdM02xNOcqFlqUwJPLlmEtlCDXJ1OUzFT50uSRZ6IFsuFq2UVErCz4I6tq/K1SZFPTOtr/KBHBeksoGMGw==" + + blobServiceName = "blob" + tableServiceName = "table" + queueServiceName = "queue" + fileServiceName = "file" + + storageEmulatorBlob = "127.0.0.1:10000" + storageEmulatorTable = "127.0.0.1:10002" + storageEmulatorQueue = "127.0.0.1:10001" +) + +// Client is the object that needs to be constructed to perform +// operations on the storage account. +type Client struct { + // HTTPClient is the http.Client used to initiate API + // requests. If it is nil, http.DefaultClient is used. 
+ HTTPClient *http.Client + + accountName string + accountKey []byte + useHTTPS bool + baseURL string + apiVersion string +} + +type storageResponse struct { + statusCode int + headers http.Header + body io.ReadCloser +} + +type odataResponse struct { + storageResponse + odata odataErrorMessage +} + +// AzureStorageServiceError contains fields of the error response from +// Azure Storage Service REST API. See https://msdn.microsoft.com/en-us/library/azure/dd179382.aspx +// Some fields might be specific to certain calls. +type AzureStorageServiceError struct { + Code string `xml:"Code"` + Message string `xml:"Message"` + AuthenticationErrorDetail string `xml:"AuthenticationErrorDetail"` + QueryParameterName string `xml:"QueryParameterName"` + QueryParameterValue string `xml:"QueryParameterValue"` + Reason string `xml:"Reason"` + StatusCode int + RequestID string +} + +type odataErrorMessageMessage struct { + Lang string `json:"lang"` + Value string `json:"value"` +} + +type odataErrorMessageInternal struct { + Code string `json:"code"` + Message odataErrorMessageMessage `json:"message"` +} + +type odataErrorMessage struct { + Err odataErrorMessageInternal `json:"odata.error"` +} + +// UnexpectedStatusCodeError is returned when a storage service responds with neither an error +// nor with an HTTP status code indicating success. +type UnexpectedStatusCodeError struct { + allowed []int + got int +} + +func (e UnexpectedStatusCodeError) Error() string { + s := func(i int) string { return fmt.Sprintf("%d %s", i, http.StatusText(i)) } + + got := s(e.got) + expected := []string{} + for _, v := range e.allowed { + expected = append(expected, s(v)) + } + return fmt.Sprintf("storage: status code from service response is %s; was expecting %s", got, strings.Join(expected, " or ")) +} + +// Got is the actual status code returned by Azure. +func (e UnexpectedStatusCodeError) Got() int { + return e.got +} + +// NewBasicClient constructs a Client with given storage service name and +// key. +func NewBasicClient(accountName, accountKey string) (Client, error) { + if accountName == StorageEmulatorAccountName { + return NewEmulatorClient() + } + return NewClient(accountName, accountKey, DefaultBaseURL, DefaultAPIVersion, defaultUseHTTPS) +} + +//NewEmulatorClient constructs a Client intended to only work with Azure +//Storage Emulator +func NewEmulatorClient() (Client, error) { + return NewClient(StorageEmulatorAccountName, StorageEmulatorAccountKey, DefaultBaseURL, DefaultAPIVersion, false) +} + +// NewClient constructs a Client. This should be used if the caller wants +// to specify whether to use HTTPS, a specific REST API version or a custom +// storage endpoint other than Azure Public Cloud.
+func NewClient(accountName, accountKey, blobServiceBaseURL, apiVersion string, useHTTPS bool) (Client, error) { + var c Client + if accountName == "" { + return c, fmt.Errorf("azure: account name required") + } else if accountKey == "" { + return c, fmt.Errorf("azure: account key required") + } else if blobServiceBaseURL == "" { + return c, fmt.Errorf("azure: base storage service url required") + } + + key, err := base64.StdEncoding.DecodeString(accountKey) + if err != nil { + return c, fmt.Errorf("azure: malformed storage account key: %v", err) + } + + return Client{ + accountName: accountName, + accountKey: key, + useHTTPS: useHTTPS, + baseURL: blobServiceBaseURL, + apiVersion: apiVersion, + }, nil +} + +func (c Client) getBaseURL(service string) string { + scheme := "http" + if c.useHTTPS { + scheme = "https" + } + host := "" + if c.accountName == StorageEmulatorAccountName { + switch service { + case blobServiceName: + host = storageEmulatorBlob + case tableServiceName: + host = storageEmulatorTable + case queueServiceName: + host = storageEmulatorQueue + } + } else { + host = fmt.Sprintf("%s.%s.%s", c.accountName, service, c.baseURL) + } + + u := &url.URL{ + Scheme: scheme, + Host: host} + return u.String() +} + +func (c Client) getEndpoint(service, path string, params url.Values) string { + u, err := url.Parse(c.getBaseURL(service)) + if err != nil { + // really should not be happening + panic(err) + } + + // API doesn't accept path segments not starting with '/' + if !strings.HasPrefix(path, "/") { + path = fmt.Sprintf("/%v", path) + } + + if c.accountName == StorageEmulatorAccountName { + path = fmt.Sprintf("/%v%v", StorageEmulatorAccountName, path) + } + + u.Path = path + u.RawQuery = params.Encode() + return u.String() +} + +// GetBlobService returns a BlobStorageClient which can operate on the blob +// service of the storage account. +func (c Client) GetBlobService() BlobStorageClient { + return BlobStorageClient{c} +} + +// GetQueueService returns a QueueServiceClient which can operate on the queue +// service of the storage account. +func (c Client) GetQueueService() QueueServiceClient { + return QueueServiceClient{c} +} + +// GetTableService returns a TableServiceClient which can operate on the table +// service of the storage account. +func (c Client) GetTableService() TableServiceClient { + return TableServiceClient{c} +} + +// GetFileService returns a FileServiceClient which can operate on the file +// service of the storage account. 
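A construction sketch (not part of the vendored patch) tying the factory functions and the service getters together; the account name and key are placeholders and error handling is kept minimal.

func newServiceClients(account, key string) (storage.BlobStorageClient, storage.QueueServiceClient, error) {
	// NewBasicClient uses DefaultBaseURL, DefaultAPIVersion and HTTPS;
	// NewClient could be used instead for a non-public-cloud endpoint.
	client, err := storage.NewBasicClient(account, key)
	if err != nil {
		return storage.BlobStorageClient{}, storage.QueueServiceClient{}, err
	}
	return client.GetBlobService(), client.GetQueueService(), nil
}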
+func (c Client) GetFileService() FileServiceClient { + return FileServiceClient{c} +} + +func (c Client) createAuthorizationHeader(canonicalizedString string) string { + signature := c.computeHmac256(canonicalizedString) + return fmt.Sprintf("%s %s:%s", "SharedKey", c.getCanonicalizedAccountName(), signature) +} + +func (c Client) getAuthorizationHeader(verb, url string, headers map[string]string) (string, error) { + canonicalizedResource, err := c.buildCanonicalizedResource(url) + if err != nil { + return "", err + } + + canonicalizedString := c.buildCanonicalizedString(verb, headers, canonicalizedResource) + return c.createAuthorizationHeader(canonicalizedString), nil +} + +func (c Client) getStandardHeaders() map[string]string { + return map[string]string{ + "x-ms-version": c.apiVersion, + "x-ms-date": currentTimeRfc1123Formatted(), + } +} + +func (c Client) getCanonicalizedAccountName() string { + // since we may be trying to access a secondary storage account, we need to + // remove the -secondary part of the storage name + return strings.TrimSuffix(c.accountName, "-secondary") +} + +func (c Client) buildCanonicalizedHeader(headers map[string]string) string { + cm := make(map[string]string) + + for k, v := range headers { + headerName := strings.TrimSpace(strings.ToLower(k)) + match, _ := regexp.MatchString("x-ms-", headerName) + if match { + cm[headerName] = v + } + } + + if len(cm) == 0 { + return "" + } + + keys := make([]string, 0, len(cm)) + for key := range cm { + keys = append(keys, key) + } + + sort.Strings(keys) + + ch := "" + + for i, key := range keys { + if i == len(keys)-1 { + ch += fmt.Sprintf("%s:%s", key, cm[key]) + } else { + ch += fmt.Sprintf("%s:%s\n", key, cm[key]) + } + } + return ch +} + +func (c Client) buildCanonicalizedResourceTable(uri string) (string, error) { + errMsg := "buildCanonicalizedResourceTable error: %s" + u, err := url.Parse(uri) + if err != nil { + return "", fmt.Errorf(errMsg, err.Error()) + } + + cr := "/" + c.getCanonicalizedAccountName() + + if len(u.Path) > 0 { + cr += u.Path + } + + return cr, nil +} + +func (c Client) buildCanonicalizedResource(uri string) (string, error) { + errMsg := "buildCanonicalizedResource error: %s" + u, err := url.Parse(uri) + if err != nil { + return "", fmt.Errorf(errMsg, err.Error()) + } + + cr := "/" + c.getCanonicalizedAccountName() + + if len(u.Path) > 0 { + // Any portion of the CanonicalizedResource string that is derived from + // the resource's URI should be encoded exactly as it is in the URI. 
+ // -- https://msdn.microsoft.com/en-gb/library/azure/dd179428.aspx + cr += u.EscapedPath() + } + + params, err := url.ParseQuery(u.RawQuery) + if err != nil { + return "", fmt.Errorf(errMsg, err.Error()) + } + + if len(params) > 0 { + cr += "\n" + keys := make([]string, 0, len(params)) + for key := range params { + keys = append(keys, key) + } + + sort.Strings(keys) + + for i, key := range keys { + if len(params[key]) > 1 { + sort.Strings(params[key]) + } + + if i == len(keys)-1 { + cr += fmt.Sprintf("%s:%s", key, strings.Join(params[key], ",")) + } else { + cr += fmt.Sprintf("%s:%s\n", key, strings.Join(params[key], ",")) + } + } + } + + return cr, nil +} + +func (c Client) buildCanonicalizedString(verb string, headers map[string]string, canonicalizedResource string) string { + contentLength := headers["Content-Length"] + if contentLength == "0" { + contentLength = "" + } + canonicalizedString := fmt.Sprintf("%s\n%s\n%s\n%s\n%s\n%s\n%s\n%s\n%s\n%s\n%s\n%s\n%s\n%s", + verb, + headers["Content-Encoding"], + headers["Content-Language"], + contentLength, + headers["Content-MD5"], + headers["Content-Type"], + headers["Date"], + headers["If-Modified-Since"], + headers["If-Match"], + headers["If-None-Match"], + headers["If-Unmodified-Since"], + headers["Range"], + c.buildCanonicalizedHeader(headers), + canonicalizedResource) + + return canonicalizedString +} + +func (c Client) exec(verb, url string, headers map[string]string, body io.Reader) (*storageResponse, error) { + authHeader, err := c.getAuthorizationHeader(verb, url, headers) + if err != nil { + return nil, err + } + headers["Authorization"] = authHeader + if err != nil { + return nil, err + } + + req, err := http.NewRequest(verb, url, body) + if err != nil { + return nil, errors.New("azure/storage: error creating request: " + err.Error()) + } + + if clstr, ok := headers["Content-Length"]; ok { + // content length header is being signed, but completely ignored by golang. 
+ // instead we have to use the ContentLength property on the request struct + // (see https://golang.org/src/net/http/request.go?s=18140:18370#L536 and + // https://golang.org/src/net/http/transfer.go?s=1739:2467#L49) + req.ContentLength, err = strconv.ParseInt(clstr, 10, 64) + if err != nil { + return nil, err + } + } + for k, v := range headers { + req.Header.Add(k, v) + } + + httpClient := c.HTTPClient + if httpClient == nil { + httpClient = http.DefaultClient + } + resp, err := httpClient.Do(req) + if err != nil { + return nil, err + } + + statusCode := resp.StatusCode + if statusCode >= 400 && statusCode <= 505 { + var respBody []byte + respBody, err = readResponseBody(resp) + if err != nil { + return nil, err + } + + if len(respBody) == 0 { + // no error in response body + err = fmt.Errorf("storage: service returned without a response body (%s)", resp.Status) + } else { + // response contains storage service error object, unmarshal + storageErr, errIn := serviceErrFromXML(respBody, resp.StatusCode, resp.Header.Get("x-ms-request-id")) + if err != nil { // error unmarshaling the error response + err = errIn + } + err = storageErr + } + return &storageResponse{ + statusCode: resp.StatusCode, + headers: resp.Header, + body: ioutil.NopCloser(bytes.NewReader(respBody)), /* restore the body */ + }, err + } + + return &storageResponse{ + statusCode: resp.StatusCode, + headers: resp.Header, + body: resp.Body}, nil +} + +func (c Client) execInternalJSON(verb, url string, headers map[string]string, body io.Reader) (*odataResponse, error) { + req, err := http.NewRequest(verb, url, body) + for k, v := range headers { + req.Header.Add(k, v) + } + + httpClient := c.HTTPClient + if httpClient == nil { + httpClient = http.DefaultClient + } + + resp, err := httpClient.Do(req) + if err != nil { + return nil, err + } + + respToRet := &odataResponse{} + respToRet.body = resp.Body + respToRet.statusCode = resp.StatusCode + respToRet.headers = resp.Header + + statusCode := resp.StatusCode + if statusCode >= 400 && statusCode <= 505 { + var respBody []byte + respBody, err = readResponseBody(resp) + if err != nil { + return nil, err + } + + if len(respBody) == 0 { + // no error in response body + err = fmt.Errorf("storage: service returned without a response body (%d)", resp.StatusCode) + return respToRet, err + } + // try unmarshal as odata.error json + err = json.Unmarshal(respBody, &respToRet.odata) + return respToRet, err + } + + return respToRet, nil +} + +func (c Client) createSharedKeyLite(url string, headers map[string]string) (string, error) { + can, err := c.buildCanonicalizedResourceTable(url) + + if err != nil { + return "", err + } + strToSign := headers["x-ms-date"] + "\n" + can + + hmac := c.computeHmac256(strToSign) + return fmt.Sprintf("SharedKeyLite %s:%s", c.accountName, hmac), nil +} + +func (c Client) execTable(verb, url string, headers map[string]string, body io.Reader) (*odataResponse, error) { + var err error + headers["Authorization"], err = c.createSharedKeyLite(url, headers) + if err != nil { + return nil, err + } + + return c.execInternalJSON(verb, url, headers, body) +} + +func readResponseBody(resp *http.Response) ([]byte, error) { + defer resp.Body.Close() + out, err := ioutil.ReadAll(resp.Body) + if err == io.EOF { + err = nil + } + return out, err +} + +func serviceErrFromXML(body []byte, statusCode int, requestID string) (AzureStorageServiceError, error) { + var storageErr AzureStorageServiceError + if err := xml.Unmarshal(body, &storageErr); err != nil { + return 
storageErr, err + } + storageErr.StatusCode = statusCode + storageErr.RequestID = requestID + return storageErr, nil +} + +func (e AzureStorageServiceError) Error() string { + return fmt.Sprintf("storage: service returned error: StatusCode=%d, ErrorCode=%s, ErrorMessage=%s, RequestId=%s, QueryParameterName=%s, QueryParameterValue=%s", + e.StatusCode, e.Code, e.Message, e.RequestID, e.QueryParameterName, e.QueryParameterValue) +} + +// checkRespCode returns UnexpectedStatusError if the given response code is not +// one of the allowed status codes; otherwise nil. +func checkRespCode(respCode int, allowed []int) error { + for _, v := range allowed { + if respCode == v { + return nil + } + } + return UnexpectedStatusCodeError{allowed, respCode} +} diff --git a/vendor/github.com/docker/distribution/vendor/github.com/Azure/azure-sdk-for-go/storage/file.go b/vendor/github.com/docker/distribution/vendor/github.com/Azure/azure-sdk-for-go/storage/file.go new file mode 100644 index 000000000..2397587c8 --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/github.com/Azure/azure-sdk-for-go/storage/file.go @@ -0,0 +1,352 @@ +package storage + +import ( + "encoding/xml" + "fmt" + "net/http" + "net/url" + "strings" +) + +// FileServiceClient contains operations for Microsoft Azure File Service. +type FileServiceClient struct { + client Client +} + +// A Share is an entry in ShareListResponse. +type Share struct { + Name string `xml:"Name"` + Properties ShareProperties `xml:"Properties"` +} + +// ShareProperties contains various properties of a share returned from +// various endpoints like ListShares. +type ShareProperties struct { + LastModified string `xml:"Last-Modified"` + Etag string `xml:"Etag"` + Quota string `xml:"Quota"` +} + +// ShareListResponse contains the response fields from +// ListShares call. +// +// See https://msdn.microsoft.com/en-us/library/azure/dn167009.aspx +type ShareListResponse struct { + XMLName xml.Name `xml:"EnumerationResults"` + Xmlns string `xml:"xmlns,attr"` + Prefix string `xml:"Prefix"` + Marker string `xml:"Marker"` + NextMarker string `xml:"NextMarker"` + MaxResults int64 `xml:"MaxResults"` + Shares []Share `xml:"Shares>Share"` +} + +// ListSharesParameters defines the set of customizable parameters to make a +// List Shares call. +// +// See https://msdn.microsoft.com/en-us/library/azure/dn167009.aspx +type ListSharesParameters struct { + Prefix string + Marker string + Include string + MaxResults uint + Timeout uint +} + +// ShareHeaders contains various properties of a file and is an entry +// in SetShareProperties +type ShareHeaders struct { + Quota string `header:"x-ms-share-quota"` +} + +func (p ListSharesParameters) getParameters() url.Values { + out := url.Values{} + + if p.Prefix != "" { + out.Set("prefix", p.Prefix) + } + if p.Marker != "" { + out.Set("marker", p.Marker) + } + if p.Include != "" { + out.Set("include", p.Include) + } + if p.MaxResults != 0 { + out.Set("maxresults", fmt.Sprintf("%v", p.MaxResults)) + } + if p.Timeout != 0 { + out.Set("timeout", fmt.Sprintf("%v", p.Timeout)) + } + + return out +} + +// pathForFileShare returns the URL path segment for a File Share resource +func pathForFileShare(name string) string { + return fmt.Sprintf("/%s", name) +} + +// ListShares returns the list of shares in a storage account along with +// pagination token and other response details. 
+// +// See https://msdn.microsoft.com/en-us/library/azure/dd179352.aspx +func (f FileServiceClient) ListShares(params ListSharesParameters) (ShareListResponse, error) { + q := mergeParams(params.getParameters(), url.Values{"comp": {"list"}}) + uri := f.client.getEndpoint(fileServiceName, "", q) + headers := f.client.getStandardHeaders() + + var out ShareListResponse + resp, err := f.client.exec("GET", uri, headers, nil) + if err != nil { + return out, err + } + defer resp.body.Close() + + err = xmlUnmarshal(resp.body, &out) + return out, err +} + +// CreateShare operation creates a new share under the specified account. If the +// share with the same name already exists, the operation fails. +// +// See https://msdn.microsoft.com/en-us/library/azure/dn167008.aspx +func (f FileServiceClient) CreateShare(name string) error { + resp, err := f.createShare(name) + if err != nil { + return err + } + defer resp.body.Close() + return checkRespCode(resp.statusCode, []int{http.StatusCreated}) +} + +// ShareExists returns true if a share with given name exists +// on the storage account, otherwise returns false. +func (f FileServiceClient) ShareExists(name string) (bool, error) { + uri := f.client.getEndpoint(fileServiceName, pathForFileShare(name), url.Values{"restype": {"share"}}) + headers := f.client.getStandardHeaders() + + resp, err := f.client.exec("HEAD", uri, headers, nil) + if resp != nil { + defer resp.body.Close() + if resp.statusCode == http.StatusOK || resp.statusCode == http.StatusNotFound { + return resp.statusCode == http.StatusOK, nil + } + } + return false, err +} + +// GetShareURL gets the canonical URL to the share with the specified name in the +// specified container. This method does not create a publicly accessible URL if +// the file is private and this method does not check if the file +// exists. +func (f FileServiceClient) GetShareURL(name string) string { + return f.client.getEndpoint(fileServiceName, pathForFileShare(name), url.Values{}) +} + +// CreateShareIfNotExists creates a new share under the specified account if +// it does not exist. Returns true if container is newly created or false if +// container already exists. +// +// See https://msdn.microsoft.com/en-us/library/azure/dn167008.aspx +func (f FileServiceClient) CreateShareIfNotExists(name string) (bool, error) { + resp, err := f.createShare(name) + if resp != nil { + defer resp.body.Close() + if resp.statusCode == http.StatusCreated || resp.statusCode == http.StatusConflict { + return resp.statusCode == http.StatusCreated, nil + } + } + return false, err +} + +// CreateShare creates a Azure File Share and returns its response +func (f FileServiceClient) createShare(name string) (*storageResponse, error) { + if err := f.checkForStorageEmulator(); err != nil { + return nil, err + } + uri := f.client.getEndpoint(fileServiceName, pathForFileShare(name), url.Values{"restype": {"share"}}) + headers := f.client.getStandardHeaders() + return f.client.exec("PUT", uri, headers, nil) +} + +// GetShareProperties provides various information about the specified +// file. 
See https://msdn.microsoft.com/en-us/library/azure/dn689099.aspx +func (f FileServiceClient) GetShareProperties(name string) (*ShareProperties, error) { + uri := f.client.getEndpoint(fileServiceName, pathForFileShare(name), url.Values{"restype": {"share"}}) + + headers := f.client.getStandardHeaders() + resp, err := f.client.exec("HEAD", uri, headers, nil) + if err != nil { + return nil, err + } + defer resp.body.Close() + + if err := checkRespCode(resp.statusCode, []int{http.StatusOK}); err != nil { + return nil, err + } + + return &ShareProperties{ + LastModified: resp.headers.Get("Last-Modified"), + Etag: resp.headers.Get("Etag"), + Quota: resp.headers.Get("x-ms-share-quota"), + }, nil +} + +// SetShareProperties replaces the ShareHeaders for the specified file. +// +// Some keys may be converted to Camel-Case before sending. All keys +// are returned in lower case by SetShareProperties. HTTP header names +// are case-insensitive so case munging should not matter to other +// applications either. +// +// See https://msdn.microsoft.com/en-us/library/azure/mt427368.aspx +func (f FileServiceClient) SetShareProperties(name string, shareHeaders ShareHeaders) error { + params := url.Values{} + params.Set("restype", "share") + params.Set("comp", "properties") + + uri := f.client.getEndpoint(fileServiceName, pathForFileShare(name), params) + headers := f.client.getStandardHeaders() + + extraHeaders := headersFromStruct(shareHeaders) + + for k, v := range extraHeaders { + headers[k] = v + } + + resp, err := f.client.exec("PUT", uri, headers, nil) + if err != nil { + return err + } + defer resp.body.Close() + + return checkRespCode(resp.statusCode, []int{http.StatusOK}) +} + +// DeleteShare operation marks the specified share for deletion. The share +// and any files contained within it are later deleted during garbage +// collection. +// +// See https://msdn.microsoft.com/en-us/library/azure/dn689090.aspx +func (f FileServiceClient) DeleteShare(name string) error { + resp, err := f.deleteShare(name) + if err != nil { + return err + } + defer resp.body.Close() + return checkRespCode(resp.statusCode, []int{http.StatusAccepted}) +} + +// DeleteShareIfExists operation marks the specified share for deletion if it +// exists. The share and any files contained within it are later deleted during +// garbage collection. Returns true if share existed and deleted with this call, +// false otherwise. +// +// See https://msdn.microsoft.com/en-us/library/azure/dn689090.aspx +func (f FileServiceClient) DeleteShareIfExists(name string) (bool, error) { + resp, err := f.deleteShare(name) + if resp != nil { + defer resp.body.Close() + if resp.statusCode == http.StatusAccepted || resp.statusCode == http.StatusNotFound { + return resp.statusCode == http.StatusAccepted, nil + } + } + return false, err +} + +// deleteShare makes the call to Delete Share operation endpoint and returns +// the response +func (f FileServiceClient) deleteShare(name string) (*storageResponse, error) { + if err := f.checkForStorageEmulator(); err != nil { + return nil, err + } + uri := f.client.getEndpoint(fileServiceName, pathForFileShare(name), url.Values{"restype": {"share"}}) + return f.client.exec("DELETE", uri, f.client.getStandardHeaders(), nil) +} + +// SetShareMetadata replaces the metadata for the specified Share. +// +// Some keys may be converted to Camel-Case before sending. All keys +// are returned in lower case by GetShareMetadata. 
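A sketch of the share lifecycle calls above (editorial example, not vendored code), assuming fileCli came from client.GetFileService(); the share name and quota value are illustrative.

func ensureShare(fileCli storage.FileServiceClient) error {
	created, err := fileCli.CreateShareIfNotExists("exports")
	if err != nil {
		return err
	}
	if created {
		// The quota is sent as the x-ms-share-quota header via ShareHeaders.
		return fileCli.SetShareProperties("exports", storage.ShareHeaders{Quota: "10"})
	}
	return nil
}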
HTTP header names +// are case-insensitive so case munging should not matter to other +// applications either. +// +// See https://msdn.microsoft.com/en-us/library/azure/dd179414.aspx +func (f FileServiceClient) SetShareMetadata(name string, metadata map[string]string, extraHeaders map[string]string) error { + params := url.Values{} + params.Set("restype", "share") + params.Set("comp", "metadata") + + uri := f.client.getEndpoint(fileServiceName, pathForFileShare(name), params) + headers := f.client.getStandardHeaders() + for k, v := range metadata { + headers[userDefinedMetadataHeaderPrefix+k] = v + } + + for k, v := range extraHeaders { + headers[k] = v + } + + resp, err := f.client.exec("PUT", uri, headers, nil) + if err != nil { + return err + } + defer resp.body.Close() + + return checkRespCode(resp.statusCode, []int{http.StatusOK}) +} + +// GetShareMetadata returns all user-defined metadata for the specified share. +// +// All metadata keys will be returned in lower case. (HTTP header +// names are case-insensitive.) +// +// See https://msdn.microsoft.com/en-us/library/azure/dd179414.aspx +func (f FileServiceClient) GetShareMetadata(name string) (map[string]string, error) { + params := url.Values{} + params.Set("restype", "share") + params.Set("comp", "metadata") + + uri := f.client.getEndpoint(fileServiceName, pathForFileShare(name), params) + headers := f.client.getStandardHeaders() + + resp, err := f.client.exec("GET", uri, headers, nil) + if err != nil { + return nil, err + } + defer resp.body.Close() + + if err := checkRespCode(resp.statusCode, []int{http.StatusOK}); err != nil { + return nil, err + } + + metadata := make(map[string]string) + for k, v := range resp.headers { + // Can't trust CanonicalHeaderKey() to munge case + // reliably. "_" is allowed in identifiers: + // https://msdn.microsoft.com/en-us/library/azure/dd179414.aspx + // https://msdn.microsoft.com/library/aa664670(VS.71).aspx + // http://tools.ietf.org/html/rfc7230#section-3.2 + // ...but "_" is considered invalid by + // CanonicalMIMEHeaderKey in + // https://golang.org/src/net/textproto/reader.go?s=14615:14659#L542 + // so k can be "X-Ms-Meta-Foo" or "x-ms-meta-foo_bar". + k = strings.ToLower(k) + if len(v) == 0 || !strings.HasPrefix(k, strings.ToLower(userDefinedMetadataHeaderPrefix)) { + continue + } + // metadata["foo"] = content of the last X-Ms-Meta-Foo header + k = k[len(userDefinedMetadataHeaderPrefix):] + metadata[k] = v[len(v)-1] + } + return metadata, nil +} + +//checkForStorageEmulator determines if the client is setup for use with +//Azure Storage Emulator, and returns a relevant error +func (f FileServiceClient) checkForStorageEmulator() error { + if f.client.accountName == StorageEmulatorAccountName { + return fmt.Errorf("Error: File service is not currently supported by Azure Storage Emulator") + } + return nil +} diff --git a/vendor/github.com/docker/distribution/vendor/github.com/Azure/azure-sdk-for-go/storage/queue.go b/vendor/github.com/docker/distribution/vendor/github.com/Azure/azure-sdk-for-go/storage/queue.go new file mode 100644 index 000000000..3ecf4aca0 --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/github.com/Azure/azure-sdk-for-go/storage/queue.go @@ -0,0 +1,306 @@ +package storage + +import ( + "encoding/xml" + "fmt" + "net/http" + "net/url" + "strconv" + "strings" +) + +const ( + // casing is per Golang's http.Header canonicalizing the header names. 
+ approximateMessagesCountHeader = "X-Ms-Approximate-Messages-Count" + userDefinedMetadataHeaderPrefix = "X-Ms-Meta-" +) + +// QueueServiceClient contains operations for Microsoft Azure Queue Storage +// Service. +type QueueServiceClient struct { + client Client +} + +func pathForQueue(queue string) string { return fmt.Sprintf("/%s", queue) } +func pathForQueueMessages(queue string) string { return fmt.Sprintf("/%s/messages", queue) } +func pathForMessage(queue, name string) string { return fmt.Sprintf("/%s/messages/%s", queue, name) } + +type putMessageRequest struct { + XMLName xml.Name `xml:"QueueMessage"` + MessageText string `xml:"MessageText"` +} + +// PutMessageParameters is the set of options that can be specified for Put Message +// operation. A zero struct does not use any preferences for the request. +type PutMessageParameters struct { + VisibilityTimeout int + MessageTTL int +} + +func (p PutMessageParameters) getParameters() url.Values { + out := url.Values{} + if p.VisibilityTimeout != 0 { + out.Set("visibilitytimeout", strconv.Itoa(p.VisibilityTimeout)) + } + if p.MessageTTL != 0 { + out.Set("messagettl", strconv.Itoa(p.MessageTTL)) + } + return out +} + +// GetMessagesParameters is the set of options that can be specified for Get +// Messages operation. A zero struct does not use any preferences for the +// request. +type GetMessagesParameters struct { + NumOfMessages int + VisibilityTimeout int +} + +func (p GetMessagesParameters) getParameters() url.Values { + out := url.Values{} + if p.NumOfMessages != 0 { + out.Set("numofmessages", strconv.Itoa(p.NumOfMessages)) + } + if p.VisibilityTimeout != 0 { + out.Set("visibilitytimeout", strconv.Itoa(p.VisibilityTimeout)) + } + return out +} + +// PeekMessagesParameters is the set of options that can be specified for Peek +// Message operation. A zero struct does not use any preferences for the +// request. +type PeekMessagesParameters struct { + NumOfMessages int +} + +func (p PeekMessagesParameters) getParameters() url.Values { + out := url.Values{"peekonly": {"true"}} // Required for peek operation + if p.NumOfMessages != 0 { + out.Set("numofmessages", strconv.Itoa(p.NumOfMessages)) + } + return out +} + +// GetMessagesResponse represents a response returned from Get Messages +// operation. +type GetMessagesResponse struct { + XMLName xml.Name `xml:"QueueMessagesList"` + QueueMessagesList []GetMessageResponse `xml:"QueueMessage"` +} + +// GetMessageResponse represents a QueueMessage object returned from Get +// Messages operation response. +type GetMessageResponse struct { + MessageID string `xml:"MessageId"` + InsertionTime string `xml:"InsertionTime"` + ExpirationTime string `xml:"ExpirationTime"` + PopReceipt string `xml:"PopReceipt"` + TimeNextVisible string `xml:"TimeNextVisible"` + DequeueCount int `xml:"DequeueCount"` + MessageText string `xml:"MessageText"` +} + +// PeekMessagesResponse represents a response returned from Peek Messages +// operation. +type PeekMessagesResponse struct { + XMLName xml.Name `xml:"QueueMessagesList"` + QueueMessagesList []PeekMessageResponse `xml:"QueueMessage"` +} + +// PeekMessageResponse represents a QueueMessage object returned from Peek +// Messages operation response.
+type PeekMessageResponse struct { + MessageID string `xml:"MessageId"` + InsertionTime string `xml:"InsertionTime"` + ExpirationTime string `xml:"ExpirationTime"` + DequeueCount int `xml:"DequeueCount"` + MessageText string `xml:"MessageText"` +} + +// QueueMetadataResponse represents user defined metadata and queue +// properties on a specific queue. +// +// See https://msdn.microsoft.com/en-us/library/azure/dd179384.aspx +type QueueMetadataResponse struct { + ApproximateMessageCount int + UserDefinedMetadata map[string]string +} + +// SetMetadata operation sets user-defined metadata on the specified queue. +// Metadata is associated with the queue as name-value pairs. +// +// See https://msdn.microsoft.com/en-us/library/azure/dd179348.aspx +func (c QueueServiceClient) SetMetadata(name string, metadata map[string]string) error { + uri := c.client.getEndpoint(queueServiceName, pathForQueue(name), url.Values{"comp": []string{"metadata"}}) + headers := c.client.getStandardHeaders() + for k, v := range metadata { + headers[userDefinedMetadataHeaderPrefix+k] = v + } + + resp, err := c.client.exec("PUT", uri, headers, nil) + if err != nil { + return err + } + defer resp.body.Close() + + return checkRespCode(resp.statusCode, []int{http.StatusNoContent}) +} + +// GetMetadata operation retrieves user-defined metadata and queue +// properties on the specified queue. Metadata is associated with +// the queue as name-values pairs. +// +// See https://msdn.microsoft.com/en-us/library/azure/dd179384.aspx +// +// Because the way Golang's http client (and http.Header in particular) +// canonicalize header names, the returned metadata names would always +// be all lower case. +func (c QueueServiceClient) GetMetadata(name string) (QueueMetadataResponse, error) { + qm := QueueMetadataResponse{} + qm.UserDefinedMetadata = make(map[string]string) + uri := c.client.getEndpoint(queueServiceName, pathForQueue(name), url.Values{"comp": []string{"metadata"}}) + headers := c.client.getStandardHeaders() + resp, err := c.client.exec("GET", uri, headers, nil) + if err != nil { + return qm, err + } + defer resp.body.Close() + + for k, v := range resp.headers { + if len(v) != 1 { + return qm, fmt.Errorf("Unexpected number of values (%d) in response header '%s'", len(v), k) + } + + value := v[0] + + if k == approximateMessagesCountHeader { + qm.ApproximateMessageCount, err = strconv.Atoi(value) + if err != nil { + return qm, fmt.Errorf("Unexpected value in response header '%s': '%s' ", k, value) + } + } else if strings.HasPrefix(k, userDefinedMetadataHeaderPrefix) { + name := strings.TrimPrefix(k, userDefinedMetadataHeaderPrefix) + qm.UserDefinedMetadata[strings.ToLower(name)] = value + } + } + + return qm, checkRespCode(resp.statusCode, []int{http.StatusOK}) +} + +// CreateQueue operation creates a queue under the given account. +// +// See https://msdn.microsoft.com/en-us/library/azure/dd179342.aspx +func (c QueueServiceClient) CreateQueue(name string) error { + uri := c.client.getEndpoint(queueServiceName, pathForQueue(name), url.Values{}) + headers := c.client.getStandardHeaders() + resp, err := c.client.exec("PUT", uri, headers, nil) + if err != nil { + return err + } + defer resp.body.Close() + return checkRespCode(resp.statusCode, []int{http.StatusCreated}) +} + +// DeleteQueue operation permanently deletes the specified queue. 
+// +// See https://msdn.microsoft.com/en-us/library/azure/dd179436.aspx +func (c QueueServiceClient) DeleteQueue(name string) error { + uri := c.client.getEndpoint(queueServiceName, pathForQueue(name), url.Values{}) + resp, err := c.client.exec("DELETE", uri, c.client.getStandardHeaders(), nil) + if err != nil { + return err + } + defer resp.body.Close() + return checkRespCode(resp.statusCode, []int{http.StatusNoContent}) +} + +// QueueExists returns true if a queue with given name exists. +func (c QueueServiceClient) QueueExists(name string) (bool, error) { + uri := c.client.getEndpoint(queueServiceName, pathForQueue(name), url.Values{"comp": {"metadata"}}) + resp, err := c.client.exec("GET", uri, c.client.getStandardHeaders(), nil) + if resp != nil && (resp.statusCode == http.StatusOK || resp.statusCode == http.StatusNotFound) { + return resp.statusCode == http.StatusOK, nil + } + + return false, err +} + +// PutMessage operation adds a new message to the back of the message queue. +// +// See https://msdn.microsoft.com/en-us/library/azure/dd179346.aspx +func (c QueueServiceClient) PutMessage(queue string, message string, params PutMessageParameters) error { + uri := c.client.getEndpoint(queueServiceName, pathForQueueMessages(queue), params.getParameters()) + req := putMessageRequest{MessageText: message} + body, nn, err := xmlMarshal(req) + if err != nil { + return err + } + headers := c.client.getStandardHeaders() + headers["Content-Length"] = strconv.Itoa(nn) + resp, err := c.client.exec("POST", uri, headers, body) + if err != nil { + return err + } + defer resp.body.Close() + return checkRespCode(resp.statusCode, []int{http.StatusCreated}) +} + +// ClearMessages operation deletes all messages from the specified queue. +// +// See https://msdn.microsoft.com/en-us/library/azure/dd179454.aspx +func (c QueueServiceClient) ClearMessages(queue string) error { + uri := c.client.getEndpoint(queueServiceName, pathForQueueMessages(queue), url.Values{}) + resp, err := c.client.exec("DELETE", uri, c.client.getStandardHeaders(), nil) + if err != nil { + return err + } + defer resp.body.Close() + return checkRespCode(resp.statusCode, []int{http.StatusNoContent}) +} + +// GetMessages operation retrieves one or more messages from the front of the +// queue. +// +// See https://msdn.microsoft.com/en-us/library/azure/dd179474.aspx +func (c QueueServiceClient) GetMessages(queue string, params GetMessagesParameters) (GetMessagesResponse, error) { + var r GetMessagesResponse + uri := c.client.getEndpoint(queueServiceName, pathForQueueMessages(queue), params.getParameters()) + resp, err := c.client.exec("GET", uri, c.client.getStandardHeaders(), nil) + if err != nil { + return r, err + } + defer resp.body.Close() + err = xmlUnmarshal(resp.body, &r) + return r, err +} + +// PeekMessages retrieves one or more messages from the front of the queue, but +// does not alter the visibility of the message. +// +// See https://msdn.microsoft.com/en-us/library/azure/dd179472.aspx +func (c QueueServiceClient) PeekMessages(queue string, params PeekMessagesParameters) (PeekMessagesResponse, error) { + var r PeekMessagesResponse + uri := c.client.getEndpoint(queueServiceName, pathForQueueMessages(queue), params.getParameters()) + resp, err := c.client.exec("GET", uri, c.client.getStandardHeaders(), nil) + if err != nil { + return r, err + } + defer resp.body.Close() + err = xmlUnmarshal(resp.body, &r) + return r, err +} + +// DeleteMessage operation deletes the specified message. 
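A round-trip sketch for the queue operations above, plus the DeleteMessage call documented next (editorial example, not vendored code); queueCli is assumed to come from client.GetQueueService(), and the queue name and payload are illustrative.

func processOne(queueCli storage.QueueServiceClient) error {
	const queue = "events"

	if err := queueCli.PutMessage(queue, "hello", storage.PutMessageParameters{}); err != nil {
		return err
	}

	// Fetch one message and hide it from other consumers for 30 seconds.
	msgs, err := queueCli.GetMessages(queue, storage.GetMessagesParameters{NumOfMessages: 1, VisibilityTimeout: 30})
	if err != nil {
		return err
	}

	for _, m := range msgs.QueueMessagesList {
		// Handle m.MessageText, then acknowledge the message with its pop receipt.
		if err := queueCli.DeleteMessage(queue, m.MessageID, m.PopReceipt); err != nil {
			return err
		}
	}
	return nil
}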
+// +// See https://msdn.microsoft.com/en-us/library/azure/dd179347.aspx +func (c QueueServiceClient) DeleteMessage(queue, messageID, popReceipt string) error { + uri := c.client.getEndpoint(queueServiceName, pathForMessage(queue, messageID), url.Values{ + "popreceipt": {popReceipt}}) + resp, err := c.client.exec("DELETE", uri, c.client.getStandardHeaders(), nil) + if err != nil { + return err + } + defer resp.body.Close() + return checkRespCode(resp.statusCode, []int{http.StatusNoContent}) +} diff --git a/vendor/github.com/docker/distribution/vendor/github.com/Azure/azure-sdk-for-go/storage/table.go b/vendor/github.com/docker/distribution/vendor/github.com/Azure/azure-sdk-for-go/storage/table.go new file mode 100644 index 000000000..39e997503 --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/github.com/Azure/azure-sdk-for-go/storage/table.go @@ -0,0 +1,129 @@ +package storage + +import ( + "bytes" + "encoding/json" + "fmt" + "net/http" + "net/url" +) + +// TableServiceClient contains operations for Microsoft Azure Table Storage +// Service. +type TableServiceClient struct { + client Client +} + +// AzureTable is the typedef of the Azure Table name +type AzureTable string + +const ( + tablesURIPath = "/Tables" +) + +type createTableRequest struct { + TableName string `json:"TableName"` +} + +func pathForTable(table AzureTable) string { return fmt.Sprintf("%s", table) } + +func (c *TableServiceClient) getStandardHeaders() map[string]string { + return map[string]string{ + "x-ms-version": "2015-02-21", + "x-ms-date": currentTimeRfc1123Formatted(), + "Accept": "application/json;odata=nometadata", + "Accept-Charset": "UTF-8", + "Content-Type": "application/json", + } +} + +// QueryTables returns the tables created in the +// *TableServiceClient storage account. +func (c *TableServiceClient) QueryTables() ([]AzureTable, error) { + uri := c.client.getEndpoint(tableServiceName, tablesURIPath, url.Values{}) + + headers := c.getStandardHeaders() + headers["Content-Length"] = "0" + + resp, err := c.client.execTable("GET", uri, headers, nil) + if err != nil { + return nil, err + } + defer resp.body.Close() + + if err := checkRespCode(resp.statusCode, []int{http.StatusOK}); err != nil { + return nil, err + } + + buf := new(bytes.Buffer) + buf.ReadFrom(resp.body) + + var respArray queryTablesResponse + if err := json.Unmarshal(buf.Bytes(), &respArray); err != nil { + return nil, err + } + + s := make([]AzureTable, len(respArray.TableName)) + for i, elem := range respArray.TableName { + s[i] = AzureTable(elem.TableName) + } + + return s, nil +} + +// CreateTable creates the table given the specific +// name. This function fails if the name is not compliant +// with the specification or the tables already exists. +func (c *TableServiceClient) CreateTable(table AzureTable) error { + uri := c.client.getEndpoint(tableServiceName, tablesURIPath, url.Values{}) + + headers := c.getStandardHeaders() + + req := createTableRequest{TableName: string(table)} + buf := new(bytes.Buffer) + + if err := json.NewEncoder(buf).Encode(req); err != nil { + return err + } + + headers["Content-Length"] = fmt.Sprintf("%d", buf.Len()) + + resp, err := c.client.execTable("POST", uri, headers, buf) + + if err != nil { + return err + } + defer resp.body.Close() + + if err := checkRespCode(resp.statusCode, []int{http.StatusCreated}); err != nil { + return err + } + + return nil +} + +// DeleteTable deletes the table given the specific +// name. This function fails if the table is not present. 
+// Be advised: DeleteTable deletes all the entries +// that may be present. +func (c *TableServiceClient) DeleteTable(table AzureTable) error { + uri := c.client.getEndpoint(tableServiceName, tablesURIPath, url.Values{}) + uri += fmt.Sprintf("('%s')", string(table)) + + headers := c.getStandardHeaders() + + headers["Content-Length"] = "0" + + resp, err := c.client.execTable("DELETE", uri, headers, nil) + + if err != nil { + return err + } + defer resp.body.Close() + + if err := checkRespCode(resp.statusCode, []int{http.StatusNoContent}); err != nil { + return err + + } + return nil +} diff --git a/vendor/github.com/docker/distribution/vendor/github.com/Azure/azure-sdk-for-go/storage/table_entities.go b/vendor/github.com/docker/distribution/vendor/github.com/Azure/azure-sdk-for-go/storage/table_entities.go new file mode 100644 index 000000000..1b5919cd1 --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/github.com/Azure/azure-sdk-for-go/storage/table_entities.go @@ -0,0 +1,355 @@ +package storage + +import ( + "bytes" + "encoding/json" + "fmt" + "io" + "net/http" + "net/url" + "reflect" +) + +const ( + partitionKeyNode = "PartitionKey" + rowKeyNode = "RowKey" + tag = "table" + tagIgnore = "-" + continuationTokenPartitionKeyHeader = "X-Ms-Continuation-Nextpartitionkey" + continuationTokenRowHeader = "X-Ms-Continuation-Nextrowkey" + maxTopParameter = 1000 +) + +type queryTablesResponse struct { + TableName []struct { + TableName string `json:"TableName"` + } `json:"value"` +} + +const ( + tableOperationTypeInsert = iota + tableOperationTypeUpdate = iota + tableOperationTypeMerge = iota + tableOperationTypeInsertOrReplace = iota + tableOperationTypeInsertOrMerge = iota +) + +type tableOperation int + +// TableEntity interface specifies +// the functions needed to support +// marshaling and unmarshaling into +// Azure Tables. The struct must only contain +// simple types because Azure Tables do not +// support hierarchy. +type TableEntity interface { + PartitionKey() string + RowKey() string + SetPartitionKey(string) error + SetRowKey(string) error +} + +// ContinuationToken is an opaque (ie not useful to inspect) +// struct that Get... methods can return if there are more +// entries to be returned than the ones already +// returned. Just pass it to the same function to continue +// receiving the remaining entries. +type ContinuationToken struct { + NextPartitionKey string + NextRowKey string +} + +type getTableEntriesResponse struct { + Elements []map[string]interface{} `json:"value"` +} + +// QueryTableEntities queries the specified table and returns the unmarshaled +// entities of type retType. +// top parameter limits the returned entries up to top. Maximum top +// allowed by Azure API is 1000. In case there are more than top entries to be +// returned the function will return a non nil *ContinuationToken. You can call the +// same function again passing the received ContinuationToken as previousContToken +// parameter in order to get the following entries. The query parameter +// is the odata query. To retrieve all the entries pass the empty string. +// The function returns a pointer to a TableEntity slice, the *ContinuationToken +// if there are more entries to be returned and an error in case something went +// wrong. 
+// +// Example: +// entities, cToken, err = tSvc.QueryTableEntities("table", cToken, reflect.TypeOf(entity), 20, "") +func (c *TableServiceClient) QueryTableEntities(tableName AzureTable, previousContToken *ContinuationToken, retType reflect.Type, top int, query string) ([]TableEntity, *ContinuationToken, error) { + if top > maxTopParameter { + return nil, nil, fmt.Errorf("top accepts at maximum %d elements. Requested %d instead", maxTopParameter, top) + } + + uri := c.client.getEndpoint(tableServiceName, pathForTable(tableName), url.Values{}) + uri += fmt.Sprintf("?$top=%d", top) + if query != "" { + uri += fmt.Sprintf("&$filter=%s", url.QueryEscape(query)) + } + + if previousContToken != nil { + uri += fmt.Sprintf("&NextPartitionKey=%s&NextRowKey=%s", previousContToken.NextPartitionKey, previousContToken.NextRowKey) + } + + headers := c.getStandardHeaders() + + headers["Content-Length"] = "0" + + resp, err := c.client.execTable("GET", uri, headers, nil) + + if err != nil { + return nil, nil, err + } + + contToken := extractContinuationTokenFromHeaders(resp.headers) + + if err != nil { + return nil, contToken, err + } + defer resp.body.Close() + + if err := checkRespCode(resp.statusCode, []int{http.StatusOK}); err != nil { + return nil, contToken, err + } + + retEntries, err := deserializeEntity(retType, resp.body) + if err != nil { + return nil, contToken, err + } + + return retEntries, contToken, nil +} + +// InsertEntity inserts an entity in the specified table. +// The function fails if there is an entity with the same +// PartitionKey and RowKey in the table. +func (c *TableServiceClient) InsertEntity(table AzureTable, entity TableEntity) error { + var err error + + if sc, err := c.execTable(table, entity, false, "POST"); err != nil { + return checkRespCode(sc, []int{http.StatusCreated}) + } + + return err +} + +func (c *TableServiceClient) execTable(table AzureTable, entity TableEntity, specifyKeysInURL bool, method string) (int, error) { + uri := c.client.getEndpoint(tableServiceName, pathForTable(table), url.Values{}) + if specifyKeysInURL { + uri += fmt.Sprintf("(PartitionKey='%s',RowKey='%s')", url.QueryEscape(entity.PartitionKey()), url.QueryEscape(entity.RowKey())) + } + + headers := c.getStandardHeaders() + + var buf bytes.Buffer + + if err := injectPartitionAndRowKeys(entity, &buf); err != nil { + return 0, err + } + + headers["Content-Length"] = fmt.Sprintf("%d", buf.Len()) + + var err error + var resp *odataResponse + + resp, err = c.client.execTable(method, uri, headers, &buf) + + if err != nil { + return 0, err + } + + defer resp.body.Close() + + return resp.statusCode, nil +} + +// UpdateEntity updates the contents of an entity with the +// one passed as parameter. The function fails if there is no entity +// with the same PartitionKey and RowKey in the table. +func (c *TableServiceClient) UpdateEntity(table AzureTable, entity TableEntity) error { + var err error + + if sc, err := c.execTable(table, entity, true, "PUT"); err != nil { + return checkRespCode(sc, []int{http.StatusNoContent}) + } + return err +} + +// MergeEntity merges the contents of an entity with the +// one passed as parameter. +// The function fails if there is no entity +// with the same PartitionKey and RowKey in the table. 
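A sketch of a concrete TableEntity implementation and an insert (editorial example, not vendored code), assuming tableCli came from client.GetTableService(); the struct, its fields and the table name are illustrative. The `table:"-"` tag keeps the key fields out of the serialized payload, since injectPartitionAndRowKeys further below adds PartitionKey and RowKey explicitly.

type demoRow struct {
	PKey  string `json:"-" table:"-"`
	RKey  string `json:"-" table:"-"`
	Image string `json:"Image"`
}

func (d *demoRow) PartitionKey() string           { return d.PKey }
func (d *demoRow) RowKey() string                 { return d.RKey }
func (d *demoRow) SetPartitionKey(v string) error { d.PKey = v; return nil }
func (d *demoRow) SetRowKey(v string) error       { d.RKey = v; return nil }

func insertRow(tableCli storage.TableServiceClient) error {
	row := &demoRow{PKey: "default", RKey: "web", Image: "nginx:1.13"}
	return tableCli.InsertEntity(storage.AzureTable("deployments"), row)
}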
+func (c *TableServiceClient) MergeEntity(table AzureTable, entity TableEntity) error { + var err error + + if sc, err := c.execTable(table, entity, true, "MERGE"); err != nil { + return checkRespCode(sc, []int{http.StatusNoContent}) + } + return err +} + +// DeleteEntityWithoutCheck deletes the entity matching by +// PartitionKey and RowKey. There is no check on IfMatch +// parameter so the entity is always deleted. +// The function fails if there is no entity +// with the same PartitionKey and RowKey in the table. +func (c *TableServiceClient) DeleteEntityWithoutCheck(table AzureTable, entity TableEntity) error { + return c.DeleteEntity(table, entity, "*") +} + +// DeleteEntity deletes the entity matching by +// PartitionKey, RowKey and ifMatch field. +// The function fails if there is no entity +// with the same PartitionKey and RowKey in the table or +// the ifMatch is different. +func (c *TableServiceClient) DeleteEntity(table AzureTable, entity TableEntity, ifMatch string) error { + uri := c.client.getEndpoint(tableServiceName, pathForTable(table), url.Values{}) + uri += fmt.Sprintf("(PartitionKey='%s',RowKey='%s')", url.QueryEscape(entity.PartitionKey()), url.QueryEscape(entity.RowKey())) + + headers := c.getStandardHeaders() + + headers["Content-Length"] = "0" + headers["If-Match"] = ifMatch + + resp, err := c.client.execTable("DELETE", uri, headers, nil) + + if err != nil { + return err + } + defer resp.body.Close() + + if err := checkRespCode(resp.statusCode, []int{http.StatusNoContent}); err != nil { + return err + } + + return nil +} + +// InsertOrReplaceEntity inserts an entity in the specified table +// or replaced the existing one. +func (c *TableServiceClient) InsertOrReplaceEntity(table AzureTable, entity TableEntity) error { + var err error + + if sc, err := c.execTable(table, entity, true, "PUT"); err != nil { + return checkRespCode(sc, []int{http.StatusNoContent}) + } + return err +} + +// InsertOrMergeEntity inserts an entity in the specified table +// or merges the existing one. +func (c *TableServiceClient) InsertOrMergeEntity(table AzureTable, entity TableEntity) error { + var err error + + if sc, err := c.execTable(table, entity, true, "MERGE"); err != nil { + return checkRespCode(sc, []int{http.StatusNoContent}) + } + return err +} + +func injectPartitionAndRowKeys(entity TableEntity, buf *bytes.Buffer) error { + if err := json.NewEncoder(buf).Encode(entity); err != nil { + return err + } + + dec := make(map[string]interface{}) + if err := json.NewDecoder(buf).Decode(&dec); err != nil { + return err + } + + // Inject PartitionKey and RowKey + dec[partitionKeyNode] = entity.PartitionKey() + dec[rowKeyNode] = entity.RowKey() + + // Remove tagged fields + // The tag is defined in the const section + // This is useful to avoid storing the PartitionKey and RowKey twice. 
+ numFields := reflect.ValueOf(entity).Elem().NumField() + for i := 0; i < numFields; i++ { + f := reflect.ValueOf(entity).Elem().Type().Field(i) + + if f.Tag.Get(tag) == tagIgnore { + // we must look for its JSON name in the dictionary + // as the user can rename it using a tag + jsonName := f.Name + if f.Tag.Get("json") != "" { + jsonName = f.Tag.Get("json") + } + delete(dec, jsonName) + } + } + + buf.Reset() + + if err := json.NewEncoder(buf).Encode(&dec); err != nil { + return err + } + + return nil +} + +func deserializeEntity(retType reflect.Type, reader io.Reader) ([]TableEntity, error) { + buf := new(bytes.Buffer) + + var ret getTableEntriesResponse + if err := json.NewDecoder(reader).Decode(&ret); err != nil { + return nil, err + } + + tEntries := make([]TableEntity, len(ret.Elements)) + + for i, entry := range ret.Elements { + + buf.Reset() + if err := json.NewEncoder(buf).Encode(entry); err != nil { + return nil, err + } + + dec := make(map[string]interface{}) + if err := json.NewDecoder(buf).Decode(&dec); err != nil { + return nil, err + } + + var pKey, rKey string + // strip pk and rk + for key, val := range dec { + switch key { + case partitionKeyNode: + pKey = val.(string) + case rowKeyNode: + rKey = val.(string) + } + } + + delete(dec, partitionKeyNode) + delete(dec, rowKeyNode) + + buf.Reset() + if err := json.NewEncoder(buf).Encode(dec); err != nil { + return nil, err + } + + // Create an empty retType instance + tEntries[i] = reflect.New(retType.Elem()).Interface().(TableEntity) + // Populate it with the values + if err := json.NewDecoder(buf).Decode(&tEntries[i]); err != nil { + return nil, err + } + + // Reset PartitionKey and RowKey + tEntries[i].SetPartitionKey(pKey) + tEntries[i].SetRowKey(rKey) + } + + return tEntries, nil +} + +func extractContinuationTokenFromHeaders(h http.Header) *ContinuationToken { + ct := ContinuationToken{h.Get(continuationTokenPartitionKeyHeader), h.Get(continuationTokenRowHeader)} + + if ct.NextPartitionKey != "" && ct.NextRowKey != "" { + return &ct + } + return nil +} diff --git a/vendor/github.com/docker/distribution/vendor/github.com/Azure/azure-sdk-for-go/storage/util.go b/vendor/github.com/docker/distribution/vendor/github.com/Azure/azure-sdk-for-go/storage/util.go new file mode 100644 index 000000000..d71c6ce55 --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/github.com/Azure/azure-sdk-for-go/storage/util.go @@ -0,0 +1,85 @@ +package storage + +import ( + "bytes" + "crypto/hmac" + "crypto/sha256" + "encoding/base64" + "encoding/xml" + "fmt" + "io" + "io/ioutil" + "net/http" + "net/url" + "reflect" + "time" +) + +func (c Client) computeHmac256(message string) string { + h := hmac.New(sha256.New, c.accountKey) + h.Write([]byte(message)) + return base64.StdEncoding.EncodeToString(h.Sum(nil)) +} + +func currentTimeRfc1123Formatted() string { + return timeRfc1123Formatted(time.Now().UTC()) +} + +func timeRfc1123Formatted(t time.Time) string { + return t.Format(http.TimeFormat) +} + +func mergeParams(v1, v2 url.Values) url.Values { + out := url.Values{} + for k, v := range v1 { + out[k] = v + } + for k, v := range v2 { + vals, ok := out[k] + if ok { + vals = append(vals, v...)
+ out[k] = vals + } else { + out[k] = v + } + } + return out +} + +func prepareBlockListRequest(blocks []Block) string { + s := `<?xml version="1.0" encoding="utf-8"?><BlockList>` + for _, v := range blocks { + s += fmt.Sprintf("<%s>%s</%s>", v.Status, v.ID, v.Status) + } + s += `</BlockList>` + return s +} + +func xmlUnmarshal(body io.Reader, v interface{}) error { + data, err := ioutil.ReadAll(body) + if err != nil { + return err + } + return xml.Unmarshal(data, v) +} + +func xmlMarshal(v interface{}) (io.Reader, int, error) { + b, err := xml.Marshal(v) + if err != nil { + return nil, 0, err + } + return bytes.NewReader(b), len(b), nil +} + +func headersFromStruct(v interface{}) map[string]string { + headers := make(map[string]string) + value := reflect.ValueOf(v) + for i := 0; i < value.NumField(); i++ { + key := value.Type().Field(i).Tag.Get("header") + val := value.Field(i).String() + if val != "" { + headers[key] = val + } + } + return headers +} diff --git a/vendor/github.com/docker/distribution/vendor/github.com/Sirupsen/logrus/.gitignore b/vendor/github.com/docker/distribution/vendor/github.com/Sirupsen/logrus/.gitignore new file mode 100644 index 000000000..66be63a00 --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/github.com/Sirupsen/logrus/.gitignore @@ -0,0 +1 @@ +logrus diff --git a/vendor/github.com/docker/distribution/vendor/github.com/Sirupsen/logrus/.travis.yml b/vendor/github.com/docker/distribution/vendor/github.com/Sirupsen/logrus/.travis.yml new file mode 100644 index 000000000..2d8c08661 --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/github.com/Sirupsen/logrus/.travis.yml @@ -0,0 +1,8 @@ +language: go +go: + - 1.2 + - 1.3 + - 1.4 + - tip +install: + - go get -t ./... diff --git a/vendor/github.com/docker/distribution/vendor/github.com/Sirupsen/logrus/CHANGELOG.md b/vendor/github.com/docker/distribution/vendor/github.com/Sirupsen/logrus/CHANGELOG.md new file mode 100644 index 000000000..eb72bff93 --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/github.com/Sirupsen/logrus/CHANGELOG.md @@ -0,0 +1,7 @@ +# 0.7.3 + +formatter/\*: allow configuration of timestamp layout + +# 0.7.2 + +formatter/text: Add configuration option for time format (#158) diff --git a/vendor/github.com/docker/distribution/vendor/github.com/Sirupsen/logrus/LICENSE b/vendor/github.com/docker/distribution/vendor/github.com/Sirupsen/logrus/LICENSE new file mode 100644 index 000000000..f090cb42f --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/github.com/Sirupsen/logrus/LICENSE @@ -0,0 +1,21 @@ +The MIT License (MIT) + +Copyright (c) 2014 Simon Eskildsen + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. diff --git a/vendor/github.com/docker/distribution/vendor/github.com/Sirupsen/logrus/README.md b/vendor/github.com/docker/distribution/vendor/github.com/Sirupsen/logrus/README.md new file mode 100644 index 000000000..d55f90924 --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/github.com/Sirupsen/logrus/README.md @@ -0,0 +1,349 @@ +# Logrus :walrus: [![Build Status](https://travis-ci.org/Sirupsen/logrus.svg?branch=master)](https://travis-ci.org/Sirupsen/logrus) [![godoc reference](https://godoc.org/github.com/Sirupsen/logrus?status.png)][godoc] + +Logrus is a structured logger for Go (golang), completely API compatible with +the standard library logger. [Godoc][godoc]. **Please note the Logrus API is not +yet stable (pre 1.0). Logrus itself is completely stable and has been used in +many large deployments. The core API is unlikely to change much but please +version control your Logrus to make sure you aren't fetching latest `master` on +every build.** + +Nicely color-coded in development (when a TTY is attached, otherwise just +plain text): + +![Colored](http://i.imgur.com/PY7qMwd.png) + +With `log.Formatter = new(logrus.JSONFormatter)`, for easy parsing by logstash +or Splunk: + +```json +{"animal":"walrus","level":"info","msg":"A group of walrus emerges from the +ocean","size":10,"time":"2014-03-10 19:57:38.562264131 -0400 EDT"} + +{"level":"warning","msg":"The group's number increased tremendously!", +"number":122,"omg":true,"time":"2014-03-10 19:57:38.562471297 -0400 EDT"} + +{"animal":"walrus","level":"info","msg":"A giant walrus appears!", +"size":10,"time":"2014-03-10 19:57:38.562500591 -0400 EDT"} + +{"animal":"walrus","level":"info","msg":"Tremendously sized cow enters the ocean.", +"size":9,"time":"2014-03-10 19:57:38.562527896 -0400 EDT"} + +{"level":"fatal","msg":"The ice breaks!","number":100,"omg":true, +"time":"2014-03-10 19:57:38.562543128 -0400 EDT"} +``` + +With the default `log.Formatter = new(logrus.TextFormatter)` when a TTY is not +attached, the output is compatible with the +[logfmt](http://godoc.org/github.com/kr/logfmt) format: + +```text +time="2015-03-26T01:27:38-04:00" level=debug msg="Started observing beach" animal=walrus number=8 +time="2015-03-26T01:27:38-04:00" level=info msg="A group of walrus emerges from the ocean" animal=walrus size=10 +time="2015-03-26T01:27:38-04:00" level=warning msg="The group's number increased tremendously!" number=122 omg=true +time="2015-03-26T01:27:38-04:00" level=debug msg="Temperature changes" temperature=-4 +time="2015-03-26T01:27:38-04:00" level=panic msg="It's over 9000!" animal=orca size=9009 +time="2015-03-26T01:27:38-04:00" level=fatal msg="The ice breaks!" 
err=&{0x2082280c0 map[animal:orca size:9009] 2015-03-26 01:27:38.441574009 -0400 EDT panic It's over 9000!} number=100 omg=true +exit status 1 +``` + +#### Example + +The simplest way to use Logrus is simply the package-level exported logger: + +```go +package main + +import ( + log "github.com/Sirupsen/logrus" +) + +func main() { + log.WithFields(log.Fields{ + "animal": "walrus", + }).Info("A walrus appears") +} +``` + +Note that it's completely api-compatible with the stdlib logger, so you can +replace your `log` imports everywhere with `log "github.com/Sirupsen/logrus"` +and you'll now have the flexibility of Logrus. You can customize it all you +want: + +```go +package main + +import ( + "os" + log "github.com/Sirupsen/logrus" + "github.com/Sirupsen/logrus/hooks/airbrake" +) + +func init() { + // Log as JSON instead of the default ASCII formatter. + log.SetFormatter(&log.JSONFormatter{}) + + // Use the Airbrake hook to report errors that have Error severity or above to + // an exception tracker. You can create custom hooks, see the Hooks section. + log.AddHook(airbrake.NewHook("https://example.com", "xyz", "development")) + + // Output to stderr instead of stdout, could also be a file. + log.SetOutput(os.Stderr) + + // Only log the warning severity or above. + log.SetLevel(log.WarnLevel) +} + +func main() { + log.WithFields(log.Fields{ + "animal": "walrus", + "size": 10, + }).Info("A group of walrus emerges from the ocean") + + log.WithFields(log.Fields{ + "omg": true, + "number": 122, + }).Warn("The group's number increased tremendously!") + + log.WithFields(log.Fields{ + "omg": true, + "number": 100, + }).Fatal("The ice breaks!") + + // A common pattern is to re-use fields between logging statements by re-using + // the logrus.Entry returned from WithFields() + contextLogger := log.WithFields(log.Fields{ + "common": "this is a common field", + "other": "I also should be logged always", + }) + + contextLogger.Info("I'll be logged with common and other field") + contextLogger.Info("Me too") +} +``` + +For more advanced usage such as logging to multiple locations from the same +application, you can also create an instance of the `logrus` Logger: + +```go +package main + +import ( + "github.com/Sirupsen/logrus" +) + +// Create a new instance of the logger. You can have any number of instances. +var log = logrus.New() + +func main() { + // The API for setting attributes is a little different than the package level + // exported logger. See Godoc. + log.Out = os.Stderr + + log.WithFields(logrus.Fields{ + "animal": "walrus", + "size": 10, + }).Info("A group of walrus emerges from the ocean") +} +``` + +#### Fields + +Logrus encourages careful, structured logging though logging fields instead of +long, unparseable error messages. For example, instead of: `log.Fatalf("Failed +to send event %s to topic %s with key %d")`, you should log the much more +discoverable: + +```go +log.WithFields(log.Fields{ + "event": event, + "topic": topic, + "key": key, +}).Fatal("Failed to send event") +``` + +We've found this API forces you to think about logging in a way that produces +much more useful logging messages. We've been in countless situations where just +a single added field to a log statement that was already there would've saved us +hours. The `WithFields` call is optional. + +In general, with Logrus using any of the `printf`-family functions should be +seen as a hint you should add a field, however, you can still use the +`printf`-family functions with Logrus. 
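A minimal sketch of that idea, assuming a hypothetical `event`/`topic`/`key` trio and using only the `WithFields`/`Fields` API shown in this README, might look like:

```go
package main

import (
	log "github.com/Sirupsen/logrus"
)

func main() {
	event, topic, key := "signup", "users", 42

	// printf-style: the values are baked into one opaque message string.
	log.Errorf("Failed to send event %s to topic %s with key %d", event, topic, key)

	// Structured: the same values become discrete fields that a log
	// aggregator can filter and index on.
	log.WithFields(log.Fields{
		"event": event,
		"topic": topic,
		"key":   key,
	}).Error("Failed to send event")
}
```

Both calls log at the same level; only the second one yields fields that survive as structured data.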
+ +#### Hooks + +You can add hooks for logging levels. For example to send errors to an exception +tracking service on `Error`, `Fatal` and `Panic`, info to StatsD or log to +multiple places simultaneously, e.g. syslog. + +Logrus comes with [built-in hooks](hooks/). Add those, or your custom hook, in +`init`: + +```go +import ( + log "github.com/Sirupsen/logrus" + "github.com/Sirupsen/logrus/hooks/airbrake" + "github.com/Sirupsen/logrus/hooks/syslog" + "log/syslog" +) + +func init() { + log.AddHook(airbrake.NewHook("https://example.com", "xyz", "development")) + + hook, err := logrus_syslog.NewSyslogHook("udp", "localhost:514", syslog.LOG_INFO, "") + if err != nil { + log.Error("Unable to connect to local syslog daemon") + } else { + log.AddHook(hook) + } +} +``` + + +| Hook | Description | +| ----- | ----------- | +| [Airbrake](https://github.com/Sirupsen/logrus/blob/master/hooks/airbrake/airbrake.go) | Send errors to an exception tracking service compatible with the Airbrake API. Uses [`airbrake-go`](https://github.com/tobi/airbrake-go) behind the scenes. | +| [Papertrail](https://github.com/Sirupsen/logrus/blob/master/hooks/papertrail/papertrail.go) | Send errors to the Papertrail hosted logging service via UDP. | +| [Syslog](https://github.com/Sirupsen/logrus/blob/master/hooks/syslog/syslog.go) | Send errors to remote syslog server. Uses standard library `log/syslog` behind the scenes. | +| [BugSnag](https://github.com/Sirupsen/logrus/blob/master/hooks/bugsnag/bugsnag.go) | Send errors to the Bugsnag exception tracking service. | +| [Hiprus](https://github.com/nubo/hiprus) | Send errors to a channel in hipchat. | +| [Logrusly](https://github.com/sebest/logrusly) | Send logs to [Loggly](https://www.loggly.com/) | +| [Slackrus](https://github.com/johntdyer/slackrus) | Hook for Slack chat. | +| [Journalhook](https://github.com/wercker/journalhook) | Hook for logging to `systemd-journald` | +| [Graylog](https://github.com/gemnasium/logrus-hooks/tree/master/graylog) | Hook for logging to [Graylog](http://graylog2.org/) | + +#### Level logging + +Logrus has six logging levels: Debug, Info, Warning, Error, Fatal and Panic. + +```go +log.Debug("Useful debugging information.") +log.Info("Something noteworthy happened!") +log.Warn("You should probably take a look at this.") +log.Error("Something failed but I'm not quitting.") +// Calls os.Exit(1) after logging +log.Fatal("Bye.") +// Calls panic() after logging +log.Panic("I'm bailing.") +``` + +You can set the logging level on a `Logger`, then it will only log entries with +that severity or anything above it: + +```go +// Will log anything that is info or above (warn, error, fatal, panic). Default. +log.SetLevel(log.InfoLevel) +``` + +It may be useful to set `log.Level = logrus.DebugLevel` in a debug or verbose +environment if your application has that. + +#### Entries + +Besides the fields added with `WithField` or `WithFields` some fields are +automatically added to all logging events: + +1. `time`. The timestamp when the entry was created. +2. `msg`. The logging message passed to `{Info,Warn,Error,Fatal,Panic}` after + the `AddFields` call. E.g. `Failed to send event.` +3. `level`. The logging level. E.g. `info`. + +#### Environments + +Logrus has no notion of environment. + +If you wish for hooks and formatters to only be used in specific environments, +you should handle that yourself. 
For example, if your application has a global +variable `Environment`, which is a string representation of the environment you +could do: + +```go +import ( + log "github.com/Sirupsen/logrus" +) + +func init() { + // do something here to set environment depending on an environment variable + // or command-line flag + if Environment == "production" { + log.SetFormatter(&log.JSONFormatter{}) + } else { + // The TextFormatter is default, you don't actually have to do this. + log.SetFormatter(&log.TextFormatter{}) + } +} +``` + +This configuration is how `logrus` was intended to be used, but JSON in +production is mostly only useful if you do log aggregation with tools like +Splunk or Logstash. + +#### Formatters + +The built-in logging formatters are: + +* `logrus.TextFormatter`. Logs the event in colors if stdout is a tty, otherwise + without colors. + * *Note:* to force colored output when there is no TTY, set the `ForceColors` + field to `true`. To force no colored output even if there is a TTY set the + `DisableColors` field to `true` +* `logrus.JSONFormatter`. Logs fields as JSON. +* `logrus_logstash.LogstashFormatter`. Logs fields as Logstash Events (http://logstash.net). + + ```go + logrus.SetFormatter(&logrus_logstash.LogstashFormatter{Type: "application_name"}) + ``` + +Third party logging formatters: + +* [`zalgo`](https://github.com/aybabtme/logzalgo): invoking the P͉̫o̳̼̊w̖͈̰͎e̬͔̭͂r͚̼̹̲ ̫͓͉̳͈ō̠͕͖̚f̝͍̠ ͕̲̞͖͑Z̖̫̤̫ͪa͉̬͈̗l͖͎g̳̥o̰̥̅!̣͔̲̻͊̄ ̙̘̦̹̦. + +You can define your formatter by implementing the `Formatter` interface, +requiring a `Format` method. `Format` takes an `*Entry`. `entry.Data` is a +`Fields` type (`map[string]interface{}`) with all your fields as well as the +default ones (see Entries section above): + +```go +type MyJSONFormatter struct { +} + +log.SetFormatter(new(MyJSONFormatter)) + +func (f *MyJSONFormatter) Format(entry *Entry) ([]byte, error) { + // Note this doesn't include Time, Level and Message which are available on + // the Entry. Consult `godoc` on information about those fields or read the + // source of the official loggers. + serialized, err := json.Marshal(entry.Data) + if err != nil { + return nil, fmt.Errorf("Failed to marshal fields to JSON, %v", err) + } + return append(serialized, '\n'), nil +} +``` + +#### Logger as an `io.Writer` + +Logrus can be transformed into an `io.Writer`. That writer is the end of an `io.Pipe` and it is your responsibility to close it. + +```go +w := logger.Writer() +defer w.Close() + +srv := http.Server{ + // create a stdlib log.Logger that writes to + // logrus.Logger. + ErrorLog: log.New(w, "", 0), +} +``` + +Each line written to that writer will be printed the usual way, using formatters +and hooks. The level for those entries is `info`. + +#### Rotation + +Log rotation is not provided with Logrus. Log rotation should be done by an +external program (like `logrotate(8)`) that can compress and delete old log +entries. It should not be a feature of the application-level logger. + + +[godoc]: https://godoc.org/github.com/Sirupsen/logrus diff --git a/vendor/github.com/docker/distribution/vendor/github.com/Sirupsen/logrus/entry.go b/vendor/github.com/docker/distribution/vendor/github.com/Sirupsen/logrus/entry.go new file mode 100644 index 000000000..17fe6f707 --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/github.com/Sirupsen/logrus/entry.go @@ -0,0 +1,252 @@ +package logrus + +import ( + "bytes" + "fmt" + "io" + "os" + "time" +) + +// An entry is the final or intermediate Logrus logging entry.
It contains all +// the fields passed with WithField{,s}. It's finally logged when Debug, Info, +// Warn, Error, Fatal or Panic is called on it. These objects can be reused and +// passed around as much as you wish to avoid field duplication. +type Entry struct { + Logger *Logger + + // Contains all the fields set by the user. + Data Fields + + // Time at which the log entry was created + Time time.Time + + // Level the log entry was logged at: Debug, Info, Warn, Error, Fatal or Panic + Level Level + + // Message passed to Debug, Info, Warn, Error, Fatal or Panic + Message string +} + +func NewEntry(logger *Logger) *Entry { + return &Entry{ + Logger: logger, + // Default is three fields, give a little extra room + Data: make(Fields, 5), + } +} + +// Returns a reader for the entry, which is a proxy to the formatter. +func (entry *Entry) Reader() (*bytes.Buffer, error) { + serialized, err := entry.Logger.Formatter.Format(entry) + return bytes.NewBuffer(serialized), err +} + +// Returns the string representation from the reader and ultimately the +// formatter. +func (entry *Entry) String() (string, error) { + reader, err := entry.Reader() + if err != nil { + return "", err + } + + return reader.String(), err +} + +// Add a single field to the Entry. +func (entry *Entry) WithField(key string, value interface{}) *Entry { + return entry.WithFields(Fields{key: value}) +} + +// Add a map of fields to the Entry. +func (entry *Entry) WithFields(fields Fields) *Entry { + data := Fields{} + for k, v := range entry.Data { + data[k] = v + } + for k, v := range fields { + data[k] = v + } + return &Entry{Logger: entry.Logger, Data: data} +} + +func (entry *Entry) log(level Level, msg string) { + entry.Time = time.Now() + entry.Level = level + entry.Message = msg + + if err := entry.Logger.Hooks.Fire(level, entry); err != nil { + entry.Logger.mu.Lock() + fmt.Fprintf(os.Stderr, "Failed to fire hook: %v\n", err) + entry.Logger.mu.Unlock() + } + + reader, err := entry.Reader() + if err != nil { + entry.Logger.mu.Lock() + fmt.Fprintf(os.Stderr, "Failed to obtain reader, %v\n", err) + entry.Logger.mu.Unlock() + } + + entry.Logger.mu.Lock() + defer entry.Logger.mu.Unlock() + + _, err = io.Copy(entry.Logger.Out, reader) + if err != nil { + fmt.Fprintf(os.Stderr, "Failed to write to log, %v\n", err) + } + + // To avoid Entry#log() returning a value that only would make sense for + // panic() to use in Entry#Panic(), we avoid the allocation by checking + // directly here. + if level <= PanicLevel { + panic(entry) + } +} + +func (entry *Entry) Debug(args ...interface{}) { + if entry.Logger.Level >= DebugLevel { + entry.log(DebugLevel, fmt.Sprint(args...)) + } +} + +func (entry *Entry) Print(args ...interface{}) { + entry.Info(args...) +} + +func (entry *Entry) Info(args ...interface{}) { + if entry.Logger.Level >= InfoLevel { + entry.log(InfoLevel, fmt.Sprint(args...)) + } +} + +func (entry *Entry) Warn(args ...interface{}) { + if entry.Logger.Level >= WarnLevel { + entry.log(WarnLevel, fmt.Sprint(args...)) + } +} + +func (entry *Entry) Warning(args ...interface{}) { + entry.Warn(args...) 
+} + +func (entry *Entry) Error(args ...interface{}) { + if entry.Logger.Level >= ErrorLevel { + entry.log(ErrorLevel, fmt.Sprint(args...)) + } +} + +func (entry *Entry) Fatal(args ...interface{}) { + if entry.Logger.Level >= FatalLevel { + entry.log(FatalLevel, fmt.Sprint(args...)) + } + os.Exit(1) +} + +func (entry *Entry) Panic(args ...interface{}) { + if entry.Logger.Level >= PanicLevel { + entry.log(PanicLevel, fmt.Sprint(args...)) + } + panic(fmt.Sprint(args...)) +} + +// Entry Printf family functions + +func (entry *Entry) Debugf(format string, args ...interface{}) { + if entry.Logger.Level >= DebugLevel { + entry.Debug(fmt.Sprintf(format, args...)) + } +} + +func (entry *Entry) Infof(format string, args ...interface{}) { + if entry.Logger.Level >= InfoLevel { + entry.Info(fmt.Sprintf(format, args...)) + } +} + +func (entry *Entry) Printf(format string, args ...interface{}) { + entry.Infof(format, args...) +} + +func (entry *Entry) Warnf(format string, args ...interface{}) { + if entry.Logger.Level >= WarnLevel { + entry.Warn(fmt.Sprintf(format, args...)) + } +} + +func (entry *Entry) Warningf(format string, args ...interface{}) { + entry.Warnf(format, args...) +} + +func (entry *Entry) Errorf(format string, args ...interface{}) { + if entry.Logger.Level >= ErrorLevel { + entry.Error(fmt.Sprintf(format, args...)) + } +} + +func (entry *Entry) Fatalf(format string, args ...interface{}) { + if entry.Logger.Level >= FatalLevel { + entry.Fatal(fmt.Sprintf(format, args...)) + } +} + +func (entry *Entry) Panicf(format string, args ...interface{}) { + if entry.Logger.Level >= PanicLevel { + entry.Panic(fmt.Sprintf(format, args...)) + } +} + +// Entry Println family functions + +func (entry *Entry) Debugln(args ...interface{}) { + if entry.Logger.Level >= DebugLevel { + entry.Debug(entry.sprintlnn(args...)) + } +} + +func (entry *Entry) Infoln(args ...interface{}) { + if entry.Logger.Level >= InfoLevel { + entry.Info(entry.sprintlnn(args...)) + } +} + +func (entry *Entry) Println(args ...interface{}) { + entry.Infoln(args...) +} + +func (entry *Entry) Warnln(args ...interface{}) { + if entry.Logger.Level >= WarnLevel { + entry.Warn(entry.sprintlnn(args...)) + } +} + +func (entry *Entry) Warningln(args ...interface{}) { + entry.Warnln(args...) +} + +func (entry *Entry) Errorln(args ...interface{}) { + if entry.Logger.Level >= ErrorLevel { + entry.Error(entry.sprintlnn(args...)) + } +} + +func (entry *Entry) Fatalln(args ...interface{}) { + if entry.Logger.Level >= FatalLevel { + entry.Fatal(entry.sprintlnn(args...)) + } +} + +func (entry *Entry) Panicln(args ...interface{}) { + if entry.Logger.Level >= PanicLevel { + entry.Panic(entry.sprintlnn(args...)) + } +} + +// Sprintlnn => Sprint no newline. This is to get the behavior of how +// fmt.Sprintln where spaces are always added between operands, regardless of +// their type. Instead of vendoring the Sprintln implementation to spare a +// string allocation, we do the simplest thing. +func (entry *Entry) sprintlnn(args ...interface{}) string { + msg := fmt.Sprintln(args...) 
+ return msg[:len(msg)-1] +} diff --git a/vendor/github.com/docker/distribution/vendor/github.com/Sirupsen/logrus/exported.go b/vendor/github.com/docker/distribution/vendor/github.com/Sirupsen/logrus/exported.go new file mode 100644 index 000000000..a67e1b802 --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/github.com/Sirupsen/logrus/exported.go @@ -0,0 +1,188 @@ +package logrus + +import ( + "io" +) + +var ( + // std is the name of the standard logger in stdlib `log` + std = New() +) + +func StandardLogger() *Logger { + return std +} + +// SetOutput sets the standard logger output. +func SetOutput(out io.Writer) { + std.mu.Lock() + defer std.mu.Unlock() + std.Out = out +} + +// SetFormatter sets the standard logger formatter. +func SetFormatter(formatter Formatter) { + std.mu.Lock() + defer std.mu.Unlock() + std.Formatter = formatter +} + +// SetLevel sets the standard logger level. +func SetLevel(level Level) { + std.mu.Lock() + defer std.mu.Unlock() + std.Level = level +} + +// GetLevel returns the standard logger level. +func GetLevel() Level { + std.mu.Lock() + defer std.mu.Unlock() + return std.Level +} + +// AddHook adds a hook to the standard logger hooks. +func AddHook(hook Hook) { + std.mu.Lock() + defer std.mu.Unlock() + std.Hooks.Add(hook) +} + +// WithField creates an entry from the standard logger and adds a field to +// it. If you want multiple fields, use `WithFields`. +// +// Note that it doesn't log until you call Debug, Print, Info, Warn, Fatal +// or Panic on the Entry it returns. +func WithField(key string, value interface{}) *Entry { + return std.WithField(key, value) +} + +// WithFields creates an entry from the standard logger and adds multiple +// fields to it. This is simply a helper for `WithField`, invoking it +// once for each field. +// +// Note that it doesn't log until you call Debug, Print, Info, Warn, Fatal +// or Panic on the Entry it returns. +func WithFields(fields Fields) *Entry { + return std.WithFields(fields) +} + +// Debug logs a message at level Debug on the standard logger. +func Debug(args ...interface{}) { + std.Debug(args...) +} + +// Print logs a message at level Info on the standard logger. +func Print(args ...interface{}) { + std.Print(args...) +} + +// Info logs a message at level Info on the standard logger. +func Info(args ...interface{}) { + std.Info(args...) +} + +// Warn logs a message at level Warn on the standard logger. +func Warn(args ...interface{}) { + std.Warn(args...) +} + +// Warning logs a message at level Warn on the standard logger. +func Warning(args ...interface{}) { + std.Warning(args...) +} + +// Error logs a message at level Error on the standard logger. +func Error(args ...interface{}) { + std.Error(args...) +} + +// Panic logs a message at level Panic on the standard logger. +func Panic(args ...interface{}) { + std.Panic(args...) +} + +// Fatal logs a message at level Fatal on the standard logger. +func Fatal(args ...interface{}) { + std.Fatal(args...) +} + +// Debugf logs a message at level Debug on the standard logger. +func Debugf(format string, args ...interface{}) { + std.Debugf(format, args...) +} + +// Printf logs a message at level Info on the standard logger. +func Printf(format string, args ...interface{}) { + std.Printf(format, args...) +} + +// Infof logs a message at level Info on the standard logger. +func Infof(format string, args ...interface{}) { + std.Infof(format, args...) +} + +// Warnf logs a message at level Warn on the standard logger. 
+func Warnf(format string, args ...interface{}) { + std.Warnf(format, args...) +} + +// Warningf logs a message at level Warn on the standard logger. +func Warningf(format string, args ...interface{}) { + std.Warningf(format, args...) +} + +// Errorf logs a message at level Error on the standard logger. +func Errorf(format string, args ...interface{}) { + std.Errorf(format, args...) +} + +// Panicf logs a message at level Panic on the standard logger. +func Panicf(format string, args ...interface{}) { + std.Panicf(format, args...) +} + +// Fatalf logs a message at level Fatal on the standard logger. +func Fatalf(format string, args ...interface{}) { + std.Fatalf(format, args...) +} + +// Debugln logs a message at level Debug on the standard logger. +func Debugln(args ...interface{}) { + std.Debugln(args...) +} + +// Println logs a message at level Info on the standard logger. +func Println(args ...interface{}) { + std.Println(args...) +} + +// Infoln logs a message at level Info on the standard logger. +func Infoln(args ...interface{}) { + std.Infoln(args...) +} + +// Warnln logs a message at level Warn on the standard logger. +func Warnln(args ...interface{}) { + std.Warnln(args...) +} + +// Warningln logs a message at level Warn on the standard logger. +func Warningln(args ...interface{}) { + std.Warningln(args...) +} + +// Errorln logs a message at level Error on the standard logger. +func Errorln(args ...interface{}) { + std.Errorln(args...) +} + +// Panicln logs a message at level Panic on the standard logger. +func Panicln(args ...interface{}) { + std.Panicln(args...) +} + +// Fatalln logs a message at level Fatal on the standard logger. +func Fatalln(args ...interface{}) { + std.Fatalln(args...) +} diff --git a/vendor/github.com/docker/distribution/vendor/github.com/Sirupsen/logrus/formatter.go b/vendor/github.com/docker/distribution/vendor/github.com/Sirupsen/logrus/formatter.go new file mode 100644 index 000000000..104d689f1 --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/github.com/Sirupsen/logrus/formatter.go @@ -0,0 +1,48 @@ +package logrus + +import "time" + +const DefaultTimestampFormat = time.RFC3339 + +// The Formatter interface is used to implement a custom Formatter. It takes an +// `Entry`. It exposes all the fields, including the default ones: +// +// * `entry.Data["msg"]`. The message passed from Info, Warn, Error .. +// * `entry.Data["time"]`. The timestamp. +// * `entry.Data["level"]. The level the entry was logged at. +// +// Any additional fields added with `WithField` or `WithFields` are also in +// `entry.Data`. Format is expected to return an array of bytes which are then +// logged to `logger.Out`. +type Formatter interface { + Format(*Entry) ([]byte, error) +} + +// This is to not silently overwrite `time`, `msg` and `level` fields when +// dumping it. If this code wasn't there doing: +// +// logrus.WithField("level", 1).Info("hello") +// +// Would just silently drop the user provided level. Instead with this code +// it'll logged as: +// +// {"level": "info", "fields.level": 1, "msg": "hello", "time": "..."} +// +// It's not exported because it's still using Data in an opinionated way. It's to +// avoid code duplication between the two default formatters. 
+func prefixFieldClashes(data Fields) { + _, ok := data["time"] + if ok { + data["fields.time"] = data["time"] + } + + _, ok = data["msg"] + if ok { + data["fields.msg"] = data["msg"] + } + + _, ok = data["level"] + if ok { + data["fields.level"] = data["level"] + } +} diff --git a/vendor/github.com/docker/distribution/vendor/github.com/Sirupsen/logrus/formatters/logstash/logstash.go b/vendor/github.com/docker/distribution/vendor/github.com/Sirupsen/logrus/formatters/logstash/logstash.go new file mode 100644 index 000000000..8ea93ddf2 --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/github.com/Sirupsen/logrus/formatters/logstash/logstash.go @@ -0,0 +1,56 @@ +package logstash + +import ( + "encoding/json" + "fmt" + + "github.com/Sirupsen/logrus" +) + +// Formatter generates json in logstash format. +// Logstash site: http://logstash.net/ +type LogstashFormatter struct { + Type string // if not empty use for logstash type field. + + // TimestampFormat sets the format used for timestamps. + TimestampFormat string +} + +func (f *LogstashFormatter) Format(entry *logrus.Entry) ([]byte, error) { + entry.Data["@version"] = 1 + + if f.TimestampFormat == "" { + f.TimestampFormat = logrus.DefaultTimestampFormat + } + + entry.Data["@timestamp"] = entry.Time.Format(f.TimestampFormat) + + // set message field + v, ok := entry.Data["message"] + if ok { + entry.Data["fields.message"] = v + } + entry.Data["message"] = entry.Message + + // set level field + v, ok = entry.Data["level"] + if ok { + entry.Data["fields.level"] = v + } + entry.Data["level"] = entry.Level.String() + + // set type field + if f.Type != "" { + v, ok = entry.Data["type"] + if ok { + entry.Data["fields.type"] = v + } + entry.Data["type"] = f.Type + } + + serialized, err := json.Marshal(entry.Data) + if err != nil { + return nil, fmt.Errorf("Failed to marshal fields to JSON, %v", err) + } + return append(serialized, '\n'), nil +} diff --git a/vendor/github.com/docker/distribution/vendor/github.com/Sirupsen/logrus/hooks.go b/vendor/github.com/docker/distribution/vendor/github.com/Sirupsen/logrus/hooks.go new file mode 100644 index 000000000..0da2b3653 --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/github.com/Sirupsen/logrus/hooks.go @@ -0,0 +1,34 @@ +package logrus + +// A hook to be fired when logging on the logging levels returned from +// `Levels()` on your implementation of the interface. Note that this is not +// fired in a goroutine or a channel with workers, you should handle such +// functionality yourself if your call is non-blocking and you don't wish for +// the logging calls for levels returned from `Levels()` to block. +type Hook interface { + Levels() []Level + Fire(*Entry) error +} + +// Internal type for storing the hooks on a logger instance. +type levelHooks map[Level][]Hook + +// Add a hook to an instance of logger. This is called with +// `log.Hooks.Add(new(MyHook))` where `MyHook` implements the `Hook` interface. +func (hooks levelHooks) Add(hook Hook) { + for _, level := range hook.Levels() { + hooks[level] = append(hooks[level], hook) + } +} + +// Fire all the hooks for the passed level. Used by `entry.log` to fire +// appropriate hooks for a log entry. 
+func (hooks levelHooks) Fire(level Level, entry *Entry) error { + for _, hook := range hooks[level] { + if err := hook.Fire(entry); err != nil { + return err + } + } + + return nil +} diff --git a/vendor/github.com/docker/distribution/vendor/github.com/Sirupsen/logrus/json_formatter.go b/vendor/github.com/docker/distribution/vendor/github.com/Sirupsen/logrus/json_formatter.go new file mode 100644 index 000000000..dcc4f1d9f --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/github.com/Sirupsen/logrus/json_formatter.go @@ -0,0 +1,40 @@ +package logrus + +import ( + "encoding/json" + "fmt" +) + +type JSONFormatter struct { + // TimestampFormat sets the format used for marshaling timestamps. + TimestampFormat string +} + +func (f *JSONFormatter) Format(entry *Entry) ([]byte, error) { + data := make(Fields, len(entry.Data)+3) + for k, v := range entry.Data { + switch v := v.(type) { + case error: + // Otherwise errors are ignored by `encoding/json` + // https://github.com/Sirupsen/logrus/issues/137 + data[k] = v.Error() + default: + data[k] = v + } + } + prefixFieldClashes(data) + + if f.TimestampFormat == "" { + f.TimestampFormat = DefaultTimestampFormat + } + + data["time"] = entry.Time.Format(f.TimestampFormat) + data["msg"] = entry.Message + data["level"] = entry.Level.String() + + serialized, err := json.Marshal(data) + if err != nil { + return nil, fmt.Errorf("Failed to marshal fields to JSON, %v", err) + } + return append(serialized, '\n'), nil +} diff --git a/vendor/github.com/docker/distribution/vendor/github.com/Sirupsen/logrus/logger.go b/vendor/github.com/docker/distribution/vendor/github.com/Sirupsen/logrus/logger.go new file mode 100644 index 000000000..da928a375 --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/github.com/Sirupsen/logrus/logger.go @@ -0,0 +1,203 @@ +package logrus + +import ( + "io" + "os" + "sync" +) + +type Logger struct { + // The logs are `io.Copy`'d to this in a mutex. It's common to set this to a + // file, or leave it default which is `os.Stdout`. You can also set this to + // something more adventorous, such as logging to Kafka. + Out io.Writer + // Hooks for the logger instance. These allow firing events based on logging + // levels and log entries. For example, to send errors to an error tracking + // service, log to StatsD or dump the core on fatal errors. + Hooks levelHooks + // All log entries pass through the formatter before logged to Out. The + // included formatters are `TextFormatter` and `JSONFormatter` for which + // TextFormatter is the default. In development (when a TTY is attached) it + // logs with colors, but to a file it wouldn't. You can easily implement your + // own that implements the `Formatter` interface, see the `README` or included + // formatters for examples. + Formatter Formatter + // The logging level the logger should log at. This is typically (and defaults + // to) `logrus.Info`, which allows Info(), Warn(), Error() and Fatal() to be + // logged. `logrus.Debug` is useful in + Level Level + // Used to sync writing to the log. + mu sync.Mutex +} + +// Creates a new logger. Configuration should be set by changing `Formatter`, +// `Out` and `Hooks` directly on the default logger instance. You can also just +// instantiate your own: +// +// var log = &Logger{ +// Out: os.Stderr, +// Formatter: new(JSONFormatter), +// Hooks: make(levelHooks), +// Level: logrus.DebugLevel, +// } +// +// It's recommended to make this a global instance called `log`. 
+func New() *Logger { + return &Logger{ + Out: os.Stdout, + Formatter: new(TextFormatter), + Hooks: make(levelHooks), + Level: InfoLevel, + } +} + +// Adds a field to the log entry, note that you it doesn't log until you call +// Debug, Print, Info, Warn, Fatal or Panic. It only creates a log entry. +// Ff you want multiple fields, use `WithFields`. +func (logger *Logger) WithField(key string, value interface{}) *Entry { + return NewEntry(logger).WithField(key, value) +} + +// Adds a struct of fields to the log entry. All it does is call `WithField` for +// each `Field`. +func (logger *Logger) WithFields(fields Fields) *Entry { + return NewEntry(logger).WithFields(fields) +} + +func (logger *Logger) Debugf(format string, args ...interface{}) { + if logger.Level >= DebugLevel { + NewEntry(logger).Debugf(format, args...) + } +} + +func (logger *Logger) Infof(format string, args ...interface{}) { + if logger.Level >= InfoLevel { + NewEntry(logger).Infof(format, args...) + } +} + +func (logger *Logger) Printf(format string, args ...interface{}) { + NewEntry(logger).Printf(format, args...) +} + +func (logger *Logger) Warnf(format string, args ...interface{}) { + if logger.Level >= WarnLevel { + NewEntry(logger).Warnf(format, args...) + } +} + +func (logger *Logger) Warningf(format string, args ...interface{}) { + if logger.Level >= WarnLevel { + NewEntry(logger).Warnf(format, args...) + } +} + +func (logger *Logger) Errorf(format string, args ...interface{}) { + if logger.Level >= ErrorLevel { + NewEntry(logger).Errorf(format, args...) + } +} + +func (logger *Logger) Fatalf(format string, args ...interface{}) { + if logger.Level >= FatalLevel { + NewEntry(logger).Fatalf(format, args...) + } +} + +func (logger *Logger) Panicf(format string, args ...interface{}) { + if logger.Level >= PanicLevel { + NewEntry(logger).Panicf(format, args...) + } +} + +func (logger *Logger) Debug(args ...interface{}) { + if logger.Level >= DebugLevel { + NewEntry(logger).Debug(args...) + } +} + +func (logger *Logger) Info(args ...interface{}) { + if logger.Level >= InfoLevel { + NewEntry(logger).Info(args...) + } +} + +func (logger *Logger) Print(args ...interface{}) { + NewEntry(logger).Info(args...) +} + +func (logger *Logger) Warn(args ...interface{}) { + if logger.Level >= WarnLevel { + NewEntry(logger).Warn(args...) + } +} + +func (logger *Logger) Warning(args ...interface{}) { + if logger.Level >= WarnLevel { + NewEntry(logger).Warn(args...) + } +} + +func (logger *Logger) Error(args ...interface{}) { + if logger.Level >= ErrorLevel { + NewEntry(logger).Error(args...) + } +} + +func (logger *Logger) Fatal(args ...interface{}) { + if logger.Level >= FatalLevel { + NewEntry(logger).Fatal(args...) + } +} + +func (logger *Logger) Panic(args ...interface{}) { + if logger.Level >= PanicLevel { + NewEntry(logger).Panic(args...) + } +} + +func (logger *Logger) Debugln(args ...interface{}) { + if logger.Level >= DebugLevel { + NewEntry(logger).Debugln(args...) + } +} + +func (logger *Logger) Infoln(args ...interface{}) { + if logger.Level >= InfoLevel { + NewEntry(logger).Infoln(args...) + } +} + +func (logger *Logger) Println(args ...interface{}) { + NewEntry(logger).Println(args...) +} + +func (logger *Logger) Warnln(args ...interface{}) { + if logger.Level >= WarnLevel { + NewEntry(logger).Warnln(args...) + } +} + +func (logger *Logger) Warningln(args ...interface{}) { + if logger.Level >= WarnLevel { + NewEntry(logger).Warnln(args...) 
+ } +} + +func (logger *Logger) Errorln(args ...interface{}) { + if logger.Level >= ErrorLevel { + NewEntry(logger).Errorln(args...) + } +} + +func (logger *Logger) Fatalln(args ...interface{}) { + if logger.Level >= FatalLevel { + NewEntry(logger).Fatalln(args...) + } +} + +func (logger *Logger) Panicln(args ...interface{}) { + if logger.Level >= PanicLevel { + NewEntry(logger).Panicln(args...) + } +} diff --git a/vendor/github.com/docker/distribution/vendor/github.com/Sirupsen/logrus/logrus.go b/vendor/github.com/docker/distribution/vendor/github.com/Sirupsen/logrus/logrus.go new file mode 100644 index 000000000..43ee12e90 --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/github.com/Sirupsen/logrus/logrus.go @@ -0,0 +1,94 @@ +package logrus + +import ( + "fmt" + "log" +) + +// Fields type, used to pass to `WithFields`. +type Fields map[string]interface{} + +// Level type +type Level uint8 + +// Convert the Level to a string. E.g. PanicLevel becomes "panic". +func (level Level) String() string { + switch level { + case DebugLevel: + return "debug" + case InfoLevel: + return "info" + case WarnLevel: + return "warning" + case ErrorLevel: + return "error" + case FatalLevel: + return "fatal" + case PanicLevel: + return "panic" + } + + return "unknown" +} + +// ParseLevel takes a string level and returns the Logrus log level constant. +func ParseLevel(lvl string) (Level, error) { + switch lvl { + case "panic": + return PanicLevel, nil + case "fatal": + return FatalLevel, nil + case "error": + return ErrorLevel, nil + case "warn", "warning": + return WarnLevel, nil + case "info": + return InfoLevel, nil + case "debug": + return DebugLevel, nil + } + + var l Level + return l, fmt.Errorf("not a valid logrus Level: %q", lvl) +} + +// These are the different logging levels. You can set the logging level to log +// on your instance of logger, obtained with `logrus.New()`. +const ( + // PanicLevel level, highest level of severity. Logs and then calls panic with the + // message passed to Debug, Info, ... + PanicLevel Level = iota + // FatalLevel level. Logs and then calls `os.Exit(1)`. It will exit even if the + // logging level is set to Panic. + FatalLevel + // ErrorLevel level. Logs. Used for errors that should definitely be noted. + // Commonly used for hooks to send errors to an error tracking service. + ErrorLevel + // WarnLevel level. Non-critical entries that deserve eyes. + WarnLevel + // InfoLevel level. General operational entries about what's going on inside the + // application. + InfoLevel + // DebugLevel level. Usually only enabled when debugging. Very verbose logging. + DebugLevel +) + +// Won't compile if StdLogger can't be realized by a log.Logger +var _ StdLogger = &log.Logger{} + +// StdLogger is what your logrus-enabled library should take, that way +// it'll accept a stdlib logger and a logrus logger. There's no standard +// interface, this is the closest we get, unfortunately. 
+type StdLogger interface { + Print(...interface{}) + Printf(string, ...interface{}) + Println(...interface{}) + + Fatal(...interface{}) + Fatalf(string, ...interface{}) + Fatalln(...interface{}) + + Panic(...interface{}) + Panicf(string, ...interface{}) + Panicln(...interface{}) +} diff --git a/vendor/github.com/docker/distribution/vendor/github.com/Sirupsen/logrus/terminal_darwin.go b/vendor/github.com/docker/distribution/vendor/github.com/Sirupsen/logrus/terminal_darwin.go new file mode 100644 index 000000000..8fe02a4ae --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/github.com/Sirupsen/logrus/terminal_darwin.go @@ -0,0 +1,12 @@ +// Based on ssh/terminal: +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package logrus + +import "syscall" + +const ioctlReadTermios = syscall.TIOCGETA + +type Termios syscall.Termios diff --git a/vendor/github.com/docker/distribution/vendor/github.com/Sirupsen/logrus/terminal_freebsd.go b/vendor/github.com/docker/distribution/vendor/github.com/Sirupsen/logrus/terminal_freebsd.go new file mode 100644 index 000000000..0428ee5d5 --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/github.com/Sirupsen/logrus/terminal_freebsd.go @@ -0,0 +1,20 @@ +/* + Go 1.2 doesn't include Termios for FreeBSD. This should be added in 1.3 and this could be merged with terminal_darwin. +*/ +package logrus + +import ( + "syscall" +) + +const ioctlReadTermios = syscall.TIOCGETA + +type Termios struct { + Iflag uint32 + Oflag uint32 + Cflag uint32 + Lflag uint32 + Cc [20]uint8 + Ispeed uint32 + Ospeed uint32 +} diff --git a/vendor/github.com/docker/distribution/vendor/github.com/Sirupsen/logrus/terminal_linux.go b/vendor/github.com/docker/distribution/vendor/github.com/Sirupsen/logrus/terminal_linux.go new file mode 100644 index 000000000..a2c0b40db --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/github.com/Sirupsen/logrus/terminal_linux.go @@ -0,0 +1,12 @@ +// Based on ssh/terminal: +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package logrus + +import "syscall" + +const ioctlReadTermios = syscall.TCGETS + +type Termios syscall.Termios diff --git a/vendor/github.com/docker/distribution/vendor/github.com/Sirupsen/logrus/terminal_notwindows.go b/vendor/github.com/docker/distribution/vendor/github.com/Sirupsen/logrus/terminal_notwindows.go new file mode 100644 index 000000000..b8bebc13e --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/github.com/Sirupsen/logrus/terminal_notwindows.go @@ -0,0 +1,21 @@ +// Based on ssh/terminal: +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build linux darwin freebsd openbsd + +package logrus + +import ( + "syscall" + "unsafe" +) + +// IsTerminal returns true if the given file descriptor is a terminal. 
+func IsTerminal() bool { + fd := syscall.Stdout + var termios Termios + _, _, err := syscall.Syscall6(syscall.SYS_IOCTL, uintptr(fd), ioctlReadTermios, uintptr(unsafe.Pointer(&termios)), 0, 0, 0) + return err == 0 +} diff --git a/vendor/github.com/docker/distribution/vendor/github.com/Sirupsen/logrus/terminal_openbsd.go b/vendor/github.com/docker/distribution/vendor/github.com/Sirupsen/logrus/terminal_openbsd.go new file mode 100644 index 000000000..af609a53d --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/github.com/Sirupsen/logrus/terminal_openbsd.go @@ -0,0 +1,7 @@ +package logrus + +import "syscall" + +const ioctlReadTermios = syscall.TIOCGETA + +type Termios syscall.Termios diff --git a/vendor/github.com/docker/distribution/vendor/github.com/Sirupsen/logrus/terminal_windows.go b/vendor/github.com/docker/distribution/vendor/github.com/Sirupsen/logrus/terminal_windows.go new file mode 100644 index 000000000..2e09f6f7e --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/github.com/Sirupsen/logrus/terminal_windows.go @@ -0,0 +1,27 @@ +// Based on ssh/terminal: +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build windows + +package logrus + +import ( + "syscall" + "unsafe" +) + +var kernel32 = syscall.NewLazyDLL("kernel32.dll") + +var ( + procGetConsoleMode = kernel32.NewProc("GetConsoleMode") +) + +// IsTerminal returns true if the given file descriptor is a terminal. +func IsTerminal() bool { + fd := syscall.Stdout + var st uint32 + r, _, e := syscall.Syscall(procGetConsoleMode.Addr(), 2, uintptr(fd), uintptr(unsafe.Pointer(&st)), 0) + return r != 0 && e == 0 +} diff --git a/vendor/github.com/docker/distribution/vendor/github.com/Sirupsen/logrus/text_formatter.go b/vendor/github.com/docker/distribution/vendor/github.com/Sirupsen/logrus/text_formatter.go new file mode 100644 index 000000000..612417ff9 --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/github.com/Sirupsen/logrus/text_formatter.go @@ -0,0 +1,149 @@ +package logrus + +import ( + "bytes" + "fmt" + "sort" + "strings" + "time" +) + +const ( + nocolor = 0 + red = 31 + green = 32 + yellow = 33 + blue = 34 + gray = 37 +) + +var ( + baseTimestamp time.Time + isTerminal bool +) + +func init() { + baseTimestamp = time.Now() + isTerminal = IsTerminal() +} + +func miniTS() int { + return int(time.Since(baseTimestamp) / time.Second) +} + +type TextFormatter struct { + // Set to true to bypass checking for a TTY before outputting colors. + ForceColors bool + + // Force disabling colors. + DisableColors bool + + // Disable timestamp logging. useful when output is redirected to logging + // system that already adds timestamps. + DisableTimestamp bool + + // Enable logging the full timestamp when a TTY is attached instead of just + // the time passed since beginning of execution. + FullTimestamp bool + + // TimestampFormat to use for display when a full timestamp is printed + TimestampFormat string + + // The fields are sorted by default for a consistent output. For applications + // that log extremely frequently and don't use the JSON formatter this may not + // be desired. 
+ DisableSorting bool +} + +func (f *TextFormatter) Format(entry *Entry) ([]byte, error) { + var keys []string = make([]string, 0, len(entry.Data)) + for k := range entry.Data { + keys = append(keys, k) + } + + if !f.DisableSorting { + sort.Strings(keys) + } + + b := &bytes.Buffer{} + + prefixFieldClashes(entry.Data) + + isColored := (f.ForceColors || isTerminal) && !f.DisableColors + + if f.TimestampFormat == "" { + f.TimestampFormat = DefaultTimestampFormat + } + if isColored { + f.printColored(b, entry, keys) + } else { + if !f.DisableTimestamp { + f.appendKeyValue(b, "time", entry.Time.Format(f.TimestampFormat)) + } + f.appendKeyValue(b, "level", entry.Level.String()) + f.appendKeyValue(b, "msg", entry.Message) + for _, key := range keys { + f.appendKeyValue(b, key, entry.Data[key]) + } + } + + b.WriteByte('\n') + return b.Bytes(), nil +} + +func (f *TextFormatter) printColored(b *bytes.Buffer, entry *Entry, keys []string) { + var levelColor int + switch entry.Level { + case DebugLevel: + levelColor = gray + case WarnLevel: + levelColor = yellow + case ErrorLevel, FatalLevel, PanicLevel: + levelColor = red + default: + levelColor = blue + } + + levelText := strings.ToUpper(entry.Level.String())[0:4] + + if !f.FullTimestamp { + fmt.Fprintf(b, "\x1b[%dm%s\x1b[0m[%04d] %-44s ", levelColor, levelText, miniTS(), entry.Message) + } else { + fmt.Fprintf(b, "\x1b[%dm%s\x1b[0m[%s] %-44s ", levelColor, levelText, entry.Time.Format(f.TimestampFormat), entry.Message) + } + for _, k := range keys { + v := entry.Data[k] + fmt.Fprintf(b, " \x1b[%dm%s\x1b[0m=%v", levelColor, k, v) + } +} + +func needsQuoting(text string) bool { + for _, ch := range text { + if !((ch >= 'a' && ch <= 'z') || + (ch >= 'A' && ch <= 'Z') || + (ch >= '0' && ch <= '9') || + ch == '-' || ch == '.') { + return false + } + } + return true +} + +func (f *TextFormatter) appendKeyValue(b *bytes.Buffer, key, value interface{}) { + switch value.(type) { + case string: + if needsQuoting(value.(string)) { + fmt.Fprintf(b, "%v=%s ", key, value) + } else { + fmt.Fprintf(b, "%v=%q ", key, value) + } + case error: + if needsQuoting(value.(error).Error()) { + fmt.Fprintf(b, "%v=%s ", key, value) + } else { + fmt.Fprintf(b, "%v=%q ", key, value) + } + default: + fmt.Fprintf(b, "%v=%v ", key, value) + } +} diff --git a/vendor/github.com/docker/distribution/vendor/github.com/Sirupsen/logrus/writer.go b/vendor/github.com/docker/distribution/vendor/github.com/Sirupsen/logrus/writer.go new file mode 100644 index 000000000..1e30b1c75 --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/github.com/Sirupsen/logrus/writer.go @@ -0,0 +1,31 @@ +package logrus + +import ( + "bufio" + "io" + "runtime" +) + +func (logger *Logger) Writer() *io.PipeWriter { + reader, writer := io.Pipe() + + go logger.writerScanner(reader) + runtime.SetFinalizer(writer, writerFinalizer) + + return writer +} + +func (logger *Logger) writerScanner(reader *io.PipeReader) { + scanner := bufio.NewScanner(reader) + for scanner.Scan() { + logger.Print(scanner.Text()) + } + if err := scanner.Err(); err != nil { + logger.Errorf("Error while reading from Writer: %s", err) + } + reader.Close() +} + +func writerFinalizer(writer *io.PipeWriter) { + writer.Close() +} diff --git a/vendor/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/LICENSE.txt b/vendor/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/LICENSE.txt new file mode 100644 index 000000000..d64569567 --- /dev/null +++ 
b/vendor/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/LICENSE.txt @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. 
Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. 
+ You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/NOTICE.txt b/vendor/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/NOTICE.txt new file mode 100644 index 000000000..5f14d1162 --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/NOTICE.txt @@ -0,0 +1,3 @@ +AWS SDK for Go +Copyright 2015 Amazon.com, Inc. or its affiliates. All Rights Reserved. +Copyright 2014-2015 Stripe, Inc. diff --git a/vendor/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/aws/awserr/error.go b/vendor/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/aws/awserr/error.go new file mode 100644 index 000000000..e50771f80 --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/aws/awserr/error.go @@ -0,0 +1,145 @@ +// Package awserr represents API error interface accessors for the SDK. +package awserr + +// An Error wraps lower level errors with code, message and an original error. +// The underlying concrete error type may also satisfy other interfaces which +// can be to used to obtain more specific information about the error. +// +// Calling Error() or String() will always include the full information about +// an error based on its underlying type. +// +// Example: +// +// output, err := s3manage.Upload(svc, input, opts) +// if err != nil { +// if awsErr, ok := err.(awserr.Error); ok { +// // Get error details +// log.Println("Error:", awsErr.Code(), awsErr.Message()) +// +// // Prints out full error message, including original error if there was one. +// log.Println("Error:", awsErr.Error()) +// +// // Get original error +// if origErr := awsErr.OrigErr(); origErr != nil { +// // operate on original error. +// } +// } else { +// fmt.Println(err.Error()) +// } +// } +// +type Error interface { + // Satisfy the generic error interface. + error + + // Returns the short phrase depicting the classification of the error. + Code() string + + // Returns the error details message. + Message() string + + // Returns the original error if one was set. Nil is returned if not set. + OrigErr() error +} + +// BatchError is a batch of errors which also wraps lower level errors with +// code, message, and original errors. Calling Error() will include all errors +// that occured in the batch. +// +// Deprecated: Replaced with BatchedErrors. Only defined for backwards +// compatibility. +type BatchError interface { + // Satisfy the generic error interface. + error + + // Returns the short phrase depicting the classification of the error. + Code() string + + // Returns the error details message. + Message() string + + // Returns the original error if one was set. Nil is returned if not set. + OrigErrs() []error +} + +// BatchedErrors is a batch of errors which also wraps lower level errors with +// code, message, and original errors. Calling Error() will include all errors +// that occured in the batch. +// +// Replaces BatchError +type BatchedErrors interface { + // Satisfy the base Error interface. + Error + + // Returns the original error if one was set. Nil is returned if not set. 
+ OrigErrs() []error +} + +// New returns an Error object described by the code, message, and origErr. +// +// If origErr satisfies the Error interface it will not be wrapped within a new +// Error object and will instead be returned. +func New(code, message string, origErr error) Error { + var errs []error + if origErr != nil { + errs = append(errs, origErr) + } + return newBaseError(code, message, errs) +} + +// NewBatchError returns an BatchedErrors with a collection of errors as an +// array of errors. +func NewBatchError(code, message string, errs []error) BatchedErrors { + return newBaseError(code, message, errs) +} + +// A RequestFailure is an interface to extract request failure information from +// an Error such as the request ID of the failed request returned by a service. +// RequestFailures may not always have a requestID value if the request failed +// prior to reaching the service such as a connection error. +// +// Example: +// +// output, err := s3manage.Upload(svc, input, opts) +// if err != nil { +// if reqerr, ok := err.(RequestFailure); ok { +// log.Println("Request failed", reqerr.Code(), reqerr.Message(), reqerr.RequestID()) +// } else { +// log.Println("Error:", err.Error()) +// } +// } +// +// Combined with awserr.Error: +// +// output, err := s3manage.Upload(svc, input, opts) +// if err != nil { +// if awsErr, ok := err.(awserr.Error); ok { +// // Generic AWS Error with Code, Message, and original error (if any) +// fmt.Println(awsErr.Code(), awsErr.Message(), awsErr.OrigErr()) +// +// if reqErr, ok := err.(awserr.RequestFailure); ok { +// // A service error occurred +// fmt.Println(reqErr.StatusCode(), reqErr.RequestID()) +// } +// } else { +// fmt.Println(err.Error()) +// } +// } +// +type RequestFailure interface { + Error + + // The status code of the HTTP response. + StatusCode() int + + // The request ID returned by the service for a request failure. This will + // be empty if no request ID is available such as the request failed due + // to a connection error. + RequestID() string +} + +// NewRequestFailure returns a new request error wrapper for the given Error +// provided. +func NewRequestFailure(err Error, statusCode int, reqID string) RequestFailure { + return newRequestError(err, statusCode, reqID) +} diff --git a/vendor/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/aws/awserr/types.go b/vendor/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/aws/awserr/types.go new file mode 100644 index 000000000..e2d333b84 --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/aws/awserr/types.go @@ -0,0 +1,194 @@ +package awserr + +import "fmt" + +// SprintError returns a string of the formatted error code. +// +// Both extra and origErr are optional. If they are included their lines +// will be added, but if they are not included their lines will be ignored. +func SprintError(code, message, extra string, origErr error) string { + msg := fmt.Sprintf("%s: %s", code, message) + if extra != "" { + msg = fmt.Sprintf("%s\n\t%s", msg, extra) + } + if origErr != nil { + msg = fmt.Sprintf("%s\ncaused by: %s", msg, origErr.Error()) + } + return msg +} + +// A baseError wraps the code and message which defines an error. It also +// can be used to wrap an original error object. +// +// Should be used as the root for errors satisfying the awserr.Error. Also +// for any error which does not fit into a specific error wrapper type. 
+type baseError struct { + // Classification of error + code string + + // Detailed information about error + message string + + // Optional original error this error is based off of. Allows building + // chained errors. + errs []error +} + +// newBaseError returns an error object for the code, message, and errors. +// +// code is a short no whitespace phrase depicting the classification of +// the error that is being created. +// +// message is the free flow string containing detailed information about the +// error. +// +// origErrs is the error objects which will be nested under the new errors to +// be returned. +func newBaseError(code, message string, origErrs []error) *baseError { + b := &baseError{ + code: code, + message: message, + errs: origErrs, + } + + return b +} + +// Error returns the string representation of the error. +// +// See ErrorWithExtra for formatting. +// +// Satisfies the error interface. +func (b baseError) Error() string { + size := len(b.errs) + if size > 0 { + return SprintError(b.code, b.message, "", errorList(b.errs)) + } + + return SprintError(b.code, b.message, "", nil) +} + +// String returns the string representation of the error. +// Alias for Error to satisfy the stringer interface. +func (b baseError) String() string { + return b.Error() +} + +// Code returns the short phrase depicting the classification of the error. +func (b baseError) Code() string { + return b.code +} + +// Message returns the error details message. +func (b baseError) Message() string { + return b.message +} + +// OrigErr returns the original error if one was set. Nil is returned if no +// error was set. This only returns the first element in the list. If the full +// list is needed, use BatchedErrors. +func (b baseError) OrigErr() error { + switch len(b.errs) { + case 0: + return nil + case 1: + return b.errs[0] + default: + if err, ok := b.errs[0].(Error); ok { + return NewBatchError(err.Code(), err.Message(), b.errs[1:]) + } + return NewBatchError("BatchedErrors", + "multiple errors occured", b.errs) + } +} + +// OrigErrs returns the original errors if one was set. An empty slice is +// returned if no error was set. +func (b baseError) OrigErrs() []error { + return b.errs +} + +// So that the Error interface type can be included as an anonymous field +// in the requestError struct and not conflict with the error.Error() method. +type awsError Error + +// A requestError wraps a request or service error. +// +// Composed of baseError for code, message, and original error. +type requestError struct { + awsError + statusCode int + requestID string +} + +// newRequestError returns a wrapped error with additional information for +// request status code, and service requestID. +// +// Should be used to wrap all request which involve service requests. Even if +// the request failed without a service response, but had an HTTP status code +// that may be meaningful. +// +// Also wraps original errors via the baseError. +func newRequestError(err Error, statusCode int, requestID string) *requestError { + return &requestError{ + awsError: err, + statusCode: statusCode, + requestID: requestID, + } +} + +// Error returns the string representation of the error. +// Satisfies the error interface. +func (r requestError) Error() string { + extra := fmt.Sprintf("status code: %d, request id: %s", + r.statusCode, r.requestID) + return SprintError(r.Code(), r.Message(), extra, r.OrigErr()) +} + +// String returns the string representation of the error. 
+// Alias for Error to satisfy the stringer interface. +func (r requestError) String() string { + return r.Error() +} + +// StatusCode returns the wrapped status code for the error +func (r requestError) StatusCode() int { + return r.statusCode +} + +// RequestID returns the wrapped requestID +func (r requestError) RequestID() string { + return r.requestID +} + +// OrigErrs returns the original errors if one was set. An empty slice is +// returned if no error was set. +func (r requestError) OrigErrs() []error { + if b, ok := r.awsError.(BatchedErrors); ok { + return b.OrigErrs() + } + return []error{r.OrigErr()} +} + +// An error list that satisfies the golang interface +type errorList []error + +// Error returns the string representation of the error. +// +// Satisfies the error interface. +func (e errorList) Error() string { + msg := "" + // How do we want to handle the array size being zero + if size := len(e); size > 0 { + for i := 0; i < size; i++ { + msg += fmt.Sprintf("%s", e[i].Error()) + // We check the next index to see if it is within the slice. + // If it is, then we append a newline. We do this, because unit tests + // could be broken with the additional '\n' + if i+1 < size { + msg += "\n" + } + } + } + return msg +} diff --git a/vendor/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/aws/awsutil/copy.go b/vendor/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/aws/awsutil/copy.go new file mode 100644 index 000000000..8429470b9 --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/aws/awsutil/copy.go @@ -0,0 +1,100 @@ +package awsutil + +import ( + "io" + "reflect" +) + +// Copy deeply copies a src structure to dst. Useful for copying request and +// response structures. +// +// Can copy between structs of different type, but will only copy fields which +// are assignable, and exist in both structs. Fields which are not assignable, +// or do not exist in both structs are ignored. +func Copy(dst, src interface{}) { + dstval := reflect.ValueOf(dst) + if !dstval.IsValid() { + panic("Copy dst cannot be nil") + } + + rcopy(dstval, reflect.ValueOf(src), true) +} + +// CopyOf returns a copy of src while also allocating the memory for dst. +// src must be a pointer type or this operation will fail. +func CopyOf(src interface{}) (dst interface{}) { + dsti := reflect.New(reflect.TypeOf(src).Elem()) + dst = dsti.Interface() + rcopy(dsti, reflect.ValueOf(src), true) + return +} + +// rcopy performs a recursive copy of values from the source to destination. +// +// root is used to skip certain aspects of the copy which are not valid +// for the root node of a object. 
+func rcopy(dst, src reflect.Value, root bool) { + if !src.IsValid() { + return + } + + switch src.Kind() { + case reflect.Ptr: + if _, ok := src.Interface().(io.Reader); ok { + if dst.Kind() == reflect.Ptr && dst.Elem().CanSet() { + dst.Elem().Set(src) + } else if dst.CanSet() { + dst.Set(src) + } + } else { + e := src.Type().Elem() + if dst.CanSet() && !src.IsNil() { + dst.Set(reflect.New(e)) + } + if src.Elem().IsValid() { + // Keep the current root state since the depth hasn't changed + rcopy(dst.Elem(), src.Elem(), root) + } + } + case reflect.Struct: + t := dst.Type() + for i := 0; i < t.NumField(); i++ { + name := t.Field(i).Name + srcVal := src.FieldByName(name) + dstVal := dst.FieldByName(name) + if srcVal.IsValid() && dstVal.CanSet() { + rcopy(dstVal, srcVal, false) + } + } + case reflect.Slice: + if src.IsNil() { + break + } + + s := reflect.MakeSlice(src.Type(), src.Len(), src.Cap()) + dst.Set(s) + for i := 0; i < src.Len(); i++ { + rcopy(dst.Index(i), src.Index(i), false) + } + case reflect.Map: + if src.IsNil() { + break + } + + s := reflect.MakeMap(src.Type()) + dst.Set(s) + for _, k := range src.MapKeys() { + v := src.MapIndex(k) + v2 := reflect.New(v.Type()).Elem() + rcopy(v2, v, false) + dst.SetMapIndex(k, v2) + } + default: + // Assign the value if possible. If its not assignable, the value would + // need to be converted and the impact of that may be unexpected, or is + // not compatible with the dst type. + if src.Type().AssignableTo(dst.Type()) { + dst.Set(src) + } + } +} diff --git a/vendor/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/aws/awsutil/equal.go b/vendor/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/aws/awsutil/equal.go new file mode 100644 index 000000000..59fa4a558 --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/aws/awsutil/equal.go @@ -0,0 +1,27 @@ +package awsutil + +import ( + "reflect" +) + +// DeepEqual returns if the two values are deeply equal like reflect.DeepEqual. +// In addition to this, this method will also dereference the input values if +// possible so the DeepEqual performed will not fail if one parameter is a +// pointer and the other is not. +// +// DeepEqual will not perform indirection of nested values of the input parameters. +func DeepEqual(a, b interface{}) bool { + ra := reflect.Indirect(reflect.ValueOf(a)) + rb := reflect.Indirect(reflect.ValueOf(b)) + + if raValid, rbValid := ra.IsValid(), rb.IsValid(); !raValid && !rbValid { + // If the elements are both nil, and of the same type the are equal + // If they are of different types they are not equal + return reflect.TypeOf(a) == reflect.TypeOf(b) + } else if raValid != rbValid { + // Both values must be valid to be equal + return false + } + + return reflect.DeepEqual(ra.Interface(), rb.Interface()) +} diff --git a/vendor/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/aws/awsutil/path_value.go b/vendor/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/aws/awsutil/path_value.go new file mode 100644 index 000000000..4d2a01e8c --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/aws/awsutil/path_value.go @@ -0,0 +1,222 @@ +package awsutil + +import ( + "reflect" + "regexp" + "strconv" + "strings" + + "github.com/jmespath/go-jmespath" +) + +var indexRe = regexp.MustCompile(`(.+)\[(-?\d+)?\]$`) + +// rValuesAtPath returns a slice of values found in value v. The values +// in v are explored recursively so all nested values are collected. 
+func rValuesAtPath(v interface{}, path string, createPath, caseSensitive, nilTerm bool) []reflect.Value { + pathparts := strings.Split(path, "||") + if len(pathparts) > 1 { + for _, pathpart := range pathparts { + vals := rValuesAtPath(v, pathpart, createPath, caseSensitive, nilTerm) + if len(vals) > 0 { + return vals + } + } + return nil + } + + values := []reflect.Value{reflect.Indirect(reflect.ValueOf(v))} + components := strings.Split(path, ".") + for len(values) > 0 && len(components) > 0 { + var index *int64 + var indexStar bool + c := strings.TrimSpace(components[0]) + if c == "" { // no actual component, illegal syntax + return nil + } else if caseSensitive && c != "*" && strings.ToLower(c[0:1]) == c[0:1] { + // TODO normalize case for user + return nil // don't support unexported fields + } + + // parse this component + if m := indexRe.FindStringSubmatch(c); m != nil { + c = m[1] + if m[2] == "" { + index = nil + indexStar = true + } else { + i, _ := strconv.ParseInt(m[2], 10, 32) + index = &i + indexStar = false + } + } + + nextvals := []reflect.Value{} + for _, value := range values { + // pull component name out of struct member + if value.Kind() != reflect.Struct { + continue + } + + if c == "*" { // pull all members + for i := 0; i < value.NumField(); i++ { + if f := reflect.Indirect(value.Field(i)); f.IsValid() { + nextvals = append(nextvals, f) + } + } + continue + } + + value = value.FieldByNameFunc(func(name string) bool { + if c == name { + return true + } else if !caseSensitive && strings.ToLower(name) == strings.ToLower(c) { + return true + } + return false + }) + + if nilTerm && value.Kind() == reflect.Ptr && len(components[1:]) == 0 { + if !value.IsNil() { + value.Set(reflect.Zero(value.Type())) + } + return []reflect.Value{value} + } + + if createPath && value.Kind() == reflect.Ptr && value.IsNil() { + // TODO if the value is the terminus it should not be created + // if the value to be set to its position is nil. + value.Set(reflect.New(value.Type().Elem())) + value = value.Elem() + } else { + value = reflect.Indirect(value) + } + + if value.Kind() == reflect.Slice || value.Kind() == reflect.Map { + if !createPath && value.IsNil() { + value = reflect.ValueOf(nil) + } + } + + if value.IsValid() { + nextvals = append(nextvals, value) + } + } + values = nextvals + + if indexStar || index != nil { + nextvals = []reflect.Value{} + for _, value := range values { + value := reflect.Indirect(value) + if value.Kind() != reflect.Slice { + continue + } + + if indexStar { // grab all indices + for i := 0; i < value.Len(); i++ { + idx := reflect.Indirect(value.Index(i)) + if idx.IsValid() { + nextvals = append(nextvals, idx) + } + } + continue + } + + // pull out index + i := int(*index) + if i >= value.Len() { // check out of bounds + if createPath { + // TODO resize slice + } else { + continue + } + } else if i < 0 { // support negative indexing + i = value.Len() + i + } + value = reflect.Indirect(value.Index(i)) + + if value.Kind() == reflect.Slice || value.Kind() == reflect.Map { + if !createPath && value.IsNil() { + value = reflect.ValueOf(nil) + } + } + + if value.IsValid() { + nextvals = append(nextvals, value) + } + } + values = nextvals + } + + components = components[1:] + } + return values +} + +// ValuesAtPath returns a list of values at the case insensitive lexical +// path inside of a structure. 
+func ValuesAtPath(i interface{}, path string) ([]interface{}, error) { + result, err := jmespath.Search(path, i) + if err != nil { + return nil, err + } + + v := reflect.ValueOf(result) + if !v.IsValid() || (v.Kind() == reflect.Ptr && v.IsNil()) { + return nil, nil + } + if s, ok := result.([]interface{}); ok { + return s, err + } + if v.Kind() == reflect.Map && v.Len() == 0 { + return nil, nil + } + if v.Kind() == reflect.Slice { + out := make([]interface{}, v.Len()) + for i := 0; i < v.Len(); i++ { + out[i] = v.Index(i).Interface() + } + return out, nil + } + + return []interface{}{result}, nil +} + +// SetValueAtPath sets a value at the case insensitive lexical path inside +// of a structure. +func SetValueAtPath(i interface{}, path string, v interface{}) { + if rvals := rValuesAtPath(i, path, true, false, v == nil); rvals != nil { + for _, rval := range rvals { + if rval.Kind() == reflect.Ptr && rval.IsNil() { + continue + } + setValue(rval, v) + } + } +} + +func setValue(dstVal reflect.Value, src interface{}) { + if dstVal.Kind() == reflect.Ptr { + dstVal = reflect.Indirect(dstVal) + } + srcVal := reflect.ValueOf(src) + + if !srcVal.IsValid() { // src is literal nil + if dstVal.CanAddr() { + // Convert to pointer so that pointer's value can be nil'ed + // dstVal = dstVal.Addr() + } + dstVal.Set(reflect.Zero(dstVal.Type())) + + } else if srcVal.Kind() == reflect.Ptr { + if srcVal.IsNil() { + srcVal = reflect.Zero(dstVal.Type()) + } else { + srcVal = reflect.ValueOf(src).Elem() + } + dstVal.Set(srcVal) + } else { + dstVal.Set(srcVal) + } + +} diff --git a/vendor/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/aws/awsutil/prettify.go b/vendor/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/aws/awsutil/prettify.go new file mode 100644 index 000000000..fc38172fe --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/aws/awsutil/prettify.go @@ -0,0 +1,107 @@ +package awsutil + +import ( + "bytes" + "fmt" + "io" + "reflect" + "strings" +) + +// Prettify returns the string representation of a value. +func Prettify(i interface{}) string { + var buf bytes.Buffer + prettify(reflect.ValueOf(i), 0, &buf) + return buf.String() +} + +// prettify will recursively walk value v to build a textual +// representation of the value. 
+func prettify(v reflect.Value, indent int, buf *bytes.Buffer) { + for v.Kind() == reflect.Ptr { + v = v.Elem() + } + + switch v.Kind() { + case reflect.Struct: + strtype := v.Type().String() + if strtype == "time.Time" { + fmt.Fprintf(buf, "%s", v.Interface()) + break + } else if strings.HasPrefix(strtype, "io.") { + buf.WriteString("<buffer>") + break + } + + buf.WriteString("{\n") + + names := []string{} + for i := 0; i < v.Type().NumField(); i++ { + name := v.Type().Field(i).Name + f := v.Field(i) + if name[0:1] == strings.ToLower(name[0:1]) { + continue // ignore unexported fields + } + if (f.Kind() == reflect.Ptr || f.Kind() == reflect.Slice || f.Kind() == reflect.Map) && f.IsNil() { + continue // ignore unset fields + } + names = append(names, name) + } + + for i, n := range names { + val := v.FieldByName(n) + buf.WriteString(strings.Repeat(" ", indent+2)) + buf.WriteString(n + ": ") + prettify(val, indent+2, buf) + + if i < len(names)-1 { + buf.WriteString(",\n") + } + } + + buf.WriteString("\n" + strings.Repeat(" ", indent) + "}") + case reflect.Slice: + nl, id, id2 := "", "", "" + if v.Len() > 3 { + nl, id, id2 = "\n", strings.Repeat(" ", indent), strings.Repeat(" ", indent+2) + } + buf.WriteString("[" + nl) + for i := 0; i < v.Len(); i++ { + buf.WriteString(id2) + prettify(v.Index(i), indent+2, buf) + + if i < v.Len()-1 { + buf.WriteString("," + nl) + } + } + + buf.WriteString(nl + id + "]") + case reflect.Map: + buf.WriteString("{\n") + + for i, k := range v.MapKeys() { + buf.WriteString(strings.Repeat(" ", indent+2)) + buf.WriteString(k.String() + ": ") + prettify(v.MapIndex(k), indent+2, buf) + + if i < v.Len()-1 { + buf.WriteString(",\n") + } + } + + buf.WriteString("\n" + strings.Repeat(" ", indent) + "}") + default: + if !v.IsValid() { + fmt.Fprint(buf, "<invalid value>") + return + } + format := "%v" + switch v.Interface().(type) { + case string: + format = "%q" + case io.ReadSeeker, io.Reader: + format = "buffer(%p)" + } + fmt.Fprintf(buf, format, v.Interface()) + } +} diff --git a/vendor/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/aws/awsutil/string_value.go b/vendor/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/aws/awsutil/string_value.go new file mode 100644 index 000000000..b6432f1a1 --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/aws/awsutil/string_value.go @@ -0,0 +1,89 @@ +package awsutil + +import ( + "bytes" + "fmt" + "reflect" + "strings" +) + +// StringValue returns the string representation of a value. 
+func StringValue(i interface{}) string { + var buf bytes.Buffer + stringValue(reflect.ValueOf(i), 0, &buf) + return buf.String() +} + +func stringValue(v reflect.Value, indent int, buf *bytes.Buffer) { + for v.Kind() == reflect.Ptr { + v = v.Elem() + } + + switch v.Kind() { + case reflect.Struct: + buf.WriteString("{\n") + + names := []string{} + for i := 0; i < v.Type().NumField(); i++ { + name := v.Type().Field(i).Name + f := v.Field(i) + if name[0:1] == strings.ToLower(name[0:1]) { + continue // ignore unexported fields + } + if (f.Kind() == reflect.Ptr || f.Kind() == reflect.Slice) && f.IsNil() { + continue // ignore unset fields + } + names = append(names, name) + } + + for i, n := range names { + val := v.FieldByName(n) + buf.WriteString(strings.Repeat(" ", indent+2)) + buf.WriteString(n + ": ") + stringValue(val, indent+2, buf) + + if i < len(names)-1 { + buf.WriteString(",\n") + } + } + + buf.WriteString("\n" + strings.Repeat(" ", indent) + "}") + case reflect.Slice: + nl, id, id2 := "", "", "" + if v.Len() > 3 { + nl, id, id2 = "\n", strings.Repeat(" ", indent), strings.Repeat(" ", indent+2) + } + buf.WriteString("[" + nl) + for i := 0; i < v.Len(); i++ { + buf.WriteString(id2) + stringValue(v.Index(i), indent+2, buf) + + if i < v.Len()-1 { + buf.WriteString("," + nl) + } + } + + buf.WriteString(nl + id + "]") + case reflect.Map: + buf.WriteString("{\n") + + for i, k := range v.MapKeys() { + buf.WriteString(strings.Repeat(" ", indent+2)) + buf.WriteString(k.String() + ": ") + stringValue(v.MapIndex(k), indent+2, buf) + + if i < v.Len()-1 { + buf.WriteString(",\n") + } + } + + buf.WriteString("\n" + strings.Repeat(" ", indent) + "}") + default: + format := "%v" + switch v.Interface().(type) { + case string: + format = "%q" + } + fmt.Fprintf(buf, format, v.Interface()) + } +} diff --git a/vendor/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/aws/client/client.go b/vendor/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/aws/client/client.go new file mode 100644 index 000000000..c8d0564d8 --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/aws/client/client.go @@ -0,0 +1,120 @@ +package client + +import ( + "fmt" + "io/ioutil" + "net/http/httputil" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/client/metadata" + "github.com/aws/aws-sdk-go/aws/request" +) + +// A Config provides configuration to a service client instance. +type Config struct { + Config *aws.Config + Handlers request.Handlers + Endpoint, SigningRegion string +} + +// ConfigProvider provides a generic way for a service client to receive +// the ClientConfig without circular dependencies. +type ConfigProvider interface { + ClientConfig(serviceName string, cfgs ...*aws.Config) Config +} + +// A Client implements the base client request and response handling +// used by all service clients. +type Client struct { + request.Retryer + metadata.ClientInfo + + Config aws.Config + Handlers request.Handlers +} + +// New will return a pointer to a new initialized service client. 
+func New(cfg aws.Config, info metadata.ClientInfo, handlers request.Handlers, options ...func(*Client)) *Client { + svc := &Client{ + Config: cfg, + ClientInfo: info, + Handlers: handlers, + } + + switch retryer, ok := cfg.Retryer.(request.Retryer); { + case ok: + svc.Retryer = retryer + case cfg.Retryer != nil && cfg.Logger != nil: + s := fmt.Sprintf("WARNING: %T does not implement request.Retryer; using DefaultRetryer instead", cfg.Retryer) + cfg.Logger.Log(s) + fallthrough + default: + maxRetries := aws.IntValue(cfg.MaxRetries) + if cfg.MaxRetries == nil || maxRetries == aws.UseServiceDefaultRetries { + maxRetries = 3 + } + svc.Retryer = DefaultRetryer{NumMaxRetries: maxRetries} + } + + svc.AddDebugHandlers() + + for _, option := range options { + option(svc) + } + + return svc +} + +// NewRequest returns a new Request pointer for the service API +// operation and parameters. +func (c *Client) NewRequest(operation *request.Operation, params interface{}, data interface{}) *request.Request { + return request.New(c.Config, c.ClientInfo, c.Handlers, c.Retryer, operation, params, data) +} + +// AddDebugHandlers injects debug logging handlers into the service to log request +// debug information. +func (c *Client) AddDebugHandlers() { + if !c.Config.LogLevel.AtLeast(aws.LogDebug) { + return + } + + c.Handlers.Send.PushFront(logRequest) + c.Handlers.Send.PushBack(logResponse) +} + +const logReqMsg = `DEBUG: Request %s/%s Details: +---[ REQUEST POST-SIGN ]----------------------------- +%s +-----------------------------------------------------` + +func logRequest(r *request.Request) { + logBody := r.Config.LogLevel.Matches(aws.LogDebugWithHTTPBody) + dumpedBody, _ := httputil.DumpRequestOut(r.HTTPRequest, logBody) + + if logBody { + // Reset the request body because dumpRequest will re-wrap the r.HTTPRequest's + // Body as a NoOpCloser and will not be reset after read by the HTTP + // client reader. + r.Body.Seek(r.BodyStart, 0) + r.HTTPRequest.Body = ioutil.NopCloser(r.Body) + } + + r.Config.Logger.Log(fmt.Sprintf(logReqMsg, r.ClientInfo.ServiceName, r.Operation.Name, string(dumpedBody))) +} + +const logRespMsg = `DEBUG: Response %s/%s Details: +---[ RESPONSE ]-------------------------------------- +%s +-----------------------------------------------------` + +func logResponse(r *request.Request) { + var msg = "no response data" + if r.HTTPResponse != nil { + logBody := r.Config.LogLevel.Matches(aws.LogDebugWithHTTPBody) + dumpedBody, _ := httputil.DumpResponse(r.HTTPResponse, logBody) + msg = string(dumpedBody) + } else if r.Error != nil { + msg = r.Error.Error() + } + r.Config.Logger.Log(fmt.Sprintf(logRespMsg, r.ClientInfo.ServiceName, r.Operation.Name, msg)) +} diff --git a/vendor/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/aws/client/default_retryer.go b/vendor/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/aws/client/default_retryer.go new file mode 100644 index 000000000..43a3676b7 --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/aws/client/default_retryer.go @@ -0,0 +1,90 @@ +package client + +import ( + "math/rand" + "sync" + "time" + + "github.com/aws/aws-sdk-go/aws/request" +) + +// DefaultRetryer implements basic retry logic using exponential backoff for +// most services. If you want to implement custom retry logic, implement the +// request.Retryer interface or create a structure type that composes this +// struct and override the specific methods. 
For example, to override only +// the MaxRetries method: +// +// type retryer struct { +// service.DefaultRetryer +// } +// +// // This implementation always has 100 max retries +// func (d retryer) MaxRetries() uint { return 100 } +type DefaultRetryer struct { + NumMaxRetries int +} + +// MaxRetries returns the number of maximum returns the service will use to make +// an individual API request. +func (d DefaultRetryer) MaxRetries() int { + return d.NumMaxRetries +} + +var seededRand = rand.New(&lockedSource{src: rand.NewSource(time.Now().UnixNano())}) + +// RetryRules returns the delay duration before retrying this request again +func (d DefaultRetryer) RetryRules(r *request.Request) time.Duration { + // Set the upper limit of delay in retrying at ~five minutes + minTime := 30 + throttle := d.shouldThrottle(r) + if throttle { + minTime = 500 + } + + retryCount := r.RetryCount + if retryCount > 13 { + retryCount = 13 + } else if throttle && retryCount > 8 { + retryCount = 8 + } + + delay := (1 << uint(retryCount)) * (seededRand.Intn(minTime) + minTime) + return time.Duration(delay) * time.Millisecond +} + +// ShouldRetry returns true if the request should be retried. +func (d DefaultRetryer) ShouldRetry(r *request.Request) bool { + if r.HTTPResponse.StatusCode >= 500 { + return true + } + return r.IsErrorRetryable() || d.shouldThrottle(r) +} + +// ShouldThrottle returns true if the request should be throttled. +func (d DefaultRetryer) shouldThrottle(r *request.Request) bool { + if r.HTTPResponse.StatusCode == 502 || + r.HTTPResponse.StatusCode == 503 || + r.HTTPResponse.StatusCode == 504 { + return true + } + return r.IsErrorThrottle() +} + +// lockedSource is a thread-safe implementation of rand.Source +type lockedSource struct { + lk sync.Mutex + src rand.Source +} + +func (r *lockedSource) Int63() (n int64) { + r.lk.Lock() + n = r.src.Int63() + r.lk.Unlock() + return +} + +func (r *lockedSource) Seed(seed int64) { + r.lk.Lock() + r.src.Seed(seed) + r.lk.Unlock() +} diff --git a/vendor/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/aws/client/metadata/client_info.go b/vendor/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/aws/client/metadata/client_info.go new file mode 100644 index 000000000..4778056dd --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/aws/client/metadata/client_info.go @@ -0,0 +1,12 @@ +package metadata + +// ClientInfo wraps immutable data from the client.Client structure. +type ClientInfo struct { + ServiceName string + APIVersion string + Endpoint string + SigningName string + SigningRegion string + JSONVersion string + TargetPrefix string +} diff --git a/vendor/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/aws/config.go b/vendor/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/aws/config.go new file mode 100644 index 000000000..da72935be --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/aws/config.go @@ -0,0 +1,363 @@ +package aws + +import ( + "net/http" + "time" + + "github.com/aws/aws-sdk-go/aws/credentials" +) + +// UseServiceDefaultRetries instructs the config to use the service's own default +// number of retries. This will be the default action if Config.MaxRetries +// is nil also. +const UseServiceDefaultRetries = -1 + +// RequestRetryer is an alias for a type that implements the request.Retryer interface. +type RequestRetryer interface{} + +// A Config provides service configuration for service clients. 
By default, +// all clients will use the {defaults.DefaultConfig} structure. +type Config struct { + // Enables verbose error printing of all credential chain errors. + // Should be used when wanting to see all errors while attempting to retreive + // credentials. + CredentialsChainVerboseErrors *bool + + // The credentials object to use when signing requests. Defaults to + // a chain of credential providers to search for credentials in environment + // variables, shared credential file, and EC2 Instance Roles. + Credentials *credentials.Credentials + + // An optional endpoint URL (hostname only or fully qualified URI) + // that overrides the default generated endpoint for a client. Set this + // to `""` to use the default generated endpoint. + // + // @note You must still provide a `Region` value when specifying an + // endpoint for a client. + Endpoint *string + + // The region to send requests to. This parameter is required and must + // be configured globally or on a per-client basis unless otherwise + // noted. A full list of regions is found in the "Regions and Endpoints" + // document. + // + // @see http://docs.aws.amazon.com/general/latest/gr/rande.html + // AWS Regions and Endpoints + Region *string + + // Set this to `true` to disable SSL when sending requests. Defaults + // to `false`. + DisableSSL *bool + + // The HTTP client to use when sending requests. Defaults to + // `http.DefaultClient`. + HTTPClient *http.Client + + // An integer value representing the logging level. The default log level + // is zero (LogOff), which represents no logging. To enable logging set + // to a LogLevel Value. + LogLevel *LogLevelType + + // The logger writer interface to write logging messages to. Defaults to + // standard out. + Logger Logger + + // The maximum number of times that a request will be retried for failures. + // Defaults to -1, which defers the max retry setting to the service specific + // configuration. + MaxRetries *int + + // Retryer guides how HTTP requests should be retried in case of recoverable failures. + // + // When nil or the value does not implement the request.Retryer interface, + // the request.DefaultRetryer will be used. + // + // When both Retryer and MaxRetries are non-nil, the former is used and + // the latter ignored. + // + // To set the Retryer field in a type-safe manner and with chaining, use + // the request.WithRetryer helper function: + // + // cfg := request.WithRetryer(aws.NewConfig(), myRetryer) + // + Retryer RequestRetryer + + // Disables semantic parameter validation, which validates input for missing + // required fields and/or other semantic request input errors. + DisableParamValidation *bool + + // Disables the computation of request and response checksums, e.g., + // CRC32 checksums in Amazon DynamoDB. + DisableComputeChecksums *bool + + // Set this to `true` to force the request to use path-style addressing, + // i.e., `http://s3.amazonaws.com/BUCKET/KEY`. By default, the S3 client will + // use virtual hosted bucket addressing when possible + // (`http://BUCKET.s3.amazonaws.com/KEY`). + // + // @note This configuration option is specific to the Amazon S3 service. + // @see http://docs.aws.amazon.com/AmazonS3/latest/dev/VirtualHosting.html + // Amazon S3: Virtual Hosting of Buckets + S3ForcePathStyle *bool + + // Set this to `true` to disable the SDK adding the `Expect: 100-Continue` + // header to PUT requests over 2MB of content. 
100-Continue instructs the + // HTTP client not to send the body until the service responds with a + // `continue` status. This is useful to prevent sending the request body + // until after the request is authenticated, and validated. + // + // http://docs.aws.amazon.com/AmazonS3/latest/API/RESTObjectPUT.html + // + // 100-Continue is only enabled for Go 1.6 and above. See `http.Transport`'s + // `ExpectContinueTimeout` for information on adjusting the continue wait timeout. + // https://golang.org/pkg/net/http/#Transport + // + // You should use this flag to disble 100-Continue if you experiance issues + // with proxies or thrid party S3 compatible services. + S3Disable100Continue *bool + + // Set this to `true` to enable S3 Accelerate feature. For all operations compatible + // with S3 Accelerate will use the accelerate endpoint for requests. Requests not compatible + // will fall back to normal S3 requests. + // + // The bucket must be enable for accelerate to be used with S3 client with accelerate + // enabled. If the bucket is not enabled for accelerate an error will be returned. + // The bucket name must be DNS compatible to also work with accelerate. + S3UseAccelerate *bool + + // Set this to `true` to disable the EC2Metadata client from overriding the + // default http.Client's Timeout. This is helpful if you do not want the EC2Metadata + // client to create a new http.Client. This options is only meaningful if you're not + // already using a custom HTTP client with the SDK. Enabled by default. + // + // Must be set and provided to the session.New() in order to disable the EC2Metadata + // overriding the timeout for default credentials chain. + // + // Example: + // sess := session.New(aws.NewConfig().WithEC2MetadataDiableTimeoutOverride(true)) + // svc := s3.New(sess) + // + EC2MetadataDisableTimeoutOverride *bool + + // SleepDelay is an override for the func the SDK will call when sleeping + // during the lifecycle of a request. Specifically this will be used for + // request delays. This value should only be used for testing. To adjust + // the delay of a request see the aws/client.DefaultRetryer and + // aws/request.Retryer. + SleepDelay func(time.Duration) +} + +// NewConfig returns a new Config pointer that can be chained with builder methods to +// set multiple configuration values inline without using pointers. +// +// sess := session.New(aws.NewConfig().WithRegion("us-west-2").WithMaxRetries(10)) +// +func NewConfig() *Config { + return &Config{} +} + +// WithCredentialsChainVerboseErrors sets a config verbose errors boolean and returning +// a Config pointer. +func (c *Config) WithCredentialsChainVerboseErrors(verboseErrs bool) *Config { + c.CredentialsChainVerboseErrors = &verboseErrs + return c +} + +// WithCredentials sets a config Credentials value returning a Config pointer +// for chaining. +func (c *Config) WithCredentials(creds *credentials.Credentials) *Config { + c.Credentials = creds + return c +} + +// WithEndpoint sets a config Endpoint value returning a Config pointer for +// chaining. +func (c *Config) WithEndpoint(endpoint string) *Config { + c.Endpoint = &endpoint + return c +} + +// WithRegion sets a config Region value returning a Config pointer for +// chaining. +func (c *Config) WithRegion(region string) *Config { + c.Region = ®ion + return c +} + +// WithDisableSSL sets a config DisableSSL value returning a Config pointer +// for chaining. 
+func (c *Config) WithDisableSSL(disable bool) *Config { + c.DisableSSL = &disable + return c +} + +// WithHTTPClient sets a config HTTPClient value returning a Config pointer +// for chaining. +func (c *Config) WithHTTPClient(client *http.Client) *Config { + c.HTTPClient = client + return c +} + +// WithMaxRetries sets a config MaxRetries value returning a Config pointer +// for chaining. +func (c *Config) WithMaxRetries(max int) *Config { + c.MaxRetries = &max + return c +} + +// WithDisableParamValidation sets a config DisableParamValidation value +// returning a Config pointer for chaining. +func (c *Config) WithDisableParamValidation(disable bool) *Config { + c.DisableParamValidation = &disable + return c +} + +// WithDisableComputeChecksums sets a config DisableComputeChecksums value +// returning a Config pointer for chaining. +func (c *Config) WithDisableComputeChecksums(disable bool) *Config { + c.DisableComputeChecksums = &disable + return c +} + +// WithLogLevel sets a config LogLevel value returning a Config pointer for +// chaining. +func (c *Config) WithLogLevel(level LogLevelType) *Config { + c.LogLevel = &level + return c +} + +// WithLogger sets a config Logger value returning a Config pointer for +// chaining. +func (c *Config) WithLogger(logger Logger) *Config { + c.Logger = logger + return c +} + +// WithS3ForcePathStyle sets a config S3ForcePathStyle value returning a Config +// pointer for chaining. +func (c *Config) WithS3ForcePathStyle(force bool) *Config { + c.S3ForcePathStyle = &force + return c +} + +// WithS3Disable100Continue sets a config S3Disable100Continue value returning +// a Config pointer for chaining. +func (c *Config) WithS3Disable100Continue(disable bool) *Config { + c.S3Disable100Continue = &disable + return c +} + +// WithS3UseAccelerate sets a config S3UseAccelerate value returning a Config +// pointer for chaining. +func (c *Config) WithS3UseAccelerate(enable bool) *Config { + c.S3UseAccelerate = &enable + return c +} + +// WithEC2MetadataDisableTimeoutOverride sets a config EC2MetadataDisableTimeoutOverride value +// returning a Config pointer for chaining. +func (c *Config) WithEC2MetadataDisableTimeoutOverride(enable bool) *Config { + c.EC2MetadataDisableTimeoutOverride = &enable + return c +} + +// WithSleepDelay overrides the function used to sleep while waiting for the +// next retry. Defaults to time.Sleep. +func (c *Config) WithSleepDelay(fn func(time.Duration)) *Config { + c.SleepDelay = fn + return c +} + +// MergeIn merges the passed in configs into the existing config object. 
+func (c *Config) MergeIn(cfgs ...*Config) { + for _, other := range cfgs { + mergeInConfig(c, other) + } +} + +func mergeInConfig(dst *Config, other *Config) { + if other == nil { + return + } + + if other.CredentialsChainVerboseErrors != nil { + dst.CredentialsChainVerboseErrors = other.CredentialsChainVerboseErrors + } + + if other.Credentials != nil { + dst.Credentials = other.Credentials + } + + if other.Endpoint != nil { + dst.Endpoint = other.Endpoint + } + + if other.Region != nil { + dst.Region = other.Region + } + + if other.DisableSSL != nil { + dst.DisableSSL = other.DisableSSL + } + + if other.HTTPClient != nil { + dst.HTTPClient = other.HTTPClient + } + + if other.LogLevel != nil { + dst.LogLevel = other.LogLevel + } + + if other.Logger != nil { + dst.Logger = other.Logger + } + + if other.MaxRetries != nil { + dst.MaxRetries = other.MaxRetries + } + + if other.Retryer != nil { + dst.Retryer = other.Retryer + } + + if other.DisableParamValidation != nil { + dst.DisableParamValidation = other.DisableParamValidation + } + + if other.DisableComputeChecksums != nil { + dst.DisableComputeChecksums = other.DisableComputeChecksums + } + + if other.S3ForcePathStyle != nil { + dst.S3ForcePathStyle = other.S3ForcePathStyle + } + + if other.S3Disable100Continue != nil { + dst.S3Disable100Continue = other.S3Disable100Continue + } + + if other.S3UseAccelerate != nil { + dst.S3UseAccelerate = other.S3UseAccelerate + } + + if other.EC2MetadataDisableTimeoutOverride != nil { + dst.EC2MetadataDisableTimeoutOverride = other.EC2MetadataDisableTimeoutOverride + } + + if other.SleepDelay != nil { + dst.SleepDelay = other.SleepDelay + } +} + +// Copy will return a shallow copy of the Config object. If any additional +// configurations are provided they will be merged into the new config returned. +func (c *Config) Copy(cfgs ...*Config) *Config { + dst := &Config{} + dst.MergeIn(c) + + for _, cfg := range cfgs { + dst.MergeIn(cfg) + } + + return dst +} diff --git a/vendor/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/aws/convert_types.go b/vendor/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/aws/convert_types.go new file mode 100644 index 000000000..3b73a7da7 --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/aws/convert_types.go @@ -0,0 +1,369 @@ +package aws + +import "time" + +// String returns a pointer to the string value passed in. +func String(v string) *string { + return &v +} + +// StringValue returns the value of the string pointer passed in or +// "" if the pointer is nil. 
+func StringValue(v *string) string { + if v != nil { + return *v + } + return "" +} + +// StringSlice converts a slice of string values into a slice of +// string pointers +func StringSlice(src []string) []*string { + dst := make([]*string, len(src)) + for i := 0; i < len(src); i++ { + dst[i] = &(src[i]) + } + return dst +} + +// StringValueSlice converts a slice of string pointers into a slice of +// string values +func StringValueSlice(src []*string) []string { + dst := make([]string, len(src)) + for i := 0; i < len(src); i++ { + if src[i] != nil { + dst[i] = *(src[i]) + } + } + return dst +} + +// StringMap converts a string map of string values into a string +// map of string pointers +func StringMap(src map[string]string) map[string]*string { + dst := make(map[string]*string) + for k, val := range src { + v := val + dst[k] = &v + } + return dst +} + +// StringValueMap converts a string map of string pointers into a string +// map of string values +func StringValueMap(src map[string]*string) map[string]string { + dst := make(map[string]string) + for k, val := range src { + if val != nil { + dst[k] = *val + } + } + return dst +} + +// Bool returns a pointer to the bool value passed in. +func Bool(v bool) *bool { + return &v +} + +// BoolValue returns the value of the bool pointer passed in or +// false if the pointer is nil. +func BoolValue(v *bool) bool { + if v != nil { + return *v + } + return false +} + +// BoolSlice converts a slice of bool values into a slice of +// bool pointers +func BoolSlice(src []bool) []*bool { + dst := make([]*bool, len(src)) + for i := 0; i < len(src); i++ { + dst[i] = &(src[i]) + } + return dst +} + +// BoolValueSlice converts a slice of bool pointers into a slice of +// bool values +func BoolValueSlice(src []*bool) []bool { + dst := make([]bool, len(src)) + for i := 0; i < len(src); i++ { + if src[i] != nil { + dst[i] = *(src[i]) + } + } + return dst +} + +// BoolMap converts a string map of bool values into a string +// map of bool pointers +func BoolMap(src map[string]bool) map[string]*bool { + dst := make(map[string]*bool) + for k, val := range src { + v := val + dst[k] = &v + } + return dst +} + +// BoolValueMap converts a string map of bool pointers into a string +// map of bool values +func BoolValueMap(src map[string]*bool) map[string]bool { + dst := make(map[string]bool) + for k, val := range src { + if val != nil { + dst[k] = *val + } + } + return dst +} + +// Int returns a pointer to the int value passed in. +func Int(v int) *int { + return &v +} + +// IntValue returns the value of the int pointer passed in or +// 0 if the pointer is nil. 
+func IntValue(v *int) int { + if v != nil { + return *v + } + return 0 +} + +// IntSlice converts a slice of int values into a slice of +// int pointers +func IntSlice(src []int) []*int { + dst := make([]*int, len(src)) + for i := 0; i < len(src); i++ { + dst[i] = &(src[i]) + } + return dst +} + +// IntValueSlice converts a slice of int pointers into a slice of +// int values +func IntValueSlice(src []*int) []int { + dst := make([]int, len(src)) + for i := 0; i < len(src); i++ { + if src[i] != nil { + dst[i] = *(src[i]) + } + } + return dst +} + +// IntMap converts a string map of int values into a string +// map of int pointers +func IntMap(src map[string]int) map[string]*int { + dst := make(map[string]*int) + for k, val := range src { + v := val + dst[k] = &v + } + return dst +} + +// IntValueMap converts a string map of int pointers into a string +// map of int values +func IntValueMap(src map[string]*int) map[string]int { + dst := make(map[string]int) + for k, val := range src { + if val != nil { + dst[k] = *val + } + } + return dst +} + +// Int64 returns a pointer to the int64 value passed in. +func Int64(v int64) *int64 { + return &v +} + +// Int64Value returns the value of the int64 pointer passed in or +// 0 if the pointer is nil. +func Int64Value(v *int64) int64 { + if v != nil { + return *v + } + return 0 +} + +// Int64Slice converts a slice of int64 values into a slice of +// int64 pointers +func Int64Slice(src []int64) []*int64 { + dst := make([]*int64, len(src)) + for i := 0; i < len(src); i++ { + dst[i] = &(src[i]) + } + return dst +} + +// Int64ValueSlice converts a slice of int64 pointers into a slice of +// int64 values +func Int64ValueSlice(src []*int64) []int64 { + dst := make([]int64, len(src)) + for i := 0; i < len(src); i++ { + if src[i] != nil { + dst[i] = *(src[i]) + } + } + return dst +} + +// Int64Map converts a string map of int64 values into a string +// map of int64 pointers +func Int64Map(src map[string]int64) map[string]*int64 { + dst := make(map[string]*int64) + for k, val := range src { + v := val + dst[k] = &v + } + return dst +} + +// Int64ValueMap converts a string map of int64 pointers into a string +// map of int64 values +func Int64ValueMap(src map[string]*int64) map[string]int64 { + dst := make(map[string]int64) + for k, val := range src { + if val != nil { + dst[k] = *val + } + } + return dst +} + +// Float64 returns a pointer to the float64 value passed in. +func Float64(v float64) *float64 { + return &v +} + +// Float64Value returns the value of the float64 pointer passed in or +// 0 if the pointer is nil. 
+func Float64Value(v *float64) float64 { + if v != nil { + return *v + } + return 0 +} + +// Float64Slice converts a slice of float64 values into a slice of +// float64 pointers +func Float64Slice(src []float64) []*float64 { + dst := make([]*float64, len(src)) + for i := 0; i < len(src); i++ { + dst[i] = &(src[i]) + } + return dst +} + +// Float64ValueSlice converts a slice of float64 pointers into a slice of +// float64 values +func Float64ValueSlice(src []*float64) []float64 { + dst := make([]float64, len(src)) + for i := 0; i < len(src); i++ { + if src[i] != nil { + dst[i] = *(src[i]) + } + } + return dst +} + +// Float64Map converts a string map of float64 values into a string +// map of float64 pointers +func Float64Map(src map[string]float64) map[string]*float64 { + dst := make(map[string]*float64) + for k, val := range src { + v := val + dst[k] = &v + } + return dst +} + +// Float64ValueMap converts a string map of float64 pointers into a string +// map of float64 values +func Float64ValueMap(src map[string]*float64) map[string]float64 { + dst := make(map[string]float64) + for k, val := range src { + if val != nil { + dst[k] = *val + } + } + return dst +} + +// Time returns a pointer to the time.Time value passed in. +func Time(v time.Time) *time.Time { + return &v +} + +// TimeValue returns the value of the time.Time pointer passed in or +// time.Time{} if the pointer is nil. +func TimeValue(v *time.Time) time.Time { + if v != nil { + return *v + } + return time.Time{} +} + +// TimeUnixMilli returns a Unix timestamp in milliseconds from "January 1, 1970 UTC". +// The result is undefined if the Unix time cannot be represented by an int64. +// Which includes calling TimeUnixMilli on a zero Time is undefined. +// +// This utility is useful for service API's such as CloudWatch Logs which require +// their unix time values to be in milliseconds. +// +// See Go stdlib https://golang.org/pkg/time/#Time.UnixNano for more information. 
+func TimeUnixMilli(t time.Time) int64 { + return t.UnixNano() / int64(time.Millisecond/time.Nanosecond) +} + +// TimeSlice converts a slice of time.Time values into a slice of +// time.Time pointers +func TimeSlice(src []time.Time) []*time.Time { + dst := make([]*time.Time, len(src)) + for i := 0; i < len(src); i++ { + dst[i] = &(src[i]) + } + return dst +} + +// TimeValueSlice converts a slice of time.Time pointers into a slice of +// time.Time values +func TimeValueSlice(src []*time.Time) []time.Time { + dst := make([]time.Time, len(src)) + for i := 0; i < len(src); i++ { + if src[i] != nil { + dst[i] = *(src[i]) + } + } + return dst +} + +// TimeMap converts a string map of time.Time values into a string +// map of time.Time pointers +func TimeMap(src map[string]time.Time) map[string]*time.Time { + dst := make(map[string]*time.Time) + for k, val := range src { + v := val + dst[k] = &v + } + return dst +} + +// TimeValueMap converts a string map of time.Time pointers into a string +// map of time.Time values +func TimeValueMap(src map[string]*time.Time) map[string]time.Time { + dst := make(map[string]time.Time) + for k, val := range src { + if val != nil { + dst[k] = *val + } + } + return dst +} diff --git a/vendor/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/aws/corehandlers/handlers.go b/vendor/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/aws/corehandlers/handlers.go new file mode 100644 index 000000000..8456e29b5 --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/aws/corehandlers/handlers.go @@ -0,0 +1,152 @@ +package corehandlers + +import ( + "bytes" + "fmt" + "io" + "io/ioutil" + "net/http" + "net/url" + "regexp" + "runtime" + "strconv" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/aws/request" +) + +// Interface for matching types which also have a Len method. +type lener interface { + Len() int +} + +// BuildContentLengthHandler builds the content length of a request based on the body, +// or will use the HTTPRequest.Header's "Content-Length" if defined. If unable +// to determine request body length and no "Content-Length" was specified it will panic. +// +// The Content-Length will only be aded to the request if the length of the body +// is greater than 0. If the body is empty or the current `Content-Length` +// header is <= 0, the header will also be stripped. +var BuildContentLengthHandler = request.NamedHandler{Name: "core.BuildContentLengthHandler", Fn: func(r *request.Request) { + var length int64 + + if slength := r.HTTPRequest.Header.Get("Content-Length"); slength != "" { + length, _ = strconv.ParseInt(slength, 10, 64) + } else { + switch body := r.Body.(type) { + case nil: + length = 0 + case lener: + length = int64(body.Len()) + case io.Seeker: + r.BodyStart, _ = body.Seek(0, 1) + end, _ := body.Seek(0, 2) + body.Seek(r.BodyStart, 0) // make sure to seek back to original location + length = end - r.BodyStart + default: + panic("Cannot get length of body, must provide `ContentLength`") + } + } + + if length > 0 { + r.HTTPRequest.ContentLength = length + r.HTTPRequest.Header.Set("Content-Length", fmt.Sprintf("%d", length)) + } else { + r.HTTPRequest.ContentLength = 0 + r.HTTPRequest.Header.Del("Content-Length") + } +}} + +// SDKVersionUserAgentHandler is a request handler for adding the SDK Version to the user agent. 
+var SDKVersionUserAgentHandler = request.NamedHandler{ + Name: "core.SDKVersionUserAgentHandler", + Fn: request.MakeAddToUserAgentHandler(aws.SDKName, aws.SDKVersion, + runtime.Version(), runtime.GOOS, runtime.GOARCH), +} + +var reStatusCode = regexp.MustCompile(`^(\d{3})`) + +// SendHandler is a request handler to send service request using HTTP client. +var SendHandler = request.NamedHandler{Name: "core.SendHandler", Fn: func(r *request.Request) { + var err error + r.HTTPResponse, err = r.Config.HTTPClient.Do(r.HTTPRequest) + if err != nil { + // Prevent leaking if an HTTPResponse was returned. Clean up + // the body. + if r.HTTPResponse != nil { + r.HTTPResponse.Body.Close() + } + // Capture the case where url.Error is returned for error processing + // response. e.g. 301 without location header comes back as string + // error and r.HTTPResponse is nil. Other url redirect errors will + // comeback in a similar method. + if e, ok := err.(*url.Error); ok && e.Err != nil { + if s := reStatusCode.FindStringSubmatch(e.Err.Error()); s != nil { + code, _ := strconv.ParseInt(s[1], 10, 64) + r.HTTPResponse = &http.Response{ + StatusCode: int(code), + Status: http.StatusText(int(code)), + Body: ioutil.NopCloser(bytes.NewReader([]byte{})), + } + return + } + } + if r.HTTPResponse == nil { + // Add a dummy request response object to ensure the HTTPResponse + // value is consistent. + r.HTTPResponse = &http.Response{ + StatusCode: int(0), + Status: http.StatusText(int(0)), + Body: ioutil.NopCloser(bytes.NewReader([]byte{})), + } + } + // Catch all other request errors. + r.Error = awserr.New("RequestError", "send request failed", err) + r.Retryable = aws.Bool(true) // network errors are retryable + } +}} + +// ValidateResponseHandler is a request handler to validate service response. +var ValidateResponseHandler = request.NamedHandler{Name: "core.ValidateResponseHandler", Fn: func(r *request.Request) { + if r.HTTPResponse.StatusCode == 0 || r.HTTPResponse.StatusCode >= 300 { + // this may be replaced by an UnmarshalError handler + r.Error = awserr.New("UnknownError", "unknown error", nil) + } +}} + +// AfterRetryHandler performs final checks to determine if the request should +// be retried and how long to delay. +var AfterRetryHandler = request.NamedHandler{Name: "core.AfterRetryHandler", Fn: func(r *request.Request) { + // If one of the other handlers already set the retry state + // we don't want to override it based on the service's state + if r.Retryable == nil { + r.Retryable = aws.Bool(r.ShouldRetry(r)) + } + + if r.WillRetry() { + r.RetryDelay = r.RetryRules(r) + r.Config.SleepDelay(r.RetryDelay) + + // when the expired token exception occurs the credentials + // need to be expired locally so that the next request to + // get credentials will trigger a credentials refresh. + if r.IsErrorExpired() { + r.Config.Credentials.Expire() + } + + r.RetryCount++ + r.Error = nil + } +}} + +// ValidateEndpointHandler is a request handler to validate a request had the +// appropriate Region and Endpoint set. Will set r.Error if the endpoint or +// region is not valid. 
+var ValidateEndpointHandler = request.NamedHandler{Name: "core.ValidateEndpointHandler", Fn: func(r *request.Request) { + if r.ClientInfo.SigningRegion == "" && aws.StringValue(r.Config.Region) == "" { + r.Error = aws.ErrMissingRegion + } else if r.ClientInfo.Endpoint == "" { + r.Error = aws.ErrMissingEndpoint + } +}} diff --git a/vendor/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/aws/corehandlers/param_validator.go b/vendor/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/aws/corehandlers/param_validator.go new file mode 100644 index 000000000..7d50b1557 --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/aws/corehandlers/param_validator.go @@ -0,0 +1,17 @@ +package corehandlers + +import "github.com/aws/aws-sdk-go/aws/request" + +// ValidateParametersHandler is a request handler to validate the input parameters. +// Validating parameters only has meaning if done prior to the request being sent. +var ValidateParametersHandler = request.NamedHandler{Name: "core.ValidateParametersHandler", Fn: func(r *request.Request) { + if !r.ParamsFilled() { + return + } + + if v, ok := r.Params.(request.Validator); ok { + if err := v.Validate(); err != nil { + r.Error = err + } + } +}} diff --git a/vendor/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/aws/credentials/chain_provider.go b/vendor/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/aws/credentials/chain_provider.go new file mode 100644 index 000000000..857311f64 --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/aws/credentials/chain_provider.go @@ -0,0 +1,100 @@ +package credentials + +import ( + "github.com/aws/aws-sdk-go/aws/awserr" +) + +var ( + // ErrNoValidProvidersFoundInChain Is returned when there are no valid + // providers in the ChainProvider. + // + // This has been deprecated. For verbose error messaging set + // aws.Config.CredentialsChainVerboseErrors to true + // + // @readonly + ErrNoValidProvidersFoundInChain = awserr.New("NoCredentialProviders", + `no valid providers in chain. Deprecated. + For verbose messaging see aws.Config.CredentialsChainVerboseErrors`, + nil) +) + +// A ChainProvider will search for a provider which returns credentials +// and cache that provider until Retrieve is called again. +// +// The ChainProvider provides a way of chaining multiple providers together +// which will pick the first available using priority order of the Providers +// in the list. +// +// If none of the Providers retrieve valid credentials Value, ChainProvider's +// Retrieve() will return the error ErrNoValidProvidersFoundInChain. +// +// If a Provider is found which returns valid credentials Value ChainProvider +// will cache that Provider for all calls to IsExpired(), until Retrieve is +// called again. +// +// Example of ChainProvider to be used with an EnvProvider and EC2RoleProvider. +// In this example EnvProvider will first check if any credentials are available +// vai the environment variables. If there are none ChainProvider will check +// the next Provider in the list, EC2RoleProvider in this case. 
If EC2RoleProvider +// does not return any credentials ChainProvider will return the error +// ErrNoValidProvidersFoundInChain +// +// creds := NewChainCredentials( +// []Provider{ +// &EnvProvider{}, +// &EC2RoleProvider{ +// Client: ec2metadata.New(sess), +// }, +// }) +// +// // Usage of ChainCredentials with aws.Config +// svc := ec2.New(&aws.Config{Credentials: creds}) +// +type ChainProvider struct { + Providers []Provider + curr Provider + VerboseErrors bool +} + +// NewChainCredentials returns a pointer to a new Credentials object +// wrapping a chain of providers. +func NewChainCredentials(providers []Provider) *Credentials { + return NewCredentials(&ChainProvider{ + Providers: append([]Provider{}, providers...), + }) +} + +// Retrieve returns the credentials value or error if no provider returned +// without error. +// +// If a provider is found it will be cached and any calls to IsExpired() +// will return the expired state of the cached provider. +func (c *ChainProvider) Retrieve() (Value, error) { + var errs []error + for _, p := range c.Providers { + creds, err := p.Retrieve() + if err == nil { + c.curr = p + return creds, nil + } + errs = append(errs, err) + } + c.curr = nil + + var err error + err = ErrNoValidProvidersFoundInChain + if c.VerboseErrors { + err = awserr.NewBatchError("NoCredentialProviders", "no valid providers in chain", errs) + } + return Value{}, err +} + +// IsExpired will returned the expired state of the currently cached provider +// if there is one. If there is no current provider, true will be returned. +func (c *ChainProvider) IsExpired() bool { + if c.curr != nil { + return c.curr.IsExpired() + } + + return true +} diff --git a/vendor/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/aws/credentials/credentials.go b/vendor/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/aws/credentials/credentials.go new file mode 100644 index 000000000..7b8ebf5f9 --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/aws/credentials/credentials.go @@ -0,0 +1,223 @@ +// Package credentials provides credential retrieval and management +// +// The Credentials is the primary method of getting access to and managing +// credentials Values. Using dependency injection retrieval of the credential +// values is handled by a object which satisfies the Provider interface. +// +// By default the Credentials.Get() will cache the successful result of a +// Provider's Retrieve() until Provider.IsExpired() returns true. At which +// point Credentials will call Provider's Retrieve() to get new credential Value. +// +// The Provider is responsible for determining when credentials Value have expired. +// It is also important to note that Credentials will always call Retrieve the +// first time Credentials.Get() is called. +// +// Example of using the environment variable credentials. +// +// creds := NewEnvCredentials() +// +// // Retrieve the credentials value +// credValue, err := creds.Get() +// if err != nil { +// // handle error +// } +// +// Example of forcing credentials to expire and be refreshed on the next Get(). +// This may be helpful to proactively expire credentials and refresh them sooner +// than they would naturally expire on their own. +// +// creds := NewCredentials(&EC2RoleProvider{}) +// creds.Expire() +// credsValue, err := creds.Get() +// // New credentials will be retrieved instead of from cache. 
+// +// +// Custom Provider +// +// Each Provider built into this package also provides a helper method to generate +// a Credentials pointer setup with the provider. To use a custom Provider just +// create a type which satisfies the Provider interface and pass it to the +// NewCredentials method. +// +// type MyProvider struct{} +// func (m *MyProvider) Retrieve() (Value, error) {...} +// func (m *MyProvider) IsExpired() bool {...} +// +// creds := NewCredentials(&MyProvider{}) +// credValue, err := creds.Get() +// +package credentials + +import ( + "sync" + "time" +) + +// AnonymousCredentials is an empty Credential object that can be used as +// dummy placeholder credentials for requests that do not need signed. +// +// This Credentials can be used to configure a service to not sign requests +// when making service API calls. For example, when accessing public +// s3 buckets. +// +// svc := s3.New(&aws.Config{Credentials: AnonymousCredentials}) +// // Access public S3 buckets. +// +// @readonly +var AnonymousCredentials = NewStaticCredentials("", "", "") + +// A Value is the AWS credentials value for individual credential fields. +type Value struct { + // AWS Access key ID + AccessKeyID string + + // AWS Secret Access Key + SecretAccessKey string + + // AWS Session Token + SessionToken string + + // Provider used to get credentials + ProviderName string +} + +// A Provider is the interface for any component which will provide credentials +// Value. A provider is required to manage its own Expired state, and what to +// be expired means. +// +// The Provider should not need to implement its own mutexes, because +// that will be managed by Credentials. +type Provider interface { + // Refresh returns nil if it successfully retrieved the value. + // Error is returned if the value were not obtainable, or empty. + Retrieve() (Value, error) + + // IsExpired returns if the credentials are no longer valid, and need + // to be retrieved. + IsExpired() bool +} + +// A Expiry provides shared expiration logic to be used by credentials +// providers to implement expiry functionality. +// +// The best method to use this struct is as an anonymous field within the +// provider's struct. +// +// Example: +// type EC2RoleProvider struct { +// Expiry +// ... +// } +type Expiry struct { + // The date/time when to expire on + expiration time.Time + + // If set will be used by IsExpired to determine the current time. + // Defaults to time.Now if CurrentTime is not set. Available for testing + // to be able to mock out the current time. + CurrentTime func() time.Time +} + +// SetExpiration sets the expiration IsExpired will check when called. +// +// If window is greater than 0 the expiration time will be reduced by the +// window value. +// +// Using a window is helpful to trigger credentials to expire sooner than +// the expiration time given to ensure no requests are made with expired +// tokens. +func (e *Expiry) SetExpiration(expiration time.Time, window time.Duration) { + e.expiration = expiration + if window > 0 { + e.expiration = e.expiration.Add(-window) + } +} + +// IsExpired returns if the credentials are expired. +func (e *Expiry) IsExpired() bool { + if e.CurrentTime == nil { + e.CurrentTime = time.Now + } + return e.expiration.Before(e.CurrentTime()) +} + +// A Credentials provides synchronous safe retrieval of AWS credentials Value. +// Credentials will cache the credentials value until they expire. Once the value +// expires the next Get will attempt to retrieve valid credentials. 
+// +// Credentials is safe to use across multiple goroutines and will manage the +// synchronous state so the Providers do not need to implement their own +// synchronization. +// +// The first Credentials.Get() will always call Provider.Retrieve() to get the +// first instance of the credentials Value. All calls to Get() after that +// will return the cached credentials Value until IsExpired() returns true. +type Credentials struct { + creds Value + forceRefresh bool + m sync.Mutex + + provider Provider +} + +// NewCredentials returns a pointer to a new Credentials with the provider set. +func NewCredentials(provider Provider) *Credentials { + return &Credentials{ + provider: provider, + forceRefresh: true, + } +} + +// Get returns the credentials value, or error if the credentials Value failed +// to be retrieved. +// +// Will return the cached credentials Value if it has not expired. If the +// credentials Value has expired the Provider's Retrieve() will be called +// to refresh the credentials. +// +// If Credentials.Expire() was called the credentials Value will be force +// expired, and the next call to Get() will cause them to be refreshed. +func (c *Credentials) Get() (Value, error) { + c.m.Lock() + defer c.m.Unlock() + + if c.isExpired() { + creds, err := c.provider.Retrieve() + if err != nil { + return Value{}, err + } + c.creds = creds + c.forceRefresh = false + } + + return c.creds, nil +} + +// Expire expires the credentials and forces them to be retrieved on the +// next call to Get(). +// +// This will override the Provider's expired state, and force Credentials +// to call the Provider's Retrieve(). +func (c *Credentials) Expire() { + c.m.Lock() + defer c.m.Unlock() + + c.forceRefresh = true +} + +// IsExpired returns if the credentials are no longer valid, and need +// to be retrieved. +// +// If the Credentials were forced to be expired with Expire() this will +// reflect that override. +func (c *Credentials) IsExpired() bool { + c.m.Lock() + defer c.m.Unlock() + + return c.isExpired() +} + +// isExpired helper method wrapping the definition of expired credentials. +func (c *Credentials) isExpired() bool { + return c.forceRefresh || c.provider.IsExpired() +} diff --git a/vendor/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/aws/credentials/ec2rolecreds/ec2_role_provider.go b/vendor/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/aws/credentials/ec2rolecreds/ec2_role_provider.go new file mode 100644 index 000000000..aa9d689a0 --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/aws/credentials/ec2rolecreds/ec2_role_provider.go @@ -0,0 +1,178 @@ +package ec2rolecreds + +import ( + "bufio" + "encoding/json" + "fmt" + "path" + "strings" + "time" + + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/aws/client" + "github.com/aws/aws-sdk-go/aws/credentials" + "github.com/aws/aws-sdk-go/aws/ec2metadata" +) + +// ProviderName provides a name of EC2Role provider +const ProviderName = "EC2RoleProvider" + +// A EC2RoleProvider retrieves credentials from the EC2 service, and keeps track if +// those credentials are expired. +// +// Example how to configure the EC2RoleProvider with custom http Client, Endpoint +// or ExpiryWindow +// +// p := &ec2rolecreds.EC2RoleProvider{ +// // Pass in a custom timeout to be used when requesting +// // IAM EC2 Role credentials. 
+// Client: ec2metadata.New(sess, aws.Config{ +// HTTPClient: &http.Client{Timeout: 10 * time.Second}, +// }), +// +// // Do not use early expiry of credentials. If a non zero value is +// // specified the credentials will be expired early +// ExpiryWindow: 0, +// } +type EC2RoleProvider struct { + credentials.Expiry + + // Required EC2Metadata client to use when connecting to EC2 metadata service. + Client *ec2metadata.EC2Metadata + + // ExpiryWindow will allow the credentials to trigger refreshing prior to + // the credentials actually expiring. This is beneficial so race conditions + // with expiring credentials do not cause request to fail unexpectedly + // due to ExpiredTokenException exceptions. + // + // So a ExpiryWindow of 10s would cause calls to IsExpired() to return true + // 10 seconds before the credentials are actually expired. + // + // If ExpiryWindow is 0 or less it will be ignored. + ExpiryWindow time.Duration +} + +// NewCredentials returns a pointer to a new Credentials object wrapping +// the EC2RoleProvider. Takes a ConfigProvider to create a EC2Metadata client. +// The ConfigProvider is satisfied by the session.Session type. +func NewCredentials(c client.ConfigProvider, options ...func(*EC2RoleProvider)) *credentials.Credentials { + p := &EC2RoleProvider{ + Client: ec2metadata.New(c), + } + + for _, option := range options { + option(p) + } + + return credentials.NewCredentials(p) +} + +// NewCredentialsWithClient returns a pointer to a new Credentials object wrapping +// the EC2RoleProvider. Takes a EC2Metadata client to use when connecting to EC2 +// metadata service. +func NewCredentialsWithClient(client *ec2metadata.EC2Metadata, options ...func(*EC2RoleProvider)) *credentials.Credentials { + p := &EC2RoleProvider{ + Client: client, + } + + for _, option := range options { + option(p) + } + + return credentials.NewCredentials(p) +} + +// Retrieve retrieves credentials from the EC2 service. +// Error will be returned if the request fails, or unable to extract +// the desired credentials. +func (m *EC2RoleProvider) Retrieve() (credentials.Value, error) { + credsList, err := requestCredList(m.Client) + if err != nil { + return credentials.Value{ProviderName: ProviderName}, err + } + + if len(credsList) == 0 { + return credentials.Value{ProviderName: ProviderName}, awserr.New("EmptyEC2RoleList", "empty EC2 Role list", nil) + } + credsName := credsList[0] + + roleCreds, err := requestCred(m.Client, credsName) + if err != nil { + return credentials.Value{ProviderName: ProviderName}, err + } + + m.SetExpiration(roleCreds.Expiration, m.ExpiryWindow) + + return credentials.Value{ + AccessKeyID: roleCreds.AccessKeyID, + SecretAccessKey: roleCreds.SecretAccessKey, + SessionToken: roleCreds.Token, + ProviderName: ProviderName, + }, nil +} + +// A ec2RoleCredRespBody provides the shape for unmarshalling credential +// request responses. +type ec2RoleCredRespBody struct { + // Success State + Expiration time.Time + AccessKeyID string + SecretAccessKey string + Token string + + // Error state + Code string + Message string +} + +const iamSecurityCredsPath = "/iam/security-credentials" + +// requestCredList requests a list of credentials from the EC2 service. 
+// If there are no credentials, or there is an error making or receiving the request +func requestCredList(client *ec2metadata.EC2Metadata) ([]string, error) { + resp, err := client.GetMetadata(iamSecurityCredsPath) + if err != nil { + return nil, awserr.New("EC2RoleRequestError", "no EC2 instance role found", err) + } + + credsList := []string{} + s := bufio.NewScanner(strings.NewReader(resp)) + for s.Scan() { + credsList = append(credsList, s.Text()) + } + + if err := s.Err(); err != nil { + return nil, awserr.New("SerializationError", "failed to read EC2 instance role from metadata service", err) + } + + return credsList, nil +} + +// requestCred requests the credentials for a specific credentials from the EC2 service. +// +// If the credentials cannot be found, or there is an error reading the response +// and error will be returned. +func requestCred(client *ec2metadata.EC2Metadata, credsName string) (ec2RoleCredRespBody, error) { + resp, err := client.GetMetadata(path.Join(iamSecurityCredsPath, credsName)) + if err != nil { + return ec2RoleCredRespBody{}, + awserr.New("EC2RoleRequestError", + fmt.Sprintf("failed to get %s EC2 instance role credentials", credsName), + err) + } + + respCreds := ec2RoleCredRespBody{} + if err := json.NewDecoder(strings.NewReader(resp)).Decode(&respCreds); err != nil { + return ec2RoleCredRespBody{}, + awserr.New("SerializationError", + fmt.Sprintf("failed to decode %s EC2 instance role credentials", credsName), + err) + } + + if respCreds.Code != "Success" { + // If an error code was returned something failed requesting the role. + return ec2RoleCredRespBody{}, awserr.New(respCreds.Code, respCreds.Message, nil) + } + + return respCreds, nil +} diff --git a/vendor/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/aws/credentials/env_provider.go b/vendor/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/aws/credentials/env_provider.go new file mode 100644 index 000000000..96655bc46 --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/aws/credentials/env_provider.go @@ -0,0 +1,77 @@ +package credentials + +import ( + "os" + + "github.com/aws/aws-sdk-go/aws/awserr" +) + +// EnvProviderName provides a name of Env provider +const EnvProviderName = "EnvProvider" + +var ( + // ErrAccessKeyIDNotFound is returned when the AWS Access Key ID can't be + // found in the process's environment. + // + // @readonly + ErrAccessKeyIDNotFound = awserr.New("EnvAccessKeyNotFound", "AWS_ACCESS_KEY_ID or AWS_ACCESS_KEY not found in environment", nil) + + // ErrSecretAccessKeyNotFound is returned when the AWS Secret Access Key + // can't be found in the process's environment. + // + // @readonly + ErrSecretAccessKeyNotFound = awserr.New("EnvSecretNotFound", "AWS_SECRET_ACCESS_KEY or AWS_SECRET_KEY not found in environment", nil) +) + +// A EnvProvider retrieves credentials from the environment variables of the +// running process. Environment credentials never expire. +// +// Environment variables used: +// +// * Access Key ID: AWS_ACCESS_KEY_ID or AWS_ACCESS_KEY +// * Secret Access Key: AWS_SECRET_ACCESS_KEY or AWS_SECRET_KEY +type EnvProvider struct { + retrieved bool +} + +// NewEnvCredentials returns a pointer to a new Credentials object +// wrapping the environment variable provider. +func NewEnvCredentials() *Credentials { + return NewCredentials(&EnvProvider{}) +} + +// Retrieve retrieves the keys from the environment. 
+func (e *EnvProvider) Retrieve() (Value, error) { + e.retrieved = false + + id := os.Getenv("AWS_ACCESS_KEY_ID") + if id == "" { + id = os.Getenv("AWS_ACCESS_KEY") + } + + secret := os.Getenv("AWS_SECRET_ACCESS_KEY") + if secret == "" { + secret = os.Getenv("AWS_SECRET_KEY") + } + + if id == "" { + return Value{ProviderName: EnvProviderName}, ErrAccessKeyIDNotFound + } + + if secret == "" { + return Value{ProviderName: EnvProviderName}, ErrSecretAccessKeyNotFound + } + + e.retrieved = true + return Value{ + AccessKeyID: id, + SecretAccessKey: secret, + SessionToken: os.Getenv("AWS_SESSION_TOKEN"), + ProviderName: EnvProviderName, + }, nil +} + +// IsExpired returns if the credentials have been retrieved. +func (e *EnvProvider) IsExpired() bool { + return !e.retrieved +} diff --git a/vendor/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/aws/credentials/example.ini b/vendor/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/aws/credentials/example.ini new file mode 100644 index 000000000..7fc91d9d2 --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/aws/credentials/example.ini @@ -0,0 +1,12 @@ +[default] +aws_access_key_id = accessKey +aws_secret_access_key = secret +aws_session_token = token + +[no_token] +aws_access_key_id = accessKey +aws_secret_access_key = secret + +[with_colon] +aws_access_key_id: accessKey +aws_secret_access_key: secret diff --git a/vendor/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/aws/credentials/shared_credentials_provider.go b/vendor/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/aws/credentials/shared_credentials_provider.go new file mode 100644 index 000000000..7fb7cbf0d --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/aws/credentials/shared_credentials_provider.go @@ -0,0 +1,151 @@ +package credentials + +import ( + "fmt" + "os" + "path/filepath" + + "github.com/go-ini/ini" + + "github.com/aws/aws-sdk-go/aws/awserr" +) + +// SharedCredsProviderName provides a name of SharedCreds provider +const SharedCredsProviderName = "SharedCredentialsProvider" + +var ( + // ErrSharedCredentialsHomeNotFound is emitted when the user directory cannot be found. + // + // @readonly + ErrSharedCredentialsHomeNotFound = awserr.New("UserHomeNotFound", "user home directory not found.", nil) +) + +// A SharedCredentialsProvider retrieves credentials from the current user's home +// directory, and keeps track if those credentials are expired. +// +// Profile ini file example: $HOME/.aws/credentials +type SharedCredentialsProvider struct { + // Path to the shared credentials file. + // + // If empty will look for "AWS_SHARED_CREDENTIALS_FILE" env variable. If the + // env value is empty will default to current user's home directory. + // Linux/OSX: "$HOME/.aws/credentials" + // Windows: "%USERPROFILE%\.aws\credentials" + Filename string + + // AWS Profile to extract credentials from the shared credentials file. If empty + // will default to environment variable "AWS_PROFILE" or "default" if + // environment variable is also not set. + Profile string + + // retrieved states if the credentials have been successfully retrieved. + retrieved bool +} + +// NewSharedCredentials returns a pointer to a new Credentials object +// wrapping the Profile file provider. 
+func NewSharedCredentials(filename, profile string) *Credentials { + return NewCredentials(&SharedCredentialsProvider{ + Filename: filename, + Profile: profile, + }) +} + +// Retrieve reads and extracts the shared credentials from the current +// users home directory. +func (p *SharedCredentialsProvider) Retrieve() (Value, error) { + p.retrieved = false + + filename, err := p.filename() + if err != nil { + return Value{ProviderName: SharedCredsProviderName}, err + } + + creds, err := loadProfile(filename, p.profile()) + if err != nil { + return Value{ProviderName: SharedCredsProviderName}, err + } + + p.retrieved = true + return creds, nil +} + +// IsExpired returns if the shared credentials have expired. +func (p *SharedCredentialsProvider) IsExpired() bool { + return !p.retrieved +} + +// loadProfiles loads from the file pointed to by shared credentials filename for profile. +// The credentials retrieved from the profile will be returned or error. Error will be +// returned if it fails to read from the file, or the data is invalid. +func loadProfile(filename, profile string) (Value, error) { + config, err := ini.Load(filename) + if err != nil { + return Value{ProviderName: SharedCredsProviderName}, awserr.New("SharedCredsLoad", "failed to load shared credentials file", err) + } + iniProfile, err := config.GetSection(profile) + if err != nil { + return Value{ProviderName: SharedCredsProviderName}, awserr.New("SharedCredsLoad", "failed to get profile", err) + } + + id, err := iniProfile.GetKey("aws_access_key_id") + if err != nil { + return Value{ProviderName: SharedCredsProviderName}, awserr.New("SharedCredsAccessKey", + fmt.Sprintf("shared credentials %s in %s did not contain aws_access_key_id", profile, filename), + err) + } + + secret, err := iniProfile.GetKey("aws_secret_access_key") + if err != nil { + return Value{ProviderName: SharedCredsProviderName}, awserr.New("SharedCredsSecret", + fmt.Sprintf("shared credentials %s in %s did not contain aws_secret_access_key", profile, filename), + nil) + } + + // Default to empty string if not found + token := iniProfile.Key("aws_session_token") + + return Value{ + AccessKeyID: id.String(), + SecretAccessKey: secret.String(), + SessionToken: token.String(), + ProviderName: SharedCredsProviderName, + }, nil +} + +// filename returns the filename to use to read AWS shared credentials. +// +// Will return an error if the user's home directory path cannot be found. +func (p *SharedCredentialsProvider) filename() (string, error) { + if p.Filename == "" { + if p.Filename = os.Getenv("AWS_SHARED_CREDENTIALS_FILE"); p.Filename != "" { + return p.Filename, nil + } + + homeDir := os.Getenv("HOME") // *nix + if homeDir == "" { // Windows + homeDir = os.Getenv("USERPROFILE") + } + if homeDir == "" { + return "", ErrSharedCredentialsHomeNotFound + } + + p.Filename = filepath.Join(homeDir, ".aws", "credentials") + } + + return p.Filename, nil +} + +// profile returns the AWS shared credentials profile. If empty will read +// environment variable "AWS_PROFILE". If that is not set profile will +// return "default". 
+func (p *SharedCredentialsProvider) profile() string { + if p.Profile == "" { + p.Profile = os.Getenv("AWS_PROFILE") + } + if p.Profile == "" { + p.Profile = "default" + } + + return p.Profile +} diff --git a/vendor/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/aws/credentials/static_provider.go b/vendor/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/aws/credentials/static_provider.go new file mode 100644 index 000000000..6f075604e --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/aws/credentials/static_provider.go @@ -0,0 +1,48 @@ +package credentials + +import ( + "github.com/aws/aws-sdk-go/aws/awserr" +) + +// StaticProviderName provides a name of Static provider +const StaticProviderName = "StaticProvider" + +var ( + // ErrStaticCredentialsEmpty is emitted when static credentials are empty. + // + // @readonly + ErrStaticCredentialsEmpty = awserr.New("EmptyStaticCreds", "static credentials are empty", nil) +) + +// A StaticProvider is a set of credentials which are set programmatically, +// and will never expire. +type StaticProvider struct { + Value +} + +// NewStaticCredentials returns a pointer to a new Credentials object +// wrapping a static credentials value provider. +func NewStaticCredentials(id, secret, token string) *Credentials { + return NewCredentials(&StaticProvider{Value: Value{ + AccessKeyID: id, + SecretAccessKey: secret, + SessionToken: token, + }}) +} + +// Retrieve returns the credentials or error if the credentials are invalid. +func (s *StaticProvider) Retrieve() (Value, error) { + if s.AccessKeyID == "" || s.SecretAccessKey == "" { + return Value{ProviderName: StaticProviderName}, ErrStaticCredentialsEmpty + } + + s.Value.ProviderName = StaticProviderName + return s.Value, nil +} + +// IsExpired returns if the credentials are expired. +// +// For StaticProvider, the credentials never expired. +func (s *StaticProvider) IsExpired() bool { + return false +} diff --git a/vendor/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/aws/defaults/defaults.go b/vendor/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/aws/defaults/defaults.go new file mode 100644 index 000000000..12be1a5d7 --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/aws/defaults/defaults.go @@ -0,0 +1,98 @@ +// Package defaults is a collection of helpers to retrieve the SDK's default +// configuration and handlers. +// +// Generally this package shouldn't be used directly, but session.Session +// instead. This package is useful when you need to reset the defaults +// of a session or service client to the SDK defaults before setting +// additional parameters. +package defaults + +import ( + "net/http" + "os" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/corehandlers" + "github.com/aws/aws-sdk-go/aws/credentials" + "github.com/aws/aws-sdk-go/aws/credentials/ec2rolecreds" + "github.com/aws/aws-sdk-go/aws/ec2metadata" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/private/endpoints" +) + +// A Defaults provides a collection of default values for SDK clients. +type Defaults struct { + Config *aws.Config + Handlers request.Handlers +} + +// Get returns the SDK's default values with Config and handlers pre-configured. 
+func Get() Defaults { + cfg := Config() + handlers := Handlers() + cfg.Credentials = CredChain(cfg, handlers) + + return Defaults{ + Config: cfg, + Handlers: handlers, + } +} + +// Config returns the default configuration without credentials. +// To retrieve a config with credentials also included use +// `defaults.Get().Config` instead. +// +// Generally you shouldn't need to use this method directly, but +// is available if you need to reset the configuration of an +// existing service client or session. +func Config() *aws.Config { + return aws.NewConfig(). + WithCredentials(credentials.AnonymousCredentials). + WithRegion(os.Getenv("AWS_REGION")). + WithHTTPClient(http.DefaultClient). + WithMaxRetries(aws.UseServiceDefaultRetries). + WithLogger(aws.NewDefaultLogger()). + WithLogLevel(aws.LogOff). + WithSleepDelay(time.Sleep) +} + +// Handlers returns the default request handlers. +// +// Generally you shouldn't need to use this method directly, but +// is available if you need to reset the request handlers of an +// existing service client or session. +func Handlers() request.Handlers { + var handlers request.Handlers + + handlers.Validate.PushBackNamed(corehandlers.ValidateEndpointHandler) + handlers.Validate.AfterEachFn = request.HandlerListStopOnError + handlers.Build.PushBackNamed(corehandlers.SDKVersionUserAgentHandler) + handlers.Build.AfterEachFn = request.HandlerListStopOnError + handlers.Sign.PushBackNamed(corehandlers.BuildContentLengthHandler) + handlers.Send.PushBackNamed(corehandlers.SendHandler) + handlers.AfterRetry.PushBackNamed(corehandlers.AfterRetryHandler) + handlers.ValidateResponse.PushBackNamed(corehandlers.ValidateResponseHandler) + + return handlers +} + +// CredChain returns the default credential chain. +// +// Generally you shouldn't need to use this method directly, but +// is available if you need to reset the credentials of an +// existing service client or session's Config. +func CredChain(cfg *aws.Config, handlers request.Handlers) *credentials.Credentials { + endpoint, signingRegion := endpoints.EndpointForRegion(ec2metadata.ServiceName, *cfg.Region, true) + + return credentials.NewCredentials(&credentials.ChainProvider{ + VerboseErrors: aws.BoolValue(cfg.CredentialsChainVerboseErrors), + Providers: []credentials.Provider{ + &credentials.EnvProvider{}, + &credentials.SharedCredentialsProvider{Filename: "", Profile: ""}, + &ec2rolecreds.EC2RoleProvider{ + Client: ec2metadata.NewClient(*cfg, handlers, endpoint, signingRegion), + ExpiryWindow: 5 * time.Minute, + }, + }}) +} diff --git a/vendor/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/api.go b/vendor/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/api.go new file mode 100644 index 000000000..669c813a0 --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/api.go @@ -0,0 +1,140 @@ +package ec2metadata + +import ( + "encoding/json" + "fmt" + "path" + "strings" + "time" + + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/aws/request" +) + +// GetMetadata uses the path provided to request information from the EC2 +// instance metdata service. The content will be returned as a string, or +// error if the request failed. 
+func (c *EC2Metadata) GetMetadata(p string) (string, error) { + op := &request.Operation{ + Name: "GetMetadata", + HTTPMethod: "GET", + HTTPPath: path.Join("/", "meta-data", p), + } + + output := &metadataOutput{} + req := c.NewRequest(op, nil, output) + + return output.Content, req.Send() +} + +// GetDynamicData uses the path provided to request information from the EC2 +// instance metadata service for dynamic data. The content will be returned +// as a string, or error if the request failed. +func (c *EC2Metadata) GetDynamicData(p string) (string, error) { + op := &request.Operation{ + Name: "GetDynamicData", + HTTPMethod: "GET", + HTTPPath: path.Join("/", "dynamic", p), + } + + output := &metadataOutput{} + req := c.NewRequest(op, nil, output) + + return output.Content, req.Send() +} + +// GetInstanceIdentityDocument retrieves an identity document describing an +// instance. Error is returned if the request fails or is unable to parse +// the response. +func (c *EC2Metadata) GetInstanceIdentityDocument() (EC2InstanceIdentityDocument, error) { + resp, err := c.GetDynamicData("instance-identity/document") + if err != nil { + return EC2InstanceIdentityDocument{}, + awserr.New("EC2MetadataRequestError", + "failed to get EC2 instance identity document", err) + } + + doc := EC2InstanceIdentityDocument{} + if err := json.NewDecoder(strings.NewReader(resp)).Decode(&doc); err != nil { + return EC2InstanceIdentityDocument{}, + awserr.New("SerializationError", + "failed to decode EC2 instance identity document", err) + } + + return doc, nil +} + +// IAMInfo retrieves IAM info from the metadata API +func (c *EC2Metadata) IAMInfo() (EC2IAMInfo, error) { + resp, err := c.GetMetadata("iam/info") + if err != nil { + return EC2IAMInfo{}, + awserr.New("EC2MetadataRequestError", + "failed to get EC2 IAM info", err) + } + + info := EC2IAMInfo{} + if err := json.NewDecoder(strings.NewReader(resp)).Decode(&info); err != nil { + return EC2IAMInfo{}, + awserr.New("SerializationError", + "failed to decode EC2 IAM info", err) + } + + if info.Code != "Success" { + errMsg := fmt.Sprintf("failed to get EC2 IAM Info (%s)", info.Code) + return EC2IAMInfo{}, + awserr.New("EC2MetadataError", errMsg, nil) + } + + return info, nil +} + +// Region returns the region the instance is running in. +func (c *EC2Metadata) Region() (string, error) { + resp, err := c.GetMetadata("placement/availability-zone") + if err != nil { + return "", err + } + + // returns region without the suffix. Eg: us-west-2a becomes us-west-2 + return resp[:len(resp)-1], nil +} + +// Available returns if the application has access to the EC2 Metadata service. +// Can be used to determine if application is running within an EC2 Instance and +// the metadata service is available. 
+func (c *EC2Metadata) Available() bool { + if _, err := c.GetMetadata("instance-id"); err != nil { + return false + } + + return true +} + +// An EC2IAMInfo provides the shape for unmarshalling +// an IAM info from the metadata API +type EC2IAMInfo struct { + Code string + LastUpdated time.Time + InstanceProfileArn string + InstanceProfileID string +} + +// An EC2InstanceIdentityDocument provides the shape for unmarshalling +// an instance identity document +type EC2InstanceIdentityDocument struct { + DevpayProductCodes []string `json:"devpayProductCodes"` + AvailabilityZone string `json:"availabilityZone"` + PrivateIP string `json:"privateIp"` + Version string `json:"version"` + Region string `json:"region"` + InstanceID string `json:"instanceId"` + BillingProducts []string `json:"billingProducts"` + InstanceType string `json:"instanceType"` + AccountID string `json:"accountId"` + PendingTime time.Time `json:"pendingTime"` + ImageID string `json:"imageId"` + KernelID string `json:"kernelId"` + RamdiskID string `json:"ramdiskId"` + Architecture string `json:"architecture"` +} diff --git a/vendor/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/service.go b/vendor/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/service.go new file mode 100644 index 000000000..5b4379dbd --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/service.go @@ -0,0 +1,124 @@ +// Package ec2metadata provides the client for making API calls to the +// EC2 Metadata service. +package ec2metadata + +import ( + "bytes" + "errors" + "io" + "net/http" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/aws/client" + "github.com/aws/aws-sdk-go/aws/client/metadata" + "github.com/aws/aws-sdk-go/aws/request" +) + +// ServiceName is the name of the service. +const ServiceName = "ec2metadata" + +// A EC2Metadata is an EC2 Metadata service Client. +type EC2Metadata struct { + *client.Client +} + +// New creates a new instance of the EC2Metadata client with a session. +// This client is safe to use across multiple goroutines. +// +// +// Example: +// // Create a EC2Metadata client from just a session. +// svc := ec2metadata.New(mySession) +// +// // Create a EC2Metadata client with additional configuration +// svc := ec2metadata.New(mySession, aws.NewConfig().WithLogLevel(aws.LogDebugHTTPBody)) +func New(p client.ConfigProvider, cfgs ...*aws.Config) *EC2Metadata { + c := p.ClientConfig(ServiceName, cfgs...) + return NewClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion) +} + +// NewClient returns a new EC2Metadata client. Should be used to create +// a client when not using a session. Generally using just New with a session +// is preferred. +// +// If an unmodified HTTP client is provided from the stdlib default, or no client +// the EC2RoleProvider's EC2Metadata HTTP client's timeout will be shortened. +// To disable this set Config.EC2MetadataDisableTimeoutOverride to false. Enabled by default. +func NewClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string, opts ...func(*client.Client)) *EC2Metadata { + if !aws.BoolValue(cfg.EC2MetadataDisableTimeoutOverride) && httpClientZero(cfg.HTTPClient) { + // If the http client is unmodified and this feature is not disabled + // set custom timeouts for EC2Metadata requests. 
+ cfg.HTTPClient = &http.Client{ + // use a shorter timeout than default because the metadata + // service is local if it is running, and to fail faster + // if not running on an ec2 instance. + Timeout: 5 * time.Second, + } + } + + svc := &EC2Metadata{ + Client: client.New( + cfg, + metadata.ClientInfo{ + ServiceName: ServiceName, + Endpoint: endpoint, + APIVersion: "latest", + }, + handlers, + ), + } + + svc.Handlers.Unmarshal.PushBack(unmarshalHandler) + svc.Handlers.UnmarshalError.PushBack(unmarshalError) + svc.Handlers.Validate.Clear() + svc.Handlers.Validate.PushBack(validateEndpointHandler) + + // Add additional options to the service config + for _, option := range opts { + option(svc.Client) + } + + return svc +} + +func httpClientZero(c *http.Client) bool { + return c == nil || (c.Transport == nil && c.CheckRedirect == nil && c.Jar == nil && c.Timeout == 0) +} + +type metadataOutput struct { + Content string +} + +func unmarshalHandler(r *request.Request) { + defer r.HTTPResponse.Body.Close() + b := &bytes.Buffer{} + if _, err := io.Copy(b, r.HTTPResponse.Body); err != nil { + r.Error = awserr.New("SerializationError", "unable to unmarshal EC2 metadata respose", err) + return + } + + if data, ok := r.Data.(*metadataOutput); ok { + data.Content = b.String() + } +} + +func unmarshalError(r *request.Request) { + defer r.HTTPResponse.Body.Close() + b := &bytes.Buffer{} + if _, err := io.Copy(b, r.HTTPResponse.Body); err != nil { + r.Error = awserr.New("SerializationError", "unable to unmarshal EC2 metadata error respose", err) + return + } + + // Response body format is not consistent between metadata endpoints. + // Grab the error message as a string and include that as the source error + r.Error = awserr.New("EC2MetadataError", "failed to make EC2Metadata request", errors.New(b.String())) +} + +func validateEndpointHandler(r *request.Request) { + if r.ClientInfo.Endpoint == "" { + r.Error = aws.ErrMissingEndpoint + } +} diff --git a/vendor/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/aws/errors.go b/vendor/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/aws/errors.go new file mode 100644 index 000000000..576636168 --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/aws/errors.go @@ -0,0 +1,17 @@ +package aws + +import "github.com/aws/aws-sdk-go/aws/awserr" + +var ( + // ErrMissingRegion is an error that is returned if region configuration is + // not found. + // + // @readonly + ErrMissingRegion = awserr.New("MissingRegion", "could not find region configuration", nil) + + // ErrMissingEndpoint is an error that is returned if an endpoint cannot be + // resolved for a service. + // + // @readonly + ErrMissingEndpoint = awserr.New("MissingEndpoint", "'Endpoint' configuration is required for this service", nil) +) diff --git a/vendor/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/aws/logger.go b/vendor/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/aws/logger.go new file mode 100644 index 000000000..db87188e2 --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/aws/logger.go @@ -0,0 +1,112 @@ +package aws + +import ( + "log" + "os" +) + +// A LogLevelType defines the level logging should be performed at. Used to instruct +// the SDK which statements should be logged. +type LogLevelType uint + +// LogLevel returns the pointer to a LogLevel. Should be used to workaround +// not being able to take the address of a non-composite literal. 
+func LogLevel(l LogLevelType) *LogLevelType { + return &l +} + +// Value returns the LogLevel value or the default value LogOff if the LogLevel +// is nil. Safe to use on nil value LogLevelTypes. +func (l *LogLevelType) Value() LogLevelType { + if l != nil { + return *l + } + return LogOff +} + +// Matches returns true if the v LogLevel is enabled by this LogLevel. Should be +// used with logging sub levels. Is safe to use on nil value LogLevelTypes. If +// LogLevel is nill, will default to LogOff comparison. +func (l *LogLevelType) Matches(v LogLevelType) bool { + c := l.Value() + return c&v == v +} + +// AtLeast returns true if this LogLevel is at least high enough to satisfies v. +// Is safe to use on nil value LogLevelTypes. If LogLevel is nill, will default +// to LogOff comparison. +func (l *LogLevelType) AtLeast(v LogLevelType) bool { + c := l.Value() + return c >= v +} + +const ( + // LogOff states that no logging should be performed by the SDK. This is the + // default state of the SDK, and should be use to disable all logging. + LogOff LogLevelType = iota * 0x1000 + + // LogDebug state that debug output should be logged by the SDK. This should + // be used to inspect request made and responses received. + LogDebug +) + +// Debug Logging Sub Levels +const ( + // LogDebugWithSigning states that the SDK should log request signing and + // presigning events. This should be used to log the signing details of + // requests for debugging. Will also enable LogDebug. + LogDebugWithSigning LogLevelType = LogDebug | (1 << iota) + + // LogDebugWithHTTPBody states the SDK should log HTTP request and response + // HTTP bodys in addition to the headers and path. This should be used to + // see the body content of requests and responses made while using the SDK + // Will also enable LogDebug. + LogDebugWithHTTPBody + + // LogDebugWithRequestRetries states the SDK should log when service requests will + // be retried. This should be used to log when you want to log when service + // requests are being retried. Will also enable LogDebug. + LogDebugWithRequestRetries + + // LogDebugWithRequestErrors states the SDK should log when service requests fail + // to build, send, validate, or unmarshal. + LogDebugWithRequestErrors +) + +// A Logger is a minimalistic interface for the SDK to log messages to. Should +// be used to provide custom logging writers for the SDK to use. +type Logger interface { + Log(...interface{}) +} + +// A LoggerFunc is a convenience type to convert a function taking a variadic +// list of arguments and wrap it so the Logger interface can be used. +// +// Example: +// s3.New(sess, &aws.Config{Logger: aws.LoggerFunc(func(args ...interface{}) { +// fmt.Fprintln(os.Stdout, args...) +// })}) +type LoggerFunc func(...interface{}) + +// Log calls the wrapped function with the arguments provided +func (f LoggerFunc) Log(args ...interface{}) { + f(args...) +} + +// NewDefaultLogger returns a Logger which will write log messages to stdout, and +// use same formatting runes as the stdlib log.Logger +func NewDefaultLogger() Logger { + return &defaultLogger{ + logger: log.New(os.Stdout, "", log.LstdFlags), + } +} + +// A defaultLogger provides a minimalistic logger satisfying the Logger interface. +type defaultLogger struct { + logger *log.Logger +} + +// Log logs the parameters to the stdlib logger. See log.Println. +func (l defaultLogger) Log(args ...interface{}) { + l.logger.Println(args...) 
+} diff --git a/vendor/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/aws/request/handlers.go b/vendor/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/aws/request/handlers.go new file mode 100644 index 000000000..5279c19c0 --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/aws/request/handlers.go @@ -0,0 +1,187 @@ +package request + +import ( + "fmt" + "strings" +) + +// A Handlers provides a collection of request handlers for various +// stages of handling requests. +type Handlers struct { + Validate HandlerList + Build HandlerList + Sign HandlerList + Send HandlerList + ValidateResponse HandlerList + Unmarshal HandlerList + UnmarshalMeta HandlerList + UnmarshalError HandlerList + Retry HandlerList + AfterRetry HandlerList +} + +// Copy returns of this handler's lists. +func (h *Handlers) Copy() Handlers { + return Handlers{ + Validate: h.Validate.copy(), + Build: h.Build.copy(), + Sign: h.Sign.copy(), + Send: h.Send.copy(), + ValidateResponse: h.ValidateResponse.copy(), + Unmarshal: h.Unmarshal.copy(), + UnmarshalError: h.UnmarshalError.copy(), + UnmarshalMeta: h.UnmarshalMeta.copy(), + Retry: h.Retry.copy(), + AfterRetry: h.AfterRetry.copy(), + } +} + +// Clear removes callback functions for all handlers +func (h *Handlers) Clear() { + h.Validate.Clear() + h.Build.Clear() + h.Send.Clear() + h.Sign.Clear() + h.Unmarshal.Clear() + h.UnmarshalMeta.Clear() + h.UnmarshalError.Clear() + h.ValidateResponse.Clear() + h.Retry.Clear() + h.AfterRetry.Clear() +} + +// A HandlerListRunItem represents an entry in the HandlerList which +// is being run. +type HandlerListRunItem struct { + Index int + Handler NamedHandler + Request *Request +} + +// A HandlerList manages zero or more handlers in a list. +type HandlerList struct { + list []NamedHandler + + // Called after each request handler in the list is called. If set + // and the func returns true the HandlerList will continue to iterate + // over the request handlers. If false is returned the HandlerList + // will stop iterating. + // + // Should be used if extra logic to be performed between each handler + // in the list. This can be used to terminate a list's iteration + // based on a condition such as error like, HandlerListStopOnError. + // Or for logging like HandlerListLogItem. + AfterEachFn func(item HandlerListRunItem) bool +} + +// A NamedHandler is a struct that contains a name and function callback. +type NamedHandler struct { + Name string + Fn func(*Request) +} + +// copy creates a copy of the handler list. +func (l *HandlerList) copy() HandlerList { + n := HandlerList{ + AfterEachFn: l.AfterEachFn, + } + n.list = append([]NamedHandler{}, l.list...) + return n +} + +// Clear clears the handler list. +func (l *HandlerList) Clear() { + l.list = []NamedHandler{} +} + +// Len returns the number of handlers in the list. +func (l *HandlerList) Len() int { + return len(l.list) +} + +// PushBack pushes handler f to the back of the handler list. +func (l *HandlerList) PushBack(f func(*Request)) { + l.list = append(l.list, NamedHandler{"__anonymous", f}) +} + +// PushFront pushes handler f to the front of the handler list. +func (l *HandlerList) PushFront(f func(*Request)) { + l.list = append([]NamedHandler{{"__anonymous", f}}, l.list...) +} + +// PushBackNamed pushes named handler f to the back of the handler list. 
+func (l *HandlerList) PushBackNamed(n NamedHandler) { + l.list = append(l.list, n) +} + +// PushFrontNamed pushes named handler f to the front of the handler list. +func (l *HandlerList) PushFrontNamed(n NamedHandler) { + l.list = append([]NamedHandler{n}, l.list...) +} + +// Remove removes a NamedHandler n +func (l *HandlerList) Remove(n NamedHandler) { + newlist := []NamedHandler{} + for _, m := range l.list { + if m.Name != n.Name { + newlist = append(newlist, m) + } + } + l.list = newlist +} + +// Run executes all handlers in the list with a given request object. +func (l *HandlerList) Run(r *Request) { + for i, h := range l.list { + h.Fn(r) + item := HandlerListRunItem{ + Index: i, Handler: h, Request: r, + } + if l.AfterEachFn != nil && !l.AfterEachFn(item) { + return + } + } +} + +// HandlerListLogItem logs the request handler and the state of the +// request's Error value. Always returns true to continue iterating +// request handlers in a HandlerList. +func HandlerListLogItem(item HandlerListRunItem) bool { + if item.Request.Config.Logger == nil { + return true + } + item.Request.Config.Logger.Log("DEBUG: RequestHandler", + item.Index, item.Handler.Name, item.Request.Error) + + return true +} + +// HandlerListStopOnError returns false to stop the HandlerList iterating +// over request handlers if Request.Error is not nil. True otherwise +// to continue iterating. +func HandlerListStopOnError(item HandlerListRunItem) bool { + return item.Request.Error == nil +} + +// MakeAddToUserAgentHandler will add the name/version pair to the User-Agent request +// header. If the extra parameters are provided they will be added as metadata to the +// name/version pair resulting in the following format. +// "name/version (extra0; extra1; ...)" +// The user agent part will be concatenated with this current request's user agent string. +func MakeAddToUserAgentHandler(name, version string, extra ...string) func(*Request) { + ua := fmt.Sprintf("%s/%s", name, version) + if len(extra) > 0 { + ua += fmt.Sprintf(" (%s)", strings.Join(extra, "; ")) + } + return func(r *Request) { + AddToUserAgent(r, ua) + } +} + +// MakeAddToUserAgentFreeFormHandler adds the input to the User-Agent request header. +// The input string will be concatenated with the current request's user agent string. 
+func MakeAddToUserAgentFreeFormHandler(s string) func(*Request) { + return func(r *Request) { + AddToUserAgent(r, s) + } +} diff --git a/vendor/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/aws/request/http_request.go b/vendor/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/aws/request/http_request.go new file mode 100644 index 000000000..a4087f20e --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/aws/request/http_request.go @@ -0,0 +1,33 @@ +// +build go1.5 + +package request + +import ( + "io" + "net/http" + "net/url" +) + +func copyHTTPRequest(r *http.Request, body io.ReadCloser) *http.Request { + req := &http.Request{ + URL: &url.URL{}, + Header: http.Header{}, + Close: r.Close, + Body: body, + Host: r.Host, + Method: r.Method, + Proto: r.Proto, + ContentLength: r.ContentLength, + // Cancel will be deprecated in 1.7 and will be replaced with Context + Cancel: r.Cancel, + } + + *req.URL = *r.URL + for k, v := range r.Header { + for _, vv := range v { + req.Header.Add(k, vv) + } + } + + return req +} diff --git a/vendor/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/aws/request/http_request_1_4.go b/vendor/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/aws/request/http_request_1_4.go new file mode 100644 index 000000000..75da021ef --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/aws/request/http_request_1_4.go @@ -0,0 +1,31 @@ +// +build !go1.5 + +package request + +import ( + "io" + "net/http" + "net/url" +) + +func copyHTTPRequest(r *http.Request, body io.ReadCloser) *http.Request { + req := &http.Request{ + URL: &url.URL{}, + Header: http.Header{}, + Close: r.Close, + Body: body, + Host: r.Host, + Method: r.Method, + Proto: r.Proto, + ContentLength: r.ContentLength, + } + + *req.URL = *r.URL + for k, v := range r.Header { + for _, vv := range v { + req.Header.Add(k, vv) + } + } + + return req +} diff --git a/vendor/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/aws/request/offset_reader.go b/vendor/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/aws/request/offset_reader.go new file mode 100644 index 000000000..da6396d2d --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/aws/request/offset_reader.go @@ -0,0 +1,49 @@ +package request + +import ( + "io" + "sync" +) + +// offsetReader is a thread-safe io.ReadCloser to prevent racing +// with retrying requests +type offsetReader struct { + buf io.ReadSeeker + lock sync.RWMutex + closed bool +} + +func newOffsetReader(buf io.ReadSeeker, offset int64) *offsetReader { + reader := &offsetReader{} + buf.Seek(offset, 0) + + reader.buf = buf + return reader +} + +// Close is a thread-safe close. Uses the write lock. +func (o *offsetReader) Close() error { + o.lock.Lock() + defer o.lock.Unlock() + o.closed = true + return nil +} + +// Read is a thread-safe read using a read lock. +func (o *offsetReader) Read(p []byte) (int, error) { + o.lock.RLock() + defer o.lock.RUnlock() + + if o.closed { + return 0, io.EOF + } + + return o.buf.Read(p) +} + +// CloseAndCopy will return a new offsetReader with a copy of the old buffer +// and close the old buffer. 
+func (o *offsetReader) CloseAndCopy(offset int64) *offsetReader { + o.Close() + return newOffsetReader(o.buf, offset) +} diff --git a/vendor/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/aws/request/request.go b/vendor/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/aws/request/request.go new file mode 100644 index 000000000..d04e95bd1 --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/aws/request/request.go @@ -0,0 +1,330 @@ +package request + +import ( + "bytes" + "fmt" + "io" + "io/ioutil" + "net/http" + "net/url" + "reflect" + "strings" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/aws/client/metadata" +) + +// A Request is the service request to be made. +type Request struct { + Config aws.Config + ClientInfo metadata.ClientInfo + Handlers Handlers + + Retryer + Time time.Time + ExpireTime time.Duration + Operation *Operation + HTTPRequest *http.Request + HTTPResponse *http.Response + Body io.ReadSeeker + BodyStart int64 // offset from beginning of Body that the request body starts + Params interface{} + Error error + Data interface{} + RequestID string + RetryCount int + Retryable *bool + RetryDelay time.Duration + NotHoist bool + SignedHeaderVals http.Header + LastSignedAt time.Time + + built bool +} + +// An Operation is the service API operation to be made. +type Operation struct { + Name string + HTTPMethod string + HTTPPath string + *Paginator +} + +// Paginator keeps track of pagination configuration for an API operation. +type Paginator struct { + InputTokens []string + OutputTokens []string + LimitToken string + TruncationToken string +} + +// New returns a new Request pointer for the service API +// operation and parameters. +// +// Params is any value of input parameters to be the request payload. +// Data is pointer value to an object which the request's response +// payload will be deserialized to. +func New(cfg aws.Config, clientInfo metadata.ClientInfo, handlers Handlers, + retryer Retryer, operation *Operation, params interface{}, data interface{}) *Request { + + method := operation.HTTPMethod + if method == "" { + method = "POST" + } + p := operation.HTTPPath + if p == "" { + p = "/" + } + + httpReq, _ := http.NewRequest(method, "", nil) + + var err error + httpReq.URL, err = url.Parse(clientInfo.Endpoint + p) + if err != nil { + httpReq.URL = &url.URL{} + err = awserr.New("InvalidEndpointURL", "invalid endpoint uri", err) + } + + r := &Request{ + Config: cfg, + ClientInfo: clientInfo, + Handlers: handlers.Copy(), + + Retryer: retryer, + Time: time.Now(), + ExpireTime: 0, + Operation: operation, + HTTPRequest: httpReq, + Body: nil, + Params: params, + Error: err, + Data: data, + } + r.SetBufferBody([]byte{}) + + return r +} + +// WillRetry returns if the request's can be retried. +func (r *Request) WillRetry() bool { + return r.Error != nil && aws.BoolValue(r.Retryable) && r.RetryCount < r.MaxRetries() +} + +// ParamsFilled returns if the request's parameters have been populated +// and the parameters are valid. False is returned if no parameters are +// provided or invalid. +func (r *Request) ParamsFilled() bool { + return r.Params != nil && reflect.ValueOf(r.Params).Elem().IsValid() +} + +// DataFilled returns true if the request's data for response deserialization +// target has been set and is a valid. False is returned if data is not +// set, or is invalid. 
+func (r *Request) DataFilled() bool { + return r.Data != nil && reflect.ValueOf(r.Data).Elem().IsValid() +} + +// SetBufferBody will set the request's body bytes that will be sent to +// the service API. +func (r *Request) SetBufferBody(buf []byte) { + r.SetReaderBody(bytes.NewReader(buf)) +} + +// SetStringBody sets the body of the request to be backed by a string. +func (r *Request) SetStringBody(s string) { + r.SetReaderBody(strings.NewReader(s)) +} + +// SetReaderBody will set the request's body reader. +func (r *Request) SetReaderBody(reader io.ReadSeeker) { + r.HTTPRequest.Body = newOffsetReader(reader, 0) + r.Body = reader +} + +// Presign returns the request's signed URL. Error will be returned +// if the signing fails. +func (r *Request) Presign(expireTime time.Duration) (string, error) { + r.ExpireTime = expireTime + r.NotHoist = false + r.Sign() + if r.Error != nil { + return "", r.Error + } + return r.HTTPRequest.URL.String(), nil +} + +// PresignRequest behaves just like presign, but hoists all headers and signs them. +// Also returns the signed hash back to the user +func (r *Request) PresignRequest(expireTime time.Duration) (string, http.Header, error) { + r.ExpireTime = expireTime + r.NotHoist = true + r.Sign() + if r.Error != nil { + return "", nil, r.Error + } + return r.HTTPRequest.URL.String(), r.SignedHeaderVals, nil +} + +func debugLogReqError(r *Request, stage string, retrying bool, err error) { + if !r.Config.LogLevel.Matches(aws.LogDebugWithRequestErrors) { + return + } + + retryStr := "not retrying" + if retrying { + retryStr = "will retry" + } + + r.Config.Logger.Log(fmt.Sprintf("DEBUG: %s %s/%s failed, %s, error %v", + stage, r.ClientInfo.ServiceName, r.Operation.Name, retryStr, err)) +} + +// Build will build the request's object so it can be signed and sent +// to the service. Build will also validate all the request's parameters. +// Anny additional build Handlers set on this request will be run +// in the order they were set. +// +// The request will only be built once. Multiple calls to build will have +// no effect. +// +// If any Validate or Build errors occur the build will stop and the error +// which occurred will be returned. +func (r *Request) Build() error { + if !r.built { + r.Handlers.Validate.Run(r) + if r.Error != nil { + debugLogReqError(r, "Validate Request", false, r.Error) + return r.Error + } + r.Handlers.Build.Run(r) + if r.Error != nil { + debugLogReqError(r, "Build Request", false, r.Error) + return r.Error + } + r.built = true + } + + return r.Error +} + +// Sign will sign the request returning error if errors are encountered. +// +// Send will build the request prior to signing. All Sign Handlers will +// be executed in the order they were set. +func (r *Request) Sign() error { + r.Build() + if r.Error != nil { + debugLogReqError(r, "Build Request", false, r.Error) + return r.Error + } + + r.Handlers.Sign.Run(r) + return r.Error +} + +// Send will send the request returning error if errors are encountered. +// +// Send will sign the request prior to sending. All Send Handlers will +// be executed in the order they were set. +// +// Canceling a request is non-deterministic. If a request has been canceled, +// then the transport will choose, randomly, one of the state channels during +// reads or getting the connection. 
+// +// readLoop() and getConn(req *Request, cm connectMethod) +// https://github.com/golang/go/blob/master/src/net/http/transport.go +func (r *Request) Send() error { + for { + if aws.BoolValue(r.Retryable) { + if r.Config.LogLevel.Matches(aws.LogDebugWithRequestRetries) { + r.Config.Logger.Log(fmt.Sprintf("DEBUG: Retrying Request %s/%s, attempt %d", + r.ClientInfo.ServiceName, r.Operation.Name, r.RetryCount)) + } + + var body io.ReadCloser + if reader, ok := r.HTTPRequest.Body.(*offsetReader); ok { + body = reader.CloseAndCopy(r.BodyStart) + } else { + if r.Config.Logger != nil { + r.Config.Logger.Log("Request body type has been overwritten. May cause race conditions") + } + r.Body.Seek(r.BodyStart, 0) + body = ioutil.NopCloser(r.Body) + } + + r.HTTPRequest = copyHTTPRequest(r.HTTPRequest, body) + if r.HTTPResponse != nil && r.HTTPResponse.Body != nil { + // Closing response body. Since we are setting a new request to send off, this + // response will get squashed and leaked. + r.HTTPResponse.Body.Close() + } + } + + r.Sign() + if r.Error != nil { + return r.Error + } + + r.Retryable = nil + + r.Handlers.Send.Run(r) + if r.Error != nil { + if strings.Contains(r.Error.Error(), "net/http: request canceled") { + return r.Error + } + + err := r.Error + r.Handlers.Retry.Run(r) + r.Handlers.AfterRetry.Run(r) + if r.Error != nil { + debugLogReqError(r, "Send Request", false, r.Error) + return r.Error + } + debugLogReqError(r, "Send Request", true, err) + continue + } + + r.Handlers.UnmarshalMeta.Run(r) + r.Handlers.ValidateResponse.Run(r) + if r.Error != nil { + err := r.Error + r.Handlers.UnmarshalError.Run(r) + r.Handlers.Retry.Run(r) + r.Handlers.AfterRetry.Run(r) + if r.Error != nil { + debugLogReqError(r, "Validate Response", false, r.Error) + return r.Error + } + debugLogReqError(r, "Validate Response", true, err) + continue + } + + r.Handlers.Unmarshal.Run(r) + if r.Error != nil { + err := r.Error + r.Handlers.Retry.Run(r) + r.Handlers.AfterRetry.Run(r) + if r.Error != nil { + debugLogReqError(r, "Unmarshal Response", false, r.Error) + return r.Error + } + debugLogReqError(r, "Unmarshal Response", true, err) + continue + } + + break + } + + return nil +} + +// AddToUserAgent adds the string to the end of the request's current user agent. +func AddToUserAgent(r *Request, s string) { + curUA := r.HTTPRequest.Header.Get("User-Agent") + if len(curUA) > 0 { + s = curUA + " " + s + } + r.HTTPRequest.Header.Set("User-Agent", s) +} diff --git a/vendor/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/aws/request/request_pagination.go b/vendor/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/aws/request/request_pagination.go new file mode 100644 index 000000000..2939ec473 --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/aws/request/request_pagination.go @@ -0,0 +1,104 @@ +package request + +import ( + "reflect" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awsutil" +) + +//type Paginater interface { +// HasNextPage() bool +// NextPage() *Request +// EachPage(fn func(data interface{}, isLastPage bool) (shouldContinue bool)) error +//} + +// HasNextPage returns true if this request has more pages of data available. +func (r *Request) HasNextPage() bool { + return len(r.nextPageTokens()) > 0 +} + +// nextPageTokens returns the tokens to use when asking for the next page of +// data. 
+func (r *Request) nextPageTokens() []interface{} { + if r.Operation.Paginator == nil { + return nil + } + + if r.Operation.TruncationToken != "" { + tr, _ := awsutil.ValuesAtPath(r.Data, r.Operation.TruncationToken) + if len(tr) == 0 { + return nil + } + + switch v := tr[0].(type) { + case *bool: + if !aws.BoolValue(v) { + return nil + } + case bool: + if v == false { + return nil + } + } + } + + tokens := []interface{}{} + tokenAdded := false + for _, outToken := range r.Operation.OutputTokens { + v, _ := awsutil.ValuesAtPath(r.Data, outToken) + if len(v) > 0 { + tokens = append(tokens, v[0]) + tokenAdded = true + } else { + tokens = append(tokens, nil) + } + } + if !tokenAdded { + return nil + } + + return tokens +} + +// NextPage returns a new Request that can be executed to return the next +// page of result data. Call .Send() on this request to execute it. +func (r *Request) NextPage() *Request { + tokens := r.nextPageTokens() + if len(tokens) == 0 { + return nil + } + + data := reflect.New(reflect.TypeOf(r.Data).Elem()).Interface() + nr := New(r.Config, r.ClientInfo, r.Handlers, r.Retryer, r.Operation, awsutil.CopyOf(r.Params), data) + for i, intok := range nr.Operation.InputTokens { + awsutil.SetValueAtPath(nr.Params, intok, tokens[i]) + } + return nr +} + +// EachPage iterates over each page of a paginated request object. The fn +// parameter should be a function with the following sample signature: +// +// func(page *T, lastPage bool) bool { +// return true // return false to stop iterating +// } +// +// Where "T" is the structure type matching the output structure of the given +// operation. For example, a request object generated by +// DynamoDB.ListTablesRequest() would expect to see dynamodb.ListTablesOutput +// as the structure "T". The lastPage value represents whether the page is +// the last page of data or not. The return value of this function should +// return true to keep iterating or false to stop. +func (r *Request) EachPage(fn func(data interface{}, isLastPage bool) (shouldContinue bool)) error { + for page := r; page != nil; page = page.NextPage() { + if err := page.Send(); err != nil { + return err + } + if getNextPage := fn(page.Data, !page.HasNextPage()); !getNextPage { + return page.Error + } + } + + return nil +} diff --git a/vendor/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/aws/request/retryer.go b/vendor/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/aws/request/retryer.go new file mode 100644 index 000000000..8cc8b015a --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/aws/request/retryer.go @@ -0,0 +1,101 @@ +package request + +import ( + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" +) + +// Retryer is an interface to control retry logic for a given service. +// The default implementation used by most services is the service.DefaultRetryer +// structure, which contains basic retry logic using exponential backoff. +type Retryer interface { + RetryRules(*Request) time.Duration + ShouldRetry(*Request) bool + MaxRetries() int +} + +// WithRetryer sets a config Retryer value to the given Config returning it +// for chaining. +func WithRetryer(cfg *aws.Config, retryer Retryer) *aws.Config { + cfg.Retryer = retryer + return cfg +} + +// retryableCodes is a collection of service response codes which are retry-able +// without any further action. 
+var retryableCodes = map[string]struct{}{ + "RequestError": {}, + "RequestTimeout": {}, +} + +var throttleCodes = map[string]struct{}{ + "ProvisionedThroughputExceededException": {}, + "Throttling": {}, + "ThrottlingException": {}, + "RequestLimitExceeded": {}, + "RequestThrottled": {}, + "LimitExceededException": {}, // Deleting 10+ DynamoDb tables at once + "TooManyRequestsException": {}, // Lambda functions +} + +// credsExpiredCodes is a collection of error codes which signify the credentials +// need to be refreshed. Expired tokens require refreshing of credentials, and +// resigning before the request can be retried. +var credsExpiredCodes = map[string]struct{}{ + "ExpiredToken": {}, + "ExpiredTokenException": {}, + "RequestExpired": {}, // EC2 Only +} + +func isCodeThrottle(code string) bool { + _, ok := throttleCodes[code] + return ok +} + +func isCodeRetryable(code string) bool { + if _, ok := retryableCodes[code]; ok { + return true + } + + return isCodeExpiredCreds(code) +} + +func isCodeExpiredCreds(code string) bool { + _, ok := credsExpiredCodes[code] + return ok +} + +// IsErrorRetryable returns whether the error is retryable, based on its Code. +// Returns false if the request has no Error set. +func (r *Request) IsErrorRetryable() bool { + if r.Error != nil { + if err, ok := r.Error.(awserr.Error); ok { + return isCodeRetryable(err.Code()) + } + } + return false +} + +// IsErrorThrottle returns whether the error is to be throttled based on its code. +// Returns false if the request has no Error set +func (r *Request) IsErrorThrottle() bool { + if r.Error != nil { + if err, ok := r.Error.(awserr.Error); ok { + return isCodeThrottle(err.Code()) + } + } + return false +} + +// IsErrorExpired returns whether the error code is a credential expiry error. +// Returns false if the request has no Error set. +func (r *Request) IsErrorExpired() bool { + if r.Error != nil { + if err, ok := r.Error.(awserr.Error); ok { + return isCodeExpiredCreds(err.Code()) + } + } + return false +} diff --git a/vendor/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/aws/request/validation.go b/vendor/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/aws/request/validation.go new file mode 100644 index 000000000..2520286b7 --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/aws/request/validation.go @@ -0,0 +1,234 @@ +package request + +import ( + "bytes" + "fmt" + + "github.com/aws/aws-sdk-go/aws/awserr" +) + +const ( + // InvalidParameterErrCode is the error code for invalid parameters errors + InvalidParameterErrCode = "InvalidParameter" + // ParamRequiredErrCode is the error code for required parameter errors + ParamRequiredErrCode = "ParamRequiredError" + // ParamMinValueErrCode is the error code for fields with too low of a + // number value. + ParamMinValueErrCode = "ParamMinValueError" + // ParamMinLenErrCode is the error code for fields without enough elements. + ParamMinLenErrCode = "ParamMinLenError" +) + +// Validator provides a way for types to perform validation logic on their +// input values that external code can use to determine if a type's values +// are valid. +type Validator interface { + Validate() error +} + +// An ErrInvalidParams provides wrapping of invalid parameter errors found when +// validating API operation input parameters. +type ErrInvalidParams struct { + // Context is the base context of the invalid parameter group. 
+ Context string + errs []ErrInvalidParam +} + +// Add adds a new invalid parameter error to the collection of invalid +// parameters. The context of the invalid parameter will be updated to reflect +// this collection. +func (e *ErrInvalidParams) Add(err ErrInvalidParam) { + err.SetContext(e.Context) + e.errs = append(e.errs, err) +} + +// AddNested adds the invalid parameter errors from another ErrInvalidParams +// value into this collection. The nested errors will have their nested context +// updated and base context to reflect the merging. +// +// Use for nested validations errors. +func (e *ErrInvalidParams) AddNested(nestedCtx string, nested ErrInvalidParams) { + for _, err := range nested.errs { + err.SetContext(e.Context) + err.AddNestedContext(nestedCtx) + e.errs = append(e.errs, err) + } +} + +// Len returns the number of invalid parameter errors +func (e ErrInvalidParams) Len() int { + return len(e.errs) +} + +// Code returns the code of the error +func (e ErrInvalidParams) Code() string { + return InvalidParameterErrCode +} + +// Message returns the message of the error +func (e ErrInvalidParams) Message() string { + return fmt.Sprintf("%d validation error(s) found.", len(e.errs)) +} + +// Error returns the string formatted form of the invalid parameters. +func (e ErrInvalidParams) Error() string { + w := &bytes.Buffer{} + fmt.Fprintf(w, "%s: %s\n", e.Code(), e.Message()) + + for _, err := range e.errs { + fmt.Fprintf(w, "- %s\n", err.Message()) + } + + return w.String() +} + +// OrigErr returns the invalid parameters as a awserr.BatchedErrors value +func (e ErrInvalidParams) OrigErr() error { + return awserr.NewBatchError( + InvalidParameterErrCode, e.Message(), e.OrigErrs()) +} + +// OrigErrs returns a slice of the invalid parameters +func (e ErrInvalidParams) OrigErrs() []error { + errs := make([]error, len(e.errs)) + for i := 0; i < len(errs); i++ { + errs[i] = e.errs[i] + } + + return errs +} + +// An ErrInvalidParam represents an invalid parameter error type. +type ErrInvalidParam interface { + awserr.Error + + // Field name the error occurred on. + Field() string + + // SetContext updates the context of the error. + SetContext(string) + + // AddNestedContext updates the error's context to include a nested level. + AddNestedContext(string) +} + +type errInvalidParam struct { + context string + nestedContext string + field string + code string + msg string +} + +// Code returns the error code for the type of invalid parameter. +func (e *errInvalidParam) Code() string { + return e.code +} + +// Message returns the reason the parameter was invalid, and its context. +func (e *errInvalidParam) Message() string { + return fmt.Sprintf("%s, %s.", e.msg, e.Field()) +} + +// Error returns the string version of the invalid parameter error. +func (e *errInvalidParam) Error() string { + return fmt.Sprintf("%s: %s", e.code, e.Message()) +} + +// OrigErr returns nil, Implemented for awserr.Error interface. +func (e *errInvalidParam) OrigErr() error { + return nil +} + +// Field Returns the field and context the error occurred. +func (e *errInvalidParam) Field() string { + field := e.context + if len(field) > 0 { + field += "." + } + if len(e.nestedContext) > 0 { + field += fmt.Sprintf("%s.", e.nestedContext) + } + field += e.field + + return field +} + +// SetContext updates the base context of the error. +func (e *errInvalidParam) SetContext(ctx string) { + e.context = ctx +} + +// AddNestedContext prepends a context to the field's path. 
+func (e *errInvalidParam) AddNestedContext(ctx string) { + if len(e.nestedContext) == 0 { + e.nestedContext = ctx + } else { + e.nestedContext = fmt.Sprintf("%s.%s", ctx, e.nestedContext) + } + +} + +// An ErrParamRequired represents an required parameter error. +type ErrParamRequired struct { + errInvalidParam +} + +// NewErrParamRequired creates a new required parameter error. +func NewErrParamRequired(field string) *ErrParamRequired { + return &ErrParamRequired{ + errInvalidParam{ + code: ParamRequiredErrCode, + field: field, + msg: fmt.Sprintf("missing required field"), + }, + } +} + +// An ErrParamMinValue represents a minimum value parameter error. +type ErrParamMinValue struct { + errInvalidParam + min float64 +} + +// NewErrParamMinValue creates a new minimum value parameter error. +func NewErrParamMinValue(field string, min float64) *ErrParamMinValue { + return &ErrParamMinValue{ + errInvalidParam: errInvalidParam{ + code: ParamMinValueErrCode, + field: field, + msg: fmt.Sprintf("minimum field value of %v", min), + }, + min: min, + } +} + +// MinValue returns the field's require minimum value. +// +// float64 is returned for both int and float min values. +func (e *ErrParamMinValue) MinValue() float64 { + return e.min +} + +// An ErrParamMinLen represents a minimum length parameter error. +type ErrParamMinLen struct { + errInvalidParam + min int +} + +// NewErrParamMinLen creates a new minimum length parameter error. +func NewErrParamMinLen(field string, min int) *ErrParamMinLen { + return &ErrParamMinLen{ + errInvalidParam: errInvalidParam{ + code: ParamMinValueErrCode, + field: field, + msg: fmt.Sprintf("minimum field size of %v", min), + }, + min: min, + } +} + +// MinLen returns the field's required minimum length. +func (e *ErrParamMinLen) MinLen() int { + return e.min +} diff --git a/vendor/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/aws/session/session.go b/vendor/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/aws/session/session.go new file mode 100644 index 000000000..6bc8f1be9 --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/aws/session/session.go @@ -0,0 +1,120 @@ +// Package session provides a way to create service clients with shared configuration +// and handlers. +// +// Generally this package should be used instead of the `defaults` package. +// +// A session should be used to share configurations and request handlers between multiple +// service clients. When service clients need specific configuration aws.Config can be +// used to provide additional configuration directly to the service client. +package session + +import ( + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/client" + "github.com/aws/aws-sdk-go/aws/corehandlers" + "github.com/aws/aws-sdk-go/aws/defaults" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/private/endpoints" +) + +// A Session provides a central location to create service clients from and +// store configurations and request handlers for those services. +// +// Sessions are safe to create service clients concurrently, but it is not safe +// to mutate the session concurrently. +type Session struct { + Config *aws.Config + Handlers request.Handlers +} + +// New creates a new instance of the handlers merging in the provided Configs +// on top of the SDK's default configurations. Once the session is created it +// can be mutated to modify Configs or Handlers. 
The session is safe to be read +// concurrently, but it should not be written to concurrently. +// +// Example: +// // Create a session with the default config and request handlers. +// sess := session.New() +// +// // Create a session with a custom region +// sess := session.New(&aws.Config{Region: aws.String("us-east-1")}) +// +// // Create a session, and add additional handlers for all service +// // clients created with the session to inherit. Adds logging handler. +// sess := session.New() +// sess.Handlers.Send.PushFront(func(r *request.Request) { +// // Log every request made and its payload +// logger.Println("Request: %s/%s, Payload: %s", r.ClientInfo.ServiceName, r.Operation, r.Params) +// }) +// +// // Create a S3 client instance from a session +// sess := session.New() +// svc := s3.New(sess) +func New(cfgs ...*aws.Config) *Session { + cfg := defaults.Config() + handlers := defaults.Handlers() + + // Apply the passed in configs so the configuration can be applied to the + // default credential chain + cfg.MergeIn(cfgs...) + cfg.Credentials = defaults.CredChain(cfg, handlers) + + // Reapply any passed in configs to override credentials if set + cfg.MergeIn(cfgs...) + + s := &Session{ + Config: cfg, + Handlers: handlers, + } + + initHandlers(s) + + return s +} + +func initHandlers(s *Session) { + // Add the Validate parameter handler if it is not disabled. + s.Handlers.Validate.Remove(corehandlers.ValidateParametersHandler) + if !aws.BoolValue(s.Config.DisableParamValidation) { + s.Handlers.Validate.PushBackNamed(corehandlers.ValidateParametersHandler) + } +} + +// Copy creates and returns a copy of the current session, coping the config +// and handlers. If any additional configs are provided they will be merged +// on top of the session's copied config. +// +// Example: +// // Create a copy of the current session, configured for the us-west-2 region. +// sess.Copy(&aws.Config{Region: aws.String("us-west-2")}) +func (s *Session) Copy(cfgs ...*aws.Config) *Session { + newSession := &Session{ + Config: s.Config.Copy(cfgs...), + Handlers: s.Handlers.Copy(), + } + + initHandlers(newSession) + + return newSession +} + +// ClientConfig satisfies the client.ConfigProvider interface and is used to +// configure the service client instances. Passing the Session to the service +// client's constructor (New) will use this method to configure the client. +// +// Example: +// sess := session.New() +// s3.New(sess) +func (s *Session) ClientConfig(serviceName string, cfgs ...*aws.Config) client.Config { + s = s.Copy(cfgs...) 
+ endpoint, signingRegion := endpoints.NormalizeEndpoint( + aws.StringValue(s.Config.Endpoint), serviceName, + aws.StringValue(s.Config.Region), aws.BoolValue(s.Config.DisableSSL)) + + return client.Config{ + Config: s.Config, + Handlers: s.Handlers, + Endpoint: endpoint, + SigningRegion: signingRegion, + } +} diff --git a/vendor/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/header_rules.go b/vendor/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/header_rules.go new file mode 100644 index 000000000..244c86da0 --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/header_rules.go @@ -0,0 +1,82 @@ +package v4 + +import ( + "net/http" + "strings" +) + +// validator houses a set of rule needed for validation of a +// string value +type rules []rule + +// rule interface allows for more flexible rules and just simply +// checks whether or not a value adheres to that rule +type rule interface { + IsValid(value string) bool +} + +// IsValid will iterate through all rules and see if any rules +// apply to the value and supports nested rules +func (r rules) IsValid(value string) bool { + for _, rule := range r { + if rule.IsValid(value) { + return true + } + } + return false +} + +// mapRule generic rule for maps +type mapRule map[string]struct{} + +// IsValid for the map rule satisfies whether it exists in the map +func (m mapRule) IsValid(value string) bool { + _, ok := m[value] + return ok +} + +// whitelist is a generic rule for whitelisting +type whitelist struct { + rule +} + +// IsValid for whitelist checks if the value is within the whitelist +func (w whitelist) IsValid(value string) bool { + return w.rule.IsValid(value) +} + +// blacklist is a generic rule for blacklisting +type blacklist struct { + rule +} + +// IsValid for whitelist checks if the value is within the whitelist +func (b blacklist) IsValid(value string) bool { + return !b.rule.IsValid(value) +} + +type patterns []string + +// IsValid for patterns checks each pattern and returns if a match has +// been found +func (p patterns) IsValid(value string) bool { + for _, pattern := range p { + if strings.HasPrefix(http.CanonicalHeaderKey(value), pattern) { + return true + } + } + return false +} + +// inclusiveRules rules allow for rules to depend on one another +type inclusiveRules []rule + +// IsValid will return true if all rules are true +func (r inclusiveRules) IsValid(value string) bool { + for _, rule := range r { + if !rule.IsValid(value) { + return false + } + } + return true +} diff --git a/vendor/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/v4.go b/vendor/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/v4.go new file mode 100644 index 000000000..f040f9ce9 --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/v4.go @@ -0,0 +1,644 @@ +// Package v4 implements signing for AWS V4 signer +// +// Provides request signing for request that need to be signed with +// AWS V4 Signatures. 
+package v4 + +import ( + "bytes" + "crypto/hmac" + "crypto/sha256" + "encoding/hex" + "fmt" + "io" + "net/http" + "net/url" + "sort" + "strconv" + "strings" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/credentials" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/private/protocol/rest" +) + +const ( + authHeaderPrefix = "AWS4-HMAC-SHA256" + timeFormat = "20060102T150405Z" + shortTimeFormat = "20060102" + + // emptyStringSHA256 is a SHA256 of an empty string + emptyStringSHA256 = `e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855` +) + +var ignoredHeaders = rules{ + blacklist{ + mapRule{ + "Authorization": struct{}{}, + "User-Agent": struct{}{}, + }, + }, +} + +// requiredSignedHeaders is a whitelist for build canonical headers. +var requiredSignedHeaders = rules{ + whitelist{ + mapRule{ + "Cache-Control": struct{}{}, + "Content-Disposition": struct{}{}, + "Content-Encoding": struct{}{}, + "Content-Language": struct{}{}, + "Content-Md5": struct{}{}, + "Content-Type": struct{}{}, + "Expires": struct{}{}, + "If-Match": struct{}{}, + "If-Modified-Since": struct{}{}, + "If-None-Match": struct{}{}, + "If-Unmodified-Since": struct{}{}, + "Range": struct{}{}, + "X-Amz-Acl": struct{}{}, + "X-Amz-Copy-Source": struct{}{}, + "X-Amz-Copy-Source-If-Match": struct{}{}, + "X-Amz-Copy-Source-If-Modified-Since": struct{}{}, + "X-Amz-Copy-Source-If-None-Match": struct{}{}, + "X-Amz-Copy-Source-If-Unmodified-Since": struct{}{}, + "X-Amz-Copy-Source-Range": struct{}{}, + "X-Amz-Copy-Source-Server-Side-Encryption-Customer-Algorithm": struct{}{}, + "X-Amz-Copy-Source-Server-Side-Encryption-Customer-Key": struct{}{}, + "X-Amz-Copy-Source-Server-Side-Encryption-Customer-Key-Md5": struct{}{}, + "X-Amz-Grant-Full-control": struct{}{}, + "X-Amz-Grant-Read": struct{}{}, + "X-Amz-Grant-Read-Acp": struct{}{}, + "X-Amz-Grant-Write": struct{}{}, + "X-Amz-Grant-Write-Acp": struct{}{}, + "X-Amz-Metadata-Directive": struct{}{}, + "X-Amz-Mfa": struct{}{}, + "X-Amz-Request-Payer": struct{}{}, + "X-Amz-Server-Side-Encryption": struct{}{}, + "X-Amz-Server-Side-Encryption-Aws-Kms-Key-Id": struct{}{}, + "X-Amz-Server-Side-Encryption-Customer-Algorithm": struct{}{}, + "X-Amz-Server-Side-Encryption-Customer-Key": struct{}{}, + "X-Amz-Server-Side-Encryption-Customer-Key-Md5": struct{}{}, + "X-Amz-Storage-Class": struct{}{}, + "X-Amz-Website-Redirect-Location": struct{}{}, + }, + }, + patterns{"X-Amz-Meta-"}, +} + +// allowedHoisting is a whitelist for build query headers. The boolean value +// represents whether or not it is a pattern. +var allowedQueryHoisting = inclusiveRules{ + blacklist{requiredSignedHeaders}, + patterns{"X-Amz-"}, +} + +// Signer applies AWS v4 signing to given request. Use this to sign requests +// that need to be signed with AWS V4 Signatures. +type Signer struct { + // The authentication credentials the request will be signed against. + // This value must be set to sign requests. + Credentials *credentials.Credentials + + // Sets the log level the signer should use when reporting information to + // the logger. If the logger is nil nothing will be logged. See + // aws.LogLevelType for more information on available logging levels + // + // By default nothing will be logged. + Debug aws.LogLevelType + + // The logger loging information will be written to. If there the logger + // is nil, nothing will be logged. 
+ Logger aws.Logger + + // Disables the Signer's moving HTTP header key/value pairs from the HTTP + // request header to the request's query string. This is most commonly used + // with pre-signed requests preventing headers from being added to the + // request's query string. + DisableHeaderHoisting bool + + // currentTimeFn returns the time value which represents the current time. + // This value should only be used for testing. If it is nil the default + // time.Now will be used. + currentTimeFn func() time.Time +} + +// NewSigner returns a Signer pointer configured with the credentials and optional +// option values provided. If not options are provided the Signer will use its +// default configuration. +func NewSigner(credentials *credentials.Credentials, options ...func(*Signer)) *Signer { + v4 := &Signer{ + Credentials: credentials, + } + + for _, option := range options { + option(v4) + } + + return v4 +} + +type signingCtx struct { + ServiceName string + Region string + Request *http.Request + Body io.ReadSeeker + Query url.Values + Time time.Time + ExpireTime time.Duration + SignedHeaderVals http.Header + + credValues credentials.Value + isPresign bool + formattedTime string + formattedShortTime string + + bodyDigest string + signedHeaders string + canonicalHeaders string + canonicalString string + credentialString string + stringToSign string + signature string + authorization string +} + +// Sign signs AWS v4 requests with the provided body, service name, region the +// request is made to, and time the request is signed at. The signTime allows +// you to specify that a request is signed for the future, and cannot be +// used until then. +// +// Returns a list of HTTP headers that were included in the signature or an +// error if signing the request failed. Generally for signed requests this value +// is not needed as the full request context will be captured by the http.Request +// value. It is included for reference though. +// +// Sign differs from Presign in that it will sign the request using HTTP +// header values. This type of signing is intended for http.Request values that +// will not be shared, or are shared in a way the header values on the request +// will not be lost. +// +// The requests body is an io.ReadSeeker so the SHA256 of the body can be +// generated. To bypass the signer computing the hash you can set the +// "X-Amz-Content-Sha256" header with a precomputed value. The signer will +// only compute the hash if the request header value is empty. +func (v4 Signer) Sign(r *http.Request, body io.ReadSeeker, service, region string, signTime time.Time) (http.Header, error) { + return v4.signWithBody(r, body, service, region, 0, signTime) +} + +// Presign signs AWS v4 requests with the provided body, service name, region +// the request is made to, and time the request is signed at. The signTime +// allows you to specify that a request is signed for the future, and cannot +// be used until then. +// +// Returns a list of HTTP headers that were included in the signature or an +// error if signing the request failed. For presigned requests these headers +// and their values must be included on the HTTP request when it is made. This +// is helpful to know what header values need to be shared with the party the +// presigned request will be distributed to. +// +// Presign differs from Sign in that it will sign the request using query string +// instead of header values. 
This allows you to share the Presigned Request's +// URL with third parties, or distribute it throughout your system with minimal +// dependencies. +// +// Presign also takes an exp value which is the duration the +// signed request will be valid after the signing time. This is allows you to +// set when the request will expire. +// +// The requests body is an io.ReadSeeker so the SHA256 of the body can be +// generated. To bypass the signer computing the hash you can set the +// "X-Amz-Content-Sha256" header with a precomputed value. The signer will +// only compute the hash if the request header value is empty. +// +// Presigning a S3 request will not compute the body's SHA256 hash by default. +// This is done due to the general use case for S3 presigned URLs is to share +// PUT/GET capabilities. If you would like to include the body's SHA256 in the +// presigned request's signature you can set the "X-Amz-Content-Sha256" +// HTTP header and that will be included in the request's signature. +func (v4 Signer) Presign(r *http.Request, body io.ReadSeeker, service, region string, exp time.Duration, signTime time.Time) (http.Header, error) { + return v4.signWithBody(r, body, service, region, exp, signTime) +} + +func (v4 Signer) signWithBody(r *http.Request, body io.ReadSeeker, service, region string, exp time.Duration, signTime time.Time) (http.Header, error) { + currentTimeFn := v4.currentTimeFn + if currentTimeFn == nil { + currentTimeFn = time.Now + } + + ctx := &signingCtx{ + Request: r, + Body: body, + Query: r.URL.Query(), + Time: signTime, + ExpireTime: exp, + isPresign: exp != 0, + ServiceName: service, + Region: region, + } + + if ctx.isRequestSigned() { + if !v4.Credentials.IsExpired() && currentTimeFn().Before(ctx.Time.Add(10*time.Minute)) { + // If the request is already signed, and the credentials have not + // expired, and the request is not too old ignore the signing request. + return ctx.SignedHeaderVals, nil + } + ctx.Time = currentTimeFn() + ctx.handlePresignRemoval() + } + + var err error + ctx.credValues, err = v4.Credentials.Get() + if err != nil { + return http.Header{}, err + } + + ctx.assignAmzQueryValues() + ctx.build(v4.DisableHeaderHoisting) + + if v4.Debug.Matches(aws.LogDebugWithSigning) { + v4.logSigningInfo(ctx) + } + + return ctx.SignedHeaderVals, nil +} + +func (ctx *signingCtx) handlePresignRemoval() { + if !ctx.isPresign { + return + } + + // The credentials have expired for this request. The current signing + // is invalid, and needs to be request because the request will fail. + ctx.removePresign() + + // Update the request's query string to ensure the values stays in + // sync in the case retrieving the new credentials fails. + ctx.Request.URL.RawQuery = ctx.Query.Encode() +} + +func (ctx *signingCtx) assignAmzQueryValues() { + if ctx.isPresign { + ctx.Query.Set("X-Amz-Algorithm", authHeaderPrefix) + if ctx.credValues.SessionToken != "" { + ctx.Query.Set("X-Amz-Security-Token", ctx.credValues.SessionToken) + } else { + ctx.Query.Del("X-Amz-Security-Token") + } + + return + } + + if ctx.credValues.SessionToken != "" { + ctx.Request.Header.Set("X-Amz-Security-Token", ctx.credValues.SessionToken) + } +} + +// SignRequestHandler is a named request handler the SDK will use to sign +// service client request with using the V4 signature. +var SignRequestHandler = request.NamedHandler{ + Name: "v4.SignRequestHandler", Fn: SignSDKRequest, +} + +// SignSDKRequest signs an AWS request with the V4 signature. 
This +// request handler is bested used only with the SDK's built in service client's +// API operation requests. +// +// This function should not be used on its on its own, but in conjunction with +// an AWS service client's API operation call. To sign a standalone request +// not created by a service client's API operation method use the "Sign" or +// "Presign" functions of the "Signer" type. +// +// If the credentials of the request's config are set to +// credentials.AnonymousCredentials the request will not be signed. +func SignSDKRequest(req *request.Request) { + signSDKRequestWithCurrTime(req, time.Now) +} +func signSDKRequestWithCurrTime(req *request.Request, curTimeFn func() time.Time) { + // If the request does not need to be signed ignore the signing of the + // request if the AnonymousCredentials object is used. + if req.Config.Credentials == credentials.AnonymousCredentials { + return + } + + region := req.ClientInfo.SigningRegion + if region == "" { + region = aws.StringValue(req.Config.Region) + } + + name := req.ClientInfo.SigningName + if name == "" { + name = req.ClientInfo.ServiceName + } + + v4 := NewSigner(req.Config.Credentials, func(v4 *Signer) { + v4.Debug = req.Config.LogLevel.Value() + v4.Logger = req.Config.Logger + v4.DisableHeaderHoisting = req.NotHoist + v4.currentTimeFn = curTimeFn + }) + + signingTime := req.Time + if !req.LastSignedAt.IsZero() { + signingTime = req.LastSignedAt + } + + signedHeaders, err := v4.signWithBody(req.HTTPRequest, req.Body, name, region, req.ExpireTime, signingTime) + if err != nil { + req.Error = err + req.SignedHeaderVals = nil + return + } + + req.SignedHeaderVals = signedHeaders + req.LastSignedAt = curTimeFn() +} + +const logSignInfoMsg = `DEBUG: Request Signiture: +---[ CANONICAL STRING ]----------------------------- +%s +---[ STRING TO SIGN ]-------------------------------- +%s%s +-----------------------------------------------------` +const logSignedURLMsg = ` +---[ SIGNED URL ]------------------------------------ +%s` + +func (v4 *Signer) logSigningInfo(ctx *signingCtx) { + signedURLMsg := "" + if ctx.isPresign { + signedURLMsg = fmt.Sprintf(logSignedURLMsg, ctx.Request.URL.String()) + } + msg := fmt.Sprintf(logSignInfoMsg, ctx.canonicalString, ctx.stringToSign, signedURLMsg) + v4.Logger.Log(msg) +} + +func (ctx *signingCtx) build(disableHeaderHoisting bool) { + ctx.buildTime() // no depends + ctx.buildCredentialString() // no depends + + unsignedHeaders := ctx.Request.Header + if ctx.isPresign { + if !disableHeaderHoisting { + urlValues := url.Values{} + urlValues, unsignedHeaders = buildQuery(allowedQueryHoisting, unsignedHeaders) // no depends + for k := range urlValues { + ctx.Query[k] = urlValues[k] + } + } + } + + ctx.buildBodyDigest() + ctx.buildCanonicalHeaders(ignoredHeaders, unsignedHeaders) + ctx.buildCanonicalString() // depends on canon headers / signed headers + ctx.buildStringToSign() // depends on canon string + ctx.buildSignature() // depends on string to sign + + if ctx.isPresign { + ctx.Request.URL.RawQuery += "&X-Amz-Signature=" + ctx.signature + } else { + parts := []string{ + authHeaderPrefix + " Credential=" + ctx.credValues.AccessKeyID + "/" + ctx.credentialString, + "SignedHeaders=" + ctx.signedHeaders, + "Signature=" + ctx.signature, + } + ctx.Request.Header.Set("Authorization", strings.Join(parts, ", ")) + } +} + +func (ctx *signingCtx) buildTime() { + ctx.formattedTime = ctx.Time.UTC().Format(timeFormat) + ctx.formattedShortTime = ctx.Time.UTC().Format(shortTimeFormat) + + if ctx.isPresign { + 
duration := int64(ctx.ExpireTime / time.Second) + ctx.Query.Set("X-Amz-Date", ctx.formattedTime) + ctx.Query.Set("X-Amz-Expires", strconv.FormatInt(duration, 10)) + } else { + ctx.Request.Header.Set("X-Amz-Date", ctx.formattedTime) + } +} + +func (ctx *signingCtx) buildCredentialString() { + ctx.credentialString = strings.Join([]string{ + ctx.formattedShortTime, + ctx.Region, + ctx.ServiceName, + "aws4_request", + }, "/") + + if ctx.isPresign { + ctx.Query.Set("X-Amz-Credential", ctx.credValues.AccessKeyID+"/"+ctx.credentialString) + } +} + +func buildQuery(r rule, header http.Header) (url.Values, http.Header) { + query := url.Values{} + unsignedHeaders := http.Header{} + for k, h := range header { + if r.IsValid(k) { + query[k] = h + } else { + unsignedHeaders[k] = h + } + } + + return query, unsignedHeaders +} +func (ctx *signingCtx) buildCanonicalHeaders(r rule, header http.Header) { + var headers []string + headers = append(headers, "host") + for k, v := range header { + canonicalKey := http.CanonicalHeaderKey(k) + if !r.IsValid(canonicalKey) { + continue // ignored header + } + if ctx.SignedHeaderVals == nil { + ctx.SignedHeaderVals = make(http.Header) + } + + lowerCaseKey := strings.ToLower(k) + if _, ok := ctx.SignedHeaderVals[lowerCaseKey]; ok { + // include additional values + ctx.SignedHeaderVals[lowerCaseKey] = append(ctx.SignedHeaderVals[lowerCaseKey], v...) + continue + } + + headers = append(headers, lowerCaseKey) + ctx.SignedHeaderVals[lowerCaseKey] = v + } + sort.Strings(headers) + + ctx.signedHeaders = strings.Join(headers, ";") + + if ctx.isPresign { + ctx.Query.Set("X-Amz-SignedHeaders", ctx.signedHeaders) + } + + headerValues := make([]string, len(headers)) + for i, k := range headers { + if k == "host" { + headerValues[i] = "host:" + ctx.Request.URL.Host + } else { + headerValues[i] = k + ":" + + strings.Join(ctx.SignedHeaderVals[k], ",") + } + } + + ctx.canonicalHeaders = strings.Join(stripExcessSpaces(headerValues), "\n") +} + +func (ctx *signingCtx) buildCanonicalString() { + ctx.Request.URL.RawQuery = strings.Replace(ctx.Query.Encode(), "+", "%20", -1) + uri := ctx.Request.URL.Opaque + if uri != "" { + uri = "/" + strings.Join(strings.Split(uri, "/")[3:], "/") + } else { + uri = ctx.Request.URL.Path + } + if uri == "" { + uri = "/" + } + + if ctx.ServiceName != "s3" { + uri = rest.EscapePath(uri, false) + } + + ctx.canonicalString = strings.Join([]string{ + ctx.Request.Method, + uri, + ctx.Request.URL.RawQuery, + ctx.canonicalHeaders + "\n", + ctx.signedHeaders, + ctx.bodyDigest, + }, "\n") +} + +func (ctx *signingCtx) buildStringToSign() { + ctx.stringToSign = strings.Join([]string{ + authHeaderPrefix, + ctx.formattedTime, + ctx.credentialString, + hex.EncodeToString(makeSha256([]byte(ctx.canonicalString))), + }, "\n") +} + +func (ctx *signingCtx) buildSignature() { + secret := ctx.credValues.SecretAccessKey + date := makeHmac([]byte("AWS4"+secret), []byte(ctx.formattedShortTime)) + region := makeHmac(date, []byte(ctx.Region)) + service := makeHmac(region, []byte(ctx.ServiceName)) + credentials := makeHmac(service, []byte("aws4_request")) + signature := makeHmac(credentials, []byte(ctx.stringToSign)) + ctx.signature = hex.EncodeToString(signature) +} + +func (ctx *signingCtx) buildBodyDigest() { + hash := ctx.Request.Header.Get("X-Amz-Content-Sha256") + if hash == "" { + if ctx.isPresign && ctx.ServiceName == "s3" { + hash = "UNSIGNED-PAYLOAD" + } else if ctx.Body == nil { + hash = emptyStringSHA256 + } else { + hash = 
hex.EncodeToString(makeSha256Reader(ctx.Body)) + } + if ctx.ServiceName == "s3" { + ctx.Request.Header.Set("X-Amz-Content-Sha256", hash) + } + } + ctx.bodyDigest = hash +} + +// isRequestSigned returns if the request is currently signed or presigned +func (ctx *signingCtx) isRequestSigned() bool { + if ctx.isPresign && ctx.Query.Get("X-Amz-Signature") != "" { + return true + } + if ctx.Request.Header.Get("Authorization") != "" { + return true + } + + return false +} + +// unsign removes signing flags for both signed and presigned requests. +func (ctx *signingCtx) removePresign() { + ctx.Query.Del("X-Amz-Algorithm") + ctx.Query.Del("X-Amz-Signature") + ctx.Query.Del("X-Amz-Security-Token") + ctx.Query.Del("X-Amz-Date") + ctx.Query.Del("X-Amz-Expires") + ctx.Query.Del("X-Amz-Credential") + ctx.Query.Del("X-Amz-SignedHeaders") +} + +func makeHmac(key []byte, data []byte) []byte { + hash := hmac.New(sha256.New, key) + hash.Write(data) + return hash.Sum(nil) +} + +func makeSha256(data []byte) []byte { + hash := sha256.New() + hash.Write(data) + return hash.Sum(nil) +} + +func makeSha256Reader(reader io.ReadSeeker) []byte { + hash := sha256.New() + start, _ := reader.Seek(0, 1) + defer reader.Seek(start, 0) + + io.Copy(hash, reader) + return hash.Sum(nil) +} + +const doubleSpaces = " " + +var doubleSpaceBytes = []byte(doubleSpaces) + +func stripExcessSpaces(headerVals []string) []string { + vals := make([]string, len(headerVals)) + for i, str := range headerVals { + // Trim leading and trailing spaces + trimmed := strings.TrimSpace(str) + + idx := strings.Index(trimmed, doubleSpaces) + var buf []byte + for idx > -1 { + // Multiple adjacent spaces found + if buf == nil { + // first time create the buffer + buf = []byte(trimmed) + } + + stripToIdx := -1 + for j := idx + 1; j < len(buf); j++ { + if buf[j] != ' ' { + buf = append(buf[:idx+1], buf[j:]...) + stripToIdx = j + break + } + } + + if stripToIdx >= 0 { + idx = bytes.Index(buf[stripToIdx:], doubleSpaceBytes) + if idx >= 0 { + idx += stripToIdx + } + } else { + idx = -1 + } + } + + if buf != nil { + vals[i] = string(buf) + } else { + vals[i] = trimmed + } + } + return vals +} diff --git a/vendor/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/aws/types.go b/vendor/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/aws/types.go new file mode 100644 index 000000000..fa014b49e --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/aws/types.go @@ -0,0 +1,106 @@ +package aws + +import ( + "io" + "sync" +) + +// ReadSeekCloser wraps a io.Reader returning a ReaderSeekerCloser +func ReadSeekCloser(r io.Reader) ReaderSeekerCloser { + return ReaderSeekerCloser{r} +} + +// ReaderSeekerCloser represents a reader that can also delegate io.Seeker and +// io.Closer interfaces to the underlying object if they are available. +type ReaderSeekerCloser struct { + r io.Reader +} + +// Read reads from the reader up to size of p. The number of bytes read, and +// error if it occurred will be returned. +// +// If the reader is not an io.Reader zero bytes read, and nil error will be returned. +// +// Performs the same functionality as io.Reader Read +func (r ReaderSeekerCloser) Read(p []byte) (int, error) { + switch t := r.r.(type) { + case io.Reader: + return t.Read(p) + } + return 0, nil +} + +// Seek sets the offset for the next Read to offset, interpreted according to +// whence: 0 means relative to the origin of the file, 1 means relative to the +// current offset, and 2 means relative to the end. 
Seek returns the new offset +// and an error, if any. +// +// If the ReaderSeekerCloser is not an io.Seeker nothing will be done. +func (r ReaderSeekerCloser) Seek(offset int64, whence int) (int64, error) { + switch t := r.r.(type) { + case io.Seeker: + return t.Seek(offset, whence) + } + return int64(0), nil +} + +// Close closes the ReaderSeekerCloser. +// +// If the ReaderSeekerCloser is not an io.Closer nothing will be done. +func (r ReaderSeekerCloser) Close() error { + switch t := r.r.(type) { + case io.Closer: + return t.Close() + } + return nil +} + +// A WriteAtBuffer provides a in memory buffer supporting the io.WriterAt interface +// Can be used with the s3manager.Downloader to download content to a buffer +// in memory. Safe to use concurrently. +type WriteAtBuffer struct { + buf []byte + m sync.Mutex + + // GrowthCoeff defines the growth rate of the internal buffer. By + // default, the growth rate is 1, where expanding the internal + // buffer will allocate only enough capacity to fit the new expected + // length. + GrowthCoeff float64 +} + +// NewWriteAtBuffer creates a WriteAtBuffer with an internal buffer +// provided by buf. +func NewWriteAtBuffer(buf []byte) *WriteAtBuffer { + return &WriteAtBuffer{buf: buf} +} + +// WriteAt writes a slice of bytes to a buffer starting at the position provided +// The number of bytes written will be returned, or error. Can overwrite previous +// written slices if the write ats overlap. +func (b *WriteAtBuffer) WriteAt(p []byte, pos int64) (n int, err error) { + pLen := len(p) + expLen := pos + int64(pLen) + b.m.Lock() + defer b.m.Unlock() + if int64(len(b.buf)) < expLen { + if int64(cap(b.buf)) < expLen { + if b.GrowthCoeff < 1 { + b.GrowthCoeff = 1 + } + newBuf := make([]byte, expLen, int64(b.GrowthCoeff*float64(expLen))) + copy(newBuf, b.buf) + b.buf = newBuf + } + b.buf = b.buf[:expLen] + } + copy(b.buf[pos:], p) + return pLen, nil +} + +// Bytes returns a slice of bytes written to the buffer. +func (b *WriteAtBuffer) Bytes() []byte { + b.m.Lock() + defer b.m.Unlock() + return b.buf[:len(b.buf):len(b.buf)] +} diff --git a/vendor/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/aws/version.go b/vendor/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/aws/version.go new file mode 100644 index 000000000..e78052130 --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/aws/version.go @@ -0,0 +1,8 @@ +// Package aws provides core functionality for making requests to AWS services. +package aws + +// SDKName is the name of this AWS SDK +const SDKName = "aws-sdk-go" + +// SDKVersion is the version of this SDK +const SDKVersion = "1.2.4" diff --git a/vendor/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/private/endpoints/endpoints.go b/vendor/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/private/endpoints/endpoints.go new file mode 100644 index 000000000..2b279e659 --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/private/endpoints/endpoints.go @@ -0,0 +1,65 @@ +// Package endpoints validates regional endpoints for services. +package endpoints + +//go:generate go run ../model/cli/gen-endpoints/main.go endpoints.json endpoints_map.go +//go:generate gofmt -s -w endpoints_map.go + +import ( + "fmt" + "regexp" + "strings" +) + +// NormalizeEndpoint takes and endpoint and service API information to return a +// normalized endpoint and signing region. 
If the endpoint is not an empty string +// the service name and region will be used to look up the service's API endpoint. +// If the endpoint is provided the scheme will be added if it is not present. +func NormalizeEndpoint(endpoint, serviceName, region string, disableSSL bool) (normEndpoint, signingRegion string) { + if endpoint == "" { + return EndpointForRegion(serviceName, region, disableSSL) + } + + return AddScheme(endpoint, disableSSL), "" +} + +// EndpointForRegion returns an endpoint and its signing region for a service and region. +// if the service and region pair are not found endpoint and signingRegion will be empty. +func EndpointForRegion(svcName, region string, disableSSL bool) (endpoint, signingRegion string) { + derivedKeys := []string{ + region + "/" + svcName, + region + "/*", + "*/" + svcName, + "*/*", + } + + for _, key := range derivedKeys { + if val, ok := endpointsMap.Endpoints[key]; ok { + ep := val.Endpoint + ep = strings.Replace(ep, "{region}", region, -1) + ep = strings.Replace(ep, "{service}", svcName, -1) + + endpoint = ep + signingRegion = val.SigningRegion + break + } + } + + return AddScheme(endpoint, disableSSL), signingRegion +} + +// Regular expression to determine if the endpoint string is prefixed with a scheme. +var schemeRE = regexp.MustCompile("^([^:]+)://") + +// AddScheme adds the HTTP or HTTPS schemes to a endpoint URL if there is no +// scheme. If disableSSL is true HTTP will be added instead of the default HTTPS. +func AddScheme(endpoint string, disableSSL bool) string { + if endpoint != "" && !schemeRE.MatchString(endpoint) { + scheme := "https" + if disableSSL { + scheme = "http" + } + endpoint = fmt.Sprintf("%s://%s", scheme, endpoint) + } + + return endpoint +} diff --git a/vendor/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/private/endpoints/endpoints.json b/vendor/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/private/endpoints/endpoints.json new file mode 100644 index 000000000..5f4991c2b --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/private/endpoints/endpoints.json @@ -0,0 +1,75 @@ +{ + "version": 2, + "endpoints": { + "*/*": { + "endpoint": "{service}.{region}.amazonaws.com" + }, + "cn-north-1/*": { + "endpoint": "{service}.{region}.amazonaws.com.cn", + "signatureVersion": "v4" + }, + "cn-north-1/ec2metadata": { + "endpoint": "http://169.254.169.254/latest" + }, + "us-gov-west-1/iam": { + "endpoint": "iam.us-gov.amazonaws.com" + }, + "us-gov-west-1/sts": { + "endpoint": "sts.us-gov-west-1.amazonaws.com" + }, + "us-gov-west-1/s3": { + "endpoint": "s3-{region}.amazonaws.com" + }, + "us-gov-west-1/ec2metadata": { + "endpoint": "http://169.254.169.254/latest" + }, + "*/cloudfront": { + "endpoint": "cloudfront.amazonaws.com", + "signingRegion": "us-east-1" + }, + "*/cloudsearchdomain": { + "endpoint": "", + "signingRegion": "us-east-1" + }, + "*/data.iot": { + "endpoint": "", + "signingRegion": "us-east-1" + }, + "*/ec2metadata": { + "endpoint": "http://169.254.169.254/latest" + }, + "*/iam": { + "endpoint": "iam.amazonaws.com", + "signingRegion": "us-east-1" + }, + "*/importexport": { + "endpoint": "importexport.amazonaws.com", + "signingRegion": "us-east-1" + }, + "*/route53": { + "endpoint": "route53.amazonaws.com", + "signingRegion": "us-east-1" + }, + "*/sts": { + "endpoint": "sts.amazonaws.com", + "signingRegion": "us-east-1" + }, + "*/waf": { + "endpoint": "waf.amazonaws.com", + "signingRegion": "us-east-1" + }, + "us-east-1/sdb": { + "endpoint": 
"sdb.amazonaws.com", + "signingRegion": "us-east-1" + }, + "*/s3": { + "endpoint": "s3-{region}.amazonaws.com" + }, + "us-east-1/s3": { + "endpoint": "s3.amazonaws.com" + }, + "eu-central-1/s3": { + "endpoint": "{service}.{region}.amazonaws.com" + } + } +} diff --git a/vendor/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/private/endpoints/endpoints_map.go b/vendor/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/private/endpoints/endpoints_map.go new file mode 100644 index 000000000..e995315ab --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/private/endpoints/endpoints_map.go @@ -0,0 +1,88 @@ +package endpoints + +// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. + +type endpointStruct struct { + Version int + Endpoints map[string]endpointEntry +} + +type endpointEntry struct { + Endpoint string + SigningRegion string +} + +var endpointsMap = endpointStruct{ + Version: 2, + Endpoints: map[string]endpointEntry{ + "*/*": { + Endpoint: "{service}.{region}.amazonaws.com", + }, + "*/cloudfront": { + Endpoint: "cloudfront.amazonaws.com", + SigningRegion: "us-east-1", + }, + "*/cloudsearchdomain": { + Endpoint: "", + SigningRegion: "us-east-1", + }, + "*/data.iot": { + Endpoint: "", + SigningRegion: "us-east-1", + }, + "*/ec2metadata": { + Endpoint: "http://169.254.169.254/latest", + }, + "*/iam": { + Endpoint: "iam.amazonaws.com", + SigningRegion: "us-east-1", + }, + "*/importexport": { + Endpoint: "importexport.amazonaws.com", + SigningRegion: "us-east-1", + }, + "*/route53": { + Endpoint: "route53.amazonaws.com", + SigningRegion: "us-east-1", + }, + "*/s3": { + Endpoint: "s3-{region}.amazonaws.com", + }, + "*/sts": { + Endpoint: "sts.amazonaws.com", + SigningRegion: "us-east-1", + }, + "*/waf": { + Endpoint: "waf.amazonaws.com", + SigningRegion: "us-east-1", + }, + "cn-north-1/*": { + Endpoint: "{service}.{region}.amazonaws.com.cn", + }, + "cn-north-1/ec2metadata": { + Endpoint: "http://169.254.169.254/latest", + }, + "eu-central-1/s3": { + Endpoint: "{service}.{region}.amazonaws.com", + }, + "us-east-1/s3": { + Endpoint: "s3.amazonaws.com", + }, + "us-east-1/sdb": { + Endpoint: "sdb.amazonaws.com", + SigningRegion: "us-east-1", + }, + "us-gov-west-1/ec2metadata": { + Endpoint: "http://169.254.169.254/latest", + }, + "us-gov-west-1/iam": { + Endpoint: "iam.us-gov.amazonaws.com", + }, + "us-gov-west-1/s3": { + Endpoint: "s3-{region}.amazonaws.com", + }, + "us-gov-west-1/sts": { + Endpoint: "sts.us-gov-west-1.amazonaws.com", + }, + }, +} diff --git a/vendor/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/private/protocol/idempotency.go b/vendor/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/private/protocol/idempotency.go new file mode 100644 index 000000000..53831dff9 --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/private/protocol/idempotency.go @@ -0,0 +1,75 @@ +package protocol + +import ( + "crypto/rand" + "fmt" + "reflect" +) + +// RandReader is the random reader the protocol package will use to read +// random bytes from. This is exported for testing, and should not be used. +var RandReader = rand.Reader + +const idempotencyTokenFillTag = `idempotencyToken` + +// CanSetIdempotencyToken returns true if the struct field should be +// automatically populated with a Idempotency token. +// +// Only *string and string type fields that are tagged with idempotencyToken +// which are not already set can be auto filled. 
+func CanSetIdempotencyToken(v reflect.Value, f reflect.StructField) bool { + switch u := v.Interface().(type) { + // To auto fill an Idempotency token the field must be a string, + // tagged for auto fill, and have a zero value. + case *string: + return u == nil && len(f.Tag.Get(idempotencyTokenFillTag)) != 0 + case string: + return len(u) == 0 && len(f.Tag.Get(idempotencyTokenFillTag)) != 0 + } + + return false +} + +// GetIdempotencyToken returns a randomly generated idempotency token. +func GetIdempotencyToken() string { + b := make([]byte, 16) + RandReader.Read(b) + + return UUIDVersion4(b) +} + +// SetIdempotencyToken will set the value provided with a Idempotency Token. +// Given that the value can be set. Will panic if value is not setable. +func SetIdempotencyToken(v reflect.Value) { + if v.Kind() == reflect.Ptr { + if v.IsNil() && v.CanSet() { + v.Set(reflect.New(v.Type().Elem())) + } + v = v.Elem() + } + v = reflect.Indirect(v) + + if !v.CanSet() { + panic(fmt.Sprintf("unable to set idempotnecy token %v", v)) + } + + b := make([]byte, 16) + _, err := rand.Read(b) + if err != nil { + // TODO handle error + return + } + + v.Set(reflect.ValueOf(UUIDVersion4(b))) +} + +// UUIDVersion4 returns a Version 4 random UUID from the byte slice provided +func UUIDVersion4(u []byte) string { + // https://en.wikipedia.org/wiki/Universally_unique_identifier#Version_4_.28random.29 + // 13th character is "4" + u[6] = (u[6] | 0x40) & 0x4F + // 17th character is "8", "9", "a", or "b" + u[8] = (u[8] | 0x80) & 0xBF + + return fmt.Sprintf(`%X-%X-%X-%X-%X`, u[0:4], u[4:6], u[6:8], u[8:10], u[10:]) +} diff --git a/vendor/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/private/protocol/query/build.go b/vendor/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/private/protocol/query/build.go new file mode 100644 index 000000000..56d69db05 --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/private/protocol/query/build.go @@ -0,0 +1,36 @@ +// Package query provides serialisation of AWS query requests, and responses. +package query + +//go:generate go run ../../../models/protocol_tests/generate.go ../../../models/protocol_tests/input/query.json build_test.go + +import ( + "net/url" + + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/private/protocol/query/queryutil" +) + +// BuildHandler is a named request handler for building query protocol requests +var BuildHandler = request.NamedHandler{Name: "awssdk.query.Build", Fn: Build} + +// Build builds a request for an AWS Query service. 
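+//
+// For a non-presigned request the operation is serialized as a POST with an
+// application/x-www-form-urlencoded body shaped roughly like (names and
+// values here are placeholders, not a real API):
+//
+//	Action=SomeOperation&Version=2010-01-01&ParamName=value
+//
+// Presigned requests (ExpireTime != 0) carry the same key/value pairs on the
+// query string of a GET request instead.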
+func Build(r *request.Request) { + body := url.Values{ + "Action": {r.Operation.Name}, + "Version": {r.ClientInfo.APIVersion}, + } + if err := queryutil.Parse(body, r.Params, false); err != nil { + r.Error = awserr.New("SerializationError", "failed encoding Query request", err) + return + } + + if r.ExpireTime == 0 { + r.HTTPRequest.Method = "POST" + r.HTTPRequest.Header.Set("Content-Type", "application/x-www-form-urlencoded; charset=utf-8") + r.SetBufferBody([]byte(body.Encode())) + } else { // This is a pre-signed request + r.HTTPRequest.Method = "GET" + r.HTTPRequest.URL.RawQuery = body.Encode() + } +} diff --git a/vendor/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/private/protocol/query/queryutil/queryutil.go b/vendor/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/private/protocol/query/queryutil/queryutil.go new file mode 100644 index 000000000..60ea0bd1e --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/private/protocol/query/queryutil/queryutil.go @@ -0,0 +1,230 @@ +package queryutil + +import ( + "encoding/base64" + "fmt" + "net/url" + "reflect" + "sort" + "strconv" + "strings" + "time" + + "github.com/aws/aws-sdk-go/private/protocol" +) + +// Parse parses an object i and fills a url.Values object. The isEC2 flag +// indicates if this is the EC2 Query sub-protocol. +func Parse(body url.Values, i interface{}, isEC2 bool) error { + q := queryParser{isEC2: isEC2} + return q.parseValue(body, reflect.ValueOf(i), "", "") +} + +func elemOf(value reflect.Value) reflect.Value { + for value.Kind() == reflect.Ptr { + value = value.Elem() + } + return value +} + +type queryParser struct { + isEC2 bool +} + +func (q *queryParser) parseValue(v url.Values, value reflect.Value, prefix string, tag reflect.StructTag) error { + value = elemOf(value) + + // no need to handle zero values + if !value.IsValid() { + return nil + } + + t := tag.Get("type") + if t == "" { + switch value.Kind() { + case reflect.Struct: + t = "structure" + case reflect.Slice: + t = "list" + case reflect.Map: + t = "map" + } + } + + switch t { + case "structure": + return q.parseStruct(v, value, prefix) + case "list": + return q.parseList(v, value, prefix, tag) + case "map": + return q.parseMap(v, value, prefix, tag) + default: + return q.parseScalar(v, value, prefix, tag) + } +} + +func (q *queryParser) parseStruct(v url.Values, value reflect.Value, prefix string) error { + if !value.IsValid() { + return nil + } + + t := value.Type() + for i := 0; i < value.NumField(); i++ { + elemValue := elemOf(value.Field(i)) + field := t.Field(i) + + if field.PkgPath != "" { + continue // ignore unexported fields + } + + if protocol.CanSetIdempotencyToken(value.Field(i), field) { + token := protocol.GetIdempotencyToken() + elemValue = reflect.ValueOf(token) + } + + var name string + if q.isEC2 { + name = field.Tag.Get("queryName") + } + if name == "" { + if field.Tag.Get("flattened") != "" && field.Tag.Get("locationNameList") != "" { + name = field.Tag.Get("locationNameList") + } else if locName := field.Tag.Get("locationName"); locName != "" { + name = locName + } + if name != "" && q.isEC2 { + name = strings.ToUpper(name[0:1]) + name[1:] + } + } + if name == "" { + name = field.Name + } + + if prefix != "" { + name = prefix + "." 
+ name + } + + if err := q.parseValue(v, elemValue, name, field.Tag); err != nil { + return err + } + } + return nil +} + +func (q *queryParser) parseList(v url.Values, value reflect.Value, prefix string, tag reflect.StructTag) error { + // If it's empty, generate an empty value + if !value.IsNil() && value.Len() == 0 { + v.Set(prefix, "") + return nil + } + + // check for unflattened list member + if !q.isEC2 && tag.Get("flattened") == "" { + prefix += ".member" + } + + for i := 0; i < value.Len(); i++ { + slicePrefix := prefix + if slicePrefix == "" { + slicePrefix = strconv.Itoa(i + 1) + } else { + slicePrefix = slicePrefix + "." + strconv.Itoa(i+1) + } + if err := q.parseValue(v, value.Index(i), slicePrefix, ""); err != nil { + return err + } + } + return nil +} + +func (q *queryParser) parseMap(v url.Values, value reflect.Value, prefix string, tag reflect.StructTag) error { + // If it's empty, generate an empty value + if !value.IsNil() && value.Len() == 0 { + v.Set(prefix, "") + return nil + } + + // check for unflattened list member + if !q.isEC2 && tag.Get("flattened") == "" { + prefix += ".entry" + } + + // sort keys for improved serialization consistency. + // this is not strictly necessary for protocol support. + mapKeyValues := value.MapKeys() + mapKeys := map[string]reflect.Value{} + mapKeyNames := make([]string, len(mapKeyValues)) + for i, mapKey := range mapKeyValues { + name := mapKey.String() + mapKeys[name] = mapKey + mapKeyNames[i] = name + } + sort.Strings(mapKeyNames) + + for i, mapKeyName := range mapKeyNames { + mapKey := mapKeys[mapKeyName] + mapValue := value.MapIndex(mapKey) + + kname := tag.Get("locationNameKey") + if kname == "" { + kname = "key" + } + vname := tag.Get("locationNameValue") + if vname == "" { + vname = "value" + } + + // serialize key + var keyName string + if prefix == "" { + keyName = strconv.Itoa(i+1) + "." + kname + } else { + keyName = prefix + "." + strconv.Itoa(i+1) + "." + kname + } + + if err := q.parseValue(v, mapKey, keyName, ""); err != nil { + return err + } + + // serialize value + var valueName string + if prefix == "" { + valueName = strconv.Itoa(i+1) + "." + vname + } else { + valueName = prefix + "." + strconv.Itoa(i+1) + "." 
+ vname + } + + if err := q.parseValue(v, mapValue, valueName, ""); err != nil { + return err + } + } + + return nil +} + +func (q *queryParser) parseScalar(v url.Values, r reflect.Value, name string, tag reflect.StructTag) error { + switch value := r.Interface().(type) { + case string: + v.Set(name, value) + case []byte: + if !r.IsNil() { + v.Set(name, base64.StdEncoding.EncodeToString(value)) + } + case bool: + v.Set(name, strconv.FormatBool(value)) + case int64: + v.Set(name, strconv.FormatInt(value, 10)) + case int: + v.Set(name, strconv.Itoa(value)) + case float64: + v.Set(name, strconv.FormatFloat(value, 'f', -1, 64)) + case float32: + v.Set(name, strconv.FormatFloat(float64(value), 'f', -1, 32)) + case time.Time: + const ISO8601UTC = "2006-01-02T15:04:05Z" + v.Set(name, value.UTC().Format(ISO8601UTC)) + default: + return fmt.Errorf("unsupported value for param %s: %v (%s)", name, r.Interface(), r.Type().Name()) + } + return nil +} diff --git a/vendor/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/private/protocol/query/unmarshal.go b/vendor/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/private/protocol/query/unmarshal.go new file mode 100644 index 000000000..a3ea40955 --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/private/protocol/query/unmarshal.go @@ -0,0 +1,35 @@ +package query + +//go:generate go run ../../../models/protocol_tests/generate.go ../../../models/protocol_tests/output/query.json unmarshal_test.go + +import ( + "encoding/xml" + + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil" +) + +// UnmarshalHandler is a named request handler for unmarshaling query protocol requests +var UnmarshalHandler = request.NamedHandler{Name: "awssdk.query.Unmarshal", Fn: Unmarshal} + +// UnmarshalMetaHandler is a named request handler for unmarshaling query protocol request metadata +var UnmarshalMetaHandler = request.NamedHandler{Name: "awssdk.query.UnmarshalMeta", Fn: UnmarshalMeta} + +// Unmarshal unmarshals a response for an AWS Query service. +func Unmarshal(r *request.Request) { + defer r.HTTPResponse.Body.Close() + if r.DataFilled() { + decoder := xml.NewDecoder(r.HTTPResponse.Body) + err := xmlutil.UnmarshalXML(r.Data, decoder, r.Operation.Name+"Result") + if err != nil { + r.Error = awserr.New("SerializationError", "failed decoding Query response", err) + return + } + } +} + +// UnmarshalMeta unmarshals header response values for an AWS Query service. 
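+//
+// Only the request ID is extracted here; it is read from the
+// X-Amzn-Requestid response header.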
+func UnmarshalMeta(r *request.Request) { + r.RequestID = r.HTTPResponse.Header.Get("X-Amzn-Requestid") +} diff --git a/vendor/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/private/protocol/query/unmarshal_error.go b/vendor/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/private/protocol/query/unmarshal_error.go new file mode 100644 index 000000000..f21429617 --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/private/protocol/query/unmarshal_error.go @@ -0,0 +1,66 @@ +package query + +import ( + "encoding/xml" + "io/ioutil" + + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/aws/request" +) + +type xmlErrorResponse struct { + XMLName xml.Name `xml:"ErrorResponse"` + Code string `xml:"Error>Code"` + Message string `xml:"Error>Message"` + RequestID string `xml:"RequestId"` +} + +type xmlServiceUnavailableResponse struct { + XMLName xml.Name `xml:"ServiceUnavailableException"` +} + +// UnmarshalErrorHandler is a name request handler to unmarshal request errors +var UnmarshalErrorHandler = request.NamedHandler{Name: "awssdk.query.UnmarshalError", Fn: UnmarshalError} + +// UnmarshalError unmarshals an error response for an AWS Query service. +func UnmarshalError(r *request.Request) { + defer r.HTTPResponse.Body.Close() + + bodyBytes, err := ioutil.ReadAll(r.HTTPResponse.Body) + if err != nil { + r.Error = awserr.New("SerializationError", "failed to read from query HTTP response body", err) + return + } + + // First check for specific error + resp := xmlErrorResponse{} + decodeErr := xml.Unmarshal(bodyBytes, &resp) + if decodeErr == nil { + reqID := resp.RequestID + if reqID == "" { + reqID = r.RequestID + } + r.Error = awserr.NewRequestFailure( + awserr.New(resp.Code, resp.Message, nil), + r.HTTPResponse.StatusCode, + reqID, + ) + return + } + + // Check for unhandled error + servUnavailResp := xmlServiceUnavailableResponse{} + unavailErr := xml.Unmarshal(bodyBytes, &servUnavailResp) + if unavailErr == nil { + r.Error = awserr.NewRequestFailure( + awserr.New("ServiceUnavailableException", "service is unavailable", nil), + r.HTTPResponse.StatusCode, + r.RequestID, + ) + return + } + + // Failed to retrieve any error message from the response body + r.Error = awserr.New("SerializationError", + "failed to decode query XML error response", decodeErr) +} diff --git a/vendor/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/private/protocol/rest/build.go b/vendor/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/private/protocol/rest/build.go new file mode 100644 index 000000000..5f412516d --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/private/protocol/rest/build.go @@ -0,0 +1,256 @@ +// Package rest provides RESTful serialization of AWS requests and responses. 
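+//
+// Serialization is driven by struct tags. A rough, hypothetical input shape
+// (type and field names below are illustrative only) could look like:
+//
+//	type PutThingInput struct {
+//		Name        *string            `location:"uri" locationName:"name"`
+//		Marker      *string            `location:"querystring" locationName:"marker"`
+//		ContentType *string            `location:"header" locationName:"Content-Type"`
+//		Metadata    map[string]*string `location:"headers" locationName:"x-meta-"`
+//	}
+//
+// Fields tagged "uri" are substituted into the request path, "querystring"
+// fields go into the query string, and "header"/"headers" fields become HTTP
+// headers.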
+package rest + +import ( + "bytes" + "encoding/base64" + "fmt" + "io" + "net/http" + "net/url" + "path" + "reflect" + "strconv" + "strings" + "time" + + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/aws/request" +) + +// RFC822 returns an RFC822 formatted timestamp for AWS protocols +const RFC822 = "Mon, 2 Jan 2006 15:04:05 GMT" + +// Whether the byte value can be sent without escaping in AWS URLs +var noEscape [256]bool + +var errValueNotSet = fmt.Errorf("value not set") + +func init() { + for i := 0; i < len(noEscape); i++ { + // AWS expects every character except these to be escaped + noEscape[i] = (i >= 'A' && i <= 'Z') || + (i >= 'a' && i <= 'z') || + (i >= '0' && i <= '9') || + i == '-' || + i == '.' || + i == '_' || + i == '~' + } +} + +// BuildHandler is a named request handler for building rest protocol requests +var BuildHandler = request.NamedHandler{Name: "awssdk.rest.Build", Fn: Build} + +// Build builds the REST component of a service request. +func Build(r *request.Request) { + if r.ParamsFilled() { + v := reflect.ValueOf(r.Params).Elem() + buildLocationElements(r, v) + buildBody(r, v) + } +} + +func buildLocationElements(r *request.Request, v reflect.Value) { + query := r.HTTPRequest.URL.Query() + + for i := 0; i < v.NumField(); i++ { + m := v.Field(i) + if n := v.Type().Field(i).Name; n[0:1] == strings.ToLower(n[0:1]) { + continue + } + + if m.IsValid() { + field := v.Type().Field(i) + name := field.Tag.Get("locationName") + if name == "" { + name = field.Name + } + if m.Kind() == reflect.Ptr { + m = m.Elem() + } + if !m.IsValid() { + continue + } + + var err error + switch field.Tag.Get("location") { + case "headers": // header maps + err = buildHeaderMap(&r.HTTPRequest.Header, m, field.Tag.Get("locationName")) + case "header": + err = buildHeader(&r.HTTPRequest.Header, m, name) + case "uri": + err = buildURI(r.HTTPRequest.URL, m, name) + case "querystring": + err = buildQueryString(query, m, name) + } + r.Error = err + } + if r.Error != nil { + return + } + } + + r.HTTPRequest.URL.RawQuery = query.Encode() + updatePath(r.HTTPRequest.URL, r.HTTPRequest.URL.Path) +} + +func buildBody(r *request.Request, v reflect.Value) { + if field, ok := v.Type().FieldByName("_"); ok { + if payloadName := field.Tag.Get("payload"); payloadName != "" { + pfield, _ := v.Type().FieldByName(payloadName) + if ptag := pfield.Tag.Get("type"); ptag != "" && ptag != "structure" { + payload := reflect.Indirect(v.FieldByName(payloadName)) + if payload.IsValid() && payload.Interface() != nil { + switch reader := payload.Interface().(type) { + case io.ReadSeeker: + r.SetReaderBody(reader) + case []byte: + r.SetBufferBody(reader) + case string: + r.SetStringBody(reader) + default: + r.Error = awserr.New("SerializationError", + "failed to encode REST request", + fmt.Errorf("unknown payload type %s", payload.Type())) + } + } + } + } + } +} + +func buildHeader(header *http.Header, v reflect.Value, name string) error { + str, err := convertType(v) + if err == errValueNotSet { + return nil + } else if err != nil { + return awserr.New("SerializationError", "failed to encode REST request", err) + } + + header.Add(name, str) + + return nil +} + +func buildHeaderMap(header *http.Header, v reflect.Value, prefix string) error { + for _, key := range v.MapKeys() { + str, err := convertType(v.MapIndex(key)) + if err == errValueNotSet { + continue + } else if err != nil { + return awserr.New("SerializationError", "failed to encode REST request", err) + + } + + header.Add(prefix+key.String(), 
str) + } + return nil +} + +func buildURI(u *url.URL, v reflect.Value, name string) error { + value, err := convertType(v) + if err == errValueNotSet { + return nil + } else if err != nil { + return awserr.New("SerializationError", "failed to encode REST request", err) + } + + uri := u.Path + uri = strings.Replace(uri, "{"+name+"}", EscapePath(value, true), -1) + uri = strings.Replace(uri, "{"+name+"+}", EscapePath(value, false), -1) + u.Path = uri + + return nil +} + +func buildQueryString(query url.Values, v reflect.Value, name string) error { + switch value := v.Interface().(type) { + case []*string: + for _, item := range value { + query.Add(name, *item) + } + case map[string]*string: + for key, item := range value { + query.Add(key, *item) + } + case map[string][]*string: + for key, items := range value { + for _, item := range items { + query.Add(key, *item) + } + } + default: + str, err := convertType(v) + if err == errValueNotSet { + return nil + } else if err != nil { + return awserr.New("SerializationError", "failed to encode REST request", err) + } + query.Set(name, str) + } + + return nil +} + +func updatePath(url *url.URL, urlPath string) { + scheme, query := url.Scheme, url.RawQuery + + hasSlash := strings.HasSuffix(urlPath, "/") + + // clean up path + urlPath = path.Clean(urlPath) + if hasSlash && !strings.HasSuffix(urlPath, "/") { + urlPath += "/" + } + + // get formatted URL minus scheme so we can build this into Opaque + url.Scheme, url.Path, url.RawQuery = "", "", "" + s := url.String() + url.Scheme = scheme + url.RawQuery = query + + // build opaque URI + url.Opaque = s + urlPath +} + +// EscapePath escapes part of a URL path in Amazon style +func EscapePath(path string, encodeSep bool) string { + var buf bytes.Buffer + for i := 0; i < len(path); i++ { + c := path[i] + if noEscape[c] || (c == '/' && !encodeSep) { + buf.WriteByte(c) + } else { + fmt.Fprintf(&buf, "%%%02X", c) + } + } + return buf.String() +} + +func convertType(v reflect.Value) (string, error) { + v = reflect.Indirect(v) + if !v.IsValid() { + return "", errValueNotSet + } + + var str string + switch value := v.Interface().(type) { + case string: + str = value + case []byte: + str = base64.StdEncoding.EncodeToString(value) + case bool: + str = strconv.FormatBool(value) + case int64: + str = strconv.FormatInt(value, 10) + case float64: + str = strconv.FormatFloat(value, 'f', -1, 64) + case time.Time: + str = value.UTC().Format(RFC822) + default: + err := fmt.Errorf("Unsupported value for param %v (%s)", v.Interface(), v.Type()) + return "", err + } + return str, nil +} diff --git a/vendor/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/private/protocol/rest/payload.go b/vendor/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/private/protocol/rest/payload.go new file mode 100644 index 000000000..4366de2e1 --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/private/protocol/rest/payload.go @@ -0,0 +1,45 @@ +package rest + +import "reflect" + +// PayloadMember returns the payload field member of i if there is one, or nil. 
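+//
+// The payload member is discovered through the "_" metadata field. A
+// hypothetical shape (names are illustrative, not from a real service) would
+// be:
+//
+//	type CreateWidgetInput struct {
+//		_      struct{} `payload:"Widget"`
+//		Widget *Widget  `type:"structure"`
+//	}
+//
+// Only structure-typed payloads are returned; blob and streaming payloads are
+// handled by the rest protocol's body builder instead.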
+func PayloadMember(i interface{}) interface{} { + if i == nil { + return nil + } + + v := reflect.ValueOf(i).Elem() + if !v.IsValid() { + return nil + } + if field, ok := v.Type().FieldByName("_"); ok { + if payloadName := field.Tag.Get("payload"); payloadName != "" { + field, _ := v.Type().FieldByName(payloadName) + if field.Tag.Get("type") != "structure" { + return nil + } + + payload := v.FieldByName(payloadName) + if payload.IsValid() || (payload.Kind() == reflect.Ptr && !payload.IsNil()) { + return payload.Interface() + } + } + } + return nil +} + +// PayloadType returns the type of a payload field member of i if there is one, or "". +func PayloadType(i interface{}) string { + v := reflect.Indirect(reflect.ValueOf(i)) + if !v.IsValid() { + return "" + } + if field, ok := v.Type().FieldByName("_"); ok { + if payloadName := field.Tag.Get("payload"); payloadName != "" { + if member, ok := v.Type().FieldByName(payloadName); ok { + return member.Tag.Get("type") + } + } + } + return "" +} diff --git a/vendor/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/private/protocol/rest/unmarshal.go b/vendor/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/private/protocol/rest/unmarshal.go new file mode 100644 index 000000000..2cba1d9aa --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/private/protocol/rest/unmarshal.go @@ -0,0 +1,198 @@ +package rest + +import ( + "encoding/base64" + "fmt" + "io" + "io/ioutil" + "net/http" + "reflect" + "strconv" + "strings" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/aws/request" +) + +// UnmarshalHandler is a named request handler for unmarshaling rest protocol requests +var UnmarshalHandler = request.NamedHandler{Name: "awssdk.rest.Unmarshal", Fn: Unmarshal} + +// UnmarshalMetaHandler is a named request handler for unmarshaling rest protocol request metadata +var UnmarshalMetaHandler = request.NamedHandler{Name: "awssdk.rest.UnmarshalMeta", Fn: UnmarshalMeta} + +// Unmarshal unmarshals the REST component of a response in a REST service. 
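+//
+// Unmarshal only fills the payload body; header, header-map and status code
+// fields are populated separately by UnmarshalMeta via location tags such as
+// `location:"header"`, `location:"headers"` and `location:"statusCode"`.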
+func Unmarshal(r *request.Request) { + if r.DataFilled() { + v := reflect.Indirect(reflect.ValueOf(r.Data)) + unmarshalBody(r, v) + } +} + +// UnmarshalMeta unmarshals the REST metadata of a response in a REST service +func UnmarshalMeta(r *request.Request) { + r.RequestID = r.HTTPResponse.Header.Get("X-Amzn-Requestid") + if r.RequestID == "" { + // Alternative version of request id in the header + r.RequestID = r.HTTPResponse.Header.Get("X-Amz-Request-Id") + } + if r.DataFilled() { + v := reflect.Indirect(reflect.ValueOf(r.Data)) + unmarshalLocationElements(r, v) + } +} + +func unmarshalBody(r *request.Request, v reflect.Value) { + if field, ok := v.Type().FieldByName("_"); ok { + if payloadName := field.Tag.Get("payload"); payloadName != "" { + pfield, _ := v.Type().FieldByName(payloadName) + if ptag := pfield.Tag.Get("type"); ptag != "" && ptag != "structure" { + payload := v.FieldByName(payloadName) + if payload.IsValid() { + switch payload.Interface().(type) { + case []byte: + defer r.HTTPResponse.Body.Close() + b, err := ioutil.ReadAll(r.HTTPResponse.Body) + if err != nil { + r.Error = awserr.New("SerializationError", "failed to decode REST response", err) + } else { + payload.Set(reflect.ValueOf(b)) + } + case *string: + defer r.HTTPResponse.Body.Close() + b, err := ioutil.ReadAll(r.HTTPResponse.Body) + if err != nil { + r.Error = awserr.New("SerializationError", "failed to decode REST response", err) + } else { + str := string(b) + payload.Set(reflect.ValueOf(&str)) + } + default: + switch payload.Type().String() { + case "io.ReadSeeker": + payload.Set(reflect.ValueOf(aws.ReadSeekCloser(r.HTTPResponse.Body))) + case "aws.ReadSeekCloser", "io.ReadCloser": + payload.Set(reflect.ValueOf(r.HTTPResponse.Body)) + default: + io.Copy(ioutil.Discard, r.HTTPResponse.Body) + defer r.HTTPResponse.Body.Close() + r.Error = awserr.New("SerializationError", + "failed to decode REST response", + fmt.Errorf("unknown payload type %s", payload.Type())) + } + } + } + } + } + } +} + +func unmarshalLocationElements(r *request.Request, v reflect.Value) { + for i := 0; i < v.NumField(); i++ { + m, field := v.Field(i), v.Type().Field(i) + if n := field.Name; n[0:1] == strings.ToLower(n[0:1]) { + continue + } + + if m.IsValid() { + name := field.Tag.Get("locationName") + if name == "" { + name = field.Name + } + + switch field.Tag.Get("location") { + case "statusCode": + unmarshalStatusCode(m, r.HTTPResponse.StatusCode) + case "header": + err := unmarshalHeader(m, r.HTTPResponse.Header.Get(name)) + if err != nil { + r.Error = awserr.New("SerializationError", "failed to decode REST response", err) + break + } + case "headers": + prefix := field.Tag.Get("locationName") + err := unmarshalHeaderMap(m, r.HTTPResponse.Header, prefix) + if err != nil { + r.Error = awserr.New("SerializationError", "failed to decode REST response", err) + break + } + } + } + if r.Error != nil { + return + } + } +} + +func unmarshalStatusCode(v reflect.Value, statusCode int) { + if !v.IsValid() { + return + } + + switch v.Interface().(type) { + case *int64: + s := int64(statusCode) + v.Set(reflect.ValueOf(&s)) + } +} + +func unmarshalHeaderMap(r reflect.Value, headers http.Header, prefix string) error { + switch r.Interface().(type) { + case map[string]*string: // we only support string map value types + out := map[string]*string{} + for k, v := range headers { + k = http.CanonicalHeaderKey(k) + if strings.HasPrefix(strings.ToLower(k), strings.ToLower(prefix)) { + out[k[len(prefix):]] = &v[0] + } + } + r.Set(reflect.ValueOf(out)) + } 
+ return nil +} + +func unmarshalHeader(v reflect.Value, header string) error { + if !v.IsValid() || (header == "" && v.Elem().Kind() != reflect.String) { + return nil + } + + switch v.Interface().(type) { + case *string: + v.Set(reflect.ValueOf(&header)) + case []byte: + b, err := base64.StdEncoding.DecodeString(header) + if err != nil { + return err + } + v.Set(reflect.ValueOf(&b)) + case *bool: + b, err := strconv.ParseBool(header) + if err != nil { + return err + } + v.Set(reflect.ValueOf(&b)) + case *int64: + i, err := strconv.ParseInt(header, 10, 64) + if err != nil { + return err + } + v.Set(reflect.ValueOf(&i)) + case *float64: + f, err := strconv.ParseFloat(header, 64) + if err != nil { + return err + } + v.Set(reflect.ValueOf(&f)) + case *time.Time: + t, err := time.Parse(RFC822, header) + if err != nil { + return err + } + v.Set(reflect.ValueOf(&t)) + default: + err := fmt.Errorf("Unsupported value for param %v (%s)", v.Interface(), v.Type()) + return err + } + return nil +} diff --git a/vendor/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/private/protocol/restxml/restxml.go b/vendor/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/private/protocol/restxml/restxml.go new file mode 100644 index 000000000..c74088bfe --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/private/protocol/restxml/restxml.go @@ -0,0 +1,69 @@ +// Package restxml provides RESTful XML serialisation of AWS +// requests and responses. +package restxml + +//go:generate go run ../../../models/protocol_tests/generate.go ../../../models/protocol_tests/input/rest-xml.json build_test.go +//go:generate go run ../../../models/protocol_tests/generate.go ../../../models/protocol_tests/output/rest-xml.json unmarshal_test.go + +import ( + "bytes" + "encoding/xml" + + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/private/protocol/query" + "github.com/aws/aws-sdk-go/private/protocol/rest" + "github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil" +) + +// BuildHandler is a named request handler for building restxml protocol requests +var BuildHandler = request.NamedHandler{Name: "awssdk.restxml.Build", Fn: Build} + +// UnmarshalHandler is a named request handler for unmarshaling restxml protocol requests +var UnmarshalHandler = request.NamedHandler{Name: "awssdk.restxml.Unmarshal", Fn: Unmarshal} + +// UnmarshalMetaHandler is a named request handler for unmarshaling restxml protocol request metadata +var UnmarshalMetaHandler = request.NamedHandler{Name: "awssdk.restxml.UnmarshalMeta", Fn: UnmarshalMeta} + +// UnmarshalErrorHandler is a named request handler for unmarshaling restxml protocol request errors +var UnmarshalErrorHandler = request.NamedHandler{Name: "awssdk.restxml.UnmarshalError", Fn: UnmarshalError} + +// Build builds a request payload for the REST XML protocol. +func Build(r *request.Request) { + rest.Build(r) + + if t := rest.PayloadType(r.Params); t == "structure" || t == "" { + var buf bytes.Buffer + err := xmlutil.BuildXML(r.Params, xml.NewEncoder(&buf)) + if err != nil { + r.Error = awserr.New("SerializationError", "failed to encode rest XML request", err) + return + } + r.SetBufferBody(buf.Bytes()) + } +} + +// Unmarshal unmarshals a payload response for the REST XML protocol. 
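+//
+// Structure (or unspecified) payloads are decoded from the response body as
+// XML; any other payload type (blobs, streams, strings) is delegated to the
+// rest protocol's unmarshaler.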
+func Unmarshal(r *request.Request) { + if t := rest.PayloadType(r.Data); t == "structure" || t == "" { + defer r.HTTPResponse.Body.Close() + decoder := xml.NewDecoder(r.HTTPResponse.Body) + err := xmlutil.UnmarshalXML(r.Data, decoder, "") + if err != nil { + r.Error = awserr.New("SerializationError", "failed to decode REST XML response", err) + return + } + } else { + rest.Unmarshal(r) + } +} + +// UnmarshalMeta unmarshals response headers for the REST XML protocol. +func UnmarshalMeta(r *request.Request) { + rest.UnmarshalMeta(r) +} + +// UnmarshalError unmarshals a response error for the REST XML protocol. +func UnmarshalError(r *request.Request) { + query.UnmarshalError(r) +} diff --git a/vendor/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/private/protocol/unmarshal.go b/vendor/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/private/protocol/unmarshal.go new file mode 100644 index 000000000..da1a68111 --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/private/protocol/unmarshal.go @@ -0,0 +1,21 @@ +package protocol + +import ( + "io" + "io/ioutil" + + "github.com/aws/aws-sdk-go/aws/request" +) + +// UnmarshalDiscardBodyHandler is a named request handler to empty and close a response's body +var UnmarshalDiscardBodyHandler = request.NamedHandler{Name: "awssdk.shared.UnmarshalDiscardBody", Fn: UnmarshalDiscardBody} + +// UnmarshalDiscardBody is a request handler to empty a response's body and closing it. +func UnmarshalDiscardBody(r *request.Request) { + if r.HTTPResponse == nil || r.HTTPResponse.Body == nil { + return + } + + io.Copy(ioutil.Discard, r.HTTPResponse.Body) + r.HTTPResponse.Body.Close() +} diff --git a/vendor/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/build.go b/vendor/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/build.go new file mode 100644 index 000000000..ceb4132c5 --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/build.go @@ -0,0 +1,293 @@ +// Package xmlutil provides XML serialisation of AWS requests and responses. +package xmlutil + +import ( + "encoding/base64" + "encoding/xml" + "fmt" + "reflect" + "sort" + "strconv" + "time" + + "github.com/aws/aws-sdk-go/private/protocol" +) + +// BuildXML will serialize params into an xml.Encoder. +// Error will be returned if the serialization of any of the params or nested values fails. +func BuildXML(params interface{}, e *xml.Encoder) error { + b := xmlBuilder{encoder: e, namespaces: map[string]string{}} + root := NewXMLElement(xml.Name{}) + if err := b.buildValue(reflect.ValueOf(params), root, ""); err != nil { + return err + } + for _, c := range root.Children { + for _, v := range c { + return StructToXML(e, v, false) + } + } + return nil +} + +// Returns the reflection element of a value, if it is a pointer. +func elemOf(value reflect.Value) reflect.Value { + for value.Kind() == reflect.Ptr { + value = value.Elem() + } + return value +} + +// A xmlBuilder serializes values from Go code to XML +type xmlBuilder struct { + encoder *xml.Encoder + namespaces map[string]string +} + +// buildValue generic XMLNode builder for any type. Will build value for their specific type +// struct, list, map, scalar. +// +// Also takes a "type" tag value to set what type a value should be converted to XMLNode as. If +// type is not provided reflect will be used to determine the value's type. 
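+//
+// For example (tag values below are illustrative), a member declared as
+//
+//	Tags []*Tag `type:"list" locationName:"TagSet" locationNameList:"Tag"`
+//
+// is routed to the list builder, while an untagged struct, slice or map falls
+// back to the reflect.Kind based switch.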
+func (b *xmlBuilder) buildValue(value reflect.Value, current *XMLNode, tag reflect.StructTag) error { + value = elemOf(value) + if !value.IsValid() { // no need to handle zero values + return nil + } else if tag.Get("location") != "" { // don't handle non-body location values + return nil + } + + t := tag.Get("type") + if t == "" { + switch value.Kind() { + case reflect.Struct: + t = "structure" + case reflect.Slice: + t = "list" + case reflect.Map: + t = "map" + } + } + + switch t { + case "structure": + if field, ok := value.Type().FieldByName("_"); ok { + tag = tag + reflect.StructTag(" ") + field.Tag + } + return b.buildStruct(value, current, tag) + case "list": + return b.buildList(value, current, tag) + case "map": + return b.buildMap(value, current, tag) + default: + return b.buildScalar(value, current, tag) + } +} + +// buildStruct adds a struct and its fields to the current XMLNode. All fields any any nested +// types are converted to XMLNodes also. +func (b *xmlBuilder) buildStruct(value reflect.Value, current *XMLNode, tag reflect.StructTag) error { + if !value.IsValid() { + return nil + } + + fieldAdded := false + + // unwrap payloads + if payload := tag.Get("payload"); payload != "" { + field, _ := value.Type().FieldByName(payload) + tag = field.Tag + value = elemOf(value.FieldByName(payload)) + + if !value.IsValid() { + return nil + } + } + + child := NewXMLElement(xml.Name{Local: tag.Get("locationName")}) + + // there is an xmlNamespace associated with this struct + if prefix, uri := tag.Get("xmlPrefix"), tag.Get("xmlURI"); uri != "" { + ns := xml.Attr{ + Name: xml.Name{Local: "xmlns"}, + Value: uri, + } + if prefix != "" { + b.namespaces[prefix] = uri // register the namespace + ns.Name.Local = "xmlns:" + prefix + } + + child.Attr = append(child.Attr, ns) + } + + t := value.Type() + for i := 0; i < value.NumField(); i++ { + member := elemOf(value.Field(i)) + field := t.Field(i) + + if field.PkgPath != "" { + continue // ignore unexported fields + } + + mTag := field.Tag + if mTag.Get("location") != "" { // skip non-body members + continue + } + + if protocol.CanSetIdempotencyToken(value.Field(i), field) { + token := protocol.GetIdempotencyToken() + member = reflect.ValueOf(token) + } + + memberName := mTag.Get("locationName") + if memberName == "" { + memberName = field.Name + mTag = reflect.StructTag(string(mTag) + ` locationName:"` + memberName + `"`) + } + if err := b.buildValue(member, child, mTag); err != nil { + return err + } + + fieldAdded = true + } + + if fieldAdded { // only append this child if we have one ore more valid members + current.AddChild(child) + } + + return nil +} + +// buildList adds the value's list items to the current XMLNode as children nodes. All +// nested values in the list are converted to XMLNodes also. 
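+//
+// Roughly (element names are illustrative), a non-flattened list serializes as
+//
+//	<Values><member>a</member><member>b</member></Values>
+//
+// whereas a flattened list repeats the outer element once per item:
+//
+//	<Values>a</Values><Values>b</Values>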
+func (b *xmlBuilder) buildList(value reflect.Value, current *XMLNode, tag reflect.StructTag) error { + if value.IsNil() { // don't build omitted lists + return nil + } + + // check for unflattened list member + flattened := tag.Get("flattened") != "" + + xname := xml.Name{Local: tag.Get("locationName")} + if flattened { + for i := 0; i < value.Len(); i++ { + child := NewXMLElement(xname) + current.AddChild(child) + if err := b.buildValue(value.Index(i), child, ""); err != nil { + return err + } + } + } else { + list := NewXMLElement(xname) + current.AddChild(list) + + for i := 0; i < value.Len(); i++ { + iname := tag.Get("locationNameList") + if iname == "" { + iname = "member" + } + + child := NewXMLElement(xml.Name{Local: iname}) + list.AddChild(child) + if err := b.buildValue(value.Index(i), child, ""); err != nil { + return err + } + } + } + + return nil +} + +// buildMap adds the value's key/value pairs to the current XMLNode as children nodes. All +// nested values in the map are converted to XMLNodes also. +// +// Error will be returned if it is unable to build the map's values into XMLNodes +func (b *xmlBuilder) buildMap(value reflect.Value, current *XMLNode, tag reflect.StructTag) error { + if value.IsNil() { // don't build omitted maps + return nil + } + + maproot := NewXMLElement(xml.Name{Local: tag.Get("locationName")}) + current.AddChild(maproot) + current = maproot + + kname, vname := "key", "value" + if n := tag.Get("locationNameKey"); n != "" { + kname = n + } + if n := tag.Get("locationNameValue"); n != "" { + vname = n + } + + // sorting is not required for compliance, but it makes testing easier + keys := make([]string, value.Len()) + for i, k := range value.MapKeys() { + keys[i] = k.String() + } + sort.Strings(keys) + + for _, k := range keys { + v := value.MapIndex(reflect.ValueOf(k)) + + mapcur := current + if tag.Get("flattened") == "" { // add "entry" tag to non-flat maps + child := NewXMLElement(xml.Name{Local: "entry"}) + mapcur.AddChild(child) + mapcur = child + } + + kchild := NewXMLElement(xml.Name{Local: kname}) + kchild.Text = k + vchild := NewXMLElement(xml.Name{Local: vname}) + mapcur.AddChild(kchild) + mapcur.AddChild(vchild) + + if err := b.buildValue(v, vchild, ""); err != nil { + return err + } + } + + return nil +} + +// buildScalar will convert the value into a string and append it as a attribute or child +// of the current XMLNode. +// +// The value will be added as an attribute if tag contains a "xmlAttribute" attribute value. +// +// Error will be returned if the value type is unsupported. 
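+//
+// A hypothetical attribute-style member, for illustration only:
+//
+//	Encoding *string `locationName:"encoding" xmlAttribute:"true"`
+//
+// would be emitted as encoding="..." on the current element rather than as a
+// child node.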
+func (b *xmlBuilder) buildScalar(value reflect.Value, current *XMLNode, tag reflect.StructTag) error { + var str string + switch converted := value.Interface().(type) { + case string: + str = converted + case []byte: + if !value.IsNil() { + str = base64.StdEncoding.EncodeToString(converted) + } + case bool: + str = strconv.FormatBool(converted) + case int64: + str = strconv.FormatInt(converted, 10) + case int: + str = strconv.Itoa(converted) + case float64: + str = strconv.FormatFloat(converted, 'f', -1, 64) + case float32: + str = strconv.FormatFloat(float64(converted), 'f', -1, 32) + case time.Time: + const ISO8601UTC = "2006-01-02T15:04:05Z" + str = converted.UTC().Format(ISO8601UTC) + default: + return fmt.Errorf("unsupported value for param %s: %v (%s)", + tag.Get("locationName"), value.Interface(), value.Type().Name()) + } + + xname := xml.Name{Local: tag.Get("locationName")} + if tag.Get("xmlAttribute") != "" { // put into current node's attribute list + attr := xml.Attr{Name: xname, Value: str} + current.Attr = append(current.Attr, attr) + } else { // regular text node + current.AddChild(&XMLNode{Name: xname, Text: str}) + } + return nil +} diff --git a/vendor/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/unmarshal.go b/vendor/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/unmarshal.go new file mode 100644 index 000000000..49f291a85 --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/unmarshal.go @@ -0,0 +1,260 @@ +package xmlutil + +import ( + "encoding/base64" + "encoding/xml" + "fmt" + "io" + "reflect" + "strconv" + "strings" + "time" +) + +// UnmarshalXML deserializes an xml.Decoder into the container v. V +// needs to match the shape of the XML expected to be decoded. +// If the shape doesn't match unmarshaling will fail. +func UnmarshalXML(v interface{}, d *xml.Decoder, wrapper string) error { + n, _ := XMLToStruct(d, nil) + if n.Children != nil { + for _, root := range n.Children { + for _, c := range root { + if wrappedChild, ok := c.Children[wrapper]; ok { + c = wrappedChild[0] // pull out wrapped element + } + + err := parse(reflect.ValueOf(v), c, "") + if err != nil { + if err == io.EOF { + return nil + } + return err + } + } + } + return nil + } + return nil +} + +// parse deserializes any value from the XMLNode. The type tag is used to infer the type, or reflect +// will be used to determine the type from r. +func parse(r reflect.Value, node *XMLNode, tag reflect.StructTag) error { + rtype := r.Type() + if rtype.Kind() == reflect.Ptr { + rtype = rtype.Elem() // check kind of actual element type + } + + t := tag.Get("type") + if t == "" { + switch rtype.Kind() { + case reflect.Struct: + t = "structure" + case reflect.Slice: + t = "list" + case reflect.Map: + t = "map" + } + } + + switch t { + case "structure": + if field, ok := rtype.FieldByName("_"); ok { + tag = field.Tag + } + return parseStruct(r, node, tag) + case "list": + return parseList(r, node, tag) + case "map": + return parseMap(r, node, tag) + default: + return parseScalar(r, node, tag) + } +} + +// parseStruct deserializes a structure and its fields from an XMLNode. Any nested +// types in the structure will also be deserialized. 
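+//
+// Field names are matched against child element names using the locationName
+// (or locationNameList for flattened lists) tag, falling back to the Go field
+// name; values found only in XML attributes are wrapped in synthetic text
+// nodes before being parsed.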
+func parseStruct(r reflect.Value, node *XMLNode, tag reflect.StructTag) error { + t := r.Type() + if r.Kind() == reflect.Ptr { + if r.IsNil() { // create the structure if it's nil + s := reflect.New(r.Type().Elem()) + r.Set(s) + r = s + } + + r = r.Elem() + t = t.Elem() + } + + // unwrap any payloads + if payload := tag.Get("payload"); payload != "" { + field, _ := t.FieldByName(payload) + return parseStruct(r.FieldByName(payload), node, field.Tag) + } + + for i := 0; i < t.NumField(); i++ { + field := t.Field(i) + if c := field.Name[0:1]; strings.ToLower(c) == c { + continue // ignore unexported fields + } + + // figure out what this field is called + name := field.Name + if field.Tag.Get("flattened") != "" && field.Tag.Get("locationNameList") != "" { + name = field.Tag.Get("locationNameList") + } else if locName := field.Tag.Get("locationName"); locName != "" { + name = locName + } + + // try to find the field by name in elements + elems := node.Children[name] + + if elems == nil { // try to find the field in attributes + for _, a := range node.Attr { + if name == a.Name.Local { + // turn this into a text node for de-serializing + elems = []*XMLNode{{Text: a.Value}} + } + } + } + + member := r.FieldByName(field.Name) + for _, elem := range elems { + err := parse(member, elem, field.Tag) + if err != nil { + return err + } + } + } + return nil +} + +// parseList deserializes a list of values from an XML node. Each list entry +// will also be deserialized. +func parseList(r reflect.Value, node *XMLNode, tag reflect.StructTag) error { + t := r.Type() + + if tag.Get("flattened") == "" { // look at all item entries + mname := "member" + if name := tag.Get("locationNameList"); name != "" { + mname = name + } + + if Children, ok := node.Children[mname]; ok { + if r.IsNil() { + r.Set(reflect.MakeSlice(t, len(Children), len(Children))) + } + + for i, c := range Children { + err := parse(r.Index(i), c, "") + if err != nil { + return err + } + } + } + } else { // flattened list means this is a single element + if r.IsNil() { + r.Set(reflect.MakeSlice(t, 0, 0)) + } + + childR := reflect.Zero(t.Elem()) + r.Set(reflect.Append(r, childR)) + err := parse(r.Index(r.Len()-1), node, "") + if err != nil { + return err + } + } + + return nil +} + +// parseMap deserializes a map from an XMLNode. The direct children of the XMLNode +// will also be deserialized as map entries. +func parseMap(r reflect.Value, node *XMLNode, tag reflect.StructTag) error { + if r.IsNil() { + r.Set(reflect.MakeMap(r.Type())) + } + + if tag.Get("flattened") == "" { // look at all child entries + for _, entry := range node.Children["entry"] { + parseMapEntry(r, entry, tag) + } + } else { // this element is itself an entry + parseMapEntry(r, node, tag) + } + + return nil +} + +// parseMapEntry deserializes a map entry from a XML node. +func parseMapEntry(r reflect.Value, node *XMLNode, tag reflect.StructTag) error { + kname, vname := "key", "value" + if n := tag.Get("locationNameKey"); n != "" { + kname = n + } + if n := tag.Get("locationNameValue"); n != "" { + vname = n + } + + keys, ok := node.Children[kname] + values := node.Children[vname] + if ok { + for i, key := range keys { + keyR := reflect.ValueOf(key.Text) + value := values[i] + valueR := reflect.New(r.Type().Elem()).Elem() + + parse(valueR, value, "") + r.SetMapIndex(keyR, valueR) + } + } + return nil +} + +// parseScaller deserializes an XMLNode value into a concrete type based on the +// interface type of r. 
+// +// Error is returned if the deserialization fails due to invalid type conversion, +// or unsupported interface type. +func parseScalar(r reflect.Value, node *XMLNode, tag reflect.StructTag) error { + switch r.Interface().(type) { + case *string: + r.Set(reflect.ValueOf(&node.Text)) + return nil + case []byte: + b, err := base64.StdEncoding.DecodeString(node.Text) + if err != nil { + return err + } + r.Set(reflect.ValueOf(b)) + case *bool: + v, err := strconv.ParseBool(node.Text) + if err != nil { + return err + } + r.Set(reflect.ValueOf(&v)) + case *int64: + v, err := strconv.ParseInt(node.Text, 10, 64) + if err != nil { + return err + } + r.Set(reflect.ValueOf(&v)) + case *float64: + v, err := strconv.ParseFloat(node.Text, 64) + if err != nil { + return err + } + r.Set(reflect.ValueOf(&v)) + case *time.Time: + const ISO8601UTC = "2006-01-02T15:04:05Z" + t, err := time.Parse(ISO8601UTC, node.Text) + if err != nil { + return err + } + r.Set(reflect.ValueOf(&t)) + default: + return fmt.Errorf("unsupported value: %v (%s)", r.Interface(), r.Type()) + } + return nil +} diff --git a/vendor/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/xml_to_struct.go b/vendor/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/xml_to_struct.go new file mode 100644 index 000000000..72c198a9d --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/xml_to_struct.go @@ -0,0 +1,105 @@ +package xmlutil + +import ( + "encoding/xml" + "io" + "sort" +) + +// A XMLNode contains the values to be encoded or decoded. +type XMLNode struct { + Name xml.Name `json:",omitempty"` + Children map[string][]*XMLNode `json:",omitempty"` + Text string `json:",omitempty"` + Attr []xml.Attr `json:",omitempty"` +} + +// NewXMLElement returns a pointer to a new XMLNode initialized to default values. +func NewXMLElement(name xml.Name) *XMLNode { + return &XMLNode{ + Name: name, + Children: map[string][]*XMLNode{}, + Attr: []xml.Attr{}, + } +} + +// AddChild adds child to the XMLNode. +func (n *XMLNode) AddChild(child *XMLNode) { + if _, ok := n.Children[child.Name.Local]; !ok { + n.Children[child.Name.Local] = []*XMLNode{} + } + n.Children[child.Name.Local] = append(n.Children[child.Name.Local], child) +} + +// XMLToStruct converts a xml.Decoder stream to XMLNode with nested values. +func XMLToStruct(d *xml.Decoder, s *xml.StartElement) (*XMLNode, error) { + out := &XMLNode{} + for { + tok, err := d.Token() + if tok == nil || err == io.EOF { + break + } + if err != nil { + return out, err + } + + switch typed := tok.(type) { + case xml.CharData: + out.Text = string(typed.Copy()) + case xml.StartElement: + el := typed.Copy() + out.Attr = el.Attr + if out.Children == nil { + out.Children = map[string][]*XMLNode{} + } + + name := typed.Name.Local + slice := out.Children[name] + if slice == nil { + slice = []*XMLNode{} + } + node, e := XMLToStruct(d, &el) + if e != nil { + return out, e + } + node.Name = typed.Name + slice = append(slice, node) + out.Children[name] = slice + case xml.EndElement: + if s != nil && s.Name.Local == typed.Name.Local { // matching end token + return out, nil + } + } + } + return out, nil +} + +// StructToXML writes an XMLNode to a xml.Encoder as tokens. 
+func StructToXML(e *xml.Encoder, node *XMLNode, sorted bool) error { + e.EncodeToken(xml.StartElement{Name: node.Name, Attr: node.Attr}) + + if node.Text != "" { + e.EncodeToken(xml.CharData([]byte(node.Text))) + } else if sorted { + sortedNames := []string{} + for k := range node.Children { + sortedNames = append(sortedNames, k) + } + sort.Strings(sortedNames) + + for _, k := range sortedNames { + for _, v := range node.Children[k] { + StructToXML(e, v, sorted) + } + } + } else { + for _, c := range node.Children { + for _, v := range c { + StructToXML(e, v, sorted) + } + } + } + + e.EncodeToken(xml.EndElement{Name: node.Name}) + return e.Flush() +} diff --git a/vendor/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/private/waiter/waiter.go b/vendor/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/private/waiter/waiter.go new file mode 100644 index 000000000..b51e9449c --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/private/waiter/waiter.go @@ -0,0 +1,134 @@ +package waiter + +import ( + "fmt" + "reflect" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/aws/awsutil" + "github.com/aws/aws-sdk-go/aws/request" +) + +// A Config provides a collection of configuration values to setup a generated +// waiter code with. +type Config struct { + Name string + Delay int + MaxAttempts int + Operation string + Acceptors []WaitAcceptor +} + +// A WaitAcceptor provides the information needed to wait for an API operation +// to complete. +type WaitAcceptor struct { + Expected interface{} + Matcher string + State string + Argument string +} + +// A Waiter provides waiting for an operation to complete. +type Waiter struct { + Config + Client interface{} + Input interface{} +} + +// Wait waits for an operation to complete, expire max attempts, or fail. Error +// is returned if the operation fails. +func (w *Waiter) Wait() error { + client := reflect.ValueOf(w.Client) + in := reflect.ValueOf(w.Input) + method := client.MethodByName(w.Config.Operation + "Request") + + for i := 0; i < w.MaxAttempts; i++ { + res := method.Call([]reflect.Value{in}) + req := res[0].Interface().(*request.Request) + req.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Waiter")) + + err := req.Send() + for _, a := range w.Acceptors { + result := false + var vals []interface{} + switch a.Matcher { + case "pathAll", "path": + // Require all matches to be equal for result to match + vals, _ = awsutil.ValuesAtPath(req.Data, a.Argument) + if len(vals) == 0 { + break + } + result = true + for _, val := range vals { + if !awsutil.DeepEqual(val, a.Expected) { + result = false + break + } + } + case "pathAny": + // Only a single match needs to equal for the result to match + vals, _ = awsutil.ValuesAtPath(req.Data, a.Argument) + for _, val := range vals { + if awsutil.DeepEqual(val, a.Expected) { + result = true + break + } + } + case "status": + s := a.Expected.(int) + result = s == req.HTTPResponse.StatusCode + case "error": + if aerr, ok := err.(awserr.Error); ok { + result = aerr.Code() == a.Expected.(string) + } + case "pathList": + // ignored matcher + default: + logf(client, "WARNING: Waiter for %s encountered unexpected matcher: %s", + w.Config.Operation, a.Matcher) + } + + if !result { + // If there was no matching result found there is nothing more to do + // for this response, retry the request. 
+ continue + } + + switch a.State { + case "success": + // waiter completed + return nil + case "failure": + // Waiter failure state triggered + return awserr.New("ResourceNotReady", + fmt.Sprintf("failed waiting for successful resource state"), err) + case "retry": + // clear the error and retry the operation + err = nil + default: + logf(client, "WARNING: Waiter for %s encountered unexpected state: %s", + w.Config.Operation, a.State) + } + } + if err != nil { + return err + } + + time.Sleep(time.Second * time.Duration(w.Delay)) + } + + return awserr.New("ResourceNotReady", + fmt.Sprintf("exceeded %d wait attempts", w.MaxAttempts), nil) +} + +func logf(client reflect.Value, msg string, args ...interface{}) { + cfgVal := client.FieldByName("Config") + if !cfgVal.IsValid() { + return + } + if cfg, ok := cfgVal.Interface().(*aws.Config); ok && cfg.Logger != nil { + cfg.Logger.Log(fmt.Sprintf(msg, args...)) + } +} diff --git a/vendor/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/service/cloudfront/sign/policy.go b/vendor/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/service/cloudfront/sign/policy.go new file mode 100644 index 000000000..75263b1b6 --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/service/cloudfront/sign/policy.go @@ -0,0 +1,226 @@ +package sign + +import ( + "bytes" + "crypto" + "crypto/rand" + "crypto/rsa" + "crypto/sha1" + "encoding/base64" + "encoding/json" + "fmt" + "io" + "net/url" + "strings" + "time" + "unicode" +) + +// An AWSEpochTime wraps a time value providing JSON serialization needed for +// AWS Policy epoch time fields. +type AWSEpochTime struct { + time.Time +} + +// NewAWSEpochTime returns a new AWSEpochTime pointer wrapping the Go time provided. +func NewAWSEpochTime(t time.Time) *AWSEpochTime { + return &AWSEpochTime{t} +} + +// MarshalJSON serializes the epoch time as AWS Profile epoch time. +func (t AWSEpochTime) MarshalJSON() ([]byte, error) { + return []byte(fmt.Sprintf(`{"AWS:EpochTime":%d}`, t.UTC().Unix())), nil +} + +// An IPAddress wraps an IPAddress source IP providing JSON serialization information +type IPAddress struct { + SourceIP string `json:"AWS:SourceIp"` +} + +// A Condition defines the restrictions for how a signed URL can be used. +type Condition struct { + // Optional IP address mask the signed URL must be requested from. + IPAddress *IPAddress `json:"IpAddress,omitempty"` + + // Optional date that the signed URL cannot be used until. It is invalid + // to make requests with the signed URL prior to this date. + DateGreaterThan *AWSEpochTime `json:",omitempty"` + + // Required date that the signed URL will expire. A DateLessThan is required + // sign cloud front URLs + DateLessThan *AWSEpochTime `json:",omitempty"` +} + +// A Statement is a collection of conditions for resources +type Statement struct { + // The Web or RTMP resource the URL will be signed for + Resource string + + // The set of conditions for this resource + Condition Condition +} + +// A Policy defines the resources that a signed will be signed for. +// +// See the following page for more information on how policies are constructed. +// http://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/private-content-creating-signed-url-custom-policy.html#private-content-custom-policy-statement +type Policy struct { + // List of resource and condition statements. + // Signed URLs should only provide a single statement. 
+ Statements []Statement `json:"Statement"` +} + +// Override for testing to mock out usage of crypto/rand.Reader +var randReader = rand.Reader + +// Sign will sign a policy using an RSA private key. It will return a base 64 +// encoded signature and policy if no error is encountered. +// +// The signature and policy should be added to the signed URL following the +// guidelines in: +// http://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/private-content-signed-urls.html +func (p *Policy) Sign(privKey *rsa.PrivateKey) (b64Signature, b64Policy []byte, err error) { + if err = p.Validate(); err != nil { + return nil, nil, err + } + + // Build and escape the policy + b64Policy, jsonPolicy, err := encodePolicy(p) + if err != nil { + return nil, nil, err + } + awsEscapeEncoded(b64Policy) + + // Build and escape the signature + b64Signature, err = signEncodedPolicy(randReader, jsonPolicy, privKey) + if err != nil { + return nil, nil, err + } + awsEscapeEncoded(b64Signature) + + return b64Signature, b64Policy, nil +} + +// Validate verifies that the policy is valid and usable, and returns an +// error if there is a problem. +func (p *Policy) Validate() error { + if len(p.Statements) == 0 { + return fmt.Errorf("at least one policy statement is required") + } + for i, s := range p.Statements { + if s.Resource == "" { + return fmt.Errorf("statement at index %d does not have a resource", i) + } + if !isASCII(s.Resource) { + return fmt.Errorf("unable to sign resource, [%s]. "+ + "Resources must only contain ascii characters. "+ + "Hostnames with unicode should be encoded as Punycode, (e.g. golang.org/x/net/idna), "+ + "and URL unicode path/query characters should be escaped.", s.Resource) + } + } + + return nil +} + +// CreateResource constructs, validates, and returns a resource URL string. An +// error will be returned if unable to create the resource string. +func CreateResource(scheme, u string) (string, error) { + scheme = strings.ToLower(scheme) + + if scheme == "http" || scheme == "https" || scheme == "http*" || scheme == "*" { + return u, nil + } + + if scheme == "rtmp" { + parsed, err := url.Parse(u) + if err != nil { + return "", fmt.Errorf("unable to parse rtmp URL, err: %s", err) + } + + rtmpURL := strings.TrimLeft(parsed.Path, "/") + if parsed.RawQuery != "" { + rtmpURL = fmt.Sprintf("%s?%s", rtmpURL, parsed.RawQuery) + } + + return rtmpURL, nil + } + + return "", fmt.Errorf("invalid URL scheme must be http, https, or rtmp. Provided: %s", scheme) +} + +// NewCannedPolicy returns a new Canned Policy constructed using the resource +// and expires time. This can be used to generate the basic model for a Policy +// that can be then augmented with additional conditions. +// +// See the following page for more information on how policies are constructed. +// http://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/private-content-creating-signed-url-custom-policy.html#private-content-custom-policy-statement +func NewCannedPolicy(resource string, expires time.Time) *Policy { + return &Policy{ + Statements: []Statement{ + { + Resource: resource, + Condition: Condition{ + DateLessThan: NewAWSEpochTime(expires), + }, + }, + }, + } +} + +// encodePolicy encodes the Policy as JSON and also base 64 encodes it. 
+func encodePolicy(p *Policy) (b64Policy, jsonPolicy []byte, err error) { + jsonPolicy, err = json.Marshal(p) + if err != nil { + return nil, nil, fmt.Errorf("failed to encode policy, %s", err.Error()) + } + + // Remove leading and trailing white space, JSON encoding will note include + // whitespace within the encoding. + jsonPolicy = bytes.TrimSpace(jsonPolicy) + + b64Policy = make([]byte, base64.StdEncoding.EncodedLen(len(jsonPolicy))) + base64.StdEncoding.Encode(b64Policy, jsonPolicy) + return b64Policy, jsonPolicy, nil +} + +// signEncodedPolicy will sign and base 64 encode the JSON encoded policy. +func signEncodedPolicy(randReader io.Reader, jsonPolicy []byte, privKey *rsa.PrivateKey) ([]byte, error) { + hash := sha1.New() + if _, err := bytes.NewReader(jsonPolicy).WriteTo(hash); err != nil { + return nil, fmt.Errorf("failed to calculate signing hash, %s", err.Error()) + } + + sig, err := rsa.SignPKCS1v15(randReader, privKey, crypto.SHA1, hash.Sum(nil)) + if err != nil { + return nil, fmt.Errorf("failed to sign policy, %s", err.Error()) + } + + b64Sig := make([]byte, base64.StdEncoding.EncodedLen(len(sig))) + base64.StdEncoding.Encode(b64Sig, sig) + return b64Sig, nil +} + +// special characters to be replaced with awsEscapeEncoded +var invalidEncodedChar = map[byte]byte{ + '+': '-', + '=': '_', + '/': '~', +} + +// awsEscapeEncoded will replace base64 encoding's special characters to be URL safe. +func awsEscapeEncoded(b []byte) { + for i, v := range b { + if r, ok := invalidEncodedChar[v]; ok { + b[i] = r + } + } +} + +func isASCII(u string) bool { + for _, c := range u { + if c > unicode.MaxASCII { + return false + } + } + return true +} diff --git a/vendor/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/service/cloudfront/sign/privkey.go b/vendor/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/service/cloudfront/sign/privkey.go new file mode 100644 index 000000000..ffb3c3a75 --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/service/cloudfront/sign/privkey.go @@ -0,0 +1,68 @@ +package sign + +import ( + "crypto/rsa" + "crypto/x509" + "encoding/pem" + "fmt" + "io" + "io/ioutil" + "os" +) + +// LoadPEMPrivKeyFile reads a PEM encoded RSA private key from the file name. +// A new RSA private key will be returned if no error. +func LoadPEMPrivKeyFile(name string) (*rsa.PrivateKey, error) { + file, err := os.Open(name) + if err != nil { + return nil, err + } + defer file.Close() + + return LoadPEMPrivKey(file) +} + +// LoadPEMPrivKey reads a PEM encoded RSA private key from the io.Reader. +// A new RSA private key will be returned if no error. +func LoadPEMPrivKey(reader io.Reader) (*rsa.PrivateKey, error) { + block, err := loadPem(reader) + if err != nil { + return nil, err + } + + return x509.ParsePKCS1PrivateKey(block.Bytes) +} + +// LoadEncryptedPEMPrivKey decrypts the PEM encoded private key using the +// password provided returning a RSA private key. If the PEM data is invalid, +// or unable to decrypt an error will be returned. 
+func LoadEncryptedPEMPrivKey(reader io.Reader, password []byte) (*rsa.PrivateKey, error) { + block, err := loadPem(reader) + if err != nil { + return nil, err + } + + decryptedBlock, err := x509.DecryptPEMBlock(block, password) + if err != nil { + return nil, err + } + + return x509.ParsePKCS1PrivateKey(decryptedBlock) +} + +func loadPem(reader io.Reader) (*pem.Block, error) { + b, err := ioutil.ReadAll(reader) + if err != nil { + return nil, err + } + + block, _ := pem.Decode(b) + if block == nil { + // pem.Decode will set block to nil if there is no PEM data in the input + // the second parameter will contain the provided bytes that failed + // to be decoded. + return nil, fmt.Errorf("no valid PEM data provided") + } + + return block, nil +} diff --git a/vendor/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/service/cloudfront/sign/randomreader.go b/vendor/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/service/cloudfront/sign/randomreader.go new file mode 100644 index 000000000..7138e22fa --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/service/cloudfront/sign/randomreader.go @@ -0,0 +1,30 @@ +package sign + +import ( + "bytes" + "encoding/binary" + "math/rand" +) + +// A randomReader wraps a math/rand.Rand within an reader so that it can used +// as a predictable testing replacement for crypto/rand.Reader +type randomReader struct { + b *bytes.Buffer + r *rand.Rand +} + +// newRandomReader returns a new instance of the random reader +func newRandomReader(r *rand.Rand) *randomReader { + return &randomReader{b: &bytes.Buffer{}, r: r} +} + +// Read will read random bytes from up to the length of b. +func (m *randomReader) Read(b []byte) (int, error) { + for i := 0; i < len(b); { + binary.Write(m.b, binary.LittleEndian, m.r.Int63()) + n, _ := m.b.Read(b[i:]) + i += n + } + + return len(b), nil +} diff --git a/vendor/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/service/cloudfront/sign/sign_cookie.go b/vendor/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/service/cloudfront/sign/sign_cookie.go new file mode 100644 index 000000000..9b2deadf1 --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/service/cloudfront/sign/sign_cookie.go @@ -0,0 +1,241 @@ +package sign + +import ( + "crypto/rsa" + "fmt" + "net/http" + "strings" + "time" +) + +const ( + // CookiePolicyName name of the policy cookie + CookiePolicyName = "CloudFront-Policy" + // CookieSignatureName name of the signature cookie + CookieSignatureName = "CloudFront-Signature" + // CookieKeyIDName name of the signing Key ID cookie + CookieKeyIDName = "CloudFront-Key-Pair-Id" +) + +// A CookieOptions optional additonal options that can be applied to the signed +// cookies. +type CookieOptions struct { + Path string + Domain string + Secure bool +} + +// apply will integration the options provided into the base cookie options +// a new copy will be returned. The base CookieOption will not be modified. +func (o CookieOptions) apply(opts ...func(*CookieOptions)) CookieOptions { + if len(opts) == 0 { + return o + } + + for _, opt := range opts { + opt(&o) + } + + return o +} + +// A CookieSigner provides signing utilities to sign Cookies for Amazon CloudFront +// resources. Using a private key and Credential Key Pair key ID the CookieSigner +// only needs to be created once per Credential Key Pair key ID and private key. 
+// +// More information about signed Cookies and their structure can be found at: +// http://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/private-content-setting-signed-cookie-custom-policy.html +// +// To sign a Cookie, create a CookieSigner with your private key and credential +// pair key ID. Once you have a CookieSigner instance you can call Sign or +// SignWithPolicy to sign the URLs. +// +// The signer is safe to use concurrently, but the optional cookies options +// are not safe to modify concurrently. +type CookieSigner struct { + keyID string + privKey *rsa.PrivateKey + + Opts CookieOptions +} + +// NewCookieSigner constructs and returns a new CookieSigner to be used to for +// signing Amazon CloudFront URL resources with. +func NewCookieSigner(keyID string, privKey *rsa.PrivateKey, opts ...func(*CookieOptions)) *CookieSigner { + signer := &CookieSigner{ + keyID: keyID, + privKey: privKey, + Opts: CookieOptions{}.apply(opts...), + } + + return signer +} + +// Sign returns the cookies needed to allow user agents to make arbetrary +// requests to cloudfront for the resource(s) defined by the policy. +// +// Sign will create a CloudFront policy with only a resource and condition of +// DateLessThan equal to the expires time provided. +// +// The returned slice cookies should all be added to the Client's cookies or +// server's response. +// +// Example: +// s := NewCookieSigner(keyID, privKey) +// +// // Get Signed cookies for a resource that will expire in 1 hour +// cookies, err := s.Sign("*", time.Now().Add(1 * time.Hour)) +// if err != nil { +// fmt.Println("failed to create signed cookies", err) +// return +// } +// +// // Or get Signed cookies for a resource that will expire in 1 hour +// // and set path and domain of cookies +// cookies, err := s.Sign("*", time.Now().Add(1 * time.Hour), func(o *sign.CookieOptions) { +// o.Path = "/" +// o.Domain = ".example.com" +// }) +// if err != nil { +// fmt.Println("failed to create signed cookies", err) +// return +// } +// +// // Server Response via http.ResponseWriter +// for _, c := range cookies { +// http.SetCookie(w, c) +// } +// +// // Client request via the cookie jar +// if client.CookieJar != nil { +// for _, c := range cookies { +// client.Cookie(w, c) +// } +// } +func (s CookieSigner) Sign(u string, expires time.Time, opts ...func(*CookieOptions)) ([]*http.Cookie, error) { + scheme, err := cookieURLScheme(u) + if err != nil { + return nil, err + } + + resource, err := CreateResource(scheme, u) + if err != nil { + return nil, err + } + + p := NewCannedPolicy(resource, expires) + return createCookies(p, s.keyID, s.privKey, s.Opts.apply(opts...)) +} + +// Returns and validates the URL's scheme. +// http://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/private-content-setting-signed-cookie-custom-policy.html#private-content-custom-policy-statement-cookies +func cookieURLScheme(u string) (string, error) { + parts := strings.SplitN(u, "://", 2) + if len(parts) != 2 { + return "", fmt.Errorf("invalid cookie URL, missing scheme") + } + + scheme := strings.ToLower(parts[0]) + if scheme != "http" && scheme != "https" && scheme != "http*" { + return "", fmt.Errorf("invalid cookie URL scheme. Expect http, https, or http*. Go, %s", scheme) + } + + return scheme, nil +} + +// SignWithPolicy returns the cookies needed to allow user agents to make +// arbetrairy requets to cloudfront for the resource(s) defined by the policy. 
+// +// The returned slice cookies should all be added to the Client's cookies or +// server's response. +// +// Example: +// s := NewCookieSigner(keyID, privKey) +// +// policy := &sign.Policy{ +// Statements: []sign.Statement{ +// { +// // Read the provided documentation on how to set this +// // correctly, you'll probably want to use wildcards. +// Resource: RawCloudFrontURL, +// Condition: sign.Condition{ +// // Optional IP source address range +// IPAddress: &sign.IPAddress{SourceIP: "192.0.2.0/24"}, +// // Optional date URL is not valid until +// DateGreaterThan: &sign.AWSEpochTime{time.Now().Add(30 * time.Minute)}, +// // Required date the URL will expire after +// DateLessThan: &sign.AWSEpochTime{time.Now().Add(1 * time.Hour)}, +// }, +// }, +// }, +// } +// +// // Get Signed cookies for a resource that will expire in 1 hour +// cookies, err := s.SignWithPolicy(policy) +// if err != nil { +// fmt.Println("failed to create signed cookies", err) +// return +// } +// +// // Or get Signed cookies for a resource that will expire in 1 hour +// // and set path and domain of cookies +// cookies, err := s.Sign(policy, func(o *sign.CookieOptions) { +// o.Path = "/" +// o.Domain = ".example.com" +// }) +// if err != nil { +// fmt.Println("failed to create signed cookies", err) +// return +// } +// +// // Server Response via http.ResponseWriter +// for _, c := range cookies { +// http.SetCookie(w, c) +// } +// +// // Client request via the cookie jar +// if client.CookieJar != nil { +// for _, c := range cookies { +// client.Cookie(w, c) +// } +// } +func (s CookieSigner) SignWithPolicy(p *Policy, opts ...func(*CookieOptions)) ([]*http.Cookie, error) { + return createCookies(p, s.keyID, s.privKey, s.Opts.apply(opts...)) +} + +// Prepares the cookies to be attached to the header. An (optional) options +// struct is provided in case people don't want to manually edit their cookies. +func createCookies(p *Policy, keyID string, privKey *rsa.PrivateKey, opt CookieOptions) ([]*http.Cookie, error) { + b64Sig, b64Policy, err := p.Sign(privKey) + if err != nil { + return nil, err + } + + // Creates proper cookies + cPolicy := &http.Cookie{ + Name: CookiePolicyName, + Value: string(b64Policy), + HttpOnly: true, + } + cSignature := &http.Cookie{ + Name: CookieSignatureName, + Value: string(b64Sig), + HttpOnly: true, + } + cKey := &http.Cookie{ + Name: CookieKeyIDName, + Value: keyID, + HttpOnly: true, + } + + cookies := []*http.Cookie{cPolicy, cSignature, cKey} + + // Applie the cookie options + for _, c := range cookies { + c.Path = opt.Path + c.Domain = opt.Domain + c.Secure = opt.Secure + } + + return cookies, nil +} diff --git a/vendor/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/service/cloudfront/sign/sign_url.go b/vendor/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/service/cloudfront/sign/sign_url.go new file mode 100644 index 000000000..ba56b4a78 --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/service/cloudfront/sign/sign_url.go @@ -0,0 +1,205 @@ +// Package sign provides utilities to generate signed URLs for Amazon CloudFront. +// +// More information about signed URLs and their structure can be found at: +// http://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/private-content-creating-signed-url-canned-policy.html +// +// To sign a URL create a URLSigner with your private key and credential pair key ID. +// Once you have a URLSigner instance you can call Sign or SignWithPolicy to +// sign the URLs. 
+// +// Example: +// +// // Sign URL to be valid for 1 hour from now. +// signer := sign.NewURLSigner(keyID, privKey) +// signedURL, err := signer.Sign(rawURL, time.Now().Add(1*time.Hour)) +// if err != nil { +// log.Fatalf("Failed to sign url, err: %s\n", err.Error()) +// } +// +package sign + +import ( + "crypto/rsa" + "fmt" + "net/url" + "strings" + "time" +) + +// An URLSigner provides URL signing utilities to sign URLs for Amazon CloudFront +// resources. Using a private key and Credential Key Pair key ID the URLSigner +// only needs to be created once per Credential Key Pair key ID and private key. +// +// The signer is safe to use concurrently. +type URLSigner struct { + keyID string + privKey *rsa.PrivateKey +} + +// NewURLSigner constructs and returns a new URLSigner to be used to for signing +// Amazon CloudFront URL resources with. +func NewURLSigner(keyID string, privKey *rsa.PrivateKey) *URLSigner { + return &URLSigner{ + keyID: keyID, + privKey: privKey, + } +} + +// Sign will sign a single URL to expire at the time of expires sign using the +// Amazon CloudFront default Canned Policy. The URL will be signed with the +// private key and Credential Key Pair Key ID previously provided to URLSigner. +// +// This is the default method of signing Amazon CloudFront URLs. If extra policy +// conditions are need other than URL expiry use SignWithPolicy instead. +// +// Example: +// +// // Sign URL to be valid for 1 hour from now. +// signer := sign.NewURLSigner(keyID, privKey) +// signedURL, err := signer.Sign(rawURL, time.Now().Add(1*time.Hour)) +// if err != nil { +// log.Fatalf("Failed to sign url, err: %s\n", err.Error()) +// } +// +func (s URLSigner) Sign(url string, expires time.Time) (string, error) { + scheme, cleanedURL, err := cleanURLScheme(url) + if err != nil { + return "", err + } + + resource, err := CreateResource(scheme, url) + if err != nil { + return "", err + } + + return signURL(scheme, cleanedURL, s.keyID, NewCannedPolicy(resource, expires), false, s.privKey) +} + +// SignWithPolicy will sign a URL with the Policy provided. The URL will be +// signed with the private key and Credential Key Pair Key ID previously provided to URLSigner. +// +// Use this signing method if you are looking to sign a URL with more than just +// the URL's expiry time, or reusing Policies between multiple URL signings. +// If only the expiry time is needed you can use Sign and provide just the +// URL's expiry time. A minimum of at least one policy statement is required for a signed URL. +// +// Note: It is not safe to use Polices between multiple signers concurrently +// +// Example: +// +// // Sign URL to be valid for 30 minutes from now, expires one hour from now, and +// // restricted to the 192.0.2.0/24 IP address range. 
+// policy := &sign.Policy{ +// Statements: []sign.Statement{ +// { +// Resource: rawURL, +// Condition: sign.Condition{ +// // Optional IP source address range +// IPAddress: &sign.IPAddress{SourceIP: "192.0.2.0/24"}, +// // Optional date URL is not valid until +// DateGreaterThan: &sign.AWSEpochTime{time.Now().Add(30 * time.Minute)}, +// // Required date the URL will expire after +// DateLessThan: &sign.AWSEpochTime{time.Now().Add(1 * time.Hour)}, +// }, +// }, +// }, +// } +// +// signer := sign.NewURLSigner(keyID, privKey) +// signedURL, err := signer.SignWithPolicy(rawURL, policy) +// if err != nil { +// log.Fatalf("Failed to sign url, err: %s\n", err.Error()) +// } +// +func (s URLSigner) SignWithPolicy(url string, p *Policy) (string, error) { + scheme, cleanedURL, err := cleanURLScheme(url) + if err != nil { + return "", err + } + + return signURL(scheme, cleanedURL, s.keyID, p, true, s.privKey) +} + +func signURL(scheme, url, keyID string, p *Policy, customPolicy bool, privKey *rsa.PrivateKey) (string, error) { + // Validation URL elements + if err := validateURL(url); err != nil { + return "", err + } + + b64Signature, b64Policy, err := p.Sign(privKey) + if err != nil { + return "", err + } + + // build and return signed URL + builtURL := buildSignedURL(url, keyID, p, customPolicy, b64Policy, b64Signature) + if scheme == "rtmp" { + return buildRTMPURL(builtURL) + } + + return builtURL, nil +} + +func buildSignedURL(baseURL, keyID string, p *Policy, customPolicy bool, b64Policy, b64Signature []byte) string { + pred := "?" + if strings.Contains(baseURL, "?") { + pred = "&" + } + signedURL := baseURL + pred + + if customPolicy { + signedURL += "Policy=" + string(b64Policy) + } else { + signedURL += fmt.Sprintf("Expires=%d", p.Statements[0].Condition.DateLessThan.UTC().Unix()) + } + signedURL += fmt.Sprintf("&Signature=%s&Key-Pair-Id=%s", string(b64Signature), keyID) + + return signedURL +} + +func buildRTMPURL(u string) (string, error) { + parsed, err := url.Parse(u) + if err != nil { + return "", fmt.Errorf("unable to parse rtmp signed URL, err: %s", err) + } + + rtmpURL := strings.TrimLeft(parsed.Path, "/") + if parsed.RawQuery != "" { + rtmpURL = fmt.Sprintf("%s?%s", rtmpURL, parsed.RawQuery) + } + + return rtmpURL, nil +} + +func cleanURLScheme(u string) (scheme, cleanedURL string, err error) { + parts := strings.SplitN(u, "://", 2) + if len(parts) != 2 { + return "", "", fmt.Errorf("invalid URL, missing scheme and domain/path") + } + scheme = strings.Replace(parts[0], "*", "", 1) + cleanedURL = fmt.Sprintf("%s://%s", scheme, parts[1]) + + return strings.ToLower(scheme), cleanedURL, nil +} + +var illegalQueryParms = []string{"Expires", "Policy", "Signature", "Key-Pair-Id"} + +func validateURL(u string) error { + parsed, err := url.Parse(u) + if err != nil { + return fmt.Errorf("unable to parse URL, err: %s", err.Error()) + } + + if parsed.Scheme == "" { + return fmt.Errorf("URL missing valid scheme, %s", u) + } + + q := parsed.Query() + for _, p := range illegalQueryParms { + if _, ok := q[p]; ok { + return fmt.Errorf("%s cannot be a query parameter for a signed URL", p) + } + } + + return nil +} diff --git a/vendor/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/service/s3/api.go b/vendor/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/service/s3/api.go new file mode 100644 index 000000000..5132954f3 --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/service/s3/api.go @@ -0,0 +1,9527 @@ +// THIS FILE IS 
AUTOMATICALLY GENERATED. DO NOT EDIT. + +// Package s3 provides a client for Amazon Simple Storage Service. +package s3 + +import ( + "fmt" + "io" + "time" + + "github.com/aws/aws-sdk-go/aws/awsutil" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/private/protocol" + "github.com/aws/aws-sdk-go/private/protocol/restxml" +) + +const opAbortMultipartUpload = "AbortMultipartUpload" + +// AbortMultipartUploadRequest generates a "aws/request.Request" representing the +// client's request for the AbortMultipartUpload operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the AbortMultipartUpload method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the AbortMultipartUploadRequest method. +// req, resp := client.AbortMultipartUploadRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *S3) AbortMultipartUploadRequest(input *AbortMultipartUploadInput) (req *request.Request, output *AbortMultipartUploadOutput) { + op := &request.Operation{ + Name: opAbortMultipartUpload, + HTTPMethod: "DELETE", + HTTPPath: "/{Bucket}/{Key+}", + } + + if input == nil { + input = &AbortMultipartUploadInput{} + } + + req = c.newRequest(op, input, output) + output = &AbortMultipartUploadOutput{} + req.Data = output + return +} + +// Aborts a multipart upload. +// +// To verify that all parts have been removed, so you don't get charged for +// the part storage, you should call the List Parts operation and ensure the +// parts list is empty. +func (c *S3) AbortMultipartUpload(input *AbortMultipartUploadInput) (*AbortMultipartUploadOutput, error) { + req, out := c.AbortMultipartUploadRequest(input) + err := req.Send() + return out, err +} + +const opCompleteMultipartUpload = "CompleteMultipartUpload" + +// CompleteMultipartUploadRequest generates a "aws/request.Request" representing the +// client's request for the CompleteMultipartUpload operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the CompleteMultipartUpload method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the CompleteMultipartUploadRequest method. 
+// req, resp := client.CompleteMultipartUploadRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *S3) CompleteMultipartUploadRequest(input *CompleteMultipartUploadInput) (req *request.Request, output *CompleteMultipartUploadOutput) { + op := &request.Operation{ + Name: opCompleteMultipartUpload, + HTTPMethod: "POST", + HTTPPath: "/{Bucket}/{Key+}", + } + + if input == nil { + input = &CompleteMultipartUploadInput{} + } + + req = c.newRequest(op, input, output) + output = &CompleteMultipartUploadOutput{} + req.Data = output + return +} + +// Completes a multipart upload by assembling previously uploaded parts. +func (c *S3) CompleteMultipartUpload(input *CompleteMultipartUploadInput) (*CompleteMultipartUploadOutput, error) { + req, out := c.CompleteMultipartUploadRequest(input) + err := req.Send() + return out, err +} + +const opCopyObject = "CopyObject" + +// CopyObjectRequest generates a "aws/request.Request" representing the +// client's request for the CopyObject operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the CopyObject method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the CopyObjectRequest method. +// req, resp := client.CopyObjectRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *S3) CopyObjectRequest(input *CopyObjectInput) (req *request.Request, output *CopyObjectOutput) { + op := &request.Operation{ + Name: opCopyObject, + HTTPMethod: "PUT", + HTTPPath: "/{Bucket}/{Key+}", + } + + if input == nil { + input = &CopyObjectInput{} + } + + req = c.newRequest(op, input, output) + output = &CopyObjectOutput{} + req.Data = output + return +} + +// Creates a copy of an object that is already stored in Amazon S3. +func (c *S3) CopyObject(input *CopyObjectInput) (*CopyObjectOutput, error) { + req, out := c.CopyObjectRequest(input) + err := req.Send() + return out, err +} + +const opCreateBucket = "CreateBucket" + +// CreateBucketRequest generates a "aws/request.Request" representing the +// client's request for the CreateBucket operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the CreateBucket method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the CreateBucketRequest method. 
+// req, resp := client.CreateBucketRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *S3) CreateBucketRequest(input *CreateBucketInput) (req *request.Request, output *CreateBucketOutput) { + op := &request.Operation{ + Name: opCreateBucket, + HTTPMethod: "PUT", + HTTPPath: "/{Bucket}", + } + + if input == nil { + input = &CreateBucketInput{} + } + + req = c.newRequest(op, input, output) + output = &CreateBucketOutput{} + req.Data = output + return +} + +// Creates a new bucket. +func (c *S3) CreateBucket(input *CreateBucketInput) (*CreateBucketOutput, error) { + req, out := c.CreateBucketRequest(input) + err := req.Send() + return out, err +} + +const opCreateMultipartUpload = "CreateMultipartUpload" + +// CreateMultipartUploadRequest generates a "aws/request.Request" representing the +// client's request for the CreateMultipartUpload operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the CreateMultipartUpload method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the CreateMultipartUploadRequest method. +// req, resp := client.CreateMultipartUploadRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *S3) CreateMultipartUploadRequest(input *CreateMultipartUploadInput) (req *request.Request, output *CreateMultipartUploadOutput) { + op := &request.Operation{ + Name: opCreateMultipartUpload, + HTTPMethod: "POST", + HTTPPath: "/{Bucket}/{Key+}?uploads", + } + + if input == nil { + input = &CreateMultipartUploadInput{} + } + + req = c.newRequest(op, input, output) + output = &CreateMultipartUploadOutput{} + req.Data = output + return +} + +// Initiates a multipart upload and returns an upload ID. +// +// Note: After you initiate multipart upload and upload one or more parts, you +// must either complete or abort multipart upload in order to stop getting charged +// for storage of the uploaded parts. Only after you either complete or abort +// multipart upload, Amazon S3 frees up the parts storage and stops charging +// you for the parts storage. +func (c *S3) CreateMultipartUpload(input *CreateMultipartUploadInput) (*CreateMultipartUploadOutput, error) { + req, out := c.CreateMultipartUploadRequest(input) + err := req.Send() + return out, err +} + +const opDeleteBucket = "DeleteBucket" + +// DeleteBucketRequest generates a "aws/request.Request" representing the +// client's request for the DeleteBucket operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DeleteBucket method directly +// instead. 
+// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DeleteBucketRequest method. +// req, resp := client.DeleteBucketRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *S3) DeleteBucketRequest(input *DeleteBucketInput) (req *request.Request, output *DeleteBucketOutput) { + op := &request.Operation{ + Name: opDeleteBucket, + HTTPMethod: "DELETE", + HTTPPath: "/{Bucket}", + } + + if input == nil { + input = &DeleteBucketInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &DeleteBucketOutput{} + req.Data = output + return +} + +// Deletes the bucket. All objects (including all object versions and Delete +// Markers) in the bucket must be deleted before the bucket itself can be deleted. +func (c *S3) DeleteBucket(input *DeleteBucketInput) (*DeleteBucketOutput, error) { + req, out := c.DeleteBucketRequest(input) + err := req.Send() + return out, err +} + +const opDeleteBucketCors = "DeleteBucketCors" + +// DeleteBucketCorsRequest generates a "aws/request.Request" representing the +// client's request for the DeleteBucketCors operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DeleteBucketCors method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DeleteBucketCorsRequest method. +// req, resp := client.DeleteBucketCorsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *S3) DeleteBucketCorsRequest(input *DeleteBucketCorsInput) (req *request.Request, output *DeleteBucketCorsOutput) { + op := &request.Operation{ + Name: opDeleteBucketCors, + HTTPMethod: "DELETE", + HTTPPath: "/{Bucket}?cors", + } + + if input == nil { + input = &DeleteBucketCorsInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &DeleteBucketCorsOutput{} + req.Data = output + return +} + +// Deletes the cors configuration information set for the bucket. +func (c *S3) DeleteBucketCors(input *DeleteBucketCorsInput) (*DeleteBucketCorsOutput, error) { + req, out := c.DeleteBucketCorsRequest(input) + err := req.Send() + return out, err +} + +const opDeleteBucketLifecycle = "DeleteBucketLifecycle" + +// DeleteBucketLifecycleRequest generates a "aws/request.Request" representing the +// client's request for the DeleteBucketLifecycle operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. 
+// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DeleteBucketLifecycle method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DeleteBucketLifecycleRequest method. +// req, resp := client.DeleteBucketLifecycleRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *S3) DeleteBucketLifecycleRequest(input *DeleteBucketLifecycleInput) (req *request.Request, output *DeleteBucketLifecycleOutput) { + op := &request.Operation{ + Name: opDeleteBucketLifecycle, + HTTPMethod: "DELETE", + HTTPPath: "/{Bucket}?lifecycle", + } + + if input == nil { + input = &DeleteBucketLifecycleInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &DeleteBucketLifecycleOutput{} + req.Data = output + return +} + +// Deletes the lifecycle configuration from the bucket. +func (c *S3) DeleteBucketLifecycle(input *DeleteBucketLifecycleInput) (*DeleteBucketLifecycleOutput, error) { + req, out := c.DeleteBucketLifecycleRequest(input) + err := req.Send() + return out, err +} + +const opDeleteBucketPolicy = "DeleteBucketPolicy" + +// DeleteBucketPolicyRequest generates a "aws/request.Request" representing the +// client's request for the DeleteBucketPolicy operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DeleteBucketPolicy method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DeleteBucketPolicyRequest method. +// req, resp := client.DeleteBucketPolicyRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *S3) DeleteBucketPolicyRequest(input *DeleteBucketPolicyInput) (req *request.Request, output *DeleteBucketPolicyOutput) { + op := &request.Operation{ + Name: opDeleteBucketPolicy, + HTTPMethod: "DELETE", + HTTPPath: "/{Bucket}?policy", + } + + if input == nil { + input = &DeleteBucketPolicyInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &DeleteBucketPolicyOutput{} + req.Data = output + return +} + +// Deletes the policy from the bucket. 
+func (c *S3) DeleteBucketPolicy(input *DeleteBucketPolicyInput) (*DeleteBucketPolicyOutput, error) { + req, out := c.DeleteBucketPolicyRequest(input) + err := req.Send() + return out, err +} + +const opDeleteBucketReplication = "DeleteBucketReplication" + +// DeleteBucketReplicationRequest generates a "aws/request.Request" representing the +// client's request for the DeleteBucketReplication operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DeleteBucketReplication method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DeleteBucketReplicationRequest method. +// req, resp := client.DeleteBucketReplicationRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *S3) DeleteBucketReplicationRequest(input *DeleteBucketReplicationInput) (req *request.Request, output *DeleteBucketReplicationOutput) { + op := &request.Operation{ + Name: opDeleteBucketReplication, + HTTPMethod: "DELETE", + HTTPPath: "/{Bucket}?replication", + } + + if input == nil { + input = &DeleteBucketReplicationInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &DeleteBucketReplicationOutput{} + req.Data = output + return +} + +// Deletes the replication configuration from the bucket. +func (c *S3) DeleteBucketReplication(input *DeleteBucketReplicationInput) (*DeleteBucketReplicationOutput, error) { + req, out := c.DeleteBucketReplicationRequest(input) + err := req.Send() + return out, err +} + +const opDeleteBucketTagging = "DeleteBucketTagging" + +// DeleteBucketTaggingRequest generates a "aws/request.Request" representing the +// client's request for the DeleteBucketTagging operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DeleteBucketTagging method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DeleteBucketTaggingRequest method. 
+// req, resp := client.DeleteBucketTaggingRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *S3) DeleteBucketTaggingRequest(input *DeleteBucketTaggingInput) (req *request.Request, output *DeleteBucketTaggingOutput) { + op := &request.Operation{ + Name: opDeleteBucketTagging, + HTTPMethod: "DELETE", + HTTPPath: "/{Bucket}?tagging", + } + + if input == nil { + input = &DeleteBucketTaggingInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &DeleteBucketTaggingOutput{} + req.Data = output + return +} + +// Deletes the tags from the bucket. +func (c *S3) DeleteBucketTagging(input *DeleteBucketTaggingInput) (*DeleteBucketTaggingOutput, error) { + req, out := c.DeleteBucketTaggingRequest(input) + err := req.Send() + return out, err +} + +const opDeleteBucketWebsite = "DeleteBucketWebsite" + +// DeleteBucketWebsiteRequest generates a "aws/request.Request" representing the +// client's request for the DeleteBucketWebsite operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DeleteBucketWebsite method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DeleteBucketWebsiteRequest method. +// req, resp := client.DeleteBucketWebsiteRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *S3) DeleteBucketWebsiteRequest(input *DeleteBucketWebsiteInput) (req *request.Request, output *DeleteBucketWebsiteOutput) { + op := &request.Operation{ + Name: opDeleteBucketWebsite, + HTTPMethod: "DELETE", + HTTPPath: "/{Bucket}?website", + } + + if input == nil { + input = &DeleteBucketWebsiteInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &DeleteBucketWebsiteOutput{} + req.Data = output + return +} + +// This operation removes the website configuration from the bucket. +func (c *S3) DeleteBucketWebsite(input *DeleteBucketWebsiteInput) (*DeleteBucketWebsiteOutput, error) { + req, out := c.DeleteBucketWebsiteRequest(input) + err := req.Send() + return out, err +} + +const opDeleteObject = "DeleteObject" + +// DeleteObjectRequest generates a "aws/request.Request" representing the +// client's request for the DeleteObject operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DeleteObject method directly +// instead. 
+// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DeleteObjectRequest method. +// req, resp := client.DeleteObjectRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *S3) DeleteObjectRequest(input *DeleteObjectInput) (req *request.Request, output *DeleteObjectOutput) { + op := &request.Operation{ + Name: opDeleteObject, + HTTPMethod: "DELETE", + HTTPPath: "/{Bucket}/{Key+}", + } + + if input == nil { + input = &DeleteObjectInput{} + } + + req = c.newRequest(op, input, output) + output = &DeleteObjectOutput{} + req.Data = output + return +} + +// Removes the null version (if there is one) of an object and inserts a delete +// marker, which becomes the latest version of the object. If there isn't a +// null version, Amazon S3 does not remove any objects. +func (c *S3) DeleteObject(input *DeleteObjectInput) (*DeleteObjectOutput, error) { + req, out := c.DeleteObjectRequest(input) + err := req.Send() + return out, err +} + +const opDeleteObjects = "DeleteObjects" + +// DeleteObjectsRequest generates a "aws/request.Request" representing the +// client's request for the DeleteObjects operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DeleteObjects method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DeleteObjectsRequest method. +// req, resp := client.DeleteObjectsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *S3) DeleteObjectsRequest(input *DeleteObjectsInput) (req *request.Request, output *DeleteObjectsOutput) { + op := &request.Operation{ + Name: opDeleteObjects, + HTTPMethod: "POST", + HTTPPath: "/{Bucket}?delete", + } + + if input == nil { + input = &DeleteObjectsInput{} + } + + req = c.newRequest(op, input, output) + output = &DeleteObjectsOutput{} + req.Data = output + return +} + +// This operation enables you to delete multiple objects from a bucket using +// a single HTTP request. You may specify up to 1000 keys. +func (c *S3) DeleteObjects(input *DeleteObjectsInput) (*DeleteObjectsOutput, error) { + req, out := c.DeleteObjectsRequest(input) + err := req.Send() + return out, err +} + +const opGetBucketAccelerateConfiguration = "GetBucketAccelerateConfiguration" + +// GetBucketAccelerateConfigurationRequest generates a "aws/request.Request" representing the +// client's request for the GetBucketAccelerateConfiguration operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the GetBucketAccelerateConfiguration method directly +// instead. 
+// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the GetBucketAccelerateConfigurationRequest method. +// req, resp := client.GetBucketAccelerateConfigurationRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *S3) GetBucketAccelerateConfigurationRequest(input *GetBucketAccelerateConfigurationInput) (req *request.Request, output *GetBucketAccelerateConfigurationOutput) { + op := &request.Operation{ + Name: opGetBucketAccelerateConfiguration, + HTTPMethod: "GET", + HTTPPath: "/{Bucket}?accelerate", + } + + if input == nil { + input = &GetBucketAccelerateConfigurationInput{} + } + + req = c.newRequest(op, input, output) + output = &GetBucketAccelerateConfigurationOutput{} + req.Data = output + return +} + +// Returns the accelerate configuration of a bucket. +func (c *S3) GetBucketAccelerateConfiguration(input *GetBucketAccelerateConfigurationInput) (*GetBucketAccelerateConfigurationOutput, error) { + req, out := c.GetBucketAccelerateConfigurationRequest(input) + err := req.Send() + return out, err +} + +const opGetBucketAcl = "GetBucketAcl" + +// GetBucketAclRequest generates a "aws/request.Request" representing the +// client's request for the GetBucketAcl operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the GetBucketAcl method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the GetBucketAclRequest method. +// req, resp := client.GetBucketAclRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *S3) GetBucketAclRequest(input *GetBucketAclInput) (req *request.Request, output *GetBucketAclOutput) { + op := &request.Operation{ + Name: opGetBucketAcl, + HTTPMethod: "GET", + HTTPPath: "/{Bucket}?acl", + } + + if input == nil { + input = &GetBucketAclInput{} + } + + req = c.newRequest(op, input, output) + output = &GetBucketAclOutput{} + req.Data = output + return +} + +// Gets the access control policy for the bucket. +func (c *S3) GetBucketAcl(input *GetBucketAclInput) (*GetBucketAclOutput, error) { + req, out := c.GetBucketAclRequest(input) + err := req.Send() + return out, err +} + +const opGetBucketCors = "GetBucketCors" + +// GetBucketCorsRequest generates a "aws/request.Request" representing the +// client's request for the GetBucketCors operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the GetBucketCors method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. 
+// +// // Example sending a request using the GetBucketCorsRequest method. +// req, resp := client.GetBucketCorsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *S3) GetBucketCorsRequest(input *GetBucketCorsInput) (req *request.Request, output *GetBucketCorsOutput) { + op := &request.Operation{ + Name: opGetBucketCors, + HTTPMethod: "GET", + HTTPPath: "/{Bucket}?cors", + } + + if input == nil { + input = &GetBucketCorsInput{} + } + + req = c.newRequest(op, input, output) + output = &GetBucketCorsOutput{} + req.Data = output + return +} + +// Returns the cors configuration for the bucket. +func (c *S3) GetBucketCors(input *GetBucketCorsInput) (*GetBucketCorsOutput, error) { + req, out := c.GetBucketCorsRequest(input) + err := req.Send() + return out, err +} + +const opGetBucketLifecycle = "GetBucketLifecycle" + +// GetBucketLifecycleRequest generates a "aws/request.Request" representing the +// client's request for the GetBucketLifecycle operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the GetBucketLifecycle method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the GetBucketLifecycleRequest method. +// req, resp := client.GetBucketLifecycleRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *S3) GetBucketLifecycleRequest(input *GetBucketLifecycleInput) (req *request.Request, output *GetBucketLifecycleOutput) { + if c.Client.Config.Logger != nil { + c.Client.Config.Logger.Log("This operation, GetBucketLifecycle, has been deprecated") + } + op := &request.Operation{ + Name: opGetBucketLifecycle, + HTTPMethod: "GET", + HTTPPath: "/{Bucket}?lifecycle", + } + + if input == nil { + input = &GetBucketLifecycleInput{} + } + + req = c.newRequest(op, input, output) + output = &GetBucketLifecycleOutput{} + req.Data = output + return +} + +// Deprecated, see the GetBucketLifecycleConfiguration operation. +func (c *S3) GetBucketLifecycle(input *GetBucketLifecycleInput) (*GetBucketLifecycleOutput, error) { + req, out := c.GetBucketLifecycleRequest(input) + err := req.Send() + return out, err +} + +const opGetBucketLifecycleConfiguration = "GetBucketLifecycleConfiguration" + +// GetBucketLifecycleConfigurationRequest generates a "aws/request.Request" representing the +// client's request for the GetBucketLifecycleConfiguration operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the GetBucketLifecycleConfiguration method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. 
+// +// // Example sending a request using the GetBucketLifecycleConfigurationRequest method. +// req, resp := client.GetBucketLifecycleConfigurationRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *S3) GetBucketLifecycleConfigurationRequest(input *GetBucketLifecycleConfigurationInput) (req *request.Request, output *GetBucketLifecycleConfigurationOutput) { + op := &request.Operation{ + Name: opGetBucketLifecycleConfiguration, + HTTPMethod: "GET", + HTTPPath: "/{Bucket}?lifecycle", + } + + if input == nil { + input = &GetBucketLifecycleConfigurationInput{} + } + + req = c.newRequest(op, input, output) + output = &GetBucketLifecycleConfigurationOutput{} + req.Data = output + return +} + +// Returns the lifecycle configuration information set on the bucket. +func (c *S3) GetBucketLifecycleConfiguration(input *GetBucketLifecycleConfigurationInput) (*GetBucketLifecycleConfigurationOutput, error) { + req, out := c.GetBucketLifecycleConfigurationRequest(input) + err := req.Send() + return out, err +} + +const opGetBucketLocation = "GetBucketLocation" + +// GetBucketLocationRequest generates a "aws/request.Request" representing the +// client's request for the GetBucketLocation operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the GetBucketLocation method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the GetBucketLocationRequest method. +// req, resp := client.GetBucketLocationRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *S3) GetBucketLocationRequest(input *GetBucketLocationInput) (req *request.Request, output *GetBucketLocationOutput) { + op := &request.Operation{ + Name: opGetBucketLocation, + HTTPMethod: "GET", + HTTPPath: "/{Bucket}?location", + } + + if input == nil { + input = &GetBucketLocationInput{} + } + + req = c.newRequest(op, input, output) + output = &GetBucketLocationOutput{} + req.Data = output + return +} + +// Returns the region the bucket resides in. +func (c *S3) GetBucketLocation(input *GetBucketLocationInput) (*GetBucketLocationOutput, error) { + req, out := c.GetBucketLocationRequest(input) + err := req.Send() + return out, err +} + +const opGetBucketLogging = "GetBucketLogging" + +// GetBucketLoggingRequest generates a "aws/request.Request" representing the +// client's request for the GetBucketLogging operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the GetBucketLogging method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. 
+// +// // Example sending a request using the GetBucketLoggingRequest method. +// req, resp := client.GetBucketLoggingRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *S3) GetBucketLoggingRequest(input *GetBucketLoggingInput) (req *request.Request, output *GetBucketLoggingOutput) { + op := &request.Operation{ + Name: opGetBucketLogging, + HTTPMethod: "GET", + HTTPPath: "/{Bucket}?logging", + } + + if input == nil { + input = &GetBucketLoggingInput{} + } + + req = c.newRequest(op, input, output) + output = &GetBucketLoggingOutput{} + req.Data = output + return +} + +// Returns the logging status of a bucket and the permissions users have to +// view and modify that status. To use GET, you must be the bucket owner. +func (c *S3) GetBucketLogging(input *GetBucketLoggingInput) (*GetBucketLoggingOutput, error) { + req, out := c.GetBucketLoggingRequest(input) + err := req.Send() + return out, err +} + +const opGetBucketNotification = "GetBucketNotification" + +// GetBucketNotificationRequest generates a "aws/request.Request" representing the +// client's request for the GetBucketNotification operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the GetBucketNotification method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the GetBucketNotificationRequest method. +// req, resp := client.GetBucketNotificationRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *S3) GetBucketNotificationRequest(input *GetBucketNotificationConfigurationRequest) (req *request.Request, output *NotificationConfigurationDeprecated) { + if c.Client.Config.Logger != nil { + c.Client.Config.Logger.Log("This operation, GetBucketNotification, has been deprecated") + } + op := &request.Operation{ + Name: opGetBucketNotification, + HTTPMethod: "GET", + HTTPPath: "/{Bucket}?notification", + } + + if input == nil { + input = &GetBucketNotificationConfigurationRequest{} + } + + req = c.newRequest(op, input, output) + output = &NotificationConfigurationDeprecated{} + req.Data = output + return +} + +// Deprecated, see the GetBucketNotificationConfiguration operation. +func (c *S3) GetBucketNotification(input *GetBucketNotificationConfigurationRequest) (*NotificationConfigurationDeprecated, error) { + req, out := c.GetBucketNotificationRequest(input) + err := req.Send() + return out, err +} + +const opGetBucketNotificationConfiguration = "GetBucketNotificationConfiguration" + +// GetBucketNotificationConfigurationRequest generates a "aws/request.Request" representing the +// client's request for the GetBucketNotificationConfiguration operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. 
+// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the GetBucketNotificationConfiguration method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the GetBucketNotificationConfigurationRequest method. +// req, resp := client.GetBucketNotificationConfigurationRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *S3) GetBucketNotificationConfigurationRequest(input *GetBucketNotificationConfigurationRequest) (req *request.Request, output *NotificationConfiguration) { + op := &request.Operation{ + Name: opGetBucketNotificationConfiguration, + HTTPMethod: "GET", + HTTPPath: "/{Bucket}?notification", + } + + if input == nil { + input = &GetBucketNotificationConfigurationRequest{} + } + + req = c.newRequest(op, input, output) + output = &NotificationConfiguration{} + req.Data = output + return +} + +// Returns the notification configuration of a bucket. +func (c *S3) GetBucketNotificationConfiguration(input *GetBucketNotificationConfigurationRequest) (*NotificationConfiguration, error) { + req, out := c.GetBucketNotificationConfigurationRequest(input) + err := req.Send() + return out, err +} + +const opGetBucketPolicy = "GetBucketPolicy" + +// GetBucketPolicyRequest generates a "aws/request.Request" representing the +// client's request for the GetBucketPolicy operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the GetBucketPolicy method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the GetBucketPolicyRequest method. +// req, resp := client.GetBucketPolicyRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *S3) GetBucketPolicyRequest(input *GetBucketPolicyInput) (req *request.Request, output *GetBucketPolicyOutput) { + op := &request.Operation{ + Name: opGetBucketPolicy, + HTTPMethod: "GET", + HTTPPath: "/{Bucket}?policy", + } + + if input == nil { + input = &GetBucketPolicyInput{} + } + + req = c.newRequest(op, input, output) + output = &GetBucketPolicyOutput{} + req.Data = output + return +} + +// Returns the policy of a specified bucket. +func (c *S3) GetBucketPolicy(input *GetBucketPolicyInput) (*GetBucketPolicyOutput, error) { + req, out := c.GetBucketPolicyRequest(input) + err := req.Send() + return out, err +} + +const opGetBucketReplication = "GetBucketReplication" + +// GetBucketReplicationRequest generates a "aws/request.Request" representing the +// client's request for the GetBucketReplication operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. 
+// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the GetBucketReplication method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the GetBucketReplicationRequest method. +// req, resp := client.GetBucketReplicationRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *S3) GetBucketReplicationRequest(input *GetBucketReplicationInput) (req *request.Request, output *GetBucketReplicationOutput) { + op := &request.Operation{ + Name: opGetBucketReplication, + HTTPMethod: "GET", + HTTPPath: "/{Bucket}?replication", + } + + if input == nil { + input = &GetBucketReplicationInput{} + } + + req = c.newRequest(op, input, output) + output = &GetBucketReplicationOutput{} + req.Data = output + return +} + +// Deprecated, see the GetBucketReplicationConfiguration operation. +func (c *S3) GetBucketReplication(input *GetBucketReplicationInput) (*GetBucketReplicationOutput, error) { + req, out := c.GetBucketReplicationRequest(input) + err := req.Send() + return out, err +} + +const opGetBucketRequestPayment = "GetBucketRequestPayment" + +// GetBucketRequestPaymentRequest generates a "aws/request.Request" representing the +// client's request for the GetBucketRequestPayment operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the GetBucketRequestPayment method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the GetBucketRequestPaymentRequest method. +// req, resp := client.GetBucketRequestPaymentRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *S3) GetBucketRequestPaymentRequest(input *GetBucketRequestPaymentInput) (req *request.Request, output *GetBucketRequestPaymentOutput) { + op := &request.Operation{ + Name: opGetBucketRequestPayment, + HTTPMethod: "GET", + HTTPPath: "/{Bucket}?requestPayment", + } + + if input == nil { + input = &GetBucketRequestPaymentInput{} + } + + req = c.newRequest(op, input, output) + output = &GetBucketRequestPaymentOutput{} + req.Data = output + return +} + +// Returns the request payment configuration of a bucket. +func (c *S3) GetBucketRequestPayment(input *GetBucketRequestPaymentInput) (*GetBucketRequestPaymentOutput, error) { + req, out := c.GetBucketRequestPaymentRequest(input) + err := req.Send() + return out, err +} + +const opGetBucketTagging = "GetBucketTagging" + +// GetBucketTaggingRequest generates a "aws/request.Request" representing the +// client's request for the GetBucketTagging operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. 
+// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the GetBucketTagging method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the GetBucketTaggingRequest method. +// req, resp := client.GetBucketTaggingRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *S3) GetBucketTaggingRequest(input *GetBucketTaggingInput) (req *request.Request, output *GetBucketTaggingOutput) { + op := &request.Operation{ + Name: opGetBucketTagging, + HTTPMethod: "GET", + HTTPPath: "/{Bucket}?tagging", + } + + if input == nil { + input = &GetBucketTaggingInput{} + } + + req = c.newRequest(op, input, output) + output = &GetBucketTaggingOutput{} + req.Data = output + return +} + +// Returns the tag set associated with the bucket. +func (c *S3) GetBucketTagging(input *GetBucketTaggingInput) (*GetBucketTaggingOutput, error) { + req, out := c.GetBucketTaggingRequest(input) + err := req.Send() + return out, err +} + +const opGetBucketVersioning = "GetBucketVersioning" + +// GetBucketVersioningRequest generates a "aws/request.Request" representing the +// client's request for the GetBucketVersioning operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the GetBucketVersioning method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the GetBucketVersioningRequest method. +// req, resp := client.GetBucketVersioningRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *S3) GetBucketVersioningRequest(input *GetBucketVersioningInput) (req *request.Request, output *GetBucketVersioningOutput) { + op := &request.Operation{ + Name: opGetBucketVersioning, + HTTPMethod: "GET", + HTTPPath: "/{Bucket}?versioning", + } + + if input == nil { + input = &GetBucketVersioningInput{} + } + + req = c.newRequest(op, input, output) + output = &GetBucketVersioningOutput{} + req.Data = output + return +} + +// Returns the versioning state of a bucket. +func (c *S3) GetBucketVersioning(input *GetBucketVersioningInput) (*GetBucketVersioningOutput, error) { + req, out := c.GetBucketVersioningRequest(input) + err := req.Send() + return out, err +} + +const opGetBucketWebsite = "GetBucketWebsite" + +// GetBucketWebsiteRequest generates a "aws/request.Request" representing the +// client's request for the GetBucketWebsite operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. 
+// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the GetBucketWebsite method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the GetBucketWebsiteRequest method. +// req, resp := client.GetBucketWebsiteRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *S3) GetBucketWebsiteRequest(input *GetBucketWebsiteInput) (req *request.Request, output *GetBucketWebsiteOutput) { + op := &request.Operation{ + Name: opGetBucketWebsite, + HTTPMethod: "GET", + HTTPPath: "/{Bucket}?website", + } + + if input == nil { + input = &GetBucketWebsiteInput{} + } + + req = c.newRequest(op, input, output) + output = &GetBucketWebsiteOutput{} + req.Data = output + return +} + +// Returns the website configuration for a bucket. +func (c *S3) GetBucketWebsite(input *GetBucketWebsiteInput) (*GetBucketWebsiteOutput, error) { + req, out := c.GetBucketWebsiteRequest(input) + err := req.Send() + return out, err +} + +const opGetObject = "GetObject" + +// GetObjectRequest generates a "aws/request.Request" representing the +// client's request for the GetObject operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the GetObject method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the GetObjectRequest method. +// req, resp := client.GetObjectRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *S3) GetObjectRequest(input *GetObjectInput) (req *request.Request, output *GetObjectOutput) { + op := &request.Operation{ + Name: opGetObject, + HTTPMethod: "GET", + HTTPPath: "/{Bucket}/{Key+}", + } + + if input == nil { + input = &GetObjectInput{} + } + + req = c.newRequest(op, input, output) + output = &GetObjectOutput{} + req.Data = output + return +} + +// Retrieves objects from Amazon S3. +func (c *S3) GetObject(input *GetObjectInput) (*GetObjectOutput, error) { + req, out := c.GetObjectRequest(input) + err := req.Send() + return out, err +} + +const opGetObjectAcl = "GetObjectAcl" + +// GetObjectAclRequest generates a "aws/request.Request" representing the +// client's request for the GetObjectAcl operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the GetObjectAcl method directly +// instead. 
+// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the GetObjectAclRequest method. +// req, resp := client.GetObjectAclRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *S3) GetObjectAclRequest(input *GetObjectAclInput) (req *request.Request, output *GetObjectAclOutput) { + op := &request.Operation{ + Name: opGetObjectAcl, + HTTPMethod: "GET", + HTTPPath: "/{Bucket}/{Key+}?acl", + } + + if input == nil { + input = &GetObjectAclInput{} + } + + req = c.newRequest(op, input, output) + output = &GetObjectAclOutput{} + req.Data = output + return +} + +// Returns the access control list (ACL) of an object. +func (c *S3) GetObjectAcl(input *GetObjectAclInput) (*GetObjectAclOutput, error) { + req, out := c.GetObjectAclRequest(input) + err := req.Send() + return out, err +} + +const opGetObjectTorrent = "GetObjectTorrent" + +// GetObjectTorrentRequest generates a "aws/request.Request" representing the +// client's request for the GetObjectTorrent operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the GetObjectTorrent method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the GetObjectTorrentRequest method. +// req, resp := client.GetObjectTorrentRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *S3) GetObjectTorrentRequest(input *GetObjectTorrentInput) (req *request.Request, output *GetObjectTorrentOutput) { + op := &request.Operation{ + Name: opGetObjectTorrent, + HTTPMethod: "GET", + HTTPPath: "/{Bucket}/{Key+}?torrent", + } + + if input == nil { + input = &GetObjectTorrentInput{} + } + + req = c.newRequest(op, input, output) + output = &GetObjectTorrentOutput{} + req.Data = output + return +} + +// Return torrent files from a bucket. +func (c *S3) GetObjectTorrent(input *GetObjectTorrentInput) (*GetObjectTorrentOutput, error) { + req, out := c.GetObjectTorrentRequest(input) + err := req.Send() + return out, err +} + +const opHeadBucket = "HeadBucket" + +// HeadBucketRequest generates a "aws/request.Request" representing the +// client's request for the HeadBucket operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the HeadBucket method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the HeadBucketRequest method. 
+// req, resp := client.HeadBucketRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *S3) HeadBucketRequest(input *HeadBucketInput) (req *request.Request, output *HeadBucketOutput) { + op := &request.Operation{ + Name: opHeadBucket, + HTTPMethod: "HEAD", + HTTPPath: "/{Bucket}", + } + + if input == nil { + input = &HeadBucketInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &HeadBucketOutput{} + req.Data = output + return +} + +// This operation is useful to determine if a bucket exists and you have permission +// to access it. +func (c *S3) HeadBucket(input *HeadBucketInput) (*HeadBucketOutput, error) { + req, out := c.HeadBucketRequest(input) + err := req.Send() + return out, err +} + +const opHeadObject = "HeadObject" + +// HeadObjectRequest generates a "aws/request.Request" representing the +// client's request for the HeadObject operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the HeadObject method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the HeadObjectRequest method. +// req, resp := client.HeadObjectRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *S3) HeadObjectRequest(input *HeadObjectInput) (req *request.Request, output *HeadObjectOutput) { + op := &request.Operation{ + Name: opHeadObject, + HTTPMethod: "HEAD", + HTTPPath: "/{Bucket}/{Key+}", + } + + if input == nil { + input = &HeadObjectInput{} + } + + req = c.newRequest(op, input, output) + output = &HeadObjectOutput{} + req.Data = output + return +} + +// The HEAD operation retrieves metadata from an object without returning the +// object itself. This operation is useful if you're only interested in an object's +// metadata. To use HEAD, you must have READ access to the object. +func (c *S3) HeadObject(input *HeadObjectInput) (*HeadObjectOutput, error) { + req, out := c.HeadObjectRequest(input) + err := req.Send() + return out, err +} + +const opListBuckets = "ListBuckets" + +// ListBucketsRequest generates a "aws/request.Request" representing the +// client's request for the ListBuckets operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the ListBuckets method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the ListBucketsRequest method. 
+// req, resp := client.ListBucketsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *S3) ListBucketsRequest(input *ListBucketsInput) (req *request.Request, output *ListBucketsOutput) { + op := &request.Operation{ + Name: opListBuckets, + HTTPMethod: "GET", + HTTPPath: "/", + } + + if input == nil { + input = &ListBucketsInput{} + } + + req = c.newRequest(op, input, output) + output = &ListBucketsOutput{} + req.Data = output + return +} + +// Returns a list of all buckets owned by the authenticated sender of the request. +func (c *S3) ListBuckets(input *ListBucketsInput) (*ListBucketsOutput, error) { + req, out := c.ListBucketsRequest(input) + err := req.Send() + return out, err +} + +const opListMultipartUploads = "ListMultipartUploads" + +// ListMultipartUploadsRequest generates a "aws/request.Request" representing the +// client's request for the ListMultipartUploads operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the ListMultipartUploads method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the ListMultipartUploadsRequest method. +// req, resp := client.ListMultipartUploadsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *S3) ListMultipartUploadsRequest(input *ListMultipartUploadsInput) (req *request.Request, output *ListMultipartUploadsOutput) { + op := &request.Operation{ + Name: opListMultipartUploads, + HTTPMethod: "GET", + HTTPPath: "/{Bucket}?uploads", + Paginator: &request.Paginator{ + InputTokens: []string{"KeyMarker", "UploadIdMarker"}, + OutputTokens: []string{"NextKeyMarker", "NextUploadIdMarker"}, + LimitToken: "MaxUploads", + TruncationToken: "IsTruncated", + }, + } + + if input == nil { + input = &ListMultipartUploadsInput{} + } + + req = c.newRequest(op, input, output) + output = &ListMultipartUploadsOutput{} + req.Data = output + return +} + +// This operation lists in-progress multipart uploads. +func (c *S3) ListMultipartUploads(input *ListMultipartUploadsInput) (*ListMultipartUploadsOutput, error) { + req, out := c.ListMultipartUploadsRequest(input) + err := req.Send() + return out, err +} + +// ListMultipartUploadsPages iterates over the pages of a ListMultipartUploads operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See ListMultipartUploads method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a ListMultipartUploads operation. 
+// pageNum := 0 +// err := client.ListMultipartUploadsPages(params, +// func(page *ListMultipartUploadsOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *S3) ListMultipartUploadsPages(input *ListMultipartUploadsInput, fn func(p *ListMultipartUploadsOutput, lastPage bool) (shouldContinue bool)) error { + page, _ := c.ListMultipartUploadsRequest(input) + page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) + return page.EachPage(func(p interface{}, lastPage bool) bool { + return fn(p.(*ListMultipartUploadsOutput), lastPage) + }) +} + +const opListObjectVersions = "ListObjectVersions" + +// ListObjectVersionsRequest generates a "aws/request.Request" representing the +// client's request for the ListObjectVersions operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the ListObjectVersions method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the ListObjectVersionsRequest method. +// req, resp := client.ListObjectVersionsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *S3) ListObjectVersionsRequest(input *ListObjectVersionsInput) (req *request.Request, output *ListObjectVersionsOutput) { + op := &request.Operation{ + Name: opListObjectVersions, + HTTPMethod: "GET", + HTTPPath: "/{Bucket}?versions", + Paginator: &request.Paginator{ + InputTokens: []string{"KeyMarker", "VersionIdMarker"}, + OutputTokens: []string{"NextKeyMarker", "NextVersionIdMarker"}, + LimitToken: "MaxKeys", + TruncationToken: "IsTruncated", + }, + } + + if input == nil { + input = &ListObjectVersionsInput{} + } + + req = c.newRequest(op, input, output) + output = &ListObjectVersionsOutput{} + req.Data = output + return +} + +// Returns metadata about all of the versions of objects in a bucket. +func (c *S3) ListObjectVersions(input *ListObjectVersionsInput) (*ListObjectVersionsOutput, error) { + req, out := c.ListObjectVersionsRequest(input) + err := req.Send() + return out, err +} + +// ListObjectVersionsPages iterates over the pages of a ListObjectVersions operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See ListObjectVersions method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a ListObjectVersions operation. 
+// pageNum := 0 +// err := client.ListObjectVersionsPages(params, +// func(page *ListObjectVersionsOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *S3) ListObjectVersionsPages(input *ListObjectVersionsInput, fn func(p *ListObjectVersionsOutput, lastPage bool) (shouldContinue bool)) error { + page, _ := c.ListObjectVersionsRequest(input) + page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) + return page.EachPage(func(p interface{}, lastPage bool) bool { + return fn(p.(*ListObjectVersionsOutput), lastPage) + }) +} + +const opListObjects = "ListObjects" + +// ListObjectsRequest generates a "aws/request.Request" representing the +// client's request for the ListObjects operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the ListObjects method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the ListObjectsRequest method. +// req, resp := client.ListObjectsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *S3) ListObjectsRequest(input *ListObjectsInput) (req *request.Request, output *ListObjectsOutput) { + op := &request.Operation{ + Name: opListObjects, + HTTPMethod: "GET", + HTTPPath: "/{Bucket}", + Paginator: &request.Paginator{ + InputTokens: []string{"Marker"}, + OutputTokens: []string{"NextMarker || Contents[-1].Key"}, + LimitToken: "MaxKeys", + TruncationToken: "IsTruncated", + }, + } + + if input == nil { + input = &ListObjectsInput{} + } + + req = c.newRequest(op, input, output) + output = &ListObjectsOutput{} + req.Data = output + return +} + +// Returns some or all (up to 1000) of the objects in a bucket. You can use +// the request parameters as selection criteria to return a subset of the objects +// in a bucket. +func (c *S3) ListObjects(input *ListObjectsInput) (*ListObjectsOutput, error) { + req, out := c.ListObjectsRequest(input) + err := req.Send() + return out, err +} + +// ListObjectsPages iterates over the pages of a ListObjects operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See ListObjects method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a ListObjects operation. 
+// pageNum := 0 +// err := client.ListObjectsPages(params, +// func(page *ListObjectsOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *S3) ListObjectsPages(input *ListObjectsInput, fn func(p *ListObjectsOutput, lastPage bool) (shouldContinue bool)) error { + page, _ := c.ListObjectsRequest(input) + page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) + return page.EachPage(func(p interface{}, lastPage bool) bool { + return fn(p.(*ListObjectsOutput), lastPage) + }) +} + +const opListObjectsV2 = "ListObjectsV2" + +// ListObjectsV2Request generates a "aws/request.Request" representing the +// client's request for the ListObjectsV2 operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the ListObjectsV2 method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the ListObjectsV2Request method. +// req, resp := client.ListObjectsV2Request(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *S3) ListObjectsV2Request(input *ListObjectsV2Input) (req *request.Request, output *ListObjectsV2Output) { + op := &request.Operation{ + Name: opListObjectsV2, + HTTPMethod: "GET", + HTTPPath: "/{Bucket}?list-type=2", + Paginator: &request.Paginator{ + InputTokens: []string{"ContinuationToken"}, + OutputTokens: []string{"NextContinuationToken"}, + LimitToken: "MaxKeys", + TruncationToken: "", + }, + } + + if input == nil { + input = &ListObjectsV2Input{} + } + + req = c.newRequest(op, input, output) + output = &ListObjectsV2Output{} + req.Data = output + return +} + +// Returns some or all (up to 1000) of the objects in a bucket. You can use +// the request parameters as selection criteria to return a subset of the objects +// in a bucket. Note: ListObjectsV2 is the revised List Objects API and we recommend +// you use this revised API for new application development. +func (c *S3) ListObjectsV2(input *ListObjectsV2Input) (*ListObjectsV2Output, error) { + req, out := c.ListObjectsV2Request(input) + err := req.Send() + return out, err +} + +// ListObjectsV2Pages iterates over the pages of a ListObjectsV2 operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See ListObjectsV2 method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a ListObjectsV2 operation. 
+// pageNum := 0 +// err := client.ListObjectsV2Pages(params, +// func(page *ListObjectsV2Output, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *S3) ListObjectsV2Pages(input *ListObjectsV2Input, fn func(p *ListObjectsV2Output, lastPage bool) (shouldContinue bool)) error { + page, _ := c.ListObjectsV2Request(input) + page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) + return page.EachPage(func(p interface{}, lastPage bool) bool { + return fn(p.(*ListObjectsV2Output), lastPage) + }) +} + +const opListParts = "ListParts" + +// ListPartsRequest generates a "aws/request.Request" representing the +// client's request for the ListParts operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the ListParts method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the ListPartsRequest method. +// req, resp := client.ListPartsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *S3) ListPartsRequest(input *ListPartsInput) (req *request.Request, output *ListPartsOutput) { + op := &request.Operation{ + Name: opListParts, + HTTPMethod: "GET", + HTTPPath: "/{Bucket}/{Key+}", + Paginator: &request.Paginator{ + InputTokens: []string{"PartNumberMarker"}, + OutputTokens: []string{"NextPartNumberMarker"}, + LimitToken: "MaxParts", + TruncationToken: "IsTruncated", + }, + } + + if input == nil { + input = &ListPartsInput{} + } + + req = c.newRequest(op, input, output) + output = &ListPartsOutput{} + req.Data = output + return +} + +// Lists the parts that have been uploaded for a specific multipart upload. +func (c *S3) ListParts(input *ListPartsInput) (*ListPartsOutput, error) { + req, out := c.ListPartsRequest(input) + err := req.Send() + return out, err +} + +// ListPartsPages iterates over the pages of a ListParts operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See ListParts method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a ListParts operation. 
+// pageNum := 0 +// err := client.ListPartsPages(params, +// func(page *ListPartsOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *S3) ListPartsPages(input *ListPartsInput, fn func(p *ListPartsOutput, lastPage bool) (shouldContinue bool)) error { + page, _ := c.ListPartsRequest(input) + page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) + return page.EachPage(func(p interface{}, lastPage bool) bool { + return fn(p.(*ListPartsOutput), lastPage) + }) +} + +const opPutBucketAccelerateConfiguration = "PutBucketAccelerateConfiguration" + +// PutBucketAccelerateConfigurationRequest generates a "aws/request.Request" representing the +// client's request for the PutBucketAccelerateConfiguration operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the PutBucketAccelerateConfiguration method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the PutBucketAccelerateConfigurationRequest method. +// req, resp := client.PutBucketAccelerateConfigurationRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *S3) PutBucketAccelerateConfigurationRequest(input *PutBucketAccelerateConfigurationInput) (req *request.Request, output *PutBucketAccelerateConfigurationOutput) { + op := &request.Operation{ + Name: opPutBucketAccelerateConfiguration, + HTTPMethod: "PUT", + HTTPPath: "/{Bucket}?accelerate", + } + + if input == nil { + input = &PutBucketAccelerateConfigurationInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &PutBucketAccelerateConfigurationOutput{} + req.Data = output + return +} + +// Sets the accelerate configuration of an existing bucket. +func (c *S3) PutBucketAccelerateConfiguration(input *PutBucketAccelerateConfigurationInput) (*PutBucketAccelerateConfigurationOutput, error) { + req, out := c.PutBucketAccelerateConfigurationRequest(input) + err := req.Send() + return out, err +} + +const opPutBucketAcl = "PutBucketAcl" + +// PutBucketAclRequest generates a "aws/request.Request" representing the +// client's request for the PutBucketAcl operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the PutBucketAcl method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the PutBucketAclRequest method. 
+// req, resp := client.PutBucketAclRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *S3) PutBucketAclRequest(input *PutBucketAclInput) (req *request.Request, output *PutBucketAclOutput) { + op := &request.Operation{ + Name: opPutBucketAcl, + HTTPMethod: "PUT", + HTTPPath: "/{Bucket}?acl", + } + + if input == nil { + input = &PutBucketAclInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &PutBucketAclOutput{} + req.Data = output + return +} + +// Sets the permissions on a bucket using access control lists (ACL). +func (c *S3) PutBucketAcl(input *PutBucketAclInput) (*PutBucketAclOutput, error) { + req, out := c.PutBucketAclRequest(input) + err := req.Send() + return out, err +} + +const opPutBucketCors = "PutBucketCors" + +// PutBucketCorsRequest generates a "aws/request.Request" representing the +// client's request for the PutBucketCors operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the PutBucketCors method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the PutBucketCorsRequest method. +// req, resp := client.PutBucketCorsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *S3) PutBucketCorsRequest(input *PutBucketCorsInput) (req *request.Request, output *PutBucketCorsOutput) { + op := &request.Operation{ + Name: opPutBucketCors, + HTTPMethod: "PUT", + HTTPPath: "/{Bucket}?cors", + } + + if input == nil { + input = &PutBucketCorsInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &PutBucketCorsOutput{} + req.Data = output + return +} + +// Sets the cors configuration for a bucket. +func (c *S3) PutBucketCors(input *PutBucketCorsInput) (*PutBucketCorsOutput, error) { + req, out := c.PutBucketCorsRequest(input) + err := req.Send() + return out, err +} + +const opPutBucketLifecycle = "PutBucketLifecycle" + +// PutBucketLifecycleRequest generates a "aws/request.Request" representing the +// client's request for the PutBucketLifecycle operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the PutBucketLifecycle method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the PutBucketLifecycleRequest method. 
+// req, resp := client.PutBucketLifecycleRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *S3) PutBucketLifecycleRequest(input *PutBucketLifecycleInput) (req *request.Request, output *PutBucketLifecycleOutput) { + if c.Client.Config.Logger != nil { + c.Client.Config.Logger.Log("This operation, PutBucketLifecycle, has been deprecated") + } + op := &request.Operation{ + Name: opPutBucketLifecycle, + HTTPMethod: "PUT", + HTTPPath: "/{Bucket}?lifecycle", + } + + if input == nil { + input = &PutBucketLifecycleInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &PutBucketLifecycleOutput{} + req.Data = output + return +} + +// Deprecated, see the PutBucketLifecycleConfiguration operation. +func (c *S3) PutBucketLifecycle(input *PutBucketLifecycleInput) (*PutBucketLifecycleOutput, error) { + req, out := c.PutBucketLifecycleRequest(input) + err := req.Send() + return out, err +} + +const opPutBucketLifecycleConfiguration = "PutBucketLifecycleConfiguration" + +// PutBucketLifecycleConfigurationRequest generates a "aws/request.Request" representing the +// client's request for the PutBucketLifecycleConfiguration operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the PutBucketLifecycleConfiguration method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the PutBucketLifecycleConfigurationRequest method. +// req, resp := client.PutBucketLifecycleConfigurationRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *S3) PutBucketLifecycleConfigurationRequest(input *PutBucketLifecycleConfigurationInput) (req *request.Request, output *PutBucketLifecycleConfigurationOutput) { + op := &request.Operation{ + Name: opPutBucketLifecycleConfiguration, + HTTPMethod: "PUT", + HTTPPath: "/{Bucket}?lifecycle", + } + + if input == nil { + input = &PutBucketLifecycleConfigurationInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &PutBucketLifecycleConfigurationOutput{} + req.Data = output + return +} + +// Sets lifecycle configuration for your bucket. If a lifecycle configuration +// exists, it replaces it. +func (c *S3) PutBucketLifecycleConfiguration(input *PutBucketLifecycleConfigurationInput) (*PutBucketLifecycleConfigurationOutput, error) { + req, out := c.PutBucketLifecycleConfigurationRequest(input) + err := req.Send() + return out, err +} + +const opPutBucketLogging = "PutBucketLogging" + +// PutBucketLoggingRequest generates a "aws/request.Request" representing the +// client's request for the PutBucketLogging operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. 
+// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the PutBucketLogging method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the PutBucketLoggingRequest method. +// req, resp := client.PutBucketLoggingRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *S3) PutBucketLoggingRequest(input *PutBucketLoggingInput) (req *request.Request, output *PutBucketLoggingOutput) { + op := &request.Operation{ + Name: opPutBucketLogging, + HTTPMethod: "PUT", + HTTPPath: "/{Bucket}?logging", + } + + if input == nil { + input = &PutBucketLoggingInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &PutBucketLoggingOutput{} + req.Data = output + return +} + +// Set the logging parameters for a bucket and to specify permissions for who +// can view and modify the logging parameters. To set the logging status of +// a bucket, you must be the bucket owner. +func (c *S3) PutBucketLogging(input *PutBucketLoggingInput) (*PutBucketLoggingOutput, error) { + req, out := c.PutBucketLoggingRequest(input) + err := req.Send() + return out, err +} + +const opPutBucketNotification = "PutBucketNotification" + +// PutBucketNotificationRequest generates a "aws/request.Request" representing the +// client's request for the PutBucketNotification operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the PutBucketNotification method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the PutBucketNotificationRequest method. +// req, resp := client.PutBucketNotificationRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *S3) PutBucketNotificationRequest(input *PutBucketNotificationInput) (req *request.Request, output *PutBucketNotificationOutput) { + if c.Client.Config.Logger != nil { + c.Client.Config.Logger.Log("This operation, PutBucketNotification, has been deprecated") + } + op := &request.Operation{ + Name: opPutBucketNotification, + HTTPMethod: "PUT", + HTTPPath: "/{Bucket}?notification", + } + + if input == nil { + input = &PutBucketNotificationInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &PutBucketNotificationOutput{} + req.Data = output + return +} + +// Deprecated, see the PutBucketNotificationConfiguraiton operation. 
+func (c *S3) PutBucketNotification(input *PutBucketNotificationInput) (*PutBucketNotificationOutput, error) { + req, out := c.PutBucketNotificationRequest(input) + err := req.Send() + return out, err +} + +const opPutBucketNotificationConfiguration = "PutBucketNotificationConfiguration" + +// PutBucketNotificationConfigurationRequest generates a "aws/request.Request" representing the +// client's request for the PutBucketNotificationConfiguration operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the PutBucketNotificationConfiguration method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the PutBucketNotificationConfigurationRequest method. +// req, resp := client.PutBucketNotificationConfigurationRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *S3) PutBucketNotificationConfigurationRequest(input *PutBucketNotificationConfigurationInput) (req *request.Request, output *PutBucketNotificationConfigurationOutput) { + op := &request.Operation{ + Name: opPutBucketNotificationConfiguration, + HTTPMethod: "PUT", + HTTPPath: "/{Bucket}?notification", + } + + if input == nil { + input = &PutBucketNotificationConfigurationInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &PutBucketNotificationConfigurationOutput{} + req.Data = output + return +} + +// Enables notifications of specified events for a bucket. +func (c *S3) PutBucketNotificationConfiguration(input *PutBucketNotificationConfigurationInput) (*PutBucketNotificationConfigurationOutput, error) { + req, out := c.PutBucketNotificationConfigurationRequest(input) + err := req.Send() + return out, err +} + +const opPutBucketPolicy = "PutBucketPolicy" + +// PutBucketPolicyRequest generates a "aws/request.Request" representing the +// client's request for the PutBucketPolicy operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the PutBucketPolicy method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the PutBucketPolicyRequest method. 
+// req, resp := client.PutBucketPolicyRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *S3) PutBucketPolicyRequest(input *PutBucketPolicyInput) (req *request.Request, output *PutBucketPolicyOutput) { + op := &request.Operation{ + Name: opPutBucketPolicy, + HTTPMethod: "PUT", + HTTPPath: "/{Bucket}?policy", + } + + if input == nil { + input = &PutBucketPolicyInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &PutBucketPolicyOutput{} + req.Data = output + return +} + +// Replaces a policy on a bucket. If the bucket already has a policy, the one +// in this request completely replaces it. +func (c *S3) PutBucketPolicy(input *PutBucketPolicyInput) (*PutBucketPolicyOutput, error) { + req, out := c.PutBucketPolicyRequest(input) + err := req.Send() + return out, err +} + +const opPutBucketReplication = "PutBucketReplication" + +// PutBucketReplicationRequest generates a "aws/request.Request" representing the +// client's request for the PutBucketReplication operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the PutBucketReplication method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the PutBucketReplicationRequest method. +// req, resp := client.PutBucketReplicationRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *S3) PutBucketReplicationRequest(input *PutBucketReplicationInput) (req *request.Request, output *PutBucketReplicationOutput) { + op := &request.Operation{ + Name: opPutBucketReplication, + HTTPMethod: "PUT", + HTTPPath: "/{Bucket}?replication", + } + + if input == nil { + input = &PutBucketReplicationInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &PutBucketReplicationOutput{} + req.Data = output + return +} + +// Creates a new replication configuration (or replaces an existing one, if +// present). +func (c *S3) PutBucketReplication(input *PutBucketReplicationInput) (*PutBucketReplicationOutput, error) { + req, out := c.PutBucketReplicationRequest(input) + err := req.Send() + return out, err +} + +const opPutBucketRequestPayment = "PutBucketRequestPayment" + +// PutBucketRequestPaymentRequest generates a "aws/request.Request" representing the +// client's request for the PutBucketRequestPayment operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. 
If +// you just want the service response, call the PutBucketRequestPayment method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the PutBucketRequestPaymentRequest method. +// req, resp := client.PutBucketRequestPaymentRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *S3) PutBucketRequestPaymentRequest(input *PutBucketRequestPaymentInput) (req *request.Request, output *PutBucketRequestPaymentOutput) { + op := &request.Operation{ + Name: opPutBucketRequestPayment, + HTTPMethod: "PUT", + HTTPPath: "/{Bucket}?requestPayment", + } + + if input == nil { + input = &PutBucketRequestPaymentInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &PutBucketRequestPaymentOutput{} + req.Data = output + return +} + +// Sets the request payment configuration for a bucket. By default, the bucket +// owner pays for downloads from the bucket. This configuration parameter enables +// the bucket owner (only) to specify that the person requesting the download +// will be charged for the download. Documentation on requester pays buckets +// can be found at http://docs.aws.amazon.com/AmazonS3/latest/dev/RequesterPaysBuckets.html +func (c *S3) PutBucketRequestPayment(input *PutBucketRequestPaymentInput) (*PutBucketRequestPaymentOutput, error) { + req, out := c.PutBucketRequestPaymentRequest(input) + err := req.Send() + return out, err +} + +const opPutBucketTagging = "PutBucketTagging" + +// PutBucketTaggingRequest generates a "aws/request.Request" representing the +// client's request for the PutBucketTagging operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the PutBucketTagging method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the PutBucketTaggingRequest method. +// req, resp := client.PutBucketTaggingRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *S3) PutBucketTaggingRequest(input *PutBucketTaggingInput) (req *request.Request, output *PutBucketTaggingOutput) { + op := &request.Operation{ + Name: opPutBucketTagging, + HTTPMethod: "PUT", + HTTPPath: "/{Bucket}?tagging", + } + + if input == nil { + input = &PutBucketTaggingInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &PutBucketTaggingOutput{} + req.Data = output + return +} + +// Sets the tags for a bucket. 
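Editorial aside (illustrative only, not part of the vendored SDK file): the PutBucketTagging operation documented above takes a Tagging structure containing a TagSet. A small hedged sketch, with a placeholder bucket name and tags, could look like this.

```go
package main

import (
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	// The region is an assumption for the example.
	sess, err := session.NewSession(&aws.Config{Region: aws.String("us-east-1")})
	if err != nil {
		log.Fatal(err)
	}
	svc := s3.New(sess)

	// Replace the bucket's tag set with two illustrative tags.
	_, err = svc.PutBucketTagging(&s3.PutBucketTaggingInput{
		Bucket: aws.String("example-bucket"), // hypothetical bucket
		Tagging: &s3.Tagging{
			TagSet: []*s3.Tag{
				{Key: aws.String("team"), Value: aws.String("platform")},
				{Key: aws.String("environment"), Value: aws.String("staging")},
			},
		},
	})
	if err != nil {
		log.Fatalf("PutBucketTagging failed: %v", err)
	}
}
```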
+func (c *S3) PutBucketTagging(input *PutBucketTaggingInput) (*PutBucketTaggingOutput, error) { + req, out := c.PutBucketTaggingRequest(input) + err := req.Send() + return out, err +} + +const opPutBucketVersioning = "PutBucketVersioning" + +// PutBucketVersioningRequest generates a "aws/request.Request" representing the +// client's request for the PutBucketVersioning operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the PutBucketVersioning method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the PutBucketVersioningRequest method. +// req, resp := client.PutBucketVersioningRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *S3) PutBucketVersioningRequest(input *PutBucketVersioningInput) (req *request.Request, output *PutBucketVersioningOutput) { + op := &request.Operation{ + Name: opPutBucketVersioning, + HTTPMethod: "PUT", + HTTPPath: "/{Bucket}?versioning", + } + + if input == nil { + input = &PutBucketVersioningInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &PutBucketVersioningOutput{} + req.Data = output + return +} + +// Sets the versioning state of an existing bucket. To set the versioning state, +// you must be the bucket owner. +func (c *S3) PutBucketVersioning(input *PutBucketVersioningInput) (*PutBucketVersioningOutput, error) { + req, out := c.PutBucketVersioningRequest(input) + err := req.Send() + return out, err +} + +const opPutBucketWebsite = "PutBucketWebsite" + +// PutBucketWebsiteRequest generates a "aws/request.Request" representing the +// client's request for the PutBucketWebsite operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the PutBucketWebsite method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the PutBucketWebsiteRequest method. 
+// req, resp := client.PutBucketWebsiteRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *S3) PutBucketWebsiteRequest(input *PutBucketWebsiteInput) (req *request.Request, output *PutBucketWebsiteOutput) { + op := &request.Operation{ + Name: opPutBucketWebsite, + HTTPMethod: "PUT", + HTTPPath: "/{Bucket}?website", + } + + if input == nil { + input = &PutBucketWebsiteInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &PutBucketWebsiteOutput{} + req.Data = output + return +} + +// Set the website configuration for a bucket. +func (c *S3) PutBucketWebsite(input *PutBucketWebsiteInput) (*PutBucketWebsiteOutput, error) { + req, out := c.PutBucketWebsiteRequest(input) + err := req.Send() + return out, err +} + +const opPutObject = "PutObject" + +// PutObjectRequest generates a "aws/request.Request" representing the +// client's request for the PutObject operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the PutObject method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the PutObjectRequest method. +// req, resp := client.PutObjectRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *S3) PutObjectRequest(input *PutObjectInput) (req *request.Request, output *PutObjectOutput) { + op := &request.Operation{ + Name: opPutObject, + HTTPMethod: "PUT", + HTTPPath: "/{Bucket}/{Key+}", + } + + if input == nil { + input = &PutObjectInput{} + } + + req = c.newRequest(op, input, output) + output = &PutObjectOutput{} + req.Data = output + return +} + +// Adds an object to a bucket. +func (c *S3) PutObject(input *PutObjectInput) (*PutObjectOutput, error) { + req, out := c.PutObjectRequest(input) + err := req.Send() + return out, err +} + +const opPutObjectAcl = "PutObjectAcl" + +// PutObjectAclRequest generates a "aws/request.Request" representing the +// client's request for the PutObjectAcl operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the PutObjectAcl method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the PutObjectAclRequest method. 
+// req, resp := client.PutObjectAclRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *S3) PutObjectAclRequest(input *PutObjectAclInput) (req *request.Request, output *PutObjectAclOutput) { + op := &request.Operation{ + Name: opPutObjectAcl, + HTTPMethod: "PUT", + HTTPPath: "/{Bucket}/{Key+}?acl", + } + + if input == nil { + input = &PutObjectAclInput{} + } + + req = c.newRequest(op, input, output) + output = &PutObjectAclOutput{} + req.Data = output + return +} + +// uses the acl subresource to set the access control list (ACL) permissions +// for an object that already exists in a bucket +func (c *S3) PutObjectAcl(input *PutObjectAclInput) (*PutObjectAclOutput, error) { + req, out := c.PutObjectAclRequest(input) + err := req.Send() + return out, err +} + +const opRestoreObject = "RestoreObject" + +// RestoreObjectRequest generates a "aws/request.Request" representing the +// client's request for the RestoreObject operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the RestoreObject method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the RestoreObjectRequest method. +// req, resp := client.RestoreObjectRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *S3) RestoreObjectRequest(input *RestoreObjectInput) (req *request.Request, output *RestoreObjectOutput) { + op := &request.Operation{ + Name: opRestoreObject, + HTTPMethod: "POST", + HTTPPath: "/{Bucket}/{Key+}?restore", + } + + if input == nil { + input = &RestoreObjectInput{} + } + + req = c.newRequest(op, input, output) + output = &RestoreObjectOutput{} + req.Data = output + return +} + +// Restores an archived copy of an object back into Amazon S3 +func (c *S3) RestoreObject(input *RestoreObjectInput) (*RestoreObjectOutput, error) { + req, out := c.RestoreObjectRequest(input) + err := req.Send() + return out, err +} + +const opUploadPart = "UploadPart" + +// UploadPartRequest generates a "aws/request.Request" representing the +// client's request for the UploadPart operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the UploadPart method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the UploadPartRequest method. 
+// req, resp := client.UploadPartRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *S3) UploadPartRequest(input *UploadPartInput) (req *request.Request, output *UploadPartOutput) { + op := &request.Operation{ + Name: opUploadPart, + HTTPMethod: "PUT", + HTTPPath: "/{Bucket}/{Key+}", + } + + if input == nil { + input = &UploadPartInput{} + } + + req = c.newRequest(op, input, output) + output = &UploadPartOutput{} + req.Data = output + return +} + +// Uploads a part in a multipart upload. +// +// Note: After you initiate multipart upload and upload one or more parts, you +// must either complete or abort multipart upload in order to stop getting charged +// for storage of the uploaded parts. Only after you either complete or abort +// multipart upload, Amazon S3 frees up the parts storage and stops charging +// you for the parts storage. +func (c *S3) UploadPart(input *UploadPartInput) (*UploadPartOutput, error) { + req, out := c.UploadPartRequest(input) + err := req.Send() + return out, err +} + +const opUploadPartCopy = "UploadPartCopy" + +// UploadPartCopyRequest generates a "aws/request.Request" representing the +// client's request for the UploadPartCopy operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the UploadPartCopy method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the UploadPartCopyRequest method. +// req, resp := client.UploadPartCopyRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *S3) UploadPartCopyRequest(input *UploadPartCopyInput) (req *request.Request, output *UploadPartCopyOutput) { + op := &request.Operation{ + Name: opUploadPartCopy, + HTTPMethod: "PUT", + HTTPPath: "/{Bucket}/{Key+}", + } + + if input == nil { + input = &UploadPartCopyInput{} + } + + req = c.newRequest(op, input, output) + output = &UploadPartCopyOutput{} + req.Data = output + return +} + +// Uploads a part by copying data from an existing object as data source. +func (c *S3) UploadPartCopy(input *UploadPartCopyInput) (*UploadPartCopyOutput, error) { + req, out := c.UploadPartCopyRequest(input) + err := req.Send() + return out, err +} + +// Specifies the days since the initiation of an Incomplete Multipart Upload +// that Lifecycle will wait before permanently removing all parts of the upload. +type AbortIncompleteMultipartUpload struct { + _ struct{} `type:"structure"` + + // Indicates the number of days that must pass since initiation for Lifecycle + // to abort an Incomplete Multipart Upload. 
+ DaysAfterInitiation *int64 `type:"integer"` +} + +// String returns the string representation +func (s AbortIncompleteMultipartUpload) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AbortIncompleteMultipartUpload) GoString() string { + return s.String() +} + +type AbortMultipartUploadInput struct { + _ struct{} `type:"structure"` + + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"` + + // Confirms that the requester knows that she or he will be charged for the + // request. Bucket owners need not specify this parameter in their requests. + // Documentation on downloading objects from requester pays buckets can be found + // at http://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html + RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"` + + UploadId *string `location:"querystring" locationName:"uploadId" type:"string" required:"true"` +} + +// String returns the string representation +func (s AbortMultipartUploadInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AbortMultipartUploadInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *AbortMultipartUploadInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "AbortMultipartUploadInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Key == nil { + invalidParams.Add(request.NewErrParamRequired("Key")) + } + if s.Key != nil && len(*s.Key) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Key", 1)) + } + if s.UploadId == nil { + invalidParams.Add(request.NewErrParamRequired("UploadId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type AbortMultipartUploadOutput struct { + _ struct{} `type:"structure"` + + // If present, indicates that the requester was successfully charged for the + // request. + RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"` +} + +// String returns the string representation +func (s AbortMultipartUploadOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AbortMultipartUploadOutput) GoString() string { + return s.String() +} + +type AccelerateConfiguration struct { + _ struct{} `type:"structure"` + + // The accelerate configuration of the bucket. + Status *string `type:"string" enum:"BucketAccelerateStatus"` +} + +// String returns the string representation +func (s AccelerateConfiguration) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AccelerateConfiguration) GoString() string { + return s.String() +} + +type AccessControlPolicy struct { + _ struct{} `type:"structure"` + + // A list of grants. 
+ Grants []*Grant `locationName:"AccessControlList" locationNameList:"Grant" type:"list"` + + Owner *Owner `type:"structure"` +} + +// String returns the string representation +func (s AccessControlPolicy) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AccessControlPolicy) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *AccessControlPolicy) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "AccessControlPolicy"} + if s.Grants != nil { + for i, v := range s.Grants { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Grants", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type Bucket struct { + _ struct{} `type:"structure"` + + // Date the bucket was created. + CreationDate *time.Time `type:"timestamp" timestampFormat:"iso8601"` + + // The name of the bucket. + Name *string `type:"string"` +} + +// String returns the string representation +func (s Bucket) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Bucket) GoString() string { + return s.String() +} + +type BucketLifecycleConfiguration struct { + _ struct{} `type:"structure"` + + Rules []*LifecycleRule `locationName:"Rule" type:"list" flattened:"true" required:"true"` +} + +// String returns the string representation +func (s BucketLifecycleConfiguration) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s BucketLifecycleConfiguration) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *BucketLifecycleConfiguration) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "BucketLifecycleConfiguration"} + if s.Rules == nil { + invalidParams.Add(request.NewErrParamRequired("Rules")) + } + if s.Rules != nil { + for i, v := range s.Rules { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Rules", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type BucketLoggingStatus struct { + _ struct{} `type:"structure"` + + LoggingEnabled *LoggingEnabled `type:"structure"` +} + +// String returns the string representation +func (s BucketLoggingStatus) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s BucketLoggingStatus) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *BucketLoggingStatus) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "BucketLoggingStatus"} + if s.LoggingEnabled != nil { + if err := s.LoggingEnabled.Validate(); err != nil { + invalidParams.AddNested("LoggingEnabled", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type CORSConfiguration struct { + _ struct{} `type:"structure"` + + CORSRules []*CORSRule `locationName:"CORSRule" type:"list" flattened:"true" required:"true"` +} + +// String returns the string representation +func (s CORSConfiguration) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CORSConfiguration) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *CORSConfiguration) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CORSConfiguration"} + if s.CORSRules == nil { + invalidParams.Add(request.NewErrParamRequired("CORSRules")) + } + if s.CORSRules != nil { + for i, v := range s.CORSRules { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "CORSRules", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type CORSRule struct { + _ struct{} `type:"structure"` + + // Specifies which headers are allowed in a pre-flight OPTIONS request. + AllowedHeaders []*string `locationName:"AllowedHeader" type:"list" flattened:"true"` + + // Identifies HTTP methods that the domain/origin specified in the rule is allowed + // to execute. + AllowedMethods []*string `locationName:"AllowedMethod" type:"list" flattened:"true" required:"true"` + + // One or more origins you want customers to be able to access the bucket from. + AllowedOrigins []*string `locationName:"AllowedOrigin" type:"list" flattened:"true" required:"true"` + + // One or more headers in the response that you want customers to be able to + // access from their applications (for example, from a JavaScript XMLHttpRequest + // object). + ExposeHeaders []*string `locationName:"ExposeHeader" type:"list" flattened:"true"` + + // The time in seconds that your browser is to cache the preflight response + // for the specified resource. + MaxAgeSeconds *int64 `type:"integer"` +} + +// String returns the string representation +func (s CORSRule) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CORSRule) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *CORSRule) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CORSRule"} + if s.AllowedMethods == nil { + invalidParams.Add(request.NewErrParamRequired("AllowedMethods")) + } + if s.AllowedOrigins == nil { + invalidParams.Add(request.NewErrParamRequired("AllowedOrigins")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type CloudFunctionConfiguration struct { + _ struct{} `type:"structure"` + + CloudFunction *string `type:"string"` + + // Bucket event for which to send notifications. + Event *string `deprecated:"true" type:"string" enum:"Event"` + + Events []*string `locationName:"Event" type:"list" flattened:"true"` + + // Optional unique identifier for configurations in a notification configuration. 
+ // If you don't provide one, Amazon S3 will assign an ID. + Id *string `type:"string"` + + InvocationRole *string `type:"string"` +} + +// String returns the string representation +func (s CloudFunctionConfiguration) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CloudFunctionConfiguration) GoString() string { + return s.String() +} + +type CommonPrefix struct { + _ struct{} `type:"structure"` + + Prefix *string `type:"string"` +} + +// String returns the string representation +func (s CommonPrefix) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CommonPrefix) GoString() string { + return s.String() +} + +type CompleteMultipartUploadInput struct { + _ struct{} `type:"structure" payload:"MultipartUpload"` + + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"` + + MultipartUpload *CompletedMultipartUpload `locationName:"CompleteMultipartUpload" type:"structure"` + + // Confirms that the requester knows that she or he will be charged for the + // request. Bucket owners need not specify this parameter in their requests. + // Documentation on downloading objects from requester pays buckets can be found + // at http://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html + RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"` + + UploadId *string `location:"querystring" locationName:"uploadId" type:"string" required:"true"` +} + +// String returns the string representation +func (s CompleteMultipartUploadInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CompleteMultipartUploadInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *CompleteMultipartUploadInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CompleteMultipartUploadInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Key == nil { + invalidParams.Add(request.NewErrParamRequired("Key")) + } + if s.Key != nil && len(*s.Key) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Key", 1)) + } + if s.UploadId == nil { + invalidParams.Add(request.NewErrParamRequired("UploadId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type CompleteMultipartUploadOutput struct { + _ struct{} `type:"structure"` + + Bucket *string `type:"string"` + + // Entity tag of the object. + ETag *string `type:"string"` + + // If the object expiration is configured, this will contain the expiration + // date (expiry-date) and rule ID (rule-id). The value of rule-id is URL encoded. + Expiration *string `location:"header" locationName:"x-amz-expiration" type:"string"` + + Key *string `min:"1" type:"string"` + + Location *string `type:"string"` + + // If present, indicates that the requester was successfully charged for the + // request. + RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"` + + // If present, specifies the ID of the AWS Key Management Service (KMS) master + // encryption key that was used for the object. 
+ SSEKMSKeyId *string `location:"header" locationName:"x-amz-server-side-encryption-aws-kms-key-id" type:"string"` + + // The Server-side encryption algorithm used when storing this object in S3 + // (e.g., AES256, aws:kms). + ServerSideEncryption *string `location:"header" locationName:"x-amz-server-side-encryption" type:"string" enum:"ServerSideEncryption"` + + // Version of the object. + VersionId *string `location:"header" locationName:"x-amz-version-id" type:"string"` +} + +// String returns the string representation +func (s CompleteMultipartUploadOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CompleteMultipartUploadOutput) GoString() string { + return s.String() +} + +type CompletedMultipartUpload struct { + _ struct{} `type:"structure"` + + Parts []*CompletedPart `locationName:"Part" type:"list" flattened:"true"` +} + +// String returns the string representation +func (s CompletedMultipartUpload) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CompletedMultipartUpload) GoString() string { + return s.String() +} + +type CompletedPart struct { + _ struct{} `type:"structure"` + + // Entity tag returned when the part was uploaded. + ETag *string `type:"string"` + + // Part number that identifies the part. This is a positive integer between + // 1 and 10,000. + PartNumber *int64 `type:"integer"` +} + +// String returns the string representation +func (s CompletedPart) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CompletedPart) GoString() string { + return s.String() +} + +type Condition struct { + _ struct{} `type:"structure"` + + // The HTTP error code when the redirect is applied. In the event of an error, + // if the error code equals this value, then the specified redirect is applied. + // Required when parent element Condition is specified and sibling KeyPrefixEquals + // is not specified. If both are specified, then both must be true for the redirect + // to be applied. + HttpErrorCodeReturnedEquals *string `type:"string"` + + // The object key name prefix when the redirect is applied. For example, to + // redirect requests for ExamplePage.html, the key prefix will be ExamplePage.html. + // To redirect request for all pages with the prefix docs/, the key prefix will + // be /docs, which identifies all objects in the docs/ folder. Required when + // the parent element Condition is specified and sibling HttpErrorCodeReturnedEquals + // is not specified. If both conditions are specified, both must be true for + // the redirect to be applied. + KeyPrefixEquals *string `type:"string"` +} + +// String returns the string representation +func (s Condition) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Condition) GoString() string { + return s.String() +} + +type CopyObjectInput struct { + _ struct{} `type:"structure"` + + // The canned ACL to apply to the object. + ACL *string `location:"header" locationName:"x-amz-acl" type:"string" enum:"ObjectCannedACL"` + + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // Specifies caching behavior along the request/reply chain. + CacheControl *string `location:"header" locationName:"Cache-Control" type:"string"` + + // Specifies presentational information for the object. 
+ ContentDisposition *string `location:"header" locationName:"Content-Disposition" type:"string"` + + // Specifies what content encodings have been applied to the object and thus + // what decoding mechanisms must be applied to obtain the media-type referenced + // by the Content-Type header field. + ContentEncoding *string `location:"header" locationName:"Content-Encoding" type:"string"` + + // The language the content is in. + ContentLanguage *string `location:"header" locationName:"Content-Language" type:"string"` + + // A standard MIME type describing the format of the object data. + ContentType *string `location:"header" locationName:"Content-Type" type:"string"` + + // The name of the source bucket and key name of the source object, separated + // by a slash (/). Must be URL-encoded. + CopySource *string `location:"header" locationName:"x-amz-copy-source" type:"string" required:"true"` + + // Copies the object if its entity tag (ETag) matches the specified tag. + CopySourceIfMatch *string `location:"header" locationName:"x-amz-copy-source-if-match" type:"string"` + + // Copies the object if it has been modified since the specified time. + CopySourceIfModifiedSince *time.Time `location:"header" locationName:"x-amz-copy-source-if-modified-since" type:"timestamp" timestampFormat:"rfc822"` + + // Copies the object if its entity tag (ETag) is different than the specified + // ETag. + CopySourceIfNoneMatch *string `location:"header" locationName:"x-amz-copy-source-if-none-match" type:"string"` + + // Copies the object if it hasn't been modified since the specified time. + CopySourceIfUnmodifiedSince *time.Time `location:"header" locationName:"x-amz-copy-source-if-unmodified-since" type:"timestamp" timestampFormat:"rfc822"` + + // Specifies the algorithm to use when decrypting the source object (e.g., AES256). + CopySourceSSECustomerAlgorithm *string `location:"header" locationName:"x-amz-copy-source-server-side-encryption-customer-algorithm" type:"string"` + + // Specifies the customer-provided encryption key for Amazon S3 to use to decrypt + // the source object. The encryption key provided in this header must be one + // that was used when the source object was created. + CopySourceSSECustomerKey *string `location:"header" locationName:"x-amz-copy-source-server-side-encryption-customer-key" type:"string"` + + // Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321. + // Amazon S3 uses this header for a message integrity check to ensure the encryption + // key was transmitted without error. + CopySourceSSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-copy-source-server-side-encryption-customer-key-MD5" type:"string"` + + // The date and time at which the object is no longer cacheable. + Expires *time.Time `location:"header" locationName:"Expires" type:"timestamp" timestampFormat:"rfc822"` + + // Gives the grantee READ, READ_ACP, and WRITE_ACP permissions on the object. + GrantFullControl *string `location:"header" locationName:"x-amz-grant-full-control" type:"string"` + + // Allows grantee to read the object data and its metadata. + GrantRead *string `location:"header" locationName:"x-amz-grant-read" type:"string"` + + // Allows grantee to read the object ACL. + GrantReadACP *string `location:"header" locationName:"x-amz-grant-read-acp" type:"string"` + + // Allows grantee to write the ACL for the applicable object. 
+ GrantWriteACP *string `location:"header" locationName:"x-amz-grant-write-acp" type:"string"` + + Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"` + + // A map of metadata to store with the object in S3. + Metadata map[string]*string `location:"headers" locationName:"x-amz-meta-" type:"map"` + + // Specifies whether the metadata is copied from the source object or replaced + // with metadata provided in the request. + MetadataDirective *string `location:"header" locationName:"x-amz-metadata-directive" type:"string" enum:"MetadataDirective"` + + // Confirms that the requester knows that she or he will be charged for the + // request. Bucket owners need not specify this parameter in their requests. + // Documentation on downloading objects from requester pays buckets can be found + // at http://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html + RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"` + + // Specifies the algorithm to use to when encrypting the object (e.g., AES256). + SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"` + + // Specifies the customer-provided encryption key for Amazon S3 to use in encrypting + // data. This value is used to store the object and then it is discarded; Amazon + // does not store the encryption key. The key must be appropriate for use with + // the algorithm specified in the x-amz-server-side​-encryption​-customer-algorithm + // header. + SSECustomerKey *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key" type:"string"` + + // Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321. + // Amazon S3 uses this header for a message integrity check to ensure the encryption + // key was transmitted without error. + SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"` + + // Specifies the AWS KMS key ID to use for object encryption. All GET and PUT + // requests for an object protected by AWS KMS will fail if not made via SSL + // or using SigV4. Documentation on configuring any of the officially supported + // AWS SDKs and CLI can be found at http://docs.aws.amazon.com/AmazonS3/latest/dev/UsingAWSSDK.html#specify-signature-version + SSEKMSKeyId *string `location:"header" locationName:"x-amz-server-side-encryption-aws-kms-key-id" type:"string"` + + // The Server-side encryption algorithm used when storing this object in S3 + // (e.g., AES256, aws:kms). + ServerSideEncryption *string `location:"header" locationName:"x-amz-server-side-encryption" type:"string" enum:"ServerSideEncryption"` + + // The type of storage to use for the object. Defaults to 'STANDARD'. + StorageClass *string `location:"header" locationName:"x-amz-storage-class" type:"string" enum:"StorageClass"` + + // If the bucket is configured as a website, redirects requests for this object + // to another object in the same bucket or to an external URL. Amazon S3 stores + // the value of this header in the object metadata. 
+ WebsiteRedirectLocation *string `location:"header" locationName:"x-amz-website-redirect-location" type:"string"` +} + +// String returns the string representation +func (s CopyObjectInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CopyObjectInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *CopyObjectInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CopyObjectInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.CopySource == nil { + invalidParams.Add(request.NewErrParamRequired("CopySource")) + } + if s.Key == nil { + invalidParams.Add(request.NewErrParamRequired("Key")) + } + if s.Key != nil && len(*s.Key) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Key", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type CopyObjectOutput struct { + _ struct{} `type:"structure" payload:"CopyObjectResult"` + + CopyObjectResult *CopyObjectResult `type:"structure"` + + CopySourceVersionId *string `location:"header" locationName:"x-amz-copy-source-version-id" type:"string"` + + // If the object expiration is configured, the response includes this header. + Expiration *string `location:"header" locationName:"x-amz-expiration" type:"string"` + + // If present, indicates that the requester was successfully charged for the + // request. + RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"` + + // If server-side encryption with a customer-provided encryption key was requested, + // the response will include this header confirming the encryption algorithm + // used. + SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"` + + // If server-side encryption with a customer-provided encryption key was requested, + // the response will include this header to provide round trip message integrity + // verification of the customer-provided encryption key. + SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"` + + // If present, specifies the ID of the AWS Key Management Service (KMS) master + // encryption key that was used for the object. + SSEKMSKeyId *string `location:"header" locationName:"x-amz-server-side-encryption-aws-kms-key-id" type:"string"` + + // The Server-side encryption algorithm used when storing this object in S3 + // (e.g., AES256, aws:kms). + ServerSideEncryption *string `location:"header" locationName:"x-amz-server-side-encryption" type:"string" enum:"ServerSideEncryption"` + + // Version ID of the newly created copy. 
+ VersionId *string `location:"header" locationName:"x-amz-version-id" type:"string"` +} + +// String returns the string representation +func (s CopyObjectOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CopyObjectOutput) GoString() string { + return s.String() +} + +type CopyObjectResult struct { + _ struct{} `type:"structure"` + + ETag *string `type:"string"` + + LastModified *time.Time `type:"timestamp" timestampFormat:"iso8601"` +} + +// String returns the string representation +func (s CopyObjectResult) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CopyObjectResult) GoString() string { + return s.String() +} + +type CopyPartResult struct { + _ struct{} `type:"structure"` + + // Entity tag of the object. + ETag *string `type:"string"` + + // Date and time at which the object was uploaded. + LastModified *time.Time `type:"timestamp" timestampFormat:"iso8601"` +} + +// String returns the string representation +func (s CopyPartResult) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CopyPartResult) GoString() string { + return s.String() +} + +type CreateBucketConfiguration struct { + _ struct{} `type:"structure"` + + // Specifies the region where the bucket will be created. If you don't specify + // a region, the bucket will be created in US Standard. + LocationConstraint *string `type:"string" enum:"BucketLocationConstraint"` +} + +// String returns the string representation +func (s CreateBucketConfiguration) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateBucketConfiguration) GoString() string { + return s.String() +} + +type CreateBucketInput struct { + _ struct{} `type:"structure" payload:"CreateBucketConfiguration"` + + // The canned ACL to apply to the bucket. + ACL *string `location:"header" locationName:"x-amz-acl" type:"string" enum:"BucketCannedACL"` + + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + CreateBucketConfiguration *CreateBucketConfiguration `locationName:"CreateBucketConfiguration" type:"structure"` + + // Allows grantee the read, write, read ACP, and write ACP permissions on the + // bucket. + GrantFullControl *string `location:"header" locationName:"x-amz-grant-full-control" type:"string"` + + // Allows grantee to list the objects in the bucket. + GrantRead *string `location:"header" locationName:"x-amz-grant-read" type:"string"` + + // Allows grantee to read the bucket ACL. + GrantReadACP *string `location:"header" locationName:"x-amz-grant-read-acp" type:"string"` + + // Allows grantee to create, overwrite, and delete any object in the bucket. + GrantWrite *string `location:"header" locationName:"x-amz-grant-write" type:"string"` + + // Allows grantee to write the ACL for the applicable bucket. + GrantWriteACP *string `location:"header" locationName:"x-amz-grant-write-acp" type:"string"` +} + +// String returns the string representation +func (s CreateBucketInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateBucketInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
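Editorial aside (illustrative only, not part of the vendored SDK file): the CopyObjectInput, CopyObjectOutput, and CopyObjectResult structures above drive server-side copies, where CopySource names the source as "source-bucket/source-key". A minimal hedged sketch using placeholder bucket and key names follows.

```go
package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	// The region is an assumption for the example.
	sess, err := session.NewSession(&aws.Config{Region: aws.String("us-east-1")})
	if err != nil {
		log.Fatal(err)
	}
	svc := s3.New(sess)

	// Server-side copy: CopySource is "source-bucket/source-key" and must be URL-encoded.
	out, err := svc.CopyObject(&s3.CopyObjectInput{
		Bucket:     aws.String("destination-bucket"),        // hypothetical destination bucket
		Key:        aws.String("backups/report.txt"),        // hypothetical destination key
		CopySource: aws.String("source-bucket/report.txt"),  // hypothetical source
	})
	if err != nil {
		log.Fatalf("CopyObject failed: %v", err)
	}
	if out.CopyObjectResult != nil {
		fmt.Println("copied object, ETag:", aws.StringValue(out.CopyObjectResult.ETag))
	}
}
```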
+func (s *CreateBucketInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CreateBucketInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type CreateBucketOutput struct { + _ struct{} `type:"structure"` + + Location *string `location:"header" locationName:"Location" type:"string"` +} + +// String returns the string representation +func (s CreateBucketOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateBucketOutput) GoString() string { + return s.String() +} + +type CreateMultipartUploadInput struct { + _ struct{} `type:"structure"` + + // The canned ACL to apply to the object. + ACL *string `location:"header" locationName:"x-amz-acl" type:"string" enum:"ObjectCannedACL"` + + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // Specifies caching behavior along the request/reply chain. + CacheControl *string `location:"header" locationName:"Cache-Control" type:"string"` + + // Specifies presentational information for the object. + ContentDisposition *string `location:"header" locationName:"Content-Disposition" type:"string"` + + // Specifies what content encodings have been applied to the object and thus + // what decoding mechanisms must be applied to obtain the media-type referenced + // by the Content-Type header field. + ContentEncoding *string `location:"header" locationName:"Content-Encoding" type:"string"` + + // The language the content is in. + ContentLanguage *string `location:"header" locationName:"Content-Language" type:"string"` + + // A standard MIME type describing the format of the object data. + ContentType *string `location:"header" locationName:"Content-Type" type:"string"` + + // The date and time at which the object is no longer cacheable. + Expires *time.Time `location:"header" locationName:"Expires" type:"timestamp" timestampFormat:"rfc822"` + + // Gives the grantee READ, READ_ACP, and WRITE_ACP permissions on the object. + GrantFullControl *string `location:"header" locationName:"x-amz-grant-full-control" type:"string"` + + // Allows grantee to read the object data and its metadata. + GrantRead *string `location:"header" locationName:"x-amz-grant-read" type:"string"` + + // Allows grantee to read the object ACL. + GrantReadACP *string `location:"header" locationName:"x-amz-grant-read-acp" type:"string"` + + // Allows grantee to write the ACL for the applicable object. + GrantWriteACP *string `location:"header" locationName:"x-amz-grant-write-acp" type:"string"` + + Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"` + + // A map of metadata to store with the object in S3. + Metadata map[string]*string `location:"headers" locationName:"x-amz-meta-" type:"map"` + + // Confirms that the requester knows that she or he will be charged for the + // request. Bucket owners need not specify this parameter in their requests. + // Documentation on downloading objects from requester pays buckets can be found + // at http://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html + RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"` + + // Specifies the algorithm to use to when encrypting the object (e.g., AES256). 
+ SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"` + + // Specifies the customer-provided encryption key for Amazon S3 to use in encrypting + // data. This value is used to store the object and then it is discarded; Amazon + // does not store the encryption key. The key must be appropriate for use with + // the algorithm specified in the x-amz-server-side​-encryption​-customer-algorithm + // header. + SSECustomerKey *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key" type:"string"` + + // Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321. + // Amazon S3 uses this header for a message integrity check to ensure the encryption + // key was transmitted without error. + SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"` + + // Specifies the AWS KMS key ID to use for object encryption. All GET and PUT + // requests for an object protected by AWS KMS will fail if not made via SSL + // or using SigV4. Documentation on configuring any of the officially supported + // AWS SDKs and CLI can be found at http://docs.aws.amazon.com/AmazonS3/latest/dev/UsingAWSSDK.html#specify-signature-version + SSEKMSKeyId *string `location:"header" locationName:"x-amz-server-side-encryption-aws-kms-key-id" type:"string"` + + // The Server-side encryption algorithm used when storing this object in S3 + // (e.g., AES256, aws:kms). + ServerSideEncryption *string `location:"header" locationName:"x-amz-server-side-encryption" type:"string" enum:"ServerSideEncryption"` + + // The type of storage to use for the object. Defaults to 'STANDARD'. + StorageClass *string `location:"header" locationName:"x-amz-storage-class" type:"string" enum:"StorageClass"` + + // If the bucket is configured as a website, redirects requests for this object + // to another object in the same bucket or to an external URL. Amazon S3 stores + // the value of this header in the object metadata. + WebsiteRedirectLocation *string `location:"header" locationName:"x-amz-website-redirect-location" type:"string"` +} + +// String returns the string representation +func (s CreateMultipartUploadInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateMultipartUploadInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *CreateMultipartUploadInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CreateMultipartUploadInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Key == nil { + invalidParams.Add(request.NewErrParamRequired("Key")) + } + if s.Key != nil && len(*s.Key) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Key", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type CreateMultipartUploadOutput struct { + _ struct{} `type:"structure"` + + // Date when multipart upload will become eligible for abort operation by lifecycle. + AbortDate *time.Time `location:"header" locationName:"x-amz-abort-date" type:"timestamp" timestampFormat:"rfc822"` + + // Id of the lifecycle rule that makes a multipart upload eligible for abort + // operation. + AbortRuleId *string `location:"header" locationName:"x-amz-abort-rule-id" type:"string"` + + // Name of the bucket to which the multipart upload was initiated. 
+ Bucket *string `locationName:"Bucket" type:"string"` + + // Object key for which the multipart upload was initiated. + Key *string `min:"1" type:"string"` + + // If present, indicates that the requester was successfully charged for the + // request. + RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"` + + // If server-side encryption with a customer-provided encryption key was requested, + // the response will include this header confirming the encryption algorithm + // used. + SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"` + + // If server-side encryption with a customer-provided encryption key was requested, + // the response will include this header to provide round trip message integrity + // verification of the customer-provided encryption key. + SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"` + + // If present, specifies the ID of the AWS Key Management Service (KMS) master + // encryption key that was used for the object. + SSEKMSKeyId *string `location:"header" locationName:"x-amz-server-side-encryption-aws-kms-key-id" type:"string"` + + // The Server-side encryption algorithm used when storing this object in S3 + // (e.g., AES256, aws:kms). + ServerSideEncryption *string `location:"header" locationName:"x-amz-server-side-encryption" type:"string" enum:"ServerSideEncryption"` + + // ID for the initiated multipart upload. + UploadId *string `type:"string"` +} + +// String returns the string representation +func (s CreateMultipartUploadOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateMultipartUploadOutput) GoString() string { + return s.String() +} + +type Delete struct { + _ struct{} `type:"structure"` + + Objects []*ObjectIdentifier `locationName:"Object" type:"list" flattened:"true" required:"true"` + + // Element to enable quiet mode for the request. When you add this element, + // you must set its value to true. + Quiet *bool `type:"boolean"` +} + +// String returns the string representation +func (s Delete) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Delete) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *Delete) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "Delete"} + if s.Objects == nil { + invalidParams.Add(request.NewErrParamRequired("Objects")) + } + if s.Objects != nil { + for i, v := range s.Objects { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Objects", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type DeleteBucketCorsInput struct { + _ struct{} `type:"structure"` + + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteBucketCorsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteBucketCorsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *DeleteBucketCorsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteBucketCorsInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type DeleteBucketCorsOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteBucketCorsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteBucketCorsOutput) GoString() string { + return s.String() +} + +type DeleteBucketInput struct { + _ struct{} `type:"structure"` + + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteBucketInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteBucketInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeleteBucketInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteBucketInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type DeleteBucketLifecycleInput struct { + _ struct{} `type:"structure"` + + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteBucketLifecycleInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteBucketLifecycleInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeleteBucketLifecycleInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteBucketLifecycleInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type DeleteBucketLifecycleOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteBucketLifecycleOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteBucketLifecycleOutput) GoString() string { + return s.String() +} + +type DeleteBucketOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteBucketOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteBucketOutput) GoString() string { + return s.String() +} + +type DeleteBucketPolicyInput struct { + _ struct{} `type:"structure"` + + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteBucketPolicyInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteBucketPolicyInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *DeleteBucketPolicyInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteBucketPolicyInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type DeleteBucketPolicyOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteBucketPolicyOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteBucketPolicyOutput) GoString() string { + return s.String() +} + +type DeleteBucketReplicationInput struct { + _ struct{} `type:"structure"` + + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteBucketReplicationInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteBucketReplicationInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeleteBucketReplicationInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteBucketReplicationInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type DeleteBucketReplicationOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteBucketReplicationOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteBucketReplicationOutput) GoString() string { + return s.String() +} + +type DeleteBucketTaggingInput struct { + _ struct{} `type:"structure"` + + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteBucketTaggingInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteBucketTaggingInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeleteBucketTaggingInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteBucketTaggingInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type DeleteBucketTaggingOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteBucketTaggingOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteBucketTaggingOutput) GoString() string { + return s.String() +} + +type DeleteBucketWebsiteInput struct { + _ struct{} `type:"structure"` + + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteBucketWebsiteInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteBucketWebsiteInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *DeleteBucketWebsiteInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteBucketWebsiteInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type DeleteBucketWebsiteOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteBucketWebsiteOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteBucketWebsiteOutput) GoString() string { + return s.String() +} + +type DeleteMarkerEntry struct { + _ struct{} `type:"structure"` + + // Specifies whether the object is (true) or is not (false) the latest version + // of an object. + IsLatest *bool `type:"boolean"` + + // The object key. + Key *string `min:"1" type:"string"` + + // Date and time the object was last modified. + LastModified *time.Time `type:"timestamp" timestampFormat:"iso8601"` + + Owner *Owner `type:"structure"` + + // Version ID of an object. + VersionId *string `type:"string"` +} + +// String returns the string representation +func (s DeleteMarkerEntry) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteMarkerEntry) GoString() string { + return s.String() +} + +type DeleteObjectInput struct { + _ struct{} `type:"structure"` + + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"` + + // The concatenation of the authentication device's serial number, a space, + // and the value that is displayed on your authentication device. + MFA *string `location:"header" locationName:"x-amz-mfa" type:"string"` + + // Confirms that the requester knows that she or he will be charged for the + // request. Bucket owners need not specify this parameter in their requests. + // Documentation on downloading objects from requester pays buckets can be found + // at http://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html + RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"` + + // VersionId used to reference a specific version of the object. + VersionId *string `location:"querystring" locationName:"versionId" type:"string"` +} + +// String returns the string representation +func (s DeleteObjectInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteObjectInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeleteObjectInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteObjectInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Key == nil { + invalidParams.Add(request.NewErrParamRequired("Key")) + } + if s.Key != nil && len(*s.Key) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Key", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type DeleteObjectOutput struct { + _ struct{} `type:"structure"` + + // Specifies whether the versioned object that was permanently deleted was (true) + // or was not (false) a delete marker. 
+ DeleteMarker *bool `location:"header" locationName:"x-amz-delete-marker" type:"boolean"` + + // If present, indicates that the requester was successfully charged for the + // request. + RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"` + + // Returns the version ID of the delete marker created as a result of the DELETE + // operation. + VersionId *string `location:"header" locationName:"x-amz-version-id" type:"string"` +} + +// String returns the string representation +func (s DeleteObjectOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteObjectOutput) GoString() string { + return s.String() +} + +type DeleteObjectsInput struct { + _ struct{} `type:"structure" payload:"Delete"` + + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + Delete *Delete `locationName:"Delete" type:"structure" required:"true"` + + // The concatenation of the authentication device's serial number, a space, + // and the value that is displayed on your authentication device. + MFA *string `location:"header" locationName:"x-amz-mfa" type:"string"` + + // Confirms that the requester knows that she or he will be charged for the + // request. Bucket owners need not specify this parameter in their requests. + // Documentation on downloading objects from requester pays buckets can be found + // at http://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html + RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"` +} + +// String returns the string representation +func (s DeleteObjectsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteObjectsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeleteObjectsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteObjectsInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Delete == nil { + invalidParams.Add(request.NewErrParamRequired("Delete")) + } + if s.Delete != nil { + if err := s.Delete.Validate(); err != nil { + invalidParams.AddNested("Delete", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type DeleteObjectsOutput struct { + _ struct{} `type:"structure"` + + Deleted []*DeletedObject `type:"list" flattened:"true"` + + Errors []*Error `locationName:"Error" type:"list" flattened:"true"` + + // If present, indicates that the requester was successfully charged for the + // request. 
+ RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"` +} + +// String returns the string representation +func (s DeleteObjectsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteObjectsOutput) GoString() string { + return s.String() +} + +type DeletedObject struct { + _ struct{} `type:"structure"` + + DeleteMarker *bool `type:"boolean"` + + DeleteMarkerVersionId *string `type:"string"` + + Key *string `min:"1" type:"string"` + + VersionId *string `type:"string"` +} + +// String returns the string representation +func (s DeletedObject) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeletedObject) GoString() string { + return s.String() +} + +type Destination struct { + _ struct{} `type:"structure"` + + // Amazon resource name (ARN) of the bucket where you want Amazon S3 to store + // replicas of the object identified by the rule. + Bucket *string `type:"string" required:"true"` + + // The class of storage used to store the object. + StorageClass *string `type:"string" enum:"StorageClass"` +} + +// String returns the string representation +func (s Destination) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Destination) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *Destination) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "Destination"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type Error struct { + _ struct{} `type:"structure"` + + Code *string `type:"string"` + + Key *string `min:"1" type:"string"` + + Message *string `type:"string"` + + VersionId *string `type:"string"` +} + +// String returns the string representation +func (s Error) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Error) GoString() string { + return s.String() +} + +type ErrorDocument struct { + _ struct{} `type:"structure"` + + // The object key name to use when a 4XX class error occurs. + Key *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s ErrorDocument) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ErrorDocument) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ErrorDocument) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ErrorDocument"} + if s.Key == nil { + invalidParams.Add(request.NewErrParamRequired("Key")) + } + if s.Key != nil && len(*s.Key) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Key", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Container for key value pair that defines the criteria for the filter rule. +type FilterRule struct { + _ struct{} `type:"structure"` + + // Object key name prefix or suffix identifying one or more objects to which + // the filtering rule applies. Maximum prefix length can be up to 1,024 characters. + // Overlapping prefixes and suffixes are not supported. 
For more information, + // go to Configuring Event Notifications (http://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html) + // in the Amazon Simple Storage Service Developer Guide. + Name *string `type:"string" enum:"FilterRuleName"` + + Value *string `type:"string"` +} + +// String returns the string representation +func (s FilterRule) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s FilterRule) GoString() string { + return s.String() +} + +type GetBucketAccelerateConfigurationInput struct { + _ struct{} `type:"structure"` + + // Name of the bucket for which the accelerate configuration is retrieved. + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` +} + +// String returns the string representation +func (s GetBucketAccelerateConfigurationInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetBucketAccelerateConfigurationInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetBucketAccelerateConfigurationInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetBucketAccelerateConfigurationInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type GetBucketAccelerateConfigurationOutput struct { + _ struct{} `type:"structure"` + + // The accelerate configuration of the bucket. + Status *string `type:"string" enum:"BucketAccelerateStatus"` +} + +// String returns the string representation +func (s GetBucketAccelerateConfigurationOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetBucketAccelerateConfigurationOutput) GoString() string { + return s.String() +} + +type GetBucketAclInput struct { + _ struct{} `type:"structure"` + + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` +} + +// String returns the string representation +func (s GetBucketAclInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetBucketAclInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetBucketAclInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetBucketAclInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type GetBucketAclOutput struct { + _ struct{} `type:"structure"` + + // A list of grants. 
+ Grants []*Grant `locationName:"AccessControlList" locationNameList:"Grant" type:"list"` + + Owner *Owner `type:"structure"` +} + +// String returns the string representation +func (s GetBucketAclOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetBucketAclOutput) GoString() string { + return s.String() +} + +type GetBucketCorsInput struct { + _ struct{} `type:"structure"` + + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` +} + +// String returns the string representation +func (s GetBucketCorsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetBucketCorsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetBucketCorsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetBucketCorsInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type GetBucketCorsOutput struct { + _ struct{} `type:"structure"` + + CORSRules []*CORSRule `locationName:"CORSRule" type:"list" flattened:"true"` +} + +// String returns the string representation +func (s GetBucketCorsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetBucketCorsOutput) GoString() string { + return s.String() +} + +type GetBucketLifecycleConfigurationInput struct { + _ struct{} `type:"structure"` + + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` +} + +// String returns the string representation +func (s GetBucketLifecycleConfigurationInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetBucketLifecycleConfigurationInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetBucketLifecycleConfigurationInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetBucketLifecycleConfigurationInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type GetBucketLifecycleConfigurationOutput struct { + _ struct{} `type:"structure"` + + Rules []*LifecycleRule `locationName:"Rule" type:"list" flattened:"true"` +} + +// String returns the string representation +func (s GetBucketLifecycleConfigurationOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetBucketLifecycleConfigurationOutput) GoString() string { + return s.String() +} + +type GetBucketLifecycleInput struct { + _ struct{} `type:"structure"` + + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` +} + +// String returns the string representation +func (s GetBucketLifecycleInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetBucketLifecycleInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *GetBucketLifecycleInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetBucketLifecycleInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type GetBucketLifecycleOutput struct { + _ struct{} `type:"structure"` + + Rules []*Rule `locationName:"Rule" type:"list" flattened:"true"` +} + +// String returns the string representation +func (s GetBucketLifecycleOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetBucketLifecycleOutput) GoString() string { + return s.String() +} + +type GetBucketLocationInput struct { + _ struct{} `type:"structure"` + + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` +} + +// String returns the string representation +func (s GetBucketLocationInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetBucketLocationInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetBucketLocationInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetBucketLocationInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type GetBucketLocationOutput struct { + _ struct{} `type:"structure"` + + LocationConstraint *string `type:"string" enum:"BucketLocationConstraint"` +} + +// String returns the string representation +func (s GetBucketLocationOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetBucketLocationOutput) GoString() string { + return s.String() +} + +type GetBucketLoggingInput struct { + _ struct{} `type:"structure"` + + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` +} + +// String returns the string representation +func (s GetBucketLoggingInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetBucketLoggingInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetBucketLoggingInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetBucketLoggingInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type GetBucketLoggingOutput struct { + _ struct{} `type:"structure"` + + LoggingEnabled *LoggingEnabled `type:"structure"` +} + +// String returns the string representation +func (s GetBucketLoggingOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetBucketLoggingOutput) GoString() string { + return s.String() +} + +type GetBucketNotificationConfigurationRequest struct { + _ struct{} `type:"structure"` + + // Name of the bucket to get the notification configuration for. 
+ Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` +} + +// String returns the string representation +func (s GetBucketNotificationConfigurationRequest) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetBucketNotificationConfigurationRequest) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetBucketNotificationConfigurationRequest) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetBucketNotificationConfigurationRequest"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type GetBucketPolicyInput struct { + _ struct{} `type:"structure"` + + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` +} + +// String returns the string representation +func (s GetBucketPolicyInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetBucketPolicyInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetBucketPolicyInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetBucketPolicyInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type GetBucketPolicyOutput struct { + _ struct{} `type:"structure" payload:"Policy"` + + // The bucket policy as a JSON document. + Policy *string `type:"string"` +} + +// String returns the string representation +func (s GetBucketPolicyOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetBucketPolicyOutput) GoString() string { + return s.String() +} + +type GetBucketReplicationInput struct { + _ struct{} `type:"structure"` + + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` +} + +// String returns the string representation +func (s GetBucketReplicationInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetBucketReplicationInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetBucketReplicationInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetBucketReplicationInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type GetBucketReplicationOutput struct { + _ struct{} `type:"structure" payload:"ReplicationConfiguration"` + + // Container for replication rules. You can add as many as 1,000 rules. Total + // replication configuration size can be up to 2 MB. 
+ ReplicationConfiguration *ReplicationConfiguration `type:"structure"` +} + +// String returns the string representation +func (s GetBucketReplicationOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetBucketReplicationOutput) GoString() string { + return s.String() +} + +type GetBucketRequestPaymentInput struct { + _ struct{} `type:"structure"` + + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` +} + +// String returns the string representation +func (s GetBucketRequestPaymentInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetBucketRequestPaymentInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetBucketRequestPaymentInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetBucketRequestPaymentInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type GetBucketRequestPaymentOutput struct { + _ struct{} `type:"structure"` + + // Specifies who pays for the download and request fees. + Payer *string `type:"string" enum:"Payer"` +} + +// String returns the string representation +func (s GetBucketRequestPaymentOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetBucketRequestPaymentOutput) GoString() string { + return s.String() +} + +type GetBucketTaggingInput struct { + _ struct{} `type:"structure"` + + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` +} + +// String returns the string representation +func (s GetBucketTaggingInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetBucketTaggingInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetBucketTaggingInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetBucketTaggingInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type GetBucketTaggingOutput struct { + _ struct{} `type:"structure"` + + TagSet []*Tag `locationNameList:"Tag" type:"list" required:"true"` +} + +// String returns the string representation +func (s GetBucketTaggingOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetBucketTaggingOutput) GoString() string { + return s.String() +} + +type GetBucketVersioningInput struct { + _ struct{} `type:"structure"` + + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` +} + +// String returns the string representation +func (s GetBucketVersioningInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetBucketVersioningInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *GetBucketVersioningInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetBucketVersioningInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type GetBucketVersioningOutput struct { + _ struct{} `type:"structure"` + + // Specifies whether MFA delete is enabled in the bucket versioning configuration. + // This element is only returned if the bucket has been configured with MFA + // delete. If the bucket has never been so configured, this element is not returned. + MFADelete *string `locationName:"MfaDelete" type:"string" enum:"MFADeleteStatus"` + + // The versioning state of the bucket. + Status *string `type:"string" enum:"BucketVersioningStatus"` +} + +// String returns the string representation +func (s GetBucketVersioningOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetBucketVersioningOutput) GoString() string { + return s.String() +} + +type GetBucketWebsiteInput struct { + _ struct{} `type:"structure"` + + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` +} + +// String returns the string representation +func (s GetBucketWebsiteInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetBucketWebsiteInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetBucketWebsiteInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetBucketWebsiteInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type GetBucketWebsiteOutput struct { + _ struct{} `type:"structure"` + + ErrorDocument *ErrorDocument `type:"structure"` + + IndexDocument *IndexDocument `type:"structure"` + + RedirectAllRequestsTo *RedirectAllRequestsTo `type:"structure"` + + RoutingRules []*RoutingRule `locationNameList:"RoutingRule" type:"list"` +} + +// String returns the string representation +func (s GetBucketWebsiteOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetBucketWebsiteOutput) GoString() string { + return s.String() +} + +type GetObjectAclInput struct { + _ struct{} `type:"structure"` + + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"` + + // Confirms that the requester knows that she or he will be charged for the + // request. Bucket owners need not specify this parameter in their requests. + // Documentation on downloading objects from requester pays buckets can be found + // at http://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html + RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"` + + // VersionId used to reference a specific version of the object. 
+ VersionId *string `location:"querystring" locationName:"versionId" type:"string"` +} + +// String returns the string representation +func (s GetObjectAclInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetObjectAclInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetObjectAclInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetObjectAclInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Key == nil { + invalidParams.Add(request.NewErrParamRequired("Key")) + } + if s.Key != nil && len(*s.Key) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Key", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type GetObjectAclOutput struct { + _ struct{} `type:"structure"` + + // A list of grants. + Grants []*Grant `locationName:"AccessControlList" locationNameList:"Grant" type:"list"` + + Owner *Owner `type:"structure"` + + // If present, indicates that the requester was successfully charged for the + // request. + RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"` +} + +// String returns the string representation +func (s GetObjectAclOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetObjectAclOutput) GoString() string { + return s.String() +} + +type GetObjectInput struct { + _ struct{} `type:"structure"` + + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // Return the object only if its entity tag (ETag) is the same as the one specified, + // otherwise return a 412 (precondition failed). + IfMatch *string `location:"header" locationName:"If-Match" type:"string"` + + // Return the object only if it has been modified since the specified time, + // otherwise return a 304 (not modified). + IfModifiedSince *time.Time `location:"header" locationName:"If-Modified-Since" type:"timestamp" timestampFormat:"rfc822"` + + // Return the object only if its entity tag (ETag) is different from the one + // specified, otherwise return a 304 (not modified). + IfNoneMatch *string `location:"header" locationName:"If-None-Match" type:"string"` + + // Return the object only if it has not been modified since the specified time, + // otherwise return a 412 (precondition failed). + IfUnmodifiedSince *time.Time `location:"header" locationName:"If-Unmodified-Since" type:"timestamp" timestampFormat:"rfc822"` + + Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"` + + // Downloads the specified range bytes of an object. For more information about + // the HTTP Range header, go to http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.35. + Range *string `location:"header" locationName:"Range" type:"string"` + + // Confirms that the requester knows that she or he will be charged for the + // request. Bucket owners need not specify this parameter in their requests. + // Documentation on downloading objects from requester pays buckets can be found + // at http://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html + RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"` + + // Sets the Cache-Control header of the response. 
+ ResponseCacheControl *string `location:"querystring" locationName:"response-cache-control" type:"string"` + + // Sets the Content-Disposition header of the response. + ResponseContentDisposition *string `location:"querystring" locationName:"response-content-disposition" type:"string"` + + // Sets the Content-Encoding header of the response. + ResponseContentEncoding *string `location:"querystring" locationName:"response-content-encoding" type:"string"` + + // Sets the Content-Language header of the response. + ResponseContentLanguage *string `location:"querystring" locationName:"response-content-language" type:"string"` + + // Sets the Content-Type header of the response. + ResponseContentType *string `location:"querystring" locationName:"response-content-type" type:"string"` + + // Sets the Expires header of the response. + ResponseExpires *time.Time `location:"querystring" locationName:"response-expires" type:"timestamp" timestampFormat:"iso8601"` + + // Specifies the algorithm to use when encrypting the object (e.g., AES256). + SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"` + + // Specifies the customer-provided encryption key for Amazon S3 to use in encrypting + // data. This value is used to store the object and then it is discarded; Amazon + // does not store the encryption key. The key must be appropriate for use with + // the algorithm specified in the x-amz-server-side-encryption-customer-algorithm + // header. + SSECustomerKey *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key" type:"string"` + + // Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321. + // Amazon S3 uses this header for a message integrity check to ensure the encryption + // key was transmitted without error. + SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"` + + // VersionId used to reference a specific version of the object. + VersionId *string `location:"querystring" locationName:"versionId" type:"string"` +} + +// String returns the string representation +func (s GetObjectInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetObjectInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetObjectInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetObjectInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Key == nil { + invalidParams.Add(request.NewErrParamRequired("Key")) + } + if s.Key != nil && len(*s.Key) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Key", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type GetObjectOutput struct { + _ struct{} `type:"structure" payload:"Body"` + + AcceptRanges *string `location:"header" locationName:"accept-ranges" type:"string"` + + // Object data. + Body io.ReadCloser `type:"blob"` + + // Specifies caching behavior along the request/reply chain. + CacheControl *string `location:"header" locationName:"Cache-Control" type:"string"` + + // Specifies presentational information for the object.
+ ContentDisposition *string `location:"header" locationName:"Content-Disposition" type:"string"` + + // Specifies what content encodings have been applied to the object and thus + // what decoding mechanisms must be applied to obtain the media-type referenced + // by the Content-Type header field. + ContentEncoding *string `location:"header" locationName:"Content-Encoding" type:"string"` + + // The language the content is in. + ContentLanguage *string `location:"header" locationName:"Content-Language" type:"string"` + + // Size of the body in bytes. + ContentLength *int64 `location:"header" locationName:"Content-Length" type:"long"` + + // The portion of the object returned in the response. + ContentRange *string `location:"header" locationName:"Content-Range" type:"string"` + + // A standard MIME type describing the format of the object data. + ContentType *string `location:"header" locationName:"Content-Type" type:"string"` + + // Specifies whether the object retrieved was (true) or was not (false) a Delete + // Marker. If false, this response header does not appear in the response. + DeleteMarker *bool `location:"header" locationName:"x-amz-delete-marker" type:"boolean"` + + // An ETag is an opaque identifier assigned by a web server to a specific version + // of a resource found at a URL + ETag *string `location:"header" locationName:"ETag" type:"string"` + + // If the object expiration is configured (see PUT Bucket lifecycle), the response + // includes this header. It includes the expiry-date and rule-id key value pairs + // providing object expiration information. The value of the rule-id is URL + // encoded. + Expiration *string `location:"header" locationName:"x-amz-expiration" type:"string"` + + // The date and time at which the object is no longer cacheable. + Expires *string `location:"header" locationName:"Expires" type:"string"` + + // Last modified date of the object + LastModified *time.Time `location:"header" locationName:"Last-Modified" type:"timestamp" timestampFormat:"rfc822"` + + // A map of metadata to store with the object in S3. + Metadata map[string]*string `location:"headers" locationName:"x-amz-meta-" type:"map"` + + // This is set to the number of metadata entries not returned in x-amz-meta + // headers. This can happen if you create metadata using an API like SOAP that + // supports more flexible metadata than the REST API. For example, using SOAP, + // you can create metadata whose values are not legal HTTP headers. + MissingMeta *int64 `location:"header" locationName:"x-amz-missing-meta" type:"integer"` + + ReplicationStatus *string `location:"header" locationName:"x-amz-replication-status" type:"string" enum:"ReplicationStatus"` + + // If present, indicates that the requester was successfully charged for the + // request. + RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"` + + // Provides information about object restoration operation and expiration time + // of the restored object copy. + Restore *string `location:"header" locationName:"x-amz-restore" type:"string"` + + // If server-side encryption with a customer-provided encryption key was requested, + // the response will include this header confirming the encryption algorithm + // used. 
+ SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"` + + // If server-side encryption with a customer-provided encryption key was requested, + // the response will include this header to provide round trip message integrity + // verification of the customer-provided encryption key. + SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"` + + // If present, specifies the ID of the AWS Key Management Service (KMS) master + // encryption key that was used for the object. + SSEKMSKeyId *string `location:"header" locationName:"x-amz-server-side-encryption-aws-kms-key-id" type:"string"` + + // The Server-side encryption algorithm used when storing this object in S3 + // (e.g., AES256, aws:kms). + ServerSideEncryption *string `location:"header" locationName:"x-amz-server-side-encryption" type:"string" enum:"ServerSideEncryption"` + + StorageClass *string `location:"header" locationName:"x-amz-storage-class" type:"string" enum:"StorageClass"` + + // Version of the object. + VersionId *string `location:"header" locationName:"x-amz-version-id" type:"string"` + + // If the bucket is configured as a website, redirects requests for this object + // to another object in the same bucket or to an external URL. Amazon S3 stores + // the value of this header in the object metadata. + WebsiteRedirectLocation *string `location:"header" locationName:"x-amz-website-redirect-location" type:"string"` +} + +// String returns the string representation +func (s GetObjectOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetObjectOutput) GoString() string { + return s.String() +} + +type GetObjectTorrentInput struct { + _ struct{} `type:"structure"` + + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"` + + // Confirms that the requester knows that she or he will be charged for the + // request. Bucket owners need not specify this parameter in their requests. + // Documentation on downloading objects from requester pays buckets can be found + // at http://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html + RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"` +} + +// String returns the string representation +func (s GetObjectTorrentInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetObjectTorrentInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetObjectTorrentInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetObjectTorrentInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Key == nil { + invalidParams.Add(request.NewErrParamRequired("Key")) + } + if s.Key != nil && len(*s.Key) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Key", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type GetObjectTorrentOutput struct { + _ struct{} `type:"structure" payload:"Body"` + + Body io.ReadCloser `type:"blob"` + + // If present, indicates that the requester was successfully charged for the + // request. 
+ RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"` +} + +// String returns the string representation +func (s GetObjectTorrentOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetObjectTorrentOutput) GoString() string { + return s.String() +} + +type Grant struct { + _ struct{} `type:"structure"` + + Grantee *Grantee `type:"structure"` + + // Specifies the permission given to the grantee. + Permission *string `type:"string" enum:"Permission"` +} + +// String returns the string representation +func (s Grant) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Grant) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *Grant) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "Grant"} + if s.Grantee != nil { + if err := s.Grantee.Validate(); err != nil { + invalidParams.AddNested("Grantee", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type Grantee struct { + _ struct{} `type:"structure" xmlPrefix:"xsi" xmlURI:"http://www.w3.org/2001/XMLSchema-instance"` + + // Screen name of the grantee. + DisplayName *string `type:"string"` + + // Email address of the grantee. + EmailAddress *string `type:"string"` + + // The canonical user ID of the grantee. + ID *string `type:"string"` + + // Type of grantee + Type *string `locationName:"xsi:type" type:"string" xmlAttribute:"true" required:"true" enum:"Type"` + + // URI of the grantee group. + URI *string `type:"string"` +} + +// String returns the string representation +func (s Grantee) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Grantee) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *Grantee) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "Grantee"} + if s.Type == nil { + invalidParams.Add(request.NewErrParamRequired("Type")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type HeadBucketInput struct { + _ struct{} `type:"structure"` + + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` +} + +// String returns the string representation +func (s HeadBucketInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s HeadBucketInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *HeadBucketInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "HeadBucketInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type HeadBucketOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s HeadBucketOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s HeadBucketOutput) GoString() string { + return s.String() +} + +type HeadObjectInput struct { + _ struct{} `type:"structure"` + + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // Return the object only if its entity tag (ETag) is the same as the one specified, + // otherwise return a 412 (precondition failed). + IfMatch *string `location:"header" locationName:"If-Match" type:"string"` + + // Return the object only if it has been modified since the specified time, + // otherwise return a 304 (not modified). + IfModifiedSince *time.Time `location:"header" locationName:"If-Modified-Since" type:"timestamp" timestampFormat:"rfc822"` + + // Return the object only if its entity tag (ETag) is different from the one + // specified, otherwise return a 304 (not modified). + IfNoneMatch *string `location:"header" locationName:"If-None-Match" type:"string"` + + // Return the object only if it has not been modified since the specified time, + // otherwise return a 412 (precondition failed). + IfUnmodifiedSince *time.Time `location:"header" locationName:"If-Unmodified-Since" type:"timestamp" timestampFormat:"rfc822"` + + Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"` + + // Downloads the specified range bytes of an object. For more information about + // the HTTP Range header, go to http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.35. + Range *string `location:"header" locationName:"Range" type:"string"` + + // Confirms that the requester knows that she or he will be charged for the + // request. Bucket owners need not specify this parameter in their requests. + // Documentation on downloading objects from requester pays buckets can be found + // at http://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html + RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"` + + // Specifies the algorithm to use when encrypting the object (e.g., AES256). + SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"` + + // Specifies the customer-provided encryption key for Amazon S3 to use in encrypting + // data. This value is used to store the object and then it is discarded; Amazon + // does not store the encryption key. The key must be appropriate for use with + // the algorithm specified in the x-amz-server-side-encryption-customer-algorithm + // header. + SSECustomerKey *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key" type:"string"` + + // Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321. + // Amazon S3 uses this header for a message integrity check to ensure the encryption + // key was transmitted without error.
+ SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"` + + // VersionId used to reference a specific version of the object. + VersionId *string `location:"querystring" locationName:"versionId" type:"string"` +} + +// String returns the string representation +func (s HeadObjectInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s HeadObjectInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *HeadObjectInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "HeadObjectInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Key == nil { + invalidParams.Add(request.NewErrParamRequired("Key")) + } + if s.Key != nil && len(*s.Key) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Key", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type HeadObjectOutput struct { + _ struct{} `type:"structure"` + + AcceptRanges *string `location:"header" locationName:"accept-ranges" type:"string"` + + // Specifies caching behavior along the request/reply chain. + CacheControl *string `location:"header" locationName:"Cache-Control" type:"string"` + + // Specifies presentational information for the object. + ContentDisposition *string `location:"header" locationName:"Content-Disposition" type:"string"` + + // Specifies what content encodings have been applied to the object and thus + // what decoding mechanisms must be applied to obtain the media-type referenced + // by the Content-Type header field. + ContentEncoding *string `location:"header" locationName:"Content-Encoding" type:"string"` + + // The language the content is in. + ContentLanguage *string `location:"header" locationName:"Content-Language" type:"string"` + + // Size of the body in bytes. + ContentLength *int64 `location:"header" locationName:"Content-Length" type:"long"` + + // A standard MIME type describing the format of the object data. + ContentType *string `location:"header" locationName:"Content-Type" type:"string"` + + // Specifies whether the object retrieved was (true) or was not (false) a Delete + // Marker. If false, this response header does not appear in the response. + DeleteMarker *bool `location:"header" locationName:"x-amz-delete-marker" type:"boolean"` + + // An ETag is an opaque identifier assigned by a web server to a specific version + // of a resource found at a URL + ETag *string `location:"header" locationName:"ETag" type:"string"` + + // If the object expiration is configured (see PUT Bucket lifecycle), the response + // includes this header. It includes the expiry-date and rule-id key value pairs + // providing object expiration information. The value of the rule-id is URL + // encoded. + Expiration *string `location:"header" locationName:"x-amz-expiration" type:"string"` + + // The date and time at which the object is no longer cacheable. + Expires *string `location:"header" locationName:"Expires" type:"string"` + + // Last modified date of the object + LastModified *time.Time `location:"header" locationName:"Last-Modified" type:"timestamp" timestampFormat:"rfc822"` + + // A map of metadata to store with the object in S3. + Metadata map[string]*string `location:"headers" locationName:"x-amz-meta-" type:"map"` + + // This is set to the number of metadata entries not returned in x-amz-meta + // headers. 
This can happen if you create metadata using an API like SOAP that + // supports more flexible metadata than the REST API. For example, using SOAP, + // you can create metadata whose values are not legal HTTP headers. + MissingMeta *int64 `location:"header" locationName:"x-amz-missing-meta" type:"integer"` + + ReplicationStatus *string `location:"header" locationName:"x-amz-replication-status" type:"string" enum:"ReplicationStatus"` + + // If present, indicates that the requester was successfully charged for the + // request. + RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"` + + // Provides information about object restoration operation and expiration time + // of the restored object copy. + Restore *string `location:"header" locationName:"x-amz-restore" type:"string"` + + // If server-side encryption with a customer-provided encryption key was requested, + // the response will include this header confirming the encryption algorithm + // used. + SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"` + + // If server-side encryption with a customer-provided encryption key was requested, + // the response will include this header to provide round trip message integrity + // verification of the customer-provided encryption key. + SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"` + + // If present, specifies the ID of the AWS Key Management Service (KMS) master + // encryption key that was used for the object. + SSEKMSKeyId *string `location:"header" locationName:"x-amz-server-side-encryption-aws-kms-key-id" type:"string"` + + // The Server-side encryption algorithm used when storing this object in S3 + // (e.g., AES256, aws:kms). + ServerSideEncryption *string `location:"header" locationName:"x-amz-server-side-encryption" type:"string" enum:"ServerSideEncryption"` + + StorageClass *string `location:"header" locationName:"x-amz-storage-class" type:"string" enum:"StorageClass"` + + // Version of the object. + VersionId *string `location:"header" locationName:"x-amz-version-id" type:"string"` + + // If the bucket is configured as a website, redirects requests for this object + // to another object in the same bucket or to an external URL. Amazon S3 stores + // the value of this header in the object metadata. + WebsiteRedirectLocation *string `location:"header" locationName:"x-amz-website-redirect-location" type:"string"` +} + +// String returns the string representation +func (s HeadObjectOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s HeadObjectOutput) GoString() string { + return s.String() +} + +type IndexDocument struct { + _ struct{} `type:"structure"` + + // A suffix that is appended to a request that is for a directory on the website + // endpoint (e.g. if the suffix is index.html and you make a request to samplebucket/images/ + // the data that is returned will be for the object with the key name images/index.html) + // The suffix must not be empty and must not include a slash character. 
+ Suffix *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s IndexDocument) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s IndexDocument) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *IndexDocument) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "IndexDocument"} + if s.Suffix == nil { + invalidParams.Add(request.NewErrParamRequired("Suffix")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type Initiator struct { + _ struct{} `type:"structure"` + + // Name of the Principal. + DisplayName *string `type:"string"` + + // If the principal is an AWS account, it provides the Canonical User ID. If + // the principal is an IAM User, it provides a user ARN value. + ID *string `type:"string"` +} + +// String returns the string representation +func (s Initiator) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Initiator) GoString() string { + return s.String() +} + +// Container for object key name prefix and suffix filtering rules. +type KeyFilter struct { + _ struct{} `type:"structure"` + + // A list of containers for key value pair that defines the criteria for the + // filter rule. + FilterRules []*FilterRule `locationName:"FilterRule" type:"list" flattened:"true"` +} + +// String returns the string representation +func (s KeyFilter) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s KeyFilter) GoString() string { + return s.String() +} + +// Container for specifying the AWS Lambda notification configuration. +type LambdaFunctionConfiguration struct { + _ struct{} `type:"structure"` + + Events []*string `locationName:"Event" type:"list" flattened:"true" required:"true"` + + // Container for object key name filtering rules. For information about key + // name filtering, go to Configuring Event Notifications (http://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html) + // in the Amazon Simple Storage Service Developer Guide. + Filter *NotificationConfigurationFilter `type:"structure"` + + // Optional unique identifier for configurations in a notification configuration. + // If you don't provide one, Amazon S3 will assign an ID. + Id *string `type:"string"` + + // Lambda cloud function ARN that Amazon S3 can invoke when it detects events + // of the specified type. + LambdaFunctionArn *string `locationName:"CloudFunction" type:"string" required:"true"` +} + +// String returns the string representation +func (s LambdaFunctionConfiguration) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s LambdaFunctionConfiguration) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *LambdaFunctionConfiguration) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "LambdaFunctionConfiguration"} + if s.Events == nil { + invalidParams.Add(request.NewErrParamRequired("Events")) + } + if s.LambdaFunctionArn == nil { + invalidParams.Add(request.NewErrParamRequired("LambdaFunctionArn")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type LifecycleConfiguration struct { + _ struct{} `type:"structure"` + + Rules []*Rule `locationName:"Rule" type:"list" flattened:"true" required:"true"` +} + +// String returns the string representation +func (s LifecycleConfiguration) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s LifecycleConfiguration) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *LifecycleConfiguration) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "LifecycleConfiguration"} + if s.Rules == nil { + invalidParams.Add(request.NewErrParamRequired("Rules")) + } + if s.Rules != nil { + for i, v := range s.Rules { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Rules", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type LifecycleExpiration struct { + _ struct{} `type:"structure"` + + // Indicates at what date the object is to be moved or deleted. Should be in + // GMT ISO 8601 Format. + Date *time.Time `type:"timestamp" timestampFormat:"iso8601"` + + // Indicates the lifetime, in days, of the objects that are subject to the rule. + // The value must be a non-zero positive integer. + Days *int64 `type:"integer"` + + // Indicates whether Amazon S3 will remove a delete marker with no noncurrent + // versions. If set to true, the delete marker will be expired; if set to false + // the policy takes no action. This cannot be specified with Days or Date in + // a Lifecycle Expiration Policy. + ExpiredObjectDeleteMarker *bool `type:"boolean"` +} + +// String returns the string representation +func (s LifecycleExpiration) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s LifecycleExpiration) GoString() string { + return s.String() +} + +type LifecycleRule struct { + _ struct{} `type:"structure"` + + // Specifies the days since the initiation of an Incomplete Multipart Upload + // that Lifecycle will wait before permanently removing all parts of the upload. + AbortIncompleteMultipartUpload *AbortIncompleteMultipartUpload `type:"structure"` + + Expiration *LifecycleExpiration `type:"structure"` + + // Unique identifier for the rule. The value cannot be longer than 255 characters. + ID *string `type:"string"` + + // Specifies when noncurrent object versions expire. Upon expiration, Amazon + // S3 permanently deletes the noncurrent object versions. You set this lifecycle + // configuration action on a bucket that has versioning enabled (or suspended) + // to request that Amazon S3 delete noncurrent object versions at a specific + // period in the object's lifetime. + NoncurrentVersionExpiration *NoncurrentVersionExpiration `type:"structure"` + + NoncurrentVersionTransitions []*NoncurrentVersionTransition `locationName:"NoncurrentVersionTransition" type:"list" flattened:"true"` + + // Prefix identifying one or more objects to which the rule applies. 
+ Prefix *string `type:"string" required:"true"` + + // If 'Enabled', the rule is currently being applied. If 'Disabled', the rule + // is not currently being applied. + Status *string `type:"string" required:"true" enum:"ExpirationStatus"` + + Transitions []*Transition `locationName:"Transition" type:"list" flattened:"true"` +} + +// String returns the string representation +func (s LifecycleRule) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s LifecycleRule) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *LifecycleRule) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "LifecycleRule"} + if s.Prefix == nil { + invalidParams.Add(request.NewErrParamRequired("Prefix")) + } + if s.Status == nil { + invalidParams.Add(request.NewErrParamRequired("Status")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type ListBucketsInput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s ListBucketsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListBucketsInput) GoString() string { + return s.String() +} + +type ListBucketsOutput struct { + _ struct{} `type:"structure"` + + Buckets []*Bucket `locationNameList:"Bucket" type:"list"` + + Owner *Owner `type:"structure"` +} + +// String returns the string representation +func (s ListBucketsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListBucketsOutput) GoString() string { + return s.String() +} + +type ListMultipartUploadsInput struct { + _ struct{} `type:"structure"` + + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // Character you use to group keys. + Delimiter *string `location:"querystring" locationName:"delimiter" type:"string"` + + // Requests Amazon S3 to encode the object keys in the response and specifies + // the encoding method to use. An object key may contain any Unicode character; + // however, XML 1.0 parser cannot parse some characters, such as characters + // with an ASCII value from 0 to 10. For characters that are not supported in + // XML 1.0, you can add this parameter to request that Amazon S3 encode the + // keys in the response. + EncodingType *string `location:"querystring" locationName:"encoding-type" type:"string" enum:"EncodingType"` + + // Together with upload-id-marker, this parameter specifies the multipart upload + // after which listing should begin. + KeyMarker *string `location:"querystring" locationName:"key-marker" type:"string"` + + // Sets the maximum number of multipart uploads, from 1 to 1,000, to return + // in the response body. 1,000 is the maximum number of uploads that can be + // returned in a response. + MaxUploads *int64 `location:"querystring" locationName:"max-uploads" type:"integer"` + + // Lists in-progress uploads only for those keys that begin with the specified + // prefix. + Prefix *string `location:"querystring" locationName:"prefix" type:"string"` + + // Together with key-marker, specifies the multipart upload after which listing + // should begin. If key-marker is not specified, the upload-id-marker parameter + // is ignored. 
+ UploadIdMarker *string `location:"querystring" locationName:"upload-id-marker" type:"string"` +} + +// String returns the string representation +func (s ListMultipartUploadsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListMultipartUploadsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ListMultipartUploadsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ListMultipartUploadsInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type ListMultipartUploadsOutput struct { + _ struct{} `type:"structure"` + + // Name of the bucket to which the multipart upload was initiated. + Bucket *string `type:"string"` + + CommonPrefixes []*CommonPrefix `type:"list" flattened:"true"` + + Delimiter *string `type:"string"` + + // Encoding type used by Amazon S3 to encode object keys in the response. + EncodingType *string `type:"string" enum:"EncodingType"` + + // Indicates whether the returned list of multipart uploads is truncated. A + // value of true indicates that the list was truncated. The list can be truncated + // if the number of multipart uploads exceeds the limit allowed or specified + // by max uploads. + IsTruncated *bool `type:"boolean"` + + // The key at or after which the listing began. + KeyMarker *string `type:"string"` + + // Maximum number of multipart uploads that could have been included in the + // response. + MaxUploads *int64 `type:"integer"` + + // When a list is truncated, this element specifies the value that should be + // used for the key-marker request parameter in a subsequent request. + NextKeyMarker *string `type:"string"` + + // When a list is truncated, this element specifies the value that should be + // used for the upload-id-marker request parameter in a subsequent request. + NextUploadIdMarker *string `type:"string"` + + // When a prefix is provided in the request, this field contains the specified + // prefix. The result contains only keys starting with the specified prefix. + Prefix *string `type:"string"` + + // Upload ID after which listing began. + UploadIdMarker *string `type:"string"` + + Uploads []*MultipartUpload `locationName:"Upload" type:"list" flattened:"true"` +} + +// String returns the string representation +func (s ListMultipartUploadsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListMultipartUploadsOutput) GoString() string { + return s.String() +} + +type ListObjectVersionsInput struct { + _ struct{} `type:"structure"` + + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // A delimiter is a character you use to group keys. + Delimiter *string `location:"querystring" locationName:"delimiter" type:"string"` + + // Requests Amazon S3 to encode the object keys in the response and specifies + // the encoding method to use. An object key may contain any Unicode character; + // however, XML 1.0 parser cannot parse some characters, such as characters + // with an ASCII value from 0 to 10. For characters that are not supported in + // XML 1.0, you can add this parameter to request that Amazon S3 encode the + // keys in the response. 
+ EncodingType *string `location:"querystring" locationName:"encoding-type" type:"string" enum:"EncodingType"` + + // Specifies the key to start with when listing objects in a bucket. + KeyMarker *string `location:"querystring" locationName:"key-marker" type:"string"` + + // Sets the maximum number of keys returned in the response. The response might + // contain fewer keys but will never contain more. + MaxKeys *int64 `location:"querystring" locationName:"max-keys" type:"integer"` + + // Limits the response to keys that begin with the specified prefix. + Prefix *string `location:"querystring" locationName:"prefix" type:"string"` + + // Specifies the object version you want to start listing from. + VersionIdMarker *string `location:"querystring" locationName:"version-id-marker" type:"string"` +} + +// String returns the string representation +func (s ListObjectVersionsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListObjectVersionsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ListObjectVersionsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ListObjectVersionsInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type ListObjectVersionsOutput struct { + _ struct{} `type:"structure"` + + CommonPrefixes []*CommonPrefix `type:"list" flattened:"true"` + + DeleteMarkers []*DeleteMarkerEntry `locationName:"DeleteMarker" type:"list" flattened:"true"` + + Delimiter *string `type:"string"` + + // Encoding type used by Amazon S3 to encode object keys in the response. + EncodingType *string `type:"string" enum:"EncodingType"` + + // A flag that indicates whether or not Amazon S3 returned all of the results + // that satisfied the search criteria. If your results were truncated, you can + // make a follow-up paginated request using the NextKeyMarker and NextVersionIdMarker + // response parameters as a starting place in another request to return the + // rest of the results. + IsTruncated *bool `type:"boolean"` + + // Marks the last Key returned in a truncated response. + KeyMarker *string `type:"string"` + + MaxKeys *int64 `type:"integer"` + + Name *string `type:"string"` + + // Use this value for the key marker request parameter in a subsequent request. + NextKeyMarker *string `type:"string"` + + // Use this value for the next version id marker parameter in a subsequent request. + NextVersionIdMarker *string `type:"string"` + + Prefix *string `type:"string"` + + VersionIdMarker *string `type:"string"` + + Versions []*ObjectVersion `locationName:"Version" type:"list" flattened:"true"` +} + +// String returns the string representation +func (s ListObjectVersionsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListObjectVersionsOutput) GoString() string { + return s.String() +} + +type ListObjectsInput struct { + _ struct{} `type:"structure"` + + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // A delimiter is a character you use to group keys. + Delimiter *string `location:"querystring" locationName:"delimiter" type:"string"` + + // Requests Amazon S3 to encode the object keys in the response and specifies + // the encoding method to use. 
An object key may contain any Unicode character; + // however, XML 1.0 parser cannot parse some characters, such as characters + // with an ASCII value from 0 to 10. For characters that are not supported in + // XML 1.0, you can add this parameter to request that Amazon S3 encode the + // keys in the response. + EncodingType *string `location:"querystring" locationName:"encoding-type" type:"string" enum:"EncodingType"` + + // Specifies the key to start with when listing objects in a bucket. + Marker *string `location:"querystring" locationName:"marker" type:"string"` + + // Sets the maximum number of keys returned in the response. The response might + // contain fewer keys but will never contain more. + MaxKeys *int64 `location:"querystring" locationName:"max-keys" type:"integer"` + + // Limits the response to keys that begin with the specified prefix. + Prefix *string `location:"querystring" locationName:"prefix" type:"string"` +} + +// String returns the string representation +func (s ListObjectsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListObjectsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ListObjectsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ListObjectsInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type ListObjectsOutput struct { + _ struct{} `type:"structure"` + + CommonPrefixes []*CommonPrefix `type:"list" flattened:"true"` + + Contents []*Object `type:"list" flattened:"true"` + + Delimiter *string `type:"string"` + + // Encoding type used by Amazon S3 to encode object keys in the response. + EncodingType *string `type:"string" enum:"EncodingType"` + + // A flag that indicates whether or not Amazon S3 returned all of the results + // that satisfied the search criteria. + IsTruncated *bool `type:"boolean"` + + Marker *string `type:"string"` + + MaxKeys *int64 `type:"integer"` + + Name *string `type:"string"` + + // When response is truncated (the IsTruncated element value in the response + // is true), you can use the key name in this field as marker in the subsequent + // request to get next set of objects. Amazon S3 lists objects in alphabetical + // order. Note: This element is returned only if you have delimiter request parameter + // specified. If response does not include the NextMarker and it is truncated, + // you can use the value of the last Key in the response as the marker in the + // subsequent request to get the next set of object keys. + NextMarker *string `type:"string"` + + Prefix *string `type:"string"` +} + +// String returns the string representation +func (s ListObjectsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListObjectsOutput) GoString() string { + return s.String() +} + +type ListObjectsV2Input struct { + _ struct{} `type:"structure"` + + // Name of the bucket to list. + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // ContinuationToken indicates Amazon S3 that the list is being continued on + // this bucket with a token. 
ContinuationToken is obfuscated and is not a real + // key + ContinuationToken *string `location:"querystring" locationName:"continuation-token" type:"string"` + + // A delimiter is a character you use to group keys. + Delimiter *string `location:"querystring" locationName:"delimiter" type:"string"` + + // Encoding type used by Amazon S3 to encode object keys in the response. + EncodingType *string `location:"querystring" locationName:"encoding-type" type:"string" enum:"EncodingType"` + + // The owner field is not present in listV2 by default, if you want to return + // owner field with each key in the result then set the fetch owner field to + // true + FetchOwner *bool `location:"querystring" locationName:"fetch-owner" type:"boolean"` + + // Sets the maximum number of keys returned in the response. The response might + // contain fewer keys but will never contain more. + MaxKeys *int64 `location:"querystring" locationName:"max-keys" type:"integer"` + + // Limits the response to keys that begin with the specified prefix. + Prefix *string `location:"querystring" locationName:"prefix" type:"string"` + + // StartAfter is where you want Amazon S3 to start listing from. Amazon S3 starts + // listing after this specified key. StartAfter can be any key in the bucket + StartAfter *string `location:"querystring" locationName:"start-after" type:"string"` +} + +// String returns the string representation +func (s ListObjectsV2Input) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListObjectsV2Input) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ListObjectsV2Input) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ListObjectsV2Input"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type ListObjectsV2Output struct { + _ struct{} `type:"structure"` + + // CommonPrefixes contains all (if there are any) keys between Prefix and the + // next occurrence of the string specified by delimiter + CommonPrefixes []*CommonPrefix `type:"list" flattened:"true"` + + // Metadata about each object returned. + Contents []*Object `type:"list" flattened:"true"` + + // ContinuationToken indicates Amazon S3 that the list is being continued on + // this bucket with a token. ContinuationToken is obfuscated and is not a real + // key + ContinuationToken *string `type:"string"` + + // A delimiter is a character you use to group keys. + Delimiter *string `type:"string"` + + // Encoding type used by Amazon S3 to encode object keys in the response. + EncodingType *string `type:"string" enum:"EncodingType"` + + // A flag that indicates whether or not Amazon S3 returned all of the results + // that satisfied the search criteria. + IsTruncated *bool `type:"boolean"` + + // KeyCount is the number of keys returned with this request. KeyCount will + // always be less than equals to MaxKeys field. Say you ask for 50 keys, your + // result will include less than equals 50 keys + KeyCount *int64 `type:"integer"` + + // Sets the maximum number of keys returned in the response. The response might + // contain fewer keys but will never contain more. + MaxKeys *int64 `type:"integer"` + + // Name of the bucket to list. 
+ Name *string `type:"string"` + + // NextContinuationToken is sent when isTruncated is true which means there + // are more keys in the bucket that can be listed. The next list requests to + // Amazon S3 can be continued with this NextContinuationToken. NextContinuationToken + // is obfuscated and is not a real key + NextContinuationToken *string `type:"string"` + + // Limits the response to keys that begin with the specified prefix. + Prefix *string `type:"string"` + + // StartAfter is where you want Amazon S3 to start listing from. Amazon S3 starts + // listing after this specified key. StartAfter can be any key in the bucket + StartAfter *string `type:"string"` +} + +// String returns the string representation +func (s ListObjectsV2Output) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListObjectsV2Output) GoString() string { + return s.String() +} + +type ListPartsInput struct { + _ struct{} `type:"structure"` + + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"` + + // Sets the maximum number of parts to return. + MaxParts *int64 `location:"querystring" locationName:"max-parts" type:"integer"` + + // Specifies the part after which listing should begin. Only parts with higher + // part numbers will be listed. + PartNumberMarker *int64 `location:"querystring" locationName:"part-number-marker" type:"integer"` + + // Confirms that the requester knows that she or he will be charged for the + // request. Bucket owners need not specify this parameter in their requests. + // Documentation on downloading objects from requester pays buckets can be found + // at http://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html + RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"` + + // Upload ID identifying the multipart upload whose parts are being listed. + UploadId *string `location:"querystring" locationName:"uploadId" type:"string" required:"true"` +} + +// String returns the string representation +func (s ListPartsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListPartsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ListPartsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ListPartsInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Key == nil { + invalidParams.Add(request.NewErrParamRequired("Key")) + } + if s.Key != nil && len(*s.Key) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Key", 1)) + } + if s.UploadId == nil { + invalidParams.Add(request.NewErrParamRequired("UploadId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type ListPartsOutput struct { + _ struct{} `type:"structure"` + + // Date when multipart upload will become eligible for abort operation by lifecycle. + AbortDate *time.Time `location:"header" locationName:"x-amz-abort-date" type:"timestamp" timestampFormat:"rfc822"` + + // Id of the lifecycle rule that makes a multipart upload eligible for abort + // operation. + AbortRuleId *string `location:"header" locationName:"x-amz-abort-rule-id" type:"string"` + + // Name of the bucket to which the multipart upload was initiated. 
+ Bucket *string `type:"string"` + + // Identifies who initiated the multipart upload. + Initiator *Initiator `type:"structure"` + + // Indicates whether the returned list of parts is truncated. + IsTruncated *bool `type:"boolean"` + + // Object key for which the multipart upload was initiated. + Key *string `min:"1" type:"string"` + + // Maximum number of parts that were allowed in the response. + MaxParts *int64 `type:"integer"` + + // When a list is truncated, this element specifies the last part in the list, + // as well as the value to use for the part-number-marker request parameter + // in a subsequent request. + NextPartNumberMarker *int64 `type:"integer"` + + Owner *Owner `type:"structure"` + + // Part number after which listing begins. + PartNumberMarker *int64 `type:"integer"` + + Parts []*Part `locationName:"Part" type:"list" flattened:"true"` + + // If present, indicates that the requester was successfully charged for the + // request. + RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"` + + // The class of storage used to store the object. + StorageClass *string `type:"string" enum:"StorageClass"` + + // Upload ID identifying the multipart upload whose parts are being listed. + UploadId *string `type:"string"` +} + +// String returns the string representation +func (s ListPartsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListPartsOutput) GoString() string { + return s.String() +} + +type LoggingEnabled struct { + _ struct{} `type:"structure"` + + // Specifies the bucket where you want Amazon S3 to store server access logs. + // You can have your logs delivered to any bucket that you own, including the + // same bucket that is being logged. You can also configure multiple buckets + // to deliver their logs to the same target bucket. In this case you should + // choose a different TargetPrefix for each source bucket so that the delivered + // log files can be distinguished by key. + TargetBucket *string `type:"string"` + + TargetGrants []*TargetGrant `locationNameList:"Grant" type:"list"` + + // This element lets you specify a prefix for the keys that the log files will + // be stored under. + TargetPrefix *string `type:"string"` +} + +// String returns the string representation +func (s LoggingEnabled) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s LoggingEnabled) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *LoggingEnabled) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "LoggingEnabled"} + if s.TargetGrants != nil { + for i, v := range s.TargetGrants { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "TargetGrants", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type MultipartUpload struct { + _ struct{} `type:"structure"` + + // Date and time at which the multipart upload was initiated. + Initiated *time.Time `type:"timestamp" timestampFormat:"iso8601"` + + // Identifies who initiated the multipart upload. + Initiator *Initiator `type:"structure"` + + // Key of the object for which the multipart upload was initiated. 
+ Key *string `min:"1" type:"string"` + + Owner *Owner `type:"structure"` + + // The class of storage used to store the object. + StorageClass *string `type:"string" enum:"StorageClass"` + + // Upload ID that identifies the multipart upload. + UploadId *string `type:"string"` +} + +// String returns the string representation +func (s MultipartUpload) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s MultipartUpload) GoString() string { + return s.String() +} + +// Specifies when noncurrent object versions expire. Upon expiration, Amazon +// S3 permanently deletes the noncurrent object versions. You set this lifecycle +// configuration action on a bucket that has versioning enabled (or suspended) +// to request that Amazon S3 delete noncurrent object versions at a specific +// period in the object's lifetime. +type NoncurrentVersionExpiration struct { + _ struct{} `type:"structure"` + + // Specifies the number of days an object is noncurrent before Amazon S3 can + // perform the associated action. For information about the noncurrent days + // calculations, see How Amazon S3 Calculates When an Object Became Noncurrent + // (http://docs.aws.amazon.com/AmazonS3/latest/dev/s3-access-control.html) in + // the Amazon Simple Storage Service Developer Guide. + NoncurrentDays *int64 `type:"integer"` +} + +// String returns the string representation +func (s NoncurrentVersionExpiration) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s NoncurrentVersionExpiration) GoString() string { + return s.String() +} + +// Container for the transition rule that describes when noncurrent objects +// transition to the STANDARD_IA or GLACIER storage class. If your bucket is +// versioning-enabled (or versioning is suspended), you can set this action +// to request that Amazon S3 transition noncurrent object versions to the STANDARD_IA +// or GLACIER storage class at a specific period in the object's lifetime. +type NoncurrentVersionTransition struct { + _ struct{} `type:"structure"` + + // Specifies the number of days an object is noncurrent before Amazon S3 can + // perform the associated action. For information about the noncurrent days + // calculations, see How Amazon S3 Calculates When an Object Became Noncurrent + // (http://docs.aws.amazon.com/AmazonS3/latest/dev/s3-access-control.html) in + // the Amazon Simple Storage Service Developer Guide. + NoncurrentDays *int64 `type:"integer"` + + // The class of storage used to store the object. + StorageClass *string `type:"string" enum:"TransitionStorageClass"` +} + +// String returns the string representation +func (s NoncurrentVersionTransition) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s NoncurrentVersionTransition) GoString() string { + return s.String() +} + +// Container for specifying the notification configuration of the bucket. If +// this element is empty, notifications are turned off on the bucket. 
+type NotificationConfiguration struct { + _ struct{} `type:"structure"` + + LambdaFunctionConfigurations []*LambdaFunctionConfiguration `locationName:"CloudFunctionConfiguration" type:"list" flattened:"true"` + + QueueConfigurations []*QueueConfiguration `locationName:"QueueConfiguration" type:"list" flattened:"true"` + + TopicConfigurations []*TopicConfiguration `locationName:"TopicConfiguration" type:"list" flattened:"true"` +} + +// String returns the string representation +func (s NotificationConfiguration) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s NotificationConfiguration) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *NotificationConfiguration) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "NotificationConfiguration"} + if s.LambdaFunctionConfigurations != nil { + for i, v := range s.LambdaFunctionConfigurations { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "LambdaFunctionConfigurations", i), err.(request.ErrInvalidParams)) + } + } + } + if s.QueueConfigurations != nil { + for i, v := range s.QueueConfigurations { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "QueueConfigurations", i), err.(request.ErrInvalidParams)) + } + } + } + if s.TopicConfigurations != nil { + for i, v := range s.TopicConfigurations { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "TopicConfigurations", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type NotificationConfigurationDeprecated struct { + _ struct{} `type:"structure"` + + CloudFunctionConfiguration *CloudFunctionConfiguration `type:"structure"` + + QueueConfiguration *QueueConfigurationDeprecated `type:"structure"` + + TopicConfiguration *TopicConfigurationDeprecated `type:"structure"` +} + +// String returns the string representation +func (s NotificationConfigurationDeprecated) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s NotificationConfigurationDeprecated) GoString() string { + return s.String() +} + +// Container for object key name filtering rules. For information about key +// name filtering, go to Configuring Event Notifications (http://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html) +// in the Amazon Simple Storage Service Developer Guide. +type NotificationConfigurationFilter struct { + _ struct{} `type:"structure"` + + // Container for object key name prefix and suffix filtering rules. + Key *KeyFilter `locationName:"S3Key" type:"structure"` +} + +// String returns the string representation +func (s NotificationConfigurationFilter) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s NotificationConfigurationFilter) GoString() string { + return s.String() +} + +type Object struct { + _ struct{} `type:"structure"` + + ETag *string `type:"string"` + + Key *string `min:"1" type:"string"` + + LastModified *time.Time `type:"timestamp" timestampFormat:"iso8601"` + + Owner *Owner `type:"structure"` + + Size *int64 `type:"integer"` + + // The class of storage used to store the object. 
+ StorageClass *string `type:"string" enum:"ObjectStorageClass"` +} + +// String returns the string representation +func (s Object) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Object) GoString() string { + return s.String() +} + +type ObjectIdentifier struct { + _ struct{} `type:"structure"` + + // Key name of the object to delete. + Key *string `min:"1" type:"string" required:"true"` + + // VersionId for the specific version of the object to delete. + VersionId *string `type:"string"` +} + +// String returns the string representation +func (s ObjectIdentifier) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ObjectIdentifier) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ObjectIdentifier) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ObjectIdentifier"} + if s.Key == nil { + invalidParams.Add(request.NewErrParamRequired("Key")) + } + if s.Key != nil && len(*s.Key) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Key", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type ObjectVersion struct { + _ struct{} `type:"structure"` + + ETag *string `type:"string"` + + // Specifies whether the object is (true) or is not (false) the latest version + // of an object. + IsLatest *bool `type:"boolean"` + + // The object key. + Key *string `min:"1" type:"string"` + + // Date and time the object was last modified. + LastModified *time.Time `type:"timestamp" timestampFormat:"iso8601"` + + Owner *Owner `type:"structure"` + + // Size in bytes of the object. + Size *int64 `type:"integer"` + + // The class of storage used to store the object. + StorageClass *string `type:"string" enum:"ObjectVersionStorageClass"` + + // Version ID of an object. + VersionId *string `type:"string"` +} + +// String returns the string representation +func (s ObjectVersion) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ObjectVersion) GoString() string { + return s.String() +} + +type Owner struct { + _ struct{} `type:"structure"` + + DisplayName *string `type:"string"` + + ID *string `type:"string"` +} + +// String returns the string representation +func (s Owner) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Owner) GoString() string { + return s.String() +} + +type Part struct { + _ struct{} `type:"structure"` + + // Entity tag returned when the part was uploaded. + ETag *string `type:"string"` + + // Date and time at which the part was uploaded. + LastModified *time.Time `type:"timestamp" timestampFormat:"iso8601"` + + // Part number identifying the part. This is a positive integer between 1 and + // 10,000. + PartNumber *int64 `type:"integer"` + + // Size of the uploaded part data. + Size *int64 `type:"integer"` +} + +// String returns the string representation +func (s Part) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Part) GoString() string { + return s.String() +} + +type PutBucketAccelerateConfigurationInput struct { + _ struct{} `type:"structure" payload:"AccelerateConfiguration"` + + // Specifies the Accelerate Configuration you want to set for the bucket. 
+ AccelerateConfiguration *AccelerateConfiguration `locationName:"AccelerateConfiguration" type:"structure" required:"true"` + + // Name of the bucket for which the accelerate configuration is set. + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` +} + +// String returns the string representation +func (s PutBucketAccelerateConfigurationInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutBucketAccelerateConfigurationInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *PutBucketAccelerateConfigurationInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "PutBucketAccelerateConfigurationInput"} + if s.AccelerateConfiguration == nil { + invalidParams.Add(request.NewErrParamRequired("AccelerateConfiguration")) + } + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type PutBucketAccelerateConfigurationOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s PutBucketAccelerateConfigurationOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutBucketAccelerateConfigurationOutput) GoString() string { + return s.String() +} + +type PutBucketAclInput struct { + _ struct{} `type:"structure" payload:"AccessControlPolicy"` + + // The canned ACL to apply to the bucket. + ACL *string `location:"header" locationName:"x-amz-acl" type:"string" enum:"BucketCannedACL"` + + AccessControlPolicy *AccessControlPolicy `locationName:"AccessControlPolicy" type:"structure"` + + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // Allows grantee the read, write, read ACP, and write ACP permissions on the + // bucket. + GrantFullControl *string `location:"header" locationName:"x-amz-grant-full-control" type:"string"` + + // Allows grantee to list the objects in the bucket. + GrantRead *string `location:"header" locationName:"x-amz-grant-read" type:"string"` + + // Allows grantee to read the bucket ACL. + GrantReadACP *string `location:"header" locationName:"x-amz-grant-read-acp" type:"string"` + + // Allows grantee to create, overwrite, and delete any object in the bucket. + GrantWrite *string `location:"header" locationName:"x-amz-grant-write" type:"string"` + + // Allows grantee to write the ACL for the applicable bucket. + GrantWriteACP *string `location:"header" locationName:"x-amz-grant-write-acp" type:"string"` +} + +// String returns the string representation +func (s PutBucketAclInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutBucketAclInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *PutBucketAclInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "PutBucketAclInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.AccessControlPolicy != nil { + if err := s.AccessControlPolicy.Validate(); err != nil { + invalidParams.AddNested("AccessControlPolicy", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type PutBucketAclOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s PutBucketAclOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutBucketAclOutput) GoString() string { + return s.String() +} + +type PutBucketCorsInput struct { + _ struct{} `type:"structure" payload:"CORSConfiguration"` + + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + CORSConfiguration *CORSConfiguration `locationName:"CORSConfiguration" type:"structure" required:"true"` +} + +// String returns the string representation +func (s PutBucketCorsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutBucketCorsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *PutBucketCorsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "PutBucketCorsInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.CORSConfiguration == nil { + invalidParams.Add(request.NewErrParamRequired("CORSConfiguration")) + } + if s.CORSConfiguration != nil { + if err := s.CORSConfiguration.Validate(); err != nil { + invalidParams.AddNested("CORSConfiguration", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type PutBucketCorsOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s PutBucketCorsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutBucketCorsOutput) GoString() string { + return s.String() +} + +type PutBucketLifecycleConfigurationInput struct { + _ struct{} `type:"structure" payload:"LifecycleConfiguration"` + + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + LifecycleConfiguration *BucketLifecycleConfiguration `locationName:"LifecycleConfiguration" type:"structure"` +} + +// String returns the string representation +func (s PutBucketLifecycleConfigurationInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutBucketLifecycleConfigurationInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *PutBucketLifecycleConfigurationInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "PutBucketLifecycleConfigurationInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.LifecycleConfiguration != nil { + if err := s.LifecycleConfiguration.Validate(); err != nil { + invalidParams.AddNested("LifecycleConfiguration", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type PutBucketLifecycleConfigurationOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s PutBucketLifecycleConfigurationOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutBucketLifecycleConfigurationOutput) GoString() string { + return s.String() +} + +type PutBucketLifecycleInput struct { + _ struct{} `type:"structure" payload:"LifecycleConfiguration"` + + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + LifecycleConfiguration *LifecycleConfiguration `locationName:"LifecycleConfiguration" type:"structure"` +} + +// String returns the string representation +func (s PutBucketLifecycleInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutBucketLifecycleInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *PutBucketLifecycleInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "PutBucketLifecycleInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.LifecycleConfiguration != nil { + if err := s.LifecycleConfiguration.Validate(); err != nil { + invalidParams.AddNested("LifecycleConfiguration", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type PutBucketLifecycleOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s PutBucketLifecycleOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutBucketLifecycleOutput) GoString() string { + return s.String() +} + +type PutBucketLoggingInput struct { + _ struct{} `type:"structure" payload:"BucketLoggingStatus"` + + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + BucketLoggingStatus *BucketLoggingStatus `locationName:"BucketLoggingStatus" type:"structure" required:"true"` +} + +// String returns the string representation +func (s PutBucketLoggingInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutBucketLoggingInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *PutBucketLoggingInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "PutBucketLoggingInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.BucketLoggingStatus == nil { + invalidParams.Add(request.NewErrParamRequired("BucketLoggingStatus")) + } + if s.BucketLoggingStatus != nil { + if err := s.BucketLoggingStatus.Validate(); err != nil { + invalidParams.AddNested("BucketLoggingStatus", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type PutBucketLoggingOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s PutBucketLoggingOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutBucketLoggingOutput) GoString() string { + return s.String() +} + +type PutBucketNotificationConfigurationInput struct { + _ struct{} `type:"structure" payload:"NotificationConfiguration"` + + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // Container for specifying the notification configuration of the bucket. If + // this element is empty, notifications are turned off on the bucket. + NotificationConfiguration *NotificationConfiguration `locationName:"NotificationConfiguration" type:"structure" required:"true"` +} + +// String returns the string representation +func (s PutBucketNotificationConfigurationInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutBucketNotificationConfigurationInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *PutBucketNotificationConfigurationInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "PutBucketNotificationConfigurationInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.NotificationConfiguration == nil { + invalidParams.Add(request.NewErrParamRequired("NotificationConfiguration")) + } + if s.NotificationConfiguration != nil { + if err := s.NotificationConfiguration.Validate(); err != nil { + invalidParams.AddNested("NotificationConfiguration", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type PutBucketNotificationConfigurationOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s PutBucketNotificationConfigurationOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutBucketNotificationConfigurationOutput) GoString() string { + return s.String() +} + +type PutBucketNotificationInput struct { + _ struct{} `type:"structure" payload:"NotificationConfiguration"` + + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + NotificationConfiguration *NotificationConfigurationDeprecated `locationName:"NotificationConfiguration" type:"structure" required:"true"` +} + +// String returns the string representation +func (s PutBucketNotificationInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutBucketNotificationInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *PutBucketNotificationInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "PutBucketNotificationInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.NotificationConfiguration == nil { + invalidParams.Add(request.NewErrParamRequired("NotificationConfiguration")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type PutBucketNotificationOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s PutBucketNotificationOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutBucketNotificationOutput) GoString() string { + return s.String() +} + +type PutBucketPolicyInput struct { + _ struct{} `type:"structure" payload:"Policy"` + + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // The bucket policy as a JSON document. + Policy *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s PutBucketPolicyInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutBucketPolicyInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *PutBucketPolicyInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "PutBucketPolicyInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Policy == nil { + invalidParams.Add(request.NewErrParamRequired("Policy")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type PutBucketPolicyOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s PutBucketPolicyOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutBucketPolicyOutput) GoString() string { + return s.String() +} + +type PutBucketReplicationInput struct { + _ struct{} `type:"structure" payload:"ReplicationConfiguration"` + + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // Container for replication rules. You can add as many as 1,000 rules. Total + // replication configuration size can be up to 2 MB. + ReplicationConfiguration *ReplicationConfiguration `locationName:"ReplicationConfiguration" type:"structure" required:"true"` +} + +// String returns the string representation +func (s PutBucketReplicationInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutBucketReplicationInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *PutBucketReplicationInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "PutBucketReplicationInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.ReplicationConfiguration == nil { + invalidParams.Add(request.NewErrParamRequired("ReplicationConfiguration")) + } + if s.ReplicationConfiguration != nil { + if err := s.ReplicationConfiguration.Validate(); err != nil { + invalidParams.AddNested("ReplicationConfiguration", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type PutBucketReplicationOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s PutBucketReplicationOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutBucketReplicationOutput) GoString() string { + return s.String() +} + +type PutBucketRequestPaymentInput struct { + _ struct{} `type:"structure" payload:"RequestPaymentConfiguration"` + + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + RequestPaymentConfiguration *RequestPaymentConfiguration `locationName:"RequestPaymentConfiguration" type:"structure" required:"true"` +} + +// String returns the string representation +func (s PutBucketRequestPaymentInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutBucketRequestPaymentInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *PutBucketRequestPaymentInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "PutBucketRequestPaymentInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.RequestPaymentConfiguration == nil { + invalidParams.Add(request.NewErrParamRequired("RequestPaymentConfiguration")) + } + if s.RequestPaymentConfiguration != nil { + if err := s.RequestPaymentConfiguration.Validate(); err != nil { + invalidParams.AddNested("RequestPaymentConfiguration", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type PutBucketRequestPaymentOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s PutBucketRequestPaymentOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutBucketRequestPaymentOutput) GoString() string { + return s.String() +} + +type PutBucketTaggingInput struct { + _ struct{} `type:"structure" payload:"Tagging"` + + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + Tagging *Tagging `locationName:"Tagging" type:"structure" required:"true"` +} + +// String returns the string representation +func (s PutBucketTaggingInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutBucketTaggingInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *PutBucketTaggingInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "PutBucketTaggingInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Tagging == nil { + invalidParams.Add(request.NewErrParamRequired("Tagging")) + } + if s.Tagging != nil { + if err := s.Tagging.Validate(); err != nil { + invalidParams.AddNested("Tagging", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type PutBucketTaggingOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s PutBucketTaggingOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutBucketTaggingOutput) GoString() string { + return s.String() +} + +type PutBucketVersioningInput struct { + _ struct{} `type:"structure" payload:"VersioningConfiguration"` + + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // The concatenation of the authentication device's serial number, a space, + // and the value that is displayed on your authentication device. + MFA *string `location:"header" locationName:"x-amz-mfa" type:"string"` + + VersioningConfiguration *VersioningConfiguration `locationName:"VersioningConfiguration" type:"structure" required:"true"` +} + +// String returns the string representation +func (s PutBucketVersioningInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutBucketVersioningInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *PutBucketVersioningInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "PutBucketVersioningInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.VersioningConfiguration == nil { + invalidParams.Add(request.NewErrParamRequired("VersioningConfiguration")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type PutBucketVersioningOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s PutBucketVersioningOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutBucketVersioningOutput) GoString() string { + return s.String() +} + +type PutBucketWebsiteInput struct { + _ struct{} `type:"structure" payload:"WebsiteConfiguration"` + + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + WebsiteConfiguration *WebsiteConfiguration `locationName:"WebsiteConfiguration" type:"structure" required:"true"` +} + +// String returns the string representation +func (s PutBucketWebsiteInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutBucketWebsiteInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *PutBucketWebsiteInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "PutBucketWebsiteInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.WebsiteConfiguration == nil { + invalidParams.Add(request.NewErrParamRequired("WebsiteConfiguration")) + } + if s.WebsiteConfiguration != nil { + if err := s.WebsiteConfiguration.Validate(); err != nil { + invalidParams.AddNested("WebsiteConfiguration", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type PutBucketWebsiteOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s PutBucketWebsiteOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutBucketWebsiteOutput) GoString() string { + return s.String() +} + +type PutObjectAclInput struct { + _ struct{} `type:"structure" payload:"AccessControlPolicy"` + + // The canned ACL to apply to the object. + ACL *string `location:"header" locationName:"x-amz-acl" type:"string" enum:"ObjectCannedACL"` + + AccessControlPolicy *AccessControlPolicy `locationName:"AccessControlPolicy" type:"structure"` + + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // Allows grantee the read, write, read ACP, and write ACP permissions on the + // bucket. + GrantFullControl *string `location:"header" locationName:"x-amz-grant-full-control" type:"string"` + + // Allows grantee to list the objects in the bucket. + GrantRead *string `location:"header" locationName:"x-amz-grant-read" type:"string"` + + // Allows grantee to read the bucket ACL. + GrantReadACP *string `location:"header" locationName:"x-amz-grant-read-acp" type:"string"` + + // Allows grantee to create, overwrite, and delete any object in the bucket. + GrantWrite *string `location:"header" locationName:"x-amz-grant-write" type:"string"` + + // Allows grantee to write the ACL for the applicable bucket. + GrantWriteACP *string `location:"header" locationName:"x-amz-grant-write-acp" type:"string"` + + Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"` + + // Confirms that the requester knows that she or he will be charged for the + // request. Bucket owners need not specify this parameter in their requests. + // Documentation on downloading objects from requester pays buckets can be found + // at http://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html + RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"` + + // VersionId used to reference a specific version of the object. + VersionId *string `location:"querystring" locationName:"versionId" type:"string"` +} + +// String returns the string representation +func (s PutObjectAclInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutObjectAclInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *PutObjectAclInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "PutObjectAclInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Key == nil { + invalidParams.Add(request.NewErrParamRequired("Key")) + } + if s.Key != nil && len(*s.Key) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Key", 1)) + } + if s.AccessControlPolicy != nil { + if err := s.AccessControlPolicy.Validate(); err != nil { + invalidParams.AddNested("AccessControlPolicy", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type PutObjectAclOutput struct { + _ struct{} `type:"structure"` + + // If present, indicates that the requester was successfully charged for the + // request. + RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"` +} + +// String returns the string representation +func (s PutObjectAclOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutObjectAclOutput) GoString() string { + return s.String() +} + +type PutObjectInput struct { + _ struct{} `type:"structure" payload:"Body"` + + // The canned ACL to apply to the object. + ACL *string `location:"header" locationName:"x-amz-acl" type:"string" enum:"ObjectCannedACL"` + + // Object data. + Body io.ReadSeeker `type:"blob"` + + // Name of the bucket to which the PUT operation was initiated. + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // Specifies caching behavior along the request/reply chain. + CacheControl *string `location:"header" locationName:"Cache-Control" type:"string"` + + // Specifies presentational information for the object. + ContentDisposition *string `location:"header" locationName:"Content-Disposition" type:"string"` + + // Specifies what content encodings have been applied to the object and thus + // what decoding mechanisms must be applied to obtain the media-type referenced + // by the Content-Type header field. + ContentEncoding *string `location:"header" locationName:"Content-Encoding" type:"string"` + + // The language the content is in. + ContentLanguage *string `location:"header" locationName:"Content-Language" type:"string"` + + // Size of the body in bytes. This parameter is useful when the size of the + // body cannot be determined automatically. + ContentLength *int64 `location:"header" locationName:"Content-Length" type:"long"` + + // A standard MIME type describing the format of the object data. + ContentType *string `location:"header" locationName:"Content-Type" type:"string"` + + // The date and time at which the object is no longer cacheable. + Expires *time.Time `location:"header" locationName:"Expires" type:"timestamp" timestampFormat:"rfc822"` + + // Gives the grantee READ, READ_ACP, and WRITE_ACP permissions on the object. + GrantFullControl *string `location:"header" locationName:"x-amz-grant-full-control" type:"string"` + + // Allows grantee to read the object data and its metadata. + GrantRead *string `location:"header" locationName:"x-amz-grant-read" type:"string"` + + // Allows grantee to read the object ACL. + GrantReadACP *string `location:"header" locationName:"x-amz-grant-read-acp" type:"string"` + + // Allows grantee to write the ACL for the applicable object. 
+	GrantWriteACP *string `location:"header" locationName:"x-amz-grant-write-acp" type:"string"`
+
+	// Object key for which the PUT operation was initiated.
+	Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"`
+
+	// A map of metadata to store with the object in S3.
+	Metadata map[string]*string `location:"headers" locationName:"x-amz-meta-" type:"map"`
+
+	// Confirms that the requester knows that she or he will be charged for the
+	// request. Bucket owners need not specify this parameter in their requests.
+	// Documentation on downloading objects from requester pays buckets can be found
+	// at http://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html
+	RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"`
+
+	// Specifies the algorithm to use when encrypting the object (e.g., AES256).
+	SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"`
+
+	// Specifies the customer-provided encryption key for Amazon S3 to use in encrypting
+	// data. This value is used to store the object and then it is discarded; Amazon
+	// does not store the encryption key. The key must be appropriate for use with
+	// the algorithm specified in the x-amz-server-side-encryption-customer-algorithm
+	// header.
+	SSECustomerKey *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key" type:"string"`
+
+	// Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321.
+	// Amazon S3 uses this header for a message integrity check to ensure the encryption
+	// key was transmitted without error.
+	SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"`
+
+	// Specifies the AWS KMS key ID to use for object encryption. All GET and PUT
+	// requests for an object protected by AWS KMS will fail if not made via SSL
+	// or using SigV4. Documentation on configuring any of the officially supported
+	// AWS SDKs and CLI can be found at http://docs.aws.amazon.com/AmazonS3/latest/dev/UsingAWSSDK.html#specify-signature-version
+	SSEKMSKeyId *string `location:"header" locationName:"x-amz-server-side-encryption-aws-kms-key-id" type:"string"`
+
+	// The Server-side encryption algorithm used when storing this object in S3
+	// (e.g., AES256, aws:kms).
+	ServerSideEncryption *string `location:"header" locationName:"x-amz-server-side-encryption" type:"string" enum:"ServerSideEncryption"`
+
+	// The type of storage to use for the object. Defaults to 'STANDARD'.
+	StorageClass *string `location:"header" locationName:"x-amz-storage-class" type:"string" enum:"StorageClass"`
+
+	// If the bucket is configured as a website, redirects requests for this object
+	// to another object in the same bucket or to an external URL. Amazon S3 stores
+	// the value of this header in the object metadata.
+	WebsiteRedirectLocation *string `location:"header" locationName:"x-amz-website-redirect-location" type:"string"`
+}
+
+// String returns the string representation
+func (s PutObjectInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s PutObjectInput) GoString() string {
+	return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *PutObjectInput) Validate() error {
+	invalidParams := request.ErrInvalidParams{Context: "PutObjectInput"}
+	if s.Bucket == nil {
+		invalidParams.Add(request.NewErrParamRequired("Bucket"))
+	}
+	if s.Key == nil {
+		invalidParams.Add(request.NewErrParamRequired("Key"))
+	}
+	if s.Key != nil && len(*s.Key) < 1 {
+		invalidParams.Add(request.NewErrParamMinLen("Key", 1))
+	}
+
+	if invalidParams.Len() > 0 {
+		return invalidParams
+	}
+	return nil
+}
+
+type PutObjectOutput struct {
+	_ struct{} `type:"structure"`
+
+	// Entity tag for the uploaded object.
+	ETag *string `location:"header" locationName:"ETag" type:"string"`
+
+	// If the object expiration is configured, this will contain the expiration
+	// date (expiry-date) and rule ID (rule-id). The value of rule-id is URL encoded.
+	Expiration *string `location:"header" locationName:"x-amz-expiration" type:"string"`
+
+	// If present, indicates that the requester was successfully charged for the
+	// request.
+	RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"`
+
+	// If server-side encryption with a customer-provided encryption key was requested,
+	// the response will include this header confirming the encryption algorithm
+	// used.
+	SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"`
+
+	// If server-side encryption with a customer-provided encryption key was requested,
+	// the response will include this header to provide round trip message integrity
+	// verification of the customer-provided encryption key.
+	SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"`
+
+	// If present, specifies the ID of the AWS Key Management Service (KMS) master
+	// encryption key that was used for the object.
+	SSEKMSKeyId *string `location:"header" locationName:"x-amz-server-side-encryption-aws-kms-key-id" type:"string"`
+
+	// The Server-side encryption algorithm used when storing this object in S3
+	// (e.g., AES256, aws:kms).
+	ServerSideEncryption *string `location:"header" locationName:"x-amz-server-side-encryption" type:"string" enum:"ServerSideEncryption"`
+
+	// Version of the object.
+	VersionId *string `location:"header" locationName:"x-amz-version-id" type:"string"`
+}
+
+// String returns the string representation
+func (s PutObjectOutput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s PutObjectOutput) GoString() string {
+	return s.String()
+}
+
+// Container for specifying a configuration when you want Amazon S3 to publish
+// events to an Amazon Simple Queue Service (Amazon SQS) queue.
+type QueueConfiguration struct {
+	_ struct{} `type:"structure"`
+
+	Events []*string `locationName:"Event" type:"list" flattened:"true" required:"true"`
+
+	// Container for object key name filtering rules. For information about key
+	// name filtering, go to Configuring Event Notifications (http://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html)
+	// in the Amazon Simple Storage Service Developer Guide.
+	Filter *NotificationConfigurationFilter `type:"structure"`
+
+	// Optional unique identifier for configurations in a notification configuration.
+	// If you don't provide one, Amazon S3 will assign an ID.
+	Id *string `type:"string"`
+
+	// Amazon SQS queue ARN to which Amazon S3 will publish a message when it detects
+	// events of specified type.
+	QueueArn *string `locationName:"Queue" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s QueueConfiguration) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s QueueConfiguration) GoString() string {
+	return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *QueueConfiguration) Validate() error {
+	invalidParams := request.ErrInvalidParams{Context: "QueueConfiguration"}
+	if s.Events == nil {
+		invalidParams.Add(request.NewErrParamRequired("Events"))
+	}
+	if s.QueueArn == nil {
+		invalidParams.Add(request.NewErrParamRequired("QueueArn"))
+	}
+
+	if invalidParams.Len() > 0 {
+		return invalidParams
+	}
+	return nil
+}
+
+type QueueConfigurationDeprecated struct {
+	_ struct{} `type:"structure"`
+
+	// Bucket event for which to send notifications.
+	Event *string `deprecated:"true" type:"string" enum:"Event"`
+
+	Events []*string `locationName:"Event" type:"list" flattened:"true"`
+
+	// Optional unique identifier for configurations in a notification configuration.
+	// If you don't provide one, Amazon S3 will assign an ID.
+	Id *string `type:"string"`
+
+	Queue *string `type:"string"`
+}
+
+// String returns the string representation
+func (s QueueConfigurationDeprecated) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s QueueConfigurationDeprecated) GoString() string {
+	return s.String()
+}
+
+type Redirect struct {
+	_ struct{} `type:"structure"`
+
+	// The host name to use in the redirect request.
+	HostName *string `type:"string"`
+
+	// The HTTP redirect code to use on the response. Not required if one of the
+	// siblings is present.
+	HttpRedirectCode *string `type:"string"`
+
+	// Protocol to use (http, https) when redirecting requests. The default is the
+	// protocol that is used in the original request.
+	Protocol *string `type:"string" enum:"Protocol"`
+
+	// The object key prefix to use in the redirect request. For example, to redirect
+	// requests for all pages with prefix docs/ (objects in the docs/ folder) to
+	// documents/, you can set a condition block with KeyPrefixEquals set to docs/
+	// and in the Redirect set ReplaceKeyPrefixWith to /documents. Not required
+	// if one of the siblings is present. Can be present only if ReplaceKeyWith
+	// is not provided.
+	ReplaceKeyPrefixWith *string `type:"string"`
+
+	// The specific object key to use in the redirect request. For example, redirect
+	// request to error.html. Not required if one of the siblings is present. Can
+	// be present only if ReplaceKeyPrefixWith is not provided.
+	ReplaceKeyWith *string `type:"string"`
+}
+
+// String returns the string representation
+func (s Redirect) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s Redirect) GoString() string {
+	return s.String()
+}
+
+type RedirectAllRequestsTo struct {
+	_ struct{} `type:"structure"`
+
+	// Name of the host where requests will be redirected.
+	HostName *string `type:"string" required:"true"`
+
+	// Protocol to use (http, https) when redirecting requests. The default is the
+	// protocol that is used in the original request.
+ Protocol *string `type:"string" enum:"Protocol"` +} + +// String returns the string representation +func (s RedirectAllRequestsTo) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RedirectAllRequestsTo) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *RedirectAllRequestsTo) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "RedirectAllRequestsTo"} + if s.HostName == nil { + invalidParams.Add(request.NewErrParamRequired("HostName")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Container for replication rules. You can add as many as 1,000 rules. Total +// replication configuration size can be up to 2 MB. +type ReplicationConfiguration struct { + _ struct{} `type:"structure"` + + // Amazon Resource Name (ARN) of an IAM role for Amazon S3 to assume when replicating + // the objects. + Role *string `type:"string" required:"true"` + + // Container for information about a particular replication rule. Replication + // configuration must have at least one rule and can contain up to 1,000 rules. + Rules []*ReplicationRule `locationName:"Rule" type:"list" flattened:"true" required:"true"` +} + +// String returns the string representation +func (s ReplicationConfiguration) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ReplicationConfiguration) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ReplicationConfiguration) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ReplicationConfiguration"} + if s.Role == nil { + invalidParams.Add(request.NewErrParamRequired("Role")) + } + if s.Rules == nil { + invalidParams.Add(request.NewErrParamRequired("Rules")) + } + if s.Rules != nil { + for i, v := range s.Rules { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Rules", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type ReplicationRule struct { + _ struct{} `type:"structure"` + + Destination *Destination `type:"structure" required:"true"` + + // Unique identifier for the rule. The value cannot be longer than 255 characters. + ID *string `type:"string"` + + // Object keyname prefix identifying one or more objects to which the rule applies. + // Maximum prefix length can be up to 1,024 characters. Overlapping prefixes + // are not supported. + Prefix *string `type:"string" required:"true"` + + // The rule is ignored if status is not Enabled. + Status *string `type:"string" required:"true" enum:"ReplicationRuleStatus"` +} + +// String returns the string representation +func (s ReplicationRule) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ReplicationRule) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *ReplicationRule) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ReplicationRule"} + if s.Destination == nil { + invalidParams.Add(request.NewErrParamRequired("Destination")) + } + if s.Prefix == nil { + invalidParams.Add(request.NewErrParamRequired("Prefix")) + } + if s.Status == nil { + invalidParams.Add(request.NewErrParamRequired("Status")) + } + if s.Destination != nil { + if err := s.Destination.Validate(); err != nil { + invalidParams.AddNested("Destination", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type RequestPaymentConfiguration struct { + _ struct{} `type:"structure"` + + // Specifies who pays for the download and request fees. + Payer *string `type:"string" required:"true" enum:"Payer"` +} + +// String returns the string representation +func (s RequestPaymentConfiguration) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RequestPaymentConfiguration) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *RequestPaymentConfiguration) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "RequestPaymentConfiguration"} + if s.Payer == nil { + invalidParams.Add(request.NewErrParamRequired("Payer")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type RestoreObjectInput struct { + _ struct{} `type:"structure" payload:"RestoreRequest"` + + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"` + + // Confirms that the requester knows that she or he will be charged for the + // request. Bucket owners need not specify this parameter in their requests. + // Documentation on downloading objects from requester pays buckets can be found + // at http://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html + RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"` + + RestoreRequest *RestoreRequest `locationName:"RestoreRequest" type:"structure"` + + VersionId *string `location:"querystring" locationName:"versionId" type:"string"` +} + +// String returns the string representation +func (s RestoreObjectInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RestoreObjectInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *RestoreObjectInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "RestoreObjectInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Key == nil { + invalidParams.Add(request.NewErrParamRequired("Key")) + } + if s.Key != nil && len(*s.Key) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Key", 1)) + } + if s.RestoreRequest != nil { + if err := s.RestoreRequest.Validate(); err != nil { + invalidParams.AddNested("RestoreRequest", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type RestoreObjectOutput struct { + _ struct{} `type:"structure"` + + // If present, indicates that the requester was successfully charged for the + // request. 
+	RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"`
+}
+
+// String returns the string representation
+func (s RestoreObjectOutput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s RestoreObjectOutput) GoString() string {
+	return s.String()
+}
+
+type RestoreRequest struct {
+	_ struct{} `type:"structure"`
+
+	// Lifetime of the active copy in days
+	Days *int64 `type:"integer" required:"true"`
+}
+
+// String returns the string representation
+func (s RestoreRequest) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s RestoreRequest) GoString() string {
+	return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *RestoreRequest) Validate() error {
+	invalidParams := request.ErrInvalidParams{Context: "RestoreRequest"}
+	if s.Days == nil {
+		invalidParams.Add(request.NewErrParamRequired("Days"))
+	}
+
+	if invalidParams.Len() > 0 {
+		return invalidParams
+	}
+	return nil
+}
+
+type RoutingRule struct {
+	_ struct{} `type:"structure"`
+
+	// A container for describing a condition that must be met for the specified
+	// redirect to apply. For example, 1. If request is for pages in the /docs folder,
+	// redirect to the /documents folder. 2. If request results in HTTP error 4xx,
+	// redirect request to another host where you might process the error.
+	Condition *Condition `type:"structure"`
+
+	// Container for redirect information. You can redirect requests to another
+	// host, to another page, or with another protocol. In the event of an error,
+	// you can specify a different error code to return.
+	Redirect *Redirect `type:"structure" required:"true"`
+}
+
+// String returns the string representation
+func (s RoutingRule) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s RoutingRule) GoString() string {
+	return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *RoutingRule) Validate() error {
+	invalidParams := request.ErrInvalidParams{Context: "RoutingRule"}
+	if s.Redirect == nil {
+		invalidParams.Add(request.NewErrParamRequired("Redirect"))
+	}
+
+	if invalidParams.Len() > 0 {
+		return invalidParams
+	}
+	return nil
+}
+
+type Rule struct {
+	_ struct{} `type:"structure"`
+
+	// Specifies the days since the initiation of an Incomplete Multipart Upload
+	// that Lifecycle will wait before permanently removing all parts of the upload.
+	AbortIncompleteMultipartUpload *AbortIncompleteMultipartUpload `type:"structure"`
+
+	Expiration *LifecycleExpiration `type:"structure"`
+
+	// Unique identifier for the rule. The value cannot be longer than 255 characters.
+	ID *string `type:"string"`
+
+	// Specifies when noncurrent object versions expire. Upon expiration, Amazon
+	// S3 permanently deletes the noncurrent object versions. You set this lifecycle
+	// configuration action on a bucket that has versioning enabled (or suspended)
+	// to request that Amazon S3 delete noncurrent object versions at a specific
+	// period in the object's lifetime.
+	NoncurrentVersionExpiration *NoncurrentVersionExpiration `type:"structure"`
+
+	// Container for the transition rule that describes when noncurrent objects
+	// transition to the STANDARD_IA or GLACIER storage class.
If your bucket is + // versioning-enabled (or versioning is suspended), you can set this action + // to request that Amazon S3 transition noncurrent object versions to the STANDARD_IA + // or GLACIER storage class at a specific period in the object's lifetime. + NoncurrentVersionTransition *NoncurrentVersionTransition `type:"structure"` + + // Prefix identifying one or more objects to which the rule applies. + Prefix *string `type:"string" required:"true"` + + // If 'Enabled', the rule is currently being applied. If 'Disabled', the rule + // is not currently being applied. + Status *string `type:"string" required:"true" enum:"ExpirationStatus"` + + Transition *Transition `type:"structure"` +} + +// String returns the string representation +func (s Rule) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Rule) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *Rule) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "Rule"} + if s.Prefix == nil { + invalidParams.Add(request.NewErrParamRequired("Prefix")) + } + if s.Status == nil { + invalidParams.Add(request.NewErrParamRequired("Status")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type Tag struct { + _ struct{} `type:"structure"` + + // Name of the tag. + Key *string `min:"1" type:"string" required:"true"` + + // Value of the tag. + Value *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s Tag) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Tag) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *Tag) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "Tag"} + if s.Key == nil { + invalidParams.Add(request.NewErrParamRequired("Key")) + } + if s.Key != nil && len(*s.Key) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Key", 1)) + } + if s.Value == nil { + invalidParams.Add(request.NewErrParamRequired("Value")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type Tagging struct { + _ struct{} `type:"structure"` + + TagSet []*Tag `locationNameList:"Tag" type:"list" required:"true"` +} + +// String returns the string representation +func (s Tagging) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Tagging) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *Tagging) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "Tagging"} + if s.TagSet == nil { + invalidParams.Add(request.NewErrParamRequired("TagSet")) + } + if s.TagSet != nil { + for i, v := range s.TagSet { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "TagSet", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type TargetGrant struct { + _ struct{} `type:"structure"` + + Grantee *Grantee `type:"structure"` + + // Logging permissions assigned to the Grantee for the bucket. 
+ Permission *string `type:"string" enum:"BucketLogsPermission"` +} + +// String returns the string representation +func (s TargetGrant) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s TargetGrant) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *TargetGrant) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "TargetGrant"} + if s.Grantee != nil { + if err := s.Grantee.Validate(); err != nil { + invalidParams.AddNested("Grantee", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Container for specifying the configuration when you want Amazon S3 to publish +// events to an Amazon Simple Notification Service (Amazon SNS) topic. +type TopicConfiguration struct { + _ struct{} `type:"structure"` + + Events []*string `locationName:"Event" type:"list" flattened:"true" required:"true"` + + // Container for object key name filtering rules. For information about key + // name filtering, go to Configuring Event Notifications (http://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html) + // in the Amazon Simple Storage Service Developer Guide. + Filter *NotificationConfigurationFilter `type:"structure"` + + // Optional unique identifier for configurations in a notification configuration. + // If you don't provide one, Amazon S3 will assign an ID. + Id *string `type:"string"` + + // Amazon SNS topic ARN to which Amazon S3 will publish a message when it detects + // events of specified type. + TopicArn *string `locationName:"Topic" type:"string" required:"true"` +} + +// String returns the string representation +func (s TopicConfiguration) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s TopicConfiguration) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *TopicConfiguration) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "TopicConfiguration"} + if s.Events == nil { + invalidParams.Add(request.NewErrParamRequired("Events")) + } + if s.TopicArn == nil { + invalidParams.Add(request.NewErrParamRequired("TopicArn")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type TopicConfigurationDeprecated struct { + _ struct{} `type:"structure"` + + // Bucket event for which to send notifications. + Event *string `deprecated:"true" type:"string" enum:"Event"` + + Events []*string `locationName:"Event" type:"list" flattened:"true"` + + // Optional unique identifier for configurations in a notification configuration. + // If you don't provide one, Amazon S3 will assign an ID. + Id *string `type:"string"` + + // Amazon SNS topic to which Amazon S3 will publish a message to report the + // specified events for the bucket. + Topic *string `type:"string"` +} + +// String returns the string representation +func (s TopicConfigurationDeprecated) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s TopicConfigurationDeprecated) GoString() string { + return s.String() +} + +type Transition struct { + _ struct{} `type:"structure"` + + // Indicates at what date the object is to be moved or deleted. Should be in + // GMT ISO 8601 Format. 
+ Date *time.Time `type:"timestamp" timestampFormat:"iso8601"` + + // Indicates the lifetime, in days, of the objects that are subject to the rule. + // The value must be a non-zero positive integer. + Days *int64 `type:"integer"` + + // The class of storage used to store the object. + StorageClass *string `type:"string" enum:"TransitionStorageClass"` +} + +// String returns the string representation +func (s Transition) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Transition) GoString() string { + return s.String() +} + +type UploadPartCopyInput struct { + _ struct{} `type:"structure"` + + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // The name of the source bucket and key name of the source object, separated + // by a slash (/). Must be URL-encoded. + CopySource *string `location:"header" locationName:"x-amz-copy-source" type:"string" required:"true"` + + // Copies the object if its entity tag (ETag) matches the specified tag. + CopySourceIfMatch *string `location:"header" locationName:"x-amz-copy-source-if-match" type:"string"` + + // Copies the object if it has been modified since the specified time. + CopySourceIfModifiedSince *time.Time `location:"header" locationName:"x-amz-copy-source-if-modified-since" type:"timestamp" timestampFormat:"rfc822"` + + // Copies the object if its entity tag (ETag) is different than the specified + // ETag. + CopySourceIfNoneMatch *string `location:"header" locationName:"x-amz-copy-source-if-none-match" type:"string"` + + // Copies the object if it hasn't been modified since the specified time. + CopySourceIfUnmodifiedSince *time.Time `location:"header" locationName:"x-amz-copy-source-if-unmodified-since" type:"timestamp" timestampFormat:"rfc822"` + + // The range of bytes to copy from the source object. The range value must use + // the form bytes=first-last, where the first and last are the zero-based byte + // offsets to copy. For example, bytes=0-9 indicates that you want to copy the + // first ten bytes of the source. You can copy a range only if the source object + // is greater than 5 GB. + CopySourceRange *string `location:"header" locationName:"x-amz-copy-source-range" type:"string"` + + // Specifies the algorithm to use when decrypting the source object (e.g., AES256). + CopySourceSSECustomerAlgorithm *string `location:"header" locationName:"x-amz-copy-source-server-side-encryption-customer-algorithm" type:"string"` + + // Specifies the customer-provided encryption key for Amazon S3 to use to decrypt + // the source object. The encryption key provided in this header must be one + // that was used when the source object was created. + CopySourceSSECustomerKey *string `location:"header" locationName:"x-amz-copy-source-server-side-encryption-customer-key" type:"string"` + + // Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321. + // Amazon S3 uses this header for a message integrity check to ensure the encryption + // key was transmitted without error. + CopySourceSSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-copy-source-server-side-encryption-customer-key-MD5" type:"string"` + + Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"` + + // Part number of part being copied. This is a positive integer between 1 and + // 10,000. 
+	PartNumber *int64 `location:"querystring" locationName:"partNumber" type:"integer" required:"true"`
+
+	// Confirms that the requester knows that she or he will be charged for the
+	// request. Bucket owners need not specify this parameter in their requests.
+	// Documentation on downloading objects from requester pays buckets can be found
+	// at http://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html
+	RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"`
+
+	// Specifies the algorithm to use when encrypting the object (e.g., AES256).
+	SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"`
+
+	// Specifies the customer-provided encryption key for Amazon S3 to use in encrypting
+	// data. This value is used to store the object and then it is discarded; Amazon
+	// does not store the encryption key. The key must be appropriate for use with
+	// the algorithm specified in the x-amz-server-side-encryption-customer-algorithm
+	// header. This must be the same encryption key specified in the initiate multipart
+	// upload request.
+	SSECustomerKey *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key" type:"string"`
+
+	// Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321.
+	// Amazon S3 uses this header for a message integrity check to ensure the encryption
+	// key was transmitted without error.
+	SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"`
+
+	// Upload ID identifying the multipart upload whose part is being copied.
+	UploadId *string `location:"querystring" locationName:"uploadId" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s UploadPartCopyInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s UploadPartCopyInput) GoString() string {
+	return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *UploadPartCopyInput) Validate() error {
+	invalidParams := request.ErrInvalidParams{Context: "UploadPartCopyInput"}
+	if s.Bucket == nil {
+		invalidParams.Add(request.NewErrParamRequired("Bucket"))
+	}
+	if s.CopySource == nil {
+		invalidParams.Add(request.NewErrParamRequired("CopySource"))
+	}
+	if s.Key == nil {
+		invalidParams.Add(request.NewErrParamRequired("Key"))
+	}
+	if s.Key != nil && len(*s.Key) < 1 {
+		invalidParams.Add(request.NewErrParamMinLen("Key", 1))
+	}
+	if s.PartNumber == nil {
+		invalidParams.Add(request.NewErrParamRequired("PartNumber"))
+	}
+	if s.UploadId == nil {
+		invalidParams.Add(request.NewErrParamRequired("UploadId"))
+	}
+
+	if invalidParams.Len() > 0 {
+		return invalidParams
+	}
+	return nil
+}
+
+type UploadPartCopyOutput struct {
+	_ struct{} `type:"structure" payload:"CopyPartResult"`
+
+	CopyPartResult *CopyPartResult `type:"structure"`
+
+	// The version of the source object that was copied, if you have enabled versioning
+	// on the source bucket.
+	CopySourceVersionId *string `location:"header" locationName:"x-amz-copy-source-version-id" type:"string"`
+
+	// If present, indicates that the requester was successfully charged for the
+	// request.
+	RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"`
+
+	// If server-side encryption with a customer-provided encryption key was requested,
+	// the response will include this header confirming the encryption algorithm
+	// used.
+	SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"`
+
+	// If server-side encryption with a customer-provided encryption key was requested,
+	// the response will include this header to provide round trip message integrity
+	// verification of the customer-provided encryption key.
+	SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"`
+
+	// If present, specifies the ID of the AWS Key Management Service (KMS) master
+	// encryption key that was used for the object.
+	SSEKMSKeyId *string `location:"header" locationName:"x-amz-server-side-encryption-aws-kms-key-id" type:"string"`
+
+	// The Server-side encryption algorithm used when storing this object in S3
+	// (e.g., AES256, aws:kms).
+	ServerSideEncryption *string `location:"header" locationName:"x-amz-server-side-encryption" type:"string" enum:"ServerSideEncryption"`
+}
+
+// String returns the string representation
+func (s UploadPartCopyOutput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s UploadPartCopyOutput) GoString() string {
+	return s.String()
+}
+
+type UploadPartInput struct {
+	_ struct{} `type:"structure" payload:"Body"`
+
+	// Object data.
+	Body io.ReadSeeker `type:"blob"`
+
+	// Name of the bucket to which the multipart upload was initiated.
+	Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
+
+	// Size of the body in bytes. This parameter is useful when the size of the
+	// body cannot be determined automatically.
+	ContentLength *int64 `location:"header" locationName:"Content-Length" type:"long"`
+
+	// Object key for which the multipart upload was initiated.
+	Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"`
+
+	// Part number of part being uploaded. This is a positive integer between 1
+	// and 10,000.
+	PartNumber *int64 `location:"querystring" locationName:"partNumber" type:"integer" required:"true"`
+
+	// Confirms that the requester knows that she or he will be charged for the
+	// request. Bucket owners need not specify this parameter in their requests.
+	// Documentation on downloading objects from requester pays buckets can be found
+	// at http://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html
+	RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"`
+
+	// Specifies the algorithm to use when encrypting the object (e.g., AES256).
+	SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"`
+
+	// Specifies the customer-provided encryption key for Amazon S3 to use in encrypting
+	// data. This value is used to store the object and then it is discarded; Amazon
+	// does not store the encryption key. The key must be appropriate for use with
+	// the algorithm specified in the x-amz-server-side-encryption-customer-algorithm
+	// header. This must be the same encryption key specified in the initiate multipart
+	// upload request.
+ SSECustomerKey *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key" type:"string"` + + // Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321. + // Amazon S3 uses this header for a message integrity check to ensure the encryption + // key was transmitted without error. + SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"` + + // Upload ID identifying the multipart upload whose part is being uploaded. + UploadId *string `location:"querystring" locationName:"uploadId" type:"string" required:"true"` +} + +// String returns the string representation +func (s UploadPartInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UploadPartInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *UploadPartInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "UploadPartInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Key == nil { + invalidParams.Add(request.NewErrParamRequired("Key")) + } + if s.Key != nil && len(*s.Key) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Key", 1)) + } + if s.PartNumber == nil { + invalidParams.Add(request.NewErrParamRequired("PartNumber")) + } + if s.UploadId == nil { + invalidParams.Add(request.NewErrParamRequired("UploadId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type UploadPartOutput struct { + _ struct{} `type:"structure"` + + // Entity tag for the uploaded object. + ETag *string `location:"header" locationName:"ETag" type:"string"` + + // If present, indicates that the requester was successfully charged for the + // request. + RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"` + + // If server-side encryption with a customer-provided encryption key was requested, + // the response will include this header confirming the encryption algorithm + // used. + SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"` + + // If server-side encryption with a customer-provided encryption key was requested, + // the response will include this header to provide round trip message integrity + // verification of the customer-provided encryption key. + SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"` + + // If present, specifies the ID of the AWS Key Management Service (KMS) master + // encryption key that was used for the object. + SSEKMSKeyId *string `location:"header" locationName:"x-amz-server-side-encryption-aws-kms-key-id" type:"string"` + + // The Server-side encryption algorithm used when storing this object in S3 + // (e.g., AES256, aws:kms). + ServerSideEncryption *string `location:"header" locationName:"x-amz-server-side-encryption" type:"string" enum:"ServerSideEncryption"` +} + +// String returns the string representation +func (s UploadPartOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UploadPartOutput) GoString() string { + return s.String() +} + +type VersioningConfiguration struct { + _ struct{} `type:"structure"` + + // Specifies whether MFA delete is enabled in the bucket versioning configuration. 
+ // This element is only returned if the bucket has been configured with MFA + // delete. If the bucket has never been so configured, this element is not returned. + MFADelete *string `locationName:"MfaDelete" type:"string" enum:"MFADelete"` + + // The versioning state of the bucket. + Status *string `type:"string" enum:"BucketVersioningStatus"` +} + +// String returns the string representation +func (s VersioningConfiguration) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s VersioningConfiguration) GoString() string { + return s.String() +} + +type WebsiteConfiguration struct { + _ struct{} `type:"structure"` + + ErrorDocument *ErrorDocument `type:"structure"` + + IndexDocument *IndexDocument `type:"structure"` + + RedirectAllRequestsTo *RedirectAllRequestsTo `type:"structure"` + + RoutingRules []*RoutingRule `locationNameList:"RoutingRule" type:"list"` +} + +// String returns the string representation +func (s WebsiteConfiguration) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s WebsiteConfiguration) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *WebsiteConfiguration) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "WebsiteConfiguration"} + if s.ErrorDocument != nil { + if err := s.ErrorDocument.Validate(); err != nil { + invalidParams.AddNested("ErrorDocument", err.(request.ErrInvalidParams)) + } + } + if s.IndexDocument != nil { + if err := s.IndexDocument.Validate(); err != nil { + invalidParams.AddNested("IndexDocument", err.(request.ErrInvalidParams)) + } + } + if s.RedirectAllRequestsTo != nil { + if err := s.RedirectAllRequestsTo.Validate(); err != nil { + invalidParams.AddNested("RedirectAllRequestsTo", err.(request.ErrInvalidParams)) + } + } + if s.RoutingRules != nil { + for i, v := range s.RoutingRules { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "RoutingRules", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +const ( + // @enum BucketAccelerateStatus + BucketAccelerateStatusEnabled = "Enabled" + // @enum BucketAccelerateStatus + BucketAccelerateStatusSuspended = "Suspended" +) + +const ( + // @enum BucketCannedACL + BucketCannedACLPrivate = "private" + // @enum BucketCannedACL + BucketCannedACLPublicRead = "public-read" + // @enum BucketCannedACL + BucketCannedACLPublicReadWrite = "public-read-write" + // @enum BucketCannedACL + BucketCannedACLAuthenticatedRead = "authenticated-read" +) + +const ( + // @enum BucketLocationConstraint + BucketLocationConstraintEu = "EU" + // @enum BucketLocationConstraint + BucketLocationConstraintEuWest1 = "eu-west-1" + // @enum BucketLocationConstraint + BucketLocationConstraintUsWest1 = "us-west-1" + // @enum BucketLocationConstraint + BucketLocationConstraintUsWest2 = "us-west-2" + // @enum BucketLocationConstraint + BucketLocationConstraintApSouth1 = "ap-south-1" + // @enum BucketLocationConstraint + BucketLocationConstraintApSoutheast1 = "ap-southeast-1" + // @enum BucketLocationConstraint + BucketLocationConstraintApSoutheast2 = "ap-southeast-2" + // @enum BucketLocationConstraint + BucketLocationConstraintApNortheast1 = "ap-northeast-1" + // @enum BucketLocationConstraint + BucketLocationConstraintSaEast1 = "sa-east-1" + // @enum BucketLocationConstraint + 
BucketLocationConstraintCnNorth1 = "cn-north-1" + // @enum BucketLocationConstraint + BucketLocationConstraintEuCentral1 = "eu-central-1" +) + +const ( + // @enum BucketLogsPermission + BucketLogsPermissionFullControl = "FULL_CONTROL" + // @enum BucketLogsPermission + BucketLogsPermissionRead = "READ" + // @enum BucketLogsPermission + BucketLogsPermissionWrite = "WRITE" +) + +const ( + // @enum BucketVersioningStatus + BucketVersioningStatusEnabled = "Enabled" + // @enum BucketVersioningStatus + BucketVersioningStatusSuspended = "Suspended" +) + +// Requests Amazon S3 to encode the object keys in the response and specifies +// the encoding method to use. An object key may contain any Unicode character; +// however, XML 1.0 parser cannot parse some characters, such as characters +// with an ASCII value from 0 to 10. For characters that are not supported in +// XML 1.0, you can add this parameter to request that Amazon S3 encode the +// keys in the response. +const ( + // @enum EncodingType + EncodingTypeUrl = "url" +) + +// Bucket event for which to send notifications. +const ( + // @enum Event + EventS3ReducedRedundancyLostObject = "s3:ReducedRedundancyLostObject" + // @enum Event + EventS3ObjectCreated = "s3:ObjectCreated:*" + // @enum Event + EventS3ObjectCreatedPut = "s3:ObjectCreated:Put" + // @enum Event + EventS3ObjectCreatedPost = "s3:ObjectCreated:Post" + // @enum Event + EventS3ObjectCreatedCopy = "s3:ObjectCreated:Copy" + // @enum Event + EventS3ObjectCreatedCompleteMultipartUpload = "s3:ObjectCreated:CompleteMultipartUpload" + // @enum Event + EventS3ObjectRemoved = "s3:ObjectRemoved:*" + // @enum Event + EventS3ObjectRemovedDelete = "s3:ObjectRemoved:Delete" + // @enum Event + EventS3ObjectRemovedDeleteMarkerCreated = "s3:ObjectRemoved:DeleteMarkerCreated" +) + +const ( + // @enum ExpirationStatus + ExpirationStatusEnabled = "Enabled" + // @enum ExpirationStatus + ExpirationStatusDisabled = "Disabled" +) + +const ( + // @enum FilterRuleName + FilterRuleNamePrefix = "prefix" + // @enum FilterRuleName + FilterRuleNameSuffix = "suffix" +) + +const ( + // @enum MFADelete + MFADeleteEnabled = "Enabled" + // @enum MFADelete + MFADeleteDisabled = "Disabled" +) + +const ( + // @enum MFADeleteStatus + MFADeleteStatusEnabled = "Enabled" + // @enum MFADeleteStatus + MFADeleteStatusDisabled = "Disabled" +) + +const ( + // @enum MetadataDirective + MetadataDirectiveCopy = "COPY" + // @enum MetadataDirective + MetadataDirectiveReplace = "REPLACE" +) + +const ( + // @enum ObjectCannedACL + ObjectCannedACLPrivate = "private" + // @enum ObjectCannedACL + ObjectCannedACLPublicRead = "public-read" + // @enum ObjectCannedACL + ObjectCannedACLPublicReadWrite = "public-read-write" + // @enum ObjectCannedACL + ObjectCannedACLAuthenticatedRead = "authenticated-read" + // @enum ObjectCannedACL + ObjectCannedACLAwsExecRead = "aws-exec-read" + // @enum ObjectCannedACL + ObjectCannedACLBucketOwnerRead = "bucket-owner-read" + // @enum ObjectCannedACL + ObjectCannedACLBucketOwnerFullControl = "bucket-owner-full-control" +) + +const ( + // @enum ObjectStorageClass + ObjectStorageClassStandard = "STANDARD" + // @enum ObjectStorageClass + ObjectStorageClassReducedRedundancy = "REDUCED_REDUNDANCY" + // @enum ObjectStorageClass + ObjectStorageClassGlacier = "GLACIER" +) + +const ( + // @enum ObjectVersionStorageClass + ObjectVersionStorageClassStandard = "STANDARD" +) + +const ( + // @enum Payer + PayerRequester = "Requester" + // @enum Payer + PayerBucketOwner = "BucketOwner" +) + +const ( + // @enum Permission 
+ PermissionFullControl = "FULL_CONTROL" + // @enum Permission + PermissionWrite = "WRITE" + // @enum Permission + PermissionWriteAcp = "WRITE_ACP" + // @enum Permission + PermissionRead = "READ" + // @enum Permission + PermissionReadAcp = "READ_ACP" +) + +const ( + // @enum Protocol + ProtocolHttp = "http" + // @enum Protocol + ProtocolHttps = "https" +) + +const ( + // @enum ReplicationRuleStatus + ReplicationRuleStatusEnabled = "Enabled" + // @enum ReplicationRuleStatus + ReplicationRuleStatusDisabled = "Disabled" +) + +const ( + // @enum ReplicationStatus + ReplicationStatusComplete = "COMPLETE" + // @enum ReplicationStatus + ReplicationStatusPending = "PENDING" + // @enum ReplicationStatus + ReplicationStatusFailed = "FAILED" + // @enum ReplicationStatus + ReplicationStatusReplica = "REPLICA" +) + +// If present, indicates that the requester was successfully charged for the +// request. +const ( + // @enum RequestCharged + RequestChargedRequester = "requester" +) + +// Confirms that the requester knows that she or he will be charged for the +// request. Bucket owners need not specify this parameter in their requests. +// Documentation on downloading objects from requester pays buckets can be found +// at http://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html +const ( + // @enum RequestPayer + RequestPayerRequester = "requester" +) + +const ( + // @enum ServerSideEncryption + ServerSideEncryptionAes256 = "AES256" + // @enum ServerSideEncryption + ServerSideEncryptionAwsKms = "aws:kms" +) + +const ( + // @enum StorageClass + StorageClassStandard = "STANDARD" + // @enum StorageClass + StorageClassReducedRedundancy = "REDUCED_REDUNDANCY" + // @enum StorageClass + StorageClassStandardIa = "STANDARD_IA" +) + +const ( + // @enum TransitionStorageClass + TransitionStorageClassGlacier = "GLACIER" + // @enum TransitionStorageClass + TransitionStorageClassStandardIa = "STANDARD_IA" +) + +const ( + // @enum Type + TypeCanonicalUser = "CanonicalUser" + // @enum Type + TypeAmazonCustomerByEmail = "AmazonCustomerByEmail" + // @enum Type + TypeGroup = "Group" +) diff --git a/vendor/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/service/s3/bucket_location.go b/vendor/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/service/s3/bucket_location.go new file mode 100644 index 000000000..c3a2702da --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/service/s3/bucket_location.go @@ -0,0 +1,43 @@ +package s3 + +import ( + "io/ioutil" + "regexp" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/aws/awsutil" + "github.com/aws/aws-sdk-go/aws/request" +) + +var reBucketLocation = regexp.MustCompile(`>([^<>]+)<\/Location`) + +func buildGetBucketLocation(r *request.Request) { + if r.DataFilled() { + out := r.Data.(*GetBucketLocationOutput) + b, err := ioutil.ReadAll(r.HTTPResponse.Body) + if err != nil { + r.Error = awserr.New("SerializationError", "failed reading response body", err) + return + } + + match := reBucketLocation.FindSubmatch(b) + if len(match) > 1 { + loc := string(match[1]) + out.LocationConstraint = &loc + } + } +} + +func populateLocationConstraint(r *request.Request) { + if r.ParamsFilled() && aws.StringValue(r.Config.Region) != "us-east-1" { + in := r.Params.(*CreateBucketInput) + if in.CreateBucketConfiguration == nil { + r.Params = awsutil.CopyOf(r.Params) + in = r.Params.(*CreateBucketInput) + in.CreateBucketConfiguration = 
&CreateBucketConfiguration{ + LocationConstraint: r.Config.Region, + } + } + } +} diff --git a/vendor/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/service/s3/content_md5.go b/vendor/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/service/s3/content_md5.go new file mode 100644 index 000000000..9fc5df94d --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/service/s3/content_md5.go @@ -0,0 +1,36 @@ +package s3 + +import ( + "crypto/md5" + "encoding/base64" + "io" + + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/aws/request" +) + +// contentMD5 computes and sets the HTTP Content-MD5 header for requests that +// require it. +func contentMD5(r *request.Request) { + h := md5.New() + + // hash the body. seek back to the first position after reading to reset + // the body for transmission. copy errors may be assumed to be from the + // body. + _, err := io.Copy(h, r.Body) + if err != nil { + r.Error = awserr.New("ContentMD5", "failed to read body", err) + return + } + _, err = r.Body.Seek(0, 0) + if err != nil { + r.Error = awserr.New("ContentMD5", "failed to seek body", err) + return + } + + // encode the md5 checksum in base64 and set the request header. + sum := h.Sum(nil) + sum64 := make([]byte, base64.StdEncoding.EncodedLen(len(sum))) + base64.StdEncoding.Encode(sum64, sum) + r.HTTPRequest.Header.Set("Content-MD5", string(sum64)) +} diff --git a/vendor/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/service/s3/customizations.go b/vendor/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/service/s3/customizations.go new file mode 100644 index 000000000..846334723 --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/service/s3/customizations.go @@ -0,0 +1,46 @@ +package s3 + +import ( + "github.com/aws/aws-sdk-go/aws/client" + "github.com/aws/aws-sdk-go/aws/request" +) + +func init() { + initClient = defaultInitClientFn + initRequest = defaultInitRequestFn +} + +func defaultInitClientFn(c *client.Client) { + // Support building custom endpoints based on config + c.Handlers.Build.PushFront(updateEndpointForS3Config) + + // Require SSL when using SSE keys + c.Handlers.Validate.PushBack(validateSSERequiresSSL) + c.Handlers.Build.PushBack(computeSSEKeys) + + // S3 uses custom error unmarshaling logic + c.Handlers.UnmarshalError.Clear() + c.Handlers.UnmarshalError.PushBack(unmarshalError) +} + +func defaultInitRequestFn(r *request.Request) { + // Add reuest handlers for specific platforms. + // e.g. 
100-continue support for PUT requests using Go 1.6 + platformRequestHandlers(r) + + switch r.Operation.Name { + case opPutBucketCors, opPutBucketLifecycle, opPutBucketPolicy, + opPutBucketTagging, opDeleteObjects, opPutBucketLifecycleConfiguration, + opPutBucketReplication: + // These S3 operations require Content-MD5 to be set + r.Handlers.Build.PushBack(contentMD5) + case opGetBucketLocation: + // GetBucketLocation has custom parsing logic + r.Handlers.Unmarshal.PushFront(buildGetBucketLocation) + case opCreateBucket: + // Auto-populate LocationConstraint with current region + r.Handlers.Validate.PushFront(populateLocationConstraint) + case opCopyObject, opUploadPartCopy, opCompleteMultipartUpload: + r.Handlers.Unmarshal.PushFront(copyMultipartStatusOKUnmarhsalError) + } +} diff --git a/vendor/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/service/s3/host_style_bucket.go b/vendor/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/service/s3/host_style_bucket.go new file mode 100644 index 000000000..517292903 --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/service/s3/host_style_bucket.go @@ -0,0 +1,165 @@ +package s3 + +import ( + "fmt" + "net/url" + "regexp" + "strings" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/aws/awsutil" + "github.com/aws/aws-sdk-go/aws/request" +) + +// an operationBlacklist is a list of operation names that should a +// request handler should not be executed with. +type operationBlacklist []string + +// Continue will return true of the Request's operation name is not +// in the blacklist. False otherwise. +func (b operationBlacklist) Continue(r *request.Request) bool { + for i := 0; i < len(b); i++ { + if b[i] == r.Operation.Name { + return false + } + } + return true +} + +var accelerateOpBlacklist = operationBlacklist{ + opListBuckets, opCreateBucket, opDeleteBucket, +} + +// Request handler to automatically add the bucket name to the endpoint domain +// if possible. This style of bucket is valid for all bucket names which are +// DNS compatible and do not contain "." +func updateEndpointForS3Config(r *request.Request) { + forceHostStyle := aws.BoolValue(r.Config.S3ForcePathStyle) + accelerate := aws.BoolValue(r.Config.S3UseAccelerate) + + if accelerate && accelerateOpBlacklist.Continue(r) { + if forceHostStyle { + if r.Config.Logger != nil { + r.Config.Logger.Log("ERROR: aws.Config.S3UseAccelerate is not compatible with aws.Config.S3ForcePathStyle, ignoring S3ForcePathStyle.") + } + } + updateEndpointForAccelerate(r) + } else if !forceHostStyle && r.Operation.Name != opGetBucketLocation { + updateEndpointForHostStyle(r) + } +} + +func updateEndpointForHostStyle(r *request.Request) { + bucket, ok := bucketNameFromReqParams(r.Params) + if !ok { + // Ignore operation requests if the bucketname was not provided + // if this is an input validation error the validation handler + // will report it. + return + } + + if !hostCompatibleBucketName(r.HTTPRequest.URL, bucket) { + // bucket name must be valid to put into the host + return + } + + moveBucketToHost(r.HTTPRequest.URL, bucket) +} + +func updateEndpointForAccelerate(r *request.Request) { + bucket, ok := bucketNameFromReqParams(r.Params) + if !ok { + // Ignore operation requests if the bucketname was not provided + // if this is an input validation error the validation handler + // will report it. 
+ return + } + + if !hostCompatibleBucketName(r.HTTPRequest.URL, bucket) { + r.Error = awserr.New("InvalidParameterException", + fmt.Sprintf("bucket name %s is not compatibile with S3 Accelerate", bucket), + nil) + return + } + + // Change endpoint from s3(-[a-z0-1-])?.amazonaws.com to s3-accelerate.amazonaws.com + r.HTTPRequest.URL.Host = replaceHostRegion(r.HTTPRequest.URL.Host, "accelerate") + moveBucketToHost(r.HTTPRequest.URL, bucket) +} + +// Attempts to retrieve the bucket name from the request input parameters. +// If no bucket is found, or the field is empty "", false will be returned. +func bucketNameFromReqParams(params interface{}) (string, bool) { + b, _ := awsutil.ValuesAtPath(params, "Bucket") + if len(b) == 0 { + return "", false + } + + if bucket, ok := b[0].(*string); ok { + if bucketStr := aws.StringValue(bucket); bucketStr != "" { + return bucketStr, true + } + } + + return "", false +} + +// hostCompatibleBucketName returns true if the request should +// put the bucket in the host. This is false if S3ForcePathStyle is +// explicitly set or if the bucket is not DNS compatible. +func hostCompatibleBucketName(u *url.URL, bucket string) bool { + // Bucket might be DNS compatible but dots in the hostname will fail + // certificate validation, so do not use host-style. + if u.Scheme == "https" && strings.Contains(bucket, ".") { + return false + } + + // if the bucket is DNS compatible + return dnsCompatibleBucketName(bucket) +} + +var reDomain = regexp.MustCompile(`^[a-z0-9][a-z0-9\.\-]{1,61}[a-z0-9]$`) +var reIPAddress = regexp.MustCompile(`^(\d+\.){3}\d+$`) + +// dnsCompatibleBucketName returns true if the bucket name is DNS compatible. +// Buckets created outside of the classic region MUST be DNS compatible. +func dnsCompatibleBucketName(bucket string) bool { + return reDomain.MatchString(bucket) && + !reIPAddress.MatchString(bucket) && + !strings.Contains(bucket, "..") +} + +// moveBucketToHost moves the bucket name from the URI path to URL host. +func moveBucketToHost(u *url.URL, bucket string) { + u.Host = bucket + "." + u.Host + u.Path = strings.Replace(u.Path, "/{Bucket}", "", -1) + if u.Path == "" { + u.Path = "/" + } +} + +const s3HostPrefix = "s3" + +// replaceHostRegion replaces the S3 region string in the host with the +// value provided. If v is empty the host prefix returned will be s3. +func replaceHostRegion(host, v string) string { + if !strings.HasPrefix(host, s3HostPrefix) { + return host + } + + suffix := host[len(s3HostPrefix):] + for i := len(s3HostPrefix); i < len(host); i++ { + if host[i] == '.' { + // Trim until '.' leave the it in place. 
+ suffix = host[i:] + break + } + } + + if len(v) == 0 { + return fmt.Sprintf("s3%s", suffix) + } + + return fmt.Sprintf("s3-%s%s", v, suffix) +} diff --git a/vendor/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/service/s3/platform_handlers.go b/vendor/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/service/s3/platform_handlers.go new file mode 100644 index 000000000..8e6f3307d --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/service/s3/platform_handlers.go @@ -0,0 +1,8 @@ +// +build !go1.6 + +package s3 + +import "github.com/aws/aws-sdk-go/aws/request" + +func platformRequestHandlers(r *request.Request) { +} diff --git a/vendor/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/service/s3/platform_handlers_go1.6.go b/vendor/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/service/s3/platform_handlers_go1.6.go new file mode 100644 index 000000000..14d05f7b7 --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/service/s3/platform_handlers_go1.6.go @@ -0,0 +1,28 @@ +// +build go1.6 + +package s3 + +import ( + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/request" +) + +func platformRequestHandlers(r *request.Request) { + if r.Operation.HTTPMethod == "PUT" { + // 100-Continue should only be used on put requests. + r.Handlers.Sign.PushBack(add100Continue) + } +} + +func add100Continue(r *request.Request) { + if aws.BoolValue(r.Config.S3Disable100Continue) { + return + } + if r.HTTPRequest.ContentLength < 1024*1024*2 { + // Ignore requests smaller than 2MB. This helps prevent delaying + // requests unnecessarily. + return + } + + r.HTTPRequest.Header.Set("Expect", "100-Continue") +} diff --git a/vendor/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/service/s3/service.go b/vendor/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/service/s3/service.go new file mode 100644 index 000000000..5833952a2 --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/service/s3/service.go @@ -0,0 +1,86 @@ +// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. + +package s3 + +import ( + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/client" + "github.com/aws/aws-sdk-go/aws/client/metadata" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/aws/signer/v4" + "github.com/aws/aws-sdk-go/private/protocol/restxml" +) + +// S3 is a client for Amazon S3. +//The service client's operations are safe to be used concurrently. +// It is not safe to mutate any of the client's properties though. +type S3 struct { + *client.Client +} + +// Used for custom client initialization logic +var initClient func(*client.Client) + +// Used for custom request initialization logic +var initRequest func(*request.Request) + +// A ServiceName is the name of the service the client will make API calls to. +const ServiceName = "s3" + +// New creates a new instance of the S3 client with a session. +// If additional configuration is needed for the client instance use the optional +// aws.Config parameter to add your extra config. +// +// Example: +// // Create a S3 client from just a session. +// svc := s3.New(mySession) +// +// // Create a S3 client with additional configuration +// svc := s3.New(mySession, aws.NewConfig().WithRegion("us-west-2")) +func New(p client.ConfigProvider, cfgs ...*aws.Config) *S3 { + c := p.ClientConfig(ServiceName, cfgs...) 
+ return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion) +} + +// newClient creates, initializes and returns a new service client instance. +func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *S3 { + svc := &S3{ + Client: client.New( + cfg, + metadata.ClientInfo{ + ServiceName: ServiceName, + SigningRegion: signingRegion, + Endpoint: endpoint, + APIVersion: "2006-03-01", + }, + handlers, + ), + } + + // Handlers + svc.Handlers.Sign.PushBackNamed(v4.SignRequestHandler) + svc.Handlers.Build.PushBackNamed(restxml.BuildHandler) + svc.Handlers.Unmarshal.PushBackNamed(restxml.UnmarshalHandler) + svc.Handlers.UnmarshalMeta.PushBackNamed(restxml.UnmarshalMetaHandler) + svc.Handlers.UnmarshalError.PushBackNamed(restxml.UnmarshalErrorHandler) + + // Run custom client initialization if present + if initClient != nil { + initClient(svc.Client) + } + + return svc +} + +// newRequest creates a new request for a S3 operation and runs any +// custom request initialization. +func (c *S3) newRequest(op *request.Operation, params, data interface{}) *request.Request { + req := c.NewRequest(op, params, data) + + // Run custom request initialization if present + if initRequest != nil { + initRequest(req) + } + + return req +} diff --git a/vendor/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/service/s3/sse.go b/vendor/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/service/s3/sse.go new file mode 100644 index 000000000..268ea2fb4 --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/service/s3/sse.go @@ -0,0 +1,44 @@ +package s3 + +import ( + "crypto/md5" + "encoding/base64" + + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/aws/awsutil" + "github.com/aws/aws-sdk-go/aws/request" +) + +var errSSERequiresSSL = awserr.New("ConfigError", "cannot send SSE keys over HTTP.", nil) + +func validateSSERequiresSSL(r *request.Request) { + if r.HTTPRequest.URL.Scheme != "https" { + p, _ := awsutil.ValuesAtPath(r.Params, "SSECustomerKey||CopySourceSSECustomerKey") + if len(p) > 0 { + r.Error = errSSERequiresSSL + } + } +} + +func computeSSEKeys(r *request.Request) { + headers := []string{ + "x-amz-server-side-encryption-customer-key", + "x-amz-copy-source-server-side-encryption-customer-key", + } + + for _, h := range headers { + md5h := h + "-md5" + if key := r.HTTPRequest.Header.Get(h); key != "" { + // Base64-encode the value + b64v := base64.StdEncoding.EncodeToString([]byte(key)) + r.HTTPRequest.Header.Set(h, b64v) + + // Add MD5 if it wasn't computed + if r.HTTPRequest.Header.Get(md5h) == "" { + sum := md5.Sum([]byte(key)) + b64sum := base64.StdEncoding.EncodeToString(sum[:]) + r.HTTPRequest.Header.Set(md5h, b64sum) + } + } + } +} diff --git a/vendor/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/service/s3/statusok_error.go b/vendor/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/service/s3/statusok_error.go new file mode 100644 index 000000000..ce65fcdaf --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/service/s3/statusok_error.go @@ -0,0 +1,36 @@ +package s3 + +import ( + "bytes" + "io/ioutil" + "net/http" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/aws/request" +) + +func copyMultipartStatusOKUnmarhsalError(r *request.Request) { + b, err := ioutil.ReadAll(r.HTTPResponse.Body) + if err != nil { + r.Error = awserr.New("SerializationError", 
"unable to read response body", err) + return + } + body := bytes.NewReader(b) + r.HTTPResponse.Body = aws.ReadSeekCloser(body) + defer r.HTTPResponse.Body.(aws.ReaderSeekerCloser).Seek(0, 0) + + if body.Len() == 0 { + // If there is no body don't attempt to parse the body. + return + } + + unmarshalError(r) + if err, ok := r.Error.(awserr.Error); ok && err != nil { + if err.Code() == "SerializationError" { + r.Error = nil + return + } + r.HTTPResponse.StatusCode = http.StatusServiceUnavailable + } +} diff --git a/vendor/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/service/s3/unmarshal_error.go b/vendor/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/service/s3/unmarshal_error.go new file mode 100644 index 000000000..59e4950b8 --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/service/s3/unmarshal_error.go @@ -0,0 +1,59 @@ +package s3 + +import ( + "encoding/xml" + "fmt" + "io" + "io/ioutil" + "net/http" + "strings" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/aws/request" +) + +type xmlErrorResponse struct { + XMLName xml.Name `xml:"Error"` + Code string `xml:"Code"` + Message string `xml:"Message"` +} + +func unmarshalError(r *request.Request) { + defer r.HTTPResponse.Body.Close() + defer io.Copy(ioutil.Discard, r.HTTPResponse.Body) + + if r.HTTPResponse.StatusCode == http.StatusMovedPermanently { + r.Error = awserr.NewRequestFailure( + awserr.New("BucketRegionError", + fmt.Sprintf("incorrect region, the bucket is not in '%s' region", + aws.StringValue(r.Config.Region)), + nil), + r.HTTPResponse.StatusCode, + r.RequestID, + ) + return + } + + if r.HTTPResponse.ContentLength == 0 { + // No body, use status code to generate an awserr.Error + r.Error = awserr.NewRequestFailure( + awserr.New(strings.Replace(r.HTTPResponse.Status, " ", "", -1), r.HTTPResponse.Status, nil), + r.HTTPResponse.StatusCode, + r.RequestID, + ) + return + } + + resp := &xmlErrorResponse{} + err := xml.NewDecoder(r.HTTPResponse.Body).Decode(resp) + if err != nil && err != io.EOF { + r.Error = awserr.New("SerializationError", "failed to decode S3 XML error response", nil) + } else { + r.Error = awserr.NewRequestFailure( + awserr.New(resp.Code, resp.Message, nil), + r.HTTPResponse.StatusCode, + r.RequestID, + ) + } +} diff --git a/vendor/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/service/s3/waiters.go b/vendor/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/service/s3/waiters.go new file mode 100644 index 000000000..cbd3d3116 --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/service/s3/waiters.go @@ -0,0 +1,123 @@ +// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. 
+ +package s3 + +import ( + "github.com/aws/aws-sdk-go/private/waiter" +) + +func (c *S3) WaitUntilBucketExists(input *HeadBucketInput) error { + waiterCfg := waiter.Config{ + Operation: "HeadBucket", + Delay: 5, + MaxAttempts: 20, + Acceptors: []waiter.WaitAcceptor{ + { + State: "success", + Matcher: "status", + Argument: "", + Expected: 200, + }, + { + State: "success", + Matcher: "status", + Argument: "", + Expected: 301, + }, + { + State: "success", + Matcher: "status", + Argument: "", + Expected: 403, + }, + { + State: "retry", + Matcher: "status", + Argument: "", + Expected: 404, + }, + }, + } + + w := waiter.Waiter{ + Client: c, + Input: input, + Config: waiterCfg, + } + return w.Wait() +} + +func (c *S3) WaitUntilBucketNotExists(input *HeadBucketInput) error { + waiterCfg := waiter.Config{ + Operation: "HeadBucket", + Delay: 5, + MaxAttempts: 20, + Acceptors: []waiter.WaitAcceptor{ + { + State: "success", + Matcher: "status", + Argument: "", + Expected: 404, + }, + }, + } + + w := waiter.Waiter{ + Client: c, + Input: input, + Config: waiterCfg, + } + return w.Wait() +} + +func (c *S3) WaitUntilObjectExists(input *HeadObjectInput) error { + waiterCfg := waiter.Config{ + Operation: "HeadObject", + Delay: 5, + MaxAttempts: 20, + Acceptors: []waiter.WaitAcceptor{ + { + State: "success", + Matcher: "status", + Argument: "", + Expected: 200, + }, + { + State: "retry", + Matcher: "status", + Argument: "", + Expected: 404, + }, + }, + } + + w := waiter.Waiter{ + Client: c, + Input: input, + Config: waiterCfg, + } + return w.Wait() +} + +func (c *S3) WaitUntilObjectNotExists(input *HeadObjectInput) error { + waiterCfg := waiter.Config{ + Operation: "HeadObject", + Delay: 5, + MaxAttempts: 20, + Acceptors: []waiter.WaitAcceptor{ + { + State: "success", + Matcher: "status", + Argument: "", + Expected: 404, + }, + }, + } + + w := waiter.Waiter{ + Client: c, + Input: input, + Config: waiterCfg, + } + return w.Wait() +} diff --git a/vendor/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/vendor/github.com/go-ini/ini/.gitignore b/vendor/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/vendor/github.com/go-ini/ini/.gitignore new file mode 100644 index 000000000..7adca9439 --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/vendor/github.com/go-ini/ini/.gitignore @@ -0,0 +1,4 @@ +testdata/conf_out.ini +ini.sublime-project +ini.sublime-workspace +testdata/conf_reflect.ini diff --git a/vendor/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/vendor/github.com/go-ini/ini/LICENSE b/vendor/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/vendor/github.com/go-ini/ini/LICENSE new file mode 100644 index 000000000..37ec93a14 --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/vendor/github.com/go-ini/ini/LICENSE @@ -0,0 +1,191 @@ +Apache License +Version 2.0, January 2004 +http://www.apache.org/licenses/ + +TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + +1. Definitions. + +"License" shall mean the terms and conditions for use, reproduction, and +distribution as defined by Sections 1 through 9 of this document. + +"Licensor" shall mean the copyright owner or entity authorized by the copyright +owner that is granting the License. + +"Legal Entity" shall mean the union of the acting entity and all other entities +that control, are controlled by, or are under common control with that entity. 
+For the purposes of this definition, "control" means (i) the power, direct or +indirect, to cause the direction or management of such entity, whether by +contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the +outstanding shares, or (iii) beneficial ownership of such entity. + +"You" (or "Your") shall mean an individual or Legal Entity exercising +permissions granted by this License. + +"Source" form shall mean the preferred form for making modifications, including +but not limited to software source code, documentation source, and configuration +files. + +"Object" form shall mean any form resulting from mechanical transformation or +translation of a Source form, including but not limited to compiled object code, +generated documentation, and conversions to other media types. + +"Work" shall mean the work of authorship, whether in Source or Object form, made +available under the License, as indicated by a copyright notice that is included +in or attached to the work (an example is provided in the Appendix below). + +"Derivative Works" shall mean any work, whether in Source or Object form, that +is based on (or derived from) the Work and for which the editorial revisions, +annotations, elaborations, or other modifications represent, as a whole, an +original work of authorship. For the purposes of this License, Derivative Works +shall not include works that remain separable from, or merely link (or bind by +name) to the interfaces of, the Work and Derivative Works thereof. + +"Contribution" shall mean any work of authorship, including the original version +of the Work and any modifications or additions to that Work or Derivative Works +thereof, that is intentionally submitted to Licensor for inclusion in the Work +by the copyright owner or by an individual or Legal Entity authorized to submit +on behalf of the copyright owner. For the purposes of this definition, +"submitted" means any form of electronic, verbal, or written communication sent +to the Licensor or its representatives, including but not limited to +communication on electronic mailing lists, source code control systems, and +issue tracking systems that are managed by, or on behalf of, the Licensor for +the purpose of discussing and improving the Work, but excluding communication +that is conspicuously marked or otherwise designated in writing by the copyright +owner as "Not a Contribution." + +"Contributor" shall mean Licensor and any individual or Legal Entity on behalf +of whom a Contribution has been received by Licensor and subsequently +incorporated within the Work. + +2. Grant of Copyright License. + +Subject to the terms and conditions of this License, each Contributor hereby +grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, +irrevocable copyright license to reproduce, prepare Derivative Works of, +publicly display, publicly perform, sublicense, and distribute the Work and such +Derivative Works in Source or Object form. + +3. Grant of Patent License. 
+ +Subject to the terms and conditions of this License, each Contributor hereby +grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, +irrevocable (except as stated in this section) patent license to make, have +made, use, offer to sell, sell, import, and otherwise transfer the Work, where +such license applies only to those patent claims licensable by such Contributor +that are necessarily infringed by their Contribution(s) alone or by combination +of their Contribution(s) with the Work to which such Contribution(s) was +submitted. If You institute patent litigation against any entity (including a +cross-claim or counterclaim in a lawsuit) alleging that the Work or a +Contribution incorporated within the Work constitutes direct or contributory +patent infringement, then any patent licenses granted to You under this License +for that Work shall terminate as of the date such litigation is filed. + +4. Redistribution. + +You may reproduce and distribute copies of the Work or Derivative Works thereof +in any medium, with or without modifications, and in Source or Object form, +provided that You meet the following conditions: + +You must give any other recipients of the Work or Derivative Works a copy of +this License; and +You must cause any modified files to carry prominent notices stating that You +changed the files; and +You must retain, in the Source form of any Derivative Works that You distribute, +all copyright, patent, trademark, and attribution notices from the Source form +of the Work, excluding those notices that do not pertain to any part of the +Derivative Works; and +If the Work includes a "NOTICE" text file as part of its distribution, then any +Derivative Works that You distribute must include a readable copy of the +attribution notices contained within such NOTICE file, excluding those notices +that do not pertain to any part of the Derivative Works, in at least one of the +following places: within a NOTICE text file distributed as part of the +Derivative Works; within the Source form or documentation, if provided along +with the Derivative Works; or, within a display generated by the Derivative +Works, if and wherever such third-party notices normally appear. The contents of +the NOTICE file are for informational purposes only and do not modify the +License. You may add Your own attribution notices within Derivative Works that +You distribute, alongside or as an addendum to the NOTICE text from the Work, +provided that such additional attribution notices cannot be construed as +modifying the License. +You may add Your own copyright statement to Your modifications and may provide +additional or different license terms and conditions for use, reproduction, or +distribution of Your modifications, or for any such Derivative Works as a whole, +provided Your use, reproduction, and distribution of the Work otherwise complies +with the conditions stated in this License. + +5. Submission of Contributions. + +Unless You explicitly state otherwise, any Contribution intentionally submitted +for inclusion in the Work by You to the Licensor shall be under the terms and +conditions of this License, without any additional terms or conditions. +Notwithstanding the above, nothing herein shall supersede or modify the terms of +any separate license agreement you may have executed with Licensor regarding +such Contributions. + +6. Trademarks. 
+ +This License does not grant permission to use the trade names, trademarks, +service marks, or product names of the Licensor, except as required for +reasonable and customary use in describing the origin of the Work and +reproducing the content of the NOTICE file. + +7. Disclaimer of Warranty. + +Unless required by applicable law or agreed to in writing, Licensor provides the +Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, +including, without limitation, any warranties or conditions of TITLE, +NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are +solely responsible for determining the appropriateness of using or +redistributing the Work and assume any risks associated with Your exercise of +permissions under this License. + +8. Limitation of Liability. + +In no event and under no legal theory, whether in tort (including negligence), +contract, or otherwise, unless required by applicable law (such as deliberate +and grossly negligent acts) or agreed to in writing, shall any Contributor be +liable to You for damages, including any direct, indirect, special, incidental, +or consequential damages of any character arising as a result of this License or +out of the use or inability to use the Work (including but not limited to +damages for loss of goodwill, work stoppage, computer failure or malfunction, or +any and all other commercial damages or losses), even if such Contributor has +been advised of the possibility of such damages. + +9. Accepting Warranty or Additional Liability. + +While redistributing the Work or Derivative Works thereof, You may choose to +offer, and charge a fee for, acceptance of support, warranty, indemnity, or +other liability obligations and/or rights consistent with this License. However, +in accepting such obligations, You may act only on Your own behalf and on Your +sole responsibility, not on behalf of any other Contributor, and only if You +agree to indemnify, defend, and hold each Contributor harmless for any liability +incurred by, or claims asserted against, such Contributor by reason of your +accepting any such warranty or additional liability. + +END OF TERMS AND CONDITIONS + +APPENDIX: How to apply the Apache License to your work + +To apply the Apache License to your work, attach the following boilerplate +notice, with the fields enclosed by brackets "[]" replaced with your own +identifying information. (Don't include the brackets!) The text should be +enclosed in the appropriate comment syntax for the file format. We also +recommend that a file or class name and description of purpose be included on +the same "printed page" as the copyright notice for easier identification within +third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
diff --git a/vendor/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/vendor/github.com/go-ini/ini/README.md b/vendor/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/vendor/github.com/go-ini/ini/README.md new file mode 100644 index 000000000..1272038a9 --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/vendor/github.com/go-ini/ini/README.md @@ -0,0 +1,560 @@ +ini [![Build Status](https://drone.io/github.com/go-ini/ini/status.png)](https://drone.io/github.com/go-ini/ini/latest) [![](http://gocover.io/_badge/github.com/go-ini/ini)](http://gocover.io/github.com/go-ini/ini) +=== + +![](https://avatars0.githubusercontent.com/u/10216035?v=3&s=200) + +Package ini provides INI file read and write functionality in Go. + +[简体中文](README_ZH.md) + +## Feature + +- Load multiple data sources (`[]byte` or file) with overwrites. +- Read with recursive values. +- Read with parent-child sections. +- Read with auto-increment key names. +- Read with multiple-line values. +- Read with tons of helper methods. +- Read and convert values to Go types. +- Read and **WRITE** comments of sections and keys. +- Manipulate sections, keys and comments with ease. +- Keep sections and keys in order as you parse and save. + +## Installation + + go get gopkg.in/ini.v1 + +## Getting Started + +### Loading from data sources + +A **Data Source** is either raw data of type `[]byte` or a file name of type `string`, and you can load **as many** data sources **as** you want. Passing any other type will simply return an error. + +```go +cfg, err := ini.Load([]byte("raw data"), "filename") +``` + +Or start with an empty object: + +```go +cfg := ini.Empty() +``` + +When you cannot decide how many data sources to load at the beginning, you are still able to **Append()** them later. + +```go +err := cfg.Append("other file", []byte("other raw data")) +``` + +### Working with sections + +To get a section, you would need to: + +```go +section, err := cfg.GetSection("section name") +``` + +As a shortcut for the default section, just give an empty string as the name: + +```go +section, err := cfg.GetSection("") +``` + +When you're pretty sure the section exists, the following code could make your life easier: + +```go +section := cfg.Section("") +``` + +What happens when the section somehow does not exist? Don't panic, it automatically creates and returns a new section to you.
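As a quick illustration of that auto-create behaviour, here is a minimal sketch; the `server` section and `addr` key are made-up names, and the import path is the `gopkg.in/ini.v1` package installed above:

```go
package main

import (
	"fmt"

	"gopkg.in/ini.v1"
)

func main() {
	cfg := ini.Empty()

	// "server" was never declared anywhere, yet Section() hands back a
	// freshly created section instead of an error.
	sec := cfg.Section("server")

	// Keys can be added to it straight away; NewKey returns (*Key, error).
	if _, err := sec.NewKey("addr", "0.0.0.0:9300"); err != nil {
		panic(err)
	}

	fmt.Println(cfg.Section("server").Key("addr").String()) // 0.0.0.0:9300
}
```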
+ +To create a new section: + +```go +err := cfg.NewSection("new section") +``` + +To get a list of sections or section names: + +```go +sections := cfg.Sections() +names := cfg.SectionStrings() +``` + +### Working with keys + +To get a key under a section: + +```go +key, err := cfg.Section("").GetKey("key name") +``` + +Same rule applies to key operations: + +```go +key := cfg.Section("").Key("key name") +``` + +To create a new key: + +```go +err := cfg.Section("").NewKey("name", "value") +``` + +To get a list of keys or key names: + +```go +keys := cfg.Section("").Keys() +names := cfg.Section("").KeyStrings() +``` + +To get a clone hash of keys and corresponding values: + +```go +hash := cfg.GetSection("").KeysHash() +``` + +### Working with values + +To get a string value: + +```go +val := cfg.Section("").Key("key name").String() +``` + +To validate key value on the fly: + +```go +val := cfg.Section("").Key("key name").Validate(func(in string) string { + if len(in) == 0 { + return "default" + } + return in +}) +``` + +To get value with types: + +```go +// For boolean values: +// true when value is: 1, t, T, TRUE, true, True, YES, yes, Yes, ON, on, On +// false when value is: 0, f, F, FALSE, false, False, NO, no, No, OFF, off, Off +v, err = cfg.Section("").Key("BOOL").Bool() +v, err = cfg.Section("").Key("FLOAT64").Float64() +v, err = cfg.Section("").Key("INT").Int() +v, err = cfg.Section("").Key("INT64").Int64() +v, err = cfg.Section("").Key("UINT").Uint() +v, err = cfg.Section("").Key("UINT64").Uint64() +v, err = cfg.Section("").Key("TIME").TimeFormat(time.RFC3339) +v, err = cfg.Section("").Key("TIME").Time() // RFC3339 + +v = cfg.Section("").Key("BOOL").MustBool() +v = cfg.Section("").Key("FLOAT64").MustFloat64() +v = cfg.Section("").Key("INT").MustInt() +v = cfg.Section("").Key("INT64").MustInt64() +v = cfg.Section("").Key("UINT").MustUint() +v = cfg.Section("").Key("UINT64").MustUint64() +v = cfg.Section("").Key("TIME").MustTimeFormat(time.RFC3339) +v = cfg.Section("").Key("TIME").MustTime() // RFC3339 + +// Methods start with Must also accept one argument for default value +// when key not found or fail to parse value to given type. +// Except method MustString, which you have to pass a default value. + +v = cfg.Section("").Key("String").MustString("default") +v = cfg.Section("").Key("BOOL").MustBool(true) +v = cfg.Section("").Key("FLOAT64").MustFloat64(1.25) +v = cfg.Section("").Key("INT").MustInt(10) +v = cfg.Section("").Key("INT64").MustInt64(99) +v = cfg.Section("").Key("UINT").MustUint(3) +v = cfg.Section("").Key("UINT64").MustUint64(6) +v = cfg.Section("").Key("TIME").MustTimeFormat(time.RFC3339, time.Now()) +v = cfg.Section("").Key("TIME").MustTime(time.Now()) // RFC3339 +``` + +What if my value is three-line long? + +```ini +[advance] +ADDRESS = """404 road, +NotFound, State, 5000 +Earth""" +``` + +Not a problem! + +```go +cfg.Section("advance").Key("ADDRESS").String() + +/* --- start --- +404 road, +NotFound, State, 5000 +Earth +------ end --- */ +``` + +That's cool, how about continuation lines? + +```ini +[advance] +two_lines = how about \ + continuation lines? +lots_of_lines = 1 \ + 2 \ + 3 \ + 4 +``` + +Piece of cake! + +```go +cfg.Section("advance").Key("two_lines").String() // how about continuation lines? +cfg.Section("advance").Key("lots_of_lines").String() // 1 2 3 4 +``` + +Note that single quotes around values will be stripped: + +```ini +foo = "some value" // foo: some value +bar = 'some value' // bar: some value +``` + +That's all? Hmm, no. 
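Before moving on to the helper methods, a small self-contained sketch that ties the calls above together; the `timeout` and `verbose` keys are invented purely for the example:

```go
package main

import (
	"fmt"

	"gopkg.in/ini.v1"
)

func main() {
	// Load from a raw []byte data source; keys outside any section land in
	// the default section, addressed with an empty name.
	cfg, err := ini.Load([]byte("timeout = 30\nverbose = yes"))
	if err != nil {
		panic(err)
	}

	// Must* getters fall back to the supplied default when the key is
	// missing or cannot be parsed into the requested type.
	timeout := cfg.Section("").Key("timeout").MustInt(10)
	verbose := cfg.Section("").Key("verbose").MustBool(false)

	fmt.Println(timeout, verbose) // 30 true
}
```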
+ +#### Helper methods of working with values + +To get a value from a list of given candidates: + +```go +v = cfg.Section("").Key("STRING").In("default", []string{"str", "arr", "types"}) +v = cfg.Section("").Key("FLOAT64").InFloat64(1.1, []float64{1.25, 2.5, 3.75}) +v = cfg.Section("").Key("INT").InInt(5, []int{10, 20, 30}) +v = cfg.Section("").Key("INT64").InInt64(10, []int64{10, 20, 30}) +v = cfg.Section("").Key("UINT").InUint(4, []int{3, 6, 9}) +v = cfg.Section("").Key("UINT64").InUint64(8, []int64{3, 6, 9}) +v = cfg.Section("").Key("TIME").InTimeFormat(time.RFC3339, time.Now(), []time.Time{time1, time2, time3}) +v = cfg.Section("").Key("TIME").InTime(time.Now(), []time.Time{time1, time2, time3}) // RFC3339 +``` + +The default value is returned if the value of the key is not among the candidates you give; the default value does not need to be one of the candidates. + +To validate that a value is in a given range: + +```go +vals = cfg.Section("").Key("FLOAT64").RangeFloat64(0.0, 1.1, 2.2) +vals = cfg.Section("").Key("INT").RangeInt(0, 10, 20) +vals = cfg.Section("").Key("INT64").RangeInt64(0, 10, 20) +vals = cfg.Section("").Key("UINT").RangeUint(0, 3, 9) +vals = cfg.Section("").Key("UINT64").RangeUint64(0, 3, 9) +vals = cfg.Section("").Key("TIME").RangeTimeFormat(time.RFC3339, time.Now(), minTime, maxTime) +vals = cfg.Section("").Key("TIME").RangeTime(time.Now(), minTime, maxTime) // RFC3339 +``` + +To auto-split a value into a slice: + +```go +vals = cfg.Section("").Key("STRINGS").Strings(",") +vals = cfg.Section("").Key("FLOAT64S").Float64s(",") +vals = cfg.Section("").Key("INTS").Ints(",") +vals = cfg.Section("").Key("INT64S").Int64s(",") +vals = cfg.Section("").Key("UINTS").Uints(",") +vals = cfg.Section("").Key("UINT64S").Uint64s(",") +vals = cfg.Section("").Key("TIMES").Times(",") +``` + +### Save your configuration + +Finally, it's time to save your configuration somewhere. + +A typical way to save the configuration is to write it to a file: + +```go +// ... +err = cfg.SaveTo("my.ini") +err = cfg.SaveToIndent("my.ini", "\t") +``` + +Another way to save is writing to an `io.Writer` interface: + +```go +// ... +cfg.WriteTo(writer) +cfg.WriteToIndent(writer, "\t") +``` + +## Advanced Usage + +### Recursive Values + +For all key values, there is a special syntax `%(<name>)s`, where `<name>` is a key name in the same section or in the default section; `%(<name>)s` will be replaced by the corresponding value (an empty string if the key is not found). You can use this syntax with at most 99 levels of recursion. + +```ini +NAME = ini + +[author] +NAME = Unknwon +GITHUB = https://github.com/%(NAME)s + +[package] +FULL_NAME = github.com/go-ini/%(NAME)s +``` + +```go +cfg.Section("author").Key("GITHUB").String() // https://github.com/Unknwon +cfg.Section("package").Key("FULL_NAME").String() // github.com/go-ini/ini +``` + +### Parent-child Sections + +You can use `.` in a section name to indicate a parent-child relationship between two or more sections. If a key is not found in the child section, the library will try its parent section, and so on until there is no parent section left. + +```ini +NAME = ini +VERSION = v1 +IMPORT_PATH = gopkg.in/%(NAME)s.%(VERSION)s + +[package] +CLONE_URL = https://%(IMPORT_PATH)s + +[package.sub] +``` + +```go +cfg.Section("package.sub").Key("CLONE_URL").String() // https://gopkg.in/ini.v1 +``` + +### Auto-increment Key Names + +If the key name is `-` in the data source, it is treated as special syntax for auto-increment key names, starting from 1; every section keeps its own counter.
+ +```ini +[features] +-: Support read/write comments of keys and sections +-: Support auto-increment of key names +-: Support load multiple files to overwrite key values +``` + +```go +cfg.Section("features").KeyStrings() // []{"#1", "#2", "#3"} +``` + +### Map To Struct + +Want more objective way to play with INI? Cool. + +```ini +Name = Unknwon +age = 21 +Male = true +Born = 1993-01-01T20:17:05Z + +[Note] +Content = Hi is a good man! +Cities = HangZhou, Boston +``` + +```go +type Note struct { + Content string + Cities []string +} + +type Person struct { + Name string + Age int `ini:"age"` + Male bool + Born time.Time + Note + Created time.Time `ini:"-"` +} + +func main() { + cfg, err := ini.Load("path/to/ini") + // ... + p := new(Person) + err = cfg.MapTo(p) + // ... + + // Things can be simpler. + err = ini.MapTo(p, "path/to/ini") + // ... + + // Just map a section? Fine. + n := new(Note) + err = cfg.Section("Note").MapTo(n) + // ... +} +``` + +Can I have default value for field? Absolutely. + +Assign it before you map to struct. It will keep the value as it is if the key is not presented or got wrong type. + +```go +// ... +p := &Person{ + Name: "Joe", +} +// ... +``` + +It's really cool, but what's the point if you can't give me my file back from struct? + +### Reflect From Struct + +Why not? + +```go +type Embeded struct { + Dates []time.Time `delim:"|"` + Places []string + None []int +} + +type Author struct { + Name string `ini:"NAME"` + Male bool + Age int + GPA float64 + NeverMind string `ini:"-"` + *Embeded +} + +func main() { + a := &Author{"Unknwon", true, 21, 2.8, "", + &Embeded{ + []time.Time{time.Now(), time.Now()}, + []string{"HangZhou", "Boston"}, + []int{}, + }} + cfg := ini.Empty() + err = ini.ReflectFrom(cfg, a) + // ... +} +``` + +So, what do I get? + +```ini +NAME = Unknwon +Male = true +Age = 21 +GPA = 2.8 + +[Embeded] +Dates = 2015-08-07T22:14:22+08:00|2015-08-07T22:14:22+08:00 +Places = HangZhou,Boston +None = +``` + +#### Name Mapper + +To save your time and make your code cleaner, this library supports [`NameMapper`](https://gowalker.org/gopkg.in/ini.v1#NameMapper) between struct field and actual section and key name. + +There are 2 built-in name mappers: + +- `AllCapsUnderscore`: it converts to format `ALL_CAPS_UNDERSCORE` then match section or key. +- `TitleUnderscore`: it converts to format `title_underscore` then match section or key. + +To use them: + +```go +type Info struct { + PackageName string +} + +func main() { + err = ini.MapToWithMapper(&Info{}, ini.TitleUnderscore, []byte("packag_name=ini")) + // ... + + cfg, err := ini.Load([]byte("PACKAGE_NAME=ini")) + // ... + info := new(Info) + cfg.NameMapper = ini.AllCapsUnderscore + err = cfg.MapTo(info) + // ... +} +``` + +Same rules of name mapper apply to `ini.ReflectFromWithMapper` function. + +#### Other Notes On Map/Reflect + +Any embedded struct is treated as a section by default, and there is no automatic parent-child relations in map/reflect feature: + +```go +type Child struct { + Age string +} + +type Parent struct { + Name string + Child +} + +type Config struct { + City string + Parent +} +``` + +Example configuration: + +```ini +City = Boston + +[Parent] +Name = Unknwon + +[Child] +Age = 21 +``` + +What if, yes, I'm paranoid, I want embedded struct to be in the same section. Well, all roads lead to Rome. 
+ +```go +type Child struct { + Age string +} + +type Parent struct { + Name string + Child `ini:"Parent"` +} + +type Config struct { + City string + Parent +} +``` + +Example configuration: + +```ini +City = Boston + +[Parent] +Name = Unknwon +Age = 21 +``` + +## Getting Help + +- [API Documentation](https://gowalker.org/gopkg.in/ini.v1) +- [File An Issue](https://github.com/go-ini/ini/issues/new) + +## FAQs + +### What does `BlockMode` field do? + +By default, library lets you read and write values so we need a locker to make sure your data is safe. But in cases that you are very sure about only reading data through the library, you can set `cfg.BlockMode = false` to speed up read operations about **50-70%** faster. + +### Why another INI library? + +Many people are using my another INI library [goconfig](https://github.com/Unknwon/goconfig), so the reason for this one is I would like to make more Go style code. Also when you set `cfg.BlockMode = false`, this one is about **10-30%** faster. + +To make those changes I have to confirm API broken, so it's safer to keep it in another place and start using `gopkg.in` to version my package at this time.(PS: shorter import path) + +## License + +This project is under Apache v2 License. See the [LICENSE](LICENSE) file for the full license text. diff --git a/vendor/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/vendor/github.com/go-ini/ini/README_ZH.md b/vendor/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/vendor/github.com/go-ini/ini/README_ZH.md new file mode 100644 index 000000000..45e19eddd --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/vendor/github.com/go-ini/ini/README_ZH.md @@ -0,0 +1,547 @@ +本包提供了 Go 语言中读写 INI 文件的功能。 + +## 功能特性 + +- 支持覆盖加载多个数据源(`[]byte` 或文件) +- 支持递归读取键值 +- 支持读取父子分区 +- 支持读取自增键名 +- 支持读取多行的键值 +- 支持大量辅助方法 +- 支持在读取时直接转换为 Go 语言类型 +- 支持读取和 **写入** 分区和键的注释 +- 轻松操作分区、键值和注释 +- 在保存文件时分区和键值会保持原有的顺序 + +## 下载安装 + + go get gopkg.in/ini.v1 + +## 开始使用 + +### 从数据源加载 + +一个 **数据源** 可以是 `[]byte` 类型的原始数据,或 `string` 类型的文件路径。您可以加载 **任意多个** 数据源。如果您传递其它类型的数据源,则会直接返回错误。 + +```go +cfg, err := ini.Load([]byte("raw data"), "filename") +``` + +或者从一个空白的文件开始: + +```go +cfg := ini.Empty() +``` + +当您在一开始无法决定需要加载哪些数据源时,仍可以使用 **Append()** 在需要的时候加载它们。 + +```go +err := cfg.Append("other file", []byte("other raw data")) +``` + +### 操作分区(Section) + +获取指定分区: + +```go +section, err := cfg.GetSection("section name") +``` + +如果您想要获取默认分区,则可以用空字符串代替分区名: + +```go +section, err := cfg.GetSection("") +``` + +当您非常确定某个分区是存在的,可以使用以下简便方法: + +```go +section := cfg.Section("") +``` + +如果不小心判断错了,要获取的分区其实是不存在的,那会发生什么呢?没事的,它会自动创建并返回一个对应的分区对象给您。 + +创建一个分区: + +```go +err := cfg.NewSection("new section") +``` + +获取所有分区对象或名称: + +```go +sections := cfg.Sections() +names := cfg.SectionStrings() +``` + +### 操作键(Key) + +获取某个分区下的键: + +```go +key, err := cfg.Section("").GetKey("key name") +``` + +和分区一样,您也可以直接获取键而忽略错误处理: + +```go +key := cfg.Section("").Key("key name") +``` + +创建一个新的键: + +```go +err := cfg.Section("").NewKey("name", "value") +``` + +获取分区下的所有键或键名: + +```go +keys := cfg.Section("").Keys() +names := cfg.Section("").KeyStrings() +``` + +获取分区下的所有键值对的克隆: + +```go +hash := cfg.GetSection("").KeysHash() +``` + +### 操作键值(Value) + +获取一个类型为字符串(string)的值: + +```go +val := cfg.Section("").Key("key name").String() +``` + +获取值的同时通过自定义函数进行处理验证: + +```go +val := cfg.Section("").Key("key name").Validate(func(in string) string { + if len(in) == 0 { + return "default" + } + return in +}) +``` + +获取其它类型的值: + +```go 
+// 布尔值的规则: +// true 当值为:1, t, T, TRUE, true, True, YES, yes, Yes, ON, on, On +// false 当值为:0, f, F, FALSE, false, False, NO, no, No, OFF, off, Off +v, err = cfg.Section("").Key("BOOL").Bool() +v, err = cfg.Section("").Key("FLOAT64").Float64() +v, err = cfg.Section("").Key("INT").Int() +v, err = cfg.Section("").Key("INT64").Int64() +v, err = cfg.Section("").Key("UINT").Uint() +v, err = cfg.Section("").Key("UINT64").Uint64() +v, err = cfg.Section("").Key("TIME").TimeFormat(time.RFC3339) +v, err = cfg.Section("").Key("TIME").Time() // RFC3339 + +v = cfg.Section("").Key("BOOL").MustBool() +v = cfg.Section("").Key("FLOAT64").MustFloat64() +v = cfg.Section("").Key("INT").MustInt() +v = cfg.Section("").Key("INT64").MustInt64() +v = cfg.Section("").Key("UINT").MustUint() +v = cfg.Section("").Key("UINT64").MustUint64() +v = cfg.Section("").Key("TIME").MustTimeFormat(time.RFC3339) +v = cfg.Section("").Key("TIME").MustTime() // RFC3339 + +// 由 Must 开头的方法名允许接收一个相同类型的参数来作为默认值, +// 当键不存在或者转换失败时,则会直接返回该默认值。 +// 但是,MustString 方法必须传递一个默认值。 + +v = cfg.Seciont("").Key("String").MustString("default") +v = cfg.Section("").Key("BOOL").MustBool(true) +v = cfg.Section("").Key("FLOAT64").MustFloat64(1.25) +v = cfg.Section("").Key("INT").MustInt(10) +v = cfg.Section("").Key("INT64").MustInt64(99) +v = cfg.Section("").Key("UINT").MustUint(3) +v = cfg.Section("").Key("UINT64").MustUint64(6) +v = cfg.Section("").Key("TIME").MustTimeFormat(time.RFC3339, time.Now()) +v = cfg.Section("").Key("TIME").MustTime(time.Now()) // RFC3339 +``` + +如果我的值有好多行怎么办? + +```ini +[advance] +ADDRESS = """404 road, +NotFound, State, 5000 +Earth""" +``` + +嗯哼?小 case! + +```go +cfg.Section("advance").Key("ADDRESS").String() + +/* --- start --- +404 road, +NotFound, State, 5000 +Earth +------ end --- */ +``` + +赞爆了!那要是我属于一行的内容写不下想要写到第二行怎么办? + +```ini +[advance] +two_lines = how about \ + continuation lines? +lots_of_lines = 1 \ + 2 \ + 3 \ + 4 +``` + +简直是小菜一碟! + +```go +cfg.Section("advance").Key("two_lines").String() // how about continuation lines? 
+cfg.Section("advance").Key("lots_of_lines").String() // 1 2 3 4 +``` + +需要注意的是,值两侧的单引号会被自动剔除: + +```ini +foo = "some value" // foo: some value +bar = 'some value' // bar: some value +``` + +这就是全部了?哈哈,当然不是。 + +#### 操作键值的辅助方法 + +获取键值时设定候选值: + +```go +v = cfg.Section("").Key("STRING").In("default", []string{"str", "arr", "types"}) +v = cfg.Section("").Key("FLOAT64").InFloat64(1.1, []float64{1.25, 2.5, 3.75}) +v = cfg.Section("").Key("INT").InInt(5, []int{10, 20, 30}) +v = cfg.Section("").Key("INT64").InInt64(10, []int64{10, 20, 30}) +v = cfg.Section("").Key("UINT").InUint(4, []int{3, 6, 9}) +v = cfg.Section("").Key("UINT64").InUint64(8, []int64{3, 6, 9}) +v = cfg.Section("").Key("TIME").InTimeFormat(time.RFC3339, time.Now(), []time.Time{time1, time2, time3}) +v = cfg.Section("").Key("TIME").InTime(time.Now(), []time.Time{time1, time2, time3}) // RFC3339 +``` + +如果获取到的值不是候选值的任意一个,则会返回默认值,而默认值不需要是候选值中的一员。 + +验证获取的值是否在指定范围内: + +```go +vals = cfg.Section("").Key("FLOAT64").RangeFloat64(0.0, 1.1, 2.2) +vals = cfg.Section("").Key("INT").RangeInt(0, 10, 20) +vals = cfg.Section("").Key("INT64").RangeInt64(0, 10, 20) +vals = cfg.Section("").Key("UINT").RangeUint(0, 3, 9) +vals = cfg.Section("").Key("UINT64").RangeUint64(0, 3, 9) +vals = cfg.Section("").Key("TIME").RangeTimeFormat(time.RFC3339, time.Now(), minTime, maxTime) +vals = cfg.Section("").Key("TIME").RangeTime(time.Now(), minTime, maxTime) // RFC3339 +``` + +自动分割键值为切片(slice): + +```go +vals = cfg.Section("").Key("STRINGS").Strings(",") +vals = cfg.Section("").Key("FLOAT64S").Float64s(",") +vals = cfg.Section("").Key("INTS").Ints(",") +vals = cfg.Section("").Key("INT64S").Int64s(",") +vals = cfg.Section("").Key("UINTS").Uints(",") +vals = cfg.Section("").Key("UINT64S").Uint64s(",") +vals = cfg.Section("").Key("TIMES").Times(",") +``` + +### 保存配置 + +终于到了这个时刻,是时候保存一下配置了。 + +比较原始的做法是输出配置到某个文件: + +```go +// ... +err = cfg.SaveTo("my.ini") +err = cfg.SaveToIndent("my.ini", "\t") +``` + +另一个比较高级的做法是写入到任何实现 `io.Writer` 接口的对象中: + +```go +// ... +cfg.WriteTo(writer) +cfg.WriteToIndent(writer, "\t") +``` + +### 高级用法 + +#### 递归读取键值 + +在获取所有键值的过程中,特殊语法 `%()s` 会被应用,其中 `` 可以是相同分区或者默认分区下的键名。字符串 `%()s` 会被相应的键值所替代,如果指定的键不存在,则会用空字符串替代。您可以最多使用 99 层的递归嵌套。 + +```ini +NAME = ini + +[author] +NAME = Unknwon +GITHUB = https://github.com/%(NAME)s + +[package] +FULL_NAME = github.com/go-ini/%(NAME)s +``` + +```go +cfg.Section("author").Key("GITHUB").String() // https://github.com/Unknwon +cfg.Section("package").Key("FULL_NAME").String() // github.com/go-ini/ini +``` + +#### 读取父子分区 + +您可以在分区名称中使用 `.` 来表示两个或多个分区之间的父子关系。如果某个键在子分区中不存在,则会去它的父分区中再次寻找,直到没有父分区为止。 + +```ini +NAME = ini +VERSION = v1 +IMPORT_PATH = gopkg.in/%(NAME)s.%(VERSION)s + +[package] +CLONE_URL = https://%(IMPORT_PATH)s + +[package.sub] +``` + +```go +cfg.Section("package.sub").Key("CLONE_URL").String() // https://gopkg.in/ini.v1 +``` + +#### 读取自增键名 + +如果数据源中的键名为 `-`,则认为该键使用了自增键名的特殊语法。计数器从 1 开始,并且分区之间是相互独立的。 + +```ini +[features] +-: Support read/write comments of keys and sections +-: Support auto-increment of key names +-: Support load multiple files to overwrite key values +``` + +```go +cfg.Section("features").KeyStrings() // []{"#1", "#2", "#3"} +``` + +### 映射到结构 + +想要使用更加面向对象的方式玩转 INI 吗?好主意。 + +```ini +Name = Unknwon +age = 21 +Male = true +Born = 1993-01-01T20:17:05Z + +[Note] +Content = Hi is a good man! 
+Cities = HangZhou, Boston +``` + +```go +type Note struct { + Content string + Cities []string +} + +type Person struct { + Name string + Age int `ini:"age"` + Male bool + Born time.Time + Note + Created time.Time `ini:"-"` +} + +func main() { + cfg, err := ini.Load("path/to/ini") + // ... + p := new(Person) + err = cfg.MapTo(p) + // ... + + // 一切竟可以如此的简单。 + err = ini.MapTo(p, "path/to/ini") + // ... + + // 嗯哼?只需要映射一个分区吗? + n := new(Note) + err = cfg.Section("Note").MapTo(n) + // ... +} +``` + +结构的字段怎么设置默认值呢?很简单,只要在映射之前对指定字段进行赋值就可以了。如果键未找到或者类型错误,该值不会发生改变。 + +```go +// ... +p := &Person{ + Name: "Joe", +} +// ... +``` + +这样玩 INI 真的好酷啊!然而,如果不能还给我原来的配置文件,有什么卵用? + +### 从结构反射 + +可是,我有说不能吗? + +```go +type Embeded struct { + Dates []time.Time `delim:"|"` + Places []string + None []int +} + +type Author struct { + Name string `ini:"NAME"` + Male bool + Age int + GPA float64 + NeverMind string `ini:"-"` + *Embeded +} + +func main() { + a := &Author{"Unknwon", true, 21, 2.8, "", + &Embeded{ + []time.Time{time.Now(), time.Now()}, + []string{"HangZhou", "Boston"}, + []int{}, + }} + cfg := ini.Empty() + err = ini.ReflectFrom(cfg, a) + // ... +} +``` + +瞧瞧,奇迹发生了。 + +```ini +NAME = Unknwon +Male = true +Age = 21 +GPA = 2.8 + +[Embeded] +Dates = 2015-08-07T22:14:22+08:00|2015-08-07T22:14:22+08:00 +Places = HangZhou,Boston +None = +``` + +#### 名称映射器(Name Mapper) + +为了节省您的时间并简化代码,本库支持类型为 [`NameMapper`](https://gowalker.org/gopkg.in/ini.v1#NameMapper) 的名称映射器,该映射器负责结构字段名与分区名和键名之间的映射。 + +目前有 2 款内置的映射器: + +- `AllCapsUnderscore`:该映射器将字段名转换至格式 `ALL_CAPS_UNDERSCORE` 后再去匹配分区名和键名。 +- `TitleUnderscore`:该映射器将字段名转换至格式 `title_underscore` 后再去匹配分区名和键名。 + +使用方法: + +```go +type Info struct{ + PackageName string +} + +func main() { + err = ini.MapToWithMapper(&Info{}, ini.TitleUnderscore, []byte("packag_name=ini")) + // ... + + cfg, err := ini.Load([]byte("PACKAGE_NAME=ini")) + // ... + info := new(Info) + cfg.NameMapper = ini.AllCapsUnderscore + err = cfg.MapTo(info) + // ... +} +``` + +使用函数 `ini.ReflectFromWithMapper` 时也可应用相同的规则。 + +#### 映射/反射的其它说明 + +任何嵌入的结构都会被默认认作一个不同的分区,并且不会自动产生所谓的父子分区关联: + +```go +type Child struct { + Age string +} + +type Parent struct { + Name string + Child +} + +type Config struct { + City string + Parent +} +``` + +示例配置文件: + +```ini +City = Boston + +[Parent] +Name = Unknwon + +[Child] +Age = 21 +``` + +很好,但是,我就是要嵌入结构也在同一个分区。好吧,你爹是李刚! + +```go +type Child struct { + Age string +} + +type Parent struct { + Name string + Child `ini:"Parent"` +} + +type Config struct { + City string + Parent +} +``` + +示例配置文件: + +```ini +City = Boston + +[Parent] +Name = Unknwon +Age = 21 +``` + +## 获取帮助 + +- [API 文档](https://gowalker.org/gopkg.in/ini.v1) +- [创建工单](https://github.com/go-ini/ini/issues/new) + +## 常见问题 + +### 字段 `BlockMode` 是什么? + +默认情况下,本库会在您进行读写操作时采用锁机制来确保数据时间。但在某些情况下,您非常确定只进行读操作。此时,您可以通过设置 `cfg.BlockMode = false` 来将读操作提升大约 **50-70%** 的性能。 + +### 为什么要写另一个 INI 解析库? 
+ +许多人都在使用我的 [goconfig](https://github.com/Unknwon/goconfig) 来完成对 INI 文件的操作,但我希望使用更加 Go 风格的代码。并且当您设置 `cfg.BlockMode = false` 时,会有大约 **10-30%** 的性能提升。 + +为了做出这些改变,我必须对 API 进行破坏,所以新开一个仓库是最安全的做法。除此之外,本库直接使用 `gopkg.in` 来进行版本化发布。(其实真相是导入路径更短了) diff --git a/vendor/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/vendor/github.com/go-ini/ini/ini.go b/vendor/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/vendor/github.com/go-ini/ini/ini.go new file mode 100644 index 000000000..1fee789a1 --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/vendor/github.com/go-ini/ini/ini.go @@ -0,0 +1,1226 @@ +// Copyright 2014 Unknwon +// +// Licensed under the Apache License, Version 2.0 (the "License"): you may +// not use this file except in compliance with the License. You may obtain +// a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +// License for the specific language governing permissions and limitations +// under the License. + +// Package ini provides INI file read and write functionality in Go. +package ini + +import ( + "bufio" + "bytes" + "errors" + "fmt" + "io" + "os" + "regexp" + "runtime" + "strconv" + "strings" + "sync" + "time" +) + +const ( + DEFAULT_SECTION = "DEFAULT" + // Maximum allowed depth when recursively substituing variable names. + _DEPTH_VALUES = 99 + + _VERSION = "1.6.0" +) + +func Version() string { + return _VERSION +} + +var ( + LineBreak = "\n" + + // Variable regexp pattern: %(variable)s + varPattern = regexp.MustCompile(`%\(([^\)]+)\)s`) + + // Write spaces around "=" to look better. + PrettyFormat = true +) + +func init() { + if runtime.GOOS == "windows" { + LineBreak = "\r\n" + } +} + +func inSlice(str string, s []string) bool { + for _, v := range s { + if str == v { + return true + } + } + return false +} + +// dataSource is a interface that returns file content. +type dataSource interface { + ReadCloser() (io.ReadCloser, error) +} + +type sourceFile struct { + name string +} + +func (s sourceFile) ReadCloser() (_ io.ReadCloser, err error) { + return os.Open(s.name) +} + +type bytesReadCloser struct { + reader io.Reader +} + +func (rc *bytesReadCloser) Read(p []byte) (n int, err error) { + return rc.reader.Read(p) +} + +func (rc *bytesReadCloser) Close() error { + return nil +} + +type sourceData struct { + data []byte +} + +func (s *sourceData) ReadCloser() (io.ReadCloser, error) { + return &bytesReadCloser{bytes.NewReader(s.data)}, nil +} + +// ____ __. +// | |/ _|____ ___.__. +// | <_/ __ < | | +// | | \ ___/\___ | +// |____|__ \___ > ____| +// \/ \/\/ + +// Key represents a key under a section. +type Key struct { + s *Section + Comment string + name string + value string + isAutoIncr bool +} + +// Name returns name of key. +func (k *Key) Name() string { + return k.name +} + +// Value returns raw value of key for performance purpose. +func (k *Key) Value() string { + return k.value +} + +// String returns string representation of value. +func (k *Key) String() string { + val := k.value + if strings.Index(val, "%") == -1 { + return val + } + + for i := 0; i < _DEPTH_VALUES; i++ { + vr := varPattern.FindString(val) + if len(vr) == 0 { + break + } + + // Take off leading '%(' and trailing ')s'. 
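+ // Note: TrimLeft/TrimRight trim any run of characters from the given cutset,
+ // not a literal prefix/suffix, so a variable name that itself ends in 's' or
+ // ')' would also be trimmed here.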
+ noption := strings.TrimLeft(vr, "%(") + noption = strings.TrimRight(noption, ")s") + + // Search in the same section. + nk, err := k.s.GetKey(noption) + if err != nil { + // Search again in default section. + nk, _ = k.s.f.Section("").GetKey(noption) + } + + // Substitute by new value and take off leading '%(' and trailing ')s'. + val = strings.Replace(val, vr, nk.value, -1) + } + return val +} + +// Validate accepts a validate function which can +// return modifed result as key value. +func (k *Key) Validate(fn func(string) string) string { + return fn(k.String()) +} + +// parseBool returns the boolean value represented by the string. +// +// It accepts 1, t, T, TRUE, true, True, YES, yes, Yes, ON, on, On, +// 0, f, F, FALSE, false, False, NO, no, No, OFF, off, Off. +// Any other value returns an error. +func parseBool(str string) (value bool, err error) { + switch str { + case "1", "t", "T", "true", "TRUE", "True", "YES", "yes", "Yes", "ON", "on", "On": + return true, nil + case "0", "f", "F", "false", "FALSE", "False", "NO", "no", "No", "OFF", "off", "Off": + return false, nil + } + return false, fmt.Errorf("parsing \"%s\": invalid syntax", str) +} + +// Bool returns bool type value. +func (k *Key) Bool() (bool, error) { + return parseBool(k.String()) +} + +// Float64 returns float64 type value. +func (k *Key) Float64() (float64, error) { + return strconv.ParseFloat(k.String(), 64) +} + +// Int returns int type value. +func (k *Key) Int() (int, error) { + return strconv.Atoi(k.String()) +} + +// Int64 returns int64 type value. +func (k *Key) Int64() (int64, error) { + return strconv.ParseInt(k.String(), 10, 64) +} + +// Uint returns uint type valued. +func (k *Key) Uint() (uint, error) { + u, e := strconv.ParseUint(k.String(), 10, 64) + return uint(u), e +} + +// Uint64 returns uint64 type value. +func (k *Key) Uint64() (uint64, error) { + return strconv.ParseUint(k.String(), 10, 64) +} + +// Duration returns time.Duration type value. +func (k *Key) Duration() (time.Duration, error) { + return time.ParseDuration(k.String()) +} + +// TimeFormat parses with given format and returns time.Time type value. +func (k *Key) TimeFormat(format string) (time.Time, error) { + return time.Parse(format, k.String()) +} + +// Time parses with RFC3339 format and returns time.Time type value. +func (k *Key) Time() (time.Time, error) { + return k.TimeFormat(time.RFC3339) +} + +// MustString returns default value if key value is empty. +func (k *Key) MustString(defaultVal string) string { + val := k.String() + if len(val) == 0 { + return defaultVal + } + return val +} + +// MustBool always returns value without error, +// it returns false if error occurs. +func (k *Key) MustBool(defaultVal ...bool) bool { + val, err := k.Bool() + if len(defaultVal) > 0 && err != nil { + return defaultVal[0] + } + return val +} + +// MustFloat64 always returns value without error, +// it returns 0.0 if error occurs. +func (k *Key) MustFloat64(defaultVal ...float64) float64 { + val, err := k.Float64() + if len(defaultVal) > 0 && err != nil { + return defaultVal[0] + } + return val +} + +// MustInt always returns value without error, +// it returns 0 if error occurs. +func (k *Key) MustInt(defaultVal ...int) int { + val, err := k.Int() + if len(defaultVal) > 0 && err != nil { + return defaultVal[0] + } + return val +} + +// MustInt64 always returns value without error, +// it returns 0 if error occurs. 
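+// A usage sketch (INT64 is a placeholder key name):
+//	v := cfg.Section("").Key("INT64").MustInt64(99)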
+func (k *Key) MustInt64(defaultVal ...int64) int64 { + val, err := k.Int64() + if len(defaultVal) > 0 && err != nil { + return defaultVal[0] + } + return val +} + +// MustUint always returns value without error, +// it returns 0 if error occurs. +func (k *Key) MustUint(defaultVal ...uint) uint { + val, err := k.Uint() + if len(defaultVal) > 0 && err != nil { + return defaultVal[0] + } + return val +} + +// MustUint64 always returns value without error, +// it returns 0 if error occurs. +func (k *Key) MustUint64(defaultVal ...uint64) uint64 { + val, err := k.Uint64() + if len(defaultVal) > 0 && err != nil { + return defaultVal[0] + } + return val +} + +// MustDuration always returns value without error, +// it returns zero value if error occurs. +func (k *Key) MustDuration(defaultVal ...time.Duration) time.Duration { + val, err := k.Duration() + if len(defaultVal) > 0 && err != nil { + return defaultVal[0] + } + return val +} + +// MustTimeFormat always parses with given format and returns value without error, +// it returns zero value if error occurs. +func (k *Key) MustTimeFormat(format string, defaultVal ...time.Time) time.Time { + val, err := k.TimeFormat(format) + if len(defaultVal) > 0 && err != nil { + return defaultVal[0] + } + return val +} + +// MustTime always parses with RFC3339 format and returns value without error, +// it returns zero value if error occurs. +func (k *Key) MustTime(defaultVal ...time.Time) time.Time { + return k.MustTimeFormat(time.RFC3339, defaultVal...) +} + +// In always returns value without error, +// it returns default value if error occurs or doesn't fit into candidates. +func (k *Key) In(defaultVal string, candidates []string) string { + val := k.String() + for _, cand := range candidates { + if val == cand { + return val + } + } + return defaultVal +} + +// InFloat64 always returns value without error, +// it returns default value if error occurs or doesn't fit into candidates. +func (k *Key) InFloat64(defaultVal float64, candidates []float64) float64 { + val := k.MustFloat64() + for _, cand := range candidates { + if val == cand { + return val + } + } + return defaultVal +} + +// InInt always returns value without error, +// it returns default value if error occurs or doesn't fit into candidates. +func (k *Key) InInt(defaultVal int, candidates []int) int { + val := k.MustInt() + for _, cand := range candidates { + if val == cand { + return val + } + } + return defaultVal +} + +// InInt64 always returns value without error, +// it returns default value if error occurs or doesn't fit into candidates. +func (k *Key) InInt64(defaultVal int64, candidates []int64) int64 { + val := k.MustInt64() + for _, cand := range candidates { + if val == cand { + return val + } + } + return defaultVal +} + +// InUint always returns value without error, +// it returns default value if error occurs or doesn't fit into candidates. +func (k *Key) InUint(defaultVal uint, candidates []uint) uint { + val := k.MustUint() + for _, cand := range candidates { + if val == cand { + return val + } + } + return defaultVal +} + +// InUint64 always returns value without error, +// it returns default value if error occurs or doesn't fit into candidates. 
+func (k *Key) InUint64(defaultVal uint64, candidates []uint64) uint64 { + val := k.MustUint64() + for _, cand := range candidates { + if val == cand { + return val + } + } + return defaultVal +} + +// InTimeFormat always parses with given format and returns value without error, +// it returns default value if error occurs or doesn't fit into candidates. +func (k *Key) InTimeFormat(format string, defaultVal time.Time, candidates []time.Time) time.Time { + val := k.MustTimeFormat(format) + for _, cand := range candidates { + if val == cand { + return val + } + } + return defaultVal +} + +// InTime always parses with RFC3339 format and returns value without error, +// it returns default value if error occurs or doesn't fit into candidates. +func (k *Key) InTime(defaultVal time.Time, candidates []time.Time) time.Time { + return k.InTimeFormat(time.RFC3339, defaultVal, candidates) +} + +// RangeFloat64 checks if value is in given range inclusively, +// and returns default value if it's not. +func (k *Key) RangeFloat64(defaultVal, min, max float64) float64 { + val := k.MustFloat64() + if val < min || val > max { + return defaultVal + } + return val +} + +// RangeInt checks if value is in given range inclusively, +// and returns default value if it's not. +func (k *Key) RangeInt(defaultVal, min, max int) int { + val := k.MustInt() + if val < min || val > max { + return defaultVal + } + return val +} + +// RangeInt64 checks if value is in given range inclusively, +// and returns default value if it's not. +func (k *Key) RangeInt64(defaultVal, min, max int64) int64 { + val := k.MustInt64() + if val < min || val > max { + return defaultVal + } + return val +} + +// RangeTimeFormat checks if value with given format is in given range inclusively, +// and returns default value if it's not. +func (k *Key) RangeTimeFormat(format string, defaultVal, min, max time.Time) time.Time { + val := k.MustTimeFormat(format) + if val.Unix() < min.Unix() || val.Unix() > max.Unix() { + return defaultVal + } + return val +} + +// RangeTime checks if value with RFC3339 format is in given range inclusively, +// and returns default value if it's not. +func (k *Key) RangeTime(defaultVal, min, max time.Time) time.Time { + return k.RangeTimeFormat(time.RFC3339, defaultVal, min, max) +} + +// Strings returns list of string devide by given delimiter. +func (k *Key) Strings(delim string) []string { + str := k.String() + if len(str) == 0 { + return []string{} + } + + vals := strings.Split(str, delim) + for i := range vals { + vals[i] = strings.TrimSpace(vals[i]) + } + return vals +} + +// Float64s returns list of float64 devide by given delimiter. +func (k *Key) Float64s(delim string) []float64 { + strs := k.Strings(delim) + vals := make([]float64, len(strs)) + for i := range strs { + vals[i], _ = strconv.ParseFloat(strs[i], 64) + } + return vals +} + +// Ints returns list of int devide by given delimiter. +func (k *Key) Ints(delim string) []int { + strs := k.Strings(delim) + vals := make([]int, len(strs)) + for i := range strs { + vals[i], _ = strconv.Atoi(strs[i]) + } + return vals +} + +// Int64s returns list of int64 devide by given delimiter. +func (k *Key) Int64s(delim string) []int64 { + strs := k.Strings(delim) + vals := make([]int64, len(strs)) + for i := range strs { + vals[i], _ = strconv.ParseInt(strs[i], 10, 64) + } + return vals +} + +// Uints returns list of uint devide by given delimiter. 
+func (k *Key) Uints(delim string) []uint { + strs := k.Strings(delim) + vals := make([]uint, len(strs)) + for i := range strs { + u, _ := strconv.ParseUint(strs[i], 10, 64) + vals[i] = uint(u) + } + return vals +} + +// Uint64s returns list of uint64 devide by given delimiter. +func (k *Key) Uint64s(delim string) []uint64 { + strs := k.Strings(delim) + vals := make([]uint64, len(strs)) + for i := range strs { + vals[i], _ = strconv.ParseUint(strs[i], 10, 64) + } + return vals +} + +// TimesFormat parses with given format and returns list of time.Time devide by given delimiter. +func (k *Key) TimesFormat(format, delim string) []time.Time { + strs := k.Strings(delim) + vals := make([]time.Time, len(strs)) + for i := range strs { + vals[i], _ = time.Parse(format, strs[i]) + } + return vals +} + +// Times parses with RFC3339 format and returns list of time.Time devide by given delimiter. +func (k *Key) Times(delim string) []time.Time { + return k.TimesFormat(time.RFC3339, delim) +} + +// SetValue changes key value. +func (k *Key) SetValue(v string) { + k.value = v +} + +// _________ __ .__ +// / _____/ ____ _____/ |_|__| ____ ____ +// \_____ \_/ __ \_/ ___\ __\ |/ _ \ / \ +// / \ ___/\ \___| | | ( <_> ) | \ +// /_______ /\___ >\___ >__| |__|\____/|___| / +// \/ \/ \/ \/ + +// Section represents a config section. +type Section struct { + f *File + Comment string + name string + keys map[string]*Key + keyList []string + keysHash map[string]string +} + +func newSection(f *File, name string) *Section { + return &Section{f, "", name, make(map[string]*Key), make([]string, 0, 10), make(map[string]string)} +} + +// Name returns name of Section. +func (s *Section) Name() string { + return s.name +} + +// NewKey creates a new key to given section. +func (s *Section) NewKey(name, val string) (*Key, error) { + if len(name) == 0 { + return nil, errors.New("error creating new key: empty key name") + } + + if s.f.BlockMode { + s.f.lock.Lock() + defer s.f.lock.Unlock() + } + + if inSlice(name, s.keyList) { + s.keys[name].value = val + return s.keys[name], nil + } + + s.keyList = append(s.keyList, name) + s.keys[name] = &Key{s, "", name, val, false} + s.keysHash[name] = val + return s.keys[name], nil +} + +// GetKey returns key in section by given name. +func (s *Section) GetKey(name string) (*Key, error) { + // FIXME: change to section level lock? + if s.f.BlockMode { + s.f.lock.RLock() + } + key := s.keys[name] + if s.f.BlockMode { + s.f.lock.RUnlock() + } + + if key == nil { + // Check if it is a child-section. + sname := s.name + for { + if i := strings.LastIndex(sname, "."); i > -1 { + sname = sname[:i] + sec, err := s.f.GetSection(sname) + if err != nil { + continue + } + return sec.GetKey(name) + } else { + break + } + } + return nil, fmt.Errorf("error when getting key of section '%s': key '%s' not exists", s.name, name) + } + return key, nil +} + +// Key assumes named Key exists in section and returns a zero-value when not. +func (s *Section) Key(name string) *Key { + key, err := s.GetKey(name) + if err != nil { + // It's OK here because the only possible error is empty key name, + // but if it's empty, this piece of code won't be executed. + key, _ = s.NewKey(name, "") + return key + } + return key +} + +// Keys returns list of keys of section. +func (s *Section) Keys() []*Key { + keys := make([]*Key, len(s.keyList)) + for i := range s.keyList { + keys[i] = s.Key(s.keyList[i]) + } + return keys +} + +// KeyStrings returns list of key names of section. 
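+// Usage sketch:
+//	names := cfg.Section("").KeyStrings()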
+func (s *Section) KeyStrings() []string { + list := make([]string, len(s.keyList)) + copy(list, s.keyList) + return list +} + +// KeysHash returns keys hash consisting of names and values. +func (s *Section) KeysHash() map[string]string { + if s.f.BlockMode { + s.f.lock.RLock() + defer s.f.lock.RUnlock() + } + + hash := map[string]string{} + for key, value := range s.keysHash { + hash[key] = value + } + return hash +} + +// DeleteKey deletes a key from section. +func (s *Section) DeleteKey(name string) { + if s.f.BlockMode { + s.f.lock.Lock() + defer s.f.lock.Unlock() + } + + for i, k := range s.keyList { + if k == name { + s.keyList = append(s.keyList[:i], s.keyList[i+1:]...) + delete(s.keys, name) + return + } + } +} + +// ___________.__.__ +// \_ _____/|__| | ____ +// | __) | | | _/ __ \ +// | \ | | |_\ ___/ +// \___ / |__|____/\___ > +// \/ \/ + +// File represents a combination of a or more INI file(s) in memory. +type File struct { + // Should make things safe, but sometimes doesn't matter. + BlockMode bool + // Make sure data is safe in multiple goroutines. + lock sync.RWMutex + + // Allow combination of multiple data sources. + dataSources []dataSource + // Actual data is stored here. + sections map[string]*Section + + // To keep data in order. + sectionList []string + + NameMapper +} + +// newFile initializes File object with given data sources. +func newFile(dataSources []dataSource) *File { + return &File{ + BlockMode: true, + dataSources: dataSources, + sections: make(map[string]*Section), + sectionList: make([]string, 0, 10), + } +} + +func parseDataSource(source interface{}) (dataSource, error) { + switch s := source.(type) { + case string: + return sourceFile{s}, nil + case []byte: + return &sourceData{s}, nil + default: + return nil, fmt.Errorf("error parsing data source: unknown type '%s'", s) + } +} + +// Load loads and parses from INI data sources. +// Arguments can be mixed of file name with string type, or raw data in []byte. +func Load(source interface{}, others ...interface{}) (_ *File, err error) { + sources := make([]dataSource, len(others)+1) + sources[0], err = parseDataSource(source) + if err != nil { + return nil, err + } + for i := range others { + sources[i+1], err = parseDataSource(others[i]) + if err != nil { + return nil, err + } + } + f := newFile(sources) + return f, f.Reload() +} + +// Empty returns an empty file object. +func Empty() *File { + // Ignore error here, we sure our data is good. + f, _ := Load([]byte("")) + return f +} + +// NewSection creates a new section. +func (f *File) NewSection(name string) (*Section, error) { + if len(name) == 0 { + return nil, errors.New("error creating new section: empty section name") + } + + if f.BlockMode { + f.lock.Lock() + defer f.lock.Unlock() + } + + if inSlice(name, f.sectionList) { + return f.sections[name], nil + } + + f.sectionList = append(f.sectionList, name) + f.sections[name] = newSection(f, name) + return f.sections[name], nil +} + +// NewSections creates a list of sections. +func (f *File) NewSections(names ...string) (err error) { + for _, name := range names { + if _, err = f.NewSection(name); err != nil { + return err + } + } + return nil +} + +// GetSection returns section by given name. 
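+// Usage sketch ("section name" is a placeholder):
+//	section, err := cfg.GetSection("section name")
+//	if err != nil {
+//		// the section does not exist
+//	}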
+func (f *File) GetSection(name string) (*Section, error) { + if len(name) == 0 { + name = DEFAULT_SECTION + } + + if f.BlockMode { + f.lock.RLock() + defer f.lock.RUnlock() + } + + sec := f.sections[name] + if sec == nil { + return nil, fmt.Errorf("error when getting section: section '%s' not exists", name) + } + return sec, nil +} + +// Section assumes named section exists and returns a zero-value when not. +func (f *File) Section(name string) *Section { + sec, err := f.GetSection(name) + if err != nil { + // Note: It's OK here because the only possible error is empty section name, + // but if it's empty, this piece of code won't be executed. + sec, _ = f.NewSection(name) + return sec + } + return sec +} + +// Section returns list of Section. +func (f *File) Sections() []*Section { + sections := make([]*Section, len(f.sectionList)) + for i := range f.sectionList { + sections[i] = f.Section(f.sectionList[i]) + } + return sections +} + +// SectionStrings returns list of section names. +func (f *File) SectionStrings() []string { + list := make([]string, len(f.sectionList)) + copy(list, f.sectionList) + return list +} + +// DeleteSection deletes a section. +func (f *File) DeleteSection(name string) { + if f.BlockMode { + f.lock.Lock() + defer f.lock.Unlock() + } + + if len(name) == 0 { + name = DEFAULT_SECTION + } + + for i, s := range f.sectionList { + if s == name { + f.sectionList = append(f.sectionList[:i], f.sectionList[i+1:]...) + delete(f.sections, name) + return + } + } +} + +func cutComment(str string) string { + i := strings.Index(str, "#") + if i == -1 { + return str + } + return str[:i] +} + +func checkMultipleLines(buf *bufio.Reader, line, val, valQuote string) (string, error) { + isEnd := false + for { + next, err := buf.ReadString('\n') + if err != nil { + if err != io.EOF { + return "", err + } + isEnd = true + } + pos := strings.LastIndex(next, valQuote) + if pos > -1 { + val += next[:pos] + break + } + val += next + if isEnd { + return "", fmt.Errorf("error parsing line: missing closing key quote from '%s' to '%s'", line, next) + } + } + return val, nil +} + +func checkContinuationLines(buf *bufio.Reader, val string) (string, bool, error) { + isEnd := false + for { + valLen := len(val) + if valLen == 0 || val[valLen-1] != '\\' { + break + } + val = val[:valLen-1] + + next, err := buf.ReadString('\n') + if err != nil { + if err != io.EOF { + return "", isEnd, err + } + isEnd = true + } + + next = strings.TrimSpace(next) + if len(next) == 0 { + break + } + val += next + } + return val, isEnd, nil +} + +// parse parses data through an io.Reader. +func (f *File) parse(reader io.Reader) error { + buf := bufio.NewReader(reader) + + // Handle BOM-UTF8. + // http://en.wikipedia.org/wiki/Byte_order_mark#Representations_of_byte_order_marks_by_encoding + mask, err := buf.Peek(3) + if err == nil && len(mask) >= 3 && mask[0] == 239 && mask[1] == 187 && mask[2] == 191 { + buf.Read(mask) + } + + count := 1 + comments := "" + isEnd := false + + section, err := f.NewSection(DEFAULT_SECTION) + if err != nil { + return err + } + + for { + line, err := buf.ReadString('\n') + line = strings.TrimSpace(line) + length := len(line) + + // Check error and ignore io.EOF just for a moment. + if err != nil { + if err != io.EOF { + return fmt.Errorf("error reading next line: %v", err) + } + // The last line of file could be an empty line. + if length == 0 { + break + } + isEnd = true + } + + // Skip empty lines. 
+ if length == 0 { + continue + } + + switch { + case line[0] == '#' || line[0] == ';': // Comments. + if len(comments) == 0 { + comments = line + } else { + comments += LineBreak + line + } + continue + case line[0] == '[' && line[length-1] == ']': // New sction. + section, err = f.NewSection(strings.TrimSpace(line[1 : length-1])) + if err != nil { + return err + } + + if len(comments) > 0 { + section.Comment = comments + comments = "" + } + // Reset counter. + count = 1 + continue + } + + // Other possibilities. + var ( + i int + keyQuote string + kname string + valQuote string + val string + ) + + // Key name surrounded by quotes. + if line[0] == '"' { + if length > 6 && line[0:3] == `"""` { + keyQuote = `"""` + } else { + keyQuote = `"` + } + } else if line[0] == '`' { + keyQuote = "`" + } + if len(keyQuote) > 0 { + qLen := len(keyQuote) + pos := strings.Index(line[qLen:], keyQuote) + if pos == -1 { + return fmt.Errorf("error parsing line: missing closing key quote: %s", line) + } + pos = pos + qLen + i = strings.IndexAny(line[pos:], "=:") + if i < 0 { + return fmt.Errorf("error parsing line: key-value delimiter not found: %s", line) + } else if i == pos { + return fmt.Errorf("error parsing line: key is empty: %s", line) + } + i = i + pos + kname = line[qLen:pos] // Just keep spaces inside quotes. + } else { + i = strings.IndexAny(line, "=:") + if i < 0 { + return fmt.Errorf("error parsing line: key-value delimiter not found: %s", line) + } else if i == 0 { + return fmt.Errorf("error parsing line: key is empty: %s", line) + } + kname = strings.TrimSpace(line[0:i]) + } + + isAutoIncr := false + // Auto increment. + if kname == "-" { + isAutoIncr = true + kname = "#" + fmt.Sprint(count) + count++ + } + + lineRight := strings.TrimSpace(line[i+1:]) + lineRightLength := len(lineRight) + firstChar := "" + if lineRightLength >= 2 { + firstChar = lineRight[0:1] + } + if firstChar == "`" { + valQuote = "`" + } else if firstChar == `"` { + if lineRightLength >= 3 && lineRight[0:3] == `"""` { + valQuote = `"""` + } else { + valQuote = `"` + } + } else if firstChar == `'` { + valQuote = `'` + } + + if len(valQuote) > 0 { + qLen := len(valQuote) + pos := strings.LastIndex(lineRight[qLen:], valQuote) + // For multiple-line value check. + if pos == -1 { + if valQuote == `"` || valQuote == `'` { + return fmt.Errorf("error parsing line: single quote does not allow multiple-line value: %s", line) + } + + val = lineRight[qLen:] + "\n" + val, err = checkMultipleLines(buf, line, val, valQuote) + if err != nil { + return err + } + } else { + val = lineRight[qLen : pos+qLen] + } + } else { + val = strings.TrimSpace(cutComment(lineRight)) + val, isEnd, err = checkContinuationLines(buf, val) + if err != nil { + return err + } + } + + k, err := section.NewKey(kname, val) + if err != nil { + return err + } + k.isAutoIncr = isAutoIncr + if len(comments) > 0 { + k.Comment = comments + comments = "" + } + + if isEnd { + break + } + } + return nil +} + +func (f *File) reload(s dataSource) error { + r, err := s.ReadCloser() + if err != nil { + return err + } + defer r.Close() + + return f.parse(r) +} + +// Reload reloads and parses all data sources. +func (f *File) Reload() (err error) { + for _, s := range f.dataSources { + if err = f.reload(s); err != nil { + return err + } + } + return nil +} + +// Append appends one or more data sources and reloads automatically. 
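+// Usage sketch (the file name and raw bytes are placeholders):
+//	err := cfg.Append("other file", []byte("other raw data"))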
+func (f *File) Append(source interface{}, others ...interface{}) error { + ds, err := parseDataSource(source) + if err != nil { + return err + } + f.dataSources = append(f.dataSources, ds) + for _, s := range others { + ds, err = parseDataSource(s) + if err != nil { + return err + } + f.dataSources = append(f.dataSources, ds) + } + return f.Reload() +} + +// WriteToIndent writes file content into io.Writer with given value indention. +func (f *File) WriteToIndent(w io.Writer, indent string) (n int64, err error) { + equalSign := "=" + if PrettyFormat { + equalSign = " = " + } + + // Use buffer to make sure target is safe until finish encoding. + buf := bytes.NewBuffer(nil) + for i, sname := range f.sectionList { + sec := f.Section(sname) + if len(sec.Comment) > 0 { + if sec.Comment[0] != '#' && sec.Comment[0] != ';' { + sec.Comment = "; " + sec.Comment + } + if _, err = buf.WriteString(sec.Comment + LineBreak); err != nil { + return 0, err + } + } + + if i > 0 { + if _, err = buf.WriteString("[" + sname + "]" + LineBreak); err != nil { + return 0, err + } + } else { + // Write nothing if default section is empty. + if len(sec.keyList) == 0 { + continue + } + } + + for _, kname := range sec.keyList { + key := sec.Key(kname) + if len(key.Comment) > 0 { + if len(indent) > 0 && sname != DEFAULT_SECTION { + buf.WriteString(indent) + } + if key.Comment[0] != '#' && key.Comment[0] != ';' { + key.Comment = "; " + key.Comment + } + if _, err = buf.WriteString(key.Comment + LineBreak); err != nil { + return 0, err + } + } + + if len(indent) > 0 && sname != DEFAULT_SECTION { + buf.WriteString(indent) + } + + switch { + case key.isAutoIncr: + kname = "-" + case strings.Contains(kname, "`") || strings.Contains(kname, `"`): + kname = `"""` + kname + `"""` + case strings.Contains(kname, `=`) || strings.Contains(kname, `:`): + kname = "`" + kname + "`" + } + + val := key.value + // In case key value contains "\n", "`" or "\"". + if strings.Contains(val, "\n") || strings.Contains(val, "`") || strings.Contains(val, `"`) || + strings.Contains(val, "#") { + val = `"""` + val + `"""` + } + if _, err = buf.WriteString(kname + equalSign + val + LineBreak); err != nil { + return 0, err + } + } + + // Put a line between sections. + if _, err = buf.WriteString(LineBreak); err != nil { + return 0, err + } + } + + return buf.WriteTo(w) +} + +// WriteTo writes file content into io.Writer. +func (f *File) WriteTo(w io.Writer) (int64, error) { + return f.WriteToIndent(w, "") +} + +// SaveToIndent writes content to file system with given value indention. +func (f *File) SaveToIndent(filename, indent string) error { + // Note: Because we are truncating with os.Create, + // so it's safer to save to a temporary file location and rename afte done. + tmpPath := filename + "." + strconv.Itoa(time.Now().Nanosecond()) + ".tmp" + defer os.Remove(tmpPath) + + fw, err := os.Create(tmpPath) + if err != nil { + return err + } + + if _, err = f.WriteToIndent(fw, indent); err != nil { + fw.Close() + return err + } + fw.Close() + + // Remove old file and rename the new one. + os.Remove(filename) + return os.Rename(tmpPath, filename) +} + +// SaveTo writes content to file system. 
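+// Usage sketch:
+//	err := cfg.SaveTo("my.ini")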
+func (f *File) SaveTo(filename string) error { + return f.SaveToIndent(filename, "") +} diff --git a/vendor/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/vendor/github.com/go-ini/ini/struct.go b/vendor/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/vendor/github.com/go-ini/ini/struct.go new file mode 100644 index 000000000..c11843710 --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/vendor/github.com/go-ini/ini/struct.go @@ -0,0 +1,350 @@ +// Copyright 2014 Unknwon +// +// Licensed under the Apache License, Version 2.0 (the "License"): you may +// not use this file except in compliance with the License. You may obtain +// a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +// License for the specific language governing permissions and limitations +// under the License. + +package ini + +import ( + "bytes" + "errors" + "fmt" + "reflect" + "time" + "unicode" +) + +// NameMapper represents a ini tag name mapper. +type NameMapper func(string) string + +// Built-in name getters. +var ( + // AllCapsUnderscore converts to format ALL_CAPS_UNDERSCORE. + AllCapsUnderscore NameMapper = func(raw string) string { + newstr := make([]rune, 0, len(raw)) + for i, chr := range raw { + if isUpper := 'A' <= chr && chr <= 'Z'; isUpper { + if i > 0 { + newstr = append(newstr, '_') + } + } + newstr = append(newstr, unicode.ToUpper(chr)) + } + return string(newstr) + } + // TitleUnderscore converts to format title_underscore. + TitleUnderscore NameMapper = func(raw string) string { + newstr := make([]rune, 0, len(raw)) + for i, chr := range raw { + if isUpper := 'A' <= chr && chr <= 'Z'; isUpper { + if i > 0 { + newstr = append(newstr, '_') + } + chr -= ('A' - 'a') + } + newstr = append(newstr, chr) + } + return string(newstr) + } +) + +func (s *Section) parseFieldName(raw, actual string) string { + if len(actual) > 0 { + return actual + } + if s.f.NameMapper != nil { + return s.f.NameMapper(raw) + } + return raw +} + +func parseDelim(actual string) string { + if len(actual) > 0 { + return actual + } + return "," +} + +var reflectTime = reflect.TypeOf(time.Now()).Kind() + +// setWithProperType sets proper value to field based on its type, +// but it does not return error for failing parsing, +// because we want to use default value that is already assigned to strcut. 
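+// In other words, a value assigned to the struct field before mapping acts as
+// the default and is kept when the key is missing or cannot be parsed.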
+func setWithProperType(t reflect.Type, key *Key, field reflect.Value, delim string) error { + switch t.Kind() { + case reflect.String: + if len(key.String()) == 0 { + return nil + } + field.SetString(key.String()) + case reflect.Bool: + boolVal, err := key.Bool() + if err != nil { + return nil + } + field.SetBool(boolVal) + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + durationVal, err := key.Duration() + if err == nil { + field.Set(reflect.ValueOf(durationVal)) + return nil + } + + intVal, err := key.Int64() + if err != nil { + return nil + } + field.SetInt(intVal) + // byte is an alias for uint8, so supporting uint8 breaks support for byte + case reflect.Uint, reflect.Uint16, reflect.Uint32, reflect.Uint64: + durationVal, err := key.Duration() + if err == nil { + field.Set(reflect.ValueOf(durationVal)) + return nil + } + + uintVal, err := key.Uint64() + if err != nil { + return nil + } + field.SetUint(uintVal) + + case reflect.Float64: + floatVal, err := key.Float64() + if err != nil { + return nil + } + field.SetFloat(floatVal) + case reflectTime: + timeVal, err := key.Time() + if err != nil { + return nil + } + field.Set(reflect.ValueOf(timeVal)) + case reflect.Slice: + vals := key.Strings(delim) + numVals := len(vals) + if numVals == 0 { + return nil + } + + sliceOf := field.Type().Elem().Kind() + + var times []time.Time + if sliceOf == reflectTime { + times = key.Times(delim) + } + + slice := reflect.MakeSlice(field.Type(), numVals, numVals) + for i := 0; i < numVals; i++ { + switch sliceOf { + case reflectTime: + slice.Index(i).Set(reflect.ValueOf(times[i])) + default: + slice.Index(i).Set(reflect.ValueOf(vals[i])) + } + } + field.Set(slice) + default: + return fmt.Errorf("unsupported type '%s'", t) + } + return nil +} + +func (s *Section) mapTo(val reflect.Value) error { + if val.Kind() == reflect.Ptr { + val = val.Elem() + } + typ := val.Type() + + for i := 0; i < typ.NumField(); i++ { + field := val.Field(i) + tpField := typ.Field(i) + + tag := tpField.Tag.Get("ini") + if tag == "-" { + continue + } + + fieldName := s.parseFieldName(tpField.Name, tag) + if len(fieldName) == 0 || !field.CanSet() { + continue + } + + isAnonymous := tpField.Type.Kind() == reflect.Ptr && tpField.Anonymous + isStruct := tpField.Type.Kind() == reflect.Struct + if isAnonymous { + field.Set(reflect.New(tpField.Type.Elem())) + } + + if isAnonymous || isStruct { + if sec, err := s.f.GetSection(fieldName); err == nil { + if err = sec.mapTo(field); err != nil { + return fmt.Errorf("error mapping field(%s): %v", fieldName, err) + } + continue + } + } + + if key, err := s.GetKey(fieldName); err == nil { + if err = setWithProperType(tpField.Type, key, field, parseDelim(tpField.Tag.Get("delim"))); err != nil { + return fmt.Errorf("error mapping field(%s): %v", fieldName, err) + } + } + } + return nil +} + +// MapTo maps section to given struct. +func (s *Section) MapTo(v interface{}) error { + typ := reflect.TypeOf(v) + val := reflect.ValueOf(v) + if typ.Kind() == reflect.Ptr { + typ = typ.Elem() + val = val.Elem() + } else { + return errors.New("cannot map to non-pointer struct") + } + + return s.mapTo(val) +} + +// MapTo maps file to given struct. +func (f *File) MapTo(v interface{}) error { + return f.Section("").MapTo(v) +} + +// MapTo maps data sources to given struct with name mapper. +func MapToWithMapper(v interface{}, mapper NameMapper, source interface{}, others ...interface{}) error { + cfg, err := Load(source, others...) 
+ if err != nil { + return err + } + cfg.NameMapper = mapper + return cfg.MapTo(v) +} + +// MapTo maps data sources to given struct. +func MapTo(v, source interface{}, others ...interface{}) error { + return MapToWithMapper(v, nil, source, others...) +} + +// reflectWithProperType does the opposite thing with setWithProperType. +func reflectWithProperType(t reflect.Type, key *Key, field reflect.Value, delim string) error { + switch t.Kind() { + case reflect.String: + key.SetValue(field.String()) + case reflect.Bool, + reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, + reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, + reflect.Float64, + reflectTime: + key.SetValue(fmt.Sprint(field)) + case reflect.Slice: + vals := field.Slice(0, field.Len()) + if field.Len() == 0 { + return nil + } + + var buf bytes.Buffer + isTime := fmt.Sprint(field.Type()) == "[]time.Time" + for i := 0; i < field.Len(); i++ { + if isTime { + buf.WriteString(vals.Index(i).Interface().(time.Time).Format(time.RFC3339)) + } else { + buf.WriteString(fmt.Sprint(vals.Index(i))) + } + buf.WriteString(delim) + } + key.SetValue(buf.String()[:buf.Len()-1]) + default: + return fmt.Errorf("unsupported type '%s'", t) + } + return nil +} + +func (s *Section) reflectFrom(val reflect.Value) error { + if val.Kind() == reflect.Ptr { + val = val.Elem() + } + typ := val.Type() + + for i := 0; i < typ.NumField(); i++ { + field := val.Field(i) + tpField := typ.Field(i) + + tag := tpField.Tag.Get("ini") + if tag == "-" { + continue + } + + fieldName := s.parseFieldName(tpField.Name, tag) + if len(fieldName) == 0 || !field.CanSet() { + continue + } + + if (tpField.Type.Kind() == reflect.Ptr && tpField.Anonymous) || + (tpField.Type.Kind() == reflect.Struct) { + // Note: The only error here is section doesn't exist. + sec, err := s.f.GetSection(fieldName) + if err != nil { + // Note: fieldName can never be empty here, ignore error. + sec, _ = s.f.NewSection(fieldName) + } + if err = sec.reflectFrom(field); err != nil { + return fmt.Errorf("error reflecting field(%s): %v", fieldName, err) + } + continue + } + + // Note: Same reason as secion. + key, err := s.GetKey(fieldName) + if err != nil { + key, _ = s.NewKey(fieldName, "") + } + if err = reflectWithProperType(tpField.Type, key, field, parseDelim(tpField.Tag.Get("delim"))); err != nil { + return fmt.Errorf("error reflecting field(%s): %v", fieldName, err) + } + + } + return nil +} + +// ReflectFrom reflects secion from given struct. +func (s *Section) ReflectFrom(v interface{}) error { + typ := reflect.TypeOf(v) + val := reflect.ValueOf(v) + if typ.Kind() == reflect.Ptr { + typ = typ.Elem() + val = val.Elem() + } else { + return errors.New("cannot reflect from non-pointer struct") + } + + return s.reflectFrom(val) +} + +// ReflectFrom reflects file from given struct. +func (f *File) ReflectFrom(v interface{}) error { + return f.Section("").ReflectFrom(v) +} + +// ReflectFrom reflects data sources from given struct with name mapper. +func ReflectFromWithMapper(cfg *File, v interface{}, mapper NameMapper) error { + cfg.NameMapper = mapper + return cfg.ReflectFrom(v) +} + +// ReflectFrom reflects data sources from given struct. 
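+// Usage sketch (a is a pointer to a struct, cfg a *File):
+//	err := ReflectFrom(cfg, a)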
+func ReflectFrom(cfg *File, v interface{}) error { + return ReflectFromWithMapper(cfg, v, nil) +} diff --git a/vendor/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/vendor/github.com/jmespath/go-jmespath/.gitignore b/vendor/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/vendor/github.com/jmespath/go-jmespath/.gitignore new file mode 100644 index 000000000..531fcc11c --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/vendor/github.com/jmespath/go-jmespath/.gitignore @@ -0,0 +1,4 @@ +jpgo +jmespath-fuzz.zip +cpu.out +go-jmespath.test diff --git a/vendor/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/vendor/github.com/jmespath/go-jmespath/.travis.yml b/vendor/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/vendor/github.com/jmespath/go-jmespath/.travis.yml new file mode 100644 index 000000000..1f9807757 --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/vendor/github.com/jmespath/go-jmespath/.travis.yml @@ -0,0 +1,9 @@ +language: go + +sudo: false + +go: + - 1.4 + +install: go get -v -t ./... +script: make test diff --git a/vendor/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/vendor/github.com/jmespath/go-jmespath/LICENSE b/vendor/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/vendor/github.com/jmespath/go-jmespath/LICENSE new file mode 100644 index 000000000..b03310a91 --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/vendor/github.com/jmespath/go-jmespath/LICENSE @@ -0,0 +1,13 @@ +Copyright 2015 James Saryerwinnie + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. diff --git a/vendor/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/vendor/github.com/jmespath/go-jmespath/Makefile b/vendor/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/vendor/github.com/jmespath/go-jmespath/Makefile new file mode 100644 index 000000000..a828d2848 --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/vendor/github.com/jmespath/go-jmespath/Makefile @@ -0,0 +1,44 @@ + +CMD = jpgo + +help: + @echo "Please use \`make ' where is one of" + @echo " test to run all the tests" + @echo " build to build the library and jp executable" + @echo " generate to run codegen" + + +generate: + go generate ./... + +build: + rm -f $(CMD) + go build ./... + rm -f cmd/$(CMD)/$(CMD) && cd cmd/$(CMD)/ && go build ./... + mv cmd/$(CMD)/$(CMD) . + +test: + go test -v ./... + +check: + go vet ./... + @echo "golint ./..." + @lint=`golint ./...`; \ + lint=`echo "$$lint" | grep -v "astnodetype_string.go" | grep -v "toktype_string.go"`; \ + echo "$$lint"; \ + if [ "$$lint" != "" ]; then exit 1; fi + +htmlc: + go test -coverprofile="/tmp/jpcov" && go tool cover -html="/tmp/jpcov" && unlink /tmp/jpcov + +buildfuzz: + go-fuzz-build github.com/jmespath/go-jmespath/fuzz + +fuzz: buildfuzz + go-fuzz -bin=./jmespath-fuzz.zip -workdir=fuzz/testdata + +bench: + go test -bench . 
-cpuprofile cpu.out + +pprof-cpu: + go tool pprof ./go-jmespath.test ./cpu.out diff --git a/vendor/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/vendor/github.com/jmespath/go-jmespath/README.md b/vendor/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/vendor/github.com/jmespath/go-jmespath/README.md new file mode 100644 index 000000000..187ef676d --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/vendor/github.com/jmespath/go-jmespath/README.md @@ -0,0 +1,7 @@ +# go-jmespath - A JMESPath implementation in Go + +[![Build Status](https://img.shields.io/travis/jmespath/go-jmespath.svg)](https://travis-ci.org/jmespath/go-jmespath) + + + +See http://jmespath.org for more info. diff --git a/vendor/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/vendor/github.com/jmespath/go-jmespath/api.go b/vendor/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/vendor/github.com/jmespath/go-jmespath/api.go new file mode 100644 index 000000000..9cfa988bc --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/vendor/github.com/jmespath/go-jmespath/api.go @@ -0,0 +1,49 @@ +package jmespath + +import "strconv" + +// JmesPath is the epresentation of a compiled JMES path query. A JmesPath is +// safe for concurrent use by multiple goroutines. +type JMESPath struct { + ast ASTNode + intr *treeInterpreter +} + +// Compile parses a JMESPath expression and returns, if successful, a JMESPath +// object that can be used to match against data. +func Compile(expression string) (*JMESPath, error) { + parser := NewParser() + ast, err := parser.Parse(expression) + if err != nil { + return nil, err + } + jmespath := &JMESPath{ast: ast, intr: newInterpreter()} + return jmespath, nil +} + +// MustCompile is like Compile but panics if the expression cannot be parsed. +// It simplifies safe initialization of global variables holding compiled +// JMESPaths. +func MustCompile(expression string) *JMESPath { + jmespath, err := Compile(expression) + if err != nil { + panic(`jmespath: Compile(` + strconv.Quote(expression) + `): ` + err.Error()) + } + return jmespath +} + +// Search evaluates a JMESPath expression against input data and returns the result. +func (jp *JMESPath) Search(data interface{}) (interface{}, error) { + return jp.intr.Execute(jp.ast, data) +} + +// Search evaluates a JMESPath expression against input data and returns the result. 
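+// Usage sketch (the JSON document is a placeholder; data would typically come
+// from encoding/json unmarshalling):
+//	var data interface{}
+//	_ = json.Unmarshal([]byte(`{"foo": {"bar": "baz"}}`), &data)
+//	result, _ := Search("foo.bar", data) // "baz"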
+func Search(expression string, data interface{}) (interface{}, error) { + intr := newInterpreter() + parser := NewParser() + ast, err := parser.Parse(expression) + if err != nil { + return nil, err + } + return intr.Execute(ast, data) +} diff --git a/vendor/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/vendor/github.com/jmespath/go-jmespath/astnodetype_string.go b/vendor/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/vendor/github.com/jmespath/go-jmespath/astnodetype_string.go new file mode 100644 index 000000000..1cd2d239c --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/vendor/github.com/jmespath/go-jmespath/astnodetype_string.go @@ -0,0 +1,16 @@ +// generated by stringer -type astNodeType; DO NOT EDIT + +package jmespath + +import "fmt" + +const _astNodeType_name = "ASTEmptyASTComparatorASTCurrentNodeASTExpRefASTFunctionExpressionASTFieldASTFilterProjectionASTFlattenASTIdentityASTIndexASTIndexExpressionASTKeyValPairASTLiteralASTMultiSelectHashASTMultiSelectListASTOrExpressionASTAndExpressionASTNotExpressionASTPipeASTProjectionASTSubexpressionASTSliceASTValueProjection" + +var _astNodeType_index = [...]uint16{0, 8, 21, 35, 44, 65, 73, 92, 102, 113, 121, 139, 152, 162, 180, 198, 213, 229, 245, 252, 265, 281, 289, 307} + +func (i astNodeType) String() string { + if i < 0 || i >= astNodeType(len(_astNodeType_index)-1) { + return fmt.Sprintf("astNodeType(%d)", i) + } + return _astNodeType_name[_astNodeType_index[i]:_astNodeType_index[i+1]] +} diff --git a/vendor/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/vendor/github.com/jmespath/go-jmespath/functions.go b/vendor/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/vendor/github.com/jmespath/go-jmespath/functions.go new file mode 100644 index 000000000..9b7cd89b4 --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/vendor/github.com/jmespath/go-jmespath/functions.go @@ -0,0 +1,842 @@ +package jmespath + +import ( + "encoding/json" + "errors" + "fmt" + "math" + "reflect" + "sort" + "strconv" + "strings" + "unicode/utf8" +) + +type jpFunction func(arguments []interface{}) (interface{}, error) + +type jpType string + +const ( + jpUnknown jpType = "unknown" + jpNumber jpType = "number" + jpString jpType = "string" + jpArray jpType = "array" + jpObject jpType = "object" + jpArrayNumber jpType = "array[number]" + jpArrayString jpType = "array[string]" + jpExpref jpType = "expref" + jpAny jpType = "any" +) + +type functionEntry struct { + name string + arguments []argSpec + handler jpFunction + hasExpRef bool +} + +type argSpec struct { + types []jpType + variadic bool +} + +type byExprString struct { + intr *treeInterpreter + node ASTNode + items []interface{} + hasError bool +} + +func (a *byExprString) Len() int { + return len(a.items) +} +func (a *byExprString) Swap(i, j int) { + a.items[i], a.items[j] = a.items[j], a.items[i] +} +func (a *byExprString) Less(i, j int) bool { + first, err := a.intr.Execute(a.node, a.items[i]) + if err != nil { + a.hasError = true + // Return a dummy value. + return true + } + ith, ok := first.(string) + if !ok { + a.hasError = true + return true + } + second, err := a.intr.Execute(a.node, a.items[j]) + if err != nil { + a.hasError = true + // Return a dummy value. 
+ return true + } + jth, ok := second.(string) + if !ok { + a.hasError = true + return true + } + return ith < jth +} + +type byExprFloat struct { + intr *treeInterpreter + node ASTNode + items []interface{} + hasError bool +} + +func (a *byExprFloat) Len() int { + return len(a.items) +} +func (a *byExprFloat) Swap(i, j int) { + a.items[i], a.items[j] = a.items[j], a.items[i] +} +func (a *byExprFloat) Less(i, j int) bool { + first, err := a.intr.Execute(a.node, a.items[i]) + if err != nil { + a.hasError = true + // Return a dummy value. + return true + } + ith, ok := first.(float64) + if !ok { + a.hasError = true + return true + } + second, err := a.intr.Execute(a.node, a.items[j]) + if err != nil { + a.hasError = true + // Return a dummy value. + return true + } + jth, ok := second.(float64) + if !ok { + a.hasError = true + return true + } + return ith < jth +} + +type functionCaller struct { + functionTable map[string]functionEntry +} + +func newFunctionCaller() *functionCaller { + caller := &functionCaller{} + caller.functionTable = map[string]functionEntry{ + "length": { + name: "length", + arguments: []argSpec{ + {types: []jpType{jpString, jpArray, jpObject}}, + }, + handler: jpfLength, + }, + "starts_with": { + name: "starts_with", + arguments: []argSpec{ + {types: []jpType{jpString}}, + {types: []jpType{jpString}}, + }, + handler: jpfStartsWith, + }, + "abs": { + name: "abs", + arguments: []argSpec{ + {types: []jpType{jpNumber}}, + }, + handler: jpfAbs, + }, + "avg": { + name: "avg", + arguments: []argSpec{ + {types: []jpType{jpArrayNumber}}, + }, + handler: jpfAvg, + }, + "ceil": { + name: "ceil", + arguments: []argSpec{ + {types: []jpType{jpNumber}}, + }, + handler: jpfCeil, + }, + "contains": { + name: "contains", + arguments: []argSpec{ + {types: []jpType{jpArray, jpString}}, + {types: []jpType{jpAny}}, + }, + handler: jpfContains, + }, + "ends_with": { + name: "ends_with", + arguments: []argSpec{ + {types: []jpType{jpString}}, + {types: []jpType{jpString}}, + }, + handler: jpfEndsWith, + }, + "floor": { + name: "floor", + arguments: []argSpec{ + {types: []jpType{jpNumber}}, + }, + handler: jpfFloor, + }, + "map": { + name: "amp", + arguments: []argSpec{ + {types: []jpType{jpExpref}}, + {types: []jpType{jpArray}}, + }, + handler: jpfMap, + hasExpRef: true, + }, + "max": { + name: "max", + arguments: []argSpec{ + {types: []jpType{jpArrayNumber, jpArrayString}}, + }, + handler: jpfMax, + }, + "merge": { + name: "merge", + arguments: []argSpec{ + {types: []jpType{jpObject}, variadic: true}, + }, + handler: jpfMerge, + }, + "max_by": { + name: "max_by", + arguments: []argSpec{ + {types: []jpType{jpArray}}, + {types: []jpType{jpExpref}}, + }, + handler: jpfMaxBy, + hasExpRef: true, + }, + "sum": { + name: "sum", + arguments: []argSpec{ + {types: []jpType{jpArrayNumber}}, + }, + handler: jpfSum, + }, + "min": { + name: "min", + arguments: []argSpec{ + {types: []jpType{jpArrayNumber, jpArrayString}}, + }, + handler: jpfMin, + }, + "min_by": { + name: "min_by", + arguments: []argSpec{ + {types: []jpType{jpArray}}, + {types: []jpType{jpExpref}}, + }, + handler: jpfMinBy, + hasExpRef: true, + }, + "type": { + name: "type", + arguments: []argSpec{ + {types: []jpType{jpAny}}, + }, + handler: jpfType, + }, + "keys": { + name: "keys", + arguments: []argSpec{ + {types: []jpType{jpObject}}, + }, + handler: jpfKeys, + }, + "values": { + name: "values", + arguments: []argSpec{ + {types: []jpType{jpObject}}, + }, + handler: jpfValues, + }, + "sort": { + name: "sort", + arguments: []argSpec{ + 
{types: []jpType{jpArrayString, jpArrayNumber}}, + }, + handler: jpfSort, + }, + "sort_by": { + name: "sort_by", + arguments: []argSpec{ + {types: []jpType{jpArray}}, + {types: []jpType{jpExpref}}, + }, + handler: jpfSortBy, + hasExpRef: true, + }, + "join": { + name: "join", + arguments: []argSpec{ + {types: []jpType{jpString}}, + {types: []jpType{jpArrayString}}, + }, + handler: jpfJoin, + }, + "reverse": { + name: "reverse", + arguments: []argSpec{ + {types: []jpType{jpArray, jpString}}, + }, + handler: jpfReverse, + }, + "to_array": { + name: "to_array", + arguments: []argSpec{ + {types: []jpType{jpAny}}, + }, + handler: jpfToArray, + }, + "to_string": { + name: "to_string", + arguments: []argSpec{ + {types: []jpType{jpAny}}, + }, + handler: jpfToString, + }, + "to_number": { + name: "to_number", + arguments: []argSpec{ + {types: []jpType{jpAny}}, + }, + handler: jpfToNumber, + }, + "not_null": { + name: "not_null", + arguments: []argSpec{ + {types: []jpType{jpAny}, variadic: true}, + }, + handler: jpfNotNull, + }, + } + return caller +} + +func (e *functionEntry) resolveArgs(arguments []interface{}) ([]interface{}, error) { + if len(e.arguments) == 0 { + return arguments, nil + } + if !e.arguments[len(e.arguments)-1].variadic { + if len(e.arguments) != len(arguments) { + return nil, errors.New("incorrect number of args") + } + for i, spec := range e.arguments { + userArg := arguments[i] + err := spec.typeCheck(userArg) + if err != nil { + return nil, err + } + } + return arguments, nil + } + if len(arguments) < len(e.arguments) { + return nil, errors.New("Invalid arity.") + } + return arguments, nil +} + +func (a *argSpec) typeCheck(arg interface{}) error { + for _, t := range a.types { + switch t { + case jpNumber: + if _, ok := arg.(float64); ok { + return nil + } + case jpString: + if _, ok := arg.(string); ok { + return nil + } + case jpArray: + if isSliceType(arg) { + return nil + } + case jpObject: + if _, ok := arg.(map[string]interface{}); ok { + return nil + } + case jpArrayNumber: + if _, ok := toArrayNum(arg); ok { + return nil + } + case jpArrayString: + if _, ok := toArrayStr(arg); ok { + return nil + } + case jpAny: + return nil + case jpExpref: + if _, ok := arg.(expRef); ok { + return nil + } + } + } + return fmt.Errorf("Invalid type for: %v, expected: %#v", arg, a.types) +} + +func (f *functionCaller) CallFunction(name string, arguments []interface{}, intr *treeInterpreter) (interface{}, error) { + entry, ok := f.functionTable[name] + if !ok { + return nil, errors.New("unknown function: " + name) + } + resolvedArgs, err := entry.resolveArgs(arguments) + if err != nil { + return nil, err + } + if entry.hasExpRef { + var extra []interface{} + extra = append(extra, intr) + resolvedArgs = append(extra, resolvedArgs...) 
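To make the function table, resolveArgs and typeCheck above concrete, here is a small sketch (invented data; upstream import path assumed) showing how arity and argument types are enforced when a built-in is invoked from an expression:

```go
package main

import (
	"fmt"

	"github.com/jmespath/go-jmespath"
)

func main() {
	// A numeric argument satisfies abs()'s jpNumber argSpec.
	v, err := jmespath.Search("abs(foo)", map[string]interface{}{"foo": -3.0})
	fmt.Println(v, err) // 3 <nil>

	// A string argument fails typeCheck, so Search returns an error.
	_, err = jmespath.Search("abs(foo)", map[string]interface{}{"foo": "bar"})
	fmt.Println(err != nil) // true

	// Passing two arguments to a one-argument function fails resolveArgs.
	_, err = jmespath.Search("abs(foo, foo)", map[string]interface{}{"foo": -3.0})
	fmt.Println(err != nil) // true
}
```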
+ } + return entry.handler(resolvedArgs) +} + +func jpfAbs(arguments []interface{}) (interface{}, error) { + num := arguments[0].(float64) + return math.Abs(num), nil +} + +func jpfLength(arguments []interface{}) (interface{}, error) { + arg := arguments[0] + if c, ok := arg.(string); ok { + return float64(utf8.RuneCountInString(c)), nil + } else if isSliceType(arg) { + v := reflect.ValueOf(arg) + return float64(v.Len()), nil + } else if c, ok := arg.(map[string]interface{}); ok { + return float64(len(c)), nil + } + return nil, errors.New("could not compute length()") +} + +func jpfStartsWith(arguments []interface{}) (interface{}, error) { + search := arguments[0].(string) + prefix := arguments[1].(string) + return strings.HasPrefix(search, prefix), nil +} + +func jpfAvg(arguments []interface{}) (interface{}, error) { + // We've already type checked the value so we can safely use + // type assertions. + args := arguments[0].([]interface{}) + length := float64(len(args)) + numerator := 0.0 + for _, n := range args { + numerator += n.(float64) + } + return numerator / length, nil +} +func jpfCeil(arguments []interface{}) (interface{}, error) { + val := arguments[0].(float64) + return math.Ceil(val), nil +} +func jpfContains(arguments []interface{}) (interface{}, error) { + search := arguments[0] + el := arguments[1] + if searchStr, ok := search.(string); ok { + if elStr, ok := el.(string); ok { + return strings.Index(searchStr, elStr) != -1, nil + } + return false, nil + } + // Otherwise this is a generic contains for []interface{} + general := search.([]interface{}) + for _, item := range general { + if item == el { + return true, nil + } + } + return false, nil +} +func jpfEndsWith(arguments []interface{}) (interface{}, error) { + search := arguments[0].(string) + suffix := arguments[1].(string) + return strings.HasSuffix(search, suffix), nil +} +func jpfFloor(arguments []interface{}) (interface{}, error) { + val := arguments[0].(float64) + return math.Floor(val), nil +} +func jpfMap(arguments []interface{}) (interface{}, error) { + intr := arguments[0].(*treeInterpreter) + exp := arguments[1].(expRef) + node := exp.ref + arr := arguments[2].([]interface{}) + mapped := make([]interface{}, 0, len(arr)) + for _, value := range arr { + current, err := intr.Execute(node, value) + if err != nil { + return nil, err + } + mapped = append(mapped, current) + } + return mapped, nil +} +func jpfMax(arguments []interface{}) (interface{}, error) { + if items, ok := toArrayNum(arguments[0]); ok { + if len(items) == 0 { + return nil, nil + } + if len(items) == 1 { + return items[0], nil + } + best := items[0] + for _, item := range items[1:] { + if item > best { + best = item + } + } + return best, nil + } + // Otherwise we're dealing with a max() of strings. 
+ items, _ := toArrayStr(arguments[0]) + if len(items) == 0 { + return nil, nil + } + if len(items) == 1 { + return items[0], nil + } + best := items[0] + for _, item := range items[1:] { + if item > best { + best = item + } + } + return best, nil +} +func jpfMerge(arguments []interface{}) (interface{}, error) { + final := make(map[string]interface{}) + for _, m := range arguments { + mapped := m.(map[string]interface{}) + for key, value := range mapped { + final[key] = value + } + } + return final, nil +} +func jpfMaxBy(arguments []interface{}) (interface{}, error) { + intr := arguments[0].(*treeInterpreter) + arr := arguments[1].([]interface{}) + exp := arguments[2].(expRef) + node := exp.ref + if len(arr) == 0 { + return nil, nil + } else if len(arr) == 1 { + return arr[0], nil + } + start, err := intr.Execute(node, arr[0]) + if err != nil { + return nil, err + } + switch t := start.(type) { + case float64: + bestVal := t + bestItem := arr[0] + for _, item := range arr[1:] { + result, err := intr.Execute(node, item) + if err != nil { + return nil, err + } + current, ok := result.(float64) + if !ok { + return nil, errors.New("invalid type, must be number") + } + if current > bestVal { + bestVal = current + bestItem = item + } + } + return bestItem, nil + case string: + bestVal := t + bestItem := arr[0] + for _, item := range arr[1:] { + result, err := intr.Execute(node, item) + if err != nil { + return nil, err + } + current, ok := result.(string) + if !ok { + return nil, errors.New("invalid type, must be string") + } + if current > bestVal { + bestVal = current + bestItem = item + } + } + return bestItem, nil + default: + return nil, errors.New("invalid type, must be number of string") + } +} +func jpfSum(arguments []interface{}) (interface{}, error) { + items, _ := toArrayNum(arguments[0]) + sum := 0.0 + for _, item := range items { + sum += item + } + return sum, nil +} + +func jpfMin(arguments []interface{}) (interface{}, error) { + if items, ok := toArrayNum(arguments[0]); ok { + if len(items) == 0 { + return nil, nil + } + if len(items) == 1 { + return items[0], nil + } + best := items[0] + for _, item := range items[1:] { + if item < best { + best = item + } + } + return best, nil + } + items, _ := toArrayStr(arguments[0]) + if len(items) == 0 { + return nil, nil + } + if len(items) == 1 { + return items[0], nil + } + best := items[0] + for _, item := range items[1:] { + if item < best { + best = item + } + } + return best, nil +} + +func jpfMinBy(arguments []interface{}) (interface{}, error) { + intr := arguments[0].(*treeInterpreter) + arr := arguments[1].([]interface{}) + exp := arguments[2].(expRef) + node := exp.ref + if len(arr) == 0 { + return nil, nil + } else if len(arr) == 1 { + return arr[0], nil + } + start, err := intr.Execute(node, arr[0]) + if err != nil { + return nil, err + } + if t, ok := start.(float64); ok { + bestVal := t + bestItem := arr[0] + for _, item := range arr[1:] { + result, err := intr.Execute(node, item) + if err != nil { + return nil, err + } + current, ok := result.(float64) + if !ok { + return nil, errors.New("invalid type, must be number") + } + if current < bestVal { + bestVal = current + bestItem = item + } + } + return bestItem, nil + } else if t, ok := start.(string); ok { + bestVal := t + bestItem := arr[0] + for _, item := range arr[1:] { + result, err := intr.Execute(node, item) + if err != nil { + return nil, err + } + current, ok := result.(string) + if !ok { + return nil, errors.New("invalid type, must be string") + } + if current < 
bestVal { + bestVal = current + bestItem = item + } + } + return bestItem, nil + } else { + return nil, errors.New("invalid type, must be number of string") + } +} +func jpfType(arguments []interface{}) (interface{}, error) { + arg := arguments[0] + if _, ok := arg.(float64); ok { + return "number", nil + } + if _, ok := arg.(string); ok { + return "string", nil + } + if _, ok := arg.([]interface{}); ok { + return "array", nil + } + if _, ok := arg.(map[string]interface{}); ok { + return "object", nil + } + if arg == nil { + return "null", nil + } + if arg == true || arg == false { + return "boolean", nil + } + return nil, errors.New("unknown type") +} +func jpfKeys(arguments []interface{}) (interface{}, error) { + arg := arguments[0].(map[string]interface{}) + collected := make([]interface{}, 0, len(arg)) + for key := range arg { + collected = append(collected, key) + } + return collected, nil +} +func jpfValues(arguments []interface{}) (interface{}, error) { + arg := arguments[0].(map[string]interface{}) + collected := make([]interface{}, 0, len(arg)) + for _, value := range arg { + collected = append(collected, value) + } + return collected, nil +} +func jpfSort(arguments []interface{}) (interface{}, error) { + if items, ok := toArrayNum(arguments[0]); ok { + d := sort.Float64Slice(items) + sort.Stable(d) + final := make([]interface{}, len(d)) + for i, val := range d { + final[i] = val + } + return final, nil + } + // Otherwise we're dealing with sort()'ing strings. + items, _ := toArrayStr(arguments[0]) + d := sort.StringSlice(items) + sort.Stable(d) + final := make([]interface{}, len(d)) + for i, val := range d { + final[i] = val + } + return final, nil +} +func jpfSortBy(arguments []interface{}) (interface{}, error) { + intr := arguments[0].(*treeInterpreter) + arr := arguments[1].([]interface{}) + exp := arguments[2].(expRef) + node := exp.ref + if len(arr) == 0 { + return arr, nil + } else if len(arr) == 1 { + return arr, nil + } + start, err := intr.Execute(node, arr[0]) + if err != nil { + return nil, err + } + if _, ok := start.(float64); ok { + sortable := &byExprFloat{intr, node, arr, false} + sort.Stable(sortable) + if sortable.hasError { + return nil, errors.New("error in sort_by comparison") + } + return arr, nil + } else if _, ok := start.(string); ok { + sortable := &byExprString{intr, node, arr, false} + sort.Stable(sortable) + if sortable.hasError { + return nil, errors.New("error in sort_by comparison") + } + return arr, nil + } else { + return nil, errors.New("invalid type, must be number of string") + } +} +func jpfJoin(arguments []interface{}) (interface{}, error) { + sep := arguments[0].(string) + // We can't just do arguments[1].([]string), we have to + // manually convert each item to a string. 
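The handlers marked hasExpRef (map, sort_by, min_by, max_by) receive the interpreter plus an expression reference. A brief sketch of what that looks like from the caller's side (data literal invented, upstream import path assumed):

```go
package main

import (
	"fmt"

	"github.com/jmespath/go-jmespath"
)

func main() {
	data := map[string]interface{}{
		"people": []interface{}{
			map[string]interface{}{"name": "ana", "age": 30.0},
			map[string]interface{}{"name": "bob", "age": 25.0},
		},
	}

	// &age is an expression reference; max_by evaluates it against every
	// element and keeps the element with the largest result.
	oldest, _ := jmespath.Search("max_by(people, &age).name", data)
	fmt.Println(oldest) // ana

	// sort_by uses the same mechanism to order the slice.
	names, _ := jmespath.Search("sort_by(people, &age)[*].name", data)
	fmt.Println(names) // [bob ana]
}
```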
+ arrayStr := []string{} + for _, item := range arguments[1].([]interface{}) { + arrayStr = append(arrayStr, item.(string)) + } + return strings.Join(arrayStr, sep), nil +} +func jpfReverse(arguments []interface{}) (interface{}, error) { + if s, ok := arguments[0].(string); ok { + r := []rune(s) + for i, j := 0, len(r)-1; i < len(r)/2; i, j = i+1, j-1 { + r[i], r[j] = r[j], r[i] + } + return string(r), nil + } + items := arguments[0].([]interface{}) + length := len(items) + reversed := make([]interface{}, length) + for i, item := range items { + reversed[length-(i+1)] = item + } + return reversed, nil +} +func jpfToArray(arguments []interface{}) (interface{}, error) { + if _, ok := arguments[0].([]interface{}); ok { + return arguments[0], nil + } + return arguments[:1:1], nil +} +func jpfToString(arguments []interface{}) (interface{}, error) { + if v, ok := arguments[0].(string); ok { + return v, nil + } + result, err := json.Marshal(arguments[0]) + if err != nil { + return nil, err + } + return string(result), nil +} +func jpfToNumber(arguments []interface{}) (interface{}, error) { + arg := arguments[0] + if v, ok := arg.(float64); ok { + return v, nil + } + if v, ok := arg.(string); ok { + conv, err := strconv.ParseFloat(v, 64) + if err != nil { + return nil, nil + } + return conv, nil + } + if _, ok := arg.([]interface{}); ok { + return nil, nil + } + if _, ok := arg.(map[string]interface{}); ok { + return nil, nil + } + if arg == nil { + return nil, nil + } + if arg == true || arg == false { + return nil, nil + } + return nil, errors.New("unknown type") +} +func jpfNotNull(arguments []interface{}) (interface{}, error) { + for _, arg := range arguments { + if arg != nil { + return arg, nil + } + } + return nil, nil +} diff --git a/vendor/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/vendor/github.com/jmespath/go-jmespath/interpreter.go b/vendor/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/vendor/github.com/jmespath/go-jmespath/interpreter.go new file mode 100644 index 000000000..13c74604c --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/vendor/github.com/jmespath/go-jmespath/interpreter.go @@ -0,0 +1,418 @@ +package jmespath + +import ( + "errors" + "reflect" + "unicode" + "unicode/utf8" +) + +/* This is a tree based interpreter. It walks the AST and directly + interprets the AST to search through a JSON document. +*/ + +type treeInterpreter struct { + fCall *functionCaller +} + +func newInterpreter() *treeInterpreter { + interpreter := treeInterpreter{} + interpreter.fCall = newFunctionCaller() + return &interpreter +} + +type expRef struct { + ref ASTNode +} + +// Execute takes an ASTNode and input data and interprets the AST directly. +// It will produce the result of applying the JMESPath expression associated +// with the ASTNode to the input data "value". 
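A sketch of what Execute does in isolation, mirroring the exported Search helper. Because newInterpreter and treeInterpreter are unexported, this assumes the code is compiled inside package jmespath (for instance next to its tests); exampleExecute is an invented name:

```go
package jmespath

// exampleExecute parses an expression once and then evaluates the resulting
// AST against a document; the same AST could be reused for many documents.
func exampleExecute() (interface{}, error) {
	parser := NewParser()
	ast, err := parser.Parse("foo.bar")
	if err != nil {
		return nil, err
	}
	intr := newInterpreter()
	data := map[string]interface{}{
		"foo": map[string]interface{}{"bar": "baz"},
	}
	// Returns "baz", nil for this input.
	return intr.Execute(ast, data)
}
```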
+func (intr *treeInterpreter) Execute(node ASTNode, value interface{}) (interface{}, error) { + switch node.nodeType { + case ASTComparator: + left, err := intr.Execute(node.children[0], value) + if err != nil { + return nil, err + } + right, err := intr.Execute(node.children[1], value) + if err != nil { + return nil, err + } + switch node.value { + case tEQ: + return objsEqual(left, right), nil + case tNE: + return !objsEqual(left, right), nil + } + leftNum, ok := left.(float64) + if !ok { + return nil, nil + } + rightNum, ok := right.(float64) + if !ok { + return nil, nil + } + switch node.value { + case tGT: + return leftNum > rightNum, nil + case tGTE: + return leftNum >= rightNum, nil + case tLT: + return leftNum < rightNum, nil + case tLTE: + return leftNum <= rightNum, nil + } + case ASTExpRef: + return expRef{ref: node.children[0]}, nil + case ASTFunctionExpression: + resolvedArgs := []interface{}{} + for _, arg := range node.children { + current, err := intr.Execute(arg, value) + if err != nil { + return nil, err + } + resolvedArgs = append(resolvedArgs, current) + } + return intr.fCall.CallFunction(node.value.(string), resolvedArgs, intr) + case ASTField: + if m, ok := value.(map[string]interface{}); ok { + key := node.value.(string) + return m[key], nil + } + return intr.fieldFromStruct(node.value.(string), value) + case ASTFilterProjection: + left, err := intr.Execute(node.children[0], value) + if err != nil { + return nil, nil + } + sliceType, ok := left.([]interface{}) + if !ok { + if isSliceType(left) { + return intr.filterProjectionWithReflection(node, left) + } + return nil, nil + } + compareNode := node.children[2] + collected := []interface{}{} + for _, element := range sliceType { + result, err := intr.Execute(compareNode, element) + if err != nil { + return nil, err + } + if !isFalse(result) { + current, err := intr.Execute(node.children[1], element) + if err != nil { + return nil, err + } + if current != nil { + collected = append(collected, current) + } + } + } + return collected, nil + case ASTFlatten: + left, err := intr.Execute(node.children[0], value) + if err != nil { + return nil, nil + } + sliceType, ok := left.([]interface{}) + if !ok { + // If we can't type convert to []interface{}, there's + // a chance this could still work via reflection if we're + // dealing with user provided types. + if isSliceType(left) { + return intr.flattenWithReflection(left) + } + return nil, nil + } + flattened := []interface{}{} + for _, element := range sliceType { + if elementSlice, ok := element.([]interface{}); ok { + flattened = append(flattened, elementSlice...) + } else if isSliceType(element) { + reflectFlat := []interface{}{} + v := reflect.ValueOf(element) + for i := 0; i < v.Len(); i++ { + reflectFlat = append(reflectFlat, v.Index(i).Interface()) + } + flattened = append(flattened, reflectFlat...) + } else { + flattened = append(flattened, element) + } + } + return flattened, nil + case ASTIdentity, ASTCurrentNode: + return value, nil + case ASTIndex: + if sliceType, ok := value.([]interface{}); ok { + index := node.value.(int) + if index < 0 { + index += len(sliceType) + } + if index < len(sliceType) && index >= 0 { + return sliceType[index], nil + } + return nil, nil + } + // Otherwise try via reflection. 
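The projection, flatten and index cases above are easiest to read alongside a few concrete expressions. A small sketch (invented data, upstream import path assumed):

```go
package main

import (
	"fmt"

	"github.com/jmespath/go-jmespath"
)

func main() {
	data := map[string]interface{}{
		"foo": []interface{}{
			map[string]interface{}{"bar": 1.0},
			map[string]interface{}{"bar": 2.0},
		},
	}

	a, _ := jmespath.Search("foo[*].bar", data)  // list projection
	b, _ := jmespath.Search("foo[].bar", data)   // flatten, then project
	c, _ := jmespath.Search("foo[-1].bar", data) // negative index counts from the end
	fmt.Println(a, b, c) // [1 2] [1 2] 2
}
```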
+ rv := reflect.ValueOf(value) + if rv.Kind() == reflect.Slice { + index := node.value.(int) + if index < 0 { + index += rv.Len() + } + if index < rv.Len() && index >= 0 { + v := rv.Index(index) + return v.Interface(), nil + } + } + return nil, nil + case ASTKeyValPair: + return intr.Execute(node.children[0], value) + case ASTLiteral: + return node.value, nil + case ASTMultiSelectHash: + if value == nil { + return nil, nil + } + collected := make(map[string]interface{}) + for _, child := range node.children { + current, err := intr.Execute(child, value) + if err != nil { + return nil, err + } + key := child.value.(string) + collected[key] = current + } + return collected, nil + case ASTMultiSelectList: + if value == nil { + return nil, nil + } + collected := []interface{}{} + for _, child := range node.children { + current, err := intr.Execute(child, value) + if err != nil { + return nil, err + } + collected = append(collected, current) + } + return collected, nil + case ASTOrExpression: + matched, err := intr.Execute(node.children[0], value) + if err != nil { + return nil, err + } + if isFalse(matched) { + matched, err = intr.Execute(node.children[1], value) + if err != nil { + return nil, err + } + } + return matched, nil + case ASTAndExpression: + matched, err := intr.Execute(node.children[0], value) + if err != nil { + return nil, err + } + if isFalse(matched) { + return matched, nil + } + return intr.Execute(node.children[1], value) + case ASTNotExpression: + matched, err := intr.Execute(node.children[0], value) + if err != nil { + return nil, err + } + if isFalse(matched) { + return true, nil + } + return false, nil + case ASTPipe: + result := value + var err error + for _, child := range node.children { + result, err = intr.Execute(child, result) + if err != nil { + return nil, err + } + } + return result, nil + case ASTProjection: + left, err := intr.Execute(node.children[0], value) + if err != nil { + return nil, err + } + sliceType, ok := left.([]interface{}) + if !ok { + if isSliceType(left) { + return intr.projectWithReflection(node, left) + } + return nil, nil + } + collected := []interface{}{} + var current interface{} + for _, element := range sliceType { + current, err = intr.Execute(node.children[1], element) + if err != nil { + return nil, err + } + if current != nil { + collected = append(collected, current) + } + } + return collected, nil + case ASTSubexpression, ASTIndexExpression: + left, err := intr.Execute(node.children[0], value) + if err != nil { + return nil, err + } + return intr.Execute(node.children[1], left) + case ASTSlice: + sliceType, ok := value.([]interface{}) + if !ok { + if isSliceType(value) { + return intr.sliceWithReflection(node, value) + } + return nil, nil + } + parts := node.value.([]*int) + sliceParams := make([]sliceParam, 3) + for i, part := range parts { + if part != nil { + sliceParams[i].Specified = true + sliceParams[i].N = *part + } + } + return slice(sliceType, sliceParams) + case ASTValueProjection: + left, err := intr.Execute(node.children[0], value) + if err != nil { + return nil, nil + } + mapType, ok := left.(map[string]interface{}) + if !ok { + return nil, nil + } + values := make([]interface{}, len(mapType)) + for _, value := range mapType { + values = append(values, value) + } + collected := []interface{}{} + for _, element := range values { + current, err := intr.Execute(node.children[1], element) + if err != nil { + return nil, err + } + if current != nil { + collected = append(collected, current) + } + } + return collected, 
nil + } + return nil, errors.New("Unknown AST node: " + node.nodeType.String()) +} + +func (intr *treeInterpreter) fieldFromStruct(key string, value interface{}) (interface{}, error) { + rv := reflect.ValueOf(value) + first, n := utf8.DecodeRuneInString(key) + fieldName := string(unicode.ToUpper(first)) + key[n:] + if rv.Kind() == reflect.Struct { + v := rv.FieldByName(fieldName) + if !v.IsValid() { + return nil, nil + } + return v.Interface(), nil + } else if rv.Kind() == reflect.Ptr { + // Handle multiple levels of indirection? + if rv.IsNil() { + return nil, nil + } + rv = rv.Elem() + v := rv.FieldByName(fieldName) + if !v.IsValid() { + return nil, nil + } + return v.Interface(), nil + } + return nil, nil +} + +func (intr *treeInterpreter) flattenWithReflection(value interface{}) (interface{}, error) { + v := reflect.ValueOf(value) + flattened := []interface{}{} + for i := 0; i < v.Len(); i++ { + element := v.Index(i).Interface() + if reflect.TypeOf(element).Kind() == reflect.Slice { + // Then insert the contents of the element + // slice into the flattened slice, + // i.e flattened = append(flattened, mySlice...) + elementV := reflect.ValueOf(element) + for j := 0; j < elementV.Len(); j++ { + flattened = append( + flattened, elementV.Index(j).Interface()) + } + } else { + flattened = append(flattened, element) + } + } + return flattened, nil +} + +func (intr *treeInterpreter) sliceWithReflection(node ASTNode, value interface{}) (interface{}, error) { + v := reflect.ValueOf(value) + parts := node.value.([]*int) + sliceParams := make([]sliceParam, 3) + for i, part := range parts { + if part != nil { + sliceParams[i].Specified = true + sliceParams[i].N = *part + } + } + final := []interface{}{} + for i := 0; i < v.Len(); i++ { + element := v.Index(i).Interface() + final = append(final, element) + } + return slice(final, sliceParams) +} + +func (intr *treeInterpreter) filterProjectionWithReflection(node ASTNode, value interface{}) (interface{}, error) { + compareNode := node.children[2] + collected := []interface{}{} + v := reflect.ValueOf(value) + for i := 0; i < v.Len(); i++ { + element := v.Index(i).Interface() + result, err := intr.Execute(compareNode, element) + if err != nil { + return nil, err + } + if !isFalse(result) { + current, err := intr.Execute(node.children[1], element) + if err != nil { + return nil, err + } + if current != nil { + collected = append(collected, current) + } + } + } + return collected, nil +} + +func (intr *treeInterpreter) projectWithReflection(node ASTNode, value interface{}) (interface{}, error) { + collected := []interface{}{} + v := reflect.ValueOf(value) + for i := 0; i < v.Len(); i++ { + element := v.Index(i).Interface() + result, err := intr.Execute(node.children[1], element) + if err != nil { + return nil, err + } + if result != nil { + collected = append(collected, result) + } + } + return collected, nil +} diff --git a/vendor/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/vendor/github.com/jmespath/go-jmespath/lexer.go b/vendor/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/vendor/github.com/jmespath/go-jmespath/lexer.go new file mode 100644 index 000000000..817900c8f --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/vendor/github.com/jmespath/go-jmespath/lexer.go @@ -0,0 +1,420 @@ +package jmespath + +import ( + "bytes" + "encoding/json" + "fmt" + "strconv" + "strings" + "unicode/utf8" +) + +type token struct { + tokenType tokType + value string + position int + length int 
+} + +type tokType int + +const eof = -1 + +// Lexer contains information about the expression being tokenized. +type Lexer struct { + expression string // The expression provided by the user. + currentPos int // The current position in the string. + lastWidth int // The width of the current rune. This + buf bytes.Buffer // Internal buffer used for building up values. +} + +// SyntaxError is the main error used whenever a lexing or parsing error occurs. +type SyntaxError struct { + msg string // Error message displayed to user + Expression string // Expression that generated a SyntaxError + Offset int // The location in the string where the error occurred +} + +func (e SyntaxError) Error() string { + // In the future, it would be good to underline the specific + // location where the error occurred. + return "SyntaxError: " + e.msg +} + +// HighlightLocation will show where the syntax error occurred. +// It will place a "^" character on a line below the expression +// at the point where the syntax error occurred. +func (e SyntaxError) HighlightLocation() string { + return e.Expression + "\n" + strings.Repeat(" ", e.Offset) + "^" +} + +//go:generate stringer -type=tokType +const ( + tUnknown tokType = iota + tStar + tDot + tFilter + tFlatten + tLparen + tRparen + tLbracket + tRbracket + tLbrace + tRbrace + tOr + tPipe + tNumber + tUnquotedIdentifier + tQuotedIdentifier + tComma + tColon + tLT + tLTE + tGT + tGTE + tEQ + tNE + tJSONLiteral + tStringLiteral + tCurrent + tExpref + tAnd + tNot + tEOF +) + +var basicTokens = map[rune]tokType{ + '.': tDot, + '*': tStar, + ',': tComma, + ':': tColon, + '{': tLbrace, + '}': tRbrace, + ']': tRbracket, // tLbracket not included because it could be "[]" + '(': tLparen, + ')': tRparen, + '@': tCurrent, +} + +// Bit mask for [a-zA-Z_] shifted down 64 bits to fit in a single uint64. +// When using this bitmask just be sure to shift the rune down 64 bits +// before checking against identifierStartBits. +const identifierStartBits uint64 = 576460745995190270 + +// Bit mask for [a-zA-Z0-9], 128 bits -> 2 uint64s. +var identifierTrailingBits = [2]uint64{287948901175001088, 576460745995190270} + +var whiteSpace = map[rune]bool{ + ' ': true, '\t': true, '\n': true, '\r': true, +} + +func (t token) String() string { + return fmt.Sprintf("Token{%+v, %s, %d, %d}", + t.tokenType, t.value, t.position, t.length) +} + +// NewLexer creates a new JMESPath lexer. +func NewLexer() *Lexer { + lexer := Lexer{} + return &lexer +} + +func (lexer *Lexer) next() rune { + if lexer.currentPos >= len(lexer.expression) { + lexer.lastWidth = 0 + return eof + } + r, w := utf8.DecodeRuneInString(lexer.expression[lexer.currentPos:]) + lexer.lastWidth = w + lexer.currentPos += w + return r +} + +func (lexer *Lexer) back() { + lexer.currentPos -= lexer.lastWidth +} + +func (lexer *Lexer) peek() rune { + t := lexer.next() + lexer.back() + return t +} + +// tokenize takes an expression and returns corresponding tokens. +func (lexer *Lexer) tokenize(expression string) ([]token, error) { + var tokens []token + lexer.expression = expression + lexer.currentPos = 0 + lexer.lastWidth = 0 +loop: + for { + r := lexer.next() + if identifierStartBits&(1<<(uint64(r)-64)) > 0 { + t := lexer.consumeUnquotedIdentifier() + tokens = append(tokens, t) + } else if val, ok := basicTokens[r]; ok { + // Basic single char token. 
+ t := token{ + tokenType: val, + value: string(r), + position: lexer.currentPos - lexer.lastWidth, + length: 1, + } + tokens = append(tokens, t) + } else if r == '-' || (r >= '0' && r <= '9') { + t := lexer.consumeNumber() + tokens = append(tokens, t) + } else if r == '[' { + t := lexer.consumeLBracket() + tokens = append(tokens, t) + } else if r == '"' { + t, err := lexer.consumeQuotedIdentifier() + if err != nil { + return tokens, err + } + tokens = append(tokens, t) + } else if r == '\'' { + t, err := lexer.consumeRawStringLiteral() + if err != nil { + return tokens, err + } + tokens = append(tokens, t) + } else if r == '`' { + t, err := lexer.consumeLiteral() + if err != nil { + return tokens, err + } + tokens = append(tokens, t) + } else if r == '|' { + t := lexer.matchOrElse(r, '|', tOr, tPipe) + tokens = append(tokens, t) + } else if r == '<' { + t := lexer.matchOrElse(r, '=', tLTE, tLT) + tokens = append(tokens, t) + } else if r == '>' { + t := lexer.matchOrElse(r, '=', tGTE, tGT) + tokens = append(tokens, t) + } else if r == '!' { + t := lexer.matchOrElse(r, '=', tNE, tNot) + tokens = append(tokens, t) + } else if r == '=' { + t := lexer.matchOrElse(r, '=', tEQ, tUnknown) + tokens = append(tokens, t) + } else if r == '&' { + t := lexer.matchOrElse(r, '&', tAnd, tExpref) + tokens = append(tokens, t) + } else if r == eof { + break loop + } else if _, ok := whiteSpace[r]; ok { + // Ignore whitespace + } else { + return tokens, lexer.syntaxError(fmt.Sprintf("Unknown char: %s", strconv.QuoteRuneToASCII(r))) + } + } + tokens = append(tokens, token{tEOF, "", len(lexer.expression), 0}) + return tokens, nil +} + +// Consume characters until the ending rune "r" is reached. +// If the end of the expression is reached before seeing the +// terminating rune "r", then an error is returned. +// If no error occurs then the matching substring is returned. +// The returned string will not include the ending rune. +func (lexer *Lexer) consumeUntil(end rune) (string, error) { + start := lexer.currentPos + current := lexer.next() + for current != end && current != eof { + if current == '\\' && lexer.peek() != eof { + lexer.next() + } + current = lexer.next() + } + if lexer.lastWidth == 0 { + // Then we hit an EOF so we never reached the closing + // delimiter. + return "", SyntaxError{ + msg: "Unclosed delimiter: " + string(end), + Expression: lexer.expression, + Offset: len(lexer.expression), + } + } + return lexer.expression[start : lexer.currentPos-lexer.lastWidth], nil +} + +func (lexer *Lexer) consumeLiteral() (token, error) { + start := lexer.currentPos + value, err := lexer.consumeUntil('`') + if err != nil { + return token{}, err + } + value = strings.Replace(value, "\\`", "`", -1) + return token{ + tokenType: tJSONLiteral, + value: value, + position: start, + length: len(value), + }, nil +} + +func (lexer *Lexer) consumeRawStringLiteral() (token, error) { + start := lexer.currentPos + currentIndex := start + current := lexer.next() + for current != '\'' && lexer.peek() != eof { + if current == '\\' && lexer.peek() == '\'' { + chunk := lexer.expression[currentIndex : lexer.currentPos-1] + lexer.buf.WriteString(chunk) + lexer.buf.WriteString("'") + lexer.next() + currentIndex = lexer.currentPos + } + current = lexer.next() + } + if lexer.lastWidth == 0 { + // Then we hit an EOF so we never reached the closing + // delimiter. 
+ return token{}, SyntaxError{ + msg: "Unclosed delimiter: '", + Expression: lexer.expression, + Offset: len(lexer.expression), + } + } + if currentIndex < lexer.currentPos { + lexer.buf.WriteString(lexer.expression[currentIndex : lexer.currentPos-1]) + } + value := lexer.buf.String() + // Reset the buffer so it can reused again. + lexer.buf.Reset() + return token{ + tokenType: tStringLiteral, + value: value, + position: start, + length: len(value), + }, nil +} + +func (lexer *Lexer) syntaxError(msg string) SyntaxError { + return SyntaxError{ + msg: msg, + Expression: lexer.expression, + Offset: lexer.currentPos - 1, + } +} + +// Checks for a two char token, otherwise matches a single character +// token. This is used whenever a two char token overlaps a single +// char token, e.g. "||" -> tPipe, "|" -> tOr. +func (lexer *Lexer) matchOrElse(first rune, second rune, matchedType tokType, singleCharType tokType) token { + start := lexer.currentPos - lexer.lastWidth + nextRune := lexer.next() + var t token + if nextRune == second { + t = token{ + tokenType: matchedType, + value: string(first) + string(second), + position: start, + length: 2, + } + } else { + lexer.back() + t = token{ + tokenType: singleCharType, + value: string(first), + position: start, + length: 1, + } + } + return t +} + +func (lexer *Lexer) consumeLBracket() token { + // There's three options here: + // 1. A filter expression "[?" + // 2. A flatten operator "[]" + // 3. A bare rbracket "[" + start := lexer.currentPos - lexer.lastWidth + nextRune := lexer.next() + var t token + if nextRune == '?' { + t = token{ + tokenType: tFilter, + value: "[?", + position: start, + length: 2, + } + } else if nextRune == ']' { + t = token{ + tokenType: tFlatten, + value: "[]", + position: start, + length: 2, + } + } else { + t = token{ + tokenType: tLbracket, + value: "[", + position: start, + length: 1, + } + lexer.back() + } + return t +} + +func (lexer *Lexer) consumeQuotedIdentifier() (token, error) { + start := lexer.currentPos + value, err := lexer.consumeUntil('"') + if err != nil { + return token{}, err + } + var decoded string + asJSON := []byte("\"" + value + "\"") + if err := json.Unmarshal([]byte(asJSON), &decoded); err != nil { + return token{}, err + } + return token{ + tokenType: tQuotedIdentifier, + value: decoded, + position: start - 1, + length: len(decoded), + }, nil +} + +func (lexer *Lexer) consumeUnquotedIdentifier() token { + // Consume runes until we reach the end of an unquoted + // identifier. + start := lexer.currentPos - lexer.lastWidth + for { + r := lexer.next() + if r < 0 || r > 128 || identifierTrailingBits[uint64(r)/64]&(1<<(uint64(r)%64)) == 0 { + lexer.back() + break + } + } + value := lexer.expression[start:lexer.currentPos] + return token{ + tokenType: tUnquotedIdentifier, + value: value, + position: start, + length: lexer.currentPos - start, + } +} + +func (lexer *Lexer) consumeNumber() token { + // Consume runes until we reach something that's not a number. 
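A quick sketch of the lexer on its own. tokenize is unexported, so this assumes it is compiled inside package jmespath; exampleTokenize is an invented name:

```go
package jmespath

import "fmt"

// exampleTokenize prints the token stream produced for a simple expression.
func exampleTokenize() error {
	lexer := NewLexer()
	tokens, err := lexer.tokenize("foo[0].bar")
	if err != nil {
		return err
	}
	for _, t := range tokens {
		fmt.Println(t.tokenType, t.value)
	}
	// Prints, in order: tUnquotedIdentifier foo, tLbracket [, tNumber 0,
	// tRbracket ], tDot ., tUnquotedIdentifier bar, then a trailing tEOF.
	return nil
}
```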
+ start := lexer.currentPos - lexer.lastWidth + for { + r := lexer.next() + if r < '0' || r > '9' { + lexer.back() + break + } + } + value := lexer.expression[start:lexer.currentPos] + return token{ + tokenType: tNumber, + value: value, + position: start, + length: lexer.currentPos - start, + } +} diff --git a/vendor/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/vendor/github.com/jmespath/go-jmespath/parser.go b/vendor/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/vendor/github.com/jmespath/go-jmespath/parser.go new file mode 100644 index 000000000..1240a1755 --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/vendor/github.com/jmespath/go-jmespath/parser.go @@ -0,0 +1,603 @@ +package jmespath + +import ( + "encoding/json" + "fmt" + "strconv" + "strings" +) + +type astNodeType int + +//go:generate stringer -type astNodeType +const ( + ASTEmpty astNodeType = iota + ASTComparator + ASTCurrentNode + ASTExpRef + ASTFunctionExpression + ASTField + ASTFilterProjection + ASTFlatten + ASTIdentity + ASTIndex + ASTIndexExpression + ASTKeyValPair + ASTLiteral + ASTMultiSelectHash + ASTMultiSelectList + ASTOrExpression + ASTAndExpression + ASTNotExpression + ASTPipe + ASTProjection + ASTSubexpression + ASTSlice + ASTValueProjection +) + +// ASTNode represents the abstract syntax tree of a JMESPath expression. +type ASTNode struct { + nodeType astNodeType + value interface{} + children []ASTNode +} + +func (node ASTNode) String() string { + return node.PrettyPrint(0) +} + +// PrettyPrint will pretty print the parsed AST. +// The AST is an implementation detail and this pretty print +// function is provided as a convenience method to help with +// debugging. You should not rely on its output as the internal +// structure of the AST may change at any time. +func (node ASTNode) PrettyPrint(indent int) string { + spaces := strings.Repeat(" ", indent) + output := fmt.Sprintf("%s%s {\n", spaces, node.nodeType) + nextIndent := indent + 2 + if node.value != nil { + if converted, ok := node.value.(fmt.Stringer); ok { + // Account for things like comparator nodes + // that are enums with a String() method. + output += fmt.Sprintf("%svalue: %s\n", strings.Repeat(" ", nextIndent), converted.String()) + } else { + output += fmt.Sprintf("%svalue: %#v\n", strings.Repeat(" ", nextIndent), node.value) + } + } + lastIndex := len(node.children) + if lastIndex > 0 { + output += fmt.Sprintf("%schildren: {\n", strings.Repeat(" ", nextIndent)) + childIndent := nextIndent + 2 + for _, elem := range node.children { + output += elem.PrettyPrint(childIndent) + } + } + output += fmt.Sprintf("%s}\n", spaces) + return output +} + +var bindingPowers = map[tokType]int{ + tEOF: 0, + tUnquotedIdentifier: 0, + tQuotedIdentifier: 0, + tRbracket: 0, + tRparen: 0, + tComma: 0, + tRbrace: 0, + tNumber: 0, + tCurrent: 0, + tExpref: 0, + tColon: 0, + tPipe: 1, + tOr: 2, + tAnd: 3, + tEQ: 5, + tLT: 5, + tLTE: 5, + tGT: 5, + tGTE: 5, + tNE: 5, + tFlatten: 9, + tStar: 20, + tFilter: 21, + tDot: 40, + tNot: 45, + tLbrace: 50, + tLbracket: 55, + tLparen: 60, +} + +// Parser holds state about the current expression being parsed. +type Parser struct { + expression string + tokens []token + index int +} + +// NewParser creates a new JMESPath parser. +func NewParser() *Parser { + p := Parser{} + return &p +} + +// Parse will compile a JMESPath expression. 
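A sketch of using the parser directly (invented expression, upstream import path assumed); PrettyPrint is only a debugging aid and its exact layout is an implementation detail:

```go
package main

import (
	"fmt"

	"github.com/jmespath/go-jmespath"
)

func main() {
	parser := jmespath.NewParser()
	ast, err := parser.Parse("foo.bar[0]")
	if err != nil {
		// A malformed expression surfaces as a SyntaxError carrying an
		// Offset into the original expression string.
		fmt.Println(err)
		return
	}
	// For this input the tree is a subexpression whose right-hand side is
	// an index expression over the bar field.
	fmt.Println(ast.PrettyPrint(0))
}
```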
+func (p *Parser) Parse(expression string) (ASTNode, error) { + lexer := NewLexer() + p.expression = expression + p.index = 0 + tokens, err := lexer.tokenize(expression) + if err != nil { + return ASTNode{}, err + } + p.tokens = tokens + parsed, err := p.parseExpression(0) + if err != nil { + return ASTNode{}, err + } + if p.current() != tEOF { + return ASTNode{}, p.syntaxError(fmt.Sprintf( + "Unexpected token at the end of the expresssion: %s", p.current())) + } + return parsed, nil +} + +func (p *Parser) parseExpression(bindingPower int) (ASTNode, error) { + var err error + leftToken := p.lookaheadToken(0) + p.advance() + leftNode, err := p.nud(leftToken) + if err != nil { + return ASTNode{}, err + } + currentToken := p.current() + for bindingPower < bindingPowers[currentToken] { + p.advance() + leftNode, err = p.led(currentToken, leftNode) + if err != nil { + return ASTNode{}, err + } + currentToken = p.current() + } + return leftNode, nil +} + +func (p *Parser) parseIndexExpression() (ASTNode, error) { + if p.lookahead(0) == tColon || p.lookahead(1) == tColon { + return p.parseSliceExpression() + } + indexStr := p.lookaheadToken(0).value + parsedInt, err := strconv.Atoi(indexStr) + if err != nil { + return ASTNode{}, err + } + indexNode := ASTNode{nodeType: ASTIndex, value: parsedInt} + p.advance() + if err := p.match(tRbracket); err != nil { + return ASTNode{}, err + } + return indexNode, nil +} + +func (p *Parser) parseSliceExpression() (ASTNode, error) { + parts := []*int{nil, nil, nil} + index := 0 + current := p.current() + for current != tRbracket && index < 3 { + if current == tColon { + index++ + p.advance() + } else if current == tNumber { + parsedInt, err := strconv.Atoi(p.lookaheadToken(0).value) + if err != nil { + return ASTNode{}, err + } + parts[index] = &parsedInt + p.advance() + } else { + return ASTNode{}, p.syntaxError( + "Expected tColon or tNumber" + ", received: " + p.current().String()) + } + current = p.current() + } + if err := p.match(tRbracket); err != nil { + return ASTNode{}, err + } + return ASTNode{ + nodeType: ASTSlice, + value: parts, + }, nil +} + +func (p *Parser) match(tokenType tokType) error { + if p.current() == tokenType { + p.advance() + return nil + } + return p.syntaxError("Expected " + tokenType.String() + ", received: " + p.current().String()) +} + +func (p *Parser) led(tokenType tokType, node ASTNode) (ASTNode, error) { + switch tokenType { + case tDot: + if p.current() != tStar { + right, err := p.parseDotRHS(bindingPowers[tDot]) + return ASTNode{ + nodeType: ASTSubexpression, + children: []ASTNode{node, right}, + }, err + } + p.advance() + right, err := p.parseProjectionRHS(bindingPowers[tDot]) + return ASTNode{ + nodeType: ASTValueProjection, + children: []ASTNode{node, right}, + }, err + case tPipe: + right, err := p.parseExpression(bindingPowers[tPipe]) + return ASTNode{nodeType: ASTPipe, children: []ASTNode{node, right}}, err + case tOr: + right, err := p.parseExpression(bindingPowers[tOr]) + return ASTNode{nodeType: ASTOrExpression, children: []ASTNode{node, right}}, err + case tAnd: + right, err := p.parseExpression(bindingPowers[tAnd]) + return ASTNode{nodeType: ASTAndExpression, children: []ASTNode{node, right}}, err + case tLparen: + name := node.value + var args []ASTNode + for p.current() != tRparen { + expression, err := p.parseExpression(0) + if err != nil { + return ASTNode{}, err + } + if p.current() == tComma { + if err := p.match(tComma); err != nil { + return ASTNode{}, err + } + } + args = append(args, expression) + } + 
if err := p.match(tRparen); err != nil { + return ASTNode{}, err + } + return ASTNode{ + nodeType: ASTFunctionExpression, + value: name, + children: args, + }, nil + case tFilter: + return p.parseFilter(node) + case tFlatten: + left := ASTNode{nodeType: ASTFlatten, children: []ASTNode{node}} + right, err := p.parseProjectionRHS(bindingPowers[tFlatten]) + return ASTNode{ + nodeType: ASTProjection, + children: []ASTNode{left, right}, + }, err + case tEQ, tNE, tGT, tGTE, tLT, tLTE: + right, err := p.parseExpression(bindingPowers[tokenType]) + if err != nil { + return ASTNode{}, err + } + return ASTNode{ + nodeType: ASTComparator, + value: tokenType, + children: []ASTNode{node, right}, + }, nil + case tLbracket: + tokenType := p.current() + var right ASTNode + var err error + if tokenType == tNumber || tokenType == tColon { + right, err = p.parseIndexExpression() + if err != nil { + return ASTNode{}, err + } + return p.projectIfSlice(node, right) + } + // Otherwise this is a projection. + if err := p.match(tStar); err != nil { + return ASTNode{}, err + } + if err := p.match(tRbracket); err != nil { + return ASTNode{}, err + } + right, err = p.parseProjectionRHS(bindingPowers[tStar]) + if err != nil { + return ASTNode{}, err + } + return ASTNode{ + nodeType: ASTProjection, + children: []ASTNode{node, right}, + }, nil + } + return ASTNode{}, p.syntaxError("Unexpected token: " + tokenType.String()) +} + +func (p *Parser) nud(token token) (ASTNode, error) { + switch token.tokenType { + case tJSONLiteral: + var parsed interface{} + err := json.Unmarshal([]byte(token.value), &parsed) + if err != nil { + return ASTNode{}, err + } + return ASTNode{nodeType: ASTLiteral, value: parsed}, nil + case tStringLiteral: + return ASTNode{nodeType: ASTLiteral, value: token.value}, nil + case tUnquotedIdentifier: + return ASTNode{ + nodeType: ASTField, + value: token.value, + }, nil + case tQuotedIdentifier: + node := ASTNode{nodeType: ASTField, value: token.value} + if p.current() == tLparen { + return ASTNode{}, p.syntaxErrorToken("Can't have quoted identifier as function name.", token) + } + return node, nil + case tStar: + left := ASTNode{nodeType: ASTIdentity} + var right ASTNode + var err error + if p.current() == tRbracket { + right = ASTNode{nodeType: ASTIdentity} + } else { + right, err = p.parseProjectionRHS(bindingPowers[tStar]) + } + return ASTNode{nodeType: ASTValueProjection, children: []ASTNode{left, right}}, err + case tFilter: + return p.parseFilter(ASTNode{nodeType: ASTIdentity}) + case tLbrace: + return p.parseMultiSelectHash() + case tFlatten: + left := ASTNode{ + nodeType: ASTFlatten, + children: []ASTNode{{nodeType: ASTIdentity}}, + } + right, err := p.parseProjectionRHS(bindingPowers[tFlatten]) + if err != nil { + return ASTNode{}, err + } + return ASTNode{nodeType: ASTProjection, children: []ASTNode{left, right}}, nil + case tLbracket: + tokenType := p.current() + //var right ASTNode + if tokenType == tNumber || tokenType == tColon { + right, err := p.parseIndexExpression() + if err != nil { + return ASTNode{}, nil + } + return p.projectIfSlice(ASTNode{nodeType: ASTIdentity}, right) + } else if tokenType == tStar && p.lookahead(1) == tRbracket { + p.advance() + p.advance() + right, err := p.parseProjectionRHS(bindingPowers[tStar]) + if err != nil { + return ASTNode{}, err + } + return ASTNode{ + nodeType: ASTProjection, + children: []ASTNode{{nodeType: ASTIdentity}, right}, + }, nil + } else { + return p.parseMultiSelectList() + } + case tCurrent: + return ASTNode{nodeType: 
ASTCurrentNode}, nil + case tExpref: + expression, err := p.parseExpression(bindingPowers[tExpref]) + if err != nil { + return ASTNode{}, err + } + return ASTNode{nodeType: ASTExpRef, children: []ASTNode{expression}}, nil + case tNot: + expression, err := p.parseExpression(bindingPowers[tNot]) + if err != nil { + return ASTNode{}, err + } + return ASTNode{nodeType: ASTNotExpression, children: []ASTNode{expression}}, nil + case tLparen: + expression, err := p.parseExpression(0) + if err != nil { + return ASTNode{}, err + } + if err := p.match(tRparen); err != nil { + return ASTNode{}, err + } + return expression, nil + case tEOF: + return ASTNode{}, p.syntaxErrorToken("Incomplete expression", token) + } + + return ASTNode{}, p.syntaxErrorToken("Invalid token: "+token.tokenType.String(), token) +} + +func (p *Parser) parseMultiSelectList() (ASTNode, error) { + var expressions []ASTNode + for { + expression, err := p.parseExpression(0) + if err != nil { + return ASTNode{}, err + } + expressions = append(expressions, expression) + if p.current() == tRbracket { + break + } + err = p.match(tComma) + if err != nil { + return ASTNode{}, err + } + } + err := p.match(tRbracket) + if err != nil { + return ASTNode{}, err + } + return ASTNode{ + nodeType: ASTMultiSelectList, + children: expressions, + }, nil +} + +func (p *Parser) parseMultiSelectHash() (ASTNode, error) { + var children []ASTNode + for { + keyToken := p.lookaheadToken(0) + if err := p.match(tUnquotedIdentifier); err != nil { + if err := p.match(tQuotedIdentifier); err != nil { + return ASTNode{}, p.syntaxError("Expected tQuotedIdentifier or tUnquotedIdentifier") + } + } + keyName := keyToken.value + err := p.match(tColon) + if err != nil { + return ASTNode{}, err + } + value, err := p.parseExpression(0) + if err != nil { + return ASTNode{}, err + } + node := ASTNode{ + nodeType: ASTKeyValPair, + value: keyName, + children: []ASTNode{value}, + } + children = append(children, node) + if p.current() == tComma { + err := p.match(tComma) + if err != nil { + return ASTNode{}, nil + } + } else if p.current() == tRbrace { + err := p.match(tRbrace) + if err != nil { + return ASTNode{}, nil + } + break + } + } + return ASTNode{ + nodeType: ASTMultiSelectHash, + children: children, + }, nil +} + +func (p *Parser) projectIfSlice(left ASTNode, right ASTNode) (ASTNode, error) { + indexExpr := ASTNode{ + nodeType: ASTIndexExpression, + children: []ASTNode{left, right}, + } + if right.nodeType == ASTSlice { + right, err := p.parseProjectionRHS(bindingPowers[tStar]) + return ASTNode{ + nodeType: ASTProjection, + children: []ASTNode{indexExpr, right}, + }, err + } + return indexExpr, nil +} +func (p *Parser) parseFilter(node ASTNode) (ASTNode, error) { + var right, condition ASTNode + var err error + condition, err = p.parseExpression(0) + if err != nil { + return ASTNode{}, err + } + if err := p.match(tRbracket); err != nil { + return ASTNode{}, err + } + if p.current() == tFlatten { + right = ASTNode{nodeType: ASTIdentity} + } else { + right, err = p.parseProjectionRHS(bindingPowers[tFilter]) + if err != nil { + return ASTNode{}, err + } + } + + return ASTNode{ + nodeType: ASTFilterProjection, + children: []ASTNode{node, right, condition}, + }, nil +} + +func (p *Parser) parseDotRHS(bindingPower int) (ASTNode, error) { + lookahead := p.current() + if tokensOneOf([]tokType{tQuotedIdentifier, tUnquotedIdentifier, tStar}, lookahead) { + return p.parseExpression(bindingPower) + } else if lookahead == tLbracket { + if err := p.match(tLbracket); err != nil 
{ + return ASTNode{}, err + } + return p.parseMultiSelectList() + } else if lookahead == tLbrace { + if err := p.match(tLbrace); err != nil { + return ASTNode{}, err + } + return p.parseMultiSelectHash() + } + return ASTNode{}, p.syntaxError("Expected identifier, lbracket, or lbrace") +} + +func (p *Parser) parseProjectionRHS(bindingPower int) (ASTNode, error) { + current := p.current() + if bindingPowers[current] < 10 { + return ASTNode{nodeType: ASTIdentity}, nil + } else if current == tLbracket { + return p.parseExpression(bindingPower) + } else if current == tFilter { + return p.parseExpression(bindingPower) + } else if current == tDot { + err := p.match(tDot) + if err != nil { + return ASTNode{}, err + } + return p.parseDotRHS(bindingPower) + } else { + return ASTNode{}, p.syntaxError("Error") + } +} + +func (p *Parser) lookahead(number int) tokType { + return p.lookaheadToken(number).tokenType +} + +func (p *Parser) current() tokType { + return p.lookahead(0) +} + +func (p *Parser) lookaheadToken(number int) token { + return p.tokens[p.index+number] +} + +func (p *Parser) advance() { + p.index++ +} + +func tokensOneOf(elements []tokType, token tokType) bool { + for _, elem := range elements { + if elem == token { + return true + } + } + return false +} + +func (p *Parser) syntaxError(msg string) SyntaxError { + return SyntaxError{ + msg: msg, + Expression: p.expression, + Offset: p.lookaheadToken(0).position, + } +} + +// Create a SyntaxError based on the provided token. +// This differs from syntaxError() which creates a SyntaxError +// based on the current lookahead token. +func (p *Parser) syntaxErrorToken(msg string, t token) SyntaxError { + return SyntaxError{ + msg: msg, + Expression: p.expression, + Offset: t.position, + } +} diff --git a/vendor/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/vendor/github.com/jmespath/go-jmespath/toktype_string.go b/vendor/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/vendor/github.com/jmespath/go-jmespath/toktype_string.go new file mode 100644 index 000000000..dae79cbdf --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/vendor/github.com/jmespath/go-jmespath/toktype_string.go @@ -0,0 +1,16 @@ +// generated by stringer -type=tokType; DO NOT EDIT + +package jmespath + +import "fmt" + +const _tokType_name = "tUnknowntStartDottFiltertFlattentLparentRparentLbrackettRbrackettLbracetRbracetOrtPipetNumbertUnquotedIdentifiertQuotedIdentifiertCommatColontLTtLTEtGTtGTEtEQtNEtJSONLiteraltStringLiteraltCurrenttExpreftAndtNottEOF" + +var _tokType_index = [...]uint8{0, 8, 13, 17, 24, 32, 39, 46, 55, 64, 71, 78, 81, 86, 93, 112, 129, 135, 141, 144, 148, 151, 155, 158, 161, 173, 187, 195, 202, 206, 210, 214} + +func (i tokType) String() string { + if i < 0 || i >= tokType(len(_tokType_index)-1) { + return fmt.Sprintf("tokType(%d)", i) + } + return _tokType_name[_tokType_index[i]:_tokType_index[i+1]] +} diff --git a/vendor/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/vendor/github.com/jmespath/go-jmespath/util.go b/vendor/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/vendor/github.com/jmespath/go-jmespath/util.go new file mode 100644 index 000000000..ddc1b7d7d --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/vendor/github.com/jmespath/go-jmespath/util.go @@ -0,0 +1,185 @@ +package jmespath + +import ( + "errors" + "reflect" +) + +// IsFalse determines if an object is false based on the JMESPath spec. 
+// JMESPath defines false values to be any of: +// - An empty string array, or hash. +// - The boolean value false. +// - nil +func isFalse(value interface{}) bool { + switch v := value.(type) { + case bool: + return !v + case []interface{}: + return len(v) == 0 + case map[string]interface{}: + return len(v) == 0 + case string: + return len(v) == 0 + case nil: + return true + } + // Try the reflection cases before returning false. + rv := reflect.ValueOf(value) + switch rv.Kind() { + case reflect.Struct: + // A struct type will never be false, even if + // all of its values are the zero type. + return false + case reflect.Slice, reflect.Map: + return rv.Len() == 0 + case reflect.Ptr: + if rv.IsNil() { + return true + } + // If it's a pointer type, we'll try to deref the pointer + // and evaluate the pointer value for isFalse. + element := rv.Elem() + return isFalse(element.Interface()) + } + return false +} + +// ObjsEqual is a generic object equality check. +// It will take two arbitrary objects and recursively determine +// if they are equal. +func objsEqual(left interface{}, right interface{}) bool { + return reflect.DeepEqual(left, right) +} + +// SliceParam refers to a single part of a slice. +// A slice consists of a start, a stop, and a step, similar to +// python slices. +type sliceParam struct { + N int + Specified bool +} + +// Slice supports [start:stop:step] style slicing that's supported in JMESPath. +func slice(slice []interface{}, parts []sliceParam) ([]interface{}, error) { + computed, err := computeSliceParams(len(slice), parts) + if err != nil { + return nil, err + } + start, stop, step := computed[0], computed[1], computed[2] + result := []interface{}{} + if step > 0 { + for i := start; i < stop; i += step { + result = append(result, slice[i]) + } + } else { + for i := start; i > stop; i += step { + result = append(result, slice[i]) + } + } + return result, nil +} + +func computeSliceParams(length int, parts []sliceParam) ([]int, error) { + var start, stop, step int + if !parts[2].Specified { + step = 1 + } else if parts[2].N == 0 { + return nil, errors.New("Invalid slice, step cannot be 0") + } else { + step = parts[2].N + } + var stepValueNegative bool + if step < 0 { + stepValueNegative = true + } else { + stepValueNegative = false + } + + if !parts[0].Specified { + if stepValueNegative { + start = length - 1 + } else { + start = 0 + } + } else { + start = capSlice(length, parts[0].N, step) + } + + if !parts[1].Specified { + if stepValueNegative { + stop = -1 + } else { + stop = length + } + } else { + stop = capSlice(length, parts[1].N, step) + } + return []int{start, stop, step}, nil +} + +func capSlice(length int, actual int, step int) int { + if actual < 0 { + actual += length + if actual < 0 { + if step < 0 { + actual = -1 + } else { + actual = 0 + } + } + } else if actual >= length { + if step < 0 { + actual = length - 1 + } else { + actual = length + } + } + return actual +} + +// ToArrayNum converts an empty interface type to a slice of float64. +// If any element in the array cannot be converted, then nil is returned +// along with a second value of false. +func toArrayNum(data interface{}) ([]float64, bool) { + // Is there a better way to do this with reflect? 
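To illustrate the util.go helpers, a package-internal sketch (isFalse, slice and sliceParam are unexported, so this assumes compilation inside package jmespath; exampleUtil is an invented name):

```go
package jmespath

import "fmt"

// exampleUtil exercises JMESPath truthiness and [start:stop:step] slicing.
func exampleUtil() {
	// isFalse: empty collections, empty strings, nil and false are all "false".
	fmt.Println(isFalse([]interface{}{}), isFalse("x")) // true false

	items := []interface{}{0.0, 1.0, 2.0, 3.0, 4.0}

	// Equivalent of items[1:4] — start and stop given, step defaults to 1.
	a, _ := slice(items, []sliceParam{{N: 1, Specified: true}, {N: 4, Specified: true}, {}})
	fmt.Println(a) // [1 2 3]

	// Equivalent of items[::-1] — only a negative step, so start and stop
	// default to the ends and the result comes back reversed.
	b, _ := slice(items, []sliceParam{{}, {}, {N: -1, Specified: true}})
	fmt.Println(b) // [4 3 2 1 0]
}
```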
+ if d, ok := data.([]interface{}); ok { + result := make([]float64, len(d)) + for i, el := range d { + item, ok := el.(float64) + if !ok { + return nil, false + } + result[i] = item + } + return result, true + } + return nil, false +} + +// ToArrayStr converts an empty interface type to a slice of strings. +// If any element in the array cannot be converted, then nil is returned +// along with a second value of false. If the input data could be entirely +// converted, then the converted data, along with a second value of true, +// will be returned. +func toArrayStr(data interface{}) ([]string, bool) { + // Is there a better way to do this with reflect? + if d, ok := data.([]interface{}); ok { + result := make([]string, len(d)) + for i, el := range d { + item, ok := el.(string) + if !ok { + return nil, false + } + result[i] = item + } + return result, true + } + return nil, false +} + +func isSliceType(v interface{}) bool { + if v == nil { + return false + } + return reflect.TypeOf(v).Kind() == reflect.Slice +} diff --git a/vendor/github.com/docker/distribution/vendor/github.com/bugsnag/bugsnag-go/.travis.yml b/vendor/github.com/docker/distribution/vendor/github.com/bugsnag/bugsnag-go/.travis.yml new file mode 100644 index 000000000..a77773b8e --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/github.com/bugsnag/bugsnag-go/.travis.yml @@ -0,0 +1,13 @@ +language: go + +go: + - 1.1 + - 1.2 + - 1.3 + - tip + +install: + - go get github.com/bugsnag/panicwrap + - go get github.com/bugsnag/osext + - go get github.com/bitly/go-simplejson + - go get github.com/revel/revel diff --git a/vendor/github.com/docker/distribution/vendor/github.com/bugsnag/bugsnag-go/LICENSE.txt b/vendor/github.com/docker/distribution/vendor/github.com/bugsnag/bugsnag-go/LICENSE.txt new file mode 100644 index 000000000..3cb0ec0ff --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/github.com/bugsnag/bugsnag-go/LICENSE.txt @@ -0,0 +1,20 @@ +Copyright (c) 2014 Bugsnag + +Permission is hereby granted, free of charge, to any person obtaining +a copy of this software and associated documentation files (the +"Software"), to deal in the Software without restriction, including +without limitation the rights to use, copy, modify, merge, publish, +distribute, sublicense, and/or sell copies of the Software, and to +permit persons to whom the Software is furnished to do so, subject to +the following conditions: + +The above copyright notice and this permission notice shall be +included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE +LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION +WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
diff --git a/vendor/github.com/docker/distribution/vendor/github.com/bugsnag/bugsnag-go/README.md b/vendor/github.com/docker/distribution/vendor/github.com/bugsnag/bugsnag-go/README.md new file mode 100644 index 000000000..b54322939 --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/github.com/bugsnag/bugsnag-go/README.md @@ -0,0 +1,489 @@ +Bugsnag Notifier for Golang +=========================== + +The Bugsnag Notifier for Golang gives you instant notification of panics, or +unexpected errors, in your golang app. Any unhandled panics will trigger a +notification to be sent to your Bugsnag project. + +[Bugsnag](http://bugsnag.com) captures errors in real-time from your web, +mobile and desktop applications, helping you to understand and resolve them +as fast as possible. [Create a free account](http://bugsnag.com) to start +capturing exceptions from your applications. + +## How to Install + +1. Download the code + + ```shell + go get github.com/bugsnag/bugsnag-go + ``` + +### Using with net/http apps + +For a golang app based on [net/http](https://godoc.org/net/http), integrating +Bugsnag takes two steps. You should also use these instructions if you're using +the [gorilla toolkit](http://www.gorillatoolkit.org/), or the +[pat](https://github.com/bmizerany/pat/) muxer. + +1. Configure bugsnag at the start of your `main()` function: + + ```go + import "github.com/bugsnag/bugsnag-go" + + func main() { + bugsnag.Configure(bugsnag.Configuration{ + APIKey: "YOUR_API_KEY_HERE", + ReleaseStage: "production", + // more configuration options + }) + + // rest of your program. + } + ``` + +2. Wrap your server in a [bugsnag.Handler](https://godoc.org/github.com/bugsnag/bugsnag-go/#Handler) + + ```go + // a. If you're using the builtin http mux, you can just pass + // bugsnag.Handler(nil) to http.ListenAndServer + http.ListenAndServe(":8080", bugsnag.Handler(nil)) + + // b. If you're creating a server manually yourself, you can set + // its handlers the same way + srv := http.Server{ + Handler: bugsnag.Handler(nil) + } + + // c. If you're not using the builtin http mux, wrap your own handler + // (though make sure that it doesn't already catch panics) + http.ListenAndServe(":8080", bugsnag.Handler(handler)) + ``` + +### Using with Revel apps + +There are two steps to get panic handling in [revel](https://revel.github.io) apps. + +1. Add the `bugsnagrevel.Filter` immediately after the `revel.PanicFilter` in `app/init.go`: + + ```go + + import "github.com/bugsnag/bugsnag-go/revel" + + revel.Filters = []revel.Filter{ + revel.PanicFilter, + bugsnagrevel.Filter, + // ... + } + ``` + +2. Set bugsnag.apikey in the top section of `conf/app.conf`. + + ``` + module.static=github.com/revel/revel/modules/static + + bugsnag.apikey=YOUR_API_KEY_HERE + + [dev] + ``` + +### Using with Google App Engine + +1. Configure bugsnag at the start of your `init()` function: + + ```go + import "github.com/bugsnag/bugsnag-go" + + func init() { + bugsnag.Configure(bugsnag.Configuration{ + APIKey: "YOUR_API_KEY_HERE", + }) + + // ... + } + ``` + +2. Wrap *every* http.Handler or http.HandlerFunc with Bugsnag: + + ```go + // a. If you're using HandlerFuncs + http.HandleFunc("/", bugsnag.HandlerFunc( + func (w http.ResponseWriter, r *http.Request) { + // ... + })) + + // b. If you're using Handlers + http.Handle("/", bugsnag.Handler(myHttpHandler)) + ``` + +3. 
In order to use Bugsnag, you must provide the current +[`appengine.Context`](https://developers.google.com/appengine/docs/go/reference#Context), or +current `*http.Request` as rawData. The easiest way to do this is to create a new notifier. + + ```go + c := appengine.NewContext(r) + notifier := bugsnag.New(c) + + if err != nil { + notifier.Notify(err) + } + + go func () { + defer notifier.Recover() + + // ... + }() + ``` + + +## Notifying Bugsnag manually + +Bugsnag will automatically handle any panics that crash your program and notify +you of them. If you've integrated with `revel` or `net/http`, then you'll also +be notified of any panics() that happen while processing a request. + +Sometimes however it's useful to manually notify Bugsnag of a problem. To do this, +call [`bugsnag.Notify()`](https://godoc.org/github.com/bugsnag/bugsnag-go/#Notify) + +```go +if err != nil { + bugsnag.Notify(err) +} +``` + +### Manual panic handling + +To avoid a panic in a goroutine from crashing your entire app, you can use +[`bugsnag.Recover()`](https://godoc.org/github.com/bugsnag/bugsnag-go/#Recover) +to stop a panic from unwinding the stack any further. When `Recover()` is hit, +it will send any current panic to Bugsnag and then stop panicking. This is +most useful at the start of a goroutine: + +```go +go func() { + defer bugsnag.Recover() + + // ... +}() +``` + +Alternatively you can use +[`bugsnag.AutoNotify()`](https://godoc.org/github.com/bugsnag/bugsnag-go/#Recover) +to notify bugsnag of a panic while letting the program continue to panic. This +is useful if you're using a Framework that already has some handling of panics +and you are retrofitting bugsnag support. + +```go +defer bugsnag.AutoNotify() +``` + +## Sending Custom Data + +Most functions in the Bugsnag API, including `bugsnag.Notify()`, +`bugsnag.Recover()`, `bugsnag.AutoNotify()`, and `bugsnag.Handler()` let you +attach data to the notifications that they send. To do this you pass in rawData, +which can be any of the supported types listed here. To add support for more +types of rawData see [OnBeforeNotify](#custom-data-with-onbeforenotify). + +### Custom MetaData + +Custom metaData appears as tabs on Bugsnag.com. You can set it by passing +a [`bugsnag.MetaData`](https://godoc.org/github.com/bugsnag/bugsnag-go/#MetaData) +object as rawData. + +```go +bugsnag.Notify(err, + bugsnag.MetaData{ + "Account": { + "Name": Account.Name, + "Paying": Account.Plan.Premium, + }, + }) +``` + +### Request data + +Bugsnag can extract interesting data from +[`*http.Request`](https://godoc.org/net/http/#Request) objects, and +[`*revel.Controller`](https://godoc.org/github.com/revel/revel/#Controller) +objects. These are automatically passed in when handling panics, and you can +pass them yourself. + +```go +func (w http.ResponseWriter, r *http.Request) { + bugsnag.Notify(err, r) +} +``` + +### User data + +User data is searchable, and the `Id` powers the count of users affected. You +can set which user an error affects by passing a +[`bugsnag.User`](https://godoc.org/github.com/bugsnag/bugsnag-go/#User) object as +rawData. + +```go +bugsnag.Notify(err, + bugsnag.User{Id: "1234", Name: "Conrad", Email: "me@cirw.in"}) +``` + +### Context + +The context shows up prominently in the list view so that you can get an idea +of where a problem occurred. You can set it by passing a +[`bugsnag.Context`](https://godoc.org/github.com/bugsnag/bugsnag-go/#Context) +object as rawData. 
+ +```go +bugsnag.Notify(err, bugsnag.Context{"backgroundJob"}) +``` + +### Severity + +Bugsnag supports three severities, `SeverityError`, `SeverityWarning`, and `SeverityInfo`. +You can set the severity of an error by passing one of these objects as rawData. + +```go +bugsnag.Notify(err, bugsnag.SeverityInfo) +``` + +## Configuration + +You must call `bugsnag.Configure()` at the start of your program to use Bugsnag, you pass it +a [`bugsnag.Configuration`](https://godoc.org/github.com/bugsnag/bugsnag-go/#Configuration) object +containing any of the following values. + +### APIKey + +The Bugsnag API key can be found on your [Bugsnag dashboard](https://bugsnag.com) under "Settings". + +```go +bugsnag.Configure(bugsnag.Configuration{ + APIKey: "YOUR_API_KEY_HERE", +}) +``` + +### Endpoint + +The Bugsnag endpoint defaults to `https://notify.bugsnag.com/`. If you're using Bugsnag enterprise, +you should set this to the endpoint of your local instance. + +```go +bugsnag.Configure(bugsnag.Configuration{ + Endpoint: "http://bugsnag.internal:49000/", +}) +``` + +### ReleaseStage + +The ReleaseStage tracks where your app is deployed. You should set this to `production`, `staging`, +`development` or similar as appropriate. + +```go +bugsnag.Configure(bugsnag.Configuration{ + ReleaseStage: "development", +}) +``` + +### NotifyReleaseStages + +The list of ReleaseStages to notify in. By default Bugsnag will notify you in all release stages, but +you can use this to silence development errors. + +```go +bugsnag.Configure(bugsnag.Configuration{ + NotifyReleaseStages: []string{"production", "staging"}, +}) +``` + +### AppVersion + +If you use a versioning scheme for deploys of your app, Bugsnag can use the `AppVersion` to only +re-open errors if they occur in later version of the app. + +```go +bugsnag.Configure(bugsnag.Configuration{ + AppVersion: "1.2.3", +}) +``` + +### Hostname + +The hostname is used to track where exceptions are coming from in the Bugsnag dashboard. The +default value is obtained from `os.Hostname()` so you won't often need to change this. + +```go +bugsnag.Configure(bugsnag.Configuration{ + Hostname: "go1", +}) +``` + +### ProjectPackages + +In order to determine where a crash happens Bugsnag needs to know which packages you consider to +be part of your app (as opposed to a library). By default this is set to `[]string{"main*"}`. Strings +are matched to package names using [`filepath.Match`](http://godoc.org/path/filepath#Match). + +```go +bugsnag.Configure(bugsnag.Configuration{ + ProjectPackages: []string{"main", "github.com/domain/myapp/*"}, +} +``` + +### ParamsFilters + +Sometimes sensitive data is accidentally included in Bugsnag MetaData. You can remove it by +setting `ParamsFilters`. Any key in the `MetaData` that includes any string in the filters +will be redacted. The default is `[]string{"password", "secret"}`, which prevents fields like +`password`, `password_confirmation` and `secret_answer` from being sent. + +```go +bugsnag.Configure(bugsnag.Configuration{ + ParamsFilters: []string{"password", "secret"}, +} +``` + +### Logger + +The Logger to write to in case of an error inside Bugsnag. This defaults to the global logger. + +```go +bugsnag.Configure(bugsnag.Configuration{ + Logger: app.Logger, +} +``` + +### PanicHandler + +The first time Bugsnag is configured, it wraps the running program in a panic +handler using [panicwrap](http://godoc.org/github.com/ConradIrwin/panicwrap). This +forks a sub-process which monitors unhandled panics. 
To prevent this, set +`PanicHandler` to `func() {}` the first time you call +`bugsnag.Configure`. This will prevent bugsnag from being able to notify you about +unhandled panics. + +```go +bugsnag.Configure(bugsnag.Configuration{ + PanicHandler: func() {}, +}) +``` + +### Synchronous + +Bugsnag usually starts a new goroutine before sending notifications. This means +that notifications can be lost if you do a bugsnag.Notify and then immediately +os.Exit. To avoid this problem, set Bugsnag to Synchronous (or just `panic()` +instead ;). + +```go +bugsnag.Configure(bugsnag.Configuration{ + Synchronous: true +}) +``` + +Or just for one error: + +```go +bugsnag.Notify(err, bugsnag.Configuration{Synchronous: true}) +``` + +### Transport + +The transport configures how Bugsnag makes http requests. By default we use +[`http.DefaultTransport`](http://godoc.org/net/http#RoundTripper) which handles +HTTP proxies automatically using the `$HTTP_PROXY` environment variable. + +```go +bugsnag.Configure(bugsnag.Configuration{ + Transport: http.DefaultTransport, +}) +``` + +## Custom data with OnBeforeNotify + +While it's nice that you can pass `MetaData` directly into `bugsnag.Notify`, +`bugsnag.AutoNotify`, and `bugsnag.Recover`, this can be a bit cumbersome and +inefficient — you're constructing the meta-data whether or not it will actually +be used. A better idea is to pass raw data in to these functions, and add an +`OnBeforeNotify` filter that converts them into `MetaData`. + +For example, lets say our system processes jobs: + +```go +type Job struct{ + Retry bool + UserId string + UserEmail string + Name string + Params map[string]string +} +``` + +You can pass a job directly into Bugsnag.notify: + +```go +bugsnag.Notify(err, job) +``` + +And then add a filter to extract information from that job and attach it to the +Bugsnag event: + +```go +bugsnag.OnBeforeNotify( + func(event *bugsnag.Event, config *bugsnag.Configuration) error { + + // Search all the RawData for any *Job pointers that we're passed in + // to bugsnag.Notify() and friends. + for _, datum := range event.RawData { + if job, ok := datum.(*Job); ok { + // don't notify bugsnag about errors in retries + if job.Retry { + return fmt.Errorf("not notifying about retried jobs") + } + + // add the job as a tab on Bugsnag.com + event.MetaData.AddStruct("Job", job) + + // set the user correctly + event.User = &User{Id: job.UserId, Email: job.UserEmail} + } + } + + // continue notifying as normal + return nil + }) +``` + +## Advanced Usage + +If you want to have multiple different configurations around in one program, +you can use `bugsnag.New()` to create multiple independent instances of +Bugsnag. You can use these without calling `bugsnag.Configure()`, but bear in +mind that until you call `bugsnag.Configure()` unhandled panics will not be +sent to bugsnag. + +```go +notifier := bugsnag.New(bugsnag.Configuration{ + APIKey: "YOUR_OTHER_API_KEY", +}) +``` + +In fact any place that lets you pass in `rawData` also allows you to pass in +configuration. 
For example to send http errors to one bugsnag project, you +could do: + +```go +bugsnag.Handler(nil, bugsnag.Configuration{APIKey: "YOUR_OTHER_API_KEY"}) +``` + +### GroupingHash + +If you need to override Bugsnag's grouping algorithm, you can set the +`GroupingHash` in an `OnBeforeNotify`: + +```go +bugsnag.OnBeforeNotify( + func (event *bugsnag.Event, config *bugsnag.Configuration) error { + event.GroupingHash = calculateGroupingHash(event) + return nil + }) +``` diff --git a/vendor/github.com/docker/distribution/vendor/github.com/bugsnag/bugsnag-go/appengine.go b/vendor/github.com/docker/distribution/vendor/github.com/bugsnag/bugsnag-go/appengine.go new file mode 100644 index 000000000..73aa2d778 --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/github.com/bugsnag/bugsnag-go/appengine.go @@ -0,0 +1,76 @@ +// +build appengine + +package bugsnag + +import ( + "appengine" + "appengine/urlfetch" + "appengine/user" + "fmt" + "log" + "net/http" +) + +func defaultPanicHandler() {} + +func init() { + OnBeforeNotify(appengineMiddleware) +} + +func appengineMiddleware(event *Event, config *Configuration) (err error) { + var c appengine.Context + + for _, datum := range event.RawData { + if r, ok := datum.(*http.Request); ok { + c = appengine.NewContext(r) + break + } else if context, ok := datum.(appengine.Context); ok { + c = context + break + } + } + + if c == nil { + return fmt.Errorf("No appengine context given") + } + + // You can only use the builtin http library if you pay for appengine, + // so we use the appengine urlfetch service instead. + config.Transport = &urlfetch.Transport{ + Context: c, + } + + // Anything written to stderr/stdout is discarded, so lets log to the request. + config.Logger = log.New(appengineWriter{c}, config.Logger.Prefix(), config.Logger.Flags()) + + // Set the releaseStage appropriately + if config.ReleaseStage == "" { + if appengine.IsDevAppServer() { + config.ReleaseStage = "development" + } else { + config.ReleaseStage = "production" + } + } + + if event.User == nil { + u := user.Current(c) + if u != nil { + event.User = &User{ + Id: u.ID, + Email: u.Email, + } + } + } + + return nil +} + +// Convert an appengine.Context into an io.Writer so we can create a log.Logger. +type appengineWriter struct { + appengine.Context +} + +func (c appengineWriter) Write(b []byte) (int, error) { + c.Warningf(string(b)) + return len(b), nil +} diff --git a/vendor/github.com/docker/distribution/vendor/github.com/bugsnag/bugsnag-go/bugsnag.go b/vendor/github.com/docker/distribution/vendor/github.com/bugsnag/bugsnag-go/bugsnag.go new file mode 100644 index 000000000..acd0fed36 --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/github.com/bugsnag/bugsnag-go/bugsnag.go @@ -0,0 +1,131 @@ +package bugsnag + +import ( + "github.com/bugsnag/bugsnag-go/errors" + "log" + "net/http" + "os" + "sync" + + // Fixes a bug with SHA-384 intermediate certs on some platforms. + // - https://github.com/bugsnag/bugsnag-go/issues/9 + _ "crypto/sha512" +) + +// The current version of bugsnag-go. +const VERSION = "1.0.2" + +var once sync.Once +var middleware middlewareStack + +// The configuration for the default bugsnag notifier. +var Config Configuration + +var defaultNotifier = Notifier{&Config, nil} + +// Configure Bugsnag. The only required setting is the APIKey, which can be +// obtained by clicking on "Settings" in your Bugsnag dashboard. 
This function +// is also responsible for installing the global panic handler, so it should be +// called as early as possible in your initialization process. +func Configure(config Configuration) { + Config.update(&config) + once.Do(Config.PanicHandler) +} + +// Notify sends an error to Bugsnag along with the current stack trace. The +// rawData is used to send extra information along with the error. For example +// you can pass the current http.Request to Bugsnag to see information about it +// in the dashboard, or set the severity of the notification. +func Notify(err error, rawData ...interface{}) error { + return defaultNotifier.Notify(errors.New(err, 1), rawData...) +} + +// AutoNotify logs a panic on a goroutine and then repanics. +// It should only be used in places that have existing panic handlers further +// up the stack. See bugsnag.Recover(). The rawData is used to send extra +// information along with any panics that are handled this way. +// Usage: defer bugsnag.AutoNotify() +func AutoNotify(rawData ...interface{}) { + if err := recover(); err != nil { + rawData = defaultNotifier.addDefaultSeverity(rawData, SeverityError) + defaultNotifier.Notify(errors.New(err, 2), rawData...) + panic(err) + } +} + +// Recover logs a panic on a goroutine and then recovers. +// The rawData is used to send extra information along with +// any panics that are handled this way +// Usage: defer bugsnag.Recover() +func Recover(rawData ...interface{}) { + if err := recover(); err != nil { + rawData = defaultNotifier.addDefaultSeverity(rawData, SeverityWarning) + defaultNotifier.Notify(errors.New(err, 2), rawData...) + } +} + +// OnBeforeNotify adds a callback to be run before a notification is sent to +// Bugsnag. It can be used to modify the event or its MetaData. Changes made +// to the configuration are local to notifying about this event. To prevent the +// event from being sent to Bugsnag return an error, this error will be +// returned from bugsnag.Notify() and the event will not be sent. +func OnBeforeNotify(callback func(event *Event, config *Configuration) error) { + middleware.OnBeforeNotify(callback) +} + +// Handler creates an http Handler that notifies Bugsnag any panics that +// happen. It then repanics so that the default http Server panic handler can +// handle the panic too. The rawData is used to send extra information along +// with any panics that are handled this way. +func Handler(h http.Handler, rawData ...interface{}) http.Handler { + notifier := New(rawData...) + if h == nil { + h = http.DefaultServeMux + } + + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + defer notifier.AutoNotify(r) + h.ServeHTTP(w, r) + }) +} + +// HandlerFunc creates an http HandlerFunc that notifies Bugsnag about any +// panics that happen. It then repanics so that the default http Server panic +// handler can handle the panic too. The rawData is used to send extra +// information along with any panics that are handled this way. If you have +// already wrapped your http server using bugsnag.Handler() you don't also need +// to wrap each HandlerFunc. +func HandlerFunc(h http.HandlerFunc, rawData ...interface{}) http.HandlerFunc { + notifier := New(rawData...) 
+ + return func(w http.ResponseWriter, r *http.Request) { + defer notifier.AutoNotify(r) + h(w, r) + } +} + +func init() { + // Set up builtin middlewarez + OnBeforeNotify(httpRequestMiddleware) + + // Default configuration + Config.update(&Configuration{ + APIKey: "", + Endpoint: "https://notify.bugsnag.com/", + Hostname: "", + AppVersion: "", + ReleaseStage: "", + ParamsFilters: []string{"password", "secret"}, + // * for app-engine + ProjectPackages: []string{"main*"}, + NotifyReleaseStages: nil, + Logger: log.New(os.Stdout, log.Prefix(), log.Flags()), + PanicHandler: defaultPanicHandler, + Transport: http.DefaultTransport, + }) + + hostname, err := os.Hostname() + if err == nil { + Config.Hostname = hostname + } +} diff --git a/vendor/github.com/docker/distribution/vendor/github.com/bugsnag/bugsnag-go/configuration.go b/vendor/github.com/docker/distribution/vendor/github.com/bugsnag/bugsnag-go/configuration.go new file mode 100644 index 000000000..7ff26e56f --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/github.com/bugsnag/bugsnag-go/configuration.go @@ -0,0 +1,159 @@ +package bugsnag + +import ( + "log" + "net/http" + "path/filepath" + "strings" +) + +// Configuration sets up and customizes communication with the Bugsnag API. +type Configuration struct { + // Your Bugsnag API key, e.g. "c9d60ae4c7e70c4b6c4ebd3e8056d2b8". You can + // find this by clicking Settings on https://bugsnag.com/. + APIKey string + // The Endpoint to notify about crashes. This defaults to + // "https://notify.bugsnag.com/", if you're using Bugsnag Enterprise then + // set it to your internal Bugsnag endpoint. + Endpoint string + + // The current release stage. This defaults to "production" and is used to + // filter errors in the Bugsnag dashboard. + ReleaseStage string + // The currently running version of the app. This is used to filter errors + // in the Bugsnag dasboard. If you set this then Bugsnag will only re-open + // resolved errors if they happen in different app versions. + AppVersion string + // The hostname of the current server. This defaults to the return value of + // os.Hostname() and is graphed in the Bugsnag dashboard. + Hostname string + + // The Release stages to notify in. If you set this then bugsnag-go will + // only send notifications to Bugsnag if the ReleaseStage is listed here. + NotifyReleaseStages []string + + // packages that are part of your app. Bugsnag uses this to determine how + // to group errors and how to display them on your dashboard. You should + // include any packages that are part of your app, and exclude libraries + // and helpers. You can list wildcards here, and they'll be expanded using + // filepath.Glob. The default value is []string{"main*"} + ProjectPackages []string + + // Any meta-data that matches these filters will be marked as [REDACTED] + // before sending a Notification to Bugsnag. It defaults to + // []string{"password", "secret"} so that request parameters like password, + // password_confirmation and auth_secret will not be sent to Bugsnag. + ParamsFilters []string + + // The PanicHandler is used by Bugsnag to catch unhandled panics in your + // application. The default panicHandler uses mitchellh's panicwrap library, + // and you can disable this feature by passing an empty: func() {} + PanicHandler func() + + // The logger that Bugsnag should log to. Uses the same defaults as go's + // builtin logging package. bugsnag-go logs whenever it notifies Bugsnag + // of an error, and when any error occurs inside the library itself. 
+ Logger *log.Logger + // The http Transport to use, defaults to the default http Transport. This + // can be configured if you are in an environment like Google App Engine + // that has stringent conditions on making http requests. + Transport http.RoundTripper + // Whether bugsnag should notify synchronously. This defaults to false which + // causes bugsnag-go to spawn a new goroutine for each notification. + Synchronous bool + // TODO: remember to update the update() function when modifying this struct +} + +func (config *Configuration) update(other *Configuration) *Configuration { + if other.APIKey != "" { + config.APIKey = other.APIKey + } + if other.Endpoint != "" { + config.Endpoint = other.Endpoint + } + if other.Hostname != "" { + config.Hostname = other.Hostname + } + if other.AppVersion != "" { + config.AppVersion = other.AppVersion + } + if other.ReleaseStage != "" { + config.ReleaseStage = other.ReleaseStage + } + if other.ParamsFilters != nil { + config.ParamsFilters = other.ParamsFilters + } + if other.ProjectPackages != nil { + config.ProjectPackages = other.ProjectPackages + } + if other.Logger != nil { + config.Logger = other.Logger + } + if other.NotifyReleaseStages != nil { + config.NotifyReleaseStages = other.NotifyReleaseStages + } + if other.PanicHandler != nil { + config.PanicHandler = other.PanicHandler + } + if other.Transport != nil { + config.Transport = other.Transport + } + if other.Synchronous { + config.Synchronous = true + } + + return config +} + +func (config *Configuration) merge(other *Configuration) *Configuration { + return config.clone().update(other) +} + +func (config *Configuration) clone() *Configuration { + clone := *config + return &clone +} + +func (config *Configuration) isProjectPackage(pkg string) bool { + for _, p := range config.ProjectPackages { + if match, _ := filepath.Match(p, pkg); match { + return true + } + } + return false +} + +func (config *Configuration) stripProjectPackages(file string) string { + for _, p := range config.ProjectPackages { + if len(p) > 2 && p[len(p)-2] == '/' && p[len(p)-1] == '*' { + p = p[:len(p)-1] + } else { + p = p + "/" + } + if strings.HasPrefix(file, p) { + return strings.TrimPrefix(file, p) + } + } + + return file +} + +func (config *Configuration) log(fmt string, args ...interface{}) { + if config != nil && config.Logger != nil { + config.Logger.Printf(fmt, args...) + } else { + log.Printf(fmt, args...) + } +} + +func (config *Configuration) notifyInReleaseStage() bool { + if config.NotifyReleaseStages == nil { + return true + } + for _, r := range config.NotifyReleaseStages { + if r == config.ReleaseStage { + return true + } + } + return false +} diff --git a/vendor/github.com/docker/distribution/vendor/github.com/bugsnag/bugsnag-go/doc.go b/vendor/github.com/docker/distribution/vendor/github.com/bugsnag/bugsnag-go/doc.go new file mode 100644 index 000000000..827e03b8b --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/github.com/bugsnag/bugsnag-go/doc.go @@ -0,0 +1,69 @@ +/* +Package bugsnag captures errors in real-time and reports them to Bugsnag (http://bugsnag.com). + +Using bugsnag-go is a three-step process. + +1. As early as possible in your program configure the notifier with your APIKey. This sets up +handling of panics that would otherwise crash your app. + + func init() { + bugsnag.Configure(bugsnag.Configuration{ + APIKey: "YOUR_API_KEY_HERE", + }) + } + +2. Add bugsnag to places that already catch panics. 
For example you should add it to the HTTP server +when you call ListenAndServer: + + http.ListenAndServe(":8080", bugsnag.Handler(nil)) + +If that's not possible, for example because you're using Google App Engine, you can also wrap each +HTTP handler manually: + + http.HandleFunc("/" bugsnag.HandlerFunc(func (w http.ResponseWriter, r *http.Request) { + ... + }) + +3. To notify Bugsnag of an error that is not a panic, pass it to bugsnag.Notify. This will also +log the error message using the configured Logger. + + if err != nil { + bugsnag.Notify(err) + } + +For detailed integration instructions see https://bugsnag.com/docs/notifiers/go. + +Configuration + +The only required configuration is the Bugsnag API key which can be obtained by clicking "Settings" +on the top of https://bugsnag.com/ after signing up. We also recommend you set the ReleaseStage +and AppVersion if these make sense for your deployment workflow. + +RawData + +If you need to attach extra data to Bugsnag notifications you can do that using +the rawData mechanism. Most of the functions that send errors to Bugsnag allow +you to pass in any number of interface{} values as rawData. The rawData can +consist of the Severity, Context, User or MetaData types listed below, and +there is also builtin support for *http.Requests. + + bugsnag.Notify(err, bugsnag.SeverityError) + +If you want to add custom tabs to your bugsnag dashboard you can pass any value in as rawData, +and then process it into the event's metadata using a bugsnag.OnBeforeNotify() hook. + + bugsnag.Notify(err, account) + + bugsnag.OnBeforeNotify(func (e *bugsnag.Event, c *bugsnag.Configuration) { + for datum := range e.RawData { + if account, ok := datum.(Account); ok { + e.MetaData.Add("account", "name", account.Name) + e.MetaData.Add("account", "url", account.URL) + } + } + }) + +If necessary you can pass Configuration in as rawData, or modify the Configuration object passed +into OnBeforeNotify hooks. Configuration passed in this way only affects the current notification. +*/ +package bugsnag diff --git a/vendor/github.com/docker/distribution/vendor/github.com/bugsnag/bugsnag-go/errors/README.md b/vendor/github.com/docker/distribution/vendor/github.com/bugsnag/bugsnag-go/errors/README.md new file mode 100644 index 000000000..8d8e097aa --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/github.com/bugsnag/bugsnag-go/errors/README.md @@ -0,0 +1,6 @@ +Adds stacktraces to errors in golang. + +This was made to help build the Bugsnag notifier but can be used standalone if +you like to have stacktraces on errors. + +See [Godoc](https://godoc.org/github.com/bugsnag/bugsnag-go/errors) for the API docs. diff --git a/vendor/github.com/docker/distribution/vendor/github.com/bugsnag/bugsnag-go/errors/error.go b/vendor/github.com/docker/distribution/vendor/github.com/bugsnag/bugsnag-go/errors/error.go new file mode 100644 index 000000000..0081c0a80 --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/github.com/bugsnag/bugsnag-go/errors/error.go @@ -0,0 +1,90 @@ +// Package errors provides errors that have stack-traces. +package errors + +import ( + "bytes" + "fmt" + "reflect" + "runtime" +) + +// The maximum number of stackframes on any error. +var MaxStackDepth = 50 + +// Error is an error with an attached stacktrace. It can be used +// wherever the builtin error interface is expected. +type Error struct { + Err error + stack []uintptr + frames []StackFrame +} + +// New makes an Error from the given value. 
If that value is already an +// error then it will be used directly, if not, it will be passed to +// fmt.Errorf("%v"). The skip parameter indicates how far up the stack +// to start the stacktrace. 0 is from the current call, 1 from its caller, etc. +func New(e interface{}, skip int) *Error { + var err error + + switch e := e.(type) { + case *Error: + return e + case error: + err = e + default: + err = fmt.Errorf("%v", e) + } + + stack := make([]uintptr, MaxStackDepth) + length := runtime.Callers(2+skip, stack[:]) + return &Error{ + Err: err, + stack: stack[:length], + } +} + +// Errorf creates a new error with the given message. You can use it +// as a drop-in replacement for fmt.Errorf() to provide descriptive +// errors in return values. +func Errorf(format string, a ...interface{}) *Error { + return New(fmt.Errorf(format, a...), 1) +} + +// Error returns the underlying error's message. +func (err *Error) Error() string { + return err.Err.Error() +} + +// Stack returns the callstack formatted the same way that go does +// in runtime/debug.Stack() +func (err *Error) Stack() []byte { + buf := bytes.Buffer{} + + for _, frame := range err.StackFrames() { + buf.WriteString(frame.String()) + } + + return buf.Bytes() +} + +// StackFrames returns an array of frames containing information about the +// stack. +func (err *Error) StackFrames() []StackFrame { + if err.frames == nil { + err.frames = make([]StackFrame, len(err.stack)) + + for i, pc := range err.stack { + err.frames[i] = NewStackFrame(pc) + } + } + + return err.frames +} + +// TypeName returns the type this error. e.g. *errors.stringError. +func (err *Error) TypeName() string { + if _, ok := err.Err.(uncaughtPanic); ok { + return "panic" + } + return reflect.TypeOf(err.Err).String() +} diff --git a/vendor/github.com/docker/distribution/vendor/github.com/bugsnag/bugsnag-go/errors/parse_panic.go b/vendor/github.com/docker/distribution/vendor/github.com/bugsnag/bugsnag-go/errors/parse_panic.go new file mode 100644 index 000000000..cc37052d7 --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/github.com/bugsnag/bugsnag-go/errors/parse_panic.go @@ -0,0 +1,127 @@ +package errors + +import ( + "strconv" + "strings" +) + +type uncaughtPanic struct{ message string } + +func (p uncaughtPanic) Error() string { + return p.message +} + +// ParsePanic allows you to get an error object from the output of a go program +// that panicked. This is particularly useful with https://github.com/mitchellh/panicwrap. 
+func ParsePanic(text string) (*Error, error) { + lines := strings.Split(text, "\n") + + state := "start" + + var message string + var stack []StackFrame + + for i := 0; i < len(lines); i++ { + line := lines[i] + + if state == "start" { + if strings.HasPrefix(line, "panic: ") { + message = strings.TrimPrefix(line, "panic: ") + state = "seek" + } else { + return nil, Errorf("bugsnag.panicParser: Invalid line (no prefix): %s", line) + } + + } else if state == "seek" { + if strings.HasPrefix(line, "goroutine ") && strings.HasSuffix(line, "[running]:") { + state = "parsing" + } + + } else if state == "parsing" { + if line == "" { + state = "done" + break + } + createdBy := false + if strings.HasPrefix(line, "created by ") { + line = strings.TrimPrefix(line, "created by ") + createdBy = true + } + + i++ + + if i >= len(lines) { + return nil, Errorf("bugsnag.panicParser: Invalid line (unpaired): %s", line) + } + + frame, err := parsePanicFrame(line, lines[i], createdBy) + if err != nil { + return nil, err + } + + stack = append(stack, *frame) + if createdBy { + state = "done" + break + } + } + } + + if state == "done" || state == "parsing" { + return &Error{Err: uncaughtPanic{message}, frames: stack}, nil + } + return nil, Errorf("could not parse panic: %v", text) +} + +// The lines we're passing look like this: +// +// main.(*foo).destruct(0xc208067e98) +// /0/go/src/github.com/bugsnag/bugsnag-go/pan/main.go:22 +0x151 +func parsePanicFrame(name string, line string, createdBy bool) (*StackFrame, error) { + idx := strings.LastIndex(name, "(") + if idx == -1 && !createdBy { + return nil, Errorf("bugsnag.panicParser: Invalid line (no call): %s", name) + } + if idx != -1 { + name = name[:idx] + } + pkg := "" + + if lastslash := strings.LastIndex(name, "/"); lastslash >= 0 { + pkg += name[:lastslash] + "/" + name = name[lastslash+1:] + } + if period := strings.Index(name, "."); period >= 0 { + pkg += name[:period] + name = name[period+1:] + } + + name = strings.Replace(name, "·", ".", -1) + + if !strings.HasPrefix(line, "\t") { + return nil, Errorf("bugsnag.panicParser: Invalid line (no tab): %s", line) + } + + idx = strings.LastIndex(line, ":") + if idx == -1 { + return nil, Errorf("bugsnag.panicParser: Invalid line (no line number): %s", line) + } + file := line[1:idx] + + number := line[idx+1:] + if idx = strings.Index(number, " +"); idx > -1 { + number = number[:idx] + } + + lno, err := strconv.ParseInt(number, 10, 32) + if err != nil { + return nil, Errorf("bugsnag.panicParser: Invalid line (bad line number): %s", line) + } + + return &StackFrame{ + File: file, + LineNumber: int(lno), + Package: pkg, + Name: name, + }, nil +} diff --git a/vendor/github.com/docker/distribution/vendor/github.com/bugsnag/bugsnag-go/errors/stackframe.go b/vendor/github.com/docker/distribution/vendor/github.com/bugsnag/bugsnag-go/errors/stackframe.go new file mode 100644 index 000000000..4edadbc58 --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/github.com/bugsnag/bugsnag-go/errors/stackframe.go @@ -0,0 +1,97 @@ +package errors + +import ( + "bytes" + "fmt" + "io/ioutil" + "runtime" + "strings" +) + +// A StackFrame contains all necessary information about to generate a line +// in a callstack. +type StackFrame struct { + File string + LineNumber int + Name string + Package string + ProgramCounter uintptr +} + +// NewStackFrame popoulates a stack frame object from the program counter. 
+func NewStackFrame(pc uintptr) (frame StackFrame) { + + frame = StackFrame{ProgramCounter: pc} + if frame.Func() == nil { + return + } + frame.Package, frame.Name = packageAndName(frame.Func()) + + // pc -1 because the program counters we use are usually return addresses, + // and we want to show the line that corresponds to the function call + frame.File, frame.LineNumber = frame.Func().FileLine(pc - 1) + return + +} + +// Func returns the function that this stackframe corresponds to +func (frame *StackFrame) Func() *runtime.Func { + if frame.ProgramCounter == 0 { + return nil + } + return runtime.FuncForPC(frame.ProgramCounter) +} + +// String returns the stackframe formatted in the same way as go does +// in runtime/debug.Stack() +func (frame *StackFrame) String() string { + str := fmt.Sprintf("%s:%d (0x%x)\n", frame.File, frame.LineNumber, frame.ProgramCounter) + + source, err := frame.SourceLine() + if err != nil { + return str + } + + return str + fmt.Sprintf("\t%s: %s\n", frame.Name, source) +} + +// SourceLine gets the line of code (from File and Line) of the original source if possible +func (frame *StackFrame) SourceLine() (string, error) { + data, err := ioutil.ReadFile(frame.File) + + if err != nil { + return "", err + } + + lines := bytes.Split(data, []byte{'\n'}) + if frame.LineNumber <= 0 || frame.LineNumber >= len(lines) { + return "???", nil + } + // -1 because line-numbers are 1 based, but our array is 0 based + return string(bytes.Trim(lines[frame.LineNumber-1], " \t")), nil +} + +func packageAndName(fn *runtime.Func) (string, string) { + name := fn.Name() + pkg := "" + + // The name includes the path name to the package, which is unnecessary + // since the file name is already included. Plus, it has center dots. + // That is, we see + // runtime/debug.*T·ptrmethod + // and want + // *T.ptrmethod + // Since the package path might contains dots (e.g. code.google.com/...), + // we first remove the path prefix if there is one. + if lastslash := strings.LastIndex(name, "/"); lastslash >= 0 { + pkg += name[:lastslash] + "/" + name = name[lastslash+1:] + } + if period := strings.Index(name, "."); period >= 0 { + pkg += name[:period] + name = name[period+1:] + } + + name = strings.Replace(name, "·", ".", -1) + return pkg, name +} diff --git a/vendor/github.com/docker/distribution/vendor/github.com/bugsnag/bugsnag-go/event.go b/vendor/github.com/docker/distribution/vendor/github.com/bugsnag/bugsnag-go/event.go new file mode 100644 index 000000000..1586ef3f0 --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/github.com/bugsnag/bugsnag-go/event.go @@ -0,0 +1,134 @@ +package bugsnag + +import ( + "strings" + + "github.com/bugsnag/bugsnag-go/errors" +) + +// Context is the context of the error in Bugsnag. +// This can be passed to Notify, Recover or AutoNotify as rawData. +type Context struct { + String string +} + +// User represents the searchable user-data on Bugsnag. The Id is also used +// to determine the number of users affected by a bug. This can be +// passed to Notify, Recover or AutoNotify as rawData. +type User struct { + Id string `json:"id,omitempty"` + Name string `json:"name,omitempty"` + Email string `json:"email,omitempty"` +} + +// Sets the severity of the error on Bugsnag. These values can be +// passed to Notify, Recover or AutoNotify as rawData. 
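+// When no severity is supplied explicitly, AutoNotify defaults to SeverityError and Recover defaults to SeverityWarning (see bugsnag.go above).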
+var ( + SeverityError = severity{"error"} + SeverityWarning = severity{"warning"} + SeverityInfo = severity{"info"} +) + +// The severity tag type, private so that people can only use Error,Warning,Info +type severity struct { + String string +} + +// The form of stacktrace that Bugsnag expects +type stackFrame struct { + Method string `json:"method"` + File string `json:"file"` + LineNumber int `json:"lineNumber"` + InProject bool `json:"inProject,omitempty"` +} + +// Event represents a payload of data that gets sent to Bugsnag. +// This is passed to each OnBeforeNotify hook. +type Event struct { + + // The original error that caused this event, not sent to Bugsnag. + Error *errors.Error + + // The rawData affecting this error, not sent to Bugsnag. + RawData []interface{} + + // The error class to be sent to Bugsnag. This defaults to the type name of the Error, for + // example *error.String + ErrorClass string + // The error message to be sent to Bugsnag. This defaults to the return value of Error.Error() + Message string + // The stacktrrace of the error to be sent to Bugsnag. + Stacktrace []stackFrame + + // The context to be sent to Bugsnag. This should be set to the part of the app that was running, + // e.g. for http requests, set it to the path. + Context string + // The severity of the error. Can be SeverityError, SeverityWarning or SeverityInfo. + Severity severity + // The grouping hash is used to override Bugsnag's grouping. Set this if you'd like all errors with + // the same grouping hash to group together in the dashboard. + GroupingHash string + + // User data to send to Bugsnag. This is searchable on the dashboard. + User *User + // Other MetaData to send to Bugsnag. Appears as a set of tabbed tables in the dashboard. + MetaData MetaData +} + +func newEvent(err *errors.Error, rawData []interface{}, notifier *Notifier) (*Event, *Configuration) { + + config := notifier.Config + event := &Event{ + Error: err, + RawData: append(notifier.RawData, rawData...), + + ErrorClass: err.TypeName(), + Message: err.Error(), + Stacktrace: make([]stackFrame, len(err.StackFrames())), + + Severity: SeverityWarning, + + MetaData: make(MetaData), + } + + for _, datum := range event.RawData { + switch datum := datum.(type) { + case severity: + event.Severity = datum + + case Context: + event.Context = datum.String + + case Configuration: + config = config.merge(&datum) + + case MetaData: + event.MetaData.Update(datum) + + case User: + event.User = &datum + } + } + + for i, frame := range err.StackFrames() { + file := frame.File + inProject := config.isProjectPackage(frame.Package) + + // remove $GOROOT and $GOHOME from other frames + if idx := strings.Index(file, frame.Package); idx > -1 { + file = file[idx:] + } + if inProject { + file = config.stripProjectPackages(file) + } + + event.Stacktrace[i] = stackFrame{ + Method: frame.Name, + File: file, + LineNumber: frame.LineNumber, + InProject: inProject, + } + } + + return event, config +} diff --git a/vendor/github.com/docker/distribution/vendor/github.com/bugsnag/bugsnag-go/json_tags.go b/vendor/github.com/docker/distribution/vendor/github.com/bugsnag/bugsnag-go/json_tags.go new file mode 100644 index 000000000..45be38fa9 --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/github.com/bugsnag/bugsnag-go/json_tags.go @@ -0,0 +1,43 @@ +// The code is stripped from: +// http://golang.org/src/pkg/encoding/json/tags.go?m=text + +package bugsnag + +import ( + "strings" +) + +// tagOptions is the string following a comma in a struct 
field's "json" +// tag, or the empty string. It does not include the leading comma. +type tagOptions string + +// parseTag splits a struct field's json tag into its name and +// comma-separated options. +func parseTag(tag string) (string, tagOptions) { + if idx := strings.Index(tag, ","); idx != -1 { + return tag[:idx], tagOptions(tag[idx+1:]) + } + return tag, tagOptions("") +} + +// Contains reports whether a comma-separated list of options +// contains a particular substr flag. substr must be surrounded by a +// string boundary or commas. +func (o tagOptions) Contains(optionName string) bool { + if len(o) == 0 { + return false + } + s := string(o) + for s != "" { + var next string + i := strings.Index(s, ",") + if i >= 0 { + s, next = s[:i], s[i+1:] + } + if s == optionName { + return true + } + s = next + } + return false +} diff --git a/vendor/github.com/docker/distribution/vendor/github.com/bugsnag/bugsnag-go/metadata.go b/vendor/github.com/docker/distribution/vendor/github.com/bugsnag/bugsnag-go/metadata.go new file mode 100644 index 000000000..ffe64e21e --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/github.com/bugsnag/bugsnag-go/metadata.go @@ -0,0 +1,185 @@ +package bugsnag + +import ( + "fmt" + "reflect" + "strings" +) + +// MetaData is added to the Bugsnag dashboard in tabs. Each tab is +// a map of strings -> values. You can pass MetaData to Notify, Recover +// and AutoNotify as rawData. +type MetaData map[string]map[string]interface{} + +// Update the meta-data with more information. Tabs are merged together such +// that unique keys from both sides are preserved, and duplicate keys end up +// with the provided values. +func (meta MetaData) Update(other MetaData) { + for name, tab := range other { + + if meta[name] == nil { + meta[name] = make(map[string]interface{}) + } + + for key, value := range tab { + meta[name][key] = value + } + } +} + +// Add creates a tab of Bugsnag meta-data. +// If the tab doesn't yet exist it will be created. +// If the key already exists, it will be overwritten. +func (meta MetaData) Add(tab string, key string, value interface{}) { + if meta[tab] == nil { + meta[tab] = make(map[string]interface{}) + } + + meta[tab][key] = value +} + +// AddStruct creates a tab of Bugsnag meta-data. +// The struct will be converted to an Object using the +// reflect library so any private fields will not be exported. +// As a safety measure, if you pass a non-struct the value will be +// sent to Bugsnag under the "Extra data" tab. +func (meta MetaData) AddStruct(tab string, obj interface{}) { + val := sanitizer{}.Sanitize(obj) + content, ok := val.(map[string]interface{}) + if ok { + meta[tab] = content + } else { + // Wasn't a struct + meta.Add("Extra data", tab, obj) + } + +} + +// Remove any values from meta-data that have keys matching the filters, +// and any that are recursive data-structures +func (meta MetaData) sanitize(filters []string) interface{} { + return sanitizer{ + Filters: filters, + Seen: make([]interface{}, 0), + }.Sanitize(meta) + +} + +// The sanitizer is used to remove filtered params and recursion from meta-data. +type sanitizer struct { + Filters []string + Seen []interface{} +} + +func (s sanitizer) Sanitize(data interface{}) interface{} { + for _, s := range s.Seen { + // TODO: we don't need deep equal here, just type-ignoring equality + if reflect.DeepEqual(data, s) { + return "[RECURSION]" + } + } + + // Sanitizers are passed by value, so we can modify s and it only affects + // s.Seen for nested calls. 
+ s.Seen = append(s.Seen, data) + + t := reflect.TypeOf(data) + v := reflect.ValueOf(data) + + switch t.Kind() { + case reflect.Bool, + reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, + reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr, + reflect.Float32, reflect.Float64: + return data + + case reflect.String: + return data + + case reflect.Interface, reflect.Ptr: + return s.Sanitize(v.Elem().Interface()) + + case reflect.Array, reflect.Slice: + ret := make([]interface{}, v.Len()) + for i := 0; i < v.Len(); i++ { + ret[i] = s.Sanitize(v.Index(i).Interface()) + } + return ret + + case reflect.Map: + return s.sanitizeMap(v) + + case reflect.Struct: + return s.sanitizeStruct(v, t) + + // Things JSON can't serialize: + // case t.Chan, t.Func, reflect.Complex64, reflect.Complex128, reflect.UnsafePointer: + default: + return "[" + t.String() + "]" + + } + +} + +func (s sanitizer) sanitizeMap(v reflect.Value) interface{} { + ret := make(map[string]interface{}) + + for _, key := range v.MapKeys() { + val := s.Sanitize(v.MapIndex(key).Interface()) + newKey := fmt.Sprintf("%v", key.Interface()) + + if s.shouldRedact(newKey) { + val = "[REDACTED]" + } + + ret[newKey] = val + } + + return ret +} + +func (s sanitizer) sanitizeStruct(v reflect.Value, t reflect.Type) interface{} { + ret := make(map[string]interface{}) + + for i := 0; i < v.NumField(); i++ { + + val := v.Field(i) + // Don't export private fields + if !val.CanInterface() { + continue + } + + name := t.Field(i).Name + var opts tagOptions + + // Parse JSON tags. Supports name and "omitempty" + if jsonTag := t.Field(i).Tag.Get("json"); len(jsonTag) != 0 { + name, opts = parseTag(jsonTag) + } + + if s.shouldRedact(name) { + ret[name] = "[REDACTED]" + } else { + sanitized := s.Sanitize(val.Interface()) + if str, ok := sanitized.(string); ok { + if !(opts.Contains("omitempty") && len(str) == 0) { + ret[name] = str + } + } else { + ret[name] = sanitized + } + + } + } + + return ret +} + +func (s sanitizer) shouldRedact(key string) bool { + for _, filter := range s.Filters { + if strings.Contains(strings.ToLower(filter), strings.ToLower(key)) { + return true + } + } + return false +} diff --git a/vendor/github.com/docker/distribution/vendor/github.com/bugsnag/bugsnag-go/middleware.go b/vendor/github.com/docker/distribution/vendor/github.com/bugsnag/bugsnag-go/middleware.go new file mode 100644 index 000000000..266d5e461 --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/github.com/bugsnag/bugsnag-go/middleware.go @@ -0,0 +1,96 @@ +package bugsnag + +import ( + "net/http" + "strings" +) + +type ( + beforeFunc func(*Event, *Configuration) error + + // MiddlewareStacks keep middleware in the correct order. They are + // called in reverse order, so if you add a new middleware it will + // be called before all existing middleware. + middlewareStack struct { + before []beforeFunc + } +) + +// AddMiddleware adds a new middleware to the outside of the existing ones, +// when the middlewareStack is Run it will be run before all middleware that +// have been added before. +func (stack *middlewareStack) OnBeforeNotify(middleware beforeFunc) { + stack.before = append(stack.before, middleware) +} + +// Run causes all the middleware to be run. If they all permit it the next callback +// will be called with all the middleware on the stack. 
+func (stack *middlewareStack) Run(event *Event, config *Configuration, next func() error) error { + // run all the before filters in reverse order + for i := range stack.before { + before := stack.before[len(stack.before)-i-1] + + err := stack.runBeforeFilter(before, event, config) + if err != nil { + return err + } + } + + return next() +} + +func (stack *middlewareStack) runBeforeFilter(f beforeFunc, event *Event, config *Configuration) error { + defer func() { + if err := recover(); err != nil { + config.log("bugsnag/middleware: unexpected panic: %v", err) + } + }() + + return f(event, config) +} + +// catchMiddlewarePanic is used to log any panics that happen inside Middleware, +// we wouldn't want to not notify Bugsnag in this case. +func catchMiddlewarePanic(event *Event, config *Configuration, next func() error) { +} + +// httpRequestMiddleware is added OnBeforeNotify by default. It takes information +// from an http.Request passed in as rawData, and adds it to the Event. You can +// use this as a template for writing your own Middleware. +func httpRequestMiddleware(event *Event, config *Configuration) error { + for _, datum := range event.RawData { + if request, ok := datum.(*http.Request); ok { + proto := "http://" + if request.TLS != nil { + proto = "https://" + } + + event.MetaData.Update(MetaData{ + "Request": { + "RemoteAddr": request.RemoteAddr, + "Method": request.Method, + "Url": proto + request.Host + request.RequestURI, + "Params": request.URL.Query(), + }, + }) + + // Add headers as a separate tab. + event.MetaData.AddStruct("Headers", request.Header) + + // Default context to Path + if event.Context == "" { + event.Context = request.URL.Path + } + + // Default user.id to IP so that users-affected works. + if event.User == nil { + ip := request.RemoteAddr + if idx := strings.LastIndex(ip, ":"); idx != -1 { + ip = ip[:idx] + } + event.User = &User{Id: ip} + } + } + } + return nil +} diff --git a/vendor/github.com/docker/distribution/vendor/github.com/bugsnag/bugsnag-go/notifier.go b/vendor/github.com/docker/distribution/vendor/github.com/bugsnag/bugsnag-go/notifier.go new file mode 100644 index 000000000..6b1081787 --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/github.com/bugsnag/bugsnag-go/notifier.go @@ -0,0 +1,95 @@ +package bugsnag + +import ( + "fmt" + + "github.com/bugsnag/bugsnag-go/errors" +) + +// Notifier sends errors to Bugsnag. +type Notifier struct { + Config *Configuration + RawData []interface{} +} + +// New creates a new notifier. +// You can pass an instance of bugsnag.Configuration in rawData to change the configuration. +// Other values of rawData will be passed to Notify. +func New(rawData ...interface{}) *Notifier { + config := Config.clone() + for i, datum := range rawData { + if c, ok := datum.(Configuration); ok { + config.update(&c) + rawData[i] = nil + } + } + + return &Notifier{ + Config: config, + RawData: rawData, + } +} + +// Notify sends an error to Bugsnag. Any rawData you pass here will be sent to +// Bugsnag after being converted to JSON. e.g. bugsnag.SeverityError, bugsnag.Context, +// or bugsnag.MetaData. +func (notifier *Notifier) Notify(err error, rawData ...interface{}) (e error) { + event, config := newEvent(errors.New(err, 1), rawData, notifier) + + // Never block, start throwing away errors if we have too many. 
+ e = middleware.Run(event, config, func() error { + config.log("notifying bugsnag: %s", event.Message) + if config.notifyInReleaseStage() { + if config.Synchronous { + return (&payload{event, config}).deliver() + } + go (&payload{event, config}).deliver() + return nil + } + return fmt.Errorf("not notifying in %s", config.ReleaseStage) + }) + + if e != nil { + config.log("bugsnag.Notify: %v", e) + } + return e +} + +// AutoNotify notifies Bugsnag of any panics, then repanics. +// It sends along any rawData that gets passed in. +// Usage: defer AutoNotify() +func (notifier *Notifier) AutoNotify(rawData ...interface{}) { + if err := recover(); err != nil { + rawData = notifier.addDefaultSeverity(rawData, SeverityError) + notifier.Notify(errors.New(err, 2), rawData...) + panic(err) + } +} + +// Recover logs any panics, then recovers. +// It sends along any rawData that gets passed in. +// Usage: defer Recover() +func (notifier *Notifier) Recover(rawData ...interface{}) { + if err := recover(); err != nil { + rawData = notifier.addDefaultSeverity(rawData, SeverityWarning) + notifier.Notify(errors.New(err, 2), rawData...) + } +} + +func (notifier *Notifier) dontPanic() { + if err := recover(); err != nil { + notifier.Config.log("bugsnag/notifier.Notify: panic! %s", err) + } +} + +// Add a severity to raw data only if the default is not set. +func (notifier *Notifier) addDefaultSeverity(rawData []interface{}, s severity) []interface{} { + + for _, datum := range append(notifier.RawData, rawData...) { + if _, ok := datum.(severity); ok { + return rawData + } + } + + return append(rawData, s) +} diff --git a/vendor/github.com/docker/distribution/vendor/github.com/bugsnag/bugsnag-go/panicwrap.go b/vendor/github.com/docker/distribution/vendor/github.com/bugsnag/bugsnag-go/panicwrap.go new file mode 100644 index 000000000..14fb9fa8d --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/github.com/bugsnag/bugsnag-go/panicwrap.go @@ -0,0 +1,27 @@ +// +build !appengine + +package bugsnag + +import ( + "github.com/bugsnag/panicwrap" + "github.com/bugsnag/bugsnag-go/errors" +) + +// NOTE: this function does not return when you call it, instead it +// re-exec()s the current process with panic monitoring. 
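+// Configure installs this handler once via once.Do; passing PanicHandler: func() {} in Configuration disables the fork-based panic monitoring.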
+func defaultPanicHandler() { + defer defaultNotifier.dontPanic() + + err := panicwrap.BasicMonitor(func(output string) { + toNotify, err := errors.ParsePanic(output) + + if err != nil { + defaultNotifier.Config.log("bugsnag.handleUncaughtPanic: %v", err) + } + Notify(toNotify, SeverityError, Configuration{Synchronous: true}) + }) + + if err != nil { + defaultNotifier.Config.log("bugsnag.handleUncaughtPanic: %v", err) + } +} diff --git a/vendor/github.com/docker/distribution/vendor/github.com/bugsnag/bugsnag-go/payload.go b/vendor/github.com/docker/distribution/vendor/github.com/bugsnag/bugsnag-go/payload.go new file mode 100644 index 000000000..a516a5d2d --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/github.com/bugsnag/bugsnag-go/payload.go @@ -0,0 +1,96 @@ +package bugsnag + +import ( + "bytes" + "encoding/json" + "fmt" + "net/http" +) + +type payload struct { + *Event + *Configuration +} + +type hash map[string]interface{} + +func (p *payload) deliver() error { + + if len(p.APIKey) != 32 { + return fmt.Errorf("bugsnag/payload.deliver: invalid api key") + } + + buf, err := json.Marshal(p) + + if err != nil { + return fmt.Errorf("bugsnag/payload.deliver: %v", err) + } + + client := http.Client{ + Transport: p.Transport, + } + + resp, err := client.Post(p.Endpoint, "application/json", bytes.NewBuffer(buf)) + + if err != nil { + return fmt.Errorf("bugsnag/payload.deliver: %v", err) + } + defer resp.Body.Close() + + if resp.StatusCode != 200 { + return fmt.Errorf("bugsnag/payload.deliver: Got HTTP %s\n", resp.Status) + } + + return nil +} + +func (p *payload) MarshalJSON() ([]byte, error) { + + data := hash{ + "apiKey": p.APIKey, + + "notifier": hash{ + "name": "Bugsnag Go", + "url": "https://github.com/bugsnag/bugsnag-go", + "version": VERSION, + }, + + "events": []hash{ + { + "payloadVersion": "2", + "exceptions": []hash{ + { + "errorClass": p.ErrorClass, + "message": p.Message, + "stacktrace": p.Stacktrace, + }, + }, + "severity": p.Severity.String, + "app": hash{ + "releaseStage": p.ReleaseStage, + }, + "user": p.User, + "metaData": p.MetaData.sanitize(p.ParamsFilters), + }, + }, + } + + event := data["events"].([]hash)[0] + + if p.Context != "" { + event["context"] = p.Context + } + if p.GroupingHash != "" { + event["groupingHash"] = p.GroupingHash + } + if p.Hostname != "" { + event["device"] = hash{ + "hostname": p.Hostname, + } + } + if p.AppVersion != "" { + event["app"].(hash)["version"] = p.AppVersion + } + return json.Marshal(data) + +} diff --git a/vendor/github.com/docker/distribution/vendor/github.com/bugsnag/osext/LICENSE b/vendor/github.com/docker/distribution/vendor/github.com/bugsnag/osext/LICENSE new file mode 100644 index 000000000..18527a28f --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/github.com/bugsnag/osext/LICENSE @@ -0,0 +1,20 @@ +Copyright (c) 2012 Daniel Theophanes + +This software is provided 'as-is', without any express or implied +warranty. In no event will the authors be held liable for any damages +arising from the use of this software. + +Permission is granted to anyone to use this software for any purpose, +including commercial applications, and to alter it and redistribute it +freely, subject to the following restrictions: + + 1. The origin of this software must not be misrepresented; you must not + claim that you wrote the original software. If you use this software + in a product, an acknowledgment in the product documentation would be + appreciated but is not required. + + 2. 
Altered source versions must be plainly marked as such, and must not be + misrepresented as being the original software. + + 3. This notice may not be removed or altered from any source + distribution. diff --git a/vendor/github.com/docker/distribution/vendor/github.com/bugsnag/osext/osext.go b/vendor/github.com/docker/distribution/vendor/github.com/bugsnag/osext/osext.go new file mode 100644 index 000000000..37efbb221 --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/github.com/bugsnag/osext/osext.go @@ -0,0 +1,32 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Extensions to the standard "os" package. +package osext + +import "path/filepath" + +// Executable returns an absolute path that can be used to +// re-invoke the current program. +// It may not be valid after the current program exits. +func Executable() (string, error) { + p, err := executable() + return filepath.Clean(p), err +} + +// Returns same path as Executable, returns just the folder +// path. Excludes the executable name. +func ExecutableFolder() (string, error) { + p, err := Executable() + if err != nil { + return "", err + } + folder, _ := filepath.Split(p) + return folder, nil +} + +// Depricated. Same as Executable(). +func GetExePath() (exePath string, err error) { + return Executable() +} diff --git a/vendor/github.com/docker/distribution/vendor/github.com/bugsnag/osext/osext_plan9.go b/vendor/github.com/docker/distribution/vendor/github.com/bugsnag/osext/osext_plan9.go new file mode 100644 index 000000000..e88c1e093 --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/github.com/bugsnag/osext/osext_plan9.go @@ -0,0 +1,16 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package osext + +import "syscall" + +func executable() (string, error) { + f, err := Open("/proc/" + itoa(Getpid()) + "/text") + if err != nil { + return "", err + } + defer f.Close() + return syscall.Fd2path(int(f.Fd())) +} diff --git a/vendor/github.com/docker/distribution/vendor/github.com/bugsnag/osext/osext_procfs.go b/vendor/github.com/docker/distribution/vendor/github.com/bugsnag/osext/osext_procfs.go new file mode 100644 index 000000000..546fec915 --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/github.com/bugsnag/osext/osext_procfs.go @@ -0,0 +1,25 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build linux netbsd openbsd + +package osext + +import ( + "errors" + "os" + "runtime" +) + +func executable() (string, error) { + switch runtime.GOOS { + case "linux": + return os.Readlink("/proc/self/exe") + case "netbsd": + return os.Readlink("/proc/curproc/exe") + case "openbsd": + return os.Readlink("/proc/curproc/file") + } + return "", errors.New("ExecPath not implemented for " + runtime.GOOS) +} diff --git a/vendor/github.com/docker/distribution/vendor/github.com/bugsnag/osext/osext_sysctl.go b/vendor/github.com/docker/distribution/vendor/github.com/bugsnag/osext/osext_sysctl.go new file mode 100644 index 000000000..d76464628 --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/github.com/bugsnag/osext/osext_sysctl.go @@ -0,0 +1,64 @@ +// Copyright 2012 The Go Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build darwin freebsd + +package osext + +import ( + "os" + "runtime" + "syscall" + "unsafe" +) + +var startUpcwd, getwdError = os.Getwd() + +func executable() (string, error) { + var mib [4]int32 + switch runtime.GOOS { + case "freebsd": + mib = [4]int32{1 /* CTL_KERN */, 14 /* KERN_PROC */, 12 /* KERN_PROC_PATHNAME */, -1} + case "darwin": + mib = [4]int32{1 /* CTL_KERN */, 38 /* KERN_PROCARGS */, int32(os.Getpid()), -1} + } + + n := uintptr(0) + // get length + _, _, err := syscall.Syscall6(syscall.SYS___SYSCTL, uintptr(unsafe.Pointer(&mib[0])), 4, 0, uintptr(unsafe.Pointer(&n)), 0, 0) + if err != 0 { + return "", err + } + if n == 0 { // shouldn't happen + return "", nil + } + buf := make([]byte, n) + _, _, err = syscall.Syscall6(syscall.SYS___SYSCTL, uintptr(unsafe.Pointer(&mib[0])), 4, uintptr(unsafe.Pointer(&buf[0])), uintptr(unsafe.Pointer(&n)), 0, 0) + if err != 0 { + return "", err + } + if n == 0 { // shouldn't happen + return "", nil + } + for i, v := range buf { + if v == 0 { + buf = buf[:i] + break + } + } + if buf[0] != '/' { + if getwdError != nil { + return string(buf), getwdError + } else { + if buf[0] == '.' { + buf = buf[1:] + } + if startUpcwd[len(startUpcwd)-1] != '/' { + return startUpcwd + "/" + string(buf), nil + } + return startUpcwd + string(buf), nil + } + } + return string(buf), nil +} diff --git a/vendor/github.com/docker/distribution/vendor/github.com/bugsnag/osext/osext_windows.go b/vendor/github.com/docker/distribution/vendor/github.com/bugsnag/osext/osext_windows.go new file mode 100644 index 000000000..72d282cf8 --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/github.com/bugsnag/osext/osext_windows.go @@ -0,0 +1,34 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package osext + +import ( + "syscall" + "unicode/utf16" + "unsafe" +) + +var ( + kernel = syscall.MustLoadDLL("kernel32.dll") + getModuleFileNameProc = kernel.MustFindProc("GetModuleFileNameW") +) + +// GetModuleFileName() with hModule = NULL +func executable() (exePath string, err error) { + return getModuleFileName() +} + +func getModuleFileName() (string, error) { + var n uint32 + b := make([]uint16, syscall.MAX_PATH) + size := uint32(len(b)) + + r0, _, e1 := getModuleFileNameProc.Call(0, uintptr(unsafe.Pointer(&b[0])), uintptr(size)) + n = uint32(r0) + if n == 0 { + return "", e1 + } + return string(utf16.Decode(b[0:n])), nil +} diff --git a/vendor/github.com/docker/distribution/vendor/github.com/bugsnag/panicwrap/LICENSE b/vendor/github.com/docker/distribution/vendor/github.com/bugsnag/panicwrap/LICENSE new file mode 100644 index 000000000..f9c841a51 --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/github.com/bugsnag/panicwrap/LICENSE @@ -0,0 +1,21 @@ +The MIT License (MIT) + +Copyright (c) 2013 Mitchell Hashimoto + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. diff --git a/vendor/github.com/docker/distribution/vendor/github.com/bugsnag/panicwrap/README.md b/vendor/github.com/docker/distribution/vendor/github.com/bugsnag/panicwrap/README.md new file mode 100644 index 000000000..d0a596753 --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/github.com/bugsnag/panicwrap/README.md @@ -0,0 +1,101 @@ +# panicwrap + +panicwrap is a Go library that re-executes a Go binary and monitors stderr +output from the binary for a panic. When it find a panic, it executes a +user-defined handler function. Stdout, stderr, stdin, signals, and exit +codes continue to work as normal, making the existence of panicwrap mostly +invisble to the end user until a panic actually occurs. + +Since a panic is truly a bug in the program meant to crash the runtime, +globally catching panics within Go applications is not supposed to be possible. +Despite this, it is often useful to have a way to know when panics occur. +panicwrap allows you to do something with these panics, such as writing them +to a file, so that you can track when panics occur. + +panicwrap is ***not a panic recovery system***. Panics indicate serious +problems with your application and _should_ crash the runtime. panicwrap +is just meant as a way to monitor for panics. If you still think this is +the worst idea ever, read the section below on why. 
+ +## Features + +* **SIMPLE!** +* Works with all Go applications on all platforms Go supports +* Custom behavior when a panic occurs +* Stdout, stderr, stdin, exit codes, and signals continue to work as + expected. + +## Usage + +Using panicwrap is simple. It behaves a lot like `fork`, if you know +how that works. A basic example is shown below. + +Because it would be sad to panic while capturing a panic, it is recommended +that the handler functions for panicwrap remain relatively simple and well +tested. panicwrap itself contains many tests. + +```go +package main + +import ( + "fmt" + "github.com/mitchellh/panicwrap" + "os" +) + +func main() { + exitStatus, err := panicwrap.BasicWrap(panicHandler) + if err != nil { + // Something went wrong setting up the panic wrapper. Unlikely, + // but possible. + panic(err) + } + + // If exitStatus >= 0, then we're the parent process and the panicwrap + // re-executed ourselves and completed. Just exit with the proper status. + if exitStatus >= 0 { + os.Exit(exitStatus) + } + + // Otherwise, exitStatus < 0 means we're the child. Continue executing as + // normal... + + // Let's say we panic + panic("oh shucks") +} + +func panicHandler(output string) { + // output contains the full output (including stack traces) of the + // panic. Put it in a file or something. + fmt.Printf("The child panicked:\n\n%s\n", output) + os.Exit(1) +} +``` + +## How Does it Work? + +panicwrap works by re-executing the running program (retaining arguments, +environmental variables, etc.) and monitoring the stderr of the program. +Since Go always outputs panics in a predictable way with a predictable +exit code, panicwrap is able to reliably detect panics and allow the parent +process to handle them. + +## WHY?! Panics should CRASH! + +Yes, panics _should_ crash. They are 100% always indicative of bugs. +However, in some cases, such as user-facing programs (programs like +[Packer](http://github.com/mitchellh/packer) or +[Docker](http://github.com/dotcloud/docker)), it is up to the user to +report such panics. This is unreliable, at best, and it would be better if the +program could have a way to automatically report panics. panicwrap provides +a way to do this. + +For backend applications, it is easier to detect crashes (since the application +exits). However, it is still nice sometimes to more intelligently log +panics in some way. For example, at [HashiCorp](http://www.hashicorp.com), +we use panicwrap to log panics to timestamped files with some additional +data (configuration settings at the time, environmental variables, etc.) + +The goal of panicwrap is _not_ to hide panics. It is instead to provide +a clean mechanism for handling them before bubbling the up to the user +and ultimately crashing. 
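Aside on the vendored API that follows: besides `BasicWrap`, the `panicwrap` package added in this diff exposes `Wrap(*WrapConfig)` for finer control (a custom panic-detection window, hiding the duplicated panic output, custom writers). The sketch below is illustrative only, not part of the diff; it uses the `WrapConfig` fields and `Wrap` return semantics from the vendored `panicwrap.go` further down, and assumes the bugsnag fork's import path used elsewhere in this vendor tree. The handler body and timings are hypothetical.

```go
package main

import (
	"fmt"
	"os"
	"time"

	"github.com/bugsnag/panicwrap"
)

func main() {
	exitStatus, err := panicwrap.Wrap(&panicwrap.WrapConfig{
		// Handler receives the full panic output (message plus stack trace).
		Handler: func(output string) {
			fmt.Fprintf(os.Stderr, "caught a panic:\n%s\n", output)
		},
		// Do not mirror the panic to the writer; only the handler sees it.
		HidePanic: true,
		// Allow a little longer than the default 300ms before deciding
		// that "panic:"-looking stderr output really was a panic.
		DetectDuration: 500 * time.Millisecond,
	})
	if err != nil {
		// Something went wrong setting up the wrapper itself.
		panic(err)
	}
	if exitStatus >= 0 {
		// Parent process: the re-executed child has already run; exit with its status.
		os.Exit(exitStatus)
	}

	// exitStatus < 0: we are the wrapped child; continue with the real program.
	panic("demonstration panic")
}
```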
diff --git a/vendor/github.com/docker/distribution/vendor/github.com/bugsnag/panicwrap/dup2.go b/vendor/github.com/docker/distribution/vendor/github.com/bugsnag/panicwrap/dup2.go new file mode 100644 index 000000000..de523c83d --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/github.com/bugsnag/panicwrap/dup2.go @@ -0,0 +1,11 @@ +// +build darwin dragonfly freebsd linux,!arm64 netbsd openbsd + +package panicwrap + +import ( + "syscall" +) + +func dup2(oldfd, newfd int) error { + return syscall.Dup2(oldfd, newfd) +} diff --git a/vendor/github.com/docker/distribution/vendor/github.com/bugsnag/panicwrap/dup3.go b/vendor/github.com/docker/distribution/vendor/github.com/bugsnag/panicwrap/dup3.go new file mode 100644 index 000000000..9721b36cc --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/github.com/bugsnag/panicwrap/dup3.go @@ -0,0 +1,11 @@ +// +build linux,arm64 + +package panicwrap + +import ( + "syscall" +) + +func dup2(oldfd, newfd int) error { + return syscall.Dup3(oldfd, newfd, 0) +} diff --git a/vendor/github.com/docker/distribution/vendor/github.com/bugsnag/panicwrap/monitor.go b/vendor/github.com/docker/distribution/vendor/github.com/bugsnag/panicwrap/monitor.go new file mode 100644 index 000000000..72b418a27 --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/github.com/bugsnag/panicwrap/monitor.go @@ -0,0 +1,62 @@ +// +build !windows + +package panicwrap + +import ( + "github.com/bugsnag/osext" + "os" + "os/exec" +) + +func monitor(c *WrapConfig) (int, error) { + + // If we're the child process, absorb panics. + if Wrapped(c) { + panicCh := make(chan string) + + go trackPanic(os.Stdin, os.Stderr, c.DetectDuration, panicCh) + + // Wait on the panic data + panicTxt := <-panicCh + if panicTxt != "" { + if !c.HidePanic { + os.Stderr.Write([]byte(panicTxt)) + } + + c.Handler(panicTxt) + } + + os.Exit(0) + } + + exePath, err := osext.Executable() + if err != nil { + return -1, err + } + cmd := exec.Command(exePath, os.Args[1:]...) + + read, write, err := os.Pipe() + if err != nil { + return -1, err + } + + cmd.Stdin = read + cmd.Stdout = os.Stdout + cmd.Stderr = os.Stderr + cmd.Env = append(os.Environ(), c.CookieKey+"="+c.CookieValue) + + if err != nil { + return -1, err + } + err = cmd.Start() + if err != nil { + return -1, err + } + + err = dup2(int(write.Fd()), int(os.Stderr.Fd())) + if err != nil { + return -1, err + } + + return -1, nil +} diff --git a/vendor/github.com/docker/distribution/vendor/github.com/bugsnag/panicwrap/monitor_windows.go b/vendor/github.com/docker/distribution/vendor/github.com/bugsnag/panicwrap/monitor_windows.go new file mode 100644 index 000000000..d07a69218 --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/github.com/bugsnag/panicwrap/monitor_windows.go @@ -0,0 +1,7 @@ +package panicwrap + +import "fmt" + +func monitor(c *WrapConfig) (int, error) { + return -1, fmt.Errorf("Monitor is not supported on windows") +} diff --git a/vendor/github.com/docker/distribution/vendor/github.com/bugsnag/panicwrap/panicwrap.go b/vendor/github.com/docker/distribution/vendor/github.com/bugsnag/panicwrap/panicwrap.go new file mode 100644 index 000000000..f9ea3e3e6 --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/github.com/bugsnag/panicwrap/panicwrap.go @@ -0,0 +1,339 @@ +// The panicwrap package provides functions for capturing and handling +// panics in your application. It does this by re-executing the running +// application and monitoring stderr for any panics. 
At the same time, +// stdout/stderr/etc. are set to the same values so that data is shuttled +// through properly, making the existence of panicwrap mostly transparent. +// +// Panics are only detected when the subprocess exits with a non-zero +// exit status, since this is the only time panics are real. Otherwise, +// "panic-like" output is ignored. +package panicwrap + +import ( + "bytes" + "errors" + "github.com/bugsnag/osext" + "io" + "os" + "os/exec" + "os/signal" + "runtime" + "syscall" + "time" +) + +const ( + DEFAULT_COOKIE_KEY = "cccf35992f8f3cd8d1d28f0109dd953e26664531" + DEFAULT_COOKIE_VAL = "7c28215aca87789f95b406b8dd91aa5198406750" +) + +// HandlerFunc is the type called when a panic is detected. +type HandlerFunc func(string) + +// WrapConfig is the configuration for panicwrap when wrapping an existing +// binary. To get started, in general, you only need the BasicWrap function +// that will set this up for you. However, for more customizability, +// WrapConfig and Wrap can be used. +type WrapConfig struct { + // Handler is the function called when a panic occurs. + Handler HandlerFunc + + // The cookie key and value are used within environmental variables + // to tell the child process that it is already executing so that + // wrap doesn't re-wrap itself. + CookieKey string + CookieValue string + + // If true, the panic will not be mirrored to the configured writer + // and will instead ONLY go to the handler. This lets you effectively + // hide panics from the end user. This is not recommended because if + // your handler fails, the panic is effectively lost. + HidePanic bool + + // If true, panicwrap will boot a monitor sub-process and let the parent + // run the app. This mode is useful for processes run under supervisors + // like runit as signals get sent to the correct codebase. This is not + // supported when GOOS=windows, and ignores c.Stderr and c.Stdout. + Monitor bool + + // The amount of time that a process must exit within after detecting + // a panic header for panicwrap to assume it is a panic. Defaults to + // 300 milliseconds. + DetectDuration time.Duration + + // The writer to send the stderr to. If this is nil, then it defaults + // to os.Stderr. + Writer io.Writer + + // The writer to send stdout to. If this is nil, then it defaults to + // os.Stdout. + Stdout io.Writer +} + +// BasicWrap calls Wrap with the given handler function, using defaults +// for everything else. See Wrap and WrapConfig for more information on +// functionality and return values. +func BasicWrap(f HandlerFunc) (int, error) { + return Wrap(&WrapConfig{ + Handler: f, + }) +} + +// BasicMonitor calls Wrap with Monitor set to true on supported platforms. +// It forks your program and runs it again form the start. In one process +// BasicMonitor never returns, it just listens on stderr of the other process, +// and calls your handler when a panic is seen. In the other it either returns +// nil to indicate that the panic monitoring is enabled, or an error to indicate +// that something else went wrong. +func BasicMonitor(f HandlerFunc) error { + exitStatus, err := Wrap(&WrapConfig{ + Handler: f, + Monitor: runtime.GOOS != "windows", + }) + + if err != nil { + return err + } + + if exitStatus >= 0 { + os.Exit(exitStatus) + } + + return nil +} + +// Wrap wraps the current executable in a handler to catch panics. It +// returns an error if there was an error during the wrapping process. +// If the error is nil, then the int result indicates the exit status of the +// child process. 
If the exit status is -1, then this is the child process, +// and execution should continue as normal. Otherwise, this is the parent +// process and the child successfully ran already, and you should exit the +// process with the returned exit status. +// +// This function should be called very very early in your program's execution. +// Ideally, this runs as the first line of code of main. +// +// Once this is called, the given WrapConfig shouldn't be modified or used +// any further. +func Wrap(c *WrapConfig) (int, error) { + if c.Handler == nil { + return -1, errors.New("Handler must be set") + } + + if c.DetectDuration == 0 { + c.DetectDuration = 300 * time.Millisecond + } + + if c.Writer == nil { + c.Writer = os.Stderr + } + + if c.Monitor { + return monitor(c) + } else { + return wrap(c) + } +} + +func wrap(c *WrapConfig) (int, error) { + + // If we're already wrapped, exit out. + if Wrapped(c) { + return -1, nil + } + + // Get the path to our current executable + exePath, err := osext.Executable() + if err != nil { + return -1, err + } + + // Pipe the stderr so we can read all the data as we look for panics + stderr_r, stderr_w := io.Pipe() + + // doneCh is closed when we're done, signaling any other goroutines + // to end immediately. + doneCh := make(chan struct{}) + + // panicCh is the channel on which the panic text will actually be + // sent. + panicCh := make(chan string) + + // On close, make sure to finish off the copying of data to stderr + defer func() { + defer close(doneCh) + stderr_w.Close() + <-panicCh + }() + + // Start the goroutine that will watch stderr for any panics + go trackPanic(stderr_r, c.Writer, c.DetectDuration, panicCh) + + // Create the writer for stdout that we're going to use + var stdout_w io.Writer = os.Stdout + if c.Stdout != nil { + stdout_w = c.Stdout + } + + // Build a subcommand to re-execute ourselves. We make sure to + // set the environmental variable to include our cookie. We also + // set stdin/stdout to match the config. Finally, we pipe stderr + // through ourselves in order to watch for panics. + cmd := exec.Command(exePath, os.Args[1:]...) + cmd.Env = append(os.Environ(), c.CookieKey+"="+c.CookieValue) + cmd.Stdin = os.Stdin + cmd.Stdout = stdout_w + cmd.Stderr = stderr_w + if err := cmd.Start(); err != nil { + return 1, err + } + + // Listen to signals and capture them forever. We allow the child + // process to handle them in some way. + sigCh := make(chan os.Signal) + signal.Notify(sigCh, os.Interrupt) + go func() { + defer signal.Stop(sigCh) + for { + select { + case <-doneCh: + return + case <-sigCh: + } + } + }() + + if err := cmd.Wait(); err != nil { + exitErr, ok := err.(*exec.ExitError) + if !ok { + // This is some other kind of subprocessing error. + return 1, err + } + + exitStatus := 1 + if status, ok := exitErr.Sys().(syscall.WaitStatus); ok { + exitStatus = status.ExitStatus() + } + + // Close the writer end so that the tracker goroutine ends at some point + stderr_w.Close() + + // Wait on the panic data + panicTxt := <-panicCh + if panicTxt != "" { + if !c.HidePanic { + c.Writer.Write([]byte(panicTxt)) + } + + c.Handler(panicTxt) + } + + return exitStatus, nil + } + + return 0, nil +} + +// Wrapped checks if we're already wrapped according to the configuration +// given. +// +// Wrapped is very cheap and can be used early to short-circuit some pre-wrap +// logic your application may have. 
+func Wrapped(c *WrapConfig) bool { + if c.CookieKey == "" { + c.CookieKey = DEFAULT_COOKIE_KEY + } + + if c.CookieValue == "" { + c.CookieValue = DEFAULT_COOKIE_VAL + } + + // If the cookie key/value match our environment, then we are the + // child, so just exit now and tell the caller that we're the child + return os.Getenv(c.CookieKey) == c.CookieValue +} + +// trackPanic monitors the given reader for a panic. If a panic is detected, +// it is outputted on the result channel. This will close the channel once +// it is complete. +func trackPanic(r io.Reader, w io.Writer, dur time.Duration, result chan<- string) { + defer close(result) + + var panicTimer <-chan time.Time + panicBuf := new(bytes.Buffer) + panicHeader := []byte("panic:") + + tempBuf := make([]byte, 2048) + for { + var buf []byte + var n int + + if panicTimer == nil && panicBuf.Len() > 0 { + // We're not tracking a panic but the buffer length is + // greater than 0. We need to clear out that buffer, but + // look for another panic along the way. + + // First, remove the previous panic header so we don't loop + w.Write(panicBuf.Next(len(panicHeader))) + + // Next, assume that this is our new buffer to inspect + n = panicBuf.Len() + buf = make([]byte, n) + copy(buf, panicBuf.Bytes()) + panicBuf.Reset() + } else { + var err error + buf = tempBuf + n, err = r.Read(buf) + if n <= 0 && err == io.EOF { + if panicBuf.Len() > 0 { + // We were tracking a panic, assume it was a panic + // and return that as the result. + result <- panicBuf.String() + } + + return + } + } + + if panicTimer != nil { + // We're tracking what we think is a panic right now. + // If the timer ended, then it is not a panic. + isPanic := true + select { + case <-panicTimer: + isPanic = false + default: + } + + // No matter what, buffer the text some more. + panicBuf.Write(buf[0:n]) + + if !isPanic { + // It isn't a panic, stop tracking. Clean-up will happen + // on the next iteration. + panicTimer = nil + } + + continue + } + + flushIdx := n + idx := bytes.Index(buf[0:n], panicHeader) + if idx >= 0 { + flushIdx = idx + } + + // Flush to stderr what isn't a panic + w.Write(buf[0:flushIdx]) + + if idx < 0 { + // Not a panic so just continue along + continue + } + + // We have a panic header. Write we assume is a panic os far. + panicBuf.Write(buf[idx:n]) + panicTimer = time.After(dur) + } +} diff --git a/vendor/github.com/docker/distribution/vendor/github.com/denverdino/aliyungo/LICENSE.txt b/vendor/github.com/docker/distribution/vendor/github.com/denverdino/aliyungo/LICENSE.txt new file mode 100644 index 000000000..918297133 --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/github.com/denverdino/aliyungo/LICENSE.txt @@ -0,0 +1,191 @@ + + Apache License + Version 2.0, January 2004 + https://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. 
For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + Copyright 2015-2015 Li Yi (denverdino@gmail.com). + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + https://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
diff --git a/vendor/github.com/docker/distribution/vendor/github.com/denverdino/aliyungo/common/client.go b/vendor/github.com/docker/distribution/vendor/github.com/denverdino/aliyungo/common/client.go new file mode 100644 index 000000000..4ed0a06f2 --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/github.com/denverdino/aliyungo/common/client.go @@ -0,0 +1,145 @@ +package common + +import ( + "bytes" + "encoding/json" + "io/ioutil" + "log" + "net/http" + "net/url" + "time" + + "github.com/denverdino/aliyungo/util" +) + +// A Client represents a client of ECS services +type Client struct { + AccessKeyId string //Access Key Id + AccessKeySecret string //Access Key Secret + debug bool + httpClient *http.Client + endpoint string + version string +} + +// NewClient creates a new instance of ECS client +func (client *Client) Init(endpoint, version, accessKeyId, accessKeySecret string) { + client.AccessKeyId = accessKeyId + client.AccessKeySecret = accessKeySecret + "&" + client.debug = false + client.httpClient = &http.Client{} + client.endpoint = endpoint + client.version = version +} + +// SetEndpoint sets custom endpoint +func (client *Client) SetEndpoint(endpoint string) { + client.endpoint = endpoint +} + +// SetEndpoint sets custom version +func (client *Client) SetVersion(version string) { + client.version = version +} + +// SetAccessKeyId sets new AccessKeyId +func (client *Client) SetAccessKeyId(id string) { + client.AccessKeyId = id +} + +// SetAccessKeySecret sets new AccessKeySecret +func (client *Client) SetAccessKeySecret(secret string) { + client.AccessKeySecret = secret + "&" +} + +// SetDebug sets debug mode to log the request/response message +func (client *Client) SetDebug(debug bool) { + client.debug = debug +} + +// Invoke sends the raw HTTP request for ECS services +func (client *Client) Invoke(action string, args interface{}, response interface{}) error { + + request := Request{} + request.init(client.version, action, client.AccessKeyId) + + query := util.ConvertToQueryValues(request) + util.SetQueryValues(args, &query) + + // Sign request + signature := util.CreateSignatureForRequest(ECSRequestMethod, &query, client.AccessKeySecret) + + // Generate the request URL + requestURL := client.endpoint + "?" 
+ query.Encode() + "&Signature=" + url.QueryEscape(signature) + + httpReq, err := http.NewRequest(ECSRequestMethod, requestURL, nil) + + // TODO move to util and add build val flag + httpReq.Header.Set("X-SDK-Client", `AliyunGO/`+Version) + + if err != nil { + return GetClientError(err) + } + + t0 := time.Now() + httpResp, err := client.httpClient.Do(httpReq) + t1 := time.Now() + if err != nil { + return GetClientError(err) + } + statusCode := httpResp.StatusCode + + if client.debug { + log.Printf("Invoke %s %s %d (%v)", ECSRequestMethod, requestURL, statusCode, t1.Sub(t0)) + } + + defer httpResp.Body.Close() + body, err := ioutil.ReadAll(httpResp.Body) + + if err != nil { + return GetClientError(err) + } + + if client.debug { + var prettyJSON bytes.Buffer + err = json.Indent(&prettyJSON, body, "", " ") + log.Println(string(prettyJSON.Bytes())) + } + + if statusCode >= 400 && statusCode <= 599 { + errorResponse := ErrorResponse{} + err = json.Unmarshal(body, &errorResponse) + ecsError := &Error{ + ErrorResponse: errorResponse, + StatusCode: statusCode, + } + return ecsError + } + + err = json.Unmarshal(body, response) + //log.Printf("%++v", response) + if err != nil { + return GetClientError(err) + } + + return nil +} + +// GenerateClientToken generates the Client Token with random string +func (client *Client) GenerateClientToken() string { + return util.CreateRandomString() +} + +func GetClientErrorFromString(str string) error { + return &Error{ + ErrorResponse: ErrorResponse{ + Code: "AliyunGoClientFailure", + Message: str, + }, + StatusCode: -1, + } +} + +func GetClientError(err error) error { + return GetClientErrorFromString(err.Error()) +} diff --git a/vendor/github.com/docker/distribution/vendor/github.com/denverdino/aliyungo/common/regions.go b/vendor/github.com/docker/distribution/vendor/github.com/denverdino/aliyungo/common/regions.go new file mode 100644 index 000000000..9ceda4650 --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/github.com/denverdino/aliyungo/common/regions.go @@ -0,0 +1,19 @@ +package common + +// Region represents ECS region +type Region string + +// Constants of region definition +const ( + Hangzhou = Region("cn-hangzhou") + Qingdao = Region("cn-qingdao") + Beijing = Region("cn-beijing") + Hongkong = Region("cn-hongkong") + Shenzhen = Region("cn-shenzhen") + USWest1 = Region("us-west-1") + USEast1 = Region("us-east-1") + APSouthEast1 = Region("ap-southeast-1") + Shanghai = Region("cn-shanghai") +) + +var ValidRegions = []Region{Hangzhou, Qingdao, Beijing, Shenzhen, Hongkong, Shanghai, USWest1, USEast1, APSouthEast1} diff --git a/vendor/github.com/docker/distribution/vendor/github.com/denverdino/aliyungo/common/request.go b/vendor/github.com/docker/distribution/vendor/github.com/denverdino/aliyungo/common/request.go new file mode 100644 index 000000000..2a883f19b --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/github.com/denverdino/aliyungo/common/request.go @@ -0,0 +1,101 @@ +package common + +import ( + "fmt" + "log" + "time" + + "github.com/denverdino/aliyungo/util" +) + +// Constants for Aliyun API requests +const ( + SignatureVersion = "1.0" + SignatureMethod = "HMAC-SHA1" + JSONResponseFormat = "JSON" + XMLResponseFormat = "XML" + ECSRequestMethod = "GET" +) + +type Request struct { + Format string + Version string + AccessKeyId string + Signature string + SignatureMethod string + Timestamp util.ISO6801Time + SignatureVersion string + SignatureNonce string + ResourceOwnerAccount string + Action string +} + +func (request 
*Request) init(version string, action string, AccessKeyId string) { + request.Format = JSONResponseFormat + request.Timestamp = util.NewISO6801Time(time.Now().UTC()) + request.Version = version + request.SignatureVersion = SignatureVersion + request.SignatureMethod = SignatureMethod + request.SignatureNonce = util.CreateRandomString() + request.Action = action + request.AccessKeyId = AccessKeyId +} + +type Response struct { + RequestId string +} + +type ErrorResponse struct { + Response + HostId string + Code string + Message string +} + +// An Error represents a custom error for Aliyun API failure response +type Error struct { + ErrorResponse + StatusCode int //Status Code of HTTP Response +} + +func (e *Error) Error() string { + return fmt.Sprintf("Aliyun API Error: RequestId: %s Status Code: %d Code: %s Message: %s", e.RequestId, e.StatusCode, e.Code, e.Message) +} + +type Pagination struct { + PageNumber int + PageSize int +} + +func (p *Pagination) SetPageSize(size int) { + p.PageSize = size +} + +func (p *Pagination) Validate() { + if p.PageNumber < 0 { + log.Printf("Invalid PageNumber: %d", p.PageNumber) + p.PageNumber = 1 + } + if p.PageSize < 0 { + log.Printf("Invalid PageSize: %d", p.PageSize) + p.PageSize = 10 + } else if p.PageSize > 50 { + log.Printf("Invalid PageSize: %d", p.PageSize) + p.PageSize = 50 + } +} + +// A PaginationResponse represents a response with pagination information +type PaginationResult struct { + TotalCount int + PageNumber int + PageSize int +} + +// NextPage gets the next page of the result set +func (r *PaginationResult) NextPage() *Pagination { + if r.PageNumber*r.PageSize >= r.TotalCount { + return nil + } + return &Pagination{PageNumber: r.PageNumber + 1, PageSize: r.PageSize} +} diff --git a/vendor/github.com/docker/distribution/vendor/github.com/denverdino/aliyungo/common/types.go b/vendor/github.com/docker/distribution/vendor/github.com/denverdino/aliyungo/common/types.go new file mode 100644 index 000000000..c562aedfc --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/github.com/denverdino/aliyungo/common/types.go @@ -0,0 +1,15 @@ +package common + +type InternetChargeType string + +const ( + PayByBandwidth = InternetChargeType("PayByBandwidth") + PayByTraffic = InternetChargeType("PayByTraffic") +) + +type InstanceChargeType string + +const ( + PrePaid = InstanceChargeType("PrePaid") + PostPaid = InstanceChargeType("PostPaid") +) diff --git a/vendor/github.com/docker/distribution/vendor/github.com/denverdino/aliyungo/common/version.go b/vendor/github.com/docker/distribution/vendor/github.com/denverdino/aliyungo/common/version.go new file mode 100644 index 000000000..7cb3d3aff --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/github.com/denverdino/aliyungo/common/version.go @@ -0,0 +1,3 @@ +package common + +const Version = "0.1" diff --git a/vendor/github.com/docker/distribution/vendor/github.com/denverdino/aliyungo/oss/client.go b/vendor/github.com/docker/distribution/vendor/github.com/denverdino/aliyungo/oss/client.go new file mode 100644 index 000000000..c5e13e516 --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/github.com/denverdino/aliyungo/oss/client.go @@ -0,0 +1,1394 @@ +package oss + +import ( + "bytes" + "crypto/hmac" + "crypto/md5" + "crypto/sha1" + "encoding/base64" + "encoding/xml" + "fmt" + "io" + "io/ioutil" + "log" + "mime" + "net" + "net/http" + "net/http/httputil" + "net/url" + "os" + "path" + "strconv" + "strings" + "time" + + "github.com/denverdino/aliyungo/common" + 
"github.com/denverdino/aliyungo/util" +) + +const DefaultContentType = "application/octet-stream" + +// The Client type encapsulates operations with an OSS region. +type Client struct { + AccessKeyId string + AccessKeySecret string + SecurityToken string + Region Region + Internal bool + Secure bool + ConnectTimeout time.Duration + + endpoint string + debug bool +} + +// The Bucket type encapsulates operations with an bucket. +type Bucket struct { + *Client + Name string +} + +// The Owner type represents the owner of the object in an bucket. +type Owner struct { + ID string + DisplayName string +} + +// Options struct +// +type Options struct { + ServerSideEncryption bool + Meta map[string][]string + ContentEncoding string + CacheControl string + ContentMD5 string + ContentDisposition string + //Range string + //Expires int +} + +type CopyOptions struct { + Headers http.Header + CopySourceOptions string + MetadataDirective string + //ContentType string +} + +// CopyObjectResult is the output from a Copy request +type CopyObjectResult struct { + ETag string + LastModified string +} + +var attempts = util.AttemptStrategy{ + Min: 5, + Total: 5 * time.Second, + Delay: 200 * time.Millisecond, +} + +// NewOSSClient creates a new OSS. + +func NewOSSClientForAssumeRole(region Region, internal bool, accessKeyId string, accessKeySecret string, securityToken string, secure bool) *Client { + return &Client{ + AccessKeyId: accessKeyId, + AccessKeySecret: accessKeySecret, + SecurityToken: securityToken, + Region: region, + Internal: internal, + debug: false, + Secure: secure, + } +} + +func NewOSSClient(region Region, internal bool, accessKeyId string, accessKeySecret string, secure bool) *Client { + return &Client{ + AccessKeyId: accessKeyId, + AccessKeySecret: accessKeySecret, + Region: region, + Internal: internal, + debug: false, + Secure: secure, + } +} + +// SetDebug sets debug mode to log the request/response message +func (client *Client) SetDebug(debug bool) { + client.debug = debug +} + +// Bucket returns a Bucket with the given name. +func (client *Client) Bucket(name string) *Bucket { + name = strings.ToLower(name) + return &Bucket{ + Client: client, + Name: name, + } +} + +type BucketInfo struct { + Name string + CreationDate string + ExtranetEndpoint string + IntranetEndpoint string + Location string + Grant string `xml:"AccessControlList>Grant"` +} + +type GetServiceResp struct { + Owner Owner + Buckets []BucketInfo `xml:">Bucket"` +} + +type GetBucketInfoResp struct { + Bucket BucketInfo +} + +// GetService gets a list of all buckets owned by an account. +func (client *Client) GetService() (*GetServiceResp, error) { + bucket := client.Bucket("") + + r, err := bucket.Get("") + if err != nil { + return nil, err + } + + // Parse the XML response. + var resp GetServiceResp + if err = xml.Unmarshal(r, &resp); err != nil { + return nil, err + } + + return &resp, nil +} + +type ACL string + +const ( + Private = ACL("private") + PublicRead = ACL("public-read") + PublicReadWrite = ACL("public-read-write") + AuthenticatedRead = ACL("authenticated-read") + BucketOwnerRead = ACL("bucket-owner-read") + BucketOwnerFull = ACL("bucket-owner-full-control") +) + +var createBucketConfiguration = ` + %s +` + +// locationConstraint returns an io.Reader specifying a LocationConstraint if +// required for the region. 
+func (client *Client) locationConstraint() io.Reader { + constraint := fmt.Sprintf(createBucketConfiguration, client.Region) + return strings.NewReader(constraint) +} + +// override default endpoint +func (client *Client) SetEndpoint(endpoint string) { + // TODO check endpoint + client.endpoint = endpoint +} + +// Info query basic information about the bucket +// +// You can read doc at https://help.aliyun.com/document_detail/31968.html +func (b *Bucket) Info() (BucketInfo, error) { + params := make(url.Values) + params.Set("bucketInfo", "") + r, err := b.GetWithParams("/", params) + + if err != nil { + return BucketInfo{}, err + } + + // Parse the XML response. + var resp GetBucketInfoResp + if err = xml.Unmarshal(r, &resp); err != nil { + return BucketInfo{}, err + } + + return resp.Bucket, nil +} + +// PutBucket creates a new bucket. +// +// You can read doc at http://docs.aliyun.com/#/pub/oss/api-reference/bucket&PutBucket +func (b *Bucket) PutBucket(perm ACL) error { + headers := make(http.Header) + if perm != "" { + headers.Set("x-oss-acl", string(perm)) + } + req := &request{ + method: "PUT", + bucket: b.Name, + path: "/", + headers: headers, + payload: b.Client.locationConstraint(), + } + return b.Client.query(req, nil) +} + +// DelBucket removes an existing bucket. All objects in the bucket must +// be removed before the bucket itself can be removed. +// +// You can read doc at http://docs.aliyun.com/#/pub/oss/api-reference/bucket&DeleteBucket +func (b *Bucket) DelBucket() (err error) { + for attempt := attempts.Start(); attempt.Next(); { + req := &request{ + method: "DELETE", + bucket: b.Name, + path: "/", + } + + err = b.Client.query(req, nil) + if !shouldRetry(err) { + break + } + } + return err +} + +// Get retrieves an object from an bucket. +// +// You can read doc at http://docs.aliyun.com/#/pub/oss/api-reference/object&GetObject +func (b *Bucket) Get(path string) (data []byte, err error) { + body, err := b.GetReader(path) + if err != nil { + return nil, err + } + data, err = ioutil.ReadAll(body) + body.Close() + return data, err +} + +// GetReader retrieves an object from an bucket, +// returning the body of the HTTP response. +// It is the caller's responsibility to call Close on rc when +// finished reading. +func (b *Bucket) GetReader(path string) (rc io.ReadCloser, err error) { + resp, err := b.GetResponse(path) + if resp != nil { + return resp.Body, err + } + return nil, err +} + +// GetResponse retrieves an object from an bucket, +// returning the HTTP response. +// It is the caller's responsibility to call Close on rc when +// finished reading +func (b *Bucket) GetResponse(path string) (resp *http.Response, err error) { + return b.GetResponseWithHeaders(path, make(http.Header)) +} + +// GetResponseWithHeaders retrieves an object from an bucket +// Accepts custom headers to be sent as the second parameter +// returning the body of the HTTP response. +// It is the caller's responsibility to call Close on rc when +// finished reading +func (b *Bucket) GetResponseWithHeaders(path string, headers http.Header) (resp *http.Response, err error) { + for attempt := attempts.Start(); attempt.Next(); { + req := &request{ + bucket: b.Name, + path: path, + headers: headers, + } + err = b.Client.prepare(req) + if err != nil { + return nil, err + } + + resp, err := b.Client.run(req, nil) + if shouldRetry(err) && attempt.HasNext() { + continue + } + if err != nil { + return nil, err + } + return resp, nil + } + panic("unreachable") +} + +// Get retrieves an object from an bucket. 
+func (b *Bucket) GetWithParams(path string, params url.Values) (data []byte, err error) { + resp, err := b.GetResponseWithParamsAndHeaders(path, params, nil) + if err != nil { + return nil, err + } + data, err = ioutil.ReadAll(resp.Body) + resp.Body.Close() + return data, err +} + +func (b *Bucket) GetResponseWithParamsAndHeaders(path string, params url.Values, headers http.Header) (resp *http.Response, err error) { + for attempt := attempts.Start(); attempt.Next(); { + req := &request{ + bucket: b.Name, + path: path, + params: params, + headers: headers, + } + err = b.Client.prepare(req) + if err != nil { + return nil, err + } + + resp, err := b.Client.run(req, nil) + if shouldRetry(err) && attempt.HasNext() { + continue + } + if err != nil { + return nil, err + } + return resp, nil + } + panic("unreachable") +} + +// Exists checks whether or not an object exists on an bucket using a HEAD request. +func (b *Bucket) Exists(path string) (exists bool, err error) { + for attempt := attempts.Start(); attempt.Next(); { + req := &request{ + method: "HEAD", + bucket: b.Name, + path: path, + } + err = b.Client.prepare(req) + if err != nil { + return + } + + resp, err := b.Client.run(req, nil) + + if shouldRetry(err) && attempt.HasNext() { + continue + } + + if err != nil { + // We can treat a 403 or 404 as non existance + if e, ok := err.(*Error); ok && (e.StatusCode == 403 || e.StatusCode == 404) { + return false, nil + } + return false, err + } + + if resp.StatusCode/100 == 2 { + exists = true + } + if resp.Body != nil { + resp.Body.Close() + } + return exists, err + } + return false, fmt.Errorf("OSS Currently Unreachable") +} + +// Head HEADs an object in the bucket, returns the response with +// +// You can read doc at http://docs.aliyun.com/#/pub/oss/api-reference/object&HeadObject +func (b *Bucket) Head(path string, headers http.Header) (*http.Response, error) { + + for attempt := attempts.Start(); attempt.Next(); { + req := &request{ + method: "HEAD", + bucket: b.Name, + path: path, + headers: headers, + } + err := b.Client.prepare(req) + if err != nil { + return nil, err + } + + resp, err := b.Client.run(req, nil) + if shouldRetry(err) && attempt.HasNext() { + continue + } + if err != nil { + return nil, err + } + if resp != nil && resp.Body != nil { + resp.Body.Close() + } + return resp, err + } + return nil, fmt.Errorf("OSS Currently Unreachable") +} + +// Put inserts an object into the bucket. +// +// You can read doc at http://docs.aliyun.com/#/pub/oss/api-reference/object&PutObject +func (b *Bucket) Put(path string, data []byte, contType string, perm ACL, options Options) error { + body := bytes.NewBuffer(data) + return b.PutReader(path, body, int64(len(data)), contType, perm, options) +} + +// PutCopy puts a copy of an object given by the key path into bucket b using b.Path as the target key +// +// You can read doc at http://docs.aliyun.com/#/pub/oss/api-reference/object&CopyObject +func (b *Bucket) PutCopy(path string, perm ACL, options CopyOptions, source string) (*CopyObjectResult, error) { + headers := make(http.Header) + + headers.Set("x-oss-acl", string(perm)) + headers.Set("x-oss-copy-source", source) + + options.addHeaders(headers) + req := &request{ + method: "PUT", + bucket: b.Name, + path: path, + headers: headers, + timeout: 5 * time.Minute, + } + resp := &CopyObjectResult{} + err := b.Client.query(req, resp) + if err != nil { + return resp, err + } + return resp, nil +} + +// PutReader inserts an object into the bucket by consuming data +// from r until EOF. 
+func (b *Bucket) PutReader(path string, r io.Reader, length int64, contType string, perm ACL, options Options) error { + headers := make(http.Header) + headers.Set("Content-Length", strconv.FormatInt(length, 10)) + headers.Set("Content-Type", contType) + headers.Set("x-oss-acl", string(perm)) + + options.addHeaders(headers) + req := &request{ + method: "PUT", + bucket: b.Name, + path: path, + headers: headers, + payload: r, + } + return b.Client.query(req, nil) +} + +// PutFile creates/updates object with file +func (b *Bucket) PutFile(path string, file *os.File, perm ACL, options Options) error { + var contentType string + if dotPos := strings.LastIndex(file.Name(), "."); dotPos == -1 { + contentType = DefaultContentType + } else { + if mimeType := mime.TypeByExtension(file.Name()[dotPos:]); mimeType == "" { + contentType = DefaultContentType + } else { + contentType = mimeType + } + } + stats, err := file.Stat() + if err != nil { + log.Printf("Unable to read file %s stats.\n", file.Name()) + return err + } + + return b.PutReader(path, file, stats.Size(), contentType, perm, options) +} + +// addHeaders adds o's specified fields to headers +func (o Options) addHeaders(headers http.Header) { + if o.ServerSideEncryption { + headers.Set("x-oss-server-side-encryption", "AES256") + } + if len(o.ContentEncoding) != 0 { + headers.Set("Content-Encoding", o.ContentEncoding) + } + if len(o.CacheControl) != 0 { + headers.Set("Cache-Control", o.CacheControl) + } + if len(o.ContentMD5) != 0 { + headers.Set("Content-MD5", o.ContentMD5) + } + if len(o.ContentDisposition) != 0 { + headers.Set("Content-Disposition", o.ContentDisposition) + } + + for k, v := range o.Meta { + for _, mv := range v { + headers.Add("x-oss-meta-"+k, mv) + } + } +} + +// addHeaders adds o's specified fields to headers +func (o CopyOptions) addHeaders(headers http.Header) { + if len(o.MetadataDirective) != 0 { + headers.Set("x-oss-metadata-directive", o.MetadataDirective) + } + if len(o.CopySourceOptions) != 0 { + headers.Set("x-oss-copy-source-range", o.CopySourceOptions) + } + if o.Headers != nil { + for k, v := range o.Headers { + newSlice := make([]string, len(v)) + copy(newSlice, v) + headers[k] = newSlice + } + } +} + +func makeXMLBuffer(doc []byte) *bytes.Buffer { + buf := new(bytes.Buffer) + buf.WriteString(xml.Header) + buf.Write(doc) + return buf +} + +type IndexDocument struct { + Suffix string `xml:"Suffix"` +} + +type ErrorDocument struct { + Key string `xml:"Key"` +} + +type RoutingRule struct { + ConditionKeyPrefixEquals string `xml:"Condition>KeyPrefixEquals"` + RedirectReplaceKeyPrefixWith string `xml:"Redirect>ReplaceKeyPrefixWith,omitempty"` + RedirectReplaceKeyWith string `xml:"Redirect>ReplaceKeyWith,omitempty"` +} + +type RedirectAllRequestsTo struct { + HostName string `xml:"HostName"` + Protocol string `xml:"Protocol,omitempty"` +} + +type WebsiteConfiguration struct { + XMLName xml.Name `xml:"http://doc.oss-cn-hangzhou.aliyuncs.com WebsiteConfiguration"` + IndexDocument *IndexDocument `xml:"IndexDocument,omitempty"` + ErrorDocument *ErrorDocument `xml:"ErrorDocument,omitempty"` + RoutingRules *[]RoutingRule `xml:"RoutingRules>RoutingRule,omitempty"` + RedirectAllRequestsTo *RedirectAllRequestsTo `xml:"RedirectAllRequestsTo,omitempty"` +} + +// PutBucketWebsite configures a bucket as a website. 
+// +// You can read doc at http://docs.aliyun.com/#/pub/oss/api-reference/bucket&PutBucketWebsite +func (b *Bucket) PutBucketWebsite(configuration WebsiteConfiguration) error { + doc, err := xml.Marshal(configuration) + if err != nil { + return err + } + + buf := makeXMLBuffer(doc) + + return b.PutBucketSubresource("website", buf, int64(buf.Len())) +} + +func (b *Bucket) PutBucketSubresource(subresource string, r io.Reader, length int64) error { + headers := make(http.Header) + headers.Set("Content-Length", strconv.FormatInt(length, 10)) + + req := &request{ + path: "/", + method: "PUT", + bucket: b.Name, + headers: headers, + payload: r, + params: url.Values{subresource: {""}}, + } + + return b.Client.query(req, nil) +} + +// Del removes an object from the bucket. +// +// You can read doc at http://docs.aliyun.com/#/pub/oss/api-reference/object&DeleteObject +func (b *Bucket) Del(path string) error { + req := &request{ + method: "DELETE", + bucket: b.Name, + path: path, + } + return b.Client.query(req, nil) +} + +type Delete struct { + Quiet bool `xml:"Quiet,omitempty"` + Objects []Object `xml:"Object"` +} + +type Object struct { + Key string `xml:"Key"` + VersionId string `xml:"VersionId,omitempty"` +} + +// DelMulti removes up to 1000 objects from the bucket. +// +// You can read doc at http://docs.aliyun.com/#/pub/oss/api-reference/object&DeleteMultipleObjects +func (b *Bucket) DelMulti(objects Delete) error { + doc, err := xml.Marshal(objects) + if err != nil { + return err + } + + buf := makeXMLBuffer(doc) + digest := md5.New() + size, err := digest.Write(buf.Bytes()) + if err != nil { + return err + } + + headers := make(http.Header) + headers.Set("Content-Length", strconv.FormatInt(int64(size), 10)) + headers.Set("Content-MD5", base64.StdEncoding.EncodeToString(digest.Sum(nil))) + headers.Set("Content-Type", "text/xml") + + req := &request{ + path: "/", + method: "POST", + params: url.Values{"delete": {""}}, + bucket: b.Name, + headers: headers, + payload: buf, + } + + return b.Client.query(req, nil) +} + +// The ListResp type holds the results of a List bucket operation. +type ListResp struct { + Name string + Prefix string + Delimiter string + Marker string + MaxKeys int + // IsTruncated is true if the results have been truncated because + // there are more keys and prefixes than can fit in MaxKeys. + // N.B. this is the opposite sense to that documented (incorrectly) in + // http://goo.gl/YjQTc + IsTruncated bool + Contents []Key + CommonPrefixes []string `xml:">Prefix"` + // if IsTruncated is true, pass NextMarker as marker argument to List() + // to get the next set of keys + NextMarker string +} + +// The Key type represents an item stored in an bucket. +type Key struct { + Key string + LastModified string + Type string + Size int64 + // ETag gives the hex-encoded MD5 sum of the contents, + // surrounded with double-quotes. + ETag string + StorageClass string + Owner Owner +} + +// List returns information about objects in an bucket. +// +// The prefix parameter limits the response to keys that begin with the +// specified prefix. +// +// The delim parameter causes the response to group all of the keys that +// share a common prefix up to the next delimiter in a single entry within +// the CommonPrefixes field. You can use delimiters to separate a bucket +// into different groupings of keys, similar to how folders would work. +// +// The marker parameter specifies the key to start with when listing objects +// in a bucket. 
OSS lists objects in alphabetical order and +// will return keys alphabetically greater than the marker. +// +// The max parameter specifies how many keys + common prefixes to return in +// the response, at most 1000. The default is 100. +// +// For example, given these keys in a bucket: +// +// index.html +// index2.html +// photos/2006/January/sample.jpg +// photos/2006/February/sample2.jpg +// photos/2006/February/sample3.jpg +// photos/2006/February/sample4.jpg +// +// Listing this bucket with delimiter set to "/" would yield the +// following result: +// +// &ListResp{ +// Name: "sample-bucket", +// MaxKeys: 1000, +// Delimiter: "/", +// Contents: []Key{ +// {Key: "index.html", "index2.html"}, +// }, +// CommonPrefixes: []string{ +// "photos/", +// }, +// } +// +// Listing the same bucket with delimiter set to "/" and prefix set to +// "photos/2006/" would yield the following result: +// +// &ListResp{ +// Name: "sample-bucket", +// MaxKeys: 1000, +// Delimiter: "/", +// Prefix: "photos/2006/", +// CommonPrefixes: []string{ +// "photos/2006/February/", +// "photos/2006/January/", +// }, +// } +// +// +// You can read doc at http://docs.aliyun.com/#/pub/oss/api-reference/bucket&GetBucket +func (b *Bucket) List(prefix, delim, marker string, max int) (result *ListResp, err error) { + params := make(url.Values) + params.Set("prefix", prefix) + params.Set("delimiter", delim) + params.Set("marker", marker) + if max != 0 { + params.Set("max-keys", strconv.FormatInt(int64(max), 10)) + } + result = &ListResp{} + for attempt := attempts.Start(); attempt.Next(); { + req := &request{ + bucket: b.Name, + params: params, + } + err = b.Client.query(req, result) + if !shouldRetry(err) { + break + } + } + if err != nil { + return nil, err + } + // if NextMarker is not returned, it should be set to the name of last key, + // so let's do it so that each caller doesn't have to + if result.IsTruncated && result.NextMarker == "" { + n := len(result.Contents) + if n > 0 { + result.NextMarker = result.Contents[n-1].Key + } + } + return result, nil +} + +type GetLocationResp struct { + Location string `xml:",innerxml"` +} + +func (b *Bucket) Location() (string, error) { + params := make(url.Values) + params.Set("location", "") + r, err := b.GetWithParams("/", params) + + if err != nil { + return "", err + } + + // Parse the XML response. + var resp GetLocationResp + if err = xml.Unmarshal(r, &resp); err != nil { + return "", err + } + + if resp.Location == "" { + return string(Hangzhou), nil + } + return resp.Location, nil +} + +func (b *Bucket) Path(path string) string { + if !strings.HasPrefix(path, "/") { + path = "/" + path + } + return "/" + b.Name + path +} + +// URL returns a non-signed URL that allows retriving the +// object at path. It only works if the object is publicly +// readable (see SignedURL). +func (b *Bucket) URL(path string) string { + req := &request{ + bucket: b.Name, + path: path, + } + err := b.Client.prepare(req) + if err != nil { + panic(err) + } + u, err := req.url() + if err != nil { + panic(err) + } + u.RawQuery = "" + return u.String() +} + +// SignedURL returns a signed URL that allows anyone holding the URL +// to retrieve the object at path. The signature is valid until expires. +func (b *Bucket) SignedURL(path string, expires time.Time) string { + return b.SignedURLWithArgs(path, expires, nil, nil) +} + +// SignedURLWithArgs returns a signed URL that allows anyone holding the URL +// to retrieve the object at path. The signature is valid until expires. 
+func (b *Bucket) SignedURLWithArgs(path string, expires time.Time, params url.Values, headers http.Header) string { + return b.SignedURLWithMethod("GET", path, expires, params, headers) +} + +// SignedURLWithMethod returns a signed URL that allows anyone holding the URL +// to either retrieve the object at path or make a HEAD request against it. The signature is valid until expires. +func (b *Bucket) SignedURLWithMethod(method, path string, expires time.Time, params url.Values, headers http.Header) string { + var uv = url.Values{} + + if params != nil { + uv = params + } + + uv.Set("Expires", strconv.FormatInt(expires.Unix(), 10)) + uv.Set("OSSAccessKeyId", b.AccessKeyId) + + req := &request{ + method: method, + bucket: b.Name, + path: path, + params: uv, + headers: headers, + } + err := b.Client.prepare(req) + if err != nil { + panic(err) + } + u, err := req.url() + if err != nil { + panic(err) + } + + return u.String() +} + +// UploadSignedURL returns a signed URL that allows anyone holding the URL +// to upload the object at path. The signature is valid until expires. +// contenttype is a string like image/png +// name is the resource name in OSS terminology like images/ali.png [obviously excluding the bucket name itself] +func (b *Bucket) UploadSignedURL(name, method, contentType string, expires time.Time) string { + //TODO TESTING + expireDate := expires.Unix() + if method != "POST" { + method = "PUT" + } + + tokenData := "" + + stringToSign := method + "\n\n" + contentType + "\n" + strconv.FormatInt(expireDate, 10) + "\n" + tokenData + "/" + path.Join(b.Name, name) + secretKey := b.AccessKeySecret + accessId := b.AccessKeyId + mac := hmac.New(sha1.New, []byte(secretKey)) + mac.Write([]byte(stringToSign)) + macsum := mac.Sum(nil) + signature := base64.StdEncoding.EncodeToString(macsum) + signature = strings.TrimSpace(signature) + + signedurl, err := url.Parse(b.Region.GetEndpoint(b.Internal, b.Name, b.Secure)) + if err != nil { + log.Println("ERROR sining url for OSS upload", err) + return "" + } + signedurl.Path = name + params := url.Values{} + params.Add("OSSAccessKeyId", accessId) + params.Add("Expires", strconv.FormatInt(expireDate, 10)) + params.Add("Signature", signature) + + signedurl.RawQuery = params.Encode() + return signedurl.String() +} + +// PostFormArgsEx returns the action and input fields needed to allow anonymous +// uploads to a bucket within the expiration limit +// Additional conditions can be specified with conds +func (b *Bucket) PostFormArgsEx(path string, expires time.Time, redirect string, conds []string) (action string, fields map[string]string) { + conditions := []string{} + fields = map[string]string{ + "AWSAccessKeyId": b.AccessKeyId, + "key": path, + } + + if conds != nil { + conditions = append(conditions, conds...) 
+ } + + conditions = append(conditions, fmt.Sprintf("{\"key\": \"%s\"}", path)) + conditions = append(conditions, fmt.Sprintf("{\"bucket\": \"%s\"}", b.Name)) + if redirect != "" { + conditions = append(conditions, fmt.Sprintf("{\"success_action_redirect\": \"%s\"}", redirect)) + fields["success_action_redirect"] = redirect + } + + vExpiration := expires.Format("2006-01-02T15:04:05Z") + vConditions := strings.Join(conditions, ",") + policy := fmt.Sprintf("{\"expiration\": \"%s\", \"conditions\": [%s]}", vExpiration, vConditions) + policy64 := base64.StdEncoding.EncodeToString([]byte(policy)) + fields["policy"] = policy64 + + signer := hmac.New(sha1.New, []byte(b.AccessKeySecret)) + signer.Write([]byte(policy64)) + fields["signature"] = base64.StdEncoding.EncodeToString(signer.Sum(nil)) + + action = fmt.Sprintf("%s/%s/", b.Client.Region, b.Name) + return +} + +// PostFormArgs returns the action and input fields needed to allow anonymous +// uploads to a bucket within the expiration limit +func (b *Bucket) PostFormArgs(path string, expires time.Time, redirect string) (action string, fields map[string]string) { + return b.PostFormArgsEx(path, expires, redirect, nil) +} + +type request struct { + method string + bucket string + path string + params url.Values + headers http.Header + baseurl string + payload io.Reader + prepared bool + timeout time.Duration +} + +func (req *request) url() (*url.URL, error) { + u, err := url.Parse(req.baseurl) + if err != nil { + return nil, fmt.Errorf("bad OSS endpoint URL %q: %v", req.baseurl, err) + } + u.RawQuery = req.params.Encode() + u.Path = req.path + return u, nil +} + +// query prepares and runs the req request. +// If resp is not nil, the XML data contained in the response +// body will be unmarshalled on it. +func (client *Client) query(req *request, resp interface{}) error { + err := client.prepare(req) + if err != nil { + return err + } + r, err := client.run(req, resp) + if r != nil && r.Body != nil { + r.Body.Close() + } + return err +} + +// Sets baseurl on req from bucket name and the region endpoint +func (client *Client) setBaseURL(req *request) error { + + if client.endpoint == "" { + req.baseurl = client.Region.GetEndpoint(client.Internal, req.bucket, client.Secure) + } else { + req.baseurl = fmt.Sprintf("%s://%s", getProtocol(client.Secure), client.endpoint) + } + + return nil +} + +// partiallyEscapedPath partially escapes the OSS path allowing for all OSS REST API calls. +// +// Some commands including: +// GET Bucket acl http://goo.gl/aoXflF +// GET Bucket cors http://goo.gl/UlmBdx +// GET Bucket lifecycle http://goo.gl/8Fme7M +// GET Bucket policy http://goo.gl/ClXIo3 +// GET Bucket location http://goo.gl/5lh8RD +// GET Bucket Logging http://goo.gl/sZ5ckF +// GET Bucket notification http://goo.gl/qSSZKD +// GET Bucket tagging http://goo.gl/QRvxnM +// require the first character after the bucket name in the path to be a literal '?' and +// not the escaped hex representation '%3F'. +func partiallyEscapedPath(path string) string { + pathEscapedAndSplit := strings.Split((&url.URL{Path: path}).String(), "/") + if len(pathEscapedAndSplit) >= 3 { + if len(pathEscapedAndSplit[2]) >= 3 { + // Check for the one "?" that should not be escaped. + if pathEscapedAndSplit[2][0:3] == "%3F" { + pathEscapedAndSplit[2] = "?" + pathEscapedAndSplit[2][3:] + } + } + } + return strings.Replace(strings.Join(pathEscapedAndSplit, "/"), "+", "%2B", -1) +} + +// prepare sets up req to be delivered to OSS. 
+func (client *Client) prepare(req *request) error { + // Copy so they can be mutated without affecting on retries. + headers := copyHeader(req.headers) + if len(client.SecurityToken) != 0 { + headers.Set("x-oss-security-token", client.SecurityToken) + } + + params := make(url.Values) + + for k, v := range req.params { + params[k] = v + } + + req.params = params + req.headers = headers + + if !req.prepared { + req.prepared = true + if req.method == "" { + req.method = "GET" + } + + if !strings.HasPrefix(req.path, "/") { + req.path = "/" + req.path + } + + err := client.setBaseURL(req) + if err != nil { + return err + } + } + + req.headers.Set("Date", util.GetGMTime()) + client.signRequest(req) + + return nil +} + +// Prepares an *http.Request for doHttpRequest +func (client *Client) setupHttpRequest(req *request) (*http.Request, error) { + // Copy so that signing the http request will not mutate it + + u, err := req.url() + if err != nil { + return nil, err + } + u.Opaque = fmt.Sprintf("//%s%s", u.Host, partiallyEscapedPath(u.Path)) + + hreq := http.Request{ + URL: u, + Method: req.method, + ProtoMajor: 1, + ProtoMinor: 1, + Close: true, + Header: req.headers, + Form: req.params, + } + + hreq.Header.Set("X-SDK-Client", `AliyunGO/`+common.Version) + + contentLength := req.headers.Get("Content-Length") + + if contentLength != "" { + hreq.ContentLength, _ = strconv.ParseInt(contentLength, 10, 64) + req.headers.Del("Content-Length") + } + + if req.payload != nil { + hreq.Body = ioutil.NopCloser(req.payload) + } + + return &hreq, nil +} + +// doHttpRequest sends hreq and returns the http response from the server. +// If resp is not nil, the XML data contained in the response +// body will be unmarshalled on it. +func (client *Client) doHttpRequest(c *http.Client, hreq *http.Request, resp interface{}) (*http.Response, error) { + + if true { + log.Printf("%s %s ...\n", hreq.Method, hreq.URL.String()) + } + hresp, err := c.Do(hreq) + if err != nil { + return nil, err + } + if client.debug { + log.Printf("%s %s %d\n", hreq.Method, hreq.URL.String(), hresp.StatusCode) + contentType := hresp.Header.Get("Content-Type") + if contentType == "application/xml" || contentType == "text/xml" { + dump, _ := httputil.DumpResponse(hresp, true) + log.Printf("%s\n", dump) + } else { + log.Printf("Response Content-Type: %s\n", contentType) + } + } + if hresp.StatusCode != 200 && hresp.StatusCode != 204 && hresp.StatusCode != 206 { + return nil, client.buildError(hresp) + } + if resp != nil { + err = xml.NewDecoder(hresp.Body).Decode(resp) + hresp.Body.Close() + + if client.debug { + log.Printf("aliyungo.oss> decoded xml into %#v", resp) + } + + } + return hresp, err +} + +// run sends req and returns the http response from the server. +// If resp is not nil, the XML data contained in the response +// body will be unmarshalled on it. 
+func (client *Client) run(req *request, resp interface{}) (*http.Response, error) { + if client.debug { + log.Printf("Running OSS request: %#v", req) + } + + hreq, err := client.setupHttpRequest(req) + if err != nil { + return nil, err + } + + c := &http.Client{ + Transport: &http.Transport{ + Dial: func(netw, addr string) (c net.Conn, err error) { + if client.ConnectTimeout > 0 { + c, err = net.DialTimeout(netw, addr, client.ConnectTimeout) + } else { + c, err = net.Dial(netw, addr) + } + if err != nil { + return + } + return + }, + Proxy: http.ProxyFromEnvironment, + }, + Timeout: req.timeout, + } + + return client.doHttpRequest(c, hreq, resp) +} + +// Error represents an error in an operation with OSS. +type Error struct { + StatusCode int // HTTP status code (200, 403, ...) + Code string // OSS error code ("UnsupportedOperation", ...) + Message string // The human-oriented error message + BucketName string + RequestId string + HostId string +} + +func (e *Error) Error() string { + return fmt.Sprintf("Aliyun API Error: RequestId: %s Status Code: %d Code: %s Message: %s", e.RequestId, e.StatusCode, e.Code, e.Message) +} + +func (client *Client) buildError(r *http.Response) error { + if client.debug { + log.Printf("got error (status code %v)", r.StatusCode) + data, err := ioutil.ReadAll(r.Body) + if err != nil { + log.Printf("\tread error: %v", err) + } else { + log.Printf("\tdata:\n%s\n\n", data) + } + r.Body = ioutil.NopCloser(bytes.NewBuffer(data)) + } + + err := Error{} + // TODO return error if Unmarshal fails? + xml.NewDecoder(r.Body).Decode(&err) + r.Body.Close() + err.StatusCode = r.StatusCode + if err.Message == "" { + err.Message = r.Status + } + if client.debug { + log.Printf("err: %#v\n", err) + } + return &err +} + +type TimeoutError interface { + error + Timeout() bool // Is the error a timeout? +} + +func shouldRetry(err error) bool { + if err == nil { + return false + } + + _, ok := err.(TimeoutError) + if ok { + return true + } + + switch err { + case io.ErrUnexpectedEOF, io.EOF: + return true + } + switch e := err.(type) { + case *net.DNSError: + return true + case *net.OpError: + switch e.Op { + case "read", "write": + return true + } + case *url.Error: + // url.Error can be returned either by net/url if a URL cannot be + // parsed, or by net/http if the response is closed before the headers + // are received or parsed correctly. In that later case, e.Op is set to + // the HTTP method name with the first letter uppercased. We don't want + // to retry on POST operations, since those are not idempotent, all the + // other ones should be safe to retry. 
+ switch e.Op { + case "Get", "Put", "Delete", "Head": + return shouldRetry(e.Err) + default: + return false + } + case *Error: + switch e.Code { + case "InternalError", "NoSuchUpload", "NoSuchBucket": + return true + } + } + return false +} + +func hasCode(err error, code string) bool { + e, ok := err.(*Error) + return ok && e.Code == code +} + +func copyHeader(header http.Header) (newHeader http.Header) { + newHeader = make(http.Header) + for k, v := range header { + newSlice := make([]string, len(v)) + copy(newSlice, v) + newHeader[k] = newSlice + } + return +} + +type AccessControlPolicy struct { + Owner Owner + Grants []string `xml:"AccessControlList>Grant"` +} + +// ACL returns ACL of bucket +// +// You can read doc at http://docs.aliyun.com/#/pub/oss/api-reference/bucket&GetBucketAcl +func (b *Bucket) ACL() (result *AccessControlPolicy, err error) { + + params := make(url.Values) + params.Set("acl", "") + + r, err := b.GetWithParams("/", params) + if err != nil { + return nil, err + } + + // Parse the XML response. + var resp AccessControlPolicy + if err = xml.Unmarshal(r, &resp); err != nil { + return nil, err + } + + return &resp, nil +} + +func (b *Bucket) GetContentLength(sourcePath string) (int64, error) { + resp, err := b.Head(sourcePath, nil) + if err != nil { + return 0, err + } + + currentLength := resp.ContentLength + + return currentLength, err +} + +func (b *Bucket) CopyLargeFile(sourcePath string, destPath string, contentType string, perm ACL, options Options) error { + return b.CopyLargeFileInParallel(sourcePath, destPath, contentType, perm, options, 1) +} + +const defaultChunkSize = int64(128 * 1024 * 1024) //128MB +const maxCopytSize = int64(128 * 1024 * 1024) //128MB + +// Copy large file in the same bucket +func (b *Bucket) CopyLargeFileInParallel(sourcePath string, destPath string, contentType string, perm ACL, options Options, maxConcurrency int) error { + + if maxConcurrency < 1 { + maxConcurrency = 1 + } + + currentLength, err := b.GetContentLength(sourcePath) + + log.Printf("Parallel Copy large file[size: %d] from %s to %s\n",currentLength, sourcePath, destPath) + + if err != nil { + return err + } + + if currentLength < maxCopytSize { + _, err := b.PutCopy(destPath, perm, + CopyOptions{}, + b.Path(sourcePath)) + return err + } + + multi, err := b.InitMulti(destPath, contentType, perm, options) + if err != nil { + return err + } + + numParts := (currentLength + defaultChunkSize - 1) / defaultChunkSize + completedParts := make([]Part, numParts) + + errChan := make(chan error, numParts) + limiter := make(chan struct{}, maxConcurrency) + + var start int64 = 0 + var to int64 = 0 + var partNumber = 0 + sourcePathForCopy := b.Path(sourcePath) + + for start = 0; start < currentLength; start = to { + to = start + defaultChunkSize + if to > currentLength { + to = currentLength + } + partNumber++ + + rangeStr := fmt.Sprintf("bytes=%d-%d", start, to-1) + limiter <- struct{}{} + go func(partNumber int, rangeStr string) { + _, part, err := multi.PutPartCopyWithContentLength(partNumber, + CopyOptions{CopySourceOptions: rangeStr}, + sourcePathForCopy, currentLength) + if err == nil { + completedParts[partNumber-1] = part + } else { + log.Printf("Unable in PutPartCopy of part %d for %s: %v\n", partNumber, sourcePathForCopy, err) + } + errChan <- err + <-limiter + }(partNumber, rangeStr) + } + + fullyCompleted := true + for range completedParts { + err := <-errChan + if err != nil { + fullyCompleted = false + } + } + + if fullyCompleted { + err = multi.Complete(completedParts) 
+ } else { + err = multi.Abort() + } + + return err +} diff --git a/vendor/github.com/docker/distribution/vendor/github.com/denverdino/aliyungo/oss/export.go b/vendor/github.com/docker/distribution/vendor/github.com/denverdino/aliyungo/oss/export.go new file mode 100644 index 000000000..ebdb0477a --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/github.com/denverdino/aliyungo/oss/export.go @@ -0,0 +1,23 @@ +package oss + +import ( + "github.com/denverdino/aliyungo/util" +) + +var originalStrategy = attempts + +func SetAttemptStrategy(s *util.AttemptStrategy) { + if s == nil { + attempts = originalStrategy + } else { + attempts = *s + } +} + +func SetListPartsMax(n int) { + listPartsMax = n +} + +func SetListMultiMax(n int) { + listMultiMax = n +} diff --git a/vendor/github.com/docker/distribution/vendor/github.com/denverdino/aliyungo/oss/multi.go b/vendor/github.com/docker/distribution/vendor/github.com/denverdino/aliyungo/oss/multi.go new file mode 100644 index 000000000..d720e1884 --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/github.com/denverdino/aliyungo/oss/multi.go @@ -0,0 +1,489 @@ +package oss + +import ( + "bytes" + "crypto/md5" + "encoding/base64" + "encoding/hex" + "encoding/xml" + "errors" + "io" + "time" + //"log" + "net/http" + "net/url" + "sort" + "strconv" + "strings" +) + +// Multi represents an unfinished multipart upload. +// +// Multipart uploads allow sending big objects in smaller chunks. +// After all parts have been sent, the upload must be explicitly +// completed by calling Complete with the list of parts. + +type Multi struct { + Bucket *Bucket + Key string + UploadId string +} + +// That's the default. Here just for testing. +var listMultiMax = 1000 + +type listMultiResp struct { + NextKeyMarker string + NextUploadIdMarker string + IsTruncated bool + Upload []Multi + CommonPrefixes []string `xml:"CommonPrefixes>Prefix"` +} + +// ListMulti returns the list of unfinished multipart uploads in b. +// +// The prefix parameter limits the response to keys that begin with the +// specified prefix. You can use prefixes to separate a bucket into different +// groupings of keys (to get the feeling of folders, for example). +// +// The delim parameter causes the response to group all of the keys that +// share a common prefix up to the next delimiter in a single entry within +// the CommonPrefixes field. You can use delimiters to separate a bucket +// into different groupings of keys, similar to how folders would work. +// +func (b *Bucket) ListMulti(prefix, delim string) (multis []*Multi, prefixes []string, err error) { + params := make(url.Values) + params.Set("uploads", "") + params.Set("max-uploads", strconv.FormatInt(int64(listMultiMax), 10)) + params.Set("prefix", prefix) + params.Set("delimiter", delim) + + for attempt := attempts.Start(); attempt.Next(); { + req := &request{ + method: "GET", + bucket: b.Name, + params: params, + } + var resp listMultiResp + err := b.Client.query(req, &resp) + if shouldRetry(err) && attempt.HasNext() { + continue + } + if err != nil { + return nil, nil, err + } + for i := range resp.Upload { + multi := &resp.Upload[i] + multi.Bucket = b + multis = append(multis, multi) + } + prefixes = append(prefixes, resp.CommonPrefixes...) + if !resp.IsTruncated { + return multis, prefixes, nil + } + params.Set("key-marker", resp.NextKeyMarker) + params.Set("upload-id-marker", resp.NextUploadIdMarker) + attempt = attempts.Start() // Last request worked. 
+ } + panic("unreachable") +} + +// Multi returns a multipart upload handler for the provided key +// inside b. If a multipart upload exists for key, it is returned, +// otherwise a new multipart upload is initiated with contType and perm. +func (b *Bucket) Multi(key, contType string, perm ACL, options Options) (*Multi, error) { + multis, _, err := b.ListMulti(key, "") + if err != nil && !hasCode(err, "NoSuchUpload") { + return nil, err + } + for _, m := range multis { + if m.Key == key { + return m, nil + } + } + return b.InitMulti(key, contType, perm, options) +} + +// InitMulti initializes a new multipart upload at the provided +// key inside b and returns a value for manipulating it. +// +// +// You can read doc at http://docs.aliyun.com/#/pub/oss/api-reference/multipart-upload&InitiateMultipartUpload +func (b *Bucket) InitMulti(key string, contType string, perm ACL, options Options) (*Multi, error) { + headers := make(http.Header) + headers.Set("Content-Length", "0") + headers.Set("Content-Type", contType) + headers.Set("x-oss-acl", string(perm)) + + options.addHeaders(headers) + params := make(url.Values) + params.Set("uploads", "") + req := &request{ + method: "POST", + bucket: b.Name, + path: key, + headers: headers, + params: params, + } + var err error + var resp struct { + UploadId string `xml:"UploadId"` + } + for attempt := attempts.Start(); attempt.Next(); { + err = b.Client.query(req, &resp) + if !shouldRetry(err) { + break + } + } + if err != nil { + return nil, err + } + return &Multi{Bucket: b, Key: key, UploadId: resp.UploadId}, nil +} + +func (m *Multi) PutPartCopy(n int, options CopyOptions, source string) (*CopyObjectResult, Part, error) { + return m.PutPartCopyWithContentLength(n, options, source, -1) +} + +// +// You can read doc at http://docs.aliyun.com/#/pub/oss/api-reference/multipart-upload&UploadPartCopy +func (m *Multi) PutPartCopyWithContentLength(n int, options CopyOptions, source string, contentLength int64) (*CopyObjectResult, Part, error) { + // TODO source format a /BUCKET/PATH/TO/OBJECT + // TODO not a good design. API could be changed to PutPartCopyWithinBucket(..., path) and PutPartCopyFromBucket(bucket, path) + + headers := make(http.Header) + headers.Set("x-oss-copy-source", source) + + options.addHeaders(headers) + params := make(url.Values) + params.Set("uploadId", m.UploadId) + params.Set("partNumber", strconv.FormatInt(int64(n), 10)) + + if contentLength < 0 { + sourceBucket := m.Bucket.Client.Bucket(strings.TrimRight(strings.Split(source, "/")[1], "/")) + //log.Println("source: ", source) + //log.Println("sourceBucket: ", sourceBucket.Name) + //log.Println("HEAD: ", strings.strings.SplitAfterN(source, "/", 3)[2]) + // TODO SplitAfterN can be use in bucket name + sourceMeta, err := sourceBucket.Head(strings.SplitAfterN(source, "/", 3)[2], nil) + if err != nil { + return nil, Part{}, err + } + contentLength = sourceMeta.ContentLength + } + + for attempt := attempts.Start(); attempt.Next(); { + req := &request{ + method: "PUT", + bucket: m.Bucket.Name, + path: m.Key, + headers: headers, + params: params, + } + resp := &CopyObjectResult{} + err := m.Bucket.Client.query(req, resp) + if shouldRetry(err) && attempt.HasNext() { + continue + } + if err != nil { + return nil, Part{}, err + } + if resp.ETag == "" { + return nil, Part{}, errors.New("part upload succeeded with no ETag") + } + return resp, Part{n, resp.ETag, contentLength}, nil + } + panic("unreachable") +} + +// PutPart sends part n of the multipart upload, reading all the content from r. 
+// Each part, except for the last one, must be at least 5MB in size. +// +// +// You can read doc at http://docs.aliyun.com/#/pub/oss/api-reference/multipart-upload&UploadPart +func (m *Multi) PutPart(n int, r io.ReadSeeker) (Part, error) { + partSize, _, md5b64, err := seekerInfo(r) + if err != nil { + return Part{}, err + } + return m.putPart(n, r, partSize, md5b64, 0) +} + +func (m *Multi) PutPartWithTimeout(n int, r io.ReadSeeker, timeout time.Duration) (Part, error) { + partSize, _, md5b64, err := seekerInfo(r) + if err != nil { + return Part{}, err + } + return m.putPart(n, r, partSize, md5b64, timeout) +} + +func (m *Multi) putPart(n int, r io.ReadSeeker, partSize int64, md5b64 string, timeout time.Duration) (Part, error) { + headers := make(http.Header) + headers.Set("Content-Length", strconv.FormatInt(partSize, 10)) + headers.Set("Content-MD5", md5b64) + + params := make(url.Values) + params.Set("uploadId", m.UploadId) + params.Set("partNumber", strconv.FormatInt(int64(n), 10)) + + for attempt := attempts.Start(); attempt.Next(); { + _, err := r.Seek(0, 0) + if err != nil { + return Part{}, err + } + req := &request{ + method: "PUT", + bucket: m.Bucket.Name, + path: m.Key, + headers: headers, + params: params, + payload: r, + timeout: timeout, + } + err = m.Bucket.Client.prepare(req) + if err != nil { + return Part{}, err + } + resp, err := m.Bucket.Client.run(req, nil) + if shouldRetry(err) && attempt.HasNext() { + continue + } + if err != nil { + return Part{}, err + } + etag := resp.Header.Get("ETag") + if etag == "" { + return Part{}, errors.New("part upload succeeded with no ETag") + } + return Part{n, etag, partSize}, nil + } + panic("unreachable") +} + +func seekerInfo(r io.ReadSeeker) (size int64, md5hex string, md5b64 string, err error) { + _, err = r.Seek(0, 0) + if err != nil { + return 0, "", "", err + } + digest := md5.New() + size, err = io.Copy(digest, r) + if err != nil { + return 0, "", "", err + } + sum := digest.Sum(nil) + md5hex = hex.EncodeToString(sum) + md5b64 = base64.StdEncoding.EncodeToString(sum) + return size, md5hex, md5b64, nil +} + +type Part struct { + N int `xml:"PartNumber"` + ETag string + Size int64 +} + +type partSlice []Part + +func (s partSlice) Len() int { return len(s) } +func (s partSlice) Less(i, j int) bool { return s[i].N < s[j].N } +func (s partSlice) Swap(i, j int) { s[i], s[j] = s[j], s[i] } + +type listPartsResp struct { + NextPartNumberMarker string + IsTruncated bool + Part []Part +} + +// That's the default. Here just for testing. +var listPartsMax = 1000 + +// ListParts for backcompatability. See the documentation for ListPartsFull +func (m *Multi) ListParts() ([]Part, error) { + return m.ListPartsFull(0, listPartsMax) +} + +// ListPartsFull returns the list of previously uploaded parts in m, +// ordered by part number (Only parts with higher part numbers than +// partNumberMarker will be listed). Only up to maxParts parts will be +// returned. 
+// +func (m *Multi) ListPartsFull(partNumberMarker int, maxParts int) ([]Part, error) { + if maxParts > listPartsMax { + maxParts = listPartsMax + } + + params := make(url.Values) + params.Set("uploadId", m.UploadId) + params.Set("max-parts", strconv.FormatInt(int64(maxParts), 10)) + params.Set("part-number-marker", strconv.FormatInt(int64(partNumberMarker), 10)) + + var parts partSlice + for attempt := attempts.Start(); attempt.Next(); { + req := &request{ + method: "GET", + bucket: m.Bucket.Name, + path: m.Key, + params: params, + } + var resp listPartsResp + err := m.Bucket.Client.query(req, &resp) + if shouldRetry(err) && attempt.HasNext() { + continue + } + if err != nil { + return nil, err + } + parts = append(parts, resp.Part...) + if !resp.IsTruncated { + sort.Sort(parts) + return parts, nil + } + params.Set("part-number-marker", resp.NextPartNumberMarker) + attempt = attempts.Start() // Last request worked. + } + panic("unreachable") +} + +type ReaderAtSeeker interface { + io.ReaderAt + io.ReadSeeker +} + +// PutAll sends all of r via a multipart upload with parts no larger +// than partSize bytes, which must be set to at least 5MB. +// Parts previously uploaded are either reused if their checksum +// and size match the new part, or otherwise overwritten with the +// new content. +// PutAll returns all the parts of m (reused or not). +func (m *Multi) PutAll(r ReaderAtSeeker, partSize int64) ([]Part, error) { + old, err := m.ListParts() + if err != nil && !hasCode(err, "NoSuchUpload") { + return nil, err + } + reuse := 0 // Index of next old part to consider reusing. + current := 1 // Part number of latest good part handled. + totalSize, err := r.Seek(0, 2) + if err != nil { + return nil, err + } + first := true // Must send at least one empty part if the file is empty. + var result []Part +NextSection: + for offset := int64(0); offset < totalSize || first; offset += partSize { + first = false + if offset+partSize > totalSize { + partSize = totalSize - offset + } + section := io.NewSectionReader(r, offset, partSize) + _, md5hex, md5b64, err := seekerInfo(section) + if err != nil { + return nil, err + } + for reuse < len(old) && old[reuse].N <= current { + // Looks like this part was already sent. + part := &old[reuse] + etag := `"` + md5hex + `"` + if part.N == current && part.Size == partSize && part.ETag == etag { + // Checksum matches. Reuse the old part. + result = append(result, *part) + current++ + continue NextSection + } + reuse++ + } + + // Part wasn't found or doesn't match. Send it. + part, err := m.putPart(current, section, partSize, md5b64, 0) + if err != nil { + return nil, err + } + result = append(result, part) + current++ + } + return result, nil +} + +type completeUpload struct { + XMLName xml.Name `xml:"CompleteMultipartUpload"` + Parts completeParts `xml:"Part"` +} + +type completePart struct { + PartNumber int + ETag string +} + +type completeParts []completePart + +func (p completeParts) Len() int { return len(p) } +func (p completeParts) Less(i, j int) bool { return p[i].PartNumber < p[j].PartNumber } +func (p completeParts) Swap(i, j int) { p[i], p[j] = p[j], p[i] } + +// Complete assembles the given previously uploaded parts into the +// final object. This operation may take several minutes. 
+// +func (m *Multi) Complete(parts []Part) error { + params := make(url.Values) + params.Set("uploadId", m.UploadId) + + c := completeUpload{} + for _, p := range parts { + c.Parts = append(c.Parts, completePart{p.N, p.ETag}) + } + sort.Sort(c.Parts) + data, err := xml.Marshal(&c) + if err != nil { + return err + } + for attempt := attempts.Start(); attempt.Next(); { + req := &request{ + method: "POST", + bucket: m.Bucket.Name, + path: m.Key, + params: params, + payload: bytes.NewReader(data), + } + err := m.Bucket.Client.query(req, nil) + if shouldRetry(err) && attempt.HasNext() { + continue + } + return err + } + panic("unreachable") +} + +// Abort deletes an unifinished multipart upload and any previously +// uploaded parts for it. +// +// After a multipart upload is aborted, no additional parts can be +// uploaded using it. However, if any part uploads are currently in +// progress, those part uploads might or might not succeed. As a result, +// it might be necessary to abort a given multipart upload multiple +// times in order to completely free all storage consumed by all parts. +// +// NOTE: If the described scenario happens to you, please report back to +// the goamz authors with details. In the future such retrying should be +// handled internally, but it's not clear what happens precisely (Is an +// error returned? Is the issue completely undetectable?). +// +// +// You can read doc at http://docs.aliyun.com/#/pub/oss/api-reference/multipart-upload&AbortMultipartUpload +func (m *Multi) Abort() error { + params := make(url.Values) + params.Set("uploadId", m.UploadId) + + for attempt := attempts.Start(); attempt.Next(); { + req := &request{ + method: "DELETE", + bucket: m.Bucket.Name, + path: m.Key, + params: params, + } + err := m.Bucket.Client.query(req, nil) + if shouldRetry(err) && attempt.HasNext() { + continue + } + return err + } + panic("unreachable") +} diff --git a/vendor/github.com/docker/distribution/vendor/github.com/denverdino/aliyungo/oss/regions.go b/vendor/github.com/docker/distribution/vendor/github.com/denverdino/aliyungo/oss/regions.go new file mode 100644 index 000000000..0f2506043 --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/github.com/denverdino/aliyungo/oss/regions.go @@ -0,0 +1,70 @@ +package oss + +import ( + "fmt" +) + +// Region represents OSS region +type Region string + +// Constants of region definition +const ( + Hangzhou = Region("oss-cn-hangzhou") + Qingdao = Region("oss-cn-qingdao") + Beijing = Region("oss-cn-beijing") + Hongkong = Region("oss-cn-hongkong") + Shenzhen = Region("oss-cn-shenzhen") + USWest1 = Region("oss-us-west-1") + USEast1 = Region("oss-us-east-1") + APSouthEast1 = Region("oss-ap-southeast-1") + Shanghai = Region("oss-cn-shanghai") + + DefaultRegion = Hangzhou +) + +// GetEndpoint returns endpoint of region +func (r Region) GetEndpoint(internal bool, bucket string, secure bool) string { + if internal { + return r.GetInternalEndpoint(bucket, secure) + } + return r.GetInternetEndpoint(bucket, secure) +} + +func getProtocol(secure bool) string { + protocol := "http" + if secure { + protocol = "https" + } + return protocol +} + +// GetInternetEndpoint returns internet endpoint of region +func (r Region) GetInternetEndpoint(bucket string, secure bool) string { + protocol := getProtocol(secure) + if bucket == "" { + return fmt.Sprintf("%s://oss.aliyuncs.com", protocol) + } + return fmt.Sprintf("%s://%s.%s.aliyuncs.com", protocol, bucket, string(r)) +} + +// GetInternalEndpoint returns internal endpoint of region 
+func (r Region) GetInternalEndpoint(bucket string, secure bool) string { + protocol := getProtocol(secure) + if bucket == "" { + return fmt.Sprintf("%s://oss-internal.aliyuncs.com", protocol) + } + return fmt.Sprintf("%s://%s.%s-internal.aliyuncs.com", protocol, bucket, string(r)) +} + +// GetInternalEndpoint returns internal endpoint of region +func (r Region) GetVPCInternalEndpoint(bucket string, secure bool) string { + protocol := getProtocol(secure) + if bucket == "" { + return fmt.Sprintf("%s://vpc100-oss-cn-hangzhou.aliyuncs.com", protocol) + } + if r == USEast1 { + return r.GetInternalEndpoint(bucket, secure) + } else { + return fmt.Sprintf("%s://%s.vpc100-%s.aliyuncs.com", protocol, bucket, string(r)) + } +} diff --git a/vendor/github.com/docker/distribution/vendor/github.com/denverdino/aliyungo/oss/signature.go b/vendor/github.com/docker/distribution/vendor/github.com/denverdino/aliyungo/oss/signature.go new file mode 100644 index 000000000..12677175d --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/github.com/denverdino/aliyungo/oss/signature.go @@ -0,0 +1,107 @@ +package oss + +import ( + "github.com/denverdino/aliyungo/util" + //"log" + "net/http" + "net/url" + "sort" + "strings" +) + +const HeaderOSSPrefix = "x-oss-" + +var ossParamsToSign = map[string]bool{ + "acl": true, + "delete": true, + "location": true, + "logging": true, + "notification": true, + "partNumber": true, + "policy": true, + "requestPayment": true, + "torrent": true, + "uploadId": true, + "uploads": true, + "versionId": true, + "versioning": true, + "versions": true, + "response-content-type": true, + "response-content-language": true, + "response-expires": true, + "response-cache-control": true, + "response-content-disposition": true, + "response-content-encoding": true, + "bucketInfo": true, +} + +func (client *Client) signRequest(request *request) { + query := request.params + + urlSignature := query.Get("OSSAccessKeyId") != "" + + headers := request.headers + contentMd5 := headers.Get("Content-Md5") + contentType := headers.Get("Content-Type") + date := "" + if urlSignature { + date = query.Get("Expires") + } else { + date = headers.Get("Date") + } + + resource := request.path + if request.bucket != "" { + resource = "/" + request.bucket + request.path + } + params := make(url.Values) + for k, v := range query { + if ossParamsToSign[k] { + params[k] = v + } + } + + if len(params) > 0 { + resource = resource + "?" + util.Encode(params) + } + + canonicalizedResource := resource + + _, canonicalizedHeader := canonicalizeHeader(headers) + + stringToSign := request.method + "\n" + contentMd5 + "\n" + contentType + "\n" + date + "\n" + canonicalizedHeader + canonicalizedResource + + //log.Println("stringToSign: ", stringToSign) + signature := util.CreateSignature(stringToSign, client.AccessKeySecret) + + if query.Get("OSSAccessKeyId") != "" { + query.Set("Signature", signature) + } else { + headers.Set("Authorization", "OSS "+client.AccessKeyId+":"+signature) + } +} + +//Have to break the abstraction to append keys with lower case. 
+func canonicalizeHeader(headers http.Header) (newHeaders http.Header, result string) { + var canonicalizedHeaders []string + newHeaders = http.Header{} + + for k, v := range headers { + if lower := strings.ToLower(k); strings.HasPrefix(lower, HeaderOSSPrefix) { + newHeaders[lower] = v + canonicalizedHeaders = append(canonicalizedHeaders, lower) + } else { + newHeaders[k] = v + } + } + + sort.Strings(canonicalizedHeaders) + + var canonicalizedHeader string + + for _, k := range canonicalizedHeaders { + canonicalizedHeader += k + ":" + headers.Get(k) + "\n" + } + + return newHeaders, canonicalizedHeader +} diff --git a/vendor/github.com/docker/distribution/vendor/github.com/denverdino/aliyungo/util/attempt.go b/vendor/github.com/docker/distribution/vendor/github.com/denverdino/aliyungo/util/attempt.go new file mode 100644 index 000000000..2d07f03a8 --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/github.com/denverdino/aliyungo/util/attempt.go @@ -0,0 +1,76 @@ +package util + +import ( + "time" +) + +// AttemptStrategy is reused from the goamz package + +// AttemptStrategy represents a strategy for waiting for an action +// to complete successfully. This is an internal type used by the +// implementation of other packages. +type AttemptStrategy struct { + Total time.Duration // total duration of attempt. + Delay time.Duration // interval between each try in the burst. + Min int // minimum number of retries; overrides Total +} + +type Attempt struct { + strategy AttemptStrategy + last time.Time + end time.Time + force bool + count int +} + +// Start begins a new sequence of attempts for the given strategy. +func (s AttemptStrategy) Start() *Attempt { + now := time.Now() + return &Attempt{ + strategy: s, + last: now, + end: now.Add(s.Total), + force: true, + } +} + +// Next waits until it is time to perform the next attempt or returns +// false if it is time to stop trying. +func (a *Attempt) Next() bool { + now := time.Now() + sleep := a.nextSleep(now) + if !a.force && !now.Add(sleep).Before(a.end) && a.strategy.Min <= a.count { + return false + } + a.force = false + if sleep > 0 && a.count > 0 { + time.Sleep(sleep) + now = time.Now() + } + a.count++ + a.last = now + return true +} + +func (a *Attempt) nextSleep(now time.Time) time.Duration { + sleep := a.strategy.Delay - now.Sub(a.last) + if sleep < 0 { + return 0 + } + return sleep +} + +// HasNext returns whether another attempt will be made if the current +// one fails. If it returns true, the following call to Next is +// guaranteed to return true. 
+func (a *Attempt) HasNext() bool { + if a.force || a.strategy.Min > a.count { + return true + } + now := time.Now() + if now.Add(a.nextSleep(now)).Before(a.end) { + a.force = true + return true + } + return false +} diff --git a/vendor/github.com/docker/distribution/vendor/github.com/denverdino/aliyungo/util/encoding.go b/vendor/github.com/docker/distribution/vendor/github.com/denverdino/aliyungo/util/encoding.go new file mode 100644 index 000000000..e545e069d --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/github.com/denverdino/aliyungo/util/encoding.go @@ -0,0 +1,152 @@ +package util + +import ( + "encoding/json" + "fmt" + "log" + "net/url" + "reflect" + "strconv" + "time" +) + +//ConvertToQueryValues converts the struct to url.Values +func ConvertToQueryValues(ifc interface{}) url.Values { + values := url.Values{} + SetQueryValues(ifc, &values) + return values +} + +//SetQueryValues sets the struct to existing url.Values following ECS encoding rules +func SetQueryValues(ifc interface{}, values *url.Values) { + setQueryValues(ifc, values, "") +} + +func setQueryValues(i interface{}, values *url.Values, prefix string) { + // add to support url.Values + mapValues, ok := i.(url.Values) + if ok { + for k, _ := range mapValues { + values.Set(k, mapValues.Get(k)) + } + return + } + + elem := reflect.ValueOf(i) + if elem.Kind() == reflect.Ptr { + elem = elem.Elem() + } + elemType := elem.Type() + for i := 0; i < elem.NumField(); i++ { + + fieldName := elemType.Field(i).Name + anonymous := elemType.Field(i).Anonymous + field := elem.Field(i) + // TODO Use Tag for validation + // tag := typ.Field(i).Tag.Get("tagname") + kind := field.Kind() + if (kind == reflect.Ptr || kind == reflect.Array || kind == reflect.Slice || kind == reflect.Map || kind == reflect.Chan) && field.IsNil() { + continue + } + if kind == reflect.Ptr { + field = field.Elem() + kind = field.Kind() + } + var value string + //switch field.Interface().(type) { + switch kind { + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + i := field.Int() + if i != 0 { + value = strconv.FormatInt(i, 10) + } + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: + i := field.Uint() + if i != 0 { + value = strconv.FormatUint(i, 10) + } + case reflect.Float32: + value = strconv.FormatFloat(field.Float(), 'f', 4, 32) + case reflect.Float64: + value = strconv.FormatFloat(field.Float(), 'f', 4, 64) + case reflect.Bool: + value = strconv.FormatBool(field.Bool()) + case reflect.String: + value = field.String() + case reflect.Map: + ifc := field.Interface() + m := ifc.(map[string]string) + if m != nil { + j := 0 + for k, v := range m { + j++ + keyName := fmt.Sprintf("%s.%d.Key", fieldName, j) + values.Set(keyName, k) + valueName := fmt.Sprintf("%s.%d.Value", fieldName, j) + values.Set(valueName, v) + } + } + case reflect.Slice: + switch field.Type().Elem().Kind() { + case reflect.Uint8: + value = string(field.Bytes()) + case reflect.String: + l := field.Len() + if l > 0 { + strArray := make([]string, l) + for i := 0; i < l; i++ { + strArray[i] = field.Index(i).String() + } + bytes, err := json.Marshal(strArray) + if err == nil { + value = string(bytes) + } else { + log.Printf("Failed to convert JSON: %v", err) + } + } + default: + l := field.Len() + for j := 0; j < l; j++ { + prefixName := fmt.Sprintf("%s.%d.", fieldName, (j + 1)) + ifc := field.Index(j).Interface() + //log.Printf("%s : %v", prefixName, ifc) + if ifc != nil { + setQueryValues(ifc, values, prefixName) + } + } + 
continue + } + + default: + switch field.Interface().(type) { + case ISO6801Time: + t := field.Interface().(ISO6801Time) + value = t.String() + case time.Time: + t := field.Interface().(time.Time) + value = GetISO8601TimeStamp(t) + default: + ifc := field.Interface() + if ifc != nil { + if anonymous { + SetQueryValues(ifc, values) + } else { + prefixName := fieldName + "." + setQueryValues(ifc, values, prefixName) + } + continue + } + } + } + if value != "" { + name := elemType.Field(i).Tag.Get("ArgName") + if name == "" { + name = fieldName + } + if prefix != "" { + name = prefix + name + } + values.Set(name, value) + } + } +} diff --git a/vendor/github.com/docker/distribution/vendor/github.com/denverdino/aliyungo/util/iso6801.go b/vendor/github.com/docker/distribution/vendor/github.com/denverdino/aliyungo/util/iso6801.go new file mode 100644 index 000000000..9c25e8f68 --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/github.com/denverdino/aliyungo/util/iso6801.go @@ -0,0 +1,80 @@ +package util + +import ( + "fmt" + "strconv" + "time" +) + +// GetISO8601TimeStamp gets timestamp string in ISO8601 format +func GetISO8601TimeStamp(ts time.Time) string { + t := ts.UTC() + return fmt.Sprintf("%04d-%02d-%02dT%02d:%02d:%02dZ", t.Year(), t.Month(), t.Day(), t.Hour(), t.Minute(), t.Second()) +} + +const formatISO8601 = "2006-01-02T15:04:05Z" +const jsonFormatISO8601 = `"` + formatISO8601 + `"` +const formatISO8601withoutSeconds = "2006-01-02T15:04Z" +const jsonFormatISO8601withoutSeconds = `"` + formatISO8601withoutSeconds + `"` + +// A ISO6801Time represents a time in ISO8601 format +type ISO6801Time time.Time + +// New constructs a new iso8601.Time instance from an existing +// time.Time instance. This causes the nanosecond field to be set to +// 0, and its time zone set to a fixed zone with no offset from UTC +// (but it is *not* UTC itself). 
+func NewISO6801Time(t time.Time) ISO6801Time { + return ISO6801Time(time.Date( + t.Year(), + t.Month(), + t.Day(), + t.Hour(), + t.Minute(), + t.Second(), + 0, + time.UTC, + )) +} + +// IsDefault checks if the time is default +func (it *ISO6801Time) IsDefault() bool { + return *it == ISO6801Time{} +} + +// MarshalJSON serializes the ISO6801Time into JSON string +func (it ISO6801Time) MarshalJSON() ([]byte, error) { + return []byte(time.Time(it).Format(jsonFormatISO8601)), nil +} + +// UnmarshalJSON deserializes the ISO6801Time from JSON string +func (it *ISO6801Time) UnmarshalJSON(data []byte) error { + str := string(data) + + if str == "\"\"" || len(data) == 0 { + return nil + } + var t time.Time + var err error + if str[0] == '"' { + t, err = time.ParseInLocation(jsonFormatISO8601, str, time.UTC) + if err != nil { + t, err = time.ParseInLocation(jsonFormatISO8601withoutSeconds, str, time.UTC) + } + } else { + var i int64 + i, err = strconv.ParseInt(str, 10, 64) + if err == nil { + t = time.Unix(i/1000, i%1000) + } + } + if err == nil { + *it = ISO6801Time(t) + } + return err +} + +// String returns the time in ISO6801Time format +func (it ISO6801Time) String() string { + return time.Time(it).Format(formatISO8601) +} diff --git a/vendor/github.com/docker/distribution/vendor/github.com/denverdino/aliyungo/util/signature.go b/vendor/github.com/docker/distribution/vendor/github.com/denverdino/aliyungo/util/signature.go new file mode 100644 index 000000000..a00b27c19 --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/github.com/denverdino/aliyungo/util/signature.go @@ -0,0 +1,40 @@ +package util + +import ( + "crypto/hmac" + "crypto/sha1" + "encoding/base64" + "net/url" + "strings" +) + +//CreateSignature creates signature for string following Aliyun rules +func CreateSignature(stringToSignature, accessKeySecret string) string { + // Crypto by HMAC-SHA1 + hmacSha1 := hmac.New(sha1.New, []byte(accessKeySecret)) + hmacSha1.Write([]byte(stringToSignature)) + sign := hmacSha1.Sum(nil) + + // Encode to Base64 + base64Sign := base64.StdEncoding.EncodeToString(sign) + + return base64Sign +} + +func percentReplace(str string) string { + str = strings.Replace(str, "+", "%20", -1) + str = strings.Replace(str, "*", "%2A", -1) + str = strings.Replace(str, "%7E", "~", -1) + + return str +} + +// CreateSignatureForRequest creates signature for query string values +func CreateSignatureForRequest(method string, values *url.Values, accessKeySecret string) string { + + canonicalizedQueryString := percentReplace(values.Encode()) + + stringToSign := method + "&%2F&" + url.QueryEscape(canonicalizedQueryString) + + return CreateSignature(stringToSign, accessKeySecret) +} diff --git a/vendor/github.com/docker/distribution/vendor/github.com/denverdino/aliyungo/util/util.go b/vendor/github.com/docker/distribution/vendor/github.com/denverdino/aliyungo/util/util.go new file mode 100644 index 000000000..dd68214e3 --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/github.com/denverdino/aliyungo/util/util.go @@ -0,0 +1,147 @@ +package util + +import ( + "bytes" + srand "crypto/rand" + "encoding/binary" + "math/rand" + "net/http" + "net/url" + "sort" + "time" +) + +const dictionary = "_0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz" + +//CreateRandomString create random string +func CreateRandomString() string { + b := make([]byte, 32) + l := len(dictionary) + + _, err := srand.Read(b) + + if err != nil { + // fail back to insecure rand + rand.Seed(time.Now().UnixNano()) + for i 
:= range b { + b[i] = dictionary[rand.Int()%l] + } + } else { + for i, v := range b { + b[i] = dictionary[v%byte(l)] + } + } + + return string(b) +} + +// Encode encodes the values into ``URL encoded'' form +// ("acl&bar=baz&foo=quux") sorted by key. +func Encode(v url.Values) string { + if v == nil { + return "" + } + var buf bytes.Buffer + keys := make([]string, 0, len(v)) + for k := range v { + keys = append(keys, k) + } + sort.Strings(keys) + for _, k := range keys { + vs := v[k] + prefix := url.QueryEscape(k) + for _, v := range vs { + if buf.Len() > 0 { + buf.WriteByte('&') + } + buf.WriteString(prefix) + if v != "" { + buf.WriteString("=") + buf.WriteString(url.QueryEscape(v)) + } + } + } + return buf.String() +} + +func GetGMTime() string { + return time.Now().UTC().Format(http.TimeFormat) +} + +// + +func randUint32() uint32 { + return randUint32Slice(1)[0] +} + +func randUint32Slice(c int) []uint32 { + b := make([]byte, c*4) + + _, err := srand.Read(b) + + if err != nil { + // fail back to insecure rand + rand.Seed(time.Now().UnixNano()) + for i := range b { + b[i] = byte(rand.Int()) + } + } + + n := make([]uint32, c) + + for i := range n { + n[i] = binary.BigEndian.Uint32(b[i*4 : i*4+4]) + } + + return n +} + +func toByte(n uint32, st, ed byte) byte { + return byte(n%uint32(ed-st+1) + uint32(st)) +} + +func toDigit(n uint32) byte { + return toByte(n, '0', '9') +} + +func toLowerLetter(n uint32) byte { + return toByte(n, 'a', 'z') +} + +func toUpperLetter(n uint32) byte { + return toByte(n, 'A', 'Z') +} + +type convFunc func(uint32) byte + +var convFuncs = []convFunc{toDigit, toLowerLetter, toUpperLetter} + +// tools for generating a random ECS instance password +// from 8 to 30 char MUST contain digit upper, case letter and upper case letter +// http://docs.aliyun.com/#/pub/ecs/open-api/instance&createinstance +func GenerateRandomECSPassword() string { + + // [8, 30] + l := int(randUint32()%23 + 8) + + n := randUint32Slice(l) + + b := make([]byte, l) + + b[0] = toDigit(n[0]) + b[1] = toLowerLetter(n[1]) + b[2] = toUpperLetter(n[2]) + + for i := 3; i < l; i++ { + b[i] = convFuncs[n[i]%3](n[i]) + } + + s := make([]byte, l) + perm := rand.Perm(l) + for i, v := range perm { + s[v] = b[i] + } + + return string(s) + +} diff --git a/vendor/github.com/docker/distribution/vendor/github.com/docker/goamz/LICENSE b/vendor/github.com/docker/distribution/vendor/github.com/docker/goamz/LICENSE new file mode 100644 index 000000000..53320c352 --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/github.com/docker/goamz/LICENSE @@ -0,0 +1,185 @@ +This software is licensed under the LGPLv3, included below. + +As a special exception to the GNU Lesser General Public License version 3 +("LGPL3"), the copyright holders of this Library give you permission to +convey to a third party a Combined Work that links statically or dynamically +to this Library without providing any Minimal Corresponding Source or +Minimal Application Code as set out in 4d or providing the installation +information set out in section 4e, provided that you comply with the other +provisions of LGPL3 and provided that you meet, for the Application the +terms and conditions of the license(s) which apply to the Application. + +Except as stated in this special exception, the provisions of LGPL3 will +continue to comply in full to this Library. If you modify this Library, you +may apply this exception to your version of this Library, but you are not +obliged to do so. 
If you do not wish to do so, delete this exception +statement from your version. This exception does not (and cannot) modify any +license terms which apply to the Application, with which you must still +comply. + + + GNU LESSER GENERAL PUBLIC LICENSE + Version 3, 29 June 2007 + + Copyright (C) 2007 Free Software Foundation, Inc. + Everyone is permitted to copy and distribute verbatim copies + of this license document, but changing it is not allowed. + + + This version of the GNU Lesser General Public License incorporates +the terms and conditions of version 3 of the GNU General Public +License, supplemented by the additional permissions listed below. + + 0. Additional Definitions. + + As used herein, "this License" refers to version 3 of the GNU Lesser +General Public License, and the "GNU GPL" refers to version 3 of the GNU +General Public License. + + "The Library" refers to a covered work governed by this License, +other than an Application or a Combined Work as defined below. + + An "Application" is any work that makes use of an interface provided +by the Library, but which is not otherwise based on the Library. +Defining a subclass of a class defined by the Library is deemed a mode +of using an interface provided by the Library. + + A "Combined Work" is a work produced by combining or linking an +Application with the Library. The particular version of the Library +with which the Combined Work was made is also called the "Linked +Version". + + The "Minimal Corresponding Source" for a Combined Work means the +Corresponding Source for the Combined Work, excluding any source code +for portions of the Combined Work that, considered in isolation, are +based on the Application, and not on the Linked Version. + + The "Corresponding Application Code" for a Combined Work means the +object code and/or source code for the Application, including any data +and utility programs needed for reproducing the Combined Work from the +Application, but excluding the System Libraries of the Combined Work. + + 1. Exception to Section 3 of the GNU GPL. + + You may convey a covered work under sections 3 and 4 of this License +without being bound by section 3 of the GNU GPL. + + 2. Conveying Modified Versions. + + If you modify a copy of the Library, and, in your modifications, a +facility refers to a function or data to be supplied by an Application +that uses the facility (other than as an argument passed when the +facility is invoked), then you may convey a copy of the modified +version: + + a) under this License, provided that you make a good faith effort to + ensure that, in the event an Application does not supply the + function or data, the facility still operates, and performs + whatever part of its purpose remains meaningful, or + + b) under the GNU GPL, with none of the additional permissions of + this License applicable to that copy. + + 3. Object Code Incorporating Material from Library Header Files. + + The object code form of an Application may incorporate material from +a header file that is part of the Library. You may convey such object +code under terms of your choice, provided that, if the incorporated +material is not limited to numerical parameters, data structure +layouts and accessors, or small macros, inline functions and templates +(ten or fewer lines in length), you do both of the following: + + a) Give prominent notice with each copy of the object code that the + Library is used in it and that the Library and its use are + covered by this License. 
+ + b) Accompany the object code with a copy of the GNU GPL and this license + document. + + 4. Combined Works. + + You may convey a Combined Work under terms of your choice that, +taken together, effectively do not restrict modification of the +portions of the Library contained in the Combined Work and reverse +engineering for debugging such modifications, if you also do each of +the following: + + a) Give prominent notice with each copy of the Combined Work that + the Library is used in it and that the Library and its use are + covered by this License. + + b) Accompany the Combined Work with a copy of the GNU GPL and this license + document. + + c) For a Combined Work that displays copyright notices during + execution, include the copyright notice for the Library among + these notices, as well as a reference directing the user to the + copies of the GNU GPL and this license document. + + d) Do one of the following: + + 0) Convey the Minimal Corresponding Source under the terms of this + License, and the Corresponding Application Code in a form + suitable for, and under terms that permit, the user to + recombine or relink the Application with a modified version of + the Linked Version to produce a modified Combined Work, in the + manner specified by section 6 of the GNU GPL for conveying + Corresponding Source. + + 1) Use a suitable shared library mechanism for linking with the + Library. A suitable mechanism is one that (a) uses at run time + a copy of the Library already present on the user's computer + system, and (b) will operate properly with a modified version + of the Library that is interface-compatible with the Linked + Version. + + e) Provide Installation Information, but only if you would otherwise + be required to provide such information under section 6 of the + GNU GPL, and only to the extent that such information is + necessary to install and execute a modified version of the + Combined Work produced by recombining or relinking the + Application with a modified version of the Linked Version. (If + you use option 4d0, the Installation Information must accompany + the Minimal Corresponding Source and Corresponding Application + Code. If you use option 4d1, you must provide the Installation + Information in the manner specified by section 6 of the GNU GPL + for conveying Corresponding Source.) + + 5. Combined Libraries. + + You may place library facilities that are a work based on the +Library side by side in a single library together with other library +facilities that are not Applications and are not covered by this +License, and convey such a combined library under terms of your +choice, if you do both of the following: + + a) Accompany the combined library with a copy of the same work based + on the Library, uncombined with any other library facilities, + conveyed under the terms of this License. + + b) Give prominent notice with the combined library that part of it + is a work based on the Library, and explaining where to find the + accompanying uncombined form of the same work. + + 6. Revised Versions of the GNU Lesser General Public License. + + The Free Software Foundation may publish revised and/or new versions +of the GNU Lesser General Public License from time to time. Such new +versions will be similar in spirit to the present version, but may +differ in detail to address new problems or concerns. + + Each version is given a distinguishing version number. 
If the +Library as you received it specifies that a certain numbered version +of the GNU Lesser General Public License "or any later version" +applies to it, you have the option of following the terms and +conditions either of that published version or of any later version +published by the Free Software Foundation. If the Library as you +received it does not specify a version number of the GNU Lesser +General Public License, you may choose any version of the GNU Lesser +General Public License ever published by the Free Software Foundation. + + If the Library as you received it specifies that a proxy can decide +whether future versions of the GNU Lesser General Public License shall +apply, that proxy's public statement of acceptance of any version is +permanent authorization for you to choose that version for the +Library. diff --git a/vendor/github.com/docker/distribution/vendor/github.com/docker/goamz/aws/attempt.go b/vendor/github.com/docker/distribution/vendor/github.com/docker/goamz/aws/attempt.go new file mode 100644 index 000000000..c0654f5d8 --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/github.com/docker/goamz/aws/attempt.go @@ -0,0 +1,74 @@ +package aws + +import ( + "time" +) + +// AttemptStrategy represents a strategy for waiting for an action +// to complete successfully. This is an internal type used by the +// implementation of other goamz packages. +type AttemptStrategy struct { + Total time.Duration // total duration of attempt. + Delay time.Duration // interval between each try in the burst. + Min int // minimum number of retries; overrides Total +} + +type Attempt struct { + strategy AttemptStrategy + last time.Time + end time.Time + force bool + count int +} + +// Start begins a new sequence of attempts for the given strategy. +func (s AttemptStrategy) Start() *Attempt { + now := time.Now() + return &Attempt{ + strategy: s, + last: now, + end: now.Add(s.Total), + force: true, + } +} + +// Next waits until it is time to perform the next attempt or returns +// false if it is time to stop trying. +func (a *Attempt) Next() bool { + now := time.Now() + sleep := a.nextSleep(now) + if !a.force && !now.Add(sleep).Before(a.end) && a.strategy.Min <= a.count { + return false + } + a.force = false + if sleep > 0 && a.count > 0 { + time.Sleep(sleep) + now = time.Now() + } + a.count++ + a.last = now + return true +} + +func (a *Attempt) nextSleep(now time.Time) time.Duration { + sleep := a.strategy.Delay - now.Sub(a.last) + if sleep < 0 { + return 0 + } + return sleep +} + +// HasNext returns whether another attempt will be made if the current +// one fails. If it returns true, the following call to Next is +// guaranteed to return true. +func (a *Attempt) HasNext() bool { + if a.force || a.strategy.Min > a.count { + return true + } + now := time.Now() + if now.Add(a.nextSleep(now)).Before(a.end) { + a.force = true + return true + } + return false +} diff --git a/vendor/github.com/docker/distribution/vendor/github.com/docker/goamz/aws/aws.go b/vendor/github.com/docker/distribution/vendor/github.com/docker/goamz/aws/aws.go new file mode 100644 index 000000000..ac8340ad4 --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/github.com/docker/goamz/aws/aws.go @@ -0,0 +1,636 @@ +// +// goamz - Go packages to interact with the Amazon Web Services. +// +// https://wiki.ubuntu.com/goamz +// +// Copyright (c) 2011 Canonical Ltd. 
+// +// Written by Gustavo Niemeyer +// +package aws + +import ( + "encoding/json" + "encoding/xml" + "errors" + "fmt" + "io/ioutil" + "net" + "net/http" + "net/url" + "os" + "os/user" + "path" + "regexp" + "strings" + "time" +) + +// Regular expressions for INI files +var ( + iniSectionRegexp = regexp.MustCompile(`^\s*\[([^\[\]]+)\]\s*$`) + iniSettingRegexp = regexp.MustCompile(`^\s*(.+?)\s*=\s*(.*\S)\s*$`) +) + +// Defines the valid signers +const ( + V2Signature = iota + V4Signature = iota + Route53Signature = iota +) + +// Defines the service endpoint and correct Signer implementation to use +// to sign requests for this endpoint +type ServiceInfo struct { + Endpoint string + Signer uint +} + +// Region defines the URLs where AWS services may be accessed. +// +// See http://goo.gl/d8BP1 for more details. +type Region struct { + Name string // the canonical name of this region. + EC2Endpoint ServiceInfo + S3Endpoint string + S3BucketEndpoint string // Not needed by AWS S3. Use ${bucket} for bucket name. + S3LocationConstraint bool // true if this region requires a LocationConstraint declaration. + S3LowercaseBucket bool // true if the region requires bucket names to be lower case. + SDBEndpoint string + SNSEndpoint string + SQSEndpoint string + SESEndpoint string + IAMEndpoint string + ELBEndpoint string + KMSEndpoint string + DynamoDBEndpoint string + CloudWatchServicepoint ServiceInfo + AutoScalingEndpoint string + RDSEndpoint ServiceInfo + KinesisEndpoint string + STSEndpoint string + CloudFormationEndpoint string + ElastiCacheEndpoint string +} + +var Regions = map[string]Region{ + APNortheast.Name: APNortheast, + APNortheast2.Name: APNortheast2, + APSoutheast.Name: APSoutheast, + APSoutheast2.Name: APSoutheast2, + EUCentral.Name: EUCentral, + EUWest.Name: EUWest, + USEast.Name: USEast, + USWest.Name: USWest, + USWest2.Name: USWest2, + USGovWest.Name: USGovWest, + SAEast.Name: SAEast, + CNNorth1.Name: CNNorth1, +} + +// Designates a signer interface suitable for signing AWS requests, params +// should be appropriately encoded for the request before signing. +// +// A signer should be initialized with Auth and the appropriate endpoint. +type Signer interface { + Sign(method, path string, params map[string]string) +} + +// An AWS Service interface with the API to query the AWS service +// +// Supplied as an easy way to mock out service calls during testing. +type AWSService interface { + // Queries the AWS service at a given method/path with the params and + // returns an http.Response and error + Query(method, path string, params map[string]string) (*http.Response, error) + // Builds an error given an XML payload in the http.Response, can be used + // to process an error if the status code is not 200 for example. 
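+	//
+	// A minimal sketch of the intended call pattern (illustrative only: svc is
+	// any AWSService implementation such as *Service below, and the action
+	// name is a placeholder):
+	//
+	//	resp, err := svc.Query("GET", "/", MakeParams("DescribeSomething"))
+	//	if err == nil && resp.StatusCode != 200 {
+	//		err = svc.BuildError(resp)
+	//	}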
+ BuildError(r *http.Response) error +} + +// Implements a Server Query/Post API to easily query AWS services and build +// errors when desired +type Service struct { + service ServiceInfo + signer Signer +} + +// Create a base set of params for an action +func MakeParams(action string) map[string]string { + params := make(map[string]string) + params["Action"] = action + return params +} + +// Create a new AWS server to handle making requests +func NewService(auth Auth, service ServiceInfo) (s *Service, err error) { + var signer Signer + switch service.Signer { + case V2Signature: + signer, err = NewV2Signer(auth, service) + // case V4Signature: + // signer, err = NewV4Signer(auth, service, Regions["eu-west-1"]) + default: + err = fmt.Errorf("Unsupported signer for service") + } + if err != nil { + return + } + s = &Service{service: service, signer: signer} + return +} + +func (s *Service) Query(method, path string, params map[string]string) (resp *http.Response, err error) { + params["Timestamp"] = time.Now().UTC().Format(time.RFC3339) + u, err := url.Parse(s.service.Endpoint) + if err != nil { + return nil, err + } + u.Path = path + + s.signer.Sign(method, path, params) + if method == "GET" { + u.RawQuery = multimap(params).Encode() + resp, err = http.Get(u.String()) + } else if method == "POST" { + resp, err = http.PostForm(u.String(), multimap(params)) + } + + return +} + +func (s *Service) BuildError(r *http.Response) error { + errors := ErrorResponse{} + xml.NewDecoder(r.Body).Decode(&errors) + var err Error + err = errors.Errors + err.RequestId = errors.RequestId + err.StatusCode = r.StatusCode + if err.Message == "" { + err.Message = r.Status + } + return &err +} + +type ServiceError interface { + error + ErrorCode() string +} + +type ErrorResponse struct { + Errors Error `xml:"Error"` + RequestId string // A unique ID for tracking the request +} + +type Error struct { + StatusCode int + Type string + Code string + Message string + RequestId string +} + +func (err *Error) Error() string { + return fmt.Sprintf("Type: %s, Code: %s, Message: %s", + err.Type, err.Code, err.Message, + ) +} + +func (err *Error) ErrorCode() string { + return err.Code +} + +type Auth struct { + AccessKey, SecretKey string + token string + expiration time.Time +} + +func (a *Auth) Token() string { + if a.token == "" { + return "" + } + if time.Since(a.expiration) >= -30*time.Second { //in an ideal world this should be zero assuming the instance is synching it's clock + auth, err := GetAuth("", "", "", time.Time{}) + if err == nil { + *a = auth + } + } + return a.token +} + +func (a *Auth) Expiration() time.Time { + return a.expiration +} + +// To be used with other APIs that return auth credentials such as STS +func NewAuth(accessKey, secretKey, token string, expiration time.Time) *Auth { + return &Auth{ + AccessKey: accessKey, + SecretKey: secretKey, + token: token, + expiration: expiration, + } +} + +// ResponseMetadata +type ResponseMetadata struct { + RequestId string // A unique ID for tracking the request +} + +type BaseResponse struct { + ResponseMetadata ResponseMetadata +} + +var unreserved = make([]bool, 128) +var hex = "0123456789ABCDEF" + +func init() { + // RFC3986 + u := "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz01234567890-_.~" + for _, c := range u { + unreserved[c] = true + } +} + +func multimap(p map[string]string) url.Values { + q := make(url.Values, len(p)) + for k, v := range p { + q[k] = []string{v} + } + return q +} + +type credentials struct { + Code string + LastUpdated 
string + Type string + AccessKeyId string + SecretAccessKey string + Token string + Expiration string +} + +// GetMetaData retrieves instance metadata about the current machine. +// +// See http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/AESDG-chapter-instancedata.html for more details. +func GetMetaData(path string) (contents []byte, err error) { + c := http.Client{ + Transport: &http.Transport{ + Dial: func(netw, addr string) (net.Conn, error) { + deadline := time.Now().Add(5 * time.Second) + c, err := net.DialTimeout(netw, addr, time.Second*2) + if err != nil { + return nil, err + } + c.SetDeadline(deadline) + return c, nil + }, + }, + } + + url := "http://169.254.169.254/latest/meta-data/" + path + + resp, err := c.Get(url) + if err != nil { + return + } + defer resp.Body.Close() + + if resp.StatusCode != 200 { + err = fmt.Errorf("Code %d returned for url %s", resp.StatusCode, url) + return + } + + body, err := ioutil.ReadAll(resp.Body) + if err != nil { + return + } + return []byte(body), err +} + +func GetRegion(regionName string) (region Region) { + region = Regions[regionName] + return +} + +// GetInstanceCredentials creates an Auth based on the instance's role credentials. +// If the running instance is not in EC2 or does not have a valid IAM role, an error will be returned. +// For more info about setting up IAM roles, see http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/iam-roles-for-amazon-ec2.html +func GetInstanceCredentials() (cred credentials, err error) { + credentialPath := "iam/security-credentials/" + + // Get the instance role + role, err := GetMetaData(credentialPath) + if err != nil { + return + } + + // Get the instance role credentials + credentialJSON, err := GetMetaData(credentialPath + string(role)) + if err != nil { + return + } + + err = json.Unmarshal([]byte(credentialJSON), &cred) + return +} + +// GetAuth creates an Auth based on either passed in credentials, +// environment information or instance based role credentials. +func GetAuth(accessKey string, secretKey, token string, expiration time.Time) (auth Auth, err error) { + // First try passed in credentials + if accessKey != "" && secretKey != "" { + return Auth{accessKey, secretKey, token, expiration}, nil + } + + // Next try to get auth from the environment + auth, err = EnvAuth() + if err == nil { + // Found auth, return + return + } + + // Next try getting auth from the instance role + cred, err := GetInstanceCredentials() + if err == nil { + // Found auth, return + auth.AccessKey = cred.AccessKeyId + auth.SecretKey = cred.SecretAccessKey + auth.token = cred.Token + exptdate, err := time.Parse("2006-01-02T15:04:05Z", cred.Expiration) + if err != nil { + err = fmt.Errorf("Error Parsing expiration date: cred.Expiration :%s , error: %s \n", cred.Expiration, err) + } + auth.expiration = exptdate + return auth, err + } + + // Next try getting auth from the credentials file + auth, err = CredentialFileAuth("", "", time.Minute*5) + if err == nil { + return + } + + //err = errors.New("No valid AWS authentication found") + err = fmt.Errorf("No valid AWS authentication found: %s", err) + return auth, err +} + +// EnvAuth creates an Auth based on environment information. +// The AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY environment +// variables are used. 
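+// AWS_ACCESS_KEY and AWS_SECRET_KEY are accepted as fallbacks.
+//
+// A minimal sketch (the values are placeholders):
+//
+//	os.Setenv("AWS_ACCESS_KEY_ID", "AKIDEXAMPLE")
+//	os.Setenv("AWS_SECRET_ACCESS_KEY", "secret")
+//	auth, err := EnvAuth()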
+func EnvAuth() (auth Auth, err error) { + auth.AccessKey = os.Getenv("AWS_ACCESS_KEY_ID") + if auth.AccessKey == "" { + auth.AccessKey = os.Getenv("AWS_ACCESS_KEY") + } + + auth.SecretKey = os.Getenv("AWS_SECRET_ACCESS_KEY") + if auth.SecretKey == "" { + auth.SecretKey = os.Getenv("AWS_SECRET_KEY") + } + if auth.AccessKey == "" { + err = errors.New("AWS_ACCESS_KEY_ID or AWS_ACCESS_KEY not found in environment") + } + if auth.SecretKey == "" { + err = errors.New("AWS_SECRET_ACCESS_KEY or AWS_SECRET_KEY not found in environment") + } + return +} + +// CredentialFileAuth creates and Auth based on a credentials file. The file +// contains various authentication profiles for use with AWS. +// +// The credentials file, which is used by other AWS SDKs, is documented at +// http://blogs.aws.amazon.com/security/post/Tx3D6U6WSFGOK2H/A-New-and-Standardized-Way-to-Manage-Credentials-in-the-AWS-SDKs +func CredentialFileAuth(filePath string, profile string, expiration time.Duration) (auth Auth, err error) { + if profile == "" { + profile = os.Getenv("AWS_DEFAULT_PROFILE") + if profile == "" { + profile = os.Getenv("AWS_PROFILE") + if profile == "" { + profile = "default" + } + } + } + + if filePath == "" { + u, err := user.Current() + if err != nil { + return auth, err + } + + filePath = path.Join(u.HomeDir, ".aws", "credentials") + } + + // read the file, then parse the INI + contents, err := ioutil.ReadFile(filePath) + if err != nil { + return + } + + profiles := parseINI(string(contents)) + profileData, ok := profiles[profile] + + if !ok { + err = errors.New("The credentials file did not contain the profile") + return + } + + keyId, ok := profileData["aws_access_key_id"] + if !ok { + err = errors.New("The credentials file did not contain required attribute aws_access_key_id") + return + } + + secretKey, ok := profileData["aws_secret_access_key"] + if !ok { + err = errors.New("The credentials file did not contain required attribute aws_secret_access_key") + return + } + + auth.AccessKey = keyId + auth.SecretKey = secretKey + + if token, ok := profileData["aws_session_token"]; ok { + auth.token = token + } + + auth.expiration = time.Now().Add(expiration) + + return +} + +// parseINI takes the contents of a credentials file and returns a map, whose keys +// are the various profiles, and whose values are maps of the settings for the +// profiles +func parseINI(fileContents string) map[string]map[string]string { + profiles := make(map[string]map[string]string) + + lines := strings.Split(fileContents, "\n") + + var currentSection map[string]string + for _, line := range lines { + // remove comments, which start with a semi-colon + if split := strings.Split(line, ";"); len(split) > 1 { + line = split[0] + } + + // check if the line is the start of a profile. + // + // for example: + // [default] + // + // otherwise, check for the proper setting + // property=value + if sectMatch := iniSectionRegexp.FindStringSubmatch(line); len(sectMatch) == 2 { + currentSection = make(map[string]string) + profiles[sectMatch[1]] = currentSection + } else if setMatch := iniSettingRegexp.FindStringSubmatch(line); len(setMatch) == 3 && currentSection != nil { + currentSection[setMatch[1]] = setMatch[2] + } + } + + return profiles +} + +// Encode takes a string and URI-encodes it in a way suitable +// to be used in AWS signatures. 
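+//
+// Unreserved characters (letters, digits, '-', '_', '.', '~') pass through;
+// everything else is percent-encoded with upper-case hex digits, for example:
+//
+//	Encode("foo bar") // "foo%20bar"
+//	Encode("a/b~c")   // "a%2Fb~c"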
+func Encode(s string) string { + encode := false + for i := 0; i != len(s); i++ { + c := s[i] + if c > 127 || !unreserved[c] { + encode = true + break + } + } + if !encode { + return s + } + e := make([]byte, len(s)*3) + ei := 0 + for i := 0; i != len(s); i++ { + c := s[i] + if c > 127 || !unreserved[c] { + e[ei] = '%' + e[ei+1] = hex[c>>4] + e[ei+2] = hex[c&0xF] + ei += 3 + } else { + e[ei] = c + ei += 1 + } + } + return string(e[:ei]) +} + +func dialTimeout(network, addr string) (net.Conn, error) { + return net.DialTimeout(network, addr, time.Duration(2*time.Second)) +} + +func AvailabilityZone() string { + transport := http.Transport{Dial: dialTimeout} + client := http.Client{ + Transport: &transport, + } + resp, err := client.Get("http://169.254.169.254/latest/meta-data/placement/availability-zone") + if err != nil { + return "unknown" + } else { + defer resp.Body.Close() + body, err := ioutil.ReadAll(resp.Body) + if err != nil { + return "unknown" + } else { + return string(body) + } + } +} + +func InstanceRegion() string { + az := AvailabilityZone() + if az == "unknown" { + return az + } else { + region := az[:len(az)-1] + return region + } +} + +func InstanceId() string { + transport := http.Transport{Dial: dialTimeout} + client := http.Client{ + Transport: &transport, + } + resp, err := client.Get("http://169.254.169.254/latest/meta-data/instance-id") + if err != nil { + return "unknown" + } else { + defer resp.Body.Close() + body, err := ioutil.ReadAll(resp.Body) + if err != nil { + return "unknown" + } else { + return string(body) + } + } +} + +func InstanceType() string { + transport := http.Transport{Dial: dialTimeout} + client := http.Client{ + Transport: &transport, + } + resp, err := client.Get("http://169.254.169.254/latest/meta-data/instance-type") + if err != nil { + return "unknown" + } else { + defer resp.Body.Close() + body, err := ioutil.ReadAll(resp.Body) + if err != nil { + return "unknown" + } else { + return string(body) + } + } +} + +func ServerLocalIp() string { + transport := http.Transport{Dial: dialTimeout} + client := http.Client{ + Transport: &transport, + } + resp, err := client.Get("http://169.254.169.254/latest/meta-data/local-ipv4") + if err != nil { + return "127.0.0.1" + } else { + defer resp.Body.Close() + body, err := ioutil.ReadAll(resp.Body) + if err != nil { + return "127.0.0.1" + } else { + return string(body) + } + } +} + +func ServerPublicIp() string { + transport := http.Transport{Dial: dialTimeout} + client := http.Client{ + Transport: &transport, + } + resp, err := client.Get("http://169.254.169.254/latest/meta-data/public-ipv4") + if err != nil { + return "127.0.0.1" + } else { + defer resp.Body.Close() + body, err := ioutil.ReadAll(resp.Body) + if err != nil { + return "127.0.0.1" + } else { + return string(body) + } + } +} diff --git a/vendor/github.com/docker/distribution/vendor/github.com/docker/goamz/aws/client.go b/vendor/github.com/docker/distribution/vendor/github.com/docker/goamz/aws/client.go new file mode 100644 index 000000000..86d2ccec8 --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/github.com/docker/goamz/aws/client.go @@ -0,0 +1,124 @@ +package aws + +import ( + "math" + "net" + "net/http" + "time" +) + +type RetryableFunc func(*http.Request, *http.Response, error) bool +type WaitFunc func(try int) +type DeadlineFunc func() time.Time + +type ResilientTransport struct { + // Timeout is the maximum amount of time a dial will wait for + // a connect to complete. + // + // The default is no timeout. 
+ // + // With or without a timeout, the operating system may impose + // its own earlier timeout. For instance, TCP timeouts are + // often around 3 minutes. + DialTimeout time.Duration + + // MaxTries, if non-zero, specifies the number of times we will retry on + // failure. Retries are only attempted for temporary network errors or known + // safe failures. + MaxTries int + Deadline DeadlineFunc + ShouldRetry RetryableFunc + Wait WaitFunc + transport *http.Transport +} + +// Convenience method for creating an http client +func NewClient(rt *ResilientTransport) *http.Client { + rt.transport = &http.Transport{ + Dial: func(netw, addr string) (net.Conn, error) { + c, err := net.DialTimeout(netw, addr, rt.DialTimeout) + if err != nil { + return nil, err + } + c.SetDeadline(rt.Deadline()) + return c, nil + }, + Proxy: http.ProxyFromEnvironment, + } + // TODO: Would be nice is ResilientTransport allowed clients to initialize + // with http.Transport attributes. + return &http.Client{ + Transport: rt, + } +} + +var retryingTransport = &ResilientTransport{ + Deadline: func() time.Time { + return time.Now().Add(5 * time.Second) + }, + DialTimeout: 10 * time.Second, + MaxTries: 3, + ShouldRetry: awsRetry, + Wait: ExpBackoff, +} + +// Exported default client +var RetryingClient = NewClient(retryingTransport) + +func (t *ResilientTransport) RoundTrip(req *http.Request) (*http.Response, error) { + return t.tries(req) +} + +// Retry a request a maximum of t.MaxTries times. +// We'll only retry if the proper criteria are met. +// If a wait function is specified, wait that amount of time +// In between requests. +func (t *ResilientTransport) tries(req *http.Request) (res *http.Response, err error) { + for try := 0; try < t.MaxTries; try += 1 { + res, err = t.transport.RoundTrip(req) + + if !t.ShouldRetry(req, res, err) { + break + } + if res != nil { + res.Body.Close() + } + if t.Wait != nil { + t.Wait(try) + } + } + + return +} + +func ExpBackoff(try int) { + time.Sleep(100 * time.Millisecond * + time.Duration(math.Exp2(float64(try)))) +} + +func LinearBackoff(try int) { + time.Sleep(time.Duration(try*100) * time.Millisecond) +} + +// Decide if we should retry a request. +// In general, the criteria for retrying a request is described here +// http://docs.aws.amazon.com/general/latest/gr/api-retries.html +func awsRetry(req *http.Request, res *http.Response, err error) bool { + retry := false + + // Retry if there's a temporary network error. + if neterr, ok := err.(net.Error); ok { + if neterr.Temporary() { + retry = true + } + } + + // Retry if we get a 5xx series error. 
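+	// (Between attempts the transport's Wait function runs; with the
+	// retryingTransport defaults above, ExpBackoff sleeps roughly 100ms,
+	// 200ms and 400ms for tries 0, 1 and 2.)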
+ if res != nil { + if res.StatusCode >= 500 && res.StatusCode < 600 { + retry = true + } + } + + return retry +} diff --git a/vendor/github.com/docker/distribution/vendor/github.com/docker/goamz/aws/regions.go b/vendor/github.com/docker/distribution/vendor/github.com/docker/goamz/aws/regions.go new file mode 100644 index 000000000..0406648dc --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/github.com/docker/goamz/aws/regions.go @@ -0,0 +1,289 @@ +package aws + +var USGovWest = Region{ + "us-gov-west-1", + ServiceInfo{"https://ec2.us-gov-west-1.amazonaws.com", V2Signature}, + "https://s3-fips-us-gov-west-1.amazonaws.com", + "", + true, + true, + "", + "https://sns.us-gov-west-1.amazonaws.com", + "https://sqs.us-gov-west-1.amazonaws.com", + "", + "https://iam.us-gov.amazonaws.com", + "https://elasticloadbalancing.us-gov-west-1.amazonaws.com", + "", + "https://dynamodb.us-gov-west-1.amazonaws.com", + ServiceInfo{"https://monitoring.us-gov-west-1.amazonaws.com", V2Signature}, + "https://autoscaling.us-gov-west-1.amazonaws.com", + ServiceInfo{"https://rds.us-gov-west-1.amazonaws.com", V2Signature}, + "", + "https://sts.amazonaws.com", + "https://cloudformation.us-gov-west-1.amazonaws.com", + "", +} + +var USEast = Region{ + "us-east-1", + ServiceInfo{"https://ec2.us-east-1.amazonaws.com", V2Signature}, + "https://s3-external-1.amazonaws.com", + "", + false, + false, + "https://sdb.amazonaws.com", + "https://sns.us-east-1.amazonaws.com", + "https://sqs.us-east-1.amazonaws.com", + "https://email.us-east-1.amazonaws.com", + "https://iam.amazonaws.com", + "https://elasticloadbalancing.us-east-1.amazonaws.com", + "https://kms.us-east-1.amazonaws.com", + "https://dynamodb.us-east-1.amazonaws.com", + ServiceInfo{"https://monitoring.us-east-1.amazonaws.com", V2Signature}, + "https://autoscaling.us-east-1.amazonaws.com", + ServiceInfo{"https://rds.us-east-1.amazonaws.com", V2Signature}, + "https://kinesis.us-east-1.amazonaws.com", + "https://sts.amazonaws.com", + "https://cloudformation.us-east-1.amazonaws.com", + "https://elasticache.us-east-1.amazonaws.com", +} + +var USWest = Region{ + "us-west-1", + ServiceInfo{"https://ec2.us-west-1.amazonaws.com", V2Signature}, + "https://s3-us-west-1.amazonaws.com", + "", + true, + true, + "https://sdb.us-west-1.amazonaws.com", + "https://sns.us-west-1.amazonaws.com", + "https://sqs.us-west-1.amazonaws.com", + "", + "https://iam.amazonaws.com", + "https://elasticloadbalancing.us-west-1.amazonaws.com", + "https://kms.us-west-1.amazonaws.com", + "https://dynamodb.us-west-1.amazonaws.com", + ServiceInfo{"https://monitoring.us-west-1.amazonaws.com", V2Signature}, + "https://autoscaling.us-west-1.amazonaws.com", + ServiceInfo{"https://rds.us-west-1.amazonaws.com", V2Signature}, + "https://kinesis.us-west-1.amazonaws.com", + "https://sts.amazonaws.com", + "https://cloudformation.us-west-1.amazonaws.com", + "https://elasticache.us-west-1.amazonaws.com", +} + +var USWest2 = Region{ + "us-west-2", + ServiceInfo{"https://ec2.us-west-2.amazonaws.com", V2Signature}, + "https://s3-us-west-2.amazonaws.com", + "", + true, + true, + "https://sdb.us-west-2.amazonaws.com", + "https://sns.us-west-2.amazonaws.com", + "https://sqs.us-west-2.amazonaws.com", + "https://email.us-west-2.amazonaws.com", + "https://iam.amazonaws.com", + "https://elasticloadbalancing.us-west-2.amazonaws.com", + "https://kms.us-west-2.amazonaws.com", + "https://dynamodb.us-west-2.amazonaws.com", + ServiceInfo{"https://monitoring.us-west-2.amazonaws.com", V2Signature}, + 
"https://autoscaling.us-west-2.amazonaws.com", + ServiceInfo{"https://rds.us-west-2.amazonaws.com", V2Signature}, + "https://kinesis.us-west-2.amazonaws.com", + "https://sts.amazonaws.com", + "https://cloudformation.us-west-2.amazonaws.com", + "https://elasticache.us-west-2.amazonaws.com", +} + +var EUWest = Region{ + "eu-west-1", + ServiceInfo{"https://ec2.eu-west-1.amazonaws.com", V2Signature}, + "https://s3-eu-west-1.amazonaws.com", + "", + true, + true, + "https://sdb.eu-west-1.amazonaws.com", + "https://sns.eu-west-1.amazonaws.com", + "https://sqs.eu-west-1.amazonaws.com", + "https://email.eu-west-1.amazonaws.com", + "https://iam.amazonaws.com", + "https://elasticloadbalancing.eu-west-1.amazonaws.com", + "https://kms.eu-west-1.amazonaws.com", + "https://dynamodb.eu-west-1.amazonaws.com", + ServiceInfo{"https://monitoring.eu-west-1.amazonaws.com", V2Signature}, + "https://autoscaling.eu-west-1.amazonaws.com", + ServiceInfo{"https://rds.eu-west-1.amazonaws.com", V2Signature}, + "https://kinesis.eu-west-1.amazonaws.com", + "https://sts.amazonaws.com", + "https://cloudformation.eu-west-1.amazonaws.com", + "https://elasticache.eu-west-1.amazonaws.com", +} + +var EUCentral = Region{ + "eu-central-1", + ServiceInfo{"https://ec2.eu-central-1.amazonaws.com", V4Signature}, + "https://s3-eu-central-1.amazonaws.com", + "", + true, + true, + "https://sdb.eu-central-1.amazonaws.com", + "https://sns.eu-central-1.amazonaws.com", + "https://sqs.eu-central-1.amazonaws.com", + "", + "https://iam.amazonaws.com", + "https://elasticloadbalancing.eu-central-1.amazonaws.com", + "https://kms.eu-central-1.amazonaws.com", + "https://dynamodb.eu-central-1.amazonaws.com", + ServiceInfo{"https://monitoring.eu-central-1.amazonaws.com", V2Signature}, + "https://autoscaling.eu-central-1.amazonaws.com", + ServiceInfo{"https://rds.eu-central-1.amazonaws.com", V2Signature}, + "https://kinesis.eu-central-1.amazonaws.com", + "https://sts.amazonaws.com", + "https://cloudformation.eu-central-1.amazonaws.com", + "", +} + +var APSoutheast = Region{ + "ap-southeast-1", + ServiceInfo{"https://ec2.ap-southeast-1.amazonaws.com", V2Signature}, + "https://s3-ap-southeast-1.amazonaws.com", + "", + true, + true, + "https://sdb.ap-southeast-1.amazonaws.com", + "https://sns.ap-southeast-1.amazonaws.com", + "https://sqs.ap-southeast-1.amazonaws.com", + "", + "https://iam.amazonaws.com", + "https://elasticloadbalancing.ap-southeast-1.amazonaws.com", + "https://kms.ap-southeast-1.amazonaws.com", + "https://dynamodb.ap-southeast-1.amazonaws.com", + ServiceInfo{"https://monitoring.ap-southeast-1.amazonaws.com", V2Signature}, + "https://autoscaling.ap-southeast-1.amazonaws.com", + ServiceInfo{"https://rds.ap-southeast-1.amazonaws.com", V2Signature}, + "https://kinesis.ap-southeast-1.amazonaws.com", + "https://sts.amazonaws.com", + "https://cloudformation.ap-southeast-1.amazonaws.com", + "https://elasticache.ap-southeast-1.amazonaws.com", +} + +var APSoutheast2 = Region{ + "ap-southeast-2", + ServiceInfo{"https://ec2.ap-southeast-2.amazonaws.com", V2Signature}, + "https://s3-ap-southeast-2.amazonaws.com", + "", + true, + true, + "https://sdb.ap-southeast-2.amazonaws.com", + "https://sns.ap-southeast-2.amazonaws.com", + "https://sqs.ap-southeast-2.amazonaws.com", + "", + "https://iam.amazonaws.com", + "https://elasticloadbalancing.ap-southeast-2.amazonaws.com", + "https://kms.ap-southeast-2.amazonaws.com", + "https://dynamodb.ap-southeast-2.amazonaws.com", + ServiceInfo{"https://monitoring.ap-southeast-2.amazonaws.com", V2Signature}, + 
"https://autoscaling.ap-southeast-2.amazonaws.com", + ServiceInfo{"https://rds.ap-southeast-2.amazonaws.com", V2Signature}, + "https://kinesis.ap-southeast-2.amazonaws.com", + "https://sts.amazonaws.com", + "https://cloudformation.ap-southeast-2.amazonaws.com", + "https://elasticache.ap-southeast-2.amazonaws.com", +} + +var APNortheast = Region{ + "ap-northeast-1", + ServiceInfo{"https://ec2.ap-northeast-1.amazonaws.com", V2Signature}, + "https://s3-ap-northeast-1.amazonaws.com", + "", + true, + true, + "https://sdb.ap-northeast-1.amazonaws.com", + "https://sns.ap-northeast-1.amazonaws.com", + "https://sqs.ap-northeast-1.amazonaws.com", + "", + "https://iam.amazonaws.com", + "https://elasticloadbalancing.ap-northeast-1.amazonaws.com", + "https://kms.ap-northeast-1.amazonaws.com", + "https://dynamodb.ap-northeast-1.amazonaws.com", + ServiceInfo{"https://monitoring.ap-northeast-1.amazonaws.com", V2Signature}, + "https://autoscaling.ap-northeast-1.amazonaws.com", + ServiceInfo{"https://rds.ap-northeast-1.amazonaws.com", V2Signature}, + "https://kinesis.ap-northeast-1.amazonaws.com", + "https://sts.amazonaws.com", + "https://cloudformation.ap-northeast-1.amazonaws.com", + "https://elasticache.ap-northeast-1.amazonaws.com", +} + +var APNortheast2 = Region{ + "ap-northeast-2", + ServiceInfo{"https://ec2.ap-northeast-2.amazonaws.com", V2Signature}, + "https://s3-ap-northeast-2.amazonaws.com", + "", + true, + true, + "", + "https://sns.ap-northeast-2.amazonaws.com", + "https://sqs.ap-northeast-2.amazonaws.com", + "", + "https://iam.amazonaws.com", + "https://elasticloadbalancing.ap-northeast-2.amazonaws.com", + "https://kms.ap-northeast-2.amazonaws.com", + "https://dynamodb.ap-northeast-2.amazonaws.com", + ServiceInfo{"https://monitoring.ap-northeast-2.amazonaws.com", V2Signature}, + "https://autoscaling.ap-northeast-2.amazonaws.com", + ServiceInfo{"https://rds.ap-northeast-2.amazonaws.com", V2Signature}, + "https://kinesis.ap-northeast-2.amazonaws.com", + "https://sts.ap-northeast-2.amazonaws.com", + "https://cloudformation.ap-northeast-2.amazonaws.com", + "https://elasticache.ap-northeast-2.amazonaws.com", +} + +var SAEast = Region{ + "sa-east-1", + ServiceInfo{"https://ec2.sa-east-1.amazonaws.com", V2Signature}, + "https://s3-sa-east-1.amazonaws.com", + "", + true, + true, + "https://sdb.sa-east-1.amazonaws.com", + "https://sns.sa-east-1.amazonaws.com", + "https://sqs.sa-east-1.amazonaws.com", + "", + "https://iam.amazonaws.com", + "https://elasticloadbalancing.sa-east-1.amazonaws.com", + "https://kms.sa-east-1.amazonaws.com", + "https://dynamodb.sa-east-1.amazonaws.com", + ServiceInfo{"https://monitoring.sa-east-1.amazonaws.com", V2Signature}, + "https://autoscaling.sa-east-1.amazonaws.com", + ServiceInfo{"https://rds.sa-east-1.amazonaws.com", V2Signature}, + "", + "https://sts.amazonaws.com", + "https://cloudformation.sa-east-1.amazonaws.com", + "https://elasticache.sa-east-1.amazonaws.com", +} + +var CNNorth1 = Region{ + "cn-north-1", + ServiceInfo{"https://ec2.cn-north-1.amazonaws.com.cn", V2Signature}, + "https://s3.cn-north-1.amazonaws.com.cn", + "", + true, + true, + "", + "https://sns.cn-north-1.amazonaws.com.cn", + "https://sqs.cn-north-1.amazonaws.com.cn", + "", + "https://iam.cn-north-1.amazonaws.com.cn", + "https://elasticloadbalancing.cn-north-1.amazonaws.com.cn", + "", + "https://dynamodb.cn-north-1.amazonaws.com.cn", + ServiceInfo{"https://monitoring.cn-north-1.amazonaws.com.cn", V4Signature}, + "https://autoscaling.cn-north-1.amazonaws.com.cn", + 
ServiceInfo{"https://rds.cn-north-1.amazonaws.com.cn", V4Signature}, + "", + "https://sts.cn-north-1.amazonaws.com.cn", + "", + "", +} diff --git a/vendor/github.com/docker/distribution/vendor/github.com/docker/goamz/aws/retry.go b/vendor/github.com/docker/distribution/vendor/github.com/docker/goamz/aws/retry.go new file mode 100644 index 000000000..bea964b9f --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/github.com/docker/goamz/aws/retry.go @@ -0,0 +1,136 @@ +package aws + +import ( + "math/rand" + "net" + "net/http" + "time" +) + +const ( + maxDelay = 20 * time.Second + defaultScale = 300 * time.Millisecond + throttlingScale = 500 * time.Millisecond + throttlingScaleRange = throttlingScale / 4 + defaultMaxRetries = 3 + dynamoDBScale = 25 * time.Millisecond + dynamoDBMaxRetries = 10 +) + +// A RetryPolicy encapsulates a strategy for implementing client retries. +// +// Default implementations are provided which match the AWS SDKs. +type RetryPolicy interface { + // ShouldRetry returns whether a client should retry a failed request. + ShouldRetry(target string, r *http.Response, err error, numRetries int) bool + + // Delay returns the time a client should wait before issuing a retry. + Delay(target string, r *http.Response, err error, numRetries int) time.Duration +} + +// DefaultRetryPolicy implements the AWS SDK default retry policy. +// +// It will retry up to 3 times, and uses an exponential backoff with a scale +// factor of 300ms (300ms, 600ms, 1200ms). If the retry is because of +// throttling, the delay will also include some randomness. +// +// See https://github.com/aws/aws-sdk-java/blob/master/aws-java-sdk-core/src/main/java/com/amazonaws/retry/PredefinedRetryPolicies.java#L90. +type DefaultRetryPolicy struct { +} + +// ShouldRetry implements the RetryPolicy ShouldRetry method. +func (policy DefaultRetryPolicy) ShouldRetry(target string, r *http.Response, err error, numRetries int) bool { + return shouldRetry(r, err, numRetries, defaultMaxRetries) +} + +// Delay implements the RetryPolicy Delay method. +func (policy DefaultRetryPolicy) Delay(target string, r *http.Response, err error, numRetries int) time.Duration { + scale := defaultScale + if err, ok := err.(*Error); ok && isThrottlingException(err) { + scale = throttlingScale + time.Duration(rand.Int63n(int64(throttlingScaleRange))) + } + return exponentialBackoff(numRetries, scale) +} + +// DynamoDBRetryPolicy implements the AWS SDK DynamoDB retry policy. +// +// It will retry up to 10 times, and uses an exponential backoff with a scale +// factor of 25ms (25ms, 50ms, 100ms, ...). +// +// See https://github.com/aws/aws-sdk-java/blob/master/aws-java-sdk-core/src/main/java/com/amazonaws/retry/PredefinedRetryPolicies.java#L103. +type DynamoDBRetryPolicy struct { +} + +// ShouldRetry implements the RetryPolicy ShouldRetry method. +func (policy DynamoDBRetryPolicy) ShouldRetry(target string, r *http.Response, err error, numRetries int) bool { + return shouldRetry(r, err, numRetries, dynamoDBMaxRetries) +} + +// Delay implements the RetryPolicy Delay method. +func (policy DynamoDBRetryPolicy) Delay(target string, r *http.Response, err error, numRetries int) time.Duration { + return exponentialBackoff(numRetries, dynamoDBScale) +} + +// NeverRetryPolicy never retries requests and returns immediately on failure. +type NeverRetryPolicy struct { +} + +// ShouldRetry implements the RetryPolicy ShouldRetry method. 
+func (policy NeverRetryPolicy) ShouldRetry(target string, r *http.Response, err error, numRetries int) bool { + return false +} + +// Delay implements the RetryPolicy Delay method. +func (policy NeverRetryPolicy) Delay(target string, r *http.Response, err error, numRetries int) time.Duration { + return time.Duration(0) +} + +// shouldRetry determines if we should retry the request. +// +// See http://docs.aws.amazon.com/general/latest/gr/api-retries.html. +func shouldRetry(r *http.Response, err error, numRetries int, maxRetries int) bool { + // Once we've exceeded the max retry attempts, game over. + if numRetries >= maxRetries { + return false + } + + // Always retry temporary network errors. + if err, ok := err.(net.Error); ok && err.Temporary() { + return true + } + + // Always retry 5xx responses. + if r != nil && r.StatusCode >= 500 { + return true + } + + // Always retry throttling exceptions. + if err, ok := err.(ServiceError); ok && isThrottlingException(err) { + return true + } + + // Other classes of failures indicate a problem with the request. Retrying + // won't help. + return false +} + +func exponentialBackoff(numRetries int, scale time.Duration) time.Duration { + if numRetries < 0 { + return time.Duration(0) + } + + delay := (1 << uint(numRetries)) * scale + if delay > maxDelay { + return maxDelay + } + return delay +} + +func isThrottlingException(err ServiceError) bool { + switch err.ErrorCode() { + case "Throttling", "ThrottlingException", "ProvisionedThroughputExceededException": + return true + default: + return false + } +} diff --git a/vendor/github.com/docker/distribution/vendor/github.com/docker/goamz/aws/sign.go b/vendor/github.com/docker/distribution/vendor/github.com/docker/goamz/aws/sign.go new file mode 100644 index 000000000..8b53e8e86 --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/github.com/docker/goamz/aws/sign.go @@ -0,0 +1,472 @@ +package aws + +import ( + "bytes" + "crypto/hmac" + "crypto/sha256" + "encoding/base64" + "fmt" + "io/ioutil" + "net/http" + "net/url" + "path" + "sort" + "strings" + "time" +) + +// AWS specifies that the parameters in a signed request must +// be provided in the natural order of the keys. This is distinct +// from the natural order of the encoded value of key=value. +// Percent and gocheck.Equals affect the sorting order. +func EncodeSorted(values url.Values) string { + // preallocate the arrays for perfomance + keys := make([]string, 0, len(values)) + sarray := make([]string, 0, len(values)) + for k := range values { + keys = append(keys, k) + } + sort.Strings(keys) + + for _, k := range keys { + for _, v := range values[k] { + sarray = append(sarray, Encode(k)+"="+Encode(v)) + } + } + + return strings.Join(sarray, "&") +} + +type V2Signer struct { + auth Auth + service ServiceInfo + host string +} + +var b64 = base64.StdEncoding + +func NewV2Signer(auth Auth, service ServiceInfo) (*V2Signer, error) { + u, err := url.Parse(service.Endpoint) + if err != nil { + return nil, err + } + return &V2Signer{auth: auth, service: service, host: u.Host}, nil +} + +func (s *V2Signer) Sign(method, path string, params map[string]string) { + params["AWSAccessKeyId"] = s.auth.AccessKey + params["SignatureVersion"] = "2" + params["SignatureMethod"] = "HmacSHA256" + if s.auth.Token() != "" { + params["SecurityToken"] = s.auth.Token() + } + // AWS specifies that the parameters in a signed request must + // be provided in the natural order of the keys. 
This is distinct + // from the natural order of the encoded value of key=value. + // Percent and gocheck.Equals affect the sorting order. + var keys, sarray []string + for k := range params { + keys = append(keys, k) + } + sort.Strings(keys) + for _, k := range keys { + sarray = append(sarray, Encode(k)+"="+Encode(params[k])) + } + joined := strings.Join(sarray, "&") + payload := method + "\n" + s.host + "\n" + path + "\n" + joined + hash := hmac.New(sha256.New, []byte(s.auth.SecretKey)) + hash.Write([]byte(payload)) + signature := make([]byte, b64.EncodedLen(hash.Size())) + b64.Encode(signature, hash.Sum(nil)) + + params["Signature"] = string(signature) +} + +func (s *V2Signer) SignRequest(req *http.Request) error { + req.ParseForm() + req.Form.Set("AWSAccessKeyId", s.auth.AccessKey) + req.Form.Set("SignatureVersion", "2") + req.Form.Set("SignatureMethod", "HmacSHA256") + if s.auth.Token() != "" { + req.Form.Set("SecurityToken", s.auth.Token()) + } + + payload := req.Method + "\n" + req.URL.Host + "\n" + req.URL.Path + "\n" + EncodeSorted(req.Form) + hash := hmac.New(sha256.New, []byte(s.auth.SecretKey)) + hash.Write([]byte(payload)) + signature := make([]byte, b64.EncodedLen(hash.Size())) + b64.Encode(signature, hash.Sum(nil)) + + req.Form.Set("Signature", string(signature)) + + req.URL.RawQuery = req.Form.Encode() + + return nil +} + +// Common date formats for signing requests +const ( + ISO8601BasicFormat = "20060102T150405Z" + ISO8601BasicFormatShort = "20060102" +) + +type Route53Signer struct { + auth Auth +} + +func NewRoute53Signer(auth Auth) *Route53Signer { + return &Route53Signer{auth: auth} +} + +// Creates the authorize signature based on the date stamp and secret key +func (s *Route53Signer) getHeaderAuthorize(message string) string { + hmacSha256 := hmac.New(sha256.New, []byte(s.auth.SecretKey)) + hmacSha256.Write([]byte(message)) + cryptedString := hmacSha256.Sum(nil) + + return base64.StdEncoding.EncodeToString(cryptedString) +} + +// Adds all the required headers for AWS Route53 API to the request +// including the authorization +func (s *Route53Signer) Sign(req *http.Request) { + date := time.Now().UTC().Format(time.RFC1123) + delete(req.Header, "Date") + req.Header.Set("Date", date) + + authHeader := fmt.Sprintf("AWS3-HTTPS AWSAccessKeyId=%s,Algorithm=%s,Signature=%s", + s.auth.AccessKey, "HmacSHA256", s.getHeaderAuthorize(date)) + + req.Header.Set("Host", req.Host) + req.Header.Set("X-Amzn-Authorization", authHeader) + req.Header.Set("Content-Type", "application/xml") + if s.auth.Token() != "" { + req.Header.Set("X-Amz-Security-Token", s.auth.Token()) + } +} + +/* +The V4Signer encapsulates all of the functionality to sign a request with the AWS +Signature Version 4 Signing Process. (http://goo.gl/u1OWZz) +*/ +type V4Signer struct { + auth Auth + serviceName string + region Region + // Add the x-amz-content-sha256 header + IncludeXAmzContentSha256 bool +} + +/* +Return a new instance of a V4Signer capable of signing AWS requests. +*/ +func NewV4Signer(auth Auth, serviceName string, region Region) *V4Signer { + return &V4Signer{ + auth: auth, + serviceName: serviceName, + region: region, + IncludeXAmzContentSha256: false, + } +} + +/* +Sign a request according to the AWS Signature Version 4 Signing Process. (http://goo.gl/u1OWZz) + +The signed request will include an "x-amz-date" header with a current timestamp if a valid "x-amz-date" +or "date" header was not available in the original request. 
In addition, AWS Signature Version 4 requires +the "host" header to be a signed header, therefor the Sign method will manually set a "host" header from +the request.Host. + +The signed request will include a new "Authorization" header indicating that the request has been signed. + +Any changes to the request after signing the request will invalidate the signature. +*/ +func (s *V4Signer) Sign(req *http.Request) { + req.Header.Set("host", req.Host) // host header must be included as a signed header + t := s.requestTime(req) // Get request time + + payloadHash := "" + + if _, ok := req.Form["X-Amz-Expires"]; ok { + // We are authenticating the the request by using query params + // (also known as pre-signing a url, http://docs.aws.amazon.com/AmazonS3/latest/API/sigv4-query-string-auth.html) + payloadHash = "UNSIGNED-PAYLOAD" + req.Header.Del("x-amz-date") + + req.Form["X-Amz-SignedHeaders"] = []string{s.signedHeaders(req.Header)} + req.Form["X-Amz-Algorithm"] = []string{"AWS4-HMAC-SHA256"} + req.Form["X-Amz-Credential"] = []string{s.auth.AccessKey + "/" + s.credentialScope(t)} + req.Form["X-Amz-Date"] = []string{t.Format(ISO8601BasicFormat)} + req.URL.RawQuery = req.Form.Encode() + } else { + payloadHash = s.payloadHash(req) + if s.IncludeXAmzContentSha256 { + req.Header.Set("x-amz-content-sha256", payloadHash) // x-amz-content-sha256 contains the payload hash + } + } + creq := s.canonicalRequest(req, payloadHash) // Build canonical request + sts := s.stringToSign(t, creq) // Build string to sign + signature := s.signature(t, sts) // Calculate the AWS Signature Version 4 + auth := s.authorization(req.Header, t, signature) // Create Authorization header value + + if _, ok := req.Form["X-Amz-Expires"]; ok { + req.Form["X-Amz-Signature"] = []string{signature} + } else { + req.Header.Set("Authorization", auth) // Add Authorization header to request + } + return +} + +func (s *V4Signer) SignRequest(req *http.Request) error { + s.Sign(req) + return nil +} + +/* +requestTime method will parse the time from the request "x-amz-date" or "date" headers. +If the "x-amz-date" header is present, that will take priority over the "date" header. +If neither header is defined or we are unable to parse either header as a valid date +then we will create a new "x-amz-date" header with the current time. +*/ +func (s *V4Signer) requestTime(req *http.Request) time.Time { + + // Get "x-amz-date" header + date := req.Header.Get("x-amz-date") + + // Attempt to parse as ISO8601BasicFormat + t, err := time.Parse(ISO8601BasicFormat, date) + if err == nil { + return t + } + + // Attempt to parse as http.TimeFormat + t, err = time.Parse(http.TimeFormat, date) + if err == nil { + req.Header.Set("x-amz-date", t.Format(ISO8601BasicFormat)) + return t + } + + // Get "date" header + date = req.Header.Get("date") + + // Attempt to parse as http.TimeFormat + t, err = time.Parse(http.TimeFormat, date) + if err == nil { + return t + } + + // Create a current time header to be used + t = time.Now().UTC() + req.Header.Set("x-amz-date", t.Format(ISO8601BasicFormat)) + return t +} + +/* +canonicalRequest method creates the canonical request according to Task 1 of the AWS Signature Version 4 Signing Process. 
(http://goo.gl/eUUZ3S) + + CanonicalRequest = + HTTPRequestMethod + '\n' + + CanonicalURI + '\n' + + CanonicalQueryString + '\n' + + CanonicalHeaders + '\n' + + SignedHeaders + '\n' + + HexEncode(Hash(Payload)) + +payloadHash is optional; use the empty string and it will be calculated from the request +*/ +func (s *V4Signer) canonicalRequest(req *http.Request, payloadHash string) string { + if payloadHash == "" { + payloadHash = s.payloadHash(req) + } + c := new(bytes.Buffer) + fmt.Fprintf(c, "%s\n", req.Method) + fmt.Fprintf(c, "%s\n", s.canonicalURI(req.URL)) + fmt.Fprintf(c, "%s\n", s.canonicalQueryString(req.URL)) + fmt.Fprintf(c, "%s\n\n", s.canonicalHeaders(req.Header)) + fmt.Fprintf(c, "%s\n", s.signedHeaders(req.Header)) + fmt.Fprintf(c, "%s", payloadHash) + return c.String() +} + +func (s *V4Signer) canonicalURI(u *url.URL) string { + u = &url.URL{Path: u.Path} + canonicalPath := u.String() + + slash := strings.HasSuffix(canonicalPath, "/") + canonicalPath = path.Clean(canonicalPath) + + if canonicalPath == "" || canonicalPath == "." { + canonicalPath = "/" + } + + if canonicalPath != "/" && slash { + canonicalPath += "/" + } + + return canonicalPath +} + +func (s *V4Signer) canonicalQueryString(u *url.URL) string { + keyValues := make(map[string]string, len(u.Query())) + keys := make([]string, len(u.Query())) + + key_i := 0 + for k, vs := range u.Query() { + k = url.QueryEscape(k) + + a := make([]string, len(vs)) + for idx, v := range vs { + v = url.QueryEscape(v) + a[idx] = fmt.Sprintf("%s=%s", k, v) + } + + keyValues[k] = strings.Join(a, "&") + keys[key_i] = k + key_i++ + } + + sort.Strings(keys) + + query := make([]string, len(keys)) + for idx, key := range keys { + query[idx] = keyValues[key] + } + + query_str := strings.Join(query, "&") + + // AWS V4 signing requires that the space characters + // are encoded as %20 instead of +. On the other hand + // golangs url.QueryEscape as well as url.Values.Encode() + // both encode the space as a + character. See: + // http://docs.aws.amazon.com/general/latest/gr/sigv4-create-canonical-request.html + // https://github.com/golang/go/issues/4013 + // https://groups.google.com/forum/#!topic/golang-nuts/BB443qEjPIk + + return strings.Replace(query_str, "+", "%20", -1) +} + +func (s *V4Signer) canonicalHeaders(h http.Header) string { + i, a, lowerCase := 0, make([]string, len(h)), make(map[string][]string) + + for k, v := range h { + lowerCase[strings.ToLower(k)] = v + } + + var keys []string + for k := range lowerCase { + keys = append(keys, k) + } + sort.Strings(keys) + + for _, k := range keys { + v := lowerCase[k] + for j, w := range v { + v[j] = strings.Trim(w, " ") + } + sort.Strings(v) + a[i] = strings.ToLower(k) + ":" + strings.Join(v, ",") + i++ + } + return strings.Join(a, "\n") +} + +func (s *V4Signer) signedHeaders(h http.Header) string { + i, a := 0, make([]string, len(h)) + for k := range h { + a[i] = strings.ToLower(k) + i++ + } + sort.Strings(a) + return strings.Join(a, ";") +} + +func (s *V4Signer) payloadHash(req *http.Request) string { + var b []byte + if req.Body == nil { + b = []byte("") + } else { + var err error + b, err = ioutil.ReadAll(req.Body) + if err != nil { + // TODO: I REALLY DON'T LIKE THIS PANIC!!!! + panic(err) + } + } + req.Body = ioutil.NopCloser(bytes.NewBuffer(b)) + return s.hash(string(b)) +} + +/* +stringToSign method creates the string to sign accorting to Task 2 of the AWS Signature Version 4 Signing Process. 
(http://goo.gl/es1PAu) + + StringToSign = + Algorithm + '\n' + + RequestDate + '\n' + + CredentialScope + '\n' + + HexEncode(Hash(CanonicalRequest)) +*/ +func (s *V4Signer) stringToSign(t time.Time, creq string) string { + w := new(bytes.Buffer) + fmt.Fprint(w, "AWS4-HMAC-SHA256\n") + fmt.Fprintf(w, "%s\n", t.Format(ISO8601BasicFormat)) + fmt.Fprintf(w, "%s\n", s.credentialScope(t)) + fmt.Fprintf(w, "%s", s.hash(creq)) + return w.String() +} + +func (s *V4Signer) credentialScope(t time.Time) string { + return fmt.Sprintf("%s/%s/%s/aws4_request", t.Format(ISO8601BasicFormatShort), s.region.Name, s.serviceName) +} + +/* +signature method calculates the AWS Signature Version 4 according to Task 3 of the AWS Signature Version 4 Signing Process. (http://goo.gl/j0Yqe1) + + signature = HexEncode(HMAC(derived-signing-key, string-to-sign)) +*/ +func (s *V4Signer) signature(t time.Time, sts string) string { + h := s.hmac(s.derivedKey(t), []byte(sts)) + return fmt.Sprintf("%x", h) +} + +/* +derivedKey method derives a signing key to be used for signing a request. + + kSecret = Your AWS Secret Access Key + kDate = HMAC("AWS4" + kSecret, Date) + kRegion = HMAC(kDate, Region) + kService = HMAC(kRegion, Service) + kSigning = HMAC(kService, "aws4_request") +*/ +func (s *V4Signer) derivedKey(t time.Time) []byte { + h := s.hmac([]byte("AWS4"+s.auth.SecretKey), []byte(t.Format(ISO8601BasicFormatShort))) + h = s.hmac(h, []byte(s.region.Name)) + h = s.hmac(h, []byte(s.serviceName)) + h = s.hmac(h, []byte("aws4_request")) + return h +} + +/* +authorization method generates the authorization header value. +*/ +func (s *V4Signer) authorization(header http.Header, t time.Time, signature string) string { + w := new(bytes.Buffer) + fmt.Fprint(w, "AWS4-HMAC-SHA256 ") + fmt.Fprintf(w, "Credential=%s/%s, ", s.auth.AccessKey, s.credentialScope(t)) + fmt.Fprintf(w, "SignedHeaders=%s, ", s.signedHeaders(header)) + fmt.Fprintf(w, "Signature=%s", signature) + return w.String() +} + +// hash method calculates the sha256 hash for a given string +func (s *V4Signer) hash(in string) string { + h := sha256.New() + fmt.Fprintf(h, "%s", in) + return fmt.Sprintf("%x", h.Sum(nil)) +} + +// hmac method calculates the sha256 hmac for a given slice of bytes +func (s *V4Signer) hmac(key, data []byte) []byte { + h := hmac.New(sha256.New, key) + h.Write(data) + return h.Sum(nil) +} diff --git a/vendor/github.com/docker/distribution/vendor/github.com/docker/goamz/s3/lifecycle.go b/vendor/github.com/docker/distribution/vendor/github.com/docker/goamz/s3/lifecycle.go new file mode 100644 index 000000000..d9281261b --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/github.com/docker/goamz/s3/lifecycle.go @@ -0,0 +1,202 @@ +package s3 + +import ( + "crypto/md5" + "encoding/base64" + "encoding/xml" + "net/url" + "strconv" + "time" +) + +// Implements an interface for s3 bucket lifecycle configuration +// See goo.gl/d0bbDf for details. + +const ( + LifecycleRuleStatusEnabled = "Enabled" + LifecycleRuleStatusDisabled = "Disabled" + LifecycleRuleDateFormat = "2006-01-02" + StorageClassGlacier = "GLACIER" +) + +type Expiration struct { + Days *uint `xml:"Days,omitempty"` + Date string `xml:"Date,omitempty"` +} + +// Returns Date as a time.Time. +func (r *Expiration) ParseDate() (time.Time, error) { + return time.Parse(LifecycleRuleDateFormat, r.Date) +} + +type Transition struct { + Days *uint `xml:"Days,omitempty"` + Date string `xml:"Date,omitempty"` + StorageClass string `xml:"StorageClass"` +} + +// Returns Date as a time.Time. 
+func (r *Transition) ParseDate() (time.Time, error) { + return time.Parse(LifecycleRuleDateFormat, r.Date) +} + +type NoncurrentVersionExpiration struct { + Days *uint `xml:"NoncurrentDays,omitempty"` +} + +type NoncurrentVersionTransition struct { + Days *uint `xml:"NoncurrentDays,omitempty"` + StorageClass string `xml:"StorageClass"` +} + +type LifecycleRule struct { + ID string `xml:"ID"` + Prefix string `xml:"Prefix"` + Status string `xml:"Status"` + NoncurrentVersionTransition *NoncurrentVersionTransition `xml:"NoncurrentVersionTransition,omitempty"` + NoncurrentVersionExpiration *NoncurrentVersionExpiration `xml:"NoncurrentVersionExpiration,omitempty"` + Transition *Transition `xml:"Transition,omitempty"` + Expiration *Expiration `xml:"Expiration,omitempty"` +} + +// Create a lifecycle rule with arbitrary identifier id and object name prefix +// for which the rules should apply. +func NewLifecycleRule(id, prefix string) *LifecycleRule { + rule := &LifecycleRule{ + ID: id, + Prefix: prefix, + Status: LifecycleRuleStatusEnabled, + } + return rule +} + +// Adds a transition rule in days. Overwrites any previous transition rule. +func (r *LifecycleRule) SetTransitionDays(days uint) { + r.Transition = &Transition{ + Days: &days, + StorageClass: StorageClassGlacier, + } +} + +// Adds a transition rule as a date. Overwrites any previous transition rule. +func (r *LifecycleRule) SetTransitionDate(date time.Time) { + r.Transition = &Transition{ + Date: date.Format(LifecycleRuleDateFormat), + StorageClass: StorageClassGlacier, + } +} + +// Adds an expiration rule in days. Overwrites any previous expiration rule. +// Days must be > 0. +func (r *LifecycleRule) SetExpirationDays(days uint) { + r.Expiration = &Expiration{ + Days: &days, + } +} + +// Adds an expiration rule as a date. Overwrites any previous expiration rule. +func (r *LifecycleRule) SetExpirationDate(date time.Time) { + r.Expiration = &Expiration{ + Date: date.Format(LifecycleRuleDateFormat), + } +} + +// Adds a noncurrent version transition rule. Overwrites any previous +// noncurrent version transition rule. +func (r *LifecycleRule) SetNoncurrentVersionTransitionDays(days uint) { + r.NoncurrentVersionTransition = &NoncurrentVersionTransition{ + Days: &days, + StorageClass: StorageClassGlacier, + } +} + +// Adds a noncurrent version expiration rule. Days must be > 0. Overwrites +// any previous noncurrent version expiration rule. +func (r *LifecycleRule) SetNoncurrentVersionExpirationDays(days uint) { + r.NoncurrentVersionExpiration = &NoncurrentVersionExpiration{ + Days: &days, + } +} + +// Marks the rule as disabled. +func (r *LifecycleRule) Disable() { + r.Status = LifecycleRuleStatusDisabled +} + +// Marks the rule as enabled (default). +func (r *LifecycleRule) Enable() { + r.Status = LifecycleRuleStatusEnabled +} + +type LifecycleConfiguration struct { + XMLName xml.Name `xml:"LifecycleConfiguration"` + Rules *[]*LifecycleRule `xml:"Rule,omitempty"` +} + +// Adds a LifecycleRule to the configuration. +func (c *LifecycleConfiguration) AddRule(r *LifecycleRule) { + var rules []*LifecycleRule + if c.Rules != nil { + rules = *c.Rules + } + rules = append(rules, r) + c.Rules = &rules +} + +// Sets the bucket's lifecycle configuration. 
+func (b *Bucket) PutLifecycleConfiguration(c *LifecycleConfiguration) error { + doc, err := xml.Marshal(c) + if err != nil { + return err + } + + buf := makeXmlBuffer(doc) + digest := md5.New() + size, err := digest.Write(buf.Bytes()) + if err != nil { + return err + } + + headers := map[string][]string{ + "Content-Length": {strconv.FormatInt(int64(size), 10)}, + "Content-MD5": {base64.StdEncoding.EncodeToString(digest.Sum(nil))}, + } + + req := &request{ + path: "/", + method: "PUT", + bucket: b.Name, + headers: headers, + payload: buf, + params: url.Values{"lifecycle": {""}}, + } + + return b.S3.queryV4Sign(req, nil) +} + +// Retrieves the lifecycle configuration for the bucket. AWS returns an error +// if no lifecycle found. +func (b *Bucket) GetLifecycleConfiguration() (*LifecycleConfiguration, error) { + req := &request{ + method: "GET", + bucket: b.Name, + path: "/", + params: url.Values{"lifecycle": {""}}, + } + + conf := &LifecycleConfiguration{} + err := b.S3.queryV4Sign(req, conf) + return conf, err +} + +// Delete the bucket's lifecycle configuration. +func (b *Bucket) DeleteLifecycleConfiguration() error { + req := &request{ + method: "DELETE", + bucket: b.Name, + path: "/", + params: url.Values{"lifecycle": {""}}, + } + + return b.S3.queryV4Sign(req, nil) +} diff --git a/vendor/github.com/docker/distribution/vendor/github.com/docker/goamz/s3/multi.go b/vendor/github.com/docker/distribution/vendor/github.com/docker/goamz/s3/multi.go new file mode 100644 index 000000000..d905f5657 --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/github.com/docker/goamz/s3/multi.go @@ -0,0 +1,508 @@ +package s3 + +import ( + "bytes" + "crypto/md5" + "encoding/base64" + "encoding/hex" + "encoding/xml" + "errors" + "io" + "net/http" + "net/url" + "sort" + "strconv" + "strings" +) + +// Multi represents an unfinished multipart upload. +// +// Multipart uploads allow sending big objects in smaller chunks. +// After all parts have been sent, the upload must be explicitly +// completed by calling Complete with the list of parts. +// +// See http://goo.gl/vJfTG for an overview of multipart uploads. +type Multi struct { + Bucket *Bucket + Key string + UploadId string +} + +// That's the default. Here just for testing. +var listMultiMax = 1000 + +type listMultiResp struct { + NextKeyMarker string + NextUploadIdMarker string + IsTruncated bool + Upload []Multi + CommonPrefixes []string `xml:"CommonPrefixes>Prefix"` +} + +// ListMulti returns the list of unfinished multipart uploads in b. +// +// The prefix parameter limits the response to keys that begin with the +// specified prefix. You can use prefixes to separate a bucket into different +// groupings of keys (to get the feeling of folders, for example). +// +// The delim parameter causes the response to group all of the keys that +// share a common prefix up to the next delimiter in a single entry within +// the CommonPrefixes field. You can use delimiters to separate a bucket +// into different groupings of keys, similar to how folders would work. +// +// See http://goo.gl/ePioY for details. 
+func (b *Bucket) ListMulti(prefix, delim string) (multis []*Multi, prefixes []string, err error) { + params := map[string][]string{ + "uploads": {""}, + "max-uploads": {strconv.FormatInt(int64(listMultiMax), 10)}, + "prefix": {prefix}, + "delimiter": {delim}, + } + for attempt := attempts.Start(); attempt.Next(); { + req := &request{ + method: "GET", + bucket: b.Name, + params: params, + } + var resp listMultiResp + err := b.S3.query(req, &resp) + if shouldRetry(err) && attempt.HasNext() { + continue + } + if err != nil { + return nil, nil, err + } + for i := range resp.Upload { + multi := &resp.Upload[i] + multi.Bucket = b + multis = append(multis, multi) + } + prefixes = append(prefixes, resp.CommonPrefixes...) + if !resp.IsTruncated { + return multis, prefixes, nil + } + params["key-marker"] = []string{resp.NextKeyMarker} + params["upload-id-marker"] = []string{resp.NextUploadIdMarker} + attempt = attempts.Start() // Last request worked. + } + panic("unreachable") +} + +// Multi returns a multipart upload handler for the provided key +// inside b. If a multipart upload exists for key, it is returned, +// otherwise a new multipart upload is initiated with contType and perm. +func (b *Bucket) Multi(key, contType string, perm ACL, options Options) (*Multi, error) { + multis, _, err := b.ListMulti(key, "") + if err != nil && !hasCode(err, "NoSuchUpload") { + return nil, err + } + for _, m := range multis { + if m.Key == key { + return m, nil + } + } + return b.InitMulti(key, contType, perm, options) +} + +// InitMulti initializes a new multipart upload at the provided +// key inside b and returns a value for manipulating it. +// +// See http://goo.gl/XP8kL for details. +func (b *Bucket) InitMulti(key string, contType string, perm ACL, options Options) (*Multi, error) { + headers := map[string][]string{ + "Content-Type": {contType}, + "Content-Length": {"0"}, + "x-amz-acl": {string(perm)}, + } + options.addHeaders(headers) + params := map[string][]string{ + "uploads": {""}, + } + req := &request{ + method: "POST", + bucket: b.Name, + path: key, + headers: headers, + params: params, + } + var err error + var resp struct { + UploadId string `xml:"UploadId"` + } + for attempt := attempts.Start(); attempt.Next(); { + err = b.S3.query(req, &resp) + if !shouldRetry(err) { + break + } + } + if err != nil { + return nil, err + } + return &Multi{Bucket: b, Key: key, UploadId: resp.UploadId}, nil +} + +func (m *Multi) PutPartCopy(n int, options CopyOptions, source string) (*CopyObjectResult, Part, error) { + headers := map[string][]string{ + "x-amz-copy-source": {url.QueryEscape(source)}, + } + options.addHeaders(headers) + params := map[string][]string{ + "uploadId": {m.UploadId}, + "partNumber": {strconv.FormatInt(int64(n), 10)}, + } + + sourceBucket := m.Bucket.S3.Bucket(strings.TrimRight(strings.SplitAfterN(source, "/", 2)[0], "/")) + sourceMeta, err := sourceBucket.Head(strings.SplitAfterN(source, "/", 2)[1], nil) + if err != nil { + return nil, Part{}, err + } + + for attempt := attempts.Start(); attempt.Next(); { + req := &request{ + method: "PUT", + bucket: m.Bucket.Name, + path: m.Key, + headers: headers, + params: params, + } + resp := &CopyObjectResult{} + err = m.Bucket.S3.query(req, resp) + if shouldRetry(err) && attempt.HasNext() { + continue + } + if err != nil { + return nil, Part{}, err + } + if resp.ETag == "" { + return nil, Part{}, errors.New("part upload succeeded with no ETag") + } + return resp, Part{n, resp.ETag, sourceMeta.ContentLength}, nil + } + panic("unreachable") +} + 
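The multipart helpers defined in this file compose into a simple upload flow: initialize an upload with InitMulti, send the data with PutPart or PutAll, then assemble the parts with Complete. The sketch below is illustrative only: the bucket name, object key, local file path, us-east-1 region, and the use of environment credentials via aws.EnvAuth are assumptions for the example, not anything mandated by this vendored file; it simply strings together the functions declared in multi.go and s3.go.

package main

import (
	"log"
	"os"

	"github.com/docker/goamz/aws"
	"github.com/docker/goamz/s3"
)

func main() {
	// Credentials are read from the environment; bucket and key are made up.
	auth, err := aws.EnvAuth()
	if err != nil {
		log.Fatal(err)
	}
	bucket := s3.New(auth, aws.USEast).Bucket("example-bucket")

	f, err := os.Open("archive.tar")
	if err != nil {
		log.Fatal(err)
	}
	defer f.Close()

	// Start (or resume) a multipart upload for the key.
	multi, err := bucket.InitMulti("backups/archive.tar", "application/x-tar", s3.Private, s3.Options{})
	if err != nil {
		log.Fatal(err)
	}

	// Send the file in parts of at most 5MB each.
	parts, err := multi.PutAll(f, 5*1024*1024)
	if err != nil {
		log.Fatal(err)
	}

	// Assemble the uploaded parts into the final object.
	if err := multi.Complete(parts); err != nil {
		log.Fatal(err)
	}
}

PutAll splits the reader into chunks of at most partSize bytes (every part except the last must be at least 5MB) and reuses any previously uploaded part whose number, size, and MD5 checksum already match, so an interrupted upload can be resumed by running the same flow again.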
+// PutPart sends part n of the multipart upload, reading all the content from r. +// Each part, except for the last one, must be at least 5MB in size. +// +// See http://goo.gl/pqZer for details. +func (m *Multi) PutPart(n int, r io.ReadSeeker) (Part, error) { + partSize, _, md5b64, err := seekerInfo(r) + if err != nil { + return Part{}, err + } + return m.putPart(n, r, partSize, md5b64) +} + +func (m *Multi) putPart(n int, r io.ReadSeeker, partSize int64, md5b64 string) (Part, error) { + headers := map[string][]string{ + "Content-Length": {strconv.FormatInt(partSize, 10)}, + "Content-MD5": {md5b64}, + } + params := map[string][]string{ + "uploadId": {m.UploadId}, + "partNumber": {strconv.FormatInt(int64(n), 10)}, + } + for attempt := attempts.Start(); attempt.Next(); { + _, err := r.Seek(0, 0) + if err != nil { + return Part{}, err + } + req := &request{ + method: "PUT", + bucket: m.Bucket.Name, + path: m.Key, + headers: headers, + params: params, + payload: r, + } + err = m.Bucket.S3.prepare(req) + if err != nil { + return Part{}, err + } + resp, err := m.Bucket.S3.run(req, nil) + if shouldRetry(err) && attempt.HasNext() { + continue + } + if err != nil { + return Part{}, err + } + etag := resp.Header.Get("ETag") + if etag == "" { + return Part{}, errors.New("part upload succeeded with no ETag") + } + return Part{n, etag, partSize}, nil + } + panic("unreachable") +} + +func seekerInfo(r io.ReadSeeker) (size int64, md5hex string, md5b64 string, err error) { + _, err = r.Seek(0, 0) + if err != nil { + return 0, "", "", err + } + digest := md5.New() + size, err = io.Copy(digest, r) + if err != nil { + return 0, "", "", err + } + sum := digest.Sum(nil) + md5hex = hex.EncodeToString(sum) + md5b64 = base64.StdEncoding.EncodeToString(sum) + return size, md5hex, md5b64, nil +} + +type Part struct { + N int `xml:"PartNumber"` + ETag string + Size int64 +} + +type partSlice []Part + +func (s partSlice) Len() int { return len(s) } +func (s partSlice) Less(i, j int) bool { return s[i].N < s[j].N } +func (s partSlice) Swap(i, j int) { s[i], s[j] = s[j], s[i] } + +type listPartsResp struct { + NextPartNumberMarker string + IsTruncated bool + Part []Part +} + +// That's the default. Here just for testing. +var listPartsMax = 1000 + +// Kept for backcompatability. See the documentation for ListPartsFull +func (m *Multi) ListParts() ([]Part, error) { + return m.ListPartsFull(0, listPartsMax) +} + +// ListParts returns the list of previously uploaded parts in m, +// ordered by part number (Only parts with higher part numbers than +// partNumberMarker will be listed). Only up to maxParts parts will be +// returned. +// +// See http://goo.gl/ePioY for details. +func (m *Multi) ListPartsFull(partNumberMarker int, maxParts int) ([]Part, error) { + if maxParts > listPartsMax { + maxParts = listPartsMax + } + + params := map[string][]string{ + "uploadId": {m.UploadId}, + "max-parts": {strconv.FormatInt(int64(maxParts), 10)}, + "part-number-marker": {strconv.FormatInt(int64(partNumberMarker), 10)}, + } + var parts partSlice + for attempt := attempts.Start(); attempt.Next(); { + req := &request{ + method: "GET", + bucket: m.Bucket.Name, + path: m.Key, + params: params, + } + var resp listPartsResp + err := m.Bucket.S3.query(req, &resp) + if shouldRetry(err) && attempt.HasNext() { + continue + } + if err != nil { + return nil, err + } + parts = append(parts, resp.Part...) 
+ if !resp.IsTruncated { + sort.Sort(parts) + return parts, nil + } + params["part-number-marker"] = []string{resp.NextPartNumberMarker} + attempt = attempts.Start() // Last request worked. + } + panic("unreachable") +} + +type ReaderAtSeeker interface { + io.ReaderAt + io.ReadSeeker +} + +// PutAll sends all of r via a multipart upload with parts no larger +// than partSize bytes, which must be set to at least 5MB. +// Parts previously uploaded are either reused if their checksum +// and size match the new part, or otherwise overwritten with the +// new content. +// PutAll returns all the parts of m (reused or not). +func (m *Multi) PutAll(r ReaderAtSeeker, partSize int64) ([]Part, error) { + old, err := m.ListParts() + if err != nil && !hasCode(err, "NoSuchUpload") { + return nil, err + } + reuse := 0 // Index of next old part to consider reusing. + current := 1 // Part number of latest good part handled. + totalSize, err := r.Seek(0, 2) + if err != nil { + return nil, err + } + first := true // Must send at least one empty part if the file is empty. + var result []Part +NextSection: + for offset := int64(0); offset < totalSize || first; offset += partSize { + first = false + if offset+partSize > totalSize { + partSize = totalSize - offset + } + section := io.NewSectionReader(r, offset, partSize) + _, md5hex, md5b64, err := seekerInfo(section) + if err != nil { + return nil, err + } + for reuse < len(old) && old[reuse].N <= current { + // Looks like this part was already sent. + part := &old[reuse] + etag := `"` + md5hex + `"` + if part.N == current && part.Size == partSize && part.ETag == etag { + // Checksum matches. Reuse the old part. + result = append(result, *part) + current++ + continue NextSection + } + reuse++ + } + + // Part wasn't found or doesn't match. Send it. + part, err := m.putPart(current, section, partSize, md5b64) + if err != nil { + return nil, err + } + result = append(result, part) + current++ + } + return result, nil +} + +type completeUpload struct { + XMLName xml.Name `xml:"CompleteMultipartUpload"` + Parts completeParts `xml:"Part"` +} + +type completePart struct { + PartNumber int + ETag string +} + +type completeParts []completePart + +func (p completeParts) Len() int { return len(p) } +func (p completeParts) Less(i, j int) bool { return p[i].PartNumber < p[j].PartNumber } +func (p completeParts) Swap(i, j int) { p[i], p[j] = p[j], p[i] } + +// We can't know in advance whether we'll have an Error or a +// CompleteMultipartUploadResult, so this structure is just a placeholder to +// know the name of the XML object. +type completeUploadResp struct { + XMLName xml.Name + InnerXML string `xml:",innerxml"` +} + +// Complete assembles the given previously uploaded parts into the +// final object. This operation may take several minutes. +// +// See http://goo.gl/2Z7Tw for details. 
+func (m *Multi) Complete(parts []Part) error { + params := map[string][]string{ + "uploadId": {m.UploadId}, + } + c := completeUpload{} + for _, p := range parts { + c.Parts = append(c.Parts, completePart{p.N, p.ETag}) + } + sort.Sort(c.Parts) + data, err := xml.Marshal(&c) + if err != nil { + return err + } + for attempt := attempts.Start(); attempt.Next(); { + req := &request{ + method: "POST", + bucket: m.Bucket.Name, + path: m.Key, + params: params, + payload: bytes.NewReader(data), + } + var resp completeUploadResp + if m.Bucket.Region.Name == "generic" { + headers := make(http.Header) + headers.Add("Content-Length", strconv.FormatInt(int64(len(data)), 10)) + req.headers = headers + } + err := m.Bucket.S3.query(req, &resp) + if shouldRetry(err) && attempt.HasNext() { + continue + } + + if err != nil { + return err + } + + // A 200 error code does not guarantee that there were no errors (see + // http://docs.aws.amazon.com/AmazonS3/latest/API/mpUploadComplete.html ), + // so first figure out what kind of XML "object" we are dealing with. + + if resp.XMLName.Local == "Error" { + // S3.query does the unmarshalling for us, so we can't unmarshal + // again in a different struct... So we need to duct-tape back the + // original XML back together. + fullErrorXml := "" + resp.InnerXML + "" + s3err := &Error{} + + if err := xml.Unmarshal([]byte(fullErrorXml), s3err); err != nil { + return err + } + + return s3err + } + + if resp.XMLName.Local == "CompleteMultipartUploadResult" { + // FIXME: One could probably add a CompleteFull method returning the + // actual contents of the CompleteMultipartUploadResult object. + return nil + } + + return errors.New("Invalid XML struct returned: " + resp.XMLName.Local) + } + panic("unreachable") +} + +// Abort deletes an unifinished multipart upload and any previously +// uploaded parts for it. +// +// After a multipart upload is aborted, no additional parts can be +// uploaded using it. However, if any part uploads are currently in +// progress, those part uploads might or might not succeed. As a result, +// it might be necessary to abort a given multipart upload multiple +// times in order to completely free all storage consumed by all parts. +// +// NOTE: If the described scenario happens to you, please report back to +// the goamz authors with details. In the future such retrying should be +// handled internally, but it's not clear what happens precisely (Is an +// error returned? Is the issue completely undetectable?). +// +// See http://goo.gl/dnyJw for details. +func (m *Multi) Abort() error { + params := map[string][]string{ + "uploadId": {m.UploadId}, + } + for attempt := attempts.Start(); attempt.Next(); { + req := &request{ + method: "DELETE", + bucket: m.Bucket.Name, + path: m.Key, + params: params, + } + err := m.Bucket.S3.query(req, nil) + if shouldRetry(err) && attempt.HasNext() { + continue + } + return err + } + panic("unreachable") +} diff --git a/vendor/github.com/docker/distribution/vendor/github.com/docker/goamz/s3/s3.go b/vendor/github.com/docker/distribution/vendor/github.com/docker/goamz/s3/s3.go new file mode 100644 index 000000000..5dedaf105 --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/github.com/docker/goamz/s3/s3.go @@ -0,0 +1,1305 @@ +// +// goamz - Go packages to interact with the Amazon Web Services. +// +// https://wiki.ubuntu.com/goamz +// +// Copyright (c) 2011 Canonical Ltd. 
+// +// Written by Gustavo Niemeyer +// + +package s3 + +import ( + "bytes" + "crypto/hmac" + "crypto/md5" + "crypto/sha1" + "encoding/base64" + "encoding/xml" + "fmt" + "io" + "io/ioutil" + "log" + "net" + "net/http" + "net/http/httputil" + "net/url" + "path" + "strconv" + "strings" + "time" + + "github.com/docker/goamz/aws" +) + +const debug = false + +// The S3 type encapsulates operations with an S3 region. +type S3 struct { + aws.Auth + aws.Region + Signature int + Client *http.Client + private byte // Reserve the right of using private data. +} + +// The Bucket type encapsulates operations with an S3 bucket. +type Bucket struct { + *S3 + Name string +} + +// The Owner type represents the owner of the object in an S3 bucket. +type Owner struct { + ID string + DisplayName string +} + +// Fold options into an Options struct +// +type Options struct { + SSE bool + SSEKMS bool + SSEKMSKeyId string + SSECustomerAlgorithm string + SSECustomerKey string + SSECustomerKeyMD5 string + Meta map[string][]string + ContentEncoding string + CacheControl string + RedirectLocation string + ContentMD5 string + ContentDisposition string + Range string + StorageClass StorageClass + // What else? +} + +type CopyOptions struct { + Options + CopySourceOptions string + MetadataDirective string + ContentType string +} + +// CopyObjectResult is the output from a Copy request +type CopyObjectResult struct { + ETag string + LastModified string +} + +var attempts = aws.AttemptStrategy{ + Min: 5, + Total: 5 * time.Second, + Delay: 200 * time.Millisecond, +} + +// New creates a new S3. +func New(auth aws.Auth, region aws.Region) *S3 { + return &S3{ + Auth: auth, + Region: region, + Signature: aws.V2Signature, + Client: http.DefaultClient, + private: 0, + } +} + +// Bucket returns a Bucket with the given name. +func (s3 *S3) Bucket(name string) *Bucket { + if s3.Region.S3BucketEndpoint != "" || s3.Region.S3LowercaseBucket { + name = strings.ToLower(name) + } + return &Bucket{s3, name} +} + +type BucketInfo struct { + Name string + CreationDate string +} + +type GetServiceResp struct { + Owner Owner + Buckets []BucketInfo `xml:">Bucket"` +} + +// GetService gets a list of all buckets owned by an account. +// +// See http://goo.gl/wbHkGj for details. +func (s3 *S3) GetService() (*GetServiceResp, error) { + bucket := s3.Bucket("") + + r, err := bucket.Get("") + if err != nil { + return nil, err + } + + // Parse the XML response. + var resp GetServiceResp + if err = xml.Unmarshal(r, &resp); err != nil { + return nil, err + } + + return &resp, nil +} + +var createBucketConfiguration = ` + %s +` + +// locationConstraint returns an io.Reader specifying a LocationConstraint if +// required for the region. +// +// See http://goo.gl/bh9Kq for details. 
+func (s3 *S3) locationConstraint() io.Reader { + constraint := "" + if s3.Region.S3LocationConstraint { + constraint = fmt.Sprintf(createBucketConfiguration, s3.Region.Name) + } + return strings.NewReader(constraint) +} + +type ACL string + +const ( + Private = ACL("private") + PublicRead = ACL("public-read") + PublicReadWrite = ACL("public-read-write") + AuthenticatedRead = ACL("authenticated-read") + BucketOwnerRead = ACL("bucket-owner-read") + BucketOwnerFull = ACL("bucket-owner-full-control") +) + +type StorageClass string + +const ( + ReducedRedundancy = StorageClass("REDUCED_REDUNDANCY") + StandardStorage = StorageClass("STANDARD") +) + +type ServerSideEncryption string + +const ( + S3Managed = ServerSideEncryption("AES256") + KMSManaged = ServerSideEncryption("aws:kms") +) + +// PutBucket creates a new bucket. +// +// See http://goo.gl/ndjnR for details. +func (b *Bucket) PutBucket(perm ACL) error { + headers := map[string][]string{ + "x-amz-acl": {string(perm)}, + } + req := &request{ + method: "PUT", + bucket: b.Name, + path: "/", + headers: headers, + payload: b.locationConstraint(), + } + return b.S3.query(req, nil) +} + +// DelBucket removes an existing S3 bucket. All objects in the bucket must +// be removed before the bucket itself can be removed. +// +// See http://goo.gl/GoBrY for details. +func (b *Bucket) DelBucket() (err error) { + req := &request{ + method: "DELETE", + bucket: b.Name, + path: "/", + } + for attempt := attempts.Start(); attempt.Next(); { + err = b.S3.query(req, nil) + if !shouldRetry(err) { + break + } + } + return err +} + +// Get retrieves an object from an S3 bucket. +// +// See http://goo.gl/isCO7 for details. +func (b *Bucket) Get(path string) (data []byte, err error) { + body, err := b.GetReader(path) + if err != nil { + return nil, err + } + data, err = ioutil.ReadAll(body) + body.Close() + return data, err +} + +// GetReader retrieves an object from an S3 bucket, +// returning the body of the HTTP response. +// It is the caller's responsibility to call Close on rc when +// finished reading. +func (b *Bucket) GetReader(path string) (rc io.ReadCloser, err error) { + resp, err := b.GetResponse(path) + if resp != nil { + return resp.Body, err + } + return nil, err +} + +// GetResponse retrieves an object from an S3 bucket, +// returning the HTTP response. +// It is the caller's responsibility to call Close on rc when +// finished reading +func (b *Bucket) GetResponse(path string) (resp *http.Response, err error) { + return b.GetResponseWithHeaders(path, make(http.Header)) +} + +// GetReaderWithHeaders retrieves an object from an S3 bucket +// Accepts custom headers to be sent as the second parameter +// returning the body of the HTTP response. +// It is the caller's responsibility to call Close on rc when +// finished reading +func (b *Bucket) GetResponseWithHeaders(path string, headers map[string][]string) (resp *http.Response, err error) { + req := &request{ + bucket: b.Name, + path: path, + headers: headers, + } + err = b.S3.prepare(req) + if err != nil { + return nil, err + } + for attempt := attempts.Start(); attempt.Next(); { + resp, err := b.S3.run(req, nil) + if shouldRetry(err) && attempt.HasNext() { + continue + } + if err != nil { + return nil, err + } + return resp, nil + } + panic("unreachable") +} + +// Exists checks whether or not an object exists on an S3 bucket using a HEAD request. 
+func (b *Bucket) Exists(path string) (exists bool, err error) { + req := &request{ + method: "HEAD", + bucket: b.Name, + path: path, + } + err = b.S3.prepare(req) + if err != nil { + return + } + for attempt := attempts.Start(); attempt.Next(); { + resp, err := b.S3.run(req, nil) + + if shouldRetry(err) && attempt.HasNext() { + continue + } + + if err != nil { + // We can treat a 403 or 404 as non existance + if e, ok := err.(*Error); ok && (e.StatusCode == 403 || e.StatusCode == 404) { + return false, nil + } + return false, err + } + + if resp.StatusCode/100 == 2 { + exists = true + } + if resp.Body != nil { + resp.Body.Close() + } + return exists, err + } + return false, fmt.Errorf("S3 Currently Unreachable") +} + +// Head HEADs an object in the S3 bucket, returns the response with +// no body see http://bit.ly/17K1ylI +func (b *Bucket) Head(path string, headers map[string][]string) (*http.Response, error) { + req := &request{ + method: "HEAD", + bucket: b.Name, + path: path, + headers: headers, + } + err := b.S3.prepare(req) + if err != nil { + return nil, err + } + + for attempt := attempts.Start(); attempt.Next(); { + resp, err := b.S3.run(req, nil) + if shouldRetry(err) && attempt.HasNext() { + continue + } + if err != nil { + return nil, err + } + return resp, err + } + return nil, fmt.Errorf("S3 Currently Unreachable") +} + +// Put inserts an object into the S3 bucket. +// +// See http://goo.gl/FEBPD for details. +func (b *Bucket) Put(path string, data []byte, contType string, perm ACL, options Options) error { + body := bytes.NewBuffer(data) + return b.PutReader(path, body, int64(len(data)), contType, perm, options) +} + +// PutCopy puts a copy of an object given by the key path into bucket b using b.Path as the target key +func (b *Bucket) PutCopy(path string, perm ACL, options CopyOptions, source string) (*CopyObjectResult, error) { + headers := map[string][]string{ + "x-amz-acl": {string(perm)}, + "x-amz-copy-source": {escapePath(source)}, + } + options.addHeaders(headers) + req := &request{ + method: "PUT", + bucket: b.Name, + path: path, + headers: headers, + } + resp := &CopyObjectResult{} + err := b.S3.query(req, resp) + if err != nil { + return resp, err + } + return resp, nil +} + +// PutReader inserts an object into the S3 bucket by consuming data +// from r until EOF. 
+func (b *Bucket) PutReader(path string, r io.Reader, length int64, contType string, perm ACL, options Options) error { + headers := map[string][]string{ + "Content-Length": {strconv.FormatInt(length, 10)}, + "Content-Type": {contType}, + "x-amz-acl": {string(perm)}, + } + options.addHeaders(headers) + req := &request{ + method: "PUT", + bucket: b.Name, + path: path, + headers: headers, + payload: r, + } + return b.S3.query(req, nil) +} + +// addHeaders adds o's specified fields to headers +func (o Options) addHeaders(headers map[string][]string) { + if o.SSE { + headers["x-amz-server-side-encryption"] = []string{string(S3Managed)} + } else if o.SSEKMS { + headers["x-amz-server-side-encryption"] = []string{string(KMSManaged)} + if len(o.SSEKMSKeyId) != 0 { + headers["x-amz-server-side-encryption-aws-kms-key-id"] = []string{o.SSEKMSKeyId} + } + } else if len(o.SSECustomerAlgorithm) != 0 && len(o.SSECustomerKey) != 0 && len(o.SSECustomerKeyMD5) != 0 { + // Amazon-managed keys and customer-managed keys are mutually exclusive + headers["x-amz-server-side-encryption-customer-algorithm"] = []string{o.SSECustomerAlgorithm} + headers["x-amz-server-side-encryption-customer-key"] = []string{o.SSECustomerKey} + headers["x-amz-server-side-encryption-customer-key-MD5"] = []string{o.SSECustomerKeyMD5} + } + if len(o.Range) != 0 { + headers["Range"] = []string{o.Range} + } + if len(o.ContentEncoding) != 0 { + headers["Content-Encoding"] = []string{o.ContentEncoding} + } + if len(o.CacheControl) != 0 { + headers["Cache-Control"] = []string{o.CacheControl} + } + if len(o.ContentMD5) != 0 { + headers["Content-MD5"] = []string{o.ContentMD5} + } + if len(o.RedirectLocation) != 0 { + headers["x-amz-website-redirect-location"] = []string{o.RedirectLocation} + } + if len(o.ContentDisposition) != 0 { + headers["Content-Disposition"] = []string{o.ContentDisposition} + } + if len(o.StorageClass) != 0 { + headers["x-amz-storage-class"] = []string{string(o.StorageClass)} + + } + for k, v := range o.Meta { + headers["x-amz-meta-"+k] = v + } +} + +// addHeaders adds o's specified fields to headers +func (o CopyOptions) addHeaders(headers map[string][]string) { + o.Options.addHeaders(headers) + if len(o.MetadataDirective) != 0 { + headers["x-amz-metadata-directive"] = []string{o.MetadataDirective} + } + if len(o.CopySourceOptions) != 0 { + headers["x-amz-copy-source-range"] = []string{o.CopySourceOptions} + } + if len(o.ContentType) != 0 { + headers["Content-Type"] = []string{o.ContentType} + } +} + +func makeXmlBuffer(doc []byte) *bytes.Buffer { + buf := new(bytes.Buffer) + buf.WriteString(xml.Header) + buf.Write(doc) + return buf +} + +type IndexDocument struct { + Suffix string `xml:"Suffix"` +} + +type ErrorDocument struct { + Key string `xml:"Key"` +} + +type RoutingRule struct { + ConditionKeyPrefixEquals string `xml:"Condition>KeyPrefixEquals"` + RedirectReplaceKeyPrefixWith string `xml:"Redirect>ReplaceKeyPrefixWith,omitempty"` + RedirectReplaceKeyWith string `xml:"Redirect>ReplaceKeyWith,omitempty"` +} + +type RedirectAllRequestsTo struct { + HostName string `xml:"HostName"` + Protocol string `xml:"Protocol,omitempty"` +} + +type WebsiteConfiguration struct { + XMLName xml.Name `xml:"http://s3.amazonaws.com/doc/2006-03-01/ WebsiteConfiguration"` + IndexDocument *IndexDocument `xml:"IndexDocument,omitempty"` + ErrorDocument *ErrorDocument `xml:"ErrorDocument,omitempty"` + RoutingRules *[]RoutingRule `xml:"RoutingRules>RoutingRule,omitempty"` + RedirectAllRequestsTo *RedirectAllRequestsTo 
`xml:"RedirectAllRequestsTo,omitempty"` +} + +// PutBucketWebsite configures a bucket as a website. +// +// See http://goo.gl/TpRlUy for details. +func (b *Bucket) PutBucketWebsite(configuration WebsiteConfiguration) error { + doc, err := xml.Marshal(configuration) + if err != nil { + return err + } + + buf := makeXmlBuffer(doc) + + return b.PutBucketSubresource("website", buf, int64(buf.Len())) +} + +func (b *Bucket) PutBucketSubresource(subresource string, r io.Reader, length int64) error { + headers := map[string][]string{ + "Content-Length": {strconv.FormatInt(length, 10)}, + } + req := &request{ + path: "/", + method: "PUT", + bucket: b.Name, + headers: headers, + payload: r, + params: url.Values{subresource: {""}}, + } + + return b.S3.query(req, nil) +} + +// Del removes an object from the S3 bucket. +// +// See http://goo.gl/APeTt for details. +func (b *Bucket) Del(path string) error { + req := &request{ + method: "DELETE", + bucket: b.Name, + path: path, + } + return b.S3.query(req, nil) +} + +type Delete struct { + Quiet bool `xml:"Quiet,omitempty"` + Objects []Object `xml:"Object"` +} + +type Object struct { + Key string `xml:"Key"` + VersionId string `xml:"VersionId,omitempty"` +} + +// DelMulti removes up to 1000 objects from the S3 bucket. +// +// See http://goo.gl/jx6cWK for details. +func (b *Bucket) DelMulti(objects Delete) error { + doc, err := xml.Marshal(objects) + if err != nil { + return err + } + + buf := makeXmlBuffer(doc) + digest := md5.New() + size, err := digest.Write(buf.Bytes()) + if err != nil { + return err + } + + headers := map[string][]string{ + "Content-Length": {strconv.FormatInt(int64(size), 10)}, + "Content-MD5": {base64.StdEncoding.EncodeToString(digest.Sum(nil))}, + "Content-Type": {"text/xml"}, + } + req := &request{ + path: "/", + method: "POST", + params: url.Values{"delete": {""}}, + bucket: b.Name, + headers: headers, + payload: buf, + } + + return b.S3.query(req, nil) +} + +// The ListResp type holds the results of a List bucket operation. +type ListResp struct { + Name string + Prefix string + Delimiter string + Marker string + MaxKeys int + // IsTruncated is true if the results have been truncated because + // there are more keys and prefixes than can fit in MaxKeys. + // N.B. this is the opposite sense to that documented (incorrectly) in + // http://goo.gl/YjQTc + IsTruncated bool + Contents []Key + CommonPrefixes []string `xml:">Prefix"` + // if IsTruncated is true, pass NextMarker as marker argument to List() + // to get the next set of keys + NextMarker string +} + +// The Key type represents an item stored in an S3 bucket. +type Key struct { + Key string + LastModified string + Size int64 + // ETag gives the hex-encoded MD5 sum of the contents, + // surrounded with double-quotes. + ETag string + StorageClass string + Owner Owner +} + +// List returns information about objects in an S3 bucket. +// +// The prefix parameter limits the response to keys that begin with the +// specified prefix. +// +// The delim parameter causes the response to group all of the keys that +// share a common prefix up to the next delimiter in a single entry within +// the CommonPrefixes field. You can use delimiters to separate a bucket +// into different groupings of keys, similar to how folders would work. +// +// The marker parameter specifies the key to start with when listing objects +// in a bucket. Amazon S3 lists objects in alphabetical order and +// will return keys alphabetically greater than the marker. 
+// +// The max parameter specifies how many keys + common prefixes to return in +// the response. The default is 1000. +// +// For example, given these keys in a bucket: +// +// index.html +// index2.html +// photos/2006/January/sample.jpg +// photos/2006/February/sample2.jpg +// photos/2006/February/sample3.jpg +// photos/2006/February/sample4.jpg +// +// Listing this bucket with delimiter set to "/" would yield the +// following result: +// +// &ListResp{ +// Name: "sample-bucket", +// MaxKeys: 1000, +// Delimiter: "/", +// Contents: []Key{ +// {Key: "index.html", "index2.html"}, +// }, +// CommonPrefixes: []string{ +// "photos/", +// }, +// } +// +// Listing the same bucket with delimiter set to "/" and prefix set to +// "photos/2006/" would yield the following result: +// +// &ListResp{ +// Name: "sample-bucket", +// MaxKeys: 1000, +// Delimiter: "/", +// Prefix: "photos/2006/", +// CommonPrefixes: []string{ +// "photos/2006/February/", +// "photos/2006/January/", +// }, +// } +// +// See http://goo.gl/YjQTc for details. +func (b *Bucket) List(prefix, delim, marker string, max int) (result *ListResp, err error) { + params := map[string][]string{ + "prefix": {prefix}, + "delimiter": {delim}, + "marker": {marker}, + } + if max != 0 { + params["max-keys"] = []string{strconv.FormatInt(int64(max), 10)} + } + req := &request{ + bucket: b.Name, + params: params, + } + result = &ListResp{} + for attempt := attempts.Start(); attempt.Next(); { + err = b.S3.query(req, result) + if !shouldRetry(err) { + break + } + } + if err != nil { + return nil, err + } + // if NextMarker is not returned, it should be set to the name of last key, + // so let's do it so that each caller doesn't have to + if result.IsTruncated && result.NextMarker == "" { + n := len(result.Contents) + if n > 0 { + result.NextMarker = result.Contents[n-1].Key + } + } + return result, nil +} + +// The VersionsResp type holds the results of a list bucket Versions operation. +type VersionsResp struct { + Name string + Prefix string + KeyMarker string + VersionIdMarker string + MaxKeys int + Delimiter string + IsTruncated bool + Versions []Version `xml:"Version"` + CommonPrefixes []string `xml:">Prefix"` +} + +// The Version type represents an object version stored in an S3 bucket. +type Version struct { + Key string + VersionId string + IsLatest bool + LastModified string + // ETag gives the hex-encoded MD5 sum of the contents, + // surrounded with double-quotes. + ETag string + Size int64 + Owner Owner + StorageClass string +} + +func (b *Bucket) Versions(prefix, delim, keyMarker string, versionIdMarker string, max int) (result *VersionsResp, err error) { + params := map[string][]string{ + "versions": {""}, + "prefix": {prefix}, + "delimiter": {delim}, + } + + if len(versionIdMarker) != 0 { + params["version-id-marker"] = []string{versionIdMarker} + } + if len(keyMarker) != 0 { + params["key-marker"] = []string{keyMarker} + } + + if max != 0 { + params["max-keys"] = []string{strconv.FormatInt(int64(max), 10)} + } + req := &request{ + bucket: b.Name, + params: params, + } + result = &VersionsResp{} + for attempt := attempts.Start(); attempt.Next(); { + err = b.S3.query(req, result) + if !shouldRetry(err) { + break + } + } + if err != nil { + return nil, err + } + return result, nil +} + +type GetLocationResp struct { + Location string `xml:",innerxml"` +} + +func (b *Bucket) Location() (string, error) { + r, err := b.Get("/?location") + if err != nil { + return "", err + } + + // Parse the XML response. 
+ var resp GetLocationResp + if err = xml.Unmarshal(r, &resp); err != nil { + return "", err + } + + if resp.Location == "" { + return "us-east-1", nil + } else { + return resp.Location, nil + } +} + +// URL returns a non-signed URL that allows retriving the +// object at path. It only works if the object is publicly +// readable (see SignedURL). +func (b *Bucket) URL(path string) string { + req := &request{ + bucket: b.Name, + path: path, + } + err := b.S3.prepare(req) + if err != nil { + panic(err) + } + u, err := req.url() + if err != nil { + panic(err) + } + u.RawQuery = "" + return u.String() +} + +// SignedURL returns a signed URL that allows anyone holding the URL +// to retrieve the object at path. The signature is valid until expires. +func (b *Bucket) SignedURL(path string, expires time.Time) string { + return b.SignedURLWithArgs(path, expires, nil, nil) +} + +// SignedURLWithArgs returns a signed URL that allows anyone holding the URL +// to retrieve the object at path. The signature is valid until expires. +func (b *Bucket) SignedURLWithArgs(path string, expires time.Time, params url.Values, headers http.Header) string { + return b.SignedURLWithMethod("GET", path, expires, params, headers) +} + +// SignedURLWithMethod returns a signed URL that allows anyone holding the URL +// to either retrieve the object at path or make a HEAD request against it. The signature is valid until expires. +func (b *Bucket) SignedURLWithMethod(method, path string, expires time.Time, params url.Values, headers http.Header) string { + var uv = url.Values{} + + if params != nil { + uv = params + } + + if b.S3.Signature == aws.V2Signature { + uv.Set("Expires", strconv.FormatInt(expires.Unix(), 10)) + } else { + uv.Set("X-Amz-Expires", strconv.FormatInt(expires.Unix()-time.Now().Unix(), 10)) + } + + req := &request{ + method: method, + bucket: b.Name, + path: path, + params: uv, + headers: headers, + } + err := b.S3.prepare(req) + if err != nil { + panic(err) + } + u, err := req.url() + if err != nil { + panic(err) + } + if b.S3.Auth.Token() != "" && b.S3.Signature == aws.V2Signature { + return u.String() + "&x-amz-security-token=" + url.QueryEscape(req.headers["X-Amz-Security-Token"][0]) + } else { + return u.String() + } +} + +// UploadSignedURL returns a signed URL that allows anyone holding the URL +// to upload the object at path. The signature is valid until expires. 
+// contenttype is a string like image/png +// name is the resource name in s3 terminology like images/ali.png [obviously excluding the bucket name itself] +func (b *Bucket) UploadSignedURL(name, method, content_type string, expires time.Time) string { + expire_date := expires.Unix() + if method != "POST" { + method = "PUT" + } + + a := b.S3.Auth + tokenData := "" + + if a.Token() != "" { + tokenData = "x-amz-security-token:" + a.Token() + "\n" + } + + stringToSign := method + "\n\n" + content_type + "\n" + strconv.FormatInt(expire_date, 10) + "\n" + tokenData + "/" + path.Join(b.Name, name) + secretKey := a.SecretKey + accessId := a.AccessKey + mac := hmac.New(sha1.New, []byte(secretKey)) + mac.Write([]byte(stringToSign)) + macsum := mac.Sum(nil) + signature := base64.StdEncoding.EncodeToString([]byte(macsum)) + signature = strings.TrimSpace(signature) + + var signedurl *url.URL + var err error + if b.Region.S3Endpoint != "" { + signedurl, err = url.Parse(b.Region.S3Endpoint) + name = b.Name + "/" + name + } else { + signedurl, err = url.Parse("https://" + b.Name + ".s3.amazonaws.com/") + } + + if err != nil { + log.Println("ERROR sining url for S3 upload", err) + return "" + } + signedurl.Path = name + params := url.Values{} + params.Add("AWSAccessKeyId", accessId) + params.Add("Expires", strconv.FormatInt(expire_date, 10)) + params.Add("Signature", signature) + if a.Token() != "" { + params.Add("x-amz-security-token", a.Token()) + } + + signedurl.RawQuery = params.Encode() + return signedurl.String() +} + +// PostFormArgs returns the action and input fields needed to allow anonymous +// uploads to a bucket within the expiration limit +// Additional conditions can be specified with conds +func (b *Bucket) PostFormArgsEx(path string, expires time.Time, redirect string, conds []string) (action string, fields map[string]string) { + conditions := make([]string, 0) + fields = map[string]string{ + "AWSAccessKeyId": b.Auth.AccessKey, + "key": path, + } + + if token := b.S3.Auth.Token(); token != "" { + fields["x-amz-security-token"] = token + conditions = append(conditions, + fmt.Sprintf("{\"x-amz-security-token\": \"%s\"}", token)) + } + + if conds != nil { + conditions = append(conditions, conds...) 
+ } + + conditions = append(conditions, fmt.Sprintf("{\"key\": \"%s\"}", path)) + conditions = append(conditions, fmt.Sprintf("{\"bucket\": \"%s\"}", b.Name)) + if redirect != "" { + conditions = append(conditions, fmt.Sprintf("{\"success_action_redirect\": \"%s\"}", redirect)) + fields["success_action_redirect"] = redirect + } + + vExpiration := expires.Format("2006-01-02T15:04:05Z") + vConditions := strings.Join(conditions, ",") + policy := fmt.Sprintf("{\"expiration\": \"%s\", \"conditions\": [%s]}", vExpiration, vConditions) + policy64 := base64.StdEncoding.EncodeToString([]byte(policy)) + fields["policy"] = policy64 + + signer := hmac.New(sha1.New, []byte(b.Auth.SecretKey)) + signer.Write([]byte(policy64)) + fields["signature"] = base64.StdEncoding.EncodeToString(signer.Sum(nil)) + + action = fmt.Sprintf("%s/%s/", b.S3.Region.S3Endpoint, b.Name) + return +} + +// PostFormArgs returns the action and input fields needed to allow anonymous +// uploads to a bucket within the expiration limit +func (b *Bucket) PostFormArgs(path string, expires time.Time, redirect string) (action string, fields map[string]string) { + return b.PostFormArgsEx(path, expires, redirect, nil) +} + +type request struct { + method string + bucket string + path string + params url.Values + headers http.Header + baseurl string + payload io.Reader + prepared bool +} + +func (req *request) url() (*url.URL, error) { + u, err := url.Parse(req.baseurl) + if err != nil { + return nil, fmt.Errorf("bad S3 endpoint URL %q: %v", req.baseurl, err) + } + u.RawQuery = req.params.Encode() + u.Path = req.path + return u, nil +} + +// query prepares and runs the req request. +// If resp is not nil, the XML data contained in the response +// body will be unmarshalled on it. +func (s3 *S3) query(req *request, resp interface{}) error { + err := s3.prepare(req) + if err != nil { + return err + } + r, err := s3.run(req, resp) + if r != nil && r.Body != nil { + r.Body.Close() + } + return err +} + +// queryV4Signprepares and runs the req request, signed with aws v4 signatures. +// If resp is not nil, the XML data contained in the response +// body will be unmarshalled on it. +func (s3 *S3) queryV4Sign(req *request, resp interface{}) error { + if req.headers == nil { + req.headers = map[string][]string{} + } + + err := s3.setBaseURL(req) + if err != nil { + return err + } + + hreq, err := s3.setupHttpRequest(req) + if err != nil { + return err + } + + // req.Host must be set for V4 signature calculation + hreq.Host = hreq.URL.Host + + signer := aws.NewV4Signer(s3.Auth, "s3", s3.Region) + signer.IncludeXAmzContentSha256 = true + signer.Sign(hreq) + + _, err = s3.doHttpRequest(hreq, resp) + return err +} + +// Sets baseurl on req from bucket name and the region endpoint +func (s3 *S3) setBaseURL(req *request) error { + if req.bucket == "" { + req.baseurl = s3.Region.S3Endpoint + } else { + req.baseurl = s3.Region.S3BucketEndpoint + if req.baseurl == "" { + // Use the path method to address the bucket. + req.baseurl = s3.Region.S3Endpoint + req.path = "/" + req.bucket + req.path + } else { + // Just in case, prevent injection. + if strings.IndexAny(req.bucket, "/:@") >= 0 { + return fmt.Errorf("bad S3 bucket: %q", req.bucket) + } + req.baseurl = strings.Replace(req.baseurl, "${bucket}", req.bucket, -1) + } + } + + return nil +} + +// partiallyEscapedPath partially escapes the S3 path allowing for all S3 REST API calls. 
+// +// Some commands including: +// GET Bucket acl http://goo.gl/aoXflF +// GET Bucket cors http://goo.gl/UlmBdx +// GET Bucket lifecycle http://goo.gl/8Fme7M +// GET Bucket policy http://goo.gl/ClXIo3 +// GET Bucket location http://goo.gl/5lh8RD +// GET Bucket Logging http://goo.gl/sZ5ckF +// GET Bucket notification http://goo.gl/qSSZKD +// GET Bucket tagging http://goo.gl/QRvxnM +// require the first character after the bucket name in the path to be a literal '?' and +// not the escaped hex representation '%3F'. +func partiallyEscapedPath(path string) string { + pathEscapedAndSplit := strings.Split((&url.URL{Path: path}).String(), "/") + if len(pathEscapedAndSplit) >= 3 { + if len(pathEscapedAndSplit[2]) >= 3 { + // Check for the one "?" that should not be escaped. + if pathEscapedAndSplit[2][0:3] == "%3F" { + pathEscapedAndSplit[2] = "?" + pathEscapedAndSplit[2][3:] + } + } + } + return strings.Replace(strings.Join(pathEscapedAndSplit, "/"), "+", "%2B", -1) +} + +// prepare sets up req to be delivered to S3. +func (s3 *S3) prepare(req *request) error { + // Copy so they can be mutated without affecting on retries. + params := make(url.Values) + headers := make(http.Header) + for k, v := range req.params { + params[k] = v + } + for k, v := range req.headers { + headers[k] = v + } + req.params = params + req.headers = headers + + if !req.prepared { + req.prepared = true + if req.method == "" { + req.method = "GET" + } + + if !strings.HasPrefix(req.path, "/") { + req.path = "/" + req.path + } + + err := s3.setBaseURL(req) + if err != nil { + return err + } + } + + if s3.Signature == aws.V2Signature && s3.Auth.Token() != "" { + req.headers["X-Amz-Security-Token"] = []string{s3.Auth.Token()} + } else if s3.Auth.Token() != "" { + req.params.Set("X-Amz-Security-Token", s3.Auth.Token()) + } + + if s3.Signature == aws.V2Signature { + // Always sign again as it's not clear how far the + // server has handled a previous attempt. 
+ u, err := url.Parse(req.baseurl) + if err != nil { + return err + } + + signpathPartiallyEscaped := partiallyEscapedPath(req.path) + if strings.IndexAny(s3.Region.S3BucketEndpoint, "${bucket}") >= 0 { + signpathPartiallyEscaped = "/" + req.bucket + signpathPartiallyEscaped + } + req.headers["Host"] = []string{u.Host} + req.headers["Date"] = []string{time.Now().In(time.UTC).Format(time.RFC1123)} + + sign(s3.Auth, req.method, signpathPartiallyEscaped, req.params, req.headers) + } else { + hreq, err := s3.setupHttpRequest(req) + if err != nil { + return err + } + + hreq.Host = hreq.URL.Host + signer := aws.NewV4Signer(s3.Auth, "s3", s3.Region) + signer.IncludeXAmzContentSha256 = true + signer.Sign(hreq) + + req.payload = hreq.Body + if _, ok := headers["Content-Length"]; ok { + req.headers["Content-Length"] = headers["Content-Length"] + } + } + return nil +} + +// Prepares an *http.Request for doHttpRequest +func (s3 *S3) setupHttpRequest(req *request) (*http.Request, error) { + // Copy so that signing the http request will not mutate it + headers := make(http.Header) + for k, v := range req.headers { + headers[k] = v + } + req.headers = headers + + u, err := req.url() + if err != nil { + return nil, err + } + if s3.Region.Name != "generic" { + u.Opaque = fmt.Sprintf("//%s%s", u.Host, partiallyEscapedPath(u.Path)) + } + + hreq := http.Request{ + URL: u, + Method: req.method, + ProtoMajor: 1, + ProtoMinor: 1, + Header: req.headers, + Form: req.params, + } + + if v, ok := req.headers["Content-Length"]; ok { + hreq.ContentLength, _ = strconv.ParseInt(v[0], 10, 64) + delete(req.headers, "Content-Length") + } + if req.payload != nil { + hreq.Body = ioutil.NopCloser(req.payload) + } + + return &hreq, nil +} + +// doHttpRequest sends hreq and returns the http response from the server. +// If resp is not nil, the XML data contained in the response +// body will be unmarshalled on it. +func (s3 *S3) doHttpRequest(hreq *http.Request, resp interface{}) (*http.Response, error) { + hresp, err := s3.Client.Do(hreq) + if err != nil { + return nil, err + } + if debug { + dump, _ := httputil.DumpResponse(hresp, true) + log.Printf("} -> %s\n", dump) + } + if hresp.StatusCode != 200 && hresp.StatusCode != 204 && hresp.StatusCode != 206 { + return nil, buildError(hresp) + } + if resp != nil { + err = xml.NewDecoder(hresp.Body).Decode(resp) + hresp.Body.Close() + + if debug { + log.Printf("goamz.s3> decoded xml into %#v", resp) + } + + } + return hresp, err +} + +// run sends req and returns the http response from the server. +// If resp is not nil, the XML data contained in the response +// body will be unmarshalled on it. +func (s3 *S3) run(req *request, resp interface{}) (*http.Response, error) { + if debug { + log.Printf("Running S3 request: %#v", req) + } + + hreq, err := s3.setupHttpRequest(req) + if err != nil { + return nil, err + } + + return s3.doHttpRequest(hreq, resp) +} + +// Error represents an error in an operation with S3. +type Error struct { + StatusCode int // HTTP status code (200, 403, ...) + Code string // EC2 error code ("UnsupportedOperation", ...) 
+ Message string // The human-oriented error message + BucketName string + RequestId string + HostId string +} + +func (e *Error) Error() string { + return e.Message +} + +func buildError(r *http.Response) error { + if debug { + log.Printf("got error (status code %v)", r.StatusCode) + data, err := ioutil.ReadAll(r.Body) + if err != nil { + log.Printf("\tread error: %v", err) + } else { + log.Printf("\tdata:\n%s\n\n", data) + } + r.Body = ioutil.NopCloser(bytes.NewBuffer(data)) + } + + err := Error{} + // TODO return error if Unmarshal fails? + xml.NewDecoder(r.Body).Decode(&err) + r.Body.Close() + err.StatusCode = r.StatusCode + if err.Message == "" { + err.Message = r.Status + } + if debug { + log.Printf("err: %#v\n", err) + } + return &err +} + +func shouldRetry(err error) bool { + if err == nil { + return false + } + switch err { + case io.ErrUnexpectedEOF, io.EOF: + return true + } + switch e := err.(type) { + case *net.DNSError: + return true + case *net.OpError: + switch e.Op { + case "dial", "read", "write": + return true + } + case *url.Error: + // url.Error can be returned either by net/url if a URL cannot be + // parsed, or by net/http if the response is closed before the headers + // are received or parsed correctly. In that later case, e.Op is set to + // the HTTP method name with the first letter uppercased. We don't want + // to retry on POST operations, since those are not idempotent, all the + // other ones should be safe to retry. The only case where all + // operations are safe to retry are "dial" errors, since in that case + // the POST request didn't make it to the server. + + if netErr, ok := e.Err.(*net.OpError); ok && netErr.Op == "dial" { + return true + } + + switch e.Op { + case "Get", "Put", "Delete", "Head": + return shouldRetry(e.Err) + default: + return false + } + case *Error: + switch e.Code { + case "InternalError", "NoSuchUpload", "NoSuchBucket": + return true + } + switch e.StatusCode { + case 500, 503, 504: + return true + } + } + return false +} + +func hasCode(err error, code string) bool { + s3err, ok := err.(*Error) + return ok && s3err.Code == code +} + +func escapePath(s string) string { + return (&url.URL{Path: s}).String() +} diff --git a/vendor/github.com/docker/distribution/vendor/github.com/docker/goamz/s3/sign.go b/vendor/github.com/docker/distribution/vendor/github.com/docker/goamz/s3/sign.go new file mode 100644 index 000000000..6f24c6679 --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/github.com/docker/goamz/s3/sign.go @@ -0,0 +1,120 @@ +package s3 + +import ( + "crypto/hmac" + "crypto/sha1" + "encoding/base64" + "github.com/docker/goamz/aws" + "log" + "sort" + "strings" +) + +var b64 = base64.StdEncoding + +// ---------------------------------------------------------------------------- +// S3 signing (http://goo.gl/G1LrK) + +var s3ParamsToSign = map[string]bool{ + "acl": true, + "location": true, + "logging": true, + "notification": true, + "partNumber": true, + "policy": true, + "requestPayment": true, + "torrent": true, + "uploadId": true, + "uploads": true, + "versionId": true, + "versioning": true, + "versions": true, + "response-content-type": true, + "response-content-language": true, + "response-expires": true, + "response-cache-control": true, + "response-content-disposition": true, + "response-content-encoding": true, + "website": true, + "delete": true, +} + +func sign(auth aws.Auth, method, canonicalPath string, params, headers map[string][]string) { + var md5, ctype, date, xamz string + var xamzDate bool + var 
keys, sarray []string + xheaders := make(map[string]string) + for k, v := range headers { + k = strings.ToLower(k) + switch k { + case "content-md5": + md5 = v[0] + case "content-type": + ctype = v[0] + case "date": + if !xamzDate { + date = v[0] + } + default: + if strings.HasPrefix(k, "x-amz-") { + keys = append(keys, k) + xheaders[k] = strings.Join(v, ",") + if k == "x-amz-date" { + xamzDate = true + date = "" + } + } + } + } + if len(keys) > 0 { + sort.StringSlice(keys).Sort() + for i := range keys { + key := keys[i] + value := xheaders[key] + sarray = append(sarray, key+":"+value) + } + xamz = strings.Join(sarray, "\n") + "\n" + } + + expires := false + if v, ok := params["Expires"]; ok { + // Query string request authentication alternative. + expires = true + date = v[0] + params["AWSAccessKeyId"] = []string{auth.AccessKey} + } + + sarray = sarray[0:0] + for k, v := range params { + if s3ParamsToSign[k] { + for _, vi := range v { + if vi == "" { + sarray = append(sarray, k) + } else { + // "When signing you do not encode these values." + sarray = append(sarray, k+"="+vi) + } + } + } + } + if len(sarray) > 0 { + sort.StringSlice(sarray).Sort() + canonicalPath = canonicalPath + "?" + strings.Join(sarray, "&") + } + + payload := method + "\n" + md5 + "\n" + ctype + "\n" + date + "\n" + xamz + canonicalPath + hash := hmac.New(sha1.New, []byte(auth.SecretKey)) + hash.Write([]byte(payload)) + signature := make([]byte, b64.EncodedLen(hash.Size())) + b64.Encode(signature, hash.Sum(nil)) + + if expires { + params["Signature"] = []string{string(signature)} + } else { + headers["Authorization"] = []string{"AWS " + auth.AccessKey + ":" + string(signature)} + } + if debug { + log.Printf("Signature payload: %q", payload) + log.Printf("Signature: %q", signature) + } +} diff --git a/vendor/github.com/docker/distribution/vendor/github.com/docker/libtrust/CONTRIBUTING.md b/vendor/github.com/docker/distribution/vendor/github.com/docker/libtrust/CONTRIBUTING.md new file mode 100644 index 000000000..05be0f8ab --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/github.com/docker/libtrust/CONTRIBUTING.md @@ -0,0 +1,13 @@ +# Contributing to libtrust + +Want to hack on libtrust? Awesome! Here are instructions to get you +started. + +libtrust is a part of the [Docker](https://www.docker.com) project, and follows +the same rules and principles. If you're already familiar with the way +Docker does things, you'll feel right at home. + +Otherwise, go read +[Docker's contributions guidelines](https://github.com/docker/docker/blob/master/CONTRIBUTING.md). + +Happy hacking! diff --git a/vendor/github.com/docker/distribution/vendor/github.com/docker/libtrust/LICENSE b/vendor/github.com/docker/distribution/vendor/github.com/docker/libtrust/LICENSE new file mode 100644 index 000000000..27448585a --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/github.com/docker/libtrust/LICENSE @@ -0,0 +1,191 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. 
For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + Copyright 2014 Docker, Inc. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
diff --git a/vendor/github.com/docker/distribution/vendor/github.com/docker/libtrust/MAINTAINERS b/vendor/github.com/docker/distribution/vendor/github.com/docker/libtrust/MAINTAINERS new file mode 100644 index 000000000..9768175fe --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/github.com/docker/libtrust/MAINTAINERS @@ -0,0 +1,3 @@ +Solomon Hykes +Josh Hawn (github: jlhawn) +Derek McGowan (github: dmcgowan) diff --git a/vendor/github.com/docker/distribution/vendor/github.com/docker/libtrust/README.md b/vendor/github.com/docker/distribution/vendor/github.com/docker/libtrust/README.md new file mode 100644 index 000000000..8e7db3818 --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/github.com/docker/libtrust/README.md @@ -0,0 +1,18 @@ +# libtrust + +Libtrust is library for managing authentication and authorization using public key cryptography. + +Authentication is handled using the identity attached to the public key. +Libtrust provides multiple methods to prove possession of the private key associated with an identity. + - TLS x509 certificates + - Signature verification + - Key Challenge + +Authorization and access control is managed through a distributed trust graph. +Trust servers are used as the authorities of the trust graph and allow caching portions of the graph for faster access. + +## Copyright and license + +Code and documentation copyright 2014 Docker, inc. Code released under the Apache 2.0 license. +Docs released under Creative commons. + diff --git a/vendor/github.com/docker/distribution/vendor/github.com/docker/libtrust/certificates.go b/vendor/github.com/docker/distribution/vendor/github.com/docker/libtrust/certificates.go new file mode 100644 index 000000000..3dcca33cb --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/github.com/docker/libtrust/certificates.go @@ -0,0 +1,175 @@ +package libtrust + +import ( + "crypto/rand" + "crypto/x509" + "crypto/x509/pkix" + "encoding/pem" + "fmt" + "io/ioutil" + "math/big" + "net" + "time" +) + +type certTemplateInfo struct { + commonName string + domains []string + ipAddresses []net.IP + isCA bool + clientAuth bool + serverAuth bool +} + +func generateCertTemplate(info *certTemplateInfo) *x509.Certificate { + // Generate a certificate template which is valid from the past week to + // 10 years from now. The usage of the certificate depends on the + // specified fields in the given certTempInfo object. 
+ var ( + keyUsage x509.KeyUsage + extKeyUsage []x509.ExtKeyUsage + ) + + if info.isCA { + keyUsage = x509.KeyUsageCertSign + } + + if info.clientAuth { + extKeyUsage = append(extKeyUsage, x509.ExtKeyUsageClientAuth) + } + + if info.serverAuth { + extKeyUsage = append(extKeyUsage, x509.ExtKeyUsageServerAuth) + } + + return &x509.Certificate{ + SerialNumber: big.NewInt(0), + Subject: pkix.Name{ + CommonName: info.commonName, + }, + NotBefore: time.Now().Add(-time.Hour * 24 * 7), + NotAfter: time.Now().Add(time.Hour * 24 * 365 * 10), + DNSNames: info.domains, + IPAddresses: info.ipAddresses, + IsCA: info.isCA, + KeyUsage: keyUsage, + ExtKeyUsage: extKeyUsage, + BasicConstraintsValid: info.isCA, + } +} + +func generateCert(pub PublicKey, priv PrivateKey, subInfo, issInfo *certTemplateInfo) (cert *x509.Certificate, err error) { + pubCertTemplate := generateCertTemplate(subInfo) + privCertTemplate := generateCertTemplate(issInfo) + + certDER, err := x509.CreateCertificate( + rand.Reader, pubCertTemplate, privCertTemplate, + pub.CryptoPublicKey(), priv.CryptoPrivateKey(), + ) + if err != nil { + return nil, fmt.Errorf("failed to create certificate: %s", err) + } + + cert, err = x509.ParseCertificate(certDER) + if err != nil { + return nil, fmt.Errorf("failed to parse certificate: %s", err) + } + + return +} + +// GenerateSelfSignedServerCert creates a self-signed certificate for the +// given key which is to be used for TLS servers with the given domains and +// IP addresses. +func GenerateSelfSignedServerCert(key PrivateKey, domains []string, ipAddresses []net.IP) (*x509.Certificate, error) { + info := &certTemplateInfo{ + commonName: key.KeyID(), + domains: domains, + ipAddresses: ipAddresses, + serverAuth: true, + } + + return generateCert(key.PublicKey(), key, info, info) +} + +// GenerateSelfSignedClientCert creates a self-signed certificate for the +// given key which is to be used for TLS clients. +func GenerateSelfSignedClientCert(key PrivateKey) (*x509.Certificate, error) { + info := &certTemplateInfo{ + commonName: key.KeyID(), + clientAuth: true, + } + + return generateCert(key.PublicKey(), key, info, info) +} + +// GenerateCACert creates a certificate which can be used as a trusted +// certificate authority. +func GenerateCACert(signer PrivateKey, trustedKey PublicKey) (*x509.Certificate, error) { + subjectInfo := &certTemplateInfo{ + commonName: trustedKey.KeyID(), + isCA: true, + } + issuerInfo := &certTemplateInfo{ + commonName: signer.KeyID(), + } + + return generateCert(trustedKey, signer, subjectInfo, issuerInfo) +} + +// GenerateCACertPool creates a certificate authority pool to be used for a +// TLS configuration. Any self-signed certificates issued by the specified +// trusted keys will be verified during a TLS handshake +func GenerateCACertPool(signer PrivateKey, trustedKeys []PublicKey) (*x509.CertPool, error) { + certPool := x509.NewCertPool() + + for _, trustedKey := range trustedKeys { + cert, err := GenerateCACert(signer, trustedKey) + if err != nil { + return nil, fmt.Errorf("failed to generate CA certificate: %s", err) + } + + certPool.AddCert(cert) + } + + return certPool, nil +} + +// LoadCertificateBundle loads certificates from the given file. The file should be pem encoded +// containing one or more certificates. The expected pem type is "CERTIFICATE". 
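A minimal usage sketch for the certificate helpers in this file (GenerateSelfSignedServerCert plus the bundle loader that follows), assuming the package is imported by its upstream path github.com/docker/libtrust and that GenerateECP256PrivateKey from ec_key.go in this same vendored tree is available; the hostname and the bundle.pem path are illustrative only.

package main

import (
    "encoding/pem"
    "fmt"
    "io/ioutil"
    "log"

    "github.com/docker/libtrust"
)

func main() {
    // Generate a server key and a self-signed certificate for it.
    key, err := libtrust.GenerateECP256PrivateKey()
    if err != nil {
        log.Fatal(err)
    }
    cert, err := libtrust.GenerateSelfSignedServerCert(key, []string{"registry.local"}, nil)
    if err != nil {
        log.Fatal(err)
    }

    // Persist the certificate as a one-entry PEM bundle and read it back.
    pemBytes := pem.EncodeToMemory(&pem.Block{Type: "CERTIFICATE", Bytes: cert.Raw})
    if err := ioutil.WriteFile("bundle.pem", pemBytes, 0600); err != nil {
        log.Fatal(err)
    }
    certs, err := libtrust.LoadCertificateBundle("bundle.pem")
    if err != nil {
        log.Fatal(err)
    }
    fmt.Println("certificates in bundle:", len(certs))
}

LoadCertificatePool wraps the same parsing into an *x509.CertPool, which is the form a TLS configuration expects for its roots.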
+func LoadCertificateBundle(filename string) ([]*x509.Certificate, error) { + b, err := ioutil.ReadFile(filename) + if err != nil { + return nil, err + } + certificates := []*x509.Certificate{} + var block *pem.Block + block, b = pem.Decode(b) + for ; block != nil; block, b = pem.Decode(b) { + if block.Type == "CERTIFICATE" { + cert, err := x509.ParseCertificate(block.Bytes) + if err != nil { + return nil, err + } + certificates = append(certificates, cert) + } else { + return nil, fmt.Errorf("invalid pem block type: %s", block.Type) + } + } + + return certificates, nil +} + +// LoadCertificatePool loads a CA pool from the given file. The file should be pem encoded +// containing one or more certificates. The expected pem type is "CERTIFICATE". +func LoadCertificatePool(filename string) (*x509.CertPool, error) { + certs, err := LoadCertificateBundle(filename) + if err != nil { + return nil, err + } + pool := x509.NewCertPool() + for _, cert := range certs { + pool.AddCert(cert) + } + return pool, nil +} diff --git a/vendor/github.com/docker/distribution/vendor/github.com/docker/libtrust/doc.go b/vendor/github.com/docker/distribution/vendor/github.com/docker/libtrust/doc.go new file mode 100644 index 000000000..ec5d2159c --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/github.com/docker/libtrust/doc.go @@ -0,0 +1,9 @@ +/* +Package libtrust provides an interface for managing authentication and +authorization using public key cryptography. Authentication is handled +using the identity attached to the public key and verified through TLS +x509 certificates, a key challenge, or signature. Authorization and +access control is managed through a trust graph distributed between +both remote trust servers and locally cached and managed data. +*/ +package libtrust diff --git a/vendor/github.com/docker/distribution/vendor/github.com/docker/libtrust/ec_key.go b/vendor/github.com/docker/distribution/vendor/github.com/docker/libtrust/ec_key.go new file mode 100644 index 000000000..00bbe4b3c --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/github.com/docker/libtrust/ec_key.go @@ -0,0 +1,428 @@ +package libtrust + +import ( + "crypto" + "crypto/ecdsa" + "crypto/elliptic" + "crypto/rand" + "crypto/x509" + "encoding/json" + "encoding/pem" + "errors" + "fmt" + "io" + "math/big" +) + +/* + * EC DSA PUBLIC KEY + */ + +// ecPublicKey implements a libtrust.PublicKey using elliptic curve digital +// signature algorithms. +type ecPublicKey struct { + *ecdsa.PublicKey + curveName string + signatureAlgorithm *signatureAlgorithm + extended map[string]interface{} +} + +func fromECPublicKey(cryptoPublicKey *ecdsa.PublicKey) (*ecPublicKey, error) { + curve := cryptoPublicKey.Curve + + switch { + case curve == elliptic.P256(): + return &ecPublicKey{cryptoPublicKey, "P-256", es256, map[string]interface{}{}}, nil + case curve == elliptic.P384(): + return &ecPublicKey{cryptoPublicKey, "P-384", es384, map[string]interface{}{}}, nil + case curve == elliptic.P521(): + return &ecPublicKey{cryptoPublicKey, "P-521", es512, map[string]interface{}{}}, nil + default: + return nil, errors.New("unsupported elliptic curve") + } +} + +// KeyType returns the key type for elliptic curve keys, i.e., "EC". +func (k *ecPublicKey) KeyType() string { + return "EC" +} + +// CurveName returns the elliptic curve identifier. +// Possible values are "P-256", "P-384", and "P-521". 
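A sketch of the typical lifecycle of these EC keys (generate, sign, verify), assuming the upstream import path github.com/docker/libtrust and the GenerateECP256PrivateKey, Sign and Verify definitions that appear further down in this file; the payload bytes are illustrative.

package main

import (
    "bytes"
    "crypto"
    "fmt"
    "log"

    "github.com/docker/libtrust"
)

func main() {
    key, err := libtrust.GenerateECP256PrivateKey()
    if err != nil {
        log.Fatal(err)
    }

    msg := []byte("manifest payload")

    // Sign the payload. For EC keys the hash is fixed by the curve, so
    // crypto.SHA256 is only a hint to the implementation.
    sig, alg, err := key.Sign(bytes.NewReader(msg), crypto.SHA256)
    if err != nil {
        log.Fatal(err)
    }
    fmt.Println("key", key.KeyID(), "signed with", alg)

    // Verify with the corresponding public key.
    if err := key.PublicKey().Verify(bytes.NewReader(msg), alg, sig); err != nil {
        log.Fatal(err)
    }
    fmt.Println("signature verified")
}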
+func (k *ecPublicKey) CurveName() string { + return k.curveName +} + +// KeyID returns a distinct identifier which is unique to this Public Key. +func (k *ecPublicKey) KeyID() string { + return keyIDFromCryptoKey(k) +} + +func (k *ecPublicKey) String() string { + return fmt.Sprintf("EC Public Key <%s>", k.KeyID()) +} + +// Verify verifyies the signature of the data in the io.Reader using this +// PublicKey. The alg parameter should identify the digital signature +// algorithm which was used to produce the signature and should be supported +// by this public key. Returns a nil error if the signature is valid. +func (k *ecPublicKey) Verify(data io.Reader, alg string, signature []byte) error { + // For EC keys there is only one supported signature algorithm depending + // on the curve parameters. + if k.signatureAlgorithm.HeaderParam() != alg { + return fmt.Errorf("unable to verify signature: EC Public Key with curve %q does not support signature algorithm %q", k.curveName, alg) + } + + // signature is the concatenation of (r, s), base64Url encoded. + sigLength := len(signature) + expectedOctetLength := 2 * ((k.Params().BitSize + 7) >> 3) + if sigLength != expectedOctetLength { + return fmt.Errorf("signature length is %d octets long, should be %d", sigLength, expectedOctetLength) + } + + rBytes, sBytes := signature[:sigLength/2], signature[sigLength/2:] + r := new(big.Int).SetBytes(rBytes) + s := new(big.Int).SetBytes(sBytes) + + hasher := k.signatureAlgorithm.HashID().New() + _, err := io.Copy(hasher, data) + if err != nil { + return fmt.Errorf("error reading data to sign: %s", err) + } + hash := hasher.Sum(nil) + + if !ecdsa.Verify(k.PublicKey, hash, r, s) { + return errors.New("invalid signature") + } + + return nil +} + +// CryptoPublicKey returns the internal object which can be used as a +// crypto.PublicKey for use with other standard library operations. The type +// is either *rsa.PublicKey or *ecdsa.PublicKey +func (k *ecPublicKey) CryptoPublicKey() crypto.PublicKey { + return k.PublicKey +} + +func (k *ecPublicKey) toMap() map[string]interface{} { + jwk := make(map[string]interface{}) + for k, v := range k.extended { + jwk[k] = v + } + jwk["kty"] = k.KeyType() + jwk["kid"] = k.KeyID() + jwk["crv"] = k.CurveName() + + xBytes := k.X.Bytes() + yBytes := k.Y.Bytes() + octetLength := (k.Params().BitSize + 7) >> 3 + // MUST include leading zeros in the output so that x, y are each + // *octetLength* bytes long. + xBuf := make([]byte, octetLength-len(xBytes), octetLength) + yBuf := make([]byte, octetLength-len(yBytes), octetLength) + xBuf = append(xBuf, xBytes...) + yBuf = append(yBuf, yBytes...) + + jwk["x"] = joseBase64UrlEncode(xBuf) + jwk["y"] = joseBase64UrlEncode(yBuf) + + return jwk +} + +// MarshalJSON serializes this Public Key using the JWK JSON serialization format for +// elliptic curve keys. +func (k *ecPublicKey) MarshalJSON() (data []byte, err error) { + return json.Marshal(k.toMap()) +} + +// PEMBlock serializes this Public Key to DER-encoded PKIX format. +func (k *ecPublicKey) PEMBlock() (*pem.Block, error) { + derBytes, err := x509.MarshalPKIXPublicKey(k.PublicKey) + if err != nil { + return nil, fmt.Errorf("unable to serialize EC PublicKey to DER-encoded PKIX format: %s", err) + } + k.extended["kid"] = k.KeyID() // For display purposes. 
+ return createPemBlock("PUBLIC KEY", derBytes, k.extended) +} + +func (k *ecPublicKey) AddExtendedField(field string, value interface{}) { + k.extended[field] = value +} + +func (k *ecPublicKey) GetExtendedField(field string) interface{} { + v, ok := k.extended[field] + if !ok { + return nil + } + return v +} + +func ecPublicKeyFromMap(jwk map[string]interface{}) (*ecPublicKey, error) { + // JWK key type (kty) has already been determined to be "EC". + // Need to extract 'crv', 'x', 'y', and 'kid' and check for + // consistency. + + // Get the curve identifier value. + crv, err := stringFromMap(jwk, "crv") + if err != nil { + return nil, fmt.Errorf("JWK EC Public Key curve identifier: %s", err) + } + + var ( + curve elliptic.Curve + sigAlg *signatureAlgorithm + ) + + switch { + case crv == "P-256": + curve = elliptic.P256() + sigAlg = es256 + case crv == "P-384": + curve = elliptic.P384() + sigAlg = es384 + case crv == "P-521": + curve = elliptic.P521() + sigAlg = es512 + default: + return nil, fmt.Errorf("JWK EC Public Key curve identifier not supported: %q\n", crv) + } + + // Get the X and Y coordinates for the public key point. + xB64Url, err := stringFromMap(jwk, "x") + if err != nil { + return nil, fmt.Errorf("JWK EC Public Key x-coordinate: %s", err) + } + x, err := parseECCoordinate(xB64Url, curve) + if err != nil { + return nil, fmt.Errorf("JWK EC Public Key x-coordinate: %s", err) + } + + yB64Url, err := stringFromMap(jwk, "y") + if err != nil { + return nil, fmt.Errorf("JWK EC Public Key y-coordinate: %s", err) + } + y, err := parseECCoordinate(yB64Url, curve) + if err != nil { + return nil, fmt.Errorf("JWK EC Public Key y-coordinate: %s", err) + } + + key := &ecPublicKey{ + PublicKey: &ecdsa.PublicKey{Curve: curve, X: x, Y: y}, + curveName: crv, signatureAlgorithm: sigAlg, + } + + // Key ID is optional too, but if it exists, it should match the key. + _, ok := jwk["kid"] + if ok { + kid, err := stringFromMap(jwk, "kid") + if err != nil { + return nil, fmt.Errorf("JWK EC Public Key ID: %s", err) + } + if kid != key.KeyID() { + return nil, fmt.Errorf("JWK EC Public Key ID does not match: %s", kid) + } + } + + key.extended = jwk + + return key, nil +} + +/* + * EC DSA PRIVATE KEY + */ + +// ecPrivateKey implements a JWK Private Key using elliptic curve digital signature +// algorithms. +type ecPrivateKey struct { + ecPublicKey + *ecdsa.PrivateKey +} + +func fromECPrivateKey(cryptoPrivateKey *ecdsa.PrivateKey) (*ecPrivateKey, error) { + publicKey, err := fromECPublicKey(&cryptoPrivateKey.PublicKey) + if err != nil { + return nil, err + } + + return &ecPrivateKey{*publicKey, cryptoPrivateKey}, nil +} + +// PublicKey returns the Public Key data associated with this Private Key. +func (k *ecPrivateKey) PublicKey() PublicKey { + return &k.ecPublicKey +} + +func (k *ecPrivateKey) String() string { + return fmt.Sprintf("EC Private Key <%s>", k.KeyID()) +} + +// Sign signs the data read from the io.Reader using a signature algorithm supported +// by the elliptic curve private key. If the specified hashing algorithm is +// supported by this key, that hash function is used to generate the signature +// otherwise the the default hashing algorithm for this key is used. Returns +// the signature and the name of the JWK signature algorithm used, e.g., +// "ES256", "ES384", "ES512". +func (k *ecPrivateKey) Sign(data io.Reader, hashID crypto.Hash) (signature []byte, alg string, err error) { + // Generate a signature of the data using the internal alg. 
+ // The given hashId is only a suggestion, and since EC keys only support + // on signature/hash algorithm given the curve name, we disregard it for + // the elliptic curve JWK signature implementation. + hasher := k.signatureAlgorithm.HashID().New() + _, err = io.Copy(hasher, data) + if err != nil { + return nil, "", fmt.Errorf("error reading data to sign: %s", err) + } + hash := hasher.Sum(nil) + + r, s, err := ecdsa.Sign(rand.Reader, k.PrivateKey, hash) + if err != nil { + return nil, "", fmt.Errorf("error producing signature: %s", err) + } + rBytes, sBytes := r.Bytes(), s.Bytes() + octetLength := (k.ecPublicKey.Params().BitSize + 7) >> 3 + // MUST include leading zeros in the output + rBuf := make([]byte, octetLength-len(rBytes), octetLength) + sBuf := make([]byte, octetLength-len(sBytes), octetLength) + + rBuf = append(rBuf, rBytes...) + sBuf = append(sBuf, sBytes...) + + signature = append(rBuf, sBuf...) + alg = k.signatureAlgorithm.HeaderParam() + + return +} + +// CryptoPrivateKey returns the internal object which can be used as a +// crypto.PublicKey for use with other standard library operations. The type +// is either *rsa.PublicKey or *ecdsa.PublicKey +func (k *ecPrivateKey) CryptoPrivateKey() crypto.PrivateKey { + return k.PrivateKey +} + +func (k *ecPrivateKey) toMap() map[string]interface{} { + jwk := k.ecPublicKey.toMap() + + dBytes := k.D.Bytes() + // The length of this octet string MUST be ceiling(log-base-2(n)/8) + // octets (where n is the order of the curve). This is because the private + // key d must be in the interval [1, n-1] so the bitlength of d should be + // no larger than the bitlength of n-1. The easiest way to find the octet + // length is to take bitlength(n-1), add 7 to force a carry, and shift this + // bit sequence right by 3, which is essentially dividing by 8 and adding + // 1 if there is any remainder. Thus, the private key value d should be + // output to (bitlength(n-1)+7)>>3 octets. + n := k.ecPublicKey.Params().N + octetLength := (new(big.Int).Sub(n, big.NewInt(1)).BitLen() + 7) >> 3 + // Create a buffer with the necessary zero-padding. + dBuf := make([]byte, octetLength-len(dBytes), octetLength) + dBuf = append(dBuf, dBytes...) + + jwk["d"] = joseBase64UrlEncode(dBuf) + + return jwk +} + +// MarshalJSON serializes this Private Key using the JWK JSON serialization format for +// elliptic curve keys. +func (k *ecPrivateKey) MarshalJSON() (data []byte, err error) { + return json.Marshal(k.toMap()) +} + +// PEMBlock serializes this Private Key to DER-encoded PKIX format. +func (k *ecPrivateKey) PEMBlock() (*pem.Block, error) { + derBytes, err := x509.MarshalECPrivateKey(k.PrivateKey) + if err != nil { + return nil, fmt.Errorf("unable to serialize EC PrivateKey to DER-encoded PKIX format: %s", err) + } + k.extended["keyID"] = k.KeyID() // For display purposes. + return createPemBlock("EC PRIVATE KEY", derBytes, k.extended) +} + +func ecPrivateKeyFromMap(jwk map[string]interface{}) (*ecPrivateKey, error) { + dB64Url, err := stringFromMap(jwk, "d") + if err != nil { + return nil, fmt.Errorf("JWK EC Private Key: %s", err) + } + + // JWK key type (kty) has already been determined to be "EC". + // Need to extract the public key information, then extract the private + // key value 'd'. 
+ publicKey, err := ecPublicKeyFromMap(jwk) + if err != nil { + return nil, err + } + + d, err := parseECPrivateParam(dB64Url, publicKey.Curve) + if err != nil { + return nil, fmt.Errorf("JWK EC Private Key d-param: %s", err) + } + + key := &ecPrivateKey{ + ecPublicKey: *publicKey, + PrivateKey: &ecdsa.PrivateKey{ + PublicKey: *publicKey.PublicKey, + D: d, + }, + } + + return key, nil +} + +/* + * Key Generation Functions. + */ + +func generateECPrivateKey(curve elliptic.Curve) (k *ecPrivateKey, err error) { + k = new(ecPrivateKey) + k.PrivateKey, err = ecdsa.GenerateKey(curve, rand.Reader) + if err != nil { + return nil, err + } + + k.ecPublicKey.PublicKey = &k.PrivateKey.PublicKey + k.extended = make(map[string]interface{}) + + return +} + +// GenerateECP256PrivateKey generates a key pair using elliptic curve P-256. +func GenerateECP256PrivateKey() (PrivateKey, error) { + k, err := generateECPrivateKey(elliptic.P256()) + if err != nil { + return nil, fmt.Errorf("error generating EC P-256 key: %s", err) + } + + k.curveName = "P-256" + k.signatureAlgorithm = es256 + + return k, nil +} + +// GenerateECP384PrivateKey generates a key pair using elliptic curve P-384. +func GenerateECP384PrivateKey() (PrivateKey, error) { + k, err := generateECPrivateKey(elliptic.P384()) + if err != nil { + return nil, fmt.Errorf("error generating EC P-384 key: %s", err) + } + + k.curveName = "P-384" + k.signatureAlgorithm = es384 + + return k, nil +} + +// GenerateECP521PrivateKey generates aß key pair using elliptic curve P-521. +func GenerateECP521PrivateKey() (PrivateKey, error) { + k, err := generateECPrivateKey(elliptic.P521()) + if err != nil { + return nil, fmt.Errorf("error generating EC P-521 key: %s", err) + } + + k.curveName = "P-521" + k.signatureAlgorithm = es512 + + return k, nil +} diff --git a/vendor/github.com/docker/distribution/vendor/github.com/docker/libtrust/filter.go b/vendor/github.com/docker/distribution/vendor/github.com/docker/libtrust/filter.go new file mode 100644 index 000000000..5b2b4fca6 --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/github.com/docker/libtrust/filter.go @@ -0,0 +1,50 @@ +package libtrust + +import ( + "path/filepath" +) + +// FilterByHosts filters the list of PublicKeys to only those which contain a +// 'hosts' pattern which matches the given host. If *includeEmpty* is true, +// then keys which do not specify any hosts are also returned. 
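A sketch of how FilterByHosts (defined next) is typically used, assuming the upstream import path github.com/docker/libtrust; keys advertise the hosts they are valid for through the "hosts" extended field, and the pattern and registry.example.com value below are illustrative.

package main

import (
    "fmt"
    "log"

    "github.com/docker/libtrust"
)

func main() {
    k1, err := libtrust.GenerateECP256PrivateKey()
    if err != nil {
        log.Fatal(err)
    }
    k2, err := libtrust.GenerateECP256PrivateKey()
    if err != nil {
        log.Fatal(err)
    }

    // Tag the first key with a host pattern; leave the second unrestricted.
    pub1 := k1.PublicKey()
    pub1.AddExtendedField("hosts", []string{"*.example.com"})
    pub2 := k2.PublicKey()

    // With includeEmpty=false, only keys whose pattern matches are returned.
    matched, err := libtrust.FilterByHosts([]libtrust.PublicKey{pub1, pub2}, "registry.example.com", false)
    if err != nil {
        log.Fatal(err)
    }
    fmt.Println("keys allowed for registry.example.com:", len(matched)) // pub1 only
}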
+func FilterByHosts(keys []PublicKey, host string, includeEmpty bool) ([]PublicKey, error) { + filtered := make([]PublicKey, 0, len(keys)) + + for _, pubKey := range keys { + var hosts []string + switch v := pubKey.GetExtendedField("hosts").(type) { + case []string: + hosts = v + case []interface{}: + for _, value := range v { + h, ok := value.(string) + if !ok { + continue + } + hosts = append(hosts, h) + } + } + + if len(hosts) == 0 { + if includeEmpty { + filtered = append(filtered, pubKey) + } + continue + } + + // Check if any hosts match pattern + for _, hostPattern := range hosts { + match, err := filepath.Match(hostPattern, host) + if err != nil { + return nil, err + } + + if match { + filtered = append(filtered, pubKey) + continue + } + } + } + + return filtered, nil +} diff --git a/vendor/github.com/docker/distribution/vendor/github.com/docker/libtrust/hash.go b/vendor/github.com/docker/distribution/vendor/github.com/docker/libtrust/hash.go new file mode 100644 index 000000000..a2df787dd --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/github.com/docker/libtrust/hash.go @@ -0,0 +1,56 @@ +package libtrust + +import ( + "crypto" + _ "crypto/sha256" // Registrer SHA224 and SHA256 + _ "crypto/sha512" // Registrer SHA384 and SHA512 + "fmt" +) + +type signatureAlgorithm struct { + algHeaderParam string + hashID crypto.Hash +} + +func (h *signatureAlgorithm) HeaderParam() string { + return h.algHeaderParam +} + +func (h *signatureAlgorithm) HashID() crypto.Hash { + return h.hashID +} + +var ( + rs256 = &signatureAlgorithm{"RS256", crypto.SHA256} + rs384 = &signatureAlgorithm{"RS384", crypto.SHA384} + rs512 = &signatureAlgorithm{"RS512", crypto.SHA512} + es256 = &signatureAlgorithm{"ES256", crypto.SHA256} + es384 = &signatureAlgorithm{"ES384", crypto.SHA384} + es512 = &signatureAlgorithm{"ES512", crypto.SHA512} +) + +func rsaSignatureAlgorithmByName(alg string) (*signatureAlgorithm, error) { + switch { + case alg == "RS256": + return rs256, nil + case alg == "RS384": + return rs384, nil + case alg == "RS512": + return rs512, nil + default: + return nil, fmt.Errorf("RSA Digital Signature Algorithm %q not supported", alg) + } +} + +func rsaPKCS1v15SignatureAlgorithmForHashID(hashID crypto.Hash) *signatureAlgorithm { + switch { + case hashID == crypto.SHA512: + return rs512 + case hashID == crypto.SHA384: + return rs384 + case hashID == crypto.SHA256: + fallthrough + default: + return rs256 + } +} diff --git a/vendor/github.com/docker/distribution/vendor/github.com/docker/libtrust/jsonsign.go b/vendor/github.com/docker/distribution/vendor/github.com/docker/libtrust/jsonsign.go new file mode 100644 index 000000000..cb2ca9a76 --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/github.com/docker/libtrust/jsonsign.go @@ -0,0 +1,657 @@ +package libtrust + +import ( + "bytes" + "crypto" + "crypto/x509" + "encoding/base64" + "encoding/json" + "errors" + "fmt" + "sort" + "time" + "unicode" +) + +var ( + // ErrInvalidSignContent is used when the content to be signed is invalid. + ErrInvalidSignContent = errors.New("invalid sign content") + + // ErrInvalidJSONContent is used when invalid json is encountered. + ErrInvalidJSONContent = errors.New("invalid json content") + + // ErrMissingSignatureKey is used when the specified signature key + // does not exist in the JSON content. 
+ ErrMissingSignatureKey = errors.New("missing signature key") +) + +type jsHeader struct { + JWK PublicKey `json:"jwk,omitempty"` + Algorithm string `json:"alg"` + Chain []string `json:"x5c,omitempty"` +} + +type jsSignature struct { + Header jsHeader `json:"header"` + Signature string `json:"signature"` + Protected string `json:"protected,omitempty"` +} + +type jsSignaturesSorted []jsSignature + +func (jsbkid jsSignaturesSorted) Swap(i, j int) { jsbkid[i], jsbkid[j] = jsbkid[j], jsbkid[i] } +func (jsbkid jsSignaturesSorted) Len() int { return len(jsbkid) } + +func (jsbkid jsSignaturesSorted) Less(i, j int) bool { + ki, kj := jsbkid[i].Header.JWK.KeyID(), jsbkid[j].Header.JWK.KeyID() + si, sj := jsbkid[i].Signature, jsbkid[j].Signature + + if ki == kj { + return si < sj + } + + return ki < kj +} + +type signKey struct { + PrivateKey + Chain []*x509.Certificate +} + +// JSONSignature represents a signature of a json object. +type JSONSignature struct { + payload string + signatures []jsSignature + indent string + formatLength int + formatTail []byte +} + +func newJSONSignature() *JSONSignature { + return &JSONSignature{ + signatures: make([]jsSignature, 0, 1), + } +} + +// Payload returns the encoded payload of the signature. This +// payload should not be signed directly +func (js *JSONSignature) Payload() ([]byte, error) { + return joseBase64UrlDecode(js.payload) +} + +func (js *JSONSignature) protectedHeader() (string, error) { + protected := map[string]interface{}{ + "formatLength": js.formatLength, + "formatTail": joseBase64UrlEncode(js.formatTail), + "time": time.Now().UTC().Format(time.RFC3339), + } + protectedBytes, err := json.Marshal(protected) + if err != nil { + return "", err + } + + return joseBase64UrlEncode(protectedBytes), nil +} + +func (js *JSONSignature) signBytes(protectedHeader string) ([]byte, error) { + buf := make([]byte, len(js.payload)+len(protectedHeader)+1) + copy(buf, protectedHeader) + buf[len(protectedHeader)] = '.' + copy(buf[len(protectedHeader)+1:], js.payload) + return buf, nil +} + +// Sign adds a signature using the given private key. +func (js *JSONSignature) Sign(key PrivateKey) error { + protected, err := js.protectedHeader() + if err != nil { + return err + } + signBytes, err := js.signBytes(protected) + if err != nil { + return err + } + sigBytes, algorithm, err := key.Sign(bytes.NewReader(signBytes), crypto.SHA256) + if err != nil { + return err + } + + js.signatures = append(js.signatures, jsSignature{ + Header: jsHeader{ + JWK: key.PublicKey(), + Algorithm: algorithm, + }, + Signature: joseBase64UrlEncode(sigBytes), + Protected: protected, + }) + + return nil +} + +// SignWithChain adds a signature using the given private key +// and setting the x509 chain. The public key of the first element +// in the chain must be the public key corresponding with the sign key. 
+func (js *JSONSignature) SignWithChain(key PrivateKey, chain []*x509.Certificate) error { + // Ensure key.Chain[0] is public key for key + //key.Chain.PublicKey + //key.PublicKey().CryptoPublicKey() + + // Verify chain + protected, err := js.protectedHeader() + if err != nil { + return err + } + signBytes, err := js.signBytes(protected) + if err != nil { + return err + } + sigBytes, algorithm, err := key.Sign(bytes.NewReader(signBytes), crypto.SHA256) + if err != nil { + return err + } + + header := jsHeader{ + Chain: make([]string, len(chain)), + Algorithm: algorithm, + } + + for i, cert := range chain { + header.Chain[i] = base64.StdEncoding.EncodeToString(cert.Raw) + } + + js.signatures = append(js.signatures, jsSignature{ + Header: header, + Signature: joseBase64UrlEncode(sigBytes), + Protected: protected, + }) + + return nil +} + +// Verify verifies all the signatures and returns the list of +// public keys used to sign. Any x509 chains are not checked. +func (js *JSONSignature) Verify() ([]PublicKey, error) { + keys := make([]PublicKey, len(js.signatures)) + for i, signature := range js.signatures { + signBytes, err := js.signBytes(signature.Protected) + if err != nil { + return nil, err + } + var publicKey PublicKey + if len(signature.Header.Chain) > 0 { + certBytes, err := base64.StdEncoding.DecodeString(signature.Header.Chain[0]) + if err != nil { + return nil, err + } + cert, err := x509.ParseCertificate(certBytes) + if err != nil { + return nil, err + } + publicKey, err = FromCryptoPublicKey(cert.PublicKey) + if err != nil { + return nil, err + } + } else if signature.Header.JWK != nil { + publicKey = signature.Header.JWK + } else { + return nil, errors.New("missing public key") + } + + sigBytes, err := joseBase64UrlDecode(signature.Signature) + if err != nil { + return nil, err + } + + err = publicKey.Verify(bytes.NewReader(signBytes), signature.Header.Algorithm, sigBytes) + if err != nil { + return nil, err + } + + keys[i] = publicKey + } + return keys, nil +} + +// VerifyChains verifies all the signatures and the chains associated +// with each signature and returns the list of verified chains. +// Signatures without an x509 chain are not checked. +func (js *JSONSignature) VerifyChains(ca *x509.CertPool) ([][]*x509.Certificate, error) { + chains := make([][]*x509.Certificate, 0, len(js.signatures)) + for _, signature := range js.signatures { + signBytes, err := js.signBytes(signature.Protected) + if err != nil { + return nil, err + } + var publicKey PublicKey + if len(signature.Header.Chain) > 0 { + certBytes, err := base64.StdEncoding.DecodeString(signature.Header.Chain[0]) + if err != nil { + return nil, err + } + cert, err := x509.ParseCertificate(certBytes) + if err != nil { + return nil, err + } + publicKey, err = FromCryptoPublicKey(cert.PublicKey) + if err != nil { + return nil, err + } + intermediates := x509.NewCertPool() + if len(signature.Header.Chain) > 1 { + intermediateChain := signature.Header.Chain[1:] + for i := range intermediateChain { + certBytes, err := base64.StdEncoding.DecodeString(intermediateChain[i]) + if err != nil { + return nil, err + } + intermediate, err := x509.ParseCertificate(certBytes) + if err != nil { + return nil, err + } + intermediates.AddCert(intermediate) + } + } + + verifyOptions := x509.VerifyOptions{ + Intermediates: intermediates, + Roots: ca, + } + + verifiedChains, err := cert.Verify(verifyOptions) + if err != nil { + return nil, err + } + chains = append(chains, verifiedChains...) 
+ + sigBytes, err := joseBase64UrlDecode(signature.Signature) + if err != nil { + return nil, err + } + + err = publicKey.Verify(bytes.NewReader(signBytes), signature.Header.Algorithm, sigBytes) + if err != nil { + return nil, err + } + } + + } + return chains, nil +} + +// JWS returns JSON serialized JWS according to +// http://tools.ietf.org/html/draft-ietf-jose-json-web-signature-31#section-7.2 +func (js *JSONSignature) JWS() ([]byte, error) { + if len(js.signatures) == 0 { + return nil, errors.New("missing signature") + } + + sort.Sort(jsSignaturesSorted(js.signatures)) + + jsonMap := map[string]interface{}{ + "payload": js.payload, + "signatures": js.signatures, + } + + return json.MarshalIndent(jsonMap, "", " ") +} + +func notSpace(r rune) bool { + return !unicode.IsSpace(r) +} + +func detectJSONIndent(jsonContent []byte) (indent string) { + if len(jsonContent) > 2 && jsonContent[0] == '{' && jsonContent[1] == '\n' { + quoteIndex := bytes.IndexRune(jsonContent[1:], '"') + if quoteIndex > 0 { + indent = string(jsonContent[2 : quoteIndex+1]) + } + } + return +} + +type jsParsedHeader struct { + JWK json.RawMessage `json:"jwk"` + Algorithm string `json:"alg"` + Chain []string `json:"x5c"` +} + +type jsParsedSignature struct { + Header jsParsedHeader `json:"header"` + Signature string `json:"signature"` + Protected string `json:"protected"` +} + +// ParseJWS parses a JWS serialized JSON object into a Json Signature. +func ParseJWS(content []byte) (*JSONSignature, error) { + type jsParsed struct { + Payload string `json:"payload"` + Signatures []jsParsedSignature `json:"signatures"` + } + parsed := &jsParsed{} + err := json.Unmarshal(content, parsed) + if err != nil { + return nil, err + } + if len(parsed.Signatures) == 0 { + return nil, errors.New("missing signatures") + } + payload, err := joseBase64UrlDecode(parsed.Payload) + if err != nil { + return nil, err + } + + js, err := NewJSONSignature(payload) + if err != nil { + return nil, err + } + js.signatures = make([]jsSignature, len(parsed.Signatures)) + for i, signature := range parsed.Signatures { + header := jsHeader{ + Algorithm: signature.Header.Algorithm, + } + if signature.Header.Chain != nil { + header.Chain = signature.Header.Chain + } + if signature.Header.JWK != nil { + publicKey, err := UnmarshalPublicKeyJWK([]byte(signature.Header.JWK)) + if err != nil { + return nil, err + } + header.JWK = publicKey + } + js.signatures[i] = jsSignature{ + Header: header, + Signature: signature.Signature, + Protected: signature.Protected, + } + } + + return js, nil +} + +// NewJSONSignature returns a new unsigned JWS from a json byte array. +// JSONSignature will need to be signed before serializing or storing. +// Optionally, one or more signatures can be provided as byte buffers, +// containing serialized JWS signatures, to assemble a fully signed JWS +// package. It is the callers responsibility to ensure uniqueness of the +// provided signatures. 
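A sketch of the detached-JWS flow built from the pieces in this file, NewJSONSignature (below), Sign, JWS, ParseJWS and Verify, assuming the upstream import path github.com/docker/libtrust and the EC key generator from ec_key.go; the JSON payload is illustrative.

package main

import (
    "fmt"
    "log"

    "github.com/docker/libtrust"
)

func main() {
    key, err := libtrust.GenerateECP256PrivateKey()
    if err != nil {
        log.Fatal(err)
    }

    content := []byte(`{
   "name": "library/alpine",
   "tag": "3.6"
}`)

    // Wrap the JSON content, then add a signature with the private key.
    js, err := libtrust.NewJSONSignature(content)
    if err != nil {
        log.Fatal(err)
    }
    if err := js.Sign(key); err != nil {
        log.Fatal(err)
    }

    // Serialize as a JWS JSON object, parse it back, and verify.
    jws, err := js.JWS()
    if err != nil {
        log.Fatal(err)
    }
    parsed, err := libtrust.ParseJWS(jws)
    if err != nil {
        log.Fatal(err)
    }
    keys, err := parsed.Verify()
    if err != nil {
        log.Fatal(err)
    }
    fmt.Println("signed by", keys[0].KeyID())
}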
+func NewJSONSignature(content []byte, signatures ...[]byte) (*JSONSignature, error) { + var dataMap map[string]interface{} + err := json.Unmarshal(content, &dataMap) + if err != nil { + return nil, err + } + + js := newJSONSignature() + js.indent = detectJSONIndent(content) + + js.payload = joseBase64UrlEncode(content) + + // Find trailing } and whitespace, put in protected header + closeIndex := bytes.LastIndexFunc(content, notSpace) + if content[closeIndex] != '}' { + return nil, ErrInvalidJSONContent + } + lastRuneIndex := bytes.LastIndexFunc(content[:closeIndex], notSpace) + if content[lastRuneIndex] == ',' { + return nil, ErrInvalidJSONContent + } + js.formatLength = lastRuneIndex + 1 + js.formatTail = content[js.formatLength:] + + if len(signatures) > 0 { + for _, signature := range signatures { + var parsedJSig jsParsedSignature + + if err := json.Unmarshal(signature, &parsedJSig); err != nil { + return nil, err + } + + // TODO(stevvooe): A lot of the code below is repeated in + // ParseJWS. It will require more refactoring to fix that. + jsig := jsSignature{ + Header: jsHeader{ + Algorithm: parsedJSig.Header.Algorithm, + }, + Signature: parsedJSig.Signature, + Protected: parsedJSig.Protected, + } + + if parsedJSig.Header.Chain != nil { + jsig.Header.Chain = parsedJSig.Header.Chain + } + + if parsedJSig.Header.JWK != nil { + publicKey, err := UnmarshalPublicKeyJWK([]byte(parsedJSig.Header.JWK)) + if err != nil { + return nil, err + } + jsig.Header.JWK = publicKey + } + + js.signatures = append(js.signatures, jsig) + } + } + + return js, nil +} + +// NewJSONSignatureFromMap returns a new unsigned JSONSignature from a map or +// struct. JWS will need to be signed before serializing or storing. +func NewJSONSignatureFromMap(content interface{}) (*JSONSignature, error) { + switch content.(type) { + case map[string]interface{}: + case struct{}: + default: + return nil, errors.New("invalid data type") + } + + js := newJSONSignature() + js.indent = " " + + payload, err := json.MarshalIndent(content, "", js.indent) + if err != nil { + return nil, err + } + js.payload = joseBase64UrlEncode(payload) + + // Remove '\n}' from formatted section, put in protected header + js.formatLength = len(payload) - 2 + js.formatTail = payload[js.formatLength:] + + return js, nil +} + +func readIntFromMap(key string, m map[string]interface{}) (int, bool) { + value, ok := m[key] + if !ok { + return 0, false + } + switch v := value.(type) { + case int: + return v, true + case float64: + return int(v), true + default: + return 0, false + } +} + +func readStringFromMap(key string, m map[string]interface{}) (v string, ok bool) { + value, ok := m[key] + if !ok { + return "", false + } + v, ok = value.(string) + return +} + +// ParsePrettySignature parses a formatted signature into a +// JSON signature. If the signatures are missing the format information +// an error is thrown. The formatted signature must be created by +// the same method as format signature. 
+func ParsePrettySignature(content []byte, signatureKey string) (*JSONSignature, error) { + var contentMap map[string]json.RawMessage + err := json.Unmarshal(content, &contentMap) + if err != nil { + return nil, fmt.Errorf("error unmarshalling content: %s", err) + } + sigMessage, ok := contentMap[signatureKey] + if !ok { + return nil, ErrMissingSignatureKey + } + + var signatureBlocks []jsParsedSignature + err = json.Unmarshal([]byte(sigMessage), &signatureBlocks) + if err != nil { + return nil, fmt.Errorf("error unmarshalling signatures: %s", err) + } + + js := newJSONSignature() + js.signatures = make([]jsSignature, len(signatureBlocks)) + + for i, signatureBlock := range signatureBlocks { + protectedBytes, err := joseBase64UrlDecode(signatureBlock.Protected) + if err != nil { + return nil, fmt.Errorf("base64 decode error: %s", err) + } + var protectedHeader map[string]interface{} + err = json.Unmarshal(protectedBytes, &protectedHeader) + if err != nil { + return nil, fmt.Errorf("error unmarshalling protected header: %s", err) + } + + formatLength, ok := readIntFromMap("formatLength", protectedHeader) + if !ok { + return nil, errors.New("missing formatted length") + } + encodedTail, ok := readStringFromMap("formatTail", protectedHeader) + if !ok { + return nil, errors.New("missing formatted tail") + } + formatTail, err := joseBase64UrlDecode(encodedTail) + if err != nil { + return nil, fmt.Errorf("base64 decode error on tail: %s", err) + } + if js.formatLength == 0 { + js.formatLength = formatLength + } else if js.formatLength != formatLength { + return nil, errors.New("conflicting format length") + } + if len(js.formatTail) == 0 { + js.formatTail = formatTail + } else if bytes.Compare(js.formatTail, formatTail) != 0 { + return nil, errors.New("conflicting format tail") + } + + header := jsHeader{ + Algorithm: signatureBlock.Header.Algorithm, + Chain: signatureBlock.Header.Chain, + } + if signatureBlock.Header.JWK != nil { + publicKey, err := UnmarshalPublicKeyJWK([]byte(signatureBlock.Header.JWK)) + if err != nil { + return nil, fmt.Errorf("error unmarshalling public key: %s", err) + } + header.JWK = publicKey + } + js.signatures[i] = jsSignature{ + Header: header, + Signature: signatureBlock.Signature, + Protected: signatureBlock.Protected, + } + } + if js.formatLength > len(content) { + return nil, errors.New("invalid format length") + } + formatted := make([]byte, js.formatLength+len(js.formatTail)) + copy(formatted, content[:js.formatLength]) + copy(formatted[js.formatLength:], js.formatTail) + js.indent = detectJSONIndent(formatted) + js.payload = joseBase64UrlEncode(formatted) + + return js, nil +} + +// PrettySignature formats a json signature into an easy to read +// single json serialized object. 
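A sketch of the embedded ("pretty") signature format handled by ParsePrettySignature above and PrettySignature below, assuming the upstream import path github.com/docker/libtrust; the manifest map and the "signatures" key (the convention used by Docker v1 signed manifests) are illustrative.

package main

import (
    "fmt"
    "log"

    "github.com/docker/libtrust"
)

func main() {
    key, err := libtrust.GenerateECP256PrivateKey()
    if err != nil {
        log.Fatal(err)
    }

    manifest := map[string]interface{}{
        "name": "library/alpine",
        "tag":  "3.6",
    }

    // Build the signature object from a map and sign it.
    js, err := libtrust.NewJSONSignatureFromMap(manifest)
    if err != nil {
        log.Fatal(err)
    }
    if err := js.Sign(key); err != nil {
        log.Fatal(err)
    }

    // Embed the signatures under a "signatures" key inside the payload itself.
    pretty, err := js.PrettySignature("signatures")
    if err != nil {
        log.Fatal(err)
    }

    // Parse the embedded form back and verify it.
    parsed, err := libtrust.ParsePrettySignature(pretty, "signatures")
    if err != nil {
        log.Fatal(err)
    }
    if _, err := parsed.Verify(); err != nil {
        log.Fatal(err)
    }
    fmt.Printf("%s\n", pretty)
}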
+func (js *JSONSignature) PrettySignature(signatureKey string) ([]byte, error) { + if len(js.signatures) == 0 { + return nil, errors.New("no signatures") + } + payload, err := joseBase64UrlDecode(js.payload) + if err != nil { + return nil, err + } + payload = payload[:js.formatLength] + + sort.Sort(jsSignaturesSorted(js.signatures)) + + var marshalled []byte + var marshallErr error + if js.indent != "" { + marshalled, marshallErr = json.MarshalIndent(js.signatures, js.indent, js.indent) + } else { + marshalled, marshallErr = json.Marshal(js.signatures) + } + if marshallErr != nil { + return nil, marshallErr + } + + buf := bytes.NewBuffer(make([]byte, 0, len(payload)+len(marshalled)+34)) + buf.Write(payload) + buf.WriteByte(',') + if js.indent != "" { + buf.WriteByte('\n') + buf.WriteString(js.indent) + buf.WriteByte('"') + buf.WriteString(signatureKey) + buf.WriteString("\": ") + buf.Write(marshalled) + buf.WriteByte('\n') + } else { + buf.WriteByte('"') + buf.WriteString(signatureKey) + buf.WriteString("\":") + buf.Write(marshalled) + } + buf.WriteByte('}') + + return buf.Bytes(), nil +} + +// Signatures provides the signatures on this JWS as opaque blobs, sorted by +// keyID. These blobs can be stored and reassembled with payloads. Internally, +// they are simply marshaled json web signatures but implementations should +// not rely on this. +func (js *JSONSignature) Signatures() ([][]byte, error) { + sort.Sort(jsSignaturesSorted(js.signatures)) + + var sb [][]byte + for _, jsig := range js.signatures { + p, err := json.Marshal(jsig) + if err != nil { + return nil, err + } + + sb = append(sb, p) + } + + return sb, nil +} + +// Merge combines the signatures from one or more other signatures into the +// method receiver. If the payloads differ for any argument, an error will be +// returned and the receiver will not be modified. +func (js *JSONSignature) Merge(others ...*JSONSignature) error { + merged := js.signatures + for _, other := range others { + if js.payload != other.payload { + return fmt.Errorf("payloads differ from merge target") + } + merged = append(merged, other.signatures...) + } + + js.signatures = merged + return nil +} diff --git a/vendor/github.com/docker/distribution/vendor/github.com/docker/libtrust/key.go b/vendor/github.com/docker/distribution/vendor/github.com/docker/libtrust/key.go new file mode 100644 index 000000000..73642db2a --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/github.com/docker/libtrust/key.go @@ -0,0 +1,253 @@ +package libtrust + +import ( + "crypto" + "crypto/ecdsa" + "crypto/rsa" + "crypto/x509" + "encoding/json" + "encoding/pem" + "errors" + "fmt" + "io" +) + +// PublicKey is a generic interface for a Public Key. +type PublicKey interface { + // KeyType returns the key type for this key. For elliptic curve keys, + // this value should be "EC". For RSA keys, this value should be "RSA". + KeyType() string + // KeyID returns a distinct identifier which is unique to this Public Key. + // The format generated by this library is a base32 encoding of a 240 bit + // hash of the public key data divided into 12 groups like so: + // ABCD:EFGH:IJKL:MNOP:QRST:UVWX:YZ23:4567:ABCD:EFGH:IJKL:MNOP + KeyID() string + // Verify verifyies the signature of the data in the io.Reader using this + // Public Key. The alg parameter should identify the digital signature + // algorithm which was used to produce the signature and should be + // supported by this public key. Returns a nil error if the signature + // is valid. 
+ Verify(data io.Reader, alg string, signature []byte) error + // CryptoPublicKey returns the internal object which can be used as a + // crypto.PublicKey for use with other standard library operations. The type + // is either *rsa.PublicKey or *ecdsa.PublicKey + CryptoPublicKey() crypto.PublicKey + // These public keys can be serialized to the standard JSON encoding for + // JSON Web Keys. See section 6 of the IETF draft RFC for JOSE JSON Web + // Algorithms. + MarshalJSON() ([]byte, error) + // These keys can also be serialized to the standard PEM encoding. + PEMBlock() (*pem.Block, error) + // The string representation of a key is its key type and ID. + String() string + AddExtendedField(string, interface{}) + GetExtendedField(string) interface{} +} + +// PrivateKey is a generic interface for a Private Key. +type PrivateKey interface { + // A PrivateKey contains all fields and methods of a PublicKey of the + // same type. The MarshalJSON method also outputs the private key as a + // JSON Web Key, and the PEMBlock method outputs the private key as a + // PEM block. + PublicKey + // PublicKey returns the PublicKey associated with this PrivateKey. + PublicKey() PublicKey + // Sign signs the data read from the io.Reader using a signature algorithm + // supported by the private key. If the specified hashing algorithm is + // supported by this key, that hash function is used to generate the + // signature otherwise the the default hashing algorithm for this key is + // used. Returns the signature and identifier of the algorithm used. + Sign(data io.Reader, hashID crypto.Hash) (signature []byte, alg string, err error) + // CryptoPrivateKey returns the internal object which can be used as a + // crypto.PublicKey for use with other standard library operations. The + // type is either *rsa.PublicKey or *ecdsa.PublicKey + CryptoPrivateKey() crypto.PrivateKey +} + +// FromCryptoPublicKey returns a libtrust PublicKey representation of the given +// *ecdsa.PublicKey or *rsa.PublicKey. Returns a non-nil error when the given +// key is of an unsupported type. +func FromCryptoPublicKey(cryptoPublicKey crypto.PublicKey) (PublicKey, error) { + switch cryptoPublicKey := cryptoPublicKey.(type) { + case *ecdsa.PublicKey: + return fromECPublicKey(cryptoPublicKey) + case *rsa.PublicKey: + return fromRSAPublicKey(cryptoPublicKey), nil + default: + return nil, fmt.Errorf("public key type %T is not supported", cryptoPublicKey) + } +} + +// FromCryptoPrivateKey returns a libtrust PrivateKey representation of the given +// *ecdsa.PrivateKey or *rsa.PrivateKey. Returns a non-nil error when the given +// key is of an unsupported type. +func FromCryptoPrivateKey(cryptoPrivateKey crypto.PrivateKey) (PrivateKey, error) { + switch cryptoPrivateKey := cryptoPrivateKey.(type) { + case *ecdsa.PrivateKey: + return fromECPrivateKey(cryptoPrivateKey) + case *rsa.PrivateKey: + return fromRSAPrivateKey(cryptoPrivateKey), nil + default: + return nil, fmt.Errorf("private key type %T is not supported", cryptoPrivateKey) + } +} + +// UnmarshalPublicKeyPEM parses the PEM encoded data and returns a libtrust +// PublicKey or an error if there is a problem with the encoding. 
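A sketch of the PEM round trip for public keys: PEMBlock on the key produces a "PUBLIC KEY" block, and UnmarshalPublicKeyPEM (defined next) parses it back. It assumes the upstream import path github.com/docker/libtrust and the EC key generator from ec_key.go.

package main

import (
    "encoding/pem"
    "fmt"
    "log"

    "github.com/docker/libtrust"
)

func main() {
    key, err := libtrust.GenerateECP256PrivateKey()
    if err != nil {
        log.Fatal(err)
    }

    // Serialize the public key as a PEM "PUBLIC KEY" block...
    block, err := key.PublicKey().PEMBlock()
    if err != nil {
        log.Fatal(err)
    }
    pemBytes := pem.EncodeToMemory(block)

    // ...and parse it back into a libtrust PublicKey.
    pub, err := libtrust.UnmarshalPublicKeyPEM(pemBytes)
    if err != nil {
        log.Fatal(err)
    }
    fmt.Println("round-tripped key ID:", pub.KeyID())
}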
+func UnmarshalPublicKeyPEM(data []byte) (PublicKey, error) { + pemBlock, _ := pem.Decode(data) + if pemBlock == nil { + return nil, errors.New("unable to find PEM encoded data") + } else if pemBlock.Type != "PUBLIC KEY" { + return nil, fmt.Errorf("unable to get PublicKey from PEM type: %s", pemBlock.Type) + } + + return pubKeyFromPEMBlock(pemBlock) +} + +// UnmarshalPublicKeyPEMBundle parses the PEM encoded data as a bundle of +// PEM blocks appended one after the other and returns a slice of PublicKey +// objects that it finds. +func UnmarshalPublicKeyPEMBundle(data []byte) ([]PublicKey, error) { + pubKeys := []PublicKey{} + + for { + var pemBlock *pem.Block + pemBlock, data = pem.Decode(data) + if pemBlock == nil { + break + } else if pemBlock.Type != "PUBLIC KEY" { + return nil, fmt.Errorf("unable to get PublicKey from PEM type: %s", pemBlock.Type) + } + + pubKey, err := pubKeyFromPEMBlock(pemBlock) + if err != nil { + return nil, err + } + + pubKeys = append(pubKeys, pubKey) + } + + return pubKeys, nil +} + +// UnmarshalPrivateKeyPEM parses the PEM encoded data and returns a libtrust +// PrivateKey or an error if there is a problem with the encoding. +func UnmarshalPrivateKeyPEM(data []byte) (PrivateKey, error) { + pemBlock, _ := pem.Decode(data) + if pemBlock == nil { + return nil, errors.New("unable to find PEM encoded data") + } + + var key PrivateKey + + switch { + case pemBlock.Type == "RSA PRIVATE KEY": + rsaPrivateKey, err := x509.ParsePKCS1PrivateKey(pemBlock.Bytes) + if err != nil { + return nil, fmt.Errorf("unable to decode RSA Private Key PEM data: %s", err) + } + key = fromRSAPrivateKey(rsaPrivateKey) + case pemBlock.Type == "EC PRIVATE KEY": + ecPrivateKey, err := x509.ParseECPrivateKey(pemBlock.Bytes) + if err != nil { + return nil, fmt.Errorf("unable to decode EC Private Key PEM data: %s", err) + } + key, err = fromECPrivateKey(ecPrivateKey) + if err != nil { + return nil, err + } + default: + return nil, fmt.Errorf("unable to get PrivateKey from PEM type: %s", pemBlock.Type) + } + + addPEMHeadersToKey(pemBlock, key.PublicKey()) + + return key, nil +} + +// UnmarshalPublicKeyJWK unmarshals the given JSON Web Key into a generic +// Public Key to be used with libtrust. +func UnmarshalPublicKeyJWK(data []byte) (PublicKey, error) { + jwk := make(map[string]interface{}) + + err := json.Unmarshal(data, &jwk) + if err != nil { + return nil, fmt.Errorf( + "decoding JWK Public Key JSON data: %s\n", err, + ) + } + + // Get the Key Type value. + kty, err := stringFromMap(jwk, "kty") + if err != nil { + return nil, fmt.Errorf("JWK Public Key type: %s", err) + } + + switch { + case kty == "EC": + // Call out to unmarshal EC public key. + return ecPublicKeyFromMap(jwk) + case kty == "RSA": + // Call out to unmarshal RSA public key. + return rsaPublicKeyFromMap(jwk) + default: + return nil, fmt.Errorf( + "JWK Public Key type not supported: %q\n", kty, + ) + } +} + +// UnmarshalPublicKeyJWKSet parses the JSON encoded data as a JSON Web Key Set +// and returns a slice of Public Key objects. 
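Because MarshalJSON on these keys emits a JSON Web Key, a public key can be round-tripped through its JWK form with the helper above. A small sketch (the libtrust and fmt imports are assumed):

    func roundTripJWK(pub libtrust.PublicKey) (libtrust.PublicKey, error) {
        jwkJSON, err := pub.MarshalJSON() // a JWK object: {"kty": ..., "kid": ..., ...}
        if err != nil {
            return nil, err
        }
        parsed, err := libtrust.UnmarshalPublicKeyJWK(jwkJSON)
        if err != nil {
            return nil, err
        }
        // The key ID is derived from the key material, so it survives the round trip.
        if parsed.KeyID() != pub.KeyID() {
            return nil, fmt.Errorf("key ID changed: %s != %s", parsed.KeyID(), pub.KeyID())
        }
        return parsed, nil
    }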
+func UnmarshalPublicKeyJWKSet(data []byte) ([]PublicKey, error) { + rawKeys, err := loadJSONKeySetRaw(data) + if err != nil { + return nil, err + } + + pubKeys := make([]PublicKey, 0, len(rawKeys)) + + for _, rawKey := range rawKeys { + pubKey, err := UnmarshalPublicKeyJWK(rawKey) + if err != nil { + return nil, err + } + pubKeys = append(pubKeys, pubKey) + } + + return pubKeys, nil +} + +// UnmarshalPrivateKeyJWK unmarshals the given JSON Web Key into a generic +// Private Key to be used with libtrust. +func UnmarshalPrivateKeyJWK(data []byte) (PrivateKey, error) { + jwk := make(map[string]interface{}) + + err := json.Unmarshal(data, &jwk) + if err != nil { + return nil, fmt.Errorf( + "decoding JWK Private Key JSON data: %s\n", err, + ) + } + + // Get the Key Type value. + kty, err := stringFromMap(jwk, "kty") + if err != nil { + return nil, fmt.Errorf("JWK Private Key type: %s", err) + } + + switch { + case kty == "EC": + // Call out to unmarshal EC private key. + return ecPrivateKeyFromMap(jwk) + case kty == "RSA": + // Call out to unmarshal RSA private key. + return rsaPrivateKeyFromMap(jwk) + default: + return nil, fmt.Errorf( + "JWK Private Key type not supported: %q\n", kty, + ) + } +} diff --git a/vendor/github.com/docker/distribution/vendor/github.com/docker/libtrust/key_files.go b/vendor/github.com/docker/distribution/vendor/github.com/docker/libtrust/key_files.go new file mode 100644 index 000000000..c526de545 --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/github.com/docker/libtrust/key_files.go @@ -0,0 +1,255 @@ +package libtrust + +import ( + "encoding/json" + "encoding/pem" + "errors" + "fmt" + "io/ioutil" + "os" + "strings" +) + +var ( + // ErrKeyFileDoesNotExist indicates that the private key file does not exist. + ErrKeyFileDoesNotExist = errors.New("key file does not exist") +) + +func readKeyFileBytes(filename string) ([]byte, error) { + data, err := ioutil.ReadFile(filename) + if err != nil { + if os.IsNotExist(err) { + err = ErrKeyFileDoesNotExist + } else { + err = fmt.Errorf("unable to read key file %s: %s", filename, err) + } + + return nil, err + } + + return data, nil +} + +/* + Loading and Saving of Public and Private Keys in either PEM or JWK format. +*/ + +// LoadKeyFile opens the given filename and attempts to read a Private Key +// encoded in either PEM or JWK format (if .json or .jwk file extension). +func LoadKeyFile(filename string) (PrivateKey, error) { + contents, err := readKeyFileBytes(filename) + if err != nil { + return nil, err + } + + var key PrivateKey + + if strings.HasSuffix(filename, ".json") || strings.HasSuffix(filename, ".jwk") { + key, err = UnmarshalPrivateKeyJWK(contents) + if err != nil { + return nil, fmt.Errorf("unable to decode private key JWK: %s", err) + } + } else { + key, err = UnmarshalPrivateKeyPEM(contents) + if err != nil { + return nil, fmt.Errorf("unable to decode private key PEM: %s", err) + } + } + + return key, nil +} + +// LoadPublicKeyFile opens the given filename and attempts to read a Public Key +// encoded in either PEM or JWK format (if .json or .jwk file extension). 
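The file helpers pick the encoding from the filename: a .json or .jwk suffix means JWK, anything else is treated as PEM. A sketch of a save-and-reload cycle (SaveKey is defined a little further down in this same file; the directory and filename are arbitrary):

    func saveAndReload(key libtrust.PrivateKey, dir string) (libtrust.PrivateKey, error) {
        // ".jwk" selects the JSON Web Key encoding; a ".pem" or extensionless
        // name would go through the PEM branch of SaveKey/LoadKeyFile instead.
        path := filepath.Join(dir, "trust_key.jwk")
        if err := libtrust.SaveKey(path, key); err != nil {
            return nil, err
        }
        return libtrust.LoadKeyFile(path)
    }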
+func LoadPublicKeyFile(filename string) (PublicKey, error) { + contents, err := readKeyFileBytes(filename) + if err != nil { + return nil, err + } + + var key PublicKey + + if strings.HasSuffix(filename, ".json") || strings.HasSuffix(filename, ".jwk") { + key, err = UnmarshalPublicKeyJWK(contents) + if err != nil { + return nil, fmt.Errorf("unable to decode public key JWK: %s", err) + } + } else { + key, err = UnmarshalPublicKeyPEM(contents) + if err != nil { + return nil, fmt.Errorf("unable to decode public key PEM: %s", err) + } + } + + return key, nil +} + +// SaveKey saves the given key to a file using the provided filename. +// This process will overwrite any existing file at the provided location. +func SaveKey(filename string, key PrivateKey) error { + var encodedKey []byte + var err error + + if strings.HasSuffix(filename, ".json") || strings.HasSuffix(filename, ".jwk") { + // Encode in JSON Web Key format. + encodedKey, err = json.MarshalIndent(key, "", " ") + if err != nil { + return fmt.Errorf("unable to encode private key JWK: %s", err) + } + } else { + // Encode in PEM format. + pemBlock, err := key.PEMBlock() + if err != nil { + return fmt.Errorf("unable to encode private key PEM: %s", err) + } + encodedKey = pem.EncodeToMemory(pemBlock) + } + + err = ioutil.WriteFile(filename, encodedKey, os.FileMode(0600)) + if err != nil { + return fmt.Errorf("unable to write private key file %s: %s", filename, err) + } + + return nil +} + +// SavePublicKey saves the given public key to the file. +func SavePublicKey(filename string, key PublicKey) error { + var encodedKey []byte + var err error + + if strings.HasSuffix(filename, ".json") || strings.HasSuffix(filename, ".jwk") { + // Encode in JSON Web Key format. + encodedKey, err = json.MarshalIndent(key, "", " ") + if err != nil { + return fmt.Errorf("unable to encode public key JWK: %s", err) + } + } else { + // Encode in PEM format. + pemBlock, err := key.PEMBlock() + if err != nil { + return fmt.Errorf("unable to encode public key PEM: %s", err) + } + encodedKey = pem.EncodeToMemory(pemBlock) + } + + err = ioutil.WriteFile(filename, encodedKey, os.FileMode(0644)) + if err != nil { + return fmt.Errorf("unable to write public key file %s: %s", filename, err) + } + + return nil +} + +// Public Key Set files + +type jwkSet struct { + Keys []json.RawMessage `json:"keys"` +} + +// LoadKeySetFile loads a key set +func LoadKeySetFile(filename string) ([]PublicKey, error) { + if strings.HasSuffix(filename, ".json") || strings.HasSuffix(filename, ".jwk") { + return loadJSONKeySetFile(filename) + } + + // Must be a PEM format file + return loadPEMKeySetFile(filename) +} + +func loadJSONKeySetRaw(data []byte) ([]json.RawMessage, error) { + if len(data) == 0 { + // This is okay, just return an empty slice. 
+ return []json.RawMessage{}, nil + } + + keySet := jwkSet{} + + err := json.Unmarshal(data, &keySet) + if err != nil { + return nil, fmt.Errorf("unable to decode JSON Web Key Set: %s", err) + } + + return keySet.Keys, nil +} + +func loadJSONKeySetFile(filename string) ([]PublicKey, error) { + contents, err := readKeyFileBytes(filename) + if err != nil && err != ErrKeyFileDoesNotExist { + return nil, err + } + + return UnmarshalPublicKeyJWKSet(contents) +} + +func loadPEMKeySetFile(filename string) ([]PublicKey, error) { + data, err := readKeyFileBytes(filename) + if err != nil && err != ErrKeyFileDoesNotExist { + return nil, err + } + + return UnmarshalPublicKeyPEMBundle(data) +} + +// AddKeySetFile adds a key to a key set +func AddKeySetFile(filename string, key PublicKey) error { + if strings.HasSuffix(filename, ".json") || strings.HasSuffix(filename, ".jwk") { + return addKeySetJSONFile(filename, key) + } + + // Must be a PEM format file + return addKeySetPEMFile(filename, key) +} + +func addKeySetJSONFile(filename string, key PublicKey) error { + encodedKey, err := json.Marshal(key) + if err != nil { + return fmt.Errorf("unable to encode trusted client key: %s", err) + } + + contents, err := readKeyFileBytes(filename) + if err != nil && err != ErrKeyFileDoesNotExist { + return err + } + + rawEntries, err := loadJSONKeySetRaw(contents) + if err != nil { + return err + } + + rawEntries = append(rawEntries, json.RawMessage(encodedKey)) + entriesWrapper := jwkSet{Keys: rawEntries} + + encodedEntries, err := json.MarshalIndent(entriesWrapper, "", " ") + if err != nil { + return fmt.Errorf("unable to encode trusted client keys: %s", err) + } + + err = ioutil.WriteFile(filename, encodedEntries, os.FileMode(0644)) + if err != nil { + return fmt.Errorf("unable to write trusted client keys file %s: %s", filename, err) + } + + return nil +} + +func addKeySetPEMFile(filename string, key PublicKey) error { + // Encode to PEM, open file for appending, write PEM. + file, err := os.OpenFile(filename, os.O_CREATE|os.O_APPEND|os.O_RDWR, os.FileMode(0644)) + if err != nil { + return fmt.Errorf("unable to open trusted client keys file %s: %s", filename, err) + } + defer file.Close() + + pemBlock, err := key.PEMBlock() + if err != nil { + return fmt.Errorf("unable to encoded trusted key: %s", err) + } + + _, err = file.Write(pem.EncodeToMemory(pemBlock)) + if err != nil { + return fmt.Errorf("unable to write trusted keys file: %s", err) + } + + return nil +} diff --git a/vendor/github.com/docker/distribution/vendor/github.com/docker/libtrust/key_manager.go b/vendor/github.com/docker/distribution/vendor/github.com/docker/libtrust/key_manager.go new file mode 100644 index 000000000..9a98ae357 --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/github.com/docker/libtrust/key_manager.go @@ -0,0 +1,175 @@ +package libtrust + +import ( + "crypto/tls" + "crypto/x509" + "fmt" + "io/ioutil" + "net" + "os" + "path" + "sync" +) + +// ClientKeyManager manages client keys on the filesystem +type ClientKeyManager struct { + key PrivateKey + clientFile string + clientDir string + + clientLock sync.RWMutex + clients []PublicKey + + configLock sync.Mutex + configs []*tls.Config +} + +// NewClientKeyManager loads a new manager from a set of key files +// and managed by the given private key. 
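The key-set helpers just above follow the same extension rule for bundles of trusted client keys. A sketch of adding a client key and reading the set back (the path is a placeholder):

    func trustClientKey(keySetPath string, client libtrust.PublicKey) ([]libtrust.PublicKey, error) {
        // AddKeySetFile appends to a JWK set (".json"/".jwk") or to a PEM bundle,
        // creating the file on first use.
        if err := libtrust.AddKeySetFile(keySetPath, client); err != nil {
            return nil, err
        }
        return libtrust.LoadKeySetFile(keySetPath)
    }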
+func NewClientKeyManager(trustKey PrivateKey, clientFile, clientDir string) (*ClientKeyManager, error) { + m := &ClientKeyManager{ + key: trustKey, + clientFile: clientFile, + clientDir: clientDir, + } + if err := m.loadKeys(); err != nil { + return nil, err + } + // TODO Start watching file and directory + + return m, nil +} + +func (c *ClientKeyManager) loadKeys() (err error) { + // Load authorized keys file + var clients []PublicKey + if c.clientFile != "" { + clients, err = LoadKeySetFile(c.clientFile) + if err != nil { + return fmt.Errorf("unable to load authorized keys: %s", err) + } + } + + // Add clients from authorized keys directory + files, err := ioutil.ReadDir(c.clientDir) + if err != nil && !os.IsNotExist(err) { + return fmt.Errorf("unable to open authorized keys directory: %s", err) + } + for _, f := range files { + if !f.IsDir() { + publicKey, err := LoadPublicKeyFile(path.Join(c.clientDir, f.Name())) + if err != nil { + return fmt.Errorf("unable to load authorized key file: %s", err) + } + clients = append(clients, publicKey) + } + } + + c.clientLock.Lock() + c.clients = clients + c.clientLock.Unlock() + + return nil +} + +// RegisterTLSConfig registers a tls configuration to manager +// such that any changes to the keys may be reflected in +// the tls client CA pool +func (c *ClientKeyManager) RegisterTLSConfig(tlsConfig *tls.Config) error { + c.clientLock.RLock() + certPool, err := GenerateCACertPool(c.key, c.clients) + if err != nil { + return fmt.Errorf("CA pool generation error: %s", err) + } + c.clientLock.RUnlock() + + tlsConfig.ClientCAs = certPool + + c.configLock.Lock() + c.configs = append(c.configs, tlsConfig) + c.configLock.Unlock() + + return nil +} + +// NewIdentityAuthTLSConfig creates a tls.Config for the server to use for +// libtrust identity authentication for the domain specified +func NewIdentityAuthTLSConfig(trustKey PrivateKey, clients *ClientKeyManager, addr string, domain string) (*tls.Config, error) { + tlsConfig := newTLSConfig() + + tlsConfig.ClientAuth = tls.RequireAndVerifyClientCert + if err := clients.RegisterTLSConfig(tlsConfig); err != nil { + return nil, err + } + + // Generate cert + ips, domains, err := parseAddr(addr) + if err != nil { + return nil, err + } + // add domain that it expects clients to use + domains = append(domains, domain) + x509Cert, err := GenerateSelfSignedServerCert(trustKey, domains, ips) + if err != nil { + return nil, fmt.Errorf("certificate generation error: %s", err) + } + tlsConfig.Certificates = []tls.Certificate{{ + Certificate: [][]byte{x509Cert.Raw}, + PrivateKey: trustKey.CryptoPrivateKey(), + Leaf: x509Cert, + }} + + return tlsConfig, nil +} + +// NewCertAuthTLSConfig creates a tls.Config for the server to use for +// certificate authentication +func NewCertAuthTLSConfig(caPath, certPath, keyPath string) (*tls.Config, error) { + tlsConfig := newTLSConfig() + + cert, err := tls.LoadX509KeyPair(certPath, keyPath) + if err != nil { + return nil, fmt.Errorf("Couldn't load X509 key pair (%s, %s): %s. Key encrypted?", certPath, keyPath, err) + } + tlsConfig.Certificates = []tls.Certificate{cert} + + // Verify client certificates against a CA? 
+ if caPath != "" { + certPool := x509.NewCertPool() + file, err := ioutil.ReadFile(caPath) + if err != nil { + return nil, fmt.Errorf("Couldn't read CA certificate: %s", err) + } + certPool.AppendCertsFromPEM(file) + + tlsConfig.ClientAuth = tls.RequireAndVerifyClientCert + tlsConfig.ClientCAs = certPool + } + + return tlsConfig, nil +} + +func newTLSConfig() *tls.Config { + return &tls.Config{ + NextProtos: []string{"http/1.1"}, + // Avoid fallback on insecure SSL protocols + MinVersion: tls.VersionTLS10, + } +} + +// parseAddr parses an address into an array of IPs and domains +func parseAddr(addr string) ([]net.IP, []string, error) { + host, _, err := net.SplitHostPort(addr) + if err != nil { + return nil, nil, err + } + var domains []string + var ips []net.IP + ip := net.ParseIP(host) + if ip != nil { + ips = []net.IP{ip} + } else { + domains = []string{host} + } + return ips, domains, nil +} diff --git a/vendor/github.com/docker/distribution/vendor/github.com/docker/libtrust/rsa_key.go b/vendor/github.com/docker/distribution/vendor/github.com/docker/libtrust/rsa_key.go new file mode 100644 index 000000000..dac4cacf2 --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/github.com/docker/libtrust/rsa_key.go @@ -0,0 +1,427 @@ +package libtrust + +import ( + "crypto" + "crypto/rand" + "crypto/rsa" + "crypto/x509" + "encoding/json" + "encoding/pem" + "errors" + "fmt" + "io" + "math/big" +) + +/* + * RSA DSA PUBLIC KEY + */ + +// rsaPublicKey implements a JWK Public Key using RSA digital signature algorithms. +type rsaPublicKey struct { + *rsa.PublicKey + extended map[string]interface{} +} + +func fromRSAPublicKey(cryptoPublicKey *rsa.PublicKey) *rsaPublicKey { + return &rsaPublicKey{cryptoPublicKey, map[string]interface{}{}} +} + +// KeyType returns the JWK key type for RSA keys, i.e., "RSA". +func (k *rsaPublicKey) KeyType() string { + return "RSA" +} + +// KeyID returns a distinct identifier which is unique to this Public Key. +func (k *rsaPublicKey) KeyID() string { + return keyIDFromCryptoKey(k) +} + +func (k *rsaPublicKey) String() string { + return fmt.Sprintf("RSA Public Key <%s>", k.KeyID()) +} + +// Verify verifyies the signature of the data in the io.Reader using this Public Key. +// The alg parameter should be the name of the JWA digital signature algorithm +// which was used to produce the signature and should be supported by this +// public key. Returns a nil error if the signature is valid. +func (k *rsaPublicKey) Verify(data io.Reader, alg string, signature []byte) error { + // Verify the signature of the given date, return non-nil error if valid. + sigAlg, err := rsaSignatureAlgorithmByName(alg) + if err != nil { + return fmt.Errorf("unable to verify Signature: %s", err) + } + + hasher := sigAlg.HashID().New() + _, err = io.Copy(hasher, data) + if err != nil { + return fmt.Errorf("error reading data to sign: %s", err) + } + hash := hasher.Sum(nil) + + err = rsa.VerifyPKCS1v15(k.PublicKey, sigAlg.HashID(), hash, signature) + if err != nil { + return fmt.Errorf("invalid %s signature: %s", sigAlg.HeaderParam(), err) + } + + return nil +} + +// CryptoPublicKey returns the internal object which can be used as a +// crypto.PublicKey for use with other standard library operations. 
The type +// is either *rsa.PublicKey or *ecdsa.PublicKey +func (k *rsaPublicKey) CryptoPublicKey() crypto.PublicKey { + return k.PublicKey +} + +func (k *rsaPublicKey) toMap() map[string]interface{} { + jwk := make(map[string]interface{}) + for k, v := range k.extended { + jwk[k] = v + } + jwk["kty"] = k.KeyType() + jwk["kid"] = k.KeyID() + jwk["n"] = joseBase64UrlEncode(k.N.Bytes()) + jwk["e"] = joseBase64UrlEncode(serializeRSAPublicExponentParam(k.E)) + + return jwk +} + +// MarshalJSON serializes this Public Key using the JWK JSON serialization format for +// RSA keys. +func (k *rsaPublicKey) MarshalJSON() (data []byte, err error) { + return json.Marshal(k.toMap()) +} + +// PEMBlock serializes this Public Key to DER-encoded PKIX format. +func (k *rsaPublicKey) PEMBlock() (*pem.Block, error) { + derBytes, err := x509.MarshalPKIXPublicKey(k.PublicKey) + if err != nil { + return nil, fmt.Errorf("unable to serialize RSA PublicKey to DER-encoded PKIX format: %s", err) + } + k.extended["kid"] = k.KeyID() // For display purposes. + return createPemBlock("PUBLIC KEY", derBytes, k.extended) +} + +func (k *rsaPublicKey) AddExtendedField(field string, value interface{}) { + k.extended[field] = value +} + +func (k *rsaPublicKey) GetExtendedField(field string) interface{} { + v, ok := k.extended[field] + if !ok { + return nil + } + return v +} + +func rsaPublicKeyFromMap(jwk map[string]interface{}) (*rsaPublicKey, error) { + // JWK key type (kty) has already been determined to be "RSA". + // Need to extract 'n', 'e', and 'kid' and check for + // consistency. + + // Get the modulus parameter N. + nB64Url, err := stringFromMap(jwk, "n") + if err != nil { + return nil, fmt.Errorf("JWK RSA Public Key modulus: %s", err) + } + + n, err := parseRSAModulusParam(nB64Url) + if err != nil { + return nil, fmt.Errorf("JWK RSA Public Key modulus: %s", err) + } + + // Get the public exponent E. + eB64Url, err := stringFromMap(jwk, "e") + if err != nil { + return nil, fmt.Errorf("JWK RSA Public Key exponent: %s", err) + } + + e, err := parseRSAPublicExponentParam(eB64Url) + if err != nil { + return nil, fmt.Errorf("JWK RSA Public Key exponent: %s", err) + } + + key := &rsaPublicKey{ + PublicKey: &rsa.PublicKey{N: n, E: e}, + } + + // Key ID is optional, but if it exists, it should match the key. + _, ok := jwk["kid"] + if ok { + kid, err := stringFromMap(jwk, "kid") + if err != nil { + return nil, fmt.Errorf("JWK RSA Public Key ID: %s", err) + } + if kid != key.KeyID() { + return nil, fmt.Errorf("JWK RSA Public Key ID does not match: %s", kid) + } + } + + if _, ok := jwk["d"]; ok { + return nil, fmt.Errorf("JWK RSA Public Key cannot contain private exponent") + } + + key.extended = jwk + + return key, nil +} + +/* + * RSA DSA PRIVATE KEY + */ + +// rsaPrivateKey implements a JWK Private Key using RSA digital signature algorithms. +type rsaPrivateKey struct { + rsaPublicKey + *rsa.PrivateKey +} + +func fromRSAPrivateKey(cryptoPrivateKey *rsa.PrivateKey) *rsaPrivateKey { + return &rsaPrivateKey{ + *fromRSAPublicKey(&cryptoPrivateKey.PublicKey), + cryptoPrivateKey, + } +} + +// PublicKey returns the Public Key data associated with this Private Key. +func (k *rsaPrivateKey) PublicKey() PublicKey { + return &k.rsaPublicKey +} + +func (k *rsaPrivateKey) String() string { + return fmt.Sprintf("RSA Private Key <%s>", k.KeyID()) +} + +// Sign signs the data read from the io.Reader using a signature algorithm supported +// by the RSA private key. 
If the specified hashing algorithm is supported by +// this key, that hash function is used to generate the signature otherwise the +// the default hashing algorithm for this key is used. Returns the signature +// and the name of the JWK signature algorithm used, e.g., "RS256", "RS384", +// "RS512". +func (k *rsaPrivateKey) Sign(data io.Reader, hashID crypto.Hash) (signature []byte, alg string, err error) { + // Generate a signature of the data using the internal alg. + sigAlg := rsaPKCS1v15SignatureAlgorithmForHashID(hashID) + hasher := sigAlg.HashID().New() + + _, err = io.Copy(hasher, data) + if err != nil { + return nil, "", fmt.Errorf("error reading data to sign: %s", err) + } + hash := hasher.Sum(nil) + + signature, err = rsa.SignPKCS1v15(rand.Reader, k.PrivateKey, sigAlg.HashID(), hash) + if err != nil { + return nil, "", fmt.Errorf("error producing signature: %s", err) + } + + alg = sigAlg.HeaderParam() + + return +} + +// CryptoPrivateKey returns the internal object which can be used as a +// crypto.PublicKey for use with other standard library operations. The type +// is either *rsa.PublicKey or *ecdsa.PublicKey +func (k *rsaPrivateKey) CryptoPrivateKey() crypto.PrivateKey { + return k.PrivateKey +} + +func (k *rsaPrivateKey) toMap() map[string]interface{} { + k.Precompute() // Make sure the precomputed values are stored. + jwk := k.rsaPublicKey.toMap() + + jwk["d"] = joseBase64UrlEncode(k.D.Bytes()) + jwk["p"] = joseBase64UrlEncode(k.Primes[0].Bytes()) + jwk["q"] = joseBase64UrlEncode(k.Primes[1].Bytes()) + jwk["dp"] = joseBase64UrlEncode(k.Precomputed.Dp.Bytes()) + jwk["dq"] = joseBase64UrlEncode(k.Precomputed.Dq.Bytes()) + jwk["qi"] = joseBase64UrlEncode(k.Precomputed.Qinv.Bytes()) + + otherPrimes := k.Primes[2:] + + if len(otherPrimes) > 0 { + otherPrimesInfo := make([]interface{}, len(otherPrimes)) + for i, r := range otherPrimes { + otherPrimeInfo := make(map[string]string, 3) + otherPrimeInfo["r"] = joseBase64UrlEncode(r.Bytes()) + crtVal := k.Precomputed.CRTValues[i] + otherPrimeInfo["d"] = joseBase64UrlEncode(crtVal.Exp.Bytes()) + otherPrimeInfo["t"] = joseBase64UrlEncode(crtVal.Coeff.Bytes()) + otherPrimesInfo[i] = otherPrimeInfo + } + jwk["oth"] = otherPrimesInfo + } + + return jwk +} + +// MarshalJSON serializes this Private Key using the JWK JSON serialization format for +// RSA keys. +func (k *rsaPrivateKey) MarshalJSON() (data []byte, err error) { + return json.Marshal(k.toMap()) +} + +// PEMBlock serializes this Private Key to DER-encoded PKIX format. +func (k *rsaPrivateKey) PEMBlock() (*pem.Block, error) { + derBytes := x509.MarshalPKCS1PrivateKey(k.PrivateKey) + k.extended["keyID"] = k.KeyID() // For display purposes. + return createPemBlock("RSA PRIVATE KEY", derBytes, k.extended) +} + +func rsaPrivateKeyFromMap(jwk map[string]interface{}) (*rsaPrivateKey, error) { + // The JWA spec for RSA Private Keys (draft rfc section 5.3.2) states that + // only the private key exponent 'd' is REQUIRED, the others are just for + // signature/decryption optimizations and SHOULD be included when the JWK + // is produced. We MAY choose to accept a JWK which only includes 'd', but + // we're going to go ahead and not choose to accept it without the extra + // fields. Only the 'oth' field will be optional (for multi-prime keys). 
+ privateExponent, err := parseRSAPrivateKeyParamFromMap(jwk, "d") + if err != nil { + return nil, fmt.Errorf("JWK RSA Private Key exponent: %s", err) + } + firstPrimeFactor, err := parseRSAPrivateKeyParamFromMap(jwk, "p") + if err != nil { + return nil, fmt.Errorf("JWK RSA Private Key prime factor: %s", err) + } + secondPrimeFactor, err := parseRSAPrivateKeyParamFromMap(jwk, "q") + if err != nil { + return nil, fmt.Errorf("JWK RSA Private Key prime factor: %s", err) + } + firstFactorCRT, err := parseRSAPrivateKeyParamFromMap(jwk, "dp") + if err != nil { + return nil, fmt.Errorf("JWK RSA Private Key CRT exponent: %s", err) + } + secondFactorCRT, err := parseRSAPrivateKeyParamFromMap(jwk, "dq") + if err != nil { + return nil, fmt.Errorf("JWK RSA Private Key CRT exponent: %s", err) + } + crtCoeff, err := parseRSAPrivateKeyParamFromMap(jwk, "qi") + if err != nil { + return nil, fmt.Errorf("JWK RSA Private Key CRT coefficient: %s", err) + } + + var oth interface{} + if _, ok := jwk["oth"]; ok { + oth = jwk["oth"] + delete(jwk, "oth") + } + + // JWK key type (kty) has already been determined to be "RSA". + // Need to extract the public key information, then extract the private + // key values. + publicKey, err := rsaPublicKeyFromMap(jwk) + if err != nil { + return nil, err + } + + privateKey := &rsa.PrivateKey{ + PublicKey: *publicKey.PublicKey, + D: privateExponent, + Primes: []*big.Int{firstPrimeFactor, secondPrimeFactor}, + Precomputed: rsa.PrecomputedValues{ + Dp: firstFactorCRT, + Dq: secondFactorCRT, + Qinv: crtCoeff, + }, + } + + if oth != nil { + // Should be an array of more JSON objects. + otherPrimesInfo, ok := oth.([]interface{}) + if !ok { + return nil, errors.New("JWK RSA Private Key: Invalid other primes info: must be an array") + } + numOtherPrimeFactors := len(otherPrimesInfo) + if numOtherPrimeFactors == 0 { + return nil, errors.New("JWK RSA Privake Key: Invalid other primes info: must be absent or non-empty") + } + otherPrimeFactors := make([]*big.Int, numOtherPrimeFactors) + productOfPrimes := new(big.Int).Mul(firstPrimeFactor, secondPrimeFactor) + crtValues := make([]rsa.CRTValue, numOtherPrimeFactors) + + for i, val := range otherPrimesInfo { + otherPrimeinfo, ok := val.(map[string]interface{}) + if !ok { + return nil, errors.New("JWK RSA Private Key: Invalid other prime info: must be a JSON object") + } + + otherPrimeFactor, err := parseRSAPrivateKeyParamFromMap(otherPrimeinfo, "r") + if err != nil { + return nil, fmt.Errorf("JWK RSA Private Key prime factor: %s", err) + } + otherFactorCRT, err := parseRSAPrivateKeyParamFromMap(otherPrimeinfo, "d") + if err != nil { + return nil, fmt.Errorf("JWK RSA Private Key CRT exponent: %s", err) + } + otherCrtCoeff, err := parseRSAPrivateKeyParamFromMap(otherPrimeinfo, "t") + if err != nil { + return nil, fmt.Errorf("JWK RSA Private Key CRT coefficient: %s", err) + } + + crtValue := crtValues[i] + crtValue.Exp = otherFactorCRT + crtValue.Coeff = otherCrtCoeff + crtValue.R = productOfPrimes + otherPrimeFactors[i] = otherPrimeFactor + productOfPrimes = new(big.Int).Mul(productOfPrimes, otherPrimeFactor) + } + + privateKey.Primes = append(privateKey.Primes, otherPrimeFactors...) + privateKey.Precomputed.CRTValues = crtValues + } + + key := &rsaPrivateKey{ + rsaPublicKey: *publicKey, + PrivateKey: privateKey, + } + + return key, nil +} + +/* + * Key Generation Functions. 
+ */ + +func generateRSAPrivateKey(bits int) (k *rsaPrivateKey, err error) { + k = new(rsaPrivateKey) + k.PrivateKey, err = rsa.GenerateKey(rand.Reader, bits) + if err != nil { + return nil, err + } + + k.rsaPublicKey.PublicKey = &k.PrivateKey.PublicKey + k.extended = make(map[string]interface{}) + + return +} + +// GenerateRSA2048PrivateKey generates a key pair using 2048-bit RSA. +func GenerateRSA2048PrivateKey() (PrivateKey, error) { + k, err := generateRSAPrivateKey(2048) + if err != nil { + return nil, fmt.Errorf("error generating RSA 2048-bit key: %s", err) + } + + return k, nil +} + +// GenerateRSA3072PrivateKey generates a key pair using 3072-bit RSA. +func GenerateRSA3072PrivateKey() (PrivateKey, error) { + k, err := generateRSAPrivateKey(3072) + if err != nil { + return nil, fmt.Errorf("error generating RSA 3072-bit key: %s", err) + } + + return k, nil +} + +// GenerateRSA4096PrivateKey generates a key pair using 4096-bit RSA. +func GenerateRSA4096PrivateKey() (PrivateKey, error) { + k, err := generateRSAPrivateKey(4096) + if err != nil { + return nil, fmt.Errorf("error generating RSA 4096-bit key: %s", err) + } + + return k, nil +} diff --git a/vendor/github.com/docker/distribution/vendor/github.com/docker/libtrust/util.go b/vendor/github.com/docker/distribution/vendor/github.com/docker/libtrust/util.go new file mode 100644 index 000000000..45dc3e182 --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/github.com/docker/libtrust/util.go @@ -0,0 +1,361 @@ +package libtrust + +import ( + "bytes" + "crypto" + "crypto/elliptic" + "crypto/tls" + "crypto/x509" + "encoding/base32" + "encoding/base64" + "encoding/binary" + "encoding/pem" + "errors" + "fmt" + "math/big" + "net/url" + "os" + "path/filepath" + "strings" + "time" +) + +// LoadOrCreateTrustKey will load a PrivateKey from the specified path +func LoadOrCreateTrustKey(trustKeyPath string) (PrivateKey, error) { + if err := os.MkdirAll(filepath.Dir(trustKeyPath), 0700); err != nil { + return nil, err + } + + trustKey, err := LoadKeyFile(trustKeyPath) + if err == ErrKeyFileDoesNotExist { + trustKey, err = GenerateECP256PrivateKey() + if err != nil { + return nil, fmt.Errorf("error generating key: %s", err) + } + + if err := SaveKey(trustKeyPath, trustKey); err != nil { + return nil, fmt.Errorf("error saving key file: %s", err) + } + + dir, file := filepath.Split(trustKeyPath) + if err := SavePublicKey(filepath.Join(dir, "public-"+file), trustKey.PublicKey()); err != nil { + return nil, fmt.Errorf("error saving public key file: %s", err) + } + } else if err != nil { + return nil, fmt.Errorf("error loading key file: %s", err) + } + return trustKey, nil +} + +// NewIdentityAuthTLSClientConfig returns a tls.Config configured to use identity +// based authentication from the specified dockerUrl, the rootConfigPath and +// the server name to which it is connecting. +// If trustUnknownHosts is true it will automatically add the host to the +// known-hosts.json in rootConfigPath. 
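LoadOrCreateTrustKey gives a caller a stable identity key: on first use it generates an EC P-256 key, saves it, and writes the matching public key alongside it; later calls simply reload the same file. A minimal sketch (key.json mirrors the path NewIdentityAuthTLSClientConfig uses below; the filepath import is assumed):

    func ensureTrustKey(rootConfigPath string) (libtrust.PrivateKey, error) {
        // First run: creates key.json plus public-key.json next to it.
        // Subsequent runs: reloads the existing key.
        return libtrust.LoadOrCreateTrustKey(filepath.Join(rootConfigPath, "key.json"))
    }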
+func NewIdentityAuthTLSClientConfig(dockerUrl string, trustUnknownHosts bool, rootConfigPath string, serverName string) (*tls.Config, error) { + tlsConfig := newTLSConfig() + + trustKeyPath := filepath.Join(rootConfigPath, "key.json") + knownHostsPath := filepath.Join(rootConfigPath, "known-hosts.json") + + u, err := url.Parse(dockerUrl) + if err != nil { + return nil, fmt.Errorf("unable to parse machine url") + } + + if u.Scheme == "unix" { + return nil, nil + } + + addr := u.Host + proto := "tcp" + + trustKey, err := LoadOrCreateTrustKey(trustKeyPath) + if err != nil { + return nil, fmt.Errorf("unable to load trust key: %s", err) + } + + knownHosts, err := LoadKeySetFile(knownHostsPath) + if err != nil { + return nil, fmt.Errorf("could not load trusted hosts file: %s", err) + } + + allowedHosts, err := FilterByHosts(knownHosts, addr, false) + if err != nil { + return nil, fmt.Errorf("error filtering hosts: %s", err) + } + + certPool, err := GenerateCACertPool(trustKey, allowedHosts) + if err != nil { + return nil, fmt.Errorf("Could not create CA pool: %s", err) + } + + tlsConfig.ServerName = serverName + tlsConfig.RootCAs = certPool + + x509Cert, err := GenerateSelfSignedClientCert(trustKey) + if err != nil { + return nil, fmt.Errorf("certificate generation error: %s", err) + } + + tlsConfig.Certificates = []tls.Certificate{{ + Certificate: [][]byte{x509Cert.Raw}, + PrivateKey: trustKey.CryptoPrivateKey(), + Leaf: x509Cert, + }} + + tlsConfig.InsecureSkipVerify = true + + testConn, err := tls.Dial(proto, addr, tlsConfig) + if err != nil { + return nil, fmt.Errorf("tls Handshake error: %s", err) + } + + opts := x509.VerifyOptions{ + Roots: tlsConfig.RootCAs, + CurrentTime: time.Now(), + DNSName: tlsConfig.ServerName, + Intermediates: x509.NewCertPool(), + } + + certs := testConn.ConnectionState().PeerCertificates + for i, cert := range certs { + if i == 0 { + continue + } + opts.Intermediates.AddCert(cert) + } + + if _, err := certs[0].Verify(opts); err != nil { + if _, ok := err.(x509.UnknownAuthorityError); ok { + if trustUnknownHosts { + pubKey, err := FromCryptoPublicKey(certs[0].PublicKey) + if err != nil { + return nil, fmt.Errorf("error extracting public key from cert: %s", err) + } + + pubKey.AddExtendedField("hosts", []string{addr}) + + if err := AddKeySetFile(knownHostsPath, pubKey); err != nil { + return nil, fmt.Errorf("error adding machine to known hosts: %s", err) + } + } else { + return nil, fmt.Errorf("unable to connect. unknown host: %s", addr) + } + } + } + + testConn.Close() + tlsConfig.InsecureSkipVerify = false + + return tlsConfig, nil +} + +// joseBase64UrlEncode encodes the given data using the standard base64 url +// encoding format but with all trailing '=' characters ommitted in accordance +// with the jose specification. +// http://tools.ietf.org/html/draft-ietf-jose-json-web-signature-31#section-2 +func joseBase64UrlEncode(b []byte) string { + return strings.TrimRight(base64.URLEncoding.EncodeToString(b), "=") +} + +// joseBase64UrlDecode decodes the given string using the standard base64 url +// decoder but first adds the appropriate number of trailing '=' characters in +// accordance with the jose specification. 
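The unpadded URL-safe encoding used here matches what the standard library exposes as base64.RawURLEncoding. A tiny in-package sketch of the round trip (the input byte is illustrative only):

    enc := joseBase64UrlEncode([]byte{0xff}) // "_w" -- the trailing "==" is stripped
    raw, err := joseBase64UrlDecode(enc)     // re-pads to "_w==" before decoding
    if err != nil || !bytes.Equal(raw, []byte{0xff}) {
        panic("jose base64url round trip failed")
    }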
+// http://tools.ietf.org/html/draft-ietf-jose-json-web-signature-31#section-2 +func joseBase64UrlDecode(s string) ([]byte, error) { + switch len(s) % 4 { + case 0: + case 2: + s += "==" + case 3: + s += "=" + default: + return nil, errors.New("illegal base64url string") + } + return base64.URLEncoding.DecodeString(s) +} + +func keyIDEncode(b []byte) string { + s := strings.TrimRight(base32.StdEncoding.EncodeToString(b), "=") + var buf bytes.Buffer + var i int + for i = 0; i < len(s)/4-1; i++ { + start := i * 4 + end := start + 4 + buf.WriteString(s[start:end] + ":") + } + buf.WriteString(s[i*4:]) + return buf.String() +} + +func keyIDFromCryptoKey(pubKey PublicKey) string { + // Generate and return a 'libtrust' fingerprint of the public key. + // For an RSA key this should be: + // SHA256(DER encoded ASN1) + // Then truncated to 240 bits and encoded into 12 base32 groups like so: + // ABCD:EFGH:IJKL:MNOP:QRST:UVWX:YZ23:4567:ABCD:EFGH:IJKL:MNOP + derBytes, err := x509.MarshalPKIXPublicKey(pubKey.CryptoPublicKey()) + if err != nil { + return "" + } + hasher := crypto.SHA256.New() + hasher.Write(derBytes) + return keyIDEncode(hasher.Sum(nil)[:30]) +} + +func stringFromMap(m map[string]interface{}, key string) (string, error) { + val, ok := m[key] + if !ok { + return "", fmt.Errorf("%q value not specified", key) + } + + str, ok := val.(string) + if !ok { + return "", fmt.Errorf("%q value must be a string", key) + } + delete(m, key) + + return str, nil +} + +func parseECCoordinate(cB64Url string, curve elliptic.Curve) (*big.Int, error) { + curveByteLen := (curve.Params().BitSize + 7) >> 3 + + cBytes, err := joseBase64UrlDecode(cB64Url) + if err != nil { + return nil, fmt.Errorf("invalid base64 URL encoding: %s", err) + } + cByteLength := len(cBytes) + if cByteLength != curveByteLen { + return nil, fmt.Errorf("invalid number of octets: got %d, should be %d", cByteLength, curveByteLen) + } + return new(big.Int).SetBytes(cBytes), nil +} + +func parseECPrivateParam(dB64Url string, curve elliptic.Curve) (*big.Int, error) { + dBytes, err := joseBase64UrlDecode(dB64Url) + if err != nil { + return nil, fmt.Errorf("invalid base64 URL encoding: %s", err) + } + + // The length of this octet string MUST be ceiling(log-base-2(n)/8) + // octets (where n is the order of the curve). This is because the private + // key d must be in the interval [1, n-1] so the bitlength of d should be + // no larger than the bitlength of n-1. The easiest way to find the octet + // length is to take bitlength(n-1), add 7 to force a carry, and shift this + // bit sequence right by 3, which is essentially dividing by 8 and adding + // 1 if there is any remainder. Thus, the private key value d should be + // output to (bitlength(n-1)+7)>>3 octets. + n := curve.Params().N + octetLength := (new(big.Int).Sub(n, big.NewInt(1)).BitLen() + 7) >> 3 + dByteLength := len(dBytes) + + if dByteLength != octetLength { + return nil, fmt.Errorf("invalid number of octets: got %d, should be %d", dByteLength, octetLength) + } + + return new(big.Int).SetBytes(dBytes), nil +} + +func parseRSAModulusParam(nB64Url string) (*big.Int, error) { + nBytes, err := joseBase64UrlDecode(nB64Url) + if err != nil { + return nil, fmt.Errorf("invalid base64 URL encoding: %s", err) + } + + return new(big.Int).SetBytes(nBytes), nil +} + +func serializeRSAPublicExponentParam(e int) []byte { + // We MUST use the minimum number of octets to represent E. 
+ // E is supposed to be 65537 for performance and security reasons + // and is what golang's rsa package generates, but it might be + // different if imported from some other generator. + buf := make([]byte, 4) + binary.BigEndian.PutUint32(buf, uint32(e)) + var i int + for i = 0; i < 8; i++ { + if buf[i] != 0 { + break + } + } + return buf[i:] +} + +func parseRSAPublicExponentParam(eB64Url string) (int, error) { + eBytes, err := joseBase64UrlDecode(eB64Url) + if err != nil { + return 0, fmt.Errorf("invalid base64 URL encoding: %s", err) + } + // Only the minimum number of bytes were used to represent E, but + // binary.BigEndian.Uint32 expects at least 4 bytes, so we need + // to add zero padding if necassary. + byteLen := len(eBytes) + buf := make([]byte, 4-byteLen, 4) + eBytes = append(buf, eBytes...) + + return int(binary.BigEndian.Uint32(eBytes)), nil +} + +func parseRSAPrivateKeyParamFromMap(m map[string]interface{}, key string) (*big.Int, error) { + b64Url, err := stringFromMap(m, key) + if err != nil { + return nil, err + } + + paramBytes, err := joseBase64UrlDecode(b64Url) + if err != nil { + return nil, fmt.Errorf("invaled base64 URL encoding: %s", err) + } + + return new(big.Int).SetBytes(paramBytes), nil +} + +func createPemBlock(name string, derBytes []byte, headers map[string]interface{}) (*pem.Block, error) { + pemBlock := &pem.Block{Type: name, Bytes: derBytes, Headers: map[string]string{}} + for k, v := range headers { + switch val := v.(type) { + case string: + pemBlock.Headers[k] = val + case []string: + if k == "hosts" { + pemBlock.Headers[k] = strings.Join(val, ",") + } else { + // Return error, non-encodable type + } + default: + // Return error, non-encodable type + } + } + + return pemBlock, nil +} + +func pubKeyFromPEMBlock(pemBlock *pem.Block) (PublicKey, error) { + cryptoPublicKey, err := x509.ParsePKIXPublicKey(pemBlock.Bytes) + if err != nil { + return nil, fmt.Errorf("unable to decode Public Key PEM data: %s", err) + } + + pubKey, err := FromCryptoPublicKey(cryptoPublicKey) + if err != nil { + return nil, err + } + + addPEMHeadersToKey(pemBlock, pubKey) + + return pubKey, nil +} + +func addPEMHeadersToKey(pemBlock *pem.Block, pubKey PublicKey) { + for key, value := range pemBlock.Headers { + var safeVal interface{} + if key == "hosts" { + safeVal = strings.Split(value, ",") + } else { + safeVal = value + } + pubKey.AddExtendedField(key, safeVal) + } +} diff --git a/vendor/github.com/docker/distribution/vendor/github.com/garyburd/redigo/internal/commandinfo.go b/vendor/github.com/docker/distribution/vendor/github.com/garyburd/redigo/internal/commandinfo.go new file mode 100644 index 000000000..ce78eff6f --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/github.com/garyburd/redigo/internal/commandinfo.go @@ -0,0 +1,45 @@ +// Copyright 2014 Gary Burd +// +// Licensed under the Apache License, Version 2.0 (the "License"): you may +// not use this file except in compliance with the License. You may obtain +// a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +// License for the specific language governing permissions and limitations +// under the License. 
+ +package internal + +import ( + "strings" +) + +const ( + WatchState = 1 << iota + MultiState + SubscribeState + MonitorState +) + +type CommandInfo struct { + Set, Clear int +} + +var commandInfos = map[string]CommandInfo{ + "WATCH": {Set: WatchState}, + "UNWATCH": {Clear: WatchState}, + "MULTI": {Set: MultiState}, + "EXEC": {Clear: WatchState | MultiState}, + "DISCARD": {Clear: WatchState | MultiState}, + "PSUBSCRIBE": {Set: SubscribeState}, + "SUBSCRIBE": {Set: SubscribeState}, + "MONITOR": {Set: MonitorState}, +} + +func LookupCommandInfo(commandName string) CommandInfo { + return commandInfos[strings.ToUpper(commandName)] +} diff --git a/vendor/github.com/docker/distribution/vendor/github.com/garyburd/redigo/redis/conn.go b/vendor/github.com/docker/distribution/vendor/github.com/garyburd/redigo/redis/conn.go new file mode 100644 index 000000000..ac0e971c4 --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/github.com/garyburd/redigo/redis/conn.go @@ -0,0 +1,455 @@ +// Copyright 2012 Gary Burd +// +// Licensed under the Apache License, Version 2.0 (the "License"): you may +// not use this file except in compliance with the License. You may obtain +// a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +// License for the specific language governing permissions and limitations +// under the License. + +package redis + +import ( + "bufio" + "bytes" + "errors" + "fmt" + "io" + "net" + "strconv" + "sync" + "time" +) + +// conn is the low-level implementation of Conn +type conn struct { + + // Shared + mu sync.Mutex + pending int + err error + conn net.Conn + + // Read + readTimeout time.Duration + br *bufio.Reader + + // Write + writeTimeout time.Duration + bw *bufio.Writer + + // Scratch space for formatting argument length. + // '*' or '$', length, "\r\n" + lenScratch [32]byte + + // Scratch space for formatting integers and floats. + numScratch [40]byte +} + +// Dial connects to the Redis server at the given network and address. +func Dial(network, address string) (Conn, error) { + dialer := xDialer{} + return dialer.Dial(network, address) +} + +// DialTimeout acts like Dial but takes timeouts for establishing the +// connection to the server, writing a command and reading a reply. +func DialTimeout(network, address string, connectTimeout, readTimeout, writeTimeout time.Duration) (Conn, error) { + netDialer := net.Dialer{Timeout: connectTimeout} + dialer := xDialer{ + NetDial: netDialer.Dial, + ReadTimeout: readTimeout, + WriteTimeout: writeTimeout, + } + return dialer.Dial(network, address) +} + +// A Dialer specifies options for connecting to a Redis server. +type xDialer struct { + // NetDial specifies the dial function for creating TCP connections. If + // NetDial is nil, then net.Dial is used. + NetDial func(network, addr string) (net.Conn, error) + + // ReadTimeout specifies the timeout for reading a single command + // reply. If ReadTimeout is zero, then no timeout is used. + ReadTimeout time.Duration + + // WriteTimeout specifies the timeout for writing a single command. If + // WriteTimeout is zero, then no timeout is used. + WriteTimeout time.Duration +} + +// Dial connects to the Redis server at address on the named network. 
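A sketch of how these dialers are used from application code: open a connection with explicit timeouts, run a command through Do, and convert the reply with one of the helpers described in doc.go (the address is a placeholder; the time and fmt imports are assumed):

    func pingRedis(addr string) error {
        // 5s to establish the connection, 2s per read and per write afterwards.
        c, err := redis.DialTimeout("tcp", addr, 5*time.Second, 2*time.Second, 2*time.Second)
        if err != nil {
            return err
        }
        defer c.Close()

        // Do writes the command, flushes the buffer and reads the reply in one call.
        pong, err := redis.String(c.Do("PING"))
        if err != nil {
            return err
        }
        if pong != "PONG" {
            return fmt.Errorf("unexpected reply: %q", pong)
        }
        return nil
    }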
+func (d *xDialer) Dial(network, address string) (Conn, error) { + dial := d.NetDial + if dial == nil { + dial = net.Dial + } + netConn, err := dial(network, address) + if err != nil { + return nil, err + } + return &conn{ + conn: netConn, + bw: bufio.NewWriter(netConn), + br: bufio.NewReader(netConn), + readTimeout: d.ReadTimeout, + writeTimeout: d.WriteTimeout, + }, nil +} + +// NewConn returns a new Redigo connection for the given net connection. +func NewConn(netConn net.Conn, readTimeout, writeTimeout time.Duration) Conn { + return &conn{ + conn: netConn, + bw: bufio.NewWriter(netConn), + br: bufio.NewReader(netConn), + readTimeout: readTimeout, + writeTimeout: writeTimeout, + } +} + +func (c *conn) Close() error { + c.mu.Lock() + err := c.err + if c.err == nil { + c.err = errors.New("redigo: closed") + err = c.conn.Close() + } + c.mu.Unlock() + return err +} + +func (c *conn) fatal(err error) error { + c.mu.Lock() + if c.err == nil { + c.err = err + // Close connection to force errors on subsequent calls and to unblock + // other reader or writer. + c.conn.Close() + } + c.mu.Unlock() + return err +} + +func (c *conn) Err() error { + c.mu.Lock() + err := c.err + c.mu.Unlock() + return err +} + +func (c *conn) writeLen(prefix byte, n int) error { + c.lenScratch[len(c.lenScratch)-1] = '\n' + c.lenScratch[len(c.lenScratch)-2] = '\r' + i := len(c.lenScratch) - 3 + for { + c.lenScratch[i] = byte('0' + n%10) + i -= 1 + n = n / 10 + if n == 0 { + break + } + } + c.lenScratch[i] = prefix + _, err := c.bw.Write(c.lenScratch[i:]) + return err +} + +func (c *conn) writeString(s string) error { + c.writeLen('$', len(s)) + c.bw.WriteString(s) + _, err := c.bw.WriteString("\r\n") + return err +} + +func (c *conn) writeBytes(p []byte) error { + c.writeLen('$', len(p)) + c.bw.Write(p) + _, err := c.bw.WriteString("\r\n") + return err +} + +func (c *conn) writeInt64(n int64) error { + return c.writeBytes(strconv.AppendInt(c.numScratch[:0], n, 10)) +} + +func (c *conn) writeFloat64(n float64) error { + return c.writeBytes(strconv.AppendFloat(c.numScratch[:0], n, 'g', -1, 64)) +} + +func (c *conn) writeCommand(cmd string, args []interface{}) (err error) { + c.writeLen('*', 1+len(args)) + err = c.writeString(cmd) + for _, arg := range args { + if err != nil { + break + } + switch arg := arg.(type) { + case string: + err = c.writeString(arg) + case []byte: + err = c.writeBytes(arg) + case int: + err = c.writeInt64(int64(arg)) + case int64: + err = c.writeInt64(arg) + case float64: + err = c.writeFloat64(arg) + case bool: + if arg { + err = c.writeString("1") + } else { + err = c.writeString("0") + } + case nil: + err = c.writeString("") + default: + var buf bytes.Buffer + fmt.Fprint(&buf, arg) + err = c.writeBytes(buf.Bytes()) + } + } + return err +} + +type protocolError string + +func (pe protocolError) Error() string { + return fmt.Sprintf("redigo: %s (possible server error or unsupported concurrent read by application)", string(pe)) +} + +func (c *conn) readLine() ([]byte, error) { + p, err := c.br.ReadSlice('\n') + if err == bufio.ErrBufferFull { + return nil, protocolError("long response line") + } + if err != nil { + return nil, err + } + i := len(p) - 2 + if i < 0 || p[i] != '\r' { + return nil, protocolError("bad response line terminator") + } + return p[:i], nil +} + +// parseLen parses bulk string and array lengths. 
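The writer and reader halves above speak the Redis serialization protocol (RESP): every command goes out as an array of bulk strings, and each reply starts with a one-byte type marker. A worked trace of a single SET, as writeCommand emits it and readReply consumes it:

    request (writeCommand "SET" "foo" "bar"):
        *3\r\n              array header: 3 elements
        $3\r\nSET\r\n       bulk string "SET"
        $3\r\nfoo\r\n       bulk string "foo"
        $3\r\nbar\r\n       bulk string "bar"

    reply (readReply):
        +OK\r\n             simple string; returned as the shared okReply value

A GET on a missing key answers "$-1\r\n"; parseLen below maps that length to -1 and readReply surfaces it as a nil reply.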
+func parseLen(p []byte) (int, error) { + if len(p) == 0 { + return -1, protocolError("malformed length") + } + + if p[0] == '-' && len(p) == 2 && p[1] == '1' { + // handle $-1 and $-1 null replies. + return -1, nil + } + + var n int + for _, b := range p { + n *= 10 + if b < '0' || b > '9' { + return -1, protocolError("illegal bytes in length") + } + n += int(b - '0') + } + + return n, nil +} + +// parseInt parses an integer reply. +func parseInt(p []byte) (interface{}, error) { + if len(p) == 0 { + return 0, protocolError("malformed integer") + } + + var negate bool + if p[0] == '-' { + negate = true + p = p[1:] + if len(p) == 0 { + return 0, protocolError("malformed integer") + } + } + + var n int64 + for _, b := range p { + n *= 10 + if b < '0' || b > '9' { + return 0, protocolError("illegal bytes in length") + } + n += int64(b - '0') + } + + if negate { + n = -n + } + return n, nil +} + +var ( + okReply interface{} = "OK" + pongReply interface{} = "PONG" +) + +func (c *conn) readReply() (interface{}, error) { + line, err := c.readLine() + if err != nil { + return nil, err + } + if len(line) == 0 { + return nil, protocolError("short response line") + } + switch line[0] { + case '+': + switch { + case len(line) == 3 && line[1] == 'O' && line[2] == 'K': + // Avoid allocation for frequent "+OK" response. + return okReply, nil + case len(line) == 5 && line[1] == 'P' && line[2] == 'O' && line[3] == 'N' && line[4] == 'G': + // Avoid allocation in PING command benchmarks :) + return pongReply, nil + default: + return string(line[1:]), nil + } + case '-': + return Error(string(line[1:])), nil + case ':': + return parseInt(line[1:]) + case '$': + n, err := parseLen(line[1:]) + if n < 0 || err != nil { + return nil, err + } + p := make([]byte, n) + _, err = io.ReadFull(c.br, p) + if err != nil { + return nil, err + } + if line, err := c.readLine(); err != nil { + return nil, err + } else if len(line) != 0 { + return nil, protocolError("bad bulk string format") + } + return p, nil + case '*': + n, err := parseLen(line[1:]) + if n < 0 || err != nil { + return nil, err + } + r := make([]interface{}, n) + for i := range r { + r[i], err = c.readReply() + if err != nil { + return nil, err + } + } + return r, nil + } + return nil, protocolError("unexpected response line") +} + +func (c *conn) Send(cmd string, args ...interface{}) error { + c.mu.Lock() + c.pending += 1 + c.mu.Unlock() + if c.writeTimeout != 0 { + c.conn.SetWriteDeadline(time.Now().Add(c.writeTimeout)) + } + if err := c.writeCommand(cmd, args); err != nil { + return c.fatal(err) + } + return nil +} + +func (c *conn) Flush() error { + if c.writeTimeout != 0 { + c.conn.SetWriteDeadline(time.Now().Add(c.writeTimeout)) + } + if err := c.bw.Flush(); err != nil { + return c.fatal(err) + } + return nil +} + +func (c *conn) Receive() (reply interface{}, err error) { + if c.readTimeout != 0 { + c.conn.SetReadDeadline(time.Now().Add(c.readTimeout)) + } + if reply, err = c.readReply(); err != nil { + return nil, c.fatal(err) + } + // When using pub/sub, the number of receives can be greater than the + // number of sends. To enable normal use of the connection after + // unsubscribing from all channels, we do not decrement pending to a + // negative value. + // + // The pending field is decremented after the reply is read to handle the + // case where Receive is called before Send. 
+ c.mu.Lock() + if c.pending > 0 { + c.pending -= 1 + } + c.mu.Unlock() + if err, ok := reply.(Error); ok { + return nil, err + } + return +} + +func (c *conn) Do(cmd string, args ...interface{}) (interface{}, error) { + c.mu.Lock() + pending := c.pending + c.pending = 0 + c.mu.Unlock() + + if cmd == "" && pending == 0 { + return nil, nil + } + + if c.writeTimeout != 0 { + c.conn.SetWriteDeadline(time.Now().Add(c.writeTimeout)) + } + + if cmd != "" { + c.writeCommand(cmd, args) + } + + if err := c.bw.Flush(); err != nil { + return nil, c.fatal(err) + } + + if c.readTimeout != 0 { + c.conn.SetReadDeadline(time.Now().Add(c.readTimeout)) + } + + if cmd == "" { + reply := make([]interface{}, pending) + for i := range reply { + r, e := c.readReply() + if e != nil { + return nil, c.fatal(e) + } + reply[i] = r + } + return reply, nil + } + + var err error + var reply interface{} + for i := 0; i <= pending; i++ { + var e error + if reply, e = c.readReply(); e != nil { + return nil, c.fatal(e) + } + if e, ok := reply.(Error); ok && err == nil { + err = e + } + } + return reply, err +} diff --git a/vendor/github.com/docker/distribution/vendor/github.com/garyburd/redigo/redis/doc.go b/vendor/github.com/docker/distribution/vendor/github.com/garyburd/redigo/redis/doc.go new file mode 100644 index 000000000..1ae6f0cc2 --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/github.com/garyburd/redigo/redis/doc.go @@ -0,0 +1,169 @@ +// Copyright 2012 Gary Burd +// +// Licensed under the Apache License, Version 2.0 (the "License"): you may +// not use this file except in compliance with the License. You may obtain +// a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +// License for the specific language governing permissions and limitations +// under the License. + +// Package redis is a client for the Redis database. +// +// The Redigo FAQ (https://github.com/garyburd/redigo/wiki/FAQ) contains more +// documentation about this package. +// +// Connections +// +// The Conn interface is the primary interface for working with Redis. +// Applications create connections by calling the Dial, DialWithTimeout or +// NewConn functions. In the future, functions will be added for creating +// sharded and other types of connections. +// +// The application must call the connection Close method when the application +// is done with the connection. +// +// Executing Commands +// +// The Conn interface has a generic method for executing Redis commands: +// +// Do(commandName string, args ...interface{}) (reply interface{}, err error) +// +// The Redis command reference (http://redis.io/commands) lists the available +// commands. 
An example of using the Redis APPEND command is: +// +// n, err := conn.Do("APPEND", "key", "value") +// +// The Do method converts command arguments to binary strings for transmission +// to the server as follows: +// +// Go Type Conversion +// []byte Sent as is +// string Sent as is +// int, int64 strconv.FormatInt(v) +// float64 strconv.FormatFloat(v, 'g', -1, 64) +// bool true -> "1", false -> "0" +// nil "" +// all other types fmt.Print(v) +// +// Redis command reply types are represented using the following Go types: +// +// Redis type Go type +// error redis.Error +// integer int64 +// simple string string +// bulk string []byte or nil if value not present. +// array []interface{} or nil if value not present. +// +// Use type assertions or the reply helper functions to convert from +// interface{} to the specific Go type for the command result. +// +// Pipelining +// +// Connections support pipelining using the Send, Flush and Receive methods. +// +// Send(commandName string, args ...interface{}) error +// Flush() error +// Receive() (reply interface{}, err error) +// +// Send writes the command to the connection's output buffer. Flush flushes the +// connection's output buffer to the server. Receive reads a single reply from +// the server. The following example shows a simple pipeline. +// +// c.Send("SET", "foo", "bar") +// c.Send("GET", "foo") +// c.Flush() +// c.Receive() // reply from SET +// v, err = c.Receive() // reply from GET +// +// The Do method combines the functionality of the Send, Flush and Receive +// methods. The Do method starts by writing the command and flushing the output +// buffer. Next, the Do method receives all pending replies including the reply +// for the command just sent by Do. If any of the received replies is an error, +// then Do returns the error. If there are no errors, then Do returns the last +// reply. If the command argument to the Do method is "", then the Do method +// will flush the output buffer and receive pending replies without sending a +// command. +// +// Use the Send and Do methods to implement pipelined transactions. +// +// c.Send("MULTI") +// c.Send("INCR", "foo") +// c.Send("INCR", "bar") +// r, err := c.Do("EXEC") +// fmt.Println(r) // prints [1, 1] +// +// Concurrency +// +// Connections do not support concurrent calls to the write methods (Send, +// Flush) or concurrent calls to the read method (Receive). Connections do +// allow a concurrent reader and writer. +// +// Because the Do method combines the functionality of Send, Flush and Receive, +// the Do method cannot be called concurrently with the other methods. +// +// For full concurrent access to Redis, use the thread-safe Pool to get and +// release connections from within a goroutine. +// +// Publish and Subscribe +// +// Use the Send, Flush and Receive methods to implement Pub/Sub subscribers. +// +// c.Send("SUBSCRIBE", "example") +// c.Flush() +// for { +// reply, err := c.Receive() +// if err != nil { +// return err +// } +// // process pushed message +// } +// +// The PubSubConn type wraps a Conn with convenience methods for implementing +// subscribers. The Subscribe, PSubscribe, Unsubscribe and PUnsubscribe methods +// send and flush a subscription management command. The receive method +// converts a pushed message to convenient types for use in a type switch. 
+// +// psc := redis.PubSubConn{c} +// psc.Subscribe("example") +// for { +// switch v := psc.Receive().(type) { +// case redis.Message: +// fmt.Printf("%s: message: %s\n", v.Channel, v.Data) +// case redis.Subscription: +// fmt.Printf("%s: %s %d\n", v.Channel, v.Kind, v.Count) +// case error: +// return v +// } +// } +// +// Reply Helpers +// +// The Bool, Int, Bytes, String, Strings and Values functions convert a reply +// to a value of a specific type. To allow convenient wrapping of calls to the +// connection Do and Receive methods, the functions take a second argument of +// type error. If the error is non-nil, then the helper function returns the +// error. If the error is nil, the function converts the reply to the specified +// type: +// +// exists, err := redis.Bool(c.Do("EXISTS", "foo")) +// if err != nil { +// // handle error return from c.Do or type conversion error. +// } +// +// The Scan function converts elements of a array reply to Go types: +// +// var value1 int +// var value2 string +// reply, err := redis.Values(c.Do("MGET", "key1", "key2")) +// if err != nil { +// // handle error +// } +// if _, err := redis.Scan(reply, &value1, &value2); err != nil { +// // handle error +// } +package redis diff --git a/vendor/github.com/docker/distribution/vendor/github.com/garyburd/redigo/redis/log.go b/vendor/github.com/docker/distribution/vendor/github.com/garyburd/redigo/redis/log.go new file mode 100644 index 000000000..129b86d67 --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/github.com/garyburd/redigo/redis/log.go @@ -0,0 +1,117 @@ +// Copyright 2012 Gary Burd +// +// Licensed under the Apache License, Version 2.0 (the "License"): you may +// not use this file except in compliance with the License. You may obtain +// a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +// License for the specific language governing permissions and limitations +// under the License. + +package redis + +import ( + "bytes" + "fmt" + "log" +) + +// NewLoggingConn returns a logging wrapper around a connection. +func NewLoggingConn(conn Conn, logger *log.Logger, prefix string) Conn { + if prefix != "" { + prefix = prefix + "." 
+ } + return &loggingConn{conn, logger, prefix} +} + +type loggingConn struct { + Conn + logger *log.Logger + prefix string +} + +func (c *loggingConn) Close() error { + err := c.Conn.Close() + var buf bytes.Buffer + fmt.Fprintf(&buf, "%sClose() -> (%v)", c.prefix, err) + c.logger.Output(2, buf.String()) + return err +} + +func (c *loggingConn) printValue(buf *bytes.Buffer, v interface{}) { + const chop = 32 + switch v := v.(type) { + case []byte: + if len(v) > chop { + fmt.Fprintf(buf, "%q...", v[:chop]) + } else { + fmt.Fprintf(buf, "%q", v) + } + case string: + if len(v) > chop { + fmt.Fprintf(buf, "%q...", v[:chop]) + } else { + fmt.Fprintf(buf, "%q", v) + } + case []interface{}: + if len(v) == 0 { + buf.WriteString("[]") + } else { + sep := "[" + fin := "]" + if len(v) > chop { + v = v[:chop] + fin = "...]" + } + for _, vv := range v { + buf.WriteString(sep) + c.printValue(buf, vv) + sep = ", " + } + buf.WriteString(fin) + } + default: + fmt.Fprint(buf, v) + } +} + +func (c *loggingConn) print(method, commandName string, args []interface{}, reply interface{}, err error) { + var buf bytes.Buffer + fmt.Fprintf(&buf, "%s%s(", c.prefix, method) + if method != "Receive" { + buf.WriteString(commandName) + for _, arg := range args { + buf.WriteString(", ") + c.printValue(&buf, arg) + } + } + buf.WriteString(") -> (") + if method != "Send" { + c.printValue(&buf, reply) + buf.WriteString(", ") + } + fmt.Fprintf(&buf, "%v)", err) + c.logger.Output(3, buf.String()) +} + +func (c *loggingConn) Do(commandName string, args ...interface{}) (interface{}, error) { + reply, err := c.Conn.Do(commandName, args...) + c.print("Do", commandName, args, reply, err) + return reply, err +} + +func (c *loggingConn) Send(commandName string, args ...interface{}) error { + err := c.Conn.Send(commandName, args...) + c.print("Send", commandName, args, nil, err) + return err +} + +func (c *loggingConn) Receive() (interface{}, error) { + reply, err := c.Conn.Receive() + c.print("Receive", "", nil, reply, err) + return reply, err +} diff --git a/vendor/github.com/docker/distribution/vendor/github.com/garyburd/redigo/redis/pool.go b/vendor/github.com/docker/distribution/vendor/github.com/garyburd/redigo/redis/pool.go new file mode 100644 index 000000000..9daf2e33f --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/github.com/garyburd/redigo/redis/pool.go @@ -0,0 +1,389 @@ +// Copyright 2012 Gary Burd +// +// Licensed under the Apache License, Version 2.0 (the "License"): you may +// not use this file except in compliance with the License. You may obtain +// a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +// License for the specific language governing permissions and limitations +// under the License. + +package redis + +import ( + "bytes" + "container/list" + "crypto/rand" + "crypto/sha1" + "errors" + "io" + "strconv" + "sync" + "time" + + "github.com/garyburd/redigo/internal" +) + +var nowFunc = time.Now // for testing + +// ErrPoolExhausted is returned from a pool connection method (Do, Send, +// Receive, Flush, Err) when the maximum number of database connections in the +// pool has been reached. 
+var ErrPoolExhausted = errors.New("redigo: connection pool exhausted") + +var ( + errPoolClosed = errors.New("redigo: connection pool closed") + errConnClosed = errors.New("redigo: connection closed") +) + +// Pool maintains a pool of connections. The application calls the Get method +// to get a connection from the pool and the connection's Close method to +// return the connection's resources to the pool. +// +// The following example shows how to use a pool in a web application. The +// application creates a pool at application startup and makes it available to +// request handlers using a global variable. +// +// func newPool(server, password string) *redis.Pool { +// return &redis.Pool{ +// MaxIdle: 3, +// IdleTimeout: 240 * time.Second, +// Dial: func () (redis.Conn, error) { +// c, err := redis.Dial("tcp", server) +// if err != nil { +// return nil, err +// } +// if _, err := c.Do("AUTH", password); err != nil { +// c.Close() +// return nil, err +// } +// return c, err +// }, +// TestOnBorrow: func(c redis.Conn, t time.Time) error { +// _, err := c.Do("PING") +// return err +// }, +// } +// } +// +// var ( +// pool *redis.Pool +// redisServer = flag.String("redisServer", ":6379", "") +// redisPassword = flag.String("redisPassword", "", "") +// ) +// +// func main() { +// flag.Parse() +// pool = newPool(*redisServer, *redisPassword) +// ... +// } +// +// A request handler gets a connection from the pool and closes the connection +// when the handler is done: +// +// func serveHome(w http.ResponseWriter, r *http.Request) { +// conn := pool.Get() +// defer conn.Close() +// .... +// } +// +type Pool struct { + + // Dial is an application supplied function for creating and configuring a + // connection + Dial func() (Conn, error) + + // TestOnBorrow is an optional application supplied function for checking + // the health of an idle connection before the connection is used again by + // the application. Argument t is the time that the connection was returned + // to the pool. If the function returns an error, then the connection is + // closed. + TestOnBorrow func(c Conn, t time.Time) error + + // Maximum number of idle connections in the pool. + MaxIdle int + + // Maximum number of connections allocated by the pool at a given time. + // When zero, there is no limit on the number of connections in the pool. + MaxActive int + + // Close connections after remaining idle for this duration. If the value + // is zero, then idle connections are not closed. Applications should set + // the timeout to a value less than the server's timeout. + IdleTimeout time.Duration + + // If Wait is true and the pool is at the MaxIdle limit, then Get() waits + // for a connection to be returned to the pool before returning. + Wait bool + + // mu protects fields defined below. + mu sync.Mutex + cond *sync.Cond + closed bool + active int + + // Stack of idleConn with most recently used at the front. + idle list.List +} + +type idleConn struct { + c Conn + t time.Time +} + +// NewPool creates a new pool. This function is deprecated. Applications should +// initialize the Pool fields directly as shown in example. +func NewPool(newFn func() (Conn, error), maxIdle int) *Pool { + return &Pool{Dial: newFn, MaxIdle: maxIdle} +} + +// Get gets a connection. The application must close the returned connection. +// This method always returns a valid connection so that applications can defer +// error handling to the first use of the connection. 
If there is an error +// getting an underlying connection, then the connection Err, Do, Send, Flush +// and Receive methods return that error. +func (p *Pool) Get() Conn { + c, err := p.get() + if err != nil { + return errorConnection{err} + } + return &pooledConnection{p: p, c: c} +} + +// ActiveCount returns the number of active connections in the pool. +func (p *Pool) ActiveCount() int { + p.mu.Lock() + active := p.active + p.mu.Unlock() + return active +} + +// Close releases the resources used by the pool. +func (p *Pool) Close() error { + p.mu.Lock() + idle := p.idle + p.idle.Init() + p.closed = true + p.active -= idle.Len() + if p.cond != nil { + p.cond.Broadcast() + } + p.mu.Unlock() + for e := idle.Front(); e != nil; e = e.Next() { + e.Value.(idleConn).c.Close() + } + return nil +} + +// release decrements the active count and signals waiters. The caller must +// hold p.mu during the call. +func (p *Pool) release() { + p.active -= 1 + if p.cond != nil { + p.cond.Signal() + } +} + +// get prunes stale connections and returns a connection from the idle list or +// creates a new connection. +func (p *Pool) get() (Conn, error) { + p.mu.Lock() + + // Prune stale connections. + + if timeout := p.IdleTimeout; timeout > 0 { + for i, n := 0, p.idle.Len(); i < n; i++ { + e := p.idle.Back() + if e == nil { + break + } + ic := e.Value.(idleConn) + if ic.t.Add(timeout).After(nowFunc()) { + break + } + p.idle.Remove(e) + p.release() + p.mu.Unlock() + ic.c.Close() + p.mu.Lock() + } + } + + for { + + // Get idle connection. + + for i, n := 0, p.idle.Len(); i < n; i++ { + e := p.idle.Front() + if e == nil { + break + } + ic := e.Value.(idleConn) + p.idle.Remove(e) + test := p.TestOnBorrow + p.mu.Unlock() + if test == nil || test(ic.c, ic.t) == nil { + return ic.c, nil + } + ic.c.Close() + p.mu.Lock() + p.release() + } + + // Check for pool closed before dialing a new connection. + + if p.closed { + p.mu.Unlock() + return nil, errors.New("redigo: get on closed pool") + } + + // Dial new connection if under limit. + + if p.MaxActive == 0 || p.active < p.MaxActive { + dial := p.Dial + p.active += 1 + p.mu.Unlock() + c, err := dial() + if err != nil { + p.mu.Lock() + p.release() + p.mu.Unlock() + c = nil + } + return c, err + } + + if !p.Wait { + p.mu.Unlock() + return nil, ErrPoolExhausted + } + + if p.cond == nil { + p.cond = sync.NewCond(&p.mu) + } + p.cond.Wait() + } +} + +func (p *Pool) put(c Conn, forceClose bool) error { + err := c.Err() + p.mu.Lock() + if !p.closed && err == nil && !forceClose { + p.idle.PushFront(idleConn{t: nowFunc(), c: c}) + if p.idle.Len() > p.MaxIdle { + c = p.idle.Remove(p.idle.Back()).(idleConn).c + } else { + c = nil + } + } + + if c == nil { + if p.cond != nil { + p.cond.Signal() + } + p.mu.Unlock() + return nil + } + + p.release() + p.mu.Unlock() + return c.Close() +} + +type pooledConnection struct { + p *Pool + c Conn + state int +} + +var ( + sentinel []byte + sentinelOnce sync.Once +) + +func initSentinel() { + p := make([]byte, 64) + if _, err := rand.Read(p); err == nil { + sentinel = p + } else { + h := sha1.New() + io.WriteString(h, "Oops, rand failed. 
Use time instead.") + io.WriteString(h, strconv.FormatInt(time.Now().UnixNano(), 10)) + sentinel = h.Sum(nil) + } +} + +func (pc *pooledConnection) Close() error { + c := pc.c + if _, ok := c.(errorConnection); ok { + return nil + } + pc.c = errorConnection{errConnClosed} + + if pc.state&internal.MultiState != 0 { + c.Send("DISCARD") + pc.state &^= (internal.MultiState | internal.WatchState) + } else if pc.state&internal.WatchState != 0 { + c.Send("UNWATCH") + pc.state &^= internal.WatchState + } + if pc.state&internal.SubscribeState != 0 { + c.Send("UNSUBSCRIBE") + c.Send("PUNSUBSCRIBE") + // To detect the end of the message stream, ask the server to echo + // a sentinel value and read until we see that value. + sentinelOnce.Do(initSentinel) + c.Send("ECHO", sentinel) + c.Flush() + for { + p, err := c.Receive() + if err != nil { + break + } + if p, ok := p.([]byte); ok && bytes.Equal(p, sentinel) { + pc.state &^= internal.SubscribeState + break + } + } + } + c.Do("") + pc.p.put(c, pc.state != 0) + return nil +} + +func (pc *pooledConnection) Err() error { + return pc.c.Err() +} + +func (pc *pooledConnection) Do(commandName string, args ...interface{}) (reply interface{}, err error) { + ci := internal.LookupCommandInfo(commandName) + pc.state = (pc.state | ci.Set) &^ ci.Clear + return pc.c.Do(commandName, args...) +} + +func (pc *pooledConnection) Send(commandName string, args ...interface{}) error { + ci := internal.LookupCommandInfo(commandName) + pc.state = (pc.state | ci.Set) &^ ci.Clear + return pc.c.Send(commandName, args...) +} + +func (pc *pooledConnection) Flush() error { + return pc.c.Flush() +} + +func (pc *pooledConnection) Receive() (reply interface{}, err error) { + return pc.c.Receive() +} + +type errorConnection struct{ err error } + +func (ec errorConnection) Do(string, ...interface{}) (interface{}, error) { return nil, ec.err } +func (ec errorConnection) Send(string, ...interface{}) error { return ec.err } +func (ec errorConnection) Err() error { return ec.err } +func (ec errorConnection) Close() error { return ec.err } +func (ec errorConnection) Flush() error { return ec.err } +func (ec errorConnection) Receive() (interface{}, error) { return nil, ec.err } diff --git a/vendor/github.com/docker/distribution/vendor/github.com/garyburd/redigo/redis/pubsub.go b/vendor/github.com/docker/distribution/vendor/github.com/garyburd/redigo/redis/pubsub.go new file mode 100644 index 000000000..f0790429f --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/github.com/garyburd/redigo/redis/pubsub.go @@ -0,0 +1,129 @@ +// Copyright 2012 Gary Burd +// +// Licensed under the Apache License, Version 2.0 (the "License"): you may +// not use this file except in compliance with the License. You may obtain +// a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +// License for the specific language governing permissions and limitations +// under the License. + +package redis + +import ( + "errors" +) + +// Subscription represents a subscribe or unsubscribe notification. +type Subscription struct { + + // Kind is "subscribe", "unsubscribe", "psubscribe" or "punsubscribe" + Kind string + + // The channel that was changed. + Channel string + + // The current number of subscriptions for connection. 
+ Count int +} + +// Message represents a message notification. +type Message struct { + + // The originating channel. + Channel string + + // The message data. + Data []byte +} + +// PMessage represents a pmessage notification. +type PMessage struct { + + // The matched pattern. + Pattern string + + // The originating channel. + Channel string + + // The message data. + Data []byte +} + +// PubSubConn wraps a Conn with convenience methods for subscribers. +type PubSubConn struct { + Conn Conn +} + +// Close closes the connection. +func (c PubSubConn) Close() error { + return c.Conn.Close() +} + +// Subscribe subscribes the connection to the specified channels. +func (c PubSubConn) Subscribe(channel ...interface{}) error { + c.Conn.Send("SUBSCRIBE", channel...) + return c.Conn.Flush() +} + +// PSubscribe subscribes the connection to the given patterns. +func (c PubSubConn) PSubscribe(channel ...interface{}) error { + c.Conn.Send("PSUBSCRIBE", channel...) + return c.Conn.Flush() +} + +// Unsubscribe unsubscribes the connection from the given channels, or from all +// of them if none is given. +func (c PubSubConn) Unsubscribe(channel ...interface{}) error { + c.Conn.Send("UNSUBSCRIBE", channel...) + return c.Conn.Flush() +} + +// PUnsubscribe unsubscribes the connection from the given patterns, or from all +// of them if none is given. +func (c PubSubConn) PUnsubscribe(channel ...interface{}) error { + c.Conn.Send("PUNSUBSCRIBE", channel...) + return c.Conn.Flush() +} + +// Receive returns a pushed message as a Subscription, Message, PMessage or +// error. The return value is intended to be used directly in a type switch as +// illustrated in the PubSubConn example. +func (c PubSubConn) Receive() interface{} { + reply, err := Values(c.Conn.Receive()) + if err != nil { + return err + } + + var kind string + reply, err = Scan(reply, &kind) + if err != nil { + return err + } + + switch kind { + case "message": + var m Message + if _, err := Scan(reply, &m.Channel, &m.Data); err != nil { + return err + } + return m + case "pmessage": + var pm PMessage + if _, err := Scan(reply, &pm.Pattern, &pm.Channel, &pm.Data); err != nil { + return err + } + return pm + case "subscribe", "psubscribe", "unsubscribe", "punsubscribe": + s := Subscription{Kind: kind} + if _, err := Scan(reply, &s.Channel, &s.Count); err != nil { + return err + } + return s + } + return errors.New("redigo: unknown pubsub notification") +} diff --git a/vendor/github.com/docker/distribution/vendor/github.com/garyburd/redigo/redis/redis.go b/vendor/github.com/docker/distribution/vendor/github.com/garyburd/redigo/redis/redis.go new file mode 100644 index 000000000..c90a48ed4 --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/github.com/garyburd/redigo/redis/redis.go @@ -0,0 +1,44 @@ +// Copyright 2012 Gary Burd +// +// Licensed under the Apache License, Version 2.0 (the "License"): you may +// not use this file except in compliance with the License. You may obtain +// a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +// License for the specific language governing permissions and limitations +// under the License. + +package redis + +// Error represents an error returned in a command reply. 
+type Error string + +func (err Error) Error() string { return string(err) } + +// Conn represents a connection to a Redis server. +type Conn interface { + // Close closes the connection. + Close() error + + // Err returns a non-nil value if the connection is broken. The returned + // value is either the first non-nil value returned from the underlying + // network connection or a protocol parsing error. Applications should + // close broken connections. + Err() error + + // Do sends a command to the server and returns the received reply. + Do(commandName string, args ...interface{}) (reply interface{}, err error) + + // Send writes the command to the client's output buffer. + Send(commandName string, args ...interface{}) error + + // Flush flushes the output buffer to the Redis server. + Flush() error + + // Receive receives a single reply from the Redis server + Receive() (reply interface{}, err error) +} diff --git a/vendor/github.com/docker/distribution/vendor/github.com/garyburd/redigo/redis/reply.go b/vendor/github.com/docker/distribution/vendor/github.com/garyburd/redigo/redis/reply.go new file mode 100644 index 000000000..5648f930d --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/github.com/garyburd/redigo/redis/reply.go @@ -0,0 +1,312 @@ +// Copyright 2012 Gary Burd +// +// Licensed under the Apache License, Version 2.0 (the "License"): you may +// not use this file except in compliance with the License. You may obtain +// a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +// License for the specific language governing permissions and limitations +// under the License. + +package redis + +import ( + "errors" + "fmt" + "strconv" +) + +// ErrNil indicates that a reply value is nil. +var ErrNil = errors.New("redigo: nil returned") + +// Int is a helper that converts a command reply to an integer. If err is not +// equal to nil, then Int returns 0, err. Otherwise, Int converts the +// reply to an int as follows: +// +// Reply type Result +// integer int(reply), nil +// bulk string parsed reply, nil +// nil 0, ErrNil +// other 0, error +func Int(reply interface{}, err error) (int, error) { + if err != nil { + return 0, err + } + switch reply := reply.(type) { + case int64: + x := int(reply) + if int64(x) != reply { + return 0, strconv.ErrRange + } + return x, nil + case []byte: + n, err := strconv.ParseInt(string(reply), 10, 0) + return int(n), err + case nil: + return 0, ErrNil + case Error: + return 0, reply + } + return 0, fmt.Errorf("redigo: unexpected type for Int, got type %T", reply) +} + +// Int64 is a helper that converts a command reply to 64 bit integer. If err is +// not equal to nil, then Int returns 0, err. 
Otherwise, Int64 converts the +// reply to an int64 as follows: +// +// Reply type Result +// integer reply, nil +// bulk string parsed reply, nil +// nil 0, ErrNil +// other 0, error +func Int64(reply interface{}, err error) (int64, error) { + if err != nil { + return 0, err + } + switch reply := reply.(type) { + case int64: + return reply, nil + case []byte: + n, err := strconv.ParseInt(string(reply), 10, 64) + return n, err + case nil: + return 0, ErrNil + case Error: + return 0, reply + } + return 0, fmt.Errorf("redigo: unexpected type for Int64, got type %T", reply) +} + +var errNegativeInt = errors.New("redigo: unexpected value for Uint64") + +// Uint64 is a helper that converts a command reply to 64 bit integer. If err is +// not equal to nil, then Int returns 0, err. Otherwise, Int64 converts the +// reply to an int64 as follows: +// +// Reply type Result +// integer reply, nil +// bulk string parsed reply, nil +// nil 0, ErrNil +// other 0, error +func Uint64(reply interface{}, err error) (uint64, error) { + if err != nil { + return 0, err + } + switch reply := reply.(type) { + case int64: + if reply < 0 { + return 0, errNegativeInt + } + return uint64(reply), nil + case []byte: + n, err := strconv.ParseUint(string(reply), 10, 64) + return n, err + case nil: + return 0, ErrNil + case Error: + return 0, reply + } + return 0, fmt.Errorf("redigo: unexpected type for Uint64, got type %T", reply) +} + +// Float64 is a helper that converts a command reply to 64 bit float. If err is +// not equal to nil, then Float64 returns 0, err. Otherwise, Float64 converts +// the reply to an int as follows: +// +// Reply type Result +// bulk string parsed reply, nil +// nil 0, ErrNil +// other 0, error +func Float64(reply interface{}, err error) (float64, error) { + if err != nil { + return 0, err + } + switch reply := reply.(type) { + case []byte: + n, err := strconv.ParseFloat(string(reply), 64) + return n, err + case nil: + return 0, ErrNil + case Error: + return 0, reply + } + return 0, fmt.Errorf("redigo: unexpected type for Float64, got type %T", reply) +} + +// String is a helper that converts a command reply to a string. If err is not +// equal to nil, then String returns "", err. Otherwise String converts the +// reply to a string as follows: +// +// Reply type Result +// bulk string string(reply), nil +// simple string reply, nil +// nil "", ErrNil +// other "", error +func String(reply interface{}, err error) (string, error) { + if err != nil { + return "", err + } + switch reply := reply.(type) { + case []byte: + return string(reply), nil + case string: + return reply, nil + case nil: + return "", ErrNil + case Error: + return "", reply + } + return "", fmt.Errorf("redigo: unexpected type for String, got type %T", reply) +} + +// Bytes is a helper that converts a command reply to a slice of bytes. If err +// is not equal to nil, then Bytes returns nil, err. Otherwise Bytes converts +// the reply to a slice of bytes as follows: +// +// Reply type Result +// bulk string reply, nil +// simple string []byte(reply), nil +// nil nil, ErrNil +// other nil, error +func Bytes(reply interface{}, err error) ([]byte, error) { + if err != nil { + return nil, err + } + switch reply := reply.(type) { + case []byte: + return reply, nil + case string: + return []byte(reply), nil + case nil: + return nil, ErrNil + case Error: + return nil, reply + } + return nil, fmt.Errorf("redigo: unexpected type for Bytes, got type %T", reply) +} + +// Bool is a helper that converts a command reply to a boolean. 
If err is not +// equal to nil, then Bool returns false, err. Otherwise Bool converts the +// reply to boolean as follows: +// +// Reply type Result +// integer value != 0, nil +// bulk string strconv.ParseBool(reply) +// nil false, ErrNil +// other false, error +func Bool(reply interface{}, err error) (bool, error) { + if err != nil { + return false, err + } + switch reply := reply.(type) { + case int64: + return reply != 0, nil + case []byte: + return strconv.ParseBool(string(reply)) + case nil: + return false, ErrNil + case Error: + return false, reply + } + return false, fmt.Errorf("redigo: unexpected type for Bool, got type %T", reply) +} + +// MultiBulk is deprecated. Use Values. +func MultiBulk(reply interface{}, err error) ([]interface{}, error) { return Values(reply, err) } + +// Values is a helper that converts an array command reply to a []interface{}. +// If err is not equal to nil, then Values returns nil, err. Otherwise, Values +// converts the reply as follows: +// +// Reply type Result +// array reply, nil +// nil nil, ErrNil +// other nil, error +func Values(reply interface{}, err error) ([]interface{}, error) { + if err != nil { + return nil, err + } + switch reply := reply.(type) { + case []interface{}: + return reply, nil + case nil: + return nil, ErrNil + case Error: + return nil, reply + } + return nil, fmt.Errorf("redigo: unexpected type for Values, got type %T", reply) +} + +// Strings is a helper that converts an array command reply to a []string. If +// err is not equal to nil, then Strings returns nil, err. Nil array items are +// converted to "" in the output slice. Strings returns an error if an array +// item is not a bulk string or nil. +func Strings(reply interface{}, err error) ([]string, error) { + if err != nil { + return nil, err + } + switch reply := reply.(type) { + case []interface{}: + result := make([]string, len(reply)) + for i := range reply { + if reply[i] == nil { + continue + } + p, ok := reply[i].([]byte) + if !ok { + return nil, fmt.Errorf("redigo: unexpected element type for Strings, got type %T", reply[i]) + } + result[i] = string(p) + } + return result, nil + case nil: + return nil, ErrNil + case Error: + return nil, reply + } + return nil, fmt.Errorf("redigo: unexpected type for Strings, got type %T", reply) +} + +// Ints is a helper that converts an array command reply to a []int. If +// err is not equal to nil, then Ints returns nil, err. +func Ints(reply interface{}, err error) ([]int, error) { + var ints []int + if reply == nil { + return ints, ErrNil + } + values, err := Values(reply, err) + if err != nil { + return ints, err + } + if err := ScanSlice(values, &ints); err != nil { + return ints, err + } + return ints, nil +} + +// StringMap is a helper that converts an array of strings (alternating key, value) +// into a map[string]string. The HGETALL and CONFIG GET commands return replies in this format. +// Requires an even number of values in result. 
+func StringMap(result interface{}, err error) (map[string]string, error) { + values, err := Values(result, err) + if err != nil { + return nil, err + } + if len(values)%2 != 0 { + return nil, errors.New("redigo: StringMap expects even number of values result") + } + m := make(map[string]string, len(values)/2) + for i := 0; i < len(values); i += 2 { + key, okKey := values[i].([]byte) + value, okValue := values[i+1].([]byte) + if !okKey || !okValue { + return nil, errors.New("redigo: ScanMap key not a bulk string value") + } + m[string(key)] = string(value) + } + return m, nil +} diff --git a/vendor/github.com/docker/distribution/vendor/github.com/garyburd/redigo/redis/scan.go b/vendor/github.com/docker/distribution/vendor/github.com/garyburd/redigo/redis/scan.go new file mode 100644 index 000000000..8c9cfa18d --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/github.com/garyburd/redigo/redis/scan.go @@ -0,0 +1,513 @@ +// Copyright 2012 Gary Burd +// +// Licensed under the Apache License, Version 2.0 (the "License"): you may +// not use this file except in compliance with the License. You may obtain +// a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +// License for the specific language governing permissions and limitations +// under the License. + +package redis + +import ( + "errors" + "fmt" + "reflect" + "strconv" + "strings" + "sync" +) + +func ensureLen(d reflect.Value, n int) { + if n > d.Cap() { + d.Set(reflect.MakeSlice(d.Type(), n, n)) + } else { + d.SetLen(n) + } +} + +func cannotConvert(d reflect.Value, s interface{}) error { + return fmt.Errorf("redigo: Scan cannot convert from %s to %s", + reflect.TypeOf(s), d.Type()) +} + +func convertAssignBytes(d reflect.Value, s []byte) (err error) { + switch d.Type().Kind() { + case reflect.Float32, reflect.Float64: + var x float64 + x, err = strconv.ParseFloat(string(s), d.Type().Bits()) + d.SetFloat(x) + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + var x int64 + x, err = strconv.ParseInt(string(s), 10, d.Type().Bits()) + d.SetInt(x) + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: + var x uint64 + x, err = strconv.ParseUint(string(s), 10, d.Type().Bits()) + d.SetUint(x) + case reflect.Bool: + var x bool + x, err = strconv.ParseBool(string(s)) + d.SetBool(x) + case reflect.String: + d.SetString(string(s)) + case reflect.Slice: + if d.Type().Elem().Kind() != reflect.Uint8 { + err = cannotConvert(d, s) + } else { + d.SetBytes(s) + } + default: + err = cannotConvert(d, s) + } + return +} + +func convertAssignInt(d reflect.Value, s int64) (err error) { + switch d.Type().Kind() { + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + d.SetInt(s) + if d.Int() != s { + err = strconv.ErrRange + d.SetInt(0) + } + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: + if s < 0 { + err = strconv.ErrRange + } else { + x := uint64(s) + d.SetUint(x) + if d.Uint() != x { + err = strconv.ErrRange + d.SetUint(0) + } + } + case reflect.Bool: + d.SetBool(s != 0) + default: + err = cannotConvert(d, s) + } + return +} + +func convertAssignValue(d reflect.Value, s interface{}) (err error) { + switch s := s.(type) { + case []byte: + err = convertAssignBytes(d, s) + case 
int64: + err = convertAssignInt(d, s) + default: + err = cannotConvert(d, s) + } + return err +} + +func convertAssignValues(d reflect.Value, s []interface{}) error { + if d.Type().Kind() != reflect.Slice { + return cannotConvert(d, s) + } + ensureLen(d, len(s)) + for i := 0; i < len(s); i++ { + if err := convertAssignValue(d.Index(i), s[i]); err != nil { + return err + } + } + return nil +} + +func convertAssign(d interface{}, s interface{}) (err error) { + // Handle the most common destination types using type switches and + // fall back to reflection for all other types. + switch s := s.(type) { + case nil: + // ingore + case []byte: + switch d := d.(type) { + case *string: + *d = string(s) + case *int: + *d, err = strconv.Atoi(string(s)) + case *bool: + *d, err = strconv.ParseBool(string(s)) + case *[]byte: + *d = s + case *interface{}: + *d = s + case nil: + // skip value + default: + if d := reflect.ValueOf(d); d.Type().Kind() != reflect.Ptr { + err = cannotConvert(d, s) + } else { + err = convertAssignBytes(d.Elem(), s) + } + } + case int64: + switch d := d.(type) { + case *int: + x := int(s) + if int64(x) != s { + err = strconv.ErrRange + x = 0 + } + *d = x + case *bool: + *d = s != 0 + case *interface{}: + *d = s + case nil: + // skip value + default: + if d := reflect.ValueOf(d); d.Type().Kind() != reflect.Ptr { + err = cannotConvert(d, s) + } else { + err = convertAssignInt(d.Elem(), s) + } + } + case []interface{}: + switch d := d.(type) { + case *[]interface{}: + *d = s + case *interface{}: + *d = s + case nil: + // skip value + default: + if d := reflect.ValueOf(d); d.Type().Kind() != reflect.Ptr { + err = cannotConvert(d, s) + } else { + err = convertAssignValues(d.Elem(), s) + } + } + case Error: + err = s + default: + err = cannotConvert(reflect.ValueOf(d), s) + } + return +} + +// Scan copies from src to the values pointed at by dest. +// +// The values pointed at by dest must be an integer, float, boolean, string, +// []byte, interface{} or slices of these types. Scan uses the standard strconv +// package to convert bulk strings to numeric and boolean types. +// +// If a dest value is nil, then the corresponding src value is skipped. +// +// If a src element is nil, then the corresponding dest value is not modified. +// +// To enable easy use of Scan in a loop, Scan returns the slice of src +// following the copied values. +func Scan(src []interface{}, dest ...interface{}) ([]interface{}, error) { + if len(src) < len(dest) { + return nil, errors.New("redigo: Scan array short") + } + var err error + for i, d := range dest { + err = convertAssign(d, src[i]) + if err != nil { + break + } + } + return src[len(dest):], err +} + +type fieldSpec struct { + name string + index []int + //omitEmpty bool +} + +type structSpec struct { + m map[string]*fieldSpec + l []*fieldSpec +} + +func (ss *structSpec) fieldSpec(name []byte) *fieldSpec { + return ss.m[string(name)] +} + +func compileStructSpec(t reflect.Type, depth map[string]int, index []int, ss *structSpec) { + for i := 0; i < t.NumField(); i++ { + f := t.Field(i) + switch { + case f.PkgPath != "": + // Ignore unexported fields. + case f.Anonymous: + // TODO: Handle pointers. Requires change to decoder and + // protection against infinite recursion. 
+ if f.Type.Kind() == reflect.Struct { + compileStructSpec(f.Type, depth, append(index, i), ss) + } + default: + fs := &fieldSpec{name: f.Name} + tag := f.Tag.Get("redis") + p := strings.Split(tag, ",") + if len(p) > 0 { + if p[0] == "-" { + continue + } + if len(p[0]) > 0 { + fs.name = p[0] + } + for _, s := range p[1:] { + switch s { + //case "omitempty": + // fs.omitempty = true + default: + panic(errors.New("redigo: unknown field flag " + s + " for type " + t.Name())) + } + } + } + d, found := depth[fs.name] + if !found { + d = 1 << 30 + } + switch { + case len(index) == d: + // At same depth, remove from result. + delete(ss.m, fs.name) + j := 0 + for i := 0; i < len(ss.l); i++ { + if fs.name != ss.l[i].name { + ss.l[j] = ss.l[i] + j += 1 + } + } + ss.l = ss.l[:j] + case len(index) < d: + fs.index = make([]int, len(index)+1) + copy(fs.index, index) + fs.index[len(index)] = i + depth[fs.name] = len(index) + ss.m[fs.name] = fs + ss.l = append(ss.l, fs) + } + } + } +} + +var ( + structSpecMutex sync.RWMutex + structSpecCache = make(map[reflect.Type]*structSpec) + defaultFieldSpec = &fieldSpec{} +) + +func structSpecForType(t reflect.Type) *structSpec { + + structSpecMutex.RLock() + ss, found := structSpecCache[t] + structSpecMutex.RUnlock() + if found { + return ss + } + + structSpecMutex.Lock() + defer structSpecMutex.Unlock() + ss, found = structSpecCache[t] + if found { + return ss + } + + ss = &structSpec{m: make(map[string]*fieldSpec)} + compileStructSpec(t, make(map[string]int), nil, ss) + structSpecCache[t] = ss + return ss +} + +var errScanStructValue = errors.New("redigo: ScanStruct value must be non-nil pointer to a struct") + +// ScanStruct scans alternating names and values from src to a struct. The +// HGETALL and CONFIG GET commands return replies in this format. +// +// ScanStruct uses exported field names to match values in the response. Use +// 'redis' field tag to override the name: +// +// Field int `redis:"myName"` +// +// Fields with the tag redis:"-" are ignored. +// +// Integer, float, boolean, string and []byte fields are supported. Scan uses the +// standard strconv package to convert bulk string values to numeric and +// boolean types. +// +// If a src element is nil, then the corresponding field is not modified. +func ScanStruct(src []interface{}, dest interface{}) error { + d := reflect.ValueOf(dest) + if d.Kind() != reflect.Ptr || d.IsNil() { + return errScanStructValue + } + d = d.Elem() + if d.Kind() != reflect.Struct { + return errScanStructValue + } + ss := structSpecForType(d.Type()) + + if len(src)%2 != 0 { + return errors.New("redigo: ScanStruct expects even number of values in values") + } + + for i := 0; i < len(src); i += 2 { + s := src[i+1] + if s == nil { + continue + } + name, ok := src[i].([]byte) + if !ok { + return errors.New("redigo: ScanStruct key not a bulk string value") + } + fs := ss.fieldSpec(name) + if fs == nil { + continue + } + if err := convertAssignValue(d.FieldByIndex(fs.index), s); err != nil { + return err + } + } + return nil +} + +var ( + errScanSliceValue = errors.New("redigo: ScanSlice dest must be non-nil pointer to a struct") +) + +// ScanSlice scans src to the slice pointed to by dest. The elements the dest +// slice must be integer, float, boolean, string, struct or pointer to struct +// values. +// +// Struct fields must be integer, float, boolean or string values. All struct +// fields are used unless a subset is specified using fieldNames. 
+func ScanSlice(src []interface{}, dest interface{}, fieldNames ...string) error { + d := reflect.ValueOf(dest) + if d.Kind() != reflect.Ptr || d.IsNil() { + return errScanSliceValue + } + d = d.Elem() + if d.Kind() != reflect.Slice { + return errScanSliceValue + } + + isPtr := false + t := d.Type().Elem() + if t.Kind() == reflect.Ptr && t.Elem().Kind() == reflect.Struct { + isPtr = true + t = t.Elem() + } + + if t.Kind() != reflect.Struct { + ensureLen(d, len(src)) + for i, s := range src { + if s == nil { + continue + } + if err := convertAssignValue(d.Index(i), s); err != nil { + return err + } + } + return nil + } + + ss := structSpecForType(t) + fss := ss.l + if len(fieldNames) > 0 { + fss = make([]*fieldSpec, len(fieldNames)) + for i, name := range fieldNames { + fss[i] = ss.m[name] + if fss[i] == nil { + return errors.New("redigo: ScanSlice bad field name " + name) + } + } + } + + if len(fss) == 0 { + return errors.New("redigo: ScanSlice no struct fields") + } + + n := len(src) / len(fss) + if n*len(fss) != len(src) { + return errors.New("redigo: ScanSlice length not a multiple of struct field count") + } + + ensureLen(d, n) + for i := 0; i < n; i++ { + d := d.Index(i) + if isPtr { + if d.IsNil() { + d.Set(reflect.New(t)) + } + d = d.Elem() + } + for j, fs := range fss { + s := src[i*len(fss)+j] + if s == nil { + continue + } + if err := convertAssignValue(d.FieldByIndex(fs.index), s); err != nil { + return err + } + } + } + return nil +} + +// Args is a helper for constructing command arguments from structured values. +type Args []interface{} + +// Add returns the result of appending value to args. +func (args Args) Add(value ...interface{}) Args { + return append(args, value...) +} + +// AddFlat returns the result of appending the flattened value of v to args. +// +// Maps are flattened by appending the alternating keys and map values to args. +// +// Slices are flattened by appending the slice elements to args. +// +// Structs are flattened by appending the alternating names and values of +// exported fields to args. If v is a nil struct pointer, then nothing is +// appended. The 'redis' field tag overrides struct field names. See ScanStruct +// for more information on the use of the 'redis' field tag. +// +// Other types are appended to args as is. 
+func (args Args) AddFlat(v interface{}) Args { + rv := reflect.ValueOf(v) + switch rv.Kind() { + case reflect.Struct: + args = flattenStruct(args, rv) + case reflect.Slice: + for i := 0; i < rv.Len(); i++ { + args = append(args, rv.Index(i).Interface()) + } + case reflect.Map: + for _, k := range rv.MapKeys() { + args = append(args, k.Interface(), rv.MapIndex(k).Interface()) + } + case reflect.Ptr: + if rv.Type().Elem().Kind() == reflect.Struct { + if !rv.IsNil() { + args = flattenStruct(args, rv.Elem()) + } + } else { + args = append(args, v) + } + default: + args = append(args, v) + } + return args +} + +func flattenStruct(args Args, v reflect.Value) Args { + ss := structSpecForType(v.Type()) + for _, fs := range ss.l { + fv := v.FieldByIndex(fs.index) + args = append(args, fs.name, fv.Interface()) + } + return args +} diff --git a/vendor/github.com/docker/distribution/vendor/github.com/garyburd/redigo/redis/script.go b/vendor/github.com/docker/distribution/vendor/github.com/garyburd/redigo/redis/script.go new file mode 100644 index 000000000..78605a90a --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/github.com/garyburd/redigo/redis/script.go @@ -0,0 +1,86 @@ +// Copyright 2012 Gary Burd +// +// Licensed under the Apache License, Version 2.0 (the "License"): you may +// not use this file except in compliance with the License. You may obtain +// a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +// License for the specific language governing permissions and limitations +// under the License. + +package redis + +import ( + "crypto/sha1" + "encoding/hex" + "io" + "strings" +) + +// Script encapsulates the source, hash and key count for a Lua script. See +// http://redis.io/commands/eval for information on scripts in Redis. +type Script struct { + keyCount int + src string + hash string +} + +// NewScript returns a new script object. If keyCount is greater than or equal +// to zero, then the count is automatically inserted in the EVAL command +// argument list. If keyCount is less than zero, then the application supplies +// the count as the first value in the keysAndArgs argument to the Do, Send and +// SendHash methods. +func NewScript(keyCount int, src string) *Script { + h := sha1.New() + io.WriteString(h, src) + return &Script{keyCount, src, hex.EncodeToString(h.Sum(nil))} +} + +func (s *Script) args(spec string, keysAndArgs []interface{}) []interface{} { + var args []interface{} + if s.keyCount < 0 { + args = make([]interface{}, 1+len(keysAndArgs)) + args[0] = spec + copy(args[1:], keysAndArgs) + } else { + args = make([]interface{}, 2+len(keysAndArgs)) + args[0] = spec + args[1] = s.keyCount + copy(args[2:], keysAndArgs) + } + return args +} + +// Do evaluates the script. Under the covers, Do optimistically evaluates the +// script using the EVALSHA command. If the command fails because the script is +// not loaded, then Do evaluates the script using the EVAL command (thus +// causing the script to load). +func (s *Script) Do(c Conn, keysAndArgs ...interface{}) (interface{}, error) { + v, err := c.Do("EVALSHA", s.args(s.hash, keysAndArgs)...) + if e, ok := err.(Error); ok && strings.HasPrefix(string(e), "NOSCRIPT ") { + v, err = c.Do("EVAL", s.args(s.src, keysAndArgs)...) 
+ } + return v, err +} + +// SendHash evaluates the script without waiting for the reply. The script is +// evaluated with the EVALSHA command. The application must ensure that the +// script is loaded by a previous call to Send, Do or Load methods. +func (s *Script) SendHash(c Conn, keysAndArgs ...interface{}) error { + return c.Send("EVALSHA", s.args(s.hash, keysAndArgs)...) +} + +// Send evaluates the script without waiting for the reply. +func (s *Script) Send(c Conn, keysAndArgs ...interface{}) error { + return c.Send("EVAL", s.args(s.src, keysAndArgs)...) +} + +// Load loads the script without evaluating it. +func (s *Script) Load(c Conn) error { + _, err := c.Do("SCRIPT", "LOAD", s.src) + return err +} diff --git a/vendor/github.com/docker/distribution/vendor/github.com/golang/protobuf/LICENSE b/vendor/github.com/docker/distribution/vendor/github.com/golang/protobuf/LICENSE new file mode 100644 index 000000000..1b1b1921e --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/github.com/golang/protobuf/LICENSE @@ -0,0 +1,31 @@ +Go support for Protocol Buffers - Google's data interchange format + +Copyright 2010 The Go Authors. All rights reserved. +https://github.com/golang/protobuf + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + diff --git a/vendor/github.com/docker/distribution/vendor/github.com/golang/protobuf/proto/Makefile b/vendor/github.com/docker/distribution/vendor/github.com/golang/protobuf/proto/Makefile new file mode 100644 index 000000000..e2e0651a9 --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/github.com/golang/protobuf/proto/Makefile @@ -0,0 +1,43 @@ +# Go support for Protocol Buffers - Google's data interchange format +# +# Copyright 2010 The Go Authors. All rights reserved. +# https://github.com/golang/protobuf +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. 
+# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Google Inc. nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +install: + go install + +test: install generate-test-pbs + go test + + +generate-test-pbs: + make install + make -C testdata + protoc --go_out=Mtestdata/test.proto=github.com/golang/protobuf/proto/testdata,Mgoogle/protobuf/any.proto=github.com/golang/protobuf/ptypes/any:. proto3_proto/proto3.proto + make diff --git a/vendor/github.com/docker/distribution/vendor/github.com/golang/protobuf/proto/clone.go b/vendor/github.com/docker/distribution/vendor/github.com/golang/protobuf/proto/clone.go new file mode 100644 index 000000000..e98ddec98 --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/github.com/golang/protobuf/proto/clone.go @@ -0,0 +1,223 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2011 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ +// Protocol buffer deep copy and merge. +// TODO: RawMessage. + +package proto + +import ( + "log" + "reflect" + "strings" +) + +// Clone returns a deep copy of a protocol buffer. +func Clone(pb Message) Message { + in := reflect.ValueOf(pb) + if in.IsNil() { + return pb + } + + out := reflect.New(in.Type().Elem()) + // out is empty so a merge is a deep copy. + mergeStruct(out.Elem(), in.Elem()) + return out.Interface().(Message) +} + +// Merge merges src into dst. +// Required and optional fields that are set in src will be set to that value in dst. +// Elements of repeated fields will be appended. +// Merge panics if src and dst are not the same type, or if dst is nil. +func Merge(dst, src Message) { + in := reflect.ValueOf(src) + out := reflect.ValueOf(dst) + if out.IsNil() { + panic("proto: nil destination") + } + if in.Type() != out.Type() { + // Explicit test prior to mergeStruct so that mistyped nils will fail + panic("proto: type mismatch") + } + if in.IsNil() { + // Merging nil into non-nil is a quiet no-op + return + } + mergeStruct(out.Elem(), in.Elem()) +} + +func mergeStruct(out, in reflect.Value) { + sprop := GetProperties(in.Type()) + for i := 0; i < in.NumField(); i++ { + f := in.Type().Field(i) + if strings.HasPrefix(f.Name, "XXX_") { + continue + } + mergeAny(out.Field(i), in.Field(i), false, sprop.Prop[i]) + } + + if emIn, ok := in.Addr().Interface().(extendableProto); ok { + emOut := out.Addr().Interface().(extendableProto) + mergeExtension(emOut.ExtensionMap(), emIn.ExtensionMap()) + } + + uf := in.FieldByName("XXX_unrecognized") + if !uf.IsValid() { + return + } + uin := uf.Bytes() + if len(uin) > 0 { + out.FieldByName("XXX_unrecognized").SetBytes(append([]byte(nil), uin...)) + } +} + +// mergeAny performs a merge between two values of the same type. +// viaPtr indicates whether the values were indirected through a pointer (implying proto2). +// prop is set if this is a struct field (it may be nil). +func mergeAny(out, in reflect.Value, viaPtr bool, prop *Properties) { + if in.Type() == protoMessageType { + if !in.IsNil() { + if out.IsNil() { + out.Set(reflect.ValueOf(Clone(in.Interface().(Message)))) + } else { + Merge(out.Interface().(Message), in.Interface().(Message)) + } + } + return + } + switch in.Kind() { + case reflect.Bool, reflect.Float32, reflect.Float64, reflect.Int32, reflect.Int64, + reflect.String, reflect.Uint32, reflect.Uint64: + if !viaPtr && isProto3Zero(in) { + return + } + out.Set(in) + case reflect.Interface: + // Probably a oneof field; copy non-nil values. + if in.IsNil() { + return + } + // Allocate destination if it is not set, or set to a different type. + // Otherwise we will merge as normal. + if out.IsNil() || out.Elem().Type() != in.Elem().Type() { + out.Set(reflect.New(in.Elem().Elem().Type())) // interface -> *T -> T -> new(T) + } + mergeAny(out.Elem(), in.Elem(), false, nil) + case reflect.Map: + if in.Len() == 0 { + return + } + if out.IsNil() { + out.Set(reflect.MakeMap(in.Type())) + } + // For maps with value types of *T or []byte we need to deep copy each value. 
+ elemKind := in.Type().Elem().Kind() + for _, key := range in.MapKeys() { + var val reflect.Value + switch elemKind { + case reflect.Ptr: + val = reflect.New(in.Type().Elem().Elem()) + mergeAny(val, in.MapIndex(key), false, nil) + case reflect.Slice: + val = in.MapIndex(key) + val = reflect.ValueOf(append([]byte{}, val.Bytes()...)) + default: + val = in.MapIndex(key) + } + out.SetMapIndex(key, val) + } + case reflect.Ptr: + if in.IsNil() { + return + } + if out.IsNil() { + out.Set(reflect.New(in.Elem().Type())) + } + mergeAny(out.Elem(), in.Elem(), true, nil) + case reflect.Slice: + if in.IsNil() { + return + } + if in.Type().Elem().Kind() == reflect.Uint8 { + // []byte is a scalar bytes field, not a repeated field. + + // Edge case: if this is in a proto3 message, a zero length + // bytes field is considered the zero value, and should not + // be merged. + if prop != nil && prop.proto3 && in.Len() == 0 { + return + } + + // Make a deep copy. + // Append to []byte{} instead of []byte(nil) so that we never end up + // with a nil result. + out.SetBytes(append([]byte{}, in.Bytes()...)) + return + } + n := in.Len() + if out.IsNil() { + out.Set(reflect.MakeSlice(in.Type(), 0, n)) + } + switch in.Type().Elem().Kind() { + case reflect.Bool, reflect.Float32, reflect.Float64, reflect.Int32, reflect.Int64, + reflect.String, reflect.Uint32, reflect.Uint64: + out.Set(reflect.AppendSlice(out, in)) + default: + for i := 0; i < n; i++ { + x := reflect.Indirect(reflect.New(in.Type().Elem())) + mergeAny(x, in.Index(i), false, nil) + out.Set(reflect.Append(out, x)) + } + } + case reflect.Struct: + mergeStruct(out, in) + default: + // unknown type, so not a protocol buffer + log.Printf("proto: don't know how to copy %v", in) + } +} + +func mergeExtension(out, in map[int32]Extension) { + for extNum, eIn := range in { + eOut := Extension{desc: eIn.desc} + if eIn.value != nil { + v := reflect.New(reflect.TypeOf(eIn.value)).Elem() + mergeAny(v, reflect.ValueOf(eIn.value), false, nil) + eOut.value = v.Interface() + } + if eIn.enc != nil { + eOut.enc = make([]byte, len(eIn.enc)) + copy(eOut.enc, eIn.enc) + } + + out[extNum] = eOut + } +} diff --git a/vendor/github.com/docker/distribution/vendor/github.com/golang/protobuf/proto/decode.go b/vendor/github.com/docker/distribution/vendor/github.com/golang/protobuf/proto/decode.go new file mode 100644 index 000000000..f94b9f416 --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/github.com/golang/protobuf/proto/decode.go @@ -0,0 +1,868 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2010 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. 
+// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package proto + +/* + * Routines for decoding protocol buffer data to construct in-memory representations. + */ + +import ( + "errors" + "fmt" + "io" + "os" + "reflect" +) + +// errOverflow is returned when an integer is too large to be represented. +var errOverflow = errors.New("proto: integer overflow") + +// ErrInternalBadWireType is returned by generated code when an incorrect +// wire type is encountered. It does not get returned to user code. +var ErrInternalBadWireType = errors.New("proto: internal error: bad wiretype for oneof") + +// The fundamental decoders that interpret bytes on the wire. +// Those that take integer types all return uint64 and are +// therefore of type valueDecoder. + +// DecodeVarint reads a varint-encoded integer from the slice. +// It returns the integer and the number of bytes consumed, or +// zero if there is not enough. +// This is the format for the +// int32, int64, uint32, uint64, bool, and enum +// protocol buffer types. +func DecodeVarint(buf []byte) (x uint64, n int) { + // x, n already 0 + for shift := uint(0); shift < 64; shift += 7 { + if n >= len(buf) { + return 0, 0 + } + b := uint64(buf[n]) + n++ + x |= (b & 0x7F) << shift + if (b & 0x80) == 0 { + return x, n + } + } + + // The number is too large to represent in a 64-bit value. + return 0, 0 +} + +// DecodeVarint reads a varint-encoded integer from the Buffer. +// This is the format for the +// int32, int64, uint32, uint64, bool, and enum +// protocol buffer types. +func (p *Buffer) DecodeVarint() (x uint64, err error) { + // x, err already 0 + + i := p.index + l := len(p.buf) + + for shift := uint(0); shift < 64; shift += 7 { + if i >= l { + err = io.ErrUnexpectedEOF + return + } + b := p.buf[i] + i++ + x |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + p.index = i + return + } + } + + // The number is too large to represent in a 64-bit value. + err = errOverflow + return +} + +// DecodeFixed64 reads a 64-bit integer from the Buffer. +// This is the format for the +// fixed64, sfixed64, and double protocol buffer types. +func (p *Buffer) DecodeFixed64() (x uint64, err error) { + // x, err already 0 + i := p.index + 8 + if i < 0 || i > len(p.buf) { + err = io.ErrUnexpectedEOF + return + } + p.index = i + + x = uint64(p.buf[i-8]) + x |= uint64(p.buf[i-7]) << 8 + x |= uint64(p.buf[i-6]) << 16 + x |= uint64(p.buf[i-5]) << 24 + x |= uint64(p.buf[i-4]) << 32 + x |= uint64(p.buf[i-3]) << 40 + x |= uint64(p.buf[i-2]) << 48 + x |= uint64(p.buf[i-1]) << 56 + return +} + +// DecodeFixed32 reads a 32-bit integer from the Buffer. +// This is the format for the +// fixed32, sfixed32, and float protocol buffer types. 
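+// The four bytes are read in little-endian order; for example the wire bytes
+// 0x2A 0x00 0x00 0x00 decode to 42.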
+func (p *Buffer) DecodeFixed32() (x uint64, err error) { + // x, err already 0 + i := p.index + 4 + if i < 0 || i > len(p.buf) { + err = io.ErrUnexpectedEOF + return + } + p.index = i + + x = uint64(p.buf[i-4]) + x |= uint64(p.buf[i-3]) << 8 + x |= uint64(p.buf[i-2]) << 16 + x |= uint64(p.buf[i-1]) << 24 + return +} + +// DecodeZigzag64 reads a zigzag-encoded 64-bit integer +// from the Buffer. +// This is the format used for the sint64 protocol buffer type. +func (p *Buffer) DecodeZigzag64() (x uint64, err error) { + x, err = p.DecodeVarint() + if err != nil { + return + } + x = (x >> 1) ^ uint64((int64(x&1)<<63)>>63) + return +} + +// DecodeZigzag32 reads a zigzag-encoded 32-bit integer +// from the Buffer. +// This is the format used for the sint32 protocol buffer type. +func (p *Buffer) DecodeZigzag32() (x uint64, err error) { + x, err = p.DecodeVarint() + if err != nil { + return + } + x = uint64((uint32(x) >> 1) ^ uint32((int32(x&1)<<31)>>31)) + return +} + +// These are not ValueDecoders: they produce an array of bytes or a string. +// bytes, embedded messages + +// DecodeRawBytes reads a count-delimited byte buffer from the Buffer. +// This is the format used for the bytes protocol buffer +// type and for embedded messages. +func (p *Buffer) DecodeRawBytes(alloc bool) (buf []byte, err error) { + n, err := p.DecodeVarint() + if err != nil { + return nil, err + } + + nb := int(n) + if nb < 0 { + return nil, fmt.Errorf("proto: bad byte length %d", nb) + } + end := p.index + nb + if end < p.index || end > len(p.buf) { + return nil, io.ErrUnexpectedEOF + } + + if !alloc { + // todo: check if can get more uses of alloc=false + buf = p.buf[p.index:end] + p.index += nb + return + } + + buf = make([]byte, nb) + copy(buf, p.buf[p.index:]) + p.index += nb + return +} + +// DecodeStringBytes reads an encoded string from the Buffer. +// This is the format used for the proto2 string type. +func (p *Buffer) DecodeStringBytes() (s string, err error) { + buf, err := p.DecodeRawBytes(false) + if err != nil { + return + } + return string(buf), nil +} + +// Skip the next item in the buffer. Its wire type is decoded and presented as an argument. +// If the protocol buffer has extensions, and the field matches, add it as an extension. +// Otherwise, if the XXX_unrecognized field exists, append the skipped data there. +func (o *Buffer) skipAndSave(t reflect.Type, tag, wire int, base structPointer, unrecField field) error { + oi := o.index + + err := o.skip(t, tag, wire) + if err != nil { + return err + } + + if !unrecField.IsValid() { + return nil + } + + ptr := structPointer_Bytes(base, unrecField) + + // Add the skipped field to struct field + obuf := o.buf + + o.buf = *ptr + o.EncodeVarint(uint64(tag<<3 | wire)) + *ptr = append(o.buf, obuf[oi:o.index]...) + + o.buf = obuf + + return nil +} + +// Skip the next item in the buffer. Its wire type is decoded and presented as an argument. 
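+// Groups are skipped recursively: a WireStartGroup item is consumed up to and
+// including its matching WireEndGroup marker.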
+func (o *Buffer) skip(t reflect.Type, tag, wire int) error { + + var u uint64 + var err error + + switch wire { + case WireVarint: + _, err = o.DecodeVarint() + case WireFixed64: + _, err = o.DecodeFixed64() + case WireBytes: + _, err = o.DecodeRawBytes(false) + case WireFixed32: + _, err = o.DecodeFixed32() + case WireStartGroup: + for { + u, err = o.DecodeVarint() + if err != nil { + break + } + fwire := int(u & 0x7) + if fwire == WireEndGroup { + break + } + ftag := int(u >> 3) + err = o.skip(t, ftag, fwire) + if err != nil { + break + } + } + default: + err = fmt.Errorf("proto: can't skip unknown wire type %d for %s", wire, t) + } + return err +} + +// Unmarshaler is the interface representing objects that can +// unmarshal themselves. The method should reset the receiver before +// decoding starts. The argument points to data that may be +// overwritten, so implementations should not keep references to the +// buffer. +type Unmarshaler interface { + Unmarshal([]byte) error +} + +// Unmarshal parses the protocol buffer representation in buf and places the +// decoded result in pb. If the struct underlying pb does not match +// the data in buf, the results can be unpredictable. +// +// Unmarshal resets pb before starting to unmarshal, so any +// existing data in pb is always removed. Use UnmarshalMerge +// to preserve and append to existing data. +func Unmarshal(buf []byte, pb Message) error { + pb.Reset() + return UnmarshalMerge(buf, pb) +} + +// UnmarshalMerge parses the protocol buffer representation in buf and +// writes the decoded result to pb. If the struct underlying pb does not match +// the data in buf, the results can be unpredictable. +// +// UnmarshalMerge merges into existing data in pb. +// Most code should use Unmarshal instead. +func UnmarshalMerge(buf []byte, pb Message) error { + // If the object can unmarshal itself, let it. + if u, ok := pb.(Unmarshaler); ok { + return u.Unmarshal(buf) + } + return NewBuffer(buf).Unmarshal(pb) +} + +// DecodeMessage reads a count-delimited message from the Buffer. +func (p *Buffer) DecodeMessage(pb Message) error { + enc, err := p.DecodeRawBytes(false) + if err != nil { + return err + } + return NewBuffer(enc).Unmarshal(pb) +} + +// DecodeGroup reads a tag-delimited group from the Buffer. +func (p *Buffer) DecodeGroup(pb Message) error { + typ, base, err := getbase(pb) + if err != nil { + return err + } + return p.unmarshalType(typ.Elem(), GetProperties(typ.Elem()), true, base) +} + +// Unmarshal parses the protocol buffer representation in the +// Buffer and places the decoded result in pb. If the struct +// underlying pb does not match the data in the buffer, the results can be +// unpredictable. +func (p *Buffer) Unmarshal(pb Message) error { + // If the object can unmarshal itself, let it. + if u, ok := pb.(Unmarshaler); ok { + err := u.Unmarshal(p.buf[p.index:]) + p.index = len(p.buf) + return err + } + + typ, base, err := getbase(pb) + if err != nil { + return err + } + + err = p.unmarshalType(typ.Elem(), GetProperties(typ.Elem()), false, base) + + if collectStats { + stats.Decode++ + } + + return err +} + +// unmarshalType does the work of unmarshaling a structure. 
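+// Each field starts with a varint key that packs the tag and wire type together
+// (key = tag<<3 | wiretype), which is how tag and wire are recovered below.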
+func (o *Buffer) unmarshalType(st reflect.Type, prop *StructProperties, is_group bool, base structPointer) error { + var state errorState + required, reqFields := prop.reqCount, uint64(0) + + var err error + for err == nil && o.index < len(o.buf) { + oi := o.index + var u uint64 + u, err = o.DecodeVarint() + if err != nil { + break + } + wire := int(u & 0x7) + if wire == WireEndGroup { + if is_group { + return nil // input is satisfied + } + return fmt.Errorf("proto: %s: wiretype end group for non-group", st) + } + tag := int(u >> 3) + if tag <= 0 { + return fmt.Errorf("proto: %s: illegal tag %d (wire type %d)", st, tag, wire) + } + fieldnum, ok := prop.decoderTags.get(tag) + if !ok { + // Maybe it's an extension? + if prop.extendable { + if e := structPointer_Interface(base, st).(extendableProto); isExtensionField(e, int32(tag)) { + if err = o.skip(st, tag, wire); err == nil { + ext := e.ExtensionMap()[int32(tag)] // may be missing + ext.enc = append(ext.enc, o.buf[oi:o.index]...) + e.ExtensionMap()[int32(tag)] = ext + } + continue + } + } + // Maybe it's a oneof? + if prop.oneofUnmarshaler != nil { + m := structPointer_Interface(base, st).(Message) + // First return value indicates whether tag is a oneof field. + ok, err = prop.oneofUnmarshaler(m, tag, wire, o) + if err == ErrInternalBadWireType { + // Map the error to something more descriptive. + // Do the formatting here to save generated code space. + err = fmt.Errorf("bad wiretype for oneof field in %T", m) + } + if ok { + continue + } + } + err = o.skipAndSave(st, tag, wire, base, prop.unrecField) + continue + } + p := prop.Prop[fieldnum] + + if p.dec == nil { + fmt.Fprintf(os.Stderr, "proto: no protobuf decoder for %s.%s\n", st, st.Field(fieldnum).Name) + continue + } + dec := p.dec + if wire != WireStartGroup && wire != p.WireType { + if wire == WireBytes && p.packedDec != nil { + // a packable field + dec = p.packedDec + } else { + err = fmt.Errorf("proto: bad wiretype for field %s.%s: got wiretype %d, want %d", st, st.Field(fieldnum).Name, wire, p.WireType) + continue + } + } + decErr := dec(o, p, base) + if decErr != nil && !state.shouldContinue(decErr, p) { + err = decErr + } + if err == nil && p.Required { + // Successfully decoded a required field. + if tag <= 64 { + // use bitmap for fields 1-64 to catch field reuse. + var mask uint64 = 1 << uint64(tag-1) + if reqFields&mask == 0 { + // new required field + reqFields |= mask + required-- + } + } else { + // This is imprecise. It can be fooled by a required field + // with a tag > 64 that is encoded twice; that's very rare. + // A fully correct implementation would require allocating + // a data structure, which we would like to avoid. + required-- + } + } + } + if err == nil { + if is_group { + return io.ErrUnexpectedEOF + } + if state.err != nil { + return state.err + } + if required > 0 { + // Not enough information to determine the exact field. If we use extra + // CPU, we could determine the field only if the missing required field + // has a tag <= 64 and we check reqFields. + return &RequiredNotSetError{"{Unknown}"} + } + } + return err +} + +// Individual type decoders +// For each, +// u is the decoded value, +// v is a pointer to the field (pointer) in the struct + +// Sizes of the pools to allocate inside the Buffer. +// The goal is modest amortization and allocation +// on at least 16-byte boundaries. +const ( + boolPoolSize = 16 + uint32PoolSize = 8 + uint64PoolSize = 4 +) + +// Decode a bool. 
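+// The wire value is a varint: zero decodes to false, any non-zero value to true.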
+func (o *Buffer) dec_bool(p *Properties, base structPointer) error { + u, err := p.valDec(o) + if err != nil { + return err + } + if len(o.bools) == 0 { + o.bools = make([]bool, boolPoolSize) + } + o.bools[0] = u != 0 + *structPointer_Bool(base, p.field) = &o.bools[0] + o.bools = o.bools[1:] + return nil +} + +func (o *Buffer) dec_proto3_bool(p *Properties, base structPointer) error { + u, err := p.valDec(o) + if err != nil { + return err + } + *structPointer_BoolVal(base, p.field) = u != 0 + return nil +} + +// Decode an int32. +func (o *Buffer) dec_int32(p *Properties, base structPointer) error { + u, err := p.valDec(o) + if err != nil { + return err + } + word32_Set(structPointer_Word32(base, p.field), o, uint32(u)) + return nil +} + +func (o *Buffer) dec_proto3_int32(p *Properties, base structPointer) error { + u, err := p.valDec(o) + if err != nil { + return err + } + word32Val_Set(structPointer_Word32Val(base, p.field), uint32(u)) + return nil +} + +// Decode an int64. +func (o *Buffer) dec_int64(p *Properties, base structPointer) error { + u, err := p.valDec(o) + if err != nil { + return err + } + word64_Set(structPointer_Word64(base, p.field), o, u) + return nil +} + +func (o *Buffer) dec_proto3_int64(p *Properties, base structPointer) error { + u, err := p.valDec(o) + if err != nil { + return err + } + word64Val_Set(structPointer_Word64Val(base, p.field), o, u) + return nil +} + +// Decode a string. +func (o *Buffer) dec_string(p *Properties, base structPointer) error { + s, err := o.DecodeStringBytes() + if err != nil { + return err + } + *structPointer_String(base, p.field) = &s + return nil +} + +func (o *Buffer) dec_proto3_string(p *Properties, base structPointer) error { + s, err := o.DecodeStringBytes() + if err != nil { + return err + } + *structPointer_StringVal(base, p.field) = s + return nil +} + +// Decode a slice of bytes ([]byte). +func (o *Buffer) dec_slice_byte(p *Properties, base structPointer) error { + b, err := o.DecodeRawBytes(true) + if err != nil { + return err + } + *structPointer_Bytes(base, p.field) = b + return nil +} + +// Decode a slice of bools ([]bool). +func (o *Buffer) dec_slice_bool(p *Properties, base structPointer) error { + u, err := p.valDec(o) + if err != nil { + return err + } + v := structPointer_BoolSlice(base, p.field) + *v = append(*v, u != 0) + return nil +} + +// Decode a slice of bools ([]bool) in packed format. +func (o *Buffer) dec_slice_packed_bool(p *Properties, base structPointer) error { + v := structPointer_BoolSlice(base, p.field) + + nn, err := o.DecodeVarint() + if err != nil { + return err + } + nb := int(nn) // number of bytes of encoded bools + fin := o.index + nb + if fin < o.index { + return errOverflow + } + + y := *v + for o.index < fin { + u, err := p.valDec(o) + if err != nil { + return err + } + y = append(y, u != 0) + } + + *v = y + return nil +} + +// Decode a slice of int32s ([]int32). +func (o *Buffer) dec_slice_int32(p *Properties, base structPointer) error { + u, err := p.valDec(o) + if err != nil { + return err + } + structPointer_Word32Slice(base, p.field).Append(uint32(u)) + return nil +} + +// Decode a slice of int32s ([]int32) in packed format. 
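+// A packed repeated field is a single length-delimited blob holding the element
+// encodings back to back, with no per-element tag bytes.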
+func (o *Buffer) dec_slice_packed_int32(p *Properties, base structPointer) error { + v := structPointer_Word32Slice(base, p.field) + + nn, err := o.DecodeVarint() + if err != nil { + return err + } + nb := int(nn) // number of bytes of encoded int32s + + fin := o.index + nb + if fin < o.index { + return errOverflow + } + for o.index < fin { + u, err := p.valDec(o) + if err != nil { + return err + } + v.Append(uint32(u)) + } + return nil +} + +// Decode a slice of int64s ([]int64). +func (o *Buffer) dec_slice_int64(p *Properties, base structPointer) error { + u, err := p.valDec(o) + if err != nil { + return err + } + + structPointer_Word64Slice(base, p.field).Append(u) + return nil +} + +// Decode a slice of int64s ([]int64) in packed format. +func (o *Buffer) dec_slice_packed_int64(p *Properties, base structPointer) error { + v := structPointer_Word64Slice(base, p.field) + + nn, err := o.DecodeVarint() + if err != nil { + return err + } + nb := int(nn) // number of bytes of encoded int64s + + fin := o.index + nb + if fin < o.index { + return errOverflow + } + for o.index < fin { + u, err := p.valDec(o) + if err != nil { + return err + } + v.Append(u) + } + return nil +} + +// Decode a slice of strings ([]string). +func (o *Buffer) dec_slice_string(p *Properties, base structPointer) error { + s, err := o.DecodeStringBytes() + if err != nil { + return err + } + v := structPointer_StringSlice(base, p.field) + *v = append(*v, s) + return nil +} + +// Decode a slice of slice of bytes ([][]byte). +func (o *Buffer) dec_slice_slice_byte(p *Properties, base structPointer) error { + b, err := o.DecodeRawBytes(true) + if err != nil { + return err + } + v := structPointer_BytesSlice(base, p.field) + *v = append(*v, b) + return nil +} + +// Decode a map field. +func (o *Buffer) dec_new_map(p *Properties, base structPointer) error { + raw, err := o.DecodeRawBytes(false) + if err != nil { + return err + } + oi := o.index // index at the end of this map entry + o.index -= len(raw) // move buffer back to start of map entry + + mptr := structPointer_NewAt(base, p.field, p.mtype) // *map[K]V + if mptr.Elem().IsNil() { + mptr.Elem().Set(reflect.MakeMap(mptr.Type().Elem())) + } + v := mptr.Elem() // map[K]V + + // Prepare addressable doubly-indirect placeholders for the key and value types. + // See enc_new_map for why. + keyptr := reflect.New(reflect.PtrTo(p.mtype.Key())).Elem() // addressable *K + keybase := toStructPointer(keyptr.Addr()) // **K + + var valbase structPointer + var valptr reflect.Value + switch p.mtype.Elem().Kind() { + case reflect.Slice: + // []byte + var dummy []byte + valptr = reflect.ValueOf(&dummy) // *[]byte + valbase = toStructPointer(valptr) // *[]byte + case reflect.Ptr: + // message; valptr is **Msg; need to allocate the intermediate pointer + valptr = reflect.New(reflect.PtrTo(p.mtype.Elem())).Elem() // addressable *V + valptr.Set(reflect.New(valptr.Type().Elem())) + valbase = toStructPointer(valptr) + default: + // everything else + valptr = reflect.New(reflect.PtrTo(p.mtype.Elem())).Elem() // addressable *V + valbase = toStructPointer(valptr.Addr()) // **V + } + + // Decode. + // This parses a restricted wire format, namely the encoding of a message + // with two fields. See enc_new_map for the format. + for o.index < oi { + // tagcode for key and value properties are always a single byte + // because they have tags 1 and 2. 
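+		// (a key byte is tag<<3|wiretype, so for tags 1 and 2 it is at most 0x15
+		// and always fits in a single varint byte)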
+ tagcode := o.buf[o.index] + o.index++ + switch tagcode { + case p.mkeyprop.tagcode[0]: + if err := p.mkeyprop.dec(o, p.mkeyprop, keybase); err != nil { + return err + } + case p.mvalprop.tagcode[0]: + if err := p.mvalprop.dec(o, p.mvalprop, valbase); err != nil { + return err + } + default: + // TODO: Should we silently skip this instead? + return fmt.Errorf("proto: bad map data tag %d", raw[0]) + } + } + keyelem, valelem := keyptr.Elem(), valptr.Elem() + if !keyelem.IsValid() { + keyelem = reflect.Zero(p.mtype.Key()) + } + if !valelem.IsValid() { + valelem = reflect.Zero(p.mtype.Elem()) + } + + v.SetMapIndex(keyelem, valelem) + return nil +} + +// Decode a group. +func (o *Buffer) dec_struct_group(p *Properties, base structPointer) error { + bas := structPointer_GetStructPointer(base, p.field) + if structPointer_IsNil(bas) { + // allocate new nested message + bas = toStructPointer(reflect.New(p.stype)) + structPointer_SetStructPointer(base, p.field, bas) + } + return o.unmarshalType(p.stype, p.sprop, true, bas) +} + +// Decode an embedded message. +func (o *Buffer) dec_struct_message(p *Properties, base structPointer) (err error) { + raw, e := o.DecodeRawBytes(false) + if e != nil { + return e + } + + bas := structPointer_GetStructPointer(base, p.field) + if structPointer_IsNil(bas) { + // allocate new nested message + bas = toStructPointer(reflect.New(p.stype)) + structPointer_SetStructPointer(base, p.field, bas) + } + + // If the object can unmarshal itself, let it. + if p.isUnmarshaler { + iv := structPointer_Interface(bas, p.stype) + return iv.(Unmarshaler).Unmarshal(raw) + } + + obuf := o.buf + oi := o.index + o.buf = raw + o.index = 0 + + err = o.unmarshalType(p.stype, p.sprop, false, bas) + o.buf = obuf + o.index = oi + + return err +} + +// Decode a slice of embedded messages. +func (o *Buffer) dec_slice_struct_message(p *Properties, base structPointer) error { + return o.dec_slice_struct(p, false, base) +} + +// Decode a slice of embedded groups. +func (o *Buffer) dec_slice_struct_group(p *Properties, base structPointer) error { + return o.dec_slice_struct(p, true, base) +} + +// Decode a slice of structs ([]*struct). +func (o *Buffer) dec_slice_struct(p *Properties, is_group bool, base structPointer) error { + v := reflect.New(p.stype) + bas := toStructPointer(v) + structPointer_StructPointerSlice(base, p.field).Append(bas) + + if is_group { + err := o.unmarshalType(p.stype, p.sprop, is_group, bas) + return err + } + + raw, err := o.DecodeRawBytes(false) + if err != nil { + return err + } + + // If the object can unmarshal itself, let it. + if p.isUnmarshaler { + iv := v.Interface() + return iv.(Unmarshaler).Unmarshal(raw) + } + + obuf := o.buf + oi := o.index + o.buf = raw + o.index = 0 + + err = o.unmarshalType(p.stype, p.sprop, is_group, bas) + + o.buf = obuf + o.index = oi + + return err +} diff --git a/vendor/github.com/docker/distribution/vendor/github.com/golang/protobuf/proto/encode.go b/vendor/github.com/docker/distribution/vendor/github.com/golang/protobuf/proto/encode.go new file mode 100644 index 000000000..eb7e0474e --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/github.com/golang/protobuf/proto/encode.go @@ -0,0 +1,1331 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2010 The Go Authors. All rights reserved. 
+// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package proto + +/* + * Routines for encoding data into the wire format for protocol buffers. + */ + +import ( + "errors" + "fmt" + "reflect" + "sort" +) + +// RequiredNotSetError is the error returned if Marshal is called with +// a protocol buffer struct whose required fields have not +// all been initialized. It is also the error returned if Unmarshal is +// called with an encoded protocol buffer that does not include all the +// required fields. +// +// When printed, RequiredNotSetError reports the first unset required field in a +// message. If the field cannot be precisely determined, it is reported as +// "{Unknown}". +type RequiredNotSetError struct { + field string +} + +func (e *RequiredNotSetError) Error() string { + return fmt.Sprintf("proto: required field %q not set", e.field) +} + +var ( + // errRepeatedHasNil is the error returned if Marshal is called with + // a struct with a repeated field containing a nil element. + errRepeatedHasNil = errors.New("proto: repeated field has nil element") + + // errOneofHasNil is the error returned if Marshal is called with + // a struct with a oneof field containing a nil element. + errOneofHasNil = errors.New("proto: oneof field has nil value") + + // ErrNil is the error returned if Marshal is called with nil. + ErrNil = errors.New("proto: Marshal called with nil") +) + +// The fundamental encoders that put bytes on the wire. +// Those that take integer types all accept uint64 and are +// therefore of type valueEncoder. + +const maxVarintBytes = 10 // maximum length of a varint + +// EncodeVarint returns the varint encoding of x. +// This is the format for the +// int32, int64, uint32, uint64, bool, and enum +// protocol buffer types. +// Not used by the package itself, but helpful to clients +// wishing to use the same encoding. 
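+// For example, EncodeVarint(300) returns []byte{0xAC, 0x02}: the value is emitted
+// in 7-bit groups, least significant group first, with the high bit set on every
+// byte except the last.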
+func EncodeVarint(x uint64) []byte { + var buf [maxVarintBytes]byte + var n int + for n = 0; x > 127; n++ { + buf[n] = 0x80 | uint8(x&0x7F) + x >>= 7 + } + buf[n] = uint8(x) + n++ + return buf[0:n] +} + +// EncodeVarint writes a varint-encoded integer to the Buffer. +// This is the format for the +// int32, int64, uint32, uint64, bool, and enum +// protocol buffer types. +func (p *Buffer) EncodeVarint(x uint64) error { + for x >= 1<<7 { + p.buf = append(p.buf, uint8(x&0x7f|0x80)) + x >>= 7 + } + p.buf = append(p.buf, uint8(x)) + return nil +} + +// SizeVarint returns the varint encoding size of an integer. +func SizeVarint(x uint64) int { + return sizeVarint(x) +} + +func sizeVarint(x uint64) (n int) { + for { + n++ + x >>= 7 + if x == 0 { + break + } + } + return n +} + +// EncodeFixed64 writes a 64-bit integer to the Buffer. +// This is the format for the +// fixed64, sfixed64, and double protocol buffer types. +func (p *Buffer) EncodeFixed64(x uint64) error { + p.buf = append(p.buf, + uint8(x), + uint8(x>>8), + uint8(x>>16), + uint8(x>>24), + uint8(x>>32), + uint8(x>>40), + uint8(x>>48), + uint8(x>>56)) + return nil +} + +func sizeFixed64(x uint64) int { + return 8 +} + +// EncodeFixed32 writes a 32-bit integer to the Buffer. +// This is the format for the +// fixed32, sfixed32, and float protocol buffer types. +func (p *Buffer) EncodeFixed32(x uint64) error { + p.buf = append(p.buf, + uint8(x), + uint8(x>>8), + uint8(x>>16), + uint8(x>>24)) + return nil +} + +func sizeFixed32(x uint64) int { + return 4 +} + +// EncodeZigzag64 writes a zigzag-encoded 64-bit integer +// to the Buffer. +// This is the format used for the sint64 protocol buffer type. +func (p *Buffer) EncodeZigzag64(x uint64) error { + // use signed number to get arithmetic right shift. + return p.EncodeVarint(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} + +func sizeZigzag64(x uint64) int { + return sizeVarint(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} + +// EncodeZigzag32 writes a zigzag-encoded 32-bit integer +// to the Buffer. +// This is the format used for the sint32 protocol buffer type. +func (p *Buffer) EncodeZigzag32(x uint64) error { + // use signed number to get arithmetic right shift. + return p.EncodeVarint(uint64((uint32(x) << 1) ^ uint32((int32(x) >> 31)))) +} + +func sizeZigzag32(x uint64) int { + return sizeVarint(uint64((uint32(x) << 1) ^ uint32((int32(x) >> 31)))) +} + +// EncodeRawBytes writes a count-delimited byte buffer to the Buffer. +// This is the format used for the bytes protocol buffer +// type and for embedded messages. +func (p *Buffer) EncodeRawBytes(b []byte) error { + p.EncodeVarint(uint64(len(b))) + p.buf = append(p.buf, b...) + return nil +} + +func sizeRawBytes(b []byte) int { + return sizeVarint(uint64(len(b))) + + len(b) +} + +// EncodeStringBytes writes an encoded string to the Buffer. +// This is the format used for the proto2 string type. +func (p *Buffer) EncodeStringBytes(s string) error { + p.EncodeVarint(uint64(len(s))) + p.buf = append(p.buf, s...) + return nil +} + +func sizeStringBytes(s string) int { + return sizeVarint(uint64(len(s))) + + len(s) +} + +// Marshaler is the interface representing objects that can marshal themselves. +type Marshaler interface { + Marshal() ([]byte, error) +} + +// Marshal takes the protocol buffer +// and encodes it into the wire format, returning the data. +func Marshal(pb Message) ([]byte, error) { + // Can the object marshal itself? 
+ if m, ok := pb.(Marshaler); ok { + return m.Marshal() + } + p := NewBuffer(nil) + err := p.Marshal(pb) + var state errorState + if err != nil && !state.shouldContinue(err, nil) { + return nil, err + } + if p.buf == nil && err == nil { + // Return a non-nil slice on success. + return []byte{}, nil + } + return p.buf, err +} + +// EncodeMessage writes the protocol buffer to the Buffer, +// prefixed by a varint-encoded length. +func (p *Buffer) EncodeMessage(pb Message) error { + t, base, err := getbase(pb) + if structPointer_IsNil(base) { + return ErrNil + } + if err == nil { + var state errorState + err = p.enc_len_struct(GetProperties(t.Elem()), base, &state) + } + return err +} + +// Marshal takes the protocol buffer +// and encodes it into the wire format, writing the result to the +// Buffer. +func (p *Buffer) Marshal(pb Message) error { + // Can the object marshal itself? + if m, ok := pb.(Marshaler); ok { + data, err := m.Marshal() + if err != nil { + return err + } + p.buf = append(p.buf, data...) + return nil + } + + t, base, err := getbase(pb) + if structPointer_IsNil(base) { + return ErrNil + } + if err == nil { + err = p.enc_struct(GetProperties(t.Elem()), base) + } + + if collectStats { + stats.Encode++ + } + + return err +} + +// Size returns the encoded size of a protocol buffer. +func Size(pb Message) (n int) { + // Can the object marshal itself? If so, Size is slow. + // TODO: add Size to Marshaler, or add a Sizer interface. + if m, ok := pb.(Marshaler); ok { + b, _ := m.Marshal() + return len(b) + } + + t, base, err := getbase(pb) + if structPointer_IsNil(base) { + return 0 + } + if err == nil { + n = size_struct(GetProperties(t.Elem()), base) + } + + if collectStats { + stats.Size++ + } + + return +} + +// Individual type encoders. + +// Encode a bool. +func (o *Buffer) enc_bool(p *Properties, base structPointer) error { + v := *structPointer_Bool(base, p.field) + if v == nil { + return ErrNil + } + x := 0 + if *v { + x = 1 + } + o.buf = append(o.buf, p.tagcode...) + p.valEnc(o, uint64(x)) + return nil +} + +func (o *Buffer) enc_proto3_bool(p *Properties, base structPointer) error { + v := *structPointer_BoolVal(base, p.field) + if !v { + return ErrNil + } + o.buf = append(o.buf, p.tagcode...) + p.valEnc(o, 1) + return nil +} + +func size_bool(p *Properties, base structPointer) int { + v := *structPointer_Bool(base, p.field) + if v == nil { + return 0 + } + return len(p.tagcode) + 1 // each bool takes exactly one byte +} + +func size_proto3_bool(p *Properties, base structPointer) int { + v := *structPointer_BoolVal(base, p.field) + if !v && !p.oneof { + return 0 + } + return len(p.tagcode) + 1 // each bool takes exactly one byte +} + +// Encode an int32. +func (o *Buffer) enc_int32(p *Properties, base structPointer) error { + v := structPointer_Word32(base, p.field) + if word32_IsNil(v) { + return ErrNil + } + x := int32(word32_Get(v)) // permit sign extension to use full 64-bit range + o.buf = append(o.buf, p.tagcode...) + p.valEnc(o, uint64(x)) + return nil +} + +func (o *Buffer) enc_proto3_int32(p *Properties, base structPointer) error { + v := structPointer_Word32Val(base, p.field) + x := int32(word32Val_Get(v)) // permit sign extension to use full 64-bit range + if x == 0 { + return ErrNil + } + o.buf = append(o.buf, p.tagcode...) 
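+	// p.tagcode holds the pre-computed key bytes (tag<<3|wiretype); valEnc appends the value itself.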
+ p.valEnc(o, uint64(x)) + return nil +} + +func size_int32(p *Properties, base structPointer) (n int) { + v := structPointer_Word32(base, p.field) + if word32_IsNil(v) { + return 0 + } + x := int32(word32_Get(v)) // permit sign extension to use full 64-bit range + n += len(p.tagcode) + n += p.valSize(uint64(x)) + return +} + +func size_proto3_int32(p *Properties, base structPointer) (n int) { + v := structPointer_Word32Val(base, p.field) + x := int32(word32Val_Get(v)) // permit sign extension to use full 64-bit range + if x == 0 && !p.oneof { + return 0 + } + n += len(p.tagcode) + n += p.valSize(uint64(x)) + return +} + +// Encode a uint32. +// Exactly the same as int32, except for no sign extension. +func (o *Buffer) enc_uint32(p *Properties, base structPointer) error { + v := structPointer_Word32(base, p.field) + if word32_IsNil(v) { + return ErrNil + } + x := word32_Get(v) + o.buf = append(o.buf, p.tagcode...) + p.valEnc(o, uint64(x)) + return nil +} + +func (o *Buffer) enc_proto3_uint32(p *Properties, base structPointer) error { + v := structPointer_Word32Val(base, p.field) + x := word32Val_Get(v) + if x == 0 { + return ErrNil + } + o.buf = append(o.buf, p.tagcode...) + p.valEnc(o, uint64(x)) + return nil +} + +func size_uint32(p *Properties, base structPointer) (n int) { + v := structPointer_Word32(base, p.field) + if word32_IsNil(v) { + return 0 + } + x := word32_Get(v) + n += len(p.tagcode) + n += p.valSize(uint64(x)) + return +} + +func size_proto3_uint32(p *Properties, base structPointer) (n int) { + v := structPointer_Word32Val(base, p.field) + x := word32Val_Get(v) + if x == 0 && !p.oneof { + return 0 + } + n += len(p.tagcode) + n += p.valSize(uint64(x)) + return +} + +// Encode an int64. +func (o *Buffer) enc_int64(p *Properties, base structPointer) error { + v := structPointer_Word64(base, p.field) + if word64_IsNil(v) { + return ErrNil + } + x := word64_Get(v) + o.buf = append(o.buf, p.tagcode...) + p.valEnc(o, x) + return nil +} + +func (o *Buffer) enc_proto3_int64(p *Properties, base structPointer) error { + v := structPointer_Word64Val(base, p.field) + x := word64Val_Get(v) + if x == 0 { + return ErrNil + } + o.buf = append(o.buf, p.tagcode...) + p.valEnc(o, x) + return nil +} + +func size_int64(p *Properties, base structPointer) (n int) { + v := structPointer_Word64(base, p.field) + if word64_IsNil(v) { + return 0 + } + x := word64_Get(v) + n += len(p.tagcode) + n += p.valSize(x) + return +} + +func size_proto3_int64(p *Properties, base structPointer) (n int) { + v := structPointer_Word64Val(base, p.field) + x := word64Val_Get(v) + if x == 0 && !p.oneof { + return 0 + } + n += len(p.tagcode) + n += p.valSize(x) + return +} + +// Encode a string. +func (o *Buffer) enc_string(p *Properties, base structPointer) error { + v := *structPointer_String(base, p.field) + if v == nil { + return ErrNil + } + x := *v + o.buf = append(o.buf, p.tagcode...) + o.EncodeStringBytes(x) + return nil +} + +func (o *Buffer) enc_proto3_string(p *Properties, base structPointer) error { + v := *structPointer_StringVal(base, p.field) + if v == "" { + return ErrNil + } + o.buf = append(o.buf, p.tagcode...) 
+ o.EncodeStringBytes(v) + return nil +} + +func size_string(p *Properties, base structPointer) (n int) { + v := *structPointer_String(base, p.field) + if v == nil { + return 0 + } + x := *v + n += len(p.tagcode) + n += sizeStringBytes(x) + return +} + +func size_proto3_string(p *Properties, base structPointer) (n int) { + v := *structPointer_StringVal(base, p.field) + if v == "" && !p.oneof { + return 0 + } + n += len(p.tagcode) + n += sizeStringBytes(v) + return +} + +// All protocol buffer fields are nillable, but be careful. +func isNil(v reflect.Value) bool { + switch v.Kind() { + case reflect.Interface, reflect.Map, reflect.Ptr, reflect.Slice: + return v.IsNil() + } + return false +} + +// Encode a message struct. +func (o *Buffer) enc_struct_message(p *Properties, base structPointer) error { + var state errorState + structp := structPointer_GetStructPointer(base, p.field) + if structPointer_IsNil(structp) { + return ErrNil + } + + // Can the object marshal itself? + if p.isMarshaler { + m := structPointer_Interface(structp, p.stype).(Marshaler) + data, err := m.Marshal() + if err != nil && !state.shouldContinue(err, nil) { + return err + } + o.buf = append(o.buf, p.tagcode...) + o.EncodeRawBytes(data) + return state.err + } + + o.buf = append(o.buf, p.tagcode...) + return o.enc_len_struct(p.sprop, structp, &state) +} + +func size_struct_message(p *Properties, base structPointer) int { + structp := structPointer_GetStructPointer(base, p.field) + if structPointer_IsNil(structp) { + return 0 + } + + // Can the object marshal itself? + if p.isMarshaler { + m := structPointer_Interface(structp, p.stype).(Marshaler) + data, _ := m.Marshal() + n0 := len(p.tagcode) + n1 := sizeRawBytes(data) + return n0 + n1 + } + + n0 := len(p.tagcode) + n1 := size_struct(p.sprop, structp) + n2 := sizeVarint(uint64(n1)) // size of encoded length + return n0 + n1 + n2 +} + +// Encode a group struct. +func (o *Buffer) enc_struct_group(p *Properties, base structPointer) error { + var state errorState + b := structPointer_GetStructPointer(base, p.field) + if structPointer_IsNil(b) { + return ErrNil + } + + o.EncodeVarint(uint64((p.Tag << 3) | WireStartGroup)) + err := o.enc_struct(p.sprop, b) + if err != nil && !state.shouldContinue(err, nil) { + return err + } + o.EncodeVarint(uint64((p.Tag << 3) | WireEndGroup)) + return state.err +} + +func size_struct_group(p *Properties, base structPointer) (n int) { + b := structPointer_GetStructPointer(base, p.field) + if structPointer_IsNil(b) { + return 0 + } + + n += sizeVarint(uint64((p.Tag << 3) | WireStartGroup)) + n += size_struct(p.sprop, b) + n += sizeVarint(uint64((p.Tag << 3) | WireEndGroup)) + return +} + +// Encode a slice of bools ([]bool). +func (o *Buffer) enc_slice_bool(p *Properties, base structPointer) error { + s := *structPointer_BoolSlice(base, p.field) + l := len(s) + if l == 0 { + return ErrNil + } + for _, x := range s { + o.buf = append(o.buf, p.tagcode...) + v := uint64(0) + if x { + v = 1 + } + p.valEnc(o, v) + } + return nil +} + +func size_slice_bool(p *Properties, base structPointer) int { + s := *structPointer_BoolSlice(base, p.field) + l := len(s) + if l == 0 { + return 0 + } + return l * (len(p.tagcode) + 1) // each bool takes exactly one byte +} + +// Encode a slice of bools ([]bool) in packed format. +func (o *Buffer) enc_slice_packed_bool(p *Properties, base structPointer) error { + s := *structPointer_BoolSlice(base, p.field) + l := len(s) + if l == 0 { + return ErrNil + } + o.buf = append(o.buf, p.tagcode...) 
+ o.EncodeVarint(uint64(l)) // each bool takes exactly one byte + for _, x := range s { + v := uint64(0) + if x { + v = 1 + } + p.valEnc(o, v) + } + return nil +} + +func size_slice_packed_bool(p *Properties, base structPointer) (n int) { + s := *structPointer_BoolSlice(base, p.field) + l := len(s) + if l == 0 { + return 0 + } + n += len(p.tagcode) + n += sizeVarint(uint64(l)) + n += l // each bool takes exactly one byte + return +} + +// Encode a slice of bytes ([]byte). +func (o *Buffer) enc_slice_byte(p *Properties, base structPointer) error { + s := *structPointer_Bytes(base, p.field) + if s == nil { + return ErrNil + } + o.buf = append(o.buf, p.tagcode...) + o.EncodeRawBytes(s) + return nil +} + +func (o *Buffer) enc_proto3_slice_byte(p *Properties, base structPointer) error { + s := *structPointer_Bytes(base, p.field) + if len(s) == 0 { + return ErrNil + } + o.buf = append(o.buf, p.tagcode...) + o.EncodeRawBytes(s) + return nil +} + +func size_slice_byte(p *Properties, base structPointer) (n int) { + s := *structPointer_Bytes(base, p.field) + if s == nil && !p.oneof { + return 0 + } + n += len(p.tagcode) + n += sizeRawBytes(s) + return +} + +func size_proto3_slice_byte(p *Properties, base structPointer) (n int) { + s := *structPointer_Bytes(base, p.field) + if len(s) == 0 && !p.oneof { + return 0 + } + n += len(p.tagcode) + n += sizeRawBytes(s) + return +} + +// Encode a slice of int32s ([]int32). +func (o *Buffer) enc_slice_int32(p *Properties, base structPointer) error { + s := structPointer_Word32Slice(base, p.field) + l := s.Len() + if l == 0 { + return ErrNil + } + for i := 0; i < l; i++ { + o.buf = append(o.buf, p.tagcode...) + x := int32(s.Index(i)) // permit sign extension to use full 64-bit range + p.valEnc(o, uint64(x)) + } + return nil +} + +func size_slice_int32(p *Properties, base structPointer) (n int) { + s := structPointer_Word32Slice(base, p.field) + l := s.Len() + if l == 0 { + return 0 + } + for i := 0; i < l; i++ { + n += len(p.tagcode) + x := int32(s.Index(i)) // permit sign extension to use full 64-bit range + n += p.valSize(uint64(x)) + } + return +} + +// Encode a slice of int32s ([]int32) in packed format. +func (o *Buffer) enc_slice_packed_int32(p *Properties, base structPointer) error { + s := structPointer_Word32Slice(base, p.field) + l := s.Len() + if l == 0 { + return ErrNil + } + // TODO: Reuse a Buffer. + buf := NewBuffer(nil) + for i := 0; i < l; i++ { + x := int32(s.Index(i)) // permit sign extension to use full 64-bit range + p.valEnc(buf, uint64(x)) + } + + o.buf = append(o.buf, p.tagcode...) + o.EncodeVarint(uint64(len(buf.buf))) + o.buf = append(o.buf, buf.buf...) + return nil +} + +func size_slice_packed_int32(p *Properties, base structPointer) (n int) { + s := structPointer_Word32Slice(base, p.field) + l := s.Len() + if l == 0 { + return 0 + } + var bufSize int + for i := 0; i < l; i++ { + x := int32(s.Index(i)) // permit sign extension to use full 64-bit range + bufSize += p.valSize(uint64(x)) + } + + n += len(p.tagcode) + n += sizeVarint(uint64(bufSize)) + n += bufSize + return +} + +// Encode a slice of uint32s ([]uint32). +// Exactly the same as int32, except for no sign extension. +func (o *Buffer) enc_slice_uint32(p *Properties, base structPointer) error { + s := structPointer_Word32Slice(base, p.field) + l := s.Len() + if l == 0 { + return ErrNil + } + for i := 0; i < l; i++ { + o.buf = append(o.buf, p.tagcode...) 
+ x := s.Index(i) + p.valEnc(o, uint64(x)) + } + return nil +} + +func size_slice_uint32(p *Properties, base structPointer) (n int) { + s := structPointer_Word32Slice(base, p.field) + l := s.Len() + if l == 0 { + return 0 + } + for i := 0; i < l; i++ { + n += len(p.tagcode) + x := s.Index(i) + n += p.valSize(uint64(x)) + } + return +} + +// Encode a slice of uint32s ([]uint32) in packed format. +// Exactly the same as int32, except for no sign extension. +func (o *Buffer) enc_slice_packed_uint32(p *Properties, base structPointer) error { + s := structPointer_Word32Slice(base, p.field) + l := s.Len() + if l == 0 { + return ErrNil + } + // TODO: Reuse a Buffer. + buf := NewBuffer(nil) + for i := 0; i < l; i++ { + p.valEnc(buf, uint64(s.Index(i))) + } + + o.buf = append(o.buf, p.tagcode...) + o.EncodeVarint(uint64(len(buf.buf))) + o.buf = append(o.buf, buf.buf...) + return nil +} + +func size_slice_packed_uint32(p *Properties, base structPointer) (n int) { + s := structPointer_Word32Slice(base, p.field) + l := s.Len() + if l == 0 { + return 0 + } + var bufSize int + for i := 0; i < l; i++ { + bufSize += p.valSize(uint64(s.Index(i))) + } + + n += len(p.tagcode) + n += sizeVarint(uint64(bufSize)) + n += bufSize + return +} + +// Encode a slice of int64s ([]int64). +func (o *Buffer) enc_slice_int64(p *Properties, base structPointer) error { + s := structPointer_Word64Slice(base, p.field) + l := s.Len() + if l == 0 { + return ErrNil + } + for i := 0; i < l; i++ { + o.buf = append(o.buf, p.tagcode...) + p.valEnc(o, s.Index(i)) + } + return nil +} + +func size_slice_int64(p *Properties, base structPointer) (n int) { + s := structPointer_Word64Slice(base, p.field) + l := s.Len() + if l == 0 { + return 0 + } + for i := 0; i < l; i++ { + n += len(p.tagcode) + n += p.valSize(s.Index(i)) + } + return +} + +// Encode a slice of int64s ([]int64) in packed format. +func (o *Buffer) enc_slice_packed_int64(p *Properties, base structPointer) error { + s := structPointer_Word64Slice(base, p.field) + l := s.Len() + if l == 0 { + return ErrNil + } + // TODO: Reuse a Buffer. + buf := NewBuffer(nil) + for i := 0; i < l; i++ { + p.valEnc(buf, s.Index(i)) + } + + o.buf = append(o.buf, p.tagcode...) + o.EncodeVarint(uint64(len(buf.buf))) + o.buf = append(o.buf, buf.buf...) + return nil +} + +func size_slice_packed_int64(p *Properties, base structPointer) (n int) { + s := structPointer_Word64Slice(base, p.field) + l := s.Len() + if l == 0 { + return 0 + } + var bufSize int + for i := 0; i < l; i++ { + bufSize += p.valSize(s.Index(i)) + } + + n += len(p.tagcode) + n += sizeVarint(uint64(bufSize)) + n += bufSize + return +} + +// Encode a slice of slice of bytes ([][]byte). +func (o *Buffer) enc_slice_slice_byte(p *Properties, base structPointer) error { + ss := *structPointer_BytesSlice(base, p.field) + l := len(ss) + if l == 0 { + return ErrNil + } + for i := 0; i < l; i++ { + o.buf = append(o.buf, p.tagcode...) + o.EncodeRawBytes(ss[i]) + } + return nil +} + +func size_slice_slice_byte(p *Properties, base structPointer) (n int) { + ss := *structPointer_BytesSlice(base, p.field) + l := len(ss) + if l == 0 { + return 0 + } + n += l * len(p.tagcode) + for i := 0; i < l; i++ { + n += sizeRawBytes(ss[i]) + } + return +} + +// Encode a slice of strings ([]string). +func (o *Buffer) enc_slice_string(p *Properties, base structPointer) error { + ss := *structPointer_StringSlice(base, p.field) + l := len(ss) + for i := 0; i < l; i++ { + o.buf = append(o.buf, p.tagcode...) 
+ o.EncodeStringBytes(ss[i]) + } + return nil +} + +func size_slice_string(p *Properties, base structPointer) (n int) { + ss := *structPointer_StringSlice(base, p.field) + l := len(ss) + n += l * len(p.tagcode) + for i := 0; i < l; i++ { + n += sizeStringBytes(ss[i]) + } + return +} + +// Encode a slice of message structs ([]*struct). +func (o *Buffer) enc_slice_struct_message(p *Properties, base structPointer) error { + var state errorState + s := structPointer_StructPointerSlice(base, p.field) + l := s.Len() + + for i := 0; i < l; i++ { + structp := s.Index(i) + if structPointer_IsNil(structp) { + return errRepeatedHasNil + } + + // Can the object marshal itself? + if p.isMarshaler { + m := structPointer_Interface(structp, p.stype).(Marshaler) + data, err := m.Marshal() + if err != nil && !state.shouldContinue(err, nil) { + return err + } + o.buf = append(o.buf, p.tagcode...) + o.EncodeRawBytes(data) + continue + } + + o.buf = append(o.buf, p.tagcode...) + err := o.enc_len_struct(p.sprop, structp, &state) + if err != nil && !state.shouldContinue(err, nil) { + if err == ErrNil { + return errRepeatedHasNil + } + return err + } + } + return state.err +} + +func size_slice_struct_message(p *Properties, base structPointer) (n int) { + s := structPointer_StructPointerSlice(base, p.field) + l := s.Len() + n += l * len(p.tagcode) + for i := 0; i < l; i++ { + structp := s.Index(i) + if structPointer_IsNil(structp) { + return // return the size up to this point + } + + // Can the object marshal itself? + if p.isMarshaler { + m := structPointer_Interface(structp, p.stype).(Marshaler) + data, _ := m.Marshal() + n += len(p.tagcode) + n += sizeRawBytes(data) + continue + } + + n0 := size_struct(p.sprop, structp) + n1 := sizeVarint(uint64(n0)) // size of encoded length + n += n0 + n1 + } + return +} + +// Encode a slice of group structs ([]*struct). +func (o *Buffer) enc_slice_struct_group(p *Properties, base structPointer) error { + var state errorState + s := structPointer_StructPointerSlice(base, p.field) + l := s.Len() + + for i := 0; i < l; i++ { + b := s.Index(i) + if structPointer_IsNil(b) { + return errRepeatedHasNil + } + + o.EncodeVarint(uint64((p.Tag << 3) | WireStartGroup)) + + err := o.enc_struct(p.sprop, b) + + if err != nil && !state.shouldContinue(err, nil) { + if err == ErrNil { + return errRepeatedHasNil + } + return err + } + + o.EncodeVarint(uint64((p.Tag << 3) | WireEndGroup)) + } + return state.err +} + +func size_slice_struct_group(p *Properties, base structPointer) (n int) { + s := structPointer_StructPointerSlice(base, p.field) + l := s.Len() + + n += l * sizeVarint(uint64((p.Tag<<3)|WireStartGroup)) + n += l * sizeVarint(uint64((p.Tag<<3)|WireEndGroup)) + for i := 0; i < l; i++ { + b := s.Index(i) + if structPointer_IsNil(b) { + return // return size up to this point + } + + n += size_struct(p.sprop, b) + } + return +} + +// Encode an extension map. +func (o *Buffer) enc_map(p *Properties, base structPointer) error { + v := *structPointer_ExtMap(base, p.field) + if err := encodeExtensionMap(v); err != nil { + return err + } + // Fast-path for common cases: zero or one extensions. + if len(v) <= 1 { + for _, e := range v { + o.buf = append(o.buf, e.enc...) + } + return nil + } + + // Sort keys to provide a deterministic encoding. + keys := make([]int, 0, len(v)) + for k := range v { + keys = append(keys, int(k)) + } + sort.Ints(keys) + + for _, k := range keys { + o.buf = append(o.buf, v[int32(k)].enc...) 
+ } + return nil +} + +func size_map(p *Properties, base structPointer) int { + v := *structPointer_ExtMap(base, p.field) + return sizeExtensionMap(v) +} + +// Encode a map field. +func (o *Buffer) enc_new_map(p *Properties, base structPointer) error { + var state errorState // XXX: or do we need to plumb this through? + + /* + A map defined as + map map_field = N; + is encoded in the same way as + message MapFieldEntry { + key_type key = 1; + value_type value = 2; + } + repeated MapFieldEntry map_field = N; + */ + + v := structPointer_NewAt(base, p.field, p.mtype).Elem() // map[K]V + if v.Len() == 0 { + return nil + } + + keycopy, valcopy, keybase, valbase := mapEncodeScratch(p.mtype) + + enc := func() error { + if err := p.mkeyprop.enc(o, p.mkeyprop, keybase); err != nil { + return err + } + if err := p.mvalprop.enc(o, p.mvalprop, valbase); err != nil { + return err + } + return nil + } + + // Don't sort map keys. It is not required by the spec, and C++ doesn't do it. + for _, key := range v.MapKeys() { + val := v.MapIndex(key) + + // The only illegal map entry values are nil message pointers. + if val.Kind() == reflect.Ptr && val.IsNil() { + return errors.New("proto: map has nil element") + } + + keycopy.Set(key) + valcopy.Set(val) + + o.buf = append(o.buf, p.tagcode...) + if err := o.enc_len_thing(enc, &state); err != nil { + return err + } + } + return nil +} + +func size_new_map(p *Properties, base structPointer) int { + v := structPointer_NewAt(base, p.field, p.mtype).Elem() // map[K]V + + keycopy, valcopy, keybase, valbase := mapEncodeScratch(p.mtype) + + n := 0 + for _, key := range v.MapKeys() { + val := v.MapIndex(key) + keycopy.Set(key) + valcopy.Set(val) + + // Tag codes for key and val are the responsibility of the sub-sizer. + keysize := p.mkeyprop.size(p.mkeyprop, keybase) + valsize := p.mvalprop.size(p.mvalprop, valbase) + entry := keysize + valsize + // Add on tag code and length of map entry itself. + n += len(p.tagcode) + sizeVarint(uint64(entry)) + entry + } + return n +} + +// mapEncodeScratch returns a new reflect.Value matching the map's value type, +// and a structPointer suitable for passing to an encoder or sizer. +func mapEncodeScratch(mapType reflect.Type) (keycopy, valcopy reflect.Value, keybase, valbase structPointer) { + // Prepare addressable doubly-indirect placeholders for the key and value types. + // This is needed because the element-type encoders expect **T, but the map iteration produces T. + + keycopy = reflect.New(mapType.Key()).Elem() // addressable K + keyptr := reflect.New(reflect.PtrTo(keycopy.Type())).Elem() // addressable *K + keyptr.Set(keycopy.Addr()) // + keybase = toStructPointer(keyptr.Addr()) // **K + + // Value types are more varied and require special handling. + switch mapType.Elem().Kind() { + case reflect.Slice: + // []byte + var dummy []byte + valcopy = reflect.ValueOf(&dummy).Elem() // addressable []byte + valbase = toStructPointer(valcopy.Addr()) + case reflect.Ptr: + // message; the generated field type is map[K]*Msg (so V is *Msg), + // so we only need one level of indirection. + valcopy = reflect.New(mapType.Elem()).Elem() // addressable V + valbase = toStructPointer(valcopy.Addr()) + default: + // everything else + valcopy = reflect.New(mapType.Elem()).Elem() // addressable V + valptr := reflect.New(reflect.PtrTo(valcopy.Type())).Elem() // addressable *V + valptr.Set(valcopy.Addr()) // + valbase = toStructPointer(valptr.Addr()) // **V + } + return +} + +// Encode a struct. 
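+// Encoders that find a nil/zero field return ErrNil and the field is simply omitted;
+// only a missing required field is recorded as an error.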
+func (o *Buffer) enc_struct(prop *StructProperties, base structPointer) error { + var state errorState + // Encode fields in tag order so that decoders may use optimizations + // that depend on the ordering. + // https://developers.google.com/protocol-buffers/docs/encoding#order + for _, i := range prop.order { + p := prop.Prop[i] + if p.enc != nil { + err := p.enc(o, p, base) + if err != nil { + if err == ErrNil { + if p.Required && state.err == nil { + state.err = &RequiredNotSetError{p.Name} + } + } else if err == errRepeatedHasNil { + // Give more context to nil values in repeated fields. + return errors.New("repeated field " + p.OrigName + " has nil element") + } else if !state.shouldContinue(err, p) { + return err + } + } + } + } + + // Do oneof fields. + if prop.oneofMarshaler != nil { + m := structPointer_Interface(base, prop.stype).(Message) + if err := prop.oneofMarshaler(m, o); err == ErrNil { + return errOneofHasNil + } else if err != nil { + return err + } + } + + // Add unrecognized fields at the end. + if prop.unrecField.IsValid() { + v := *structPointer_Bytes(base, prop.unrecField) + if len(v) > 0 { + o.buf = append(o.buf, v...) + } + } + + return state.err +} + +func size_struct(prop *StructProperties, base structPointer) (n int) { + for _, i := range prop.order { + p := prop.Prop[i] + if p.size != nil { + n += p.size(p, base) + } + } + + // Add unrecognized fields at the end. + if prop.unrecField.IsValid() { + v := *structPointer_Bytes(base, prop.unrecField) + n += len(v) + } + + // Factor in any oneof fields. + if prop.oneofSizer != nil { + m := structPointer_Interface(base, prop.stype).(Message) + n += prop.oneofSizer(m) + } + + return +} + +var zeroes [20]byte // longer than any conceivable sizeVarint + +// Encode a struct, preceded by its encoded length (as a varint). +func (o *Buffer) enc_len_struct(prop *StructProperties, base structPointer, state *errorState) error { + return o.enc_len_thing(func() error { return o.enc_struct(prop, base) }, state) +} + +// Encode something, preceded by its encoded length (as a varint). +func (o *Buffer) enc_len_thing(enc func() error, state *errorState) error { + iLen := len(o.buf) + o.buf = append(o.buf, 0, 0, 0, 0) // reserve four bytes for length + iMsg := len(o.buf) + err := enc() + if err != nil && !state.shouldContinue(err, nil) { + return err + } + lMsg := len(o.buf) - iMsg + lLen := sizeVarint(uint64(lMsg)) + switch x := lLen - (iMsg - iLen); { + case x > 0: // actual length is x bytes larger than the space we reserved + // Move msg x bytes right. + o.buf = append(o.buf, zeroes[:x]...) + copy(o.buf[iMsg+x:], o.buf[iMsg:iMsg+lMsg]) + case x < 0: // actual length is x bytes smaller than the space we reserved + // Move msg x bytes left. + copy(o.buf[iMsg+x:], o.buf[iMsg:iMsg+lMsg]) + o.buf = o.buf[:len(o.buf)+x] // x is negative + } + // Encode the length in the reserved space. + o.buf = o.buf[:iLen] + o.EncodeVarint(uint64(lMsg)) + o.buf = o.buf[:len(o.buf)+lMsg] + return state.err +} + +// errorState maintains the first error that occurs and updates that error +// with additional context. +type errorState struct { + err error +} + +// shouldContinue reports whether encoding should continue upon encountering the +// given error. If the error is RequiredNotSetError, shouldContinue returns true +// and, if this is the first appearance of that error, remembers it for future +// reporting. +// +// If prop is not nil, it may update any error with additional context about the +// field with the error. 
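+// Any error other than RequiredNotSetError stops encoding or decoding immediately.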
+func (s *errorState) shouldContinue(err error, prop *Properties) bool { + // Ignore unset required fields. + reqNotSet, ok := err.(*RequiredNotSetError) + if !ok { + return false + } + if s.err == nil { + if prop != nil { + err = &RequiredNotSetError{prop.Name + "." + reqNotSet.field} + } + s.err = err + } + return true +} diff --git a/vendor/github.com/docker/distribution/vendor/github.com/golang/protobuf/proto/equal.go b/vendor/github.com/docker/distribution/vendor/github.com/golang/protobuf/proto/equal.go new file mode 100644 index 000000000..f5db1def3 --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/github.com/golang/protobuf/proto/equal.go @@ -0,0 +1,276 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2011 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +// Protocol buffer comparison. + +package proto + +import ( + "bytes" + "log" + "reflect" + "strings" +) + +/* +Equal returns true iff protocol buffers a and b are equal. +The arguments must both be pointers to protocol buffer structs. + +Equality is defined in this way: + - Two messages are equal iff they are the same type, + corresponding fields are equal, unknown field sets + are equal, and extensions sets are equal. + - Two set scalar fields are equal iff their values are equal. + If the fields are of a floating-point type, remember that + NaN != x for all x, including NaN. If the message is defined + in a proto3 .proto file, fields are not "set"; specifically, + zero length proto3 "bytes" fields are equal (nil == {}). + - Two repeated fields are equal iff their lengths are the same, + and their corresponding elements are equal (a "bytes" field, + although represented by []byte, is not a repeated field) + - Two unset fields are equal. + - Two unknown field sets are equal if their current + encoded state is equal. + - Two extension sets are equal iff they have corresponding + elements that are pairwise equal. 
+ - Every other combination of things are not equal. + +The return value is undefined if a and b are not protocol buffers. +*/ +func Equal(a, b Message) bool { + if a == nil || b == nil { + return a == b + } + v1, v2 := reflect.ValueOf(a), reflect.ValueOf(b) + if v1.Type() != v2.Type() { + return false + } + if v1.Kind() == reflect.Ptr { + if v1.IsNil() { + return v2.IsNil() + } + if v2.IsNil() { + return false + } + v1, v2 = v1.Elem(), v2.Elem() + } + if v1.Kind() != reflect.Struct { + return false + } + return equalStruct(v1, v2) +} + +// v1 and v2 are known to have the same type. +func equalStruct(v1, v2 reflect.Value) bool { + sprop := GetProperties(v1.Type()) + for i := 0; i < v1.NumField(); i++ { + f := v1.Type().Field(i) + if strings.HasPrefix(f.Name, "XXX_") { + continue + } + f1, f2 := v1.Field(i), v2.Field(i) + if f.Type.Kind() == reflect.Ptr { + if n1, n2 := f1.IsNil(), f2.IsNil(); n1 && n2 { + // both unset + continue + } else if n1 != n2 { + // set/unset mismatch + return false + } + b1, ok := f1.Interface().(raw) + if ok { + b2 := f2.Interface().(raw) + // RawMessage + if !bytes.Equal(b1.Bytes(), b2.Bytes()) { + return false + } + continue + } + f1, f2 = f1.Elem(), f2.Elem() + } + if !equalAny(f1, f2, sprop.Prop[i]) { + return false + } + } + + if em1 := v1.FieldByName("XXX_extensions"); em1.IsValid() { + em2 := v2.FieldByName("XXX_extensions") + if !equalExtensions(v1.Type(), em1.Interface().(map[int32]Extension), em2.Interface().(map[int32]Extension)) { + return false + } + } + + uf := v1.FieldByName("XXX_unrecognized") + if !uf.IsValid() { + return true + } + + u1 := uf.Bytes() + u2 := v2.FieldByName("XXX_unrecognized").Bytes() + if !bytes.Equal(u1, u2) { + return false + } + + return true +} + +// v1 and v2 are known to have the same type. +// prop may be nil. +func equalAny(v1, v2 reflect.Value, prop *Properties) bool { + if v1.Type() == protoMessageType { + m1, _ := v1.Interface().(Message) + m2, _ := v2.Interface().(Message) + return Equal(m1, m2) + } + switch v1.Kind() { + case reflect.Bool: + return v1.Bool() == v2.Bool() + case reflect.Float32, reflect.Float64: + return v1.Float() == v2.Float() + case reflect.Int32, reflect.Int64: + return v1.Int() == v2.Int() + case reflect.Interface: + // Probably a oneof field; compare the inner values. + n1, n2 := v1.IsNil(), v2.IsNil() + if n1 || n2 { + return n1 == n2 + } + e1, e2 := v1.Elem(), v2.Elem() + if e1.Type() != e2.Type() { + return false + } + return equalAny(e1, e2, nil) + case reflect.Map: + if v1.Len() != v2.Len() { + return false + } + for _, key := range v1.MapKeys() { + val2 := v2.MapIndex(key) + if !val2.IsValid() { + // This key was not found in the second map. + return false + } + if !equalAny(v1.MapIndex(key), val2, nil) { + return false + } + } + return true + case reflect.Ptr: + return equalAny(v1.Elem(), v2.Elem(), prop) + case reflect.Slice: + if v1.Type().Elem().Kind() == reflect.Uint8 { + // short circuit: []byte + + // Edge case: if this is in a proto3 message, a zero length + // bytes field is considered the zero value. 
+ if prop != nil && prop.proto3 && v1.Len() == 0 && v2.Len() == 0 { + return true + } + if v1.IsNil() != v2.IsNil() { + return false + } + return bytes.Equal(v1.Interface().([]byte), v2.Interface().([]byte)) + } + + if v1.Len() != v2.Len() { + return false + } + for i := 0; i < v1.Len(); i++ { + if !equalAny(v1.Index(i), v2.Index(i), prop) { + return false + } + } + return true + case reflect.String: + return v1.Interface().(string) == v2.Interface().(string) + case reflect.Struct: + return equalStruct(v1, v2) + case reflect.Uint32, reflect.Uint64: + return v1.Uint() == v2.Uint() + } + + // unknown type, so not a protocol buffer + log.Printf("proto: don't know how to compare %v", v1) + return false +} + +// base is the struct type that the extensions are based on. +// em1 and em2 are extension maps. +func equalExtensions(base reflect.Type, em1, em2 map[int32]Extension) bool { + if len(em1) != len(em2) { + return false + } + + for extNum, e1 := range em1 { + e2, ok := em2[extNum] + if !ok { + return false + } + + m1, m2 := e1.value, e2.value + + if m1 != nil && m2 != nil { + // Both are unencoded. + if !equalAny(reflect.ValueOf(m1), reflect.ValueOf(m2), nil) { + return false + } + continue + } + + // At least one is encoded. To do a semantically correct comparison + // we need to unmarshal them first. + var desc *ExtensionDesc + if m := extensionMaps[base]; m != nil { + desc = m[extNum] + } + if desc == nil { + log.Printf("proto: don't know how to compare extension %d of %v", extNum, base) + continue + } + var err error + if m1 == nil { + m1, err = decodeExtension(e1.enc, desc) + } + if m2 == nil && err == nil { + m2, err = decodeExtension(e2.enc, desc) + } + if err != nil { + // The encoded form is invalid. + log.Printf("proto: badly encoded extension %d of %v: %v", extNum, base, err) + return false + } + if !equalAny(reflect.ValueOf(m1), reflect.ValueOf(m2), nil) { + return false + } + } + + return true +} diff --git a/vendor/github.com/docker/distribution/vendor/github.com/golang/protobuf/proto/extensions.go b/vendor/github.com/docker/distribution/vendor/github.com/golang/protobuf/proto/extensions.go new file mode 100644 index 000000000..054f4f1df --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/github.com/golang/protobuf/proto/extensions.go @@ -0,0 +1,399 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2010 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. 
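Before moving on to extensions.go: the equality rules documented for Equal above can be exercised at the generated-code level. A minimal sketch, borrowing the example.Test message from the proto package documentation further down (the pb import and its fields are assumptions taken from that example, not generated code in this repository):

```go
package main

import (
	"fmt"

	proto "github.com/golang/protobuf/proto"
	pb "./example.pb" // hypothetical generated package from the proto docs
)

func main() {
	a := &pb.Test{Label: proto.String("hello"), Reps: []int64{1, 2, 3}}
	b := &pb.Test{Label: proto.String("hello"), Reps: []int64{1, 2, 3}}
	fmt.Println(proto.Equal(a, b)) // true: same type, corresponding fields equal

	b.Reps = append(b.Reps, 4)
	fmt.Println(proto.Equal(a, b)) // false: repeated fields differ in length
}
```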
IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package proto + +/* + * Types and routines for supporting protocol buffer extensions. + */ + +import ( + "errors" + "fmt" + "reflect" + "strconv" + "sync" +) + +// ErrMissingExtension is the error returned by GetExtension if the named extension is not in the message. +var ErrMissingExtension = errors.New("proto: missing extension") + +// ExtensionRange represents a range of message extensions for a protocol buffer. +// Used in code generated by the protocol compiler. +type ExtensionRange struct { + Start, End int32 // both inclusive +} + +// extendableProto is an interface implemented by any protocol buffer that may be extended. +type extendableProto interface { + Message + ExtensionRangeArray() []ExtensionRange + ExtensionMap() map[int32]Extension +} + +var extendableProtoType = reflect.TypeOf((*extendableProto)(nil)).Elem() + +// ExtensionDesc represents an extension specification. +// Used in generated code from the protocol compiler. +type ExtensionDesc struct { + ExtendedType Message // nil pointer to the type that is being extended + ExtensionType interface{} // nil pointer to the extension type + Field int32 // field number + Name string // fully-qualified name of extension, for text formatting + Tag string // protobuf tag style +} + +func (ed *ExtensionDesc) repeated() bool { + t := reflect.TypeOf(ed.ExtensionType) + return t.Kind() == reflect.Slice && t.Elem().Kind() != reflect.Uint8 +} + +// Extension represents an extension in a message. +type Extension struct { + // When an extension is stored in a message using SetExtension + // only desc and value are set. When the message is marshaled + // enc will be set to the encoded form of the message. + // + // When a message is unmarshaled and contains extensions, each + // extension will have only enc set. When such an extension is + // accessed using GetExtension (or GetExtensions) desc and value + // will be set. + desc *ExtensionDesc + value interface{} + enc []byte +} + +// SetRawExtension is for testing only. +func SetRawExtension(base extendableProto, id int32, b []byte) { + base.ExtensionMap()[id] = Extension{enc: b} +} + +// isExtensionField returns true iff the given field number is in an extension range. +func isExtensionField(pb extendableProto, field int32) bool { + for _, er := range pb.ExtensionRangeArray() { + if er.Start <= field && field <= er.End { + return true + } + } + return false +} + +// checkExtensionTypes checks that the given extension is valid for pb. +func checkExtensionTypes(pb extendableProto, extension *ExtensionDesc) error { + // Check the extended type. + if a, b := reflect.TypeOf(pb), reflect.TypeOf(extension.ExtendedType); a != b { + return errors.New("proto: bad extended type; " + b.String() + " does not extend " + a.String()) + } + // Check the range. + if !isExtensionField(pb, extension.Field) { + return errors.New("proto: bad extension number; not in declared ranges") + } + return nil +} + +// extPropKey is sufficient to uniquely identify an extension. 
+type extPropKey struct { + base reflect.Type + field int32 +} + +var extProp = struct { + sync.RWMutex + m map[extPropKey]*Properties +}{ + m: make(map[extPropKey]*Properties), +} + +func extensionProperties(ed *ExtensionDesc) *Properties { + key := extPropKey{base: reflect.TypeOf(ed.ExtendedType), field: ed.Field} + + extProp.RLock() + if prop, ok := extProp.m[key]; ok { + extProp.RUnlock() + return prop + } + extProp.RUnlock() + + extProp.Lock() + defer extProp.Unlock() + // Check again. + if prop, ok := extProp.m[key]; ok { + return prop + } + + prop := new(Properties) + prop.Init(reflect.TypeOf(ed.ExtensionType), "unknown_name", ed.Tag, nil) + extProp.m[key] = prop + return prop +} + +// encodeExtensionMap encodes any unmarshaled (unencoded) extensions in m. +func encodeExtensionMap(m map[int32]Extension) error { + for k, e := range m { + if e.value == nil || e.desc == nil { + // Extension is only in its encoded form. + continue + } + + // We don't skip extensions that have an encoded form set, + // because the extension value may have been mutated after + // the last time this function was called. + + et := reflect.TypeOf(e.desc.ExtensionType) + props := extensionProperties(e.desc) + + p := NewBuffer(nil) + // If e.value has type T, the encoder expects a *struct{ X T }. + // Pass a *T with a zero field and hope it all works out. + x := reflect.New(et) + x.Elem().Set(reflect.ValueOf(e.value)) + if err := props.enc(p, props, toStructPointer(x)); err != nil { + return err + } + e.enc = p.buf + m[k] = e + } + return nil +} + +func sizeExtensionMap(m map[int32]Extension) (n int) { + for _, e := range m { + if e.value == nil || e.desc == nil { + // Extension is only in its encoded form. + n += len(e.enc) + continue + } + + // We don't skip extensions that have an encoded form set, + // because the extension value may have been mutated after + // the last time this function was called. + + et := reflect.TypeOf(e.desc.ExtensionType) + props := extensionProperties(e.desc) + + // If e.value has type T, the encoder expects a *struct{ X T }. + // Pass a *T with a zero field and hope it all works out. + x := reflect.New(et) + x.Elem().Set(reflect.ValueOf(e.value)) + n += props.size(props, toStructPointer(x)) + } + return +} + +// HasExtension returns whether the given extension is present in pb. +func HasExtension(pb extendableProto, extension *ExtensionDesc) bool { + // TODO: Check types, field numbers, etc.? + _, ok := pb.ExtensionMap()[extension.Field] + return ok +} + +// ClearExtension removes the given extension from pb. +func ClearExtension(pb extendableProto, extension *ExtensionDesc) { + // TODO: Check types, field numbers, etc.? + delete(pb.ExtensionMap(), extension.Field) +} + +// GetExtension parses and returns the given extension of pb. +// If the extension is not present and has no default value it returns ErrMissingExtension. +func GetExtension(pb extendableProto, extension *ExtensionDesc) (interface{}, error) { + if err := checkExtensionTypes(pb, extension); err != nil { + return nil, err + } + + emap := pb.ExtensionMap() + e, ok := emap[extension.Field] + if !ok { + // defaultExtensionValue returns the default value or + // ErrMissingExtension if there is no default. + return defaultExtensionValue(extension) + } + + if e.value != nil { + // Already decoded. Check the descriptor, though. + if e.desc != extension { + // This shouldn't happen. If it does, it means that + // GetExtension was called twice with two different + // descriptors with the same field number. 
+ return nil, errors.New("proto: descriptor conflict") + } + return e.value, nil + } + + v, err := decodeExtension(e.enc, extension) + if err != nil { + return nil, err + } + + // Remember the decoded version and drop the encoded version. + // That way it is safe to mutate what we return. + e.value = v + e.desc = extension + e.enc = nil + emap[extension.Field] = e + return e.value, nil +} + +// defaultExtensionValue returns the default value for extension. +// If no default for an extension is defined ErrMissingExtension is returned. +func defaultExtensionValue(extension *ExtensionDesc) (interface{}, error) { + t := reflect.TypeOf(extension.ExtensionType) + props := extensionProperties(extension) + + sf, _, err := fieldDefault(t, props) + if err != nil { + return nil, err + } + + if sf == nil || sf.value == nil { + // There is no default value. + return nil, ErrMissingExtension + } + + if t.Kind() != reflect.Ptr { + // We do not need to return a Ptr, we can directly return sf.value. + return sf.value, nil + } + + // We need to return an interface{} that is a pointer to sf.value. + value := reflect.New(t).Elem() + value.Set(reflect.New(value.Type().Elem())) + if sf.kind == reflect.Int32 { + // We may have an int32 or an enum, but the underlying data is int32. + // Since we can't set an int32 into a non int32 reflect.value directly + // set it as a int32. + value.Elem().SetInt(int64(sf.value.(int32))) + } else { + value.Elem().Set(reflect.ValueOf(sf.value)) + } + return value.Interface(), nil +} + +// decodeExtension decodes an extension encoded in b. +func decodeExtension(b []byte, extension *ExtensionDesc) (interface{}, error) { + o := NewBuffer(b) + + t := reflect.TypeOf(extension.ExtensionType) + + props := extensionProperties(extension) + + // t is a pointer to a struct, pointer to basic type or a slice. + // Allocate a "field" to store the pointer/slice itself; the + // pointer/slice will be stored here. We pass + // the address of this field to props.dec. + // This passes a zero field and a *t and lets props.dec + // interpret it as a *struct{ x t }. + value := reflect.New(t).Elem() + + for { + // Discard wire type and field number varint. It isn't needed. + if _, err := o.DecodeVarint(); err != nil { + return nil, err + } + + if err := props.dec(o, props, toStructPointer(value.Addr())); err != nil { + return nil, err + } + + if o.index >= len(o.buf) { + break + } + } + return value.Interface(), nil +} + +// GetExtensions returns a slice of the extensions present in pb that are also listed in es. +// The returned slice has the same length as es; missing extensions will appear as nil elements. +func GetExtensions(pb Message, es []*ExtensionDesc) (extensions []interface{}, err error) { + epb, ok := pb.(extendableProto) + if !ok { + err = errors.New("proto: not an extendable proto") + return + } + extensions = make([]interface{}, len(es)) + for i, e := range es { + extensions[i], err = GetExtension(epb, e) + if err == ErrMissingExtension { + err = nil + } + if err != nil { + return + } + } + return +} + +// SetExtension sets the specified extension of pb to the specified value. 
+func SetExtension(pb extendableProto, extension *ExtensionDesc, value interface{}) error { + if err := checkExtensionTypes(pb, extension); err != nil { + return err + } + typ := reflect.TypeOf(extension.ExtensionType) + if typ != reflect.TypeOf(value) { + return errors.New("proto: bad extension value type") + } + // nil extension values need to be caught early, because the + // encoder can't distinguish an ErrNil due to a nil extension + // from an ErrNil due to a missing field. Extensions are + // always optional, so the encoder would just swallow the error + // and drop all the extensions from the encoded message. + if reflect.ValueOf(value).IsNil() { + return fmt.Errorf("proto: SetExtension called with nil value of type %T", value) + } + + pb.ExtensionMap()[extension.Field] = Extension{desc: extension, value: value} + return nil +} + +// A global registry of extensions. +// The generated code will register the generated descriptors by calling RegisterExtension. + +var extensionMaps = make(map[reflect.Type]map[int32]*ExtensionDesc) + +// RegisterExtension is called from the generated code. +func RegisterExtension(desc *ExtensionDesc) { + st := reflect.TypeOf(desc.ExtendedType).Elem() + m := extensionMaps[st] + if m == nil { + m = make(map[int32]*ExtensionDesc) + extensionMaps[st] = m + } + if _, ok := m[desc.Field]; ok { + panic("proto: duplicate extension registered: " + st.String() + " " + strconv.Itoa(int(desc.Field))) + } + m[desc.Field] = desc +} + +// RegisteredExtensions returns a map of the registered extensions of a +// protocol buffer struct, indexed by the extension number. +// The argument pb should be a nil pointer to the struct type. +func RegisteredExtensions(pb Message) map[int32]*ExtensionDesc { + return extensionMaps[reflect.TypeOf(pb).Elem()] +} diff --git a/vendor/github.com/docker/distribution/vendor/github.com/golang/protobuf/proto/lib.go b/vendor/github.com/docker/distribution/vendor/github.com/golang/protobuf/proto/lib.go new file mode 100644 index 000000000..0de8f8dff --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/github.com/golang/protobuf/proto/lib.go @@ -0,0 +1,894 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2010 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. 
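The extension accessors above (SetExtension, GetExtension, HasExtension, ClearExtension) are normally driven by descriptors emitted by the protocol compiler. A hedged sketch of the round trip, with pb.Base and pb.E_Nickname standing in for hypothetical generated code (an extendable proto2 message and a *string extension descriptor):

```go
package main

import (
	"log"

	proto "github.com/golang/protobuf/proto"
	pb "./example.pb" // hypothetical generated package
)

func main() {
	msg := &pb.Base{} // hypothetical extendable message with declared extension ranges

	if err := proto.SetExtension(msg, pb.E_Nickname, proto.String("keel")); err != nil {
		log.Fatal(err)
	}

	if proto.HasExtension(msg, pb.E_Nickname) {
		v, err := proto.GetExtension(msg, pb.E_Nickname) // decodes the value if needed
		if err != nil {
			log.Fatal(err)
		}
		log.Println(*(v.(*string))) // "keel"
	}

	proto.ClearExtension(msg, pb.E_Nickname)
}
```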
IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +/* +Package proto converts data structures to and from the wire format of +protocol buffers. It works in concert with the Go source code generated +for .proto files by the protocol compiler. + +A summary of the properties of the protocol buffer interface +for a protocol buffer variable v: + + - Names are turned from camel_case to CamelCase for export. + - There are no methods on v to set fields; just treat + them as structure fields. + - There are getters that return a field's value if set, + and return the field's default value if unset. + The getters work even if the receiver is a nil message. + - The zero value for a struct is its correct initialization state. + All desired fields must be set before marshaling. + - A Reset() method will restore a protobuf struct to its zero state. + - Non-repeated fields are pointers to the values; nil means unset. + That is, optional or required field int32 f becomes F *int32. + - Repeated fields are slices. + - Helper functions are available to aid the setting of fields. + msg.Foo = proto.String("hello") // set field + - Constants are defined to hold the default values of all fields that + have them. They have the form Default_StructName_FieldName. + Because the getter methods handle defaulted values, + direct use of these constants should be rare. + - Enums are given type names and maps from names to values. + Enum values are prefixed by the enclosing message's name, or by the + enum's type name if it is a top-level enum. Enum types have a String + method, and a Enum method to assist in message construction. + - Nested messages, groups and enums have type names prefixed with the name of + the surrounding message type. + - Extensions are given descriptor names that start with E_, + followed by an underscore-delimited list of the nested messages + that contain it (if any) followed by the CamelCased name of the + extension field itself. HasExtension, ClearExtension, GetExtension + and SetExtension are functions for manipulating extensions. + - Oneof field sets are given a single field in their message, + with distinguished wrapper types for each possible field value. + - Marshal and Unmarshal are functions to encode and decode the wire format. + +When the .proto file specifies `syntax="proto3"`, there are some differences: + + - Non-repeated fields of non-message type are values instead of pointers. + - Getters are only generated for message and oneof fields. + - Enum types do not get an Enum method. + +The simplest way to describe this is to see an example. 
+Given file test.proto, containing + + package example; + + enum FOO { X = 17; } + + message Test { + required string label = 1; + optional int32 type = 2 [default=77]; + repeated int64 reps = 3; + optional group OptionalGroup = 4 { + required string RequiredField = 5; + } + oneof union { + int32 number = 6; + string name = 7; + } + } + +The resulting file, test.pb.go, is: + + package example + + import proto "github.com/golang/protobuf/proto" + import math "math" + + type FOO int32 + const ( + FOO_X FOO = 17 + ) + var FOO_name = map[int32]string{ + 17: "X", + } + var FOO_value = map[string]int32{ + "X": 17, + } + + func (x FOO) Enum() *FOO { + p := new(FOO) + *p = x + return p + } + func (x FOO) String() string { + return proto.EnumName(FOO_name, int32(x)) + } + func (x *FOO) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(FOO_value, data) + if err != nil { + return err + } + *x = FOO(value) + return nil + } + + type Test struct { + Label *string `protobuf:"bytes,1,req,name=label" json:"label,omitempty"` + Type *int32 `protobuf:"varint,2,opt,name=type,def=77" json:"type,omitempty"` + Reps []int64 `protobuf:"varint,3,rep,name=reps" json:"reps,omitempty"` + Optionalgroup *Test_OptionalGroup `protobuf:"group,4,opt,name=OptionalGroup" json:"optionalgroup,omitempty"` + // Types that are valid to be assigned to Union: + // *Test_Number + // *Test_Name + Union isTest_Union `protobuf_oneof:"union"` + XXX_unrecognized []byte `json:"-"` + } + func (m *Test) Reset() { *m = Test{} } + func (m *Test) String() string { return proto.CompactTextString(m) } + func (*Test) ProtoMessage() {} + + type isTest_Union interface { + isTest_Union() + } + + type Test_Number struct { + Number int32 `protobuf:"varint,6,opt,name=number"` + } + type Test_Name struct { + Name string `protobuf:"bytes,7,opt,name=name"` + } + + func (*Test_Number) isTest_Union() {} + func (*Test_Name) isTest_Union() {} + + func (m *Test) GetUnion() isTest_Union { + if m != nil { + return m.Union + } + return nil + } + const Default_Test_Type int32 = 77 + + func (m *Test) GetLabel() string { + if m != nil && m.Label != nil { + return *m.Label + } + return "" + } + + func (m *Test) GetType() int32 { + if m != nil && m.Type != nil { + return *m.Type + } + return Default_Test_Type + } + + func (m *Test) GetOptionalgroup() *Test_OptionalGroup { + if m != nil { + return m.Optionalgroup + } + return nil + } + + type Test_OptionalGroup struct { + RequiredField *string `protobuf:"bytes,5,req" json:"RequiredField,omitempty"` + } + func (m *Test_OptionalGroup) Reset() { *m = Test_OptionalGroup{} } + func (m *Test_OptionalGroup) String() string { return proto.CompactTextString(m) } + + func (m *Test_OptionalGroup) GetRequiredField() string { + if m != nil && m.RequiredField != nil { + return *m.RequiredField + } + return "" + } + + func (m *Test) GetNumber() int32 { + if x, ok := m.GetUnion().(*Test_Number); ok { + return x.Number + } + return 0 + } + + func (m *Test) GetName() string { + if x, ok := m.GetUnion().(*Test_Name); ok { + return x.Name + } + return "" + } + + func init() { + proto.RegisterEnum("example.FOO", FOO_name, FOO_value) + } + +To create and play with a Test object: + + package main + + import ( + "log" + + "github.com/golang/protobuf/proto" + pb "./example.pb" + ) + + func main() { + test := &pb.Test{ + Label: proto.String("hello"), + Type: proto.Int32(17), + Reps: []int64{1, 2, 3}, + Optionalgroup: &pb.Test_OptionalGroup{ + RequiredField: proto.String("good bye"), + }, + Union: &pb.Test_Name{"fred"}, + 
} + data, err := proto.Marshal(test) + if err != nil { + log.Fatal("marshaling error: ", err) + } + newTest := &pb.Test{} + err = proto.Unmarshal(data, newTest) + if err != nil { + log.Fatal("unmarshaling error: ", err) + } + // Now test and newTest contain the same data. + if test.GetLabel() != newTest.GetLabel() { + log.Fatalf("data mismatch %q != %q", test.GetLabel(), newTest.GetLabel()) + } + // Use a type switch to determine which oneof was set. + switch u := test.Union.(type) { + case *pb.Test_Number: // u.Number contains the number. + case *pb.Test_Name: // u.Name contains the string. + } + // etc. + } +*/ +package proto + +import ( + "encoding/json" + "fmt" + "log" + "reflect" + "sort" + "strconv" + "sync" +) + +// Message is implemented by generated protocol buffer messages. +type Message interface { + Reset() + String() string + ProtoMessage() +} + +// Stats records allocation details about the protocol buffer encoders +// and decoders. Useful for tuning the library itself. +type Stats struct { + Emalloc uint64 // mallocs in encode + Dmalloc uint64 // mallocs in decode + Encode uint64 // number of encodes + Decode uint64 // number of decodes + Chit uint64 // number of cache hits + Cmiss uint64 // number of cache misses + Size uint64 // number of sizes +} + +// Set to true to enable stats collection. +const collectStats = false + +var stats Stats + +// GetStats returns a copy of the global Stats structure. +func GetStats() Stats { return stats } + +// A Buffer is a buffer manager for marshaling and unmarshaling +// protocol buffers. It may be reused between invocations to +// reduce memory usage. It is not necessary to use a Buffer; +// the global functions Marshal and Unmarshal create a +// temporary Buffer and are fine for most applications. +type Buffer struct { + buf []byte // encode/decode byte stream + index int // write point + + // pools of basic types to amortize allocation. + bools []bool + uint32s []uint32 + uint64s []uint64 + + // extra pools, only used with pointer_reflect.go + int32s []int32 + int64s []int64 + float32s []float32 + float64s []float64 +} + +// NewBuffer allocates a new Buffer and initializes its internal data to +// the contents of the argument slice. +func NewBuffer(e []byte) *Buffer { + return &Buffer{buf: e} +} + +// Reset resets the Buffer, ready for marshaling a new protocol buffer. +func (p *Buffer) Reset() { + p.buf = p.buf[0:0] // for reading/writing + p.index = 0 // for reading +} + +// SetBuf replaces the internal buffer with the slice, +// ready for unmarshaling the contents of the slice. +func (p *Buffer) SetBuf(s []byte) { + p.buf = s + p.index = 0 +} + +// Bytes returns the contents of the Buffer. +func (p *Buffer) Bytes() []byte { return p.buf } + +/* + * Helper routines for simplifying the creation of optional fields of basic type. + */ + +// Bool is a helper routine that allocates a new bool value +// to store v and returns a pointer to it. +func Bool(v bool) *bool { + return &v +} + +// Int32 is a helper routine that allocates a new int32 value +// to store v and returns a pointer to it. +func Int32(v int32) *int32 { + return &v +} + +// Int is a helper routine that allocates a new int32 value +// to store v and returns a pointer to it, but unlike Int32 +// its argument value is an int. +func Int(v int) *int32 { + p := new(int32) + *p = int32(v) + return p +} + +// Int64 is a helper routine that allocates a new int64 value +// to store v and returns a pointer to it. 
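The Buffer documentation above notes that a Buffer may be reused between invocations to cut allocations. A minimal sketch of that pattern, again assuming the hypothetical pb.Test message from the package example:

```go
package main

import (
	"log"

	proto "github.com/golang/protobuf/proto"
	pb "./example.pb" // hypothetical generated package
)

func main() {
	msgs := []*pb.Test{
		{Label: proto.String("a")},
		{Label: proto.String("b")},
	}

	var buf proto.Buffer // the zero value is ready to use
	for _, m := range msgs {
		buf.Reset() // reuse the underlying byte slice between marshals
		if err := buf.Marshal(m); err != nil {
			log.Fatal(err)
		}
		log.Printf("% x", buf.Bytes())
	}
}
```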
+func Int64(v int64) *int64 { + return &v +} + +// Float32 is a helper routine that allocates a new float32 value +// to store v and returns a pointer to it. +func Float32(v float32) *float32 { + return &v +} + +// Float64 is a helper routine that allocates a new float64 value +// to store v and returns a pointer to it. +func Float64(v float64) *float64 { + return &v +} + +// Uint32 is a helper routine that allocates a new uint32 value +// to store v and returns a pointer to it. +func Uint32(v uint32) *uint32 { + return &v +} + +// Uint64 is a helper routine that allocates a new uint64 value +// to store v and returns a pointer to it. +func Uint64(v uint64) *uint64 { + return &v +} + +// String is a helper routine that allocates a new string value +// to store v and returns a pointer to it. +func String(v string) *string { + return &v +} + +// EnumName is a helper function to simplify printing protocol buffer enums +// by name. Given an enum map and a value, it returns a useful string. +func EnumName(m map[int32]string, v int32) string { + s, ok := m[v] + if ok { + return s + } + return strconv.Itoa(int(v)) +} + +// UnmarshalJSONEnum is a helper function to simplify recovering enum int values +// from their JSON-encoded representation. Given a map from the enum's symbolic +// names to its int values, and a byte buffer containing the JSON-encoded +// value, it returns an int32 that can be cast to the enum type by the caller. +// +// The function can deal with both JSON representations, numeric and symbolic. +func UnmarshalJSONEnum(m map[string]int32, data []byte, enumName string) (int32, error) { + if data[0] == '"' { + // New style: enums are strings. + var repr string + if err := json.Unmarshal(data, &repr); err != nil { + return -1, err + } + val, ok := m[repr] + if !ok { + return 0, fmt.Errorf("unrecognized enum %s value %q", enumName, repr) + } + return val, nil + } + // Old style: enums are ints. + var val int32 + if err := json.Unmarshal(data, &val); err != nil { + return 0, fmt.Errorf("cannot unmarshal %#q into enum %s", data, enumName) + } + return val, nil +} + +// DebugPrint dumps the encoded data in b in a debugging format with a header +// including the string s. Used in testing but made available for general debugging. 
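UnmarshalJSONEnum, defined above, accepts both the symbolic and the numeric JSON encodings of an enum value. A small check using the FOO_value map from the package documentation's example:

```go
package main

import (
	"fmt"

	proto "github.com/golang/protobuf/proto"
)

func main() {
	FOO_value := map[string]int32{"X": 17} // as in the FOO enum from the package docs

	v1, err1 := proto.UnmarshalJSONEnum(FOO_value, []byte(`"X"`), "FOO") // symbolic form
	v2, err2 := proto.UnmarshalJSONEnum(FOO_value, []byte(`17`), "FOO")  // numeric form
	fmt.Println(v1, err1, v2, err2)                                      // 17 <nil> 17 <nil>
}
```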
+func (p *Buffer) DebugPrint(s string, b []byte) { + var u uint64 + + obuf := p.buf + index := p.index + p.buf = b + p.index = 0 + depth := 0 + + fmt.Printf("\n--- %s ---\n", s) + +out: + for { + for i := 0; i < depth; i++ { + fmt.Print(" ") + } + + index := p.index + if index == len(p.buf) { + break + } + + op, err := p.DecodeVarint() + if err != nil { + fmt.Printf("%3d: fetching op err %v\n", index, err) + break out + } + tag := op >> 3 + wire := op & 7 + + switch wire { + default: + fmt.Printf("%3d: t=%3d unknown wire=%d\n", + index, tag, wire) + break out + + case WireBytes: + var r []byte + + r, err = p.DecodeRawBytes(false) + if err != nil { + break out + } + fmt.Printf("%3d: t=%3d bytes [%d]", index, tag, len(r)) + if len(r) <= 6 { + for i := 0; i < len(r); i++ { + fmt.Printf(" %.2x", r[i]) + } + } else { + for i := 0; i < 3; i++ { + fmt.Printf(" %.2x", r[i]) + } + fmt.Printf(" ..") + for i := len(r) - 3; i < len(r); i++ { + fmt.Printf(" %.2x", r[i]) + } + } + fmt.Printf("\n") + + case WireFixed32: + u, err = p.DecodeFixed32() + if err != nil { + fmt.Printf("%3d: t=%3d fix32 err %v\n", index, tag, err) + break out + } + fmt.Printf("%3d: t=%3d fix32 %d\n", index, tag, u) + + case WireFixed64: + u, err = p.DecodeFixed64() + if err != nil { + fmt.Printf("%3d: t=%3d fix64 err %v\n", index, tag, err) + break out + } + fmt.Printf("%3d: t=%3d fix64 %d\n", index, tag, u) + + case WireVarint: + u, err = p.DecodeVarint() + if err != nil { + fmt.Printf("%3d: t=%3d varint err %v\n", index, tag, err) + break out + } + fmt.Printf("%3d: t=%3d varint %d\n", index, tag, u) + + case WireStartGroup: + fmt.Printf("%3d: t=%3d start\n", index, tag) + depth++ + + case WireEndGroup: + depth-- + fmt.Printf("%3d: t=%3d end\n", index, tag) + } + } + + if depth != 0 { + fmt.Printf("%3d: start-end not balanced %d\n", p.index, depth) + } + fmt.Printf("\n") + + p.buf = obuf + p.index = index +} + +// SetDefaults sets unset protocol buffer fields to their default values. +// It only modifies fields that are both unset and have defined defaults. +// It recursively sets default values in any non-nil sub-messages. +func SetDefaults(pb Message) { + setDefaults(reflect.ValueOf(pb), true, false) +} + +// v is a pointer to a struct. +func setDefaults(v reflect.Value, recur, zeros bool) { + v = v.Elem() + + defaultMu.RLock() + dm, ok := defaults[v.Type()] + defaultMu.RUnlock() + if !ok { + dm = buildDefaultMessage(v.Type()) + defaultMu.Lock() + defaults[v.Type()] = dm + defaultMu.Unlock() + } + + for _, sf := range dm.scalars { + f := v.Field(sf.index) + if !f.IsNil() { + // field already set + continue + } + dv := sf.value + if dv == nil && !zeros { + // no explicit default, and don't want to set zeros + continue + } + fptr := f.Addr().Interface() // **T + // TODO: Consider batching the allocations we do here. 
+ switch sf.kind { + case reflect.Bool: + b := new(bool) + if dv != nil { + *b = dv.(bool) + } + *(fptr.(**bool)) = b + case reflect.Float32: + f := new(float32) + if dv != nil { + *f = dv.(float32) + } + *(fptr.(**float32)) = f + case reflect.Float64: + f := new(float64) + if dv != nil { + *f = dv.(float64) + } + *(fptr.(**float64)) = f + case reflect.Int32: + // might be an enum + if ft := f.Type(); ft != int32PtrType { + // enum + f.Set(reflect.New(ft.Elem())) + if dv != nil { + f.Elem().SetInt(int64(dv.(int32))) + } + } else { + // int32 field + i := new(int32) + if dv != nil { + *i = dv.(int32) + } + *(fptr.(**int32)) = i + } + case reflect.Int64: + i := new(int64) + if dv != nil { + *i = dv.(int64) + } + *(fptr.(**int64)) = i + case reflect.String: + s := new(string) + if dv != nil { + *s = dv.(string) + } + *(fptr.(**string)) = s + case reflect.Uint8: + // exceptional case: []byte + var b []byte + if dv != nil { + db := dv.([]byte) + b = make([]byte, len(db)) + copy(b, db) + } else { + b = []byte{} + } + *(fptr.(*[]byte)) = b + case reflect.Uint32: + u := new(uint32) + if dv != nil { + *u = dv.(uint32) + } + *(fptr.(**uint32)) = u + case reflect.Uint64: + u := new(uint64) + if dv != nil { + *u = dv.(uint64) + } + *(fptr.(**uint64)) = u + default: + log.Printf("proto: can't set default for field %v (sf.kind=%v)", f, sf.kind) + } + } + + for _, ni := range dm.nested { + f := v.Field(ni) + // f is *T or []*T or map[T]*T + switch f.Kind() { + case reflect.Ptr: + if f.IsNil() { + continue + } + setDefaults(f, recur, zeros) + + case reflect.Slice: + for i := 0; i < f.Len(); i++ { + e := f.Index(i) + if e.IsNil() { + continue + } + setDefaults(e, recur, zeros) + } + + case reflect.Map: + for _, k := range f.MapKeys() { + e := f.MapIndex(k) + if e.IsNil() { + continue + } + setDefaults(e, recur, zeros) + } + } + } +} + +var ( + // defaults maps a protocol buffer struct type to a slice of the fields, + // with its scalar fields set to their proto-declared non-zero default values. + defaultMu sync.RWMutex + defaults = make(map[reflect.Type]defaultMessage) + + int32PtrType = reflect.TypeOf((*int32)(nil)) +) + +// defaultMessage represents information about the default values of a message. +type defaultMessage struct { + scalars []scalarField + nested []int // struct field index of nested messages +} + +type scalarField struct { + index int // struct field index + kind reflect.Kind // element type (the T in *T or []T) + value interface{} // the proto-declared default value, or nil +} + +// t is a struct type. +func buildDefaultMessage(t reflect.Type) (dm defaultMessage) { + sprop := GetProperties(t) + for _, prop := range sprop.Prop { + fi, ok := sprop.decoderTags.get(prop.Tag) + if !ok { + // XXX_unrecognized + continue + } + ft := t.Field(fi).Type + + sf, nested, err := fieldDefault(ft, prop) + switch { + case err != nil: + log.Print(err) + case nested: + dm.nested = append(dm.nested, fi) + case sf != nil: + sf.index = fi + dm.scalars = append(dm.scalars, *sf) + } + } + + return dm +} + +// fieldDefault returns the scalarField for field type ft. +// sf will be nil if the field can not have a default. +// nestedMessage will be true if this is a nested message. +// Note that sf.index is not set on return. 
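Putting SetDefaults together with the example.Test message from the package documentation, whose Type field declares a default of 77 (pb is the hypothetical generated package again):

```go
package main

import (
	"fmt"

	proto "github.com/golang/protobuf/proto"
	pb "./example.pb" // hypothetical generated package
)

func main() {
	m := &pb.Test{Label: proto.String("x")} // Type is unset (nil pointer)

	proto.SetDefaults(m)

	// SetDefaults materialized the declared default, so the pointer is now set.
	fmt.Println(m.Type != nil, *m.Type) // true 77
}
```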
+func fieldDefault(ft reflect.Type, prop *Properties) (sf *scalarField, nestedMessage bool, err error) { + var canHaveDefault bool + switch ft.Kind() { + case reflect.Ptr: + if ft.Elem().Kind() == reflect.Struct { + nestedMessage = true + } else { + canHaveDefault = true // proto2 scalar field + } + + case reflect.Slice: + switch ft.Elem().Kind() { + case reflect.Ptr: + nestedMessage = true // repeated message + case reflect.Uint8: + canHaveDefault = true // bytes field + } + + case reflect.Map: + if ft.Elem().Kind() == reflect.Ptr { + nestedMessage = true // map with message values + } + } + + if !canHaveDefault { + if nestedMessage { + return nil, true, nil + } + return nil, false, nil + } + + // We now know that ft is a pointer or slice. + sf = &scalarField{kind: ft.Elem().Kind()} + + // scalar fields without defaults + if !prop.HasDefault { + return sf, false, nil + } + + // a scalar field: either *T or []byte + switch ft.Elem().Kind() { + case reflect.Bool: + x, err := strconv.ParseBool(prop.Default) + if err != nil { + return nil, false, fmt.Errorf("proto: bad default bool %q: %v", prop.Default, err) + } + sf.value = x + case reflect.Float32: + x, err := strconv.ParseFloat(prop.Default, 32) + if err != nil { + return nil, false, fmt.Errorf("proto: bad default float32 %q: %v", prop.Default, err) + } + sf.value = float32(x) + case reflect.Float64: + x, err := strconv.ParseFloat(prop.Default, 64) + if err != nil { + return nil, false, fmt.Errorf("proto: bad default float64 %q: %v", prop.Default, err) + } + sf.value = x + case reflect.Int32: + x, err := strconv.ParseInt(prop.Default, 10, 32) + if err != nil { + return nil, false, fmt.Errorf("proto: bad default int32 %q: %v", prop.Default, err) + } + sf.value = int32(x) + case reflect.Int64: + x, err := strconv.ParseInt(prop.Default, 10, 64) + if err != nil { + return nil, false, fmt.Errorf("proto: bad default int64 %q: %v", prop.Default, err) + } + sf.value = x + case reflect.String: + sf.value = prop.Default + case reflect.Uint8: + // []byte (not *uint8) + sf.value = []byte(prop.Default) + case reflect.Uint32: + x, err := strconv.ParseUint(prop.Default, 10, 32) + if err != nil { + return nil, false, fmt.Errorf("proto: bad default uint32 %q: %v", prop.Default, err) + } + sf.value = uint32(x) + case reflect.Uint64: + x, err := strconv.ParseUint(prop.Default, 10, 64) + if err != nil { + return nil, false, fmt.Errorf("proto: bad default uint64 %q: %v", prop.Default, err) + } + sf.value = x + default: + return nil, false, fmt.Errorf("proto: unhandled def kind %v", ft.Elem().Kind()) + } + + return sf, false, nil +} + +// Map fields may have key types of non-float scalars, strings and enums. +// The easiest way to sort them in some deterministic order is to use fmt. +// If this turns out to be inefficient we can always consider other options, +// such as doing a Schwartzian transform. + +func mapKeys(vs []reflect.Value) sort.Interface { + s := mapKeySorter{ + vs: vs, + // default Less function: textual comparison + less: func(a, b reflect.Value) bool { + return fmt.Sprint(a.Interface()) < fmt.Sprint(b.Interface()) + }, + } + + // Type specialization per https://developers.google.com/protocol-buffers/docs/proto#maps; + // numeric keys are sorted numerically. 
+ if len(vs) == 0 { + return s + } + switch vs[0].Kind() { + case reflect.Int32, reflect.Int64: + s.less = func(a, b reflect.Value) bool { return a.Int() < b.Int() } + case reflect.Uint32, reflect.Uint64: + s.less = func(a, b reflect.Value) bool { return a.Uint() < b.Uint() } + } + + return s +} + +type mapKeySorter struct { + vs []reflect.Value + less func(a, b reflect.Value) bool +} + +func (s mapKeySorter) Len() int { return len(s.vs) } +func (s mapKeySorter) Swap(i, j int) { s.vs[i], s.vs[j] = s.vs[j], s.vs[i] } +func (s mapKeySorter) Less(i, j int) bool { + return s.less(s.vs[i], s.vs[j]) +} + +// isProto3Zero reports whether v is a zero proto3 value. +func isProto3Zero(v reflect.Value) bool { + switch v.Kind() { + case reflect.Bool: + return !v.Bool() + case reflect.Int32, reflect.Int64: + return v.Int() == 0 + case reflect.Uint32, reflect.Uint64: + return v.Uint() == 0 + case reflect.Float32, reflect.Float64: + return v.Float() == 0 + case reflect.String: + return v.String() == "" + } + return false +} + +// ProtoPackageIsVersion1 is referenced from generated protocol buffer files +// to assert that that code is compatible with this version of the proto package. +const ProtoPackageIsVersion1 = true diff --git a/vendor/github.com/docker/distribution/vendor/github.com/golang/protobuf/proto/message_set.go b/vendor/github.com/docker/distribution/vendor/github.com/golang/protobuf/proto/message_set.go new file mode 100644 index 000000000..e25e01e63 --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/github.com/golang/protobuf/proto/message_set.go @@ -0,0 +1,280 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2010 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package proto + +/* + * Support for message sets. + */ + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "reflect" + "sort" +) + +// errNoMessageTypeID occurs when a protocol buffer does not have a message type ID. 
+// A message type ID is required for storing a protocol buffer in a message set. +var errNoMessageTypeID = errors.New("proto does not have a message type ID") + +// The first two types (_MessageSet_Item and messageSet) +// model what the protocol compiler produces for the following protocol message: +// message MessageSet { +// repeated group Item = 1 { +// required int32 type_id = 2; +// required string message = 3; +// }; +// } +// That is the MessageSet wire format. We can't use a proto to generate these +// because that would introduce a circular dependency between it and this package. + +type _MessageSet_Item struct { + TypeId *int32 `protobuf:"varint,2,req,name=type_id"` + Message []byte `protobuf:"bytes,3,req,name=message"` +} + +type messageSet struct { + Item []*_MessageSet_Item `protobuf:"group,1,rep"` + XXX_unrecognized []byte + // TODO: caching? +} + +// Make sure messageSet is a Message. +var _ Message = (*messageSet)(nil) + +// messageTypeIder is an interface satisfied by a protocol buffer type +// that may be stored in a MessageSet. +type messageTypeIder interface { + MessageTypeId() int32 +} + +func (ms *messageSet) find(pb Message) *_MessageSet_Item { + mti, ok := pb.(messageTypeIder) + if !ok { + return nil + } + id := mti.MessageTypeId() + for _, item := range ms.Item { + if *item.TypeId == id { + return item + } + } + return nil +} + +func (ms *messageSet) Has(pb Message) bool { + if ms.find(pb) != nil { + return true + } + return false +} + +func (ms *messageSet) Unmarshal(pb Message) error { + if item := ms.find(pb); item != nil { + return Unmarshal(item.Message, pb) + } + if _, ok := pb.(messageTypeIder); !ok { + return errNoMessageTypeID + } + return nil // TODO: return error instead? +} + +func (ms *messageSet) Marshal(pb Message) error { + msg, err := Marshal(pb) + if err != nil { + return err + } + if item := ms.find(pb); item != nil { + // reuse existing item + item.Message = msg + return nil + } + + mti, ok := pb.(messageTypeIder) + if !ok { + return errNoMessageTypeID + } + + mtid := mti.MessageTypeId() + ms.Item = append(ms.Item, &_MessageSet_Item{ + TypeId: &mtid, + Message: msg, + }) + return nil +} + +func (ms *messageSet) Reset() { *ms = messageSet{} } +func (ms *messageSet) String() string { return CompactTextString(ms) } +func (*messageSet) ProtoMessage() {} + +// Support for the message_set_wire_format message option. + +func skipVarint(buf []byte) []byte { + i := 0 + for ; buf[i]&0x80 != 0; i++ { + } + return buf[i+1:] +} + +// MarshalMessageSet encodes the extension map represented by m in the message set wire format. +// It is called by generated Marshal methods on protocol buffer messages with the message_set_wire_format option. +func MarshalMessageSet(m map[int32]Extension) ([]byte, error) { + if err := encodeExtensionMap(m); err != nil { + return nil, err + } + + // Sort extension IDs to provide a deterministic encoding. + // See also enc_map in encode.go. + ids := make([]int, 0, len(m)) + for id := range m { + ids = append(ids, int(id)) + } + sort.Ints(ids) + + ms := &messageSet{Item: make([]*_MessageSet_Item, 0, len(m))} + for _, id := range ids { + e := m[int32(id)] + // Remove the wire type and field number varint, as well as the length varint. + msg := skipVarint(skipVarint(e.enc)) + + ms.Item = append(ms.Item, &_MessageSet_Item{ + TypeId: Int32(int32(id)), + Message: msg, + }) + } + return Marshal(ms) +} + +// UnmarshalMessageSet decodes the extension map encoded in buf in the message set wire format. 
+// It is called by generated Unmarshal methods on protocol buffer messages with the message_set_wire_format option. +func UnmarshalMessageSet(buf []byte, m map[int32]Extension) error { + ms := new(messageSet) + if err := Unmarshal(buf, ms); err != nil { + return err + } + for _, item := range ms.Item { + id := *item.TypeId + msg := item.Message + + // Restore wire type and field number varint, plus length varint. + // Be careful to preserve duplicate items. + b := EncodeVarint(uint64(id)<<3 | WireBytes) + if ext, ok := m[id]; ok { + // Existing data; rip off the tag and length varint + // so we join the new data correctly. + // We can assume that ext.enc is set because we are unmarshaling. + o := ext.enc[len(b):] // skip wire type and field number + _, n := DecodeVarint(o) // calculate length of length varint + o = o[n:] // skip length varint + msg = append(o, msg...) // join old data and new data + } + b = append(b, EncodeVarint(uint64(len(msg)))...) + b = append(b, msg...) + + m[id] = Extension{enc: b} + } + return nil +} + +// MarshalMessageSetJSON encodes the extension map represented by m in JSON format. +// It is called by generated MarshalJSON methods on protocol buffer messages with the message_set_wire_format option. +func MarshalMessageSetJSON(m map[int32]Extension) ([]byte, error) { + var b bytes.Buffer + b.WriteByte('{') + + // Process the map in key order for deterministic output. + ids := make([]int32, 0, len(m)) + for id := range m { + ids = append(ids, id) + } + sort.Sort(int32Slice(ids)) // int32Slice defined in text.go + + for i, id := range ids { + ext := m[id] + if i > 0 { + b.WriteByte(',') + } + + msd, ok := messageSetMap[id] + if !ok { + // Unknown type; we can't render it, so skip it. + continue + } + fmt.Fprintf(&b, `"[%s]":`, msd.name) + + x := ext.value + if x == nil { + x = reflect.New(msd.t.Elem()).Interface() + if err := Unmarshal(ext.enc, x.(Message)); err != nil { + return nil, err + } + } + d, err := json.Marshal(x) + if err != nil { + return nil, err + } + b.Write(d) + } + b.WriteByte('}') + return b.Bytes(), nil +} + +// UnmarshalMessageSetJSON decodes the extension map encoded in buf in JSON format. +// It is called by generated UnmarshalJSON methods on protocol buffer messages with the message_set_wire_format option. +func UnmarshalMessageSetJSON(buf []byte, m map[int32]Extension) error { + // Common-case fast path. + if len(buf) == 0 || bytes.Equal(buf, []byte("{}")) { + return nil + } + + // This is fairly tricky, and it's not clear that it is needed. + return errors.New("TODO: UnmarshalMessageSetJSON not yet implemented") +} + +// A global registry of types that can be used in a MessageSet. + +var messageSetMap = make(map[int32]messageSetDesc) + +type messageSetDesc struct { + t reflect.Type // pointer to struct + name string +} + +// RegisterMessageSetType is called from the generated code. +func RegisterMessageSetType(m Message, fieldNum int32, name string) { + messageSetMap[fieldNum] = messageSetDesc{ + t: reflect.TypeOf(m), + name: name, + } +} diff --git a/vendor/github.com/docker/distribution/vendor/github.com/golang/protobuf/proto/pointer_reflect.go b/vendor/github.com/docker/distribution/vendor/github.com/golang/protobuf/proto/pointer_reflect.go new file mode 100644 index 000000000..749919d25 --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/github.com/golang/protobuf/proto/pointer_reflect.go @@ -0,0 +1,479 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2012 The Go Authors. 
All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +// +build appengine + +// This file contains an implementation of proto field accesses using package reflect. +// It is slower than the code in pointer_unsafe.go but it avoids package unsafe and can +// be used on App Engine. + +package proto + +import ( + "math" + "reflect" +) + +// A structPointer is a pointer to a struct. +type structPointer struct { + v reflect.Value +} + +// toStructPointer returns a structPointer equivalent to the given reflect value. +// The reflect value must itself be a pointer to a struct. +func toStructPointer(v reflect.Value) structPointer { + return structPointer{v} +} + +// IsNil reports whether p is nil. +func structPointer_IsNil(p structPointer) bool { + return p.v.IsNil() +} + +// Interface returns the struct pointer as an interface value. +func structPointer_Interface(p structPointer, _ reflect.Type) interface{} { + return p.v.Interface() +} + +// A field identifies a field in a struct, accessible from a structPointer. +// In this implementation, a field is identified by the sequence of field indices +// passed to reflect's FieldByIndex. +type field []int + +// toField returns a field equivalent to the given reflect field. +func toField(f *reflect.StructField) field { + return f.Index +} + +// invalidField is an invalid field identifier. +var invalidField = field(nil) + +// IsValid reports whether the field identifier is valid. +func (f field) IsValid() bool { return f != nil } + +// field returns the given field in the struct as a reflect value. +func structPointer_field(p structPointer, f field) reflect.Value { + // Special case: an extension map entry with a value of type T + // passes a *T to the struct-handling code with a zero field, + // expecting that it will be treated as equivalent to *struct{ X T }, + // which has the same memory layout. We have to handle that case + // specially, because reflect will panic if we call FieldByIndex on a + // non-struct. 
+ if f == nil { + return p.v.Elem() + } + + return p.v.Elem().FieldByIndex(f) +} + +// ifield returns the given field in the struct as an interface value. +func structPointer_ifield(p structPointer, f field) interface{} { + return structPointer_field(p, f).Addr().Interface() +} + +// Bytes returns the address of a []byte field in the struct. +func structPointer_Bytes(p structPointer, f field) *[]byte { + return structPointer_ifield(p, f).(*[]byte) +} + +// BytesSlice returns the address of a [][]byte field in the struct. +func structPointer_BytesSlice(p structPointer, f field) *[][]byte { + return structPointer_ifield(p, f).(*[][]byte) +} + +// Bool returns the address of a *bool field in the struct. +func structPointer_Bool(p structPointer, f field) **bool { + return structPointer_ifield(p, f).(**bool) +} + +// BoolVal returns the address of a bool field in the struct. +func structPointer_BoolVal(p structPointer, f field) *bool { + return structPointer_ifield(p, f).(*bool) +} + +// BoolSlice returns the address of a []bool field in the struct. +func structPointer_BoolSlice(p structPointer, f field) *[]bool { + return structPointer_ifield(p, f).(*[]bool) +} + +// String returns the address of a *string field in the struct. +func structPointer_String(p structPointer, f field) **string { + return structPointer_ifield(p, f).(**string) +} + +// StringVal returns the address of a string field in the struct. +func structPointer_StringVal(p structPointer, f field) *string { + return structPointer_ifield(p, f).(*string) +} + +// StringSlice returns the address of a []string field in the struct. +func structPointer_StringSlice(p structPointer, f field) *[]string { + return structPointer_ifield(p, f).(*[]string) +} + +// ExtMap returns the address of an extension map field in the struct. +func structPointer_ExtMap(p structPointer, f field) *map[int32]Extension { + return structPointer_ifield(p, f).(*map[int32]Extension) +} + +// NewAt returns the reflect.Value for a pointer to a field in the struct. +func structPointer_NewAt(p structPointer, f field, typ reflect.Type) reflect.Value { + return structPointer_field(p, f).Addr() +} + +// SetStructPointer writes a *struct field in the struct. +func structPointer_SetStructPointer(p structPointer, f field, q structPointer) { + structPointer_field(p, f).Set(q.v) +} + +// GetStructPointer reads a *struct field in the struct. +func structPointer_GetStructPointer(p structPointer, f field) structPointer { + return structPointer{structPointer_field(p, f)} +} + +// StructPointerSlice the address of a []*struct field in the struct. +func structPointer_StructPointerSlice(p structPointer, f field) structPointerSlice { + return structPointerSlice{structPointer_field(p, f)} +} + +// A structPointerSlice represents the address of a slice of pointers to structs +// (themselves messages or groups). That is, v.Type() is *[]*struct{...}. +type structPointerSlice struct { + v reflect.Value +} + +func (p structPointerSlice) Len() int { return p.v.Len() } +func (p structPointerSlice) Index(i int) structPointer { return structPointer{p.v.Index(i)} } +func (p structPointerSlice) Append(q structPointer) { + p.v.Set(reflect.Append(p.v, q.v)) +} + +var ( + int32Type = reflect.TypeOf(int32(0)) + uint32Type = reflect.TypeOf(uint32(0)) + float32Type = reflect.TypeOf(float32(0)) + int64Type = reflect.TypeOf(int64(0)) + uint64Type = reflect.TypeOf(uint64(0)) + float64Type = reflect.TypeOf(float64(0)) +) + +// A word32 represents a field of type *int32, *uint32, *float32, or *enum. 
+// That is, v.Type() is *int32, *uint32, *float32, or *enum and v is assignable. +type word32 struct { + v reflect.Value +} + +// IsNil reports whether p is nil. +func word32_IsNil(p word32) bool { + return p.v.IsNil() +} + +// Set sets p to point at a newly allocated word with bits set to x. +func word32_Set(p word32, o *Buffer, x uint32) { + t := p.v.Type().Elem() + switch t { + case int32Type: + if len(o.int32s) == 0 { + o.int32s = make([]int32, uint32PoolSize) + } + o.int32s[0] = int32(x) + p.v.Set(reflect.ValueOf(&o.int32s[0])) + o.int32s = o.int32s[1:] + return + case uint32Type: + if len(o.uint32s) == 0 { + o.uint32s = make([]uint32, uint32PoolSize) + } + o.uint32s[0] = x + p.v.Set(reflect.ValueOf(&o.uint32s[0])) + o.uint32s = o.uint32s[1:] + return + case float32Type: + if len(o.float32s) == 0 { + o.float32s = make([]float32, uint32PoolSize) + } + o.float32s[0] = math.Float32frombits(x) + p.v.Set(reflect.ValueOf(&o.float32s[0])) + o.float32s = o.float32s[1:] + return + } + + // must be enum + p.v.Set(reflect.New(t)) + p.v.Elem().SetInt(int64(int32(x))) +} + +// Get gets the bits pointed at by p, as a uint32. +func word32_Get(p word32) uint32 { + elem := p.v.Elem() + switch elem.Kind() { + case reflect.Int32: + return uint32(elem.Int()) + case reflect.Uint32: + return uint32(elem.Uint()) + case reflect.Float32: + return math.Float32bits(float32(elem.Float())) + } + panic("unreachable") +} + +// Word32 returns a reference to a *int32, *uint32, *float32, or *enum field in the struct. +func structPointer_Word32(p structPointer, f field) word32 { + return word32{structPointer_field(p, f)} +} + +// A word32Val represents a field of type int32, uint32, float32, or enum. +// That is, v.Type() is int32, uint32, float32, or enum and v is assignable. +type word32Val struct { + v reflect.Value +} + +// Set sets *p to x. +func word32Val_Set(p word32Val, x uint32) { + switch p.v.Type() { + case int32Type: + p.v.SetInt(int64(x)) + return + case uint32Type: + p.v.SetUint(uint64(x)) + return + case float32Type: + p.v.SetFloat(float64(math.Float32frombits(x))) + return + } + + // must be enum + p.v.SetInt(int64(int32(x))) +} + +// Get gets the bits pointed at by p, as a uint32. +func word32Val_Get(p word32Val) uint32 { + elem := p.v + switch elem.Kind() { + case reflect.Int32: + return uint32(elem.Int()) + case reflect.Uint32: + return uint32(elem.Uint()) + case reflect.Float32: + return math.Float32bits(float32(elem.Float())) + } + panic("unreachable") +} + +// Word32Val returns a reference to a int32, uint32, float32, or enum field in the struct. +func structPointer_Word32Val(p structPointer, f field) word32Val { + return word32Val{structPointer_field(p, f)} +} + +// A word32Slice is a slice of 32-bit values. +// That is, v.Type() is []int32, []uint32, []float32, or []enum. 
+type word32Slice struct { + v reflect.Value +} + +func (p word32Slice) Append(x uint32) { + n, m := p.v.Len(), p.v.Cap() + if n < m { + p.v.SetLen(n + 1) + } else { + t := p.v.Type().Elem() + p.v.Set(reflect.Append(p.v, reflect.Zero(t))) + } + elem := p.v.Index(n) + switch elem.Kind() { + case reflect.Int32: + elem.SetInt(int64(int32(x))) + case reflect.Uint32: + elem.SetUint(uint64(x)) + case reflect.Float32: + elem.SetFloat(float64(math.Float32frombits(x))) + } +} + +func (p word32Slice) Len() int { + return p.v.Len() +} + +func (p word32Slice) Index(i int) uint32 { + elem := p.v.Index(i) + switch elem.Kind() { + case reflect.Int32: + return uint32(elem.Int()) + case reflect.Uint32: + return uint32(elem.Uint()) + case reflect.Float32: + return math.Float32bits(float32(elem.Float())) + } + panic("unreachable") +} + +// Word32Slice returns a reference to a []int32, []uint32, []float32, or []enum field in the struct. +func structPointer_Word32Slice(p structPointer, f field) word32Slice { + return word32Slice{structPointer_field(p, f)} +} + +// word64 is like word32 but for 64-bit values. +type word64 struct { + v reflect.Value +} + +func word64_Set(p word64, o *Buffer, x uint64) { + t := p.v.Type().Elem() + switch t { + case int64Type: + if len(o.int64s) == 0 { + o.int64s = make([]int64, uint64PoolSize) + } + o.int64s[0] = int64(x) + p.v.Set(reflect.ValueOf(&o.int64s[0])) + o.int64s = o.int64s[1:] + return + case uint64Type: + if len(o.uint64s) == 0 { + o.uint64s = make([]uint64, uint64PoolSize) + } + o.uint64s[0] = x + p.v.Set(reflect.ValueOf(&o.uint64s[0])) + o.uint64s = o.uint64s[1:] + return + case float64Type: + if len(o.float64s) == 0 { + o.float64s = make([]float64, uint64PoolSize) + } + o.float64s[0] = math.Float64frombits(x) + p.v.Set(reflect.ValueOf(&o.float64s[0])) + o.float64s = o.float64s[1:] + return + } + panic("unreachable") +} + +func word64_IsNil(p word64) bool { + return p.v.IsNil() +} + +func word64_Get(p word64) uint64 { + elem := p.v.Elem() + switch elem.Kind() { + case reflect.Int64: + return uint64(elem.Int()) + case reflect.Uint64: + return elem.Uint() + case reflect.Float64: + return math.Float64bits(elem.Float()) + } + panic("unreachable") +} + +func structPointer_Word64(p structPointer, f field) word64 { + return word64{structPointer_field(p, f)} +} + +// word64Val is like word32Val but for 64-bit values. 
+type word64Val struct { + v reflect.Value +} + +func word64Val_Set(p word64Val, o *Buffer, x uint64) { + switch p.v.Type() { + case int64Type: + p.v.SetInt(int64(x)) + return + case uint64Type: + p.v.SetUint(x) + return + case float64Type: + p.v.SetFloat(math.Float64frombits(x)) + return + } + panic("unreachable") +} + +func word64Val_Get(p word64Val) uint64 { + elem := p.v + switch elem.Kind() { + case reflect.Int64: + return uint64(elem.Int()) + case reflect.Uint64: + return elem.Uint() + case reflect.Float64: + return math.Float64bits(elem.Float()) + } + panic("unreachable") +} + +func structPointer_Word64Val(p structPointer, f field) word64Val { + return word64Val{structPointer_field(p, f)} +} + +type word64Slice struct { + v reflect.Value +} + +func (p word64Slice) Append(x uint64) { + n, m := p.v.Len(), p.v.Cap() + if n < m { + p.v.SetLen(n + 1) + } else { + t := p.v.Type().Elem() + p.v.Set(reflect.Append(p.v, reflect.Zero(t))) + } + elem := p.v.Index(n) + switch elem.Kind() { + case reflect.Int64: + elem.SetInt(int64(int64(x))) + case reflect.Uint64: + elem.SetUint(uint64(x)) + case reflect.Float64: + elem.SetFloat(float64(math.Float64frombits(x))) + } +} + +func (p word64Slice) Len() int { + return p.v.Len() +} + +func (p word64Slice) Index(i int) uint64 { + elem := p.v.Index(i) + switch elem.Kind() { + case reflect.Int64: + return uint64(elem.Int()) + case reflect.Uint64: + return uint64(elem.Uint()) + case reflect.Float64: + return math.Float64bits(float64(elem.Float())) + } + panic("unreachable") +} + +func structPointer_Word64Slice(p structPointer, f field) word64Slice { + return word64Slice{structPointer_field(p, f)} +} diff --git a/vendor/github.com/docker/distribution/vendor/github.com/golang/protobuf/proto/pointer_unsafe.go b/vendor/github.com/docker/distribution/vendor/github.com/golang/protobuf/proto/pointer_unsafe.go new file mode 100644 index 000000000..e9be0fe92 --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/github.com/golang/protobuf/proto/pointer_unsafe.go @@ -0,0 +1,266 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2012 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +// +build !appengine + +// This file contains the implementation of the proto field accesses using package unsafe. + +package proto + +import ( + "reflect" + "unsafe" +) + +// NOTE: These type_Foo functions would more idiomatically be methods, +// but Go does not allow methods on pointer types, and we must preserve +// some pointer type for the garbage collector. We use these +// funcs with clunky names as our poor approximation to methods. +// +// An alternative would be +// type structPointer struct { p unsafe.Pointer } +// but that does not registerize as well. + +// A structPointer is a pointer to a struct. +type structPointer unsafe.Pointer + +// toStructPointer returns a structPointer equivalent to the given reflect value. +func toStructPointer(v reflect.Value) structPointer { + return structPointer(unsafe.Pointer(v.Pointer())) +} + +// IsNil reports whether p is nil. +func structPointer_IsNil(p structPointer) bool { + return p == nil +} + +// Interface returns the struct pointer, assumed to have element type t, +// as an interface value. +func structPointer_Interface(p structPointer, t reflect.Type) interface{} { + return reflect.NewAt(t, unsafe.Pointer(p)).Interface() +} + +// A field identifies a field in a struct, accessible from a structPointer. +// In this implementation, a field is identified by its byte offset from the start of the struct. +type field uintptr + +// toField returns a field equivalent to the given reflect field. +func toField(f *reflect.StructField) field { + return field(f.Offset) +} + +// invalidField is an invalid field identifier. +const invalidField = ^field(0) + +// IsValid reports whether the field identifier is valid. +func (f field) IsValid() bool { + return f != ^field(0) +} + +// Bytes returns the address of a []byte field in the struct. +func structPointer_Bytes(p structPointer, f field) *[]byte { + return (*[]byte)(unsafe.Pointer(uintptr(p) + uintptr(f))) +} + +// BytesSlice returns the address of a [][]byte field in the struct. +func structPointer_BytesSlice(p structPointer, f field) *[][]byte { + return (*[][]byte)(unsafe.Pointer(uintptr(p) + uintptr(f))) +} + +// Bool returns the address of a *bool field in the struct. +func structPointer_Bool(p structPointer, f field) **bool { + return (**bool)(unsafe.Pointer(uintptr(p) + uintptr(f))) +} + +// BoolVal returns the address of a bool field in the struct. +func structPointer_BoolVal(p structPointer, f field) *bool { + return (*bool)(unsafe.Pointer(uintptr(p) + uintptr(f))) +} + +// BoolSlice returns the address of a []bool field in the struct. +func structPointer_BoolSlice(p structPointer, f field) *[]bool { + return (*[]bool)(unsafe.Pointer(uintptr(p) + uintptr(f))) +} + +// String returns the address of a *string field in the struct. +func structPointer_String(p structPointer, f field) **string { + return (**string)(unsafe.Pointer(uintptr(p) + uintptr(f))) +} + +// StringVal returns the address of a string field in the struct. 
+func structPointer_StringVal(p structPointer, f field) *string { + return (*string)(unsafe.Pointer(uintptr(p) + uintptr(f))) +} + +// StringSlice returns the address of a []string field in the struct. +func structPointer_StringSlice(p structPointer, f field) *[]string { + return (*[]string)(unsafe.Pointer(uintptr(p) + uintptr(f))) +} + +// ExtMap returns the address of an extension map field in the struct. +func structPointer_ExtMap(p structPointer, f field) *map[int32]Extension { + return (*map[int32]Extension)(unsafe.Pointer(uintptr(p) + uintptr(f))) +} + +// NewAt returns the reflect.Value for a pointer to a field in the struct. +func structPointer_NewAt(p structPointer, f field, typ reflect.Type) reflect.Value { + return reflect.NewAt(typ, unsafe.Pointer(uintptr(p)+uintptr(f))) +} + +// SetStructPointer writes a *struct field in the struct. +func structPointer_SetStructPointer(p structPointer, f field, q structPointer) { + *(*structPointer)(unsafe.Pointer(uintptr(p) + uintptr(f))) = q +} + +// GetStructPointer reads a *struct field in the struct. +func structPointer_GetStructPointer(p structPointer, f field) structPointer { + return *(*structPointer)(unsafe.Pointer(uintptr(p) + uintptr(f))) +} + +// StructPointerSlice the address of a []*struct field in the struct. +func structPointer_StructPointerSlice(p structPointer, f field) *structPointerSlice { + return (*structPointerSlice)(unsafe.Pointer(uintptr(p) + uintptr(f))) +} + +// A structPointerSlice represents a slice of pointers to structs (themselves submessages or groups). +type structPointerSlice []structPointer + +func (v *structPointerSlice) Len() int { return len(*v) } +func (v *structPointerSlice) Index(i int) structPointer { return (*v)[i] } +func (v *structPointerSlice) Append(p structPointer) { *v = append(*v, p) } + +// A word32 is the address of a "pointer to 32-bit value" field. +type word32 **uint32 + +// IsNil reports whether *v is nil. +func word32_IsNil(p word32) bool { + return *p == nil +} + +// Set sets *v to point at a newly allocated word set to x. +func word32_Set(p word32, o *Buffer, x uint32) { + if len(o.uint32s) == 0 { + o.uint32s = make([]uint32, uint32PoolSize) + } + o.uint32s[0] = x + *p = &o.uint32s[0] + o.uint32s = o.uint32s[1:] +} + +// Get gets the value pointed at by *v. +func word32_Get(p word32) uint32 { + return **p +} + +// Word32 returns the address of a *int32, *uint32, *float32, or *enum field in the struct. +func structPointer_Word32(p structPointer, f field) word32 { + return word32((**uint32)(unsafe.Pointer(uintptr(p) + uintptr(f)))) +} + +// A word32Val is the address of a 32-bit value field. +type word32Val *uint32 + +// Set sets *p to x. +func word32Val_Set(p word32Val, x uint32) { + *p = x +} + +// Get gets the value pointed at by p. +func word32Val_Get(p word32Val) uint32 { + return *p +} + +// Word32Val returns the address of a *int32, *uint32, *float32, or *enum field in the struct. +func structPointer_Word32Val(p structPointer, f field) word32Val { + return word32Val((*uint32)(unsafe.Pointer(uintptr(p) + uintptr(f)))) +} + +// A word32Slice is a slice of 32-bit values. +type word32Slice []uint32 + +func (v *word32Slice) Append(x uint32) { *v = append(*v, x) } +func (v *word32Slice) Len() int { return len(*v) } +func (v *word32Slice) Index(i int) uint32 { return (*v)[i] } + +// Word32Slice returns the address of a []int32, []uint32, []float32, or []enum field in the struct. 
+func structPointer_Word32Slice(p structPointer, f field) *word32Slice { + return (*word32Slice)(unsafe.Pointer(uintptr(p) + uintptr(f))) +} + +// word64 is like word32 but for 64-bit values. +type word64 **uint64 + +func word64_Set(p word64, o *Buffer, x uint64) { + if len(o.uint64s) == 0 { + o.uint64s = make([]uint64, uint64PoolSize) + } + o.uint64s[0] = x + *p = &o.uint64s[0] + o.uint64s = o.uint64s[1:] +} + +func word64_IsNil(p word64) bool { + return *p == nil +} + +func word64_Get(p word64) uint64 { + return **p +} + +func structPointer_Word64(p structPointer, f field) word64 { + return word64((**uint64)(unsafe.Pointer(uintptr(p) + uintptr(f)))) +} + +// word64Val is like word32Val but for 64-bit values. +type word64Val *uint64 + +func word64Val_Set(p word64Val, o *Buffer, x uint64) { + *p = x +} + +func word64Val_Get(p word64Val) uint64 { + return *p +} + +func structPointer_Word64Val(p structPointer, f field) word64Val { + return word64Val((*uint64)(unsafe.Pointer(uintptr(p) + uintptr(f)))) +} + +// word64Slice is like word32Slice but for 64-bit values. +type word64Slice []uint64 + +func (v *word64Slice) Append(x uint64) { *v = append(*v, x) } +func (v *word64Slice) Len() int { return len(*v) } +func (v *word64Slice) Index(i int) uint64 { return (*v)[i] } + +func structPointer_Word64Slice(p structPointer, f field) *word64Slice { + return (*word64Slice)(unsafe.Pointer(uintptr(p) + uintptr(f))) +} diff --git a/vendor/github.com/docker/distribution/vendor/github.com/golang/protobuf/proto/properties.go b/vendor/github.com/docker/distribution/vendor/github.com/golang/protobuf/proto/properties.go new file mode 100644 index 000000000..4fe2ec22e --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/github.com/golang/protobuf/proto/properties.go @@ -0,0 +1,846 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2010 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ +package proto + +/* + * Routines for encoding data into the wire format for protocol buffers. + */ + +import ( + "fmt" + "log" + "os" + "reflect" + "sort" + "strconv" + "strings" + "sync" +) + +const debug bool = false + +// Constants that identify the encoding of a value on the wire. +const ( + WireVarint = 0 + WireFixed64 = 1 + WireBytes = 2 + WireStartGroup = 3 + WireEndGroup = 4 + WireFixed32 = 5 +) + +const startSize = 10 // initial slice/string sizes + +// Encoders are defined in encode.go +// An encoder outputs the full representation of a field, including its +// tag and encoder type. +type encoder func(p *Buffer, prop *Properties, base structPointer) error + +// A valueEncoder encodes a single integer in a particular encoding. +type valueEncoder func(o *Buffer, x uint64) error + +// Sizers are defined in encode.go +// A sizer returns the encoded size of a field, including its tag and encoder +// type. +type sizer func(prop *Properties, base structPointer) int + +// A valueSizer returns the encoded size of a single integer in a particular +// encoding. +type valueSizer func(x uint64) int + +// Decoders are defined in decode.go +// A decoder creates a value from its wire representation. +// Unrecognized subelements are saved in unrec. +type decoder func(p *Buffer, prop *Properties, base structPointer) error + +// A valueDecoder decodes a single integer in a particular encoding. +type valueDecoder func(o *Buffer) (x uint64, err error) + +// A oneofMarshaler does the marshaling for all oneof fields in a message. +type oneofMarshaler func(Message, *Buffer) error + +// A oneofUnmarshaler does the unmarshaling for a oneof field in a message. +type oneofUnmarshaler func(Message, int, int, *Buffer) (bool, error) + +// A oneofSizer does the sizing for all oneof fields in a message. +type oneofSizer func(Message) int + +// tagMap is an optimization over map[int]int for typical protocol buffer +// use-cases. Encoded protocol buffers are often in tag order with small tag +// numbers. +type tagMap struct { + fastTags []int + slowTags map[int]int +} + +// tagMapFastLimit is the upper bound on the tag number that will be stored in +// the tagMap slice rather than its map. +const tagMapFastLimit = 1024 + +func (p *tagMap) get(t int) (int, bool) { + if t > 0 && t < tagMapFastLimit { + if t >= len(p.fastTags) { + return 0, false + } + fi := p.fastTags[t] + return fi, fi >= 0 + } + fi, ok := p.slowTags[t] + return fi, ok +} + +func (p *tagMap) put(t int, fi int) { + if t > 0 && t < tagMapFastLimit { + for len(p.fastTags) < t+1 { + p.fastTags = append(p.fastTags, -1) + } + p.fastTags[t] = fi + return + } + if p.slowTags == nil { + p.slowTags = make(map[int]int) + } + p.slowTags[t] = fi +} + +// StructProperties represents properties for all the fields of a struct. +// decoderTags and decoderOrigNames should only be used by the decoder. +type StructProperties struct { + Prop []*Properties // properties for each field + reqCount int // required count + decoderTags tagMap // map from proto tag to struct field number + decoderOrigNames map[string]int // map from original name to struct field number + order []int // list of struct field numbers in tag order + unrecField field // field id of the XXX_unrecognized []byte field + extendable bool // is this an extendable proto + + oneofMarshaler oneofMarshaler + oneofUnmarshaler oneofUnmarshaler + oneofSizer oneofSizer + stype reflect.Type + + // OneofTypes contains information about the oneof fields in this message. 
+ // It is keyed by the original name of a field. + OneofTypes map[string]*OneofProperties +} + +// OneofProperties represents information about a specific field in a oneof. +type OneofProperties struct { + Type reflect.Type // pointer to generated struct type for this oneof field + Field int // struct field number of the containing oneof in the message + Prop *Properties +} + +// Implement the sorting interface so we can sort the fields in tag order, as recommended by the spec. +// See encode.go, (*Buffer).enc_struct. + +func (sp *StructProperties) Len() int { return len(sp.order) } +func (sp *StructProperties) Less(i, j int) bool { + return sp.Prop[sp.order[i]].Tag < sp.Prop[sp.order[j]].Tag +} +func (sp *StructProperties) Swap(i, j int) { sp.order[i], sp.order[j] = sp.order[j], sp.order[i] } + +// Properties represents the protocol-specific behavior of a single struct field. +type Properties struct { + Name string // name of the field, for error messages + OrigName string // original name before protocol compiler (always set) + JSONName string // name to use for JSON; determined by protoc + Wire string + WireType int + Tag int + Required bool + Optional bool + Repeated bool + Packed bool // relevant for repeated primitives only + Enum string // set for enum types only + proto3 bool // whether this is known to be a proto3 field; set for []byte only + oneof bool // whether this is a oneof field + + Default string // default value + HasDefault bool // whether an explicit default was provided + def_uint64 uint64 + + enc encoder + valEnc valueEncoder // set for bool and numeric types only + field field + tagcode []byte // encoding of EncodeVarint((Tag<<3)|WireType) + tagbuf [8]byte + stype reflect.Type // set for struct types only + sprop *StructProperties // set for struct types only + isMarshaler bool + isUnmarshaler bool + + mtype reflect.Type // set for map types only + mkeyprop *Properties // set for map types only + mvalprop *Properties // set for map types only + + size sizer + valSize valueSizer // set for bool and numeric types only + + dec decoder + valDec valueDecoder // set for bool and numeric types only + + // If this is a packable field, this will be the decoder for the packed version of the field. + packedDec decoder +} + +// String formats the properties in the protobuf struct field tag style. +func (p *Properties) String() string { + s := p.Wire + s = "," + s += strconv.Itoa(p.Tag) + if p.Required { + s += ",req" + } + if p.Optional { + s += ",opt" + } + if p.Repeated { + s += ",rep" + } + if p.Packed { + s += ",packed" + } + s += ",name=" + p.OrigName + if p.JSONName != p.OrigName { + s += ",json=" + p.JSONName + } + if p.proto3 { + s += ",proto3" + } + if p.oneof { + s += ",oneof" + } + if len(p.Enum) > 0 { + s += ",enum=" + p.Enum + } + if p.HasDefault { + s += ",def=" + p.Default + } + return s +} + +// Parse populates p by parsing a string in the protobuf struct field tag style. +func (p *Properties) Parse(s string) { + // "bytes,49,opt,name=foo,def=hello!" + fields := strings.Split(s, ",") // breaks def=, but handled below. 
+ if len(fields) < 2 { + fmt.Fprintf(os.Stderr, "proto: tag has too few fields: %q\n", s) + return + } + + p.Wire = fields[0] + switch p.Wire { + case "varint": + p.WireType = WireVarint + p.valEnc = (*Buffer).EncodeVarint + p.valDec = (*Buffer).DecodeVarint + p.valSize = sizeVarint + case "fixed32": + p.WireType = WireFixed32 + p.valEnc = (*Buffer).EncodeFixed32 + p.valDec = (*Buffer).DecodeFixed32 + p.valSize = sizeFixed32 + case "fixed64": + p.WireType = WireFixed64 + p.valEnc = (*Buffer).EncodeFixed64 + p.valDec = (*Buffer).DecodeFixed64 + p.valSize = sizeFixed64 + case "zigzag32": + p.WireType = WireVarint + p.valEnc = (*Buffer).EncodeZigzag32 + p.valDec = (*Buffer).DecodeZigzag32 + p.valSize = sizeZigzag32 + case "zigzag64": + p.WireType = WireVarint + p.valEnc = (*Buffer).EncodeZigzag64 + p.valDec = (*Buffer).DecodeZigzag64 + p.valSize = sizeZigzag64 + case "bytes", "group": + p.WireType = WireBytes + // no numeric converter for non-numeric types + default: + fmt.Fprintf(os.Stderr, "proto: tag has unknown wire type: %q\n", s) + return + } + + var err error + p.Tag, err = strconv.Atoi(fields[1]) + if err != nil { + return + } + + for i := 2; i < len(fields); i++ { + f := fields[i] + switch { + case f == "req": + p.Required = true + case f == "opt": + p.Optional = true + case f == "rep": + p.Repeated = true + case f == "packed": + p.Packed = true + case strings.HasPrefix(f, "name="): + p.OrigName = f[5:] + case strings.HasPrefix(f, "json="): + p.JSONName = f[5:] + case strings.HasPrefix(f, "enum="): + p.Enum = f[5:] + case f == "proto3": + p.proto3 = true + case f == "oneof": + p.oneof = true + case strings.HasPrefix(f, "def="): + p.HasDefault = true + p.Default = f[4:] // rest of string + if i+1 < len(fields) { + // Commas aren't escaped, and def is always last. + p.Default += "," + strings.Join(fields[i+1:], ",") + break + } + } + } +} + +func logNoSliceEnc(t1, t2 reflect.Type) { + fmt.Fprintf(os.Stderr, "proto: no slice oenc for %T = []%T\n", t1, t2) +} + +var protoMessageType = reflect.TypeOf((*Message)(nil)).Elem() + +// Initialize the fields for encoding and decoding. 
+func (p *Properties) setEncAndDec(typ reflect.Type, f *reflect.StructField, lockGetProp bool) { + p.enc = nil + p.dec = nil + p.size = nil + + switch t1 := typ; t1.Kind() { + default: + fmt.Fprintf(os.Stderr, "proto: no coders for %v\n", t1) + + // proto3 scalar types + + case reflect.Bool: + p.enc = (*Buffer).enc_proto3_bool + p.dec = (*Buffer).dec_proto3_bool + p.size = size_proto3_bool + case reflect.Int32: + p.enc = (*Buffer).enc_proto3_int32 + p.dec = (*Buffer).dec_proto3_int32 + p.size = size_proto3_int32 + case reflect.Uint32: + p.enc = (*Buffer).enc_proto3_uint32 + p.dec = (*Buffer).dec_proto3_int32 // can reuse + p.size = size_proto3_uint32 + case reflect.Int64, reflect.Uint64: + p.enc = (*Buffer).enc_proto3_int64 + p.dec = (*Buffer).dec_proto3_int64 + p.size = size_proto3_int64 + case reflect.Float32: + p.enc = (*Buffer).enc_proto3_uint32 // can just treat them as bits + p.dec = (*Buffer).dec_proto3_int32 + p.size = size_proto3_uint32 + case reflect.Float64: + p.enc = (*Buffer).enc_proto3_int64 // can just treat them as bits + p.dec = (*Buffer).dec_proto3_int64 + p.size = size_proto3_int64 + case reflect.String: + p.enc = (*Buffer).enc_proto3_string + p.dec = (*Buffer).dec_proto3_string + p.size = size_proto3_string + + case reflect.Ptr: + switch t2 := t1.Elem(); t2.Kind() { + default: + fmt.Fprintf(os.Stderr, "proto: no encoder function for %v -> %v\n", t1, t2) + break + case reflect.Bool: + p.enc = (*Buffer).enc_bool + p.dec = (*Buffer).dec_bool + p.size = size_bool + case reflect.Int32: + p.enc = (*Buffer).enc_int32 + p.dec = (*Buffer).dec_int32 + p.size = size_int32 + case reflect.Uint32: + p.enc = (*Buffer).enc_uint32 + p.dec = (*Buffer).dec_int32 // can reuse + p.size = size_uint32 + case reflect.Int64, reflect.Uint64: + p.enc = (*Buffer).enc_int64 + p.dec = (*Buffer).dec_int64 + p.size = size_int64 + case reflect.Float32: + p.enc = (*Buffer).enc_uint32 // can just treat them as bits + p.dec = (*Buffer).dec_int32 + p.size = size_uint32 + case reflect.Float64: + p.enc = (*Buffer).enc_int64 // can just treat them as bits + p.dec = (*Buffer).dec_int64 + p.size = size_int64 + case reflect.String: + p.enc = (*Buffer).enc_string + p.dec = (*Buffer).dec_string + p.size = size_string + case reflect.Struct: + p.stype = t1.Elem() + p.isMarshaler = isMarshaler(t1) + p.isUnmarshaler = isUnmarshaler(t1) + if p.Wire == "bytes" { + p.enc = (*Buffer).enc_struct_message + p.dec = (*Buffer).dec_struct_message + p.size = size_struct_message + } else { + p.enc = (*Buffer).enc_struct_group + p.dec = (*Buffer).dec_struct_group + p.size = size_struct_group + } + } + + case reflect.Slice: + switch t2 := t1.Elem(); t2.Kind() { + default: + logNoSliceEnc(t1, t2) + break + case reflect.Bool: + if p.Packed { + p.enc = (*Buffer).enc_slice_packed_bool + p.size = size_slice_packed_bool + } else { + p.enc = (*Buffer).enc_slice_bool + p.size = size_slice_bool + } + p.dec = (*Buffer).dec_slice_bool + p.packedDec = (*Buffer).dec_slice_packed_bool + case reflect.Int32: + if p.Packed { + p.enc = (*Buffer).enc_slice_packed_int32 + p.size = size_slice_packed_int32 + } else { + p.enc = (*Buffer).enc_slice_int32 + p.size = size_slice_int32 + } + p.dec = (*Buffer).dec_slice_int32 + p.packedDec = (*Buffer).dec_slice_packed_int32 + case reflect.Uint32: + if p.Packed { + p.enc = (*Buffer).enc_slice_packed_uint32 + p.size = size_slice_packed_uint32 + } else { + p.enc = (*Buffer).enc_slice_uint32 + p.size = size_slice_uint32 + } + p.dec = (*Buffer).dec_slice_int32 + p.packedDec = (*Buffer).dec_slice_packed_int32 + case 
reflect.Int64, reflect.Uint64: + if p.Packed { + p.enc = (*Buffer).enc_slice_packed_int64 + p.size = size_slice_packed_int64 + } else { + p.enc = (*Buffer).enc_slice_int64 + p.size = size_slice_int64 + } + p.dec = (*Buffer).dec_slice_int64 + p.packedDec = (*Buffer).dec_slice_packed_int64 + case reflect.Uint8: + p.enc = (*Buffer).enc_slice_byte + p.dec = (*Buffer).dec_slice_byte + p.size = size_slice_byte + // This is a []byte, which is either a bytes field, + // or the value of a map field. In the latter case, + // we always encode an empty []byte, so we should not + // use the proto3 enc/size funcs. + // f == nil iff this is the key/value of a map field. + if p.proto3 && f != nil { + p.enc = (*Buffer).enc_proto3_slice_byte + p.size = size_proto3_slice_byte + } + case reflect.Float32, reflect.Float64: + switch t2.Bits() { + case 32: + // can just treat them as bits + if p.Packed { + p.enc = (*Buffer).enc_slice_packed_uint32 + p.size = size_slice_packed_uint32 + } else { + p.enc = (*Buffer).enc_slice_uint32 + p.size = size_slice_uint32 + } + p.dec = (*Buffer).dec_slice_int32 + p.packedDec = (*Buffer).dec_slice_packed_int32 + case 64: + // can just treat them as bits + if p.Packed { + p.enc = (*Buffer).enc_slice_packed_int64 + p.size = size_slice_packed_int64 + } else { + p.enc = (*Buffer).enc_slice_int64 + p.size = size_slice_int64 + } + p.dec = (*Buffer).dec_slice_int64 + p.packedDec = (*Buffer).dec_slice_packed_int64 + default: + logNoSliceEnc(t1, t2) + break + } + case reflect.String: + p.enc = (*Buffer).enc_slice_string + p.dec = (*Buffer).dec_slice_string + p.size = size_slice_string + case reflect.Ptr: + switch t3 := t2.Elem(); t3.Kind() { + default: + fmt.Fprintf(os.Stderr, "proto: no ptr oenc for %T -> %T -> %T\n", t1, t2, t3) + break + case reflect.Struct: + p.stype = t2.Elem() + p.isMarshaler = isMarshaler(t2) + p.isUnmarshaler = isUnmarshaler(t2) + if p.Wire == "bytes" { + p.enc = (*Buffer).enc_slice_struct_message + p.dec = (*Buffer).dec_slice_struct_message + p.size = size_slice_struct_message + } else { + p.enc = (*Buffer).enc_slice_struct_group + p.dec = (*Buffer).dec_slice_struct_group + p.size = size_slice_struct_group + } + } + case reflect.Slice: + switch t2.Elem().Kind() { + default: + fmt.Fprintf(os.Stderr, "proto: no slice elem oenc for %T -> %T -> %T\n", t1, t2, t2.Elem()) + break + case reflect.Uint8: + p.enc = (*Buffer).enc_slice_slice_byte + p.dec = (*Buffer).dec_slice_slice_byte + p.size = size_slice_slice_byte + } + } + + case reflect.Map: + p.enc = (*Buffer).enc_new_map + p.dec = (*Buffer).dec_new_map + p.size = size_new_map + + p.mtype = t1 + p.mkeyprop = &Properties{} + p.mkeyprop.init(reflect.PtrTo(p.mtype.Key()), "Key", f.Tag.Get("protobuf_key"), nil, lockGetProp) + p.mvalprop = &Properties{} + vtype := p.mtype.Elem() + if vtype.Kind() != reflect.Ptr && vtype.Kind() != reflect.Slice { + // The value type is not a message (*T) or bytes ([]byte), + // so we need encoders for the pointer to this type. 
+ vtype = reflect.PtrTo(vtype) + } + p.mvalprop.init(vtype, "Value", f.Tag.Get("protobuf_val"), nil, lockGetProp) + } + + // precalculate tag code + wire := p.WireType + if p.Packed { + wire = WireBytes + } + x := uint32(p.Tag)<<3 | uint32(wire) + i := 0 + for i = 0; x > 127; i++ { + p.tagbuf[i] = 0x80 | uint8(x&0x7F) + x >>= 7 + } + p.tagbuf[i] = uint8(x) + p.tagcode = p.tagbuf[0 : i+1] + + if p.stype != nil { + if lockGetProp { + p.sprop = GetProperties(p.stype) + } else { + p.sprop = getPropertiesLocked(p.stype) + } + } +} + +var ( + marshalerType = reflect.TypeOf((*Marshaler)(nil)).Elem() + unmarshalerType = reflect.TypeOf((*Unmarshaler)(nil)).Elem() +) + +// isMarshaler reports whether type t implements Marshaler. +func isMarshaler(t reflect.Type) bool { + // We're checking for (likely) pointer-receiver methods + // so if t is not a pointer, something is very wrong. + // The calls above only invoke isMarshaler on pointer types. + if t.Kind() != reflect.Ptr { + panic("proto: misuse of isMarshaler") + } + return t.Implements(marshalerType) +} + +// isUnmarshaler reports whether type t implements Unmarshaler. +func isUnmarshaler(t reflect.Type) bool { + // We're checking for (likely) pointer-receiver methods + // so if t is not a pointer, something is very wrong. + // The calls above only invoke isUnmarshaler on pointer types. + if t.Kind() != reflect.Ptr { + panic("proto: misuse of isUnmarshaler") + } + return t.Implements(unmarshalerType) +} + +// Init populates the properties from a protocol buffer struct tag. +func (p *Properties) Init(typ reflect.Type, name, tag string, f *reflect.StructField) { + p.init(typ, name, tag, f, true) +} + +func (p *Properties) init(typ reflect.Type, name, tag string, f *reflect.StructField, lockGetProp bool) { + // "bytes,49,opt,def=hello!" + p.Name = name + p.OrigName = name + if f != nil { + p.field = toField(f) + } + if tag == "" { + return + } + p.Parse(tag) + p.setEncAndDec(typ, f, lockGetProp) +} + +var ( + propertiesMu sync.RWMutex + propertiesMap = make(map[reflect.Type]*StructProperties) +) + +// GetProperties returns the list of properties for the type represented by t. +// t must represent a generated struct type of a protocol message. +func GetProperties(t reflect.Type) *StructProperties { + if t.Kind() != reflect.Struct { + panic("proto: type must have kind struct") + } + + // Most calls to GetProperties in a long-running program will be + // retrieving details for types we have seen before. + propertiesMu.RLock() + sprop, ok := propertiesMap[t] + propertiesMu.RUnlock() + if ok { + if collectStats { + stats.Chit++ + } + return sprop + } + + propertiesMu.Lock() + sprop = getPropertiesLocked(t) + propertiesMu.Unlock() + return sprop +} + +// getPropertiesLocked requires that propertiesMu is held. +func getPropertiesLocked(t reflect.Type) *StructProperties { + if prop, ok := propertiesMap[t]; ok { + if collectStats { + stats.Chit++ + } + return prop + } + if collectStats { + stats.Cmiss++ + } + + prop := new(StructProperties) + // in case of recursive protos, fill this in now. 
+ propertiesMap[t] = prop + + // build properties + prop.extendable = reflect.PtrTo(t).Implements(extendableProtoType) + prop.unrecField = invalidField + prop.Prop = make([]*Properties, t.NumField()) + prop.order = make([]int, t.NumField()) + + for i := 0; i < t.NumField(); i++ { + f := t.Field(i) + p := new(Properties) + name := f.Name + p.init(f.Type, name, f.Tag.Get("protobuf"), &f, false) + + if f.Name == "XXX_extensions" { // special case + p.enc = (*Buffer).enc_map + p.dec = nil // not needed + p.size = size_map + } + if f.Name == "XXX_unrecognized" { // special case + prop.unrecField = toField(&f) + } + oneof := f.Tag.Get("protobuf_oneof") != "" // special case + prop.Prop[i] = p + prop.order[i] = i + if debug { + print(i, " ", f.Name, " ", t.String(), " ") + if p.Tag > 0 { + print(p.String()) + } + print("\n") + } + if p.enc == nil && !strings.HasPrefix(f.Name, "XXX_") && !oneof { + fmt.Fprintln(os.Stderr, "proto: no encoder for", f.Name, f.Type.String(), "[GetProperties]") + } + } + + // Re-order prop.order. + sort.Sort(prop) + + type oneofMessage interface { + XXX_OneofFuncs() (func(Message, *Buffer) error, func(Message, int, int, *Buffer) (bool, error), func(Message) int, []interface{}) + } + if om, ok := reflect.Zero(reflect.PtrTo(t)).Interface().(oneofMessage); ok { + var oots []interface{} + prop.oneofMarshaler, prop.oneofUnmarshaler, prop.oneofSizer, oots = om.XXX_OneofFuncs() + prop.stype = t + + // Interpret oneof metadata. + prop.OneofTypes = make(map[string]*OneofProperties) + for _, oot := range oots { + oop := &OneofProperties{ + Type: reflect.ValueOf(oot).Type(), // *T + Prop: new(Properties), + } + sft := oop.Type.Elem().Field(0) + oop.Prop.Name = sft.Name + oop.Prop.Parse(sft.Tag.Get("protobuf")) + // There will be exactly one interface field that + // this new value is assignable to. + for i := 0; i < t.NumField(); i++ { + f := t.Field(i) + if f.Type.Kind() != reflect.Interface { + continue + } + if !oop.Type.AssignableTo(f.Type) { + continue + } + oop.Field = i + break + } + prop.OneofTypes[oop.Prop.OrigName] = oop + } + } + + // build required counts + // build tags + reqCount := 0 + prop.decoderOrigNames = make(map[string]int) + for i, p := range prop.Prop { + if strings.HasPrefix(p.Name, "XXX_") { + // Internal fields should not appear in tags/origNames maps. + // They are handled specially when encoding and decoding. + continue + } + if p.Required { + reqCount++ + } + prop.decoderTags.put(p.Tag, i) + prop.decoderOrigNames[p.OrigName] = i + } + prop.reqCount = reqCount + + return prop +} + +// Return the Properties object for the x[0]'th field of the structure. +func propByIndex(t reflect.Type, x []int) *Properties { + if len(x) != 1 { + fmt.Fprintf(os.Stderr, "proto: field index dimension %d (not 1) for type %s\n", len(x), t) + return nil + } + prop := GetProperties(t) + return prop.Prop[x[0]] +} + +// Get the address and type of a pointer to a struct from an interface. +func getbase(pb Message) (t reflect.Type, b structPointer, err error) { + if pb == nil { + err = ErrNil + return + } + // get the reflect type of the pointer to the struct. + t = reflect.TypeOf(pb) + // get the address of the struct. + value := reflect.ValueOf(pb) + b = toStructPointer(value) + return +} + +// A global registry of enum types. +// The generated code will register the generated maps by calling RegisterEnum. 
+ +var enumValueMaps = make(map[string]map[string]int32) + +// RegisterEnum is called from the generated code to install the enum descriptor +// maps into the global table to aid parsing text format protocol buffers. +func RegisterEnum(typeName string, unusedNameMap map[int32]string, valueMap map[string]int32) { + if _, ok := enumValueMaps[typeName]; ok { + panic("proto: duplicate enum registered: " + typeName) + } + enumValueMaps[typeName] = valueMap +} + +// EnumValueMap returns the mapping from names to integers of the +// enum type enumType, or a nil if not found. +func EnumValueMap(enumType string) map[string]int32 { + return enumValueMaps[enumType] +} + +// A registry of all linked message types. +// The string is a fully-qualified proto name ("pkg.Message"). +var ( + protoTypes = make(map[string]reflect.Type) + revProtoTypes = make(map[reflect.Type]string) +) + +// RegisterType is called from generated code and maps from the fully qualified +// proto name to the type (pointer to struct) of the protocol buffer. +func RegisterType(x Message, name string) { + if _, ok := protoTypes[name]; ok { + // TODO: Some day, make this a panic. + log.Printf("proto: duplicate proto type registered: %s", name) + return + } + t := reflect.TypeOf(x) + protoTypes[name] = t + revProtoTypes[t] = name +} + +// MessageName returns the fully-qualified proto name for the given message type. +func MessageName(x Message) string { return revProtoTypes[reflect.TypeOf(x)] } + +// MessageType returns the message type (pointer to struct) for a named message. +func MessageType(name string) reflect.Type { return protoTypes[name] } diff --git a/vendor/github.com/docker/distribution/vendor/github.com/golang/protobuf/proto/text.go b/vendor/github.com/docker/distribution/vendor/github.com/golang/protobuf/proto/text.go new file mode 100644 index 000000000..37c953570 --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/github.com/golang/protobuf/proto/text.go @@ -0,0 +1,849 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2010 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package proto + +// Functions for writing the text protocol buffer format. + +import ( + "bufio" + "bytes" + "encoding" + "errors" + "fmt" + "io" + "log" + "math" + "reflect" + "sort" + "strings" +) + +var ( + newline = []byte("\n") + spaces = []byte(" ") + gtNewline = []byte(">\n") + endBraceNewline = []byte("}\n") + backslashN = []byte{'\\', 'n'} + backslashR = []byte{'\\', 'r'} + backslashT = []byte{'\\', 't'} + backslashDQ = []byte{'\\', '"'} + backslashBS = []byte{'\\', '\\'} + posInf = []byte("inf") + negInf = []byte("-inf") + nan = []byte("nan") +) + +type writer interface { + io.Writer + WriteByte(byte) error +} + +// textWriter is an io.Writer that tracks its indentation level. +type textWriter struct { + ind int + complete bool // if the current position is a complete line + compact bool // whether to write out as a one-liner + w writer +} + +func (w *textWriter) WriteString(s string) (n int, err error) { + if !strings.Contains(s, "\n") { + if !w.compact && w.complete { + w.writeIndent() + } + w.complete = false + return io.WriteString(w.w, s) + } + // WriteString is typically called without newlines, so this + // codepath and its copy are rare. We copy to avoid + // duplicating all of Write's logic here. + return w.Write([]byte(s)) +} + +func (w *textWriter) Write(p []byte) (n int, err error) { + newlines := bytes.Count(p, newline) + if newlines == 0 { + if !w.compact && w.complete { + w.writeIndent() + } + n, err = w.w.Write(p) + w.complete = false + return n, err + } + + frags := bytes.SplitN(p, newline, newlines+1) + if w.compact { + for i, frag := range frags { + if i > 0 { + if err := w.w.WriteByte(' '); err != nil { + return n, err + } + n++ + } + nn, err := w.w.Write(frag) + n += nn + if err != nil { + return n, err + } + } + return n, nil + } + + for i, frag := range frags { + if w.complete { + w.writeIndent() + } + nn, err := w.w.Write(frag) + n += nn + if err != nil { + return n, err + } + if i+1 < len(frags) { + if err := w.w.WriteByte('\n'); err != nil { + return n, err + } + n++ + } + } + w.complete = len(frags[len(frags)-1]) == 0 + return n, nil +} + +func (w *textWriter) WriteByte(c byte) error { + if w.compact && c == '\n' { + c = ' ' + } + if !w.compact && w.complete { + w.writeIndent() + } + err := w.w.WriteByte(c) + w.complete = c == '\n' + return err +} + +func (w *textWriter) indent() { w.ind++ } + +func (w *textWriter) unindent() { + if w.ind == 0 { + log.Printf("proto: textWriter unindented too far") + return + } + w.ind-- +} + +func writeName(w *textWriter, props *Properties) error { + if _, err := w.WriteString(props.OrigName); err != nil { + return err + } + if props.Wire != "group" { + return w.WriteByte(':') + } + return nil +} + +// raw is the interface satisfied by RawMessage. +type raw interface { + Bytes() []byte +} + +func requiresQuotes(u string) bool { + // When type URL contains any characters except [0-9A-Za-z./\-]*, it must be quoted. + for _, ch := range u { + switch { + case ch == '.' 
|| ch == '/' || ch == '_': + continue + case '0' <= ch && ch <= '9': + continue + case 'A' <= ch && ch <= 'Z': + continue + case 'a' <= ch && ch <= 'z': + continue + default: + return true + } + } + return false +} + +// isAny reports whether sv is a google.protobuf.Any message +func isAny(sv reflect.Value) bool { + type wkt interface { + XXX_WellKnownType() string + } + t, ok := sv.Addr().Interface().(wkt) + return ok && t.XXX_WellKnownType() == "Any" +} + +// writeProto3Any writes an expanded google.protobuf.Any message. +// +// It returns (false, nil) if sv value can't be unmarshaled (e.g. because +// required messages are not linked in). +// +// It returns (true, error) when sv was written in expanded format or an error +// was encountered. +func (tm *TextMarshaler) writeProto3Any(w *textWriter, sv reflect.Value) (bool, error) { + turl := sv.FieldByName("TypeUrl") + val := sv.FieldByName("Value") + if !turl.IsValid() || !val.IsValid() { + return true, errors.New("proto: invalid google.protobuf.Any message") + } + + b, ok := val.Interface().([]byte) + if !ok { + return true, errors.New("proto: invalid google.protobuf.Any message") + } + + parts := strings.Split(turl.String(), "/") + mt := MessageType(parts[len(parts)-1]) + if mt == nil { + return false, nil + } + m := reflect.New(mt.Elem()) + if err := Unmarshal(b, m.Interface().(Message)); err != nil { + return false, nil + } + w.Write([]byte("[")) + u := turl.String() + if requiresQuotes(u) { + writeString(w, u) + } else { + w.Write([]byte(u)) + } + if w.compact { + w.Write([]byte("]:<")) + } else { + w.Write([]byte("]: <\n")) + w.ind++ + } + if err := tm.writeStruct(w, m.Elem()); err != nil { + return true, err + } + if w.compact { + w.Write([]byte("> ")) + } else { + w.ind-- + w.Write([]byte(">\n")) + } + return true, nil +} + +func (tm *TextMarshaler) writeStruct(w *textWriter, sv reflect.Value) error { + if tm.ExpandAny && isAny(sv) { + if canExpand, err := tm.writeProto3Any(w, sv); canExpand { + return err + } + } + st := sv.Type() + sprops := GetProperties(st) + for i := 0; i < sv.NumField(); i++ { + fv := sv.Field(i) + props := sprops.Prop[i] + name := st.Field(i).Name + + if strings.HasPrefix(name, "XXX_") { + // There are two XXX_ fields: + // XXX_unrecognized []byte + // XXX_extensions map[int32]proto.Extension + // The first is handled here; + // the second is handled at the bottom of this function. + if name == "XXX_unrecognized" && !fv.IsNil() { + if err := writeUnknownStruct(w, fv.Interface().([]byte)); err != nil { + return err + } + } + continue + } + if fv.Kind() == reflect.Ptr && fv.IsNil() { + // Field not filled in. This could be an optional field or + // a required field that wasn't filled in. Either way, there + // isn't anything we can show for it. + continue + } + if fv.Kind() == reflect.Slice && fv.IsNil() { + // Repeated field that is empty, or a bytes field that is unused. + continue + } + + if props.Repeated && fv.Kind() == reflect.Slice { + // Repeated field. + for j := 0; j < fv.Len(); j++ { + if err := writeName(w, props); err != nil { + return err + } + if !w.compact { + if err := w.WriteByte(' '); err != nil { + return err + } + } + v := fv.Index(j) + if v.Kind() == reflect.Ptr && v.IsNil() { + // A nil message in a repeated field is not valid, + // but we can handle that more gracefully than panicking. 
+ if _, err := w.Write([]byte("\n")); err != nil { + return err + } + continue + } + if err := tm.writeAny(w, v, props); err != nil { + return err + } + if err := w.WriteByte('\n'); err != nil { + return err + } + } + continue + } + if fv.Kind() == reflect.Map { + // Map fields are rendered as a repeated struct with key/value fields. + keys := fv.MapKeys() + sort.Sort(mapKeys(keys)) + for _, key := range keys { + val := fv.MapIndex(key) + if err := writeName(w, props); err != nil { + return err + } + if !w.compact { + if err := w.WriteByte(' '); err != nil { + return err + } + } + // open struct + if err := w.WriteByte('<'); err != nil { + return err + } + if !w.compact { + if err := w.WriteByte('\n'); err != nil { + return err + } + } + w.indent() + // key + if _, err := w.WriteString("key:"); err != nil { + return err + } + if !w.compact { + if err := w.WriteByte(' '); err != nil { + return err + } + } + if err := tm.writeAny(w, key, props.mkeyprop); err != nil { + return err + } + if err := w.WriteByte('\n'); err != nil { + return err + } + // nil values aren't legal, but we can avoid panicking because of them. + if val.Kind() != reflect.Ptr || !val.IsNil() { + // value + if _, err := w.WriteString("value:"); err != nil { + return err + } + if !w.compact { + if err := w.WriteByte(' '); err != nil { + return err + } + } + if err := tm.writeAny(w, val, props.mvalprop); err != nil { + return err + } + if err := w.WriteByte('\n'); err != nil { + return err + } + } + // close struct + w.unindent() + if err := w.WriteByte('>'); err != nil { + return err + } + if err := w.WriteByte('\n'); err != nil { + return err + } + } + continue + } + if props.proto3 && fv.Kind() == reflect.Slice && fv.Len() == 0 { + // empty bytes field + continue + } + if fv.Kind() != reflect.Ptr && fv.Kind() != reflect.Slice { + // proto3 non-repeated scalar field; skip if zero value + if isProto3Zero(fv) { + continue + } + } + + if fv.Kind() == reflect.Interface { + // Check if it is a oneof. + if st.Field(i).Tag.Get("protobuf_oneof") != "" { + // fv is nil, or holds a pointer to generated struct. + // That generated struct has exactly one field, + // which has a protobuf struct tag. + if fv.IsNil() { + continue + } + inner := fv.Elem().Elem() // interface -> *T -> T + tag := inner.Type().Field(0).Tag.Get("protobuf") + props = new(Properties) // Overwrite the outer props var, but not its pointee. + props.Parse(tag) + // Write the value in the oneof, not the oneof itself. + fv = inner.Field(0) + + // Special case to cope with malformed messages gracefully: + // If the value in the oneof is a nil pointer, don't panic + // in writeAny. + if fv.Kind() == reflect.Ptr && fv.IsNil() { + // Use errors.New so writeAny won't render quotes. + msg := errors.New("/* nil */") + fv = reflect.ValueOf(&msg).Elem() + } + } + } + + if err := writeName(w, props); err != nil { + return err + } + if !w.compact { + if err := w.WriteByte(' '); err != nil { + return err + } + } + if b, ok := fv.Interface().(raw); ok { + if err := writeRaw(w, b.Bytes()); err != nil { + return err + } + continue + } + + // Enums have a String method, so writeAny will work fine. + if err := tm.writeAny(w, fv, props); err != nil { + return err + } + + if err := w.WriteByte('\n'); err != nil { + return err + } + } + + // Extensions (the XXX_extensions field). + pv := sv.Addr() + if pv.Type().Implements(extendableProtoType) { + if err := tm.writeExtensions(w, pv); err != nil { + return err + } + } + + return nil +} + +// writeRaw writes an uninterpreted raw message. 
+func writeRaw(w *textWriter, b []byte) error { + if err := w.WriteByte('<'); err != nil { + return err + } + if !w.compact { + if err := w.WriteByte('\n'); err != nil { + return err + } + } + w.indent() + if err := writeUnknownStruct(w, b); err != nil { + return err + } + w.unindent() + if err := w.WriteByte('>'); err != nil { + return err + } + return nil +} + +// writeAny writes an arbitrary field. +func (tm *TextMarshaler) writeAny(w *textWriter, v reflect.Value, props *Properties) error { + v = reflect.Indirect(v) + + // Floats have special cases. + if v.Kind() == reflect.Float32 || v.Kind() == reflect.Float64 { + x := v.Float() + var b []byte + switch { + case math.IsInf(x, 1): + b = posInf + case math.IsInf(x, -1): + b = negInf + case math.IsNaN(x): + b = nan + } + if b != nil { + _, err := w.Write(b) + return err + } + // Other values are handled below. + } + + // We don't attempt to serialise every possible value type; only those + // that can occur in protocol buffers. + switch v.Kind() { + case reflect.Slice: + // Should only be a []byte; repeated fields are handled in writeStruct. + if err := writeString(w, string(v.Interface().([]byte))); err != nil { + return err + } + case reflect.String: + if err := writeString(w, v.String()); err != nil { + return err + } + case reflect.Struct: + // Required/optional group/message. + var bra, ket byte = '<', '>' + if props != nil && props.Wire == "group" { + bra, ket = '{', '}' + } + if err := w.WriteByte(bra); err != nil { + return err + } + if !w.compact { + if err := w.WriteByte('\n'); err != nil { + return err + } + } + w.indent() + if etm, ok := v.Interface().(encoding.TextMarshaler); ok { + text, err := etm.MarshalText() + if err != nil { + return err + } + if _, err = w.Write(text); err != nil { + return err + } + } else if err := tm.writeStruct(w, v); err != nil { + return err + } + w.unindent() + if err := w.WriteByte(ket); err != nil { + return err + } + default: + _, err := fmt.Fprint(w, v.Interface()) + return err + } + return nil +} + +// equivalent to C's isprint. +func isprint(c byte) bool { + return c >= 0x20 && c < 0x7f +} + +// writeString writes a string in the protocol buffer text format. +// It is similar to strconv.Quote except we don't use Go escape sequences, +// we treat the string as a byte sequence, and we use octal escapes. +// These differences are to maintain interoperability with the other +// languages' implementations of the text format. +func writeString(w *textWriter, s string) error { + // use WriteByte here to get any needed indent + if err := w.WriteByte('"'); err != nil { + return err + } + // Loop over the bytes, not the runes. + for i := 0; i < len(s); i++ { + var err error + // Divergence from C++: we don't escape apostrophes. + // There's no need to escape them, and the C++ parser + // copes with a naked apostrophe. 
+ switch c := s[i]; c { + case '\n': + _, err = w.w.Write(backslashN) + case '\r': + _, err = w.w.Write(backslashR) + case '\t': + _, err = w.w.Write(backslashT) + case '"': + _, err = w.w.Write(backslashDQ) + case '\\': + _, err = w.w.Write(backslashBS) + default: + if isprint(c) { + err = w.w.WriteByte(c) + } else { + _, err = fmt.Fprintf(w.w, "\\%03o", c) + } + } + if err != nil { + return err + } + } + return w.WriteByte('"') +} + +func writeUnknownStruct(w *textWriter, data []byte) (err error) { + if !w.compact { + if _, err := fmt.Fprintf(w, "/* %d unknown bytes */\n", len(data)); err != nil { + return err + } + } + b := NewBuffer(data) + for b.index < len(b.buf) { + x, err := b.DecodeVarint() + if err != nil { + _, err := fmt.Fprintf(w, "/* %v */\n", err) + return err + } + wire, tag := x&7, x>>3 + if wire == WireEndGroup { + w.unindent() + if _, err := w.Write(endBraceNewline); err != nil { + return err + } + continue + } + if _, err := fmt.Fprint(w, tag); err != nil { + return err + } + if wire != WireStartGroup { + if err := w.WriteByte(':'); err != nil { + return err + } + } + if !w.compact || wire == WireStartGroup { + if err := w.WriteByte(' '); err != nil { + return err + } + } + switch wire { + case WireBytes: + buf, e := b.DecodeRawBytes(false) + if e == nil { + _, err = fmt.Fprintf(w, "%q", buf) + } else { + _, err = fmt.Fprintf(w, "/* %v */", e) + } + case WireFixed32: + x, err = b.DecodeFixed32() + err = writeUnknownInt(w, x, err) + case WireFixed64: + x, err = b.DecodeFixed64() + err = writeUnknownInt(w, x, err) + case WireStartGroup: + err = w.WriteByte('{') + w.indent() + case WireVarint: + x, err = b.DecodeVarint() + err = writeUnknownInt(w, x, err) + default: + _, err = fmt.Fprintf(w, "/* unknown wire type %d */", wire) + } + if err != nil { + return err + } + if err = w.WriteByte('\n'); err != nil { + return err + } + } + return nil +} + +func writeUnknownInt(w *textWriter, x uint64, err error) error { + if err == nil { + _, err = fmt.Fprint(w, x) + } else { + _, err = fmt.Fprintf(w, "/* %v */", err) + } + return err +} + +type int32Slice []int32 + +func (s int32Slice) Len() int { return len(s) } +func (s int32Slice) Less(i, j int) bool { return s[i] < s[j] } +func (s int32Slice) Swap(i, j int) { s[i], s[j] = s[j], s[i] } + +// writeExtensions writes all the extensions in pv. +// pv is assumed to be a pointer to a protocol message struct that is extendable. +func (tm *TextMarshaler) writeExtensions(w *textWriter, pv reflect.Value) error { + emap := extensionMaps[pv.Type().Elem()] + ep := pv.Interface().(extendableProto) + + // Order the extensions by ID. + // This isn't strictly necessary, but it will give us + // canonical output, which will also make testing easier. + m := ep.ExtensionMap() + ids := make([]int32, 0, len(m)) + for id := range m { + ids = append(ids, id) + } + sort.Sort(int32Slice(ids)) + + for _, extNum := range ids { + ext := m[extNum] + var desc *ExtensionDesc + if emap != nil { + desc = emap[extNum] + } + if desc == nil { + // Unknown extension. + if err := writeUnknownStruct(w, ext.enc); err != nil { + return err + } + continue + } + + pb, err := GetExtension(ep, desc) + if err != nil { + return fmt.Errorf("failed getting extension: %v", err) + } + + // Repeated extensions will appear as a slice. 
+ if !desc.repeated() { + if err := tm.writeExtension(w, desc.Name, pb); err != nil { + return err + } + } else { + v := reflect.ValueOf(pb) + for i := 0; i < v.Len(); i++ { + if err := tm.writeExtension(w, desc.Name, v.Index(i).Interface()); err != nil { + return err + } + } + } + } + return nil +} + +func (tm *TextMarshaler) writeExtension(w *textWriter, name string, pb interface{}) error { + if _, err := fmt.Fprintf(w, "[%s]:", name); err != nil { + return err + } + if !w.compact { + if err := w.WriteByte(' '); err != nil { + return err + } + } + if err := tm.writeAny(w, reflect.ValueOf(pb), nil); err != nil { + return err + } + if err := w.WriteByte('\n'); err != nil { + return err + } + return nil +} + +func (w *textWriter) writeIndent() { + if !w.complete { + return + } + remain := w.ind * 2 + for remain > 0 { + n := remain + if n > len(spaces) { + n = len(spaces) + } + w.w.Write(spaces[:n]) + remain -= n + } + w.complete = false +} + +// TextMarshaler is a configurable text format marshaler. +type TextMarshaler struct { + Compact bool // use compact text format (one line). + ExpandAny bool // expand google.protobuf.Any messages of known types +} + +// Marshal writes a given protocol buffer in text format. +// The only errors returned are from w. +func (tm *TextMarshaler) Marshal(w io.Writer, pb Message) error { + val := reflect.ValueOf(pb) + if pb == nil || val.IsNil() { + w.Write([]byte("")) + return nil + } + var bw *bufio.Writer + ww, ok := w.(writer) + if !ok { + bw = bufio.NewWriter(w) + ww = bw + } + aw := &textWriter{ + w: ww, + complete: true, + compact: tm.Compact, + } + + if etm, ok := pb.(encoding.TextMarshaler); ok { + text, err := etm.MarshalText() + if err != nil { + return err + } + if _, err = aw.Write(text); err != nil { + return err + } + if bw != nil { + return bw.Flush() + } + return nil + } + // Dereference the received pointer so we don't have outer < and >. + v := reflect.Indirect(val) + if err := tm.writeStruct(aw, v); err != nil { + return err + } + if bw != nil { + return bw.Flush() + } + return nil +} + +// Text is the same as Marshal, but returns the string directly. +func (tm *TextMarshaler) Text(pb Message) string { + var buf bytes.Buffer + tm.Marshal(&buf, pb) + return buf.String() +} + +var ( + defaultTextMarshaler = TextMarshaler{} + compactTextMarshaler = TextMarshaler{Compact: true} +) + +// TODO: consider removing some of the Marshal functions below. + +// MarshalText writes a given protocol buffer in text format. +// The only errors returned are from w. +func MarshalText(w io.Writer, pb Message) error { return defaultTextMarshaler.Marshal(w, pb) } + +// MarshalTextString is the same as MarshalText, but returns the string directly. +func MarshalTextString(pb Message) string { return defaultTextMarshaler.Text(pb) } + +// CompactText writes a given protocol buffer in compact text format (one line). +func CompactText(w io.Writer, pb Message) error { return compactTextMarshaler.Marshal(w, pb) } + +// CompactTextString is the same as CompactText, but returns the string directly. 
+func CompactTextString(pb Message) string { return compactTextMarshaler.Text(pb) } diff --git a/vendor/github.com/docker/distribution/vendor/github.com/golang/protobuf/proto/text_parser.go b/vendor/github.com/docker/distribution/vendor/github.com/golang/protobuf/proto/text_parser.go new file mode 100644 index 000000000..b5e1c8e1f --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/github.com/golang/protobuf/proto/text_parser.go @@ -0,0 +1,871 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2010 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package proto + +// Functions for parsing the Text protocol buffer format. +// TODO: message sets. 
+ +import ( + "encoding" + "errors" + "fmt" + "reflect" + "strconv" + "strings" + "unicode/utf8" +) + +type ParseError struct { + Message string + Line int // 1-based line number + Offset int // 0-based byte offset from start of input +} + +func (p *ParseError) Error() string { + if p.Line == 1 { + // show offset only for first line + return fmt.Sprintf("line 1.%d: %v", p.Offset, p.Message) + } + return fmt.Sprintf("line %d: %v", p.Line, p.Message) +} + +type token struct { + value string + err *ParseError + line int // line number + offset int // byte number from start of input, not start of line + unquoted string // the unquoted version of value, if it was a quoted string +} + +func (t *token) String() string { + if t.err == nil { + return fmt.Sprintf("%q (line=%d, offset=%d)", t.value, t.line, t.offset) + } + return fmt.Sprintf("parse error: %v", t.err) +} + +type textParser struct { + s string // remaining input + done bool // whether the parsing is finished (success or error) + backed bool // whether back() was called + offset, line int + cur token +} + +func newTextParser(s string) *textParser { + p := new(textParser) + p.s = s + p.line = 1 + p.cur.line = 1 + return p +} + +func (p *textParser) errorf(format string, a ...interface{}) *ParseError { + pe := &ParseError{fmt.Sprintf(format, a...), p.cur.line, p.cur.offset} + p.cur.err = pe + p.done = true + return pe +} + +// Numbers and identifiers are matched by [-+._A-Za-z0-9] +func isIdentOrNumberChar(c byte) bool { + switch { + case 'A' <= c && c <= 'Z', 'a' <= c && c <= 'z': + return true + case '0' <= c && c <= '9': + return true + } + switch c { + case '-', '+', '.', '_': + return true + } + return false +} + +func isWhitespace(c byte) bool { + switch c { + case ' ', '\t', '\n', '\r': + return true + } + return false +} + +func isQuote(c byte) bool { + switch c { + case '"', '\'': + return true + } + return false +} + +func (p *textParser) skipWhitespace() { + i := 0 + for i < len(p.s) && (isWhitespace(p.s[i]) || p.s[i] == '#') { + if p.s[i] == '#' { + // comment; skip to end of line or input + for i < len(p.s) && p.s[i] != '\n' { + i++ + } + if i == len(p.s) { + break + } + } + if p.s[i] == '\n' { + p.line++ + } + i++ + } + p.offset += i + p.s = p.s[i:len(p.s)] + if len(p.s) == 0 { + p.done = true + } +} + +func (p *textParser) advance() { + // Skip whitespace + p.skipWhitespace() + if p.done { + return + } + + // Start of non-whitespace + p.cur.err = nil + p.cur.offset, p.cur.line = p.offset, p.line + p.cur.unquoted = "" + switch p.s[0] { + case '<', '>', '{', '}', ':', '[', ']', ';', ',', '/': + // Single symbol + p.cur.value, p.s = p.s[0:1], p.s[1:len(p.s)] + case '"', '\'': + // Quoted string + i := 1 + for i < len(p.s) && p.s[i] != p.s[0] && p.s[i] != '\n' { + if p.s[i] == '\\' && i+1 < len(p.s) { + // skip escaped char + i++ + } + i++ + } + if i >= len(p.s) || p.s[i] != p.s[0] { + p.errorf("unmatched quote") + return + } + unq, err := unquoteC(p.s[1:i], rune(p.s[0])) + if err != nil { + p.errorf("invalid quoted string %s: %v", p.s[0:i+1], err) + return + } + p.cur.value, p.s = p.s[0:i+1], p.s[i+1:len(p.s)] + p.cur.unquoted = unq + default: + i := 0 + for i < len(p.s) && isIdentOrNumberChar(p.s[i]) { + i++ + } + if i == 0 { + p.errorf("unexpected byte %#x", p.s[0]) + return + } + p.cur.value, p.s = p.s[0:i], p.s[i:len(p.s)] + } + p.offset += len(p.cur.value) +} + +var ( + errBadUTF8 = errors.New("proto: bad UTF-8") + errBadHex = errors.New("proto: bad hexadecimal") +) + +func unquoteC(s string, quote rune) (string, error) { 
+ // This is based on C++'s tokenizer.cc. + // Despite its name, this is *not* parsing C syntax. + // For instance, "\0" is an invalid quoted string. + + // Avoid allocation in trivial cases. + simple := true + for _, r := range s { + if r == '\\' || r == quote { + simple = false + break + } + } + if simple { + return s, nil + } + + buf := make([]byte, 0, 3*len(s)/2) + for len(s) > 0 { + r, n := utf8.DecodeRuneInString(s) + if r == utf8.RuneError && n == 1 { + return "", errBadUTF8 + } + s = s[n:] + if r != '\\' { + if r < utf8.RuneSelf { + buf = append(buf, byte(r)) + } else { + buf = append(buf, string(r)...) + } + continue + } + + ch, tail, err := unescape(s) + if err != nil { + return "", err + } + buf = append(buf, ch...) + s = tail + } + return string(buf), nil +} + +func unescape(s string) (ch string, tail string, err error) { + r, n := utf8.DecodeRuneInString(s) + if r == utf8.RuneError && n == 1 { + return "", "", errBadUTF8 + } + s = s[n:] + switch r { + case 'a': + return "\a", s, nil + case 'b': + return "\b", s, nil + case 'f': + return "\f", s, nil + case 'n': + return "\n", s, nil + case 'r': + return "\r", s, nil + case 't': + return "\t", s, nil + case 'v': + return "\v", s, nil + case '?': + return "?", s, nil // trigraph workaround + case '\'', '"', '\\': + return string(r), s, nil + case '0', '1', '2', '3', '4', '5', '6', '7', 'x', 'X': + if len(s) < 2 { + return "", "", fmt.Errorf(`\%c requires 2 following digits`, r) + } + base := 8 + ss := s[:2] + s = s[2:] + if r == 'x' || r == 'X' { + base = 16 + } else { + ss = string(r) + ss + } + i, err := strconv.ParseUint(ss, base, 8) + if err != nil { + return "", "", err + } + return string([]byte{byte(i)}), s, nil + case 'u', 'U': + n := 4 + if r == 'U' { + n = 8 + } + if len(s) < n { + return "", "", fmt.Errorf(`\%c requires %d digits`, r, n) + } + + bs := make([]byte, n/2) + for i := 0; i < n; i += 2 { + a, ok1 := unhex(s[i]) + b, ok2 := unhex(s[i+1]) + if !ok1 || !ok2 { + return "", "", errBadHex + } + bs[i/2] = a<<4 | b + } + s = s[n:] + return string(bs), s, nil + } + return "", "", fmt.Errorf(`unknown escape \%c`, r) +} + +// Adapted from src/pkg/strconv/quote.go. +func unhex(b byte) (v byte, ok bool) { + switch { + case '0' <= b && b <= '9': + return b - '0', true + case 'a' <= b && b <= 'f': + return b - 'a' + 10, true + case 'A' <= b && b <= 'F': + return b - 'A' + 10, true + } + return 0, false +} + +// Back off the parser by one token. Can only be done between calls to next(). +// It makes the next advance() a no-op. +func (p *textParser) back() { p.backed = true } + +// Advances the parser and returns the new current token. +func (p *textParser) next() *token { + if p.backed || p.done { + p.backed = false + return &p.cur + } + p.advance() + if p.done { + p.cur.value = "" + } else if len(p.cur.value) > 0 && isQuote(p.cur.value[0]) { + // Look for multiple quoted strings separated by whitespace, + // and concatenate them. 
+ cat := p.cur + for { + p.skipWhitespace() + if p.done || !isQuote(p.s[0]) { + break + } + p.advance() + if p.cur.err != nil { + return &p.cur + } + cat.value += " " + p.cur.value + cat.unquoted += p.cur.unquoted + } + p.done = false // parser may have seen EOF, but we want to return cat + p.cur = cat + } + return &p.cur +} + +func (p *textParser) consumeToken(s string) error { + tok := p.next() + if tok.err != nil { + return tok.err + } + if tok.value != s { + p.back() + return p.errorf("expected %q, found %q", s, tok.value) + } + return nil +} + +// Return a RequiredNotSetError indicating which required field was not set. +func (p *textParser) missingRequiredFieldError(sv reflect.Value) *RequiredNotSetError { + st := sv.Type() + sprops := GetProperties(st) + for i := 0; i < st.NumField(); i++ { + if !isNil(sv.Field(i)) { + continue + } + + props := sprops.Prop[i] + if props.Required { + return &RequiredNotSetError{fmt.Sprintf("%v.%v", st, props.OrigName)} + } + } + return &RequiredNotSetError{fmt.Sprintf("%v.", st)} // should not happen +} + +// Returns the index in the struct for the named field, as well as the parsed tag properties. +func structFieldByName(sprops *StructProperties, name string) (int, *Properties, bool) { + i, ok := sprops.decoderOrigNames[name] + if ok { + return i, sprops.Prop[i], true + } + return -1, nil, false +} + +// Consume a ':' from the input stream (if the next token is a colon), +// returning an error if a colon is needed but not present. +func (p *textParser) checkForColon(props *Properties, typ reflect.Type) *ParseError { + tok := p.next() + if tok.err != nil { + return tok.err + } + if tok.value != ":" { + // Colon is optional when the field is a group or message. + needColon := true + switch props.Wire { + case "group": + needColon = false + case "bytes": + // A "bytes" field is either a message, a string, or a repeated field; + // those three become *T, *string and []T respectively, so we can check for + // this field being a pointer to a non-string. + if typ.Kind() == reflect.Ptr { + // *T or *string + if typ.Elem().Kind() == reflect.String { + break + } + } else if typ.Kind() == reflect.Slice { + // []T or []*T + if typ.Elem().Kind() != reflect.Ptr { + break + } + } else if typ.Kind() == reflect.String { + // The proto3 exception is for a string field, + // which requires a colon. + break + } + needColon = false + } + if needColon { + return p.errorf("expected ':', found %q", tok.value) + } + p.back() + } + return nil +} + +func (p *textParser) readStruct(sv reflect.Value, terminator string) error { + st := sv.Type() + sprops := GetProperties(st) + reqCount := sprops.reqCount + var reqFieldErr error + fieldSet := make(map[string]bool) + // A struct is a sequence of "name: value", terminated by one of + // '>' or '}', or the end of the input. A name may also be + // "[extension]" or "[type/url]". + // + // The whole struct can also be an expanded Any message, like: + // [type/url] < ... struct contents ... > + for { + tok := p.next() + if tok.err != nil { + return tok.err + } + if tok.value == terminator { + break + } + if tok.value == "[" { + // Looks like an extension or an Any. + // + // TODO: Check whether we need to handle + // namespace rooted names (e.g. ".something.Foo"). + extName, err := p.consumeExtName() + if err != nil { + return err + } + + if s := strings.LastIndex(extName, "/"); s >= 0 { + // If it contains a slash, it's an Any type URL. 
+ messageName := extName[s+1:] + mt := MessageType(messageName) + if mt == nil { + return p.errorf("unrecognized message %q in google.protobuf.Any", messageName) + } + tok = p.next() + if tok.err != nil { + return tok.err + } + // consume an optional colon + if tok.value == ":" { + tok = p.next() + if tok.err != nil { + return tok.err + } + } + var terminator string + switch tok.value { + case "<": + terminator = ">" + case "{": + terminator = "}" + default: + return p.errorf("expected '{' or '<', found %q", tok.value) + } + v := reflect.New(mt.Elem()) + if pe := p.readStruct(v.Elem(), terminator); pe != nil { + return pe + } + b, err := Marshal(v.Interface().(Message)) + if err != nil { + return p.errorf("failed to marshal message of type %q: %v", messageName, err) + } + sv.FieldByName("TypeUrl").SetString(extName) + sv.FieldByName("Value").SetBytes(b) + continue + } + + var desc *ExtensionDesc + // This could be faster, but it's functional. + // TODO: Do something smarter than a linear scan. + for _, d := range RegisteredExtensions(reflect.New(st).Interface().(Message)) { + if d.Name == extName { + desc = d + break + } + } + if desc == nil { + return p.errorf("unrecognized extension %q", extName) + } + + props := &Properties{} + props.Parse(desc.Tag) + + typ := reflect.TypeOf(desc.ExtensionType) + if err := p.checkForColon(props, typ); err != nil { + return err + } + + rep := desc.repeated() + + // Read the extension structure, and set it in + // the value we're constructing. + var ext reflect.Value + if !rep { + ext = reflect.New(typ).Elem() + } else { + ext = reflect.New(typ.Elem()).Elem() + } + if err := p.readAny(ext, props); err != nil { + if _, ok := err.(*RequiredNotSetError); !ok { + return err + } + reqFieldErr = err + } + ep := sv.Addr().Interface().(extendableProto) + if !rep { + SetExtension(ep, desc, ext.Interface()) + } else { + old, err := GetExtension(ep, desc) + var sl reflect.Value + if err == nil { + sl = reflect.ValueOf(old) // existing slice + } else { + sl = reflect.MakeSlice(typ, 0, 1) + } + sl = reflect.Append(sl, ext) + SetExtension(ep, desc, sl.Interface()) + } + if err := p.consumeOptionalSeparator(); err != nil { + return err + } + continue + } + + // This is a normal, non-extension field. + name := tok.value + var dst reflect.Value + fi, props, ok := structFieldByName(sprops, name) + if ok { + dst = sv.Field(fi) + } else if oop, ok := sprops.OneofTypes[name]; ok { + // It is a oneof. + props = oop.Prop + nv := reflect.New(oop.Type.Elem()) + dst = nv.Elem().Field(0) + sv.Field(oop.Field).Set(nv) + } + if !dst.IsValid() { + return p.errorf("unknown field name %q in %v", name, st) + } + + if dst.Kind() == reflect.Map { + // Consume any colon. + if err := p.checkForColon(props, dst.Type()); err != nil { + return err + } + + // Construct the map if it doesn't already exist. + if dst.IsNil() { + dst.Set(reflect.MakeMap(dst.Type())) + } + key := reflect.New(dst.Type().Key()).Elem() + val := reflect.New(dst.Type().Elem()).Elem() + + // The map entry should be this sequence of tokens: + // < key : KEY value : VALUE > + // Technically the "key" and "value" could come in any order, + // but in practice they won't. 
+ + tok := p.next() + var terminator string + switch tok.value { + case "<": + terminator = ">" + case "{": + terminator = "}" + default: + return p.errorf("expected '{' or '<', found %q", tok.value) + } + if err := p.consumeToken("key"); err != nil { + return err + } + if err := p.consumeToken(":"); err != nil { + return err + } + if err := p.readAny(key, props.mkeyprop); err != nil { + return err + } + if err := p.consumeOptionalSeparator(); err != nil { + return err + } + if err := p.consumeToken("value"); err != nil { + return err + } + if err := p.checkForColon(props.mvalprop, dst.Type().Elem()); err != nil { + return err + } + if err := p.readAny(val, props.mvalprop); err != nil { + return err + } + if err := p.consumeOptionalSeparator(); err != nil { + return err + } + if err := p.consumeToken(terminator); err != nil { + return err + } + + dst.SetMapIndex(key, val) + continue + } + + // Check that it's not already set if it's not a repeated field. + if !props.Repeated && fieldSet[name] { + return p.errorf("non-repeated field %q was repeated", name) + } + + if err := p.checkForColon(props, dst.Type()); err != nil { + return err + } + + // Parse into the field. + fieldSet[name] = true + if err := p.readAny(dst, props); err != nil { + if _, ok := err.(*RequiredNotSetError); !ok { + return err + } + reqFieldErr = err + } else if props.Required { + reqCount-- + } + + if err := p.consumeOptionalSeparator(); err != nil { + return err + } + + } + + if reqCount > 0 { + return p.missingRequiredFieldError(sv) + } + return reqFieldErr +} + +// consumeExtName consumes extension name or expanded Any type URL and the +// following ']'. It returns the name or URL consumed. +func (p *textParser) consumeExtName() (string, error) { + tok := p.next() + if tok.err != nil { + return "", tok.err + } + + // If extension name or type url is quoted, it's a single token. + if len(tok.value) > 2 && isQuote(tok.value[0]) && tok.value[len(tok.value)-1] == tok.value[0] { + name, err := unquoteC(tok.value[1:len(tok.value)-1], rune(tok.value[0])) + if err != nil { + return "", err + } + return name, p.consumeToken("]") + } + + // Consume everything up to "]" + var parts []string + for tok.value != "]" { + parts = append(parts, tok.value) + tok = p.next() + if tok.err != nil { + return "", p.errorf("unrecognized type_url or extension name: %s", tok.err) + } + } + return strings.Join(parts, ""), nil +} + +// consumeOptionalSeparator consumes an optional semicolon or comma. +// It is used in readStruct to provide backward compatibility. +func (p *textParser) consumeOptionalSeparator() error { + tok := p.next() + if tok.err != nil { + return tok.err + } + if tok.value != ";" && tok.value != "," { + p.back() + } + return nil +} + +func (p *textParser) readAny(v reflect.Value, props *Properties) error { + tok := p.next() + if tok.err != nil { + return tok.err + } + if tok.value == "" { + return p.errorf("unexpected EOF") + } + + switch fv := v; fv.Kind() { + case reflect.Slice: + at := v.Type() + if at.Elem().Kind() == reflect.Uint8 { + // Special case for []byte + if tok.value[0] != '"' && tok.value[0] != '\'' { + // Deliberately written out here, as the error after + // this switch statement would write "invalid []byte: ...", + // which is not as user-friendly. + return p.errorf("invalid string: %v", tok.value) + } + bytes := []byte(tok.unquoted) + fv.Set(reflect.ValueOf(bytes)) + return nil + } + // Repeated field. + if tok.value == "[" { + // Repeated field with list notation, like [1,2,3]. 
+ for { + fv.Set(reflect.Append(fv, reflect.New(at.Elem()).Elem())) + err := p.readAny(fv.Index(fv.Len()-1), props) + if err != nil { + return err + } + tok := p.next() + if tok.err != nil { + return tok.err + } + if tok.value == "]" { + break + } + if tok.value != "," { + return p.errorf("Expected ']' or ',' found %q", tok.value) + } + } + return nil + } + // One value of the repeated field. + p.back() + fv.Set(reflect.Append(fv, reflect.New(at.Elem()).Elem())) + return p.readAny(fv.Index(fv.Len()-1), props) + case reflect.Bool: + // Either "true", "false", 1 or 0. + switch tok.value { + case "true", "1": + fv.SetBool(true) + return nil + case "false", "0": + fv.SetBool(false) + return nil + } + case reflect.Float32, reflect.Float64: + v := tok.value + // Ignore 'f' for compatibility with output generated by C++, but don't + // remove 'f' when the value is "-inf" or "inf". + if strings.HasSuffix(v, "f") && tok.value != "-inf" && tok.value != "inf" { + v = v[:len(v)-1] + } + if f, err := strconv.ParseFloat(v, fv.Type().Bits()); err == nil { + fv.SetFloat(f) + return nil + } + case reflect.Int32: + if x, err := strconv.ParseInt(tok.value, 0, 32); err == nil { + fv.SetInt(x) + return nil + } + + if len(props.Enum) == 0 { + break + } + m, ok := enumValueMaps[props.Enum] + if !ok { + break + } + x, ok := m[tok.value] + if !ok { + break + } + fv.SetInt(int64(x)) + return nil + case reflect.Int64: + if x, err := strconv.ParseInt(tok.value, 0, 64); err == nil { + fv.SetInt(x) + return nil + } + + case reflect.Ptr: + // A basic field (indirected through pointer), or a repeated message/group + p.back() + fv.Set(reflect.New(fv.Type().Elem())) + return p.readAny(fv.Elem(), props) + case reflect.String: + if tok.value[0] == '"' || tok.value[0] == '\'' { + fv.SetString(tok.unquoted) + return nil + } + case reflect.Struct: + var terminator string + switch tok.value { + case "{": + terminator = "}" + case "<": + terminator = ">" + default: + return p.errorf("expected '{' or '<', found %q", tok.value) + } + // TODO: Handle nested messages which implement encoding.TextUnmarshaler. + return p.readStruct(fv, terminator) + case reflect.Uint32: + if x, err := strconv.ParseUint(tok.value, 0, 32); err == nil { + fv.SetUint(uint64(x)) + return nil + } + case reflect.Uint64: + if x, err := strconv.ParseUint(tok.value, 0, 64); err == nil { + fv.SetUint(x) + return nil + } + } + return p.errorf("invalid %v: %v", v.Type(), tok.value) +} + +// UnmarshalText reads a protocol buffer in Text format. UnmarshalText resets pb +// before starting to unmarshal, so any existing data in pb is always removed. +// If a required field is not set and no other error occurs, +// UnmarshalText returns *RequiredNotSetError. 
+func UnmarshalText(s string, pb Message) error { + if um, ok := pb.(encoding.TextUnmarshaler); ok { + err := um.UnmarshalText([]byte(s)) + return err + } + pb.Reset() + v := reflect.ValueOf(pb) + if pe := newTextParser(s).readStruct(v.Elem(), ""); pe != nil { + return pe + } + return nil +} diff --git a/vendor/github.com/docker/distribution/vendor/github.com/gorilla/context/.travis.yml b/vendor/github.com/docker/distribution/vendor/github.com/gorilla/context/.travis.yml new file mode 100644 index 000000000..d87d46576 --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/github.com/gorilla/context/.travis.yml @@ -0,0 +1,7 @@ +language: go + +go: + - 1.0 + - 1.1 + - 1.2 + - tip diff --git a/vendor/github.com/docker/distribution/vendor/github.com/gorilla/context/LICENSE b/vendor/github.com/docker/distribution/vendor/github.com/gorilla/context/LICENSE new file mode 100644 index 000000000..0e5fb8728 --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/github.com/gorilla/context/LICENSE @@ -0,0 +1,27 @@ +Copyright (c) 2012 Rodrigo Moraes. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/docker/distribution/vendor/github.com/gorilla/context/README.md b/vendor/github.com/docker/distribution/vendor/github.com/gorilla/context/README.md new file mode 100644 index 000000000..c60a31b05 --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/github.com/gorilla/context/README.md @@ -0,0 +1,7 @@ +context +======= +[![Build Status](https://travis-ci.org/gorilla/context.png?branch=master)](https://travis-ci.org/gorilla/context) + +gorilla/context is a general purpose registry for global request variables. 
+ +Read the full documentation here: http://www.gorillatoolkit.org/pkg/context diff --git a/vendor/github.com/docker/distribution/vendor/github.com/gorilla/context/context.go b/vendor/github.com/docker/distribution/vendor/github.com/gorilla/context/context.go new file mode 100644 index 000000000..81cb128b1 --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/github.com/gorilla/context/context.go @@ -0,0 +1,143 @@ +// Copyright 2012 The Gorilla Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package context + +import ( + "net/http" + "sync" + "time" +) + +var ( + mutex sync.RWMutex + data = make(map[*http.Request]map[interface{}]interface{}) + datat = make(map[*http.Request]int64) +) + +// Set stores a value for a given key in a given request. +func Set(r *http.Request, key, val interface{}) { + mutex.Lock() + if data[r] == nil { + data[r] = make(map[interface{}]interface{}) + datat[r] = time.Now().Unix() + } + data[r][key] = val + mutex.Unlock() +} + +// Get returns a value stored for a given key in a given request. +func Get(r *http.Request, key interface{}) interface{} { + mutex.RLock() + if ctx := data[r]; ctx != nil { + value := ctx[key] + mutex.RUnlock() + return value + } + mutex.RUnlock() + return nil +} + +// GetOk returns stored value and presence state like multi-value return of map access. +func GetOk(r *http.Request, key interface{}) (interface{}, bool) { + mutex.RLock() + if _, ok := data[r]; ok { + value, ok := data[r][key] + mutex.RUnlock() + return value, ok + } + mutex.RUnlock() + return nil, false +} + +// GetAll returns all stored values for the request as a map. Nil is returned for invalid requests. +func GetAll(r *http.Request) map[interface{}]interface{} { + mutex.RLock() + if context, ok := data[r]; ok { + result := make(map[interface{}]interface{}, len(context)) + for k, v := range context { + result[k] = v + } + mutex.RUnlock() + return result + } + mutex.RUnlock() + return nil +} + +// GetAllOk returns all stored values for the request as a map and a boolean value that indicates if +// the request was registered. +func GetAllOk(r *http.Request) (map[interface{}]interface{}, bool) { + mutex.RLock() + context, ok := data[r] + result := make(map[interface{}]interface{}, len(context)) + for k, v := range context { + result[k] = v + } + mutex.RUnlock() + return result, ok +} + +// Delete removes a value stored for a given key in a given request. +func Delete(r *http.Request, key interface{}) { + mutex.Lock() + if data[r] != nil { + delete(data[r], key) + } + mutex.Unlock() +} + +// Clear removes all values stored for a given request. +// +// This is usually called by a handler wrapper to clean up request +// variables at the end of a request lifetime. See ClearHandler(). +func Clear(r *http.Request) { + mutex.Lock() + clear(r) + mutex.Unlock() +} + +// clear is Clear without the lock. +func clear(r *http.Request) { + delete(data, r) + delete(datat, r) +} + +// Purge removes request data stored for longer than maxAge, in seconds. +// It returns the amount of requests removed. +// +// If maxAge <= 0, all request data is removed. +// +// This is only used for sanity check: in case context cleaning was not +// properly set some request data can be kept forever, consuming an increasing +// amount of memory. In case this is detected, Purge() must be called +// periodically until the problem is fixed. 
+func Purge(maxAge int) int { + mutex.Lock() + count := 0 + if maxAge <= 0 { + count = len(data) + data = make(map[*http.Request]map[interface{}]interface{}) + datat = make(map[*http.Request]int64) + } else { + min := time.Now().Unix() - int64(maxAge) + for r := range data { + if datat[r] < min { + clear(r) + count++ + } + } + } + mutex.Unlock() + return count +} + +// ClearHandler wraps an http.Handler and clears request values at the end +// of a request lifetime. +func ClearHandler(h http.Handler) http.Handler { + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + defer Clear(r) + h.ServeHTTP(w, r) + }) +} diff --git a/vendor/github.com/docker/distribution/vendor/github.com/gorilla/context/doc.go b/vendor/github.com/docker/distribution/vendor/github.com/gorilla/context/doc.go new file mode 100644 index 000000000..73c740031 --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/github.com/gorilla/context/doc.go @@ -0,0 +1,82 @@ +// Copyright 2012 The Gorilla Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +/* +Package context stores values shared during a request lifetime. + +For example, a router can set variables extracted from the URL and later +application handlers can access those values, or it can be used to store +sessions values to be saved at the end of a request. There are several +others common uses. + +The idea was posted by Brad Fitzpatrick to the go-nuts mailing list: + + http://groups.google.com/group/golang-nuts/msg/e2d679d303aa5d53 + +Here's the basic usage: first define the keys that you will need. The key +type is interface{} so a key can be of any type that supports equality. +Here we define a key using a custom int type to avoid name collisions: + + package foo + + import ( + "github.com/gorilla/context" + ) + + type key int + + const MyKey key = 0 + +Then set a variable. Variables are bound to an http.Request object, so you +need a request instance to set a value: + + context.Set(r, MyKey, "bar") + +The application can later access the variable using the same key you provided: + + func MyHandler(w http.ResponseWriter, r *http.Request) { + // val is "bar". + val := context.Get(r, foo.MyKey) + + // returns ("bar", true) + val, ok := context.GetOk(r, foo.MyKey) + // ... + } + +And that's all about the basic usage. We discuss some other ideas below. + +Any type can be stored in the context. To enforce a given type, make the key +private and wrap Get() and Set() to accept and return values of a specific +type: + + type key int + + const mykey key = 0 + + // GetMyKey returns a value for this package from the request values. + func GetMyKey(r *http.Request) SomeType { + if rv := context.Get(r, mykey); rv != nil { + return rv.(SomeType) + } + return nil + } + + // SetMyKey sets a value for this package in the request values. + func SetMyKey(r *http.Request, val SomeType) { + context.Set(r, mykey, val) + } + +Variables must be cleared at the end of a request, to remove all values +that were stored. This can be done in an http.Handler, after a request was +served. Just call Clear() passing the request: + + context.Clear(r) + +...or use ClearHandler(), which conveniently wraps an http.Handler to clear +variables at the end of a request lifetime. + +The Routers from the packages gorilla/mux and gorilla/pat call Clear() +so if you are using either of them you don't need to clear the context manually. 
+*/ +package context diff --git a/vendor/github.com/docker/distribution/vendor/github.com/gorilla/handlers/.travis.yml b/vendor/github.com/docker/distribution/vendor/github.com/gorilla/handlers/.travis.yml new file mode 100644 index 000000000..354b7f8b2 --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/github.com/gorilla/handlers/.travis.yml @@ -0,0 +1,8 @@ +language: go + +go: + - 1.1 + - 1.2 + - 1.3 + - 1.4 + - tip diff --git a/vendor/github.com/docker/distribution/vendor/github.com/gorilla/handlers/LICENSE b/vendor/github.com/docker/distribution/vendor/github.com/gorilla/handlers/LICENSE new file mode 100644 index 000000000..66ea3c8ae --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/github.com/gorilla/handlers/LICENSE @@ -0,0 +1,22 @@ +Copyright (c) 2013 The Gorilla Handlers Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + + Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/docker/distribution/vendor/github.com/gorilla/handlers/README.md b/vendor/github.com/docker/distribution/vendor/github.com/gorilla/handlers/README.md new file mode 100644 index 000000000..a340abe08 --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/github.com/gorilla/handlers/README.md @@ -0,0 +1,52 @@ +gorilla/handlers +================ +[![GoDoc](https://godoc.org/github.com/gorilla/handlers?status.svg)](https://godoc.org/github.com/gorilla/handlers) [![Build Status](https://travis-ci.org/gorilla/handlers.svg?branch=master)](https://travis-ci.org/gorilla/handlers) + +Package handlers is a collection of handlers (aka "HTTP middleware") for use +with Go's `net/http` package (or any framework supporting `http.Handler`), including: + +* `LoggingHandler` for logging HTTP requests in the Apache [Common Log + Format](http://httpd.apache.org/docs/2.2/logs.html#common). +* `CombinedLoggingHandler` for logging HTTP requests in the Apache [Combined Log + Format](http://httpd.apache.org/docs/2.2/logs.html#combined) commonly used by + both Apache and nginx. +* `CompressHandler` for gzipping responses. +* `ContentTypeHandler` for validating requests against a list of accepted + content types. 
+* `MethodHandler` for matching HTTP methods against handlers in a + `map[string]http.Handler` +* `ProxyHeaders` for populating `r.RemoteAddr` and `r.URL.Scheme` based on the + `X-Forwarded-For`, `X-Real-IP`, `X-Forwarded-Proto` and RFC7239 `Forwarded` + headers when running a Go server behind an HTTP reverse proxy. +* `CanonicalHost` for re-directing to the preferred host when handling multiple + domains (i.e. multiple CNAME aliases). + +Other handlers are documented [on the Gorilla +website](http://www.gorillatoolkit.org/pkg/handlers). + +## Example + +A simple example using `handlers.LoggingHandler` and `handlers.CompressHandler`: + +```go +import ( + "net/http" + "github.com/gorilla/handlers" +) + +func main() { + r := http.NewServeMux() + + // Only log requests to our admin dashboard to stdout + r.Handle("/admin", handlers.LoggingHandler(os.Stdout, http.HandlerFunc(ShowAdminDashboard))) + r.HandleFunc("/", ShowIndex) + + // Wrap our server with our gzip handler to gzip compress all responses. + http.ListenAndServe(":8000", handlers.CompressHandler(r)) +} +``` + +## License + +BSD licensed. See the included LICENSE file for details. + diff --git a/vendor/github.com/docker/distribution/vendor/github.com/gorilla/handlers/canonical.go b/vendor/github.com/docker/distribution/vendor/github.com/gorilla/handlers/canonical.go new file mode 100644 index 000000000..3961695c4 --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/github.com/gorilla/handlers/canonical.go @@ -0,0 +1,71 @@ +package handlers + +import ( + "net/http" + "net/url" + "strings" +) + +type canonical struct { + h http.Handler + domain string + code int +} + +// CanonicalHost is HTTP middleware that re-directs requests to the canonical +// domain. It accepts a domain and a status code (e.g. 301 or 302) and +// re-directs clients to this domain. The existing request path is maintained. +// +// Note: If the provided domain is considered invalid by url.Parse or otherwise +// returns an empty scheme or host, clients are not re-directed. +// +// Example: +// +// r := mux.NewRouter() +// canonical := handlers.CanonicalHost("http://www.gorillatoolkit.org", 302) +// r.HandleFunc("/route", YourHandler) +// +// log.Fatal(http.ListenAndServe(":7000", canonical(r))) +// +func CanonicalHost(domain string, code int) func(h http.Handler) http.Handler { + fn := func(h http.Handler) http.Handler { + return canonical{h, domain, code} + } + + return fn +} + +func (c canonical) ServeHTTP(w http.ResponseWriter, r *http.Request) { + dest, err := url.Parse(c.domain) + if err != nil { + // Call the next handler if the provided domain fails to parse. + c.h.ServeHTTP(w, r) + return + } + + if dest.Scheme == "" || dest.Host == "" { + // Call the next handler if the scheme or host are empty. + // Note that url.Parse won't fail in this case. + c.h.ServeHTTP(w, r) + return + } + + if !strings.EqualFold(cleanHost(r.Host), dest.Host) { + // Re-build the destination URL + dest := dest.Scheme + "://" + dest.Host + r.URL.Path + http.Redirect(w, r, dest, c.code) + } + + c.h.ServeHTTP(w, r) +} + +// cleanHost cleans invalid Host headers by stripping anything after '/' or ' '. +// This is backported from Go 1.5 (in response to issue #11206) and attempts to +// mitigate malformed Host headers that do not match the format in RFC7230.
+func cleanHost(in string) string { + if i := strings.IndexAny(in, " /"); i != -1 { + return in[:i] + } + return in +} diff --git a/vendor/github.com/docker/distribution/vendor/github.com/gorilla/handlers/compress.go b/vendor/github.com/docker/distribution/vendor/github.com/gorilla/handlers/compress.go new file mode 100644 index 000000000..3d90e1914 --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/github.com/gorilla/handlers/compress.go @@ -0,0 +1,84 @@ +// Copyright 2013 The Gorilla Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package handlers + +import ( + "compress/flate" + "compress/gzip" + "io" + "net/http" + "strings" +) + +type compressResponseWriter struct { + io.Writer + http.ResponseWriter + http.Hijacker +} + +func (w *compressResponseWriter) Header() http.Header { + return w.ResponseWriter.Header() +} + +func (w *compressResponseWriter) Write(b []byte) (int, error) { + h := w.ResponseWriter.Header() + if h.Get("Content-Type") == "" { + h.Set("Content-Type", http.DetectContentType(b)) + } + + return w.Writer.Write(b) +} + +// CompressHandler gzip compresses HTTP responses for clients that support it +// via the 'Accept-Encoding' header. +func CompressHandler(h http.Handler) http.Handler { + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + L: + for _, enc := range strings.Split(r.Header.Get("Accept-Encoding"), ",") { + switch strings.TrimSpace(enc) { + case "gzip": + w.Header().Set("Content-Encoding", "gzip") + w.Header().Add("Vary", "Accept-Encoding") + + gw := gzip.NewWriter(w) + defer gw.Close() + + h, hok := w.(http.Hijacker) + if !hok { /* w is not Hijacker... oh well... */ + h = nil + } + + w = &compressResponseWriter{ + Writer: gw, + ResponseWriter: w, + Hijacker: h, + } + + break L + case "deflate": + w.Header().Set("Content-Encoding", "deflate") + w.Header().Add("Vary", "Accept-Encoding") + + fw, _ := flate.NewWriter(w, flate.DefaultCompression) + defer fw.Close() + + h, hok := w.(http.Hijacker) + if !hok { /* w is not Hijacker... oh well... */ + h = nil + } + + w = &compressResponseWriter{ + Writer: fw, + ResponseWriter: w, + Hijacker: h, + } + + break L + } + } + + h.ServeHTTP(w, r) + }) +} diff --git a/vendor/github.com/docker/distribution/vendor/github.com/gorilla/handlers/doc.go b/vendor/github.com/docker/distribution/vendor/github.com/gorilla/handlers/doc.go new file mode 100644 index 000000000..944e5a8ae --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/github.com/gorilla/handlers/doc.go @@ -0,0 +1,9 @@ +/* +Package handlers is a collection of handlers (aka "HTTP middleware") for use +with Go's net/http package (or any framework supporting http.Handler). + +The package includes handlers for logging in standardised formats, compressing +HTTP responses, validating content types and other useful tools for manipulating +requests and responses. +*/ +package handlers diff --git a/vendor/github.com/docker/distribution/vendor/github.com/gorilla/handlers/handlers.go b/vendor/github.com/docker/distribution/vendor/github.com/gorilla/handlers/handlers.go new file mode 100644 index 000000000..c3c20e5b9 --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/github.com/gorilla/handlers/handlers.go @@ -0,0 +1,378 @@ +// Copyright 2013 The Gorilla Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package handlers + +import ( + "bufio" + "fmt" + "io" + "net" + "net/http" + "net/url" + "sort" + "strconv" + "strings" + "time" + "unicode/utf8" +) + +// MethodHandler is an http.Handler that dispatches to a handler whose key in the MethodHandler's +// map matches the name of the HTTP request's method, eg: GET +// +// If the request's method is OPTIONS and OPTIONS is not a key in the map then the handler +// responds with a status of 200 and sets the Allow header to a comma-separated list of +// available methods. +// +// If the request's method doesn't match any of its keys the handler responds with +// a status of 405, Method not allowed and sets the Allow header to a comma-separated list +// of available methods. +type MethodHandler map[string]http.Handler + +func (h MethodHandler) ServeHTTP(w http.ResponseWriter, req *http.Request) { + if handler, ok := h[req.Method]; ok { + handler.ServeHTTP(w, req) + } else { + allow := []string{} + for k := range h { + allow = append(allow, k) + } + sort.Strings(allow) + w.Header().Set("Allow", strings.Join(allow, ", ")) + if req.Method == "OPTIONS" { + w.WriteHeader(http.StatusOK) + } else { + http.Error(w, "Method not allowed", http.StatusMethodNotAllowed) + } + } +} + +// loggingHandler is the http.Handler implementation for LoggingHandlerTo and its friends +type loggingHandler struct { + writer io.Writer + handler http.Handler +} + +// combinedLoggingHandler is the http.Handler implementation for LoggingHandlerTo and its friends +type combinedLoggingHandler struct { + writer io.Writer + handler http.Handler +} + +func (h loggingHandler) ServeHTTP(w http.ResponseWriter, req *http.Request) { + t := time.Now() + logger := makeLogger(w) + url := *req.URL + h.handler.ServeHTTP(logger, req) + writeLog(h.writer, req, url, t, logger.Status(), logger.Size()) +} + +func (h combinedLoggingHandler) ServeHTTP(w http.ResponseWriter, req *http.Request) { + t := time.Now() + logger := makeLogger(w) + url := *req.URL + h.handler.ServeHTTP(logger, req) + writeCombinedLog(h.writer, req, url, t, logger.Status(), logger.Size()) +} + +func makeLogger(w http.ResponseWriter) loggingResponseWriter { + var logger loggingResponseWriter = &responseLogger{w: w} + if _, ok := w.(http.Hijacker); ok { + logger = &hijackLogger{responseLogger{w: w}} + } + h, ok1 := logger.(http.Hijacker) + c, ok2 := w.(http.CloseNotifier) + if ok1 && ok2 { + return hijackCloseNotifier{logger, h, c} + } + if ok2 { + return &closeNotifyWriter{logger, c} + } + return logger +} + +type loggingResponseWriter interface { + http.ResponseWriter + http.Flusher + Status() int + Size() int +} + +// responseLogger is wrapper of http.ResponseWriter that keeps track of its HTTP status +// code and body size +type responseLogger struct { + w http.ResponseWriter + status int + size int +} + +func (l *responseLogger) Header() http.Header { + return l.w.Header() +} + +func (l *responseLogger) Write(b []byte) (int, error) { + if l.status == 0 { + // The status will be StatusOK if WriteHeader has not been called yet + l.status = http.StatusOK + } + size, err := l.w.Write(b) + l.size += size + return size, err +} + +func (l *responseLogger) WriteHeader(s int) { + l.w.WriteHeader(s) + l.status = s +} + +func (l *responseLogger) Status() int { + return l.status +} + +func (l *responseLogger) Size() int { + return l.size +} + +func (l *responseLogger) Flush() { + f, ok := l.w.(http.Flusher) + if ok { + f.Flush() + } +} + +type hijackLogger struct { + responseLogger +} + +func (l *hijackLogger) Hijack() (net.Conn, 
*bufio.ReadWriter, error) { + h := l.responseLogger.w.(http.Hijacker) + conn, rw, err := h.Hijack() + if err == nil && l.responseLogger.status == 0 { + // The status will be StatusSwitchingProtocols if there was no error and WriteHeader has not been called yet + l.responseLogger.status = http.StatusSwitchingProtocols + } + return conn, rw, err +} + +type closeNotifyWriter struct { + loggingResponseWriter + http.CloseNotifier +} + +type hijackCloseNotifier struct { + loggingResponseWriter + http.Hijacker + http.CloseNotifier +} + +const lowerhex = "0123456789abcdef" + +func appendQuoted(buf []byte, s string) []byte { + var runeTmp [utf8.UTFMax]byte + for width := 0; len(s) > 0; s = s[width:] { + r := rune(s[0]) + width = 1 + if r >= utf8.RuneSelf { + r, width = utf8.DecodeRuneInString(s) + } + if width == 1 && r == utf8.RuneError { + buf = append(buf, `\x`...) + buf = append(buf, lowerhex[s[0]>>4]) + buf = append(buf, lowerhex[s[0]&0xF]) + continue + } + if r == rune('"') || r == '\\' { // always backslashed + buf = append(buf, '\\') + buf = append(buf, byte(r)) + continue + } + if strconv.IsPrint(r) { + n := utf8.EncodeRune(runeTmp[:], r) + buf = append(buf, runeTmp[:n]...) + continue + } + switch r { + case '\a': + buf = append(buf, `\a`...) + case '\b': + buf = append(buf, `\b`...) + case '\f': + buf = append(buf, `\f`...) + case '\n': + buf = append(buf, `\n`...) + case '\r': + buf = append(buf, `\r`...) + case '\t': + buf = append(buf, `\t`...) + case '\v': + buf = append(buf, `\v`...) + default: + switch { + case r < ' ': + buf = append(buf, `\x`...) + buf = append(buf, lowerhex[s[0]>>4]) + buf = append(buf, lowerhex[s[0]&0xF]) + case r > utf8.MaxRune: + r = 0xFFFD + fallthrough + case r < 0x10000: + buf = append(buf, `\u`...) + for s := 12; s >= 0; s -= 4 { + buf = append(buf, lowerhex[r>>uint(s)&0xF]) + } + default: + buf = append(buf, `\U`...) + for s := 28; s >= 0; s -= 4 { + buf = append(buf, lowerhex[r>>uint(s)&0xF]) + } + } + } + } + return buf + +} + +// buildCommonLogLine builds a log entry for req in Apache Common Log Format. +// ts is the timestamp with which the entry should be logged. +// status and size are used to provide the response HTTP status and size. +func buildCommonLogLine(req *http.Request, url url.URL, ts time.Time, status int, size int) []byte { + username := "-" + if url.User != nil { + if name := url.User.Username(); name != "" { + username = name + } + } + + host, _, err := net.SplitHostPort(req.RemoteAddr) + + if err != nil { + host = req.RemoteAddr + } + + uri := url.RequestURI() + + buf := make([]byte, 0, 3*(len(host)+len(username)+len(req.Method)+len(uri)+len(req.Proto)+50)/2) + buf = append(buf, host...) + buf = append(buf, " - "...) + buf = append(buf, username...) + buf = append(buf, " ["...) + buf = append(buf, ts.Format("02/Jan/2006:15:04:05 -0700")...) + buf = append(buf, `] "`...) + buf = append(buf, req.Method...) + buf = append(buf, " "...) + buf = appendQuoted(buf, uri) + buf = append(buf, " "...) + buf = append(buf, req.Proto...) + buf = append(buf, `" `...) + buf = append(buf, strconv.Itoa(status)...) + buf = append(buf, " "...) + buf = append(buf, strconv.Itoa(size)...) + return buf +} + +// writeLog writes a log entry for req to w in Apache Common Log Format. +// ts is the timestamp with which the entry should be logged. +// status and size are used to provide the response HTTP status and size. 
+func writeLog(w io.Writer, req *http.Request, url url.URL, ts time.Time, status, size int) { + buf := buildCommonLogLine(req, url, ts, status, size) + buf = append(buf, '\n') + w.Write(buf) +} + +// writeCombinedLog writes a log entry for req to w in Apache Combined Log Format. +// ts is the timestamp with which the entry should be logged. +// status and size are used to provide the response HTTP status and size. +func writeCombinedLog(w io.Writer, req *http.Request, url url.URL, ts time.Time, status, size int) { + buf := buildCommonLogLine(req, url, ts, status, size) + buf = append(buf, ` "`...) + buf = appendQuoted(buf, req.Referer()) + buf = append(buf, `" "`...) + buf = appendQuoted(buf, req.UserAgent()) + buf = append(buf, '"', '\n') + w.Write(buf) +} + +// CombinedLoggingHandler return a http.Handler that wraps h and logs requests to out in +// Apache Combined Log Format. +// +// See http://httpd.apache.org/docs/2.2/logs.html#combined for a description of this format. +// +// LoggingHandler always sets the ident field of the log to - +func CombinedLoggingHandler(out io.Writer, h http.Handler) http.Handler { + return combinedLoggingHandler{out, h} +} + +// LoggingHandler return a http.Handler that wraps h and logs requests to out in +// Apache Common Log Format (CLF). +// +// See http://httpd.apache.org/docs/2.2/logs.html#common for a description of this format. +// +// LoggingHandler always sets the ident field of the log to - +func LoggingHandler(out io.Writer, h http.Handler) http.Handler { + return loggingHandler{out, h} +} + +// isContentType validates the Content-Type header +// is contentType. That is, its type and subtype match. +func isContentType(h http.Header, contentType string) bool { + ct := h.Get("Content-Type") + if i := strings.IndexRune(ct, ';'); i != -1 { + ct = ct[0:i] + } + return ct == contentType +} + +// ContentTypeHandler wraps and returns a http.Handler, validating the request content type +// is acompatible with the contentTypes list. +// It writes a HTTP 415 error if that fails. +// +// Only PUT, POST, and PATCH requests are considered. +func ContentTypeHandler(h http.Handler, contentTypes ...string) http.Handler { + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + if !(r.Method == "PUT" || r.Method == "POST" || r.Method == "PATCH") { + h.ServeHTTP(w, r) + return + } + + for _, ct := range contentTypes { + if isContentType(r.Header, ct) { + h.ServeHTTP(w, r) + return + } + } + http.Error(w, fmt.Sprintf("Unsupported content type %q; expected one of %q", r.Header.Get("Content-Type"), contentTypes), http.StatusUnsupportedMediaType) + }) +} + +const ( + // HTTPMethodOverrideHeader is a commonly used + // http header to override a request method. + HTTPMethodOverrideHeader = "X-HTTP-Method-Override" + // HTTPMethodOverrideFormKey is a commonly used + // HTML form key to override a request method. + HTTPMethodOverrideFormKey = "_method" +) + +// HTTPMethodOverrideHandler wraps and returns a http.Handler which checks for the X-HTTP-Method-Override header +// or the _method form key, and overrides (if valid) request.Method with its value. +// +// This is especially useful for http clients that don't support many http verbs. +// It isn't secure to override e.g a GET to a POST, so only POST requests are considered. +// Likewise, the override method can only be a "write" method: PUT, PATCH or DELETE. +// +// Form method takes precedence over header method. 
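The logging, content-type and method-override handlers defined in this vendored file compose as ordinary net/http middleware. As a rough usage sketch (not part of the vendored source; the handler, writer and port are illustrative), a JSON API could be wrapped like so:

    package main

    import (
        "net/http"
        "os"

        "github.com/gorilla/handlers"
    )

    func main() {
        api := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
            w.Write([]byte("ok"))
        })

        // Reject non-JSON bodies on PUT/POST/PATCH, honour X-HTTP-Method-Override,
        // and log every request to stdout in Apache Common Log Format.
        var h http.Handler = handlers.ContentTypeHandler(api, "application/json")
        h = handlers.HTTPMethodOverrideHandler(h)
        h = handlers.LoggingHandler(os.Stdout, h)

        http.ListenAndServe(":8080", h)
    }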
+func HTTPMethodOverrideHandler(h http.Handler) http.Handler { + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + if r.Method == "POST" { + om := r.FormValue(HTTPMethodOverrideFormKey) + if om == "" { + om = r.Header.Get(HTTPMethodOverrideHeader) + } + if om == "PUT" || om == "PATCH" || om == "DELETE" { + r.Method = om + } + } + h.ServeHTTP(w, r) + }) +} diff --git a/vendor/github.com/docker/distribution/vendor/github.com/gorilla/handlers/proxy_headers.go b/vendor/github.com/docker/distribution/vendor/github.com/gorilla/handlers/proxy_headers.go new file mode 100644 index 000000000..268de9c6a --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/github.com/gorilla/handlers/proxy_headers.go @@ -0,0 +1,113 @@ +package handlers + +import ( + "net/http" + "regexp" + "strings" +) + +var ( + // De-facto standard header keys. + xForwardedFor = http.CanonicalHeaderKey("X-Forwarded-For") + xRealIP = http.CanonicalHeaderKey("X-Real-IP") + xForwardedProto = http.CanonicalHeaderKey("X-Forwarded-Scheme") +) + +var ( + // RFC7239 defines a new "Forwarded: " header designed to replace the + // existing use of X-Forwarded-* headers. + // e.g. Forwarded: for=192.0.2.60;proto=https;by=203.0.113.43 + forwarded = http.CanonicalHeaderKey("Forwarded") + // Allows for a sub-match of the first value after 'for=' to the next + // comma, semi-colon or space. The match is case-insensitive. + forRegex = regexp.MustCompile(`(?i)(?:for=)([^(;|,| )]+)`) + // Allows for a sub-match for the first instance of scheme (http|https) + // prefixed by 'proto='. The match is case-insensitive. + protoRegex = regexp.MustCompile(`(?i)(?:proto=)(https|http)`) +) + +// ProxyHeaders inspects common reverse proxy headers and sets the corresponding +// fields in the HTTP request struct. These are X-Forwarded-For and X-Real-IP +// for the remote (client) IP address, X-Forwarded-Proto for the scheme +// (http|https) and the RFC7239 Forwarded header, which may include both client +// IPs and schemes. +// +// NOTE: This middleware should only be used when behind a reverse +// proxy like nginx, HAProxy or Apache. Reverse proxies that don't (or are +// configured not to) strip these headers from client requests, or where these +// headers are accepted "as is" from a remote client (e.g. when Go is not behind +// a proxy), can manifest as a vulnerability if your application uses these +// headers for validating the 'trustworthiness' of a request. +func ProxyHeaders(h http.Handler) http.Handler { + fn := func(w http.ResponseWriter, r *http.Request) { + // Set the remote IP with the value passed from the proxy. + if fwd := getIP(r); fwd != "" { + r.RemoteAddr = fwd + } + + // Set the scheme (proto) with the value passed from the proxy. + if scheme := getScheme(r); scheme != "" { + r.URL.Scheme = scheme + } + + // Call the next handler in the chain. + h.ServeHTTP(w, r) + } + + return http.HandlerFunc(fn) +} + +// getIP retrieves the IP from the X-Forwarded-For, X-Real-IP and RFC7239 +// Forwarded headers (in that order). +func getIP(r *http.Request) string { + var addr string + + if fwd := r.Header.Get(xForwardedFor); fwd != "" { + // Only grab the first (client) address. Note that '192.168.0.1, + // 10.1.1.1' is a valid key for X-Forwarded-For where addresses after + // the first may represent forwarding proxies earlier in the chain. 
+ s := strings.Index(fwd, ", ") + if s == -1 { + s = len(fwd) + } + addr = fwd[:s] + } else if fwd := r.Header.Get(xRealIP); fwd != "" { + // X-Real-IP should only contain one IP address (the client making the + // request). + addr = fwd + } else if fwd := r.Header.Get(forwarded); fwd != "" { + // match should contain at least two elements if the protocol was + // specified in the Forwarded header. The first element will always be + // the 'for=' capture, which we ignore. In the case of multiple IP + // addresses (for=8.8.8.8, 8.8.4.4,172.16.1.20 is valid) we only + // extract the first, which should be the client IP. + if match := forRegex.FindStringSubmatch(fwd); len(match) > 1 { + // IPv6 addresses in Forwarded headers are quoted-strings. We strip + // these quotes. + addr = strings.Trim(match[1], `"`) + } + } + + return addr +} + +// getScheme retrieves the scheme from the X-Forwarded-Proto and RFC7239 +// Forwarded headers (in that order). +func getScheme(r *http.Request) string { + var scheme string + + // Retrieve the scheme from X-Forwarded-Proto. + if proto := r.Header.Get(xForwardedProto); proto != "" { + scheme = strings.ToLower(proto) + } else if proto := r.Header.Get(forwarded); proto != "" { + // match should contain at least two elements if the protocol was + // specified in the Forwarded header. The first element will always be + // the 'proto=' capture, which we ignore. In the case of multiple proto + // parameters (invalid) we only extract the first. + if match := protoRegex.FindStringSubmatch(proto); len(match) > 1 { + scheme = strings.ToLower(match[1]) + } + } + + return scheme +} diff --git a/vendor/github.com/docker/distribution/vendor/github.com/gorilla/mux/.travis.yml b/vendor/github.com/docker/distribution/vendor/github.com/gorilla/mux/.travis.yml new file mode 100644 index 000000000..d87d46576 --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/github.com/gorilla/mux/.travis.yml @@ -0,0 +1,7 @@ +language: go + +go: + - 1.0 + - 1.1 + - 1.2 + - tip diff --git a/vendor/github.com/docker/distribution/vendor/github.com/gorilla/mux/LICENSE b/vendor/github.com/docker/distribution/vendor/github.com/gorilla/mux/LICENSE new file mode 100644 index 000000000..0e5fb8728 --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/github.com/gorilla/mux/LICENSE @@ -0,0 +1,27 @@ +Copyright (c) 2012 Rodrigo Moraes. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/docker/distribution/vendor/github.com/gorilla/mux/README.md b/vendor/github.com/docker/distribution/vendor/github.com/gorilla/mux/README.md new file mode 100644 index 000000000..e60301b03 --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/github.com/gorilla/mux/README.md @@ -0,0 +1,7 @@ +mux +=== +[![Build Status](https://travis-ci.org/gorilla/mux.png?branch=master)](https://travis-ci.org/gorilla/mux) + +gorilla/mux is a powerful URL router and dispatcher. + +Read the full documentation here: http://www.gorillatoolkit.org/pkg/mux diff --git a/vendor/github.com/docker/distribution/vendor/github.com/gorilla/mux/doc.go b/vendor/github.com/docker/distribution/vendor/github.com/gorilla/mux/doc.go new file mode 100644 index 000000000..b2deed34c --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/github.com/gorilla/mux/doc.go @@ -0,0 +1,199 @@ +// Copyright 2012 The Gorilla Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +/* +Package gorilla/mux implements a request router and dispatcher. + +The name mux stands for "HTTP request multiplexer". Like the standard +http.ServeMux, mux.Router matches incoming requests against a list of +registered routes and calls a handler for the route that matches the URL +or other conditions. The main features are: + + * Requests can be matched based on URL host, path, path prefix, schemes, + header and query values, HTTP methods or using custom matchers. + * URL hosts and paths can have variables with an optional regular + expression. + * Registered URLs can be built, or "reversed", which helps maintaining + references to resources. + * Routes can be used as subrouters: nested routes are only tested if the + parent route matches. This is useful to define groups of routes that + share common conditions like a host, a path prefix or other repeated + attributes. As a bonus, this optimizes request matching. + * It implements the http.Handler interface so it is compatible with the + standard http.ServeMux. + +Let's start registering a couple of URL paths and handlers: + + func main() { + r := mux.NewRouter() + r.HandleFunc("/", HomeHandler) + r.HandleFunc("/products", ProductsHandler) + r.HandleFunc("/articles", ArticlesHandler) + http.Handle("/", r) + } + +Here we register three routes mapping URL paths to handlers. This is +equivalent to how http.HandleFunc() works: if an incoming request URL matches +one of the paths, the corresponding handler is called passing +(http.ResponseWriter, *http.Request) as parameters. + +Paths can have variables. They are defined using the format {name} or +{name:pattern}. If a regular expression pattern is not defined, the matched +variable will be anything until the next slash. 
For example: + + r := mux.NewRouter() + r.HandleFunc("/products/{key}", ProductHandler) + r.HandleFunc("/articles/{category}/", ArticlesCategoryHandler) + r.HandleFunc("/articles/{category}/{id:[0-9]+}", ArticleHandler) + +The names are used to create a map of route variables which can be retrieved +calling mux.Vars(): + + vars := mux.Vars(request) + category := vars["category"] + +And this is all you need to know about the basic usage. More advanced options +are explained below. + +Routes can also be restricted to a domain or subdomain. Just define a host +pattern to be matched. They can also have variables: + + r := mux.NewRouter() + // Only matches if domain is "www.domain.com". + r.Host("www.domain.com") + // Matches a dynamic subdomain. + r.Host("{subdomain:[a-z]+}.domain.com") + +There are several other matchers that can be added. To match path prefixes: + + r.PathPrefix("/products/") + +...or HTTP methods: + + r.Methods("GET", "POST") + +...or URL schemes: + + r.Schemes("https") + +...or header values: + + r.Headers("X-Requested-With", "XMLHttpRequest") + +...or query values: + + r.Queries("key", "value") + +...or to use a custom matcher function: + + r.MatcherFunc(func(r *http.Request, rm *RouteMatch) bool { + return r.ProtoMajor == 0 + }) + +...and finally, it is possible to combine several matchers in a single route: + + r.HandleFunc("/products", ProductsHandler). + Host("www.domain.com"). + Methods("GET"). + Schemes("http") + +Setting the same matching conditions again and again can be boring, so we have +a way to group several routes that share the same requirements. +We call it "subrouting". + +For example, let's say we have several URLs that should only match when the +host is "www.domain.com". Create a route for that host and get a "subrouter" +from it: + + r := mux.NewRouter() + s := r.Host("www.domain.com").Subrouter() + +Then register routes in the subrouter: + + s.HandleFunc("/products/", ProductsHandler) + s.HandleFunc("/products/{key}", ProductHandler) + s.HandleFunc("/articles/{category}/{id:[0-9]+}"), ArticleHandler) + +The three URL paths we registered above will only be tested if the domain is +"www.domain.com", because the subrouter is tested first. This is not +only convenient, but also optimizes request matching. You can create +subrouters combining any attribute matchers accepted by a route. + +Subrouters can be used to create domain or path "namespaces": you define +subrouters in a central place and then parts of the app can register its +paths relatively to a given subrouter. + +There's one more thing about subroutes. When a subrouter has a path prefix, +the inner routes use it as base for their paths: + + r := mux.NewRouter() + s := r.PathPrefix("/products").Subrouter() + // "/products/" + s.HandleFunc("/", ProductsHandler) + // "/products/{key}/" + s.HandleFunc("/{key}/", ProductHandler) + // "/products/{key}/details" + s.HandleFunc("/{key}/details", ProductDetailsHandler) + +Now let's see how to build registered URLs. + +Routes can be named. All routes that define a name can have their URLs built, +or "reversed". We define a name calling Name() on a route. For example: + + r := mux.NewRouter() + r.HandleFunc("/articles/{category}/{id:[0-9]+}", ArticleHandler). + Name("article") + +To build a URL, get the route and call the URL() method, passing a sequence of +key/value pairs for the route variables. 
For the previous route, we would do: + + url, err := r.Get("article").URL("category", "technology", "id", "42") + +...and the result will be a url.URL with the following path: + + "/articles/technology/42" + +This also works for host variables: + + r := mux.NewRouter() + r.Host("{subdomain}.domain.com"). + Path("/articles/{category}/{id:[0-9]+}"). + HandlerFunc(ArticleHandler). + Name("article") + + // url.String() will be "http://news.domain.com/articles/technology/42" + url, err := r.Get("article").URL("subdomain", "news", + "category", "technology", + "id", "42") + +All variables defined in the route are required, and their values must +conform to the corresponding patterns. These requirements guarantee that a +generated URL will always match a registered route -- the only exception is +for explicitly defined "build-only" routes which never match. + +There's also a way to build only the URL host or path for a route: +use the methods URLHost() or URLPath() instead. For the previous route, +we would do: + + // "http://news.domain.com/" + host, err := r.Get("article").URLHost("subdomain", "news") + + // "/articles/technology/42" + path, err := r.Get("article").URLPath("category", "technology", "id", "42") + +And if you use subrouters, host and path defined separately can be built +as well: + + r := mux.NewRouter() + s := r.Host("{subdomain}.domain.com").Subrouter() + s.Path("/articles/{category}/{id:[0-9]+}"). + HandlerFunc(ArticleHandler). + Name("article") + + // "http://news.domain.com/articles/technology/42" + url, err := r.Get("article").URL("subdomain", "news", + "category", "technology", + "id", "42") +*/ +package mux diff --git a/vendor/github.com/docker/distribution/vendor/github.com/gorilla/mux/mux.go b/vendor/github.com/docker/distribution/vendor/github.com/gorilla/mux/mux.go new file mode 100644 index 000000000..5b5f8e7db --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/github.com/gorilla/mux/mux.go @@ -0,0 +1,353 @@ +// Copyright 2012 The Gorilla Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package mux + +import ( + "fmt" + "net/http" + "path" + + "github.com/gorilla/context" +) + +// NewRouter returns a new router instance. +func NewRouter() *Router { + return &Router{namedRoutes: make(map[string]*Route), KeepContext: false} +} + +// Router registers routes to be matched and dispatches a handler. +// +// It implements the http.Handler interface, so it can be registered to serve +// requests: +// +// var router = mux.NewRouter() +// +// func main() { +// http.Handle("/", router) +// } +// +// Or, for Google App Engine, register it in a init() function: +// +// func init() { +// http.Handle("/", router) +// } +// +// This will send all incoming requests to the router. +type Router struct { + // Configurable Handler to be used when no route matches. + NotFoundHandler http.Handler + // Parent route, if this is a subrouter. + parent parentRoute + // Routes to be matched, in order. + routes []*Route + // Routes by name for URL building. + namedRoutes map[string]*Route + // See Router.StrictSlash(). This defines the flag for new routes. + strictSlash bool + // If true, do not clear the request context after handling the request + KeepContext bool +} + +// Match matches registered routes against the request. 
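To make the Router and RouteMatch machinery that follows concrete, here is a minimal, self-contained sketch of the usage pattern the package documentation above describes (the handler name and port are illustrative, not part of the vendored source):

    package main

    import (
        "fmt"
        "net/http"

        "github.com/gorilla/mux"
    )

    func ArticleHandler(w http.ResponseWriter, r *http.Request) {
        // Route variables extracted from the matched route.
        vars := mux.Vars(r)
        fmt.Fprintf(w, "category=%s id=%s", vars["category"], vars["id"])
    }

    func main() {
        r := mux.NewRouter()
        r.HandleFunc("/articles/{category}/{id:[0-9]+}", ArticleHandler)
        http.ListenAndServe(":8080", r)
    }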
+func (r *Router) Match(req *http.Request, match *RouteMatch) bool { + for _, route := range r.routes { + if route.Match(req, match) { + return true + } + } + return false +} + +// ServeHTTP dispatches the handler registered in the matched route. +// +// When there is a match, the route variables can be retrieved calling +// mux.Vars(request). +func (r *Router) ServeHTTP(w http.ResponseWriter, req *http.Request) { + // Clean path to canonical form and redirect. + if p := cleanPath(req.URL.Path); p != req.URL.Path { + + // Added 3 lines (Philip Schlump) - It was droping the query string and #whatever from query. + // This matches with fix in go 1.2 r.c. 4 for same problem. Go Issue: + // http://code.google.com/p/go/issues/detail?id=5252 + url := *req.URL + url.Path = p + p = url.String() + + w.Header().Set("Location", p) + w.WriteHeader(http.StatusMovedPermanently) + return + } + var match RouteMatch + var handler http.Handler + if r.Match(req, &match) { + handler = match.Handler + setVars(req, match.Vars) + setCurrentRoute(req, match.Route) + } + if handler == nil { + handler = r.NotFoundHandler + if handler == nil { + handler = http.NotFoundHandler() + } + } + if !r.KeepContext { + defer context.Clear(req) + } + handler.ServeHTTP(w, req) +} + +// Get returns a route registered with the given name. +func (r *Router) Get(name string) *Route { + return r.getNamedRoutes()[name] +} + +// GetRoute returns a route registered with the given name. This method +// was renamed to Get() and remains here for backwards compatibility. +func (r *Router) GetRoute(name string) *Route { + return r.getNamedRoutes()[name] +} + +// StrictSlash defines the trailing slash behavior for new routes. The initial +// value is false. +// +// When true, if the route path is "/path/", accessing "/path" will redirect +// to the former and vice versa. In other words, your application will always +// see the path as specified in the route. +// +// When false, if the route path is "/path", accessing "/path/" will not match +// this route and vice versa. +// +// Special case: when a route sets a path prefix using the PathPrefix() method, +// strict slash is ignored for that route because the redirect behavior can't +// be determined from a prefix alone. However, any subrouters created from that +// route inherit the original StrictSlash setting. +func (r *Router) StrictSlash(value bool) *Router { + r.strictSlash = value + return r +} + +// ---------------------------------------------------------------------------- +// parentRoute +// ---------------------------------------------------------------------------- + +// getNamedRoutes returns the map where named routes are registered. +func (r *Router) getNamedRoutes() map[string]*Route { + if r.namedRoutes == nil { + if r.parent != nil { + r.namedRoutes = r.parent.getNamedRoutes() + } else { + r.namedRoutes = make(map[string]*Route) + } + } + return r.namedRoutes +} + +// getRegexpGroup returns regexp definitions from the parent route, if any. +func (r *Router) getRegexpGroup() *routeRegexpGroup { + if r.parent != nil { + return r.parent.getRegexpGroup() + } + return nil +} + +// ---------------------------------------------------------------------------- +// Route factories +// ---------------------------------------------------------------------------- + +// NewRoute registers an empty route. 
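A small illustration of the StrictSlash behaviour documented above (ProductsHandler is assumed to be defined elsewhere; this is a sketch, not part of the vendored source):

    r := mux.NewRouter().StrictSlash(true)
    r.HandleFunc("/products/", ProductsHandler)
    // With StrictSlash(true), a request for "/products" is redirected to "/products/".
    // With the default (false), "/products" would simply not match this route.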
+func (r *Router) NewRoute() *Route { + route := &Route{parent: r, strictSlash: r.strictSlash} + r.routes = append(r.routes, route) + return route +} + +// Handle registers a new route with a matcher for the URL path. +// See Route.Path() and Route.Handler(). +func (r *Router) Handle(path string, handler http.Handler) *Route { + return r.NewRoute().Path(path).Handler(handler) +} + +// HandleFunc registers a new route with a matcher for the URL path. +// See Route.Path() and Route.HandlerFunc(). +func (r *Router) HandleFunc(path string, f func(http.ResponseWriter, + *http.Request)) *Route { + return r.NewRoute().Path(path).HandlerFunc(f) +} + +// Headers registers a new route with a matcher for request header values. +// See Route.Headers(). +func (r *Router) Headers(pairs ...string) *Route { + return r.NewRoute().Headers(pairs...) +} + +// Host registers a new route with a matcher for the URL host. +// See Route.Host(). +func (r *Router) Host(tpl string) *Route { + return r.NewRoute().Host(tpl) +} + +// MatcherFunc registers a new route with a custom matcher function. +// See Route.MatcherFunc(). +func (r *Router) MatcherFunc(f MatcherFunc) *Route { + return r.NewRoute().MatcherFunc(f) +} + +// Methods registers a new route with a matcher for HTTP methods. +// See Route.Methods(). +func (r *Router) Methods(methods ...string) *Route { + return r.NewRoute().Methods(methods...) +} + +// Path registers a new route with a matcher for the URL path. +// See Route.Path(). +func (r *Router) Path(tpl string) *Route { + return r.NewRoute().Path(tpl) +} + +// PathPrefix registers a new route with a matcher for the URL path prefix. +// See Route.PathPrefix(). +func (r *Router) PathPrefix(tpl string) *Route { + return r.NewRoute().PathPrefix(tpl) +} + +// Queries registers a new route with a matcher for URL query values. +// See Route.Queries(). +func (r *Router) Queries(pairs ...string) *Route { + return r.NewRoute().Queries(pairs...) +} + +// Schemes registers a new route with a matcher for URL schemes. +// See Route.Schemes(). +func (r *Router) Schemes(schemes ...string) *Route { + return r.NewRoute().Schemes(schemes...) +} + +// ---------------------------------------------------------------------------- +// Context +// ---------------------------------------------------------------------------- + +// RouteMatch stores information about a matched route. +type RouteMatch struct { + Route *Route + Handler http.Handler + Vars map[string]string +} + +type contextKey int + +const ( + varsKey contextKey = iota + routeKey +) + +// Vars returns the route variables for the current request, if any. +func Vars(r *http.Request) map[string]string { + if rv := context.Get(r, varsKey); rv != nil { + return rv.(map[string]string) + } + return nil +} + +// CurrentRoute returns the matched route for the current request, if any. +func CurrentRoute(r *http.Request) *Route { + if rv := context.Get(r, routeKey); rv != nil { + return rv.(*Route) + } + return nil +} + +func setVars(r *http.Request, val interface{}) { + context.Set(r, varsKey, val) +} + +func setCurrentRoute(r *http.Request, val interface{}) { + context.Set(r, routeKey, val) +} + +// ---------------------------------------------------------------------------- +// Helpers +// ---------------------------------------------------------------------------- + +// cleanPath returns the canonical path for p, eliminating . and .. elements. +// Borrowed from the net/http package. 
+func cleanPath(p string) string { + if p == "" { + return "/" + } + if p[0] != '/' { + p = "/" + p + } + np := path.Clean(p) + // path.Clean removes trailing slash except for root; + // put the trailing slash back if necessary. + if p[len(p)-1] == '/' && np != "/" { + np += "/" + } + return np +} + +// uniqueVars returns an error if two slices contain duplicated strings. +func uniqueVars(s1, s2 []string) error { + for _, v1 := range s1 { + for _, v2 := range s2 { + if v1 == v2 { + return fmt.Errorf("mux: duplicated route variable %q", v2) + } + } + } + return nil +} + +// mapFromPairs converts variadic string parameters to a string map. +func mapFromPairs(pairs ...string) (map[string]string, error) { + length := len(pairs) + if length%2 != 0 { + return nil, fmt.Errorf( + "mux: number of parameters must be multiple of 2, got %v", pairs) + } + m := make(map[string]string, length/2) + for i := 0; i < length; i += 2 { + m[pairs[i]] = pairs[i+1] + } + return m, nil +} + +// matchInArray returns true if the given string value is in the array. +func matchInArray(arr []string, value string) bool { + for _, v := range arr { + if v == value { + return true + } + } + return false +} + +// matchMap returns true if the given key/value pairs exist in a given map. +func matchMap(toCheck map[string]string, toMatch map[string][]string, + canonicalKey bool) bool { + for k, v := range toCheck { + // Check if key exists. + if canonicalKey { + k = http.CanonicalHeaderKey(k) + } + if values := toMatch[k]; values == nil { + return false + } else if v != "" { + // If value was defined as an empty string we only check that the + // key exists. Otherwise we also check for equality. + valueExists := false + for _, value := range values { + if v == value { + valueExists = true + break + } + } + if !valueExists { + return false + } + } + } + return true +} diff --git a/vendor/github.com/docker/distribution/vendor/github.com/gorilla/mux/regexp.go b/vendor/github.com/docker/distribution/vendor/github.com/gorilla/mux/regexp.go new file mode 100644 index 000000000..a6305483d --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/github.com/gorilla/mux/regexp.go @@ -0,0 +1,276 @@ +// Copyright 2012 The Gorilla Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package mux + +import ( + "bytes" + "fmt" + "net/http" + "net/url" + "regexp" + "strings" +) + +// newRouteRegexp parses a route template and returns a routeRegexp, +// used to match a host, a path or a query string. +// +// It will extract named variables, assemble a regexp to be matched, create +// a "reverse" template to build URLs and compile regexps to validate variable +// values used in URL building. +// +// Previously we accepted only Python-like identifiers for variable +// names ([a-zA-Z_][a-zA-Z0-9_]*), but currently the only restriction is that +// name and pattern can't be empty, and names can't contain a colon. +func newRouteRegexp(tpl string, matchHost, matchPrefix, matchQuery, strictSlash bool) (*routeRegexp, error) { + // Check if it is well-formed. + idxs, errBraces := braceIndices(tpl) + if errBraces != nil { + return nil, errBraces + } + // Backup the original. + template := tpl + // Now let's parse it. 
+ defaultPattern := "[^/]+" + if matchQuery { + defaultPattern = "[^?&]+" + matchPrefix = true + } else if matchHost { + defaultPattern = "[^.]+" + matchPrefix = false + } + // Only match strict slash if not matching + if matchPrefix || matchHost || matchQuery { + strictSlash = false + } + // Set a flag for strictSlash. + endSlash := false + if strictSlash && strings.HasSuffix(tpl, "/") { + tpl = tpl[:len(tpl)-1] + endSlash = true + } + varsN := make([]string, len(idxs)/2) + varsR := make([]*regexp.Regexp, len(idxs)/2) + pattern := bytes.NewBufferString("") + if !matchQuery { + pattern.WriteByte('^') + } + reverse := bytes.NewBufferString("") + var end int + var err error + for i := 0; i < len(idxs); i += 2 { + // Set all values we are interested in. + raw := tpl[end:idxs[i]] + end = idxs[i+1] + parts := strings.SplitN(tpl[idxs[i]+1:end-1], ":", 2) + name := parts[0] + patt := defaultPattern + if len(parts) == 2 { + patt = parts[1] + } + // Name or pattern can't be empty. + if name == "" || patt == "" { + return nil, fmt.Errorf("mux: missing name or pattern in %q", + tpl[idxs[i]:end]) + } + // Build the regexp pattern. + fmt.Fprintf(pattern, "%s(%s)", regexp.QuoteMeta(raw), patt) + // Build the reverse template. + fmt.Fprintf(reverse, "%s%%s", raw) + // Append variable name and compiled pattern. + varsN[i/2] = name + varsR[i/2], err = regexp.Compile(fmt.Sprintf("^%s$", patt)) + if err != nil { + return nil, err + } + } + // Add the remaining. + raw := tpl[end:] + pattern.WriteString(regexp.QuoteMeta(raw)) + if strictSlash { + pattern.WriteString("[/]?") + } + if !matchPrefix { + pattern.WriteByte('$') + } + reverse.WriteString(raw) + if endSlash { + reverse.WriteByte('/') + } + // Compile full regexp. + reg, errCompile := regexp.Compile(pattern.String()) + if errCompile != nil { + return nil, errCompile + } + // Done! + return &routeRegexp{ + template: template, + matchHost: matchHost, + matchQuery: matchQuery, + strictSlash: strictSlash, + regexp: reg, + reverse: reverse.String(), + varsN: varsN, + varsR: varsR, + }, nil +} + +// routeRegexp stores a regexp to match a host or path and information to +// collect and validate route variables. +type routeRegexp struct { + // The unmodified template. + template string + // True for host match, false for path or query string match. + matchHost bool + // True for query string match, false for path and host match. + matchQuery bool + // The strictSlash value defined on the route, but disabled if PathPrefix was used. + strictSlash bool + // Expanded regexp. + regexp *regexp.Regexp + // Reverse template. + reverse string + // Variable names. + varsN []string + // Variable regexps (validators). + varsR []*regexp.Regexp +} + +// Match matches the regexp against the URL host or path. +func (r *routeRegexp) Match(req *http.Request, match *RouteMatch) bool { + if !r.matchHost { + if r.matchQuery { + return r.regexp.MatchString(req.URL.RawQuery) + } else { + return r.regexp.MatchString(req.URL.Path) + } + } + return r.regexp.MatchString(getHost(req)) +} + +// url builds a URL part using the given values. +func (r *routeRegexp) url(pairs ...string) (string, error) { + values, err := mapFromPairs(pairs...) + if err != nil { + return "", err + } + urlValues := make([]interface{}, len(r.varsN)) + for k, v := range r.varsN { + value, ok := values[v] + if !ok { + return "", fmt.Errorf("mux: missing route variable %q", v) + } + urlValues[k] = value + } + rv := fmt.Sprintf(r.reverse, urlValues...) 
+ if !r.regexp.MatchString(rv) { + // The URL is checked against the full regexp, instead of checking + // individual variables. This is faster but to provide a good error + // message, we check individual regexps if the URL doesn't match. + for k, v := range r.varsN { + if !r.varsR[k].MatchString(values[v]) { + return "", fmt.Errorf( + "mux: variable %q doesn't match, expected %q", values[v], + r.varsR[k].String()) + } + } + } + return rv, nil +} + +// braceIndices returns the first level curly brace indices from a string. +// It returns an error in case of unbalanced braces. +func braceIndices(s string) ([]int, error) { + var level, idx int + idxs := make([]int, 0) + for i := 0; i < len(s); i++ { + switch s[i] { + case '{': + if level++; level == 1 { + idx = i + } + case '}': + if level--; level == 0 { + idxs = append(idxs, idx, i+1) + } else if level < 0 { + return nil, fmt.Errorf("mux: unbalanced braces in %q", s) + } + } + } + if level != 0 { + return nil, fmt.Errorf("mux: unbalanced braces in %q", s) + } + return idxs, nil +} + +// ---------------------------------------------------------------------------- +// routeRegexpGroup +// ---------------------------------------------------------------------------- + +// routeRegexpGroup groups the route matchers that carry variables. +type routeRegexpGroup struct { + host *routeRegexp + path *routeRegexp + queries []*routeRegexp +} + +// setMatch extracts the variables from the URL once a route matches. +func (v *routeRegexpGroup) setMatch(req *http.Request, m *RouteMatch, r *Route) { + // Store host variables. + if v.host != nil { + hostVars := v.host.regexp.FindStringSubmatch(getHost(req)) + if hostVars != nil { + for k, v := range v.host.varsN { + m.Vars[v] = hostVars[k+1] + } + } + } + // Store path variables. + if v.path != nil { + pathVars := v.path.regexp.FindStringSubmatch(req.URL.Path) + if pathVars != nil { + for k, v := range v.path.varsN { + m.Vars[v] = pathVars[k+1] + } + // Check if we should redirect. + if v.path.strictSlash { + p1 := strings.HasSuffix(req.URL.Path, "/") + p2 := strings.HasSuffix(v.path.template, "/") + if p1 != p2 { + u, _ := url.Parse(req.URL.String()) + if p1 { + u.Path = u.Path[:len(u.Path)-1] + } else { + u.Path += "/" + } + m.Handler = http.RedirectHandler(u.String(), 301) + } + } + } + } + // Store query string variables. + rawQuery := req.URL.RawQuery + for _, q := range v.queries { + queryVars := q.regexp.FindStringSubmatch(rawQuery) + if queryVars != nil { + for k, v := range q.varsN { + m.Vars[v] = queryVars[k+1] + } + } + } +} + +// getHost tries its best to return the request host. +func getHost(r *http.Request) string { + if r.URL.IsAbs() { + return r.URL.Host + } + host := r.Host + // Slice off any port information. + if i := strings.Index(host, ":"); i != -1 { + host = host[:i] + } + return host + +} diff --git a/vendor/github.com/docker/distribution/vendor/github.com/gorilla/mux/route.go b/vendor/github.com/docker/distribution/vendor/github.com/gorilla/mux/route.go new file mode 100644 index 000000000..c310e66bc --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/github.com/gorilla/mux/route.go @@ -0,0 +1,524 @@ +// Copyright 2012 The Gorilla Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package mux + +import ( + "errors" + "fmt" + "net/http" + "net/url" + "strings" +) + +// Route stores information to match a request and build URLs. 
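The variable-pattern validation implemented by routeRegexp above is easiest to see from the caller's side. A brief sketch, assuming an ArticleHandler exists (not part of the vendored source):

    r := mux.NewRouter()
    r.HandleFunc("/articles/{category}/{id:[0-9]+}", ArticleHandler).Name("article")

    // Building a URL validates every variable against its pattern.
    u, err := r.Get("article").URL("category", "tech", "id", "42")
    // err == nil, u.Path == "/articles/tech/42"

    _, err = r.Get("article").URL("category", "tech", "id", "abc")
    // err != nil: "abc" does not match the [0-9]+ pattern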
+type Route struct { + // Parent where the route was registered (a Router). + parent parentRoute + // Request handler for the route. + handler http.Handler + // List of matchers. + matchers []matcher + // Manager for the variables from host and path. + regexp *routeRegexpGroup + // If true, when the path pattern is "/path/", accessing "/path" will + // redirect to the former and vice versa. + strictSlash bool + // If true, this route never matches: it is only used to build URLs. + buildOnly bool + // The name used to build URLs. + name string + // Error resulted from building a route. + err error +} + +// Match matches the route against the request. +func (r *Route) Match(req *http.Request, match *RouteMatch) bool { + if r.buildOnly || r.err != nil { + return false + } + // Match everything. + for _, m := range r.matchers { + if matched := m.Match(req, match); !matched { + return false + } + } + // Yay, we have a match. Let's collect some info about it. + if match.Route == nil { + match.Route = r + } + if match.Handler == nil { + match.Handler = r.handler + } + if match.Vars == nil { + match.Vars = make(map[string]string) + } + // Set variables. + if r.regexp != nil { + r.regexp.setMatch(req, match, r) + } + return true +} + +// ---------------------------------------------------------------------------- +// Route attributes +// ---------------------------------------------------------------------------- + +// GetError returns an error resulted from building the route, if any. +func (r *Route) GetError() error { + return r.err +} + +// BuildOnly sets the route to never match: it is only used to build URLs. +func (r *Route) BuildOnly() *Route { + r.buildOnly = true + return r +} + +// Handler -------------------------------------------------------------------- + +// Handler sets a handler for the route. +func (r *Route) Handler(handler http.Handler) *Route { + if r.err == nil { + r.handler = handler + } + return r +} + +// HandlerFunc sets a handler function for the route. +func (r *Route) HandlerFunc(f func(http.ResponseWriter, *http.Request)) *Route { + return r.Handler(http.HandlerFunc(f)) +} + +// GetHandler returns the handler for the route, if any. +func (r *Route) GetHandler() http.Handler { + return r.handler +} + +// Name ----------------------------------------------------------------------- + +// Name sets the name for the route, used to build URLs. +// If the name was registered already it will be overwritten. +func (r *Route) Name(name string) *Route { + if r.name != "" { + r.err = fmt.Errorf("mux: route already has name %q, can't set %q", + r.name, name) + } + if r.err == nil { + r.name = name + r.getNamedRoutes()[name] = r + } + return r +} + +// GetName returns the name for the route, if any. +func (r *Route) GetName() string { + return r.name +} + +// ---------------------------------------------------------------------------- +// Matchers +// ---------------------------------------------------------------------------- + +// matcher types try to match a request. +type matcher interface { + Match(*http.Request, *RouteMatch) bool +} + +// addMatcher adds a matcher to the route. +func (r *Route) addMatcher(m matcher) *Route { + if r.err == nil { + r.matchers = append(r.matchers, m) + } + return r +} + +// addRegexpMatcher adds a host or path matcher and builder to a route. 
+func (r *Route) addRegexpMatcher(tpl string, matchHost, matchPrefix, matchQuery bool) error { + if r.err != nil { + return r.err + } + r.regexp = r.getRegexpGroup() + if !matchHost && !matchQuery { + if len(tpl) == 0 || tpl[0] != '/' { + return fmt.Errorf("mux: path must start with a slash, got %q", tpl) + } + if r.regexp.path != nil { + tpl = strings.TrimRight(r.regexp.path.template, "/") + tpl + } + } + rr, err := newRouteRegexp(tpl, matchHost, matchPrefix, matchQuery, r.strictSlash) + if err != nil { + return err + } + for _, q := range r.regexp.queries { + if err = uniqueVars(rr.varsN, q.varsN); err != nil { + return err + } + } + if matchHost { + if r.regexp.path != nil { + if err = uniqueVars(rr.varsN, r.regexp.path.varsN); err != nil { + return err + } + } + r.regexp.host = rr + } else { + if r.regexp.host != nil { + if err = uniqueVars(rr.varsN, r.regexp.host.varsN); err != nil { + return err + } + } + if matchQuery { + r.regexp.queries = append(r.regexp.queries, rr) + } else { + r.regexp.path = rr + } + } + r.addMatcher(rr) + return nil +} + +// Headers -------------------------------------------------------------------- + +// headerMatcher matches the request against header values. +type headerMatcher map[string]string + +func (m headerMatcher) Match(r *http.Request, match *RouteMatch) bool { + return matchMap(m, r.Header, true) +} + +// Headers adds a matcher for request header values. +// It accepts a sequence of key/value pairs to be matched. For example: +// +// r := mux.NewRouter() +// r.Headers("Content-Type", "application/json", +// "X-Requested-With", "XMLHttpRequest") +// +// The above route will only match if both request header values match. +// +// It the value is an empty string, it will match any value if the key is set. +func (r *Route) Headers(pairs ...string) *Route { + if r.err == nil { + var headers map[string]string + headers, r.err = mapFromPairs(pairs...) + return r.addMatcher(headerMatcher(headers)) + } + return r +} + +// Host ----------------------------------------------------------------------- + +// Host adds a matcher for the URL host. +// It accepts a template with zero or more URL variables enclosed by {}. +// Variables can define an optional regexp pattern to me matched: +// +// - {name} matches anything until the next dot. +// +// - {name:pattern} matches the given regexp pattern. +// +// For example: +// +// r := mux.NewRouter() +// r.Host("www.domain.com") +// r.Host("{subdomain}.domain.com") +// r.Host("{subdomain:[a-z]+}.domain.com") +// +// Variable names must be unique in a given route. They can be retrieved +// calling mux.Vars(request). +func (r *Route) Host(tpl string) *Route { + r.err = r.addRegexpMatcher(tpl, true, false, false) + return r +} + +// MatcherFunc ---------------------------------------------------------------- + +// MatcherFunc is the function signature used by custom matchers. +type MatcherFunc func(*http.Request, *RouteMatch) bool + +func (m MatcherFunc) Match(r *http.Request, match *RouteMatch) bool { + return m(r, match) +} + +// MatcherFunc adds a custom function to be used as request matcher. +func (r *Route) MatcherFunc(f MatcherFunc) *Route { + return r.addMatcher(f) +} + +// Methods -------------------------------------------------------------------- + +// methodMatcher matches the request against HTTP methods. +type methodMatcher []string + +func (m methodMatcher) Match(r *http.Request, match *RouteMatch) bool { + return matchInArray(m, r.Method) +} + +// Methods adds a matcher for HTTP methods. 
+// It accepts a sequence of one or more methods to be matched, e.g.: +// "GET", "POST", "PUT". +func (r *Route) Methods(methods ...string) *Route { + for k, v := range methods { + methods[k] = strings.ToUpper(v) + } + return r.addMatcher(methodMatcher(methods)) +} + +// Path ----------------------------------------------------------------------- + +// Path adds a matcher for the URL path. +// It accepts a template with zero or more URL variables enclosed by {}. The +// template must start with a "/". +// Variables can define an optional regexp pattern to me matched: +// +// - {name} matches anything until the next slash. +// +// - {name:pattern} matches the given regexp pattern. +// +// For example: +// +// r := mux.NewRouter() +// r.Path("/products/").Handler(ProductsHandler) +// r.Path("/products/{key}").Handler(ProductsHandler) +// r.Path("/articles/{category}/{id:[0-9]+}"). +// Handler(ArticleHandler) +// +// Variable names must be unique in a given route. They can be retrieved +// calling mux.Vars(request). +func (r *Route) Path(tpl string) *Route { + r.err = r.addRegexpMatcher(tpl, false, false, false) + return r +} + +// PathPrefix ----------------------------------------------------------------- + +// PathPrefix adds a matcher for the URL path prefix. This matches if the given +// template is a prefix of the full URL path. See Route.Path() for details on +// the tpl argument. +// +// Note that it does not treat slashes specially ("/foobar/" will be matched by +// the prefix "/foo") so you may want to use a trailing slash here. +// +// Also note that the setting of Router.StrictSlash() has no effect on routes +// with a PathPrefix matcher. +func (r *Route) PathPrefix(tpl string) *Route { + r.err = r.addRegexpMatcher(tpl, false, true, false) + return r +} + +// Query ---------------------------------------------------------------------- + +// Queries adds a matcher for URL query values. +// It accepts a sequence of key/value pairs. Values may define variables. +// For example: +// +// r := mux.NewRouter() +// r.Queries("foo", "bar", "id", "{id:[0-9]+}") +// +// The above route will only match if the URL contains the defined queries +// values, e.g.: ?foo=bar&id=42. +// +// It the value is an empty string, it will match any value if the key is set. +// +// Variables can define an optional regexp pattern to me matched: +// +// - {name} matches anything until the next slash. +// +// - {name:pattern} matches the given regexp pattern. +func (r *Route) Queries(pairs ...string) *Route { + length := len(pairs) + if length%2 != 0 { + r.err = fmt.Errorf( + "mux: number of parameters must be multiple of 2, got %v", pairs) + return nil + } + for i := 0; i < length; i += 2 { + if r.err = r.addRegexpMatcher(pairs[i]+"="+pairs[i+1], false, true, true); r.err != nil { + return r + } + } + + return r +} + +// Schemes -------------------------------------------------------------------- + +// schemeMatcher matches the request against URL schemes. +type schemeMatcher []string + +func (m schemeMatcher) Match(r *http.Request, match *RouteMatch) bool { + return matchInArray(m, r.URL.Scheme) +} + +// Schemes adds a matcher for URL schemes. +// It accepts a sequence of schemes to be matched, e.g.: "http", "https". 
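Because each matcher method in this file returns the route, several conditions can be chained on a single registration. A short sketch combining them (ProductsHandler assumed; illustrative only):

    r := mux.NewRouter()
    r.HandleFunc("/products", ProductsHandler).
        Methods("GET", "POST").
        Headers("X-Requested-With", "XMLHttpRequest").
        Queries("version", "{version:[0-9]+}").
        Schemes("https")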
+func (r *Route) Schemes(schemes ...string) *Route { + for k, v := range schemes { + schemes[k] = strings.ToLower(v) + } + return r.addMatcher(schemeMatcher(schemes)) +} + +// Subrouter ------------------------------------------------------------------ + +// Subrouter creates a subrouter for the route. +// +// It will test the inner routes only if the parent route matched. For example: +// +// r := mux.NewRouter() +// s := r.Host("www.domain.com").Subrouter() +// s.HandleFunc("/products/", ProductsHandler) +// s.HandleFunc("/products/{key}", ProductHandler) +// s.HandleFunc("/articles/{category}/{id:[0-9]+}"), ArticleHandler) +// +// Here, the routes registered in the subrouter won't be tested if the host +// doesn't match. +func (r *Route) Subrouter() *Router { + router := &Router{parent: r, strictSlash: r.strictSlash} + r.addMatcher(router) + return router +} + +// ---------------------------------------------------------------------------- +// URL building +// ---------------------------------------------------------------------------- + +// URL builds a URL for the route. +// +// It accepts a sequence of key/value pairs for the route variables. For +// example, given this route: +// +// r := mux.NewRouter() +// r.HandleFunc("/articles/{category}/{id:[0-9]+}", ArticleHandler). +// Name("article") +// +// ...a URL for it can be built using: +// +// url, err := r.Get("article").URL("category", "technology", "id", "42") +// +// ...which will return an url.URL with the following path: +// +// "/articles/technology/42" +// +// This also works for host variables: +// +// r := mux.NewRouter() +// r.Host("{subdomain}.domain.com"). +// HandleFunc("/articles/{category}/{id:[0-9]+}", ArticleHandler). +// Name("article") +// +// // url.String() will be "http://news.domain.com/articles/technology/42" +// url, err := r.Get("article").URL("subdomain", "news", +// "category", "technology", +// "id", "42") +// +// All variables defined in the route are required, and their values must +// conform to the corresponding patterns. +func (r *Route) URL(pairs ...string) (*url.URL, error) { + if r.err != nil { + return nil, r.err + } + if r.regexp == nil { + return nil, errors.New("mux: route doesn't have a host or path") + } + var scheme, host, path string + var err error + if r.regexp.host != nil { + // Set a default scheme. + scheme = "http" + if host, err = r.regexp.host.url(pairs...); err != nil { + return nil, err + } + } + if r.regexp.path != nil { + if path, err = r.regexp.path.url(pairs...); err != nil { + return nil, err + } + } + return &url.URL{ + Scheme: scheme, + Host: host, + Path: path, + }, nil +} + +// URLHost builds the host part of the URL for a route. See Route.URL(). +// +// The route must have a host defined. +func (r *Route) URLHost(pairs ...string) (*url.URL, error) { + if r.err != nil { + return nil, r.err + } + if r.regexp == nil || r.regexp.host == nil { + return nil, errors.New("mux: route doesn't have a host") + } + host, err := r.regexp.host.url(pairs...) + if err != nil { + return nil, err + } + return &url.URL{ + Scheme: "http", + Host: host, + }, nil +} + +// URLPath builds the path part of the URL for a route. See Route.URL(). +// +// The route must have a path defined. +func (r *Route) URLPath(pairs ...string) (*url.URL, error) { + if r.err != nil { + return nil, r.err + } + if r.regexp == nil || r.regexp.path == nil { + return nil, errors.New("mux: route doesn't have a path") + } + path, err := r.regexp.path.url(pairs...) 
+ if err != nil { + return nil, err + } + return &url.URL{ + Path: path, + }, nil +} + +// ---------------------------------------------------------------------------- +// parentRoute +// ---------------------------------------------------------------------------- + +// parentRoute allows routes to know about parent host and path definitions. +type parentRoute interface { + getNamedRoutes() map[string]*Route + getRegexpGroup() *routeRegexpGroup +} + +// getNamedRoutes returns the map where named routes are registered. +func (r *Route) getNamedRoutes() map[string]*Route { + if r.parent == nil { + // During tests router is not always set. + r.parent = NewRouter() + } + return r.parent.getNamedRoutes() +} + +// getRegexpGroup returns regexp definitions from this route. +func (r *Route) getRegexpGroup() *routeRegexpGroup { + if r.regexp == nil { + if r.parent == nil { + // During tests router is not always set. + r.parent = NewRouter() + } + regexp := r.parent.getRegexpGroup() + if regexp == nil { + r.regexp = new(routeRegexpGroup) + } else { + // Copy. + r.regexp = &routeRegexpGroup{ + host: regexp.host, + path: regexp.path, + queries: regexp.queries, + } + } + } + return r.regexp +} diff --git a/vendor/github.com/docker/distribution/vendor/github.com/inconshreveable/mousetrap/LICENSE b/vendor/github.com/docker/distribution/vendor/github.com/inconshreveable/mousetrap/LICENSE new file mode 100644 index 000000000..5f0d1fb6a --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/github.com/inconshreveable/mousetrap/LICENSE @@ -0,0 +1,13 @@ +Copyright 2014 Alan Shreve + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. diff --git a/vendor/github.com/docker/distribution/vendor/github.com/inconshreveable/mousetrap/README.md b/vendor/github.com/docker/distribution/vendor/github.com/inconshreveable/mousetrap/README.md new file mode 100644 index 000000000..7a950d177 --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/github.com/inconshreveable/mousetrap/README.md @@ -0,0 +1,23 @@ +# mousetrap + +mousetrap is a tiny library that answers a single question. + +On a Windows machine, was the process invoked by someone double clicking on +the executable file while browsing in explorer? + +### Motivation + +Windows developers unfamiliar with command line tools will often "double-click" +the executable for a tool. Because most CLI tools print the help and then exit +when invoked without arguments, this is often very frustrating for those users. + +mousetrap provides a way to detect these invocations so that you can provide +more helpful behavior and instructions on how to run the CLI tool. 
To see what +this looks like, both from an organizational and a technical perspective, see +https://inconshreveable.com/09-09-2014/sweat-the-small-stuff/ + +### The interface + +The library exposes a single interface: + + func StartedByExplorer() (bool) diff --git a/vendor/github.com/docker/distribution/vendor/github.com/inconshreveable/mousetrap/trap_others.go b/vendor/github.com/docker/distribution/vendor/github.com/inconshreveable/mousetrap/trap_others.go new file mode 100644 index 000000000..9d2d8a4ba --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/github.com/inconshreveable/mousetrap/trap_others.go @@ -0,0 +1,15 @@ +// +build !windows + +package mousetrap + +// StartedByExplorer returns true if the program was invoked by the user +// double-clicking on the executable from explorer.exe +// +// It is conservative and returns false if any of the internal calls fail. +// It does not guarantee that the program was run from a terminal. It only can tell you +// whether it was launched from explorer.exe +// +// On non-Windows platforms, it always returns false. +func StartedByExplorer() bool { + return false +} diff --git a/vendor/github.com/docker/distribution/vendor/github.com/inconshreveable/mousetrap/trap_windows.go b/vendor/github.com/docker/distribution/vendor/github.com/inconshreveable/mousetrap/trap_windows.go new file mode 100644 index 000000000..336142a5e --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/github.com/inconshreveable/mousetrap/trap_windows.go @@ -0,0 +1,98 @@ +// +build windows +// +build !go1.4 + +package mousetrap + +import ( + "fmt" + "os" + "syscall" + "unsafe" +) + +const ( + // defined by the Win32 API + th32cs_snapprocess uintptr = 0x2 +) + +var ( + kernel = syscall.MustLoadDLL("kernel32.dll") + CreateToolhelp32Snapshot = kernel.MustFindProc("CreateToolhelp32Snapshot") + Process32First = kernel.MustFindProc("Process32FirstW") + Process32Next = kernel.MustFindProc("Process32NextW") +) + +// ProcessEntry32 structure defined by the Win32 API +type processEntry32 struct { + dwSize uint32 + cntUsage uint32 + th32ProcessID uint32 + th32DefaultHeapID int + th32ModuleID uint32 + cntThreads uint32 + th32ParentProcessID uint32 + pcPriClassBase int32 + dwFlags uint32 + szExeFile [syscall.MAX_PATH]uint16 +} + +func getProcessEntry(pid int) (pe *processEntry32, err error) { + snapshot, _, e1 := CreateToolhelp32Snapshot.Call(th32cs_snapprocess, uintptr(0)) + if snapshot == uintptr(syscall.InvalidHandle) { + err = fmt.Errorf("CreateToolhelp32Snapshot: %v", e1) + return + } + defer syscall.CloseHandle(syscall.Handle(snapshot)) + + var processEntry processEntry32 + processEntry.dwSize = uint32(unsafe.Sizeof(processEntry)) + ok, _, e1 := Process32First.Call(snapshot, uintptr(unsafe.Pointer(&processEntry))) + if ok == 0 { + err = fmt.Errorf("Process32First: %v", e1) + return + } + + for { + if processEntry.th32ProcessID == uint32(pid) { + pe = &processEntry + return + } + + ok, _, e1 = Process32Next.Call(snapshot, uintptr(unsafe.Pointer(&processEntry))) + if ok == 0 { + err = fmt.Errorf("Process32Next: %v", e1) + return + } + } +} + +func getppid() (pid int, err error) { + pe, err := getProcessEntry(os.Getpid()) + if err != nil { + return + } + + pid = int(pe.th32ParentProcessID) + return +} + +// StartedByExplorer returns true if the program was invoked by the user double-clicking +// on the executable from explorer.exe +// +// It is conservative and returns false if any of the internal calls fail. 
+// It does not guarantee that the program was run from a terminal. It only can tell you +// whether it was launched from explorer.exe +func StartedByExplorer() bool { + ppid, err := getppid() + if err != nil { + return false + } + + pe, err := getProcessEntry(ppid) + if err != nil { + return false + } + + name := syscall.UTF16ToString(pe.szExeFile[:]) + return name == "explorer.exe" +} diff --git a/vendor/github.com/docker/distribution/vendor/github.com/inconshreveable/mousetrap/trap_windows_1.4.go b/vendor/github.com/docker/distribution/vendor/github.com/inconshreveable/mousetrap/trap_windows_1.4.go new file mode 100644 index 000000000..9a28e57c3 --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/github.com/inconshreveable/mousetrap/trap_windows_1.4.go @@ -0,0 +1,46 @@ +// +build windows +// +build go1.4 + +package mousetrap + +import ( + "os" + "syscall" + "unsafe" +) + +func getProcessEntry(pid int) (*syscall.ProcessEntry32, error) { + snapshot, err := syscall.CreateToolhelp32Snapshot(syscall.TH32CS_SNAPPROCESS, 0) + if err != nil { + return nil, err + } + defer syscall.CloseHandle(snapshot) + var procEntry syscall.ProcessEntry32 + procEntry.Size = uint32(unsafe.Sizeof(procEntry)) + if err = syscall.Process32First(snapshot, &procEntry); err != nil { + return nil, err + } + for { + if procEntry.ProcessID == uint32(pid) { + return &procEntry, nil + } + err = syscall.Process32Next(snapshot, &procEntry) + if err != nil { + return nil, err + } + } +} + +// StartedByExplorer returns true if the program was invoked by the user double-clicking +// on the executable from explorer.exe +// +// It is conservative and returns false if any of the internal calls fail. +// It does not guarantee that the program was run from a terminal. It only can tell you +// whether it was launched from explorer.exe +func StartedByExplorer() bool { + pe, err := getProcessEntry(os.Getppid()) + if err != nil { + return false + } + return "explorer.exe" == syscall.UTF16ToString(pe.ExeFile[:]) +} diff --git a/vendor/github.com/docker/distribution/vendor/github.com/mitchellh/mapstructure/.travis.yml b/vendor/github.com/docker/distribution/vendor/github.com/mitchellh/mapstructure/.travis.yml new file mode 100644 index 000000000..7f3fe9a96 --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/github.com/mitchellh/mapstructure/.travis.yml @@ -0,0 +1,7 @@ +language: go + +go: + - 1.4 + +script: + - go test diff --git a/vendor/github.com/docker/distribution/vendor/github.com/mitchellh/mapstructure/LICENSE b/vendor/github.com/docker/distribution/vendor/github.com/mitchellh/mapstructure/LICENSE new file mode 100644 index 000000000..f9c841a51 --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/github.com/mitchellh/mapstructure/LICENSE @@ -0,0 +1,21 @@ +The MIT License (MIT) + +Copyright (c) 2013 Mitchell Hashimoto + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. 
+ +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. diff --git a/vendor/github.com/docker/distribution/vendor/github.com/mitchellh/mapstructure/README.md b/vendor/github.com/docker/distribution/vendor/github.com/mitchellh/mapstructure/README.md new file mode 100644 index 000000000..659d6885f --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/github.com/mitchellh/mapstructure/README.md @@ -0,0 +1,46 @@ +# mapstructure + +mapstructure is a Go library for decoding generic map values to structures +and vice versa, while providing helpful error handling. + +This library is most useful when decoding values from some data stream (JSON, +Gob, etc.) where you don't _quite_ know the structure of the underlying data +until you read a part of it. You can therefore read a `map[string]interface{}` +and use this library to decode it into the proper underlying native Go +structure. + +## Installation + +Standard `go get`: + +``` +$ go get github.com/mitchellh/mapstructure +``` + +## Usage & Example + +For usage and examples see the [Godoc](http://godoc.org/github.com/mitchellh/mapstructure). + +The `Decode` function has examples associated with it there. + +## But Why?! + +Go offers fantastic standard libraries for decoding formats such as JSON. +The standard method is to have a struct pre-created, and populate that struct +from the bytes of the encoded format. This is great, but the problem is if +you have configuration or an encoding that changes slightly depending on +specific fields. For example, consider this JSON: + +```json +{ + "type": "person", + "name": "Mitchell" +} +``` + +Perhaps we can't populate a specific structure without first reading +the "type" field from the JSON. We could always do two passes over the +decoding of the JSON (reading the "type" first, and the rest later). +However, it is much simpler to just decode this into a `map[string]interface{}` +structure, read the "type" key, then use something like this library +to decode it into the proper structure. diff --git a/vendor/github.com/docker/distribution/vendor/github.com/mitchellh/mapstructure/decode_hooks.go b/vendor/github.com/docker/distribution/vendor/github.com/mitchellh/mapstructure/decode_hooks.go new file mode 100644 index 000000000..aa91f76ce --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/github.com/mitchellh/mapstructure/decode_hooks.go @@ -0,0 +1,151 @@ +package mapstructure + +import ( + "errors" + "reflect" + "strconv" + "strings" + "time" +) + +// typedDecodeHook takes a raw DecodeHookFunc (an interface{}) and turns +// it into the proper DecodeHookFunc type, such as DecodeHookFuncType. +func typedDecodeHook(h DecodeHookFunc) DecodeHookFunc { + // Create variables here so we can reference them with the reflect pkg + var f1 DecodeHookFuncType + var f2 DecodeHookFuncKind + + // Fill in the variables into this interface and the rest is done + // automatically using the reflect package. 
+ potential := []interface{}{f1, f2} + + v := reflect.ValueOf(h) + vt := v.Type() + for _, raw := range potential { + pt := reflect.ValueOf(raw).Type() + if vt.ConvertibleTo(pt) { + return v.Convert(pt).Interface() + } + } + + return nil +} + +// DecodeHookExec executes the given decode hook. This should be used +// since it'll naturally degrade to the older backwards compatible DecodeHookFunc +// that took reflect.Kind instead of reflect.Type. +func DecodeHookExec( + raw DecodeHookFunc, + from reflect.Type, to reflect.Type, + data interface{}) (interface{}, error) { + // Build our arguments that reflect expects + argVals := make([]reflect.Value, 3) + argVals[0] = reflect.ValueOf(from) + argVals[1] = reflect.ValueOf(to) + argVals[2] = reflect.ValueOf(data) + + switch f := typedDecodeHook(raw).(type) { + case DecodeHookFuncType: + return f(from, to, data) + case DecodeHookFuncKind: + return f(from.Kind(), to.Kind(), data) + default: + return nil, errors.New("invalid decode hook signature") + } +} + +// ComposeDecodeHookFunc creates a single DecodeHookFunc that +// automatically composes multiple DecodeHookFuncs. +// +// The composed funcs are called in order, with the result of the +// previous transformation. +func ComposeDecodeHookFunc(fs ...DecodeHookFunc) DecodeHookFunc { + return func( + f reflect.Type, + t reflect.Type, + data interface{}) (interface{}, error) { + var err error + for _, f1 := range fs { + data, err = DecodeHookExec(f1, f, t, data) + if err != nil { + return nil, err + } + + // Modify the from kind to be correct with the new data + f = reflect.ValueOf(data).Type() + } + + return data, nil + } +} + +// StringToSliceHookFunc returns a DecodeHookFunc that converts +// string to []string by splitting on the given sep. +func StringToSliceHookFunc(sep string) DecodeHookFunc { + return func( + f reflect.Kind, + t reflect.Kind, + data interface{}) (interface{}, error) { + if f != reflect.String || t != reflect.Slice { + return data, nil + } + + raw := data.(string) + if raw == "" { + return []string{}, nil + } + + return strings.Split(raw, sep), nil + } +} + +// StringToTimeDurationHookFunc returns a DecodeHookFunc that converts +// strings to time.Duration. 
+func StringToTimeDurationHookFunc() DecodeHookFunc { + return func( + f reflect.Type, + t reflect.Type, + data interface{}) (interface{}, error) { + if f.Kind() != reflect.String { + return data, nil + } + if t != reflect.TypeOf(time.Duration(5)) { + return data, nil + } + + // Convert it by parsing + return time.ParseDuration(data.(string)) + } +} + +func WeaklyTypedHook( + f reflect.Kind, + t reflect.Kind, + data interface{}) (interface{}, error) { + dataVal := reflect.ValueOf(data) + switch t { + case reflect.String: + switch f { + case reflect.Bool: + if dataVal.Bool() { + return "1", nil + } else { + return "0", nil + } + case reflect.Float32: + return strconv.FormatFloat(dataVal.Float(), 'f', -1, 64), nil + case reflect.Int: + return strconv.FormatInt(dataVal.Int(), 10), nil + case reflect.Slice: + dataType := dataVal.Type() + elemKind := dataType.Elem().Kind() + if elemKind == reflect.Uint8 { + return string(dataVal.Interface().([]uint8)), nil + } + case reflect.Uint: + return strconv.FormatUint(dataVal.Uint(), 10), nil + } + } + + return data, nil +} diff --git a/vendor/github.com/docker/distribution/vendor/github.com/mitchellh/mapstructure/error.go b/vendor/github.com/docker/distribution/vendor/github.com/mitchellh/mapstructure/error.go new file mode 100644 index 000000000..47a99e5af --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/github.com/mitchellh/mapstructure/error.go @@ -0,0 +1,50 @@ +package mapstructure + +import ( + "errors" + "fmt" + "sort" + "strings" +) + +// Error implements the error interface and can represents multiple +// errors that occur in the course of a single decode. +type Error struct { + Errors []string +} + +func (e *Error) Error() string { + points := make([]string, len(e.Errors)) + for i, err := range e.Errors { + points[i] = fmt.Sprintf("* %s", err) + } + + sort.Strings(points) + return fmt.Sprintf( + "%d error(s) decoding:\n\n%s", + len(e.Errors), strings.Join(points, "\n")) +} + +// WrappedErrors implements the errwrap.Wrapper interface to make this +// return value more useful with the errwrap and go-multierror libraries. +func (e *Error) WrappedErrors() []error { + if e == nil { + return nil + } + + result := make([]error, len(e.Errors)) + for i, e := range e.Errors { + result[i] = errors.New(e) + } + + return result +} + +func appendErrors(errors []string, err error) []string { + switch e := err.(type) { + case *Error: + return append(errors, e.Errors...) + default: + return append(errors, e.Error()) + } +} diff --git a/vendor/github.com/docker/distribution/vendor/github.com/mitchellh/mapstructure/mapstructure.go b/vendor/github.com/docker/distribution/vendor/github.com/mitchellh/mapstructure/mapstructure.go new file mode 100644 index 000000000..d3cb4e8f6 --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/github.com/mitchellh/mapstructure/mapstructure.go @@ -0,0 +1,746 @@ +// The mapstructure package exposes functionality to convert an +// abitrary map[string]interface{} into a native Go structure. +// +// The Go structure can be arbitrarily complex, containing slices, +// other structs, etc. and the decoder will properly decode nested +// maps and so on into the proper structures in the native Go struct. +// See the examples to see what the decoder is capable of. +package mapstructure + +import ( + "errors" + "fmt" + "reflect" + "sort" + "strconv" + "strings" +) + +// DecodeHookFunc is the callback function that can be used for +// data transformations. See "DecodeHook" in the DecoderConfig +// struct. 
+// +// The type should be DecodeHookFuncType or DecodeHookFuncKind. +// Either is accepted. Types are a superset of Kinds (Types can return +// Kinds) and are generally a richer thing to use, but Kinds are simpler +// if you only need those. +// +// The reason DecodeHookFunc is multi-typed is for backwards compatibility: +// we started with Kinds and then realized Types were the better solution, +// but have a promise to not break backwards compat so we now support +// both. +type DecodeHookFunc interface{} + +type DecodeHookFuncType func(reflect.Type, reflect.Type, interface{}) (interface{}, error) +type DecodeHookFuncKind func(reflect.Kind, reflect.Kind, interface{}) (interface{}, error) + +// DecoderConfig is the configuration that is used to create a new decoder +// and allows customization of various aspects of decoding. +type DecoderConfig struct { + // DecodeHook, if set, will be called before any decoding and any + // type conversion (if WeaklyTypedInput is on). This lets you modify + // the values before they're set down onto the resulting struct. + // + // If an error is returned, the entire decode will fail with that + // error. + DecodeHook DecodeHookFunc + + // If ErrorUnused is true, then it is an error for there to exist + // keys in the original map that were unused in the decoding process + // (extra keys). + ErrorUnused bool + + // ZeroFields, if set to true, will zero fields before writing them. + // For example, a map will be emptied before decoded values are put in + // it. If this is false, a map will be merged. + ZeroFields bool + + // If WeaklyTypedInput is true, the decoder will make the following + // "weak" conversions: + // + // - bools to string (true = "1", false = "0") + // - numbers to string (base 10) + // - bools to int/uint (true = 1, false = 0) + // - strings to int/uint (base implied by prefix) + // - int to bool (true if value != 0) + // - string to bool (accepts: 1, t, T, TRUE, true, True, 0, f, F, + // FALSE, false, False. Anything else is an error) + // - empty array = empty map and vice versa + // - negative numbers to overflowed uint values (base 10) + // + WeaklyTypedInput bool + + // Metadata is the struct that will contain extra metadata about + // the decoding. If this is nil, then no metadata will be tracked. + Metadata *Metadata + + // Result is a pointer to the struct that will contain the decoded + // value. + Result interface{} + + // The tag name that mapstructure reads for field names. This + // defaults to "mapstructure" + TagName string +} + +// A Decoder takes a raw interface value and turns it into structured +// data, keeping track of rich error information along the way in case +// anything goes wrong. Unlike the basic top-level Decode method, you can +// more finely control how the Decoder behaves using the DecoderConfig +// structure. The top-level Decode method is just a convenience that sets +// up the most basic Decoder. +type Decoder struct { + config *DecoderConfig +} + +// Metadata contains information about decoding a structure that +// is tedious or difficult to get otherwise. +type Metadata struct { + // Keys are the keys of the structure which were successfully decoded + Keys []string + + // Unused is a slice of keys that were found in the raw value but + // weren't decoded since there was no matching field in the result interface + Unused []string +} + +// Decode takes a map and uses reflection to convert it into the +// given Go native structure. val must be a pointer to a struct. 
+func Decode(m interface{}, rawVal interface{}) error { + config := &DecoderConfig{ + Metadata: nil, + Result: rawVal, + } + + decoder, err := NewDecoder(config) + if err != nil { + return err + } + + return decoder.Decode(m) +} + +// WeakDecode is the same as Decode but is shorthand to enable +// WeaklyTypedInput. See DecoderConfig for more info. +func WeakDecode(input, output interface{}) error { + config := &DecoderConfig{ + Metadata: nil, + Result: output, + WeaklyTypedInput: true, + } + + decoder, err := NewDecoder(config) + if err != nil { + return err + } + + return decoder.Decode(input) +} + +// NewDecoder returns a new decoder for the given configuration. Once +// a decoder has been returned, the same configuration must not be used +// again. +func NewDecoder(config *DecoderConfig) (*Decoder, error) { + val := reflect.ValueOf(config.Result) + if val.Kind() != reflect.Ptr { + return nil, errors.New("result must be a pointer") + } + + val = val.Elem() + if !val.CanAddr() { + return nil, errors.New("result must be addressable (a pointer)") + } + + if config.Metadata != nil { + if config.Metadata.Keys == nil { + config.Metadata.Keys = make([]string, 0) + } + + if config.Metadata.Unused == nil { + config.Metadata.Unused = make([]string, 0) + } + } + + if config.TagName == "" { + config.TagName = "mapstructure" + } + + result := &Decoder{ + config: config, + } + + return result, nil +} + +// Decode decodes the given raw interface to the target pointer specified +// by the configuration. +func (d *Decoder) Decode(raw interface{}) error { + return d.decode("", raw, reflect.ValueOf(d.config.Result).Elem()) +} + +// Decodes an unknown data type into a specific reflection value. +func (d *Decoder) decode(name string, data interface{}, val reflect.Value) error { + if data == nil { + // If the data is nil, then we don't set anything. + return nil + } + + dataVal := reflect.ValueOf(data) + if !dataVal.IsValid() { + // If the data value is invalid, then we just set the value + // to be the zero value. + val.Set(reflect.Zero(val.Type())) + return nil + } + + if d.config.DecodeHook != nil { + // We have a DecodeHook, so let's pre-process the data. + var err error + data, err = DecodeHookExec( + d.config.DecodeHook, + dataVal.Type(), val.Type(), data) + if err != nil { + return err + } + } + + var err error + dataKind := getKind(val) + switch dataKind { + case reflect.Bool: + err = d.decodeBool(name, data, val) + case reflect.Interface: + err = d.decodeBasic(name, data, val) + case reflect.String: + err = d.decodeString(name, data, val) + case reflect.Int: + err = d.decodeInt(name, data, val) + case reflect.Uint: + err = d.decodeUint(name, data, val) + case reflect.Float32: + err = d.decodeFloat(name, data, val) + case reflect.Struct: + err = d.decodeStruct(name, data, val) + case reflect.Map: + err = d.decodeMap(name, data, val) + case reflect.Ptr: + err = d.decodePtr(name, data, val) + case reflect.Slice: + err = d.decodeSlice(name, data, val) + default: + // If we reached this point then we weren't able to decode it + return fmt.Errorf("%s: unsupported type: %s", name, dataKind) + } + + // If we reached here, then we successfully decoded SOMETHING, so + // mark the key as used if we're tracking metadata. + if d.config.Metadata != nil && name != "" { + d.config.Metadata.Keys = append(d.config.Metadata.Keys, name) + } + + return err +} + +// This decodes a basic type (bool, int, string, etc.) and sets the +// value to "data" of that type. 
+func (d *Decoder) decodeBasic(name string, data interface{}, val reflect.Value) error { + dataVal := reflect.ValueOf(data) + dataValType := dataVal.Type() + if !dataValType.AssignableTo(val.Type()) { + return fmt.Errorf( + "'%s' expected type '%s', got '%s'", + name, val.Type(), dataValType) + } + + val.Set(dataVal) + return nil +} + +func (d *Decoder) decodeString(name string, data interface{}, val reflect.Value) error { + dataVal := reflect.ValueOf(data) + dataKind := getKind(dataVal) + + converted := true + switch { + case dataKind == reflect.String: + val.SetString(dataVal.String()) + case dataKind == reflect.Bool && d.config.WeaklyTypedInput: + if dataVal.Bool() { + val.SetString("1") + } else { + val.SetString("0") + } + case dataKind == reflect.Int && d.config.WeaklyTypedInput: + val.SetString(strconv.FormatInt(dataVal.Int(), 10)) + case dataKind == reflect.Uint && d.config.WeaklyTypedInput: + val.SetString(strconv.FormatUint(dataVal.Uint(), 10)) + case dataKind == reflect.Float32 && d.config.WeaklyTypedInput: + val.SetString(strconv.FormatFloat(dataVal.Float(), 'f', -1, 64)) + case dataKind == reflect.Slice && d.config.WeaklyTypedInput: + dataType := dataVal.Type() + elemKind := dataType.Elem().Kind() + switch { + case elemKind == reflect.Uint8: + val.SetString(string(dataVal.Interface().([]uint8))) + default: + converted = false + } + default: + converted = false + } + + if !converted { + return fmt.Errorf( + "'%s' expected type '%s', got unconvertible type '%s'", + name, val.Type(), dataVal.Type()) + } + + return nil +} + +func (d *Decoder) decodeInt(name string, data interface{}, val reflect.Value) error { + dataVal := reflect.ValueOf(data) + dataKind := getKind(dataVal) + + switch { + case dataKind == reflect.Int: + val.SetInt(dataVal.Int()) + case dataKind == reflect.Uint: + val.SetInt(int64(dataVal.Uint())) + case dataKind == reflect.Float32: + val.SetInt(int64(dataVal.Float())) + case dataKind == reflect.Bool && d.config.WeaklyTypedInput: + if dataVal.Bool() { + val.SetInt(1) + } else { + val.SetInt(0) + } + case dataKind == reflect.String && d.config.WeaklyTypedInput: + i, err := strconv.ParseInt(dataVal.String(), 0, val.Type().Bits()) + if err == nil { + val.SetInt(i) + } else { + return fmt.Errorf("cannot parse '%s' as int: %s", name, err) + } + default: + return fmt.Errorf( + "'%s' expected type '%s', got unconvertible type '%s'", + name, val.Type(), dataVal.Type()) + } + + return nil +} + +func (d *Decoder) decodeUint(name string, data interface{}, val reflect.Value) error { + dataVal := reflect.ValueOf(data) + dataKind := getKind(dataVal) + + switch { + case dataKind == reflect.Int: + i := dataVal.Int() + if i < 0 && !d.config.WeaklyTypedInput { + return fmt.Errorf("cannot parse '%s', %d overflows uint", + name, i) + } + val.SetUint(uint64(i)) + case dataKind == reflect.Uint: + val.SetUint(dataVal.Uint()) + case dataKind == reflect.Float32: + f := dataVal.Float() + if f < 0 && !d.config.WeaklyTypedInput { + return fmt.Errorf("cannot parse '%s', %f overflows uint", + name, f) + } + val.SetUint(uint64(f)) + case dataKind == reflect.Bool && d.config.WeaklyTypedInput: + if dataVal.Bool() { + val.SetUint(1) + } else { + val.SetUint(0) + } + case dataKind == reflect.String && d.config.WeaklyTypedInput: + i, err := strconv.ParseUint(dataVal.String(), 0, val.Type().Bits()) + if err == nil { + val.SetUint(i) + } else { + return fmt.Errorf("cannot parse '%s' as uint: %s", name, err) + } + default: + return fmt.Errorf( + "'%s' expected type '%s', got unconvertible type '%s'", + 
name, val.Type(), dataVal.Type()) + } + + return nil +} + +func (d *Decoder) decodeBool(name string, data interface{}, val reflect.Value) error { + dataVal := reflect.ValueOf(data) + dataKind := getKind(dataVal) + + switch { + case dataKind == reflect.Bool: + val.SetBool(dataVal.Bool()) + case dataKind == reflect.Int && d.config.WeaklyTypedInput: + val.SetBool(dataVal.Int() != 0) + case dataKind == reflect.Uint && d.config.WeaklyTypedInput: + val.SetBool(dataVal.Uint() != 0) + case dataKind == reflect.Float32 && d.config.WeaklyTypedInput: + val.SetBool(dataVal.Float() != 0) + case dataKind == reflect.String && d.config.WeaklyTypedInput: + b, err := strconv.ParseBool(dataVal.String()) + if err == nil { + val.SetBool(b) + } else if dataVal.String() == "" { + val.SetBool(false) + } else { + return fmt.Errorf("cannot parse '%s' as bool: %s", name, err) + } + default: + return fmt.Errorf( + "'%s' expected type '%s', got unconvertible type '%s'", + name, val.Type(), dataVal.Type()) + } + + return nil +} + +func (d *Decoder) decodeFloat(name string, data interface{}, val reflect.Value) error { + dataVal := reflect.ValueOf(data) + dataKind := getKind(dataVal) + + switch { + case dataKind == reflect.Int: + val.SetFloat(float64(dataVal.Int())) + case dataKind == reflect.Uint: + val.SetFloat(float64(dataVal.Uint())) + case dataKind == reflect.Float32: + val.SetFloat(float64(dataVal.Float())) + case dataKind == reflect.Bool && d.config.WeaklyTypedInput: + if dataVal.Bool() { + val.SetFloat(1) + } else { + val.SetFloat(0) + } + case dataKind == reflect.String && d.config.WeaklyTypedInput: + f, err := strconv.ParseFloat(dataVal.String(), val.Type().Bits()) + if err == nil { + val.SetFloat(f) + } else { + return fmt.Errorf("cannot parse '%s' as float: %s", name, err) + } + default: + return fmt.Errorf( + "'%s' expected type '%s', got unconvertible type '%s'", + name, val.Type(), dataVal.Type()) + } + + return nil +} + +func (d *Decoder) decodeMap(name string, data interface{}, val reflect.Value) error { + valType := val.Type() + valKeyType := valType.Key() + valElemType := valType.Elem() + + // By default we overwrite keys in the current map + valMap := val + + // If the map is nil or we're purposely zeroing fields, make a new map + if valMap.IsNil() || d.config.ZeroFields { + // Make a new map to hold our result + mapType := reflect.MapOf(valKeyType, valElemType) + valMap = reflect.MakeMap(mapType) + } + + // Check input type + dataVal := reflect.Indirect(reflect.ValueOf(data)) + if dataVal.Kind() != reflect.Map { + // Accept empty array/slice instead of an empty map in weakly typed mode + if d.config.WeaklyTypedInput && + (dataVal.Kind() == reflect.Slice || dataVal.Kind() == reflect.Array) && + dataVal.Len() == 0 { + val.Set(valMap) + return nil + } else { + return fmt.Errorf("'%s' expected a map, got '%s'", name, dataVal.Kind()) + } + } + + // Accumulate errors + errors := make([]string, 0) + + for _, k := range dataVal.MapKeys() { + fieldName := fmt.Sprintf("%s[%s]", name, k) + + // First decode the key into the proper type + currentKey := reflect.Indirect(reflect.New(valKeyType)) + if err := d.decode(fieldName, k.Interface(), currentKey); err != nil { + errors = appendErrors(errors, err) + continue + } + + // Next decode the data into the proper type + v := dataVal.MapIndex(k).Interface() + currentVal := reflect.Indirect(reflect.New(valElemType)) + if err := d.decode(fieldName, v, currentVal); err != nil { + errors = appendErrors(errors, err) + continue + } + + valMap.SetMapIndex(currentKey, 
currentVal) + } + + // Set the built up map to the value + val.Set(valMap) + + // If we had errors, return those + if len(errors) > 0 { + return &Error{errors} + } + + return nil +} + +func (d *Decoder) decodePtr(name string, data interface{}, val reflect.Value) error { + // Create an element of the concrete (non pointer) type and decode + // into that. Then set the value of the pointer to this type. + valType := val.Type() + valElemType := valType.Elem() + realVal := reflect.New(valElemType) + if err := d.decode(name, data, reflect.Indirect(realVal)); err != nil { + return err + } + + val.Set(realVal) + return nil +} + +func (d *Decoder) decodeSlice(name string, data interface{}, val reflect.Value) error { + dataVal := reflect.Indirect(reflect.ValueOf(data)) + dataValKind := dataVal.Kind() + valType := val.Type() + valElemType := valType.Elem() + sliceType := reflect.SliceOf(valElemType) + + // Check input type + if dataValKind != reflect.Array && dataValKind != reflect.Slice { + // Accept empty map instead of array/slice in weakly typed mode + if d.config.WeaklyTypedInput && dataVal.Kind() == reflect.Map && dataVal.Len() == 0 { + val.Set(reflect.MakeSlice(sliceType, 0, 0)) + return nil + } else { + return fmt.Errorf( + "'%s': source data must be an array or slice, got %s", name, dataValKind) + } + } + + // Make a new slice to hold our result, same size as the original data. + valSlice := reflect.MakeSlice(sliceType, dataVal.Len(), dataVal.Len()) + + // Accumulate any errors + errors := make([]string, 0) + + for i := 0; i < dataVal.Len(); i++ { + currentData := dataVal.Index(i).Interface() + currentField := valSlice.Index(i) + + fieldName := fmt.Sprintf("%s[%d]", name, i) + if err := d.decode(fieldName, currentData, currentField); err != nil { + errors = appendErrors(errors, err) + } + } + + // Finally, set the value to the slice we built up + val.Set(valSlice) + + // If there were errors, we return those + if len(errors) > 0 { + return &Error{errors} + } + + return nil +} + +func (d *Decoder) decodeStruct(name string, data interface{}, val reflect.Value) error { + dataVal := reflect.Indirect(reflect.ValueOf(data)) + + // If the type of the value to write to and the data match directly, + // then we just set it directly instead of recursing into the structure. + if dataVal.Type() == val.Type() { + val.Set(dataVal) + return nil + } + + dataValKind := dataVal.Kind() + if dataValKind != reflect.Map { + return fmt.Errorf("'%s' expected a map, got '%s'", name, dataValKind) + } + + dataValType := dataVal.Type() + if kind := dataValType.Key().Kind(); kind != reflect.String && kind != reflect.Interface { + return fmt.Errorf( + "'%s' needs a map with string keys, has '%s' keys", + name, dataValType.Key().Kind()) + } + + dataValKeys := make(map[reflect.Value]struct{}) + dataValKeysUnused := make(map[interface{}]struct{}) + for _, dataValKey := range dataVal.MapKeys() { + dataValKeys[dataValKey] = struct{}{} + dataValKeysUnused[dataValKey.Interface()] = struct{}{} + } + + errors := make([]string, 0) + + // This slice will keep track of all the structs we'll be decoding. + // There can be more than one struct if there are embedded structs + // that are squashed. + structs := make([]reflect.Value, 1, 5) + structs[0] = val + + // Compile the list of all the fields that we're going to be decoding + // from all the structs. 
+ fields := make(map[*reflect.StructField]reflect.Value) + for len(structs) > 0 { + structVal := structs[0] + structs = structs[1:] + + structType := structVal.Type() + for i := 0; i < structType.NumField(); i++ { + fieldType := structType.Field(i) + + if fieldType.Anonymous { + fieldKind := fieldType.Type.Kind() + if fieldKind != reflect.Struct { + errors = appendErrors(errors, + fmt.Errorf("%s: unsupported type: %s", fieldType.Name, fieldKind)) + continue + } + + // We have an embedded field. We "squash" the fields down + // if specified in the tag. + squash := false + tagParts := strings.Split(fieldType.Tag.Get(d.config.TagName), ",") + for _, tag := range tagParts[1:] { + if tag == "squash" { + squash = true + break + } + } + + if squash { + structs = append(structs, val.FieldByName(fieldType.Name)) + continue + } + } + + // Normal struct field, store it away + fields[&fieldType] = structVal.Field(i) + } + } + + for fieldType, field := range fields { + fieldName := fieldType.Name + + tagValue := fieldType.Tag.Get(d.config.TagName) + tagValue = strings.SplitN(tagValue, ",", 2)[0] + if tagValue != "" { + fieldName = tagValue + } + + rawMapKey := reflect.ValueOf(fieldName) + rawMapVal := dataVal.MapIndex(rawMapKey) + if !rawMapVal.IsValid() { + // Do a slower search by iterating over each key and + // doing case-insensitive search. + for dataValKey, _ := range dataValKeys { + mK, ok := dataValKey.Interface().(string) + if !ok { + // Not a string key + continue + } + + if strings.EqualFold(mK, fieldName) { + rawMapKey = dataValKey + rawMapVal = dataVal.MapIndex(dataValKey) + break + } + } + + if !rawMapVal.IsValid() { + // There was no matching key in the map for the value in + // the struct. Just ignore. + continue + } + } + + // Delete the key we're using from the unused map so we stop tracking + delete(dataValKeysUnused, rawMapKey.Interface()) + + if !field.IsValid() { + // This should never happen + panic("field is not valid") + } + + // If we can't set the field, then it is unexported or something, + // and we just continue onwards. + if !field.CanSet() { + continue + } + + // If the name is empty string, then we're at the root, and we + // don't dot-join the fields. 
+ if name != "" { + fieldName = fmt.Sprintf("%s.%s", name, fieldName) + } + + if err := d.decode(fieldName, rawMapVal.Interface(), field); err != nil { + errors = appendErrors(errors, err) + } + } + + if d.config.ErrorUnused && len(dataValKeysUnused) > 0 { + keys := make([]string, 0, len(dataValKeysUnused)) + for rawKey, _ := range dataValKeysUnused { + keys = append(keys, rawKey.(string)) + } + sort.Strings(keys) + + err := fmt.Errorf("'%s' has invalid keys: %s", name, strings.Join(keys, ", ")) + errors = appendErrors(errors, err) + } + + if len(errors) > 0 { + return &Error{errors} + } + + // Add the unused keys to the list of unused keys if we're tracking metadata + if d.config.Metadata != nil { + for rawKey, _ := range dataValKeysUnused { + key := rawKey.(string) + if name != "" { + key = fmt.Sprintf("%s.%s", name, key) + } + + d.config.Metadata.Unused = append(d.config.Metadata.Unused, key) + } + } + + return nil +} + +func getKind(val reflect.Value) reflect.Kind { + kind := val.Kind() + + switch { + case kind >= reflect.Int && kind <= reflect.Int64: + return reflect.Int + case kind >= reflect.Uint && kind <= reflect.Uint64: + return reflect.Uint + case kind >= reflect.Float32 && kind <= reflect.Float64: + return reflect.Float32 + default: + return kind + } +} diff --git a/vendor/github.com/docker/distribution/vendor/github.com/ncw/swift/.gitignore b/vendor/github.com/docker/distribution/vendor/github.com/ncw/swift/.gitignore new file mode 100644 index 000000000..5cdbab794 --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/github.com/ncw/swift/.gitignore @@ -0,0 +1,4 @@ +*~ +*.pyc +test-env* +junk/ \ No newline at end of file diff --git a/vendor/github.com/docker/distribution/vendor/github.com/ncw/swift/.travis.yml b/vendor/github.com/docker/distribution/vendor/github.com/ncw/swift/.travis.yml new file mode 100644 index 000000000..bf37e41f0 --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/github.com/ncw/swift/.travis.yml @@ -0,0 +1,14 @@ +language: go +sudo: false + +go: + - 1.1.2 + - 1.2.2 + - 1.3.3 + - 1.4.2 + - 1.5.1 + - tip + +script: + - test -z "$(go fmt ./...)" + - go test diff --git a/vendor/github.com/docker/distribution/vendor/github.com/ncw/swift/COPYING b/vendor/github.com/docker/distribution/vendor/github.com/ncw/swift/COPYING new file mode 100644 index 000000000..8c27c67fd --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/github.com/ncw/swift/COPYING @@ -0,0 +1,20 @@ +Copyright (C) 2012 by Nick Craig-Wood http://www.craig-wood.com/nick/ + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. + diff --git a/vendor/github.com/docker/distribution/vendor/github.com/ncw/swift/README.md b/vendor/github.com/docker/distribution/vendor/github.com/ncw/swift/README.md new file mode 100644 index 000000000..3855a394e --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/github.com/ncw/swift/README.md @@ -0,0 +1,139 @@ +Swift +===== + +This package provides an easy to use library for interfacing with +Swift / Openstack Object Storage / Rackspace cloud files from the Go +Language + +See here for package docs + + http://godoc.org/github.com/ncw/swift + +[![Build Status](https://api.travis-ci.org/ncw/swift.svg?branch=master)](https://travis-ci.org/ncw/swift) [![GoDoc](https://godoc.org/github.com/ncw/swift?status.svg)](https://godoc.org/github.com/ncw/swift) + +Install +------- + +Use go to install the library + + go get github.com/ncw/swift + +Usage +----- + +See here for full package docs + +- http://godoc.org/github.com/ncw/swift + +Here is a short example from the docs + + import "github.com/ncw/swift" + + // Create a connection + c := swift.Connection{ + UserName: "user", + ApiKey: "key", + AuthUrl: "auth_url", + Domain: "domain", // Name of the domain (v3 auth only) + Tenant: "tenant", // Name of the tenant (v2 auth only) + } + // Authenticate + err := c.Authenticate() + if err != nil { + panic(err) + } + // List all the containers + containers, err := c.ContainerNames(nil) + fmt.Println(containers) + // etc... + +Additions +--------- + +The `rs` sub project contains a wrapper for the Rackspace specific CDN Management interface. + +Testing +------- + +To run the tests you can either use an embedded fake Swift server +either use a real Openstack Swift server or a Rackspace Cloud files account. + +When using a real Swift server, you need to set these environment variables +before running the tests + + export SWIFT_API_USER='user' + export SWIFT_API_KEY='key' + export SWIFT_AUTH_URL='https://url.of.auth.server/v1.0' + +And optionally these if using v2 authentication + + export SWIFT_TENANT='TenantName' + export SWIFT_TENANT_ID='TenantId' + +And optionally these if using v3 authentication + + export SWIFT_TENANT='TenantName' + export SWIFT_TENANT_ID='TenantId' + export SWIFT_API_DOMAIN_ID='domain id' + export SWIFT_API_DOMAIN='domain name' + +And optionally these if using v3 trust + + export SWIFT_TRUST_ID='TrustId' + +And optionally this if you want to skip server certificate validation + + export SWIFT_AUTH_INSECURE=1 + +And optionally this to configure the connect channel timeout, in seconds + + export SWIFT_CONNECTION_CHANNEL_TIMEOUT=60 + +And optionally this to configure the data channel timeout, in seconds + + export SWIFT_DATA_CHANNEL_TIMEOUT=60 + +Then run the tests with `go test` + +License +------- + +This is free software under the terms of MIT license (check COPYING file +included in this package). + +Contact and support +------------------- + +The project website is at: + +- https://github.com/ncw/swift + +There you can file bug reports, ask for help or contribute patches. 
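
As a minimal sketch (assuming the Connection API shown in the usage example above), the same environment variables the test suite reads can be fed straight into a client:

    package main

    import (
        "fmt"
        "os"

        "github.com/ncw/swift"
    )

    func main() {
        // Reuse the variables described in the Testing section above.
        c := swift.Connection{
            UserName: os.Getenv("SWIFT_API_USER"),
            ApiKey:   os.Getenv("SWIFT_API_KEY"),
            AuthUrl:  os.Getenv("SWIFT_AUTH_URL"),
            Tenant:   os.Getenv("SWIFT_TENANT"),    // v2/v3 auth only
            TenantId: os.Getenv("SWIFT_TENANT_ID"), // v2/v3 auth only
        }
        // Authenticate, then list container names as in the usage example.
        if err := c.Authenticate(); err != nil {
            panic(err)
        }
        containers, err := c.ContainerNames(nil)
        if err != nil {
            panic(err)
        }
        fmt.Println(containers)
    }
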
+ +Authors +------- + +- Nick Craig-Wood + +Contributors +------------ + +- Brian "bojo" Jones +- Janika Liiv +- Yamamoto, Hirotaka +- Stephen +- platformpurple +- Paul Querna +- Livio Soares +- thesyncim +- lsowen +- Sylvain Baubeau +- Chris Kastorff +- Dai HaoJun +- Hua Wang +- Fabian Ruff +- Arturo Reuschenbach Puncernau +- Petr Kotek +- Stefan Majewsky +- Cezar Sa Espinola +- Sam Gunaratne diff --git a/vendor/github.com/docker/distribution/vendor/github.com/ncw/swift/auth.go b/vendor/github.com/docker/distribution/vendor/github.com/ncw/swift/auth.go new file mode 100644 index 000000000..316dc7fe0 --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/github.com/ncw/swift/auth.go @@ -0,0 +1,320 @@ +package swift + +import ( + "bytes" + "encoding/json" + "net/http" + "net/url" + "strings" +) + +// Auth defines the operations needed to authenticate with swift +// +// This encapsulates the different authentication schemes in use +type Authenticator interface { + // Request creates an http.Request for the auth - return nil if not needed + Request(*Connection) (*http.Request, error) + // Response parses the http.Response + Response(resp *http.Response) error + // The public storage URL - set Internal to true to read + // internal/service net URL + StorageUrl(Internal bool) string + // The access token + Token() string + // The CDN url if available + CdnUrl() string +} + +type CustomEndpointAuthenticator interface { + StorageUrlForEndpoint(endpointType EndpointType) string +} + +type EndpointType string + +const ( + // Use public URL as storage URL + EndpointTypePublic = EndpointType("public") + + // Use internal URL as storage URL + EndpointTypeInternal = EndpointType("internal") + + // Use admin URL as storage URL + EndpointTypeAdmin = EndpointType("admin") +) + +// newAuth - create a new Authenticator from the AuthUrl +// +// A hint for AuthVersion can be provided +func newAuth(c *Connection) (Authenticator, error) { + AuthVersion := c.AuthVersion + if AuthVersion == 0 { + if strings.Contains(c.AuthUrl, "v3") { + AuthVersion = 3 + } else if strings.Contains(c.AuthUrl, "v2") { + AuthVersion = 2 + } else if strings.Contains(c.AuthUrl, "v1") { + AuthVersion = 1 + } else { + return nil, newErrorf(500, "Can't find AuthVersion in AuthUrl - set explicitly") + } + } + switch AuthVersion { + case 1: + return &v1Auth{}, nil + case 2: + return &v2Auth{ + // Guess as to whether using API key or + // password it will try both eventually so + // this is just an optimization. 
+ useApiKey: len(c.ApiKey) >= 32, + }, nil + case 3: + return &v3Auth{}, nil + } + return nil, newErrorf(500, "Auth Version %d not supported", AuthVersion) +} + +// ------------------------------------------------------------ + +// v1 auth +type v1Auth struct { + Headers http.Header // V1 auth: the authentication headers so extensions can access them +} + +// v1 Authentication - make request +func (auth *v1Auth) Request(c *Connection) (*http.Request, error) { + req, err := http.NewRequest("GET", c.AuthUrl, nil) + if err != nil { + return nil, err + } + req.Header.Set("User-Agent", c.UserAgent) + req.Header.Set("X-Auth-Key", c.ApiKey) + req.Header.Set("X-Auth-User", c.UserName) + return req, nil +} + +// v1 Authentication - read response +func (auth *v1Auth) Response(resp *http.Response) error { + auth.Headers = resp.Header + return nil +} + +// v1 Authentication - read storage url +func (auth *v1Auth) StorageUrl(Internal bool) string { + storageUrl := auth.Headers.Get("X-Storage-Url") + if Internal { + newUrl, err := url.Parse(storageUrl) + if err != nil { + return storageUrl + } + newUrl.Host = "snet-" + newUrl.Host + storageUrl = newUrl.String() + } + return storageUrl +} + +// v1 Authentication - read auth token +func (auth *v1Auth) Token() string { + return auth.Headers.Get("X-Auth-Token") +} + +// v1 Authentication - read cdn url +func (auth *v1Auth) CdnUrl() string { + return auth.Headers.Get("X-CDN-Management-Url") +} + +// ------------------------------------------------------------ + +// v2 Authentication +type v2Auth struct { + Auth *v2AuthResponse + Region string + useApiKey bool // if set will use API key not Password + useApiKeyOk bool // if set won't change useApiKey any more + notFirst bool // set after first run +} + +// v2 Authentication - make request +func (auth *v2Auth) Request(c *Connection) (*http.Request, error) { + auth.Region = c.Region + // Toggle useApiKey if not first run and not OK yet + if auth.notFirst && !auth.useApiKeyOk { + auth.useApiKey = !auth.useApiKey + } + auth.notFirst = true + // Create a V2 auth request for the body of the connection + var v2i interface{} + if !auth.useApiKey { + // Normal swift authentication + v2 := v2AuthRequest{} + v2.Auth.PasswordCredentials.UserName = c.UserName + v2.Auth.PasswordCredentials.Password = c.ApiKey + v2.Auth.Tenant = c.Tenant + v2.Auth.TenantId = c.TenantId + v2i = v2 + } else { + // Rackspace special with API Key + v2 := v2AuthRequestRackspace{} + v2.Auth.ApiKeyCredentials.UserName = c.UserName + v2.Auth.ApiKeyCredentials.ApiKey = c.ApiKey + v2.Auth.Tenant = c.Tenant + v2.Auth.TenantId = c.TenantId + v2i = v2 + } + body, err := json.Marshal(v2i) + if err != nil { + return nil, err + } + url := c.AuthUrl + if !strings.HasSuffix(url, "/") { + url += "/" + } + url += "tokens" + req, err := http.NewRequest("POST", url, bytes.NewBuffer(body)) + if err != nil { + return nil, err + } + req.Header.Set("Content-Type", "application/json") + req.Header.Set("User-Agent", c.UserAgent) + return req, nil +} + +// v2 Authentication - read response +func (auth *v2Auth) Response(resp *http.Response) error { + auth.Auth = new(v2AuthResponse) + err := readJson(resp, auth.Auth) + // If successfully read Auth then no need to toggle useApiKey any more + if err == nil { + auth.useApiKeyOk = true + } + return err +} + +// Finds the Endpoint Url of "type" from the v2AuthResponse using the +// Region if set or defaulting to the first one if not +// +// Returns "" if not found +func (auth *v2Auth) endpointUrl(Type string, endpointType 
EndpointType) string { + for _, catalog := range auth.Auth.Access.ServiceCatalog { + if catalog.Type == Type { + for _, endpoint := range catalog.Endpoints { + if auth.Region == "" || (auth.Region == endpoint.Region) { + switch endpointType { + case EndpointTypeInternal: + return endpoint.InternalUrl + case EndpointTypePublic: + return endpoint.PublicUrl + case EndpointTypeAdmin: + return endpoint.AdminUrl + default: + return "" + } + } + } + } + } + return "" +} + +// v2 Authentication - read storage url +// +// If Internal is true then it reads the private (internal / service +// net) URL. +func (auth *v2Auth) StorageUrl(Internal bool) string { + endpointType := EndpointTypePublic + if Internal { + endpointType = EndpointTypeInternal + } + return auth.StorageUrlForEndpoint(endpointType) +} + +// v2 Authentication - read storage url +// +// Use the indicated endpointType to choose a URL. +func (auth *v2Auth) StorageUrlForEndpoint(endpointType EndpointType) string { + return auth.endpointUrl("object-store", endpointType) +} + +// v2 Authentication - read auth token +func (auth *v2Auth) Token() string { + return auth.Auth.Access.Token.Id +} + +// v2 Authentication - read cdn url +func (auth *v2Auth) CdnUrl() string { + return auth.endpointUrl("rax:object-cdn", EndpointTypePublic) +} + +// ------------------------------------------------------------ + +// V2 Authentication request +// +// http://docs.openstack.org/developer/keystone/api_curl_examples.html +// http://docs.rackspace.com/servers/api/v2/cs-gettingstarted/content/curl_auth.html +// http://docs.openstack.org/api/openstack-identity-service/2.0/content/POST_authenticate_v2.0_tokens_.html +type v2AuthRequest struct { + Auth struct { + PasswordCredentials struct { + UserName string `json:"username"` + Password string `json:"password"` + } `json:"passwordCredentials"` + Tenant string `json:"tenantName,omitempty"` + TenantId string `json:"tenantId,omitempty"` + } `json:"auth"` +} + +// V2 Authentication request - Rackspace variant +// +// http://docs.openstack.org/developer/keystone/api_curl_examples.html +// http://docs.rackspace.com/servers/api/v2/cs-gettingstarted/content/curl_auth.html +// http://docs.openstack.org/api/openstack-identity-service/2.0/content/POST_authenticate_v2.0_tokens_.html +type v2AuthRequestRackspace struct { + Auth struct { + ApiKeyCredentials struct { + UserName string `json:"username"` + ApiKey string `json:"apiKey"` + } `json:"RAX-KSKEY:apiKeyCredentials"` + Tenant string `json:"tenantName,omitempty"` + TenantId string `json:"tenantId,omitempty"` + } `json:"auth"` +} + +// V2 Authentication reply +// +// http://docs.openstack.org/developer/keystone/api_curl_examples.html +// http://docs.rackspace.com/servers/api/v2/cs-gettingstarted/content/curl_auth.html +// http://docs.openstack.org/api/openstack-identity-service/2.0/content/POST_authenticate_v2.0_tokens_.html +type v2AuthResponse struct { + Access struct { + ServiceCatalog []struct { + Endpoints []struct { + InternalUrl string + PublicUrl string + AdminUrl string + Region string + TenantId string + } + Name string + Type string + } + Token struct { + Expires string + Id string + Tenant struct { + Id string + Name string + } + } + User struct { + DefaultRegion string `json:"RAX-AUTH:defaultRegion"` + Id string + Name string + Roles []struct { + Description string + Id string + Name string + TenantId string + } + } + } +} diff --git a/vendor/github.com/docker/distribution/vendor/github.com/ncw/swift/auth_v3.go 
b/vendor/github.com/docker/distribution/vendor/github.com/ncw/swift/auth_v3.go new file mode 100644 index 000000000..21e96718b --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/github.com/ncw/swift/auth_v3.go @@ -0,0 +1,227 @@ +package swift + +import ( + "bytes" + "encoding/json" + "net/http" + "strings" +) + +const ( + v3AuthMethodToken = "token" + v3AuthMethodPassword = "password" + v3CatalogTypeObjectStore = "object-store" +) + +// V3 Authentication request +// http://docs.openstack.org/developer/keystone/api_curl_examples.html +// http://developer.openstack.org/api-ref-identity-v3.html +type v3AuthRequest struct { + Auth struct { + Identity struct { + Methods []string `json:"methods"` + Password *v3AuthPassword `json:"password,omitempty"` + Token *v3AuthToken `json:"token,omitempty"` + } `json:"identity"` + Scope *v3Scope `json:"scope,omitempty"` + } `json:"auth"` +} + +type v3Scope struct { + Project *v3Project `json:"project,omitempty"` + Domain *v3Domain `json:"domain,omitempty"` + Trust *v3Trust `json:"OS-TRUST:trust,omitempty"` +} + +type v3Domain struct { + Id string `json:"id,omitempty"` + Name string `json:"name,omitempty"` +} + +type v3Project struct { + Name string `json:"name,omitempty"` + Id string `json:"id,omitempty"` + Domain *v3Domain `json:"domain,omitempty"` +} + +type v3Trust struct { + Id string `json:"id"` +} + +type v3User struct { + Domain *v3Domain `json:"domain,omitempty"` + Id string `json:"id,omitempty"` + Name string `json:"name,omitempty"` + Password string `json:"password,omitempty"` +} + +type v3AuthToken struct { + Id string `json:"id"` +} + +type v3AuthPassword struct { + User v3User `json:"user"` +} + +// V3 Authentication response +type v3AuthResponse struct { + Token struct { + Expires_At, Issued_At string + Methods []string + Roles []struct { + Id, Name string + Links struct { + Self string + } + } + + Project struct { + Domain struct { + Id, Name string + } + Id, Name string + } + + Catalog []struct { + Id, Namem, Type string + Endpoints []struct { + Id, Region_Id, Url, Region string + Interface EndpointType + } + } + + User struct { + Id, Name string + Domain struct { + Id, Name string + Links struct { + Self string + } + } + } + + Audit_Ids []string + } +} + +type v3Auth struct { + Region string + Auth *v3AuthResponse + Headers http.Header +} + +func (auth *v3Auth) Request(c *Connection) (*http.Request, error) { + auth.Region = c.Region + + var v3i interface{} + + v3 := v3AuthRequest{} + + if c.UserName == "" { + v3.Auth.Identity.Methods = []string{v3AuthMethodToken} + v3.Auth.Identity.Token = &v3AuthToken{Id: c.ApiKey} + } else { + v3.Auth.Identity.Methods = []string{v3AuthMethodPassword} + v3.Auth.Identity.Password = &v3AuthPassword{ + User: v3User{ + Name: c.UserName, + Password: c.ApiKey, + }, + } + + var domain *v3Domain + + if c.Domain != "" { + domain = &v3Domain{Name: c.Domain} + } else if c.DomainId != "" { + domain = &v3Domain{Id: c.DomainId} + } + v3.Auth.Identity.Password.User.Domain = domain + } + + if c.TrustId != "" { + v3.Auth.Scope = &v3Scope{Trust: &v3Trust{Id: c.TrustId}} + } else if c.TenantId != "" || c.Tenant != "" { + + v3.Auth.Scope = &v3Scope{Project: &v3Project{}} + + if c.TenantId != "" { + v3.Auth.Scope.Project.Id = c.TenantId + } else if c.Tenant != "" { + v3.Auth.Scope.Project.Name = c.Tenant + switch { + case c.TenantDomain != "": + v3.Auth.Scope.Project.Domain = &v3Domain{Name: c.TenantDomain} + case c.TenantDomainId != "": + v3.Auth.Scope.Project.Domain = &v3Domain{Id: c.TenantDomainId} + case 
c.Domain != "": + v3.Auth.Scope.Project.Domain = &v3Domain{Name: c.Domain} + case c.DomainId != "": + v3.Auth.Scope.Project.Domain = &v3Domain{Id: c.DomainId} + default: + v3.Auth.Scope.Project.Domain = &v3Domain{Name: "Default"} + } + } + } + + v3i = v3 + + body, err := json.Marshal(v3i) + + if err != nil { + return nil, err + } + + url := c.AuthUrl + if !strings.HasSuffix(url, "/") { + url += "/" + } + url += "auth/tokens" + req, err := http.NewRequest("POST", url, bytes.NewBuffer(body)) + if err != nil { + return nil, err + } + req.Header.Set("Content-Type", "application/json") + req.Header.Set("User-Agent", c.UserAgent) + return req, nil +} + +func (auth *v3Auth) Response(resp *http.Response) error { + auth.Auth = &v3AuthResponse{} + auth.Headers = resp.Header + err := readJson(resp, auth.Auth) + return err +} + +func (auth *v3Auth) endpointUrl(Type string, endpointType EndpointType) string { + for _, catalog := range auth.Auth.Token.Catalog { + if catalog.Type == Type { + for _, endpoint := range catalog.Endpoints { + if endpoint.Interface == endpointType && (auth.Region == "" || (auth.Region == endpoint.Region)) { + return endpoint.Url + } + } + } + } + return "" +} + +func (auth *v3Auth) StorageUrl(Internal bool) string { + endpointType := EndpointTypePublic + if Internal { + endpointType = EndpointTypeInternal + } + return auth.StorageUrlForEndpoint(endpointType) +} + +func (auth *v3Auth) StorageUrlForEndpoint(endpointType EndpointType) string { + return auth.endpointUrl("object-store", endpointType) +} + +func (auth *v3Auth) Token() string { + return auth.Headers.Get("X-Subject-Token") +} + +func (auth *v3Auth) CdnUrl() string { + return "" +} diff --git a/vendor/github.com/docker/distribution/vendor/github.com/ncw/swift/compatibility_1_0.go b/vendor/github.com/docker/distribution/vendor/github.com/ncw/swift/compatibility_1_0.go new file mode 100644 index 000000000..7b69a757a --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/github.com/ncw/swift/compatibility_1_0.go @@ -0,0 +1,28 @@ +// Go 1.0 compatibility functions + +// +build !go1.1 + +package swift + +import ( + "log" + "net/http" + "time" +) + +// Cancel the request - doesn't work under < go 1.1 +func cancelRequest(transport http.RoundTripper, req *http.Request) { + log.Printf("Tried to cancel a request but couldn't - recompile with go 1.1") +} + +// Reset a timer - Doesn't work properly < go 1.1 +// +// This is quite hard to do properly under go < 1.1 so we do a crude +// approximation and hope that everyone upgrades to go 1.1 quickly +func resetTimer(t *time.Timer, d time.Duration) { + t.Stop() + // Very likely this doesn't actually work if we are already + // selecting on t.C. 
However we've stopped the original timer + // so won't break transfers but may not time them out :-( + *t = *time.NewTimer(d) +} diff --git a/vendor/github.com/docker/distribution/vendor/github.com/ncw/swift/compatibility_1_1.go b/vendor/github.com/docker/distribution/vendor/github.com/ncw/swift/compatibility_1_1.go new file mode 100644 index 000000000..a4f9c3ab2 --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/github.com/ncw/swift/compatibility_1_1.go @@ -0,0 +1,24 @@ +// Go 1.1 and later compatibility functions +// +// +build go1.1 + +package swift + +import ( + "net/http" + "time" +) + +// Cancel the request +func cancelRequest(transport http.RoundTripper, req *http.Request) { + if tr, ok := transport.(interface { + CancelRequest(*http.Request) + }); ok { + tr.CancelRequest(req) + } +} + +// Reset a timer +func resetTimer(t *time.Timer, d time.Duration) { + t.Reset(d) +} diff --git a/vendor/github.com/docker/distribution/vendor/github.com/ncw/swift/doc.go b/vendor/github.com/docker/distribution/vendor/github.com/ncw/swift/doc.go new file mode 100644 index 000000000..44efde7bf --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/github.com/ncw/swift/doc.go @@ -0,0 +1,19 @@ +/* +Package swift provides an easy to use interface to Swift / Openstack Object Storage / Rackspace Cloud Files + +Standard Usage + +Most of the work is done through the Container*() and Object*() methods. + +All methods are safe to use concurrently in multiple go routines. + +Object Versioning + +As defined by http://docs.openstack.org/api/openstack-object-storage/1.0/content/Object_Versioning-e1e3230.html#d6e983 one can create a container which allows for version control of files. The suggested method is to create a version container for holding all non-current files, and a current container for holding the latest version that the file points to. The container and objects inside it can be used in the standard manner, however, pushing a file multiple times will result in it being copied to the version container and the new file put in it's place. If the current file is deleted, the previous file in the version container will replace it. This means that if a file is updated 5 times, it must be deleted 5 times to be completely removed from the system. + +Rackspace Sub Module + +This module specifically allows the enabling/disabling of Rackspace Cloud File CDN management on a container. This is specific to the Rackspace API and not Swift/Openstack, therefore it has been placed in a submodule. One can easily create a RsConnection and use it like the standard Connection to access and manipulate containers and objects. + +*/ +package swift diff --git a/vendor/github.com/docker/distribution/vendor/github.com/ncw/swift/meta.go b/vendor/github.com/docker/distribution/vendor/github.com/ncw/swift/meta.go new file mode 100644 index 000000000..e52d68608 --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/github.com/ncw/swift/meta.go @@ -0,0 +1,174 @@ +// Metadata manipulation in and out of Headers + +package swift + +import ( + "fmt" + "net/http" + "strconv" + "strings" + "time" +) + +// Metadata stores account, container or object metadata. +type Metadata map[string]string + +// Metadata gets the Metadata starting with the metaPrefix out of the Headers. 
+// +// The keys in the Metadata will be converted to lower case +func (h Headers) Metadata(metaPrefix string) Metadata { + m := Metadata{} + metaPrefix = http.CanonicalHeaderKey(metaPrefix) + for key, value := range h { + if strings.HasPrefix(key, metaPrefix) { + metaKey := strings.ToLower(key[len(metaPrefix):]) + m[metaKey] = value + } + } + return m +} + +// AccountMetadata converts Headers from account to a Metadata. +// +// The keys in the Metadata will be converted to lower case. +func (h Headers) AccountMetadata() Metadata { + return h.Metadata("X-Account-Meta-") +} + +// ContainerMetadata converts Headers from container to a Metadata. +// +// The keys in the Metadata will be converted to lower case. +func (h Headers) ContainerMetadata() Metadata { + return h.Metadata("X-Container-Meta-") +} + +// ObjectMetadata converts Headers from object to a Metadata. +// +// The keys in the Metadata will be converted to lower case. +func (h Headers) ObjectMetadata() Metadata { + return h.Metadata("X-Object-Meta-") +} + +// Headers convert the Metadata starting with the metaPrefix into a +// Headers. +// +// The keys in the Metadata will be converted from lower case to http +// Canonical (see http.CanonicalHeaderKey). +func (m Metadata) Headers(metaPrefix string) Headers { + h := Headers{} + for key, value := range m { + key = http.CanonicalHeaderKey(metaPrefix + key) + h[key] = value + } + return h +} + +// AccountHeaders converts the Metadata for the account. +func (m Metadata) AccountHeaders() Headers { + return m.Headers("X-Account-Meta-") +} + +// ContainerHeaders converts the Metadata for the container. +func (m Metadata) ContainerHeaders() Headers { + return m.Headers("X-Container-Meta-") +} + +// ObjectHeaders converts the Metadata for the object. +func (m Metadata) ObjectHeaders() Headers { + return m.Headers("X-Object-Meta-") +} + +// Turns a number of ns into a floating point string in seconds +// +// Trims trailing zeros and guaranteed to be perfectly accurate +func nsToFloatString(ns int64) string { + if ns < 0 { + return "-" + nsToFloatString(-ns) + } + result := fmt.Sprintf("%010d", ns) + split := len(result) - 9 + result, decimals := result[:split], result[split:] + decimals = strings.TrimRight(decimals, "0") + if decimals != "" { + result += "." + result += decimals + } + return result +} + +// Turns a floating point string in seconds into a ns integer +// +// Guaranteed to be perfectly accurate +func floatStringToNs(s string) (int64, error) { + const zeros = "000000000" + if point := strings.IndexRune(s, '.'); point >= 0 { + tail := s[point+1:] + if fill := 9 - len(tail); fill < 0 { + tail = tail[:9] + } else { + tail += zeros[:fill] + } + s = s[:point] + tail + } else if len(s) > 0 { // Make sure empty string produces an error + s += zeros + } + return strconv.ParseInt(s, 10, 64) +} + +// FloatStringToTime converts a floating point number string to a time.Time +// +// The string is floating point number of seconds since the epoch +// (Unix time). The number should be in fixed point format (not +// exponential), eg "1354040105.123456789" which represents the time +// "2012-11-27T18:15:05.123456789Z" +// +// Some care is taken to preserve all the accuracy in the time.Time +// (which wouldn't happen with a naive conversion through float64) so +// a round trip conversion won't change the data. +// +// If an error is returned then time will be returned as the zero time. 
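The helpers above map Swift's X-*-Meta-* headers to and from a plain string map. A minimal sketch of that round trip, assuming the usual "github.com/ncw/swift" import; the header, key and value shown are placeholders, not part of the vendored source:

// objectMetaRoundTrip strips the X-Object-Meta- prefix into a lower-cased
// map, edits it, and converts it back into canonical headers.
func objectMetaRoundTrip(h swift.Headers) swift.Headers {
	m := h.ObjectMetadata()  // "X-Object-Meta-Color: blue" becomes m["color"] = "blue"
	m["owner"] = "keel"      // placeholder key and value
	return m.ObjectHeaders() // back to "X-Object-Meta-Owner", "X-Object-Meta-Color", ...
}
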
+func FloatStringToTime(s string) (t time.Time, err error) { + ns, err := floatStringToNs(s) + if err != nil { + return + } + t = time.Unix(0, ns) + return +} + +// TimeToFloatString converts a time.Time object to a floating point string +// +// The string is floating point number of seconds since the epoch +// (Unix time). The number is in fixed point format (not +// exponential), eg "1354040105.123456789" which represents the time +// "2012-11-27T18:15:05.123456789Z". Trailing zeros will be dropped +// from the output. +// +// Some care is taken to preserve all the accuracy in the time.Time +// (which wouldn't happen with a naive conversion through float64) so +// a round trip conversion won't change the data. +func TimeToFloatString(t time.Time) string { + return nsToFloatString(t.UnixNano()) +} + +// Read a modification time (mtime) from a Metadata object +// +// This is a defacto standard (used in the official python-swiftclient +// amongst others) for storing the modification time (as read using +// os.Stat) for an object. It is stored using the key 'mtime', which +// for example when written to an object will be 'X-Object-Meta-Mtime'. +// +// If an error is returned then time will be returned as the zero time. +func (m Metadata) GetModTime() (t time.Time, err error) { + return FloatStringToTime(m["mtime"]) +} + +// Write an modification time (mtime) to a Metadata object +// +// This is a defacto standard (used in the official python-swiftclient +// amongst others) for storing the modification time (as read using +// os.Stat) for an object. It is stored using the key 'mtime', which +// for example when written to an object will be 'X-Object-Meta-Mtime'. +func (m Metadata) SetModTime(t time.Time) { + m["mtime"] = TimeToFloatString(t) +} diff --git a/vendor/github.com/docker/distribution/vendor/github.com/ncw/swift/notes.txt b/vendor/github.com/docker/distribution/vendor/github.com/ncw/swift/notes.txt new file mode 100644 index 000000000..f738552cd --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/github.com/ncw/swift/notes.txt @@ -0,0 +1,55 @@ +Notes on Go Swift +================= + +Make a builder style interface like the Google Go APIs? Advantages +are that it is easy to add named methods to the service object to do +specific things. Slightly less efficient. Not sure about how to +return extra stuff though - in an object? + +Make a container struct so these could be methods on it? + +Make noResponse check for 204? + +Make storage public so it can be extended easily? + +Rename to go-swift to match user agent string? + +Reconnect on auth error - 401 when token expires isn't tested + +Make more api compatible with python cloudfiles? + +Retry operations on timeout / network errors? +- also 408 error +- GET requests only? + +Make Connection thread safe - whenever it is changed take a write lock whenever it is read from a read lock + +Add extra headers field to Connection (for via etc) + +Make errors use an error heirachy then can catch them with a type assertion + + Error(...) + ObjectCorrupted{ Error } + +Make a Debug flag in connection for logging stuff + +Object If-Match, If-None-Match, If-Modified-Since, If-Unmodified-Since etc + +Object range + +Object create, update with X-Delete-At or X-Delete-After + +Large object support +- check uploads are less than 5GB in normal mode? + +Access control CORS? + +Swift client retries and backs off for all types of errors + +Implement net error interface? 
+ +type Error interface { + error + Timeout() bool // Is the error a timeout? + Temporary() bool // Is the error temporary? +} diff --git a/vendor/github.com/docker/distribution/vendor/github.com/ncw/swift/swift.go b/vendor/github.com/docker/distribution/vendor/github.com/ncw/swift/swift.go new file mode 100644 index 000000000..1337f10dc --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/github.com/ncw/swift/swift.go @@ -0,0 +1,1909 @@ +package swift + +import ( + "bufio" + "bytes" + "crypto/hmac" + "crypto/md5" + "crypto/sha1" + "encoding/hex" + "encoding/json" + "fmt" + "hash" + "io" + "mime" + "net/http" + "net/url" + "path" + "strconv" + "strings" + "sync" + "time" +) + +const ( + DefaultUserAgent = "goswift/1.0" // Default user agent + DefaultRetries = 3 // Default number of retries on token expiry + TimeFormat = "2006-01-02T15:04:05" // Python date format for json replies parsed as UTC + UploadTar = "tar" // Data format specifier for Connection.BulkUpload(). + UploadTarGzip = "tar.gz" // Data format specifier for Connection.BulkUpload(). + UploadTarBzip2 = "tar.bz2" // Data format specifier for Connection.BulkUpload(). + allContainersLimit = 10000 // Number of containers to fetch at once + allObjectsLimit = 10000 // Number objects to fetch at once + allObjectsChanLimit = 1000 // ...when fetching to a channel +) + +// Connection holds the details of the connection to the swift server. +// +// You need to provide UserName, ApiKey and AuthUrl when you create a +// connection then call Authenticate on it. +// +// The auth version in use will be detected from the AuthURL - you can +// override this with the AuthVersion parameter. +// +// If using v2 auth you can also set Region in the Connection +// structure. If you don't set Region you will get the default region +// which may not be what you want. +// +// For reference some common AuthUrls looks like this: +// +// Rackspace US https://auth.api.rackspacecloud.com/v1.0 +// Rackspace UK https://lon.auth.api.rackspacecloud.com/v1.0 +// Rackspace v2 https://identity.api.rackspacecloud.com/v2.0 +// Memset Memstore UK https://auth.storage.memset.com/v1.0 +// Memstore v2 https://auth.storage.memset.com/v2.0 +// +// When using Google Appengine you must provide the Connection with an +// appengine-specific Transport: +// +// import ( +// "appengine/urlfetch" +// "fmt" +// "github.com/ncw/swift" +// ) +// +// func handler(w http.ResponseWriter, r *http.Request) { +// ctx := appengine.NewContext(r) +// tr := urlfetch.Transport{Context: ctx} +// c := swift.Connection{ +// UserName: "user", +// ApiKey: "key", +// AuthUrl: "auth_url", +// Transport: tr, +// } +// _ := c.Authenticate() +// containers, _ := c.ContainerNames(nil) +// fmt.Fprintf(w, "containers: %q", containers) +// } +// +// If you don't supply a Transport, one is made which relies on +// http.ProxyFromEnvironment (http://golang.org/pkg/net/http/#ProxyFromEnvironment). +// This means that the connection will respect the HTTP proxy specified by the +// environment variables $HTTP_PROXY and $NO_PROXY. 
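For the ordinary (non-Appengine) case described above, only UserName, ApiKey and AuthUrl need to be filled in; the auth version is detected from the URL. A minimal sketch with placeholder credentials and one of the example auth URLs listed above:

package main

import (
	"fmt"
	"log"

	"github.com/ncw/swift"
)

func main() {
	c := swift.Connection{
		UserName: "user",                                  // placeholder
		ApiKey:   "key",                                   // placeholder
		AuthUrl:  "https://auth.api.rackspacecloud.com/v1.0",
	}
	// Authenticate explicitly; otherwise it happens on first use.
	if err := c.Authenticate(); err != nil {
		log.Fatal(err)
	}
	names, err := c.ContainerNames(nil)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("containers:", names)
}
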
+type Connection struct { + // Parameters - fill these in before calling Authenticate + // They are all optional except UserName, ApiKey and AuthUrl + Domain string // User's domain name + DomainId string // User's domain Id + UserName string // UserName for api + ApiKey string // Key for api access + AuthUrl string // Auth URL + Retries int // Retries on error (default is 3) + UserAgent string // Http User agent (default goswift/1.0) + ConnectTimeout time.Duration // Connect channel timeout (default 10s) + Timeout time.Duration // Data channel timeout (default 60s) + Region string // Region to use eg "LON", "ORD" - default is use first region (v2,v3 auth only) + AuthVersion int // Set to 1, 2 or 3 or leave at 0 for autodetect + Internal bool // Set this to true to use the the internal / service network + Tenant string // Name of the tenant (v2,v3 auth only) + TenantId string // Id of the tenant (v2,v3 auth only) + EndpointType EndpointType // Endpoint type (v2,v3 auth only) (default is public URL unless Internal is set) + TenantDomain string // Name of the tenant's domain (v3 auth only), only needed if it differs from the user domain + TenantDomainId string // Id of the tenant's domain (v3 auth only), only needed if it differs the from user domain + TrustId string // Id of the trust (v3 auth only) + Transport http.RoundTripper `json:"-" xml:"-"` // Optional specialised http.Transport (eg. for Google Appengine) + // These are filled in after Authenticate is called as are the defaults for above + StorageUrl string + AuthToken string + client *http.Client + Auth Authenticator `json:"-" xml:"-"` // the current authenticator + authLock sync.Mutex // lock when R/W StorageUrl, AuthToken, Auth +} + +// Error - all errors generated by this package are of this type. Other error +// may be passed on from library functions though. +type Error struct { + StatusCode int // HTTP status code if relevant or 0 if not + Text string +} + +// Error satisfy the error interface. +func (e *Error) Error() string { + return e.Text +} + +// newError make a new error from a string. +func newError(StatusCode int, Text string) *Error { + return &Error{ + StatusCode: StatusCode, + Text: Text, + } +} + +// newErrorf makes a new error from sprintf parameters. +func newErrorf(StatusCode int, Text string, Parameters ...interface{}) *Error { + return newError(StatusCode, fmt.Sprintf(Text, Parameters...)) +} + +// errorMap defines http error codes to error mappings. 
+type errorMap map[int]error + +var ( + // Specific Errors you might want to check for equality + BadRequest = newError(400, "Bad Request") + AuthorizationFailed = newError(401, "Authorization Failed") + ContainerNotFound = newError(404, "Container Not Found") + ContainerNotEmpty = newError(409, "Container Not Empty") + ObjectNotFound = newError(404, "Object Not Found") + ObjectCorrupted = newError(422, "Object Corrupted") + TimeoutError = newError(408, "Timeout when reading or writing data") + Forbidden = newError(403, "Operation forbidden") + TooLargeObject = newError(413, "Too Large Object") + + // Mappings for authentication errors + authErrorMap = errorMap{ + 400: BadRequest, + 401: AuthorizationFailed, + 403: Forbidden, + } + + // Mappings for container errors + ContainerErrorMap = errorMap{ + 400: BadRequest, + 403: Forbidden, + 404: ContainerNotFound, + 409: ContainerNotEmpty, + } + + // Mappings for object errors + objectErrorMap = errorMap{ + 400: BadRequest, + 403: Forbidden, + 404: ObjectNotFound, + 413: TooLargeObject, + 422: ObjectCorrupted, + } +) + +// checkClose is used to check the return from Close in a defer +// statement. +func checkClose(c io.Closer, err *error) { + cerr := c.Close() + if *err == nil { + *err = cerr + } +} + +// parseHeaders checks a response for errors and translates into +// standard errors if necessary. +func (c *Connection) parseHeaders(resp *http.Response, errorMap errorMap) error { + if errorMap != nil { + if err, ok := errorMap[resp.StatusCode]; ok { + return err + } + } + if resp.StatusCode < 200 || resp.StatusCode > 299 { + return newErrorf(resp.StatusCode, "HTTP Error: %d: %s", resp.StatusCode, resp.Status) + } + return nil +} + +// readHeaders returns a Headers object from the http.Response. +// +// If it receives multiple values for a key (which should never +// happen) it will use the first one +func readHeaders(resp *http.Response) Headers { + headers := Headers{} + for key, values := range resp.Header { + headers[key] = values[0] + } + return headers +} + +// Headers stores HTTP headers (can only have one of each header like Swift). 
+type Headers map[string]string + +// Does an http request using the running timer passed in +func (c *Connection) doTimeoutRequest(timer *time.Timer, req *http.Request) (*http.Response, error) { + // Do the request in the background so we can check the timeout + type result struct { + resp *http.Response + err error + } + done := make(chan result, 1) + go func() { + resp, err := c.client.Do(req) + done <- result{resp, err} + }() + // Wait for the read or the timeout + select { + case r := <-done: + return r.resp, r.err + case <-timer.C: + // Kill the connection on timeout so we don't leak sockets or goroutines + cancelRequest(c.Transport, req) + return nil, TimeoutError + } + panic("unreachable") // For Go 1.0 +} + +// Set defaults for any unset values +// +// Call with authLock held +func (c *Connection) setDefaults() { + if c.UserAgent == "" { + c.UserAgent = DefaultUserAgent + } + if c.Retries == 0 { + c.Retries = DefaultRetries + } + if c.ConnectTimeout == 0 { + c.ConnectTimeout = 10 * time.Second + } + if c.Timeout == 0 { + c.Timeout = 60 * time.Second + } + if c.Transport == nil { + c.Transport = &http.Transport{ + // TLSClientConfig: &tls.Config{RootCAs: pool}, + // DisableCompression: true, + Proxy: http.ProxyFromEnvironment, + MaxIdleConnsPerHost: 2048, + } + } + if c.client == nil { + c.client = &http.Client{ + // CheckRedirect: redirectPolicyFunc, + Transport: c.Transport, + } + } +} + +// Authenticate connects to the Swift server. +// +// If you don't call it before calling one of the connection methods +// then it will be called for you on the first access. +func (c *Connection) Authenticate() (err error) { + c.authLock.Lock() + defer c.authLock.Unlock() + return c.authenticate() +} + +// Internal implementation of Authenticate +// +// Call with authLock held +func (c *Connection) authenticate() (err error) { + c.setDefaults() + + // Flush the keepalives connection - if we are + // re-authenticating then stuff has gone wrong + flushKeepaliveConnections(c.Transport) + + if c.Auth == nil { + c.Auth, err = newAuth(c) + if err != nil { + return + } + } + + retries := 1 +again: + var req *http.Request + req, err = c.Auth.Request(c) + if err != nil { + return + } + if req != nil { + timer := time.NewTimer(c.ConnectTimeout) + var resp *http.Response + resp, err = c.doTimeoutRequest(timer, req) + if err != nil { + return + } + defer func() { + checkClose(resp.Body, &err) + // Flush the auth connection - we don't want to keep + // it open if keepalives were enabled + flushKeepaliveConnections(c.Transport) + }() + if err = c.parseHeaders(resp, authErrorMap); err != nil { + // Try again for a limited number of times on + // AuthorizationFailed or BadRequest. 
This allows us + // to try some alternate forms of the request + if (err == AuthorizationFailed || err == BadRequest) && retries > 0 { + retries-- + goto again + } + return + } + err = c.Auth.Response(resp) + if err != nil { + return + } + } + if customAuth, isCustom := c.Auth.(CustomEndpointAuthenticator); isCustom && c.EndpointType != "" { + c.StorageUrl = customAuth.StorageUrlForEndpoint(c.EndpointType) + } else { + c.StorageUrl = c.Auth.StorageUrl(c.Internal) + } + c.AuthToken = c.Auth.Token() + if !c.authenticated() { + err = newError(0, "Response didn't have storage url and auth token") + return + } + return +} + +// Get an authToken and url +// +// The Url may be updated if it needed to authenticate using the OnReAuth function +func (c *Connection) getUrlAndAuthToken(targetUrlIn string, OnReAuth func() (string, error)) (targetUrlOut, authToken string, err error) { + c.authLock.Lock() + defer c.authLock.Unlock() + targetUrlOut = targetUrlIn + if !c.authenticated() { + err = c.authenticate() + if err != nil { + return + } + if OnReAuth != nil { + targetUrlOut, err = OnReAuth() + if err != nil { + return + } + } + } + authToken = c.AuthToken + return +} + +// flushKeepaliveConnections is called to flush pending requests after an error. +func flushKeepaliveConnections(transport http.RoundTripper) { + if tr, ok := transport.(interface { + CloseIdleConnections() + }); ok { + tr.CloseIdleConnections() + } +} + +// UnAuthenticate removes the authentication from the Connection. +func (c *Connection) UnAuthenticate() { + c.authLock.Lock() + c.StorageUrl = "" + c.AuthToken = "" + c.authLock.Unlock() +} + +// Authenticated returns a boolean to show if the current connection +// is authenticated. +// +// Doesn't actually check the credentials against the server. +func (c *Connection) Authenticated() bool { + c.authLock.Lock() + defer c.authLock.Unlock() + return c.authenticated() +} + +// Internal version of Authenticated() +// +// Call with authLock held +func (c *Connection) authenticated() bool { + return c.StorageUrl != "" && c.AuthToken != "" +} + +// SwiftInfo contains the JSON object returned by Swift when the /info +// route is queried. The object contains, among others, the Swift version, +// the enabled middlewares and their configuration +type SwiftInfo map[string]interface{} + +// Discover Swift configuration by doing a request against /info +func (c *Connection) QueryInfo() (infos SwiftInfo, err error) { + infoUrl, err := url.Parse(c.StorageUrl) + if err != nil { + return nil, err + } + infoUrl.Path = path.Join(infoUrl.Path, "..", "..", "info") + resp, err := http.Get(infoUrl.String()) + if err == nil { + err = readJson(resp, &infos) + return infos, err + } + return nil, err +} + +// RequestOpts contains parameters for Connection.storage. +type RequestOpts struct { + Container string + ObjectName string + Operation string + Parameters url.Values + Headers Headers + ErrorMap errorMap + NoResponse bool + Body io.Reader + Retries int + // if set this is called on re-authentication to refresh the targetUrl + OnReAuth func() (string, error) +} + +// Call runs a remote command on the targetUrl, returns a +// response, headers and possible error. +// +// operation is GET, HEAD etc +// container is the name of a container +// Any other parameters (if not None) are added to the targetUrl +// +// Returns a response or an error. 
If response is returned then +// resp.Body.Close() must be called on it, unless noResponse is set in +// which case the body will be closed in this function +// +// If "Content-Length" is set in p.Headers it will be used - this can +// be used to override the default chunked transfer encoding for +// uploads. +// +// This will Authenticate if necessary, and re-authenticate if it +// receives a 401 error which means the token has expired +// +// This method is exported so extensions can call it. +func (c *Connection) Call(targetUrl string, p RequestOpts) (resp *http.Response, headers Headers, err error) { + c.authLock.Lock() + c.setDefaults() + c.authLock.Unlock() + retries := p.Retries + if retries == 0 { + retries = c.Retries + } + var req *http.Request + for { + var authToken string + if targetUrl, authToken, err = c.getUrlAndAuthToken(targetUrl, p.OnReAuth); err != nil { + return //authentication failure + } + var URL *url.URL + URL, err = url.Parse(targetUrl) + if err != nil { + return + } + if p.Container != "" { + URL.Path += "/" + p.Container + if p.ObjectName != "" { + URL.Path += "/" + p.ObjectName + } + } + if p.Parameters != nil { + URL.RawQuery = p.Parameters.Encode() + } + timer := time.NewTimer(c.ConnectTimeout) + reader := p.Body + if reader != nil { + reader = newWatchdogReader(reader, c.Timeout, timer) + } + req, err = http.NewRequest(p.Operation, URL.String(), reader) + if err != nil { + return + } + if p.Headers != nil { + for k, v := range p.Headers { + // Set ContentLength in req if the user passed it in in the headers + if k == "Content-Length" { + contentLength, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return nil, nil, fmt.Errorf("Invalid %q header %q: %v", k, v, err) + } + req.ContentLength = contentLength + } else { + req.Header.Add(k, v) + } + } + } + req.Header.Add("User-Agent", c.UserAgent) + req.Header.Add("X-Auth-Token", authToken) + resp, err = c.doTimeoutRequest(timer, req) + if err != nil { + if (p.Operation == "HEAD" || p.Operation == "GET") && retries > 0 { + retries-- + continue + } + return nil, nil, err + } + // Check to see if token has expired + if resp.StatusCode == 401 && retries > 0 { + _ = resp.Body.Close() + c.UnAuthenticate() + retries-- + } else { + break + } + } + + if err = c.parseHeaders(resp, p.ErrorMap); err != nil { + _ = resp.Body.Close() + return nil, nil, err + } + headers = readHeaders(resp) + if p.NoResponse { + err = resp.Body.Close() + if err != nil { + return nil, nil, err + } + } else { + // Cancel the request on timeout + cancel := func() { + cancelRequest(c.Transport, req) + } + // Wrap resp.Body to make it obey an idle timeout + resp.Body = newTimeoutReader(resp.Body, c.Timeout, cancel) + } + return +} + +// storage runs a remote command on a the storage url, returns a +// response, headers and possible error. +// +// operation is GET, HEAD etc +// container is the name of a container +// Any other parameters (if not None) are added to the storage url +// +// Returns a response or an error. 
If response is returned then +// resp.Body.Close() must be called on it, unless noResponse is set in +// which case the body will be closed in this function +// +// This will Authenticate if necessary, and re-authenticate if it +// receives a 401 error which means the token has expired +func (c *Connection) storage(p RequestOpts) (resp *http.Response, headers Headers, err error) { + p.OnReAuth = func() (string, error) { + return c.StorageUrl, nil + } + c.authLock.Lock() + url := c.StorageUrl + c.authLock.Unlock() + return c.Call(url, p) +} + +// readLines reads the response into an array of strings. +// +// Closes the response when done +func readLines(resp *http.Response) (lines []string, err error) { + defer checkClose(resp.Body, &err) + reader := bufio.NewReader(resp.Body) + buffer := bytes.NewBuffer(make([]byte, 0, 128)) + var part []byte + var prefix bool + for { + if part, prefix, err = reader.ReadLine(); err != nil { + break + } + buffer.Write(part) + if !prefix { + lines = append(lines, buffer.String()) + buffer.Reset() + } + } + if err == io.EOF { + err = nil + } + return +} + +// readJson reads the response into the json type passed in +// +// Closes the response when done +func readJson(resp *http.Response, result interface{}) (err error) { + defer checkClose(resp.Body, &err) + decoder := json.NewDecoder(resp.Body) + return decoder.Decode(result) +} + +/* ------------------------------------------------------------ */ + +// ContainersOpts is options for Containers() and ContainerNames() +type ContainersOpts struct { + Limit int // For an integer value n, limits the number of results to at most n values. + Prefix string // Given a string value x, return container names matching the specified prefix. + Marker string // Given a string value x, return container names greater in value than the specified marker. + EndMarker string // Given a string value x, return container names less in value than the specified marker. + Headers Headers // Any additional HTTP headers - can be nil +} + +// parse the ContainerOpts +func (opts *ContainersOpts) parse() (url.Values, Headers) { + v := url.Values{} + var h Headers + if opts != nil { + if opts.Limit > 0 { + v.Set("limit", strconv.Itoa(opts.Limit)) + } + if opts.Prefix != "" { + v.Set("prefix", opts.Prefix) + } + if opts.Marker != "" { + v.Set("marker", opts.Marker) + } + if opts.EndMarker != "" { + v.Set("end_marker", opts.EndMarker) + } + h = opts.Headers + } + return v, h +} + +// ContainerNames returns a slice of names of containers in this account. +func (c *Connection) ContainerNames(opts *ContainersOpts) ([]string, error) { + v, h := opts.parse() + resp, _, err := c.storage(RequestOpts{ + Operation: "GET", + Parameters: v, + ErrorMap: ContainerErrorMap, + Headers: h, + }) + if err != nil { + return nil, err + } + lines, err := readLines(resp) + return lines, err +} + +// Container contains information about a container +type Container struct { + Name string // Name of the container + Count int64 // Number of objects in the container + Bytes int64 // Total number of bytes used in the container +} + +// Containers returns a slice of structures with full information as +// described in Container. 
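A short sketch of listing containers with ContainersOpts, assuming c is an already authenticated *swift.Connection, the "fmt" import, and a placeholder prefix:

// listContainers prints each matching container with its object count and size.
func listContainers(c *swift.Connection) error {
	containers, err := c.Containers(&swift.ContainersOpts{
		Prefix: "backups-", // placeholder prefix
		Limit:  100,        // at most 100 results per request
	})
	if err != nil {
		return err
	}
	for _, ct := range containers {
		fmt.Printf("%s: %d objects, %d bytes\n", ct.Name, ct.Count, ct.Bytes)
	}
	return nil
}
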
+func (c *Connection) Containers(opts *ContainersOpts) ([]Container, error) { + v, h := opts.parse() + v.Set("format", "json") + resp, _, err := c.storage(RequestOpts{ + Operation: "GET", + Parameters: v, + ErrorMap: ContainerErrorMap, + Headers: h, + }) + if err != nil { + return nil, err + } + var containers []Container + err = readJson(resp, &containers) + return containers, err +} + +// containersAllOpts makes a copy of opts if set or makes a new one and +// overrides Limit and Marker +func containersAllOpts(opts *ContainersOpts) *ContainersOpts { + var newOpts ContainersOpts + if opts != nil { + newOpts = *opts + } + if newOpts.Limit == 0 { + newOpts.Limit = allContainersLimit + } + newOpts.Marker = "" + return &newOpts +} + +// ContainersAll is like Containers but it returns all the Containers +// +// It calls Containers multiple times using the Marker parameter +// +// It has a default Limit parameter but you may pass in your own +func (c *Connection) ContainersAll(opts *ContainersOpts) ([]Container, error) { + opts = containersAllOpts(opts) + containers := make([]Container, 0) + for { + newContainers, err := c.Containers(opts) + if err != nil { + return nil, err + } + containers = append(containers, newContainers...) + if len(newContainers) < opts.Limit { + break + } + opts.Marker = newContainers[len(newContainers)-1].Name + } + return containers, nil +} + +// ContainerNamesAll is like ContainerNamess but it returns all the Containers +// +// It calls ContainerNames multiple times using the Marker parameter +// +// It has a default Limit parameter but you may pass in your own +func (c *Connection) ContainerNamesAll(opts *ContainersOpts) ([]string, error) { + opts = containersAllOpts(opts) + containers := make([]string, 0) + for { + newContainers, err := c.ContainerNames(opts) + if err != nil { + return nil, err + } + containers = append(containers, newContainers...) + if len(newContainers) < opts.Limit { + break + } + opts.Marker = newContainers[len(newContainers)-1] + } + return containers, nil +} + +/* ------------------------------------------------------------ */ + +// ObjectOpts is options for Objects() and ObjectNames() +type ObjectsOpts struct { + Limit int // For an integer value n, limits the number of results to at most n values. + Marker string // Given a string value x, return object names greater in value than the specified marker. + EndMarker string // Given a string value x, return object names less in value than the specified marker + Prefix string // For a string value x, causes the results to be limited to object names beginning with the substring x. + Path string // For a string value x, return the object names nested in the pseudo path + Delimiter rune // For a character c, return all the object names nested in the container + Headers Headers // Any additional HTTP headers - can be nil +} + +// parse reads values out of ObjectsOpts +func (opts *ObjectsOpts) parse() (url.Values, Headers) { + v := url.Values{} + var h Headers + if opts != nil { + if opts.Limit > 0 { + v.Set("limit", strconv.Itoa(opts.Limit)) + } + if opts.Marker != "" { + v.Set("marker", opts.Marker) + } + if opts.EndMarker != "" { + v.Set("end_marker", opts.EndMarker) + } + if opts.Prefix != "" { + v.Set("prefix", opts.Prefix) + } + if opts.Path != "" { + v.Set("path", opts.Path) + } + if opts.Delimiter != 0 { + v.Set("delimiter", string(opts.Delimiter)) + } + h = opts.Headers + } + return v, h +} + +// ObjectNames returns a slice of names of objects in a given container. 
+func (c *Connection) ObjectNames(container string, opts *ObjectsOpts) ([]string, error) { + v, h := opts.parse() + resp, _, err := c.storage(RequestOpts{ + Container: container, + Operation: "GET", + Parameters: v, + ErrorMap: ContainerErrorMap, + Headers: h, + }) + if err != nil { + return nil, err + } + return readLines(resp) +} + +// Object contains information about an object +type Object struct { + Name string `json:"name"` // object name + ContentType string `json:"content_type"` // eg application/directory + Bytes int64 `json:"bytes"` // size in bytes + ServerLastModified string `json:"last_modified"` // Last modified time, eg '2011-06-30T08:20:47.736680' as a string supplied by the server + LastModified time.Time // Last modified time converted to a time.Time + Hash string `json:"hash"` // MD5 hash, eg "d41d8cd98f00b204e9800998ecf8427e" + PseudoDirectory bool // Set when using delimiter to show that this directory object does not really exist + SubDir string `json:"subdir"` // returned only when using delimiter to mark "pseudo directories" +} + +// Objects returns a slice of Object with information about each +// object in the container. +// +// If Delimiter is set in the opts then PseudoDirectory may be set, +// with ContentType 'application/directory'. These are not real +// objects but represent directories of objects which haven't had an +// object created for them. +func (c *Connection) Objects(container string, opts *ObjectsOpts) ([]Object, error) { + v, h := opts.parse() + v.Set("format", "json") + resp, _, err := c.storage(RequestOpts{ + Container: container, + Operation: "GET", + Parameters: v, + ErrorMap: ContainerErrorMap, + Headers: h, + }) + if err != nil { + return nil, err + } + var objects []Object + err = readJson(resp, &objects) + // Convert Pseudo directories and dates + for i := range objects { + object := &objects[i] + if object.SubDir != "" { + object.Name = object.SubDir + object.PseudoDirectory = true + object.ContentType = "application/directory" + } + if object.ServerLastModified != "" { + // 2012-11-11T14:49:47.887250 + // + // Remove fractional seconds if present. This + // then keeps it consistent with Object + // which can only return timestamps accurate + // to 1 second + // + // The TimeFormat will parse fractional + // seconds if desired though + datetime := strings.SplitN(object.ServerLastModified, ".", 2)[0] + object.LastModified, err = time.Parse(TimeFormat, datetime) + if err != nil { + return nil, err + } + } + } + return objects, err +} + +// objectsAllOpts makes a copy of opts if set or makes a new one and +// overrides Limit and Marker +func objectsAllOpts(opts *ObjectsOpts, Limit int) *ObjectsOpts { + var newOpts ObjectsOpts + if opts != nil { + newOpts = *opts + } + if newOpts.Limit == 0 { + newOpts.Limit = Limit + } + newOpts.Marker = "" + return &newOpts +} + +// A closure defined by the caller to iterate through all objects +// +// Call Objects or ObjectNames from here with the *ObjectOpts passed in +// +// Do whatever is required with the results then return them +type ObjectsWalkFn func(*ObjectsOpts) (interface{}, error) + +// ObjectsWalk is uses to iterate through all the objects in chunks as +// returned by Objects or ObjectNames using the Marker and Limit +// parameters in the ObjectsOpts. +// +// Pass in a closure `walkFn` which calls Objects or ObjectNames with +// the *ObjectsOpts passed to it and does something with the results. 
+// +// Errors will be returned from this function +// +// It has a default Limit parameter but you may pass in your own +func (c *Connection) ObjectsWalk(container string, opts *ObjectsOpts, walkFn ObjectsWalkFn) error { + opts = objectsAllOpts(opts, allObjectsChanLimit) + for { + objects, err := walkFn(opts) + if err != nil { + return err + } + var n int + var last string + switch objects := objects.(type) { + case []string: + n = len(objects) + if n > 0 { + last = objects[len(objects)-1] + } + case []Object: + n = len(objects) + if n > 0 { + last = objects[len(objects)-1].Name + } + default: + panic("Unknown type returned to ObjectsWalk") + } + if n < opts.Limit { + break + } + opts.Marker = last + } + return nil +} + +// ObjectsAll is like Objects but it returns an unlimited number of Objects in a slice +// +// It calls Objects multiple times using the Marker parameter +func (c *Connection) ObjectsAll(container string, opts *ObjectsOpts) ([]Object, error) { + objects := make([]Object, 0) + err := c.ObjectsWalk(container, opts, func(opts *ObjectsOpts) (interface{}, error) { + newObjects, err := c.Objects(container, opts) + if err == nil { + objects = append(objects, newObjects...) + } + return newObjects, err + }) + return objects, err +} + +// ObjectNamesAll is like ObjectNames but it returns all the Objects +// +// It calls ObjectNames multiple times using the Marker parameter +// +// It has a default Limit parameter but you may pass in your own +func (c *Connection) ObjectNamesAll(container string, opts *ObjectsOpts) ([]string, error) { + objects := make([]string, 0) + err := c.ObjectsWalk(container, opts, func(opts *ObjectsOpts) (interface{}, error) { + newObjects, err := c.ObjectNames(container, opts) + if err == nil { + objects = append(objects, newObjects...) + } + return newObjects, err + }) + return objects, err +} + +// Account contains information about this account. +type Account struct { + BytesUsed int64 // total number of bytes used + Containers int64 // total number of containers + Objects int64 // total number of objects +} + +// getInt64FromHeader is a helper function to decode int64 from header. +func getInt64FromHeader(resp *http.Response, header string) (result int64, err error) { + value := resp.Header.Get(header) + result, err = strconv.ParseInt(value, 10, 64) + if err != nil { + err = newErrorf(0, "Bad Header '%s': '%s': %s", header, value, err) + } + return +} + +// Account returns info about the account in an Account struct. +func (c *Connection) Account() (info Account, headers Headers, err error) { + var resp *http.Response + resp, headers, err = c.storage(RequestOpts{ + Operation: "HEAD", + ErrorMap: ContainerErrorMap, + NoResponse: true, + }) + if err != nil { + return + } + // Parse the headers into a dict + // + // {'Accept-Ranges': 'bytes', + // 'Content-Length': '0', + // 'Date': 'Tue, 05 Jul 2011 16:37:06 GMT', + // 'X-Account-Bytes-Used': '316598182', + // 'X-Account-Container-Count': '4', + // 'X-Account-Object-Count': '1433'} + if info.BytesUsed, err = getInt64FromHeader(resp, "X-Account-Bytes-Used"); err != nil { + return + } + if info.Containers, err = getInt64FromHeader(resp, "X-Account-Container-Count"); err != nil { + return + } + if info.Objects, err = getInt64FromHeader(resp, "X-Account-Object-Count"); err != nil { + return + } + return +} + +// AccountUpdate adds, replaces or remove account metadata. +// +// Add or update keys by mentioning them in the Headers. +// +// Remove keys by setting them to an empty string. 
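A small sketch of setting and then removing a piece of account metadata through the Metadata helpers, assuming an authenticated connection; the key and value are placeholders:

// tagAccount writes X-Account-Meta-Owner and then clears it again.
func tagAccount(c *swift.Connection) error {
	m := swift.Metadata{"owner": "ops"} // placeholder key/value
	if err := c.AccountUpdate(m.AccountHeaders()); err != nil {
		return err
	}
	// An empty value removes the key.
	return c.AccountUpdate(swift.Headers{"X-Account-Meta-Owner": ""})
}
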
+func (c *Connection) AccountUpdate(h Headers) error { + _, _, err := c.storage(RequestOpts{ + Operation: "POST", + ErrorMap: ContainerErrorMap, + NoResponse: true, + Headers: h, + }) + return err +} + +// ContainerCreate creates a container. +// +// If you don't want to add Headers just pass in nil +// +// No error is returned if it already exists but the metadata if any will be updated. +func (c *Connection) ContainerCreate(container string, h Headers) error { + _, _, err := c.storage(RequestOpts{ + Container: container, + Operation: "PUT", + ErrorMap: ContainerErrorMap, + NoResponse: true, + Headers: h, + }) + return err +} + +// ContainerDelete deletes a container. +// +// May return ContainerDoesNotExist or ContainerNotEmpty +func (c *Connection) ContainerDelete(container string) error { + _, _, err := c.storage(RequestOpts{ + Container: container, + Operation: "DELETE", + ErrorMap: ContainerErrorMap, + NoResponse: true, + }) + return err +} + +// Container returns info about a single container including any +// metadata in the headers. +func (c *Connection) Container(container string) (info Container, headers Headers, err error) { + var resp *http.Response + resp, headers, err = c.storage(RequestOpts{ + Container: container, + Operation: "HEAD", + ErrorMap: ContainerErrorMap, + NoResponse: true, + }) + if err != nil { + return + } + // Parse the headers into the struct + info.Name = container + if info.Bytes, err = getInt64FromHeader(resp, "X-Container-Bytes-Used"); err != nil { + return + } + if info.Count, err = getInt64FromHeader(resp, "X-Container-Object-Count"); err != nil { + return + } + return +} + +// ContainerUpdate adds, replaces or removes container metadata. +// +// Add or update keys by mentioning them in the Metadata. +// +// Remove keys by setting them to an empty string. +// +// Container metadata can only be read with Container() not with Containers(). +func (c *Connection) ContainerUpdate(container string, h Headers) error { + _, _, err := c.storage(RequestOpts{ + Container: container, + Operation: "POST", + ErrorMap: ContainerErrorMap, + NoResponse: true, + Headers: h, + }) + return err +} + +// ------------------------------------------------------------ + +// ObjectCreateFile represents a swift object open for writing +type ObjectCreateFile struct { + checkHash bool // whether we are checking the hash + pipeReader *io.PipeReader // pipe for the caller to use + pipeWriter *io.PipeWriter + hash hash.Hash // hash being build up as we go along + done chan struct{} // signals when the upload has finished + resp *http.Response // valid when done has signalled + err error // ditto + headers Headers // ditto +} + +// Write bytes to the object - see io.Writer +func (file *ObjectCreateFile) Write(p []byte) (n int, err error) { + n, err = file.pipeWriter.Write(p) + if err == io.ErrClosedPipe { + if file.err != nil { + return 0, file.err + } + return 0, newError(500, "Write on closed file") + } + if err == nil && file.checkHash { + _, _ = file.hash.Write(p) + } + return +} + +// Close the object and checks the md5sum if it was required. +// +// Also returns any other errors from the server (eg container not +// found) so it is very important to check the errors on this method. 
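A brief sketch of the streaming upload path, assuming the "io" import and placeholder container and object names; the error from Close is checked because that is where upload and checksum failures surface:

// uploadStream copies r into an object and lets the library verify the MD5.
func uploadStream(c *swift.Connection, r io.Reader) error {
	f, err := c.ObjectCreate("logs", "app.log", true, "", "text/plain", nil) // placeholder names
	if err != nil {
		return err
	}
	if _, err := io.Copy(f, r); err != nil {
		f.Close() // best effort; the copy error takes precedence
		return err
	}
	return f.Close() // ObjectCorrupted here means the MD5 did not match
}
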
+func (file *ObjectCreateFile) Close() error { + // Close the body + err := file.pipeWriter.Close() + if err != nil { + return err + } + + // Wait for the HTTP operation to complete + <-file.done + + // Check errors + if file.err != nil { + return file.err + } + if file.checkHash { + receivedMd5 := strings.ToLower(file.headers["Etag"]) + calculatedMd5 := fmt.Sprintf("%x", file.hash.Sum(nil)) + if receivedMd5 != calculatedMd5 { + return ObjectCorrupted + } + } + return nil +} + +// Check it satisfies the interface +var _ io.WriteCloser = &ObjectCreateFile{} + +// objectPutHeaders create a set of headers for a PUT +// +// It guesses the contentType from the objectName if it isn't set +// +// checkHash may be changed +func objectPutHeaders(objectName string, checkHash *bool, Hash string, contentType string, h Headers) Headers { + if contentType == "" { + contentType = mime.TypeByExtension(path.Ext(objectName)) + if contentType == "" { + contentType = "application/octet-stream" + } + } + // Meta stuff + extraHeaders := map[string]string{ + "Content-Type": contentType, + } + for key, value := range h { + extraHeaders[key] = value + } + if Hash != "" { + extraHeaders["Etag"] = Hash + *checkHash = false // the server will do it + } + return extraHeaders +} + +// ObjectCreate creates or updates the object in the container. It +// returns an io.WriteCloser you should write the contents to. You +// MUST call Close() on it and you MUST check the error return from +// Close(). +// +// If checkHash is True then it will calculate the MD5 Hash of the +// file as it is being uploaded and check it against that returned +// from the server. If it is wrong then it will return +// ObjectCorrupted on Close() +// +// If you know the MD5 hash of the object ahead of time then set the +// Hash parameter and it will be sent to the server (as an Etag +// header) and the server will check the MD5 itself after the upload, +// and this will return ObjectCorrupted on Close() if it is incorrect. +// +// If you don't want any error protection (not recommended) then set +// checkHash to false and Hash to "". +// +// If contentType is set it will be used, otherwise one will be +// guessed from objectName using mime.TypeByExtension +func (c *Connection) ObjectCreate(container string, objectName string, checkHash bool, Hash string, contentType string, h Headers) (file *ObjectCreateFile, err error) { + extraHeaders := objectPutHeaders(objectName, &checkHash, Hash, contentType, h) + pipeReader, pipeWriter := io.Pipe() + file = &ObjectCreateFile{ + hash: md5.New(), + checkHash: checkHash, + pipeReader: pipeReader, + pipeWriter: pipeWriter, + done: make(chan struct{}), + } + // Run the PUT in the background piping it data + go func() { + file.resp, file.headers, file.err = c.storage(RequestOpts{ + Container: container, + ObjectName: objectName, + Operation: "PUT", + Headers: extraHeaders, + Body: pipeReader, + NoResponse: true, + ErrorMap: objectErrorMap, + }) + // Signal finished + pipeReader.Close() + close(file.done) + }() + return +} + +// ObjectPut creates or updates the path in the container from +// contents. contents should be an open io.Reader which will have all +// its contents read. +// +// This is a low level interface. +// +// If checkHash is True then it will calculate the MD5 Hash of the +// file as it is being uploaded and check it against that returned +// from the server. If it is wrong then it will return +// ObjectCorrupted. 
+// +// If you know the MD5 hash of the object ahead of time then set the +// Hash parameter and it will be sent to the server (as an Etag +// header) and the server will check the MD5 itself after the upload, +// and this will return ObjectCorrupted if it is incorrect. +// +// If you don't want any error protection (not recommended) then set +// checkHash to false and Hash to "". +// +// If contentType is set it will be used, otherwise one will be +// guessed from objectName using mime.TypeByExtension +func (c *Connection) ObjectPut(container string, objectName string, contents io.Reader, checkHash bool, Hash string, contentType string, h Headers) (headers Headers, err error) { + extraHeaders := objectPutHeaders(objectName, &checkHash, Hash, contentType, h) + hash := md5.New() + var body io.Reader = contents + if checkHash { + body = io.TeeReader(contents, hash) + } + _, headers, err = c.storage(RequestOpts{ + Container: container, + ObjectName: objectName, + Operation: "PUT", + Headers: extraHeaders, + Body: body, + NoResponse: true, + ErrorMap: objectErrorMap, + }) + if err != nil { + return + } + if checkHash { + receivedMd5 := strings.ToLower(headers["Etag"]) + calculatedMd5 := fmt.Sprintf("%x", hash.Sum(nil)) + if receivedMd5 != calculatedMd5 { + err = ObjectCorrupted + return + } + } + return +} + +// ObjectPutBytes creates an object from a []byte in a container. +// +// This is a simplified interface which checks the MD5. +func (c *Connection) ObjectPutBytes(container string, objectName string, contents []byte, contentType string) (err error) { + buf := bytes.NewBuffer(contents) + _, err = c.ObjectPut(container, objectName, buf, true, "", contentType, nil) + return +} + +// ObjectPutString creates an object from a string in a container. +// +// This is a simplified interface which checks the MD5 +func (c *Connection) ObjectPutString(container string, objectName string, contents string, contentType string) (err error) { + buf := strings.NewReader(contents) + _, err = c.ObjectPut(container, objectName, buf, true, "", contentType, nil) + return +} + +// ObjectOpenFile represents a swift object open for reading +type ObjectOpenFile struct { + connection *Connection // stored copy of Connection used in Open + container string // stored copy of container used in Open + objectName string // stored copy of objectName used in Open + headers Headers // stored copy of headers used in Open + resp *http.Response // http connection + body io.Reader // read data from this + checkHash bool // true if checking MD5 + hash hash.Hash // currently accumulating MD5 + bytes int64 // number of bytes read on this connection + eof bool // whether we have read end of file + pos int64 // current position when reading + lengthOk bool // whether length is valid + length int64 // length of the object if read + seeked bool // whether we have seeked this file or not +} + +// Read bytes from the object - see io.Reader +func (file *ObjectOpenFile) Read(p []byte) (n int, err error) { + n, err = file.body.Read(p) + file.bytes += int64(n) + file.pos += int64(n) + if err == io.EOF { + file.eof = true + } + return +} + +// Seek sets the offset for the next Read to offset, interpreted +// according to whence: 0 means relative to the origin of the file, 1 +// means relative to the current offset, and 2 means relative to the +// end. Seek returns the new offset and an Error, if any. +// +// Seek uses HTTP Range headers which, if the file pointer is moved, +// will involve reopening the HTTP connection. 
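A minimal sketch using the simplified helpers above (ObjectPutString, ObjectPutBytes), which verify the MD5 automatically; the connection is assumed authenticated and all names and contents are placeholders:

// putSmall stores two small objects via the simplified helpers.
func putSmall(c *swift.Connection) error {
	if err := c.ObjectPutString("config", "greeting.txt", "hello", "text/plain"); err != nil {
		return err
	}
	return c.ObjectPutBytes("config", "greeting.bin", []byte{0x01, 0x02}, "application/octet-stream")
}
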
+// +// Note that you can't seek to the end of a file or beyond; HTTP Range +// requests don't support the file pointer being outside the data, +// unlike os.File +// +// Seek(0, 1) will return the current file pointer. +func (file *ObjectOpenFile) Seek(offset int64, whence int) (newPos int64, err error) { + switch whence { + case 0: // relative to start + newPos = offset + case 1: // relative to current + newPos = file.pos + offset + case 2: // relative to end + if !file.lengthOk { + return file.pos, newError(0, "Length of file unknown so can't seek from end") + } + newPos = file.length + offset + default: + panic("Unknown whence in ObjectOpenFile.Seek") + } + // If at correct position (quite likely), do nothing + if newPos == file.pos { + return + } + // Close the file... + file.seeked = true + err = file.Close() + if err != nil { + return + } + // ...and re-open with a Range header + if file.headers == nil { + file.headers = Headers{} + } + if newPos > 0 { + file.headers["Range"] = fmt.Sprintf("bytes=%d-", newPos) + } else { + delete(file.headers, "Range") + } + newFile, _, err := file.connection.ObjectOpen(file.container, file.objectName, false, file.headers) + if err != nil { + return + } + // Update the file + file.resp = newFile.resp + file.body = newFile.body + file.checkHash = false + file.pos = newPos + return +} + +// Length gets the objects content length either from a cached copy or +// from the server. +func (file *ObjectOpenFile) Length() (int64, error) { + if !file.lengthOk { + info, _, err := file.connection.Object(file.container, file.objectName) + file.length = info.Bytes + file.lengthOk = (err == nil) + return file.length, err + } + return file.length, nil +} + +// Close the object and checks the length and md5sum if it was +// required and all the object was read +func (file *ObjectOpenFile) Close() (err error) { + // Close the body at the end + defer checkClose(file.resp.Body, &err) + + // If not end of file or seeked then can't check anything + if !file.eof || file.seeked { + return + } + + // Check the MD5 sum if requested + if file.checkHash { + receivedMd5 := strings.ToLower(file.resp.Header.Get("Etag")) + calculatedMd5 := fmt.Sprintf("%x", file.hash.Sum(nil)) + if receivedMd5 != calculatedMd5 { + err = ObjectCorrupted + return + } + } + + // Check to see we read the correct number of bytes + if file.lengthOk && file.length != file.bytes { + err = ObjectCorrupted + return + } + return +} + +// Check it satisfies the interfaces +var _ io.ReadCloser = &ObjectOpenFile{} +var _ io.Seeker = &ObjectOpenFile{} + +// ObjectOpen returns an ObjectOpenFile for reading the contents of +// the object. This satisfies the io.ReadCloser and the io.Seeker +// interfaces. +// +// You must call Close() on contents when finished +// +// Returns the headers of the response. +// +// If checkHash is true then it will calculate the md5sum of the file +// as it is being received and check it against that returned from the +// server. If it is wrong then it will return ObjectCorrupted. It +// will also check the length returned. No checking will be done if +// you don't read all the contents. +// +// Note that objects with X-Object-Manifest or X-Static-Large-Object +// set won't ever have their md5sum's checked as the md5sum reported +// on the object is actually the md5sum of the md5sums of the +// parts. This isn't very helpful to detect a corrupted download as +// the size of the parts aren't known without doing more operations. 
+// If you want to ensure integrity of an object with a manifest then +// you will need to download everything in the manifest separately. +// +// headers["Content-Type"] will give the content type if desired. +func (c *Connection) ObjectOpen(container string, objectName string, checkHash bool, h Headers) (file *ObjectOpenFile, headers Headers, err error) { + var resp *http.Response + resp, headers, err = c.storage(RequestOpts{ + Container: container, + ObjectName: objectName, + Operation: "GET", + ErrorMap: objectErrorMap, + Headers: h, + }) + if err != nil { + return + } + // Can't check MD5 on an object with X-Object-Manifest or X-Static-Large-Object set + if checkHash && (headers["X-Object-Manifest"] != "" || headers["X-Static-Large-Object"] != "") { + // log.Printf("swift: turning off md5 checking on object with manifest %v", objectName) + checkHash = false + } + file = &ObjectOpenFile{ + connection: c, + container: container, + objectName: objectName, + headers: h, + resp: resp, + checkHash: checkHash, + body: resp.Body, + } + if checkHash { + file.hash = md5.New() + file.body = io.TeeReader(resp.Body, file.hash) + } + // Read Content-Length + if resp.Header.Get("Content-Length") != "" { + file.length, err = getInt64FromHeader(resp, "Content-Length") + file.lengthOk = (err == nil) + } + return +} + +// ObjectGet gets the object into the io.Writer contents. +// +// Returns the headers of the response. +// +// If checkHash is true then it will calculate the md5sum of the file +// as it is being received and check it against that returned from the +// server. If it is wrong then it will return ObjectCorrupted. +// +// headers["Content-Type"] will give the content type if desired. +func (c *Connection) ObjectGet(container string, objectName string, contents io.Writer, checkHash bool, h Headers) (headers Headers, err error) { + file, headers, err := c.ObjectOpen(container, objectName, checkHash, h) + if err != nil { + return + } + defer checkClose(file, &err) + _, err = io.Copy(contents, file) + return +} + +// ObjectGetBytes returns an object as a []byte. +// +// This is a simplified interface which checks the MD5 +func (c *Connection) ObjectGetBytes(container string, objectName string) (contents []byte, err error) { + var buf bytes.Buffer + _, err = c.ObjectGet(container, objectName, &buf, true, nil) + contents = buf.Bytes() + return +} + +// ObjectGetString returns an object as a string. +// +// This is a simplified interface which checks the MD5 +func (c *Connection) ObjectGetString(container string, objectName string) (contents string, err error) { + var buf bytes.Buffer + _, err = c.ObjectGet(container, objectName, &buf, true, nil) + contents = buf.String() + return +} + +// ObjectDelete deletes the object. 
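A short sketch of reading from an offset with ObjectOpen and Seek, assuming the "io/ioutil" import and placeholder names; seeking reopens the connection with a Range header and disables the MD5 check because only part of the body is read:

// readFrom returns everything from offset to the end of the object.
func readFrom(c *swift.Connection, offset int64) ([]byte, error) {
	f, _, err := c.ObjectOpen("logs", "app.log", false, nil) // placeholder names
	if err != nil {
		return nil, err
	}
	defer f.Close()
	if _, err := f.Seek(offset, 0); err != nil { // 0 = relative to the start
		return nil, err
	}
	return ioutil.ReadAll(f)
}
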
+// +// May return ObjectNotFound if the object isn't found +func (c *Connection) ObjectDelete(container string, objectName string) error { + _, _, err := c.storage(RequestOpts{ + Container: container, + ObjectName: objectName, + Operation: "DELETE", + ErrorMap: objectErrorMap, + }) + return err +} + +// ObjectTempUrl returns a temporary URL for an object +func (c *Connection) ObjectTempUrl(container string, objectName string, secretKey string, method string, expires time.Time) string { + mac := hmac.New(sha1.New, []byte(secretKey)) + prefix, _ := url.Parse(c.StorageUrl) + body := fmt.Sprintf("%s\n%d\n%s/%s/%s", method, expires.Unix(), prefix.Path, container, objectName) + mac.Write([]byte(body)) + sig := hex.EncodeToString(mac.Sum(nil)) + return fmt.Sprintf("%s/%s/%s?temp_url_sig=%s&temp_url_expires=%d", c.StorageUrl, container, objectName, sig, expires.Unix()) +} + +// parseResponseStatus parses string like "200 OK" and returns Error. +// +// For status codes beween 200 and 299, this returns nil. +func parseResponseStatus(resp string, errorMap errorMap) error { + code := 0 + reason := resp + t := strings.SplitN(resp, " ", 2) + if len(t) == 2 { + ncode, err := strconv.Atoi(t[0]) + if err == nil { + code = ncode + reason = t[1] + } + } + if errorMap != nil { + if err, ok := errorMap[code]; ok { + return err + } + } + if 200 <= code && code <= 299 { + return nil + } + return newError(code, reason) +} + +// BulkDeleteResult stores results of BulkDelete(). +// +// Individual errors may (or may not) be returned by Errors. +// Errors is a map whose keys are a full path of where the object was +// to be deleted, and whose values are Error objects. A full path of +// object looks like "/API_VERSION/USER_ACCOUNT/CONTAINER/OBJECT_PATH". +type BulkDeleteResult struct { + NumberNotFound int64 // # of objects not found. + NumberDeleted int64 // # of deleted objects. + Errors map[string]error // Mapping between object name and an error. + Headers Headers // Response HTTP headers. +} + +// BulkDelete deletes multiple objectNames from container in one operation. +// +// Some servers may not accept bulk-delete requests since bulk-delete is +// an optional feature of swift - these will return the Forbidden error. 
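A small sketch of a bulk delete, assuming an authenticated connection, the "fmt" import, and placeholder object names; per-object failures come back in result.Errors:

// deleteMany removes several objects in one request.
func deleteMany(c *swift.Connection) error {
	result, err := c.BulkDelete("logs", []string{"a.log", "b.log"}) // placeholder names
	if err != nil {
		return err // Forbidden if the server has bulk-delete disabled
	}
	fmt.Printf("deleted %d, not found %d\n", result.NumberDeleted, result.NumberNotFound)
	for path, perr := range result.Errors {
		fmt.Printf("%s: %v\n", path, perr)
	}
	return nil
}
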
+// +// See also: +// * http://docs.openstack.org/trunk/openstack-object-storage/admin/content/object-storage-bulk-delete.html +// * http://docs.rackspace.com/files/api/v1/cf-devguide/content/Bulk_Delete-d1e2338.html +func (c *Connection) BulkDelete(container string, objectNames []string) (result BulkDeleteResult, err error) { + var buffer bytes.Buffer + for _, s := range objectNames { + buffer.WriteString(fmt.Sprintf("/%s/%s\n", container, + url.QueryEscape(s))) + } + resp, headers, err := c.storage(RequestOpts{ + Operation: "DELETE", + Parameters: url.Values{"bulk-delete": []string{"1"}}, + Headers: Headers{ + "Accept": "application/json", + "Content-Type": "text/plain", + }, + ErrorMap: ContainerErrorMap, + Body: &buffer, + }) + if err != nil { + return + } + var jsonResult struct { + NotFound int64 `json:"Number Not Found"` + Status string `json:"Response Status"` + Errors [][]string + Deleted int64 `json:"Number Deleted"` + } + err = readJson(resp, &jsonResult) + if err != nil { + return + } + + err = parseResponseStatus(jsonResult.Status, objectErrorMap) + result.NumberNotFound = jsonResult.NotFound + result.NumberDeleted = jsonResult.Deleted + result.Headers = headers + el := make(map[string]error, len(jsonResult.Errors)) + for _, t := range jsonResult.Errors { + if len(t) != 2 { + continue + } + el[t[0]] = parseResponseStatus(t[1], objectErrorMap) + } + result.Errors = el + return +} + +// BulkUploadResult stores results of BulkUpload(). +// +// Individual errors may (or may not) be returned by Errors. +// Errors is a map whose keys are a full path of where an object was +// to be created, and whose values are Error objects. A full path of +// object looks like "/API_VERSION/USER_ACCOUNT/CONTAINER/OBJECT_PATH". +type BulkUploadResult struct { + NumberCreated int64 // # of created objects. + Errors map[string]error // Mapping between object name and an error. + Headers Headers // Response HTTP headers. +} + +// BulkUpload uploads multiple files in one operation. +// +// uploadPath can be empty, a container name, or a pseudo-directory +// within a container. If uploadPath is empty, new containers may be +// automatically created. +// +// Files are read from dataStream. The format of the stream is specified +// by the format parameter. Available formats are: +// * UploadTar - Plain tar stream. +// * UploadTarGzip - Gzip compressed tar stream. +// * UploadTarBzip2 - Bzip2 compressed tar stream. +// +// Some servers may not accept bulk-upload requests since bulk-upload is +// an optional feature of swift - these will return the Forbidden error. +// +// See also: +// * http://docs.openstack.org/trunk/openstack-object-storage/admin/content/object-storage-extract-archive.html +// * http://docs.rackspace.com/files/api/v1/cf-devguide/content/Extract_Archive-d1e2338.html +func (c *Connection) BulkUpload(uploadPath string, dataStream io.Reader, format string, h Headers) (result BulkUploadResult, err error) { + extraHeaders := Headers{"Accept": "application/json"} + for key, value := range h { + extraHeaders[key] = value + } + // The following code abuses Container parameter intentionally. + // The best fix might be to rename Container to UploadPath. 
+ resp, headers, err := c.storage(RequestOpts{ + Container: uploadPath, + Operation: "PUT", + Parameters: url.Values{"extract-archive": []string{format}}, + Headers: extraHeaders, + ErrorMap: ContainerErrorMap, + Body: dataStream, + }) + if err != nil { + return + } + // Detect old servers which don't support this feature + if headers["Content-Type"] != "application/json" { + err = Forbidden + return + } + var jsonResult struct { + Created int64 `json:"Number Files Created"` + Status string `json:"Response Status"` + Errors [][]string + } + err = readJson(resp, &jsonResult) + if err != nil { + return + } + + err = parseResponseStatus(jsonResult.Status, objectErrorMap) + result.NumberCreated = jsonResult.Created + result.Headers = headers + el := make(map[string]error, len(jsonResult.Errors)) + for _, t := range jsonResult.Errors { + if len(t) != 2 { + continue + } + el[t[0]] = parseResponseStatus(t[1], objectErrorMap) + } + result.Errors = el + return +} + +// Object returns info about a single object including any metadata in the header. +// +// May return ObjectNotFound. +// +// Use headers.ObjectMetadata() to read the metadata in the Headers. +func (c *Connection) Object(container string, objectName string) (info Object, headers Headers, err error) { + var resp *http.Response + resp, headers, err = c.storage(RequestOpts{ + Container: container, + ObjectName: objectName, + Operation: "HEAD", + ErrorMap: objectErrorMap, + NoResponse: true, + }) + if err != nil { + return + } + // Parse the headers into the struct + // HTTP/1.1 200 OK + // Date: Thu, 07 Jun 2010 20:59:39 GMT + // Server: Apache + // Last-Modified: Fri, 12 Jun 2010 13:40:18 GMT + // ETag: 8a964ee2a5e88be344f36c22562a6486 + // Content-Length: 512000 + // Content-Type: text/plain; charset=UTF-8 + // X-Object-Meta-Meat: Bacon + // X-Object-Meta-Fruit: Bacon + // X-Object-Meta-Veggie: Bacon + // X-Object-Meta-Dairy: Bacon + info.Name = objectName + info.ContentType = resp.Header.Get("Content-Type") + if resp.Header.Get("Content-Length") != "" { + if info.Bytes, err = getInt64FromHeader(resp, "Content-Length"); err != nil { + return + } + } + //HACK + //Currently ceph doestn't return Last-Modified header for DLO manifest without any segments + //Currently it affects all versions of ceph http://tracker.ceph.com/issues/15812 + if resp.Header.Get("Last-Modified") != "" { + info.ServerLastModified = resp.Header.Get("Last-Modified") + if info.LastModified, err = time.Parse(http.TimeFormat, info.ServerLastModified); err != nil { + return + } + } + info.Hash = resp.Header.Get("Etag") + return +} + +// ObjectUpdate adds, replaces or removes object metadata. +// +// Add or Update keys by mentioning them in the Metadata. Use +// Metadata.ObjectHeaders and Headers.ObjectMetadata to convert your +// Metadata to and from normal HTTP headers. +// +// This removes all metadata previously added to the object and +// replaces it with that passed in so to delete keys, just don't +// mention them the headers you pass in. +// +// Object metadata can only be read with Object() not with Objects(). +// +// This can also be used to set headers not already assigned such as +// X-Delete-At or X-Delete-After for expiring objects. +// +// You cannot use this to change any of the object's other headers +// such as Content-Type, ETag, etc. +// +// Refer to copying an object when you need to update metadata or +// other headers such as Content-Type or CORS headers. +// +// May return ObjectNotFound. 
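As a usage sketch for the metadata-update behaviour described above, including expiry via X-Delete-After: the UserName, ApiKey and AuthUrl fields and the Authenticate call come from the upstream ncw/swift API rather than from this diff, and the credentials, container and object names are placeholders.

```go
package main

import (
	"log"

	"github.com/ncw/swift"
)

func main() {
	// Assumes a reachable Swift endpoint; all values here are placeholders.
	c := swift.Connection{
		UserName: "demo",
		ApiKey:   "demo-key",
		AuthUrl:  "http://localhost:8080/v1.0",
	}
	if err := c.Authenticate(); err != nil {
		log.Fatal(err)
	}

	// Replace the object's metadata and ask the server to expire it in one
	// hour, using the X-Delete-After behaviour described above.
	h := swift.Headers{
		"X-Object-Meta-Owner": "demo",
		"X-Delete-After":      "3600",
	}
	if err := c.ObjectUpdate("photos", "cat.jpg", h); err != nil {
		log.Fatal(err)
	}
}
```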
+func (c *Connection) ObjectUpdate(container string, objectName string, h Headers) error { + _, _, err := c.storage(RequestOpts{ + Container: container, + ObjectName: objectName, + Operation: "POST", + ErrorMap: objectErrorMap, + NoResponse: true, + Headers: h, + }) + return err +} + +// ObjectCopy does a server side copy of an object to a new position +// +// All metadata is preserved. If metadata is set in the headers then +// it overrides the old metadata on the copied object. +// +// The destination container must exist before the copy. +// +// You can use this to copy an object to itself - this is the only way +// to update the content type of an object. +func (c *Connection) ObjectCopy(srcContainer string, srcObjectName string, dstContainer string, dstObjectName string, h Headers) (headers Headers, err error) { + // Meta stuff + extraHeaders := map[string]string{ + "Destination": dstContainer + "/" + dstObjectName, + } + for key, value := range h { + extraHeaders[key] = value + } + _, headers, err = c.storage(RequestOpts{ + Container: srcContainer, + ObjectName: srcObjectName, + Operation: "COPY", + ErrorMap: objectErrorMap, + NoResponse: true, + Headers: extraHeaders, + }) + return +} + +// ObjectMove does a server side move of an object to a new position +// +// This is a convenience method which calls ObjectCopy then ObjectDelete +// +// All metadata is preserved. +// +// The destination container must exist before the copy. +func (c *Connection) ObjectMove(srcContainer string, srcObjectName string, dstContainer string, dstObjectName string) (err error) { + _, err = c.ObjectCopy(srcContainer, srcObjectName, dstContainer, dstObjectName, nil) + if err != nil { + return + } + return c.ObjectDelete(srcContainer, srcObjectName) +} + +// ObjectUpdateContentType updates the content type of an object +// +// This is a convenience method which calls ObjectCopy +// +// All other metadata is preserved. +func (c *Connection) ObjectUpdateContentType(container string, objectName string, contentType string) (err error) { + h := Headers{"Content-Type": contentType} + _, err = c.ObjectCopy(container, objectName, container, objectName, h) + return +} + +// ------------------------------------------------------------ + +// VersionContainerCreate is a helper method for creating and enabling version controlled containers. +// +// It builds the current object container, the non-current object version container, and enables versioning. +// +// If the server doesn't support versioning then it will return +// Forbidden however it will have created both the containers at that point. +func (c *Connection) VersionContainerCreate(current, version string) error { + if err := c.ContainerCreate(version, nil); err != nil { + return err + } + if err := c.ContainerCreate(current, nil); err != nil { + return err + } + if err := c.VersionEnable(current, version); err != nil { + return err + } + return nil +} + +// VersionEnable enables versioning on the current container with version as the tracking container. 
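A small sketch of the versioning helper described above, written against the VersionContainerCreate method and the Forbidden sentinel defined in this file; the package and function names are illustrative only, and the connection is assumed to be authenticated already.

```go
package swiftexample

import "github.com/ncw/swift"

// EnableVersioning creates a current/versions container pair via
// VersionContainerCreate and reports whether the server accepted the
// versioning header. On Forbidden both containers still exist, but
// non-current object versions will not be tracked.
func EnableVersioning(c *swift.Connection, current, versions string) (bool, error) {
	err := c.VersionContainerCreate(current, versions)
	if err == swift.Forbidden {
		return false, nil
	}
	if err != nil {
		return false, err
	}
	return true, nil
}
```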
+// +// May return Forbidden if this isn't supported by the server +func (c *Connection) VersionEnable(current, version string) error { + h := Headers{"X-Versions-Location": version} + if err := c.ContainerUpdate(current, h); err != nil { + return err + } + // Check to see if the header was set properly + _, headers, err := c.Container(current) + if err != nil { + return err + } + // If failed to set versions header, return Forbidden as the server doesn't support this + if headers["X-Versions-Location"] != version { + return Forbidden + } + return nil +} + +// VersionDisable disables versioning on the current container. +func (c *Connection) VersionDisable(current string) error { + h := Headers{"X-Versions-Location": ""} + if err := c.ContainerUpdate(current, h); err != nil { + return err + } + return nil +} + +// VersionObjectList returns a list of older versions of the object. +// +// Objects are returned in the format / +func (c *Connection) VersionObjectList(version, object string) ([]string, error) { + opts := &ObjectsOpts{ + // <3-character zero-padded hexadecimal character length>/ + Prefix: fmt.Sprintf("%03x", len(object)) + object + "/", + } + return c.ObjectNames(version, opts) +} diff --git a/vendor/github.com/docker/distribution/vendor/github.com/ncw/swift/swifttest/server.go b/vendor/github.com/docker/distribution/vendor/github.com/ncw/swift/swifttest/server.go new file mode 100644 index 000000000..a49abb9a0 --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/github.com/ncw/swift/swifttest/server.go @@ -0,0 +1,941 @@ +// This implements a very basic Swift server +// Everything is stored in memory +// +// This comes from the https://github.com/mitchellh/goamz +// and was adapted for Swift +// +package swifttest + +import ( + "bytes" + "crypto/hmac" + "crypto/md5" + "crypto/rand" + "crypto/sha1" + "encoding/hex" + "encoding/json" + "fmt" + "io" + "io/ioutil" + "log" + "mime" + "net" + "net/http" + "net/url" + "path" + "regexp" + "sort" + "strconv" + "strings" + "sync" + "testing" + "time" + + "github.com/ncw/swift" +) + +const ( + DEBUG = false + TEST_ACCOUNT = "swifttest" +) + +type SwiftServer struct { + t *testing.T + reqId int + mu sync.Mutex + Listener net.Listener + AuthURL string + URL string + Accounts map[string]*account + Sessions map[string]*session +} + +// The Folder type represents a container stored in an account +type Folder struct { + Count int `json:"count"` + Bytes int `json:"bytes"` + Name string `json:"name"` +} + +// The Key type represents an item stored in an container. +type Key struct { + Key string `json:"name"` + LastModified string `json:"last_modified"` + Size int64 `json:"bytes"` + // ETag gives the hex-encoded MD5 sum of the contents, + // surrounded with double-quotes. + ETag string `json:"hash"` + ContentType string `json:"content_type"` + // Owner Owner +} + +type Subdir struct { + Subdir string `json:"subdir"` +} + +type swiftError struct { + statusCode int + Code string + Message string +} + +type action struct { + srv *SwiftServer + w http.ResponseWriter + req *http.Request + reqId string + user *account +} + +type session struct { + username string +} + +type metadata struct { + meta http.Header // metadata to return with requests. +} + +type account struct { + swift.Account + metadata + password string + Containers map[string]*container +} + +type object struct { + metadata + name string + mtime time.Time + checksum []byte // also held as ETag in meta. 
+ data []byte + content_type string +} + +type container struct { + metadata + name string + ctime time.Time + objects map[string]*object + bytes int +} + +// A resource encapsulates the subject of an HTTP request. +// The resource referred to may or may not exist +// when the request is made. +type resource interface { + put(a *action) interface{} + get(a *action) interface{} + post(a *action) interface{} + delete(a *action) interface{} + copy(a *action) interface{} +} + +type objectResource struct { + name string + version string + container *container // always non-nil. + object *object // may be nil. +} + +type containerResource struct { + name string + container *container // non-nil if the container already exists. +} + +var responseParams = map[string]bool{ + "content-type": true, + "content-language": true, + "expires": true, + "cache-control": true, + "content-disposition": true, + "content-encoding": true, +} + +func fatalf(code int, codeStr string, errf string, a ...interface{}) { + panic(&swiftError{ + statusCode: code, + Code: codeStr, + Message: fmt.Sprintf(errf, a...), + }) +} + +func (m metadata) setMetadata(a *action, resource string) { + for key, values := range a.req.Header { + key = http.CanonicalHeaderKey(key) + if metaHeaders[key] || strings.HasPrefix(key, "X-"+strings.Title(resource)+"-Meta-") { + if values[0] != "" || resource == "object" { + m.meta[key] = values + } else { + m.meta.Del(key) + } + } + } +} + +func (m metadata) getMetadata(a *action) { + h := a.w.Header() + for name, d := range m.meta { + h[name] = d + } +} + +func (c container) list(delimiter string, marker string, prefix string, parent string) (resp []interface{}) { + var tmp orderedObjects + + // first get all matching objects and arrange them in alphabetical order. + for _, obj := range c.objects { + if strings.HasPrefix(obj.name, prefix) { + tmp = append(tmp, obj) + } + } + sort.Sort(tmp) + + var prefixes []string + for _, obj := range tmp { + if !strings.HasPrefix(obj.name, prefix) { + continue + } + + isPrefix := false + name := obj.name + if parent != "" { + if path.Dir(obj.name) != path.Clean(parent) { + continue + } + } else if delimiter != "" { + if i := strings.Index(obj.name[len(prefix):], delimiter); i >= 0 { + name = obj.name[:len(prefix)+i+len(delimiter)] + if prefixes != nil && prefixes[len(prefixes)-1] == name { + continue + } + isPrefix = true + } + } + + if name <= marker { + continue + } + + if isPrefix { + prefixes = append(prefixes, name) + + resp = append(resp, Subdir{ + Subdir: name, + }) + } else { + resp = append(resp, obj) + } + } + + return +} + +// GET on a container lists the objects in the container. 
+func (r containerResource) get(a *action) interface{} { + if r.container == nil { + fatalf(404, "NoSuchContainer", "The specified container does not exist") + } + + delimiter := a.req.Form.Get("delimiter") + marker := a.req.Form.Get("marker") + prefix := a.req.Form.Get("prefix") + format := a.req.URL.Query().Get("format") + parent := a.req.Form.Get("path") + + a.w.Header().Set("X-Container-Bytes-Used", strconv.Itoa(r.container.bytes)) + a.w.Header().Set("X-Container-Object-Count", strconv.Itoa(len(r.container.objects))) + r.container.getMetadata(a) + + if a.req.Method == "HEAD" { + return nil + } + + objects := r.container.list(delimiter, marker, prefix, parent) + + if format == "json" { + a.w.Header().Set("Content-Type", "application/json") + var resp []interface{} + for _, item := range objects { + if obj, ok := item.(*object); ok { + resp = append(resp, obj.Key()) + } else { + resp = append(resp, item) + } + } + return resp + } else { + for _, item := range objects { + if obj, ok := item.(*object); ok { + a.w.Write([]byte(obj.name + "\n")) + } else if subdir, ok := item.(Subdir); ok { + a.w.Write([]byte(subdir.Subdir + "\n")) + } + } + return nil + } +} + +// orderedContainers holds a slice of containers that can be sorted +// by name. +type orderedContainers []*container + +func (s orderedContainers) Len() int { + return len(s) +} +func (s orderedContainers) Swap(i, j int) { + s[i], s[j] = s[j], s[i] +} +func (s orderedContainers) Less(i, j int) bool { + return s[i].name < s[j].name +} + +func (r containerResource) delete(a *action) interface{} { + b := r.container + if b == nil { + fatalf(404, "NoSuchContainer", "The specified container does not exist") + } + if len(b.objects) > 0 { + fatalf(409, "Conflict", "The container you tried to delete is not empty") + } + delete(a.user.Containers, b.name) + a.user.Account.Containers-- + return nil +} + +func (r containerResource) put(a *action) interface{} { + if a.req.URL.Query().Get("extract-archive") != "" { + fatalf(403, "Operation forbidden", "Bulk upload is not supported") + } + + if r.container == nil { + if !validContainerName(r.name) { + fatalf(400, "InvalidContainerName", "The specified container is not valid") + } + r.container = &container{ + name: r.name, + objects: make(map[string]*object), + metadata: metadata{ + meta: make(http.Header), + }, + } + r.container.setMetadata(a, "container") + a.user.Containers[r.name] = r.container + a.user.Account.Containers++ + } + + return nil +} + +func (r containerResource) post(a *action) interface{} { + if r.container == nil { + fatalf(400, "Method", "The resource could not be found.") + } else { + r.container.setMetadata(a, "container") + a.w.WriteHeader(201) + jsonMarshal(a.w, Folder{ + Count: len(r.container.objects), + Bytes: r.container.bytes, + Name: r.container.name, + }) + } + return nil +} + +func (containerResource) copy(a *action) interface{} { return notAllowed() } + +// validContainerName returns whether name is a valid bucket name. +// Here are the rules, from: +// http://docs.openstack.org/api/openstack-object-storage/1.0/content/ch_object-storage-dev-api-storage.html +// +// Container names cannot exceed 256 bytes and cannot contain the / character. +// +func validContainerName(name string) bool { + if len(name) == 0 || len(name) > 256 { + return false + } + for _, r := range name { + switch { + case r == '/': + return false + default: + } + } + return true +} + +// orderedObjects holds a slice of objects that can be sorted +// by name. 
+type orderedObjects []*object + +func (s orderedObjects) Len() int { + return len(s) +} +func (s orderedObjects) Swap(i, j int) { + s[i], s[j] = s[j], s[i] +} +func (s orderedObjects) Less(i, j int) bool { + return s[i].name < s[j].name +} + +func (obj *object) Key() Key { + return Key{ + Key: obj.name, + LastModified: obj.mtime.Format("2006-01-02T15:04:05"), + Size: int64(len(obj.data)), + ETag: fmt.Sprintf("%x", obj.checksum), + ContentType: obj.content_type, + } +} + +var metaHeaders = map[string]bool{ + "Content-Type": true, + "Content-Encoding": true, + "Content-Disposition": true, + "X-Object-Manifest": true, +} + +var rangeRegexp = regexp.MustCompile("(bytes=)?([0-9]*)-([0-9]*)") + +// GET on an object gets the contents of the object. +func (objr objectResource) get(a *action) interface{} { + var ( + etag []byte + reader io.Reader + start int + end int = -1 + ) + obj := objr.object + if obj == nil { + fatalf(404, "Not Found", "The resource could not be found.") + } + + h := a.w.Header() + // add metadata + obj.getMetadata(a) + + if r := a.req.Header.Get("Range"); r != "" { + m := rangeRegexp.FindStringSubmatch(r) + if m[2] != "" { + start, _ = strconv.Atoi(m[2]) + } + if m[3] != "" { + end, _ = strconv.Atoi(m[3]) + } + } + + max := func(a int, b int) int { + if a > b { + return a + } + return b + } + + if manifest, ok := obj.meta["X-Object-Manifest"]; ok { + var segments []io.Reader + components := strings.SplitN(manifest[0], "/", 2) + segContainer := a.user.Containers[components[0]] + prefix := components[1] + resp := segContainer.list("", "", prefix, "") + sum := md5.New() + cursor := 0 + size := 0 + for _, item := range resp { + if obj, ok := item.(*object); ok { + length := len(obj.data) + size += length + sum.Write([]byte(hex.EncodeToString(obj.checksum))) + if start >= cursor+length { + continue + } + segments = append(segments, bytes.NewReader(obj.data[max(0, start-cursor):])) + cursor += length + } + } + etag = sum.Sum(nil) + if end == -1 { + end = size + } + reader = io.LimitReader(io.MultiReader(segments...), int64(end-start)) + } else { + if end == -1 { + end = len(obj.data) + } + etag = obj.checksum + reader = bytes.NewReader(obj.data[start:end]) + } + + h.Set("Content-Length", fmt.Sprint(end-start)) + h.Set("ETag", hex.EncodeToString(etag)) + h.Set("Last-Modified", obj.mtime.Format(http.TimeFormat)) + + if a.req.Method == "HEAD" { + return nil + } + + // TODO avoid holding the lock when writing data. + _, err := io.Copy(a.w, reader) + if err != nil { + // we can't do much except just log the fact. + log.Printf("error writing data: %v", err) + } + return nil +} + +// PUT on an object creates the object. +func (objr objectResource) put(a *action) interface{} { + var expectHash []byte + if c := a.req.Header.Get("ETag"); c != "" { + var err error + expectHash, err = hex.DecodeString(c) + if err != nil || len(expectHash) != md5.Size { + fatalf(400, "InvalidDigest", "The ETag you specified was invalid") + } + } + sum := md5.New() + // TODO avoid holding lock while reading data. 
+ data, err := ioutil.ReadAll(io.TeeReader(a.req.Body, sum)) + if err != nil { + fatalf(400, "TODO", "read error") + } + gotHash := sum.Sum(nil) + if expectHash != nil && bytes.Compare(gotHash, expectHash) != 0 { + fatalf(422, "Bad ETag", "The ETag you specified did not match what we received") + } + if a.req.ContentLength >= 0 && int64(len(data)) != a.req.ContentLength { + fatalf(400, "IncompleteBody", "You did not provide the number of bytes specified by the Content-Length HTTP header") + } + + // TODO is this correct, or should we erase all previous metadata? + obj := objr.object + if obj == nil { + obj = &object{ + name: objr.name, + metadata: metadata{ + meta: make(http.Header), + }, + } + a.user.Objects++ + } else { + objr.container.bytes -= len(obj.data) + a.user.BytesUsed -= int64(len(obj.data)) + } + + var content_type string + if content_type = a.req.Header.Get("Content-Type"); content_type == "" { + content_type = mime.TypeByExtension(obj.name) + if content_type == "" { + content_type = "application/octet-stream" + } + } + + // PUT request has been successful - save data and metadata + obj.setMetadata(a, "object") + obj.content_type = content_type + obj.data = data + obj.checksum = gotHash + obj.mtime = time.Now().UTC() + objr.container.objects[objr.name] = obj + objr.container.bytes += len(data) + a.user.BytesUsed += int64(len(data)) + + h := a.w.Header() + h.Set("ETag", hex.EncodeToString(obj.checksum)) + + return nil +} + +func (objr objectResource) delete(a *action) interface{} { + if objr.object == nil { + fatalf(404, "NoSuchKey", "The specified key does not exist.") + } + + objr.container.bytes -= len(objr.object.data) + a.user.BytesUsed -= int64(len(objr.object.data)) + delete(objr.container.objects, objr.name) + a.user.Objects-- + return nil +} + +func (objr objectResource) post(a *action) interface{} { + obj := objr.object + obj.setMetadata(a, "object") + return nil +} + +func (objr objectResource) copy(a *action) interface{} { + if objr.object == nil { + fatalf(404, "NoSuchKey", "The specified key does not exist.") + } + + obj := objr.object + destination := a.req.Header.Get("Destination") + if destination == "" { + fatalf(400, "Bad Request", "You must provide a Destination header") + } + + var ( + obj2 *object + objr2 objectResource + ) + + destURL, _ := url.Parse("/v1/AUTH_" + TEST_ACCOUNT + "/" + destination) + r := a.srv.resourceForURL(destURL) + switch t := r.(type) { + case objectResource: + objr2 = t + if objr2.object == nil { + obj2 = &object{ + name: objr2.name, + metadata: metadata{ + meta: make(http.Header), + }, + } + a.user.Objects++ + } else { + obj2 = objr2.object + objr2.container.bytes -= len(obj2.data) + a.user.BytesUsed -= int64(len(obj2.data)) + } + default: + fatalf(400, "Bad Request", "Destination must point to a valid object path") + } + + obj2.content_type = obj.content_type + obj2.data = obj.data + obj2.checksum = obj.checksum + obj2.mtime = time.Now() + objr2.container.objects[objr2.name] = obj2 + objr2.container.bytes += len(obj.data) + a.user.BytesUsed += int64(len(obj.data)) + + for key, values := range obj.metadata.meta { + obj2.metadata.meta[key] = values + } + obj2.setMetadata(a, "object") + + return nil +} + +func (s *SwiftServer) serveHTTP(w http.ResponseWriter, req *http.Request) { + // ignore error from ParseForm as it's usually spurious. 
+ req.ParseForm() + + s.mu.Lock() + defer s.mu.Unlock() + + if DEBUG { + log.Printf("swifttest %q %q", req.Method, req.URL) + } + a := &action{ + srv: s, + w: w, + req: req, + reqId: fmt.Sprintf("%09X", s.reqId), + } + s.reqId++ + + var r resource + defer func() { + switch err := recover().(type) { + case *swiftError: + w.Header().Set("Content-Type", `text/plain; charset=utf-8`) + http.Error(w, err.Message, err.statusCode) + case nil: + default: + panic(err) + } + }() + + var resp interface{} + + if req.URL.String() == "/v1.0" { + username := req.Header.Get("x-auth-user") + key := req.Header.Get("x-auth-key") + if acct, ok := s.Accounts[username]; ok { + if acct.password == key { + r := make([]byte, 16) + _, _ = rand.Read(r) + id := fmt.Sprintf("%X", r) + w.Header().Set("X-Storage-Url", s.URL+"/AUTH_"+username) + w.Header().Set("X-Auth-Token", "AUTH_tk"+string(id)) + w.Header().Set("X-Storage-Token", "AUTH_tk"+string(id)) + s.Sessions[id] = &session{ + username: username, + } + return + } + } + panic(notAuthorized()) + } + + if req.URL.String() == "/info" { + jsonMarshal(w, &swift.SwiftInfo{ + "swift": map[string]interface{}{ + "version": "1.2", + }, + "tempurl": map[string]interface{}{ + "methods": []string{"GET", "HEAD", "PUT"}, + }, + }) + return + } + + r = s.resourceForURL(req.URL) + + key := req.Header.Get("x-auth-token") + signature := req.URL.Query().Get("temp_url_sig") + expires := req.URL.Query().Get("temp_url_expires") + if key == "" && signature != "" && expires != "" { + accountName, _, _, _ := s.parseURL(req.URL) + secretKey := "" + if account, ok := s.Accounts[accountName]; ok { + secretKey = account.meta.Get("X-Account-Meta-Temp-Url-Key") + } + + get_hmac := func(method string) string { + mac := hmac.New(sha1.New, []byte(secretKey)) + body := fmt.Sprintf("%s\n%s\n%s", method, expires, req.URL.Path) + mac.Write([]byte(body)) + return hex.EncodeToString(mac.Sum(nil)) + } + + if req.Method == "HEAD" { + if signature != get_hmac("GET") && signature != get_hmac("POST") && signature != get_hmac("PUT") { + panic(notAuthorized()) + } + } else if signature != get_hmac(req.Method) { + panic(notAuthorized()) + } + } else { + session, ok := s.Sessions[key[7:]] + if !ok { + panic(notAuthorized()) + } + + a.user = s.Accounts[session.username] + } + + switch req.Method { + case "PUT": + resp = r.put(a) + case "GET", "HEAD": + resp = r.get(a) + case "DELETE": + resp = r.delete(a) + case "POST": + resp = r.post(a) + case "COPY": + resp = r.copy(a) + default: + fatalf(400, "MethodNotAllowed", "unknown http request method %q", req.Method) + } + + content_type := req.Header.Get("Content-Type") + if resp != nil && req.Method != "HEAD" { + if strings.HasPrefix(content_type, "application/json") || + req.URL.Query().Get("format") == "json" { + jsonMarshal(w, resp) + } else { + switch r := resp.(type) { + case string: + w.Write([]byte(r)) + default: + w.Write(resp.([]byte)) + } + } + } +} + +func jsonMarshal(w io.Writer, x interface{}) { + if err := json.NewEncoder(w).Encode(x); err != nil { + panic(fmt.Errorf("error marshalling %#v: %v", x, err)) + } +} + +var pathRegexp = regexp.MustCompile("/v1/AUTH_([a-zA-Z0-9]+)(/([^/]+)(/(.*))?)?") + +func (srv *SwiftServer) parseURL(u *url.URL) (account string, container string, object string, err error) { + m := pathRegexp.FindStringSubmatch(u.Path) + if m == nil { + return "", "", "", fmt.Errorf("Couldn't parse the specified URI") + } + account = m[1] + container = m[3] + object = m[5] + return +} + +// resourceForURL returns a resource object for the 
given URL. +func (srv *SwiftServer) resourceForURL(u *url.URL) (r resource) { + accountName, containerName, objectName, err := srv.parseURL(u) + + if err != nil { + fatalf(404, "InvalidURI", err.Error()) + } + + account, ok := srv.Accounts[accountName] + if !ok { + fatalf(404, "NoSuchAccount", "The specified account does not exist") + } + + if containerName == "" { + return rootResource{} + } + b := containerResource{ + name: containerName, + container: account.Containers[containerName], + } + + if objectName == "" { + return b + } + + if b.container == nil { + fatalf(404, "NoSuchContainer", "The specified container does not exist") + } + + objr := objectResource{ + name: objectName, + version: u.Query().Get("versionId"), + container: b.container, + } + + if obj := objr.container.objects[objr.name]; obj != nil { + objr.object = obj + } + return objr +} + +// nullResource has error stubs for all resource methods. +type nullResource struct{} + +func notAllowed() interface{} { + fatalf(400, "MethodNotAllowed", "The specified method is not allowed against this resource") + return nil +} + +func notAuthorized() interface{} { + fatalf(401, "Unauthorized", "This server could not verify that you are authorized to access the document you requested.") + return nil +} + +func (nullResource) put(a *action) interface{} { return notAllowed() } +func (nullResource) get(a *action) interface{} { return notAllowed() } +func (nullResource) post(a *action) interface{} { return notAllowed() } +func (nullResource) delete(a *action) interface{} { return notAllowed() } +func (nullResource) copy(a *action) interface{} { return notAllowed() } + +type rootResource struct{} + +func (rootResource) put(a *action) interface{} { return notAllowed() } +func (rootResource) get(a *action) interface{} { + marker := a.req.Form.Get("marker") + prefix := a.req.Form.Get("prefix") + format := a.req.URL.Query().Get("format") + + h := a.w.Header() + + h.Set("X-Account-Bytes-Used", strconv.Itoa(int(a.user.BytesUsed))) + h.Set("X-Account-Container-Count", strconv.Itoa(int(a.user.Account.Containers))) + h.Set("X-Account-Object-Count", strconv.Itoa(int(a.user.Objects))) + + // add metadata + a.user.metadata.getMetadata(a) + + if a.req.Method == "HEAD" { + return nil + } + + var tmp orderedContainers + // first get all matching objects and arrange them in alphabetical order. 
+ for _, container := range a.user.Containers { + if strings.HasPrefix(container.name, prefix) { + tmp = append(tmp, container) + } + } + sort.Sort(tmp) + + resp := make([]Folder, 0) + for _, container := range tmp { + if container.name <= marker { + continue + } + if format == "json" { + resp = append(resp, Folder{ + Count: len(container.objects), + Bytes: container.bytes, + Name: container.name, + }) + } else { + a.w.Write([]byte(container.name + "\n")) + } + } + + if format == "json" { + return resp + } else { + return nil + } +} + +func (r rootResource) post(a *action) interface{} { + a.user.metadata.setMetadata(a, "account") + return nil +} + +func (rootResource) delete(a *action) interface{} { + if a.req.URL.Query().Get("bulk-delete") == "1" { + fatalf(403, "Operation forbidden", "Bulk delete is not supported") + } + + return notAllowed() +} + +func (rootResource) copy(a *action) interface{} { return notAllowed() } + +func NewSwiftServer(address string) (*SwiftServer, error) { + var ( + l net.Listener + err error + ) + if strings.Index(address, ":") == -1 { + for port := 1024; port < 65535; port++ { + addr := fmt.Sprintf("%s:%d", address, port) + if l, err = net.Listen("tcp", addr); err == nil { + address = addr + break + } + } + } else { + l, err = net.Listen("tcp", address) + } + if err != nil { + return nil, fmt.Errorf("cannot listen on %s: %v", address, err) + } + + server := &SwiftServer{ + Listener: l, + AuthURL: "http://" + l.Addr().String() + "/v1.0", + URL: "http://" + l.Addr().String() + "/v1", + Accounts: make(map[string]*account), + Sessions: make(map[string]*session), + } + + server.Accounts[TEST_ACCOUNT] = &account{ + password: TEST_ACCOUNT, + metadata: metadata{ + meta: make(http.Header), + }, + Containers: make(map[string]*container), + } + + go http.Serve(l, http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) { + server.serveHTTP(w, req) + })) + + return server, nil +} + +func (srv *SwiftServer) Close() { + srv.Listener.Close() +} diff --git a/vendor/github.com/docker/distribution/vendor/github.com/ncw/swift/timeout_reader.go b/vendor/github.com/docker/distribution/vendor/github.com/ncw/swift/timeout_reader.go new file mode 100644 index 000000000..3839e9ea0 --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/github.com/ncw/swift/timeout_reader.go @@ -0,0 +1,57 @@ +package swift + +import ( + "io" + "time" +) + +// An io.ReadCloser which obeys an idle timeout +type timeoutReader struct { + reader io.ReadCloser + timeout time.Duration + cancel func() +} + +// Returns a wrapper around the reader which obeys an idle +// timeout. The cancel function is called if the timeout happens +func newTimeoutReader(reader io.ReadCloser, timeout time.Duration, cancel func()) *timeoutReader { + return &timeoutReader{ + reader: reader, + timeout: timeout, + cancel: cancel, + } +} + +// Read reads up to len(p) bytes into p +// +// Waits at most for timeout for the read to complete otherwise returns a timeout +func (t *timeoutReader) Read(p []byte) (int, error) { + // FIXME limit the amount of data read in one chunk so as to not exceed the timeout? 
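Returning to the in-memory test server defined above: the sketch below exercises its /v1.0 handshake with nothing but net/http, using the built-in account whose user name and key are both TEST_ACCOUNT. The import path assumes the upstream package location rather than the vendored one.

```go
package main

import (
	"fmt"
	"log"
	"net/http"

	"github.com/ncw/swift/swifttest"
)

func main() {
	// Start the in-memory server on a free localhost port.
	srv, err := swifttest.NewSwiftServer("localhost")
	if err != nil {
		log.Fatal(err)
	}
	defer srv.Close()

	// The server pre-creates a single account; user name and key are both
	// "swifttest" (TEST_ACCOUNT above).
	req, err := http.NewRequest("GET", srv.AuthURL, nil)
	if err != nil {
		log.Fatal(err)
	}
	req.Header.Set("X-Auth-User", swifttest.TEST_ACCOUNT)
	req.Header.Set("X-Auth-Key", swifttest.TEST_ACCOUNT)

	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		log.Fatal(err)
	}
	defer resp.Body.Close()

	// On success the handler returns the storage URL and a session token.
	fmt.Println("X-Storage-Url:", resp.Header.Get("X-Storage-Url"))
	fmt.Println("X-Auth-Token:", resp.Header.Get("X-Auth-Token"))
}
```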
+ // Do the read in the background + type result struct { + n int + err error + } + done := make(chan result, 1) + go func() { + n, err := t.reader.Read(p) + done <- result{n, err} + }() + // Wait for the read or the timeout + select { + case r := <-done: + return r.n, r.err + case <-time.After(t.timeout): + t.cancel() + return 0, TimeoutError + } + panic("unreachable") // for Go 1.0 +} + +// Close the channel +func (t *timeoutReader) Close() error { + return t.reader.Close() +} + +// Check it satisfies the interface +var _ io.ReadCloser = &timeoutReader{} diff --git a/vendor/github.com/docker/distribution/vendor/github.com/ncw/swift/watchdog_reader.go b/vendor/github.com/docker/distribution/vendor/github.com/ncw/swift/watchdog_reader.go new file mode 100644 index 000000000..b12b1bbe2 --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/github.com/ncw/swift/watchdog_reader.go @@ -0,0 +1,34 @@ +package swift + +import ( + "io" + "time" +) + +// An io.Reader which resets a watchdog timer whenever data is read +type watchdogReader struct { + timeout time.Duration + reader io.Reader + timer *time.Timer +} + +// Returns a new reader which will kick the watchdog timer whenever data is read +func newWatchdogReader(reader io.Reader, timeout time.Duration, timer *time.Timer) *watchdogReader { + return &watchdogReader{ + timeout: timeout, + reader: reader, + timer: timer, + } +} + +// Read reads up to len(p) bytes into p +func (t *watchdogReader) Read(p []byte) (n int, err error) { + // FIXME limit the amount of data read in one chunk so as to not exceed the timeout? + resetTimer(t.timer, t.timeout) + n, err = t.reader.Read(p) + resetTimer(t.timer, t.timeout) + return +} + +// Check it satisfies the interface +var _ io.Reader = &watchdogReader{} diff --git a/vendor/github.com/docker/distribution/vendor/github.com/spf13/cobra/.gitignore b/vendor/github.com/docker/distribution/vendor/github.com/spf13/cobra/.gitignore new file mode 100644 index 000000000..36d1a84d3 --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/github.com/spf13/cobra/.gitignore @@ -0,0 +1,24 @@ +# Compiled Object files, Static and Dynamic libs (Shared Objects) +*.o +*.a +*.so + +# Folders +_obj +_test + +# Architecture specific extensions/prefixes +*.[568vq] +[568vq].out + +*.cgo1.go +*.cgo2.c +_cgo_defun.c +_cgo_gotypes.go +_cgo_export.* + +_testmain.go + +*.exe + +cobra.test diff --git a/vendor/github.com/docker/distribution/vendor/github.com/spf13/cobra/.travis.yml b/vendor/github.com/docker/distribution/vendor/github.com/spf13/cobra/.travis.yml new file mode 100644 index 000000000..dc43afd61 --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/github.com/spf13/cobra/.travis.yml @@ -0,0 +1,8 @@ +language: go +go: + - 1.3 + - 1.4.2 + - tip +script: + - go test ./... + - go build diff --git a/vendor/github.com/docker/distribution/vendor/github.com/spf13/cobra/LICENSE.txt b/vendor/github.com/docker/distribution/vendor/github.com/spf13/cobra/LICENSE.txt new file mode 100644 index 000000000..298f0e266 --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/github.com/spf13/cobra/LICENSE.txt @@ -0,0 +1,174 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. 
+ + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. diff --git a/vendor/github.com/docker/distribution/vendor/github.com/spf13/cobra/README.md b/vendor/github.com/docker/distribution/vendor/github.com/spf13/cobra/README.md new file mode 100644 index 000000000..b1fb0889e --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/github.com/spf13/cobra/README.md @@ -0,0 +1,485 @@ +# Cobra + +A Commander for modern go CLI interactions + +[![Build Status](https://travis-ci.org/spf13/cobra.svg)](https://travis-ci.org/spf13/cobra) + +## Overview + +Cobra is a commander providing a simple interface to create powerful modern CLI +interfaces similar to git & go tools. In addition to providing an interface, Cobra +simultaneously provides a controller to organize your application code. + +Inspired by go, go-Commander, gh and subcommand, Cobra improves on these by +providing **fully posix compliant flags** (including short & long versions), +**nesting commands**, and the ability to **define your own help and usage** for any or +all commands. + +Cobra has an exceptionally clean interface and simple design without needless +constructors or initialization methods. + +Applications built with Cobra commands are designed to be as user friendly as +possible. 
Flags can be placed before or after the command (as long as a +confusing space isn’t provided). Both short and long flags can be used. A +command need not even be fully typed. The shortest unambiguous string will +suffice. Help is automatically generated and available for the application or +for a specific command using either the help command or the --help flag. + +## Concepts + +Cobra is built on a structure of commands & flags. + +**Commands** represent actions and **Flags** are modifiers for those actions. + +In the following example 'server' is a command and 'port' is a flag. + + hugo server --port=1313 + +### Commands + +Command is the central point of the application. Each interaction that +the application supports will be contained in a Command. A command can +have children commands and optionally run an action. + +In the example above 'server' is the command + +A Command has the following structure: + + type Command struct { + Use string // The one-line usage message. + Short string // The short description shown in the 'help' output. + Long string // The long message shown in the 'help ' output. + Run func(cmd *Command, args []string) // Run runs the command. + } + +### Flags + +A Flag is a way to modify the behavior of an command. Cobra supports +fully posix compliant flags as well as the go flag package. +A Cobra command can define flags that persist through to children commands +and flags that are only available to that command. + +In the example above 'port' is the flag. + +Flag functionality is provided by the [pflag +library](https://github.com/ogier/pflag), a fork of the flag standard library +which maintains the same interface while adding posix compliance. + +## Usage + +Cobra works by creating a set of commands and then organizing them into a tree. +The tree defines the structure of the application. + +Once each command is defined with it's corresponding flags, then the +tree is assigned to the commander which is finally executed. + +### Installing +Using Cobra is easy. First use go get to install the latest version +of the library. + + $ go get github.com/spf13/cobra + +Next include cobra in your application. + + import "github.com/spf13/cobra" + +### Create the root command + +The root command represents your binary itself. + +Cobra doesn't require any special constructors. Simply create your commands. + + var HugoCmd = &cobra.Command{ + Use: "hugo", + Short: "Hugo is a very fast static site generator", + Long: `A Fast and Flexible Static Site Generator built with + love by spf13 and friends in Go. + Complete documentation is available at http://hugo.spf13.com`, + Run: func(cmd *cobra.Command, args []string) { + // Do Stuff Here + }, + } + +### Create additional commands + +Additional commands can be defined. + + var versionCmd = &cobra.Command{ + Use: "version", + Short: "Print the version number of Hugo", + Long: `All software has versions. This is Hugo's`, + Run: func(cmd *cobra.Command, args []string) { + fmt.Println("Hugo Static Site Generator v0.9 -- HEAD") + }, + } + +### Attach command to its parent +In this example we are attaching it to the root, but commands can be attached at any level. + + HugoCmd.AddCommand(versionCmd) + +### Assign flags to a command + +Since the flags are defined and used in different locations, we need to +define a variable outside with the correct scope to assign the flag to +work with. + + var Verbose bool + var Source string + +There are two different approaches to assign a flag. 
+ +#### Persistent Flags + +A flag can be 'persistent' meaning that this flag will be available to the +command it's assigned to as well as every command under that command. For +global flags assign a flag as a persistent flag on the root. + + HugoCmd.PersistentFlags().BoolVarP(&Verbose, "verbose", "v", false, "verbose output") + +#### Local Flags + +A flag can also be assigned locally which will only apply to that specific command. + + HugoCmd.Flags().StringVarP(&Source, "source", "s", "", "Source directory to read from") + +### Remove a command from its parent + +Removing a command is not a common action in simple programs but it allows 3rd parties to customize an existing command tree. + +In this example, we remove the existing `VersionCmd` command of an existing root command, and we replace it by our own version. + + mainlib.RootCmd.RemoveCommand(mainlib.VersionCmd) + mainlib.RootCmd.AddCommand(versionCmd) + +### Once all commands and flags are defined, Execute the commands + +Execute should be run on the root for clarity, though it can be called on any command. + + HugoCmd.Execute() + +## Example + +In the example below we have defined three commands. Two are at the top level +and one (cmdTimes) is a child of one of the top commands. In this case the root +is not executable meaning that a subcommand is required. This is accomplished +by not providing a 'Run' for the 'rootCmd'. + +We have only defined one flag for a single command. + +More documentation about flags is available at https://github.com/spf13/pflag + + import( + "github.com/spf13/cobra" + "fmt" + "strings" + ) + + func main() { + + var echoTimes int + + var cmdPrint = &cobra.Command{ + Use: "print [string to print]", + Short: "Print anything to the screen", + Long: `print is for printing anything back to the screen. + For many years people have printed back to the screen. + `, + Run: func(cmd *cobra.Command, args []string) { + fmt.Println("Print: " + strings.Join(args, " ")) + }, + } + + var cmdEcho = &cobra.Command{ + Use: "echo [string to echo]", + Short: "Echo anything to the screen", + Long: `echo is for echoing anything back. + Echo works a lot like print, except it has a child command. + `, + Run: func(cmd *cobra.Command, args []string) { + fmt.Println("Print: " + strings.Join(args, " ")) + }, + } + + var cmdTimes = &cobra.Command{ + Use: "times [# times] [string to echo]", + Short: "Echo anything to the screen more times", + Long: `echo things multiple times back to the user by providing + a count and a string.`, + Run: func(cmd *cobra.Command, args []string) { + for i:=0; i < echoTimes; i++ { + fmt.Println("Echo: " + strings.Join(args, " ")) + } + }, + } + + cmdTimes.Flags().IntVarP(&echoTimes, "times", "t", 1, "times to echo the input") + + var rootCmd = &cobra.Command{Use: "app"} + rootCmd.AddCommand(cmdPrint, cmdEcho) + cmdEcho.AddCommand(cmdTimes) + rootCmd.Execute() + } + +For a more complete example of a larger application, please checkout [Hugo](http://hugo.spf13.com) + +## The Help Command + +Cobra automatically adds a help command to your application when you have subcommands. +This will be called when a user runs 'app help'. Additionally help will also +support all other commands as input. Say for instance you have a command called +'create' without any additional configuration cobra will work when 'app help +create' is called. Every command will automatically have the '--help' flag added. + +### Example + +The following output is automatically generated by cobra. 
Nothing beyond the +command and flag definitions are needed. + + > hugo help + + A Fast and Flexible Static Site Generator built with + love by spf13 and friends in Go. + + Complete documentation is available at http://hugo.spf13.com + + Usage: + hugo [flags] + hugo [command] + + Available Commands: + server :: Hugo runs it's own a webserver to render the files + version :: Print the version number of Hugo + check :: Check content in the source directory + benchmark :: Benchmark hugo by building a site a number of times + help [command] :: Help about any command + + Available Flags: + -b, --base-url="": hostname (and path) to the root eg. http://spf13.com/ + -D, --build-drafts=false: include content marked as draft + --config="": config file (default is path/config.yaml|json|toml) + -d, --destination="": filesystem path to write files to + -s, --source="": filesystem path to read files relative from + --stepAnalysis=false: display memory and timing of different steps of the program + --uglyurls=false: if true, use /filename.html instead of /filename/ + -v, --verbose=false: verbose output + -w, --watch=false: watch filesystem for changes and recreate as needed + + Use "hugo help [command]" for more information about that command. + + + +Help is just a command like any other. There is no special logic or behavior +around it. In fact you can provide your own if you want. + +### Defining your own help + +You can provide your own Help command or you own template for the default command to use. + +The default help command is + + func (c *Command) initHelp() { + if c.helpCommand == nil { + c.helpCommand = &Command{ + Use: "help [command]", + Short: "Help about any command", + Long: `Help provides help for any command in the application. + Simply type ` + c.Name() + ` help [path to command] for full details.`, + Run: c.HelpFunc(), + } + } + c.AddCommand(c.helpCommand) + } + +You can provide your own command, function or template through the following methods. + + command.SetHelpCommand(cmd *Command) + + command.SetHelpFunc(f func(*Command, []string)) + + command.SetHelpTemplate(s string) + +The latter two will also apply to any children commands. + +## Usage + +When the user provides an invalid flag or invalid command Cobra responds by +showing the user the 'usage' + +### Example +You may recognize this from the help above. That's because the default help +embeds the usage as part of it's output. + + Usage: + hugo [flags] + hugo [command] + + Available Commands: + server Hugo runs it's own a webserver to render the files + version Print the version number of Hugo + check Check content in the source directory + benchmark Benchmark hugo by building a site a number of times + help [command] Help about any command + + Available Flags: + -b, --base-url="": hostname (and path) to the root eg. http://spf13.com/ + -D, --build-drafts=false: include content marked as draft + --config="": config file (default is path/config.yaml|json|toml) + -d, --destination="": filesystem path to write files to + -s, --source="": filesystem path to read files relative from + --stepAnalysis=false: display memory and timing of different steps of the program + --uglyurls=false: if true, use /filename.html instead of /filename/ + -v, --verbose=false: verbose output + -w, --watch=false: watch filesystem for changes and recreate as needed + +### Defining your own usage +You can provide your own usage function or template for cobra to use. 
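For instance, a minimal replacement usage function might look like this (a sketch; the message is illustrative and fmt/os are assumed to be imported):

    command.SetUsageFunc(func(c *cobra.Command) error {
        fmt.Fprintf(os.Stderr, "Run '%s help' for usage.\n", c.Name())
        return nil
    })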
+ +The default usage function is + + return func(c *Command) error { + err := tmpl(c.Out(), c.UsageTemplate(), c) + return err + } + +Like help the function and template are over ridable through public methods. + + command.SetUsageFunc(f func(*Command) error) + + command.SetUsageTemplate(s string) + +## PreRun or PostRun Hooks + +It is possible to run functions before or after the main `Run` function of your command. The `PersistentPreRun` and `PreRun` functions will be executed before `Run`. `PersistendPostRun` and `PostRun` will be executed after `Run`. The `Persistent*Run` functions will be inherrited by children if they do not declare their own. These function are run in the following order: + +- `PersistentPreRun` +- `PreRun` +- `Run` +- `PostRun` +- `PersistenPostRun` + +And example of two commands which use all of these features is below. When the subcommand in executed it will run the root command's `PersistentPreRun` but not the root command's `PersistentPostRun` + +```go +package main + +import ( + "fmt" + + "github.com/spf13/cobra" +) + +func main() { + + var rootCmd = &cobra.Command{ + Use: "root [sub]", + Short: "My root command", + PersistentPreRun: func(cmd *cobra.Command, args []string) { + fmt.Printf("Inside rootCmd PersistentPreRun with args: %v\n", args) + }, + PreRun: func(cmd *cobra.Command, args []string) { + fmt.Printf("Inside rootCmd PreRun with args: %v\n", args) + }, + Run: func(cmd *cobra.Command, args []string) { + fmt.Printf("Inside rootCmd Run with args: %v\n", args) + }, + PostRun: func(cmd *cobra.Command, args []string) { + fmt.Printf("Inside rootCmd PostRun with args: %v\n", args) + }, + PersistentPostRun: func(cmd *cobra.Command, args []string) { + fmt.Printf("Inside rootCmd PersistentPostRun with args: %v\n", args) + }, + } + + var subCmd = &cobra.Command{ + Use: "sub [no options!]", + Short: "My sub command", + PreRun: func(cmd *cobra.Command, args []string) { + fmt.Printf("Inside subCmd PreRun with args: %v\n", args) + }, + Run: func(cmd *cobra.Command, args []string) { + fmt.Printf("Inside subCmd Run with args: %v\n", args) + }, + PostRun: func(cmd *cobra.Command, args []string) { + fmt.Printf("Inside subCmd PostRun with args: %v\n", args) + }, + PersistentPostRun: func(cmd *cobra.Command, args []string) { + fmt.Printf("Inside subCmd PersistentPostRun with args: %v\n", args) + }, + } + + rootCmd.AddCommand(subCmd) + + rootCmd.SetArgs([]string{""}) + _ = rootCmd.Execute() + fmt.Print("\n") + rootCmd.SetArgs([]string{"sub", "arg1", "arg2"}) + _ = rootCmd.Execute() +} +``` + +## Generating markdown formatted documentation for your command + +Cobra can generate a markdown formatted document based on the subcommands, flags, etc. A simple example of how to do this for your command can be found in [Markdown Docs](md_docs.md) + +## Generating bash completions for your command + +Cobra can generate a bash completions file. If you add more information to your command these completions can be amazingly powerful and flexible. 
Read more about [Bash Completions](bash_completions.md) + +## Debugging + +Cobra provides a ‘DebugFlags’ method on a command which when called will print +out everything Cobra knows about the flags for each command + +### Example + + command.DebugFlags() + +## Release Notes +* **0.9.0** June 17, 2014 + * flags can appears anywhere in the args (provided they are unambiguous) + * --help prints usage screen for app or command + * Prefix matching for commands + * Cleaner looking help and usage output + * Extensive test suite +* **0.8.0** Nov 5, 2013 + * Reworked interface to remove commander completely + * Command now primary structure + * No initialization needed + * Usage & Help templates & functions definable at any level + * Updated Readme +* **0.7.0** Sept 24, 2013 + * Needs more eyes + * Test suite + * Support for automatic error messages + * Support for help command + * Support for printing to any io.Writer instead of os.Stderr + * Support for persistent flags which cascade down tree + * Ready for integration into Hugo +* **0.1.0** Sept 3, 2013 + * Implement first draft + +## ToDo +* Launch proper documentation site + +## Contributing + +1. Fork it +2. Create your feature branch (`git checkout -b my-new-feature`) +3. Commit your changes (`git commit -am 'Add some feature'`) +4. Push to the branch (`git push origin my-new-feature`) +5. Create new Pull Request + +## Contributors + +Names in no particular order: + +* [spf13](https://github.com/spf13) + +## License + +Cobra is released under the Apache 2.0 license. See [LICENSE.txt](https://github.com/spf13/cobra/blob/master/LICENSE.txt) + + +[![Bitdeli Badge](https://d2weczhvl823v0.cloudfront.net/spf13/cobra/trend.png)](https://bitdeli.com/free "Bitdeli Badge") + diff --git a/vendor/github.com/docker/distribution/vendor/github.com/spf13/cobra/bash_completions.go b/vendor/github.com/docker/distribution/vendor/github.com/spf13/cobra/bash_completions.go new file mode 100644 index 000000000..735d2f54c --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/github.com/spf13/cobra/bash_completions.go @@ -0,0 +1,357 @@ +package cobra + +import ( + "bytes" + "fmt" + "os" + "sort" + "strings" + + "github.com/spf13/pflag" +) + +const ( + BashCompFilenameExt = "cobra_annotation_bash_completion_filename_extentions" + BashCompOneRequiredFlag = "cobra_annotation_bash_completion_one_required_flag" +) + +func preamble(out *bytes.Buffer) { + fmt.Fprintf(out, `#!/bin/bash + + +__debug() +{ + if [[ -n ${BASH_COMP_DEBUG_FILE} ]]; then + echo "$*" >> "${BASH_COMP_DEBUG_FILE}" + fi +} + +__index_of_word() +{ + local w word=$1 + shift + index=0 + for w in "$@"; do + [[ $w = "$word" ]] && return + index=$((index+1)) + done + index=-1 +} + +__contains_word() +{ + local w word=$1; shift + for w in "$@"; do + [[ $w = "$word" ]] && return + done + return 1 +} + +__handle_reply() +{ + __debug "${FUNCNAME}" + case $cur in + -*) + compopt -o nospace + local allflags + if [ ${#must_have_one_flag[@]} -ne 0 ]; then + allflags=("${must_have_one_flag[@]}") + else + allflags=("${flags[*]} ${two_word_flags[*]}") + fi + COMPREPLY=( $(compgen -W "${allflags[*]}" -- "$cur") ) + [[ $COMPREPLY == *= ]] || compopt +o nospace + return 0; + ;; + esac + + # check if we are handling a flag with special work handling + local index + __index_of_word "${prev}" "${flags_with_completion[@]}" + if [[ ${index} -ge 0 ]]; then + ${flags_completion[${index}]} + return + fi + + # we are parsing a flag and don't have a special handler, no completion + if [[ ${cur} != "${words[cword]}" ]]; 
then + return + fi + + local completions + if [[ ${#must_have_one_flag[@]} -ne 0 ]]; then + completions=("${must_have_one_flag[@]}") + elif [[ ${#must_have_one_noun[@]} -ne 0 ]]; then + completions=("${must_have_one_noun[@]}") + else + completions=("${commands[@]}") + fi + COMPREPLY=( $(compgen -W "${completions[*]}" -- "$cur") ) + + if [[ ${#COMPREPLY[@]} -eq 0 ]]; then + declare -F __custom_func >/dev/null && __custom_func + fi +} + +# The arguments should be in the form "ext1|ext2|extn" +__handle_filename_extension_flag() +{ + local ext="$1" + _filedir "@(${ext})" +} + +__handle_flag() +{ + __debug "${FUNCNAME}: c is $c words[c] is ${words[c]}" + + # if a command required a flag, and we found it, unset must_have_one_flag() + local flagname=${words[c]} + # if the word contained an = + if [[ ${words[c]} == *"="* ]]; then + flagname=${flagname%%=*} # strip everything after the = + flagname="${flagname}=" # but put the = back + fi + __debug "${FUNCNAME}: looking for ${flagname}" + if __contains_word "${flagname}" "${must_have_one_flag[@]}"; then + must_have_one_flag=() + fi + + # skip the argument to a two word flag + if __contains_word "${words[c]}" "${two_word_flags[@]}"; then + c=$((c+1)) + # if we are looking for a flags value, don't show commands + if [[ $c -eq $cword ]]; then + commands=() + fi + fi + + # skip the flag itself + c=$((c+1)) + +} + +__handle_noun() +{ + __debug "${FUNCNAME}: c is $c words[c] is ${words[c]}" + + if __contains_word "${words[c]}" "${must_have_one_noun[@]}"; then + must_have_one_noun=() + fi + + nouns+=("${words[c]}") + c=$((c+1)) +} + +__handle_command() +{ + __debug "${FUNCNAME}: c is $c words[c] is ${words[c]}" + + local next_command + if [[ -n ${last_command} ]]; then + next_command="_${last_command}_${words[c]}" + else + next_command="_${words[c]}" + fi + c=$((c+1)) + __debug "${FUNCNAME}: looking for ${next_command}" + declare -F $next_command >/dev/null && $next_command +} + +__handle_word() +{ + if [[ $c -ge $cword ]]; then + __handle_reply + return + fi + __debug "${FUNCNAME}: c is $c words[c] is ${words[c]}" + if [[ "${words[c]}" == -* ]]; then + __handle_flag + elif __contains_word "${words[c]}" "${commands[@]}"; then + __handle_command + else + __handle_noun + fi + __handle_word +} + +`) +} + +func postscript(out *bytes.Buffer, name string) { + fmt.Fprintf(out, "__start_%s()\n", name) + fmt.Fprintf(out, `{ + local cur prev words cword + _init_completion -s || return + + local c=0 + local flags=() + local two_word_flags=() + local flags_with_completion=() + local flags_completion=() + local commands=("%s") + local must_have_one_flag=() + local must_have_one_noun=() + local last_command + local nouns=() + + __handle_word +} + +`, name) + fmt.Fprintf(out, "complete -F __start_%s %s\n", name, name) + fmt.Fprintf(out, "# ex: ts=4 sw=4 et filetype=sh\n") +} + +func writeCommands(cmd *Command, out *bytes.Buffer) { + fmt.Fprintf(out, " commands=()\n") + for _, c := range cmd.Commands() { + if len(c.Deprecated) > 0 { + continue + } + fmt.Fprintf(out, " commands+=(%q)\n", c.Name()) + } + fmt.Fprintf(out, "\n") +} + +func writeFlagHandler(name string, annotations map[string][]string, out *bytes.Buffer) { + for key, value := range annotations { + switch key { + case BashCompFilenameExt: + fmt.Fprintf(out, " flags_with_completion+=(%q)\n", name) + + ext := strings.Join(value, "|") + ext = "__handle_filename_extension_flag " + ext + fmt.Fprintf(out, " flags_completion+=(%q)\n", ext) + } + } +} + +func writeShortFlag(flag *pflag.Flag, out *bytes.Buffer) { + b 
:= (flag.Value.Type() == "bool") + name := flag.Shorthand + format := " " + if !b { + format += "two_word_" + } + format += "flags+=(\"-%s\")\n" + fmt.Fprintf(out, format, name) + writeFlagHandler("-"+name, flag.Annotations, out) +} + +func writeFlag(flag *pflag.Flag, out *bytes.Buffer) { + b := (flag.Value.Type() == "bool") + name := flag.Name + format := " flags+=(\"--%s" + if !b { + format += "=" + } + format += "\")\n" + fmt.Fprintf(out, format, name) + writeFlagHandler("--"+name, flag.Annotations, out) +} + +func writeFlags(cmd *Command, out *bytes.Buffer) { + fmt.Fprintf(out, ` flags=() + two_word_flags=() + flags_with_completion=() + flags_completion=() + +`) + cmd.NonInheritedFlags().VisitAll(func(flag *pflag.Flag) { + writeFlag(flag, out) + if len(flag.Shorthand) > 0 { + writeShortFlag(flag, out) + } + }) + + fmt.Fprintf(out, "\n") +} + +func writeRequiredFlag(cmd *Command, out *bytes.Buffer) { + fmt.Fprintf(out, " must_have_one_flag=()\n") + flags := cmd.NonInheritedFlags() + flags.VisitAll(func(flag *pflag.Flag) { + for key, _ := range flag.Annotations { + switch key { + case BashCompOneRequiredFlag: + format := " must_have_one_flag+=(\"--%s" + b := (flag.Value.Type() == "bool") + if !b { + format += "=" + } + format += "\")\n" + fmt.Fprintf(out, format, flag.Name) + + if len(flag.Shorthand) > 0 { + fmt.Fprintf(out, " must_have_one_flag+=(\"-%s\")\n", flag.Shorthand) + } + } + } + }) +} + +func writeRequiredNoun(cmd *Command, out *bytes.Buffer) { + fmt.Fprintf(out, " must_have_one_noun=()\n") + sort.Sort(sort.StringSlice(cmd.ValidArgs)) + for _, value := range cmd.ValidArgs { + fmt.Fprintf(out, " must_have_one_noun+=(%q)\n", value) + } +} + +func gen(cmd *Command, out *bytes.Buffer) { + for _, c := range cmd.Commands() { + if len(c.Deprecated) > 0 { + continue + } + gen(c, out) + } + commandName := cmd.CommandPath() + commandName = strings.Replace(commandName, " ", "_", -1) + fmt.Fprintf(out, "_%s()\n{\n", commandName) + fmt.Fprintf(out, " last_command=%q\n", commandName) + writeCommands(cmd, out) + writeFlags(cmd, out) + writeRequiredFlag(cmd, out) + writeRequiredNoun(cmd, out) + fmt.Fprintf(out, "}\n\n") +} + +func (cmd *Command) GenBashCompletion(out *bytes.Buffer) { + preamble(out) + if len(cmd.BashCompletionFunction) > 0 { + fmt.Fprintf(out, "%s\n", cmd.BashCompletionFunction) + } + gen(cmd, out) + postscript(out, cmd.Name()) +} + +func (cmd *Command) GenBashCompletionFile(filename string) error { + out := new(bytes.Buffer) + + cmd.GenBashCompletion(out) + + outFile, err := os.Create(filename) + if err != nil { + return err + } + defer outFile.Close() + + _, err = outFile.Write(out.Bytes()) + if err != nil { + return err + } + return nil +} + +func (cmd *Command) MarkFlagRequired(name string) { + flag := cmd.Flags().Lookup(name) + if flag == nil { + return + } + if flag.Annotations == nil { + flag.Annotations = make(map[string][]string) + } + annotation := make([]string, 1) + annotation[0] = "true" + flag.Annotations[BashCompOneRequiredFlag] = annotation +} diff --git a/vendor/github.com/docker/distribution/vendor/github.com/spf13/cobra/bash_completions.md b/vendor/github.com/docker/distribution/vendor/github.com/spf13/cobra/bash_completions.md new file mode 100644 index 000000000..204704efc --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/github.com/spf13/cobra/bash_completions.md @@ -0,0 +1,149 @@ +# Generating Bash Completions For Your Own cobra.Command + +Generating bash completions from a cobra command is incredibly easy. 
An actual program which does so for the kubernetes kubectl binary is as follows: + +```go +package main + +import ( + "io/ioutil" + "os" + + "github.com/GoogleCloudPlatform/kubernetes/pkg/kubectl/cmd" +) + +func main() { + kubectl := cmd.NewFactory(nil).NewKubectlCommand(os.Stdin, ioutil.Discard, ioutil.Discard) + kubectl.GenBashCompletionFile("out.sh") +} +``` + +That will get you completions of subcommands and flags. If you make additional annotations to your code, you can get even more intelligent and flexible behavior. + +## Creating your own custom functions + +Some more actual code that works in kubernetes: + +```bash +const ( + bash_completion_func = `__kubectl_parse_get() +{ + local kubectl_output out + if kubectl_output=$(kubectl get --no-headers "$1" 2>/dev/null); then + out=($(echo "${kubectl_output}" | awk '{print $1}')) + COMPREPLY=( $( compgen -W "${out[*]}" -- "$cur" ) ) + fi +} + +__kubectl_get_resource() +{ + if [[ ${#nouns[@]} -eq 0 ]]; then + return 1 + fi + __kubectl_parse_get ${nouns[${#nouns[@]} -1]} + if [[ $? -eq 0 ]]; then + return 0 + fi +} + +__custom_func() { + case ${last_command} in + kubectl_get | kubectl_describe | kubectl_delete | kubectl_stop) + __kubectl_get_resource + return + ;; + *) + ;; + esac +} +`) +``` + +And then I set that in my command definition: + +```go +cmds := &cobra.Command{ + Use: "kubectl", + Short: "kubectl controls the Kubernetes cluster manager", + Long: `kubectl controls the Kubernetes cluster manager. + +Find more information at https://github.com/GoogleCloudPlatform/kubernetes.`, + Run: runHelp, + BashCompletionFunction: bash_completion_func, +} +``` + +The `BashCompletionFunction` option is really only valid/useful on the root command. Doing the above will cause `__custom_func()` to be called when the built in processor was unable to find a solution. In the case of kubernetes a valid command might look something like `kubectl get pod [mypod]`. If you type `kubectl get pod [tab][tab]` the `__customc_func()` will run because the cobra.Command only understood "kubectl" and "get." `__custom_func()` will see that the cobra.Command is "kubectl_get" and will thus call another helper `__kubectl_get_resource()`. `__kubectl_get_resource` will look at the 'nouns' collected. In our example the only noun will be `pod`. So it will call `__kubectl_parse_get pod`. `__kubectl_parse_get` will actually call out to kubernetes and get any pods. It will then set `COMPREPLY` to valid pods! + +## Have the completions code complete your 'nouns' + +In the above example "pod" was assumed to already be typed. But if you want `kubectl get [tab][tab]` to show a list of valid "nouns" you have to set them. Simplified code from `kubectl get` looks like: + +```go +validArgs []string = { "pods", "nodes", "services", "replicationControllers" } + +cmd := &cobra.Command{ + Use: "get [(-o|--output=)json|yaml|template|...] (RESOURCE [NAME] | RESOURCE/NAME ...)", + Short: "Display one or many resources", + Long: get_long, + Example: get_example, + Run: func(cmd *cobra.Command, args []string) { + err := RunGet(f, out, cmd, args) + util.CheckErr(err) + }, + ValidArgs: validArgs, +} +``` + +Notice we put the "ValidArgs" on the "get" subcommand. Doing so will give results like + +```bash +# kubectl get [tab][tab] +nodes pods replicationControllers services +``` + +## Mark flags as required + +Most of the time completions will only show subcommands. But if a flag is required to make a subcommand work, you probably want it to show up when the user types [tab][tab]. 
Marking a flag as 'Required' is incredibly easy. + +```go +cmd.MarkFlagRequired("pod") +cmd.MarkFlagRequired("container") +``` + +and you'll get something like + +```bash +# kubectl exec [tab][tab][tab] +-c --container= -p --pod= +``` + +# Specify valid filename extensions for flags that take a filename + +In this example we use --filename= and expect to get a json or yaml file as the argument. To make this easier we annotate the --filename flag with valid filename extensions. + +```go + annotations := []string{"json", "yaml", "yml"} + annotation := make(map[string][]string) + annotation[cobra.BashCompFilenameExt] = annotations + + flag := &pflag.Flag{ + Name: "filename", + Shorthand: "f", + Usage: usage, + Value: value, + DefValue: value.String(), + Annotations: annotation, + } + cmd.Flags().AddFlag(flag) +``` + +Now when you run a command with this filename flag you'll get something like + +```bash +# kubectl create -f +test/ example/ rpmbuild/ +hello.yml test.json +``` + +So while there are many other files in the CWD it only shows me subdirs and those with valid extensions. diff --git a/vendor/github.com/docker/distribution/vendor/github.com/spf13/cobra/cobra.go b/vendor/github.com/docker/distribution/vendor/github.com/spf13/cobra/cobra.go new file mode 100644 index 000000000..78b92b0af --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/github.com/spf13/cobra/cobra.go @@ -0,0 +1,112 @@ +// Copyright © 2013 Steve Francia . +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Commands similar to git, go tools and other modern CLI tools +// inspired by go, go-Commander, gh and subcommand + +package cobra + +import ( + "fmt" + "io" + "reflect" + "strconv" + "strings" + "text/template" +) + +var initializers []func() + +// automatic prefix matching can be a dangerous thing to automatically enable in CLI tools. +// Set this to true to enable it +var EnablePrefixMatching bool = false + +// enables an information splash screen on Windows if the CLI is started from explorer.exe. +var EnableWindowsMouseTrap bool = true + +var MousetrapHelpText string = `This is a command line tool + +You need to open cmd.exe and run it from there. +` + +//OnInitialize takes a series of func() arguments and appends them to a slice of func(). +func OnInitialize(y ...func()) { + for _, x := range y { + initializers = append(initializers, x) + } +} + +//Gt takes two types and checks whether the first type is greater than the second. In case of types Arrays, Chans, +//Maps and Slices, Gt will compare their lengths. Ints are compared directly while strings are first parsed as +//ints and then compared. 
+func Gt(a interface{}, b interface{}) bool { + var left, right int64 + av := reflect.ValueOf(a) + + switch av.Kind() { + case reflect.Array, reflect.Chan, reflect.Map, reflect.Slice: + left = int64(av.Len()) + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + left = av.Int() + case reflect.String: + left, _ = strconv.ParseInt(av.String(), 10, 64) + } + + bv := reflect.ValueOf(b) + + switch bv.Kind() { + case reflect.Array, reflect.Chan, reflect.Map, reflect.Slice: + right = int64(bv.Len()) + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + right = bv.Int() + case reflect.String: + right, _ = strconv.ParseInt(bv.String(), 10, 64) + } + + return left > right +} + +//Eq takes two types and checks whether they are equal. Supported types are int and string. Unsupported types will panic. +func Eq(a interface{}, b interface{}) bool { + av := reflect.ValueOf(a) + bv := reflect.ValueOf(b) + + switch av.Kind() { + case reflect.Array, reflect.Chan, reflect.Map, reflect.Slice: + panic("Eq called on unsupported type") + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return av.Int() == bv.Int() + case reflect.String: + return av.String() == bv.String() + } + return false +} + +//rpad adds padding to the right of a string +func rpad(s string, padding int) string { + template := fmt.Sprintf("%%-%ds", padding) + return fmt.Sprintf(template, s) +} + +// tmpl executes the given template text on data, writing the result to w. +func tmpl(w io.Writer, text string, data interface{}) error { + t := template.New("top") + t.Funcs(template.FuncMap{ + "trim": strings.TrimSpace, + "rpad": rpad, + "gt": Gt, + "eq": Eq, + }) + template.Must(t.Parse(text)) + return t.Execute(w, data) +} diff --git a/vendor/github.com/docker/distribution/vendor/github.com/spf13/cobra/command.go b/vendor/github.com/docker/distribution/vendor/github.com/spf13/cobra/command.go new file mode 100644 index 000000000..3831c3376 --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/github.com/spf13/cobra/command.go @@ -0,0 +1,1031 @@ +// Copyright © 2013 Steve Francia . +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//Package cobra is a commander providing a simple interface to create powerful modern CLI interfaces. +//In addition to providing an interface, Cobra simultaneously provides a controller to organize your application code. +package cobra + +import ( + "bytes" + "fmt" + "io" + "os" + "runtime" + "strings" + "time" + + "github.com/inconshreveable/mousetrap" + flag "github.com/spf13/pflag" +) + +// Command is just that, a command for your application. +// eg. 'go run' ... 'run' is the command. Cobra requires +// you to define the usage and description as part of your command +// definition to ensure usability. +type Command struct { + // Name is the command name, usually the executable's name. + name string + // The one-line usage message. + Use string + // An array of aliases that can be used instead of the first word in Use. 
+ Aliases []string + // The short description shown in the 'help' output. + Short string + // The long message shown in the 'help ' output. + Long string + // Examples of how to use the command + Example string + // List of all valid non-flag arguments, used for bash completions *TODO* actually validate these + ValidArgs []string + // Custom functions used by the bash autocompletion generator + BashCompletionFunction string + // Is this command deprecated and should print this string when used? + Deprecated string + // Full set of flags + flags *flag.FlagSet + // Set of flags childrens of this command will inherit + pflags *flag.FlagSet + // Flags that are declared specifically by this command (not inherited). + lflags *flag.FlagSet + // The *Run functions are executed in the following order: + // * PersistentPreRun() + // * PreRun() + // * Run() + // * PostRun() + // * PersistentPostRun() + // All functions get the same args, the arguments after the command name + // PersistentPreRun: children of this command will inherit and execute + PersistentPreRun func(cmd *Command, args []string) + // PreRun: children of this command will not inherit. + PreRun func(cmd *Command, args []string) + // Run: Typically the actual work function. Most commands will only implement this + Run func(cmd *Command, args []string) + // PostRun: run after the Run command. + PostRun func(cmd *Command, args []string) + // PersistentPostRun: children of this command will inherit and execute after PostRun + PersistentPostRun func(cmd *Command, args []string) + // Commands is the list of commands supported by this program. + commands []*Command + // Parent Command for this command + parent *Command + // max lengths of commands' string lengths for use in padding + commandsMaxUseLen int + commandsMaxCommandPathLen int + commandsMaxNameLen int + + flagErrorBuf *bytes.Buffer + cmdErrorBuf *bytes.Buffer + + args []string // actual args parsed from flags + output *io.Writer // nil means stderr; use Out() method instead + usageFunc func(*Command) error // Usage can be defined by application + usageTemplate string // Can be defined by Application + helpTemplate string // Can be defined by Application + helpFunc func(*Command, []string) // Help can be defined by application + helpCommand *Command // The help command + helpFlagVal bool + // The global normalization function that we can use on every pFlag set and children commands + globNormFunc func(f *flag.FlagSet, name string) flag.NormalizedName +} + +// os.Args[1:] by default, if desired, can be overridden +// particularly useful when testing. +func (c *Command) SetArgs(a []string) { + c.args = a +} + +func (c *Command) getOut(def io.Writer) io.Writer { + if c.output != nil { + return *c.output + } + + if c.HasParent() { + return c.parent.Out() + } else { + return def + } +} + +func (c *Command) Out() io.Writer { + return c.getOut(os.Stderr) +} + +func (c *Command) getOutOrStdout() io.Writer { + return c.getOut(os.Stdout) +} + +// SetOutput sets the destination for usage and error messages. +// If output is nil, os.Stderr is used. 
+func (c *Command) SetOutput(output io.Writer) { + c.output = &output +} + +// Usage can be defined by application +func (c *Command) SetUsageFunc(f func(*Command) error) { + c.usageFunc = f +} + +// Can be defined by Application +func (c *Command) SetUsageTemplate(s string) { + c.usageTemplate = s +} + +// Can be defined by Application +func (c *Command) SetHelpFunc(f func(*Command, []string)) { + c.helpFunc = f +} + +func (c *Command) SetHelpCommand(cmd *Command) { + c.helpCommand = cmd +} + +// Can be defined by Application +func (c *Command) SetHelpTemplate(s string) { + c.helpTemplate = s +} + +// SetGlobalNormalizationFunc sets a normalization function to all flag sets and also to child commands. +// The user should not have a cyclic dependency on commands. +func (c *Command) SetGlobalNormalizationFunc(n func(f *flag.FlagSet, name string) flag.NormalizedName) { + c.Flags().SetNormalizeFunc(n) + c.PersistentFlags().SetNormalizeFunc(n) + c.LocalFlags().SetNormalizeFunc(n) + c.globNormFunc = n + + for _, command := range c.commands { + command.SetGlobalNormalizationFunc(n) + } +} + +func (c *Command) UsageFunc() (f func(*Command) error) { + if c.usageFunc != nil { + return c.usageFunc + } + + if c.HasParent() { + return c.parent.UsageFunc() + } else { + return func(c *Command) error { + err := tmpl(c.Out(), c.UsageTemplate(), c) + return err + } + } +} +func (c *Command) HelpFunc() func(*Command, []string) { + if c.helpFunc != nil { + return c.helpFunc + } + + if c.HasParent() { + return c.parent.HelpFunc() + } else { + return func(c *Command, args []string) { + if len(args) == 0 { + // Help called without any topic, calling on root + c.Root().Help() + return + } + + cmd, _, e := c.Root().Find(args) + if cmd == nil || e != nil { + c.Printf("Unknown help topic %#q.", args) + + c.Root().Usage() + } else { + err := cmd.Help() + if err != nil { + c.Println(err) + } + } + } + } +} + +var minUsagePadding int = 25 + +func (c *Command) UsagePadding() int { + if c.parent == nil || minUsagePadding > c.parent.commandsMaxUseLen { + return minUsagePadding + } else { + return c.parent.commandsMaxUseLen + } +} + +var minCommandPathPadding int = 11 + +// +func (c *Command) CommandPathPadding() int { + if c.parent == nil || minCommandPathPadding > c.parent.commandsMaxCommandPathLen { + return minCommandPathPadding + } else { + return c.parent.commandsMaxCommandPathLen + } +} + +var minNamePadding int = 11 + +func (c *Command) NamePadding() int { + if c.parent == nil || minNamePadding > c.parent.commandsMaxNameLen { + return minNamePadding + } else { + return c.parent.commandsMaxNameLen + } +} + +func (c *Command) UsageTemplate() string { + if c.usageTemplate != "" { + return c.usageTemplate + } + + if c.HasParent() { + return c.parent.UsageTemplate() + } else { + return `{{ $cmd := . 
}} +Usage: {{if .Runnable}} + {{.UseLine}}{{if .HasFlags}} [flags]{{end}}{{end}}{{if .HasSubCommands}} + {{ .CommandPath}} [command]{{end}}{{if gt .Aliases 0}} + +Aliases: + {{.NameAndAliases}} +{{end}}{{if .HasExample}} + +Examples: +{{ .Example }} +{{end}}{{ if .HasRunnableSubCommands}} + +Available Commands: {{range .Commands}}{{if and (.Runnable) (not .Deprecated)}} + {{rpad .Name .NamePadding }} {{.Short}}{{end}}{{end}} +{{end}} +{{ if .HasLocalFlags}}Flags: +{{.LocalFlags.FlagUsages}}{{end}} +{{ if .HasInheritedFlags}}Global Flags: +{{.InheritedFlags.FlagUsages}}{{end}}{{if or (.HasHelpSubCommands) (.HasRunnableSiblings)}} +Additional help topics: +{{if .HasHelpSubCommands}}{{range .Commands}}{{if and (not .Runnable) (not .Deprecated)}} {{rpad .CommandPath .CommandPathPadding}} {{.Short}}{{end}}{{end}}{{end}}{{if .HasRunnableSiblings }}{{range .Parent.Commands}}{{if and (not .Runnable) (not .Deprecated)}}{{if not (eq .Name $cmd.Name) }} + {{rpad .CommandPath .CommandPathPadding}} {{.Short}}{{end}}{{end}}{{end}}{{end}} +{{end}}{{ if .HasSubCommands }} +Use "{{.Root.Name}} help [command]" for more information about a command. +{{end}}` + } +} + +func (c *Command) HelpTemplate() string { + if c.helpTemplate != "" { + return c.helpTemplate + } + + if c.HasParent() { + return c.parent.HelpTemplate() + } else { + return `{{with or .Long .Short }}{{. | trim}}{{end}} +{{if or .Runnable .HasSubCommands}}{{.UsageString}}{{end}} +` + } +} + +// Really only used when casting a command to a commander +func (c *Command) resetChildrensParents() { + for _, x := range c.commands { + x.parent = c + } +} + +// Test if the named flag is a boolean flag. +func isBooleanFlag(name string, f *flag.FlagSet) bool { + flag := f.Lookup(name) + if flag == nil { + return false + } + return flag.Value.Type() == "bool" +} + +// Test if the named flag is a boolean flag. +func isBooleanShortFlag(name string, f *flag.FlagSet) bool { + result := false + f.VisitAll(func(f *flag.Flag) { + if f.Shorthand == name && f.Value.Type() == "bool" { + result = true + } + }) + return result +} + +func stripFlags(args []string, c *Command) []string { + if len(args) < 1 { + return args + } + c.mergePersistentFlags() + + commands := []string{} + + inQuote := false + inFlag := false + for _, y := range args { + if !inQuote { + switch { + case strings.HasPrefix(y, "\""): + inQuote = true + case strings.Contains(y, "=\""): + inQuote = true + case strings.HasPrefix(y, "--") && !strings.Contains(y, "="): + // TODO: this isn't quite right, we should really check ahead for 'true' or 'false' + inFlag = !isBooleanFlag(y[2:], c.Flags()) + case strings.HasPrefix(y, "-") && !strings.Contains(y, "=") && len(y) == 2 && !isBooleanShortFlag(y[1:], c.Flags()): + inFlag = true + case inFlag: + inFlag = false + case y == "": + // strip empty commands, as the go tests expect this to be ok.... + case !strings.HasPrefix(y, "-"): + commands = append(commands, y) + inFlag = false + } + } + + if strings.HasSuffix(y, "\"") && !strings.HasSuffix(y, "\\\"") { + inQuote = false + } + } + + return commands +} + +// argsMinusFirstX removes only the first x from args. Otherwise, commands that look like +// openshift admin policy add-role-to-user admin my-user, lose the admin argument (arg[4]). +func argsMinusFirstX(args []string, x string) []string { + for i, y := range args { + if x == y { + ret := []string{} + ret = append(ret, args[:i]...) + ret = append(ret, args[i+1:]...) 
+ return ret + } + } + return args +} + +// find the target command given the args and command tree +// Meant to be run on the highest node. Only searches down. +func (c *Command) Find(args []string) (*Command, []string, error) { + if c == nil { + return nil, nil, fmt.Errorf("Called find() on a nil Command") + } + + // If there are no arguments, return the root command. If the root has no + // subcommands, args reflects arguments that should actually be passed to + // the root command, so also return the root command. + if len(args) == 0 || !c.Root().HasSubCommands() { + return c.Root(), args, nil + } + + var innerfind func(*Command, []string) (*Command, []string) + + innerfind = func(c *Command, innerArgs []string) (*Command, []string) { + if len(innerArgs) > 0 && c.HasSubCommands() { + argsWOflags := stripFlags(innerArgs, c) + if len(argsWOflags) > 0 { + matches := make([]*Command, 0) + for _, cmd := range c.commands { + if cmd.Name() == argsWOflags[0] || cmd.HasAlias(argsWOflags[0]) { // exact name or alias match + return innerfind(cmd, argsMinusFirstX(innerArgs, argsWOflags[0])) + } else if EnablePrefixMatching { + if strings.HasPrefix(cmd.Name(), argsWOflags[0]) { // prefix match + matches = append(matches, cmd) + } + for _, x := range cmd.Aliases { + if strings.HasPrefix(x, argsWOflags[0]) { + matches = append(matches, cmd) + } + } + } + } + + // only accept a single prefix match - multiple matches would be ambiguous + if len(matches) == 1 { + return innerfind(matches[0], argsMinusFirstX(innerArgs, argsWOflags[0])) + } + } + } + + return c, innerArgs + } + + commandFound, a := innerfind(c, args) + + // If we matched on the root, but we asked for a subcommand, return an error + if commandFound.Name() == c.Name() && len(stripFlags(args, c)) > 0 && commandFound.Name() != args[0] { + return nil, a, fmt.Errorf("unknown command %q", a[0]) + } + + return commandFound, a, nil +} + +func (c *Command) Root() *Command { + var findRoot func(*Command) *Command + + findRoot = func(x *Command) *Command { + if x.HasParent() { + return findRoot(x.parent) + } else { + return x + } + } + + return findRoot(c) +} + +func (c *Command) execute(a []string) (err error) { + if c == nil { + return fmt.Errorf("Called Execute() on a nil Command") + } + + if len(c.Deprecated) > 0 { + c.Printf("Command %q is deprecated, %s\n", c.Name(), c.Deprecated) + } + + err = c.ParseFlags(a) + if err == flag.ErrHelp { + c.Help() + return nil + } + if err != nil { + // We're writing subcommand usage to root command's error buffer to have it displayed to the user + r := c.Root() + if r.cmdErrorBuf == nil { + r.cmdErrorBuf = new(bytes.Buffer) + } + // for writing the usage to the buffer we need to switch the output temporarily + // since Out() returns root output, you also need to revert that on root + out := r.Out() + r.SetOutput(r.cmdErrorBuf) + c.Usage() + r.SetOutput(out) + return err + } + // If help is called, regardless of other flags, we print that. + // Print help also if c.Run is nil. 
+ if c.helpFlagVal || !c.Runnable() { + c.Help() + return nil + } + + c.preRun() + argWoFlags := c.Flags().Args() + + for p := c; p != nil; p = p.Parent() { + if p.PersistentPreRun != nil { + p.PersistentPreRun(c, argWoFlags) + break + } + } + if c.PreRun != nil { + c.PreRun(c, argWoFlags) + } + + c.Run(c, argWoFlags) + + if c.PostRun != nil { + c.PostRun(c, argWoFlags) + } + for p := c; p != nil; p = p.Parent() { + if p.PersistentPostRun != nil { + p.PersistentPostRun(c, argWoFlags) + break + } + } + + return nil +} + +func (c *Command) preRun() { + for _, x := range initializers { + x() + } +} + +func (c *Command) errorMsgFromParse() string { + s := c.flagErrorBuf.String() + + x := strings.Split(s, "\n") + + if len(x) > 0 { + return x[0] + } else { + return "" + } +} + +// Call execute to use the args (os.Args[1:] by default) +// and run through the command tree finding appropriate matches +// for commands and then corresponding flags. +func (c *Command) Execute() (err error) { + + // Regardless of what command execute is called on, run on Root only + if c.HasParent() { + return c.Root().Execute() + } + + if EnableWindowsMouseTrap && runtime.GOOS == "windows" { + if mousetrap.StartedByExplorer() { + c.Print(MousetrapHelpText) + time.Sleep(5 * time.Second) + os.Exit(1) + } + } + + // initialize help as the last point possible to allow for user + // overriding + c.initHelp() + + var args []string + + if len(c.args) == 0 { + args = os.Args[1:] + } else { + args = c.args + } + + cmd, flags, err := c.Find(args) + if err == nil { + err = cmd.execute(flags) + } + + if err != nil { + if err == flag.ErrHelp { + c.Help() + + } else { + c.Println("Error:", err.Error()) + c.Printf("Run '%v help' for usage.\n", c.Root().Name()) + } + } + + return +} + +func (c *Command) initHelp() { + if c.helpCommand == nil { + if !c.HasSubCommands() { + return + } + + c.helpCommand = &Command{ + Use: "help [command]", + Short: "Help about any command", + Long: `Help provides help for any command in the application. + Simply type ` + c.Name() + ` help [path to command] for full details.`, + Run: c.HelpFunc(), + PersistentPreRun: func(cmd *Command, args []string) {}, + PersistentPostRun: func(cmd *Command, args []string) {}, + } + } + c.AddCommand(c.helpCommand) +} + +// Used for testing +func (c *Command) ResetCommands() { + c.commands = nil + c.helpCommand = nil + c.cmdErrorBuf = new(bytes.Buffer) + c.cmdErrorBuf.Reset() +} + +//Commands returns a slice of child commands. +func (c *Command) Commands() []*Command { + return c.commands +} + +// AddCommand adds one or more commands to this parent command. +func (c *Command) AddCommand(cmds ...*Command) { + for i, x := range cmds { + if cmds[i] == c { + panic("Command can't be a child of itself") + } + cmds[i].parent = c + // update max lengths + usageLen := len(x.Use) + if usageLen > c.commandsMaxUseLen { + c.commandsMaxUseLen = usageLen + } + commandPathLen := len(x.CommandPath()) + if commandPathLen > c.commandsMaxCommandPathLen { + c.commandsMaxCommandPathLen = commandPathLen + } + nameLen := len(x.Name()) + if nameLen > c.commandsMaxNameLen { + c.commandsMaxNameLen = nameLen + } + // If glabal normalization function exists, update all children + if c.globNormFunc != nil { + x.SetGlobalNormalizationFunc(c.globNormFunc) + } + c.commands = append(c.commands, x) + } +} + +// AddCommand removes one or more commands from a parent command. 
+func (c *Command) RemoveCommand(cmds ...*Command) { + commands := []*Command{} +main: + for _, command := range c.commands { + for _, cmd := range cmds { + if command == cmd { + command.parent = nil + continue main + } + } + commands = append(commands, command) + } + c.commands = commands + // recompute all lengths + c.commandsMaxUseLen = 0 + c.commandsMaxCommandPathLen = 0 + c.commandsMaxNameLen = 0 + for _, command := range c.commands { + usageLen := len(command.Use) + if usageLen > c.commandsMaxUseLen { + c.commandsMaxUseLen = usageLen + } + commandPathLen := len(command.CommandPath()) + if commandPathLen > c.commandsMaxCommandPathLen { + c.commandsMaxCommandPathLen = commandPathLen + } + nameLen := len(command.Name()) + if nameLen > c.commandsMaxNameLen { + c.commandsMaxNameLen = nameLen + } + } +} + +// Convenience method to Print to the defined output +func (c *Command) Print(i ...interface{}) { + fmt.Fprint(c.Out(), i...) +} + +// Convenience method to Println to the defined output +func (c *Command) Println(i ...interface{}) { + str := fmt.Sprintln(i...) + c.Print(str) +} + +// Convenience method to Printf to the defined output +func (c *Command) Printf(format string, i ...interface{}) { + str := fmt.Sprintf(format, i...) + c.Print(str) +} + +// Output the usage for the command +// Used when a user provides invalid input +// Can be defined by user by overriding UsageFunc +func (c *Command) Usage() error { + c.mergePersistentFlags() + err := c.UsageFunc()(c) + return err +} + +// Output the help for the command +// Used when a user calls help [command] +// by the default HelpFunc in the commander +func (c *Command) Help() error { + c.mergePersistentFlags() + err := tmpl(c.getOutOrStdout(), c.HelpTemplate(), c) + return err +} + +func (c *Command) UsageString() string { + tmpOutput := c.output + bb := new(bytes.Buffer) + c.SetOutput(bb) + c.Usage() + c.output = tmpOutput + return bb.String() +} + +// CommandPath returns the full path to this command. 
+func (c *Command) CommandPath() string { + str := c.Name() + x := c + for x.HasParent() { + str = x.parent.Name() + " " + str + x = x.parent + } + return str +} + +//The full usage for a given command (including parents) +func (c *Command) UseLine() string { + str := "" + if c.HasParent() { + str = c.parent.CommandPath() + " " + } + return str + c.Use +} + +// For use in determining which flags have been assigned to which commands +// and which persist +func (c *Command) DebugFlags() { + c.Println("DebugFlags called on", c.Name()) + var debugflags func(*Command) + + debugflags = func(x *Command) { + if x.HasFlags() || x.HasPersistentFlags() { + c.Println(x.Name()) + } + if x.HasFlags() { + x.flags.VisitAll(func(f *flag.Flag) { + if x.HasPersistentFlags() { + if x.persistentFlag(f.Name) == nil { + c.Println(" -"+f.Shorthand+",", "--"+f.Name, "["+f.DefValue+"]", "", f.Value, " [L]") + } else { + c.Println(" -"+f.Shorthand+",", "--"+f.Name, "["+f.DefValue+"]", "", f.Value, " [LP]") + } + } else { + c.Println(" -"+f.Shorthand+",", "--"+f.Name, "["+f.DefValue+"]", "", f.Value, " [L]") + } + }) + } + if x.HasPersistentFlags() { + x.pflags.VisitAll(func(f *flag.Flag) { + if x.HasFlags() { + if x.flags.Lookup(f.Name) == nil { + c.Println(" -"+f.Shorthand+",", "--"+f.Name, "["+f.DefValue+"]", "", f.Value, " [P]") + } + } else { + c.Println(" -"+f.Shorthand+",", "--"+f.Name, "["+f.DefValue+"]", "", f.Value, " [P]") + } + }) + } + c.Println(x.flagErrorBuf) + if x.HasSubCommands() { + for _, y := range x.commands { + debugflags(y) + } + } + } + + debugflags(c) +} + +// Name returns the command's name: the first word in the use line. +func (c *Command) Name() string { + if c.name != "" { + return c.name + } + name := c.Use + i := strings.Index(name, " ") + if i >= 0 { + name = name[:i] + } + return name +} + +// Determine if a given string is an alias of the command. 
+func (c *Command) HasAlias(s string) bool { + for _, a := range c.Aliases { + if a == s { + return true + } + } + return false +} + +func (c *Command) NameAndAliases() string { + return strings.Join(append([]string{c.Name()}, c.Aliases...), ", ") +} + +func (c *Command) HasExample() bool { + return len(c.Example) > 0 +} + +// Determine if the command is itself runnable +func (c *Command) Runnable() bool { + return c.Run != nil +} + +// Determine if the command has children commands +func (c *Command) HasSubCommands() bool { + return len(c.commands) > 0 +} + +func (c *Command) HasRunnableSiblings() bool { + if !c.HasParent() { + return false + } + for _, sub := range c.parent.commands { + if sub.Runnable() { + return true + } + } + return false +} + +func (c *Command) HasHelpSubCommands() bool { + for _, sub := range c.commands { + if !sub.Runnable() { + return true + } + } + return false +} + +// Determine if the command has runnable children commands +func (c *Command) HasRunnableSubCommands() bool { + for _, sub := range c.commands { + if sub.Runnable() { + return true + } + } + return false +} + +// Determine if the command is a child command +func (c *Command) HasParent() bool { + return c.parent != nil +} + +// GlobalNormalizationFunc returns the global normalization function or nil if doesn't exists +func (c *Command) GlobalNormalizationFunc() func(f *flag.FlagSet, name string) flag.NormalizedName { + return c.globNormFunc +} + +// Get the complete FlagSet that applies to this command (local and persistent declared here and by all parents) +func (c *Command) Flags() *flag.FlagSet { + if c.flags == nil { + c.flags = flag.NewFlagSet(c.Name(), flag.ContinueOnError) + if c.flagErrorBuf == nil { + c.flagErrorBuf = new(bytes.Buffer) + } + c.flags.SetOutput(c.flagErrorBuf) + c.PersistentFlags().BoolVarP(&c.helpFlagVal, "help", "h", false, "help for "+c.Name()) + } + return c.flags +} + +// Get the local FlagSet specifically set in the current command +func (c *Command) LocalFlags() *flag.FlagSet { + c.mergePersistentFlags() + + local := flag.NewFlagSet(c.Name(), flag.ContinueOnError) + c.lflags.VisitAll(func(f *flag.Flag) { + local.AddFlag(f) + }) + return local +} + +// All Flags which were inherited from parents commands +func (c *Command) InheritedFlags() *flag.FlagSet { + c.mergePersistentFlags() + + inherited := flag.NewFlagSet(c.Name(), flag.ContinueOnError) + local := c.LocalFlags() + + var rmerge func(x *Command) + + rmerge = func(x *Command) { + if x.HasPersistentFlags() { + x.PersistentFlags().VisitAll(func(f *flag.Flag) { + if inherited.Lookup(f.Name) == nil && local.Lookup(f.Name) == nil { + inherited.AddFlag(f) + } + }) + } + if x.HasParent() { + rmerge(x.parent) + } + } + + if c.HasParent() { + rmerge(c.parent) + } + + return inherited +} + +// All Flags which were not inherited from parent commands +func (c *Command) NonInheritedFlags() *flag.FlagSet { + return c.LocalFlags() +} + +// Get the Persistent FlagSet specifically set in the current command +func (c *Command) PersistentFlags() *flag.FlagSet { + if c.pflags == nil { + c.pflags = flag.NewFlagSet(c.Name(), flag.ContinueOnError) + if c.flagErrorBuf == nil { + c.flagErrorBuf = new(bytes.Buffer) + } + c.pflags.SetOutput(c.flagErrorBuf) + } + return c.pflags +} + +// For use in testing +func (c *Command) ResetFlags() { + c.flagErrorBuf = new(bytes.Buffer) + c.flagErrorBuf.Reset() + c.flags = flag.NewFlagSet(c.Name(), flag.ContinueOnError) + c.flags.SetOutput(c.flagErrorBuf) + c.pflags = flag.NewFlagSet(c.Name(), 
flag.ContinueOnError) + c.pflags.SetOutput(c.flagErrorBuf) +} + +// Does the command contain any flags (local plus persistent from the entire structure) +func (c *Command) HasFlags() bool { + return c.Flags().HasFlags() +} + +// Does the command contain persistent flags +func (c *Command) HasPersistentFlags() bool { + return c.PersistentFlags().HasFlags() +} + +// Does the command has flags specifically declared locally +func (c *Command) HasLocalFlags() bool { + return c.LocalFlags().HasFlags() +} + +func (c *Command) HasInheritedFlags() bool { + return c.InheritedFlags().HasFlags() +} + +// Climbs up the command tree looking for matching flag +func (c *Command) Flag(name string) (flag *flag.Flag) { + flag = c.Flags().Lookup(name) + + if flag == nil { + flag = c.persistentFlag(name) + } + + return +} + +// recursively find matching persistent flag +func (c *Command) persistentFlag(name string) (flag *flag.Flag) { + if c.HasPersistentFlags() { + flag = c.PersistentFlags().Lookup(name) + } + + if flag == nil && c.HasParent() { + flag = c.parent.persistentFlag(name) + } + return +} + +// Parses persistent flag tree & local flags +func (c *Command) ParseFlags(args []string) (err error) { + c.mergePersistentFlags() + err = c.Flags().Parse(args) + return +} + +func (c *Command) Parent() *Command { + return c.parent +} + +func (c *Command) mergePersistentFlags() { + var rmerge func(x *Command) + + // Save the set of local flags + if c.lflags == nil { + c.lflags = flag.NewFlagSet(c.Name(), flag.ContinueOnError) + if c.flagErrorBuf == nil { + c.flagErrorBuf = new(bytes.Buffer) + } + c.lflags.SetOutput(c.flagErrorBuf) + addtolocal := func(f *flag.Flag) { + c.lflags.AddFlag(f) + } + c.Flags().VisitAll(addtolocal) + c.PersistentFlags().VisitAll(addtolocal) + } + rmerge = func(x *Command) { + if !x.HasParent() { + flag.CommandLine.VisitAll(func(f *flag.Flag) { + if x.PersistentFlags().Lookup(f.Name) == nil { + x.PersistentFlags().AddFlag(f) + } + }) + } + if x.HasPersistentFlags() { + x.PersistentFlags().VisitAll(func(f *flag.Flag) { + if c.Flags().Lookup(f.Name) == nil { + c.Flags().AddFlag(f) + } + }) + } + if x.HasParent() { + rmerge(x.parent) + } + } + + rmerge(c) +} diff --git a/vendor/github.com/docker/distribution/vendor/github.com/spf13/cobra/md_docs.go b/vendor/github.com/docker/distribution/vendor/github.com/spf13/cobra/md_docs.go new file mode 100644 index 000000000..6092c85af --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/github.com/spf13/cobra/md_docs.go @@ -0,0 +1,138 @@ +//Copyright 2015 Red Hat Inc. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package cobra + +import ( + "bytes" + "fmt" + "os" + "sort" + "strings" + "time" +) + +func printOptions(out *bytes.Buffer, cmd *Command, name string) { + flags := cmd.NonInheritedFlags() + flags.SetOutput(out) + if flags.HasFlags() { + fmt.Fprintf(out, "### Options\n\n```\n") + flags.PrintDefaults() + fmt.Fprintf(out, "```\n\n") + } + + parentFlags := cmd.InheritedFlags() + parentFlags.SetOutput(out) + if parentFlags.HasFlags() { + fmt.Fprintf(out, "### Options inherited from parent commands\n\n```\n") + parentFlags.PrintDefaults() + fmt.Fprintf(out, "```\n\n") + } +} + +type byName []*Command + +func (s byName) Len() int { return len(s) } +func (s byName) Swap(i, j int) { s[i], s[j] = s[j], s[i] } +func (s byName) Less(i, j int) bool { return s[i].Name() < s[j].Name() } + +func GenMarkdown(cmd *Command, out *bytes.Buffer) { + GenMarkdownCustom(cmd, out, func(s string) string { return s }) +} + +func GenMarkdownCustom(cmd *Command, out *bytes.Buffer, linkHandler func(string) string) { + name := cmd.CommandPath() + + short := cmd.Short + long := cmd.Long + if len(long) == 0 { + long = short + } + + fmt.Fprintf(out, "## %s\n\n", name) + fmt.Fprintf(out, "%s\n\n", short) + fmt.Fprintf(out, "### Synopsis\n\n") + fmt.Fprintf(out, "\n%s\n\n", long) + + if cmd.Runnable() { + fmt.Fprintf(out, "```\n%s\n```\n\n", cmd.UseLine()) + } + + if len(cmd.Example) > 0 { + fmt.Fprintf(out, "### Examples\n\n") + fmt.Fprintf(out, "```\n%s\n```\n\n", cmd.Example) + } + + printOptions(out, cmd, name) + + if len(cmd.Commands()) > 0 || cmd.HasParent() { + fmt.Fprintf(out, "### SEE ALSO\n") + if cmd.HasParent() { + parent := cmd.Parent() + pname := parent.CommandPath() + link := pname + ".md" + link = strings.Replace(link, " ", "_", -1) + fmt.Fprintf(out, "* [%s](%s)\t - %s\n", pname, linkHandler(link), parent.Short) + } + + children := cmd.Commands() + sort.Sort(byName(children)) + + for _, child := range children { + if len(child.Deprecated) > 0 { + continue + } + cname := name + " " + child.Name() + link := cname + ".md" + link = strings.Replace(link, " ", "_", -1) + fmt.Fprintf(out, "* [%s](%s)\t - %s\n", cname, linkHandler(link), child.Short) + } + fmt.Fprintf(out, "\n") + } + + fmt.Fprintf(out, "###### Auto generated by spf13/cobra at %s\n", time.Now().UTC()) +} + +func GenMarkdownTree(cmd *Command, dir string) { + identity := func(s string) string { return s } + emptyStr := func(s string) string { return "" } + GenMarkdownTreeCustom(cmd, dir, emptyStr, identity) +} + +func GenMarkdownTreeCustom(cmd *Command, dir string, filePrepender func(string) string, linkHandler func(string) string) { + for _, c := range cmd.Commands() { + GenMarkdownTreeCustom(c, dir, filePrepender, linkHandler) + } + out := new(bytes.Buffer) + + GenMarkdownCustom(cmd, out, linkHandler) + + filename := cmd.CommandPath() + filename = dir + strings.Replace(filename, " ", "_", -1) + ".md" + outFile, err := os.Create(filename) + if err != nil { + fmt.Println(err) + os.Exit(1) + } + defer outFile.Close() + _, err = outFile.WriteString(filePrepender(filename)) + if err != nil { + fmt.Println(err) + os.Exit(1) + } + _, err = outFile.Write(out.Bytes()) + if err != nil { + fmt.Println(err) + os.Exit(1) + } +} diff --git a/vendor/github.com/docker/distribution/vendor/github.com/spf13/cobra/md_docs.md b/vendor/github.com/docker/distribution/vendor/github.com/spf13/cobra/md_docs.md new file mode 100644 index 000000000..3a0d55ab9 --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/github.com/spf13/cobra/md_docs.md @@ -0,0 +1,81 @@ 
+# Generating Markdown Docs For Your Own cobra.Command + +## Generate markdown docs for the entire command tree + +This program can actually generate docs for the kubectl command in the kubernetes project + +```go +package main + +import ( + "io/ioutil" + "os" + + "github.com/GoogleCloudPlatform/kubernetes/pkg/kubectl/cmd" + "github.com/spf13/cobra" +) + +func main() { + kubectl := cmd.NewFactory(nil).NewKubectlCommand(os.Stdin, ioutil.Discard, ioutil.Discard) + cobra.GenMarkdownTree(kubectl, "./") +} +``` + +This will generate a whole series of files, one for each command in the tree, in the directory specified (in this case "./") + +## Generate markdown docs for a single command + +You may wish to have more control over the output, or only generate for a single command, instead of the entire command tree. If this is the case you may prefer to `GenMarkdown` instead of `GenMarkdownTree` + +```go + out := new(bytes.Buffer) + cobra.GenMarkdown(cmd, out) +``` + +This will write the markdown doc for ONLY "cmd" into the out, buffer. + +## Customize the output + +Both `GenMarkdown` and `GenMarkdownTree` have alternate versions with callbacks to get some control of the output: + +```go +func GenMarkdownTreeCustom(cmd *Command, dir string, filePrepender func(string) string, linkHandler func(string) string) { + //... +} +``` + +```go +func GenMarkdownCustom(cmd *Command, out *bytes.Buffer, linkHandler func(string) string) { + //... +} +``` + +The `filePrepender` will prepend the return value given the full filepath to the rendered Markdown file. A common use case is to add front matter to use the generated documentation with [Hugo](http://gohugo.io/): + +```go +const fmTemplate = `--- +date: %s +title: "%s" +slug: %s +url: %s +--- +` + +filePrepender := func(filename string) string { + now := time.Now().Format(time.RFC3339) + name := filepath.Base(filename) + base := strings.TrimSuffix(name, path.Ext(name)) + url := "/commands/" + strings.ToLower(base) + "/" + return fmt.Sprintf(fmTemplate, now, strings.Replace(base, "_", " ", -1), base, url) +} +``` + +The `linkHandler` can be used to customize the rendered internal links to the commands, given a filename: + +```go +linkHandler := func(name string) string { + base := strings.TrimSuffix(name, path.Ext(name)) + return "/commands/" + strings.ToLower(base) + "/" +} +``` + diff --git a/vendor/github.com/docker/distribution/vendor/github.com/spf13/pflag/.travis.yml b/vendor/github.com/docker/distribution/vendor/github.com/spf13/pflag/.travis.yml new file mode 100644 index 000000000..c4d88e374 --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/github.com/spf13/pflag/.travis.yml @@ -0,0 +1,8 @@ +sudo: false + +language: go + +go: + - 1.3 + - 1.4 + - tip diff --git a/vendor/github.com/docker/distribution/vendor/github.com/spf13/pflag/LICENSE b/vendor/github.com/docker/distribution/vendor/github.com/spf13/pflag/LICENSE new file mode 100644 index 000000000..63ed1cfea --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/github.com/spf13/pflag/LICENSE @@ -0,0 +1,28 @@ +Copyright (c) 2012 Alex Ogier. All rights reserved. +Copyright (c) 2012 The Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. 
+ * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/docker/distribution/vendor/github.com/spf13/pflag/README.md b/vendor/github.com/docker/distribution/vendor/github.com/spf13/pflag/README.md new file mode 100644 index 000000000..f7d63500f --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/github.com/spf13/pflag/README.md @@ -0,0 +1,191 @@ +[![Build Status](https://travis-ci.org/spf13/pflag.svg?branch=master)](https://travis-ci.org/spf13/pflag) + +## Description + +pflag is a drop-in replacement for Go's flag package, implementing +POSIX/GNU-style --flags. + +pflag is compatible with the [GNU extensions to the POSIX recommendations +for command-line options][1]. For a more precise description, see the +"Command-line flag syntax" section below. + +[1]: http://www.gnu.org/software/libc/manual/html_node/Argument-Syntax.html + +pflag is available under the same style of BSD license as the Go language, +which can be found in the LICENSE file. + +## Installation + +pflag is available using the standard `go get` command. + +Install by running: + + go get github.com/ogier/pflag + +Run tests by running: + + go test github.com/ogier/pflag + +## Usage + +pflag is a drop-in replacement of Go's native flag package. If you import +pflag under the name "flag" then all code should continue to function +with no changes. + +``` go +import flag "github.com/ogier/pflag" +``` + +There is one exception to this: if you directly instantiate the Flag struct +there is one more field "Shorthand" that you will need to set. +Most code never instantiates this struct directly, and instead uses +functions such as String(), BoolVar(), and Var(), and is therefore +unaffected. + +Define flags using flag.String(), Bool(), Int(), etc. + +This declares an integer flag, -flagname, stored in the pointer ip, with type *int. + +``` go +var ip *int = flag.Int("flagname", 1234, "help message for flagname") +``` + +If you like, you can bind the flag to a variable using the Var() functions. + +``` go +var flagvar int +func init() { + flag.IntVar(&flagvar, "flagname", 1234, "help message for flagname") +} +``` + +Or you can create custom flags that satisfy the Value interface (with +pointer receivers) and couple them to flag parsing by + +``` go +flag.Var(&flagVal, "name", "help message for flagname") +``` + +For such flags, the default value is just the initial value of the variable. 
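+
+A minimal sketch of such a custom flag follows, using the `github.com/ogier/pflag` import path shown above. The `listValue` type and the `items` flag name are illustrative, not part of pflag. Note that, unlike the standard library's `flag.Value`, pflag's `Value` interface also expects a `Type()` method:
+
+```go
+package main
+
+import (
+    "fmt"
+    "strings"
+
+    flag "github.com/ogier/pflag"
+)
+
+// listValue is an illustrative custom flag type that collects a
+// comma-separated list of strings.
+type listValue []string
+
+func (l *listValue) String() string { return strings.Join(*l, ",") }
+
+func (l *listValue) Set(s string) error {
+    *l = strings.Split(s, ",")
+    return nil
+}
+
+// Type is required by pflag's Value interface (it has no equivalent in
+// the standard library's flag.Value).
+func (l *listValue) Type() string { return "list" }
+
+func main() {
+    var items listValue
+    flag.Var(&items, "items", "comma-separated list of items")
+    flag.Parse()
+    fmt.Println("items:", []string(items))
+}
+```
+
+Running such a program with `--items=a,b,c` would populate `items` with three entries.
+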
+
+After all flags are defined, call
+
+``` go
+flag.Parse()
+```
+
+to parse the command line into the defined flags.
+
+Flags may then be used directly. If you're using the flags themselves,
+they are all pointers; if you bind to variables, they're values.
+
+``` go
+fmt.Println("ip has value ", *ip)
+fmt.Println("flagvar has value ", flagvar)
+```
+
+After parsing, the arguments after the flags are available as the
+slice flag.Args() or individually as flag.Arg(i).
+The arguments are indexed from 0 through flag.NArg()-1.
+
+The pflag package also defines some new functions that are not in flag,
+that give one-letter shorthands for flags. You can use these by appending
+'P' to the name of any function that defines a flag.
+
+``` go
+var ip = flag.IntP("flagname", "f", 1234, "help message")
+var flagvar bool
+func init() {
+	flag.BoolVarP(&flagvar, "boolname", "b", true, "help message")
+}
+flag.VarP(&flagVal, "varname", "v", "help message")
+```
+
+Shorthand letters can be used with single dashes on the command line.
+Boolean shorthand flags can be combined with other shorthand flags.
+
+The default set of command-line flags is controlled by
+top-level functions. The FlagSet type allows one to define
+independent sets of flags, such as to implement subcommands
+in a command-line interface. The methods of FlagSet are
+analogous to the top-level functions for the command-line
+flag set.
+
+## Command line flag syntax
+
+```
+--flag    // boolean flags only
+--flag=x
+```
+
+Unlike the flag package, a single dash before an option means something
+different than a double dash. Single dashes signify a series of shorthand
+letters for flags. All but the last shorthand letter must be boolean flags.
+
+```
+// boolean flags
+-f
+-abc
+
+// non-boolean flags
+-n 1234
+-Ifile
+
+// mixed
+-abcs "hello"
+-abcn1234
+```
+
+Flag parsing stops after the terminator "--". Unlike the flag package,
+flags can be interspersed with arguments anywhere on the command line
+before this terminator.
+
+Integer flags accept 1234, 0664, 0x1234 and may be negative.
+Boolean flags (in their long form) accept 1, 0, t, f, true, false,
+TRUE, FALSE, True, False.
+Duration flags accept any input valid for time.ParseDuration.
+
+## Mutating or "Normalizing" Flag names
+
+It is possible to set a custom flag name 'normalization function.' It allows flag names to be mutated both when created in the code and when used on the command line to some 'normalized' form. The 'normalized' form is used for comparison. Two examples of using the custom normalization func follow.
+
+**Example #1**: You want -, _, and . in flags to compare the same. aka --my-flag == --my_flag == --my.flag
+
+```go
+func wordSepNormalizeFunc(f *pflag.FlagSet, name string) pflag.NormalizedName {
+	from := []string{"-", "_"}
+	to := "."
+	for _, sep := range from {
+		name = strings.Replace(name, sep, to, -1)
+	}
+	return pflag.NormalizedName(name)
+}
+
+myFlagSet.SetNormalizeFunc(wordSepNormalizeFunc)
+```
+
+**Example #2**: You want to alias two flags.
aka --old-flag-name == --new-flag-name + +```go +func aliasNormalizeFunc(f *pflag.FlagSet, name string) pflag.NormalizedName { + switch name { + case "old-flag-name": + name = "new-flag-name" + break + } + return pflag.NormalizedName(name) +} + +myFlagSet.SetNormalizeFunc(aliasNormalizeFunc) +``` + +## More info + +You can see the full reference documentation of the pflag package +[at godoc.org][3], or through go's standard documentation system by +running `godoc -http=:6060` and browsing to +[http://localhost:6060/pkg/github.com/ogier/pflag][2] after +installation. + +[2]: http://localhost:6060/pkg/github.com/ogier/pflag +[3]: http://godoc.org/github.com/ogier/pflag diff --git a/vendor/github.com/docker/distribution/vendor/github.com/spf13/pflag/bool.go b/vendor/github.com/docker/distribution/vendor/github.com/spf13/pflag/bool.go new file mode 100644 index 000000000..70e2e0a6b --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/github.com/spf13/pflag/bool.go @@ -0,0 +1,83 @@ +package pflag + +import ( + "fmt" + "strconv" +) + +// optional interface to indicate boolean flags that can be +// supplied without "=value" text +type boolFlag interface { + Value + IsBoolFlag() bool +} + +// -- bool Value +type boolValue bool + +func newBoolValue(val bool, p *bool) *boolValue { + *p = val + return (*boolValue)(p) +} + +func (b *boolValue) Set(s string) error { + v, err := strconv.ParseBool(s) + *b = boolValue(v) + return err +} + +func (b *boolValue) Type() string { + return "bool" +} + +func (b *boolValue) String() string { return fmt.Sprintf("%v", *b) } + +func (b *boolValue) IsBoolFlag() bool { return true } + +// BoolVar defines a bool flag with specified name, default value, and usage string. +// The argument p points to a bool variable in which to store the value of the flag. +func (f *FlagSet) BoolVar(p *bool, name string, value bool, usage string) { + f.VarP(newBoolValue(value, p), name, "", usage) +} + +// Like BoolVar, but accepts a shorthand letter that can be used after a single dash. +func (f *FlagSet) BoolVarP(p *bool, name, shorthand string, value bool, usage string) { + f.VarP(newBoolValue(value, p), name, shorthand, usage) +} + +// BoolVar defines a bool flag with specified name, default value, and usage string. +// The argument p points to a bool variable in which to store the value of the flag. +func BoolVar(p *bool, name string, value bool, usage string) { + CommandLine.VarP(newBoolValue(value, p), name, "", usage) +} + +// Like BoolVar, but accepts a shorthand letter that can be used after a single dash. +func BoolVarP(p *bool, name, shorthand string, value bool, usage string) { + CommandLine.VarP(newBoolValue(value, p), name, shorthand, usage) +} + +// Bool defines a bool flag with specified name, default value, and usage string. +// The return value is the address of a bool variable that stores the value of the flag. +func (f *FlagSet) Bool(name string, value bool, usage string) *bool { + p := new(bool) + f.BoolVarP(p, name, "", value, usage) + return p +} + +// Like Bool, but accepts a shorthand letter that can be used after a single dash. +func (f *FlagSet) BoolP(name, shorthand string, value bool, usage string) *bool { + p := new(bool) + f.BoolVarP(p, name, shorthand, value, usage) + return p +} + +// Bool defines a bool flag with specified name, default value, and usage string. +// The return value is the address of a bool variable that stores the value of the flag. 
+func Bool(name string, value bool, usage string) *bool { + return CommandLine.BoolP(name, "", value, usage) +} + +// Like Bool, but accepts a shorthand letter that can be used after a single dash. +func BoolP(name, shorthand string, value bool, usage string) *bool { + return CommandLine.BoolP(name, shorthand, value, usage) +} diff --git a/vendor/github.com/docker/distribution/vendor/github.com/spf13/pflag/duration.go b/vendor/github.com/docker/distribution/vendor/github.com/spf13/pflag/duration.go new file mode 100644 index 000000000..66ed7ac93 --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/github.com/spf13/pflag/duration.go @@ -0,0 +1,71 @@ +package pflag + +import "time" + +// -- time.Duration Value +type durationValue time.Duration + +func newDurationValue(val time.Duration, p *time.Duration) *durationValue { + *p = val + return (*durationValue)(p) +} + +func (d *durationValue) Set(s string) error { + v, err := time.ParseDuration(s) + *d = durationValue(v) + return err +} + +func (d *durationValue) Type() string { + return "duration" +} + +func (d *durationValue) String() string { return (*time.Duration)(d).String() } + +// DurationVar defines a time.Duration flag with specified name, default value, and usage string. +// The argument p points to a time.Duration variable in which to store the value of the flag. +func (f *FlagSet) DurationVar(p *time.Duration, name string, value time.Duration, usage string) { + f.VarP(newDurationValue(value, p), name, "", usage) +} + +// Like DurationVar, but accepts a shorthand letter that can be used after a single dash. +func (f *FlagSet) DurationVarP(p *time.Duration, name, shorthand string, value time.Duration, usage string) { + f.VarP(newDurationValue(value, p), name, shorthand, usage) +} + +// DurationVar defines a time.Duration flag with specified name, default value, and usage string. +// The argument p points to a time.Duration variable in which to store the value of the flag. +func DurationVar(p *time.Duration, name string, value time.Duration, usage string) { + CommandLine.VarP(newDurationValue(value, p), name, "", usage) +} + +// Like DurationVar, but accepts a shorthand letter that can be used after a single dash. +func DurationVarP(p *time.Duration, name, shorthand string, value time.Duration, usage string) { + CommandLine.VarP(newDurationValue(value, p), name, shorthand, usage) +} + +// Duration defines a time.Duration flag with specified name, default value, and usage string. +// The return value is the address of a time.Duration variable that stores the value of the flag. +func (f *FlagSet) Duration(name string, value time.Duration, usage string) *time.Duration { + p := new(time.Duration) + f.DurationVarP(p, name, "", value, usage) + return p +} + +// Like Duration, but accepts a shorthand letter that can be used after a single dash. +func (f *FlagSet) DurationP(name, shorthand string, value time.Duration, usage string) *time.Duration { + p := new(time.Duration) + f.DurationVarP(p, name, shorthand, value, usage) + return p +} + +// Duration defines a time.Duration flag with specified name, default value, and usage string. +// The return value is the address of a time.Duration variable that stores the value of the flag. +func Duration(name string, value time.Duration, usage string) *time.Duration { + return CommandLine.DurationP(name, "", value, usage) +} + +// Like Duration, but accepts a shorthand letter that can be used after a single dash. 
+func DurationP(name, shorthand string, value time.Duration, usage string) *time.Duration {
+	return CommandLine.DurationP(name, shorthand, value, usage)
+}
diff --git a/vendor/github.com/docker/distribution/vendor/github.com/spf13/pflag/flag.go b/vendor/github.com/docker/distribution/vendor/github.com/spf13/pflag/flag.go
new file mode 100644
index 000000000..0070b93ee
--- /dev/null
+++ b/vendor/github.com/docker/distribution/vendor/github.com/spf13/pflag/flag.go
@@ -0,0 +1,695 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+/*
+	pflag is a drop-in replacement for Go's flag package, implementing
+	POSIX/GNU-style --flags.
+
+	pflag is compatible with the GNU extensions to the POSIX recommendations
+	for command-line options. See
+	http://www.gnu.org/software/libc/manual/html_node/Argument-Syntax.html
+
+	Usage:
+
+	pflag is a drop-in replacement of Go's native flag package. If you import
+	pflag under the name "flag" then all code should continue to function
+	with no changes.
+
+	import flag "github.com/ogier/pflag"
+
+	There is one exception to this: if you directly instantiate the Flag struct
+	there is one more field "Shorthand" that you will need to set.
+	Most code never instantiates this struct directly, and instead uses
+	functions such as String(), BoolVar(), and Var(), and is therefore
+	unaffected.
+
+	Define flags using flag.String(), Bool(), Int(), etc.
+
+	This declares an integer flag, -flagname, stored in the pointer ip, with type *int.
+		var ip = flag.Int("flagname", 1234, "help message for flagname")
+	If you like, you can bind the flag to a variable using the Var() functions.
+		var flagvar int
+		func init() {
+			flag.IntVar(&flagvar, "flagname", 1234, "help message for flagname")
+		}
+	Or you can create custom flags that satisfy the Value interface (with
+	pointer receivers) and couple them to flag parsing by
+		flag.Var(&flagVal, "name", "help message for flagname")
+	For such flags, the default value is just the initial value of the variable.
+
+	After all flags are defined, call
+		flag.Parse()
+	to parse the command line into the defined flags.
+
+	Flags may then be used directly. If you're using the flags themselves,
+	they are all pointers; if you bind to variables, they're values.
+		fmt.Println("ip has value ", *ip)
+		fmt.Println("flagvar has value ", flagvar)
+
+	After parsing, the arguments after the flags are available as the
+	slice flag.Args() or individually as flag.Arg(i).
+	The arguments are indexed from 0 through flag.NArg()-1.
+
+	The pflag package also defines some new functions that are not in flag,
+	that give one-letter shorthands for flags. You can use these by appending
+	'P' to the name of any function that defines a flag.
+		var ip = flag.IntP("flagname", "f", 1234, "help message")
+		var flagvar bool
+		func init() {
+			flag.BoolVarP(&flagvar, "boolname", "b", true, "help message")
+		}
+		flag.VarP(&flagVal, "varname", "v", "help message")
+	Shorthand letters can be used with single dashes on the command line.
+	Boolean shorthand flags can be combined with other shorthand flags.
+
+	Command line flag syntax:
+		--flag    // boolean flags only
+		--flag=x
+
+	Unlike the flag package, a single dash before an option means something
+	different than a double dash. Single dashes signify a series of shorthand
+	letters for flags. All but the last shorthand letter must be boolean flags.
+ // boolean flags + -f + -abc + // non-boolean flags + -n 1234 + -Ifile + // mixed + -abcs "hello" + -abcn1234 + + Flag parsing stops after the terminator "--". Unlike the flag package, + flags can be interspersed with arguments anywhere on the command line + before this terminator. + + Integer flags accept 1234, 0664, 0x1234 and may be negative. + Boolean flags (in their long form) accept 1, 0, t, f, true, false, + TRUE, FALSE, True, False. + Duration flags accept any input valid for time.ParseDuration. + + The default set of command-line flags is controlled by + top-level functions. The FlagSet type allows one to define + independent sets of flags, such as to implement subcommands + in a command-line interface. The methods of FlagSet are + analogous to the top-level functions for the command-line + flag set. +*/ +package pflag + +import ( + "bytes" + "errors" + "fmt" + "io" + "os" + "sort" + "strings" +) + +// ErrHelp is the error returned if the flag -help is invoked but no such flag is defined. +var ErrHelp = errors.New("pflag: help requested") + +// ErrorHandling defines how to handle flag parsing errors. +type ErrorHandling int + +const ( + ContinueOnError ErrorHandling = iota + ExitOnError + PanicOnError +) + +// NormalizedName is a flag name that has been normalized according to rules +// for the FlagSet (e.g. making '-' and '_' equivalent). +type NormalizedName string + +// A FlagSet represents a set of defined flags. +type FlagSet struct { + // Usage is the function called when an error occurs while parsing flags. + // The field is a function (not a method) that may be changed to point to + // a custom error handler. + Usage func() + + name string + parsed bool + actual map[NormalizedName]*Flag + formal map[NormalizedName]*Flag + shorthands map[byte]*Flag + args []string // arguments after flags + exitOnError bool // does the program exit if there's an error? + errorHandling ErrorHandling + output io.Writer // nil means stderr; use out() accessor + interspersed bool // allow interspersed option/non-option args + normalizeNameFunc func(f *FlagSet, name string) NormalizedName +} + +// A Flag represents the state of a flag. +type Flag struct { + Name string // name as it appears on command line + Shorthand string // one-letter abbreviated flag + Usage string // help message + Value Value // value as set + DefValue string // default value (as text); for usage message + Changed bool // If the user set the value (or if left to default) + Deprecated string // If this flag is deprecated, this string is the new or now thing to use + Annotations map[string][]string // used by cobra.Command bash autocomple code +} + +// Value is the interface to the dynamic value stored in a flag. +// (The default value is represented as a string.) +type Value interface { + String() string + Set(string) error + Type() string +} + +// sortFlags returns the flags as a slice in lexicographical sorted order. 
+func sortFlags(flags map[NormalizedName]*Flag) []*Flag { + list := make(sort.StringSlice, len(flags)) + i := 0 + for k := range flags { + list[i] = string(k) + i++ + } + list.Sort() + result := make([]*Flag, len(list)) + for i, name := range list { + result[i] = flags[NormalizedName(name)] + } + return result +} + +func (f *FlagSet) SetNormalizeFunc(n func(f *FlagSet, name string) NormalizedName) { + f.normalizeNameFunc = n + for k, v := range f.formal { + delete(f.formal, k) + nname := f.normalizeFlagName(string(k)) + f.formal[nname] = v + v.Name = string(nname) + } +} + +func (f *FlagSet) GetNormalizeFunc() func(f *FlagSet, name string) NormalizedName { + if f.normalizeNameFunc != nil { + return f.normalizeNameFunc + } + return func(f *FlagSet, name string) NormalizedName { return NormalizedName(name) } +} + +func (f *FlagSet) normalizeFlagName(name string) NormalizedName { + n := f.GetNormalizeFunc() + return n(f, name) +} + +func (f *FlagSet) out() io.Writer { + if f.output == nil { + return os.Stderr + } + return f.output +} + +// SetOutput sets the destination for usage and error messages. +// If output is nil, os.Stderr is used. +func (f *FlagSet) SetOutput(output io.Writer) { + f.output = output +} + +// VisitAll visits the flags in lexicographical order, calling fn for each. +// It visits all flags, even those not set. +func (f *FlagSet) VisitAll(fn func(*Flag)) { + for _, flag := range sortFlags(f.formal) { + fn(flag) + } +} + +func (f *FlagSet) HasFlags() bool { + return len(f.formal) > 0 +} + +// VisitAll visits the command-line flags in lexicographical order, calling +// fn for each. It visits all flags, even those not set. +func VisitAll(fn func(*Flag)) { + CommandLine.VisitAll(fn) +} + +// Visit visits the flags in lexicographical order, calling fn for each. +// It visits only those flags that have been set. +func (f *FlagSet) Visit(fn func(*Flag)) { + for _, flag := range sortFlags(f.actual) { + fn(flag) + } +} + +// Visit visits the command-line flags in lexicographical order, calling fn +// for each. It visits only those flags that have been set. +func Visit(fn func(*Flag)) { + CommandLine.Visit(fn) +} + +// Lookup returns the Flag structure of the named flag, returning nil if none exists. +func (f *FlagSet) Lookup(name string) *Flag { + return f.lookup(f.normalizeFlagName(name)) +} + +// lookup returns the Flag structure of the named flag, returning nil if none exists. +func (f *FlagSet) lookup(name NormalizedName) *Flag { + return f.formal[name] +} + +// Mark a flag deprecated in your program +func (f *FlagSet) MarkDeprecated(name string, usageMessage string) error { + flag := f.Lookup(name) + if flag == nil { + return fmt.Errorf("flag %q does not exist", name) + } + flag.Deprecated = usageMessage + return nil +} + +// Lookup returns the Flag structure of the named command-line flag, +// returning nil if none exists. +func Lookup(name string) *Flag { + return CommandLine.Lookup(name) +} + +// Set sets the value of the named flag. 
+func (f *FlagSet) Set(name, value string) error { + normalName := f.normalizeFlagName(name) + flag, ok := f.formal[normalName] + if !ok { + return fmt.Errorf("no such flag -%v", name) + } + err := flag.Value.Set(value) + if err != nil { + return err + } + if f.actual == nil { + f.actual = make(map[NormalizedName]*Flag) + } + f.actual[normalName] = flag + flag.Changed = true + if len(flag.Deprecated) > 0 { + fmt.Fprintf(os.Stderr, "Flag --%s has been deprecated, %s\n", flag.Name, flag.Deprecated) + } + return nil +} + +// Set sets the value of the named command-line flag. +func Set(name, value string) error { + return CommandLine.Set(name, value) +} + +// PrintDefaults prints, to standard error unless configured +// otherwise, the default values of all defined flags in the set. +func (f *FlagSet) PrintDefaults() { + f.VisitAll(func(flag *Flag) { + if len(flag.Deprecated) > 0 { + return + } + format := "--%s=%s: %s\n" + if _, ok := flag.Value.(*stringValue); ok { + // put quotes on the value + format = "--%s=%q: %s\n" + } + if len(flag.Shorthand) > 0 { + format = " -%s, " + format + } else { + format = " %s " + format + } + fmt.Fprintf(f.out(), format, flag.Shorthand, flag.Name, flag.DefValue, flag.Usage) + }) +} + +func (f *FlagSet) FlagUsages() string { + x := new(bytes.Buffer) + + f.VisitAll(func(flag *Flag) { + if len(flag.Deprecated) > 0 { + return + } + format := "--%s=%s: %s\n" + if _, ok := flag.Value.(*stringValue); ok { + // put quotes on the value + format = "--%s=%q: %s\n" + } + if len(flag.Shorthand) > 0 { + format = " -%s, " + format + } else { + format = " %s " + format + } + fmt.Fprintf(x, format, flag.Shorthand, flag.Name, flag.DefValue, flag.Usage) + }) + + return x.String() +} + +// PrintDefaults prints to standard error the default values of all defined command-line flags. +func PrintDefaults() { + CommandLine.PrintDefaults() +} + +// defaultUsage is the default function to print a usage message. +func defaultUsage(f *FlagSet) { + fmt.Fprintf(f.out(), "Usage of %s:\n", f.name) + f.PrintDefaults() +} + +// NOTE: Usage is not just defaultUsage(CommandLine) +// because it serves (via godoc flag Usage) as the example +// for how to write your own usage function. + +// Usage prints to standard error a usage message documenting all defined command-line flags. +// The function is a variable that may be changed to point to a custom function. +var Usage = func() { + fmt.Fprintf(os.Stderr, "Usage of %s:\n", os.Args[0]) + PrintDefaults() +} + +// NFlag returns the number of flags that have been set. +func (f *FlagSet) NFlag() int { return len(f.actual) } + +// NFlag returns the number of command-line flags that have been set. +func NFlag() int { return len(CommandLine.actual) } + +// Arg returns the i'th argument. Arg(0) is the first remaining argument +// after flags have been processed. +func (f *FlagSet) Arg(i int) string { + if i < 0 || i >= len(f.args) { + return "" + } + return f.args[i] +} + +// Arg returns the i'th command-line argument. Arg(0) is the first remaining argument +// after flags have been processed. +func Arg(i int) string { + return CommandLine.Arg(i) +} + +// NArg is the number of arguments remaining after flags have been processed. +func (f *FlagSet) NArg() int { return len(f.args) } + +// NArg is the number of arguments remaining after flags have been processed. +func NArg() int { return len(CommandLine.args) } + +// Args returns the non-flag arguments. 
+func (f *FlagSet) Args() []string { return f.args } + +// Args returns the non-flag command-line arguments. +func Args() []string { return CommandLine.args } + +// Var defines a flag with the specified name and usage string. The type and +// value of the flag are represented by the first argument, of type Value, which +// typically holds a user-defined implementation of Value. For instance, the +// caller could create a flag that turns a comma-separated string into a slice +// of strings by giving the slice the methods of Value; in particular, Set would +// decompose the comma-separated string into the slice. +func (f *FlagSet) Var(value Value, name string, usage string) { + f.VarP(value, name, "", usage) +} + +// Like Var, but accepts a shorthand letter that can be used after a single dash. +func (f *FlagSet) VarP(value Value, name, shorthand, usage string) { + // Remember the default value as a string; it won't change. + flag := &Flag{ + Name: name, + Shorthand: shorthand, + Usage: usage, + Value: value, + DefValue: value.String(), + } + f.AddFlag(flag) +} + +func (f *FlagSet) AddFlag(flag *Flag) { + // Call normalizeFlagName function only once + var normalizedFlagName NormalizedName = f.normalizeFlagName(flag.Name) + + _, alreadythere := f.formal[normalizedFlagName] + if alreadythere { + msg := fmt.Sprintf("%s flag redefined: %s", f.name, flag.Name) + fmt.Fprintln(f.out(), msg) + panic(msg) // Happens only if flags are declared with identical names + } + if f.formal == nil { + f.formal = make(map[NormalizedName]*Flag) + } + + flag.Name = string(normalizedFlagName) + f.formal[normalizedFlagName] = flag + + if len(flag.Shorthand) == 0 { + return + } + if len(flag.Shorthand) > 1 { + fmt.Fprintf(f.out(), "%s shorthand more than ASCII character: %s\n", f.name, flag.Shorthand) + panic("shorthand is more than one character") + } + if f.shorthands == nil { + f.shorthands = make(map[byte]*Flag) + } + c := flag.Shorthand[0] + old, alreadythere := f.shorthands[c] + if alreadythere { + fmt.Fprintf(f.out(), "%s shorthand reused: %q for %s already used for %s\n", f.name, c, flag.Name, old.Name) + panic("shorthand redefinition") + } + f.shorthands[c] = flag +} + +// Var defines a flag with the specified name and usage string. The type and +// value of the flag are represented by the first argument, of type Value, which +// typically holds a user-defined implementation of Value. For instance, the +// caller could create a flag that turns a comma-separated string into a slice +// of strings by giving the slice the methods of Value; in particular, Set would +// decompose the comma-separated string into the slice. +func Var(value Value, name string, usage string) { + CommandLine.VarP(value, name, "", usage) +} + +// Like Var, but accepts a shorthand letter that can be used after a single dash. +func VarP(value Value, name, shorthand, usage string) { + CommandLine.VarP(value, name, shorthand, usage) +} + +// failf prints to standard error a formatted error and usage message and +// returns the error. +func (f *FlagSet) failf(format string, a ...interface{}) error { + err := fmt.Errorf(format, a...) + fmt.Fprintln(f.out(), err) + f.usage() + return err +} + +// usage calls the Usage method for the flag set, or the usage function if +// the flag set is CommandLine. 
+func (f *FlagSet) usage() { + if f == CommandLine { + Usage() + } else if f.Usage == nil { + defaultUsage(f) + } else { + f.Usage() + } +} + +func (f *FlagSet) setFlag(flag *Flag, value string, origArg string) error { + if err := flag.Value.Set(value); err != nil { + return f.failf("invalid argument %q for %s: %v", value, origArg, err) + } + // mark as visited for Visit() + if f.actual == nil { + f.actual = make(map[NormalizedName]*Flag) + } + f.actual[f.normalizeFlagName(flag.Name)] = flag + flag.Changed = true + if len(flag.Deprecated) > 0 { + fmt.Fprintf(os.Stderr, "Flag --%s has been deprecated, %s\n", flag.Name, flag.Deprecated) + } + return nil +} + +func (f *FlagSet) parseLongArg(s string, args []string) (a []string, err error) { + a = args + name := s[2:] + if len(name) == 0 || name[0] == '-' || name[0] == '=' { + err = f.failf("bad flag syntax: %s", s) + return + } + split := strings.SplitN(name, "=", 2) + name = split[0] + flag, alreadythere := f.formal[f.normalizeFlagName(name)] + if !alreadythere { + if name == "help" { // special case for nice help message. + f.usage() + return a, ErrHelp + } + err = f.failf("unknown flag: --%s", name) + return + } + var value string + if len(split) == 2 { + // '--flag=arg' + value = split[1] + } else if bv, ok := flag.Value.(boolFlag); ok && bv.IsBoolFlag() { + // '--flag' (where flag is a bool) + value = "true" + } else { + // '--flag' (where flag was not a bool) + err = f.failf("flag needs an argument: %s", s) + return + } + err = f.setFlag(flag, value, s) + return +} + +func (f *FlagSet) parseSingleShortArg(shorthands string, args []string) (outShorts string, outArgs []string, err error) { + outArgs = args + outShorts = shorthands[1:] + c := shorthands[0] + + flag, alreadythere := f.shorthands[c] + if !alreadythere { + if c == 'h' { // special case for nice help message. + f.usage() + err = ErrHelp + return + } + //TODO continue on error + err = f.failf("unknown shorthand flag: %q in -%s", c, shorthands) + return + } + var value string + if len(shorthands) > 2 && shorthands[1] == '=' { + value = shorthands[2:] + outShorts = "" + } else if bv, ok := flag.Value.(boolFlag); ok && bv.IsBoolFlag() { + value = "true" + } else if len(shorthands) > 1 { + value = shorthands[1:] + outShorts = "" + } else if len(args) > 0 { + value = args[0] + outArgs = args[1:] + } else { + err = f.failf("flag needs an argument: %q in -%s", c, shorthands) + return + } + err = f.setFlag(flag, value, shorthands) + return +} + +func (f *FlagSet) parseShortArg(s string, args []string) (a []string, err error) { + a = args + shorthands := s[1:] + + for len(shorthands) > 0 { + shorthands, a, err = f.parseSingleShortArg(shorthands, args) + if err != nil { + return + } + } + + return +} + +func (f *FlagSet) parseArgs(args []string) (err error) { + for len(args) > 0 { + s := args[0] + args = args[1:] + if len(s) == 0 || s[0] != '-' || len(s) == 1 { + if !f.interspersed { + f.args = append(f.args, s) + f.args = append(f.args, args...) + return nil + } + f.args = append(f.args, s) + continue + } + + if s[1] == '-' { + if len(s) == 2 { // "--" terminates the flags + f.args = append(f.args, args...) + break + } + args, err = f.parseLongArg(s, args) + } else { + args, err = f.parseShortArg(s, args) + } + if err != nil { + return + } + } + return +} + +// Parse parses flag definitions from the argument list, which should not +// include the command name. Must be called after all flags in the FlagSet +// are defined and before flags are accessed by the program. 
+// The return value will be ErrHelp if -help was set but not defined. +func (f *FlagSet) Parse(arguments []string) error { + f.parsed = true + f.args = make([]string, 0, len(arguments)) + err := f.parseArgs(arguments) + if err != nil { + switch f.errorHandling { + case ContinueOnError: + return err + case ExitOnError: + os.Exit(2) + case PanicOnError: + panic(err) + } + } + return nil +} + +// Parsed reports whether f.Parse has been called. +func (f *FlagSet) Parsed() bool { + return f.parsed +} + +// Parse parses the command-line flags from os.Args[1:]. Must be called +// after all flags are defined and before flags are accessed by the program. +func Parse() { + // Ignore errors; CommandLine is set for ExitOnError. + CommandLine.Parse(os.Args[1:]) +} + +// Whether to support interspersed option/non-option arguments. +func SetInterspersed(interspersed bool) { + CommandLine.SetInterspersed(interspersed) +} + +// Parsed returns true if the command-line flags have been parsed. +func Parsed() bool { + return CommandLine.Parsed() +} + +// The default set of command-line flags, parsed from os.Args. +var CommandLine = NewFlagSet(os.Args[0], ExitOnError) + +// NewFlagSet returns a new, empty flag set with the specified name and +// error handling property. +func NewFlagSet(name string, errorHandling ErrorHandling) *FlagSet { + f := &FlagSet{ + name: name, + errorHandling: errorHandling, + interspersed: true, + } + return f +} + +// Whether to support interspersed option/non-option arguments. +func (f *FlagSet) SetInterspersed(interspersed bool) { + f.interspersed = interspersed +} + +// Init sets the name and error handling property for a flag set. +// By default, the zero FlagSet uses an empty name and the +// ContinueOnError error handling policy. +func (f *FlagSet) Init(name string, errorHandling ErrorHandling) { + f.name = name + f.errorHandling = errorHandling +} diff --git a/vendor/github.com/docker/distribution/vendor/github.com/spf13/pflag/float32.go b/vendor/github.com/docker/distribution/vendor/github.com/spf13/pflag/float32.go new file mode 100644 index 000000000..b7ad67d9a --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/github.com/spf13/pflag/float32.go @@ -0,0 +1,74 @@ +package pflag + +import ( + "fmt" + "strconv" +) + +// -- float32 Value +type float32Value float32 + +func newFloat32Value(val float32, p *float32) *float32Value { + *p = val + return (*float32Value)(p) +} + +func (f *float32Value) Set(s string) error { + v, err := strconv.ParseFloat(s, 32) + *f = float32Value(v) + return err +} + +func (f *float32Value) Type() string { + return "float32" +} + +func (f *float32Value) String() string { return fmt.Sprintf("%v", *f) } + +// Float32Var defines a float32 flag with specified name, default value, and usage string. +// The argument p points to a float32 variable in which to store the value of the flag. +func (f *FlagSet) Float32Var(p *float32, name string, value float32, usage string) { + f.VarP(newFloat32Value(value, p), name, "", usage) +} + +// Like Float32Var, but accepts a shorthand letter that can be used after a single dash. +func (f *FlagSet) Float32VarP(p *float32, name, shorthand string, value float32, usage string) { + f.VarP(newFloat32Value(value, p), name, shorthand, usage) +} + +// Float32Var defines a float32 flag with specified name, default value, and usage string. +// The argument p points to a float32 variable in which to store the value of the flag. 
+func Float32Var(p *float32, name string, value float32, usage string) { + CommandLine.VarP(newFloat32Value(value, p), name, "", usage) +} + +// Like Float32Var, but accepts a shorthand letter that can be used after a single dash. +func Float32VarP(p *float32, name, shorthand string, value float32, usage string) { + CommandLine.VarP(newFloat32Value(value, p), name, shorthand, usage) +} + +// Float32 defines a float32 flag with specified name, default value, and usage string. +// The return value is the address of a float32 variable that stores the value of the flag. +func (f *FlagSet) Float32(name string, value float32, usage string) *float32 { + p := new(float32) + f.Float32VarP(p, name, "", value, usage) + return p +} + +// Like Float32, but accepts a shorthand letter that can be used after a single dash. +func (f *FlagSet) Float32P(name, shorthand string, value float32, usage string) *float32 { + p := new(float32) + f.Float32VarP(p, name, shorthand, value, usage) + return p +} + +// Float32 defines a float32 flag with specified name, default value, and usage string. +// The return value is the address of a float32 variable that stores the value of the flag. +func Float32(name string, value float32, usage string) *float32 { + return CommandLine.Float32P(name, "", value, usage) +} + +// Like Float32, but accepts a shorthand letter that can be used after a single dash. +func Float32P(name, shorthand string, value float32, usage string) *float32 { + return CommandLine.Float32P(name, shorthand, value, usage) +} diff --git a/vendor/github.com/docker/distribution/vendor/github.com/spf13/pflag/float64.go b/vendor/github.com/docker/distribution/vendor/github.com/spf13/pflag/float64.go new file mode 100644 index 000000000..031551233 --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/github.com/spf13/pflag/float64.go @@ -0,0 +1,74 @@ +package pflag + +import ( + "fmt" + "strconv" +) + +// -- float64 Value +type float64Value float64 + +func newFloat64Value(val float64, p *float64) *float64Value { + *p = val + return (*float64Value)(p) +} + +func (f *float64Value) Set(s string) error { + v, err := strconv.ParseFloat(s, 64) + *f = float64Value(v) + return err +} + +func (f *float64Value) Type() string { + return "float64" +} + +func (f *float64Value) String() string { return fmt.Sprintf("%v", *f) } + +// Float64Var defines a float64 flag with specified name, default value, and usage string. +// The argument p points to a float64 variable in which to store the value of the flag. +func (f *FlagSet) Float64Var(p *float64, name string, value float64, usage string) { + f.VarP(newFloat64Value(value, p), name, "", usage) +} + +// Like Float64Var, but accepts a shorthand letter that can be used after a single dash. +func (f *FlagSet) Float64VarP(p *float64, name, shorthand string, value float64, usage string) { + f.VarP(newFloat64Value(value, p), name, shorthand, usage) +} + +// Float64Var defines a float64 flag with specified name, default value, and usage string. +// The argument p points to a float64 variable in which to store the value of the flag. +func Float64Var(p *float64, name string, value float64, usage string) { + CommandLine.VarP(newFloat64Value(value, p), name, "", usage) +} + +// Like Float64Var, but accepts a shorthand letter that can be used after a single dash. 
+func Float64VarP(p *float64, name, shorthand string, value float64, usage string) { + CommandLine.VarP(newFloat64Value(value, p), name, shorthand, usage) +} + +// Float64 defines a float64 flag with specified name, default value, and usage string. +// The return value is the address of a float64 variable that stores the value of the flag. +func (f *FlagSet) Float64(name string, value float64, usage string) *float64 { + p := new(float64) + f.Float64VarP(p, name, "", value, usage) + return p +} + +// Like Float64, but accepts a shorthand letter that can be used after a single dash. +func (f *FlagSet) Float64P(name, shorthand string, value float64, usage string) *float64 { + p := new(float64) + f.Float64VarP(p, name, shorthand, value, usage) + return p +} + +// Float64 defines a float64 flag with specified name, default value, and usage string. +// The return value is the address of a float64 variable that stores the value of the flag. +func Float64(name string, value float64, usage string) *float64 { + return CommandLine.Float64P(name, "", value, usage) +} + +// Like Float64, but accepts a shorthand letter that can be used after a single dash. +func Float64P(name, shorthand string, value float64, usage string) *float64 { + return CommandLine.Float64P(name, shorthand, value, usage) +} diff --git a/vendor/github.com/docker/distribution/vendor/github.com/spf13/pflag/int.go b/vendor/github.com/docker/distribution/vendor/github.com/spf13/pflag/int.go new file mode 100644 index 000000000..dca9da6e6 --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/github.com/spf13/pflag/int.go @@ -0,0 +1,74 @@ +package pflag + +import ( + "fmt" + "strconv" +) + +// -- int Value +type intValue int + +func newIntValue(val int, p *int) *intValue { + *p = val + return (*intValue)(p) +} + +func (i *intValue) Set(s string) error { + v, err := strconv.ParseInt(s, 0, 64) + *i = intValue(v) + return err +} + +func (i *intValue) Type() string { + return "int" +} + +func (i *intValue) String() string { return fmt.Sprintf("%v", *i) } + +// IntVar defines an int flag with specified name, default value, and usage string. +// The argument p points to an int variable in which to store the value of the flag. +func (f *FlagSet) IntVar(p *int, name string, value int, usage string) { + f.VarP(newIntValue(value, p), name, "", usage) +} + +// Like IntVar, but accepts a shorthand letter that can be used after a single dash. +func (f *FlagSet) IntVarP(p *int, name, shorthand string, value int, usage string) { + f.VarP(newIntValue(value, p), name, shorthand, usage) +} + +// IntVar defines an int flag with specified name, default value, and usage string. +// The argument p points to an int variable in which to store the value of the flag. +func IntVar(p *int, name string, value int, usage string) { + CommandLine.VarP(newIntValue(value, p), name, "", usage) +} + +// Like IntVar, but accepts a shorthand letter that can be used after a single dash. +func IntVarP(p *int, name, shorthand string, value int, usage string) { + CommandLine.VarP(newIntValue(value, p), name, shorthand, usage) +} + +// Int defines an int flag with specified name, default value, and usage string. +// The return value is the address of an int variable that stores the value of the flag. +func (f *FlagSet) Int(name string, value int, usage string) *int { + p := new(int) + f.IntVarP(p, name, "", value, usage) + return p +} + +// Like Int, but accepts a shorthand letter that can be used after a single dash. 
+func (f *FlagSet) IntP(name, shorthand string, value int, usage string) *int { + p := new(int) + f.IntVarP(p, name, shorthand, value, usage) + return p +} + +// Int defines an int flag with specified name, default value, and usage string. +// The return value is the address of an int variable that stores the value of the flag. +func Int(name string, value int, usage string) *int { + return CommandLine.IntP(name, "", value, usage) +} + +// Like Int, but accepts a shorthand letter that can be used after a single dash. +func IntP(name, shorthand string, value int, usage string) *int { + return CommandLine.IntP(name, shorthand, value, usage) +} diff --git a/vendor/github.com/docker/distribution/vendor/github.com/spf13/pflag/int32.go b/vendor/github.com/docker/distribution/vendor/github.com/spf13/pflag/int32.go new file mode 100644 index 000000000..18eaacd60 --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/github.com/spf13/pflag/int32.go @@ -0,0 +1,74 @@ +package pflag + +import ( + "fmt" + "strconv" +) + +// -- int32 Value +type int32Value int32 + +func newInt32Value(val int32, p *int32) *int32Value { + *p = val + return (*int32Value)(p) +} + +func (i *int32Value) Set(s string) error { + v, err := strconv.ParseInt(s, 0, 32) + *i = int32Value(v) + return err +} + +func (i *int32Value) Type() string { + return "int32" +} + +func (i *int32Value) String() string { return fmt.Sprintf("%v", *i) } + +// Int32Var defines an int32 flag with specified name, default value, and usage string. +// The argument p points to an int32 variable in which to store the value of the flag. +func (f *FlagSet) Int32Var(p *int32, name string, value int32, usage string) { + f.VarP(newInt32Value(value, p), name, "", usage) +} + +// Like Int32Var, but accepts a shorthand letter that can be used after a single dash. +func (f *FlagSet) Int32VarP(p *int32, name, shorthand string, value int32, usage string) { + f.VarP(newInt32Value(value, p), name, shorthand, usage) +} + +// Int32Var defines an int32 flag with specified name, default value, and usage string. +// The argument p points to an int32 variable in which to store the value of the flag. +func Int32Var(p *int32, name string, value int32, usage string) { + CommandLine.VarP(newInt32Value(value, p), name, "", usage) +} + +// Like Int32Var, but accepts a shorthand letter that can be used after a single dash. +func Int32VarP(p *int32, name, shorthand string, value int32, usage string) { + CommandLine.VarP(newInt32Value(value, p), name, shorthand, usage) +} + +// Int32 defines an int32 flag with specified name, default value, and usage string. +// The return value is the address of an int32 variable that stores the value of the flag. +func (f *FlagSet) Int32(name string, value int32, usage string) *int32 { + p := new(int32) + f.Int32VarP(p, name, "", value, usage) + return p +} + +// Like Int32, but accepts a shorthand letter that can be used after a single dash. +func (f *FlagSet) Int32P(name, shorthand string, value int32, usage string) *int32 { + p := new(int32) + f.Int32VarP(p, name, shorthand, value, usage) + return p +} + +// Int32 defines an int32 flag with specified name, default value, and usage string. +// The return value is the address of an int32 variable that stores the value of the flag. +func Int32(name string, value int32, usage string) *int32 { + return CommandLine.Int32P(name, "", value, usage) +} + +// Like Int32, but accepts a shorthand letter that can be used after a single dash. 
+func Int32P(name, shorthand string, value int32, usage string) *int32 { + return CommandLine.Int32P(name, shorthand, value, usage) +} diff --git a/vendor/github.com/docker/distribution/vendor/github.com/spf13/pflag/int64.go b/vendor/github.com/docker/distribution/vendor/github.com/spf13/pflag/int64.go new file mode 100644 index 000000000..0114aaaa8 --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/github.com/spf13/pflag/int64.go @@ -0,0 +1,74 @@ +package pflag + +import ( + "fmt" + "strconv" +) + +// -- int64 Value +type int64Value int64 + +func newInt64Value(val int64, p *int64) *int64Value { + *p = val + return (*int64Value)(p) +} + +func (i *int64Value) Set(s string) error { + v, err := strconv.ParseInt(s, 0, 64) + *i = int64Value(v) + return err +} + +func (i *int64Value) Type() string { + return "int64" +} + +func (i *int64Value) String() string { return fmt.Sprintf("%v", *i) } + +// Int64Var defines an int64 flag with specified name, default value, and usage string. +// The argument p points to an int64 variable in which to store the value of the flag. +func (f *FlagSet) Int64Var(p *int64, name string, value int64, usage string) { + f.VarP(newInt64Value(value, p), name, "", usage) +} + +// Like Int64Var, but accepts a shorthand letter that can be used after a single dash. +func (f *FlagSet) Int64VarP(p *int64, name, shorthand string, value int64, usage string) { + f.VarP(newInt64Value(value, p), name, shorthand, usage) +} + +// Int64Var defines an int64 flag with specified name, default value, and usage string. +// The argument p points to an int64 variable in which to store the value of the flag. +func Int64Var(p *int64, name string, value int64, usage string) { + CommandLine.VarP(newInt64Value(value, p), name, "", usage) +} + +// Like Int64Var, but accepts a shorthand letter that can be used after a single dash. +func Int64VarP(p *int64, name, shorthand string, value int64, usage string) { + CommandLine.VarP(newInt64Value(value, p), name, shorthand, usage) +} + +// Int64 defines an int64 flag with specified name, default value, and usage string. +// The return value is the address of an int64 variable that stores the value of the flag. +func (f *FlagSet) Int64(name string, value int64, usage string) *int64 { + p := new(int64) + f.Int64VarP(p, name, "", value, usage) + return p +} + +// Like Int64, but accepts a shorthand letter that can be used after a single dash. +func (f *FlagSet) Int64P(name, shorthand string, value int64, usage string) *int64 { + p := new(int64) + f.Int64VarP(p, name, shorthand, value, usage) + return p +} + +// Int64 defines an int64 flag with specified name, default value, and usage string. +// The return value is the address of an int64 variable that stores the value of the flag. +func Int64(name string, value int64, usage string) *int64 { + return CommandLine.Int64P(name, "", value, usage) +} + +// Like Int64, but accepts a shorthand letter that can be used after a single dash. 
+func Int64P(name, shorthand string, value int64, usage string) *int64 { + return CommandLine.Int64P(name, shorthand, value, usage) +} diff --git a/vendor/github.com/docker/distribution/vendor/github.com/spf13/pflag/int8.go b/vendor/github.com/docker/distribution/vendor/github.com/spf13/pflag/int8.go new file mode 100644 index 000000000..aab1022f9 --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/github.com/spf13/pflag/int8.go @@ -0,0 +1,74 @@ +package pflag + +import ( + "fmt" + "strconv" +) + +// -- int8 Value +type int8Value int8 + +func newInt8Value(val int8, p *int8) *int8Value { + *p = val + return (*int8Value)(p) +} + +func (i *int8Value) Set(s string) error { + v, err := strconv.ParseInt(s, 0, 8) + *i = int8Value(v) + return err +} + +func (i *int8Value) Type() string { + return "int8" +} + +func (i *int8Value) String() string { return fmt.Sprintf("%v", *i) } + +// Int8Var defines an int8 flag with specified name, default value, and usage string. +// The argument p points to an int8 variable in which to store the value of the flag. +func (f *FlagSet) Int8Var(p *int8, name string, value int8, usage string) { + f.VarP(newInt8Value(value, p), name, "", usage) +} + +// Like Int8Var, but accepts a shorthand letter that can be used after a single dash. +func (f *FlagSet) Int8VarP(p *int8, name, shorthand string, value int8, usage string) { + f.VarP(newInt8Value(value, p), name, shorthand, usage) +} + +// Int8Var defines an int8 flag with specified name, default value, and usage string. +// The argument p points to an int8 variable in which to store the value of the flag. +func Int8Var(p *int8, name string, value int8, usage string) { + CommandLine.VarP(newInt8Value(value, p), name, "", usage) +} + +// Like Int8Var, but accepts a shorthand letter that can be used after a single dash. +func Int8VarP(p *int8, name, shorthand string, value int8, usage string) { + CommandLine.VarP(newInt8Value(value, p), name, shorthand, usage) +} + +// Int8 defines an int8 flag with specified name, default value, and usage string. +// The return value is the address of an int8 variable that stores the value of the flag. +func (f *FlagSet) Int8(name string, value int8, usage string) *int8 { + p := new(int8) + f.Int8VarP(p, name, "", value, usage) + return p +} + +// Like Int8, but accepts a shorthand letter that can be used after a single dash. +func (f *FlagSet) Int8P(name, shorthand string, value int8, usage string) *int8 { + p := new(int8) + f.Int8VarP(p, name, shorthand, value, usage) + return p +} + +// Int8 defines an int8 flag with specified name, default value, and usage string. +// The return value is the address of an int8 variable that stores the value of the flag. +func Int8(name string, value int8, usage string) *int8 { + return CommandLine.Int8P(name, "", value, usage) +} + +// Like Int8, but accepts a shorthand letter that can be used after a single dash. 
+func Int8P(name, shorthand string, value int8, usage string) *int8 { + return CommandLine.Int8P(name, shorthand, value, usage) +} diff --git a/vendor/github.com/docker/distribution/vendor/github.com/spf13/pflag/ip.go b/vendor/github.com/docker/distribution/vendor/github.com/spf13/pflag/ip.go new file mode 100644 index 000000000..efa75fbc3 --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/github.com/spf13/pflag/ip.go @@ -0,0 +1,76 @@ +package pflag + +import ( + "fmt" + "net" +) + +// -- net.IP value +type ipValue net.IP + +func newIPValue(val net.IP, p *net.IP) *ipValue { + *p = val + return (*ipValue)(p) +} + +func (i *ipValue) String() string { return net.IP(*i).String() } +func (i *ipValue) Set(s string) error { + ip := net.ParseIP(s) + if ip == nil { + return fmt.Errorf("failed to parse IP: %q", s) + } + *i = ipValue(ip) + return nil +} + +func (i *ipValue) Type() string { + return "ip" +} + +// IPVar defines an net.IP flag with specified name, default value, and usage string. +// The argument p points to an net.IP variable in which to store the value of the flag. +func (f *FlagSet) IPVar(p *net.IP, name string, value net.IP, usage string) { + f.VarP(newIPValue(value, p), name, "", usage) +} + +// Like IPVar, but accepts a shorthand letter that can be used after a single dash. +func (f *FlagSet) IPVarP(p *net.IP, name, shorthand string, value net.IP, usage string) { + f.VarP(newIPValue(value, p), name, shorthand, usage) +} + +// IPVar defines an net.IP flag with specified name, default value, and usage string. +// The argument p points to an net.IP variable in which to store the value of the flag. +func IPVar(p *net.IP, name string, value net.IP, usage string) { + CommandLine.VarP(newIPValue(value, p), name, "", usage) +} + +// Like IPVar, but accepts a shorthand letter that can be used after a single dash. +func IPVarP(p *net.IP, name, shorthand string, value net.IP, usage string) { + CommandLine.VarP(newIPValue(value, p), name, shorthand, usage) +} + +// IP defines an net.IP flag with specified name, default value, and usage string. +// The return value is the address of an net.IP variable that stores the value of the flag. +func (f *FlagSet) IP(name string, value net.IP, usage string) *net.IP { + p := new(net.IP) + f.IPVarP(p, name, "", value, usage) + return p +} + +// Like IP, but accepts a shorthand letter that can be used after a single dash. +func (f *FlagSet) IPP(name, shorthand string, value net.IP, usage string) *net.IP { + p := new(net.IP) + f.IPVarP(p, name, shorthand, value, usage) + return p +} + +// IP defines an net.IP flag with specified name, default value, and usage string. +// The return value is the address of an net.IP variable that stores the value of the flag. +func IP(name string, value net.IP, usage string) *net.IP { + return CommandLine.IPP(name, "", value, usage) +} + +// Like IP, but accepts a shorthand letter that can be used after a single dash. 
+func IPP(name, shorthand string, value net.IP, usage string) *net.IP { + return CommandLine.IPP(name, shorthand, value, usage) +} diff --git a/vendor/github.com/docker/distribution/vendor/github.com/spf13/pflag/ipmask.go b/vendor/github.com/docker/distribution/vendor/github.com/spf13/pflag/ipmask.go new file mode 100644 index 000000000..09b9533e7 --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/github.com/spf13/pflag/ipmask.go @@ -0,0 +1,86 @@ +package pflag + +import ( + "fmt" + "net" +) + +// -- net.IPMask value +type ipMaskValue net.IPMask + +func newIPMaskValue(val net.IPMask, p *net.IPMask) *ipMaskValue { + *p = val + return (*ipMaskValue)(p) +} + +func (i *ipMaskValue) String() string { return net.IPMask(*i).String() } +func (i *ipMaskValue) Set(s string) error { + ip := ParseIPv4Mask(s) + if ip == nil { + return fmt.Errorf("failed to parse IP mask: %q", s) + } + *i = ipMaskValue(ip) + return nil +} + +func (i *ipMaskValue) Type() string { + return "ipMask" +} + +// Parse IPv4 netmask written in IP form (e.g. 255.255.255.0). +// This function should really belong to the net package. +func ParseIPv4Mask(s string) net.IPMask { + mask := net.ParseIP(s) + if mask == nil { + return nil + } + return net.IPv4Mask(mask[12], mask[13], mask[14], mask[15]) +} + +// IPMaskVar defines an net.IPMask flag with specified name, default value, and usage string. +// The argument p points to an net.IPMask variable in which to store the value of the flag. +func (f *FlagSet) IPMaskVar(p *net.IPMask, name string, value net.IPMask, usage string) { + f.VarP(newIPMaskValue(value, p), name, "", usage) +} + +// Like IPMaskVar, but accepts a shorthand letter that can be used after a single dash. +func (f *FlagSet) IPMaskVarP(p *net.IPMask, name, shorthand string, value net.IPMask, usage string) { + f.VarP(newIPMaskValue(value, p), name, shorthand, usage) +} + +// IPMaskVar defines an net.IPMask flag with specified name, default value, and usage string. +// The argument p points to an net.IPMask variable in which to store the value of the flag. +func IPMaskVar(p *net.IPMask, name string, value net.IPMask, usage string) { + CommandLine.VarP(newIPMaskValue(value, p), name, "", usage) +} + +// Like IPMaskVar, but accepts a shorthand letter that can be used after a single dash. +func IPMaskVarP(p *net.IPMask, name, shorthand string, value net.IPMask, usage string) { + CommandLine.VarP(newIPMaskValue(value, p), name, shorthand, usage) +} + +// IPMask defines an net.IPMask flag with specified name, default value, and usage string. +// The return value is the address of an net.IPMask variable that stores the value of the flag. +func (f *FlagSet) IPMask(name string, value net.IPMask, usage string) *net.IPMask { + p := new(net.IPMask) + f.IPMaskVarP(p, name, "", value, usage) + return p +} + +// Like IPMask, but accepts a shorthand letter that can be used after a single dash. +func (f *FlagSet) IPMaskP(name, shorthand string, value net.IPMask, usage string) *net.IPMask { + p := new(net.IPMask) + f.IPMaskVarP(p, name, shorthand, value, usage) + return p +} + +// IPMask defines an net.IPMask flag with specified name, default value, and usage string. +// The return value is the address of an net.IPMask variable that stores the value of the flag. +func IPMask(name string, value net.IPMask, usage string) *net.IPMask { + return CommandLine.IPMaskP(name, "", value, usage) +} + +// Like IP, but accepts a shorthand letter that can be used after a single dash. 
+func IPMaskP(name, shorthand string, value net.IPMask, usage string) *net.IPMask { + return CommandLine.IPMaskP(name, shorthand, value, usage) +} diff --git a/vendor/github.com/docker/distribution/vendor/github.com/spf13/pflag/string.go b/vendor/github.com/docker/distribution/vendor/github.com/spf13/pflag/string.go new file mode 100644 index 000000000..362fbf8a8 --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/github.com/spf13/pflag/string.go @@ -0,0 +1,69 @@ +package pflag + +import "fmt" + +// -- string Value +type stringValue string + +func newStringValue(val string, p *string) *stringValue { + *p = val + return (*stringValue)(p) +} + +func (s *stringValue) Set(val string) error { + *s = stringValue(val) + return nil +} +func (s *stringValue) Type() string { + return "string" +} + +func (s *stringValue) String() string { return fmt.Sprintf("%s", *s) } + +// StringVar defines a string flag with specified name, default value, and usage string. +// The argument p points to a string variable in which to store the value of the flag. +func (f *FlagSet) StringVar(p *string, name string, value string, usage string) { + f.VarP(newStringValue(value, p), name, "", usage) +} + +// Like StringVar, but accepts a shorthand letter that can be used after a single dash. +func (f *FlagSet) StringVarP(p *string, name, shorthand string, value string, usage string) { + f.VarP(newStringValue(value, p), name, shorthand, usage) +} + +// StringVar defines a string flag with specified name, default value, and usage string. +// The argument p points to a string variable in which to store the value of the flag. +func StringVar(p *string, name string, value string, usage string) { + CommandLine.VarP(newStringValue(value, p), name, "", usage) +} + +// Like StringVar, but accepts a shorthand letter that can be used after a single dash. +func StringVarP(p *string, name, shorthand string, value string, usage string) { + CommandLine.VarP(newStringValue(value, p), name, shorthand, usage) +} + +// String defines a string flag with specified name, default value, and usage string. +// The return value is the address of a string variable that stores the value of the flag. +func (f *FlagSet) String(name string, value string, usage string) *string { + p := new(string) + f.StringVarP(p, name, "", value, usage) + return p +} + +// Like String, but accepts a shorthand letter that can be used after a single dash. +func (f *FlagSet) StringP(name, shorthand string, value string, usage string) *string { + p := new(string) + f.StringVarP(p, name, shorthand, value, usage) + return p +} + +// String defines a string flag with specified name, default value, and usage string. +// The return value is the address of a string variable that stores the value of the flag. +func String(name string, value string, usage string) *string { + return CommandLine.StringP(name, "", value, usage) +} + +// Like String, but accepts a shorthand letter that can be used after a single dash. 
+func StringP(name, shorthand string, value string, usage string) *string { + return CommandLine.StringP(name, shorthand, value, usage) +} diff --git a/vendor/github.com/docker/distribution/vendor/github.com/spf13/pflag/uint.go b/vendor/github.com/docker/distribution/vendor/github.com/spf13/pflag/uint.go new file mode 100644 index 000000000..c063fe7cb --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/github.com/spf13/pflag/uint.go @@ -0,0 +1,74 @@ +package pflag + +import ( + "fmt" + "strconv" +) + +// -- uint Value +type uintValue uint + +func newUintValue(val uint, p *uint) *uintValue { + *p = val + return (*uintValue)(p) +} + +func (i *uintValue) Set(s string) error { + v, err := strconv.ParseUint(s, 0, 64) + *i = uintValue(v) + return err +} + +func (i *uintValue) Type() string { + return "uint" +} + +func (i *uintValue) String() string { return fmt.Sprintf("%v", *i) } + +// UintVar defines a uint flag with specified name, default value, and usage string. +// The argument p points to a uint variable in which to store the value of the flag. +func (f *FlagSet) UintVar(p *uint, name string, value uint, usage string) { + f.VarP(newUintValue(value, p), name, "", usage) +} + +// Like UintVar, but accepts a shorthand letter that can be used after a single dash. +func (f *FlagSet) UintVarP(p *uint, name, shorthand string, value uint, usage string) { + f.VarP(newUintValue(value, p), name, shorthand, usage) +} + +// UintVar defines a uint flag with specified name, default value, and usage string. +// The argument p points to a uint variable in which to store the value of the flag. +func UintVar(p *uint, name string, value uint, usage string) { + CommandLine.VarP(newUintValue(value, p), name, "", usage) +} + +// Like UintVar, but accepts a shorthand letter that can be used after a single dash. +func UintVarP(p *uint, name, shorthand string, value uint, usage string) { + CommandLine.VarP(newUintValue(value, p), name, shorthand, usage) +} + +// Uint defines a uint flag with specified name, default value, and usage string. +// The return value is the address of a uint variable that stores the value of the flag. +func (f *FlagSet) Uint(name string, value uint, usage string) *uint { + p := new(uint) + f.UintVarP(p, name, "", value, usage) + return p +} + +// Like Uint, but accepts a shorthand letter that can be used after a single dash. +func (f *FlagSet) UintP(name, shorthand string, value uint, usage string) *uint { + p := new(uint) + f.UintVarP(p, name, shorthand, value, usage) + return p +} + +// Uint defines a uint flag with specified name, default value, and usage string. +// The return value is the address of a uint variable that stores the value of the flag. +func Uint(name string, value uint, usage string) *uint { + return CommandLine.UintP(name, "", value, usage) +} + +// Like Uint, but accepts a shorthand letter that can be used after a single dash. 
+func UintP(name, shorthand string, value uint, usage string) *uint { + return CommandLine.UintP(name, shorthand, value, usage) +} diff --git a/vendor/github.com/docker/distribution/vendor/github.com/spf13/pflag/uint16.go b/vendor/github.com/docker/distribution/vendor/github.com/spf13/pflag/uint16.go new file mode 100644 index 000000000..ab1c1f9ee --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/github.com/spf13/pflag/uint16.go @@ -0,0 +1,72 @@ +package pflag + +import ( + "fmt" + "strconv" +) + +// -- uint16 value +type uint16Value uint16 + +func newUint16Value(val uint16, p *uint16) *uint16Value { + *p = val + return (*uint16Value)(p) +} +func (i *uint16Value) String() string { return fmt.Sprintf("%d", *i) } +func (i *uint16Value) Set(s string) error { + v, err := strconv.ParseUint(s, 0, 16) + *i = uint16Value(v) + return err +} + +func (i *uint16Value) Type() string { + return "uint16" +} + +// Uint16Var defines a uint flag with specified name, default value, and usage string. +// The argument p points to a uint variable in which to store the value of the flag. +func (f *FlagSet) Uint16Var(p *uint16, name string, value uint16, usage string) { + f.VarP(newUint16Value(value, p), name, "", usage) +} + +// Like Uint16Var, but accepts a shorthand letter that can be used after a single dash. +func (f *FlagSet) Uint16VarP(p *uint16, name, shorthand string, value uint16, usage string) { + f.VarP(newUint16Value(value, p), name, shorthand, usage) +} + +// Uint16Var defines a uint flag with specified name, default value, and usage string. +// The argument p points to a uint variable in which to store the value of the flag. +func Uint16Var(p *uint16, name string, value uint16, usage string) { + CommandLine.VarP(newUint16Value(value, p), name, "", usage) +} + +// Like Uint16Var, but accepts a shorthand letter that can be used after a single dash. +func Uint16VarP(p *uint16, name, shorthand string, value uint16, usage string) { + CommandLine.VarP(newUint16Value(value, p), name, shorthand, usage) +} + +// Uint16 defines a uint flag with specified name, default value, and usage string. +// The return value is the address of a uint variable that stores the value of the flag. +func (f *FlagSet) Uint16(name string, value uint16, usage string) *uint16 { + p := new(uint16) + f.Uint16VarP(p, name, "", value, usage) + return p +} + +// Like Uint16, but accepts a shorthand letter that can be used after a single dash. +func (f *FlagSet) Uint16P(name, shorthand string, value uint16, usage string) *uint16 { + p := new(uint16) + f.Uint16VarP(p, name, shorthand, value, usage) + return p +} + +// Uint16 defines a uint flag with specified name, default value, and usage string. +// The return value is the address of a uint variable that stores the value of the flag. +func Uint16(name string, value uint16, usage string) *uint16 { + return CommandLine.Uint16P(name, "", value, usage) +} + +// Like Uint16, but accepts a shorthand letter that can be used after a single dash. 
+func Uint16P(name, shorthand string, value uint16, usage string) *uint16 { + return CommandLine.Uint16P(name, shorthand, value, usage) +} diff --git a/vendor/github.com/docker/distribution/vendor/github.com/spf13/pflag/uint32.go b/vendor/github.com/docker/distribution/vendor/github.com/spf13/pflag/uint32.go new file mode 100644 index 000000000..db635ae88 --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/github.com/spf13/pflag/uint32.go @@ -0,0 +1,72 @@ +package pflag + +import ( + "fmt" + "strconv" +) + +// -- uint16 value +type uint32Value uint32 + +func newUint32Value(val uint32, p *uint32) *uint32Value { + *p = val + return (*uint32Value)(p) +} +func (i *uint32Value) String() string { return fmt.Sprintf("%d", *i) } +func (i *uint32Value) Set(s string) error { + v, err := strconv.ParseUint(s, 0, 32) + *i = uint32Value(v) + return err +} + +func (i *uint32Value) Type() string { + return "uint32" +} + +// Uint32Var defines a uint32 flag with specified name, default value, and usage string. +// The argument p points to a uint32 variable in which to store the value of the flag. +func (f *FlagSet) Uint32Var(p *uint32, name string, value uint32, usage string) { + f.VarP(newUint32Value(value, p), name, "", usage) +} + +// Like Uint32Var, but accepts a shorthand letter that can be used after a single dash. +func (f *FlagSet) Uint32VarP(p *uint32, name, shorthand string, value uint32, usage string) { + f.VarP(newUint32Value(value, p), name, shorthand, usage) +} + +// Uint32Var defines a uint32 flag with specified name, default value, and usage string. +// The argument p points to a uint32 variable in which to store the value of the flag. +func Uint32Var(p *uint32, name string, value uint32, usage string) { + CommandLine.VarP(newUint32Value(value, p), name, "", usage) +} + +// Like Uint32Var, but accepts a shorthand letter that can be used after a single dash. +func Uint32VarP(p *uint32, name, shorthand string, value uint32, usage string) { + CommandLine.VarP(newUint32Value(value, p), name, shorthand, usage) +} + +// Uint32 defines a uint32 flag with specified name, default value, and usage string. +// The return value is the address of a uint32 variable that stores the value of the flag. +func (f *FlagSet) Uint32(name string, value uint32, usage string) *uint32 { + p := new(uint32) + f.Uint32VarP(p, name, "", value, usage) + return p +} + +// Like Uint32, but accepts a shorthand letter that can be used after a single dash. +func (f *FlagSet) Uint32P(name, shorthand string, value uint32, usage string) *uint32 { + p := new(uint32) + f.Uint32VarP(p, name, shorthand, value, usage) + return p +} + +// Uint32 defines a uint32 flag with specified name, default value, and usage string. +// The return value is the address of a uint32 variable that stores the value of the flag. +func Uint32(name string, value uint32, usage string) *uint32 { + return CommandLine.Uint32P(name, "", value, usage) +} + +// Like Uint32, but accepts a shorthand letter that can be used after a single dash. 
+func Uint32P(name, shorthand string, value uint32, usage string) *uint32 { + return CommandLine.Uint32P(name, shorthand, value, usage) +} diff --git a/vendor/github.com/docker/distribution/vendor/github.com/spf13/pflag/uint64.go b/vendor/github.com/docker/distribution/vendor/github.com/spf13/pflag/uint64.go new file mode 100644 index 000000000..99c7e805d --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/github.com/spf13/pflag/uint64.go @@ -0,0 +1,74 @@ +package pflag + +import ( + "fmt" + "strconv" +) + +// -- uint64 Value +type uint64Value uint64 + +func newUint64Value(val uint64, p *uint64) *uint64Value { + *p = val + return (*uint64Value)(p) +} + +func (i *uint64Value) Set(s string) error { + v, err := strconv.ParseUint(s, 0, 64) + *i = uint64Value(v) + return err +} + +func (i *uint64Value) Type() string { + return "uint64" +} + +func (i *uint64Value) String() string { return fmt.Sprintf("%v", *i) } + +// Uint64Var defines a uint64 flag with specified name, default value, and usage string. +// The argument p points to a uint64 variable in which to store the value of the flag. +func (f *FlagSet) Uint64Var(p *uint64, name string, value uint64, usage string) { + f.VarP(newUint64Value(value, p), name, "", usage) +} + +// Like Uint64Var, but accepts a shorthand letter that can be used after a single dash. +func (f *FlagSet) Uint64VarP(p *uint64, name, shorthand string, value uint64, usage string) { + f.VarP(newUint64Value(value, p), name, shorthand, usage) +} + +// Uint64Var defines a uint64 flag with specified name, default value, and usage string. +// The argument p points to a uint64 variable in which to store the value of the flag. +func Uint64Var(p *uint64, name string, value uint64, usage string) { + CommandLine.VarP(newUint64Value(value, p), name, "", usage) +} + +// Like Uint64Var, but accepts a shorthand letter that can be used after a single dash. +func Uint64VarP(p *uint64, name, shorthand string, value uint64, usage string) { + CommandLine.VarP(newUint64Value(value, p), name, shorthand, usage) +} + +// Uint64 defines a uint64 flag with specified name, default value, and usage string. +// The return value is the address of a uint64 variable that stores the value of the flag. +func (f *FlagSet) Uint64(name string, value uint64, usage string) *uint64 { + p := new(uint64) + f.Uint64VarP(p, name, "", value, usage) + return p +} + +// Like Uint64, but accepts a shorthand letter that can be used after a single dash. +func (f *FlagSet) Uint64P(name, shorthand string, value uint64, usage string) *uint64 { + p := new(uint64) + f.Uint64VarP(p, name, shorthand, value, usage) + return p +} + +// Uint64 defines a uint64 flag with specified name, default value, and usage string. +// The return value is the address of a uint64 variable that stores the value of the flag. +func Uint64(name string, value uint64, usage string) *uint64 { + return CommandLine.Uint64P(name, "", value, usage) +} + +// Like Uint64, but accepts a shorthand letter that can be used after a single dash. 
+func Uint64P(name, shorthand string, value uint64, usage string) *uint64 { + return CommandLine.Uint64P(name, shorthand, value, usage) +} diff --git a/vendor/github.com/docker/distribution/vendor/github.com/spf13/pflag/uint8.go b/vendor/github.com/docker/distribution/vendor/github.com/spf13/pflag/uint8.go new file mode 100644 index 000000000..6fef508de --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/github.com/spf13/pflag/uint8.go @@ -0,0 +1,74 @@ +package pflag + +import ( + "fmt" + "strconv" +) + +// -- uint8 Value +type uint8Value uint8 + +func newUint8Value(val uint8, p *uint8) *uint8Value { + *p = val + return (*uint8Value)(p) +} + +func (i *uint8Value) Set(s string) error { + v, err := strconv.ParseUint(s, 0, 8) + *i = uint8Value(v) + return err +} + +func (i *uint8Value) Type() string { + return "uint8" +} + +func (i *uint8Value) String() string { return fmt.Sprintf("%v", *i) } + +// Uint8Var defines a uint8 flag with specified name, default value, and usage string. +// The argument p points to a uint8 variable in which to store the value of the flag. +func (f *FlagSet) Uint8Var(p *uint8, name string, value uint8, usage string) { + f.VarP(newUint8Value(value, p), name, "", usage) +} + +// Like Uint8Var, but accepts a shorthand letter that can be used after a single dash. +func (f *FlagSet) Uint8VarP(p *uint8, name, shorthand string, value uint8, usage string) { + f.VarP(newUint8Value(value, p), name, shorthand, usage) +} + +// Uint8Var defines a uint8 flag with specified name, default value, and usage string. +// The argument p points to a uint8 variable in which to store the value of the flag. +func Uint8Var(p *uint8, name string, value uint8, usage string) { + CommandLine.VarP(newUint8Value(value, p), name, "", usage) +} + +// Like Uint8Var, but accepts a shorthand letter that can be used after a single dash. +func Uint8VarP(p *uint8, name, shorthand string, value uint8, usage string) { + CommandLine.VarP(newUint8Value(value, p), name, shorthand, usage) +} + +// Uint8 defines a uint8 flag with specified name, default value, and usage string. +// The return value is the address of a uint8 variable that stores the value of the flag. +func (f *FlagSet) Uint8(name string, value uint8, usage string) *uint8 { + p := new(uint8) + f.Uint8VarP(p, name, "", value, usage) + return p +} + +// Like Uint8, but accepts a shorthand letter that can be used after a single dash. +func (f *FlagSet) Uint8P(name, shorthand string, value uint8, usage string) *uint8 { + p := new(uint8) + f.Uint8VarP(p, name, shorthand, value, usage) + return p +} + +// Uint8 defines a uint8 flag with specified name, default value, and usage string. +// The return value is the address of a uint8 variable that stores the value of the flag. +func Uint8(name string, value uint8, usage string) *uint8 { + return CommandLine.Uint8P(name, "", value, usage) +} + +// Like Uint8, but accepts a shorthand letter that can be used after a single dash. 
+func Uint8P(name, shorthand string, value uint8, usage string) *uint8 { + return CommandLine.Uint8P(name, shorthand, value, usage) +} diff --git a/vendor/github.com/docker/distribution/vendor/github.com/stevvooe/resumable/.gitignore b/vendor/github.com/docker/distribution/vendor/github.com/stevvooe/resumable/.gitignore new file mode 100644 index 000000000..daf913b1b --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/github.com/stevvooe/resumable/.gitignore @@ -0,0 +1,24 @@ +# Compiled Object files, Static and Dynamic libs (Shared Objects) +*.o +*.a +*.so + +# Folders +_obj +_test + +# Architecture specific extensions/prefixes +*.[568vq] +[568vq].out + +*.cgo1.go +*.cgo2.c +_cgo_defun.c +_cgo_gotypes.go +_cgo_export.* + +_testmain.go + +*.exe +*.test +*.prof diff --git a/vendor/github.com/docker/distribution/vendor/github.com/stevvooe/resumable/LICENSE b/vendor/github.com/docker/distribution/vendor/github.com/stevvooe/resumable/LICENSE new file mode 100644 index 000000000..2815cc36c --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/github.com/stevvooe/resumable/LICENSE @@ -0,0 +1,28 @@ +Copyright (c) 2012 The Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ diff --git a/vendor/github.com/docker/distribution/vendor/github.com/stevvooe/resumable/README.md b/vendor/github.com/docker/distribution/vendor/github.com/stevvooe/resumable/README.md new file mode 100644 index 000000000..d2d3fb89e --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/github.com/stevvooe/resumable/README.md @@ -0,0 +1,6 @@ +# go-crypto +A Subset of the Go `crypto` Package with a Resumable Hash Interface + +### Documentation + +GoDocs: http://godoc.org/github.com/stevvooe/resumable diff --git a/vendor/github.com/docker/distribution/vendor/github.com/stevvooe/resumable/resumable.go b/vendor/github.com/docker/distribution/vendor/github.com/stevvooe/resumable/resumable.go new file mode 100644 index 000000000..af4488f12 --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/github.com/stevvooe/resumable/resumable.go @@ -0,0 +1,43 @@ +// Package resumable registers resumable versions of hash functions. Resumable +// varieties of hash functions are available via the standard crypto package. +// Support can be checked by type assertion against the resumable.Hash +// interface. +// +// While one can use these sub-packages directly, it makes more sense to +// register them using side-effect imports: +// +// import _ "github.com/stevvooe/resumable/sha256" +// +// This will make the resumable hashes available to the application through +// the standard crypto package. For example, if a new sha256 is required, one +// should use the following: +// +// h := crypto.SHA256.New() +// +// Such a features allows one to control the inclusion of resumable hash +// support in a single file. Applications that require the resumable hash +// implementation can type switch to detect support, while other parts of the +// application can be completely oblivious to the presence of the alternative +// hash functions. +// +// Also note that the implementations available in this package are completely +// untouched from their Go counterparts in the standard library. Only an extra +// file is added to each package to implement the extra resumable hash +// functions. +package resumable + +import "hash" + +// Hash is the common interface implemented by all resumable hash functions. +type Hash interface { + hash.Hash + + // Len returns the number of bytes written to the Hash so far. + Len() int64 + + // State returns a snapshot of the state of the Hash. + State() ([]byte, error) + + // Restore resets the Hash to the given state. + Restore(state []byte) error +} diff --git a/vendor/github.com/docker/distribution/vendor/github.com/stevvooe/resumable/sha256/resume.go b/vendor/github.com/docker/distribution/vendor/github.com/stevvooe/resumable/sha256/resume.go new file mode 100644 index 000000000..426d78adc --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/github.com/stevvooe/resumable/sha256/resume.go @@ -0,0 +1,53 @@ +package sha256 + +import ( + "bytes" + "encoding/gob" + + // import to ensure that our init function runs after the standard package + _ "crypto/sha256" +) + +// Len returns the number of bytes which have been written to the digest. +func (d *digest) Len() int64 { + return int64(d.len) +} + +// State returns a snapshot of the state of the digest. +func (d *digest) State() ([]byte, error) { + var buf bytes.Buffer + encoder := gob.NewEncoder(&buf) + + // We encode this way so that we do not have + // to export these fields of the digest struct. 
+ vals := []interface{}{ + d.h, d.x, d.nx, d.len, d.is224, + } + + for _, val := range vals { + if err := encoder.Encode(val); err != nil { + return nil, err + } + } + + return buf.Bytes(), nil +} + +// Restore resets the digest to the given state. +func (d *digest) Restore(state []byte) error { + decoder := gob.NewDecoder(bytes.NewReader(state)) + + // We decode this way so that we do not have + // to export these fields of the digest struct. + vals := []interface{}{ + &d.h, &d.x, &d.nx, &d.len, &d.is224, + } + + for _, val := range vals { + if err := decoder.Decode(val); err != nil { + return err + } + } + + return nil +} diff --git a/vendor/github.com/docker/distribution/vendor/github.com/stevvooe/resumable/sha256/sha256.go b/vendor/github.com/docker/distribution/vendor/github.com/stevvooe/resumable/sha256/sha256.go new file mode 100644 index 000000000..d84cebf2f --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/github.com/stevvooe/resumable/sha256/sha256.go @@ -0,0 +1,193 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package sha256 implements the SHA224 and SHA256 hash algorithms as defined +// in FIPS 180-4. +package sha256 + +import ( + "crypto" + "hash" +) + +func init() { + crypto.RegisterHash(crypto.SHA224, New224) + crypto.RegisterHash(crypto.SHA256, New) +} + +// The size of a SHA256 checksum in bytes. +const Size = 32 + +// The size of a SHA224 checksum in bytes. +const Size224 = 28 + +// The blocksize of SHA256 and SHA224 in bytes. +const BlockSize = 64 + +const ( + chunk = 64 + init0 = 0x6A09E667 + init1 = 0xBB67AE85 + init2 = 0x3C6EF372 + init3 = 0xA54FF53A + init4 = 0x510E527F + init5 = 0x9B05688C + init6 = 0x1F83D9AB + init7 = 0x5BE0CD19 + init0_224 = 0xC1059ED8 + init1_224 = 0x367CD507 + init2_224 = 0x3070DD17 + init3_224 = 0xF70E5939 + init4_224 = 0xFFC00B31 + init5_224 = 0x68581511 + init6_224 = 0x64F98FA7 + init7_224 = 0xBEFA4FA4 +) + +// digest represents the partial evaluation of a checksum. +type digest struct { + h [8]uint32 + x [chunk]byte + nx int + len uint64 + is224 bool // mark if this digest is SHA-224 +} + +func (d *digest) Reset() { + if !d.is224 { + d.h[0] = init0 + d.h[1] = init1 + d.h[2] = init2 + d.h[3] = init3 + d.h[4] = init4 + d.h[5] = init5 + d.h[6] = init6 + d.h[7] = init7 + } else { + d.h[0] = init0_224 + d.h[1] = init1_224 + d.h[2] = init2_224 + d.h[3] = init3_224 + d.h[4] = init4_224 + d.h[5] = init5_224 + d.h[6] = init6_224 + d.h[7] = init7_224 + } + d.nx = 0 + d.len = 0 +} + +// New returns a new hash.Hash computing the SHA256 checksum. +func New() hash.Hash { + d := new(digest) + d.Reset() + return d +} + +// New224 returns a new hash.Hash computing the SHA224 checksum. +func New224() hash.Hash { + d := new(digest) + d.is224 = true + d.Reset() + return d +} + +func (d *digest) Size() int { + if !d.is224 { + return Size + } + return Size224 +} + +func (d *digest) BlockSize() int { return BlockSize } + +func (d *digest) Write(p []byte) (nn int, err error) { + nn = len(p) + d.len += uint64(nn) + if d.nx > 0 { + n := copy(d.x[d.nx:], p) + d.nx += n + if d.nx == chunk { + block(d, d.x[:]) + d.nx = 0 + } + p = p[n:] + } + if len(p) >= chunk { + n := len(p) &^ (chunk - 1) + block(d, p[:n]) + p = p[n:] + } + if len(p) > 0 { + d.nx = copy(d.x[:], p) + } + return +} + +func (d0 *digest) Sum(in []byte) []byte { + // Make a copy of d0 so that caller can keep writing and summing. 
+ d := *d0 + hash := d.checkSum() + if d.is224 { + return append(in, hash[:Size224]...) + } + return append(in, hash[:]...) +} + +func (d *digest) checkSum() [Size]byte { + len := d.len + // Padding. Add a 1 bit and 0 bits until 56 bytes mod 64. + var tmp [64]byte + tmp[0] = 0x80 + if len%64 < 56 { + d.Write(tmp[0 : 56-len%64]) + } else { + d.Write(tmp[0 : 64+56-len%64]) + } + + // Length in bits. + len <<= 3 + for i := uint(0); i < 8; i++ { + tmp[i] = byte(len >> (56 - 8*i)) + } + d.Write(tmp[0:8]) + + if d.nx != 0 { + panic("d.nx != 0") + } + + h := d.h[:] + if d.is224 { + h = d.h[:7] + } + + var digest [Size]byte + for i, s := range h { + digest[i*4] = byte(s >> 24) + digest[i*4+1] = byte(s >> 16) + digest[i*4+2] = byte(s >> 8) + digest[i*4+3] = byte(s) + } + + return digest +} + +// Sum256 returns the SHA256 checksum of the data. +func Sum256(data []byte) [Size]byte { + var d digest + d.Reset() + d.Write(data) + return d.checkSum() +} + +// Sum224 returns the SHA224 checksum of the data. +func Sum224(data []byte) (sum224 [Size224]byte) { + var d digest + d.is224 = true + d.Reset() + d.Write(data) + sum := d.checkSum() + copy(sum224[:], sum[:Size224]) + return +} diff --git a/vendor/github.com/docker/distribution/vendor/github.com/stevvooe/resumable/sha256/sha256block.go b/vendor/github.com/docker/distribution/vendor/github.com/stevvooe/resumable/sha256/sha256block.go new file mode 100644 index 000000000..ca5efd156 --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/github.com/stevvooe/resumable/sha256/sha256block.go @@ -0,0 +1,128 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !386,!amd64 + +// SHA256 block step. +// In its own file so that a faster assembly or C version +// can be substituted easily. + +package sha256 + +var _K = []uint32{ + 0x428a2f98, + 0x71374491, + 0xb5c0fbcf, + 0xe9b5dba5, + 0x3956c25b, + 0x59f111f1, + 0x923f82a4, + 0xab1c5ed5, + 0xd807aa98, + 0x12835b01, + 0x243185be, + 0x550c7dc3, + 0x72be5d74, + 0x80deb1fe, + 0x9bdc06a7, + 0xc19bf174, + 0xe49b69c1, + 0xefbe4786, + 0x0fc19dc6, + 0x240ca1cc, + 0x2de92c6f, + 0x4a7484aa, + 0x5cb0a9dc, + 0x76f988da, + 0x983e5152, + 0xa831c66d, + 0xb00327c8, + 0xbf597fc7, + 0xc6e00bf3, + 0xd5a79147, + 0x06ca6351, + 0x14292967, + 0x27b70a85, + 0x2e1b2138, + 0x4d2c6dfc, + 0x53380d13, + 0x650a7354, + 0x766a0abb, + 0x81c2c92e, + 0x92722c85, + 0xa2bfe8a1, + 0xa81a664b, + 0xc24b8b70, + 0xc76c51a3, + 0xd192e819, + 0xd6990624, + 0xf40e3585, + 0x106aa070, + 0x19a4c116, + 0x1e376c08, + 0x2748774c, + 0x34b0bcb5, + 0x391c0cb3, + 0x4ed8aa4a, + 0x5b9cca4f, + 0x682e6ff3, + 0x748f82ee, + 0x78a5636f, + 0x84c87814, + 0x8cc70208, + 0x90befffa, + 0xa4506ceb, + 0xbef9a3f7, + 0xc67178f2, +} + +func block(dig *digest, p []byte) { + var w [64]uint32 + h0, h1, h2, h3, h4, h5, h6, h7 := dig.h[0], dig.h[1], dig.h[2], dig.h[3], dig.h[4], dig.h[5], dig.h[6], dig.h[7] + for len(p) >= chunk { + // Can interlace the computation of w with the + // rounds below if needed for speed. 
+ for i := 0; i < 16; i++ { + j := i * 4 + w[i] = uint32(p[j])<<24 | uint32(p[j+1])<<16 | uint32(p[j+2])<<8 | uint32(p[j+3]) + } + for i := 16; i < 64; i++ { + v1 := w[i-2] + t1 := (v1>>17 | v1<<(32-17)) ^ (v1>>19 | v1<<(32-19)) ^ (v1 >> 10) + v2 := w[i-15] + t2 := (v2>>7 | v2<<(32-7)) ^ (v2>>18 | v2<<(32-18)) ^ (v2 >> 3) + w[i] = t1 + w[i-7] + t2 + w[i-16] + } + + a, b, c, d, e, f, g, h := h0, h1, h2, h3, h4, h5, h6, h7 + + for i := 0; i < 64; i++ { + t1 := h + ((e>>6 | e<<(32-6)) ^ (e>>11 | e<<(32-11)) ^ (e>>25 | e<<(32-25))) + ((e & f) ^ (^e & g)) + _K[i] + w[i] + + t2 := ((a>>2 | a<<(32-2)) ^ (a>>13 | a<<(32-13)) ^ (a>>22 | a<<(32-22))) + ((a & b) ^ (a & c) ^ (b & c)) + + h = g + g = f + f = e + e = d + t1 + d = c + c = b + b = a + a = t1 + t2 + } + + h0 += a + h1 += b + h2 += c + h3 += d + h4 += e + h5 += f + h6 += g + h7 += h + + p = p[chunk:] + } + + dig.h[0], dig.h[1], dig.h[2], dig.h[3], dig.h[4], dig.h[5], dig.h[6], dig.h[7] = h0, h1, h2, h3, h4, h5, h6, h7 +} diff --git a/vendor/github.com/docker/distribution/vendor/github.com/stevvooe/resumable/sha256/sha256block_386.s b/vendor/github.com/docker/distribution/vendor/github.com/stevvooe/resumable/sha256/sha256block_386.s new file mode 100644 index 000000000..73ae2bf30 --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/github.com/stevvooe/resumable/sha256/sha256block_386.s @@ -0,0 +1,283 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// SHA256 block routine. See sha256block.go for Go equivalent. +// +// The algorithm is detailed in FIPS 180-4: +// +// http://csrc.nist.gov/publications/fips/fips180-4/fips-180-4.pdf +// +// Wt = Mt; for 0 <= t <= 15 +// Wt = SIGMA1(Wt-2) + SIGMA0(Wt-15) + Wt-16; for 16 <= t <= 63 +// +// a = H0 +// b = H1 +// c = H2 +// d = H3 +// e = H4 +// f = H5 +// g = H6 +// h = H7 +// +// for t = 0 to 63 { +// T1 = h + BIGSIGMA1(e) + Ch(e,f,g) + Kt + Wt +// T2 = BIGSIGMA0(a) + Maj(a,b,c) +// h = g +// g = f +// f = e +// e = d + T1 +// d = c +// c = b +// b = a +// a = T1 + T2 +// } +// +// H0 = a + H0 +// H1 = b + H1 +// H2 = c + H2 +// H3 = d + H3 +// H4 = e + H4 +// H5 = f + H5 +// H6 = g + H6 +// H7 = h + H7 + +// Wt = Mt; for 0 <= t <= 15 +#define MSGSCHEDULE0(index) \ + MOVL (index*4)(SI), AX; \ + BSWAPL AX; \ + MOVL AX, (index*4)(BP) + +// Wt = SIGMA1(Wt-2) + Wt-7 + SIGMA0(Wt-15) + Wt-16; for 16 <= t <= 63 +// SIGMA0(x) = ROTR(7,x) XOR ROTR(18,x) XOR SHR(3,x) +// SIGMA1(x) = ROTR(17,x) XOR ROTR(19,x) XOR SHR(10,x) +#define MSGSCHEDULE1(index) \ + MOVL ((index-2)*4)(BP), AX; \ + MOVL AX, CX; \ + RORL $17, AX; \ + MOVL CX, DX; \ + RORL $19, CX; \ + SHRL $10, DX; \ + MOVL ((index-15)*4)(BP), BX; \ + XORL CX, AX; \ + MOVL BX, CX; \ + XORL DX, AX; \ + RORL $7, BX; \ + MOVL CX, DX; \ + SHRL $3, DX; \ + RORL $18, CX; \ + ADDL ((index-7)*4)(BP), AX; \ + XORL CX, BX; \ + XORL DX, BX; \ + ADDL ((index-16)*4)(BP), BX; \ + ADDL BX, AX; \ + MOVL AX, ((index)*4)(BP) + +// Calculate T1 in AX - uses AX, BX, CX and DX registers. +// Wt is passed in AX. 
+// T1 = h + BIGSIGMA1(e) + Ch(e, f, g) + Kt + Wt +// BIGSIGMA1(x) = ROTR(6,x) XOR ROTR(11,x) XOR ROTR(25,x) +// Ch(x, y, z) = (x AND y) XOR (NOT x AND z) +#define SHA256T1(const, e, f, g, h) \ + MOVL (h*4)(DI), BX; \ + ADDL AX, BX; \ + MOVL (e*4)(DI), AX; \ + ADDL $const, BX; \ + MOVL (e*4)(DI), CX; \ + RORL $6, AX; \ + MOVL (e*4)(DI), DX; \ + RORL $11, CX; \ + XORL CX, AX; \ + MOVL (e*4)(DI), CX; \ + RORL $25, DX; \ + ANDL (f*4)(DI), CX; \ + XORL AX, DX; \ + MOVL (e*4)(DI), AX; \ + NOTL AX; \ + ADDL DX, BX; \ + ANDL (g*4)(DI), AX; \ + XORL CX, AX; \ + ADDL BX, AX + +// Calculate T2 in BX - uses AX, BX, CX and DX registers. +// T2 = BIGSIGMA0(a) + Maj(a, b, c) +// BIGSIGMA0(x) = ROTR(2,x) XOR ROTR(13,x) XOR ROTR(22,x) +// Maj(x, y, z) = (x AND y) XOR (x AND z) XOR (y AND z) +#define SHA256T2(a, b, c) \ + MOVL (a*4)(DI), AX; \ + MOVL (c*4)(DI), BX; \ + RORL $2, AX; \ + MOVL (a*4)(DI), DX; \ + ANDL (b*4)(DI), BX; \ + RORL $13, DX; \ + MOVL (a*4)(DI), CX; \ + ANDL (c*4)(DI), CX; \ + XORL DX, AX; \ + XORL CX, BX; \ + MOVL (a*4)(DI), DX; \ + MOVL (b*4)(DI), CX; \ + RORL $22, DX; \ + ANDL (a*4)(DI), CX; \ + XORL CX, BX; \ + XORL DX, AX; \ + ADDL AX, BX + +// Calculate T1 and T2, then e = d + T1 and a = T1 + T2. +// The values for e and a are stored in d and h, ready for rotation. +#define SHA256ROUND(index, const, a, b, c, d, e, f, g, h) \ + SHA256T1(const, e, f, g, h); \ + MOVL AX, 292(SP); \ + SHA256T2(a, b, c); \ + MOVL 292(SP), AX; \ + ADDL AX, BX; \ + ADDL AX, (d*4)(DI); \ + MOVL BX, (h*4)(DI) + +#define SHA256ROUND0(index, const, a, b, c, d, e, f, g, h) \ + MSGSCHEDULE0(index); \ + SHA256ROUND(index, const, a, b, c, d, e, f, g, h) + +#define SHA256ROUND1(index, const, a, b, c, d, e, f, g, h) \ + MSGSCHEDULE1(index); \ + SHA256ROUND(index, const, a, b, c, d, e, f, g, h) + +TEXT ·block(SB),0,$296-12 + MOVL p_base+4(FP), SI + MOVL p_len+8(FP), DX + SHRL $6, DX + SHLL $6, DX + + LEAL (SI)(DX*1), DI + MOVL DI, 288(SP) + CMPL SI, DI + JEQ end + + LEAL 256(SP), DI // variables + + MOVL dig+0(FP), BP + MOVL (0*4)(BP), AX // a = H0 + MOVL AX, (0*4)(DI) + MOVL (1*4)(BP), BX // b = H1 + MOVL BX, (1*4)(DI) + MOVL (2*4)(BP), CX // c = H2 + MOVL CX, (2*4)(DI) + MOVL (3*4)(BP), DX // d = H3 + MOVL DX, (3*4)(DI) + MOVL (4*4)(BP), AX // e = H4 + MOVL AX, (4*4)(DI) + MOVL (5*4)(BP), BX // f = H5 + MOVL BX, (5*4)(DI) + MOVL (6*4)(BP), CX // g = H6 + MOVL CX, (6*4)(DI) + MOVL (7*4)(BP), DX // h = H7 + MOVL DX, (7*4)(DI) + +loop: + MOVL SP, BP // message schedule + + SHA256ROUND0(0, 0x428a2f98, 0, 1, 2, 3, 4, 5, 6, 7) + SHA256ROUND0(1, 0x71374491, 7, 0, 1, 2, 3, 4, 5, 6) + SHA256ROUND0(2, 0xb5c0fbcf, 6, 7, 0, 1, 2, 3, 4, 5) + SHA256ROUND0(3, 0xe9b5dba5, 5, 6, 7, 0, 1, 2, 3, 4) + SHA256ROUND0(4, 0x3956c25b, 4, 5, 6, 7, 0, 1, 2, 3) + SHA256ROUND0(5, 0x59f111f1, 3, 4, 5, 6, 7, 0, 1, 2) + SHA256ROUND0(6, 0x923f82a4, 2, 3, 4, 5, 6, 7, 0, 1) + SHA256ROUND0(7, 0xab1c5ed5, 1, 2, 3, 4, 5, 6, 7, 0) + SHA256ROUND0(8, 0xd807aa98, 0, 1, 2, 3, 4, 5, 6, 7) + SHA256ROUND0(9, 0x12835b01, 7, 0, 1, 2, 3, 4, 5, 6) + SHA256ROUND0(10, 0x243185be, 6, 7, 0, 1, 2, 3, 4, 5) + SHA256ROUND0(11, 0x550c7dc3, 5, 6, 7, 0, 1, 2, 3, 4) + SHA256ROUND0(12, 0x72be5d74, 4, 5, 6, 7, 0, 1, 2, 3) + SHA256ROUND0(13, 0x80deb1fe, 3, 4, 5, 6, 7, 0, 1, 2) + SHA256ROUND0(14, 0x9bdc06a7, 2, 3, 4, 5, 6, 7, 0, 1) + SHA256ROUND0(15, 0xc19bf174, 1, 2, 3, 4, 5, 6, 7, 0) + + SHA256ROUND1(16, 0xe49b69c1, 0, 1, 2, 3, 4, 5, 6, 7) + SHA256ROUND1(17, 0xefbe4786, 7, 0, 1, 2, 3, 4, 5, 6) + SHA256ROUND1(18, 0x0fc19dc6, 6, 7, 0, 1, 2, 3, 4, 5) + SHA256ROUND1(19, 
0x240ca1cc, 5, 6, 7, 0, 1, 2, 3, 4) + SHA256ROUND1(20, 0x2de92c6f, 4, 5, 6, 7, 0, 1, 2, 3) + SHA256ROUND1(21, 0x4a7484aa, 3, 4, 5, 6, 7, 0, 1, 2) + SHA256ROUND1(22, 0x5cb0a9dc, 2, 3, 4, 5, 6, 7, 0, 1) + SHA256ROUND1(23, 0x76f988da, 1, 2, 3, 4, 5, 6, 7, 0) + SHA256ROUND1(24, 0x983e5152, 0, 1, 2, 3, 4, 5, 6, 7) + SHA256ROUND1(25, 0xa831c66d, 7, 0, 1, 2, 3, 4, 5, 6) + SHA256ROUND1(26, 0xb00327c8, 6, 7, 0, 1, 2, 3, 4, 5) + SHA256ROUND1(27, 0xbf597fc7, 5, 6, 7, 0, 1, 2, 3, 4) + SHA256ROUND1(28, 0xc6e00bf3, 4, 5, 6, 7, 0, 1, 2, 3) + SHA256ROUND1(29, 0xd5a79147, 3, 4, 5, 6, 7, 0, 1, 2) + SHA256ROUND1(30, 0x06ca6351, 2, 3, 4, 5, 6, 7, 0, 1) + SHA256ROUND1(31, 0x14292967, 1, 2, 3, 4, 5, 6, 7, 0) + SHA256ROUND1(32, 0x27b70a85, 0, 1, 2, 3, 4, 5, 6, 7) + SHA256ROUND1(33, 0x2e1b2138, 7, 0, 1, 2, 3, 4, 5, 6) + SHA256ROUND1(34, 0x4d2c6dfc, 6, 7, 0, 1, 2, 3, 4, 5) + SHA256ROUND1(35, 0x53380d13, 5, 6, 7, 0, 1, 2, 3, 4) + SHA256ROUND1(36, 0x650a7354, 4, 5, 6, 7, 0, 1, 2, 3) + SHA256ROUND1(37, 0x766a0abb, 3, 4, 5, 6, 7, 0, 1, 2) + SHA256ROUND1(38, 0x81c2c92e, 2, 3, 4, 5, 6, 7, 0, 1) + SHA256ROUND1(39, 0x92722c85, 1, 2, 3, 4, 5, 6, 7, 0) + SHA256ROUND1(40, 0xa2bfe8a1, 0, 1, 2, 3, 4, 5, 6, 7) + SHA256ROUND1(41, 0xa81a664b, 7, 0, 1, 2, 3, 4, 5, 6) + SHA256ROUND1(42, 0xc24b8b70, 6, 7, 0, 1, 2, 3, 4, 5) + SHA256ROUND1(43, 0xc76c51a3, 5, 6, 7, 0, 1, 2, 3, 4) + SHA256ROUND1(44, 0xd192e819, 4, 5, 6, 7, 0, 1, 2, 3) + SHA256ROUND1(45, 0xd6990624, 3, 4, 5, 6, 7, 0, 1, 2) + SHA256ROUND1(46, 0xf40e3585, 2, 3, 4, 5, 6, 7, 0, 1) + SHA256ROUND1(47, 0x106aa070, 1, 2, 3, 4, 5, 6, 7, 0) + SHA256ROUND1(48, 0x19a4c116, 0, 1, 2, 3, 4, 5, 6, 7) + SHA256ROUND1(49, 0x1e376c08, 7, 0, 1, 2, 3, 4, 5, 6) + SHA256ROUND1(50, 0x2748774c, 6, 7, 0, 1, 2, 3, 4, 5) + SHA256ROUND1(51, 0x34b0bcb5, 5, 6, 7, 0, 1, 2, 3, 4) + SHA256ROUND1(52, 0x391c0cb3, 4, 5, 6, 7, 0, 1, 2, 3) + SHA256ROUND1(53, 0x4ed8aa4a, 3, 4, 5, 6, 7, 0, 1, 2) + SHA256ROUND1(54, 0x5b9cca4f, 2, 3, 4, 5, 6, 7, 0, 1) + SHA256ROUND1(55, 0x682e6ff3, 1, 2, 3, 4, 5, 6, 7, 0) + SHA256ROUND1(56, 0x748f82ee, 0, 1, 2, 3, 4, 5, 6, 7) + SHA256ROUND1(57, 0x78a5636f, 7, 0, 1, 2, 3, 4, 5, 6) + SHA256ROUND1(58, 0x84c87814, 6, 7, 0, 1, 2, 3, 4, 5) + SHA256ROUND1(59, 0x8cc70208, 5, 6, 7, 0, 1, 2, 3, 4) + SHA256ROUND1(60, 0x90befffa, 4, 5, 6, 7, 0, 1, 2, 3) + SHA256ROUND1(61, 0xa4506ceb, 3, 4, 5, 6, 7, 0, 1, 2) + SHA256ROUND1(62, 0xbef9a3f7, 2, 3, 4, 5, 6, 7, 0, 1) + SHA256ROUND1(63, 0xc67178f2, 1, 2, 3, 4, 5, 6, 7, 0) + + MOVL dig+0(FP), BP + MOVL (0*4)(BP), AX // H0 = a + H0 + ADDL (0*4)(DI), AX + MOVL AX, (0*4)(DI) + MOVL AX, (0*4)(BP) + MOVL (1*4)(BP), BX // H1 = b + H1 + ADDL (1*4)(DI), BX + MOVL BX, (1*4)(DI) + MOVL BX, (1*4)(BP) + MOVL (2*4)(BP), CX // H2 = c + H2 + ADDL (2*4)(DI), CX + MOVL CX, (2*4)(DI) + MOVL CX, (2*4)(BP) + MOVL (3*4)(BP), DX // H3 = d + H3 + ADDL (3*4)(DI), DX + MOVL DX, (3*4)(DI) + MOVL DX, (3*4)(BP) + MOVL (4*4)(BP), AX // H4 = e + H4 + ADDL (4*4)(DI), AX + MOVL AX, (4*4)(DI) + MOVL AX, (4*4)(BP) + MOVL (5*4)(BP), BX // H5 = f + H5 + ADDL (5*4)(DI), BX + MOVL BX, (5*4)(DI) + MOVL BX, (5*4)(BP) + MOVL (6*4)(BP), CX // H6 = g + H6 + ADDL (6*4)(DI), CX + MOVL CX, (6*4)(DI) + MOVL CX, (6*4)(BP) + MOVL (7*4)(BP), DX // H7 = h + H7 + ADDL (7*4)(DI), DX + MOVL DX, (7*4)(DI) + MOVL DX, (7*4)(BP) + + ADDL $64, SI + CMPL SI, 288(SP) + JB loop + +end: + RET diff --git a/vendor/github.com/docker/distribution/vendor/github.com/stevvooe/resumable/sha256/sha256block_amd64.s b/vendor/github.com/docker/distribution/vendor/github.com/stevvooe/resumable/sha256/sha256block_amd64.s new 
file mode 100644 index 000000000..868eaed48 --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/github.com/stevvooe/resumable/sha256/sha256block_amd64.s @@ -0,0 +1,256 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +#include "textflag.h" + +// SHA256 block routine. See sha256block.go for Go equivalent. +// +// The algorithm is detailed in FIPS 180-4: +// +// http://csrc.nist.gov/publications/fips/fips180-4/fips-180-4.pdf +// +// Wt = Mt; for 0 <= t <= 15 +// Wt = SIGMA1(Wt-2) + SIGMA0(Wt-15) + Wt-16; for 16 <= t <= 63 +// +// a = H0 +// b = H1 +// c = H2 +// d = H3 +// e = H4 +// f = H5 +// g = H6 +// h = H7 +// +// for t = 0 to 63 { +// T1 = h + BIGSIGMA1(e) + Ch(e,f,g) + Kt + Wt +// T2 = BIGSIGMA0(a) + Maj(a,b,c) +// h = g +// g = f +// f = e +// e = d + T1 +// d = c +// c = b +// b = a +// a = T1 + T2 +// } +// +// H0 = a + H0 +// H1 = b + H1 +// H2 = c + H2 +// H3 = d + H3 +// H4 = e + H4 +// H5 = f + H5 +// H6 = g + H6 +// H7 = h + H7 + +// Wt = Mt; for 0 <= t <= 15 +#define MSGSCHEDULE0(index) \ + MOVL (index*4)(SI), AX; \ + BSWAPL AX; \ + MOVL AX, (index*4)(BP) + +// Wt = SIGMA1(Wt-2) + Wt-7 + SIGMA0(Wt-15) + Wt-16; for 16 <= t <= 63 +// SIGMA0(x) = ROTR(7,x) XOR ROTR(18,x) XOR SHR(3,x) +// SIGMA1(x) = ROTR(17,x) XOR ROTR(19,x) XOR SHR(10,x) +#define MSGSCHEDULE1(index) \ + MOVL ((index-2)*4)(BP), AX; \ + MOVL AX, CX; \ + RORL $17, AX; \ + MOVL CX, DX; \ + RORL $19, CX; \ + SHRL $10, DX; \ + MOVL ((index-15)*4)(BP), BX; \ + XORL CX, AX; \ + MOVL BX, CX; \ + XORL DX, AX; \ + RORL $7, BX; \ + MOVL CX, DX; \ + SHRL $3, DX; \ + RORL $18, CX; \ + ADDL ((index-7)*4)(BP), AX; \ + XORL CX, BX; \ + XORL DX, BX; \ + ADDL ((index-16)*4)(BP), BX; \ + ADDL BX, AX; \ + MOVL AX, ((index)*4)(BP) + +// Calculate T1 in AX - uses AX, CX and DX registers. +// h is also used as an accumulator. Wt is passed in AX. +// T1 = h + BIGSIGMA1(e) + Ch(e, f, g) + Kt + Wt +// BIGSIGMA1(x) = ROTR(6,x) XOR ROTR(11,x) XOR ROTR(25,x) +// Ch(x, y, z) = (x AND y) XOR (NOT x AND z) +#define SHA256T1(const, e, f, g, h) \ + ADDL AX, h; \ + MOVL e, AX; \ + ADDL $const, h; \ + MOVL e, CX; \ + RORL $6, AX; \ + MOVL e, DX; \ + RORL $11, CX; \ + XORL CX, AX; \ + MOVL e, CX; \ + RORL $25, DX; \ + ANDL f, CX; \ + XORL AX, DX; \ + MOVL e, AX; \ + NOTL AX; \ + ADDL DX, h; \ + ANDL g, AX; \ + XORL CX, AX; \ + ADDL h, AX + +// Calculate T2 in BX - uses BX, CX, DX and DI registers. +// T2 = BIGSIGMA0(a) + Maj(a, b, c) +// BIGSIGMA0(x) = ROTR(2,x) XOR ROTR(13,x) XOR ROTR(22,x) +// Maj(x, y, z) = (x AND y) XOR (x AND z) XOR (y AND z) +#define SHA256T2(a, b, c) \ + MOVL a, DI; \ + MOVL c, BX; \ + RORL $2, DI; \ + MOVL a, DX; \ + ANDL b, BX; \ + RORL $13, DX; \ + MOVL a, CX; \ + ANDL c, CX; \ + XORL DX, DI; \ + XORL CX, BX; \ + MOVL a, DX; \ + MOVL b, CX; \ + RORL $22, DX; \ + ANDL a, CX; \ + XORL CX, BX; \ + XORL DX, DI; \ + ADDL DI, BX + +// Calculate T1 and T2, then e = d + T1 and a = T1 + T2. +// The values for e and a are stored in d and h, ready for rotation. 
+#define SHA256ROUND(index, const, a, b, c, d, e, f, g, h) \ + SHA256T1(const, e, f, g, h); \ + SHA256T2(a, b, c); \ + MOVL BX, h; \ + ADDL AX, d; \ + ADDL AX, h + +#define SHA256ROUND0(index, const, a, b, c, d, e, f, g, h) \ + MSGSCHEDULE0(index); \ + SHA256ROUND(index, const, a, b, c, d, e, f, g, h) + +#define SHA256ROUND1(index, const, a, b, c, d, e, f, g, h) \ + MSGSCHEDULE1(index); \ + SHA256ROUND(index, const, a, b, c, d, e, f, g, h) + +TEXT ·block(SB),0,$264-32 + MOVQ p_base+8(FP), SI + MOVQ p_len+16(FP), DX + SHRQ $6, DX + SHLQ $6, DX + + LEAQ (SI)(DX*1), DI + MOVQ DI, 256(SP) + CMPQ SI, DI + JEQ end + + MOVQ dig+0(FP), BP + MOVL (0*4)(BP), R8 // a = H0 + MOVL (1*4)(BP), R9 // b = H1 + MOVL (2*4)(BP), R10 // c = H2 + MOVL (3*4)(BP), R11 // d = H3 + MOVL (4*4)(BP), R12 // e = H4 + MOVL (5*4)(BP), R13 // f = H5 + MOVL (6*4)(BP), R14 // g = H6 + MOVL (7*4)(BP), R15 // h = H7 + +loop: + MOVQ SP, BP // message schedule + + SHA256ROUND0(0, 0x428a2f98, R8, R9, R10, R11, R12, R13, R14, R15) + SHA256ROUND0(1, 0x71374491, R15, R8, R9, R10, R11, R12, R13, R14) + SHA256ROUND0(2, 0xb5c0fbcf, R14, R15, R8, R9, R10, R11, R12, R13) + SHA256ROUND0(3, 0xe9b5dba5, R13, R14, R15, R8, R9, R10, R11, R12) + SHA256ROUND0(4, 0x3956c25b, R12, R13, R14, R15, R8, R9, R10, R11) + SHA256ROUND0(5, 0x59f111f1, R11, R12, R13, R14, R15, R8, R9, R10) + SHA256ROUND0(6, 0x923f82a4, R10, R11, R12, R13, R14, R15, R8, R9) + SHA256ROUND0(7, 0xab1c5ed5, R9, R10, R11, R12, R13, R14, R15, R8) + SHA256ROUND0(8, 0xd807aa98, R8, R9, R10, R11, R12, R13, R14, R15) + SHA256ROUND0(9, 0x12835b01, R15, R8, R9, R10, R11, R12, R13, R14) + SHA256ROUND0(10, 0x243185be, R14, R15, R8, R9, R10, R11, R12, R13) + SHA256ROUND0(11, 0x550c7dc3, R13, R14, R15, R8, R9, R10, R11, R12) + SHA256ROUND0(12, 0x72be5d74, R12, R13, R14, R15, R8, R9, R10, R11) + SHA256ROUND0(13, 0x80deb1fe, R11, R12, R13, R14, R15, R8, R9, R10) + SHA256ROUND0(14, 0x9bdc06a7, R10, R11, R12, R13, R14, R15, R8, R9) + SHA256ROUND0(15, 0xc19bf174, R9, R10, R11, R12, R13, R14, R15, R8) + + SHA256ROUND1(16, 0xe49b69c1, R8, R9, R10, R11, R12, R13, R14, R15) + SHA256ROUND1(17, 0xefbe4786, R15, R8, R9, R10, R11, R12, R13, R14) + SHA256ROUND1(18, 0x0fc19dc6, R14, R15, R8, R9, R10, R11, R12, R13) + SHA256ROUND1(19, 0x240ca1cc, R13, R14, R15, R8, R9, R10, R11, R12) + SHA256ROUND1(20, 0x2de92c6f, R12, R13, R14, R15, R8, R9, R10, R11) + SHA256ROUND1(21, 0x4a7484aa, R11, R12, R13, R14, R15, R8, R9, R10) + SHA256ROUND1(22, 0x5cb0a9dc, R10, R11, R12, R13, R14, R15, R8, R9) + SHA256ROUND1(23, 0x76f988da, R9, R10, R11, R12, R13, R14, R15, R8) + SHA256ROUND1(24, 0x983e5152, R8, R9, R10, R11, R12, R13, R14, R15) + SHA256ROUND1(25, 0xa831c66d, R15, R8, R9, R10, R11, R12, R13, R14) + SHA256ROUND1(26, 0xb00327c8, R14, R15, R8, R9, R10, R11, R12, R13) + SHA256ROUND1(27, 0xbf597fc7, R13, R14, R15, R8, R9, R10, R11, R12) + SHA256ROUND1(28, 0xc6e00bf3, R12, R13, R14, R15, R8, R9, R10, R11) + SHA256ROUND1(29, 0xd5a79147, R11, R12, R13, R14, R15, R8, R9, R10) + SHA256ROUND1(30, 0x06ca6351, R10, R11, R12, R13, R14, R15, R8, R9) + SHA256ROUND1(31, 0x14292967, R9, R10, R11, R12, R13, R14, R15, R8) + SHA256ROUND1(32, 0x27b70a85, R8, R9, R10, R11, R12, R13, R14, R15) + SHA256ROUND1(33, 0x2e1b2138, R15, R8, R9, R10, R11, R12, R13, R14) + SHA256ROUND1(34, 0x4d2c6dfc, R14, R15, R8, R9, R10, R11, R12, R13) + SHA256ROUND1(35, 0x53380d13, R13, R14, R15, R8, R9, R10, R11, R12) + SHA256ROUND1(36, 0x650a7354, R12, R13, R14, R15, R8, R9, R10, R11) + SHA256ROUND1(37, 0x766a0abb, R11, R12, R13, R14, R15, R8, R9, R10) + 
SHA256ROUND1(38, 0x81c2c92e, R10, R11, R12, R13, R14, R15, R8, R9) + SHA256ROUND1(39, 0x92722c85, R9, R10, R11, R12, R13, R14, R15, R8) + SHA256ROUND1(40, 0xa2bfe8a1, R8, R9, R10, R11, R12, R13, R14, R15) + SHA256ROUND1(41, 0xa81a664b, R15, R8, R9, R10, R11, R12, R13, R14) + SHA256ROUND1(42, 0xc24b8b70, R14, R15, R8, R9, R10, R11, R12, R13) + SHA256ROUND1(43, 0xc76c51a3, R13, R14, R15, R8, R9, R10, R11, R12) + SHA256ROUND1(44, 0xd192e819, R12, R13, R14, R15, R8, R9, R10, R11) + SHA256ROUND1(45, 0xd6990624, R11, R12, R13, R14, R15, R8, R9, R10) + SHA256ROUND1(46, 0xf40e3585, R10, R11, R12, R13, R14, R15, R8, R9) + SHA256ROUND1(47, 0x106aa070, R9, R10, R11, R12, R13, R14, R15, R8) + SHA256ROUND1(48, 0x19a4c116, R8, R9, R10, R11, R12, R13, R14, R15) + SHA256ROUND1(49, 0x1e376c08, R15, R8, R9, R10, R11, R12, R13, R14) + SHA256ROUND1(50, 0x2748774c, R14, R15, R8, R9, R10, R11, R12, R13) + SHA256ROUND1(51, 0x34b0bcb5, R13, R14, R15, R8, R9, R10, R11, R12) + SHA256ROUND1(52, 0x391c0cb3, R12, R13, R14, R15, R8, R9, R10, R11) + SHA256ROUND1(53, 0x4ed8aa4a, R11, R12, R13, R14, R15, R8, R9, R10) + SHA256ROUND1(54, 0x5b9cca4f, R10, R11, R12, R13, R14, R15, R8, R9) + SHA256ROUND1(55, 0x682e6ff3, R9, R10, R11, R12, R13, R14, R15, R8) + SHA256ROUND1(56, 0x748f82ee, R8, R9, R10, R11, R12, R13, R14, R15) + SHA256ROUND1(57, 0x78a5636f, R15, R8, R9, R10, R11, R12, R13, R14) + SHA256ROUND1(58, 0x84c87814, R14, R15, R8, R9, R10, R11, R12, R13) + SHA256ROUND1(59, 0x8cc70208, R13, R14, R15, R8, R9, R10, R11, R12) + SHA256ROUND1(60, 0x90befffa, R12, R13, R14, R15, R8, R9, R10, R11) + SHA256ROUND1(61, 0xa4506ceb, R11, R12, R13, R14, R15, R8, R9, R10) + SHA256ROUND1(62, 0xbef9a3f7, R10, R11, R12, R13, R14, R15, R8, R9) + SHA256ROUND1(63, 0xc67178f2, R9, R10, R11, R12, R13, R14, R15, R8) + + MOVQ dig+0(FP), BP + ADDL (0*4)(BP), R8 // H0 = a + H0 + MOVL R8, (0*4)(BP) + ADDL (1*4)(BP), R9 // H1 = b + H1 + MOVL R9, (1*4)(BP) + ADDL (2*4)(BP), R10 // H2 = c + H2 + MOVL R10, (2*4)(BP) + ADDL (3*4)(BP), R11 // H3 = d + H3 + MOVL R11, (3*4)(BP) + ADDL (4*4)(BP), R12 // H4 = e + H4 + MOVL R12, (4*4)(BP) + ADDL (5*4)(BP), R13 // H5 = f + H5 + MOVL R13, (5*4)(BP) + ADDL (6*4)(BP), R14 // H6 = g + H6 + MOVL R14, (6*4)(BP) + ADDL (7*4)(BP), R15 // H7 = h + H7 + MOVL R15, (7*4)(BP) + + ADDQ $64, SI + CMPQ SI, 256(SP) + JB loop + +end: + RET diff --git a/vendor/github.com/docker/distribution/vendor/github.com/stevvooe/resumable/sha256/sha256block_decl.go b/vendor/github.com/docker/distribution/vendor/github.com/stevvooe/resumable/sha256/sha256block_decl.go new file mode 100644 index 000000000..a50c97871 --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/github.com/stevvooe/resumable/sha256/sha256block_decl.go @@ -0,0 +1,11 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +// +build 386 amd64 + +package sha256 + +//go:noescape + +func block(dig *digest, p []byte) diff --git a/vendor/github.com/docker/distribution/vendor/github.com/stevvooe/resumable/sha512/resume.go b/vendor/github.com/docker/distribution/vendor/github.com/stevvooe/resumable/sha512/resume.go new file mode 100644 index 000000000..55b433e79 --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/github.com/stevvooe/resumable/sha512/resume.go @@ -0,0 +1,53 @@ +package sha512 + +import ( + "bytes" + "encoding/gob" + + // import to ensure that our init function runs after the standard package + _ "crypto/sha512" +) + +// Len returns the number of bytes which have been written to the digest. +func (d *digest) Len() int64 { + return int64(d.len) +} + +// State returns a snapshot of the state of the digest. +func (d *digest) State() ([]byte, error) { + var buf bytes.Buffer + encoder := gob.NewEncoder(&buf) + + // We encode this way so that we do not have + // to export these fields of the digest struct. + vals := []interface{}{ + d.h, d.x, d.nx, d.len, d.is384, + } + + for _, val := range vals { + if err := encoder.Encode(val); err != nil { + return nil, err + } + } + + return buf.Bytes(), nil +} + +// Restore resets the digest to the given state. +func (d *digest) Restore(state []byte) error { + decoder := gob.NewDecoder(bytes.NewReader(state)) + + // We decode this way so that we do not have + // to export these fields of the digest struct. + vals := []interface{}{ + &d.h, &d.x, &d.nx, &d.len, &d.is384, + } + + for _, val := range vals { + if err := decoder.Decode(val); err != nil { + return err + } + } + + return nil +} diff --git a/vendor/github.com/docker/distribution/vendor/github.com/stevvooe/resumable/sha512/sha512.go b/vendor/github.com/docker/distribution/vendor/github.com/stevvooe/resumable/sha512/sha512.go new file mode 100644 index 000000000..bca7a91e2 --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/github.com/stevvooe/resumable/sha512/sha512.go @@ -0,0 +1,198 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package sha512 implements the SHA384 and SHA512 hash algorithms as defined +// in FIPS 180-2. +package sha512 + +import ( + "crypto" + "hash" +) + +func init() { + crypto.RegisterHash(crypto.SHA384, New384) + crypto.RegisterHash(crypto.SHA512, New) +} + +// The size of a SHA512 checksum in bytes. +const Size = 64 + +// The size of a SHA384 checksum in bytes. +const Size384 = 48 + +// The blocksize of SHA512 and SHA384 in bytes. +const BlockSize = 128 + +const ( + chunk = 128 + init0 = 0x6a09e667f3bcc908 + init1 = 0xbb67ae8584caa73b + init2 = 0x3c6ef372fe94f82b + init3 = 0xa54ff53a5f1d36f1 + init4 = 0x510e527fade682d1 + init5 = 0x9b05688c2b3e6c1f + init6 = 0x1f83d9abfb41bd6b + init7 = 0x5be0cd19137e2179 + init0_384 = 0xcbbb9d5dc1059ed8 + init1_384 = 0x629a292a367cd507 + init2_384 = 0x9159015a3070dd17 + init3_384 = 0x152fecd8f70e5939 + init4_384 = 0x67332667ffc00b31 + init5_384 = 0x8eb44a8768581511 + init6_384 = 0xdb0c2e0d64f98fa7 + init7_384 = 0x47b5481dbefa4fa4 +) + +// digest represents the partial evaluation of a checksum. 
+type digest struct { + h [8]uint64 + x [chunk]byte + nx int + len uint64 + is384 bool // mark if this digest is SHA-384 +} + +func (d *digest) Reset() { + if !d.is384 { + d.h[0] = init0 + d.h[1] = init1 + d.h[2] = init2 + d.h[3] = init3 + d.h[4] = init4 + d.h[5] = init5 + d.h[6] = init6 + d.h[7] = init7 + } else { + d.h[0] = init0_384 + d.h[1] = init1_384 + d.h[2] = init2_384 + d.h[3] = init3_384 + d.h[4] = init4_384 + d.h[5] = init5_384 + d.h[6] = init6_384 + d.h[7] = init7_384 + } + d.nx = 0 + d.len = 0 +} + +// New returns a new hash.Hash computing the SHA512 checksum. +func New() hash.Hash { + d := new(digest) + d.Reset() + return d +} + +// New384 returns a new hash.Hash computing the SHA384 checksum. +func New384() hash.Hash { + d := new(digest) + d.is384 = true + d.Reset() + return d +} + +func (d *digest) Size() int { + if !d.is384 { + return Size + } + return Size384 +} + +func (d *digest) BlockSize() int { return BlockSize } + +func (d *digest) Write(p []byte) (nn int, err error) { + nn = len(p) + d.len += uint64(nn) + if d.nx > 0 { + n := copy(d.x[d.nx:], p) + d.nx += n + if d.nx == chunk { + block(d, d.x[:]) + d.nx = 0 + } + p = p[n:] + } + if len(p) >= chunk { + n := len(p) &^ (chunk - 1) + block(d, p[:n]) + p = p[n:] + } + if len(p) > 0 { + d.nx = copy(d.x[:], p) + } + return +} + +func (d0 *digest) Sum(in []byte) []byte { + // Make a copy of d0 so that caller can keep writing and summing. + d := new(digest) + *d = *d0 + hash := d.checkSum() + if d.is384 { + return append(in, hash[:Size384]...) + } + return append(in, hash[:]...) +} + +func (d *digest) checkSum() [Size]byte { + // Padding. Add a 1 bit and 0 bits until 112 bytes mod 128. + len := d.len + var tmp [128]byte + tmp[0] = 0x80 + if len%128 < 112 { + d.Write(tmp[0 : 112-len%128]) + } else { + d.Write(tmp[0 : 128+112-len%128]) + } + + // Length in bits. + len <<= 3 + for i := uint(0); i < 16; i++ { + tmp[i] = byte(len >> (120 - 8*i)) + } + d.Write(tmp[0:16]) + + if d.nx != 0 { + panic("d.nx != 0") + } + + h := d.h[:] + if d.is384 { + h = d.h[:6] + } + + var digest [Size]byte + for i, s := range h { + digest[i*8] = byte(s >> 56) + digest[i*8+1] = byte(s >> 48) + digest[i*8+2] = byte(s >> 40) + digest[i*8+3] = byte(s >> 32) + digest[i*8+4] = byte(s >> 24) + digest[i*8+5] = byte(s >> 16) + digest[i*8+6] = byte(s >> 8) + digest[i*8+7] = byte(s) + } + + return digest +} + +// Sum512 returns the SHA512 checksum of the data. +func Sum512(data []byte) [Size]byte { + var d digest + d.Reset() + d.Write(data) + return d.checkSum() +} + +// Sum384 returns the SHA384 checksum of the data. +func Sum384(data []byte) (sum384 [Size384]byte) { + var d digest + d.is384 = true + d.Reset() + d.Write(data) + sum := d.checkSum() + copy(sum384[:], sum[:Size384]) + return +} diff --git a/vendor/github.com/docker/distribution/vendor/github.com/stevvooe/resumable/sha512/sha512block.go b/vendor/github.com/docker/distribution/vendor/github.com/stevvooe/resumable/sha512/sha512block.go new file mode 100644 index 000000000..648ae8f7e --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/github.com/stevvooe/resumable/sha512/sha512block.go @@ -0,0 +1,144 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !amd64 + +// SHA512 block step. +// In its own file so that a faster assembly or C version +// can be substituted easily. 
+ +package sha512 + +var _K = []uint64{ + 0x428a2f98d728ae22, + 0x7137449123ef65cd, + 0xb5c0fbcfec4d3b2f, + 0xe9b5dba58189dbbc, + 0x3956c25bf348b538, + 0x59f111f1b605d019, + 0x923f82a4af194f9b, + 0xab1c5ed5da6d8118, + 0xd807aa98a3030242, + 0x12835b0145706fbe, + 0x243185be4ee4b28c, + 0x550c7dc3d5ffb4e2, + 0x72be5d74f27b896f, + 0x80deb1fe3b1696b1, + 0x9bdc06a725c71235, + 0xc19bf174cf692694, + 0xe49b69c19ef14ad2, + 0xefbe4786384f25e3, + 0x0fc19dc68b8cd5b5, + 0x240ca1cc77ac9c65, + 0x2de92c6f592b0275, + 0x4a7484aa6ea6e483, + 0x5cb0a9dcbd41fbd4, + 0x76f988da831153b5, + 0x983e5152ee66dfab, + 0xa831c66d2db43210, + 0xb00327c898fb213f, + 0xbf597fc7beef0ee4, + 0xc6e00bf33da88fc2, + 0xd5a79147930aa725, + 0x06ca6351e003826f, + 0x142929670a0e6e70, + 0x27b70a8546d22ffc, + 0x2e1b21385c26c926, + 0x4d2c6dfc5ac42aed, + 0x53380d139d95b3df, + 0x650a73548baf63de, + 0x766a0abb3c77b2a8, + 0x81c2c92e47edaee6, + 0x92722c851482353b, + 0xa2bfe8a14cf10364, + 0xa81a664bbc423001, + 0xc24b8b70d0f89791, + 0xc76c51a30654be30, + 0xd192e819d6ef5218, + 0xd69906245565a910, + 0xf40e35855771202a, + 0x106aa07032bbd1b8, + 0x19a4c116b8d2d0c8, + 0x1e376c085141ab53, + 0x2748774cdf8eeb99, + 0x34b0bcb5e19b48a8, + 0x391c0cb3c5c95a63, + 0x4ed8aa4ae3418acb, + 0x5b9cca4f7763e373, + 0x682e6ff3d6b2b8a3, + 0x748f82ee5defb2fc, + 0x78a5636f43172f60, + 0x84c87814a1f0ab72, + 0x8cc702081a6439ec, + 0x90befffa23631e28, + 0xa4506cebde82bde9, + 0xbef9a3f7b2c67915, + 0xc67178f2e372532b, + 0xca273eceea26619c, + 0xd186b8c721c0c207, + 0xeada7dd6cde0eb1e, + 0xf57d4f7fee6ed178, + 0x06f067aa72176fba, + 0x0a637dc5a2c898a6, + 0x113f9804bef90dae, + 0x1b710b35131c471b, + 0x28db77f523047d84, + 0x32caab7b40c72493, + 0x3c9ebe0a15c9bebc, + 0x431d67c49c100d4c, + 0x4cc5d4becb3e42b6, + 0x597f299cfc657e2a, + 0x5fcb6fab3ad6faec, + 0x6c44198c4a475817, +} + +func block(dig *digest, p []byte) { + var w [80]uint64 + h0, h1, h2, h3, h4, h5, h6, h7 := dig.h[0], dig.h[1], dig.h[2], dig.h[3], dig.h[4], dig.h[5], dig.h[6], dig.h[7] + for len(p) >= chunk { + for i := 0; i < 16; i++ { + j := i * 8 + w[i] = uint64(p[j])<<56 | uint64(p[j+1])<<48 | uint64(p[j+2])<<40 | uint64(p[j+3])<<32 | + uint64(p[j+4])<<24 | uint64(p[j+5])<<16 | uint64(p[j+6])<<8 | uint64(p[j+7]) + } + for i := 16; i < 80; i++ { + v1 := w[i-2] + t1 := (v1>>19 | v1<<(64-19)) ^ (v1>>61 | v1<<(64-61)) ^ (v1 >> 6) + v2 := w[i-15] + t2 := (v2>>1 | v2<<(64-1)) ^ (v2>>8 | v2<<(64-8)) ^ (v2 >> 7) + + w[i] = t1 + w[i-7] + t2 + w[i-16] + } + + a, b, c, d, e, f, g, h := h0, h1, h2, h3, h4, h5, h6, h7 + + for i := 0; i < 80; i++ { + t1 := h + ((e>>14 | e<<(64-14)) ^ (e>>18 | e<<(64-18)) ^ (e>>41 | e<<(64-41))) + ((e & f) ^ (^e & g)) + _K[i] + w[i] + + t2 := ((a>>28 | a<<(64-28)) ^ (a>>34 | a<<(64-34)) ^ (a>>39 | a<<(64-39))) + ((a & b) ^ (a & c) ^ (b & c)) + + h = g + g = f + f = e + e = d + t1 + d = c + c = b + b = a + a = t1 + t2 + } + + h0 += a + h1 += b + h2 += c + h3 += d + h4 += e + h5 += f + h6 += g + h7 += h + + p = p[chunk:] + } + + dig.h[0], dig.h[1], dig.h[2], dig.h[3], dig.h[4], dig.h[5], dig.h[6], dig.h[7] = h0, h1, h2, h3, h4, h5, h6, h7 +} diff --git a/vendor/github.com/docker/distribution/vendor/github.com/stevvooe/resumable/sha512/sha512block_amd64.s b/vendor/github.com/docker/distribution/vendor/github.com/stevvooe/resumable/sha512/sha512block_amd64.s new file mode 100644 index 000000000..2e10233de --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/github.com/stevvooe/resumable/sha512/sha512block_amd64.s @@ -0,0 +1,273 @@ +// Copyright 2013 The Go Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +#include "textflag.h" + +// SHA512 block routine. See sha512block.go for Go equivalent. +// +// The algorithm is detailed in FIPS 180-4: +// +// http://csrc.nist.gov/publications/fips/fips180-4/fips-180-4.pdf +// +// Wt = Mt; for 0 <= t <= 15 +// Wt = SIGMA1(Wt-2) + SIGMA0(Wt-15) + Wt-16; for 16 <= t <= 79 +// +// a = H0 +// b = H1 +// c = H2 +// d = H3 +// e = H4 +// f = H5 +// g = H6 +// h = H7 +// +// for t = 0 to 79 { +// T1 = h + BIGSIGMA1(e) + Ch(e,f,g) + Kt + Wt +// T2 = BIGSIGMA0(a) + Maj(a,b,c) +// h = g +// g = f +// f = e +// e = d + T1 +// d = c +// c = b +// b = a +// a = T1 + T2 +// } +// +// H0 = a + H0 +// H1 = b + H1 +// H2 = c + H2 +// H3 = d + H3 +// H4 = e + H4 +// H5 = f + H5 +// H6 = g + H6 +// H7 = h + H7 + +// Wt = Mt; for 0 <= t <= 15 +#define MSGSCHEDULE0(index) \ + MOVQ (index*8)(SI), AX; \ + BSWAPQ AX; \ + MOVQ AX, (index*8)(BP) + +// Wt = SIGMA1(Wt-2) + Wt-7 + SIGMA0(Wt-15) + Wt-16; for 16 <= t <= 79 +// SIGMA0(x) = ROTR(1,x) XOR ROTR(8,x) XOR SHR(7,x) +// SIGMA1(x) = ROTR(19,x) XOR ROTR(61,x) XOR SHR(6,x) +#define MSGSCHEDULE1(index) \ + MOVQ ((index-2)*8)(BP), AX; \ + MOVQ AX, CX; \ + RORQ $19, AX; \ + MOVQ CX, DX; \ + RORQ $61, CX; \ + SHRQ $6, DX; \ + MOVQ ((index-15)*8)(BP), BX; \ + XORQ CX, AX; \ + MOVQ BX, CX; \ + XORQ DX, AX; \ + RORQ $1, BX; \ + MOVQ CX, DX; \ + SHRQ $7, DX; \ + RORQ $8, CX; \ + ADDQ ((index-7)*8)(BP), AX; \ + XORQ CX, BX; \ + XORQ DX, BX; \ + ADDQ ((index-16)*8)(BP), BX; \ + ADDQ BX, AX; \ + MOVQ AX, ((index)*8)(BP) + +// Calculate T1 in AX - uses AX, CX and DX registers. +// h is also used as an accumulator. Wt is passed in AX. +// T1 = h + BIGSIGMA1(e) + Ch(e, f, g) + Kt + Wt +// BIGSIGMA1(x) = ROTR(14,x) XOR ROTR(18,x) XOR ROTR(41,x) +// Ch(x, y, z) = (x AND y) XOR (NOT x AND z) +#define SHA512T1(const, e, f, g, h) \ + MOVQ $const, DX; \ + ADDQ AX, h; \ + MOVQ e, AX; \ + ADDQ DX, h; \ + MOVQ e, CX; \ + RORQ $14, AX; \ + MOVQ e, DX; \ + RORQ $18, CX; \ + XORQ CX, AX; \ + MOVQ e, CX; \ + RORQ $41, DX; \ + ANDQ f, CX; \ + XORQ AX, DX; \ + MOVQ e, AX; \ + NOTQ AX; \ + ADDQ DX, h; \ + ANDQ g, AX; \ + XORQ CX, AX; \ + ADDQ h, AX + +// Calculate T2 in BX - uses BX, CX, DX and DI registers. +// T2 = BIGSIGMA0(a) + Maj(a, b, c) +// BIGSIGMA0(x) = ROTR(28,x) XOR ROTR(34,x) XOR ROTR(39,x) +// Maj(x, y, z) = (x AND y) XOR (x AND z) XOR (y AND z) +#define SHA512T2(a, b, c) \ + MOVQ a, DI; \ + MOVQ c, BX; \ + RORQ $28, DI; \ + MOVQ a, DX; \ + ANDQ b, BX; \ + RORQ $34, DX; \ + MOVQ a, CX; \ + ANDQ c, CX; \ + XORQ DX, DI; \ + XORQ CX, BX; \ + MOVQ a, DX; \ + MOVQ b, CX; \ + RORQ $39, DX; \ + ANDQ a, CX; \ + XORQ CX, BX; \ + XORQ DX, DI; \ + ADDQ DI, BX + +// Calculate T1 and T2, then e = d + T1 and a = T1 + T2. +// The values for e and a are stored in d and h, ready for rotation. 
+#define SHA512ROUND(index, const, a, b, c, d, e, f, g, h) \ + SHA512T1(const, e, f, g, h); \ + SHA512T2(a, b, c); \ + MOVQ BX, h; \ + ADDQ AX, d; \ + ADDQ AX, h + +#define SHA512ROUND0(index, const, a, b, c, d, e, f, g, h) \ + MSGSCHEDULE0(index); \ + SHA512ROUND(index, const, a, b, c, d, e, f, g, h) + +#define SHA512ROUND1(index, const, a, b, c, d, e, f, g, h) \ + MSGSCHEDULE1(index); \ + SHA512ROUND(index, const, a, b, c, d, e, f, g, h) + +TEXT ·block(SB),0,$648-32 + MOVQ p_base+8(FP), SI + MOVQ p_len+16(FP), DX + SHRQ $7, DX + SHLQ $7, DX + + LEAQ (SI)(DX*1), DI + MOVQ DI, 640(SP) + CMPQ SI, DI + JEQ end + + MOVQ dig+0(FP), BP + MOVQ (0*8)(BP), R8 // a = H0 + MOVQ (1*8)(BP), R9 // b = H1 + MOVQ (2*8)(BP), R10 // c = H2 + MOVQ (3*8)(BP), R11 // d = H3 + MOVQ (4*8)(BP), R12 // e = H4 + MOVQ (5*8)(BP), R13 // f = H5 + MOVQ (6*8)(BP), R14 // g = H6 + MOVQ (7*8)(BP), R15 // h = H7 + +loop: + MOVQ SP, BP // message schedule + + SHA512ROUND0(0, 0x428a2f98d728ae22, R8, R9, R10, R11, R12, R13, R14, R15) + SHA512ROUND0(1, 0x7137449123ef65cd, R15, R8, R9, R10, R11, R12, R13, R14) + SHA512ROUND0(2, 0xb5c0fbcfec4d3b2f, R14, R15, R8, R9, R10, R11, R12, R13) + SHA512ROUND0(3, 0xe9b5dba58189dbbc, R13, R14, R15, R8, R9, R10, R11, R12) + SHA512ROUND0(4, 0x3956c25bf348b538, R12, R13, R14, R15, R8, R9, R10, R11) + SHA512ROUND0(5, 0x59f111f1b605d019, R11, R12, R13, R14, R15, R8, R9, R10) + SHA512ROUND0(6, 0x923f82a4af194f9b, R10, R11, R12, R13, R14, R15, R8, R9) + SHA512ROUND0(7, 0xab1c5ed5da6d8118, R9, R10, R11, R12, R13, R14, R15, R8) + SHA512ROUND0(8, 0xd807aa98a3030242, R8, R9, R10, R11, R12, R13, R14, R15) + SHA512ROUND0(9, 0x12835b0145706fbe, R15, R8, R9, R10, R11, R12, R13, R14) + SHA512ROUND0(10, 0x243185be4ee4b28c, R14, R15, R8, R9, R10, R11, R12, R13) + SHA512ROUND0(11, 0x550c7dc3d5ffb4e2, R13, R14, R15, R8, R9, R10, R11, R12) + SHA512ROUND0(12, 0x72be5d74f27b896f, R12, R13, R14, R15, R8, R9, R10, R11) + SHA512ROUND0(13, 0x80deb1fe3b1696b1, R11, R12, R13, R14, R15, R8, R9, R10) + SHA512ROUND0(14, 0x9bdc06a725c71235, R10, R11, R12, R13, R14, R15, R8, R9) + SHA512ROUND0(15, 0xc19bf174cf692694, R9, R10, R11, R12, R13, R14, R15, R8) + + SHA512ROUND1(16, 0xe49b69c19ef14ad2, R8, R9, R10, R11, R12, R13, R14, R15) + SHA512ROUND1(17, 0xefbe4786384f25e3, R15, R8, R9, R10, R11, R12, R13, R14) + SHA512ROUND1(18, 0x0fc19dc68b8cd5b5, R14, R15, R8, R9, R10, R11, R12, R13) + SHA512ROUND1(19, 0x240ca1cc77ac9c65, R13, R14, R15, R8, R9, R10, R11, R12) + SHA512ROUND1(20, 0x2de92c6f592b0275, R12, R13, R14, R15, R8, R9, R10, R11) + SHA512ROUND1(21, 0x4a7484aa6ea6e483, R11, R12, R13, R14, R15, R8, R9, R10) + SHA512ROUND1(22, 0x5cb0a9dcbd41fbd4, R10, R11, R12, R13, R14, R15, R8, R9) + SHA512ROUND1(23, 0x76f988da831153b5, R9, R10, R11, R12, R13, R14, R15, R8) + SHA512ROUND1(24, 0x983e5152ee66dfab, R8, R9, R10, R11, R12, R13, R14, R15) + SHA512ROUND1(25, 0xa831c66d2db43210, R15, R8, R9, R10, R11, R12, R13, R14) + SHA512ROUND1(26, 0xb00327c898fb213f, R14, R15, R8, R9, R10, R11, R12, R13) + SHA512ROUND1(27, 0xbf597fc7beef0ee4, R13, R14, R15, R8, R9, R10, R11, R12) + SHA512ROUND1(28, 0xc6e00bf33da88fc2, R12, R13, R14, R15, R8, R9, R10, R11) + SHA512ROUND1(29, 0xd5a79147930aa725, R11, R12, R13, R14, R15, R8, R9, R10) + SHA512ROUND1(30, 0x06ca6351e003826f, R10, R11, R12, R13, R14, R15, R8, R9) + SHA512ROUND1(31, 0x142929670a0e6e70, R9, R10, R11, R12, R13, R14, R15, R8) + SHA512ROUND1(32, 0x27b70a8546d22ffc, R8, R9, R10, R11, R12, R13, R14, R15) + SHA512ROUND1(33, 0x2e1b21385c26c926, R15, R8, R9, R10, R11, R12, R13, R14) + 
SHA512ROUND1(34, 0x4d2c6dfc5ac42aed, R14, R15, R8, R9, R10, R11, R12, R13) + SHA512ROUND1(35, 0x53380d139d95b3df, R13, R14, R15, R8, R9, R10, R11, R12) + SHA512ROUND1(36, 0x650a73548baf63de, R12, R13, R14, R15, R8, R9, R10, R11) + SHA512ROUND1(37, 0x766a0abb3c77b2a8, R11, R12, R13, R14, R15, R8, R9, R10) + SHA512ROUND1(38, 0x81c2c92e47edaee6, R10, R11, R12, R13, R14, R15, R8, R9) + SHA512ROUND1(39, 0x92722c851482353b, R9, R10, R11, R12, R13, R14, R15, R8) + SHA512ROUND1(40, 0xa2bfe8a14cf10364, R8, R9, R10, R11, R12, R13, R14, R15) + SHA512ROUND1(41, 0xa81a664bbc423001, R15, R8, R9, R10, R11, R12, R13, R14) + SHA512ROUND1(42, 0xc24b8b70d0f89791, R14, R15, R8, R9, R10, R11, R12, R13) + SHA512ROUND1(43, 0xc76c51a30654be30, R13, R14, R15, R8, R9, R10, R11, R12) + SHA512ROUND1(44, 0xd192e819d6ef5218, R12, R13, R14, R15, R8, R9, R10, R11) + SHA512ROUND1(45, 0xd69906245565a910, R11, R12, R13, R14, R15, R8, R9, R10) + SHA512ROUND1(46, 0xf40e35855771202a, R10, R11, R12, R13, R14, R15, R8, R9) + SHA512ROUND1(47, 0x106aa07032bbd1b8, R9, R10, R11, R12, R13, R14, R15, R8) + SHA512ROUND1(48, 0x19a4c116b8d2d0c8, R8, R9, R10, R11, R12, R13, R14, R15) + SHA512ROUND1(49, 0x1e376c085141ab53, R15, R8, R9, R10, R11, R12, R13, R14) + SHA512ROUND1(50, 0x2748774cdf8eeb99, R14, R15, R8, R9, R10, R11, R12, R13) + SHA512ROUND1(51, 0x34b0bcb5e19b48a8, R13, R14, R15, R8, R9, R10, R11, R12) + SHA512ROUND1(52, 0x391c0cb3c5c95a63, R12, R13, R14, R15, R8, R9, R10, R11) + SHA512ROUND1(53, 0x4ed8aa4ae3418acb, R11, R12, R13, R14, R15, R8, R9, R10) + SHA512ROUND1(54, 0x5b9cca4f7763e373, R10, R11, R12, R13, R14, R15, R8, R9) + SHA512ROUND1(55, 0x682e6ff3d6b2b8a3, R9, R10, R11, R12, R13, R14, R15, R8) + SHA512ROUND1(56, 0x748f82ee5defb2fc, R8, R9, R10, R11, R12, R13, R14, R15) + SHA512ROUND1(57, 0x78a5636f43172f60, R15, R8, R9, R10, R11, R12, R13, R14) + SHA512ROUND1(58, 0x84c87814a1f0ab72, R14, R15, R8, R9, R10, R11, R12, R13) + SHA512ROUND1(59, 0x8cc702081a6439ec, R13, R14, R15, R8, R9, R10, R11, R12) + SHA512ROUND1(60, 0x90befffa23631e28, R12, R13, R14, R15, R8, R9, R10, R11) + SHA512ROUND1(61, 0xa4506cebde82bde9, R11, R12, R13, R14, R15, R8, R9, R10) + SHA512ROUND1(62, 0xbef9a3f7b2c67915, R10, R11, R12, R13, R14, R15, R8, R9) + SHA512ROUND1(63, 0xc67178f2e372532b, R9, R10, R11, R12, R13, R14, R15, R8) + SHA512ROUND1(64, 0xca273eceea26619c, R8, R9, R10, R11, R12, R13, R14, R15) + SHA512ROUND1(65, 0xd186b8c721c0c207, R15, R8, R9, R10, R11, R12, R13, R14) + SHA512ROUND1(66, 0xeada7dd6cde0eb1e, R14, R15, R8, R9, R10, R11, R12, R13) + SHA512ROUND1(67, 0xf57d4f7fee6ed178, R13, R14, R15, R8, R9, R10, R11, R12) + SHA512ROUND1(68, 0x06f067aa72176fba, R12, R13, R14, R15, R8, R9, R10, R11) + SHA512ROUND1(69, 0x0a637dc5a2c898a6, R11, R12, R13, R14, R15, R8, R9, R10) + SHA512ROUND1(70, 0x113f9804bef90dae, R10, R11, R12, R13, R14, R15, R8, R9) + SHA512ROUND1(71, 0x1b710b35131c471b, R9, R10, R11, R12, R13, R14, R15, R8) + SHA512ROUND1(72, 0x28db77f523047d84, R8, R9, R10, R11, R12, R13, R14, R15) + SHA512ROUND1(73, 0x32caab7b40c72493, R15, R8, R9, R10, R11, R12, R13, R14) + SHA512ROUND1(74, 0x3c9ebe0a15c9bebc, R14, R15, R8, R9, R10, R11, R12, R13) + SHA512ROUND1(75, 0x431d67c49c100d4c, R13, R14, R15, R8, R9, R10, R11, R12) + SHA512ROUND1(76, 0x4cc5d4becb3e42b6, R12, R13, R14, R15, R8, R9, R10, R11) + SHA512ROUND1(77, 0x597f299cfc657e2a, R11, R12, R13, R14, R15, R8, R9, R10) + SHA512ROUND1(78, 0x5fcb6fab3ad6faec, R10, R11, R12, R13, R14, R15, R8, R9) + SHA512ROUND1(79, 0x6c44198c4a475817, R9, R10, R11, R12, R13, R14, R15, R8) + + MOVQ 
dig+0(FP), BP + ADDQ (0*8)(BP), R8 // H0 = a + H0 + MOVQ R8, (0*8)(BP) + ADDQ (1*8)(BP), R9 // H1 = b + H1 + MOVQ R9, (1*8)(BP) + ADDQ (2*8)(BP), R10 // H2 = c + H2 + MOVQ R10, (2*8)(BP) + ADDQ (3*8)(BP), R11 // H3 = d + H3 + MOVQ R11, (3*8)(BP) + ADDQ (4*8)(BP), R12 // H4 = e + H4 + MOVQ R12, (4*8)(BP) + ADDQ (5*8)(BP), R13 // H5 = f + H5 + MOVQ R13, (5*8)(BP) + ADDQ (6*8)(BP), R14 // H6 = g + H6 + MOVQ R14, (6*8)(BP) + ADDQ (7*8)(BP), R15 // H7 = h + H7 + MOVQ R15, (7*8)(BP) + + ADDQ $128, SI + CMPQ SI, 640(SP) + JB loop + +end: + RET diff --git a/vendor/github.com/docker/distribution/vendor/github.com/stevvooe/resumable/sha512/sha512block_decl.go b/vendor/github.com/docker/distribution/vendor/github.com/stevvooe/resumable/sha512/sha512block_decl.go new file mode 100644 index 000000000..bef99de2e --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/github.com/stevvooe/resumable/sha512/sha512block_decl.go @@ -0,0 +1,11 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build amd64 + +package sha512 + +//go:noescape + +func block(dig *digest, p []byte) diff --git a/vendor/github.com/docker/distribution/vendor/github.com/yvasiyarov/go-metrics/.gitignore b/vendor/github.com/docker/distribution/vendor/github.com/yvasiyarov/go-metrics/.gitignore new file mode 100644 index 000000000..83c8f8237 --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/github.com/yvasiyarov/go-metrics/.gitignore @@ -0,0 +1,9 @@ +*.[68] +*.a +*.out +*.swp +_obj +_testmain.go +cmd/metrics-bench/metrics-bench +cmd/metrics-example/metrics-example +cmd/never-read/never-read diff --git a/vendor/github.com/docker/distribution/vendor/github.com/yvasiyarov/go-metrics/LICENSE b/vendor/github.com/docker/distribution/vendor/github.com/yvasiyarov/go-metrics/LICENSE new file mode 100644 index 000000000..363fa9ee7 --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/github.com/yvasiyarov/go-metrics/LICENSE @@ -0,0 +1,29 @@ +Copyright 2012 Richard Crowley. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + 1. Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above + copyright notice, this list of conditions and the following + disclaimer in the documentation and/or other materials provided + with the distribution. + +THIS SOFTWARE IS PROVIDED BY RICHARD CROWLEY ``AS IS'' AND ANY EXPRESS +OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL RICHARD CROWLEY OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF +THE POSSIBILITY OF SUCH DAMAGE. 
+ +The views and conclusions contained in the software and documentation +are those of the authors and should not be interpreted as representing +official policies, either expressed or implied, of Richard Crowley. diff --git a/vendor/github.com/docker/distribution/vendor/github.com/yvasiyarov/go-metrics/README.md b/vendor/github.com/docker/distribution/vendor/github.com/yvasiyarov/go-metrics/README.md new file mode 100644 index 000000000..e0091a4bd --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/github.com/yvasiyarov/go-metrics/README.md @@ -0,0 +1,104 @@ +go-metrics +========== + +Go port of Coda Hale's Metrics library: . + +Documentation: . + +Usage +----- + +Create and update metrics: + +```go +c := metrics.NewCounter() +metrics.Register("foo", c) +c.Inc(47) + +g := metrics.NewGauge() +metrics.Register("bar", g) +g.Update(47) + +s := metrics.NewExpDecaySample(1028, 0.015) // or metrics.NewUniformSample(1028) +h := metrics.NewHistogram(s) +metrics.Register("baz", h) +h.Update(47) + +m := metrics.NewMeter() +metrics.Register("quux", m) +m.Mark(47) + +t := metrics.NewTimer() +metrics.Register("bang", t) +t.Time(func() {}) +t.Update(47) +``` + +Periodically log every metric in human-readable form to standard error: + +```go +go metrics.Log(metrics.DefaultRegistry, 60e9, log.New(os.Stderr, "metrics: ", log.Lmicroseconds)) +``` + +Periodically log every metric in slightly-more-parseable form to syslog: + +```go +w, _ := syslog.Dial("unixgram", "/dev/log", syslog.LOG_INFO, "metrics") +go metrics.Syslog(metrics.DefaultRegistry, 60e9, w) +``` + +Periodically emit every metric to Graphite: + +```go +addr, _ := net.ResolveTCPAddr("tcp", "127.0.0.1:2003") +go metrics.Graphite(metrics.DefaultRegistry, 10e9, "metrics", addr) +``` + +Periodically emit every metric into InfluxDB: + +```go +import "github.com/rcrowley/go-metrics/influxdb" + +go influxdb.Influxdb(metrics.DefaultRegistry, 10e9, &influxdb.Config{ + Host: "127.0.0.1:8086", + Database: "metrics", + Username: "test", + Password: "test", +}) +``` + +Periodically upload every metric to Librato: + +```go +import "github.com/rcrowley/go-metrics/librato" + +go librato.Librato(metrics.DefaultRegistry, + 10e9, // interval + "example@example.com", // account owner email address + "token", // Librato API token + "hostname", // source + []float64{0.95}, // precentiles to send + time.Millisecond, // time unit +) +``` + +Periodically emit every metric to StatHat: + +```go +import "github.com/rcrowley/go-metrics/stathat" + +go stathat.Stathat(metrics.DefaultRegistry, 10e9, "example@example.com") +``` + +Installation +------------ + +```sh +go get github.com/rcrowley/go-metrics +``` + +StatHat support additionally requires their Go client: + +```sh +go get github.com/stathat/go +``` diff --git a/vendor/github.com/docker/distribution/vendor/github.com/yvasiyarov/go-metrics/counter.go b/vendor/github.com/docker/distribution/vendor/github.com/yvasiyarov/go-metrics/counter.go new file mode 100644 index 000000000..bb7b039cb --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/github.com/yvasiyarov/go-metrics/counter.go @@ -0,0 +1,112 @@ +package metrics + +import "sync/atomic" + +// Counters hold an int64 value that can be incremented and decremented. +type Counter interface { + Clear() + Count() int64 + Dec(int64) + Inc(int64) + Snapshot() Counter +} + +// GetOrRegisterCounter returns an existing Counter or constructs and registers +// a new StandardCounter. 
+func GetOrRegisterCounter(name string, r Registry) Counter { + if nil == r { + r = DefaultRegistry + } + return r.GetOrRegister(name, NewCounter).(Counter) +} + +// NewCounter constructs a new StandardCounter. +func NewCounter() Counter { + if UseNilMetrics { + return NilCounter{} + } + return &StandardCounter{0} +} + +// NewRegisteredCounter constructs and registers a new StandardCounter. +func NewRegisteredCounter(name string, r Registry) Counter { + c := NewCounter() + if nil == r { + r = DefaultRegistry + } + r.Register(name, c) + return c +} + +// CounterSnapshot is a read-only copy of another Counter. +type CounterSnapshot int64 + +// Clear panics. +func (CounterSnapshot) Clear() { + panic("Clear called on a CounterSnapshot") +} + +// Count returns the count at the time the snapshot was taken. +func (c CounterSnapshot) Count() int64 { return int64(c) } + +// Dec panics. +func (CounterSnapshot) Dec(int64) { + panic("Dec called on a CounterSnapshot") +} + +// Inc panics. +func (CounterSnapshot) Inc(int64) { + panic("Inc called on a CounterSnapshot") +} + +// Snapshot returns the snapshot. +func (c CounterSnapshot) Snapshot() Counter { return c } + +// NilCounter is a no-op Counter. +type NilCounter struct{} + +// Clear is a no-op. +func (NilCounter) Clear() {} + +// Count is a no-op. +func (NilCounter) Count() int64 { return 0 } + +// Dec is a no-op. +func (NilCounter) Dec(i int64) {} + +// Inc is a no-op. +func (NilCounter) Inc(i int64) {} + +// Snapshot is a no-op. +func (NilCounter) Snapshot() Counter { return NilCounter{} } + +// StandardCounter is the standard implementation of a Counter and uses the +// sync/atomic package to manage a single int64 value. +type StandardCounter struct { + count int64 +} + +// Clear sets the counter to zero. +func (c *StandardCounter) Clear() { + atomic.StoreInt64(&c.count, 0) +} + +// Count returns the current count. +func (c *StandardCounter) Count() int64 { + return atomic.LoadInt64(&c.count) +} + +// Dec decrements the counter by the given amount. +func (c *StandardCounter) Dec(i int64) { + atomic.AddInt64(&c.count, -i) +} + +// Inc increments the counter by the given amount. +func (c *StandardCounter) Inc(i int64) { + atomic.AddInt64(&c.count, i) +} + +// Snapshot returns a read-only copy of the counter. +func (c *StandardCounter) Snapshot() Counter { + return CounterSnapshot(c.Count()) +} diff --git a/vendor/github.com/docker/distribution/vendor/github.com/yvasiyarov/go-metrics/debug.go b/vendor/github.com/docker/distribution/vendor/github.com/yvasiyarov/go-metrics/debug.go new file mode 100644 index 000000000..043ccefab --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/github.com/yvasiyarov/go-metrics/debug.go @@ -0,0 +1,76 @@ +package metrics + +import ( + "runtime/debug" + "time" +) + +var ( + debugMetrics struct { + GCStats struct { + LastGC Gauge + NumGC Gauge + Pause Histogram + //PauseQuantiles Histogram + PauseTotal Gauge + } + ReadGCStats Timer + } + gcStats debug.GCStats +) + +// Capture new values for the Go garbage collector statistics exported in +// debug.GCStats. This is designed to be called as a goroutine. +func CaptureDebugGCStats(r Registry, d time.Duration) { + for _ = range time.Tick(d) { + CaptureDebugGCStatsOnce(r) + } +} + +// Capture new values for the Go garbage collector statistics exported in +// debug.GCStats. This is designed to be called in a background goroutine. +// Giving a registry which has not been given to RegisterDebugGCStats will +// panic. 
+// +// Be careful (but much less so) with this because debug.ReadGCStats calls +// the C function runtime·lock(runtime·mheap) which, while not a stop-the-world +// operation, isn't something you want to be doing all the time. +func CaptureDebugGCStatsOnce(r Registry) { + lastGC := gcStats.LastGC + t := time.Now() + debug.ReadGCStats(&gcStats) + debugMetrics.ReadGCStats.UpdateSince(t) + + debugMetrics.GCStats.LastGC.Update(int64(gcStats.LastGC.UnixNano())) + debugMetrics.GCStats.NumGC.Update(int64(gcStats.NumGC)) + if lastGC != gcStats.LastGC && 0 < len(gcStats.Pause) { + debugMetrics.GCStats.Pause.Update(int64(gcStats.Pause[0])) + } + //debugMetrics.GCStats.PauseQuantiles.Update(gcStats.PauseQuantiles) + debugMetrics.GCStats.PauseTotal.Update(int64(gcStats.PauseTotal)) +} + +// Register metrics for the Go garbage collector statistics exported in +// debug.GCStats. The metrics are named by their fully-qualified Go symbols, +// i.e. debug.GCStats.PauseTotal. +func RegisterDebugGCStats(r Registry) { + debugMetrics.GCStats.LastGC = NewGauge() + debugMetrics.GCStats.NumGC = NewGauge() + debugMetrics.GCStats.Pause = NewHistogram(NewExpDecaySample(1028, 0.015)) + //debugMetrics.GCStats.PauseQuantiles = NewHistogram(NewExpDecaySample(1028, 0.015)) + debugMetrics.GCStats.PauseTotal = NewGauge() + debugMetrics.ReadGCStats = NewTimer() + + r.Register("debug.GCStats.LastGC", debugMetrics.GCStats.LastGC) + r.Register("debug.GCStats.NumGC", debugMetrics.GCStats.NumGC) + r.Register("debug.GCStats.Pause", debugMetrics.GCStats.Pause) + //r.Register("debug.GCStats.PauseQuantiles", debugMetrics.GCStats.PauseQuantiles) + r.Register("debug.GCStats.PauseTotal", debugMetrics.GCStats.PauseTotal) + r.Register("debug.ReadGCStats", debugMetrics.ReadGCStats) +} + +// Allocate an initial slice for gcStats.Pause to avoid allocations during +// normal operation. +func init() { + gcStats.Pause = make([]time.Duration, 11) +} diff --git a/vendor/github.com/docker/distribution/vendor/github.com/yvasiyarov/go-metrics/ewma.go b/vendor/github.com/docker/distribution/vendor/github.com/yvasiyarov/go-metrics/ewma.go new file mode 100644 index 000000000..7c152a174 --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/github.com/yvasiyarov/go-metrics/ewma.go @@ -0,0 +1,118 @@ +package metrics + +import ( + "math" + "sync" + "sync/atomic" +) + +// EWMAs continuously calculate an exponentially-weighted moving average +// based on an outside source of clock ticks. +type EWMA interface { + Rate() float64 + Snapshot() EWMA + Tick() + Update(int64) +} + +// NewEWMA constructs a new EWMA with the given alpha. +func NewEWMA(alpha float64) EWMA { + if UseNilMetrics { + return NilEWMA{} + } + return &StandardEWMA{alpha: alpha} +} + +// NewEWMA1 constructs a new EWMA for a one-minute moving average. +func NewEWMA1() EWMA { + return NewEWMA(1 - math.Exp(-5.0/60.0/1)) +} + +// NewEWMA5 constructs a new EWMA for a five-minute moving average. +func NewEWMA5() EWMA { + return NewEWMA(1 - math.Exp(-5.0/60.0/5)) +} + +// NewEWMA15 constructs a new EWMA for a fifteen-minute moving average. +func NewEWMA15() EWMA { + return NewEWMA(1 - math.Exp(-5.0/60.0/15)) +} + +// EWMASnapshot is a read-only copy of another EWMA. +type EWMASnapshot float64 + +// Rate returns the rate of events per second at the time the snapshot was +// taken. +func (a EWMASnapshot) Rate() float64 { return float64(a) } + +// Snapshot returns the snapshot. +func (a EWMASnapshot) Snapshot() EWMA { return a } + +// Tick panics. 
+func (EWMASnapshot) Tick() { + panic("Tick called on an EWMASnapshot") +} + +// Update panics. +func (EWMASnapshot) Update(int64) { + panic("Update called on an EWMASnapshot") +} + +// NilEWMA is a no-op EWMA. +type NilEWMA struct{} + +// Rate is a no-op. +func (NilEWMA) Rate() float64 { return 0.0 } + +// Snapshot is a no-op. +func (NilEWMA) Snapshot() EWMA { return NilEWMA{} } + +// Tick is a no-op. +func (NilEWMA) Tick() {} + +// Update is a no-op. +func (NilEWMA) Update(n int64) {} + +// StandardEWMA is the standard implementation of an EWMA and tracks the number +// of uncounted events and processes them on each tick. It uses the +// sync/atomic package to manage uncounted events. +type StandardEWMA struct { + uncounted int64 // /!\ this should be the first member to ensure 64-bit alignment + alpha float64 + rate float64 + init bool + mutex sync.Mutex +} + +// Rate returns the moving average rate of events per second. +func (a *StandardEWMA) Rate() float64 { + a.mutex.Lock() + defer a.mutex.Unlock() + return a.rate * float64(1e9) +} + +// Snapshot returns a read-only copy of the EWMA. +func (a *StandardEWMA) Snapshot() EWMA { + return EWMASnapshot(a.Rate()) +} + +// Tick ticks the clock to update the moving average. It assumes it is called +// every five seconds. +func (a *StandardEWMA) Tick() { + count := atomic.LoadInt64(&a.uncounted) + atomic.AddInt64(&a.uncounted, -count) + instantRate := float64(count) / float64(5e9) + a.mutex.Lock() + defer a.mutex.Unlock() + if a.init { + a.rate += a.alpha * (instantRate - a.rate) + } else { + a.init = true + a.rate = instantRate + } +} + +// Update adds n uncounted events. +func (a *StandardEWMA) Update(n int64) { + atomic.AddInt64(&a.uncounted, n) +} diff --git a/vendor/github.com/docker/distribution/vendor/github.com/yvasiyarov/go-metrics/gauge.go b/vendor/github.com/docker/distribution/vendor/github.com/yvasiyarov/go-metrics/gauge.go new file mode 100644 index 000000000..807638a31 --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/github.com/yvasiyarov/go-metrics/gauge.go @@ -0,0 +1,84 @@ +package metrics + +import "sync/atomic" + +// Gauges hold an int64 value that can be set arbitrarily. +type Gauge interface { + Snapshot() Gauge + Update(int64) + Value() int64 +} + +// GetOrRegisterGauge returns an existing Gauge or constructs and registers a +// new StandardGauge. +func GetOrRegisterGauge(name string, r Registry) Gauge { + if nil == r { + r = DefaultRegistry + } + return r.GetOrRegister(name, NewGauge).(Gauge) +} + +// NewGauge constructs a new StandardGauge. +func NewGauge() Gauge { + if UseNilMetrics { + return NilGauge{} + } + return &StandardGauge{0} +} + +// NewRegisteredGauge constructs and registers a new StandardGauge. +func NewRegisteredGauge(name string, r Registry) Gauge { + c := NewGauge() + if nil == r { + r = DefaultRegistry + } + r.Register(name, c) + return c +} + +// GaugeSnapshot is a read-only copy of another Gauge. +type GaugeSnapshot int64 + +// Snapshot returns the snapshot. +func (g GaugeSnapshot) Snapshot() Gauge { return g } + +// Update panics. +func (GaugeSnapshot) Update(int64) { + panic("Update called on a GaugeSnapshot") +} + +// Value returns the value at the time the snapshot was taken. +func (g GaugeSnapshot) Value() int64 { return int64(g) } + +// NilGauge is a no-op Gauge. +type NilGauge struct{} + +// Snapshot is a no-op. +func (NilGauge) Snapshot() Gauge { return NilGauge{} } + +// Update is a no-op. +func (NilGauge) Update(v int64) {} + +// Value is a no-op. 
+func (NilGauge) Value() int64 { return 0 }
+
+// StandardGauge is the standard implementation of a Gauge and uses the
+// sync/atomic package to manage a single int64 value.
+type StandardGauge struct {
+ value int64
+}
+
+// Snapshot returns a read-only copy of the gauge.
+func (g *StandardGauge) Snapshot() Gauge {
+ return GaugeSnapshot(g.Value())
+}
+
+// Update updates the gauge's value.
+func (g *StandardGauge) Update(v int64) {
+ atomic.StoreInt64(&g.value, v)
+}
+
+// Value returns the gauge's current value.
+func (g *StandardGauge) Value() int64 {
+ return atomic.LoadInt64(&g.value)
+}
diff --git a/vendor/github.com/docker/distribution/vendor/github.com/yvasiyarov/go-metrics/gauge_float64.go b/vendor/github.com/docker/distribution/vendor/github.com/yvasiyarov/go-metrics/gauge_float64.go
new file mode 100644
index 000000000..47c3566c2
--- /dev/null
+++ b/vendor/github.com/docker/distribution/vendor/github.com/yvasiyarov/go-metrics/gauge_float64.go
@@ -0,0 +1,91 @@
+package metrics
+
+import "sync"
+
+// GaugeFloat64s hold a float64 value that can be set arbitrarily.
+type GaugeFloat64 interface {
+ Snapshot() GaugeFloat64
+ Update(float64)
+ Value() float64
+}
+
+// GetOrRegisterGaugeFloat64 returns an existing GaugeFloat64 or constructs and registers a
+// new StandardGaugeFloat64.
+func GetOrRegisterGaugeFloat64(name string, r Registry) GaugeFloat64 {
+ if nil == r {
+ r = DefaultRegistry
+ }
+ return r.GetOrRegister(name, NewGaugeFloat64()).(GaugeFloat64)
+}
+
+// NewGaugeFloat64 constructs a new StandardGaugeFloat64.
+func NewGaugeFloat64() GaugeFloat64 {
+ if UseNilMetrics {
+ return NilGaugeFloat64{}
+ }
+ return &StandardGaugeFloat64{
+ value: 0.0,
+ }
+}
+
+// NewRegisteredGaugeFloat64 constructs and registers a new StandardGaugeFloat64.
+func NewRegisteredGaugeFloat64(name string, r Registry) GaugeFloat64 {
+ c := NewGaugeFloat64()
+ if nil == r {
+ r = DefaultRegistry
+ }
+ r.Register(name, c)
+ return c
+}
+
+// GaugeFloat64Snapshot is a read-only copy of another GaugeFloat64.
+type GaugeFloat64Snapshot float64
+
+// Snapshot returns the snapshot.
+func (g GaugeFloat64Snapshot) Snapshot() GaugeFloat64 { return g }
+
+// Update panics.
+func (GaugeFloat64Snapshot) Update(float64) {
+ panic("Update called on a GaugeFloat64Snapshot")
+}
+
+// Value returns the value at the time the snapshot was taken.
+func (g GaugeFloat64Snapshot) Value() float64 { return float64(g) }
+
+// NilGaugeFloat64 is a no-op GaugeFloat64.
+type NilGaugeFloat64 struct{}
+
+// Snapshot is a no-op.
+func (NilGaugeFloat64) Snapshot() GaugeFloat64 { return NilGaugeFloat64{} }
+
+// Update is a no-op.
+func (NilGaugeFloat64) Update(v float64) {}
+
+// Value is a no-op.
+func (NilGaugeFloat64) Value() float64 { return 0.0 }
+
+// StandardGaugeFloat64 is the standard implementation of a GaugeFloat64 and uses
+// sync.Mutex to manage a single float64 value.
+type StandardGaugeFloat64 struct {
+ mutex sync.Mutex
+ value float64
+}
+
+// Snapshot returns a read-only copy of the gauge.
+func (g *StandardGaugeFloat64) Snapshot() GaugeFloat64 {
+ return GaugeFloat64Snapshot(g.Value())
+}
+
+// Update updates the gauge's value.
+func (g *StandardGaugeFloat64) Update(v float64) {
+ g.mutex.Lock()
+ defer g.mutex.Unlock()
+ g.value = v
+}
+
+// Value returns the gauge's current value.
+func (g *StandardGaugeFloat64) Value() float64 { + g.mutex.Lock() + defer g.mutex.Unlock() + return g.value +} diff --git a/vendor/github.com/docker/distribution/vendor/github.com/yvasiyarov/go-metrics/graphite.go b/vendor/github.com/docker/distribution/vendor/github.com/yvasiyarov/go-metrics/graphite.go new file mode 100644 index 000000000..643b3ec50 --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/github.com/yvasiyarov/go-metrics/graphite.go @@ -0,0 +1,104 @@ +package metrics + +import ( + "bufio" + "fmt" + "log" + "net" + "strconv" + "strings" + "time" +) + +// GraphiteConfig provides a container with configuration parameters for +// the Graphite exporter +type GraphiteConfig struct { + Addr *net.TCPAddr // Network address to connect to + Registry Registry // Registry to be exported + FlushInterval time.Duration // Flush interval + DurationUnit time.Duration // Time conversion unit for durations + Prefix string // Prefix to be prepended to metric names + Percentiles []float64 // Percentiles to export from timers and histograms +} + +// Graphite is a blocking exporter function which reports metrics in r +// to a graphite server located at addr, flushing them every d duration +// and prepending metric names with prefix. +func Graphite(r Registry, d time.Duration, prefix string, addr *net.TCPAddr) { + GraphiteWithConfig(GraphiteConfig{ + Addr: addr, + Registry: r, + FlushInterval: d, + DurationUnit: time.Nanosecond, + Prefix: prefix, + Percentiles: []float64{0.5, 0.75, 0.95, 0.99, 0.999}, + }) +} + +// GraphiteWithConfig is a blocking exporter function just like Graphite, +// but it takes a GraphiteConfig instead. +func GraphiteWithConfig(c GraphiteConfig) { + for _ = range time.Tick(c.FlushInterval) { + if err := graphite(&c); nil != err { + log.Println(err) + } + } +} + +func graphite(c *GraphiteConfig) error { + now := time.Now().Unix() + du := float64(c.DurationUnit) + conn, err := net.DialTCP("tcp", nil, c.Addr) + if nil != err { + return err + } + defer conn.Close() + w := bufio.NewWriter(conn) + c.Registry.Each(func(name string, i interface{}) { + switch metric := i.(type) { + case Counter: + fmt.Fprintf(w, "%s.%s.count %d %d\n", c.Prefix, name, metric.Count(), now) + case Gauge: + fmt.Fprintf(w, "%s.%s.value %d %d\n", c.Prefix, name, metric.Value(), now) + case GaugeFloat64: + fmt.Fprintf(w, "%s.%s.value %f %d\n", c.Prefix, name, metric.Value(), now) + case Histogram: + h := metric.Snapshot() + ps := h.Percentiles(c.Percentiles) + fmt.Fprintf(w, "%s.%s.count %d %d\n", c.Prefix, name, h.Count(), now) + fmt.Fprintf(w, "%s.%s.min %d %d\n", c.Prefix, name, h.Min(), now) + fmt.Fprintf(w, "%s.%s.max %d %d\n", c.Prefix, name, h.Max(), now) + fmt.Fprintf(w, "%s.%s.mean %.2f %d\n", c.Prefix, name, h.Mean(), now) + fmt.Fprintf(w, "%s.%s.std-dev %.2f %d\n", c.Prefix, name, h.StdDev(), now) + for psIdx, psKey := range c.Percentiles { + key := strings.Replace(strconv.FormatFloat(psKey*100.0, 'f', -1, 64), ".", "", 1) + fmt.Fprintf(w, "%s.%s.%s-percentile %.2f %d\n", c.Prefix, name, key, ps[psIdx], now) + } + case Meter: + m := metric.Snapshot() + fmt.Fprintf(w, "%s.%s.count %d %d\n", c.Prefix, name, m.Count(), now) + fmt.Fprintf(w, "%s.%s.one-minute %.2f %d\n", c.Prefix, name, m.Rate1(), now) + fmt.Fprintf(w, "%s.%s.five-minute %.2f %d\n", c.Prefix, name, m.Rate5(), now) + fmt.Fprintf(w, "%s.%s.fifteen-minute %.2f %d\n", c.Prefix, name, m.Rate15(), now) + fmt.Fprintf(w, "%s.%s.mean %.2f %d\n", c.Prefix, name, m.RateMean(), now) + case Timer: + t := metric.Snapshot() + ps := 
t.Percentiles(c.Percentiles) + fmt.Fprintf(w, "%s.%s.count %d %d\n", c.Prefix, name, t.Count(), now) + fmt.Fprintf(w, "%s.%s.min %d %d\n", c.Prefix, name, int64(du)*t.Min(), now) + fmt.Fprintf(w, "%s.%s.max %d %d\n", c.Prefix, name, int64(du)*t.Max(), now) + fmt.Fprintf(w, "%s.%s.mean %.2f %d\n", c.Prefix, name, du*t.Mean(), now) + fmt.Fprintf(w, "%s.%s.std-dev %.2f %d\n", c.Prefix, name, du*t.StdDev(), now) + for psIdx, psKey := range c.Percentiles { + key := strings.Replace(strconv.FormatFloat(psKey*100.0, 'f', -1, 64), ".", "", 1) + fmt.Fprintf(w, "%s.%s.%s-percentile %.2f %d\n", c.Prefix, name, key, ps[psIdx], now) + } + fmt.Fprintf(w, "%s.%s.one-minute %.2f %d\n", c.Prefix, name, t.Rate1(), now) + fmt.Fprintf(w, "%s.%s.five-minute %.2f %d\n", c.Prefix, name, t.Rate5(), now) + fmt.Fprintf(w, "%s.%s.fifteen-minute %.2f %d\n", c.Prefix, name, t.Rate15(), now) + fmt.Fprintf(w, "%s.%s.mean-rate %.2f %d\n", c.Prefix, name, t.RateMean(), now) + } + w.Flush() + }) + return nil +} diff --git a/vendor/github.com/docker/distribution/vendor/github.com/yvasiyarov/go-metrics/healthcheck.go b/vendor/github.com/docker/distribution/vendor/github.com/yvasiyarov/go-metrics/healthcheck.go new file mode 100644 index 000000000..445131cae --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/github.com/yvasiyarov/go-metrics/healthcheck.go @@ -0,0 +1,61 @@ +package metrics + +// Healthchecks hold an error value describing an arbitrary up/down status. +type Healthcheck interface { + Check() + Error() error + Healthy() + Unhealthy(error) +} + +// NewHealthcheck constructs a new Healthcheck which will use the given +// function to update its status. +func NewHealthcheck(f func(Healthcheck)) Healthcheck { + if UseNilMetrics { + return NilHealthcheck{} + } + return &StandardHealthcheck{nil, f} +} + +// NilHealthcheck is a no-op. +type NilHealthcheck struct{} + +// Check is a no-op. +func (NilHealthcheck) Check() {} + +// Error is a no-op. +func (NilHealthcheck) Error() error { return nil } + +// Healthy is a no-op. +func (NilHealthcheck) Healthy() {} + +// Unhealthy is a no-op. +func (NilHealthcheck) Unhealthy(error) {} + +// StandardHealthcheck is the standard implementation of a Healthcheck and +// stores the status and a function to call to update the status. +type StandardHealthcheck struct { + err error + f func(Healthcheck) +} + +// Check runs the healthcheck function to update the healthcheck's status. +func (h *StandardHealthcheck) Check() { + h.f(h) +} + +// Error returns the healthcheck's status, which will be nil if it is healthy. +func (h *StandardHealthcheck) Error() error { + return h.err +} + +// Healthy marks the healthcheck as healthy. +func (h *StandardHealthcheck) Healthy() { + h.err = nil +} + +// Unhealthy marks the healthcheck as unhealthy. The error is stored and +// may be retrieved by the Error method. +func (h *StandardHealthcheck) Unhealthy(err error) { + h.err = err +} diff --git a/vendor/github.com/docker/distribution/vendor/github.com/yvasiyarov/go-metrics/histogram.go b/vendor/github.com/docker/distribution/vendor/github.com/yvasiyarov/go-metrics/histogram.go new file mode 100644 index 000000000..7f3ee70cc --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/github.com/yvasiyarov/go-metrics/histogram.go @@ -0,0 +1,192 @@ +package metrics + +// Histograms calculate distribution statistics from a series of int64 values. 
+type Histogram interface { + Clear() + Count() int64 + Max() int64 + Mean() float64 + Min() int64 + Percentile(float64) float64 + Percentiles([]float64) []float64 + Sample() Sample + Snapshot() Histogram + StdDev() float64 + Update(int64) + Variance() float64 +} + +// GetOrRegisterHistogram returns an existing Histogram or constructs and +// registers a new StandardHistogram. +func GetOrRegisterHistogram(name string, r Registry, s Sample) Histogram { + if nil == r { + r = DefaultRegistry + } + return r.GetOrRegister(name, func() Histogram { return NewHistogram(s) }).(Histogram) +} + +// NewHistogram constructs a new StandardHistogram from a Sample. +func NewHistogram(s Sample) Histogram { + if UseNilMetrics { + return NilHistogram{} + } + return &StandardHistogram{sample: s} +} + +// NewRegisteredHistogram constructs and registers a new StandardHistogram from +// a Sample. +func NewRegisteredHistogram(name string, r Registry, s Sample) Histogram { + c := NewHistogram(s) + if nil == r { + r = DefaultRegistry + } + r.Register(name, c) + return c +} + +// HistogramSnapshot is a read-only copy of another Histogram. +type HistogramSnapshot struct { + sample *SampleSnapshot +} + +// Clear panics. +func (*HistogramSnapshot) Clear() { + panic("Clear called on a HistogramSnapshot") +} + +// Count returns the number of samples recorded at the time the snapshot was +// taken. +func (h *HistogramSnapshot) Count() int64 { return h.sample.Count() } + +// Max returns the maximum value in the sample at the time the snapshot was +// taken. +func (h *HistogramSnapshot) Max() int64 { return h.sample.Max() } + +// Mean returns the mean of the values in the sample at the time the snapshot +// was taken. +func (h *HistogramSnapshot) Mean() float64 { return h.sample.Mean() } + +// Min returns the minimum value in the sample at the time the snapshot was +// taken. +func (h *HistogramSnapshot) Min() int64 { return h.sample.Min() } + +// Percentile returns an arbitrary percentile of values in the sample at the +// time the snapshot was taken. +func (h *HistogramSnapshot) Percentile(p float64) float64 { + return h.sample.Percentile(p) +} + +// Percentiles returns a slice of arbitrary percentiles of values in the sample +// at the time the snapshot was taken. +func (h *HistogramSnapshot) Percentiles(ps []float64) []float64 { + return h.sample.Percentiles(ps) +} + +// Sample returns the Sample underlying the histogram. +func (h *HistogramSnapshot) Sample() Sample { return h.sample } + +// Snapshot returns the snapshot. +func (h *HistogramSnapshot) Snapshot() Histogram { return h } + +// StdDev returns the standard deviation of the values in the sample at the +// time the snapshot was taken. +func (h *HistogramSnapshot) StdDev() float64 { return h.sample.StdDev() } + +// Update panics. +func (*HistogramSnapshot) Update(int64) { + panic("Update called on a HistogramSnapshot") +} + +// Variance returns the variance of inputs at the time the snapshot was taken. +func (h *HistogramSnapshot) Variance() float64 { return h.sample.Variance() } + +// NilHistogram is a no-op Histogram. +type NilHistogram struct{} + +// Clear is a no-op. +func (NilHistogram) Clear() {} + +// Count is a no-op. +func (NilHistogram) Count() int64 { return 0 } + +// Max is a no-op. +func (NilHistogram) Max() int64 { return 0 } + +// Mean is a no-op. +func (NilHistogram) Mean() float64 { return 0.0 } + +// Min is a no-op. +func (NilHistogram) Min() int64 { return 0 } + +// Percentile is a no-op. 
+func (NilHistogram) Percentile(p float64) float64 { return 0.0 } + +// Percentiles is a no-op. +func (NilHistogram) Percentiles(ps []float64) []float64 { + return make([]float64, len(ps)) +} + +// Sample is a no-op. +func (NilHistogram) Sample() Sample { return NilSample{} } + +// Snapshot is a no-op. +func (NilHistogram) Snapshot() Histogram { return NilHistogram{} } + +// StdDev is a no-op. +func (NilHistogram) StdDev() float64 { return 0.0 } + +// Update is a no-op. +func (NilHistogram) Update(v int64) {} + +// Variance is a no-op. +func (NilHistogram) Variance() float64 { return 0.0 } + +// StandardHistogram is the standard implementation of a Histogram and uses a +// Sample to bound its memory use. +type StandardHistogram struct { + sample Sample +} + +// Clear clears the histogram and its sample. +func (h *StandardHistogram) Clear() { h.sample.Clear() } + +// Count returns the number of samples recorded since the histogram was last +// cleared. +func (h *StandardHistogram) Count() int64 { return h.sample.Count() } + +// Max returns the maximum value in the sample. +func (h *StandardHistogram) Max() int64 { return h.sample.Max() } + +// Mean returns the mean of the values in the sample. +func (h *StandardHistogram) Mean() float64 { return h.sample.Mean() } + +// Min returns the minimum value in the sample. +func (h *StandardHistogram) Min() int64 { return h.sample.Min() } + +// Percentile returns an arbitrary percentile of the values in the sample. +func (h *StandardHistogram) Percentile(p float64) float64 { + return h.sample.Percentile(p) +} + +// Percentiles returns a slice of arbitrary percentiles of the values in the +// sample. +func (h *StandardHistogram) Percentiles(ps []float64) []float64 { + return h.sample.Percentiles(ps) +} + +// Sample returns the Sample underlying the histogram. +func (h *StandardHistogram) Sample() Sample { return h.sample } + +// Snapshot returns a read-only copy of the histogram. +func (h *StandardHistogram) Snapshot() Histogram { + return &HistogramSnapshot{sample: h.sample.Snapshot().(*SampleSnapshot)} +} + +// StdDev returns the standard deviation of the values in the sample. +func (h *StandardHistogram) StdDev() float64 { return h.sample.StdDev() } + +// Update samples a new value. +func (h *StandardHistogram) Update(v int64) { h.sample.Update(v) } + +// Variance returns the variance of the values in the sample. +func (h *StandardHistogram) Variance() float64 { return h.sample.Variance() } diff --git a/vendor/github.com/docker/distribution/vendor/github.com/yvasiyarov/go-metrics/json.go b/vendor/github.com/docker/distribution/vendor/github.com/yvasiyarov/go-metrics/json.go new file mode 100644 index 000000000..04a9c9198 --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/github.com/yvasiyarov/go-metrics/json.go @@ -0,0 +1,83 @@ +package metrics + +import ( + "encoding/json" + "io" + "time" +) + +// MarshalJSON returns a byte slice containing a JSON representation of all +// the metrics in the Registry. 
+func (r StandardRegistry) MarshalJSON() ([]byte, error) { + data := make(map[string]map[string]interface{}) + r.Each(func(name string, i interface{}) { + values := make(map[string]interface{}) + switch metric := i.(type) { + case Counter: + values["count"] = metric.Count() + case Gauge: + values["value"] = metric.Value() + case GaugeFloat64: + values["value"] = metric.Value() + case Healthcheck: + values["error"] = nil + metric.Check() + if err := metric.Error(); nil != err { + values["error"] = metric.Error().Error() + } + case Histogram: + h := metric.Snapshot() + ps := h.Percentiles([]float64{0.5, 0.75, 0.95, 0.99, 0.999}) + values["count"] = h.Count() + values["min"] = h.Min() + values["max"] = h.Max() + values["mean"] = h.Mean() + values["stddev"] = h.StdDev() + values["median"] = ps[0] + values["75%"] = ps[1] + values["95%"] = ps[2] + values["99%"] = ps[3] + values["99.9%"] = ps[4] + case Meter: + m := metric.Snapshot() + values["count"] = m.Count() + values["1m.rate"] = m.Rate1() + values["5m.rate"] = m.Rate5() + values["15m.rate"] = m.Rate15() + values["mean.rate"] = m.RateMean() + case Timer: + t := metric.Snapshot() + ps := t.Percentiles([]float64{0.5, 0.75, 0.95, 0.99, 0.999}) + values["count"] = t.Count() + values["min"] = t.Min() + values["max"] = t.Max() + values["mean"] = t.Mean() + values["stddev"] = t.StdDev() + values["median"] = ps[0] + values["75%"] = ps[1] + values["95%"] = ps[2] + values["99%"] = ps[3] + values["99.9%"] = ps[4] + values["1m.rate"] = t.Rate1() + values["5m.rate"] = t.Rate5() + values["15m.rate"] = t.Rate15() + values["mean.rate"] = t.RateMean() + } + data[name] = values + }) + return json.Marshal(data) +} + +// WriteJSON writes metrics from the given registry periodically to the +// specified io.Writer as JSON. +func WriteJSON(r Registry, d time.Duration, w io.Writer) { + for _ = range time.Tick(d) { + WriteJSONOnce(r, w) + } +} + +// WriteJSONOnce writes metrics from the given registry to the specified +// io.Writer as JSON. +func WriteJSONOnce(r Registry, w io.Writer) { + json.NewEncoder(w).Encode(r) +} diff --git a/vendor/github.com/docker/distribution/vendor/github.com/yvasiyarov/go-metrics/log.go b/vendor/github.com/docker/distribution/vendor/github.com/yvasiyarov/go-metrics/log.go new file mode 100644 index 000000000..278a8a441 --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/github.com/yvasiyarov/go-metrics/log.go @@ -0,0 +1,70 @@ +package metrics + +import ( + "log" + "time" +) + +// Output each metric in the given registry periodically using the given +// logger. 
+func Log(r Registry, d time.Duration, l *log.Logger) { + for _ = range time.Tick(d) { + r.Each(func(name string, i interface{}) { + switch metric := i.(type) { + case Counter: + l.Printf("counter %s\n", name) + l.Printf(" count: %9d\n", metric.Count()) + case Gauge: + l.Printf("gauge %s\n", name) + l.Printf(" value: %9d\n", metric.Value()) + case GaugeFloat64: + l.Printf("gauge %s\n", name) + l.Printf(" value: %f\n", metric.Value()) + case Healthcheck: + metric.Check() + l.Printf("healthcheck %s\n", name) + l.Printf(" error: %v\n", metric.Error()) + case Histogram: + h := metric.Snapshot() + ps := h.Percentiles([]float64{0.5, 0.75, 0.95, 0.99, 0.999}) + l.Printf("histogram %s\n", name) + l.Printf(" count: %9d\n", h.Count()) + l.Printf(" min: %9d\n", h.Min()) + l.Printf(" max: %9d\n", h.Max()) + l.Printf(" mean: %12.2f\n", h.Mean()) + l.Printf(" stddev: %12.2f\n", h.StdDev()) + l.Printf(" median: %12.2f\n", ps[0]) + l.Printf(" 75%%: %12.2f\n", ps[1]) + l.Printf(" 95%%: %12.2f\n", ps[2]) + l.Printf(" 99%%: %12.2f\n", ps[3]) + l.Printf(" 99.9%%: %12.2f\n", ps[4]) + case Meter: + m := metric.Snapshot() + l.Printf("meter %s\n", name) + l.Printf(" count: %9d\n", m.Count()) + l.Printf(" 1-min rate: %12.2f\n", m.Rate1()) + l.Printf(" 5-min rate: %12.2f\n", m.Rate5()) + l.Printf(" 15-min rate: %12.2f\n", m.Rate15()) + l.Printf(" mean rate: %12.2f\n", m.RateMean()) + case Timer: + t := metric.Snapshot() + ps := t.Percentiles([]float64{0.5, 0.75, 0.95, 0.99, 0.999}) + l.Printf("timer %s\n", name) + l.Printf(" count: %9d\n", t.Count()) + l.Printf(" min: %9d\n", t.Min()) + l.Printf(" max: %9d\n", t.Max()) + l.Printf(" mean: %12.2f\n", t.Mean()) + l.Printf(" stddev: %12.2f\n", t.StdDev()) + l.Printf(" median: %12.2f\n", ps[0]) + l.Printf(" 75%%: %12.2f\n", ps[1]) + l.Printf(" 95%%: %12.2f\n", ps[2]) + l.Printf(" 99%%: %12.2f\n", ps[3]) + l.Printf(" 99.9%%: %12.2f\n", ps[4]) + l.Printf(" 1-min rate: %12.2f\n", t.Rate1()) + l.Printf(" 5-min rate: %12.2f\n", t.Rate5()) + l.Printf(" 15-min rate: %12.2f\n", t.Rate15()) + l.Printf(" mean rate: %12.2f\n", t.RateMean()) + } + }) + } +} diff --git a/vendor/github.com/docker/distribution/vendor/github.com/yvasiyarov/go-metrics/memory.md b/vendor/github.com/docker/distribution/vendor/github.com/yvasiyarov/go-metrics/memory.md new file mode 100644 index 000000000..47454f54b --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/github.com/yvasiyarov/go-metrics/memory.md @@ -0,0 +1,285 @@ +Memory usage +============ + +(Highly unscientific.) 
+ +Command used to gather static memory usage: + +```sh +grep ^Vm "/proc/$(ps fax | grep [m]etrics-bench | awk '{print $1}')/status" +``` + +Program used to gather baseline memory usage: + +```go +package main + +import "time" + +func main() { + time.Sleep(600e9) +} +``` + +Baseline +-------- + +``` +VmPeak: 42604 kB +VmSize: 42604 kB +VmLck: 0 kB +VmHWM: 1120 kB +VmRSS: 1120 kB +VmData: 35460 kB +VmStk: 136 kB +VmExe: 1020 kB +VmLib: 1848 kB +VmPTE: 36 kB +VmSwap: 0 kB +``` + +Program used to gather metric memory usage (with other metrics being similar): + +```go +package main + +import ( + "fmt" + "metrics" + "time" +) + +func main() { + fmt.Sprintf("foo") + metrics.NewRegistry() + time.Sleep(600e9) +} +``` + +1000 counters registered +------------------------ + +``` +VmPeak: 44016 kB +VmSize: 44016 kB +VmLck: 0 kB +VmHWM: 1928 kB +VmRSS: 1928 kB +VmData: 36868 kB +VmStk: 136 kB +VmExe: 1024 kB +VmLib: 1848 kB +VmPTE: 40 kB +VmSwap: 0 kB +``` + +**1.412 kB virtual, TODO 0.808 kB resident per counter.** + +100000 counters registered +-------------------------- + +``` +VmPeak: 55024 kB +VmSize: 55024 kB +VmLck: 0 kB +VmHWM: 12440 kB +VmRSS: 12440 kB +VmData: 47876 kB +VmStk: 136 kB +VmExe: 1024 kB +VmLib: 1848 kB +VmPTE: 64 kB +VmSwap: 0 kB +``` + +**0.1242 kB virtual, 0.1132 kB resident per counter.** + +1000 gauges registered +---------------------- + +``` +VmPeak: 44012 kB +VmSize: 44012 kB +VmLck: 0 kB +VmHWM: 1928 kB +VmRSS: 1928 kB +VmData: 36868 kB +VmStk: 136 kB +VmExe: 1020 kB +VmLib: 1848 kB +VmPTE: 40 kB +VmSwap: 0 kB +``` + +**1.408 kB virtual, 0.808 kB resident per counter.** + +100000 gauges registered +------------------------ + +``` +VmPeak: 55020 kB +VmSize: 55020 kB +VmLck: 0 kB +VmHWM: 12432 kB +VmRSS: 12432 kB +VmData: 47876 kB +VmStk: 136 kB +VmExe: 1020 kB +VmLib: 1848 kB +VmPTE: 60 kB +VmSwap: 0 kB +``` + +**0.12416 kB virtual, 0.11312 resident per gauge.** + +1000 histograms with a uniform sample size of 1028 +-------------------------------------------------- + +``` +VmPeak: 72272 kB +VmSize: 72272 kB +VmLck: 0 kB +VmHWM: 16204 kB +VmRSS: 16204 kB +VmData: 65100 kB +VmStk: 136 kB +VmExe: 1048 kB +VmLib: 1848 kB +VmPTE: 80 kB +VmSwap: 0 kB +``` + +**29.668 kB virtual, TODO 15.084 resident per histogram.** + +10000 histograms with a uniform sample size of 1028 +--------------------------------------------------- + +``` +VmPeak: 256912 kB +VmSize: 256912 kB +VmLck: 0 kB +VmHWM: 146204 kB +VmRSS: 146204 kB +VmData: 249740 kB +VmStk: 136 kB +VmExe: 1048 kB +VmLib: 1848 kB +VmPTE: 448 kB +VmSwap: 0 kB +``` + +**21.4308 kB virtual, 14.5084 kB resident per histogram.** + +50000 histograms with a uniform sample size of 1028 +--------------------------------------------------- + +``` +VmPeak: 908112 kB +VmSize: 908112 kB +VmLck: 0 kB +VmHWM: 645832 kB +VmRSS: 645588 kB +VmData: 900940 kB +VmStk: 136 kB +VmExe: 1048 kB +VmLib: 1848 kB +VmPTE: 1716 kB +VmSwap: 1544 kB +``` + +**17.31016 kB virtual, 12.88936 kB resident per histogram.** + +1000 histograms with an exponentially-decaying sample size of 1028 and alpha of 0.015 +------------------------------------------------------------------------------------- + +``` +VmPeak: 62480 kB +VmSize: 62480 kB +VmLck: 0 kB +VmHWM: 11572 kB +VmRSS: 11572 kB +VmData: 55308 kB +VmStk: 136 kB +VmExe: 1048 kB +VmLib: 1848 kB +VmPTE: 64 kB +VmSwap: 0 kB +``` + +**19.876 kB virtual, 10.452 kB resident per histogram.** + +10000 histograms with an exponentially-decaying sample size of 1028 and alpha of 0.015 
+-------------------------------------------------------------------------------------- + +``` +VmPeak: 153296 kB +VmSize: 153296 kB +VmLck: 0 kB +VmHWM: 101176 kB +VmRSS: 101176 kB +VmData: 146124 kB +VmStk: 136 kB +VmExe: 1048 kB +VmLib: 1848 kB +VmPTE: 240 kB +VmSwap: 0 kB +``` + +**11.0692 kB virtual, 10.0056 kB resident per histogram.** + +50000 histograms with an exponentially-decaying sample size of 1028 and alpha of 0.015 +-------------------------------------------------------------------------------------- + +``` +VmPeak: 557264 kB +VmSize: 557264 kB +VmLck: 0 kB +VmHWM: 501056 kB +VmRSS: 501056 kB +VmData: 550092 kB +VmStk: 136 kB +VmExe: 1048 kB +VmLib: 1848 kB +VmPTE: 1032 kB +VmSwap: 0 kB +``` + +**10.2932 kB virtual, 9.99872 kB resident per histogram.** + +1000 meters +----------- + +``` +VmPeak: 74504 kB +VmSize: 74504 kB +VmLck: 0 kB +VmHWM: 24124 kB +VmRSS: 24124 kB +VmData: 67340 kB +VmStk: 136 kB +VmExe: 1040 kB +VmLib: 1848 kB +VmPTE: 92 kB +VmSwap: 0 kB +``` + +**31.9 kB virtual, 23.004 kB resident per meter.** + +10000 meters +------------ + +``` +VmPeak: 278920 kB +VmSize: 278920 kB +VmLck: 0 kB +VmHWM: 227300 kB +VmRSS: 227300 kB +VmData: 271756 kB +VmStk: 136 kB +VmExe: 1040 kB +VmLib: 1848 kB +VmPTE: 488 kB +VmSwap: 0 kB +``` + +**23.6316 kB virtual, 22.618 kB resident per meter.** diff --git a/vendor/github.com/docker/distribution/vendor/github.com/yvasiyarov/go-metrics/meter.go b/vendor/github.com/docker/distribution/vendor/github.com/yvasiyarov/go-metrics/meter.go new file mode 100644 index 000000000..0389ab0b8 --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/github.com/yvasiyarov/go-metrics/meter.go @@ -0,0 +1,233 @@ +package metrics + +import ( + "sync" + "time" +) + +// Meters count events to produce exponentially-weighted moving average rates +// at one-, five-, and fifteen-minutes and a mean rate. +type Meter interface { + Count() int64 + Mark(int64) + Rate1() float64 + Rate5() float64 + Rate15() float64 + RateMean() float64 + Snapshot() Meter +} + +// GetOrRegisterMeter returns an existing Meter or constructs and registers a +// new StandardMeter. +func GetOrRegisterMeter(name string, r Registry) Meter { + if nil == r { + r = DefaultRegistry + } + return r.GetOrRegister(name, NewMeter).(Meter) +} + +// NewMeter constructs a new StandardMeter and launches a goroutine. +func NewMeter() Meter { + if UseNilMetrics { + return NilMeter{} + } + m := newStandardMeter() + arbiter.Lock() + defer arbiter.Unlock() + arbiter.meters = append(arbiter.meters, m) + if !arbiter.started { + arbiter.started = true + go arbiter.tick() + } + return m +} + +// NewMeter constructs and registers a new StandardMeter and launches a +// goroutine. +func NewRegisteredMeter(name string, r Registry) Meter { + c := NewMeter() + if nil == r { + r = DefaultRegistry + } + r.Register(name, c) + return c +} + +// MeterSnapshot is a read-only copy of another Meter. +type MeterSnapshot struct { + count int64 + rate1, rate5, rate15, rateMean float64 +} + +// Count returns the count of events at the time the snapshot was taken. +func (m *MeterSnapshot) Count() int64 { return m.count } + +// Mark panics. +func (*MeterSnapshot) Mark(n int64) { + panic("Mark called on a MeterSnapshot") +} + +// Rate1 returns the one-minute moving average rate of events per second at the +// time the snapshot was taken. +func (m *MeterSnapshot) Rate1() float64 { return m.rate1 } + +// Rate5 returns the five-minute moving average rate of events per second at +// the time the snapshot was taken. 
+func (m *MeterSnapshot) Rate5() float64 { return m.rate5 }
+
+// Rate15 returns the fifteen-minute moving average rate of events per second
+// at the time the snapshot was taken.
+func (m *MeterSnapshot) Rate15() float64 { return m.rate15 }
+
+// RateMean returns the meter's mean rate of events per second at the time the
+// snapshot was taken.
+func (m *MeterSnapshot) RateMean() float64 { return m.rateMean }
+
+// Snapshot returns the snapshot.
+func (m *MeterSnapshot) Snapshot() Meter { return m }
+
+// NilMeter is a no-op Meter.
+type NilMeter struct{}
+
+// Count is a no-op.
+func (NilMeter) Count() int64 { return 0 }
+
+// Mark is a no-op.
+func (NilMeter) Mark(n int64) {}
+
+// Rate1 is a no-op.
+func (NilMeter) Rate1() float64 { return 0.0 }
+
+// Rate5 is a no-op.
+func (NilMeter) Rate5() float64 { return 0.0 }
+
+// Rate15 is a no-op.
+func (NilMeter) Rate15() float64 { return 0.0 }
+
+// RateMean is a no-op.
+func (NilMeter) RateMean() float64 { return 0.0 }
+
+// Snapshot is a no-op.
+func (NilMeter) Snapshot() Meter { return NilMeter{} }
+
+// StandardMeter is the standard implementation of a Meter.
+type StandardMeter struct {
+ lock sync.RWMutex
+ snapshot *MeterSnapshot
+ a1, a5, a15 EWMA
+ startTime time.Time
+}
+
+func newStandardMeter() *StandardMeter {
+ return &StandardMeter{
+ snapshot: &MeterSnapshot{},
+ a1: NewEWMA1(),
+ a5: NewEWMA5(),
+ a15: NewEWMA15(),
+ startTime: time.Now(),
+ }
+}
+
+// Count returns the number of events recorded.
+func (m *StandardMeter) Count() int64 {
+ m.lock.RLock()
+ count := m.snapshot.count
+ m.lock.RUnlock()
+ return count
+}
+
+// Mark records the occurrence of n events.
+func (m *StandardMeter) Mark(n int64) {
+ m.lock.Lock()
+ defer m.lock.Unlock()
+ m.snapshot.count += n
+ m.a1.Update(n)
+ m.a5.Update(n)
+ m.a15.Update(n)
+ m.updateSnapshot()
+}
+
+// Rate1 returns the one-minute moving average rate of events per second.
+func (m *StandardMeter) Rate1() float64 {
+ m.lock.RLock()
+ rate1 := m.snapshot.rate1
+ m.lock.RUnlock()
+ return rate1
+}
+
+// Rate5 returns the five-minute moving average rate of events per second.
+func (m *StandardMeter) Rate5() float64 {
+ m.lock.RLock()
+ rate5 := m.snapshot.rate5
+ m.lock.RUnlock()
+ return rate5
+}
+
+// Rate15 returns the fifteen-minute moving average rate of events per second.
+func (m *StandardMeter) Rate15() float64 {
+ m.lock.RLock()
+ rate15 := m.snapshot.rate15
+ m.lock.RUnlock()
+ return rate15
+}
+
+// RateMean returns the meter's mean rate of events per second.
+func (m *StandardMeter) RateMean() float64 {
+ m.lock.RLock()
+ rateMean := m.snapshot.rateMean
+ m.lock.RUnlock()
+ return rateMean
+}
+
+// Snapshot returns a read-only copy of the meter.
+func (m *StandardMeter) Snapshot() Meter { + m.lock.RLock() + snapshot := *m.snapshot + m.lock.RUnlock() + return &snapshot +} + +func (m *StandardMeter) updateSnapshot() { + // should run with write lock held on m.lock + snapshot := m.snapshot + snapshot.rate1 = m.a1.Rate() + snapshot.rate5 = m.a5.Rate() + snapshot.rate15 = m.a15.Rate() + snapshot.rateMean = float64(snapshot.count) / time.Since(m.startTime).Seconds() +} + +func (m *StandardMeter) tick() { + m.lock.Lock() + defer m.lock.Unlock() + m.a1.Tick() + m.a5.Tick() + m.a15.Tick() + m.updateSnapshot() +} + +type meterArbiter struct { + sync.RWMutex + started bool + meters []*StandardMeter + ticker *time.Ticker +} + +var arbiter = meterArbiter{ticker: time.NewTicker(5e9)} + +// Ticks meters on the scheduled interval +func (ma *meterArbiter) tick() { + for { + select { + case <-ma.ticker.C: + ma.tickMeters() + } + } +} + +func (ma *meterArbiter) tickMeters() { + ma.RLock() + defer ma.RUnlock() + for _, meter := range ma.meters { + meter.tick() + } +} diff --git a/vendor/github.com/docker/distribution/vendor/github.com/yvasiyarov/go-metrics/metrics.go b/vendor/github.com/docker/distribution/vendor/github.com/yvasiyarov/go-metrics/metrics.go new file mode 100644 index 000000000..b97a49ed1 --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/github.com/yvasiyarov/go-metrics/metrics.go @@ -0,0 +1,13 @@ +// Go port of Coda Hale's Metrics library +// +// +// +// Coda Hale's original work: +package metrics + +// UseNilMetrics is checked by the constructor functions for all of the +// standard metrics. If it is true, the metric returned is a stub. +// +// This global kill-switch helps quantify the observer effect and makes +// for less cluttered pprof profiles. +var UseNilMetrics bool = false diff --git a/vendor/github.com/docker/distribution/vendor/github.com/yvasiyarov/go-metrics/opentsdb.go b/vendor/github.com/docker/distribution/vendor/github.com/yvasiyarov/go-metrics/opentsdb.go new file mode 100644 index 000000000..fbc292de1 --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/github.com/yvasiyarov/go-metrics/opentsdb.go @@ -0,0 +1,119 @@ +package metrics + +import ( + "bufio" + "fmt" + "log" + "net" + "time" + "os" + "strings" +) + +var shortHostName string = "" + +// OpenTSDBConfig provides a container with configuration parameters for +// the OpenTSDB exporter +type OpenTSDBConfig struct { + Addr *net.TCPAddr // Network address to connect to + Registry Registry // Registry to be exported + FlushInterval time.Duration // Flush interval + DurationUnit time.Duration // Time conversion unit for durations + Prefix string // Prefix to be prepended to metric names +} + +// OpenTSDB is a blocking exporter function which reports metrics in r +// to a TSDB server located at addr, flushing them every d duration +// and prepending metric names with prefix. +func OpenTSDB(r Registry, d time.Duration, prefix string, addr *net.TCPAddr) { + OpenTSDBWithConfig(OpenTSDBConfig{ + Addr: addr, + Registry: r, + FlushInterval: d, + DurationUnit: time.Nanosecond, + Prefix: prefix, + }) +} + +// OpenTSDBWithConfig is a blocking exporter function just like OpenTSDB, +// but it takes a OpenTSDBConfig instead. 
+func OpenTSDBWithConfig(c OpenTSDBConfig) { + for _ = range time.Tick(c.FlushInterval) { + if err := openTSDB(&c); nil != err { + log.Println(err) + } + } +} + +func getShortHostname() string { + if shortHostName == "" { + host, _ := os.Hostname() + if index := strings.Index(host, "."); index > 0 { + shortHostName = host[:index] + } else { + shortHostName = host + } + } + return shortHostName +} + +func openTSDB(c *OpenTSDBConfig) error { + shortHostname := getShortHostname() + now := time.Now().Unix() + du := float64(c.DurationUnit) + conn, err := net.DialTCP("tcp", nil, c.Addr) + if nil != err { + return err + } + defer conn.Close() + w := bufio.NewWriter(conn) + c.Registry.Each(func(name string, i interface{}) { + switch metric := i.(type) { + case Counter: + fmt.Fprintf(w, "put %s.%s.count %d %d host=%s\n", c.Prefix, name, now, metric.Count(), shortHostname) + case Gauge: + fmt.Fprintf(w, "put %s.%s.value %d %d host=%s\n", c.Prefix, name, now, metric.Value(), shortHostname) + case GaugeFloat64: + fmt.Fprintf(w, "put %s.%s.value %d %f host=%s\n", c.Prefix, name, now, metric.Value(), shortHostname) + case Histogram: + h := metric.Snapshot() + ps := h.Percentiles([]float64{0.5, 0.75, 0.95, 0.99, 0.999}) + fmt.Fprintf(w, "put %s.%s.count %d %d host=%s\n", c.Prefix, name, now, h.Count(), shortHostname) + fmt.Fprintf(w, "put %s.%s.min %d %d host=%s\n", c.Prefix, name, now, h.Min(), shortHostname) + fmt.Fprintf(w, "put %s.%s.max %d %d host=%s\n", c.Prefix, name, now, h.Max(), shortHostname) + fmt.Fprintf(w, "put %s.%s.mean %d %.2f host=%s\n", c.Prefix, name, now, h.Mean(), shortHostname) + fmt.Fprintf(w, "put %s.%s.std-dev %d %.2f host=%s\n", c.Prefix, name, now, h.StdDev(), shortHostname) + fmt.Fprintf(w, "put %s.%s.50-percentile %d %.2f host=%s\n", c.Prefix, name, now, ps[0], shortHostname) + fmt.Fprintf(w, "put %s.%s.75-percentile %d %.2f host=%s\n", c.Prefix, name, now, ps[1], shortHostname) + fmt.Fprintf(w, "put %s.%s.95-percentile %d %.2f host=%s\n", c.Prefix, name, now, ps[2], shortHostname) + fmt.Fprintf(w, "put %s.%s.99-percentile %d %.2f host=%s\n", c.Prefix, name, now, ps[3], shortHostname) + fmt.Fprintf(w, "put %s.%s.999-percentile %d %.2f host=%s\n", c.Prefix, name, now, ps[4], shortHostname) + case Meter: + m := metric.Snapshot() + fmt.Fprintf(w, "put %s.%s.count %d %d host=%s\n", c.Prefix, name, now, m.Count(), shortHostname) + fmt.Fprintf(w, "put %s.%s.one-minute %d %.2f host=%s\n", c.Prefix, name, now, m.Rate1(), shortHostname) + fmt.Fprintf(w, "put %s.%s.five-minute %d %.2f host=%s\n", c.Prefix, name, now, m.Rate5(), shortHostname) + fmt.Fprintf(w, "put %s.%s.fifteen-minute %d %.2f host=%s\n", c.Prefix, name, now, m.Rate15(), shortHostname) + fmt.Fprintf(w, "put %s.%s.mean %d %.2f host=%s\n", c.Prefix, name, now, m.RateMean(), shortHostname) + case Timer: + t := metric.Snapshot() + ps := t.Percentiles([]float64{0.5, 0.75, 0.95, 0.99, 0.999}) + fmt.Fprintf(w, "put %s.%s.count %d %d host=%s\n", c.Prefix, name, now, t.Count(), shortHostname) + fmt.Fprintf(w, "put %s.%s.min %d %d host=%s\n", c.Prefix, name, now, int64(du)*t.Min(), shortHostname) + fmt.Fprintf(w, "put %s.%s.max %d %d host=%s\n", c.Prefix, name, now, int64(du)*t.Max(), shortHostname) + fmt.Fprintf(w, "put %s.%s.mean %d %.2f host=%s\n", c.Prefix, name, now, du*t.Mean(), shortHostname) + fmt.Fprintf(w, "put %s.%s.std-dev %d %.2f host=%s\n", c.Prefix, name, now, du*t.StdDev(), shortHostname) + fmt.Fprintf(w, "put %s.%s.50-percentile %d %.2f host=%s\n", c.Prefix, name, now, du*ps[0], shortHostname) + fmt.Fprintf(w, 
"put %s.%s.75-percentile %d %.2f host=%s\n", c.Prefix, name, now, du*ps[1], shortHostname) + fmt.Fprintf(w, "put %s.%s.95-percentile %d %.2f host=%s\n", c.Prefix, name, now, du*ps[2], shortHostname) + fmt.Fprintf(w, "put %s.%s.99-percentile %d %.2f host=%s\n", c.Prefix, name, now, du*ps[3], shortHostname) + fmt.Fprintf(w, "put %s.%s.999-percentile %d %.2f host=%s\n", c.Prefix, name, now, du*ps[4], shortHostname) + fmt.Fprintf(w, "put %s.%s.one-minute %d %.2f host=%s\n", c.Prefix, name, now, t.Rate1(), shortHostname) + fmt.Fprintf(w, "put %s.%s.five-minute %d %.2f host=%s\n", c.Prefix, name, now, t.Rate5(), shortHostname) + fmt.Fprintf(w, "put %s.%s.fifteen-minute %d %.2f host=%s\n", c.Prefix, name, now, t.Rate15(), shortHostname) + fmt.Fprintf(w, "put %s.%s.mean-rate %d %.2f host=%s\n", c.Prefix, name, now, t.RateMean(), shortHostname) + } + w.Flush() + }) + return nil +} diff --git a/vendor/github.com/docker/distribution/vendor/github.com/yvasiyarov/go-metrics/registry.go b/vendor/github.com/docker/distribution/vendor/github.com/yvasiyarov/go-metrics/registry.go new file mode 100644 index 000000000..9ef498a2a --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/github.com/yvasiyarov/go-metrics/registry.go @@ -0,0 +1,168 @@ +package metrics + +import ( + "fmt" + "reflect" + "sync" +) + +// DuplicateMetric is the error returned by Registry.Register when a metric +// already exists. If you mean to Register that metric you must first +// Unregister the existing metric. +type DuplicateMetric string + +func (err DuplicateMetric) Error() string { + return fmt.Sprintf("duplicate metric: %s", string(err)) +} + +// A Registry holds references to a set of metrics by name and can iterate +// over them, calling callback functions provided by the user. +// +// This is an interface so as to encourage other structs to implement +// the Registry API as appropriate. +type Registry interface { + + // Call the given function for each registered metric. + Each(func(string, interface{})) + + // Get the metric by the given name or nil if none is registered. + Get(string) interface{} + + // Gets an existing metric or registers the given one. + // The interface can be the metric to register if not found in registry, + // or a function returning the metric for lazy instantiation. + GetOrRegister(string, interface{}) interface{} + + // Register the given metric under the given name. + Register(string, interface{}) error + + // Run all registered healthchecks. + RunHealthchecks() + + // Unregister the metric with the given name. + Unregister(string) +} + +// The standard implementation of a Registry is a mutex-protected map +// of names to metrics. +type StandardRegistry struct { + metrics map[string]interface{} + mutex sync.Mutex +} + +// Create a new registry. +func NewRegistry() Registry { + return &StandardRegistry{metrics: make(map[string]interface{})} +} + +// Call the given function for each registered metric. +func (r *StandardRegistry) Each(f func(string, interface{})) { + for name, i := range r.registered() { + f(name, i) + } +} + +// Get the metric by the given name or nil if none is registered. +func (r *StandardRegistry) Get(name string) interface{} { + r.mutex.Lock() + defer r.mutex.Unlock() + return r.metrics[name] +} + +// Gets an existing metric or creates and registers a new one. Threadsafe +// alternative to calling Get and Register on failure. +// The interface can be the metric to register if not found in registry, +// or a function returning the metric for lazy instantiation. 
+func (r *StandardRegistry) GetOrRegister(name string, i interface{}) interface{} { + r.mutex.Lock() + defer r.mutex.Unlock() + if metric, ok := r.metrics[name]; ok { + return metric + } + if v := reflect.ValueOf(i); v.Kind() == reflect.Func { + i = v.Call(nil)[0].Interface() + } + r.register(name, i) + return i +} + +// Register the given metric under the given name. Returns a DuplicateMetric +// if a metric by the given name is already registered. +func (r *StandardRegistry) Register(name string, i interface{}) error { + r.mutex.Lock() + defer r.mutex.Unlock() + return r.register(name, i) +} + +// Run all registered healthchecks. +func (r *StandardRegistry) RunHealthchecks() { + r.mutex.Lock() + defer r.mutex.Unlock() + for _, i := range r.metrics { + if h, ok := i.(Healthcheck); ok { + h.Check() + } + } +} + +// Unregister the metric with the given name. +func (r *StandardRegistry) Unregister(name string) { + r.mutex.Lock() + defer r.mutex.Unlock() + delete(r.metrics, name) +} + +func (r *StandardRegistry) register(name string, i interface{}) error { + if _, ok := r.metrics[name]; ok { + return DuplicateMetric(name) + } + switch i.(type) { + case Counter, Gauge, GaugeFloat64, Healthcheck, Histogram, Meter, Timer: + r.metrics[name] = i + } + return nil +} + +func (r *StandardRegistry) registered() map[string]interface{} { + metrics := make(map[string]interface{}, len(r.metrics)) + r.mutex.Lock() + defer r.mutex.Unlock() + for name, i := range r.metrics { + metrics[name] = i + } + return metrics +} + +var DefaultRegistry Registry = NewRegistry() + +// Call the given function for each registered metric. +func Each(f func(string, interface{})) { + DefaultRegistry.Each(f) +} + +// Get the metric by the given name or nil if none is registered. +func Get(name string) interface{} { + return DefaultRegistry.Get(name) +} + +// Gets an existing metric or creates and registers a new one. Threadsafe +// alternative to calling Get and Register on failure. +func GetOrRegister(name string, i interface{}) interface{} { + return DefaultRegistry.GetOrRegister(name, i) +} + +// Register the given metric under the given name. Returns a DuplicateMetric +// if a metric by the given name is already registered. +func Register(name string, i interface{}) error { + return DefaultRegistry.Register(name, i) +} + +// Run all registered healthchecks. +func RunHealthchecks() { + DefaultRegistry.RunHealthchecks() +} + +// Unregister the metric with the given name. 
+func Unregister(name string) { + DefaultRegistry.Unregister(name) +} diff --git a/vendor/github.com/docker/distribution/vendor/github.com/yvasiyarov/go-metrics/runtime.go b/vendor/github.com/docker/distribution/vendor/github.com/yvasiyarov/go-metrics/runtime.go new file mode 100644 index 000000000..82574bf25 --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/github.com/yvasiyarov/go-metrics/runtime.go @@ -0,0 +1,200 @@ +package metrics + +import ( + "runtime" + "time" +) + +var ( + memStats runtime.MemStats + runtimeMetrics struct { + MemStats struct { + Alloc Gauge + BuckHashSys Gauge + DebugGC Gauge + EnableGC Gauge + Frees Gauge + HeapAlloc Gauge + HeapIdle Gauge + HeapInuse Gauge + HeapObjects Gauge + HeapReleased Gauge + HeapSys Gauge + LastGC Gauge + Lookups Gauge + Mallocs Gauge + MCacheInuse Gauge + MCacheSys Gauge + MSpanInuse Gauge + MSpanSys Gauge + NextGC Gauge + NumGC Gauge + PauseNs Histogram + PauseTotalNs Gauge + StackInuse Gauge + StackSys Gauge + Sys Gauge + TotalAlloc Gauge + } + NumCgoCall Gauge + NumGoroutine Gauge + ReadMemStats Timer + } + frees uint64 + lookups uint64 + mallocs uint64 + numGC uint32 + numCgoCalls int64 +) + +// Capture new values for the Go runtime statistics exported in +// runtime.MemStats. This is designed to be called as a goroutine. +func CaptureRuntimeMemStats(r Registry, d time.Duration) { + for _ = range time.Tick(d) { + CaptureRuntimeMemStatsOnce(r) + } +} + +// Capture new values for the Go runtime statistics exported in +// runtime.MemStats. This is designed to be called in a background +// goroutine. Giving a registry which has not been given to +// RegisterRuntimeMemStats will panic. +// +// Be very careful with this because runtime.ReadMemStats calls the C +// functions runtime·semacquire(&runtime·worldsema) and runtime·stoptheworld() +// and that last one does what it says on the tin. +func CaptureRuntimeMemStatsOnce(r Registry) { + t := time.Now() + runtime.ReadMemStats(&memStats) // This takes 50-200us. 
+ runtimeMetrics.ReadMemStats.UpdateSince(t) + + runtimeMetrics.MemStats.Alloc.Update(int64(memStats.Alloc)) + runtimeMetrics.MemStats.BuckHashSys.Update(int64(memStats.BuckHashSys)) + if memStats.DebugGC { + runtimeMetrics.MemStats.DebugGC.Update(1) + } else { + runtimeMetrics.MemStats.DebugGC.Update(0) + } + if memStats.EnableGC { + runtimeMetrics.MemStats.EnableGC.Update(1) + } else { + runtimeMetrics.MemStats.EnableGC.Update(0) + } + + runtimeMetrics.MemStats.Frees.Update(int64(memStats.Frees - frees)) + runtimeMetrics.MemStats.HeapAlloc.Update(int64(memStats.HeapAlloc)) + runtimeMetrics.MemStats.HeapIdle.Update(int64(memStats.HeapIdle)) + runtimeMetrics.MemStats.HeapInuse.Update(int64(memStats.HeapInuse)) + runtimeMetrics.MemStats.HeapObjects.Update(int64(memStats.HeapObjects)) + runtimeMetrics.MemStats.HeapReleased.Update(int64(memStats.HeapReleased)) + runtimeMetrics.MemStats.HeapSys.Update(int64(memStats.HeapSys)) + runtimeMetrics.MemStats.LastGC.Update(int64(memStats.LastGC)) + runtimeMetrics.MemStats.Lookups.Update(int64(memStats.Lookups - lookups)) + runtimeMetrics.MemStats.Mallocs.Update(int64(memStats.Mallocs - mallocs)) + runtimeMetrics.MemStats.MCacheInuse.Update(int64(memStats.MCacheInuse)) + runtimeMetrics.MemStats.MCacheSys.Update(int64(memStats.MCacheSys)) + runtimeMetrics.MemStats.MSpanInuse.Update(int64(memStats.MSpanInuse)) + runtimeMetrics.MemStats.MSpanSys.Update(int64(memStats.MSpanSys)) + runtimeMetrics.MemStats.NextGC.Update(int64(memStats.NextGC)) + runtimeMetrics.MemStats.NumGC.Update(int64(memStats.NumGC - numGC)) + + // + i := numGC % uint32(len(memStats.PauseNs)) + ii := memStats.NumGC % uint32(len(memStats.PauseNs)) + if memStats.NumGC-numGC >= uint32(len(memStats.PauseNs)) { + for i = 0; i < uint32(len(memStats.PauseNs)); i++ { + runtimeMetrics.MemStats.PauseNs.Update(int64(memStats.PauseNs[i])) + } + } else { + if i > ii { + for ; i < uint32(len(memStats.PauseNs)); i++ { + runtimeMetrics.MemStats.PauseNs.Update(int64(memStats.PauseNs[i])) + } + i = 0 + } + for ; i < ii; i++ { + runtimeMetrics.MemStats.PauseNs.Update(int64(memStats.PauseNs[i])) + } + } + frees = memStats.Frees + lookups = memStats.Lookups + mallocs = memStats.Mallocs + numGC = memStats.NumGC + + runtimeMetrics.MemStats.PauseTotalNs.Update(int64(memStats.PauseTotalNs)) + runtimeMetrics.MemStats.StackInuse.Update(int64(memStats.StackInuse)) + runtimeMetrics.MemStats.StackSys.Update(int64(memStats.StackSys)) + runtimeMetrics.MemStats.Sys.Update(int64(memStats.Sys)) + runtimeMetrics.MemStats.TotalAlloc.Update(int64(memStats.TotalAlloc)) + + currentNumCgoCalls := numCgoCall() + runtimeMetrics.NumCgoCall.Update(currentNumCgoCalls - numCgoCalls) + numCgoCalls = currentNumCgoCalls + + runtimeMetrics.NumGoroutine.Update(int64(runtime.NumGoroutine())) +} + +// Register runtimeMetrics for the Go runtime statistics exported in runtime and +// specifically runtime.MemStats. The runtimeMetrics are named by their +// fully-qualified Go symbols, i.e. runtime.MemStats.Alloc. 
+func RegisterRuntimeMemStats(r Registry) { + runtimeMetrics.MemStats.Alloc = NewGauge() + runtimeMetrics.MemStats.BuckHashSys = NewGauge() + runtimeMetrics.MemStats.DebugGC = NewGauge() + runtimeMetrics.MemStats.EnableGC = NewGauge() + runtimeMetrics.MemStats.Frees = NewGauge() + runtimeMetrics.MemStats.HeapAlloc = NewGauge() + runtimeMetrics.MemStats.HeapIdle = NewGauge() + runtimeMetrics.MemStats.HeapInuse = NewGauge() + runtimeMetrics.MemStats.HeapObjects = NewGauge() + runtimeMetrics.MemStats.HeapReleased = NewGauge() + runtimeMetrics.MemStats.HeapSys = NewGauge() + runtimeMetrics.MemStats.LastGC = NewGauge() + runtimeMetrics.MemStats.Lookups = NewGauge() + runtimeMetrics.MemStats.Mallocs = NewGauge() + runtimeMetrics.MemStats.MCacheInuse = NewGauge() + runtimeMetrics.MemStats.MCacheSys = NewGauge() + runtimeMetrics.MemStats.MSpanInuse = NewGauge() + runtimeMetrics.MemStats.MSpanSys = NewGauge() + runtimeMetrics.MemStats.NextGC = NewGauge() + runtimeMetrics.MemStats.NumGC = NewGauge() + runtimeMetrics.MemStats.PauseNs = NewHistogram(NewExpDecaySample(1028, 0.015)) + runtimeMetrics.MemStats.PauseTotalNs = NewGauge() + runtimeMetrics.MemStats.StackInuse = NewGauge() + runtimeMetrics.MemStats.StackSys = NewGauge() + runtimeMetrics.MemStats.Sys = NewGauge() + runtimeMetrics.MemStats.TotalAlloc = NewGauge() + runtimeMetrics.NumCgoCall = NewGauge() + runtimeMetrics.NumGoroutine = NewGauge() + runtimeMetrics.ReadMemStats = NewTimer() + + r.Register("runtime.MemStats.Alloc", runtimeMetrics.MemStats.Alloc) + r.Register("runtime.MemStats.BuckHashSys", runtimeMetrics.MemStats.BuckHashSys) + r.Register("runtime.MemStats.DebugGC", runtimeMetrics.MemStats.DebugGC) + r.Register("runtime.MemStats.EnableGC", runtimeMetrics.MemStats.EnableGC) + r.Register("runtime.MemStats.Frees", runtimeMetrics.MemStats.Frees) + r.Register("runtime.MemStats.HeapAlloc", runtimeMetrics.MemStats.HeapAlloc) + r.Register("runtime.MemStats.HeapIdle", runtimeMetrics.MemStats.HeapIdle) + r.Register("runtime.MemStats.HeapInuse", runtimeMetrics.MemStats.HeapInuse) + r.Register("runtime.MemStats.HeapObjects", runtimeMetrics.MemStats.HeapObjects) + r.Register("runtime.MemStats.HeapReleased", runtimeMetrics.MemStats.HeapReleased) + r.Register("runtime.MemStats.HeapSys", runtimeMetrics.MemStats.HeapSys) + r.Register("runtime.MemStats.LastGC", runtimeMetrics.MemStats.LastGC) + r.Register("runtime.MemStats.Lookups", runtimeMetrics.MemStats.Lookups) + r.Register("runtime.MemStats.Mallocs", runtimeMetrics.MemStats.Mallocs) + r.Register("runtime.MemStats.MCacheInuse", runtimeMetrics.MemStats.MCacheInuse) + r.Register("runtime.MemStats.MCacheSys", runtimeMetrics.MemStats.MCacheSys) + r.Register("runtime.MemStats.MSpanInuse", runtimeMetrics.MemStats.MSpanInuse) + r.Register("runtime.MemStats.MSpanSys", runtimeMetrics.MemStats.MSpanSys) + r.Register("runtime.MemStats.NextGC", runtimeMetrics.MemStats.NextGC) + r.Register("runtime.MemStats.NumGC", runtimeMetrics.MemStats.NumGC) + r.Register("runtime.MemStats.PauseNs", runtimeMetrics.MemStats.PauseNs) + r.Register("runtime.MemStats.PauseTotalNs", runtimeMetrics.MemStats.PauseTotalNs) + r.Register("runtime.MemStats.StackInuse", runtimeMetrics.MemStats.StackInuse) + r.Register("runtime.MemStats.StackSys", runtimeMetrics.MemStats.StackSys) + r.Register("runtime.MemStats.Sys", runtimeMetrics.MemStats.Sys) + r.Register("runtime.MemStats.TotalAlloc", runtimeMetrics.MemStats.TotalAlloc) + r.Register("runtime.NumCgoCall", runtimeMetrics.NumCgoCall) + r.Register("runtime.NumGoroutine", 
runtimeMetrics.NumGoroutine) + r.Register("runtime.ReadMemStats", runtimeMetrics.ReadMemStats) +} diff --git a/vendor/github.com/docker/distribution/vendor/github.com/yvasiyarov/go-metrics/runtime_cgo.go b/vendor/github.com/docker/distribution/vendor/github.com/yvasiyarov/go-metrics/runtime_cgo.go new file mode 100644 index 000000000..38976a8c5 --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/github.com/yvasiyarov/go-metrics/runtime_cgo.go @@ -0,0 +1,9 @@ +// +build cgo + +package metrics + +import "runtime" + +func numCgoCall() int64 { + return runtime.NumCgoCall() +} diff --git a/vendor/github.com/docker/distribution/vendor/github.com/yvasiyarov/go-metrics/runtime_no_cgo.go b/vendor/github.com/docker/distribution/vendor/github.com/yvasiyarov/go-metrics/runtime_no_cgo.go new file mode 100644 index 000000000..38220330c --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/github.com/yvasiyarov/go-metrics/runtime_no_cgo.go @@ -0,0 +1,7 @@ +// +build !cgo + +package metrics + +func numCgoCall() int64 { + return 0 +} diff --git a/vendor/github.com/docker/distribution/vendor/github.com/yvasiyarov/go-metrics/sample.go b/vendor/github.com/docker/distribution/vendor/github.com/yvasiyarov/go-metrics/sample.go new file mode 100644 index 000000000..e34b7b585 --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/github.com/yvasiyarov/go-metrics/sample.go @@ -0,0 +1,568 @@ +package metrics + +import ( + "container/heap" + "math" + "math/rand" + "sort" + "sync" + "time" +) + +const rescaleThreshold = time.Hour + +// Samples maintain a statistically-significant selection of values from +// a stream. +type Sample interface { + Clear() + Count() int64 + Max() int64 + Mean() float64 + Min() int64 + Percentile(float64) float64 + Percentiles([]float64) []float64 + Size() int + Snapshot() Sample + StdDev() float64 + Sum() int64 + Update(int64) + Values() []int64 + Variance() float64 +} + +// ExpDecaySample is an exponentially-decaying sample using a forward-decaying +// priority reservoir. See Cormode et al's "Forward Decay: A Practical Time +// Decay Model for Streaming Systems". +// +// +type ExpDecaySample struct { + alpha float64 + count int64 + mutex sync.Mutex + reservoirSize int + t0, t1 time.Time + values expDecaySampleHeap +} + +// NewExpDecaySample constructs a new exponentially-decaying sample with the +// given reservoir size and alpha. +func NewExpDecaySample(reservoirSize int, alpha float64) Sample { + if UseNilMetrics { + return NilSample{} + } + s := &ExpDecaySample{ + alpha: alpha, + reservoirSize: reservoirSize, + t0: time.Now(), + values: make(expDecaySampleHeap, 0, reservoirSize), + } + s.t1 = time.Now().Add(rescaleThreshold) + return s +} + +// Clear clears all samples. +func (s *ExpDecaySample) Clear() { + s.mutex.Lock() + defer s.mutex.Unlock() + s.count = 0 + s.t0 = time.Now() + s.t1 = s.t0.Add(rescaleThreshold) + s.values = make(expDecaySampleHeap, 0, s.reservoirSize) +} + +// Count returns the number of samples recorded, which may exceed the +// reservoir size. +func (s *ExpDecaySample) Count() int64 { + s.mutex.Lock() + defer s.mutex.Unlock() + return s.count +} + +// Max returns the maximum value in the sample, which may not be the maximum +// value ever to be part of the sample. +func (s *ExpDecaySample) Max() int64 { + return SampleMax(s.Values()) +} + +// Mean returns the mean of the values in the sample. 
+func (s *ExpDecaySample) Mean() float64 { + return SampleMean(s.Values()) +} + +// Min returns the minimum value in the sample, which may not be the minimum +// value ever to be part of the sample. +func (s *ExpDecaySample) Min() int64 { + return SampleMin(s.Values()) +} + +// Percentile returns an arbitrary percentile of values in the sample. +func (s *ExpDecaySample) Percentile(p float64) float64 { + return SamplePercentile(s.Values(), p) +} + +// Percentiles returns a slice of arbitrary percentiles of values in the +// sample. +func (s *ExpDecaySample) Percentiles(ps []float64) []float64 { + return SamplePercentiles(s.Values(), ps) +} + +// Size returns the size of the sample, which is at most the reservoir size. +func (s *ExpDecaySample) Size() int { + s.mutex.Lock() + defer s.mutex.Unlock() + return len(s.values) +} + +// Snapshot returns a read-only copy of the sample. +func (s *ExpDecaySample) Snapshot() Sample { + s.mutex.Lock() + defer s.mutex.Unlock() + values := make([]int64, len(s.values)) + for i, v := range s.values { + values[i] = v.v + } + return &SampleSnapshot{ + count: s.count, + values: values, + } +} + +// StdDev returns the standard deviation of the values in the sample. +func (s *ExpDecaySample) StdDev() float64 { + return SampleStdDev(s.Values()) +} + +// Sum returns the sum of the values in the sample. +func (s *ExpDecaySample) Sum() int64 { + return SampleSum(s.Values()) +} + +// Update samples a new value. +func (s *ExpDecaySample) Update(v int64) { + s.update(time.Now(), v) +} + +// Values returns a copy of the values in the sample. +func (s *ExpDecaySample) Values() []int64 { + s.mutex.Lock() + defer s.mutex.Unlock() + values := make([]int64, len(s.values)) + for i, v := range s.values { + values[i] = v.v + } + return values +} + +// Variance returns the variance of the values in the sample. +func (s *ExpDecaySample) Variance() float64 { + return SampleVariance(s.Values()) +} + +// update samples a new value at a particular timestamp. This is a method all +// its own to facilitate testing. +func (s *ExpDecaySample) update(t time.Time, v int64) { + s.mutex.Lock() + defer s.mutex.Unlock() + s.count++ + if len(s.values) == s.reservoirSize { + heap.Pop(&s.values) + } + heap.Push(&s.values, expDecaySample{ + k: math.Exp(t.Sub(s.t0).Seconds()*s.alpha) / rand.Float64(), + v: v, + }) + if t.After(s.t1) { + values := s.values + t0 := s.t0 + s.values = make(expDecaySampleHeap, 0, s.reservoirSize) + s.t0 = t + s.t1 = s.t0.Add(rescaleThreshold) + for _, v := range values { + v.k = v.k * math.Exp(-s.alpha*float64(s.t0.Sub(t0))) + heap.Push(&s.values, v) + } + } +} + +// NilSample is a no-op Sample. +type NilSample struct{} + +// Clear is a no-op. +func (NilSample) Clear() {} + +// Count is a no-op. +func (NilSample) Count() int64 { return 0 } + +// Max is a no-op. +func (NilSample) Max() int64 { return 0 } + +// Mean is a no-op. +func (NilSample) Mean() float64 { return 0.0 } + +// Min is a no-op. +func (NilSample) Min() int64 { return 0 } + +// Percentile is a no-op. +func (NilSample) Percentile(p float64) float64 { return 0.0 } + +// Percentiles is a no-op. +func (NilSample) Percentiles(ps []float64) []float64 { + return make([]float64, len(ps)) +} + +// Size is a no-op. +func (NilSample) Size() int { return 0 } + +// Sample is a no-op. +func (NilSample) Snapshot() Sample { return NilSample{} } + +// StdDev is a no-op. +func (NilSample) StdDev() float64 { return 0.0 } + +// Sum is a no-op. +func (NilSample) Sum() int64 { return 0 } + +// Update is a no-op. 
+func (NilSample) Update(v int64) {} + +// Values is a no-op. +func (NilSample) Values() []int64 { return []int64{} } + +// Variance is a no-op. +func (NilSample) Variance() float64 { return 0.0 } + +// SampleMax returns the maximum value of the slice of int64. +func SampleMax(values []int64) int64 { + if 0 == len(values) { + return 0 + } + var max int64 = math.MinInt64 + for _, v := range values { + if max < v { + max = v + } + } + return max +} + +// SampleMean returns the mean value of the slice of int64. +func SampleMean(values []int64) float64 { + if 0 == len(values) { + return 0.0 + } + return float64(SampleSum(values)) / float64(len(values)) +} + +// SampleMin returns the minimum value of the slice of int64. +func SampleMin(values []int64) int64 { + if 0 == len(values) { + return 0 + } + var min int64 = math.MaxInt64 + for _, v := range values { + if min > v { + min = v + } + } + return min +} + +// SamplePercentiles returns an arbitrary percentile of the slice of int64. +func SamplePercentile(values int64Slice, p float64) float64 { + return SamplePercentiles(values, []float64{p})[0] +} + +// SamplePercentiles returns a slice of arbitrary percentiles of the slice of +// int64. +func SamplePercentiles(values int64Slice, ps []float64) []float64 { + scores := make([]float64, len(ps)) + size := len(values) + if size > 0 { + sort.Sort(values) + for i, p := range ps { + pos := p * float64(size+1) + if pos < 1.0 { + scores[i] = float64(values[0]) + } else if pos >= float64(size) { + scores[i] = float64(values[size-1]) + } else { + lower := float64(values[int(pos)-1]) + upper := float64(values[int(pos)]) + scores[i] = lower + (pos-math.Floor(pos))*(upper-lower) + } + } + } + return scores +} + +// SampleSnapshot is a read-only copy of another Sample. +type SampleSnapshot struct { + count int64 + values []int64 +} + +// Clear panics. +func (*SampleSnapshot) Clear() { + panic("Clear called on a SampleSnapshot") +} + +// Count returns the count of inputs at the time the snapshot was taken. +func (s *SampleSnapshot) Count() int64 { return s.count } + +// Max returns the maximal value at the time the snapshot was taken. +func (s *SampleSnapshot) Max() int64 { return SampleMax(s.values) } + +// Mean returns the mean value at the time the snapshot was taken. +func (s *SampleSnapshot) Mean() float64 { return SampleMean(s.values) } + +// Min returns the minimal value at the time the snapshot was taken. +func (s *SampleSnapshot) Min() int64 { return SampleMin(s.values) } + +// Percentile returns an arbitrary percentile of values at the time the +// snapshot was taken. +func (s *SampleSnapshot) Percentile(p float64) float64 { + return SamplePercentile(s.values, p) +} + +// Percentiles returns a slice of arbitrary percentiles of values at the time +// the snapshot was taken. +func (s *SampleSnapshot) Percentiles(ps []float64) []float64 { + return SamplePercentiles(s.values, ps) +} + +// Size returns the size of the sample at the time the snapshot was taken. +func (s *SampleSnapshot) Size() int { return len(s.values) } + +// Snapshot returns the snapshot. +func (s *SampleSnapshot) Snapshot() Sample { return s } + +// StdDev returns the standard deviation of values at the time the snapshot was +// taken. +func (s *SampleSnapshot) StdDev() float64 { return SampleStdDev(s.values) } + +// Sum returns the sum of values at the time the snapshot was taken. +func (s *SampleSnapshot) Sum() int64 { return SampleSum(s.values) } + +// Update panics. 
+func (*SampleSnapshot) Update(int64) { + panic("Update called on a SampleSnapshot") +} + +// Values returns a copy of the values in the sample. +func (s *SampleSnapshot) Values() []int64 { + values := make([]int64, len(s.values)) + copy(values, s.values) + return values +} + +// Variance returns the variance of values at the time the snapshot was taken. +func (s *SampleSnapshot) Variance() float64 { return SampleVariance(s.values) } + +// SampleStdDev returns the standard deviation of the slice of int64. +func SampleStdDev(values []int64) float64 { + return math.Sqrt(SampleVariance(values)) +} + +// SampleSum returns the sum of the slice of int64. +func SampleSum(values []int64) int64 { + var sum int64 + for _, v := range values { + sum += v + } + return sum +} + +// SampleVariance returns the variance of the slice of int64. +func SampleVariance(values []int64) float64 { + if 0 == len(values) { + return 0.0 + } + m := SampleMean(values) + var sum float64 + for _, v := range values { + d := float64(v) - m + sum += d * d + } + return sum / float64(len(values)) +} + +// A uniform sample using Vitter's Algorithm R. +// +// +type UniformSample struct { + count int64 + mutex sync.Mutex + reservoirSize int + values []int64 +} + +// NewUniformSample constructs a new uniform sample with the given reservoir +// size. +func NewUniformSample(reservoirSize int) Sample { + if UseNilMetrics { + return NilSample{} + } + return &UniformSample{ + reservoirSize: reservoirSize, + values: make([]int64, 0, reservoirSize), + } +} + +// Clear clears all samples. +func (s *UniformSample) Clear() { + s.mutex.Lock() + defer s.mutex.Unlock() + s.count = 0 + s.values = make([]int64, 0, s.reservoirSize) +} + +// Count returns the number of samples recorded, which may exceed the +// reservoir size. +func (s *UniformSample) Count() int64 { + s.mutex.Lock() + defer s.mutex.Unlock() + return s.count +} + +// Max returns the maximum value in the sample, which may not be the maximum +// value ever to be part of the sample. +func (s *UniformSample) Max() int64 { + s.mutex.Lock() + defer s.mutex.Unlock() + return SampleMax(s.values) +} + +// Mean returns the mean of the values in the sample. +func (s *UniformSample) Mean() float64 { + s.mutex.Lock() + defer s.mutex.Unlock() + return SampleMean(s.values) +} + +// Min returns the minimum value in the sample, which may not be the minimum +// value ever to be part of the sample. +func (s *UniformSample) Min() int64 { + s.mutex.Lock() + defer s.mutex.Unlock() + return SampleMin(s.values) +} + +// Percentile returns an arbitrary percentile of values in the sample. +func (s *UniformSample) Percentile(p float64) float64 { + s.mutex.Lock() + defer s.mutex.Unlock() + return SamplePercentile(s.values, p) +} + +// Percentiles returns a slice of arbitrary percentiles of values in the +// sample. +func (s *UniformSample) Percentiles(ps []float64) []float64 { + s.mutex.Lock() + defer s.mutex.Unlock() + return SamplePercentiles(s.values, ps) +} + +// Size returns the size of the sample, which is at most the reservoir size. +func (s *UniformSample) Size() int { + s.mutex.Lock() + defer s.mutex.Unlock() + return len(s.values) +} + +// Snapshot returns a read-only copy of the sample. +func (s *UniformSample) Snapshot() Sample { + s.mutex.Lock() + defer s.mutex.Unlock() + values := make([]int64, len(s.values)) + copy(values, s.values) + return &SampleSnapshot{ + count: s.count, + values: values, + } +} + +// StdDev returns the standard deviation of the values in the sample. 
+func (s *UniformSample) StdDev() float64 { + s.mutex.Lock() + defer s.mutex.Unlock() + return SampleStdDev(s.values) +} + +// Sum returns the sum of the values in the sample. +func (s *UniformSample) Sum() int64 { + s.mutex.Lock() + defer s.mutex.Unlock() + return SampleSum(s.values) +} + +// Update samples a new value. +func (s *UniformSample) Update(v int64) { + s.mutex.Lock() + defer s.mutex.Unlock() + s.count++ + if len(s.values) < s.reservoirSize { + s.values = append(s.values, v) + } else { + s.values[rand.Intn(s.reservoirSize)] = v + } +} + +// Values returns a copy of the values in the sample. +func (s *UniformSample) Values() []int64 { + s.mutex.Lock() + defer s.mutex.Unlock() + values := make([]int64, len(s.values)) + copy(values, s.values) + return values +} + +// Variance returns the variance of the values in the sample. +func (s *UniformSample) Variance() float64 { + s.mutex.Lock() + defer s.mutex.Unlock() + return SampleVariance(s.values) +} + +// expDecaySample represents an individual sample in a heap. +type expDecaySample struct { + k float64 + v int64 +} + +// expDecaySampleHeap is a min-heap of expDecaySamples. +type expDecaySampleHeap []expDecaySample + +func (q expDecaySampleHeap) Len() int { + return len(q) +} + +func (q expDecaySampleHeap) Less(i, j int) bool { + return q[i].k < q[j].k +} + +func (q *expDecaySampleHeap) Pop() interface{} { + q_ := *q + n := len(q_) + i := q_[n-1] + q_ = q_[0 : n-1] + *q = q_ + return i +} + +func (q *expDecaySampleHeap) Push(x interface{}) { + q_ := *q + n := len(q_) + q_ = q_[0 : n+1] + q_[n] = x.(expDecaySample) + *q = q_ +} + +func (q expDecaySampleHeap) Swap(i, j int) { + q[i], q[j] = q[j], q[i] +} + +type int64Slice []int64 + +func (p int64Slice) Len() int { return len(p) } +func (p int64Slice) Less(i, j int) bool { return p[i] < p[j] } +func (p int64Slice) Swap(i, j int) { p[i], p[j] = p[j], p[i] } diff --git a/vendor/github.com/docker/distribution/vendor/github.com/yvasiyarov/go-metrics/syslog.go b/vendor/github.com/docker/distribution/vendor/github.com/yvasiyarov/go-metrics/syslog.go new file mode 100644 index 000000000..693f19085 --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/github.com/yvasiyarov/go-metrics/syslog.go @@ -0,0 +1,78 @@ +// +build !windows + +package metrics + +import ( + "fmt" + "log/syslog" + "time" +) + +// Output each metric in the given registry to syslog periodically using +// the given syslogger. 
+func Syslog(r Registry, d time.Duration, w *syslog.Writer) { + for _ = range time.Tick(d) { + r.Each(func(name string, i interface{}) { + switch metric := i.(type) { + case Counter: + w.Info(fmt.Sprintf("counter %s: count: %d", name, metric.Count())) + case Gauge: + w.Info(fmt.Sprintf("gauge %s: value: %d", name, metric.Value())) + case GaugeFloat64: + w.Info(fmt.Sprintf("gauge %s: value: %f", name, metric.Value())) + case Healthcheck: + metric.Check() + w.Info(fmt.Sprintf("healthcheck %s: error: %v", name, metric.Error())) + case Histogram: + h := metric.Snapshot() + ps := h.Percentiles([]float64{0.5, 0.75, 0.95, 0.99, 0.999}) + w.Info(fmt.Sprintf( + "histogram %s: count: %d min: %d max: %d mean: %.2f stddev: %.2f median: %.2f 75%%: %.2f 95%%: %.2f 99%%: %.2f 99.9%%: %.2f", + name, + h.Count(), + h.Min(), + h.Max(), + h.Mean(), + h.StdDev(), + ps[0], + ps[1], + ps[2], + ps[3], + ps[4], + )) + case Meter: + m := metric.Snapshot() + w.Info(fmt.Sprintf( + "meter %s: count: %d 1-min: %.2f 5-min: %.2f 15-min: %.2f mean: %.2f", + name, + m.Count(), + m.Rate1(), + m.Rate5(), + m.Rate15(), + m.RateMean(), + )) + case Timer: + t := metric.Snapshot() + ps := t.Percentiles([]float64{0.5, 0.75, 0.95, 0.99, 0.999}) + w.Info(fmt.Sprintf( + "timer %s: count: %d min: %d max: %d mean: %.2f stddev: %.2f median: %.2f 75%%: %.2f 95%%: %.2f 99%%: %.2f 99.9%%: %.2f 1-min: %.2f 5-min: %.2f 15-min: %.2f mean-rate: %.2f", + name, + t.Count(), + t.Min(), + t.Max(), + t.Mean(), + t.StdDev(), + ps[0], + ps[1], + ps[2], + ps[3], + ps[4], + t.Rate1(), + t.Rate5(), + t.Rate15(), + t.RateMean(), + )) + } + }) + } +} diff --git a/vendor/github.com/docker/distribution/vendor/github.com/yvasiyarov/go-metrics/timer.go b/vendor/github.com/docker/distribution/vendor/github.com/yvasiyarov/go-metrics/timer.go new file mode 100644 index 000000000..73f19b585 --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/github.com/yvasiyarov/go-metrics/timer.go @@ -0,0 +1,299 @@ +package metrics + +import ( + "sync" + "time" +) + +// Timers capture the duration and rate of events. +type Timer interface { + Count() int64 + Max() int64 + Mean() float64 + Min() int64 + Percentile(float64) float64 + Percentiles([]float64) []float64 + Rate1() float64 + Rate5() float64 + Rate15() float64 + RateMean() float64 + Snapshot() Timer + StdDev() float64 + Time(func()) + Update(time.Duration) + UpdateSince(time.Time) + Variance() float64 +} + +// GetOrRegisterTimer returns an existing Timer or constructs and registers a +// new StandardTimer. +func GetOrRegisterTimer(name string, r Registry) Timer { + if nil == r { + r = DefaultRegistry + } + return r.GetOrRegister(name, NewTimer).(Timer) +} + +// NewCustomTimer constructs a new StandardTimer from a Histogram and a Meter. +func NewCustomTimer(h Histogram, m Meter) Timer { + if UseNilMetrics { + return NilTimer{} + } + return &StandardTimer{ + histogram: h, + meter: m, + } +} + +// NewRegisteredTimer constructs and registers a new StandardTimer. +func NewRegisteredTimer(name string, r Registry) Timer { + c := NewTimer() + if nil == r { + r = DefaultRegistry + } + r.Register(name, c) + return c +} + +// NewTimer constructs a new StandardTimer using an exponentially-decaying +// sample with the same reservoir size and alpha as UNIX load averages. +func NewTimer() Timer { + if UseNilMetrics { + return NilTimer{} + } + return &StandardTimer{ + histogram: NewHistogram(NewExpDecaySample(1028, 0.015)), + meter: NewMeter(), + } +} + +// NilTimer is a no-op Timer. 
+type NilTimer struct { + h Histogram + m Meter +} + +// Count is a no-op. +func (NilTimer) Count() int64 { return 0 } + +// Max is a no-op. +func (NilTimer) Max() int64 { return 0 } + +// Mean is a no-op. +func (NilTimer) Mean() float64 { return 0.0 } + +// Min is a no-op. +func (NilTimer) Min() int64 { return 0 } + +// Percentile is a no-op. +func (NilTimer) Percentile(p float64) float64 { return 0.0 } + +// Percentiles is a no-op. +func (NilTimer) Percentiles(ps []float64) []float64 { + return make([]float64, len(ps)) +} + +// Rate1 is a no-op. +func (NilTimer) Rate1() float64 { return 0.0 } + +// Rate5 is a no-op. +func (NilTimer) Rate5() float64 { return 0.0 } + +// Rate15 is a no-op. +func (NilTimer) Rate15() float64 { return 0.0 } + +// RateMean is a no-op. +func (NilTimer) RateMean() float64 { return 0.0 } + +// Snapshot is a no-op. +func (NilTimer) Snapshot() Timer { return NilTimer{} } + +// StdDev is a no-op. +func (NilTimer) StdDev() float64 { return 0.0 } + +// Time is a no-op. +func (NilTimer) Time(func()) {} + +// Update is a no-op. +func (NilTimer) Update(time.Duration) {} + +// UpdateSince is a no-op. +func (NilTimer) UpdateSince(time.Time) {} + +// Variance is a no-op. +func (NilTimer) Variance() float64 { return 0.0 } + +// StandardTimer is the standard implementation of a Timer and uses a Histogram +// and Meter. +type StandardTimer struct { + histogram Histogram + meter Meter + mutex sync.Mutex +} + +// Count returns the number of events recorded. +func (t *StandardTimer) Count() int64 { + return t.histogram.Count() +} + +// Max returns the maximum value in the sample. +func (t *StandardTimer) Max() int64 { + return t.histogram.Max() +} + +// Mean returns the mean of the values in the sample. +func (t *StandardTimer) Mean() float64 { + return t.histogram.Mean() +} + +// Min returns the minimum value in the sample. +func (t *StandardTimer) Min() int64 { + return t.histogram.Min() +} + +// Percentile returns an arbitrary percentile of the values in the sample. +func (t *StandardTimer) Percentile(p float64) float64 { + return t.histogram.Percentile(p) +} + +// Percentiles returns a slice of arbitrary percentiles of the values in the +// sample. +func (t *StandardTimer) Percentiles(ps []float64) []float64 { + return t.histogram.Percentiles(ps) +} + +// Rate1 returns the one-minute moving average rate of events per second. +func (t *StandardTimer) Rate1() float64 { + return t.meter.Rate1() +} + +// Rate5 returns the five-minute moving average rate of events per second. +func (t *StandardTimer) Rate5() float64 { + return t.meter.Rate5() +} + +// Rate15 returns the fifteen-minute moving average rate of events per second. +func (t *StandardTimer) Rate15() float64 { + return t.meter.Rate15() +} + +// RateMean returns the meter's mean rate of events per second. +func (t *StandardTimer) RateMean() float64 { + return t.meter.RateMean() +} + +// Snapshot returns a read-only copy of the timer. +func (t *StandardTimer) Snapshot() Timer { + t.mutex.Lock() + defer t.mutex.Unlock() + return &TimerSnapshot{ + histogram: t.histogram.Snapshot().(*HistogramSnapshot), + meter: t.meter.Snapshot().(*MeterSnapshot), + } +} + +// StdDev returns the standard deviation of the values in the sample. +func (t *StandardTimer) StdDev() float64 { + return t.histogram.StdDev() +} + +// Record the duration of the execution of the given function. +func (t *StandardTimer) Time(f func()) { + ts := time.Now() + f() + t.Update(time.Since(ts)) +} + +// Record the duration of an event. 
+func (t *StandardTimer) Update(d time.Duration) { + t.mutex.Lock() + defer t.mutex.Unlock() + t.histogram.Update(int64(d)) + t.meter.Mark(1) +} + +// Record the duration of an event that started at a time and ends now. +func (t *StandardTimer) UpdateSince(ts time.Time) { + t.mutex.Lock() + defer t.mutex.Unlock() + t.histogram.Update(int64(time.Since(ts))) + t.meter.Mark(1) +} + +// Variance returns the variance of the values in the sample. +func (t *StandardTimer) Variance() float64 { + return t.histogram.Variance() +} + +// TimerSnapshot is a read-only copy of another Timer. +type TimerSnapshot struct { + histogram *HistogramSnapshot + meter *MeterSnapshot +} + +// Count returns the number of events recorded at the time the snapshot was +// taken. +func (t *TimerSnapshot) Count() int64 { return t.histogram.Count() } + +// Max returns the maximum value at the time the snapshot was taken. +func (t *TimerSnapshot) Max() int64 { return t.histogram.Max() } + +// Mean returns the mean value at the time the snapshot was taken. +func (t *TimerSnapshot) Mean() float64 { return t.histogram.Mean() } + +// Min returns the minimum value at the time the snapshot was taken. +func (t *TimerSnapshot) Min() int64 { return t.histogram.Min() } + +// Percentile returns an arbitrary percentile of sampled values at the time the +// snapshot was taken. +func (t *TimerSnapshot) Percentile(p float64) float64 { + return t.histogram.Percentile(p) +} + +// Percentiles returns a slice of arbitrary percentiles of sampled values at +// the time the snapshot was taken. +func (t *TimerSnapshot) Percentiles(ps []float64) []float64 { + return t.histogram.Percentiles(ps) +} + +// Rate1 returns the one-minute moving average rate of events per second at the +// time the snapshot was taken. +func (t *TimerSnapshot) Rate1() float64 { return t.meter.Rate1() } + +// Rate5 returns the five-minute moving average rate of events per second at +// the time the snapshot was taken. +func (t *TimerSnapshot) Rate5() float64 { return t.meter.Rate5() } + +// Rate15 returns the fifteen-minute moving average rate of events per second +// at the time the snapshot was taken. +func (t *TimerSnapshot) Rate15() float64 { return t.meter.Rate15() } + +// RateMean returns the meter's mean rate of events per second at the time the +// snapshot was taken. +func (t *TimerSnapshot) RateMean() float64 { return t.meter.RateMean() } + +// Snapshot returns the snapshot. +func (t *TimerSnapshot) Snapshot() Timer { return t } + +// StdDev returns the standard deviation of the values at the time the snapshot +// was taken. +func (t *TimerSnapshot) StdDev() float64 { return t.histogram.StdDev() } + +// Time panics. +func (*TimerSnapshot) Time(func()) { + panic("Time called on a TimerSnapshot") +} + +// Update panics. +func (*TimerSnapshot) Update(time.Duration) { + panic("Update called on a TimerSnapshot") +} + +// UpdateSince panics. +func (*TimerSnapshot) UpdateSince(time.Time) { + panic("UpdateSince called on a TimerSnapshot") +} + +// Variance returns the variance of the values at the time the snapshot was +// taken. 
+func (t *TimerSnapshot) Variance() float64 { return t.histogram.Variance() } diff --git a/vendor/github.com/docker/distribution/vendor/github.com/yvasiyarov/go-metrics/writer.go b/vendor/github.com/docker/distribution/vendor/github.com/yvasiyarov/go-metrics/writer.go new file mode 100644 index 000000000..091e971d2 --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/github.com/yvasiyarov/go-metrics/writer.go @@ -0,0 +1,100 @@ +package metrics + +import ( + "fmt" + "io" + "sort" + "time" +) + +// Write sorts writes each metric in the given registry periodically to the +// given io.Writer. +func Write(r Registry, d time.Duration, w io.Writer) { + for _ = range time.Tick(d) { + WriteOnce(r, w) + } +} + +// WriteOnce sorts and writes metrics in the given registry to the given +// io.Writer. +func WriteOnce(r Registry, w io.Writer) { + var namedMetrics namedMetricSlice + r.Each(func(name string, i interface{}) { + namedMetrics = append(namedMetrics, namedMetric{name, i}) + }) + + sort.Sort(namedMetrics) + for _, namedMetric := range namedMetrics { + switch metric := namedMetric.m.(type) { + case Counter: + fmt.Fprintf(w, "counter %s\n", namedMetric.name) + fmt.Fprintf(w, " count: %9d\n", metric.Count()) + case Gauge: + fmt.Fprintf(w, "gauge %s\n", namedMetric.name) + fmt.Fprintf(w, " value: %9d\n", metric.Value()) + case GaugeFloat64: + fmt.Fprintf(w, "gauge %s\n", namedMetric.name) + fmt.Fprintf(w, " value: %f\n", metric.Value()) + case Healthcheck: + metric.Check() + fmt.Fprintf(w, "healthcheck %s\n", namedMetric.name) + fmt.Fprintf(w, " error: %v\n", metric.Error()) + case Histogram: + h := metric.Snapshot() + ps := h.Percentiles([]float64{0.5, 0.75, 0.95, 0.99, 0.999}) + fmt.Fprintf(w, "histogram %s\n", namedMetric.name) + fmt.Fprintf(w, " count: %9d\n", h.Count()) + fmt.Fprintf(w, " min: %9d\n", h.Min()) + fmt.Fprintf(w, " max: %9d\n", h.Max()) + fmt.Fprintf(w, " mean: %12.2f\n", h.Mean()) + fmt.Fprintf(w, " stddev: %12.2f\n", h.StdDev()) + fmt.Fprintf(w, " median: %12.2f\n", ps[0]) + fmt.Fprintf(w, " 75%%: %12.2f\n", ps[1]) + fmt.Fprintf(w, " 95%%: %12.2f\n", ps[2]) + fmt.Fprintf(w, " 99%%: %12.2f\n", ps[3]) + fmt.Fprintf(w, " 99.9%%: %12.2f\n", ps[4]) + case Meter: + m := metric.Snapshot() + fmt.Fprintf(w, "meter %s\n", namedMetric.name) + fmt.Fprintf(w, " count: %9d\n", m.Count()) + fmt.Fprintf(w, " 1-min rate: %12.2f\n", m.Rate1()) + fmt.Fprintf(w, " 5-min rate: %12.2f\n", m.Rate5()) + fmt.Fprintf(w, " 15-min rate: %12.2f\n", m.Rate15()) + fmt.Fprintf(w, " mean rate: %12.2f\n", m.RateMean()) + case Timer: + t := metric.Snapshot() + ps := t.Percentiles([]float64{0.5, 0.75, 0.95, 0.99, 0.999}) + fmt.Fprintf(w, "timer %s\n", namedMetric.name) + fmt.Fprintf(w, " count: %9d\n", t.Count()) + fmt.Fprintf(w, " min: %9d\n", t.Min()) + fmt.Fprintf(w, " max: %9d\n", t.Max()) + fmt.Fprintf(w, " mean: %12.2f\n", t.Mean()) + fmt.Fprintf(w, " stddev: %12.2f\n", t.StdDev()) + fmt.Fprintf(w, " median: %12.2f\n", ps[0]) + fmt.Fprintf(w, " 75%%: %12.2f\n", ps[1]) + fmt.Fprintf(w, " 95%%: %12.2f\n", ps[2]) + fmt.Fprintf(w, " 99%%: %12.2f\n", ps[3]) + fmt.Fprintf(w, " 99.9%%: %12.2f\n", ps[4]) + fmt.Fprintf(w, " 1-min rate: %12.2f\n", t.Rate1()) + fmt.Fprintf(w, " 5-min rate: %12.2f\n", t.Rate5()) + fmt.Fprintf(w, " 15-min rate: %12.2f\n", t.Rate15()) + fmt.Fprintf(w, " mean rate: %12.2f\n", t.RateMean()) + } + } +} + +type namedMetric struct { + name string + m interface{} +} + +// namedMetricSlice is a slice of namedMetrics that implements sort.Interface. 
+type namedMetricSlice []namedMetric
+
+func (nms namedMetricSlice) Len() int { return len(nms) }
+
+func (nms namedMetricSlice) Swap(i, j int) { nms[i], nms[j] = nms[j], nms[i] }
+
+func (nms namedMetricSlice) Less(i, j int) bool {
+	return nms[i].name < nms[j].name
+}
diff --git a/vendor/github.com/docker/distribution/vendor/github.com/yvasiyarov/gorelic/.gitignore b/vendor/github.com/docker/distribution/vendor/github.com/yvasiyarov/gorelic/.gitignore
new file mode 100644
index 000000000..ca502e294
--- /dev/null
+++ b/vendor/github.com/docker/distribution/vendor/github.com/yvasiyarov/gorelic/.gitignore
@@ -0,0 +1,4 @@
+*.nut
+*.swp
+examples/example1
+examples/example_web
diff --git a/vendor/github.com/docker/distribution/vendor/github.com/yvasiyarov/gorelic/.travis.yml b/vendor/github.com/docker/distribution/vendor/github.com/yvasiyarov/gorelic/.travis.yml
new file mode 100644
index 000000000..4f2ee4d97
--- /dev/null
+++ b/vendor/github.com/docker/distribution/vendor/github.com/yvasiyarov/gorelic/.travis.yml
@@ -0,0 +1 @@
+language: go
diff --git a/vendor/github.com/docker/distribution/vendor/github.com/yvasiyarov/gorelic/LICENSE b/vendor/github.com/docker/distribution/vendor/github.com/yvasiyarov/gorelic/LICENSE
new file mode 100644
index 000000000..01a9a5c4d
--- /dev/null
+++ b/vendor/github.com/docker/distribution/vendor/github.com/yvasiyarov/gorelic/LICENSE
@@ -0,0 +1,24 @@
+Copyright (c) 2013 Yuriy Vasiyarov. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+   * Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+   * Redistributions in binary form must reproduce the above
+copyright notice, this list of conditions and the following disclaimer
+in the documentation and/or other materials provided with the
+distribution.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/vendor/github.com/docker/distribution/vendor/github.com/yvasiyarov/gorelic/README.md b/vendor/github.com/docker/distribution/vendor/github.com/yvasiyarov/gorelic/README.md
new file mode 100644
index 000000000..61068a82a
--- /dev/null
+++ b/vendor/github.com/docker/distribution/vendor/github.com/yvasiyarov/gorelic/README.md
@@ -0,0 +1,119 @@
+# GoRelic
+
+New Relic agent for the Go runtime. It collects a wide range of metrics about the scheduler, garbage collector and
+memory allocator and sends them to NewRelic.
+
+### Requirements
+- Go 1.1 or higher
+- github.com/yvasiyarov/gorelic
+- github.com/yvasiyarov/newrelic_platform_go
+- github.com/yvasiyarov/go-metrics
+
+Only the first two dependencies have to be installed manually. All other dependencies will be installed automatically
+by the Go toolchain.
+
+### Installation
+```bash
+go get github.com/yvasiyarov/gorelic
+```
+and add the following code to the initialization part of your application:
+```go
+import (
+    "github.com/yvasiyarov/gorelic"
+)
+....
+
+agent := gorelic.NewAgent()
+agent.Verbose = true
+agent.NewrelicLicense = "YOUR NEWRELIC LICENSE KEY THERE"
+agent.Run()
+
+```
+
+### Middleware
+If you are using the Beego, Martini, Revel or Gin framework, you can hook gorelic into your application by using one of the following middleware packages:
+- https://github.com/yvasiyarov/beego_gorelic
+- https://github.com/yvasiyarov/martini_gorelic
+- https://github.com/yvasiyarov/gocraft_gorelic
+- http://wiki.colar.net/revel_newelic
+- https://github.com/jingweno/negroni-gorelic
+- https://github.com/brandfolder/gin-gorelic
+
+
+### Configuration
+- NewrelicLicense - the only mandatory setting of this agent.
+- NewrelicName - component name in the NewRelic dashboard. Default value: "Go daemon"
+- NewrelicPollInterval - how often metrics will be sent to NewRelic. Default value: 60 seconds
+- Verbose - print some information useful for debugging. Default value: false
+- CollectGcStat - whether the agent should collect garbage collector statistics. Default value: true
+- CollectHTTPStat - whether the agent should collect HTTP metrics. Default value: false
+- CollectMemoryStat - whether the agent should collect memory allocator statistics. Default value: true
+- GCPollInterval - how often GC statistics should be collected. Default value: 10 seconds. It has a performance impact. For more information, please see the metrics documentation.
+- MemoryAllocatorPollInterval - how often memory allocator statistics should be collected. Default value: 60 seconds. It has a performance impact. For more information, please read the metrics documentation.
+
+
+## Metrics reported by plugin
+This agent uses functions exposed by the runtime and runtime/debug packages to collect the most important information about the Go runtime.
+
+### General metrics
+- Runtime/General/NOGoroutines - number of running goroutines, as reported by NumGoroutine() from the runtime package
+- Runtime/General/NOCgoCalls - number of cgo calls made, as reported by NumCgoCall() from the runtime package
+
+### Garbage collector metrics
+- Runtime/GC/NumberOfGCCalls - number of GC calls, as reported by ReadGCStats() from runtime/debug
+- Runtime/GC/PauseTotalTime - total pause time during GC calls, as reported by ReadGCStats() from runtime/debug (in nanoseconds)
+- Runtime/GC/GCTime/Max - max GC time
+- Runtime/GC/GCTime/Min - min GC time
+- Runtime/GC/GCTime/Mean - GC mean time
+- Runtime/GC/GCTime/Percentile95 - 95th percentile of GC time
+
+All these metrics are measured in nanoseconds. The last four of them can be inaccurate if GC runs more than once per GCPollInterval.
+If GC runs more often in your workload, you can consider decreasing the value of GCPollInterval.
+But be careful: ReadGCStats() locks the mheap, so it is not a good idea to set GCPollInterval to very low values.
+
+### Memory allocator
+- Component/Runtime/Memory/SysMem/Total - number of bytes/minute allocated from the OS in total.
+- Component/Runtime/Memory/SysMem/Stack - number of bytes/minute allocated from the OS for stacks.
+- Component/Runtime/Memory/SysMem/MSpan - number of bytes/minute allocated from the OS for internal MSpan structs.
+- Component/Runtime/Memory/SysMem/MCache - number of bytes/minute allocated from the OS for internal MCache structs.
+- Component/Runtime/Memory/SysMem/Heap - number of bytes/minute allocated from the OS for the heap.
+- Component/Runtime/Memory/SysMem/BuckHash - number of bytes/minute allocated from the OS for internal BuckHash structs.
+- Component/Runtime/Memory/Operations/NoFrees - number of memory frees per minute
+- Component/Runtime/Memory/Operations/NoMallocs - number of memory allocations per minute
+- Component/Runtime/Memory/Operations/NoPointerLookups - number of pointer lookups per minute
+- Component/Runtime/Memory/InUse/Total - total amount of memory in use
+- Component/Runtime/Memory/InUse/Heap - amount of memory in use for the heap
+- Component/Runtime/Memory/InUse/MCacheInuse - amount of memory in use for MCache internal structures
+- Component/Runtime/Memory/InUse/MSpanInuse - amount of memory in use for MSpan internal structures
+- Component/Runtime/Memory/InUse/Stack - amount of memory in use for stacks
+
+### Process metrics
+- Component/Runtime/System/Threads - number of OS threads used
+- Runtime/System/FDSize - number of file descriptors used by the process
+- Runtime/System/Memory/VmPeakSize - VM max size
+- Runtime/System/Memory/VmCurrent - VM current size
+- Runtime/System/Memory/RssPeak - max size of the resident memory set
+- Runtime/System/Memory/RssCurrent - current size of the resident memory set
+
+All these metrics are collected once per MemoryAllocatorPollInterval. To collect them the agent uses the ReadMemStats() routine.
+This routine calls stoptheworld() internally and blocks everything, so please keep that in mind when you change the MemoryAllocatorPollInterval value.
+
+### HTTP metrics
+- throughput (requests per second), calculated for the last minute
+- mean throughput (requests per second)
+- mean response time
+- min response time
+- max response time
+- 75%, 90%, 95% percentiles for response time
+
+
+In order to collect HTTP metrics, handler functions must be wrapped using WrapHTTPHandlerFunc:
+
+```go
+http.HandleFunc("/", agent.WrapHTTPHandlerFunc(handler))
+```
+
+## TODO
+- Collect per-size allocation statistics
+- Collect user-defined metrics
+
 diff --git a/vendor/github.com/docker/distribution/vendor/github.com/yvasiyarov/gorelic/agent.go b/vendor/github.com/docker/distribution/vendor/github.com/yvasiyarov/gorelic/agent.go new file mode 100644 index 000000000..660623d6f --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/github.com/yvasiyarov/gorelic/agent.go @@ -0,0 +1,137 @@
+package gorelic
+
+import (
+	"errors"
+	"fmt"
+	metrics "github.com/yvasiyarov/go-metrics"
+	"github.com/yvasiyarov/newrelic_platform_go"
+	"log"
+	"net/http"
+)
+
+const (
+	// DefaultNewRelicPollInterval - how often we will report metrics to NewRelic.
+	// Recommended value is 60 seconds
+	DefaultNewRelicPollInterval = 60
+
+	// DefaultGcPollIntervalInSeconds - how often we will get garbage collector run statistics
+	// Default value is - every 10 seconds
+	// During GC stat polling - mheap will be locked, so be careful changing this value
+	DefaultGcPollIntervalInSeconds = 10
+
+	// DefaultMemoryAllocatorPollIntervalInSeconds - how often we will get memory allocator statistics.
+	// Default value is - every 60 seconds
+	// During this process stoptheworld() is called, so be careful changing this value
+	DefaultMemoryAllocatorPollIntervalInSeconds = 60
+
+	//DefaultAgentGuid is the plugin ID in NewRelic.
+	//You should not change it unless you want to create your own plugin.
+	DefaultAgentGuid = "com.github.yvasiyarov.GoRelic"
+
+	//CurrentAgentVersion is the plugin version
+	CurrentAgentVersion = "0.0.6"
+
+	//DefaultAgentName in the NewRelic GUI. You can change it.
+ DefaultAgentName = "Go daemon" +) + +//Agent - is NewRelic agent implementation. +//Agent start separate go routine which will report data to NewRelic +type Agent struct { + NewrelicName string + NewrelicLicense string + NewrelicPollInterval int + Verbose bool + CollectGcStat bool + CollectMemoryStat bool + CollectHTTPStat bool + GCPollInterval int + MemoryAllocatorPollInterval int + AgentGUID string + AgentVersion string + plugin *newrelic_platform_go.NewrelicPlugin + HTTPTimer metrics.Timer +} + +//NewAgent build new Agent objects. +func NewAgent() *Agent { + agent := &Agent{ + NewrelicName: DefaultAgentName, + NewrelicPollInterval: DefaultNewRelicPollInterval, + Verbose: false, + CollectGcStat: true, + CollectMemoryStat: true, + GCPollInterval: DefaultGcPollIntervalInSeconds, + MemoryAllocatorPollInterval: DefaultMemoryAllocatorPollIntervalInSeconds, + AgentGUID: DefaultAgentGuid, + AgentVersion: CurrentAgentVersion, + } + return agent +} + +//WrapHTTPHandlerFunc instrument HTTP handler functions to collect HTTP metrics +func (agent *Agent) WrapHTTPHandlerFunc(h tHTTPHandlerFunc) tHTTPHandlerFunc { + agent.initTimer() + return func(w http.ResponseWriter, req *http.Request) { + proxy := newHTTPHandlerFunc(h) + proxy.timer = agent.HTTPTimer + proxy.ServeHTTP(w, req) + } +} + +//WrapHTTPHandler instrument HTTP handler object to collect HTTP metrics +func (agent *Agent) WrapHTTPHandler(h http.Handler) http.Handler { + agent.initTimer() + + proxy := newHTTPHandler(h) + proxy.timer = agent.HTTPTimer + return proxy +} + +//Run initialize Agent instance and start harvest go routine +func (agent *Agent) Run() error { + if agent.NewrelicLicense == "" { + return errors.New("please, pass a valid newrelic license key") + } + + agent.plugin = newrelic_platform_go.NewNewrelicPlugin(agent.AgentVersion, agent.NewrelicLicense, agent.NewrelicPollInterval) + component := newrelic_platform_go.NewPluginComponent(agent.NewrelicName, agent.AgentGUID) + agent.plugin.AddComponent(component) + + addRuntimeMericsToComponent(component) + + if agent.CollectGcStat { + addGCMericsToComponent(component, agent.GCPollInterval) + agent.debug(fmt.Sprintf("Init GC metrics collection. Poll interval %d seconds.", agent.GCPollInterval)) + } + if agent.CollectMemoryStat { + addMemoryMericsToComponent(component, agent.MemoryAllocatorPollInterval) + agent.debug(fmt.Sprintf("Init memory allocator metrics collection. Poll interval %d seconds.", agent.MemoryAllocatorPollInterval)) + } + + if agent.CollectHTTPStat { + agent.initTimer() + addHTTPMericsToComponent(component, agent.HTTPTimer) + agent.debug(fmt.Sprintf("Init HTTP metrics collection.")) + } + + agent.plugin.Verbose = agent.Verbose + go agent.plugin.Run() + return nil +} + +//Initialize global metrics.Timer object, used to collect HTTP metrics +func (agent *Agent) initTimer() { + if agent.HTTPTimer == nil { + agent.HTTPTimer = metrics.NewTimer() + } + + agent.CollectHTTPStat = true +} + +//Print debug messages +func (agent *Agent) debug(msg string) { + if agent.Verbose { + log.Println(msg) + } +} diff --git a/vendor/github.com/docker/distribution/vendor/github.com/yvasiyarov/gorelic/doc.go b/vendor/github.com/docker/distribution/vendor/github.com/yvasiyarov/gorelic/doc.go new file mode 100644 index 000000000..69de9fee3 --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/github.com/yvasiyarov/gorelic/doc.go @@ -0,0 +1,2 @@ +// Package gorelic is an New Relic agent implementation for Go runtime. 
It collect a lot of metrics about Go scheduler, garbage collector and memory allocator and send them to NewRelic. +package gorelic diff --git a/vendor/github.com/docker/distribution/vendor/github.com/yvasiyarov/gorelic/gc_metrics.go b/vendor/github.com/docker/distribution/vendor/github.com/yvasiyarov/gorelic/gc_metrics.go new file mode 100644 index 000000000..394059402 --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/github.com/yvasiyarov/gorelic/gc_metrics.go @@ -0,0 +1,65 @@ +package gorelic + +import ( + metrics "github.com/yvasiyarov/go-metrics" + "github.com/yvasiyarov/newrelic_platform_go" + "time" +) + +func newGCMetricaDataSource(pollInterval int) goMetricaDataSource { + r := metrics.NewRegistry() + + metrics.RegisterDebugGCStats(r) + go metrics.CaptureDebugGCStats(r, time.Duration(pollInterval)*time.Second) + return goMetricaDataSource{r} +} + +func addGCMericsToComponent(component newrelic_platform_go.IComponent, pollInterval int) { + metrics := []*baseGoMetrica{ + &baseGoMetrica{ + name: "NumberOfGCCalls", + units: "calls", + dataSourceKey: "debug.GCStats.NumGC", + }, + &baseGoMetrica{ + name: "PauseTotalTime", + units: "nanoseconds", + dataSourceKey: "debug.GCStats.PauseTotal", + }, + } + + ds := newGCMetricaDataSource(pollInterval) + for _, m := range metrics { + m.basePath = "Runtime/GC/" + m.dataSource = ds + component.AddMetrica(&gaugeMetrica{m}) + } + + histogramMetrics := []*histogramMetrica{ + &histogramMetrica{ + statFunction: histogramMax, + baseGoMetrica: &baseGoMetrica{name: "Max"}, + }, + &histogramMetrica{ + statFunction: histogramMin, + baseGoMetrica: &baseGoMetrica{name: "Min"}, + }, + &histogramMetrica{ + statFunction: histogramMean, + baseGoMetrica: &baseGoMetrica{name: "Mean"}, + }, + &histogramMetrica{ + statFunction: histogramPercentile, + percentileValue: 0.95, + baseGoMetrica: &baseGoMetrica{name: "Percentile95"}, + }, + } + for _, m := range histogramMetrics { + m.baseGoMetrica.units = "nanoseconds" + m.baseGoMetrica.dataSourceKey = "debug.GCStats.Pause" + m.baseGoMetrica.basePath = "Runtime/GC/GCTime/" + m.baseGoMetrica.dataSource = ds + + component.AddMetrica(m) + } +} diff --git a/vendor/github.com/docker/distribution/vendor/github.com/yvasiyarov/gorelic/gometrica.go b/vendor/github.com/docker/distribution/vendor/github.com/yvasiyarov/gorelic/gometrica.go new file mode 100644 index 000000000..52fcdd575 --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/github.com/yvasiyarov/gorelic/gometrica.go @@ -0,0 +1,105 @@ +package gorelic + +import ( + "fmt" + metrics "github.com/yvasiyarov/go-metrics" +) + +const ( + histogramMin = iota + histogramMax + histogramMean + histogramPercentile + histogramStdDev + histogramVariance + noHistogramFunctions +) + +type goMetricaDataSource struct { + metrics.Registry +} + +func (ds goMetricaDataSource) GetGaugeValue(key string) (float64, error) { + if valueContainer := ds.Get(key); valueContainer == nil { + return 0, fmt.Errorf("metrica with name %s is not registered\n", key) + } else if gauge, ok := valueContainer.(metrics.Gauge); ok { + return float64(gauge.Value()), nil + } else { + return 0, fmt.Errorf("metrica container has unexpected type: %T\n", valueContainer) + } +} + +func (ds goMetricaDataSource) GetHistogramValue(key string, statFunction int, percentile float64) (float64, error) { + if valueContainer := ds.Get(key); valueContainer == nil { + return 0, fmt.Errorf("metrica with name %s is not registered\n", key) + } else if histogram, ok := valueContainer.(metrics.Histogram); ok { + 
switch statFunction { + default: + return 0, fmt.Errorf("unsupported stat function for histogram: %s\n", statFunction) + case histogramMax: + return float64(histogram.Max()), nil + case histogramMin: + return float64(histogram.Min()), nil + case histogramMean: + return float64(histogram.Mean()), nil + case histogramStdDev: + return float64(histogram.StdDev()), nil + case histogramVariance: + return float64(histogram.Variance()), nil + case histogramPercentile: + return float64(histogram.Percentile(percentile)), nil + } + } else { + return 0, fmt.Errorf("metrica container has unexpected type: %T\n", valueContainer) + } +} + +type baseGoMetrica struct { + dataSource goMetricaDataSource + basePath string + name string + units string + dataSourceKey string +} + +func (metrica *baseGoMetrica) GetName() string { + return metrica.basePath + metrica.name +} + +func (metrica *baseGoMetrica) GetUnits() string { + return metrica.units +} + +type gaugeMetrica struct { + *baseGoMetrica +} + +func (metrica *gaugeMetrica) GetValue() (float64, error) { + return metrica.dataSource.GetGaugeValue(metrica.dataSourceKey) +} + +type gaugeIncMetrica struct { + *baseGoMetrica + previousValue float64 +} + +func (metrica *gaugeIncMetrica) GetValue() (float64, error) { + var value float64 + var currentValue float64 + var err error + if currentValue, err = metrica.dataSource.GetGaugeValue(metrica.dataSourceKey); err == nil { + value = currentValue - metrica.previousValue + metrica.previousValue = currentValue + } + return value, err +} + +type histogramMetrica struct { + *baseGoMetrica + statFunction int + percentileValue float64 +} + +func (metrica *histogramMetrica) GetValue() (float64, error) { + return metrica.dataSource.GetHistogramValue(metrica.dataSourceKey, metrica.statFunction, metrica.percentileValue) +} diff --git a/vendor/github.com/docker/distribution/vendor/github.com/yvasiyarov/gorelic/http_metrics.go b/vendor/github.com/docker/distribution/vendor/github.com/yvasiyarov/gorelic/http_metrics.go new file mode 100644 index 000000000..e54cbd371 --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/github.com/yvasiyarov/gorelic/http_metrics.go @@ -0,0 +1,194 @@ +package gorelic + +import ( + metrics "github.com/yvasiyarov/go-metrics" + "github.com/yvasiyarov/newrelic_platform_go" + "net/http" + "time" +) + +type tHTTPHandlerFunc func(http.ResponseWriter, *http.Request) +type tHTTPHandler struct { + originalHandler http.Handler + originalHandlerFunc tHTTPHandlerFunc + isFunc bool + timer metrics.Timer +} + +var httpTimer metrics.Timer + +func newHTTPHandlerFunc(h tHTTPHandlerFunc) *tHTTPHandler { + return &tHTTPHandler{ + isFunc: true, + originalHandlerFunc: h, + } +} +func newHTTPHandler(h http.Handler) *tHTTPHandler { + return &tHTTPHandler{ + isFunc: false, + originalHandler: h, + } +} + +func (handler *tHTTPHandler) ServeHTTP(w http.ResponseWriter, req *http.Request) { + startTime := time.Now() + defer handler.timer.UpdateSince(startTime) + + if handler.isFunc { + handler.originalHandlerFunc(w, req) + } else { + handler.originalHandler.ServeHTTP(w, req) + } +} + +type baseTimerMetrica struct { + dataSource metrics.Timer + name string + units string +} + +func (metrica *baseTimerMetrica) GetName() string { + return metrica.name +} + +func (metrica *baseTimerMetrica) GetUnits() string { + return metrica.units +} + +type timerRate1Metrica struct { + *baseTimerMetrica +} + +func (metrica *timerRate1Metrica) GetValue() (float64, error) { + return metrica.dataSource.Rate1(), nil +} + +type 
timerRateMeanMetrica struct { + *baseTimerMetrica +} + +func (metrica *timerRateMeanMetrica) GetValue() (float64, error) { + return metrica.dataSource.RateMean(), nil +} + +type timerMeanMetrica struct { + *baseTimerMetrica +} + +func (metrica *timerMeanMetrica) GetValue() (float64, error) { + return metrica.dataSource.Mean() / float64(time.Millisecond), nil +} + +type timerMinMetrica struct { + *baseTimerMetrica +} + +func (metrica *timerMinMetrica) GetValue() (float64, error) { + return float64(metrica.dataSource.Min()) / float64(time.Millisecond), nil +} + +type timerMaxMetrica struct { + *baseTimerMetrica +} + +func (metrica *timerMaxMetrica) GetValue() (float64, error) { + return float64(metrica.dataSource.Max()) / float64(time.Millisecond), nil +} + +type timerPercentile75Metrica struct { + *baseTimerMetrica +} + +func (metrica *timerPercentile75Metrica) GetValue() (float64, error) { + return metrica.dataSource.Percentile(0.75) / float64(time.Millisecond), nil +} + +type timerPercentile90Metrica struct { + *baseTimerMetrica +} + +func (metrica *timerPercentile90Metrica) GetValue() (float64, error) { + return metrica.dataSource.Percentile(0.90) / float64(time.Millisecond), nil +} + +type timerPercentile95Metrica struct { + *baseTimerMetrica +} + +func (metrica *timerPercentile95Metrica) GetValue() (float64, error) { + return metrica.dataSource.Percentile(0.95) / float64(time.Millisecond), nil +} + +func addHTTPMericsToComponent(component newrelic_platform_go.IComponent, timer metrics.Timer) { + rate1 := &timerRate1Metrica{ + baseTimerMetrica: &baseTimerMetrica{ + name: "http/throughput/1minute", + units: "rps", + dataSource: timer, + }, + } + component.AddMetrica(rate1) + + rateMean := &timerRateMeanMetrica{ + baseTimerMetrica: &baseTimerMetrica{ + name: "http/throughput/rateMean", + units: "rps", + dataSource: timer, + }, + } + component.AddMetrica(rateMean) + + responseTimeMean := &timerMeanMetrica{ + baseTimerMetrica: &baseTimerMetrica{ + name: "http/responseTime/mean", + units: "ms", + dataSource: timer, + }, + } + component.AddMetrica(responseTimeMean) + + responseTimeMax := &timerMaxMetrica{ + baseTimerMetrica: &baseTimerMetrica{ + name: "http/responseTime/max", + units: "ms", + dataSource: timer, + }, + } + component.AddMetrica(responseTimeMax) + + responseTimeMin := &timerMinMetrica{ + baseTimerMetrica: &baseTimerMetrica{ + name: "http/responseTime/min", + units: "ms", + dataSource: timer, + }, + } + component.AddMetrica(responseTimeMin) + + responseTimePercentile75 := &timerPercentile75Metrica{ + baseTimerMetrica: &baseTimerMetrica{ + name: "http/responseTime/percentile75", + units: "ms", + dataSource: timer, + }, + } + component.AddMetrica(responseTimePercentile75) + + responseTimePercentile90 := &timerPercentile90Metrica{ + baseTimerMetrica: &baseTimerMetrica{ + name: "http/responseTime/percentile90", + units: "ms", + dataSource: timer, + }, + } + component.AddMetrica(responseTimePercentile90) + + responseTimePercentile95 := &timerPercentile95Metrica{ + baseTimerMetrica: &baseTimerMetrica{ + name: "http/responseTime/percentile95", + units: "ms", + dataSource: timer, + }, + } + component.AddMetrica(responseTimePercentile95) +} diff --git a/vendor/github.com/docker/distribution/vendor/github.com/yvasiyarov/gorelic/memory_metrics.go b/vendor/github.com/docker/distribution/vendor/github.com/yvasiyarov/gorelic/memory_metrics.go new file mode 100644 index 000000000..5c8d3e4ec --- /dev/null +++ 
b/vendor/github.com/docker/distribution/vendor/github.com/yvasiyarov/gorelic/memory_metrics.go @@ -0,0 +1,110 @@ +package gorelic + +import ( + metrics "github.com/yvasiyarov/go-metrics" + "github.com/yvasiyarov/newrelic_platform_go" + "time" +) + +func newMemoryMetricaDataSource(pollInterval int) goMetricaDataSource { + r := metrics.NewRegistry() + + metrics.RegisterRuntimeMemStats(r) + metrics.CaptureRuntimeMemStatsOnce(r) + go metrics.CaptureRuntimeMemStats(r, time.Duration(pollInterval)*time.Second) + return goMetricaDataSource{r} +} + +func addMemoryMericsToComponent(component newrelic_platform_go.IComponent, pollInterval int) { + gaugeMetrics := []*baseGoMetrica{ + //Memory in use metrics + &baseGoMetrica{ + name: "InUse/Total", + units: "bytes", + dataSourceKey: "runtime.MemStats.Alloc", + }, + &baseGoMetrica{ + name: "InUse/Heap", + units: "bytes", + dataSourceKey: "runtime.MemStats.HeapAlloc", + }, + &baseGoMetrica{ + name: "InUse/Stack", + units: "bytes", + dataSourceKey: "runtime.MemStats.StackInuse", + }, + &baseGoMetrica{ + name: "InUse/MSpanInuse", + units: "bytes", + dataSourceKey: "runtime.MemStats.MSpanInuse", + }, + &baseGoMetrica{ + name: "InUse/MCacheInuse", + units: "bytes", + dataSourceKey: "runtime.MemStats.MCacheInuse", + }, + } + ds := newMemoryMetricaDataSource(pollInterval) + for _, m := range gaugeMetrics { + m.basePath = "Runtime/Memory/" + m.dataSource = ds + component.AddMetrica(&gaugeMetrica{m}) + } + + gaugeIncMetrics := []*baseGoMetrica{ + //NO operations graph + &baseGoMetrica{ + name: "Operations/NoPointerLookups", + units: "lookups", + dataSourceKey: "runtime.MemStats.Lookups", + }, + &baseGoMetrica{ + name: "Operations/NoMallocs", + units: "mallocs", + dataSourceKey: "runtime.MemStats.Mallocs", + }, + &baseGoMetrica{ + name: "Operations/NoFrees", + units: "frees", + dataSourceKey: "runtime.MemStats.Frees", + }, + + // Sytem memory allocations + &baseGoMetrica{ + name: "SysMem/Total", + units: "bytes", + dataSourceKey: "runtime.MemStats.Sys", + }, + &baseGoMetrica{ + name: "SysMem/Heap", + units: "bytes", + dataSourceKey: "runtime.MemStats.HeapSys", + }, + &baseGoMetrica{ + name: "SysMem/Stack", + units: "bytes", + dataSourceKey: "runtime.MemStats.StackSys", + }, + &baseGoMetrica{ + name: "SysMem/MSpan", + units: "bytes", + dataSourceKey: "runtime.MemStats.MSpanSys", + }, + &baseGoMetrica{ + name: "SysMem/MCache", + units: "bytes", + dataSourceKey: "runtime.MemStats.MCacheSys", + }, + &baseGoMetrica{ + name: "SysMem/BuckHash", + units: "bytes", + dataSourceKey: "runtime.MemStats.BuckHashSys", + }, + } + + for _, m := range gaugeIncMetrics { + m.basePath = "Runtime/Memory/" + m.dataSource = ds + component.AddMetrica(&gaugeIncMetrica{baseGoMetrica: m}) + } +} diff --git a/vendor/github.com/docker/distribution/vendor/github.com/yvasiyarov/gorelic/nut.json b/vendor/github.com/docker/distribution/vendor/github.com/yvasiyarov/gorelic/nut.json new file mode 100644 index 000000000..7abb8ec69 --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/github.com/yvasiyarov/gorelic/nut.json @@ -0,0 +1,15 @@ +{ + "Version": "0.0.6", + "Vendor": "yvasiyarov", + "Authors": [ + { + "FullName": "Yuriy Vasiyarov", + "Email": "varyous@gmail.com" + } + ], + "ExtraFiles": [ + "README.md", + "LICENSE" + ], + "Homepage": "https://github.com/yvasiyarov/gorelic" +} diff --git a/vendor/github.com/docker/distribution/vendor/github.com/yvasiyarov/gorelic/runtime_metrics.go b/vendor/github.com/docker/distribution/vendor/github.com/yvasiyarov/gorelic/runtime_metrics.go new 
file mode 100644 index 000000000..87a42ca69 --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/github.com/yvasiyarov/gorelic/runtime_metrics.go @@ -0,0 +1,196 @@ +package gorelic + +import ( + "fmt" + "github.com/yvasiyarov/newrelic_platform_go" + "io/ioutil" + "os" + "runtime" + "strconv" + "strings" + "time" +) + +const linuxSystemQueryInterval = 60 + +// Number of goroutines metrica +type noGoroutinesMetrica struct{} + +func (metrica *noGoroutinesMetrica) GetName() string { + return "Runtime/General/NOGoroutines" +} +func (metrica *noGoroutinesMetrica) GetUnits() string { + return "goroutines" +} +func (metrica *noGoroutinesMetrica) GetValue() (float64, error) { + return float64(runtime.NumGoroutine()), nil +} + +// Number of CGO calls metrica +type noCgoCallsMetrica struct { + lastValue int64 +} + +func (metrica *noCgoCallsMetrica) GetName() string { + return "Runtime/General/NOCgoCalls" +} +func (metrica *noCgoCallsMetrica) GetUnits() string { + return "calls" +} +func (metrica *noCgoCallsMetrica) GetValue() (float64, error) { + currentValue := runtime.NumCgoCall() + value := float64(currentValue - metrica.lastValue) + metrica.lastValue = currentValue + + return value, nil +} + +//OS specific metrics data source interface +type iSystemMetricaDataSource interface { + GetValue(key string) (float64, error) +} + +// iSystemMetricaDataSource fabrica +func newSystemMetricaDataSource() iSystemMetricaDataSource { + var ds iSystemMetricaDataSource + switch runtime.GOOS { + default: + ds = &systemMetricaDataSource{} + case "linux": + ds = &linuxSystemMetricaDataSource{ + systemData: make(map[string]string), + } + } + return ds +} + +//Default implementation of iSystemMetricaDataSource. Just return an error +type systemMetricaDataSource struct{} + +func (ds *systemMetricaDataSource) GetValue(key string) (float64, error) { + return 0, fmt.Errorf("this metrica was not implemented yet for %s", runtime.GOOS) +} + +// Linux OS implementation of ISystemMetricaDataSource +type linuxSystemMetricaDataSource struct { + lastUpdate time.Time + systemData map[string]string +} + +func (ds *linuxSystemMetricaDataSource) GetValue(key string) (float64, error) { + if err := ds.checkAndUpdateData(); err != nil { + return 0, err + } else if val, ok := ds.systemData[key]; !ok { + return 0, fmt.Errorf("system data with key %s was not found", key) + } else if key == "VmSize" || key == "VmPeak" || key == "VmHWM" || key == "VmRSS" { + valueParts := strings.Split(val, " ") + if len(valueParts) != 2 { + return 0, fmt.Errorf("invalid format for value %s", key) + } + valConverted, err := strconv.ParseFloat(valueParts[0], 64) + if err != nil { + return 0, err + } + switch valueParts[1] { + case "kB": + valConverted *= 1 << 10 + case "mB": + valConverted *= 1 << 20 + case "gB": + valConverted *= 1 << 30 + } + return valConverted, nil + } else if valConverted, err := strconv.ParseFloat(val, 64); err != nil { + return valConverted, nil + } else { + return valConverted, nil + } +} +func (ds *linuxSystemMetricaDataSource) checkAndUpdateData() error { + startTime := time.Now() + if startTime.Sub(ds.lastUpdate) > time.Second*linuxSystemQueryInterval { + path := fmt.Sprintf("/proc/%d/status", os.Getpid()) + rawStats, err := ioutil.ReadFile(path) + if err != nil { + return err + } + + lines := strings.Split(string(rawStats), "\n") + for _, line := range lines { + parts := strings.Split(line, ":") + if len(parts) == 2 { + k := strings.TrimSpace(parts[0]) + v := strings.TrimSpace(parts[1]) + + ds.systemData[k] = v + } + } 
+ ds.lastUpdate = startTime + } + return nil +} + +// OS specific metrica +type systemMetrica struct { + sourceKey string + newrelicName string + units string + dataSource iSystemMetricaDataSource +} + +func (metrica *systemMetrica) GetName() string { + return metrica.newrelicName +} +func (metrica *systemMetrica) GetUnits() string { + return metrica.units +} +func (metrica *systemMetrica) GetValue() (float64, error) { + return metrica.dataSource.GetValue(metrica.sourceKey) +} + +func addRuntimeMericsToComponent(component newrelic_platform_go.IComponent) { + component.AddMetrica(&noGoroutinesMetrica{}) + component.AddMetrica(&noCgoCallsMetrica{}) + + ds := newSystemMetricaDataSource() + metrics := []*systemMetrica{ + &systemMetrica{ + sourceKey: "Threads", + units: "Threads", + newrelicName: "Runtime/System/Threads", + }, + &systemMetrica{ + sourceKey: "FDSize", + units: "fd", + newrelicName: "Runtime/System/FDSize", + }, + // Peak virtual memory size + &systemMetrica{ + sourceKey: "VmPeak", + units: "bytes", + newrelicName: "Runtime/System/Memory/VmPeakSize", + }, + //Virtual memory size + &systemMetrica{ + sourceKey: "VmSize", + units: "bytes", + newrelicName: "Runtime/System/Memory/VmCurrent", + }, + //Peak resident set size + &systemMetrica{ + sourceKey: "VmHWM", + units: "bytes", + newrelicName: "Runtime/System/Memory/RssPeak", + }, + //Resident set size + &systemMetrica{ + sourceKey: "VmRSS", + units: "bytes", + newrelicName: "Runtime/System/Memory/RssCurrent", + }, + } + for _, m := range metrics { + m.dataSource = ds + component.AddMetrica(m) + } +} diff --git a/vendor/github.com/docker/distribution/vendor/github.com/yvasiyarov/newrelic_platform_go/.travis.yml b/vendor/github.com/docker/distribution/vendor/github.com/yvasiyarov/newrelic_platform_go/.travis.yml new file mode 100644 index 000000000..4f2ee4d97 --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/github.com/yvasiyarov/newrelic_platform_go/.travis.yml @@ -0,0 +1 @@ +language: go diff --git a/vendor/github.com/docker/distribution/vendor/github.com/yvasiyarov/newrelic_platform_go/LICENSE b/vendor/github.com/docker/distribution/vendor/github.com/yvasiyarov/newrelic_platform_go/LICENSE new file mode 100644 index 000000000..01a9a5c4d --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/github.com/yvasiyarov/newrelic_platform_go/LICENSE @@ -0,0 +1,24 @@ +Copyright (c) 2013 Yuriy Vasiyarov. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/docker/distribution/vendor/github.com/yvasiyarov/newrelic_platform_go/README.md b/vendor/github.com/docker/distribution/vendor/github.com/yvasiyarov/newrelic_platform_go/README.md new file mode 100644 index 000000000..344623440 --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/github.com/yvasiyarov/newrelic_platform_go/README.md @@ -0,0 +1,11 @@ +New Relic Platform Agent SDK for Go(golang) +==================== + +[![Build Status](https://travis-ci.org/yvasiyarov/newrelic_platform_go.png?branch=master)](https://travis-ci.org/yvasiyarov/newrelic_platform_go) + +This package provide very simple interface to NewRelic Platform http://newrelic.com/platform + +For example of usage see examples/wave_plugin.go + +For real-word example, you can have a look at: +https://github.com/yvasiyarov/newrelic_sphinx diff --git a/vendor/github.com/docker/distribution/vendor/github.com/yvasiyarov/newrelic_platform_go/agent.go b/vendor/github.com/docker/distribution/vendor/github.com/yvasiyarov/newrelic_platform_go/agent.go new file mode 100644 index 000000000..d9d275354 --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/github.com/yvasiyarov/newrelic_platform_go/agent.go @@ -0,0 +1,27 @@ +package newrelic_platform_go + +import ( + "log" + "os" +) + +type Agent struct { + Host string `json:"host"` + Version string `json:"version"` + Pid int `json:"pid"` +} + +func NewAgent(Version string) *Agent { + agent := &Agent{ + Version: Version, + } + return agent +} + +func (agent *Agent) CollectEnvironmentInfo() { + var err error + agent.Pid = os.Getpid() + if agent.Host, err = os.Hostname(); err != nil { + log.Fatalf("Can not get hostname: %#v \n", err) + } +} diff --git a/vendor/github.com/docker/distribution/vendor/github.com/yvasiyarov/newrelic_platform_go/component.go b/vendor/github.com/docker/distribution/vendor/github.com/yvasiyarov/newrelic_platform_go/component.go new file mode 100644 index 000000000..000f7ab76 --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/github.com/yvasiyarov/newrelic_platform_go/component.go @@ -0,0 +1,71 @@ +package newrelic_platform_go + +import ( + "log" + "math" +) + +type ComponentData interface{} +type IComponent interface { + Harvest(plugin INewrelicPlugin) ComponentData + SetDuration(duration int) + AddMetrica(model IMetrica) + ClearSentData() +} + +type PluginComponent struct { + Name string `json:"name"` + GUID string `json:"guid"` + Duration int `json:"duration"` + Metrics map[string]MetricaValue `json:"metrics"` + MetricaModels []IMetrica `json:"-"` +} + +func NewPluginComponent(name string, guid string) *PluginComponent { + c := &PluginComponent{ + Name: name, + GUID: guid, + } + return c +} + +func (component *PluginComponent) AddMetrica(model IMetrica) { + component.MetricaModels = append(component.MetricaModels, model) +} + +func (component *PluginComponent) ClearSentData() { + component.Metrics = nil +} + +func (component *PluginComponent) SetDuration(duration int) { + 
component.Duration = duration +} + +func (component *PluginComponent) Harvest(plugin INewrelicPlugin) ComponentData { + component.Metrics = make(map[string]MetricaValue, len(component.MetricaModels)) + for i := 0; i < len(component.MetricaModels); i++ { + model := component.MetricaModels[i] + metricaKey := plugin.GetMetricaKey(model) + + if newValue, err := model.GetValue(); err == nil { + if math.IsInf(newValue, 0) || math.IsNaN(newValue) { + newValue = 0 + } + + if existMetric, ok := component.Metrics[metricaKey]; ok { + if floatExistVal, ok := existMetric.(float64); ok { + component.Metrics[metricaKey] = NewAggregatedMetricaValue(floatExistVal, newValue) + } else if aggregatedValue, ok := existMetric.(*AggregatedMetricaValue); ok { + aggregatedValue.Aggregate(newValue) + } else { + panic("Invalid type in metrica value") + } + } else { + component.Metrics[metricaKey] = newValue + } + } else { + log.Printf("Can not get metrica: %v, got error:%#v", model.GetName(), err) + } + } + return component +} diff --git a/vendor/github.com/docker/distribution/vendor/github.com/yvasiyarov/newrelic_platform_go/doc.go b/vendor/github.com/docker/distribution/vendor/github.com/yvasiyarov/newrelic_platform_go/doc.go new file mode 100644 index 000000000..ef41e9697 --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/github.com/yvasiyarov/newrelic_platform_go/doc.go @@ -0,0 +1,2 @@ +// Package newrelic_platform_go is New Relic Platform Agent SDK for Go language. +package newrelic_platform_go diff --git a/vendor/github.com/docker/distribution/vendor/github.com/yvasiyarov/newrelic_platform_go/metrica.go b/vendor/github.com/docker/distribution/vendor/github.com/yvasiyarov/newrelic_platform_go/metrica.go new file mode 100644 index 000000000..fc4fbd48f --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/github.com/yvasiyarov/newrelic_platform_go/metrica.go @@ -0,0 +1,42 @@ +package newrelic_platform_go + +import ( + "math" +) + +type IMetrica interface { + GetValue() (float64, error) + GetName() string + GetUnits() string +} + +type MetricaValue interface{} + +type SimpleMetricaValue float64 + +type AggregatedMetricaValue struct { + Min float64 `json:"min"` + Max float64 `json:"max"` + Total float64 `json:"total"` + Count int `json:"count"` + SumOfSquares float64 `json:"sum_of_squares"` +} + +func NewAggregatedMetricaValue(existValue float64, newValue float64) *AggregatedMetricaValue { + v := &AggregatedMetricaValue{ + Min: math.Min(newValue, existValue), + Max: math.Max(newValue, existValue), + Total: newValue + existValue, + Count: 2, + SumOfSquares: newValue*newValue + existValue*existValue, + } + return v +} + +func (aggregatedValue *AggregatedMetricaValue) Aggregate(newValue float64) { + aggregatedValue.Min = math.Min(newValue, aggregatedValue.Min) + aggregatedValue.Max = math.Max(newValue, aggregatedValue.Max) + aggregatedValue.Total += newValue + aggregatedValue.Count++ + aggregatedValue.SumOfSquares += newValue * newValue +} diff --git a/vendor/github.com/docker/distribution/vendor/github.com/yvasiyarov/newrelic_platform_go/nut.json b/vendor/github.com/docker/distribution/vendor/github.com/yvasiyarov/newrelic_platform_go/nut.json new file mode 100644 index 000000000..1e57c3952 --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/github.com/yvasiyarov/newrelic_platform_go/nut.json @@ -0,0 +1,15 @@ +{ + "Version": "0.0.1", + "Vendor": "yvasiyarov", + "Authors": [ + { + "FullName": "Yuriy Vasiyarov", + "Email": "varyous@gmail.com" + } + ], + "ExtraFiles": [ + 
"README.md", + "LICENSE" + ], + "Homepage": "https://github.com/yvasiyarov/newrelic_platform_go" +} diff --git a/vendor/github.com/docker/distribution/vendor/github.com/yvasiyarov/newrelic_platform_go/plugin.go b/vendor/github.com/docker/distribution/vendor/github.com/yvasiyarov/newrelic_platform_go/plugin.go new file mode 100644 index 000000000..3e45666de --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/github.com/yvasiyarov/newrelic_platform_go/plugin.go @@ -0,0 +1,194 @@ +package newrelic_platform_go + +import ( + "bytes" + "encoding/json" + "fmt" + "log" + "net/http" + "strings" + "time" +) + +const ( + NEWRELIC_API_URL = "https://platform-api.newrelic.com/platform/v1/metrics" +) + +type INewrelicPlugin interface { + GetMetricaKey(metrica IMetrica) string + Harvest() error + Run() + AddComponent(component IComponent) +} +type NewrelicPlugin struct { + Agent *Agent `json:"agent"` + Components []ComponentData `json:"components"` + + ComponentModels []IComponent `json:"-"` + LastPollTime time.Time `json:"-"` + Verbose bool `json:"-"` + LicenseKey string `json:"-"` + PollIntervalInSecond int `json:"-"` +} + +func NewNewrelicPlugin(version string, licenseKey string, pollInterval int) *NewrelicPlugin { + plugin := &NewrelicPlugin{ + LicenseKey: licenseKey, + PollIntervalInSecond: pollInterval, + } + + plugin.Agent = NewAgent(version) + plugin.Agent.CollectEnvironmentInfo() + + plugin.ComponentModels = []IComponent{} + return plugin +} + +func (plugin *NewrelicPlugin) Harvest() error { + startTime := time.Now() + var duration int + if plugin.LastPollTime.IsZero() { + duration = plugin.PollIntervalInSecond + } else { + duration = int(startTime.Sub(plugin.LastPollTime).Seconds()) + } + + plugin.Components = make([]ComponentData, 0, len(plugin.ComponentModels)) + for i := 0; i < len(plugin.ComponentModels); i++ { + plugin.ComponentModels[i].SetDuration(duration) + plugin.Components = append(plugin.Components, plugin.ComponentModels[i].Harvest(plugin)) + } + + if httpCode, err := plugin.SendMetricas(); err != nil { + log.Printf("Can not send metricas to newrelic: %#v\n", err) + return err + } else { + + if plugin.Verbose { + log.Printf("Got HTTP response code:%d", httpCode) + } + + if err, isFatal := plugin.CheckResponse(httpCode); isFatal { + log.Printf("Got fatal error:%v\n", err) + return err + } else { + if err != nil { + log.Printf("WARNING: %v", err) + } + return err + } + } + return nil +} + +func (plugin *NewrelicPlugin) GetMetricaKey(metrica IMetrica) string { + var keyBuffer bytes.Buffer + + keyBuffer.WriteString("Component/") + keyBuffer.WriteString(metrica.GetName()) + keyBuffer.WriteString("[") + keyBuffer.WriteString(metrica.GetUnits()) + keyBuffer.WriteString("]") + + return keyBuffer.String() +} + +func (plugin *NewrelicPlugin) SendMetricas() (int, error) { + client := &http.Client{} + var metricasJson []byte + var encodingError error + + if plugin.Verbose { + metricasJson, encodingError = json.MarshalIndent(plugin, "", " ") + } else { + metricasJson, encodingError = json.Marshal(plugin) + } + + if encodingError != nil { + return 0, encodingError + } + + jsonAsString := string(metricasJson) + if plugin.Verbose { + log.Printf("Send data:%s \n", jsonAsString) + } + + if httpRequest, err := http.NewRequest("POST", NEWRELIC_API_URL, strings.NewReader(jsonAsString)); err != nil { + return 0, err + } else { + httpRequest.Header.Set("X-License-Key", plugin.LicenseKey) + httpRequest.Header.Set("Content-Type", "application/json") + httpRequest.Header.Set("Accept", 
"application/json") + + if httpResponse, err := client.Do(httpRequest); err != nil { + return 0, err + } else { + defer httpResponse.Body.Close() + return httpResponse.StatusCode, nil + } + } + + // we will never get there + return 0, nil +} + +func (plugin *NewrelicPlugin) ClearSentData() { + for _, component := range plugin.ComponentModels { + component.ClearSentData() + } + plugin.Components = nil + plugin.LastPollTime = time.Now() +} + +func (plugin *NewrelicPlugin) CheckResponse(httpResponseCode int) (error, bool) { + isFatal := false + var err error + switch httpResponseCode { + case http.StatusOK: + { + plugin.ClearSentData() + } + case http.StatusForbidden: + { + err = fmt.Errorf("Authentication error (no license key header, or invalid license key).\n") + isFatal = true + } + case http.StatusBadRequest: + { + err = fmt.Errorf("The request or headers are in the wrong format or the URL is incorrect.\n") + isFatal = true + } + case http.StatusNotFound: + { + err = fmt.Errorf("Invalid URL\n") + isFatal = true + } + case http.StatusRequestEntityTooLarge: + { + err = fmt.Errorf("Too many metrics were sent in one request, or too many components (instances) were specified in one request, or other single-request limits were reached.\n") + //discard metrics + plugin.ClearSentData() + } + case http.StatusInternalServerError, http.StatusBadGateway, http.StatusServiceUnavailable, http.StatusGatewayTimeout: + { + err = fmt.Errorf("Got %v response code.Metricas will be aggregated", httpResponseCode) + } + } + return err, isFatal +} + +func (plugin *NewrelicPlugin) Run() { + plugin.Harvest() + tickerChannel := time.Tick(time.Duration(plugin.PollIntervalInSecond) * time.Second) + for ts := range tickerChannel { + plugin.Harvest() + + if plugin.Verbose { + log.Printf("Harvest ended at:%v\n", ts) + } + } +} + +func (plugin *NewrelicPlugin) AddComponent(component IComponent) { + plugin.ComponentModels = append(plugin.ComponentModels, component) +} diff --git a/vendor/github.com/docker/distribution/vendor/golang.org/x/crypto/LICENSE b/vendor/github.com/docker/distribution/vendor/golang.org/x/crypto/LICENSE new file mode 100644 index 000000000..6a66aea5e --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/golang.org/x/crypto/LICENSE @@ -0,0 +1,27 @@ +Copyright (c) 2009 The Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/docker/distribution/vendor/golang.org/x/crypto/PATENTS b/vendor/github.com/docker/distribution/vendor/golang.org/x/crypto/PATENTS new file mode 100644 index 000000000..733099041 --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/golang.org/x/crypto/PATENTS @@ -0,0 +1,22 @@ +Additional IP Rights Grant (Patents) + +"This implementation" means the copyrightable works distributed by +Google as part of the Go project. + +Google hereby grants to You a perpetual, worldwide, non-exclusive, +no-charge, royalty-free, irrevocable (except as stated in this section) +patent license to make, have made, use, offer to sell, sell, import, +transfer and otherwise run, modify and propagate the contents of this +implementation of Go, where such license applies only to those patent +claims, both currently owned or controlled by Google and acquired in +the future, licensable by Google that are necessarily infringed by this +implementation of Go. This grant does not include claims that would be +infringed only as a consequence of further modification of this +implementation. If you or your agent or exclusive licensee institute or +order or agree to the institution of patent litigation against any +entity (including a cross-claim or counterclaim in a lawsuit) alleging +that this implementation of Go or any code incorporated within this +implementation of Go constitutes direct or contributory patent +infringement, or inducement of patent infringement, then any patent +rights granted to you under this License for this implementation of Go +shall terminate as of the date such litigation is filed. diff --git a/vendor/github.com/docker/distribution/vendor/golang.org/x/crypto/bcrypt/base64.go b/vendor/github.com/docker/distribution/vendor/golang.org/x/crypto/bcrypt/base64.go new file mode 100644 index 000000000..fc3116090 --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/golang.org/x/crypto/bcrypt/base64.go @@ -0,0 +1,35 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package bcrypt + +import "encoding/base64" + +const alphabet = "./ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789" + +var bcEncoding = base64.NewEncoding(alphabet) + +func base64Encode(src []byte) []byte { + n := bcEncoding.EncodedLen(len(src)) + dst := make([]byte, n) + bcEncoding.Encode(dst, src) + for dst[n-1] == '=' { + n-- + } + return dst[:n] +} + +func base64Decode(src []byte) ([]byte, error) { + numOfEquals := 4 - (len(src) % 4) + for i := 0; i < numOfEquals; i++ { + src = append(src, '=') + } + + dst := make([]byte, bcEncoding.DecodedLen(len(src))) + n, err := bcEncoding.Decode(dst, src) + if err != nil { + return nil, err + } + return dst[:n], nil +} diff --git a/vendor/github.com/docker/distribution/vendor/golang.org/x/crypto/bcrypt/bcrypt.go b/vendor/github.com/docker/distribution/vendor/golang.org/x/crypto/bcrypt/bcrypt.go new file mode 100644 index 000000000..b8e18d744 --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/golang.org/x/crypto/bcrypt/bcrypt.go @@ -0,0 +1,294 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package bcrypt implements Provos and Mazières's bcrypt adaptive hashing +// algorithm. See http://www.usenix.org/event/usenix99/provos/provos.pdf +package bcrypt + +// The code is a port of Provos and Mazières's C implementation. +import ( + "crypto/rand" + "crypto/subtle" + "errors" + "fmt" + "golang.org/x/crypto/blowfish" + "io" + "strconv" +) + +const ( + MinCost int = 4 // the minimum allowable cost as passed in to GenerateFromPassword + MaxCost int = 31 // the maximum allowable cost as passed in to GenerateFromPassword + DefaultCost int = 10 // the cost that will actually be set if a cost below MinCost is passed into GenerateFromPassword +) + +// The error returned from CompareHashAndPassword when a password and hash do +// not match. +var ErrMismatchedHashAndPassword = errors.New("crypto/bcrypt: hashedPassword is not the hash of the given password") + +// The error returned from CompareHashAndPassword when a hash is too short to +// be a bcrypt hash. +var ErrHashTooShort = errors.New("crypto/bcrypt: hashedSecret too short to be a bcrypted password") + +// The error returned from CompareHashAndPassword when a hash was created with +// a bcrypt algorithm newer than this implementation. +type HashVersionTooNewError byte + +func (hv HashVersionTooNewError) Error() string { + return fmt.Sprintf("crypto/bcrypt: bcrypt algorithm version '%c' requested is newer than current version '%c'", byte(hv), majorVersion) +} + +// The error returned from CompareHashAndPassword when a hash starts with something other than '$' +type InvalidHashPrefixError byte + +func (ih InvalidHashPrefixError) Error() string { + return fmt.Sprintf("crypto/bcrypt: bcrypt hashes must start with '$', but hashedSecret started with '%c'", byte(ih)) +} + +type InvalidCostError int + +func (ic InvalidCostError) Error() string { + return fmt.Sprintf("crypto/bcrypt: cost %d is outside allowed range (%d,%d)", int(ic), int(MinCost), int(MaxCost)) +} + +const ( + majorVersion = '2' + minorVersion = 'a' + maxSaltSize = 16 + maxCryptedHashSize = 23 + encodedSaltSize = 22 + encodedHashSize = 31 + minHashSize = 59 +) + +// magicCipherData is an IV for the 64 Blowfish encryption calls in +// bcrypt(). It's the string "OrpheanBeholderScryDoubt" in big-endian bytes. 
+var magicCipherData = []byte{ + 0x4f, 0x72, 0x70, 0x68, + 0x65, 0x61, 0x6e, 0x42, + 0x65, 0x68, 0x6f, 0x6c, + 0x64, 0x65, 0x72, 0x53, + 0x63, 0x72, 0x79, 0x44, + 0x6f, 0x75, 0x62, 0x74, +} + +type hashed struct { + hash []byte + salt []byte + cost int // allowed range is MinCost to MaxCost + major byte + minor byte +} + +// GenerateFromPassword returns the bcrypt hash of the password at the given +// cost. If the cost given is less than MinCost, the cost will be set to +// DefaultCost, instead. Use CompareHashAndPassword, as defined in this package, +// to compare the returned hashed password with its cleartext version. +func GenerateFromPassword(password []byte, cost int) ([]byte, error) { + p, err := newFromPassword(password, cost) + if err != nil { + return nil, err + } + return p.Hash(), nil +} + +// CompareHashAndPassword compares a bcrypt hashed password with its possible +// plaintext equivalent. Returns nil on success, or an error on failure. +func CompareHashAndPassword(hashedPassword, password []byte) error { + p, err := newFromHash(hashedPassword) + if err != nil { + return err + } + + otherHash, err := bcrypt(password, p.cost, p.salt) + if err != nil { + return err + } + + otherP := &hashed{otherHash, p.salt, p.cost, p.major, p.minor} + if subtle.ConstantTimeCompare(p.Hash(), otherP.Hash()) == 1 { + return nil + } + + return ErrMismatchedHashAndPassword +} + +// Cost returns the hashing cost used to create the given hashed +// password. When, in the future, the hashing cost of a password system needs +// to be increased in order to adjust for greater computational power, this +// function allows one to establish which passwords need to be updated. +func Cost(hashedPassword []byte) (int, error) { + p, err := newFromHash(hashedPassword) + if err != nil { + return 0, err + } + return p.cost, nil +} + +func newFromPassword(password []byte, cost int) (*hashed, error) { + if cost < MinCost { + cost = DefaultCost + } + p := new(hashed) + p.major = majorVersion + p.minor = minorVersion + + err := checkCost(cost) + if err != nil { + return nil, err + } + p.cost = cost + + unencodedSalt := make([]byte, maxSaltSize) + _, err = io.ReadFull(rand.Reader, unencodedSalt) + if err != nil { + return nil, err + } + + p.salt = base64Encode(unencodedSalt) + hash, err := bcrypt(password, p.cost, p.salt) + if err != nil { + return nil, err + } + p.hash = hash + return p, err +} + +func newFromHash(hashedSecret []byte) (*hashed, error) { + if len(hashedSecret) < minHashSize { + return nil, ErrHashTooShort + } + p := new(hashed) + n, err := p.decodeVersion(hashedSecret) + if err != nil { + return nil, err + } + hashedSecret = hashedSecret[n:] + n, err = p.decodeCost(hashedSecret) + if err != nil { + return nil, err + } + hashedSecret = hashedSecret[n:] + + // The "+2" is here because we'll have to append at most 2 '=' to the salt + // when base64 decoding it in expensiveBlowfishSetup(). 
+ p.salt = make([]byte, encodedSaltSize, encodedSaltSize+2) + copy(p.salt, hashedSecret[:encodedSaltSize]) + + hashedSecret = hashedSecret[encodedSaltSize:] + p.hash = make([]byte, len(hashedSecret)) + copy(p.hash, hashedSecret) + + return p, nil +} + +func bcrypt(password []byte, cost int, salt []byte) ([]byte, error) { + cipherData := make([]byte, len(magicCipherData)) + copy(cipherData, magicCipherData) + + c, err := expensiveBlowfishSetup(password, uint32(cost), salt) + if err != nil { + return nil, err + } + + for i := 0; i < 24; i += 8 { + for j := 0; j < 64; j++ { + c.Encrypt(cipherData[i:i+8], cipherData[i:i+8]) + } + } + + // Bug compatibility with C bcrypt implementations. We only encode 23 of + // the 24 bytes encrypted. + hsh := base64Encode(cipherData[:maxCryptedHashSize]) + return hsh, nil +} + +func expensiveBlowfishSetup(key []byte, cost uint32, salt []byte) (*blowfish.Cipher, error) { + + csalt, err := base64Decode(salt) + if err != nil { + return nil, err + } + + // Bug compatibility with C bcrypt implementations. They use the trailing + // NULL in the key string during expansion. + ckey := append(key, 0) + + c, err := blowfish.NewSaltedCipher(ckey, csalt) + if err != nil { + return nil, err + } + + var i, rounds uint64 + rounds = 1 << cost + for i = 0; i < rounds; i++ { + blowfish.ExpandKey(ckey, c) + blowfish.ExpandKey(csalt, c) + } + + return c, nil +} + +func (p *hashed) Hash() []byte { + arr := make([]byte, 60) + arr[0] = '$' + arr[1] = p.major + n := 2 + if p.minor != 0 { + arr[2] = p.minor + n = 3 + } + arr[n] = '$' + n += 1 + copy(arr[n:], []byte(fmt.Sprintf("%02d", p.cost))) + n += 2 + arr[n] = '$' + n += 1 + copy(arr[n:], p.salt) + n += encodedSaltSize + copy(arr[n:], p.hash) + n += encodedHashSize + return arr[:n] +} + +func (p *hashed) decodeVersion(sbytes []byte) (int, error) { + if sbytes[0] != '$' { + return -1, InvalidHashPrefixError(sbytes[0]) + } + if sbytes[1] > majorVersion { + return -1, HashVersionTooNewError(sbytes[1]) + } + p.major = sbytes[1] + n := 3 + if sbytes[2] != '$' { + p.minor = sbytes[2] + n++ + } + return n, nil +} + +// sbytes should begin where decodeVersion left off. +func (p *hashed) decodeCost(sbytes []byte) (int, error) { + cost, err := strconv.Atoi(string(sbytes[0:2])) + if err != nil { + return -1, err + } + err = checkCost(cost) + if err != nil { + return -1, err + } + p.cost = cost + return 3, nil +} + +func (p *hashed) String() string { + return fmt.Sprintf("&{hash: %#v, salt: %#v, cost: %d, major: %c, minor: %c}", string(p.hash), p.salt, p.cost, p.major, p.minor) +} + +func checkCost(cost int) error { + if cost < MinCost || cost > MaxCost { + return InvalidCostError(cost) + } + return nil +} diff --git a/vendor/github.com/docker/distribution/vendor/golang.org/x/crypto/blowfish/block.go b/vendor/github.com/docker/distribution/vendor/golang.org/x/crypto/blowfish/block.go new file mode 100644 index 000000000..9d80f1952 --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/golang.org/x/crypto/blowfish/block.go @@ -0,0 +1,159 @@ +// Copyright 2010 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package blowfish + +// getNextWord returns the next big-endian uint32 value from the byte slice +// at the given position in a circular manner, updating the position. 
+func getNextWord(b []byte, pos *int) uint32 { + var w uint32 + j := *pos + for i := 0; i < 4; i++ { + w = w<<8 | uint32(b[j]) + j++ + if j >= len(b) { + j = 0 + } + } + *pos = j + return w +} + +// ExpandKey performs a key expansion on the given *Cipher. Specifically, it +// performs the Blowfish algorithm's key schedule which sets up the *Cipher's +// pi and substitution tables for calls to Encrypt. This is used, primarily, +// by the bcrypt package to reuse the Blowfish key schedule during its +// set up. It's unlikely that you need to use this directly. +func ExpandKey(key []byte, c *Cipher) { + j := 0 + for i := 0; i < 18; i++ { + // Using inlined getNextWord for performance. + var d uint32 + for k := 0; k < 4; k++ { + d = d<<8 | uint32(key[j]) + j++ + if j >= len(key) { + j = 0 + } + } + c.p[i] ^= d + } + + var l, r uint32 + for i := 0; i < 18; i += 2 { + l, r = encryptBlock(l, r, c) + c.p[i], c.p[i+1] = l, r + } + + for i := 0; i < 256; i += 2 { + l, r = encryptBlock(l, r, c) + c.s0[i], c.s0[i+1] = l, r + } + for i := 0; i < 256; i += 2 { + l, r = encryptBlock(l, r, c) + c.s1[i], c.s1[i+1] = l, r + } + for i := 0; i < 256; i += 2 { + l, r = encryptBlock(l, r, c) + c.s2[i], c.s2[i+1] = l, r + } + for i := 0; i < 256; i += 2 { + l, r = encryptBlock(l, r, c) + c.s3[i], c.s3[i+1] = l, r + } +} + +// This is similar to ExpandKey, but folds the salt during the key +// schedule. While ExpandKey is essentially expandKeyWithSalt with an all-zero +// salt passed in, reusing ExpandKey turns out to be a place of inefficiency +// and specializing it here is useful. +func expandKeyWithSalt(key []byte, salt []byte, c *Cipher) { + j := 0 + for i := 0; i < 18; i++ { + c.p[i] ^= getNextWord(key, &j) + } + + j = 0 + var l, r uint32 + for i := 0; i < 18; i += 2 { + l ^= getNextWord(salt, &j) + r ^= getNextWord(salt, &j) + l, r = encryptBlock(l, r, c) + c.p[i], c.p[i+1] = l, r + } + + for i := 0; i < 256; i += 2 { + l ^= getNextWord(salt, &j) + r ^= getNextWord(salt, &j) + l, r = encryptBlock(l, r, c) + c.s0[i], c.s0[i+1] = l, r + } + + for i := 0; i < 256; i += 2 { + l ^= getNextWord(salt, &j) + r ^= getNextWord(salt, &j) + l, r = encryptBlock(l, r, c) + c.s1[i], c.s1[i+1] = l, r + } + + for i := 0; i < 256; i += 2 { + l ^= getNextWord(salt, &j) + r ^= getNextWord(salt, &j) + l, r = encryptBlock(l, r, c) + c.s2[i], c.s2[i+1] = l, r + } + + for i := 0; i < 256; i += 2 { + l ^= getNextWord(salt, &j) + r ^= getNextWord(salt, &j) + l, r = encryptBlock(l, r, c) + c.s3[i], c.s3[i+1] = l, r + } +} + +func encryptBlock(l, r uint32, c *Cipher) (uint32, uint32) { + xl, xr := l, r + xl ^= c.p[0] + xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[1] + xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[2] + xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[3] + xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[4] + xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[5] + xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[6] + xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[7] + xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[8] + xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[9] + xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ 
c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[10] + xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[11] + xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[12] + xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[13] + xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[14] + xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[15] + xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[16] + xr ^= c.p[17] + return xr, xl +} + +func decryptBlock(l, r uint32, c *Cipher) (uint32, uint32) { + xl, xr := l, r + xl ^= c.p[17] + xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[16] + xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[15] + xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[14] + xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[13] + xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[12] + xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[11] + xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[10] + xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[9] + xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[8] + xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[7] + xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[6] + xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[5] + xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[4] + xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[3] + xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[2] + xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[1] + xr ^= c.p[0] + return xr, xl +} diff --git a/vendor/github.com/docker/distribution/vendor/golang.org/x/crypto/blowfish/cipher.go b/vendor/github.com/docker/distribution/vendor/golang.org/x/crypto/blowfish/cipher.go new file mode 100644 index 000000000..5019658a4 --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/golang.org/x/crypto/blowfish/cipher.go @@ -0,0 +1,91 @@ +// Copyright 2010 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package blowfish implements Bruce Schneier's Blowfish encryption algorithm. +package blowfish + +// The code is a port of Bruce Schneier's C implementation. +// See http://www.schneier.com/blowfish.html. + +import "strconv" + +// The Blowfish block size in bytes. +const BlockSize = 8 + +// A Cipher is an instance of Blowfish encryption using a particular key. +type Cipher struct { + p [18]uint32 + s0, s1, s2, s3 [256]uint32 +} + +type KeySizeError int + +func (k KeySizeError) Error() string { + return "crypto/blowfish: invalid key size " + strconv.Itoa(int(k)) +} + +// NewCipher creates and returns a Cipher. 
+// The key argument should be the Blowfish key, from 1 to 56 bytes. +func NewCipher(key []byte) (*Cipher, error) { + var result Cipher + if k := len(key); k < 1 || k > 56 { + return nil, KeySizeError(k) + } + initCipher(&result) + ExpandKey(key, &result) + return &result, nil +} + +// NewSaltedCipher creates a returns a Cipher that folds a salt into its key +// schedule. For most purposes, NewCipher, instead of NewSaltedCipher, is +// sufficient and desirable. For bcrypt compatiblity, the key can be over 56 +// bytes. +func NewSaltedCipher(key, salt []byte) (*Cipher, error) { + if len(salt) == 0 { + return NewCipher(key) + } + var result Cipher + if k := len(key); k < 1 { + return nil, KeySizeError(k) + } + initCipher(&result) + expandKeyWithSalt(key, salt, &result) + return &result, nil +} + +// BlockSize returns the Blowfish block size, 8 bytes. +// It is necessary to satisfy the Block interface in the +// package "crypto/cipher". +func (c *Cipher) BlockSize() int { return BlockSize } + +// Encrypt encrypts the 8-byte buffer src using the key k +// and stores the result in dst. +// Note that for amounts of data larger than a block, +// it is not safe to just call Encrypt on successive blocks; +// instead, use an encryption mode like CBC (see crypto/cipher/cbc.go). +func (c *Cipher) Encrypt(dst, src []byte) { + l := uint32(src[0])<<24 | uint32(src[1])<<16 | uint32(src[2])<<8 | uint32(src[3]) + r := uint32(src[4])<<24 | uint32(src[5])<<16 | uint32(src[6])<<8 | uint32(src[7]) + l, r = encryptBlock(l, r, c) + dst[0], dst[1], dst[2], dst[3] = byte(l>>24), byte(l>>16), byte(l>>8), byte(l) + dst[4], dst[5], dst[6], dst[7] = byte(r>>24), byte(r>>16), byte(r>>8), byte(r) +} + +// Decrypt decrypts the 8-byte buffer src using the key k +// and stores the result in dst. +func (c *Cipher) Decrypt(dst, src []byte) { + l := uint32(src[0])<<24 | uint32(src[1])<<16 | uint32(src[2])<<8 | uint32(src[3]) + r := uint32(src[4])<<24 | uint32(src[5])<<16 | uint32(src[6])<<8 | uint32(src[7]) + l, r = decryptBlock(l, r, c) + dst[0], dst[1], dst[2], dst[3] = byte(l>>24), byte(l>>16), byte(l>>8), byte(l) + dst[4], dst[5], dst[6], dst[7] = byte(r>>24), byte(r>>16), byte(r>>8), byte(r) +} + +func initCipher(c *Cipher) { + copy(c.p[0:], p[0:]) + copy(c.s0[0:], s0[0:]) + copy(c.s1[0:], s1[0:]) + copy(c.s2[0:], s2[0:]) + copy(c.s3[0:], s3[0:]) +} diff --git a/vendor/github.com/docker/distribution/vendor/golang.org/x/crypto/blowfish/const.go b/vendor/github.com/docker/distribution/vendor/golang.org/x/crypto/blowfish/const.go new file mode 100644 index 000000000..8c5ee4cb0 --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/golang.org/x/crypto/blowfish/const.go @@ -0,0 +1,199 @@ +// Copyright 2010 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// The startup permutation array and substitution boxes. +// They are the hexadecimal digits of PI; see: +// http://www.schneier.com/code/constants.txt. 
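Editor's note, not part of the vendored diff: cipher.go above exposes a small API — NewCipher for plain keys, NewSaltedCipher for bcrypt-style salted key schedules, and block-at-a-time Encrypt/Decrypt. A minimal usage sketch follows; the key and plaintext values are made up for illustration, and anything larger than one 8-byte block should go through a proper mode such as CBC, as the Encrypt comment warns.

package main

import (
	"fmt"

	"golang.org/x/crypto/blowfish"
)

func main() {
	// Key may be 1 to 56 bytes; this value is illustrative only.
	c, err := blowfish.NewCipher([]byte("illustrative key"))
	if err != nil {
		panic(err)
	}

	src := []byte("8bytes!!") // exactly one BlockSize (8 bytes)
	dst := make([]byte, blowfish.BlockSize)
	c.Encrypt(dst, src)

	plain := make([]byte, blowfish.BlockSize)
	c.Decrypt(plain, dst)
	fmt.Printf("ciphertext=%x plaintext=%s\n", dst, plain)
}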
+ +package blowfish + +var s0 = [256]uint32{ + 0xd1310ba6, 0x98dfb5ac, 0x2ffd72db, 0xd01adfb7, 0xb8e1afed, 0x6a267e96, + 0xba7c9045, 0xf12c7f99, 0x24a19947, 0xb3916cf7, 0x0801f2e2, 0x858efc16, + 0x636920d8, 0x71574e69, 0xa458fea3, 0xf4933d7e, 0x0d95748f, 0x728eb658, + 0x718bcd58, 0x82154aee, 0x7b54a41d, 0xc25a59b5, 0x9c30d539, 0x2af26013, + 0xc5d1b023, 0x286085f0, 0xca417918, 0xb8db38ef, 0x8e79dcb0, 0x603a180e, + 0x6c9e0e8b, 0xb01e8a3e, 0xd71577c1, 0xbd314b27, 0x78af2fda, 0x55605c60, + 0xe65525f3, 0xaa55ab94, 0x57489862, 0x63e81440, 0x55ca396a, 0x2aab10b6, + 0xb4cc5c34, 0x1141e8ce, 0xa15486af, 0x7c72e993, 0xb3ee1411, 0x636fbc2a, + 0x2ba9c55d, 0x741831f6, 0xce5c3e16, 0x9b87931e, 0xafd6ba33, 0x6c24cf5c, + 0x7a325381, 0x28958677, 0x3b8f4898, 0x6b4bb9af, 0xc4bfe81b, 0x66282193, + 0x61d809cc, 0xfb21a991, 0x487cac60, 0x5dec8032, 0xef845d5d, 0xe98575b1, + 0xdc262302, 0xeb651b88, 0x23893e81, 0xd396acc5, 0x0f6d6ff3, 0x83f44239, + 0x2e0b4482, 0xa4842004, 0x69c8f04a, 0x9e1f9b5e, 0x21c66842, 0xf6e96c9a, + 0x670c9c61, 0xabd388f0, 0x6a51a0d2, 0xd8542f68, 0x960fa728, 0xab5133a3, + 0x6eef0b6c, 0x137a3be4, 0xba3bf050, 0x7efb2a98, 0xa1f1651d, 0x39af0176, + 0x66ca593e, 0x82430e88, 0x8cee8619, 0x456f9fb4, 0x7d84a5c3, 0x3b8b5ebe, + 0xe06f75d8, 0x85c12073, 0x401a449f, 0x56c16aa6, 0x4ed3aa62, 0x363f7706, + 0x1bfedf72, 0x429b023d, 0x37d0d724, 0xd00a1248, 0xdb0fead3, 0x49f1c09b, + 0x075372c9, 0x80991b7b, 0x25d479d8, 0xf6e8def7, 0xe3fe501a, 0xb6794c3b, + 0x976ce0bd, 0x04c006ba, 0xc1a94fb6, 0x409f60c4, 0x5e5c9ec2, 0x196a2463, + 0x68fb6faf, 0x3e6c53b5, 0x1339b2eb, 0x3b52ec6f, 0x6dfc511f, 0x9b30952c, + 0xcc814544, 0xaf5ebd09, 0xbee3d004, 0xde334afd, 0x660f2807, 0x192e4bb3, + 0xc0cba857, 0x45c8740f, 0xd20b5f39, 0xb9d3fbdb, 0x5579c0bd, 0x1a60320a, + 0xd6a100c6, 0x402c7279, 0x679f25fe, 0xfb1fa3cc, 0x8ea5e9f8, 0xdb3222f8, + 0x3c7516df, 0xfd616b15, 0x2f501ec8, 0xad0552ab, 0x323db5fa, 0xfd238760, + 0x53317b48, 0x3e00df82, 0x9e5c57bb, 0xca6f8ca0, 0x1a87562e, 0xdf1769db, + 0xd542a8f6, 0x287effc3, 0xac6732c6, 0x8c4f5573, 0x695b27b0, 0xbbca58c8, + 0xe1ffa35d, 0xb8f011a0, 0x10fa3d98, 0xfd2183b8, 0x4afcb56c, 0x2dd1d35b, + 0x9a53e479, 0xb6f84565, 0xd28e49bc, 0x4bfb9790, 0xe1ddf2da, 0xa4cb7e33, + 0x62fb1341, 0xcee4c6e8, 0xef20cada, 0x36774c01, 0xd07e9efe, 0x2bf11fb4, + 0x95dbda4d, 0xae909198, 0xeaad8e71, 0x6b93d5a0, 0xd08ed1d0, 0xafc725e0, + 0x8e3c5b2f, 0x8e7594b7, 0x8ff6e2fb, 0xf2122b64, 0x8888b812, 0x900df01c, + 0x4fad5ea0, 0x688fc31c, 0xd1cff191, 0xb3a8c1ad, 0x2f2f2218, 0xbe0e1777, + 0xea752dfe, 0x8b021fa1, 0xe5a0cc0f, 0xb56f74e8, 0x18acf3d6, 0xce89e299, + 0xb4a84fe0, 0xfd13e0b7, 0x7cc43b81, 0xd2ada8d9, 0x165fa266, 0x80957705, + 0x93cc7314, 0x211a1477, 0xe6ad2065, 0x77b5fa86, 0xc75442f5, 0xfb9d35cf, + 0xebcdaf0c, 0x7b3e89a0, 0xd6411bd3, 0xae1e7e49, 0x00250e2d, 0x2071b35e, + 0x226800bb, 0x57b8e0af, 0x2464369b, 0xf009b91e, 0x5563911d, 0x59dfa6aa, + 0x78c14389, 0xd95a537f, 0x207d5ba2, 0x02e5b9c5, 0x83260376, 0x6295cfa9, + 0x11c81968, 0x4e734a41, 0xb3472dca, 0x7b14a94a, 0x1b510052, 0x9a532915, + 0xd60f573f, 0xbc9bc6e4, 0x2b60a476, 0x81e67400, 0x08ba6fb5, 0x571be91f, + 0xf296ec6b, 0x2a0dd915, 0xb6636521, 0xe7b9f9b6, 0xff34052e, 0xc5855664, + 0x53b02d5d, 0xa99f8fa1, 0x08ba4799, 0x6e85076a, +} + +var s1 = [256]uint32{ + 0x4b7a70e9, 0xb5b32944, 0xdb75092e, 0xc4192623, 0xad6ea6b0, 0x49a7df7d, + 0x9cee60b8, 0x8fedb266, 0xecaa8c71, 0x699a17ff, 0x5664526c, 0xc2b19ee1, + 0x193602a5, 0x75094c29, 0xa0591340, 0xe4183a3e, 0x3f54989a, 0x5b429d65, + 0x6b8fe4d6, 0x99f73fd6, 0xa1d29c07, 0xefe830f5, 0x4d2d38e6, 0xf0255dc1, + 0x4cdd2086, 0x8470eb26, 
0x6382e9c6, 0x021ecc5e, 0x09686b3f, 0x3ebaefc9, + 0x3c971814, 0x6b6a70a1, 0x687f3584, 0x52a0e286, 0xb79c5305, 0xaa500737, + 0x3e07841c, 0x7fdeae5c, 0x8e7d44ec, 0x5716f2b8, 0xb03ada37, 0xf0500c0d, + 0xf01c1f04, 0x0200b3ff, 0xae0cf51a, 0x3cb574b2, 0x25837a58, 0xdc0921bd, + 0xd19113f9, 0x7ca92ff6, 0x94324773, 0x22f54701, 0x3ae5e581, 0x37c2dadc, + 0xc8b57634, 0x9af3dda7, 0xa9446146, 0x0fd0030e, 0xecc8c73e, 0xa4751e41, + 0xe238cd99, 0x3bea0e2f, 0x3280bba1, 0x183eb331, 0x4e548b38, 0x4f6db908, + 0x6f420d03, 0xf60a04bf, 0x2cb81290, 0x24977c79, 0x5679b072, 0xbcaf89af, + 0xde9a771f, 0xd9930810, 0xb38bae12, 0xdccf3f2e, 0x5512721f, 0x2e6b7124, + 0x501adde6, 0x9f84cd87, 0x7a584718, 0x7408da17, 0xbc9f9abc, 0xe94b7d8c, + 0xec7aec3a, 0xdb851dfa, 0x63094366, 0xc464c3d2, 0xef1c1847, 0x3215d908, + 0xdd433b37, 0x24c2ba16, 0x12a14d43, 0x2a65c451, 0x50940002, 0x133ae4dd, + 0x71dff89e, 0x10314e55, 0x81ac77d6, 0x5f11199b, 0x043556f1, 0xd7a3c76b, + 0x3c11183b, 0x5924a509, 0xf28fe6ed, 0x97f1fbfa, 0x9ebabf2c, 0x1e153c6e, + 0x86e34570, 0xeae96fb1, 0x860e5e0a, 0x5a3e2ab3, 0x771fe71c, 0x4e3d06fa, + 0x2965dcb9, 0x99e71d0f, 0x803e89d6, 0x5266c825, 0x2e4cc978, 0x9c10b36a, + 0xc6150eba, 0x94e2ea78, 0xa5fc3c53, 0x1e0a2df4, 0xf2f74ea7, 0x361d2b3d, + 0x1939260f, 0x19c27960, 0x5223a708, 0xf71312b6, 0xebadfe6e, 0xeac31f66, + 0xe3bc4595, 0xa67bc883, 0xb17f37d1, 0x018cff28, 0xc332ddef, 0xbe6c5aa5, + 0x65582185, 0x68ab9802, 0xeecea50f, 0xdb2f953b, 0x2aef7dad, 0x5b6e2f84, + 0x1521b628, 0x29076170, 0xecdd4775, 0x619f1510, 0x13cca830, 0xeb61bd96, + 0x0334fe1e, 0xaa0363cf, 0xb5735c90, 0x4c70a239, 0xd59e9e0b, 0xcbaade14, + 0xeecc86bc, 0x60622ca7, 0x9cab5cab, 0xb2f3846e, 0x648b1eaf, 0x19bdf0ca, + 0xa02369b9, 0x655abb50, 0x40685a32, 0x3c2ab4b3, 0x319ee9d5, 0xc021b8f7, + 0x9b540b19, 0x875fa099, 0x95f7997e, 0x623d7da8, 0xf837889a, 0x97e32d77, + 0x11ed935f, 0x16681281, 0x0e358829, 0xc7e61fd6, 0x96dedfa1, 0x7858ba99, + 0x57f584a5, 0x1b227263, 0x9b83c3ff, 0x1ac24696, 0xcdb30aeb, 0x532e3054, + 0x8fd948e4, 0x6dbc3128, 0x58ebf2ef, 0x34c6ffea, 0xfe28ed61, 0xee7c3c73, + 0x5d4a14d9, 0xe864b7e3, 0x42105d14, 0x203e13e0, 0x45eee2b6, 0xa3aaabea, + 0xdb6c4f15, 0xfacb4fd0, 0xc742f442, 0xef6abbb5, 0x654f3b1d, 0x41cd2105, + 0xd81e799e, 0x86854dc7, 0xe44b476a, 0x3d816250, 0xcf62a1f2, 0x5b8d2646, + 0xfc8883a0, 0xc1c7b6a3, 0x7f1524c3, 0x69cb7492, 0x47848a0b, 0x5692b285, + 0x095bbf00, 0xad19489d, 0x1462b174, 0x23820e00, 0x58428d2a, 0x0c55f5ea, + 0x1dadf43e, 0x233f7061, 0x3372f092, 0x8d937e41, 0xd65fecf1, 0x6c223bdb, + 0x7cde3759, 0xcbee7460, 0x4085f2a7, 0xce77326e, 0xa6078084, 0x19f8509e, + 0xe8efd855, 0x61d99735, 0xa969a7aa, 0xc50c06c2, 0x5a04abfc, 0x800bcadc, + 0x9e447a2e, 0xc3453484, 0xfdd56705, 0x0e1e9ec9, 0xdb73dbd3, 0x105588cd, + 0x675fda79, 0xe3674340, 0xc5c43465, 0x713e38d8, 0x3d28f89e, 0xf16dff20, + 0x153e21e7, 0x8fb03d4a, 0xe6e39f2b, 0xdb83adf7, +} + +var s2 = [256]uint32{ + 0xe93d5a68, 0x948140f7, 0xf64c261c, 0x94692934, 0x411520f7, 0x7602d4f7, + 0xbcf46b2e, 0xd4a20068, 0xd4082471, 0x3320f46a, 0x43b7d4b7, 0x500061af, + 0x1e39f62e, 0x97244546, 0x14214f74, 0xbf8b8840, 0x4d95fc1d, 0x96b591af, + 0x70f4ddd3, 0x66a02f45, 0xbfbc09ec, 0x03bd9785, 0x7fac6dd0, 0x31cb8504, + 0x96eb27b3, 0x55fd3941, 0xda2547e6, 0xabca0a9a, 0x28507825, 0x530429f4, + 0x0a2c86da, 0xe9b66dfb, 0x68dc1462, 0xd7486900, 0x680ec0a4, 0x27a18dee, + 0x4f3ffea2, 0xe887ad8c, 0xb58ce006, 0x7af4d6b6, 0xaace1e7c, 0xd3375fec, + 0xce78a399, 0x406b2a42, 0x20fe9e35, 0xd9f385b9, 0xee39d7ab, 0x3b124e8b, + 0x1dc9faf7, 0x4b6d1856, 0x26a36631, 0xeae397b2, 0x3a6efa74, 0xdd5b4332, + 0x6841e7f7, 
0xca7820fb, 0xfb0af54e, 0xd8feb397, 0x454056ac, 0xba489527, + 0x55533a3a, 0x20838d87, 0xfe6ba9b7, 0xd096954b, 0x55a867bc, 0xa1159a58, + 0xcca92963, 0x99e1db33, 0xa62a4a56, 0x3f3125f9, 0x5ef47e1c, 0x9029317c, + 0xfdf8e802, 0x04272f70, 0x80bb155c, 0x05282ce3, 0x95c11548, 0xe4c66d22, + 0x48c1133f, 0xc70f86dc, 0x07f9c9ee, 0x41041f0f, 0x404779a4, 0x5d886e17, + 0x325f51eb, 0xd59bc0d1, 0xf2bcc18f, 0x41113564, 0x257b7834, 0x602a9c60, + 0xdff8e8a3, 0x1f636c1b, 0x0e12b4c2, 0x02e1329e, 0xaf664fd1, 0xcad18115, + 0x6b2395e0, 0x333e92e1, 0x3b240b62, 0xeebeb922, 0x85b2a20e, 0xe6ba0d99, + 0xde720c8c, 0x2da2f728, 0xd0127845, 0x95b794fd, 0x647d0862, 0xe7ccf5f0, + 0x5449a36f, 0x877d48fa, 0xc39dfd27, 0xf33e8d1e, 0x0a476341, 0x992eff74, + 0x3a6f6eab, 0xf4f8fd37, 0xa812dc60, 0xa1ebddf8, 0x991be14c, 0xdb6e6b0d, + 0xc67b5510, 0x6d672c37, 0x2765d43b, 0xdcd0e804, 0xf1290dc7, 0xcc00ffa3, + 0xb5390f92, 0x690fed0b, 0x667b9ffb, 0xcedb7d9c, 0xa091cf0b, 0xd9155ea3, + 0xbb132f88, 0x515bad24, 0x7b9479bf, 0x763bd6eb, 0x37392eb3, 0xcc115979, + 0x8026e297, 0xf42e312d, 0x6842ada7, 0xc66a2b3b, 0x12754ccc, 0x782ef11c, + 0x6a124237, 0xb79251e7, 0x06a1bbe6, 0x4bfb6350, 0x1a6b1018, 0x11caedfa, + 0x3d25bdd8, 0xe2e1c3c9, 0x44421659, 0x0a121386, 0xd90cec6e, 0xd5abea2a, + 0x64af674e, 0xda86a85f, 0xbebfe988, 0x64e4c3fe, 0x9dbc8057, 0xf0f7c086, + 0x60787bf8, 0x6003604d, 0xd1fd8346, 0xf6381fb0, 0x7745ae04, 0xd736fccc, + 0x83426b33, 0xf01eab71, 0xb0804187, 0x3c005e5f, 0x77a057be, 0xbde8ae24, + 0x55464299, 0xbf582e61, 0x4e58f48f, 0xf2ddfda2, 0xf474ef38, 0x8789bdc2, + 0x5366f9c3, 0xc8b38e74, 0xb475f255, 0x46fcd9b9, 0x7aeb2661, 0x8b1ddf84, + 0x846a0e79, 0x915f95e2, 0x466e598e, 0x20b45770, 0x8cd55591, 0xc902de4c, + 0xb90bace1, 0xbb8205d0, 0x11a86248, 0x7574a99e, 0xb77f19b6, 0xe0a9dc09, + 0x662d09a1, 0xc4324633, 0xe85a1f02, 0x09f0be8c, 0x4a99a025, 0x1d6efe10, + 0x1ab93d1d, 0x0ba5a4df, 0xa186f20f, 0x2868f169, 0xdcb7da83, 0x573906fe, + 0xa1e2ce9b, 0x4fcd7f52, 0x50115e01, 0xa70683fa, 0xa002b5c4, 0x0de6d027, + 0x9af88c27, 0x773f8641, 0xc3604c06, 0x61a806b5, 0xf0177a28, 0xc0f586e0, + 0x006058aa, 0x30dc7d62, 0x11e69ed7, 0x2338ea63, 0x53c2dd94, 0xc2c21634, + 0xbbcbee56, 0x90bcb6de, 0xebfc7da1, 0xce591d76, 0x6f05e409, 0x4b7c0188, + 0x39720a3d, 0x7c927c24, 0x86e3725f, 0x724d9db9, 0x1ac15bb4, 0xd39eb8fc, + 0xed545578, 0x08fca5b5, 0xd83d7cd3, 0x4dad0fc4, 0x1e50ef5e, 0xb161e6f8, + 0xa28514d9, 0x6c51133c, 0x6fd5c7e7, 0x56e14ec4, 0x362abfce, 0xddc6c837, + 0xd79a3234, 0x92638212, 0x670efa8e, 0x406000e0, +} + +var s3 = [256]uint32{ + 0x3a39ce37, 0xd3faf5cf, 0xabc27737, 0x5ac52d1b, 0x5cb0679e, 0x4fa33742, + 0xd3822740, 0x99bc9bbe, 0xd5118e9d, 0xbf0f7315, 0xd62d1c7e, 0xc700c47b, + 0xb78c1b6b, 0x21a19045, 0xb26eb1be, 0x6a366eb4, 0x5748ab2f, 0xbc946e79, + 0xc6a376d2, 0x6549c2c8, 0x530ff8ee, 0x468dde7d, 0xd5730a1d, 0x4cd04dc6, + 0x2939bbdb, 0xa9ba4650, 0xac9526e8, 0xbe5ee304, 0xa1fad5f0, 0x6a2d519a, + 0x63ef8ce2, 0x9a86ee22, 0xc089c2b8, 0x43242ef6, 0xa51e03aa, 0x9cf2d0a4, + 0x83c061ba, 0x9be96a4d, 0x8fe51550, 0xba645bd6, 0x2826a2f9, 0xa73a3ae1, + 0x4ba99586, 0xef5562e9, 0xc72fefd3, 0xf752f7da, 0x3f046f69, 0x77fa0a59, + 0x80e4a915, 0x87b08601, 0x9b09e6ad, 0x3b3ee593, 0xe990fd5a, 0x9e34d797, + 0x2cf0b7d9, 0x022b8b51, 0x96d5ac3a, 0x017da67d, 0xd1cf3ed6, 0x7c7d2d28, + 0x1f9f25cf, 0xadf2b89b, 0x5ad6b472, 0x5a88f54c, 0xe029ac71, 0xe019a5e6, + 0x47b0acfd, 0xed93fa9b, 0xe8d3c48d, 0x283b57cc, 0xf8d56629, 0x79132e28, + 0x785f0191, 0xed756055, 0xf7960e44, 0xe3d35e8c, 0x15056dd4, 0x88f46dba, + 0x03a16125, 0x0564f0bd, 0xc3eb9e15, 0x3c9057a2, 0x97271aec, 0xa93a072a, + 
0x1b3f6d9b, 0x1e6321f5, 0xf59c66fb, 0x26dcf319, 0x7533d928, 0xb155fdf5, + 0x03563482, 0x8aba3cbb, 0x28517711, 0xc20ad9f8, 0xabcc5167, 0xccad925f, + 0x4de81751, 0x3830dc8e, 0x379d5862, 0x9320f991, 0xea7a90c2, 0xfb3e7bce, + 0x5121ce64, 0x774fbe32, 0xa8b6e37e, 0xc3293d46, 0x48de5369, 0x6413e680, + 0xa2ae0810, 0xdd6db224, 0x69852dfd, 0x09072166, 0xb39a460a, 0x6445c0dd, + 0x586cdecf, 0x1c20c8ae, 0x5bbef7dd, 0x1b588d40, 0xccd2017f, 0x6bb4e3bb, + 0xdda26a7e, 0x3a59ff45, 0x3e350a44, 0xbcb4cdd5, 0x72eacea8, 0xfa6484bb, + 0x8d6612ae, 0xbf3c6f47, 0xd29be463, 0x542f5d9e, 0xaec2771b, 0xf64e6370, + 0x740e0d8d, 0xe75b1357, 0xf8721671, 0xaf537d5d, 0x4040cb08, 0x4eb4e2cc, + 0x34d2466a, 0x0115af84, 0xe1b00428, 0x95983a1d, 0x06b89fb4, 0xce6ea048, + 0x6f3f3b82, 0x3520ab82, 0x011a1d4b, 0x277227f8, 0x611560b1, 0xe7933fdc, + 0xbb3a792b, 0x344525bd, 0xa08839e1, 0x51ce794b, 0x2f32c9b7, 0xa01fbac9, + 0xe01cc87e, 0xbcc7d1f6, 0xcf0111c3, 0xa1e8aac7, 0x1a908749, 0xd44fbd9a, + 0xd0dadecb, 0xd50ada38, 0x0339c32a, 0xc6913667, 0x8df9317c, 0xe0b12b4f, + 0xf79e59b7, 0x43f5bb3a, 0xf2d519ff, 0x27d9459c, 0xbf97222c, 0x15e6fc2a, + 0x0f91fc71, 0x9b941525, 0xfae59361, 0xceb69ceb, 0xc2a86459, 0x12baa8d1, + 0xb6c1075e, 0xe3056a0c, 0x10d25065, 0xcb03a442, 0xe0ec6e0e, 0x1698db3b, + 0x4c98a0be, 0x3278e964, 0x9f1f9532, 0xe0d392df, 0xd3a0342b, 0x8971f21e, + 0x1b0a7441, 0x4ba3348c, 0xc5be7120, 0xc37632d8, 0xdf359f8d, 0x9b992f2e, + 0xe60b6f47, 0x0fe3f11d, 0xe54cda54, 0x1edad891, 0xce6279cf, 0xcd3e7e6f, + 0x1618b166, 0xfd2c1d05, 0x848fd2c5, 0xf6fb2299, 0xf523f357, 0xa6327623, + 0x93a83531, 0x56cccd02, 0xacf08162, 0x5a75ebb5, 0x6e163697, 0x88d273cc, + 0xde966292, 0x81b949d0, 0x4c50901b, 0x71c65614, 0xe6c6c7bd, 0x327a140a, + 0x45e1d006, 0xc3f27b9a, 0xc9aa53fd, 0x62a80f00, 0xbb25bfe2, 0x35bdd2f6, + 0x71126905, 0xb2040222, 0xb6cbcf7c, 0xcd769c2b, 0x53113ec0, 0x1640e3d3, + 0x38abbd60, 0x2547adf0, 0xba38209c, 0xf746ce76, 0x77afa1c5, 0x20756060, + 0x85cbfe4e, 0x8ae88dd8, 0x7aaaf9b0, 0x4cf9aa7e, 0x1948c25c, 0x02fb8a8c, + 0x01c36ae4, 0xd6ebe1f9, 0x90d4f869, 0xa65cdea0, 0x3f09252d, 0xc208e69f, + 0xb74e6132, 0xce77e25b, 0x578fdfe3, 0x3ac372e6, +} + +var p = [18]uint32{ + 0x243f6a88, 0x85a308d3, 0x13198a2e, 0x03707344, 0xa4093822, 0x299f31d0, + 0x082efa98, 0xec4e6c89, 0x452821e6, 0x38d01377, 0xbe5466cf, 0x34e90c6c, + 0xc0ac29b7, 0xc97c50dd, 0x3f84d5b5, 0xb5470917, 0x9216d5d9, 0x8979fb1b, +} diff --git a/vendor/github.com/docker/distribution/vendor/golang.org/x/crypto/ocsp/ocsp.go b/vendor/github.com/docker/distribution/vendor/golang.org/x/crypto/ocsp/ocsp.go new file mode 100644 index 000000000..f6a1bd4da --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/golang.org/x/crypto/ocsp/ocsp.go @@ -0,0 +1,592 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package ocsp parses OCSP responses as specified in RFC 2560. OCSP responses +// are signed messages attesting to the validity of a certificate for a small +// period of time. This is used to manage revocation for X.509 certificates. +package ocsp + +import ( + "crypto" + "crypto/ecdsa" + "crypto/elliptic" + "crypto/rand" + "crypto/rsa" + "crypto/sha1" + "crypto/x509" + "crypto/x509/pkix" + "encoding/asn1" + "errors" + "math/big" + "time" +) + +var idPKIXOCSPBasic = asn1.ObjectIdentifier([]int{1, 3, 6, 1, 5, 5, 7, 48, 1, 1}) + +// These are internal structures that reflect the ASN.1 structure of an OCSP +// response. See RFC 2560, section 4.2. 
+ +const ( + ocspSuccess = 0 + ocspMalformed = 1 + ocspInternalError = 2 + ocspTryLater = 3 + ocspSigRequired = 4 + ocspUnauthorized = 5 +) + +type certID struct { + HashAlgorithm pkix.AlgorithmIdentifier + NameHash []byte + IssuerKeyHash []byte + SerialNumber *big.Int +} + +// https://tools.ietf.org/html/rfc2560#section-4.1.1 +type ocspRequest struct { + TBSRequest tbsRequest +} + +type tbsRequest struct { + Version int `asn1:"explicit,tag:0,default:0,optional"` + RequestorName pkix.RDNSequence `asn1:"explicit,tag:1,optional"` + RequestList []request +} + +type request struct { + Cert certID +} + +type responseASN1 struct { + Status asn1.Enumerated + Response responseBytes `asn1:"explicit,tag:0"` +} + +type responseBytes struct { + ResponseType asn1.ObjectIdentifier + Response []byte +} + +type basicResponse struct { + TBSResponseData responseData + SignatureAlgorithm pkix.AlgorithmIdentifier + Signature asn1.BitString + Certificates []asn1.RawValue `asn1:"explicit,tag:0,optional"` +} + +type responseData struct { + Raw asn1.RawContent + Version int `asn1:"optional,default:1,explicit,tag:0"` + RawResponderName asn1.RawValue `asn1:"optional,explicit,tag:1"` + KeyHash []byte `asn1:"optional,explicit,tag:2"` + ProducedAt time.Time `asn1:"generalized"` + Responses []singleResponse +} + +type singleResponse struct { + CertID certID + Good asn1.Flag `asn1:"tag:0,optional"` + Revoked revokedInfo `asn1:"explicit,tag:1,optional"` + Unknown asn1.Flag `asn1:"tag:2,optional"` + ThisUpdate time.Time `asn1:"generalized"` + NextUpdate time.Time `asn1:"generalized,explicit,tag:0,optional"` +} + +type revokedInfo struct { + RevocationTime time.Time `asn1:"generalized"` + Reason int `asn1:"explicit,tag:0,optional"` +} + +var ( + oidSignatureMD2WithRSA = asn1.ObjectIdentifier{1, 2, 840, 113549, 1, 1, 2} + oidSignatureMD5WithRSA = asn1.ObjectIdentifier{1, 2, 840, 113549, 1, 1, 4} + oidSignatureSHA1WithRSA = asn1.ObjectIdentifier{1, 2, 840, 113549, 1, 1, 5} + oidSignatureSHA256WithRSA = asn1.ObjectIdentifier{1, 2, 840, 113549, 1, 1, 11} + oidSignatureSHA384WithRSA = asn1.ObjectIdentifier{1, 2, 840, 113549, 1, 1, 12} + oidSignatureSHA512WithRSA = asn1.ObjectIdentifier{1, 2, 840, 113549, 1, 1, 13} + oidSignatureDSAWithSHA1 = asn1.ObjectIdentifier{1, 2, 840, 10040, 4, 3} + oidSignatureDSAWithSHA256 = asn1.ObjectIdentifier{2, 16, 840, 1, 101, 4, 3, 2} + oidSignatureECDSAWithSHA1 = asn1.ObjectIdentifier{1, 2, 840, 10045, 4, 1} + oidSignatureECDSAWithSHA256 = asn1.ObjectIdentifier{1, 2, 840, 10045, 4, 3, 2} + oidSignatureECDSAWithSHA384 = asn1.ObjectIdentifier{1, 2, 840, 10045, 4, 3, 3} + oidSignatureECDSAWithSHA512 = asn1.ObjectIdentifier{1, 2, 840, 10045, 4, 3, 4} +) + +var hashOIDs = map[crypto.Hash]asn1.ObjectIdentifier{ + crypto.SHA1: asn1.ObjectIdentifier([]int{1, 3, 14, 3, 2, 26}), + crypto.SHA256: asn1.ObjectIdentifier([]int{2, 16, 840, 1, 101, 3, 4, 2, 1}), + crypto.SHA384: asn1.ObjectIdentifier([]int{2, 16, 840, 1, 101, 3, 4, 2, 2}), + crypto.SHA512: asn1.ObjectIdentifier([]int{2, 16, 840, 1, 101, 3, 4, 2, 3}), +} + +// TODO(rlb): This is also from crypto/x509, so same comment as AGL's below +var signatureAlgorithmDetails = []struct { + algo x509.SignatureAlgorithm + oid asn1.ObjectIdentifier + pubKeyAlgo x509.PublicKeyAlgorithm + hash crypto.Hash +}{ + {x509.MD2WithRSA, oidSignatureMD2WithRSA, x509.RSA, crypto.Hash(0) /* no value for MD2 */}, + {x509.MD5WithRSA, oidSignatureMD5WithRSA, x509.RSA, crypto.MD5}, + {x509.SHA1WithRSA, oidSignatureSHA1WithRSA, x509.RSA, crypto.SHA1}, + {x509.SHA256WithRSA, 
oidSignatureSHA256WithRSA, x509.RSA, crypto.SHA256}, + {x509.SHA384WithRSA, oidSignatureSHA384WithRSA, x509.RSA, crypto.SHA384}, + {x509.SHA512WithRSA, oidSignatureSHA512WithRSA, x509.RSA, crypto.SHA512}, + {x509.DSAWithSHA1, oidSignatureDSAWithSHA1, x509.DSA, crypto.SHA1}, + {x509.DSAWithSHA256, oidSignatureDSAWithSHA256, x509.DSA, crypto.SHA256}, + {x509.ECDSAWithSHA1, oidSignatureECDSAWithSHA1, x509.ECDSA, crypto.SHA1}, + {x509.ECDSAWithSHA256, oidSignatureECDSAWithSHA256, x509.ECDSA, crypto.SHA256}, + {x509.ECDSAWithSHA384, oidSignatureECDSAWithSHA384, x509.ECDSA, crypto.SHA384}, + {x509.ECDSAWithSHA512, oidSignatureECDSAWithSHA512, x509.ECDSA, crypto.SHA512}, +} + +// TODO(rlb): This is also from crypto/x509, so same comment as AGL's below +func signingParamsForPublicKey(pub interface{}, requestedSigAlgo x509.SignatureAlgorithm) (hashFunc crypto.Hash, sigAlgo pkix.AlgorithmIdentifier, err error) { + var pubType x509.PublicKeyAlgorithm + + switch pub := pub.(type) { + case *rsa.PublicKey: + pubType = x509.RSA + hashFunc = crypto.SHA256 + sigAlgo.Algorithm = oidSignatureSHA256WithRSA + sigAlgo.Parameters = asn1.RawValue{ + Tag: 5, + } + + case *ecdsa.PublicKey: + pubType = x509.ECDSA + + switch pub.Curve { + case elliptic.P224(), elliptic.P256(): + hashFunc = crypto.SHA256 + sigAlgo.Algorithm = oidSignatureECDSAWithSHA256 + case elliptic.P384(): + hashFunc = crypto.SHA384 + sigAlgo.Algorithm = oidSignatureECDSAWithSHA384 + case elliptic.P521(): + hashFunc = crypto.SHA512 + sigAlgo.Algorithm = oidSignatureECDSAWithSHA512 + default: + err = errors.New("x509: unknown elliptic curve") + } + + default: + err = errors.New("x509: only RSA and ECDSA keys supported") + } + + if err != nil { + return + } + + if requestedSigAlgo == 0 { + return + } + + found := false + for _, details := range signatureAlgorithmDetails { + if details.algo == requestedSigAlgo { + if details.pubKeyAlgo != pubType { + err = errors.New("x509: requested SignatureAlgorithm does not match private key type") + return + } + sigAlgo.Algorithm, hashFunc = details.oid, details.hash + if hashFunc == 0 { + err = errors.New("x509: cannot sign with hash function requested") + return + } + found = true + break + } + } + + if !found { + err = errors.New("x509: unknown SignatureAlgorithm") + } + + return +} + +// TODO(agl): this is taken from crypto/x509 and so should probably be exported +// from crypto/x509 or crypto/x509/pkix. +func getSignatureAlgorithmFromOID(oid asn1.ObjectIdentifier) x509.SignatureAlgorithm { + for _, details := range signatureAlgorithmDetails { + if oid.Equal(details.oid) { + return details.algo + } + } + return x509.UnknownSignatureAlgorithm +} + +// TODO(rlb): This is not taken from crypto/x509, but it's of the same general form. +func getHashAlgorithmFromOID(target asn1.ObjectIdentifier) crypto.Hash { + for hash, oid := range hashOIDs { + if oid.Equal(target) { + return hash + } + } + return crypto.Hash(0) +} + +// This is the exposed reflection of the internal OCSP structures. + +const ( + // Good means that the certificate is valid. + Good = iota + // Revoked means that the certificate has been deliberately revoked. + Revoked = iota + // Unknown means that the OCSP responder doesn't know about the certificate. + Unknown = iota + // ServerFailed means that the OCSP responder failed to process the request. + ServerFailed = iota +) + +// Request represents an OCSP request. See RFC 2560. 
+type Request struct { + HashAlgorithm crypto.Hash + IssuerNameHash []byte + IssuerKeyHash []byte + SerialNumber *big.Int +} + +// Response represents an OCSP response. See RFC 2560. +type Response struct { + // Status is one of {Good, Revoked, Unknown, ServerFailed} + Status int + SerialNumber *big.Int + ProducedAt, ThisUpdate, NextUpdate, RevokedAt time.Time + RevocationReason int + Certificate *x509.Certificate + // TBSResponseData contains the raw bytes of the signed response. If + // Certificate is nil then this can be used to verify Signature. + TBSResponseData []byte + Signature []byte + SignatureAlgorithm x509.SignatureAlgorithm +} + +// These are pre-serialized error responses for the various non-success codes +// defined by OCSP. The Unauthorized code in particular can be used by an OCSP +// responder that supports only pre-signed responses as a response to requests +// for certificates with unknown status. See RFC 5019. +var ( + MalformedRequestErrorResponse = []byte{0x30, 0x03, 0x0A, 0x01, 0x01} + InternalErrorErrorResponse = []byte{0x30, 0x03, 0x0A, 0x01, 0x02} + TryLaterErrorResponse = []byte{0x30, 0x03, 0x0A, 0x01, 0x03} + SigRequredErrorResponse = []byte{0x30, 0x03, 0x0A, 0x01, 0x05} + UnauthorizedErrorResponse = []byte{0x30, 0x03, 0x0A, 0x01, 0x06} +) + +// CheckSignatureFrom checks that the signature in resp is a valid signature +// from issuer. This should only be used if resp.Certificate is nil. Otherwise, +// the OCSP response contained an intermediate certificate that created the +// signature. That signature is checked by ParseResponse and only +// resp.Certificate remains to be validated. +func (resp *Response) CheckSignatureFrom(issuer *x509.Certificate) error { + return issuer.CheckSignature(resp.SignatureAlgorithm, resp.TBSResponseData, resp.Signature) +} + +// ParseError results from an invalid OCSP response. +type ParseError string + +func (p ParseError) Error() string { + return string(p) +} + +// ParseRequest parses an OCSP request in DER form. It only supports +// requests for a single certificate. Signed requests are not supported. +// If a request includes a signature, it will result in a ParseError. +func ParseRequest(bytes []byte) (*Request, error) { + var req ocspRequest + rest, err := asn1.Unmarshal(bytes, &req) + if err != nil { + return nil, err + } + if len(rest) > 0 { + return nil, ParseError("trailing data in OCSP request") + } + + if len(req.TBSRequest.RequestList) == 0 { + return nil, ParseError("OCSP request contains no request body") + } + innerRequest := req.TBSRequest.RequestList[0] + + hashFunc := getHashAlgorithmFromOID(innerRequest.Cert.HashAlgorithm.Algorithm) + if hashFunc == crypto.Hash(0) { + return nil, ParseError("OCSP request uses unknown hash function") + } + + return &Request{ + HashAlgorithm: hashFunc, + IssuerNameHash: innerRequest.Cert.NameHash, + IssuerKeyHash: innerRequest.Cert.IssuerKeyHash, + SerialNumber: innerRequest.Cert.SerialNumber, + }, nil +} + +// ParseResponse parses an OCSP response in DER form. It only supports +// responses for a single certificate. If the response contains a certificate +// then the signature over the response is checked. If issuer is not nil then +// it will be used to validate the signature or embedded certificate. Invalid +// signatures or parse failures will result in a ParseError. 
+func ParseResponse(bytes []byte, issuer *x509.Certificate) (*Response, error) { + var resp responseASN1 + rest, err := asn1.Unmarshal(bytes, &resp) + if err != nil { + return nil, err + } + if len(rest) > 0 { + return nil, ParseError("trailing data in OCSP response") + } + + ret := new(Response) + if resp.Status != ocspSuccess { + ret.Status = ServerFailed + return ret, nil + } + + if !resp.Response.ResponseType.Equal(idPKIXOCSPBasic) { + return nil, ParseError("bad OCSP response type") + } + + var basicResp basicResponse + rest, err = asn1.Unmarshal(resp.Response.Response, &basicResp) + if err != nil { + return nil, err + } + + if len(basicResp.Certificates) > 1 { + return nil, ParseError("OCSP response contains bad number of certificates") + } + + if len(basicResp.TBSResponseData.Responses) != 1 { + return nil, ParseError("OCSP response contains bad number of responses") + } + + ret.TBSResponseData = basicResp.TBSResponseData.Raw + ret.Signature = basicResp.Signature.RightAlign() + ret.SignatureAlgorithm = getSignatureAlgorithmFromOID(basicResp.SignatureAlgorithm.Algorithm) + + if len(basicResp.Certificates) > 0 { + ret.Certificate, err = x509.ParseCertificate(basicResp.Certificates[0].FullBytes) + if err != nil { + return nil, err + } + + if err := ret.CheckSignatureFrom(ret.Certificate); err != nil { + return nil, ParseError("bad OCSP signature") + } + + if issuer != nil { + if err := issuer.CheckSignature(ret.Certificate.SignatureAlgorithm, ret.Certificate.RawTBSCertificate, ret.Certificate.Signature); err != nil { + return nil, ParseError("bad signature on embedded certificate") + } + } + } else if issuer != nil { + if err := ret.CheckSignatureFrom(issuer); err != nil { + return nil, ParseError("bad OCSP signature") + } + } + + r := basicResp.TBSResponseData.Responses[0] + + ret.SerialNumber = r.CertID.SerialNumber + + switch { + case bool(r.Good): + ret.Status = Good + case bool(r.Unknown): + ret.Status = Unknown + default: + ret.Status = Revoked + ret.RevokedAt = r.Revoked.RevocationTime + ret.RevocationReason = r.Revoked.Reason + } + + ret.ProducedAt = basicResp.TBSResponseData.ProducedAt + ret.ThisUpdate = r.ThisUpdate + ret.NextUpdate = r.NextUpdate + + return ret, nil +} + +// RequestOptions contains options for constructing OCSP requests. +type RequestOptions struct { + // Hash contains the hash function that should be used when + // constructing the OCSP request. If zero, SHA-1 will be used. + Hash crypto.Hash +} + +func (opts *RequestOptions) hash() crypto.Hash { + if opts == nil || opts.Hash == 0 { + // SHA-1 is nearly universally used in OCSP. + return crypto.SHA1 + } + return opts.Hash +} + +// CreateRequest returns a DER-encoded, OCSP request for the status of cert. If +// opts is nil then sensible defaults are used. +func CreateRequest(cert, issuer *x509.Certificate, opts *RequestOptions) ([]byte, error) { + hashFunc := opts.hash() + + // OCSP seems to be the only place where these raw hash identifiers are + // used. 
I took the following from + // http://msdn.microsoft.com/en-us/library/ff635603.aspx + var hashOID asn1.ObjectIdentifier + hashOID, ok := hashOIDs[hashFunc] + if !ok { + return nil, x509.ErrUnsupportedAlgorithm + } + + if !hashFunc.Available() { + return nil, x509.ErrUnsupportedAlgorithm + } + h := opts.hash().New() + + var publicKeyInfo struct { + Algorithm pkix.AlgorithmIdentifier + PublicKey asn1.BitString + } + if _, err := asn1.Unmarshal(issuer.RawSubjectPublicKeyInfo, &publicKeyInfo); err != nil { + return nil, err + } + + h.Write(publicKeyInfo.PublicKey.RightAlign()) + issuerKeyHash := h.Sum(nil) + + h.Reset() + h.Write(issuer.RawSubject) + issuerNameHash := h.Sum(nil) + + return asn1.Marshal(ocspRequest{ + tbsRequest{ + Version: 0, + RequestList: []request{ + { + Cert: certID{ + pkix.AlgorithmIdentifier{ + Algorithm: hashOID, + Parameters: asn1.RawValue{Tag: 5 /* ASN.1 NULL */}, + }, + issuerNameHash, + issuerKeyHash, + cert.SerialNumber, + }, + }, + }, + }, + }) +} + +// CreateResponse returns a DER-encoded OCSP response with the specified contents. +// The fields in the response are populated as follows: +// +// The responder cert is used to populate the ResponderName field, and the certificate +// itself is provided alongside the OCSP response signature. +// +// The issuer cert is used to puplate the IssuerNameHash and IssuerKeyHash fields. +// (SHA-1 is used for the hash function; this is not configurable.) +// +// The template is used to populate the SerialNumber, RevocationStatus, RevokedAt, +// RevocationReason, ThisUpdate, and NextUpdate fields. +// +// The ProducedAt date is automatically set to the current date, to the nearest minute. +func CreateResponse(issuer, responderCert *x509.Certificate, template Response, priv crypto.Signer) ([]byte, error) { + var publicKeyInfo struct { + Algorithm pkix.AlgorithmIdentifier + PublicKey asn1.BitString + } + if _, err := asn1.Unmarshal(issuer.RawSubjectPublicKeyInfo, &publicKeyInfo); err != nil { + return nil, err + } + + h := sha1.New() + h.Write(publicKeyInfo.PublicKey.RightAlign()) + issuerKeyHash := h.Sum(nil) + + h.Reset() + h.Write(issuer.RawSubject) + issuerNameHash := h.Sum(nil) + + innerResponse := singleResponse{ + CertID: certID{ + HashAlgorithm: pkix.AlgorithmIdentifier{ + Algorithm: hashOIDs[crypto.SHA1], + Parameters: asn1.RawValue{Tag: 5 /* ASN.1 NULL */}, + }, + NameHash: issuerNameHash, + IssuerKeyHash: issuerKeyHash, + SerialNumber: template.SerialNumber, + }, + ThisUpdate: template.ThisUpdate.UTC(), + NextUpdate: template.NextUpdate.UTC(), + } + + switch template.Status { + case Good: + innerResponse.Good = true + case Unknown: + innerResponse.Unknown = true + case Revoked: + innerResponse.Revoked = revokedInfo{ + RevocationTime: template.RevokedAt.UTC(), + Reason: template.RevocationReason, + } + } + + responderName := asn1.RawValue{ + Class: 2, // context-specific + Tag: 1, // explicit tag + IsCompound: true, + Bytes: responderCert.RawSubject, + } + tbsResponseData := responseData{ + Version: 0, + RawResponderName: responderName, + ProducedAt: time.Now().Truncate(time.Minute).UTC(), + Responses: []singleResponse{innerResponse}, + } + + tbsResponseDataDER, err := asn1.Marshal(tbsResponseData) + if err != nil { + return nil, err + } + + hashFunc, signatureAlgorithm, err := signingParamsForPublicKey(priv.Public(), template.SignatureAlgorithm) + if err != nil { + return nil, err + } + + responseHash := hashFunc.New() + responseHash.Write(tbsResponseDataDER) + signature, err := priv.Sign(rand.Reader, 
responseHash.Sum(nil), hashFunc) + if err != nil { + return nil, err + } + + response := basicResponse{ + TBSResponseData: tbsResponseData, + SignatureAlgorithm: signatureAlgorithm, + Signature: asn1.BitString{ + Bytes: signature, + BitLength: 8 * len(signature), + }, + } + if template.Certificate != nil { + response.Certificates = []asn1.RawValue{ + asn1.RawValue{FullBytes: template.Certificate.Raw}, + } + } + responseDER, err := asn1.Marshal(response) + if err != nil { + return nil, err + } + + return asn1.Marshal(responseASN1{ + Status: ocspSuccess, + Response: responseBytes{ + ResponseType: idPKIXOCSPBasic, + Response: responseDER, + }, + }) +} diff --git a/vendor/github.com/docker/distribution/vendor/golang.org/x/net/LICENSE b/vendor/github.com/docker/distribution/vendor/golang.org/x/net/LICENSE new file mode 100644 index 000000000..6a66aea5e --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/golang.org/x/net/LICENSE @@ -0,0 +1,27 @@ +Copyright (c) 2009 The Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/docker/distribution/vendor/golang.org/x/net/PATENTS b/vendor/github.com/docker/distribution/vendor/golang.org/x/net/PATENTS new file mode 100644 index 000000000..733099041 --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/golang.org/x/net/PATENTS @@ -0,0 +1,22 @@ +Additional IP Rights Grant (Patents) + +"This implementation" means the copyrightable works distributed by +Google as part of the Go project. + +Google hereby grants to You a perpetual, worldwide, non-exclusive, +no-charge, royalty-free, irrevocable (except as stated in this section) +patent license to make, have made, use, offer to sell, sell, import, +transfer and otherwise run, modify and propagate the contents of this +implementation of Go, where such license applies only to those patent +claims, both currently owned or controlled by Google and acquired in +the future, licensable by Google that are necessarily infringed by this +implementation of Go. 
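Editor's note, not part of the vendored diff: the ocsp package above is typically driven by building a DER request for a certificate, sending it to the responder out of band, then parsing and verifying the DER response against the issuer. A short sketch, assuming the caller supplies the certificates and the raw responder body; the helper name is hypothetical.

import (
	"crypto/x509"

	"golang.org/x/crypto/ocsp"
)

// checkStatus is a hypothetical helper: reqDER would be POSTed to the CA's
// OCSP responder by the caller, and respDER is the body it returns.
func checkStatus(cert, issuer *x509.Certificate, respDER []byte) (int, error) {
	reqDER, err := ocsp.CreateRequest(cert, issuer, nil) // nil opts defaults to SHA-1
	if err != nil {
		return 0, err
	}
	_ = reqDER // sent to the responder out of band in a real client

	resp, err := ocsp.ParseResponse(respDER, issuer)
	if err != nil {
		return 0, err
	}
	return resp.Status, nil // Good, Revoked, Unknown or ServerFailed
}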
This grant does not include claims that would be +infringed only as a consequence of further modification of this +implementation. If you or your agent or exclusive licensee institute or +order or agree to the institution of patent litigation against any +entity (including a cross-claim or counterclaim in a lawsuit) alleging +that this implementation of Go or any code incorporated within this +implementation of Go constitutes direct or contributory patent +infringement, or inducement of patent infringement, then any patent +rights granted to you under this License for this implementation of Go +shall terminate as of the date such litigation is filed. diff --git a/vendor/github.com/docker/distribution/vendor/golang.org/x/net/context/context.go b/vendor/github.com/docker/distribution/vendor/golang.org/x/net/context/context.go new file mode 100644 index 000000000..46629881b --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/golang.org/x/net/context/context.go @@ -0,0 +1,447 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package context defines the Context type, which carries deadlines, +// cancelation signals, and other request-scoped values across API boundaries +// and between processes. +// +// Incoming requests to a server should create a Context, and outgoing calls to +// servers should accept a Context. The chain of function calls between must +// propagate the Context, optionally replacing it with a modified copy created +// using WithDeadline, WithTimeout, WithCancel, or WithValue. +// +// Programs that use Contexts should follow these rules to keep interfaces +// consistent across packages and enable static analysis tools to check context +// propagation: +// +// Do not store Contexts inside a struct type; instead, pass a Context +// explicitly to each function that needs it. The Context should be the first +// parameter, typically named ctx: +// +// func DoSomething(ctx context.Context, arg Arg) error { +// // ... use ctx ... +// } +// +// Do not pass a nil Context, even if a function permits it. Pass context.TODO +// if you are unsure about which Context to use. +// +// Use context Values only for request-scoped data that transits processes and +// APIs, not for passing optional parameters to functions. +// +// The same Context may be passed to functions running in different goroutines; +// Contexts are safe for simultaneous use by multiple goroutines. +// +// See http://blog.golang.org/context for example code for a server that uses +// Contexts. +package context + +import ( + "errors" + "fmt" + "sync" + "time" +) + +// A Context carries a deadline, a cancelation signal, and other values across +// API boundaries. +// +// Context's methods may be called by multiple goroutines simultaneously. +type Context interface { + // Deadline returns the time when work done on behalf of this context + // should be canceled. Deadline returns ok==false when no deadline is + // set. Successive calls to Deadline return the same results. + Deadline() (deadline time.Time, ok bool) + + // Done returns a channel that's closed when work done on behalf of this + // context should be canceled. Done may return nil if this context can + // never be canceled. Successive calls to Done return the same value. 
+ // + // WithCancel arranges for Done to be closed when cancel is called; + // WithDeadline arranges for Done to be closed when the deadline + // expires; WithTimeout arranges for Done to be closed when the timeout + // elapses. + // + // Done is provided for use in select statements: + // + // // Stream generates values with DoSomething and sends them to out + // // until DoSomething returns an error or ctx.Done is closed. + // func Stream(ctx context.Context, out <-chan Value) error { + // for { + // v, err := DoSomething(ctx) + // if err != nil { + // return err + // } + // select { + // case <-ctx.Done(): + // return ctx.Err() + // case out <- v: + // } + // } + // } + // + // See http://blog.golang.org/pipelines for more examples of how to use + // a Done channel for cancelation. + Done() <-chan struct{} + + // Err returns a non-nil error value after Done is closed. Err returns + // Canceled if the context was canceled or DeadlineExceeded if the + // context's deadline passed. No other values for Err are defined. + // After Done is closed, successive calls to Err return the same value. + Err() error + + // Value returns the value associated with this context for key, or nil + // if no value is associated with key. Successive calls to Value with + // the same key returns the same result. + // + // Use context values only for request-scoped data that transits + // processes and API boundaries, not for passing optional parameters to + // functions. + // + // A key identifies a specific value in a Context. Functions that wish + // to store values in Context typically allocate a key in a global + // variable then use that key as the argument to context.WithValue and + // Context.Value. A key can be any type that supports equality; + // packages should define keys as an unexported type to avoid + // collisions. + // + // Packages that define a Context key should provide type-safe accessors + // for the values stores using that key: + // + // // Package user defines a User type that's stored in Contexts. + // package user + // + // import "golang.org/x/net/context" + // + // // User is the type of value stored in the Contexts. + // type User struct {...} + // + // // key is an unexported type for keys defined in this package. + // // This prevents collisions with keys defined in other packages. + // type key int + // + // // userKey is the key for user.User values in Contexts. It is + // // unexported; clients use user.NewContext and user.FromContext + // // instead of using this key directly. + // var userKey key = 0 + // + // // NewContext returns a new Context that carries value u. + // func NewContext(ctx context.Context, u *User) context.Context { + // return context.WithValue(ctx, userKey, u) + // } + // + // // FromContext returns the User value stored in ctx, if any. + // func FromContext(ctx context.Context) (*User, bool) { + // u, ok := ctx.Value(userKey).(*User) + // return u, ok + // } + Value(key interface{}) interface{} +} + +// Canceled is the error returned by Context.Err when the context is canceled. +var Canceled = errors.New("context canceled") + +// DeadlineExceeded is the error returned by Context.Err when the context's +// deadline passes. +var DeadlineExceeded = errors.New("context deadline exceeded") + +// An emptyCtx is never canceled, has no values, and has no deadline. It is not +// struct{}, since vars of this type must have distinct addresses. 
+type emptyCtx int + +func (*emptyCtx) Deadline() (deadline time.Time, ok bool) { + return +} + +func (*emptyCtx) Done() <-chan struct{} { + return nil +} + +func (*emptyCtx) Err() error { + return nil +} + +func (*emptyCtx) Value(key interface{}) interface{} { + return nil +} + +func (e *emptyCtx) String() string { + switch e { + case background: + return "context.Background" + case todo: + return "context.TODO" + } + return "unknown empty Context" +} + +var ( + background = new(emptyCtx) + todo = new(emptyCtx) +) + +// Background returns a non-nil, empty Context. It is never canceled, has no +// values, and has no deadline. It is typically used by the main function, +// initialization, and tests, and as the top-level Context for incoming +// requests. +func Background() Context { + return background +} + +// TODO returns a non-nil, empty Context. Code should use context.TODO when +// it's unclear which Context to use or it is not yet available (because the +// surrounding function has not yet been extended to accept a Context +// parameter). TODO is recognized by static analysis tools that determine +// whether Contexts are propagated correctly in a program. +func TODO() Context { + return todo +} + +// A CancelFunc tells an operation to abandon its work. +// A CancelFunc does not wait for the work to stop. +// After the first call, subsequent calls to a CancelFunc do nothing. +type CancelFunc func() + +// WithCancel returns a copy of parent with a new Done channel. The returned +// context's Done channel is closed when the returned cancel function is called +// or when the parent context's Done channel is closed, whichever happens first. +// +// Canceling this context releases resources associated with it, so code should +// call cancel as soon as the operations running in this Context complete. +func WithCancel(parent Context) (ctx Context, cancel CancelFunc) { + c := newCancelCtx(parent) + propagateCancel(parent, c) + return c, func() { c.cancel(true, Canceled) } +} + +// newCancelCtx returns an initialized cancelCtx. +func newCancelCtx(parent Context) *cancelCtx { + return &cancelCtx{ + Context: parent, + done: make(chan struct{}), + } +} + +// propagateCancel arranges for child to be canceled when parent is. +func propagateCancel(parent Context, child canceler) { + if parent.Done() == nil { + return // parent is never canceled + } + if p, ok := parentCancelCtx(parent); ok { + p.mu.Lock() + if p.err != nil { + // parent has already been canceled + child.cancel(false, p.err) + } else { + if p.children == nil { + p.children = make(map[canceler]bool) + } + p.children[child] = true + } + p.mu.Unlock() + } else { + go func() { + select { + case <-parent.Done(): + child.cancel(false, parent.Err()) + case <-child.Done(): + } + }() + } +} + +// parentCancelCtx follows a chain of parent references until it finds a +// *cancelCtx. This function understands how each of the concrete types in this +// package represents its parent. +func parentCancelCtx(parent Context) (*cancelCtx, bool) { + for { + switch c := parent.(type) { + case *cancelCtx: + return c, true + case *timerCtx: + return c.cancelCtx, true + case *valueCtx: + parent = c.Context + default: + return nil, false + } + } +} + +// removeChild removes a context from its parent. 
+func removeChild(parent Context, child canceler) { + p, ok := parentCancelCtx(parent) + if !ok { + return + } + p.mu.Lock() + if p.children != nil { + delete(p.children, child) + } + p.mu.Unlock() +} + +// A canceler is a context type that can be canceled directly. The +// implementations are *cancelCtx and *timerCtx. +type canceler interface { + cancel(removeFromParent bool, err error) + Done() <-chan struct{} +} + +// A cancelCtx can be canceled. When canceled, it also cancels any children +// that implement canceler. +type cancelCtx struct { + Context + + done chan struct{} // closed by the first cancel call. + + mu sync.Mutex + children map[canceler]bool // set to nil by the first cancel call + err error // set to non-nil by the first cancel call +} + +func (c *cancelCtx) Done() <-chan struct{} { + return c.done +} + +func (c *cancelCtx) Err() error { + c.mu.Lock() + defer c.mu.Unlock() + return c.err +} + +func (c *cancelCtx) String() string { + return fmt.Sprintf("%v.WithCancel", c.Context) +} + +// cancel closes c.done, cancels each of c's children, and, if +// removeFromParent is true, removes c from its parent's children. +func (c *cancelCtx) cancel(removeFromParent bool, err error) { + if err == nil { + panic("context: internal error: missing cancel error") + } + c.mu.Lock() + if c.err != nil { + c.mu.Unlock() + return // already canceled + } + c.err = err + close(c.done) + for child := range c.children { + // NOTE: acquiring the child's lock while holding parent's lock. + child.cancel(false, err) + } + c.children = nil + c.mu.Unlock() + + if removeFromParent { + removeChild(c.Context, c) + } +} + +// WithDeadline returns a copy of the parent context with the deadline adjusted +// to be no later than d. If the parent's deadline is already earlier than d, +// WithDeadline(parent, d) is semantically equivalent to parent. The returned +// context's Done channel is closed when the deadline expires, when the returned +// cancel function is called, or when the parent context's Done channel is +// closed, whichever happens first. +// +// Canceling this context releases resources associated with it, so code should +// call cancel as soon as the operations running in this Context complete. +func WithDeadline(parent Context, deadline time.Time) (Context, CancelFunc) { + if cur, ok := parent.Deadline(); ok && cur.Before(deadline) { + // The current deadline is already sooner than the new one. + return WithCancel(parent) + } + c := &timerCtx{ + cancelCtx: newCancelCtx(parent), + deadline: deadline, + } + propagateCancel(parent, c) + d := deadline.Sub(time.Now()) + if d <= 0 { + c.cancel(true, DeadlineExceeded) // deadline has already passed + return c, func() { c.cancel(true, Canceled) } + } + c.mu.Lock() + defer c.mu.Unlock() + if c.err == nil { + c.timer = time.AfterFunc(d, func() { + c.cancel(true, DeadlineExceeded) + }) + } + return c, func() { c.cancel(true, Canceled) } +} + +// A timerCtx carries a timer and a deadline. It embeds a cancelCtx to +// implement Done and Err. It implements cancel by stopping its timer then +// delegating to cancelCtx.cancel. +type timerCtx struct { + *cancelCtx + timer *time.Timer // Under cancelCtx.mu. 
+ + deadline time.Time +} + +func (c *timerCtx) Deadline() (deadline time.Time, ok bool) { + return c.deadline, true +} + +func (c *timerCtx) String() string { + return fmt.Sprintf("%v.WithDeadline(%s [%s])", c.cancelCtx.Context, c.deadline, c.deadline.Sub(time.Now())) +} + +func (c *timerCtx) cancel(removeFromParent bool, err error) { + c.cancelCtx.cancel(false, err) + if removeFromParent { + // Remove this timerCtx from its parent cancelCtx's children. + removeChild(c.cancelCtx.Context, c) + } + c.mu.Lock() + if c.timer != nil { + c.timer.Stop() + c.timer = nil + } + c.mu.Unlock() +} + +// WithTimeout returns WithDeadline(parent, time.Now().Add(timeout)). +// +// Canceling this context releases resources associated with it, so code should +// call cancel as soon as the operations running in this Context complete: +// +// func slowOperationWithTimeout(ctx context.Context) (Result, error) { +// ctx, cancel := context.WithTimeout(ctx, 100*time.Millisecond) +// defer cancel() // releases resources if slowOperation completes before timeout elapses +// return slowOperation(ctx) +// } +func WithTimeout(parent Context, timeout time.Duration) (Context, CancelFunc) { + return WithDeadline(parent, time.Now().Add(timeout)) +} + +// WithValue returns a copy of parent in which the value associated with key is +// val. +// +// Use context Values only for request-scoped data that transits processes and +// APIs, not for passing optional parameters to functions. +func WithValue(parent Context, key interface{}, val interface{}) Context { + return &valueCtx{parent, key, val} +} + +// A valueCtx carries a key-value pair. It implements Value for that key and +// delegates all other calls to the embedded Context. +type valueCtx struct { + Context + key, val interface{} +} + +func (c *valueCtx) String() string { + return fmt.Sprintf("%v.WithValue(%#v, %#v)", c.Context, c.key, c.val) +} + +func (c *valueCtx) Value(key interface{}) interface{} { + if c.key == key { + return c.val + } + return c.Context.Value(key) +} diff --git a/vendor/github.com/docker/distribution/vendor/golang.org/x/net/context/ctxhttp/cancelreq.go b/vendor/github.com/docker/distribution/vendor/golang.org/x/net/context/ctxhttp/cancelreq.go new file mode 100644 index 000000000..e3170e333 --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/golang.org/x/net/context/ctxhttp/cancelreq.go @@ -0,0 +1,19 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build go1.5 + +package ctxhttp + +import "net/http" + +func canceler(client *http.Client, req *http.Request) func() { + // TODO(djd): Respect any existing value of req.Cancel. + ch := make(chan struct{}) + req.Cancel = ch + + return func() { + close(ch) + } +} diff --git a/vendor/github.com/docker/distribution/vendor/golang.org/x/net/context/ctxhttp/cancelreq_go14.go b/vendor/github.com/docker/distribution/vendor/golang.org/x/net/context/ctxhttp/cancelreq_go14.go new file mode 100644 index 000000000..56bcbadb8 --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/golang.org/x/net/context/ctxhttp/cancelreq_go14.go @@ -0,0 +1,23 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
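Editor's note, not part of the vendored diff: this is the golang.org/x/net/context package that predates the standard library's context, and the pattern its documentation describes is to derive a child context with WithTimeout or WithCancel and always defer the cancel function so timers and child registrations are released. A minimal sketch; the function name and durations are made up.

import (
	"errors"
	"time"

	"golang.org/x/net/context"
)

// doWork is a hypothetical helper: it abandons its simulated work if the
// derived context's deadline passes first.
func doWork(parent context.Context) error {
	ctx, cancel := context.WithTimeout(parent, 100*time.Millisecond)
	defer cancel() // releases the timer even if the work finishes early

	select {
	case <-time.After(50 * time.Millisecond): // stand-in for real work
		return nil
	case <-ctx.Done():
		return errors.New("work abandoned: " + ctx.Err().Error())
	}
}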
+ +// +build !go1.5 + +package ctxhttp + +import "net/http" + +type requestCanceler interface { + CancelRequest(*http.Request) +} + +func canceler(client *http.Client, req *http.Request) func() { + rc, ok := client.Transport.(requestCanceler) + if !ok { + return func() {} + } + return func() { + rc.CancelRequest(req) + } +} diff --git a/vendor/github.com/docker/distribution/vendor/golang.org/x/net/context/ctxhttp/ctxhttp.go b/vendor/github.com/docker/distribution/vendor/golang.org/x/net/context/ctxhttp/ctxhttp.go new file mode 100644 index 000000000..e35860a70 --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/golang.org/x/net/context/ctxhttp/ctxhttp.go @@ -0,0 +1,145 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package ctxhttp provides helper functions for performing context-aware HTTP requests. +package ctxhttp + +import ( + "io" + "net/http" + "net/url" + "strings" + + "golang.org/x/net/context" +) + +func nop() {} + +var ( + testHookContextDoneBeforeHeaders = nop + testHookDoReturned = nop + testHookDidBodyClose = nop +) + +// Do sends an HTTP request with the provided http.Client and returns an HTTP response. +// If the client is nil, http.DefaultClient is used. +// If the context is canceled or times out, ctx.Err() will be returned. +func Do(ctx context.Context, client *http.Client, req *http.Request) (*http.Response, error) { + if client == nil { + client = http.DefaultClient + } + + // Request cancelation changed in Go 1.5, see cancelreq.go and cancelreq_go14.go. + cancel := canceler(client, req) + + type responseAndError struct { + resp *http.Response + err error + } + result := make(chan responseAndError, 1) + + // Make local copies of test hooks closed over by goroutines below. + // Prevents data races in tests. + testHookDoReturned := testHookDoReturned + testHookDidBodyClose := testHookDidBodyClose + + go func() { + resp, err := client.Do(req) + testHookDoReturned() + result <- responseAndError{resp, err} + }() + + var resp *http.Response + + select { + case <-ctx.Done(): + testHookContextDoneBeforeHeaders() + cancel() + // Clean up after the goroutine calling client.Do: + go func() { + if r := <-result; r.resp != nil { + testHookDidBodyClose() + r.resp.Body.Close() + } + }() + return nil, ctx.Err() + case r := <-result: + var err error + resp, err = r.resp, r.err + if err != nil { + return resp, err + } + } + + c := make(chan struct{}) + go func() { + select { + case <-ctx.Done(): + cancel() + case <-c: + // The response's Body is closed. + } + }() + resp.Body = ¬ifyingReader{resp.Body, c} + + return resp, nil +} + +// Get issues a GET request via the Do function. +func Get(ctx context.Context, client *http.Client, url string) (*http.Response, error) { + req, err := http.NewRequest("GET", url, nil) + if err != nil { + return nil, err + } + return Do(ctx, client, req) +} + +// Head issues a HEAD request via the Do function. +func Head(ctx context.Context, client *http.Client, url string) (*http.Response, error) { + req, err := http.NewRequest("HEAD", url, nil) + if err != nil { + return nil, err + } + return Do(ctx, client, req) +} + +// Post issues a POST request via the Do function. 
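The helpers above (and Post/PostForm just below) all funnel through Do, so a context deadline bounds the whole request. A minimal sketch of Get with a timeout; the URL is only a placeholder:

package main

import (
	"fmt"
	"io/ioutil"
	"net/http"
	"time"

	"golang.org/x/net/context"
	"golang.org/x/net/context/ctxhttp"
)

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), 3*time.Second)
	defer cancel()

	// A nil client would fall back to http.DefaultClient, per the Do docs above.
	resp, err := ctxhttp.Get(ctx, http.DefaultClient, "https://example.com/")
	if err != nil {
		fmt.Println("request failed or timed out:", err)
		return
	}
	defer resp.Body.Close()

	body, _ := ioutil.ReadAll(resp.Body)
	fmt.Println(resp.Status, len(body), "bytes")
}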
+func Post(ctx context.Context, client *http.Client, url string, bodyType string, body io.Reader) (*http.Response, error) { + req, err := http.NewRequest("POST", url, body) + if err != nil { + return nil, err + } + req.Header.Set("Content-Type", bodyType) + return Do(ctx, client, req) +} + +// PostForm issues a POST request via the Do function. +func PostForm(ctx context.Context, client *http.Client, url string, data url.Values) (*http.Response, error) { + return Post(ctx, client, url, "application/x-www-form-urlencoded", strings.NewReader(data.Encode())) +} + +// notifyingReader is an io.ReadCloser that closes the notify channel after +// Close is called or a Read fails on the underlying ReadCloser. +type notifyingReader struct { + io.ReadCloser + notify chan<- struct{} +} + +func (r *notifyingReader) Read(p []byte) (int, error) { + n, err := r.ReadCloser.Read(p) + if err != nil && r.notify != nil { + close(r.notify) + r.notify = nil + } + return n, err +} + +func (r *notifyingReader) Close() error { + err := r.ReadCloser.Close() + if r.notify != nil { + close(r.notify) + r.notify = nil + } + return err +} diff --git a/vendor/github.com/docker/distribution/vendor/golang.org/x/net/http2/.gitignore b/vendor/github.com/docker/distribution/vendor/golang.org/x/net/http2/.gitignore new file mode 100644 index 000000000..190f12234 --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/golang.org/x/net/http2/.gitignore @@ -0,0 +1,2 @@ +*~ +h2i/h2i diff --git a/vendor/github.com/docker/distribution/vendor/golang.org/x/net/http2/Dockerfile b/vendor/github.com/docker/distribution/vendor/golang.org/x/net/http2/Dockerfile new file mode 100644 index 000000000..53fc52579 --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/golang.org/x/net/http2/Dockerfile @@ -0,0 +1,51 @@ +# +# This Dockerfile builds a recent curl with HTTP/2 client support, using +# a recent nghttp2 build. +# +# See the Makefile for how to tag it. If Docker and that image is found, the +# Go tests use this curl binary for integration tests. 
+# + +FROM ubuntu:trusty + +RUN apt-get update && \ + apt-get upgrade -y && \ + apt-get install -y git-core build-essential wget + +RUN apt-get install -y --no-install-recommends \ + autotools-dev libtool pkg-config zlib1g-dev \ + libcunit1-dev libssl-dev libxml2-dev libevent-dev \ + automake autoconf + +# The list of packages nghttp2 recommends for h2load: +RUN apt-get install -y --no-install-recommends make binutils \ + autoconf automake autotools-dev \ + libtool pkg-config zlib1g-dev libcunit1-dev libssl-dev libxml2-dev \ + libev-dev libevent-dev libjansson-dev libjemalloc-dev \ + cython python3.4-dev python-setuptools + +# Note: setting NGHTTP2_VER before the git clone, so an old git clone isn't cached: +ENV NGHTTP2_VER 895da9a +RUN cd /root && git clone https://github.com/tatsuhiro-t/nghttp2.git + +WORKDIR /root/nghttp2 +RUN git reset --hard $NGHTTP2_VER +RUN autoreconf -i +RUN automake +RUN autoconf +RUN ./configure +RUN make +RUN make install + +WORKDIR /root +RUN wget http://curl.haxx.se/download/curl-7.45.0.tar.gz +RUN tar -zxvf curl-7.45.0.tar.gz +WORKDIR /root/curl-7.45.0 +RUN ./configure --with-ssl --with-nghttp2=/usr/local +RUN make +RUN make install +RUN ldconfig + +CMD ["-h"] +ENTRYPOINT ["/usr/local/bin/curl"] + diff --git a/vendor/github.com/docker/distribution/vendor/golang.org/x/net/http2/Makefile b/vendor/github.com/docker/distribution/vendor/golang.org/x/net/http2/Makefile new file mode 100644 index 000000000..55fd826f7 --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/golang.org/x/net/http2/Makefile @@ -0,0 +1,3 @@ +curlimage: + docker build -t gohttp2/curl . + diff --git a/vendor/github.com/docker/distribution/vendor/golang.org/x/net/http2/README b/vendor/github.com/docker/distribution/vendor/golang.org/x/net/http2/README new file mode 100644 index 000000000..360d5aa37 --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/golang.org/x/net/http2/README @@ -0,0 +1,20 @@ +This is a work-in-progress HTTP/2 implementation for Go. + +It will eventually live in the Go standard library and won't require +any changes to your code to use. It will just be automatic. + +Status: + +* The server support is pretty good. A few things are missing + but are being worked on. +* The client work has just started but shares a lot of code + is coming along much quicker. + +Docs are at https://godoc.org/golang.org/x/net/http2 + +Demo test server at https://http2.golang.org/ + +Help & bug reports welcome! + +Contributing: https://golang.org/doc/contribute.html +Bugs: https://golang.org/issue/new?title=x/net/http2:+ diff --git a/vendor/github.com/docker/distribution/vendor/golang.org/x/net/http2/client_conn_pool.go b/vendor/github.com/docker/distribution/vendor/golang.org/x/net/http2/client_conn_pool.go new file mode 100644 index 000000000..772ea5e92 --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/golang.org/x/net/http2/client_conn_pool.go @@ -0,0 +1,225 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Transport code's client connection pooling. + +package http2 + +import ( + "crypto/tls" + "net/http" + "sync" +) + +// ClientConnPool manages a pool of HTTP/2 client connections. +type ClientConnPool interface { + GetClientConn(req *http.Request, addr string) (*ClientConn, error) + MarkDead(*ClientConn) +} + +// TODO: use singleflight for dialing and addConnCalls? 
+type clientConnPool struct { + t *Transport + + mu sync.Mutex // TODO: maybe switch to RWMutex + // TODO: add support for sharing conns based on cert names + // (e.g. share conn for googleapis.com and appspot.com) + conns map[string][]*ClientConn // key is host:port + dialing map[string]*dialCall // currently in-flight dials + keys map[*ClientConn][]string + addConnCalls map[string]*addConnCall // in-flight addConnIfNeede calls +} + +func (p *clientConnPool) GetClientConn(req *http.Request, addr string) (*ClientConn, error) { + return p.getClientConn(req, addr, dialOnMiss) +} + +const ( + dialOnMiss = true + noDialOnMiss = false +) + +func (p *clientConnPool) getClientConn(_ *http.Request, addr string, dialOnMiss bool) (*ClientConn, error) { + p.mu.Lock() + for _, cc := range p.conns[addr] { + if cc.CanTakeNewRequest() { + p.mu.Unlock() + return cc, nil + } + } + if !dialOnMiss { + p.mu.Unlock() + return nil, ErrNoCachedConn + } + call := p.getStartDialLocked(addr) + p.mu.Unlock() + <-call.done + return call.res, call.err +} + +// dialCall is an in-flight Transport dial call to a host. +type dialCall struct { + p *clientConnPool + done chan struct{} // closed when done + res *ClientConn // valid after done is closed + err error // valid after done is closed +} + +// requires p.mu is held. +func (p *clientConnPool) getStartDialLocked(addr string) *dialCall { + if call, ok := p.dialing[addr]; ok { + // A dial is already in-flight. Don't start another. + return call + } + call := &dialCall{p: p, done: make(chan struct{})} + if p.dialing == nil { + p.dialing = make(map[string]*dialCall) + } + p.dialing[addr] = call + go call.dial(addr) + return call +} + +// run in its own goroutine. +func (c *dialCall) dial(addr string) { + c.res, c.err = c.p.t.dialClientConn(addr) + close(c.done) + + c.p.mu.Lock() + delete(c.p.dialing, addr) + if c.err == nil { + c.p.addConnLocked(addr, c.res) + } + c.p.mu.Unlock() +} + +// addConnIfNeeded makes a NewClientConn out of c if a connection for key doesn't +// already exist. It coalesces concurrent calls with the same key. +// This is used by the http1 Transport code when it creates a new connection. Because +// the http1 Transport doesn't de-dup TCP dials to outbound hosts (because it doesn't know +// the protocol), it can get into a situation where it has multiple TLS connections. +// This code decides which ones live or die. +// The return value used is whether c was used. +// c is never closed. 
+func (p *clientConnPool) addConnIfNeeded(key string, t *Transport, c *tls.Conn) (used bool, err error) { + p.mu.Lock() + for _, cc := range p.conns[key] { + if cc.CanTakeNewRequest() { + p.mu.Unlock() + return false, nil + } + } + call, dup := p.addConnCalls[key] + if !dup { + if p.addConnCalls == nil { + p.addConnCalls = make(map[string]*addConnCall) + } + call = &addConnCall{ + p: p, + done: make(chan struct{}), + } + p.addConnCalls[key] = call + go call.run(t, key, c) + } + p.mu.Unlock() + + <-call.done + if call.err != nil { + return false, call.err + } + return !dup, nil +} + +type addConnCall struct { + p *clientConnPool + done chan struct{} // closed when done + err error +} + +func (c *addConnCall) run(t *Transport, key string, tc *tls.Conn) { + cc, err := t.NewClientConn(tc) + + p := c.p + p.mu.Lock() + if err != nil { + c.err = err + } else { + p.addConnLocked(key, cc) + } + delete(p.addConnCalls, key) + p.mu.Unlock() + close(c.done) +} + +func (p *clientConnPool) addConn(key string, cc *ClientConn) { + p.mu.Lock() + p.addConnLocked(key, cc) + p.mu.Unlock() +} + +// p.mu must be held +func (p *clientConnPool) addConnLocked(key string, cc *ClientConn) { + for _, v := range p.conns[key] { + if v == cc { + return + } + } + if p.conns == nil { + p.conns = make(map[string][]*ClientConn) + } + if p.keys == nil { + p.keys = make(map[*ClientConn][]string) + } + p.conns[key] = append(p.conns[key], cc) + p.keys[cc] = append(p.keys[cc], key) +} + +func (p *clientConnPool) MarkDead(cc *ClientConn) { + p.mu.Lock() + defer p.mu.Unlock() + for _, key := range p.keys[cc] { + vv, ok := p.conns[key] + if !ok { + continue + } + newList := filterOutClientConn(vv, cc) + if len(newList) > 0 { + p.conns[key] = newList + } else { + delete(p.conns, key) + } + } + delete(p.keys, cc) +} + +func (p *clientConnPool) closeIdleConnections() { + p.mu.Lock() + defer p.mu.Unlock() + // TODO: don't close a cc if it was just added to the pool + // milliseconds ago and has never been used. There's currently + // a small race window with the HTTP/1 Transport's integration + // where it can add an idle conn just before using it, and + // somebody else can concurrently call CloseIdleConns and + // break some caller's RoundTrip. + for _, vv := range p.conns { + for _, cc := range vv { + cc.closeIfIdle() + } + } +} + +func filterOutClientConn(in []*ClientConn, exclude *ClientConn) []*ClientConn { + out := in[:0] + for _, v := range in { + if v != exclude { + out = append(out, v) + } + } + // If we filtered it out, zero out the last item to prevent + // the GC from seeing it. + if len(in) != len(out) { + in[len(in)-1] = nil + } + return out +} diff --git a/vendor/github.com/docker/distribution/vendor/golang.org/x/net/http2/configure_transport.go b/vendor/github.com/docker/distribution/vendor/golang.org/x/net/http2/configure_transport.go new file mode 100644 index 000000000..daa17f5d4 --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/golang.org/x/net/http2/configure_transport.go @@ -0,0 +1,89 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +// +build go1.6 + +package http2 + +import ( + "crypto/tls" + "fmt" + "net/http" +) + +func configureTransport(t1 *http.Transport) (*Transport, error) { + connPool := new(clientConnPool) + t2 := &Transport{ + ConnPool: noDialClientConnPool{connPool}, + t1: t1, + } + connPool.t = t2 + if err := registerHTTPSProtocol(t1, noDialH2RoundTripper{t2}); err != nil { + return nil, err + } + if t1.TLSClientConfig == nil { + t1.TLSClientConfig = new(tls.Config) + } + if !strSliceContains(t1.TLSClientConfig.NextProtos, "h2") { + t1.TLSClientConfig.NextProtos = append([]string{"h2"}, t1.TLSClientConfig.NextProtos...) + } + if !strSliceContains(t1.TLSClientConfig.NextProtos, "http/1.1") { + t1.TLSClientConfig.NextProtos = append(t1.TLSClientConfig.NextProtos, "http/1.1") + } + upgradeFn := func(authority string, c *tls.Conn) http.RoundTripper { + addr := authorityAddr(authority) + if used, err := connPool.addConnIfNeeded(addr, t2, c); err != nil { + go c.Close() + return erringRoundTripper{err} + } else if !used { + // Turns out we don't need this c. + // For example, two goroutines made requests to the same host + // at the same time, both kicking off TCP dials. (since protocol + // was unknown) + go c.Close() + } + return t2 + } + if m := t1.TLSNextProto; len(m) == 0 { + t1.TLSNextProto = map[string]func(string, *tls.Conn) http.RoundTripper{ + "h2": upgradeFn, + } + } else { + m["h2"] = upgradeFn + } + return t2, nil +} + +// registerHTTPSProtocol calls Transport.RegisterProtocol but +// convering panics into errors. +func registerHTTPSProtocol(t *http.Transport, rt http.RoundTripper) (err error) { + defer func() { + if e := recover(); e != nil { + err = fmt.Errorf("%v", e) + } + }() + t.RegisterProtocol("https", rt) + return nil +} + +// noDialClientConnPool is an implementation of http2.ClientConnPool +// which never dials. We let the HTTP/1.1 client dial and use its TLS +// connection instead. +type noDialClientConnPool struct{ *clientConnPool } + +func (p noDialClientConnPool) GetClientConn(req *http.Request, addr string) (*ClientConn, error) { + return p.getClientConn(req, addr, noDialOnMiss) +} + +// noDialH2RoundTripper is a RoundTripper which only tries to complete the request +// if there's already has a cached connection to the host. +type noDialH2RoundTripper struct{ t *Transport } + +func (rt noDialH2RoundTripper) RoundTrip(req *http.Request) (*http.Response, error) { + res, err := rt.t.RoundTrip(req) + if err == ErrNoCachedConn { + return nil, http.ErrSkipAltProtocol + } + return res, err +} diff --git a/vendor/github.com/docker/distribution/vendor/golang.org/x/net/http2/errors.go b/vendor/github.com/docker/distribution/vendor/golang.org/x/net/http2/errors.go new file mode 100644 index 000000000..71a4e2905 --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/golang.org/x/net/http2/errors.go @@ -0,0 +1,122 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package http2 + +import ( + "errors" + "fmt" +) + +// An ErrCode is an unsigned 32-bit error code as defined in the HTTP/2 spec. 
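configureTransport above is the wiring behind the package's exported ConfigureTransport entry point on Go 1.6+ (an assumption about this vendored copy; older copies may lack it). A sketch of enabling h2 on an existing *http.Transport, pointed at the demo server mentioned in the README:

package main

import (
	"fmt"
	"net/http"

	"golang.org/x/net/http2"
)

func main() {
	tr := &http.Transport{}
	// Registers the h2 upgrade function in tr.TLSNextProto and adds
	// "h2"/"http/1.1" to the TLS NextProtos list, as shown above.
	if err := http2.ConfigureTransport(tr); err != nil {
		panic(err)
	}
	client := &http.Client{Transport: tr}

	resp, err := client.Get("https://http2.golang.org/")
	if err != nil {
		fmt.Println(err)
		return
	}
	defer resp.Body.Close()
	fmt.Println("negotiated protocol:", resp.Proto) // "HTTP/2.0" when h2 was selected
}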
+type ErrCode uint32 + +const ( + ErrCodeNo ErrCode = 0x0 + ErrCodeProtocol ErrCode = 0x1 + ErrCodeInternal ErrCode = 0x2 + ErrCodeFlowControl ErrCode = 0x3 + ErrCodeSettingsTimeout ErrCode = 0x4 + ErrCodeStreamClosed ErrCode = 0x5 + ErrCodeFrameSize ErrCode = 0x6 + ErrCodeRefusedStream ErrCode = 0x7 + ErrCodeCancel ErrCode = 0x8 + ErrCodeCompression ErrCode = 0x9 + ErrCodeConnect ErrCode = 0xa + ErrCodeEnhanceYourCalm ErrCode = 0xb + ErrCodeInadequateSecurity ErrCode = 0xc + ErrCodeHTTP11Required ErrCode = 0xd +) + +var errCodeName = map[ErrCode]string{ + ErrCodeNo: "NO_ERROR", + ErrCodeProtocol: "PROTOCOL_ERROR", + ErrCodeInternal: "INTERNAL_ERROR", + ErrCodeFlowControl: "FLOW_CONTROL_ERROR", + ErrCodeSettingsTimeout: "SETTINGS_TIMEOUT", + ErrCodeStreamClosed: "STREAM_CLOSED", + ErrCodeFrameSize: "FRAME_SIZE_ERROR", + ErrCodeRefusedStream: "REFUSED_STREAM", + ErrCodeCancel: "CANCEL", + ErrCodeCompression: "COMPRESSION_ERROR", + ErrCodeConnect: "CONNECT_ERROR", + ErrCodeEnhanceYourCalm: "ENHANCE_YOUR_CALM", + ErrCodeInadequateSecurity: "INADEQUATE_SECURITY", + ErrCodeHTTP11Required: "HTTP_1_1_REQUIRED", +} + +func (e ErrCode) String() string { + if s, ok := errCodeName[e]; ok { + return s + } + return fmt.Sprintf("unknown error code 0x%x", uint32(e)) +} + +// ConnectionError is an error that results in the termination of the +// entire connection. +type ConnectionError ErrCode + +func (e ConnectionError) Error() string { return fmt.Sprintf("connection error: %s", ErrCode(e)) } + +// StreamError is an error that only affects one stream within an +// HTTP/2 connection. +type StreamError struct { + StreamID uint32 + Code ErrCode +} + +func (e StreamError) Error() string { + return fmt.Sprintf("stream error: stream ID %d; %v", e.StreamID, e.Code) +} + +// 6.9.1 The Flow Control Window +// "If a sender receives a WINDOW_UPDATE that causes a flow control +// window to exceed this maximum it MUST terminate either the stream +// or the connection, as appropriate. For streams, [...]; for the +// connection, a GOAWAY frame with a FLOW_CONTROL_ERROR code." +type goAwayFlowError struct{} + +func (goAwayFlowError) Error() string { return "connection exceeded flow control window size" } + +// connErrorReason wraps a ConnectionError with an informative error about why it occurs. + +// Errors of this type are only returned by the frame parser functions +// and converted into ConnectionError(ErrCodeProtocol). 
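From a caller's point of view, only StreamError and ConnectionError escape the package; a sketch of telling them apart. The classify helper and the sample errors are hypothetical:

package main

import (
	"fmt"

	"golang.org/x/net/http2"
)

// classify sketches how a frame-reading loop might react to the two exported
// error kinds; err is whatever (*Framer).ReadFrame returned.
func classify(err error) {
	switch e := err.(type) {
	case http2.StreamError:
		fmt.Printf("reset stream %d with %v, keep the connection\n", e.StreamID, e.Code)
	case http2.ConnectionError:
		fmt.Printf("send GOAWAY %v, close the connection\n", http2.ErrCode(e))
	default:
		fmt.Println("transport error:", err) // e.g. io.EOF from the peer
	}
}

func main() {
	classify(http2.StreamError{StreamID: 1, Code: http2.ErrCodeProtocol})
	classify(http2.ConnectionError(http2.ErrCodeFrameSize))
}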
+type connError struct { + Code ErrCode + Reason string +} + +func (e connError) Error() string { + return fmt.Sprintf("http2: connection error: %v: %v", e.Code, e.Reason) +} + +type pseudoHeaderError string + +func (e pseudoHeaderError) Error() string { + return fmt.Sprintf("invalid pseudo-header %q", string(e)) +} + +type duplicatePseudoHeaderError string + +func (e duplicatePseudoHeaderError) Error() string { + return fmt.Sprintf("duplicate pseudo-header %q", string(e)) +} + +type headerFieldNameError string + +func (e headerFieldNameError) Error() string { + return fmt.Sprintf("invalid header field name %q", string(e)) +} + +type headerFieldValueError string + +func (e headerFieldValueError) Error() string { + return fmt.Sprintf("invalid header field value %q", string(e)) +} + +var ( + errMixPseudoHeaderTypes = errors.New("mix of request and response pseudo headers") + errPseudoAfterRegular = errors.New("pseudo header field after regular") +) diff --git a/vendor/github.com/docker/distribution/vendor/golang.org/x/net/http2/fixed_buffer.go b/vendor/github.com/docker/distribution/vendor/golang.org/x/net/http2/fixed_buffer.go new file mode 100644 index 000000000..47da0f0bf --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/golang.org/x/net/http2/fixed_buffer.go @@ -0,0 +1,60 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package http2 + +import ( + "errors" +) + +// fixedBuffer is an io.ReadWriter backed by a fixed size buffer. +// It never allocates, but moves old data as new data is written. +type fixedBuffer struct { + buf []byte + r, w int +} + +var ( + errReadEmpty = errors.New("read from empty fixedBuffer") + errWriteFull = errors.New("write on full fixedBuffer") +) + +// Read copies bytes from the buffer into p. +// It is an error to read when no data is available. +func (b *fixedBuffer) Read(p []byte) (n int, err error) { + if b.r == b.w { + return 0, errReadEmpty + } + n = copy(p, b.buf[b.r:b.w]) + b.r += n + if b.r == b.w { + b.r = 0 + b.w = 0 + } + return n, nil +} + +// Len returns the number of bytes of the unread portion of the buffer. +func (b *fixedBuffer) Len() int { + return b.w - b.r +} + +// Write copies bytes from p into the buffer. +// It is an error to write more data than the buffer can hold. +func (b *fixedBuffer) Write(p []byte) (n int, err error) { + // Slide existing data to beginning. + if b.r > 0 && len(p) > len(b.buf)-b.w { + copy(b.buf, b.buf[b.r:b.w]) + b.w -= b.r + b.r = 0 + } + + // Write new data. + n = copy(b.buf[b.w:], p) + b.w += n + if n < len(p) { + err = errWriteFull + } + return n, err +} diff --git a/vendor/github.com/docker/distribution/vendor/golang.org/x/net/http2/flow.go b/vendor/github.com/docker/distribution/vendor/golang.org/x/net/http2/flow.go new file mode 100644 index 000000000..957de2542 --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/golang.org/x/net/http2/flow.go @@ -0,0 +1,50 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Flow control + +package http2 + +// flow is the flow control window's size. +type flow struct { + // n is the number of DATA bytes we're allowed to send. + // A flow is kept both on a conn and a per-stream. + n int32 + + // conn points to the shared connection-level flow that is + // shared by all streams on that conn. 
It is nil for the flow + // that's on the conn directly. + conn *flow +} + +func (f *flow) setConnFlow(cf *flow) { f.conn = cf } + +func (f *flow) available() int32 { + n := f.n + if f.conn != nil && f.conn.n < n { + n = f.conn.n + } + return n +} + +func (f *flow) take(n int32) { + if n > f.available() { + panic("internal error: took too much") + } + f.n -= n + if f.conn != nil { + f.conn.n -= n + } +} + +// add adds n bytes (positive or negative) to the flow control window. +// It returns false if the sum would exceed 2^31-1. +func (f *flow) add(n int32) bool { + remain := (1<<31 - 1) - f.n + if n > remain { + return false + } + f.n += n + return true +} diff --git a/vendor/github.com/docker/distribution/vendor/golang.org/x/net/http2/frame.go b/vendor/github.com/docker/distribution/vendor/golang.org/x/net/http2/frame.go new file mode 100644 index 000000000..6943f9338 --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/golang.org/x/net/http2/frame.go @@ -0,0 +1,1496 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package http2 + +import ( + "bytes" + "encoding/binary" + "errors" + "fmt" + "io" + "log" + "strings" + "sync" + + "golang.org/x/net/http2/hpack" +) + +const frameHeaderLen = 9 + +var padZeros = make([]byte, 255) // zeros for padding + +// A FrameType is a registered frame type as defined in +// http://http2.github.io/http2-spec/#rfc.section.11.2 +type FrameType uint8 + +const ( + FrameData FrameType = 0x0 + FrameHeaders FrameType = 0x1 + FramePriority FrameType = 0x2 + FrameRSTStream FrameType = 0x3 + FrameSettings FrameType = 0x4 + FramePushPromise FrameType = 0x5 + FramePing FrameType = 0x6 + FrameGoAway FrameType = 0x7 + FrameWindowUpdate FrameType = 0x8 + FrameContinuation FrameType = 0x9 +) + +var frameName = map[FrameType]string{ + FrameData: "DATA", + FrameHeaders: "HEADERS", + FramePriority: "PRIORITY", + FrameRSTStream: "RST_STREAM", + FrameSettings: "SETTINGS", + FramePushPromise: "PUSH_PROMISE", + FramePing: "PING", + FrameGoAway: "GOAWAY", + FrameWindowUpdate: "WINDOW_UPDATE", + FrameContinuation: "CONTINUATION", +} + +func (t FrameType) String() string { + if s, ok := frameName[t]; ok { + return s + } + return fmt.Sprintf("UNKNOWN_FRAME_TYPE_%d", uint8(t)) +} + +// Flags is a bitmask of HTTP/2 flags. +// The meaning of flags varies depending on the frame type. +type Flags uint8 + +// Has reports whether f contains all (0 or more) flags in v. +func (f Flags) Has(v Flags) bool { + return (f & v) == v +} + +// Frame-specific FrameHeader flag bits. 
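Has is the primitive behind the typed accessors further down (StreamEnded, HeadersEnded, IsAck). A small sketch using the DATA and HEADERS flag constants declared just below:

package main

import (
	"fmt"

	"golang.org/x/net/http2"
)

func main() {
	// A DATA frame header carrying both END_STREAM and PADDED.
	fh := http2.FrameHeader{
		Type:  http2.FrameData,
		Flags: http2.FlagDataEndStream | http2.FlagDataPadded,
	}
	fmt.Println(fh.Flags.Has(http2.FlagDataEndStream))   // true
	fmt.Println(fh.Flags.Has(http2.FlagDataPadded))      // true
	fmt.Println(fh.Flags.Has(http2.FlagHeadersPriority)) // false, 0x20 is not set
}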
+const ( + // Data Frame + FlagDataEndStream Flags = 0x1 + FlagDataPadded Flags = 0x8 + + // Headers Frame + FlagHeadersEndStream Flags = 0x1 + FlagHeadersEndHeaders Flags = 0x4 + FlagHeadersPadded Flags = 0x8 + FlagHeadersPriority Flags = 0x20 + + // Settings Frame + FlagSettingsAck Flags = 0x1 + + // Ping Frame + FlagPingAck Flags = 0x1 + + // Continuation Frame + FlagContinuationEndHeaders Flags = 0x4 + + FlagPushPromiseEndHeaders Flags = 0x4 + FlagPushPromisePadded Flags = 0x8 +) + +var flagName = map[FrameType]map[Flags]string{ + FrameData: { + FlagDataEndStream: "END_STREAM", + FlagDataPadded: "PADDED", + }, + FrameHeaders: { + FlagHeadersEndStream: "END_STREAM", + FlagHeadersEndHeaders: "END_HEADERS", + FlagHeadersPadded: "PADDED", + FlagHeadersPriority: "PRIORITY", + }, + FrameSettings: { + FlagSettingsAck: "ACK", + }, + FramePing: { + FlagPingAck: "ACK", + }, + FrameContinuation: { + FlagContinuationEndHeaders: "END_HEADERS", + }, + FramePushPromise: { + FlagPushPromiseEndHeaders: "END_HEADERS", + FlagPushPromisePadded: "PADDED", + }, +} + +// a frameParser parses a frame given its FrameHeader and payload +// bytes. The length of payload will always equal fh.Length (which +// might be 0). +type frameParser func(fh FrameHeader, payload []byte) (Frame, error) + +var frameParsers = map[FrameType]frameParser{ + FrameData: parseDataFrame, + FrameHeaders: parseHeadersFrame, + FramePriority: parsePriorityFrame, + FrameRSTStream: parseRSTStreamFrame, + FrameSettings: parseSettingsFrame, + FramePushPromise: parsePushPromise, + FramePing: parsePingFrame, + FrameGoAway: parseGoAwayFrame, + FrameWindowUpdate: parseWindowUpdateFrame, + FrameContinuation: parseContinuationFrame, +} + +func typeFrameParser(t FrameType) frameParser { + if f := frameParsers[t]; f != nil { + return f + } + return parseUnknownFrame +} + +// A FrameHeader is the 9 byte header of all HTTP/2 frames. +// +// See http://http2.github.io/http2-spec/#FrameHeader +type FrameHeader struct { + valid bool // caller can access []byte fields in the Frame + + // Type is the 1 byte frame type. There are ten standard frame + // types, but extension frame types may be written by WriteRawFrame + // and will be returned by ReadFrame (as UnknownFrame). + Type FrameType + + // Flags are the 1 byte of 8 potential bit flags per frame. + // They are specific to the frame type. + Flags Flags + + // Length is the length of the frame, not including the 9 byte header. + // The maximum size is one byte less than 16MB (uint24), but only + // frames up to 16KB are allowed without peer agreement. + Length uint32 + + // StreamID is which stream this frame is for. Certain frames + // are not stream-specific, in which case this field is 0. + StreamID uint32 +} + +// Header returns h. It exists so FrameHeaders can be embedded in other +// specific frame types and implement the Frame interface. 
+func (h FrameHeader) Header() FrameHeader { return h } + +func (h FrameHeader) String() string { + var buf bytes.Buffer + buf.WriteString("[FrameHeader ") + h.writeDebug(&buf) + buf.WriteByte(']') + return buf.String() +} + +func (h FrameHeader) writeDebug(buf *bytes.Buffer) { + buf.WriteString(h.Type.String()) + if h.Flags != 0 { + buf.WriteString(" flags=") + set := 0 + for i := uint8(0); i < 8; i++ { + if h.Flags&(1< 1 { + buf.WriteByte('|') + } + name := flagName[h.Type][Flags(1<>24), + byte(streamID>>16), + byte(streamID>>8), + byte(streamID)) +} + +func (f *Framer) endWrite() error { + // Now that we know the final size, fill in the FrameHeader in + // the space previously reserved for it. Abuse append. + length := len(f.wbuf) - frameHeaderLen + if length >= (1 << 24) { + return ErrFrameTooLarge + } + _ = append(f.wbuf[:0], + byte(length>>16), + byte(length>>8), + byte(length)) + if logFrameWrites { + f.logWrite() + } + + n, err := f.w.Write(f.wbuf) + if err == nil && n != len(f.wbuf) { + err = io.ErrShortWrite + } + return err +} + +func (f *Framer) logWrite() { + if f.debugFramer == nil { + f.debugFramerBuf = new(bytes.Buffer) + f.debugFramer = NewFramer(nil, f.debugFramerBuf) + f.debugFramer.logReads = false // we log it ourselves, saying "wrote" below + // Let us read anything, even if we accidentally wrote it + // in the wrong order: + f.debugFramer.AllowIllegalReads = true + } + f.debugFramerBuf.Write(f.wbuf) + fr, err := f.debugFramer.ReadFrame() + if err != nil { + log.Printf("http2: Framer %p: failed to decode just-written frame", f) + return + } + log.Printf("http2: Framer %p: wrote %v", f, summarizeFrame(fr)) +} + +func (f *Framer) writeByte(v byte) { f.wbuf = append(f.wbuf, v) } +func (f *Framer) writeBytes(v []byte) { f.wbuf = append(f.wbuf, v...) } +func (f *Framer) writeUint16(v uint16) { f.wbuf = append(f.wbuf, byte(v>>8), byte(v)) } +func (f *Framer) writeUint32(v uint32) { + f.wbuf = append(f.wbuf, byte(v>>24), byte(v>>16), byte(v>>8), byte(v)) +} + +const ( + minMaxFrameSize = 1 << 14 + maxFrameSize = 1<<24 - 1 +) + +// NewFramer returns a Framer that writes frames to w and reads them from r. +func NewFramer(w io.Writer, r io.Reader) *Framer { + fr := &Framer{ + w: w, + r: r, + logReads: logFrameReads, + } + fr.getReadBuf = func(size uint32) []byte { + if cap(fr.readBuf) >= int(size) { + return fr.readBuf[:size] + } + fr.readBuf = make([]byte, size) + return fr.readBuf + } + fr.SetMaxReadFrameSize(maxFrameSize) + return fr +} + +// SetMaxReadFrameSize sets the maximum size of a frame +// that will be read by a subsequent call to ReadFrame. +// It is the caller's responsibility to advertise this +// limit with a SETTINGS frame. +func (fr *Framer) SetMaxReadFrameSize(v uint32) { + if v > maxFrameSize { + v = maxFrameSize + } + fr.maxReadSize = v +} + +// ErrorDetail returns a more detailed error of the last error +// returned by Framer.ReadFrame. For instance, if ReadFrame +// returns a StreamError with code PROTOCOL_ERROR, ErrorDetail +// will say exactly what was invalid. ErrorDetail is not guaranteed +// to return a non-nil value and like the rest of the http2 package, +// its return value is not protected by an API compatibility promise. +// ErrorDetail is reset after the next call to ReadFrame. +func (fr *Framer) ErrorDetail() error { + return fr.errDetail +} + +// ErrFrameTooLarge is returned from Framer.ReadFrame when the peer +// sends a frame that is larger than declared with SetMaxReadFrameSize. 
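Putting the reader pieces together: construct a Framer over both halves of a connection, set the local read limit, and loop on ReadFrame. A sketch assuming conn already carries HTTP/2 traffic (preface and SETTINGS exchange omitted); the h2sketch package and readLoop helper are hypothetical:

package h2sketch

import (
	"fmt"
	"net"

	"golang.org/x/net/http2"
)

// readLoop pulls frames until the peer closes or an error occurs.
func readLoop(conn net.Conn) error {
	fr := http2.NewFramer(conn, conn) // write to conn, read from conn
	fr.SetMaxReadFrameSize(1 << 20)   // must also be advertised in our SETTINGS

	for {
		f, err := fr.ReadFrame()
		if err != nil {
			if err == http2.ErrFrameTooLarge {
				fmt.Println("peer exceeded our advertised frame size")
			}
			return err
		}
		switch f := f.(type) {
		case *http2.DataFrame:
			fmt.Printf("DATA on stream %d: %d bytes\n", f.StreamID, len(f.Data()))
		case *http2.SettingsFrame:
			fmt.Println("SETTINGS, ack =", f.IsAck())
		default:
			fmt.Println("frame:", f.Header())
		}
	}
}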
+var ErrFrameTooLarge = errors.New("http2: frame too large") + +// terminalReadFrameError reports whether err is an unrecoverable +// error from ReadFrame and no other frames should be read. +func terminalReadFrameError(err error) bool { + if _, ok := err.(StreamError); ok { + return false + } + return err != nil +} + +// ReadFrame reads a single frame. The returned Frame is only valid +// until the next call to ReadFrame. +// +// If the frame is larger than previously set with SetMaxReadFrameSize, the +// returned error is ErrFrameTooLarge. Other errors may be of type +// ConnectionError, StreamError, or anything else from from the underlying +// reader. +func (fr *Framer) ReadFrame() (Frame, error) { + fr.errDetail = nil + if fr.lastFrame != nil { + fr.lastFrame.invalidate() + } + fh, err := readFrameHeader(fr.headerBuf[:], fr.r) + if err != nil { + return nil, err + } + if fh.Length > fr.maxReadSize { + return nil, ErrFrameTooLarge + } + payload := fr.getReadBuf(fh.Length) + if _, err := io.ReadFull(fr.r, payload); err != nil { + return nil, err + } + f, err := typeFrameParser(fh.Type)(fh, payload) + if err != nil { + if ce, ok := err.(connError); ok { + return nil, fr.connError(ce.Code, ce.Reason) + } + return nil, err + } + if err := fr.checkFrameOrder(f); err != nil { + return nil, err + } + if fr.logReads { + log.Printf("http2: Framer %p: read %v", fr, summarizeFrame(f)) + } + if fh.Type == FrameHeaders && fr.ReadMetaHeaders != nil { + return fr.readMetaFrame(f.(*HeadersFrame)) + } + return f, nil +} + +// connError returns ConnectionError(code) but first +// stashes away a public reason to the caller can optionally relay it +// to the peer before hanging up on them. This might help others debug +// their implementations. +func (fr *Framer) connError(code ErrCode, reason string) error { + fr.errDetail = errors.New(reason) + return ConnectionError(code) +} + +// checkFrameOrder reports an error if f is an invalid frame to return +// next from ReadFrame. Mostly it checks whether HEADERS and +// CONTINUATION frames are contiguous. +func (fr *Framer) checkFrameOrder(f Frame) error { + last := fr.lastFrame + fr.lastFrame = f + if fr.AllowIllegalReads { + return nil + } + + fh := f.Header() + if fr.lastHeaderStream != 0 { + if fh.Type != FrameContinuation { + return fr.connError(ErrCodeProtocol, + fmt.Sprintf("got %s for stream %d; expected CONTINUATION following %s for stream %d", + fh.Type, fh.StreamID, + last.Header().Type, fr.lastHeaderStream)) + } + if fh.StreamID != fr.lastHeaderStream { + return fr.connError(ErrCodeProtocol, + fmt.Sprintf("got CONTINUATION for stream %d; expected stream %d", + fh.StreamID, fr.lastHeaderStream)) + } + } else if fh.Type == FrameContinuation { + return fr.connError(ErrCodeProtocol, fmt.Sprintf("unexpected CONTINUATION for stream %d", fh.StreamID)) + } + + switch fh.Type { + case FrameHeaders, FrameContinuation: + if fh.Flags.Has(FlagHeadersEndHeaders) { + fr.lastHeaderStream = 0 + } else { + fr.lastHeaderStream = fh.StreamID + } + } + + return nil +} + +// A DataFrame conveys arbitrary, variable-length sequences of octets +// associated with a stream. +// See http://http2.github.io/http2-spec/#rfc.section.6.1 +type DataFrame struct { + FrameHeader + data []byte +} + +func (f *DataFrame) StreamEnded() bool { + return f.FrameHeader.Flags.Has(FlagDataEndStream) +} + +// Data returns the frame's data octets, not including any padding +// size byte or padding suffix bytes. 
+// The caller must not retain the returned memory past the next +// call to ReadFrame. +func (f *DataFrame) Data() []byte { + f.checkValid() + return f.data +} + +func parseDataFrame(fh FrameHeader, payload []byte) (Frame, error) { + if fh.StreamID == 0 { + // DATA frames MUST be associated with a stream. If a + // DATA frame is received whose stream identifier + // field is 0x0, the recipient MUST respond with a + // connection error (Section 5.4.1) of type + // PROTOCOL_ERROR. + return nil, connError{ErrCodeProtocol, "DATA frame with stream ID 0"} + } + f := &DataFrame{ + FrameHeader: fh, + } + var padSize byte + if fh.Flags.Has(FlagDataPadded) { + var err error + payload, padSize, err = readByte(payload) + if err != nil { + return nil, err + } + } + if int(padSize) > len(payload) { + // If the length of the padding is greater than the + // length of the frame payload, the recipient MUST + // treat this as a connection error. + // Filed: https://github.com/http2/http2-spec/issues/610 + return nil, connError{ErrCodeProtocol, "pad size larger than data payload"} + } + f.data = payload[:len(payload)-int(padSize)] + return f, nil +} + +var errStreamID = errors.New("invalid streamid") + +func validStreamID(streamID uint32) bool { + return streamID != 0 && streamID&(1<<31) == 0 +} + +// WriteData writes a DATA frame. +// +// It will perform exactly one Write to the underlying Writer. +// It is the caller's responsibility to not call other Write methods concurrently. +func (f *Framer) WriteData(streamID uint32, endStream bool, data []byte) error { + // TODO: ignoring padding for now. will add when somebody cares. + if !validStreamID(streamID) && !f.AllowIllegalWrites { + return errStreamID + } + var flags Flags + if endStream { + flags |= FlagDataEndStream + } + f.startWrite(FrameData, flags, streamID) + f.wbuf = append(f.wbuf, data...) + return f.endWrite() +} + +// A SettingsFrame conveys configuration parameters that affect how +// endpoints communicate, such as preferences and constraints on peer +// behavior. +// +// See http://http2.github.io/http2-spec/#SETTINGS +type SettingsFrame struct { + FrameHeader + p []byte +} + +func parseSettingsFrame(fh FrameHeader, p []byte) (Frame, error) { + if fh.Flags.Has(FlagSettingsAck) && fh.Length > 0 { + // When this (ACK 0x1) bit is set, the payload of the + // SETTINGS frame MUST be empty. Receipt of a + // SETTINGS frame with the ACK flag set and a length + // field value other than 0 MUST be treated as a + // connection error (Section 5.4.1) of type + // FRAME_SIZE_ERROR. + return nil, ConnectionError(ErrCodeFrameSize) + } + if fh.StreamID != 0 { + // SETTINGS frames always apply to a connection, + // never a single stream. The stream identifier for a + // SETTINGS frame MUST be zero (0x0). If an endpoint + // receives a SETTINGS frame whose stream identifier + // field is anything other than 0x0, the endpoint MUST + // respond with a connection error (Section 5.4.1) of + // type PROTOCOL_ERROR. + return nil, ConnectionError(ErrCodeProtocol) + } + if len(p)%6 != 0 { + // Expecting even number of 6 byte settings. + return nil, ConnectionError(ErrCodeFrameSize) + } + f := &SettingsFrame{FrameHeader: fh, p: p} + if v, ok := f.Value(SettingInitialWindowSize); ok && v > (1<<31)-1 { + // Values above the maximum flow control window size of 2^31 - 1 MUST + // be treated as a connection error (Section 5.4.1) of type + // FLOW_CONTROL_ERROR. 
+ return nil, ConnectionError(ErrCodeFlowControl) + } + return f, nil +} + +func (f *SettingsFrame) IsAck() bool { + return f.FrameHeader.Flags.Has(FlagSettingsAck) +} + +func (f *SettingsFrame) Value(s SettingID) (v uint32, ok bool) { + f.checkValid() + buf := f.p + for len(buf) > 0 { + settingID := SettingID(binary.BigEndian.Uint16(buf[:2])) + if settingID == s { + return binary.BigEndian.Uint32(buf[2:6]), true + } + buf = buf[6:] + } + return 0, false +} + +// ForeachSetting runs fn for each setting. +// It stops and returns the first error. +func (f *SettingsFrame) ForeachSetting(fn func(Setting) error) error { + f.checkValid() + buf := f.p + for len(buf) > 0 { + if err := fn(Setting{ + SettingID(binary.BigEndian.Uint16(buf[:2])), + binary.BigEndian.Uint32(buf[2:6]), + }); err != nil { + return err + } + buf = buf[6:] + } + return nil +} + +// WriteSettings writes a SETTINGS frame with zero or more settings +// specified and the ACK bit not set. +// +// It will perform exactly one Write to the underlying Writer. +// It is the caller's responsibility to not call other Write methods concurrently. +func (f *Framer) WriteSettings(settings ...Setting) error { + f.startWrite(FrameSettings, 0, 0) + for _, s := range settings { + f.writeUint16(uint16(s.ID)) + f.writeUint32(s.Val) + } + return f.endWrite() +} + +// WriteSettings writes an empty SETTINGS frame with the ACK bit set. +// +// It will perform exactly one Write to the underlying Writer. +// It is the caller's responsibility to not call other Write methods concurrently. +func (f *Framer) WriteSettingsAck() error { + f.startWrite(FrameSettings, FlagSettingsAck, 0) + return f.endWrite() +} + +// A PingFrame is a mechanism for measuring a minimal round trip time +// from the sender, as well as determining whether an idle connection +// is still functional. +// See http://http2.github.io/http2-spec/#rfc.section.6.7 +type PingFrame struct { + FrameHeader + Data [8]byte +} + +func (f *PingFrame) IsAck() bool { return f.Flags.Has(FlagPingAck) } + +func parsePingFrame(fh FrameHeader, payload []byte) (Frame, error) { + if len(payload) != 8 { + return nil, ConnectionError(ErrCodeFrameSize) + } + if fh.StreamID != 0 { + return nil, ConnectionError(ErrCodeProtocol) + } + f := &PingFrame{FrameHeader: fh} + copy(f.Data[:], payload) + return f, nil +} + +func (f *Framer) WritePing(ack bool, data [8]byte) error { + var flags Flags + if ack { + flags = FlagPingAck + } + f.startWrite(FramePing, flags, 0) + f.writeBytes(data[:]) + return f.endWrite() +} + +// A GoAwayFrame informs the remote peer to stop creating streams on this connection. +// See http://http2.github.io/http2-spec/#rfc.section.6.8 +type GoAwayFrame struct { + FrameHeader + LastStreamID uint32 + ErrCode ErrCode + debugData []byte +} + +// DebugData returns any debug data in the GOAWAY frame. Its contents +// are not defined. +// The caller must not retain the returned memory past the next +// call to ReadFrame. 
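Each write helper above performs exactly one Write, so a minimal send sequence is just a series of calls. A sketch writing into a bytes.Buffer rather than a real connection; the writeSketch helper is hypothetical:

package h2sketch

import (
	"bytes"

	"golang.org/x/net/http2"
)

// writeSketch emits a SETTINGS, a SETTINGS ack, a PING and a final DATA frame.
// A real endpoint would write to its net.Conn after the connection preface.
func writeSketch() ([]byte, error) {
	var buf bytes.Buffer
	fr := http2.NewFramer(&buf, nil)

	if err := fr.WriteSettings(http2.Setting{ID: http2.SettingMaxFrameSize, Val: 1 << 20}); err != nil {
		return nil, err
	}
	if err := fr.WriteSettingsAck(); err != nil {
		return nil, err
	}
	if err := fr.WritePing(false, [8]byte{'k', 'e', 'e', 'l'}); err != nil {
		return nil, err
	}
	if err := fr.WriteData(1, true, []byte("hello")); err != nil { // END_STREAM on stream 1
		return nil, err
	}
	return buf.Bytes(), nil
}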
+func (f *GoAwayFrame) DebugData() []byte { + f.checkValid() + return f.debugData +} + +func parseGoAwayFrame(fh FrameHeader, p []byte) (Frame, error) { + if fh.StreamID != 0 { + return nil, ConnectionError(ErrCodeProtocol) + } + if len(p) < 8 { + return nil, ConnectionError(ErrCodeFrameSize) + } + return &GoAwayFrame{ + FrameHeader: fh, + LastStreamID: binary.BigEndian.Uint32(p[:4]) & (1<<31 - 1), + ErrCode: ErrCode(binary.BigEndian.Uint32(p[4:8])), + debugData: p[8:], + }, nil +} + +func (f *Framer) WriteGoAway(maxStreamID uint32, code ErrCode, debugData []byte) error { + f.startWrite(FrameGoAway, 0, 0) + f.writeUint32(maxStreamID & (1<<31 - 1)) + f.writeUint32(uint32(code)) + f.writeBytes(debugData) + return f.endWrite() +} + +// An UnknownFrame is the frame type returned when the frame type is unknown +// or no specific frame type parser exists. +type UnknownFrame struct { + FrameHeader + p []byte +} + +// Payload returns the frame's payload (after the header). It is not +// valid to call this method after a subsequent call to +// Framer.ReadFrame, nor is it valid to retain the returned slice. +// The memory is owned by the Framer and is invalidated when the next +// frame is read. +func (f *UnknownFrame) Payload() []byte { + f.checkValid() + return f.p +} + +func parseUnknownFrame(fh FrameHeader, p []byte) (Frame, error) { + return &UnknownFrame{fh, p}, nil +} + +// A WindowUpdateFrame is used to implement flow control. +// See http://http2.github.io/http2-spec/#rfc.section.6.9 +type WindowUpdateFrame struct { + FrameHeader + Increment uint32 // never read with high bit set +} + +func parseWindowUpdateFrame(fh FrameHeader, p []byte) (Frame, error) { + if len(p) != 4 { + return nil, ConnectionError(ErrCodeFrameSize) + } + inc := binary.BigEndian.Uint32(p[:4]) & 0x7fffffff // mask off high reserved bit + if inc == 0 { + // A receiver MUST treat the receipt of a + // WINDOW_UPDATE frame with an flow control window + // increment of 0 as a stream error (Section 5.4.2) of + // type PROTOCOL_ERROR; errors on the connection flow + // control window MUST be treated as a connection + // error (Section 5.4.1). + if fh.StreamID == 0 { + return nil, ConnectionError(ErrCodeProtocol) + } + return nil, StreamError{fh.StreamID, ErrCodeProtocol} + } + return &WindowUpdateFrame{ + FrameHeader: fh, + Increment: inc, + }, nil +} + +// WriteWindowUpdate writes a WINDOW_UPDATE frame. +// The increment value must be between 1 and 2,147,483,647, inclusive. +// If the Stream ID is zero, the window update applies to the +// connection as a whole. +func (f *Framer) WriteWindowUpdate(streamID, incr uint32) error { + // "The legal range for the increment to the flow control window is 1 to 2^31-1 (2,147,483,647) octets." + if (incr < 1 || incr > 2147483647) && !f.AllowIllegalWrites { + return errors.New("illegal window increment value") + } + f.startWrite(FrameWindowUpdate, 0, streamID) + f.writeUint32(incr) + return f.endWrite() +} + +// A HeadersFrame is used to open a stream and additionally carries a +// header block fragment. +type HeadersFrame struct { + FrameHeader + + // Priority is set if FlagHeadersPriority is set in the FrameHeader. 
+ Priority PriorityParam + + headerFragBuf []byte // not owned +} + +func (f *HeadersFrame) HeaderBlockFragment() []byte { + f.checkValid() + return f.headerFragBuf +} + +func (f *HeadersFrame) HeadersEnded() bool { + return f.FrameHeader.Flags.Has(FlagHeadersEndHeaders) +} + +func (f *HeadersFrame) StreamEnded() bool { + return f.FrameHeader.Flags.Has(FlagHeadersEndStream) +} + +func (f *HeadersFrame) HasPriority() bool { + return f.FrameHeader.Flags.Has(FlagHeadersPriority) +} + +func parseHeadersFrame(fh FrameHeader, p []byte) (_ Frame, err error) { + hf := &HeadersFrame{ + FrameHeader: fh, + } + if fh.StreamID == 0 { + // HEADERS frames MUST be associated with a stream. If a HEADERS frame + // is received whose stream identifier field is 0x0, the recipient MUST + // respond with a connection error (Section 5.4.1) of type + // PROTOCOL_ERROR. + return nil, connError{ErrCodeProtocol, "HEADERS frame with stream ID 0"} + } + var padLength uint8 + if fh.Flags.Has(FlagHeadersPadded) { + if p, padLength, err = readByte(p); err != nil { + return + } + } + if fh.Flags.Has(FlagHeadersPriority) { + var v uint32 + p, v, err = readUint32(p) + if err != nil { + return nil, err + } + hf.Priority.StreamDep = v & 0x7fffffff + hf.Priority.Exclusive = (v != hf.Priority.StreamDep) // high bit was set + p, hf.Priority.Weight, err = readByte(p) + if err != nil { + return nil, err + } + } + if len(p)-int(padLength) <= 0 { + return nil, StreamError{fh.StreamID, ErrCodeProtocol} + } + hf.headerFragBuf = p[:len(p)-int(padLength)] + return hf, nil +} + +// HeadersFrameParam are the parameters for writing a HEADERS frame. +type HeadersFrameParam struct { + // StreamID is the required Stream ID to initiate. + StreamID uint32 + // BlockFragment is part (or all) of a Header Block. + BlockFragment []byte + + // EndStream indicates that the header block is the last that + // the endpoint will send for the identified stream. Setting + // this flag causes the stream to enter one of "half closed" + // states. + EndStream bool + + // EndHeaders indicates that this frame contains an entire + // header block and is not followed by any + // CONTINUATION frames. + EndHeaders bool + + // PadLength is the optional number of bytes of zeros to add + // to this frame. + PadLength uint8 + + // Priority, if non-zero, includes stream priority information + // in the HEADER frame. + Priority PriorityParam +} + +// WriteHeaders writes a single HEADERS frame. +// +// This is a low-level header writing method. Encoding headers and +// splitting them into any necessary CONTINUATION frames is handled +// elsewhere. +// +// It will perform exactly one Write to the underlying Writer. +// It is the caller's responsibility to not call other Write methods concurrently. 
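Since WriteHeaders only frames a block that is already HPACK-encoded, callers typically pair it with hpack.Encoder. A sketch of one self-contained request HEADERS frame; the header values are placeholders and the headersSketch helper is hypothetical:

package h2sketch

import (
	"bytes"

	"golang.org/x/net/http2"
	"golang.org/x/net/http2/hpack"
)

// headersSketch HPACK-encodes a tiny request block and frames it in a single
// HEADERS frame. Splitting larger blocks into CONTINUATION frames would be
// the caller's job and is not shown here.
func headersSketch() ([]byte, error) {
	var block bytes.Buffer
	enc := hpack.NewEncoder(&block)
	for _, hf := range []hpack.HeaderField{
		{Name: ":method", Value: "GET"},
		{Name: ":scheme", Value: "https"},
		{Name: ":authority", Value: "index.docker.io"},
		{Name: ":path", Value: "/v2/"},
	} {
		if err := enc.WriteField(hf); err != nil {
			return nil, err
		}
	}

	var out bytes.Buffer
	fr := http2.NewFramer(&out, nil)
	err := fr.WriteHeaders(http2.HeadersFrameParam{
		StreamID:      1, // client-initiated streams are odd-numbered
		BlockFragment: block.Bytes(),
		EndStream:     true, // a GET with no body
		EndHeaders:    true, // the whole block fits in this frame
	})
	return out.Bytes(), err
}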
+func (f *Framer) WriteHeaders(p HeadersFrameParam) error { + if !validStreamID(p.StreamID) && !f.AllowIllegalWrites { + return errStreamID + } + var flags Flags + if p.PadLength != 0 { + flags |= FlagHeadersPadded + } + if p.EndStream { + flags |= FlagHeadersEndStream + } + if p.EndHeaders { + flags |= FlagHeadersEndHeaders + } + if !p.Priority.IsZero() { + flags |= FlagHeadersPriority + } + f.startWrite(FrameHeaders, flags, p.StreamID) + if p.PadLength != 0 { + f.writeByte(p.PadLength) + } + if !p.Priority.IsZero() { + v := p.Priority.StreamDep + if !validStreamID(v) && !f.AllowIllegalWrites { + return errors.New("invalid dependent stream id") + } + if p.Priority.Exclusive { + v |= 1 << 31 + } + f.writeUint32(v) + f.writeByte(p.Priority.Weight) + } + f.wbuf = append(f.wbuf, p.BlockFragment...) + f.wbuf = append(f.wbuf, padZeros[:p.PadLength]...) + return f.endWrite() +} + +// A PriorityFrame specifies the sender-advised priority of a stream. +// See http://http2.github.io/http2-spec/#rfc.section.6.3 +type PriorityFrame struct { + FrameHeader + PriorityParam +} + +// PriorityParam are the stream prioritzation parameters. +type PriorityParam struct { + // StreamDep is a 31-bit stream identifier for the + // stream that this stream depends on. Zero means no + // dependency. + StreamDep uint32 + + // Exclusive is whether the dependency is exclusive. + Exclusive bool + + // Weight is the stream's zero-indexed weight. It should be + // set together with StreamDep, or neither should be set. Per + // the spec, "Add one to the value to obtain a weight between + // 1 and 256." + Weight uint8 +} + +func (p PriorityParam) IsZero() bool { + return p == PriorityParam{} +} + +func parsePriorityFrame(fh FrameHeader, payload []byte) (Frame, error) { + if fh.StreamID == 0 { + return nil, connError{ErrCodeProtocol, "PRIORITY frame with stream ID 0"} + } + if len(payload) != 5 { + return nil, connError{ErrCodeFrameSize, fmt.Sprintf("PRIORITY frame payload size was %d; want 5", len(payload))} + } + v := binary.BigEndian.Uint32(payload[:4]) + streamID := v & 0x7fffffff // mask off high bit + return &PriorityFrame{ + FrameHeader: fh, + PriorityParam: PriorityParam{ + Weight: payload[4], + StreamDep: streamID, + Exclusive: streamID != v, // was high bit set? + }, + }, nil +} + +// WritePriority writes a PRIORITY frame. +// +// It will perform exactly one Write to the underlying Writer. +// It is the caller's responsibility to not call other Write methods concurrently. +func (f *Framer) WritePriority(streamID uint32, p PriorityParam) error { + if !validStreamID(streamID) && !f.AllowIllegalWrites { + return errStreamID + } + f.startWrite(FramePriority, 0, streamID) + v := p.StreamDep + if p.Exclusive { + v |= 1 << 31 + } + f.writeUint32(v) + f.writeByte(p.Weight) + return f.endWrite() +} + +// A RSTStreamFrame allows for abnormal termination of a stream. +// See http://http2.github.io/http2-spec/#rfc.section.6.4 +type RSTStreamFrame struct { + FrameHeader + ErrCode ErrCode +} + +func parseRSTStreamFrame(fh FrameHeader, p []byte) (Frame, error) { + if len(p) != 4 { + return nil, ConnectionError(ErrCodeFrameSize) + } + if fh.StreamID == 0 { + return nil, ConnectionError(ErrCodeProtocol) + } + return &RSTStreamFrame{fh, ErrCode(binary.BigEndian.Uint32(p[:4]))}, nil +} + +// WriteRSTStream writes a RST_STREAM frame. +// +// It will perform exactly one Write to the underlying Writer. +// It is the caller's responsibility to not call other Write methods concurrently. 
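The stream-level control frames follow the same pattern; a sketch that re-prioritizes a stream and then cancels it (the controlSketch helper is hypothetical):

package h2sketch

import (
	"bytes"

	"golang.org/x/net/http2"
)

// controlSketch makes stream 3 depend exclusively on stream 1, then resets it.
func controlSketch() ([]byte, error) {
	var buf bytes.Buffer
	fr := http2.NewFramer(&buf, nil)

	// Weight 15 means an effective weight of 16, per the
	// "add one to the value" rule quoted above.
	if err := fr.WritePriority(3, http2.PriorityParam{
		StreamDep: 1,
		Exclusive: true,
		Weight:    15,
	}); err != nil {
		return nil, err
	}
	if err := fr.WriteRSTStream(3, http2.ErrCodeCancel); err != nil {
		return nil, err
	}
	return buf.Bytes(), nil
}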
+func (f *Framer) WriteRSTStream(streamID uint32, code ErrCode) error { + if !validStreamID(streamID) && !f.AllowIllegalWrites { + return errStreamID + } + f.startWrite(FrameRSTStream, 0, streamID) + f.writeUint32(uint32(code)) + return f.endWrite() +} + +// A ContinuationFrame is used to continue a sequence of header block fragments. +// See http://http2.github.io/http2-spec/#rfc.section.6.10 +type ContinuationFrame struct { + FrameHeader + headerFragBuf []byte +} + +func parseContinuationFrame(fh FrameHeader, p []byte) (Frame, error) { + if fh.StreamID == 0 { + return nil, connError{ErrCodeProtocol, "CONTINUATION frame with stream ID 0"} + } + return &ContinuationFrame{fh, p}, nil +} + +func (f *ContinuationFrame) HeaderBlockFragment() []byte { + f.checkValid() + return f.headerFragBuf +} + +func (f *ContinuationFrame) HeadersEnded() bool { + return f.FrameHeader.Flags.Has(FlagContinuationEndHeaders) +} + +// WriteContinuation writes a CONTINUATION frame. +// +// It will perform exactly one Write to the underlying Writer. +// It is the caller's responsibility to not call other Write methods concurrently. +func (f *Framer) WriteContinuation(streamID uint32, endHeaders bool, headerBlockFragment []byte) error { + if !validStreamID(streamID) && !f.AllowIllegalWrites { + return errStreamID + } + var flags Flags + if endHeaders { + flags |= FlagContinuationEndHeaders + } + f.startWrite(FrameContinuation, flags, streamID) + f.wbuf = append(f.wbuf, headerBlockFragment...) + return f.endWrite() +} + +// A PushPromiseFrame is used to initiate a server stream. +// See http://http2.github.io/http2-spec/#rfc.section.6.6 +type PushPromiseFrame struct { + FrameHeader + PromiseID uint32 + headerFragBuf []byte // not owned +} + +func (f *PushPromiseFrame) HeaderBlockFragment() []byte { + f.checkValid() + return f.headerFragBuf +} + +func (f *PushPromiseFrame) HeadersEnded() bool { + return f.FrameHeader.Flags.Has(FlagPushPromiseEndHeaders) +} + +func parsePushPromise(fh FrameHeader, p []byte) (_ Frame, err error) { + pp := &PushPromiseFrame{ + FrameHeader: fh, + } + if pp.StreamID == 0 { + // PUSH_PROMISE frames MUST be associated with an existing, + // peer-initiated stream. The stream identifier of a + // PUSH_PROMISE frame indicates the stream it is associated + // with. If the stream identifier field specifies the value + // 0x0, a recipient MUST respond with a connection error + // (Section 5.4.1) of type PROTOCOL_ERROR. + return nil, ConnectionError(ErrCodeProtocol) + } + // The PUSH_PROMISE frame includes optional padding. + // Padding fields and flags are identical to those defined for DATA frames + var padLength uint8 + if fh.Flags.Has(FlagPushPromisePadded) { + if p, padLength, err = readByte(p); err != nil { + return + } + } + + p, pp.PromiseID, err = readUint32(p) + if err != nil { + return + } + pp.PromiseID = pp.PromiseID & (1<<31 - 1) + + if int(padLength) > len(p) { + // like the DATA frame, error out if padding is longer than the body. + return nil, ConnectionError(ErrCodeProtocol) + } + pp.headerFragBuf = p[:len(p)-int(padLength)] + return pp, nil +} + +// PushPromiseParam are the parameters for writing a PUSH_PROMISE frame. +type PushPromiseParam struct { + // StreamID is the required Stream ID to initiate. + StreamID uint32 + + // PromiseID is the required Stream ID which this + // Push Promises + PromiseID uint32 + + // BlockFragment is part (or all) of a Header Block. 
+ BlockFragment []byte + + // EndHeaders indicates that this frame contains an entire + // header block and is not followed by any + // CONTINUATION frames. + EndHeaders bool + + // PadLength is the optional number of bytes of zeros to add + // to this frame. + PadLength uint8 +} + +// WritePushPromise writes a single PushPromise Frame. +// +// As with Header Frames, This is the low level call for writing +// individual frames. Continuation frames are handled elsewhere. +// +// It will perform exactly one Write to the underlying Writer. +// It is the caller's responsibility to not call other Write methods concurrently. +func (f *Framer) WritePushPromise(p PushPromiseParam) error { + if !validStreamID(p.StreamID) && !f.AllowIllegalWrites { + return errStreamID + } + var flags Flags + if p.PadLength != 0 { + flags |= FlagPushPromisePadded + } + if p.EndHeaders { + flags |= FlagPushPromiseEndHeaders + } + f.startWrite(FramePushPromise, flags, p.StreamID) + if p.PadLength != 0 { + f.writeByte(p.PadLength) + } + if !validStreamID(p.PromiseID) && !f.AllowIllegalWrites { + return errStreamID + } + f.writeUint32(p.PromiseID) + f.wbuf = append(f.wbuf, p.BlockFragment...) + f.wbuf = append(f.wbuf, padZeros[:p.PadLength]...) + return f.endWrite() +} + +// WriteRawFrame writes a raw frame. This can be used to write +// extension frames unknown to this package. +func (f *Framer) WriteRawFrame(t FrameType, flags Flags, streamID uint32, payload []byte) error { + f.startWrite(t, flags, streamID) + f.writeBytes(payload) + return f.endWrite() +} + +func readByte(p []byte) (remain []byte, b byte, err error) { + if len(p) == 0 { + return nil, 0, io.ErrUnexpectedEOF + } + return p[1:], p[0], nil +} + +func readUint32(p []byte) (remain []byte, v uint32, err error) { + if len(p) < 4 { + return nil, 0, io.ErrUnexpectedEOF + } + return p[4:], binary.BigEndian.Uint32(p[:4]), nil +} + +type streamEnder interface { + StreamEnded() bool +} + +type headersEnder interface { + HeadersEnded() bool +} + +type headersOrContinuation interface { + headersEnder + HeaderBlockFragment() []byte +} + +// A MetaHeadersFrame is the representation of one HEADERS frame and +// zero or more contiguous CONTINUATION frames and the decoding of +// their HPACK-encoded contents. +// +// This type of frame does not appear on the wire and is only returned +// by the Framer when Framer.ReadMetaHeaders is set. +type MetaHeadersFrame struct { + *HeadersFrame + + // Fields are the fields contained in the HEADERS and + // CONTINUATION frames. The underlying slice is owned by the + // Framer and must not be retained after the next call to + // ReadFrame. + // + // Fields are guaranteed to be in the correct http2 order and + // not have unknown pseudo header fields or invalid header + // field names or values. Required pseudo header fields may be + // missing, however. Use the MetaHeadersFrame.Pseudo accessor + // method access pseudo headers. + Fields []hpack.HeaderField + + // Truncated is whether the max header list size limit was hit + // and Fields is incomplete. The hpack decoder state is still + // valid, however. + Truncated bool +} + +// PseudoValue returns the given pseudo header field's value. +// The provided pseudo field should not contain the leading colon. +func (mh *MetaHeadersFrame) PseudoValue(pseudo string) string { + for _, hf := range mh.Fields { + if !hf.IsPseudo() { + return "" + } + if hf.Name[1:] == pseudo { + return hf.Value + } + } + return "" +} + +// RegularFields returns the regular (non-pseudo) header fields of mh. 
+// The caller does not own the returned slice. +func (mh *MetaHeadersFrame) RegularFields() []hpack.HeaderField { + for i, hf := range mh.Fields { + if !hf.IsPseudo() { + return mh.Fields[i:] + } + } + return nil +} + +// PseudoFields returns the pseudo header fields of mh. +// The caller does not own the returned slice. +func (mh *MetaHeadersFrame) PseudoFields() []hpack.HeaderField { + for i, hf := range mh.Fields { + if !hf.IsPseudo() { + return mh.Fields[:i] + } + } + return mh.Fields +} + +func (mh *MetaHeadersFrame) checkPseudos() error { + var isRequest, isResponse bool + pf := mh.PseudoFields() + for i, hf := range pf { + switch hf.Name { + case ":method", ":path", ":scheme", ":authority": + isRequest = true + case ":status": + isResponse = true + default: + return pseudoHeaderError(hf.Name) + } + // Check for duplicates. + // This would be a bad algorithm, but N is 4. + // And this doesn't allocate. + for _, hf2 := range pf[:i] { + if hf.Name == hf2.Name { + return duplicatePseudoHeaderError(hf.Name) + } + } + } + if isRequest && isResponse { + return errMixPseudoHeaderTypes + } + return nil +} + +func (fr *Framer) maxHeaderStringLen() int { + v := fr.maxHeaderListSize() + if uint32(int(v)) == v { + return int(v) + } + // They had a crazy big number for MaxHeaderBytes anyway, + // so give them unlimited header lengths: + return 0 +} + +// readMetaFrame returns 0 or more CONTINUATION frames from fr and +// merge them into into the provided hf and returns a MetaHeadersFrame +// with the decoded hpack values. +func (fr *Framer) readMetaFrame(hf *HeadersFrame) (*MetaHeadersFrame, error) { + if fr.AllowIllegalReads { + return nil, errors.New("illegal use of AllowIllegalReads with ReadMetaHeaders") + } + mh := &MetaHeadersFrame{ + HeadersFrame: hf, + } + var remainSize = fr.maxHeaderListSize() + var sawRegular bool + + var invalid error // pseudo header field errors + hdec := fr.ReadMetaHeaders + hdec.SetEmitEnabled(true) + hdec.SetMaxStringLength(fr.maxHeaderStringLen()) + hdec.SetEmitFunc(func(hf hpack.HeaderField) { + if !validHeaderFieldValue(hf.Value) { + invalid = headerFieldValueError(hf.Value) + } + isPseudo := strings.HasPrefix(hf.Name, ":") + if isPseudo { + if sawRegular { + invalid = errPseudoAfterRegular + } + } else { + sawRegular = true + if !validHeaderFieldName(hf.Name) { + invalid = headerFieldNameError(hf.Name) + } + } + + if invalid != nil { + hdec.SetEmitEnabled(false) + return + } + + size := hf.Size() + if size > remainSize { + hdec.SetEmitEnabled(false) + mh.Truncated = true + return + } + remainSize -= size + + mh.Fields = append(mh.Fields, hf) + }) + // Lose reference to MetaHeadersFrame: + defer hdec.SetEmitFunc(func(hf hpack.HeaderField) {}) + + var hc headersOrContinuation = hf + for { + frag := hc.HeaderBlockFragment() + if _, err := hdec.Write(frag); err != nil { + return nil, ConnectionError(ErrCodeCompression) + } + + if hc.HeadersEnded() { + break + } + if f, err := fr.ReadFrame(); err != nil { + return nil, err + } else { + hc = f.(*ContinuationFrame) // guaranteed by checkFrameOrder + } + } + + mh.HeadersFrame.headerFragBuf = nil + mh.HeadersFrame.invalidate() + + if err := hdec.Close(); err != nil { + return nil, ConnectionError(ErrCodeCompression) + } + if invalid != nil { + fr.errDetail = invalid + return nil, StreamError{mh.StreamID, ErrCodeProtocol} + } + if err := mh.checkPseudos(); err != nil { + fr.errDetail = err + return nil, StreamError{mh.StreamID, ErrCodeProtocol} + } + return mh, nil +} + +func summarizeFrame(f Frame) string { + var buf 
bytes.Buffer + f.Header().writeDebug(&buf) + switch f := f.(type) { + case *SettingsFrame: + n := 0 + f.ForeachSetting(func(s Setting) error { + n++ + if n == 1 { + buf.WriteString(", settings:") + } + fmt.Fprintf(&buf, " %v=%v,", s.ID, s.Val) + return nil + }) + if n > 0 { + buf.Truncate(buf.Len() - 1) // remove trailing comma + } + case *DataFrame: + data := f.Data() + const max = 256 + if len(data) > max { + data = data[:max] + } + fmt.Fprintf(&buf, " data=%q", data) + if len(f.Data()) > max { + fmt.Fprintf(&buf, " (%d bytes omitted)", len(f.Data())-max) + } + case *WindowUpdateFrame: + if f.StreamID == 0 { + buf.WriteString(" (conn)") + } + fmt.Fprintf(&buf, " incr=%v", f.Increment) + case *PingFrame: + fmt.Fprintf(&buf, " ping=%q", f.Data[:]) + case *GoAwayFrame: + fmt.Fprintf(&buf, " LastStreamID=%v ErrCode=%v Debug=%q", + f.LastStreamID, f.ErrCode, f.debugData) + case *RSTStreamFrame: + fmt.Fprintf(&buf, " ErrCode=%v", f.ErrCode) + } + return buf.String() +} diff --git a/vendor/github.com/docker/distribution/vendor/golang.org/x/net/http2/go15.go b/vendor/github.com/docker/distribution/vendor/golang.org/x/net/http2/go15.go new file mode 100644 index 000000000..f0a562414 --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/golang.org/x/net/http2/go15.go @@ -0,0 +1,11 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build go1.5 + +package http2 + +import "net/http" + +func requestCancel(req *http.Request) <-chan struct{} { return req.Cancel } diff --git a/vendor/github.com/docker/distribution/vendor/golang.org/x/net/http2/gotrack.go b/vendor/github.com/docker/distribution/vendor/golang.org/x/net/http2/gotrack.go new file mode 100644 index 000000000..9933c9f8c --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/golang.org/x/net/http2/gotrack.go @@ -0,0 +1,170 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Defensive debug-only utility to track that functions run on the +// goroutine that they're supposed to. 
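readMetaFrame above is what backs Framer.ReadMetaHeaders: HEADERS plus any CONTINUATIONs are merged and HPACK-decoded into a single MetaHeadersFrame. A rough, self-contained sketch of that flow, again assuming the upstream golang.org/x/net/http2 and golang.org/x/net/http2/hpack import paths rather than this vendored path:

package main

import (
	"bytes"
	"fmt"
	"log"

	"golang.org/x/net/http2"
	"golang.org/x/net/http2/hpack"
)

func main() {
	var wire bytes.Buffer

	// Write a HEADERS frame carrying an HPACK-encoded request.
	var hbuf bytes.Buffer
	enc := hpack.NewEncoder(&hbuf)
	enc.WriteField(hpack.HeaderField{Name: ":method", Value: "GET"})
	enc.WriteField(hpack.HeaderField{Name: ":scheme", Value: "https"})
	enc.WriteField(hpack.HeaderField{Name: ":authority", Value: "example.com"})
	enc.WriteField(hpack.HeaderField{Name: ":path", Value: "/"})
	enc.WriteField(hpack.HeaderField{Name: "user-agent", Value: "demo"})

	wfr := http2.NewFramer(&wire, nil)
	if err := wfr.WriteHeaders(http2.HeadersFrameParam{
		StreamID:      1,
		BlockFragment: hbuf.Bytes(),
		EndHeaders:    true,
		EndStream:     true,
	}); err != nil {
		log.Fatal(err)
	}

	// Read it back with ReadMetaHeaders set, so the Framer returns a
	// MetaHeadersFrame with the decoded fields instead of a raw HeadersFrame.
	rfr := http2.NewFramer(nil, &wire)
	rfr.ReadMetaHeaders = hpack.NewDecoder(4096, nil)

	f, err := rfr.ReadFrame()
	if err != nil {
		log.Fatal(err)
	}
	mh := f.(*http2.MetaHeadersFrame)
	fmt.Println("path:", mh.PseudoValue("path"))
	for _, hf := range mh.RegularFields() {
		fmt.Printf("%s: %s\n", hf.Name, hf.Value)
	}
}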
+ +package http2 + +import ( + "bytes" + "errors" + "fmt" + "os" + "runtime" + "strconv" + "sync" +) + +var DebugGoroutines = os.Getenv("DEBUG_HTTP2_GOROUTINES") == "1" + +type goroutineLock uint64 + +func newGoroutineLock() goroutineLock { + if !DebugGoroutines { + return 0 + } + return goroutineLock(curGoroutineID()) +} + +func (g goroutineLock) check() { + if !DebugGoroutines { + return + } + if curGoroutineID() != uint64(g) { + panic("running on the wrong goroutine") + } +} + +func (g goroutineLock) checkNotOn() { + if !DebugGoroutines { + return + } + if curGoroutineID() == uint64(g) { + panic("running on the wrong goroutine") + } +} + +var goroutineSpace = []byte("goroutine ") + +func curGoroutineID() uint64 { + bp := littleBuf.Get().(*[]byte) + defer littleBuf.Put(bp) + b := *bp + b = b[:runtime.Stack(b, false)] + // Parse the 4707 out of "goroutine 4707 [" + b = bytes.TrimPrefix(b, goroutineSpace) + i := bytes.IndexByte(b, ' ') + if i < 0 { + panic(fmt.Sprintf("No space found in %q", b)) + } + b = b[:i] + n, err := parseUintBytes(b, 10, 64) + if err != nil { + panic(fmt.Sprintf("Failed to parse goroutine ID out of %q: %v", b, err)) + } + return n +} + +var littleBuf = sync.Pool{ + New: func() interface{} { + buf := make([]byte, 64) + return &buf + }, +} + +// parseUintBytes is like strconv.ParseUint, but using a []byte. +func parseUintBytes(s []byte, base int, bitSize int) (n uint64, err error) { + var cutoff, maxVal uint64 + + if bitSize == 0 { + bitSize = int(strconv.IntSize) + } + + s0 := s + switch { + case len(s) < 1: + err = strconv.ErrSyntax + goto Error + + case 2 <= base && base <= 36: + // valid base; nothing to do + + case base == 0: + // Look for octal, hex prefix. + switch { + case s[0] == '0' && len(s) > 1 && (s[1] == 'x' || s[1] == 'X'): + base = 16 + s = s[2:] + if len(s) < 1 { + err = strconv.ErrSyntax + goto Error + } + case s[0] == '0': + base = 8 + default: + base = 10 + } + + default: + err = errors.New("invalid base " + strconv.Itoa(base)) + goto Error + } + + n = 0 + cutoff = cutoff64(base) + maxVal = 1<= base { + n = 0 + err = strconv.ErrSyntax + goto Error + } + + if n >= cutoff { + // n*base overflows + n = 1<<64 - 1 + err = strconv.ErrRange + goto Error + } + n *= uint64(base) + + n1 := n + uint64(v) + if n1 < n || n1 > maxVal { + // n+v overflows + n = 1<<64 - 1 + err = strconv.ErrRange + goto Error + } + n = n1 + } + + return n, nil + +Error: + return n, &strconv.NumError{Func: "ParseUint", Num: string(s0), Err: err} +} + +// Return the first number n such that n*base >= 1<<64. +func cutoff64(base int) uint64 { + if base < 2 { + return 0 + } + return (1<<64-1)/uint64(base) + 1 +} diff --git a/vendor/github.com/docker/distribution/vendor/golang.org/x/net/http2/headermap.go b/vendor/github.com/docker/distribution/vendor/golang.org/x/net/http2/headermap.go new file mode 100644 index 000000000..c2805f6ac --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/golang.org/x/net/http2/headermap.go @@ -0,0 +1,78 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
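curGoroutineID above recovers the goroutine number by parsing the first line of a runtime.Stack dump ("goroutine 18 [running]:"). A stripped-down standalone version of the same trick, with currentGoroutineID as a hypothetical name; the vendored code additionally pools its stack buffer and only enforces the ownership check when DEBUG_HTTP2_GOROUTINES=1:

package main

import (
	"bytes"
	"fmt"
	"runtime"
	"strconv"
)

// currentGoroutineID parses the numeric ID out of the first line of a
// runtime.Stack dump for the current goroutine.
func currentGoroutineID() uint64 {
	buf := make([]byte, 64)
	buf = buf[:runtime.Stack(buf, false)]
	buf = bytes.TrimPrefix(buf, []byte("goroutine "))
	if i := bytes.IndexByte(buf, ' '); i >= 0 {
		buf = buf[:i]
	}
	n, err := strconv.ParseUint(string(buf), 10, 64)
	if err != nil {
		panic(err)
	}
	return n
}

func main() {
	fmt.Println("running on goroutine", currentGoroutineID())
}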
+ +package http2 + +import ( + "net/http" + "strings" +) + +var ( + commonLowerHeader = map[string]string{} // Go-Canonical-Case -> lower-case + commonCanonHeader = map[string]string{} // lower-case -> Go-Canonical-Case +) + +func init() { + for _, v := range []string{ + "accept", + "accept-charset", + "accept-encoding", + "accept-language", + "accept-ranges", + "age", + "access-control-allow-origin", + "allow", + "authorization", + "cache-control", + "content-disposition", + "content-encoding", + "content-language", + "content-length", + "content-location", + "content-range", + "content-type", + "cookie", + "date", + "etag", + "expect", + "expires", + "from", + "host", + "if-match", + "if-modified-since", + "if-none-match", + "if-unmodified-since", + "last-modified", + "link", + "location", + "max-forwards", + "proxy-authenticate", + "proxy-authorization", + "range", + "referer", + "refresh", + "retry-after", + "server", + "set-cookie", + "strict-transport-security", + "trailer", + "transfer-encoding", + "user-agent", + "vary", + "via", + "www-authenticate", + } { + chk := http.CanonicalHeaderKey(v) + commonLowerHeader[chk] = v + commonCanonHeader[v] = chk + } +} + +func lowerHeader(v string) string { + if s, ok := commonLowerHeader[v]; ok { + return s + } + return strings.ToLower(v) +} diff --git a/vendor/github.com/docker/distribution/vendor/golang.org/x/net/http2/hpack/encode.go b/vendor/github.com/docker/distribution/vendor/golang.org/x/net/http2/hpack/encode.go new file mode 100644 index 000000000..f9bb03398 --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/golang.org/x/net/http2/hpack/encode.go @@ -0,0 +1,251 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package hpack + +import ( + "io" +) + +const ( + uint32Max = ^uint32(0) + initialHeaderTableSize = 4096 +) + +type Encoder struct { + dynTab dynamicTable + // minSize is the minimum table size set by + // SetMaxDynamicTableSize after the previous Header Table Size + // Update. + minSize uint32 + // maxSizeLimit is the maximum table size this encoder + // supports. This will protect the encoder from too large + // size. + maxSizeLimit uint32 + // tableSizeUpdate indicates whether "Header Table Size + // Update" is required. + tableSizeUpdate bool + w io.Writer + buf []byte +} + +// NewEncoder returns a new Encoder which performs HPACK encoding. An +// encoded data is written to w. +func NewEncoder(w io.Writer) *Encoder { + e := &Encoder{ + minSize: uint32Max, + maxSizeLimit: initialHeaderTableSize, + tableSizeUpdate: false, + w: w, + } + e.dynTab.setMaxSize(initialHeaderTableSize) + return e +} + +// WriteField encodes f into a single Write to e's underlying Writer. +// This function may also produce bytes for "Header Table Size Update" +// if necessary. If produced, it is done before encoding f. 
+func (e *Encoder) WriteField(f HeaderField) error { + e.buf = e.buf[:0] + + if e.tableSizeUpdate { + e.tableSizeUpdate = false + if e.minSize < e.dynTab.maxSize { + e.buf = appendTableSize(e.buf, e.minSize) + } + e.minSize = uint32Max + e.buf = appendTableSize(e.buf, e.dynTab.maxSize) + } + + idx, nameValueMatch := e.searchTable(f) + if nameValueMatch { + e.buf = appendIndexed(e.buf, idx) + } else { + indexing := e.shouldIndex(f) + if indexing { + e.dynTab.add(f) + } + + if idx == 0 { + e.buf = appendNewName(e.buf, f, indexing) + } else { + e.buf = appendIndexedName(e.buf, f, idx, indexing) + } + } + n, err := e.w.Write(e.buf) + if err == nil && n != len(e.buf) { + err = io.ErrShortWrite + } + return err +} + +// searchTable searches f in both stable and dynamic header tables. +// The static header table is searched first. Only when there is no +// exact match for both name and value, the dynamic header table is +// then searched. If there is no match, i is 0. If both name and value +// match, i is the matched index and nameValueMatch becomes true. If +// only name matches, i points to that index and nameValueMatch +// becomes false. +func (e *Encoder) searchTable(f HeaderField) (i uint64, nameValueMatch bool) { + for idx, hf := range staticTable { + if !constantTimeStringCompare(hf.Name, f.Name) { + continue + } + if i == 0 { + i = uint64(idx + 1) + } + if f.Sensitive { + continue + } + if !constantTimeStringCompare(hf.Value, f.Value) { + continue + } + i = uint64(idx + 1) + nameValueMatch = true + return + } + + j, nameValueMatch := e.dynTab.search(f) + if nameValueMatch || (i == 0 && j != 0) { + i = j + uint64(len(staticTable)) + } + return +} + +// SetMaxDynamicTableSize changes the dynamic header table size to v. +// The actual size is bounded by the value passed to +// SetMaxDynamicTableSizeLimit. +func (e *Encoder) SetMaxDynamicTableSize(v uint32) { + if v > e.maxSizeLimit { + v = e.maxSizeLimit + } + if v < e.minSize { + e.minSize = v + } + e.tableSizeUpdate = true + e.dynTab.setMaxSize(v) +} + +// SetMaxDynamicTableSizeLimit changes the maximum value that can be +// specified in SetMaxDynamicTableSize to v. By default, it is set to +// 4096, which is the same size of the default dynamic header table +// size described in HPACK specification. If the current maximum +// dynamic header table size is strictly greater than v, "Header Table +// Size Update" will be done in the next WriteField call and the +// maximum dynamic header table size is truncated to v. +func (e *Encoder) SetMaxDynamicTableSizeLimit(v uint32) { + e.maxSizeLimit = v + if e.dynTab.maxSize > v { + e.tableSizeUpdate = true + e.dynTab.setMaxSize(v) + } +} + +// shouldIndex reports whether f should be indexed. +func (e *Encoder) shouldIndex(f HeaderField) bool { + return !f.Sensitive && f.Size() <= e.dynTab.maxSize +} + +// appendIndexed appends index i, as encoded in "Indexed Header Field" +// representation, to dst and returns the extended buffer. +func appendIndexed(dst []byte, i uint64) []byte { + first := len(dst) + dst = appendVarInt(dst, 7, i) + dst[first] |= 0x80 + return dst +} + +// appendNewName appends f, as encoded in one of "Literal Header field +// - New Name" representation variants, to dst and returns the +// extended buffer. +// +// If f.Sensitive is true, "Never Indexed" representation is used. If +// f.Sensitive is false and indexing is true, "Inremental Indexing" +// representation is used. 
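A short usage sketch for the Encoder defined above, assuming the upstream golang.org/x/net/http2/hpack import path; the field values are made up for illustration:

package main

import (
	"bytes"
	"fmt"

	"golang.org/x/net/http2/hpack"
)

func main() {
	var buf bytes.Buffer
	enc := hpack.NewEncoder(&buf)

	fields := []hpack.HeaderField{
		{Name: ":method", Value: "GET"},
		{Name: ":path", Value: "/api/v1/items"},
		{Name: "authorization", Value: "Bearer secret-token", Sensitive: true},
	}
	for _, f := range fields {
		if err := enc.WriteField(f); err != nil {
			panic(err)
		}
	}
	fmt.Printf("encoded %d header bytes: % x\n", buf.Len(), buf.Bytes())

	// Shrinking the table emits a "Header Table Size Update" before the
	// next WriteField, as described above.
	enc.SetMaxDynamicTableSize(256)
}

Marking the authorization field Sensitive keeps it out of the dynamic table and forces the "Never Indexed" literal representation, so it is never referenced by index on later requests.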
+func appendNewName(dst []byte, f HeaderField, indexing bool) []byte { + dst = append(dst, encodeTypeByte(indexing, f.Sensitive)) + dst = appendHpackString(dst, f.Name) + return appendHpackString(dst, f.Value) +} + +// appendIndexedName appends f and index i referring indexed name +// entry, as encoded in one of "Literal Header field - Indexed Name" +// representation variants, to dst and returns the extended buffer. +// +// If f.Sensitive is true, "Never Indexed" representation is used. If +// f.Sensitive is false and indexing is true, "Incremental Indexing" +// representation is used. +func appendIndexedName(dst []byte, f HeaderField, i uint64, indexing bool) []byte { + first := len(dst) + var n byte + if indexing { + n = 6 + } else { + n = 4 + } + dst = appendVarInt(dst, n, i) + dst[first] |= encodeTypeByte(indexing, f.Sensitive) + return appendHpackString(dst, f.Value) +} + +// appendTableSize appends v, as encoded in "Header Table Size Update" +// representation, to dst and returns the extended buffer. +func appendTableSize(dst []byte, v uint32) []byte { + first := len(dst) + dst = appendVarInt(dst, 5, uint64(v)) + dst[first] |= 0x20 + return dst +} + +// appendVarInt appends i, as encoded in variable integer form using n +// bit prefix, to dst and returns the extended buffer. +// +// See +// http://http2.github.io/http2-spec/compression.html#integer.representation +func appendVarInt(dst []byte, n byte, i uint64) []byte { + k := uint64((1 << n) - 1) + if i < k { + return append(dst, byte(i)) + } + dst = append(dst, byte(k)) + i -= k + for ; i >= 128; i >>= 7 { + dst = append(dst, byte(0x80|(i&0x7f))) + } + return append(dst, byte(i)) +} + +// appendHpackString appends s, as encoded in "String Literal" +// representation, to dst and returns the the extended buffer. +// +// s will be encoded in Huffman codes only when it produces strictly +// shorter byte string. +func appendHpackString(dst []byte, s string) []byte { + huffmanLength := HuffmanEncodeLength(s) + if huffmanLength < uint64(len(s)) { + first := len(dst) + dst = appendVarInt(dst, 7, huffmanLength) + dst = AppendHuffmanString(dst, s) + dst[first] |= 0x80 + } else { + dst = appendVarInt(dst, 7, uint64(len(s))) + dst = append(dst, s...) + } + return dst +} + +// encodeTypeByte returns type byte. If sensitive is true, type byte +// for "Never Indexed" representation is returned. If sensitive is +// false and indexing is true, type byte for "Incremental Indexing" +// representation is returned. Otherwise, type byte for "Without +// Indexing" is returned. +func encodeTypeByte(indexing, sensitive bool) byte { + if sensitive { + return 0x10 + } + if indexing { + return 0x40 + } + return 0 +} diff --git a/vendor/github.com/docker/distribution/vendor/golang.org/x/net/http2/hpack/hpack.go b/vendor/github.com/docker/distribution/vendor/golang.org/x/net/http2/hpack/hpack.go new file mode 100644 index 000000000..dcf257afa --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/golang.org/x/net/http2/hpack/hpack.go @@ -0,0 +1,542 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package hpack implements HPACK, a compression format for +// efficiently representing HTTP header fields in the context of HTTP/2. 
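appendVarInt above implements HPACK's prefix-integer encoding (RFC 7541, Section 5.1). A standalone sketch of the same scheme, using encodePrefixInt as a hypothetical name, reproduces the worked example from the RFC:

package main

import "fmt"

// encodePrefixInt encodes i using an n-bit prefix, following the same scheme
// as appendVarInt above: values that fit in the prefix are emitted directly,
// larger values fill the prefix and continue in 7-bit groups.
func encodePrefixInt(dst []byte, n byte, i uint64) []byte {
	k := uint64((1 << n) - 1)
	if i < k {
		return append(dst, byte(i))
	}
	dst = append(dst, byte(k))
	i -= k
	for ; i >= 128; i >>= 7 {
		dst = append(dst, byte(0x80|(i&0x7f)))
	}
	return append(dst, byte(i))
}

func main() {
	// RFC 7541, Appendix C.1.2: 1337 with a 5-bit prefix encodes as 1f 9a 0a.
	fmt.Printf("% x\n", encodePrefixInt(nil, 5, 1337)) // 1f 9a 0a
	// Small values fit entirely in the prefix.
	fmt.Printf("% x\n", encodePrefixInt(nil, 5, 10)) // 0a
}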
+// +// See http://tools.ietf.org/html/draft-ietf-httpbis-header-compression-09 +package hpack + +import ( + "bytes" + "errors" + "fmt" +) + +// A DecodingError is something the spec defines as a decoding error. +type DecodingError struct { + Err error +} + +func (de DecodingError) Error() string { + return fmt.Sprintf("decoding error: %v", de.Err) +} + +// An InvalidIndexError is returned when an encoder references a table +// entry before the static table or after the end of the dynamic table. +type InvalidIndexError int + +func (e InvalidIndexError) Error() string { + return fmt.Sprintf("invalid indexed representation index %d", int(e)) +} + +// A HeaderField is a name-value pair. Both the name and value are +// treated as opaque sequences of octets. +type HeaderField struct { + Name, Value string + + // Sensitive means that this header field should never be + // indexed. + Sensitive bool +} + +// IsPseudo reports whether the header field is an http2 pseudo header. +// That is, it reports whether it starts with a colon. +// It is not otherwise guaranteed to be a valid psuedo header field, +// though. +func (hf HeaderField) IsPseudo() bool { + return len(hf.Name) != 0 && hf.Name[0] == ':' +} + +func (hf HeaderField) String() string { + var suffix string + if hf.Sensitive { + suffix = " (sensitive)" + } + return fmt.Sprintf("header field %q = %q%s", hf.Name, hf.Value, suffix) +} + +// Size returns the size of an entry per RFC 7540 section 5.2. +func (hf HeaderField) Size() uint32 { + // http://http2.github.io/http2-spec/compression.html#rfc.section.4.1 + // "The size of the dynamic table is the sum of the size of + // its entries. The size of an entry is the sum of its name's + // length in octets (as defined in Section 5.2), its value's + // length in octets (see Section 5.2), plus 32. The size of + // an entry is calculated using the length of the name and + // value without any Huffman encoding applied." + + // This can overflow if somebody makes a large HeaderField + // Name and/or Value by hand, but we don't care, because that + // won't happen on the wire because the encoding doesn't allow + // it. + return uint32(len(hf.Name) + len(hf.Value) + 32) +} + +// A Decoder is the decoding context for incremental processing of +// header blocks. +type Decoder struct { + dynTab dynamicTable + emit func(f HeaderField) + + emitEnabled bool // whether calls to emit are enabled + maxStrLen int // 0 means unlimited + + // buf is the unparsed buffer. It's only written to + // saveBuf if it was truncated in the middle of a header + // block. Because it's usually not owned, we can only + // process it under Write. + buf []byte // not owned; only valid during Write + + // saveBuf is previous data passed to Write which we weren't able + // to fully parse before. Unlike buf, we own this data. + saveBuf bytes.Buffer +} + +// NewDecoder returns a new decoder with the provided maximum dynamic +// table size. The emitFunc will be called for each valid field +// parsed, in the same goroutine as calls to Write, before Write returns. +func NewDecoder(maxDynamicTableSize uint32, emitFunc func(f HeaderField)) *Decoder { + d := &Decoder{ + emit: emitFunc, + emitEnabled: true, + } + d.dynTab.allowedMaxSize = maxDynamicTableSize + d.dynTab.setMaxSize(maxDynamicTableSize) + return d +} + +// ErrStringLength is returned by Decoder.Write when the max string length +// (as configured by Decoder.SetMaxStringLength) would be violated. 
+var ErrStringLength = errors.New("hpack: string too long") + +// SetMaxStringLength sets the maximum size of a HeaderField name or +// value string. If a string exceeds this length (even after any +// decompression), Write will return ErrStringLength. +// A value of 0 means unlimited and is the default from NewDecoder. +func (d *Decoder) SetMaxStringLength(n int) { + d.maxStrLen = n +} + +// SetEmitFunc changes the callback used when new header fields +// are decoded. +// It must be non-nil. It does not affect EmitEnabled. +func (d *Decoder) SetEmitFunc(emitFunc func(f HeaderField)) { + d.emit = emitFunc +} + +// SetEmitEnabled controls whether the emitFunc provided to NewDecoder +// should be called. The default is true. +// +// This facility exists to let servers enforce MAX_HEADER_LIST_SIZE +// while still decoding and keeping in-sync with decoder state, but +// without doing unnecessary decompression or generating unnecessary +// garbage for header fields past the limit. +func (d *Decoder) SetEmitEnabled(v bool) { d.emitEnabled = v } + +// EmitEnabled reports whether calls to the emitFunc provided to NewDecoder +// are currently enabled. The default is true. +func (d *Decoder) EmitEnabled() bool { return d.emitEnabled } + +// TODO: add method *Decoder.Reset(maxSize, emitFunc) to let callers re-use Decoders and their +// underlying buffers for garbage reasons. + +func (d *Decoder) SetMaxDynamicTableSize(v uint32) { + d.dynTab.setMaxSize(v) +} + +// SetAllowedMaxDynamicTableSize sets the upper bound that the encoded +// stream (via dynamic table size updates) may set the maximum size +// to. +func (d *Decoder) SetAllowedMaxDynamicTableSize(v uint32) { + d.dynTab.allowedMaxSize = v +} + +type dynamicTable struct { + // ents is the FIFO described at + // http://http2.github.io/http2-spec/compression.html#rfc.section.2.3.2 + // The newest (low index) is append at the end, and items are + // evicted from the front. + ents []HeaderField + size uint32 + maxSize uint32 // current maxSize + allowedMaxSize uint32 // maxSize may go up to this, inclusive +} + +func (dt *dynamicTable) setMaxSize(v uint32) { + dt.maxSize = v + dt.evict() +} + +// TODO: change dynamicTable to be a struct with a slice and a size int field, +// per http://http2.github.io/http2-spec/compression.html#rfc.section.4.1: +// +// +// Then make add increment the size. maybe the max size should move from Decoder to +// dynamicTable and add should return an ok bool if there was enough space. +// +// Later we'll need a remove operation on dynamicTable. + +func (dt *dynamicTable) add(f HeaderField) { + dt.ents = append(dt.ents, f) + dt.size += f.Size() + dt.evict() +} + +// If we're too big, evict old stuff (front of the slice) +func (dt *dynamicTable) evict() { + base := dt.ents // keep base pointer of slice + for dt.size > dt.maxSize { + dt.size -= dt.ents[0].Size() + dt.ents = dt.ents[1:] + } + + // Shift slice contents down if we evicted things. + if len(dt.ents) != len(base) { + copy(base, dt.ents) + dt.ents = base[:len(dt.ents)] + } +} + +// constantTimeStringCompare compares string a and b in a constant +// time manner. +func constantTimeStringCompare(a, b string) bool { + if len(a) != len(b) { + return false + } + + c := byte(0) + + for i := 0; i < len(a); i++ { + c |= a[i] ^ b[i] + } + + return c == 0 +} + +// Search searches f in the table. The return value i is 0 if there is +// no name match. If there is name match or name/value match, i is the +// index of that entry (1-based). 
If both name and value match, +// nameValueMatch becomes true. +func (dt *dynamicTable) search(f HeaderField) (i uint64, nameValueMatch bool) { + l := len(dt.ents) + for j := l - 1; j >= 0; j-- { + ent := dt.ents[j] + if !constantTimeStringCompare(ent.Name, f.Name) { + continue + } + if i == 0 { + i = uint64(l - j) + } + if f.Sensitive { + continue + } + if !constantTimeStringCompare(ent.Value, f.Value) { + continue + } + i = uint64(l - j) + nameValueMatch = true + return + } + return +} + +func (d *Decoder) maxTableIndex() int { + return len(d.dynTab.ents) + len(staticTable) +} + +func (d *Decoder) at(i uint64) (hf HeaderField, ok bool) { + if i < 1 { + return + } + if i > uint64(d.maxTableIndex()) { + return + } + if i <= uint64(len(staticTable)) { + return staticTable[i-1], true + } + dents := d.dynTab.ents + return dents[len(dents)-(int(i)-len(staticTable))], true +} + +// Decode decodes an entire block. +// +// TODO: remove this method and make it incremental later? This is +// easier for debugging now. +func (d *Decoder) DecodeFull(p []byte) ([]HeaderField, error) { + var hf []HeaderField + saveFunc := d.emit + defer func() { d.emit = saveFunc }() + d.emit = func(f HeaderField) { hf = append(hf, f) } + if _, err := d.Write(p); err != nil { + return nil, err + } + if err := d.Close(); err != nil { + return nil, err + } + return hf, nil +} + +func (d *Decoder) Close() error { + if d.saveBuf.Len() > 0 { + d.saveBuf.Reset() + return DecodingError{errors.New("truncated headers")} + } + return nil +} + +func (d *Decoder) Write(p []byte) (n int, err error) { + if len(p) == 0 { + // Prevent state machine CPU attacks (making us redo + // work up to the point of finding out we don't have + // enough data) + return + } + // Only copy the data if we have to. Optimistically assume + // that p will contain a complete header block. + if d.saveBuf.Len() == 0 { + d.buf = p + } else { + d.saveBuf.Write(p) + d.buf = d.saveBuf.Bytes() + d.saveBuf.Reset() + } + + for len(d.buf) > 0 { + err = d.parseHeaderFieldRepr() + if err == errNeedMore { + // Extra paranoia, making sure saveBuf won't + // get too large. All the varint and string + // reading code earlier should already catch + // overlong things and return ErrStringLength, + // but keep this as a last resort. + const varIntOverhead = 8 // conservative + if d.maxStrLen != 0 && int64(len(d.buf)) > 2*(int64(d.maxStrLen)+varIntOverhead) { + return 0, ErrStringLength + } + d.saveBuf.Write(d.buf) + return len(p), nil + } + if err != nil { + break + } + } + return len(p), err +} + +// errNeedMore is an internal sentinel error value that means the +// buffer is truncated and we need to read more data before we can +// continue parsing. +var errNeedMore = errors.New("need more data") + +type indexType int + +const ( + indexedTrue indexType = iota + indexedFalse + indexedNever +) + +func (v indexType) indexed() bool { return v == indexedTrue } +func (v indexType) sensitive() bool { return v == indexedNever } + +// returns errNeedMore if there isn't enough data available. +// any other error is fatal. +// consumes d.buf iff it returns nil. +// precondition: must be called with len(d.buf) > 0 +func (d *Decoder) parseHeaderFieldRepr() error { + b := d.buf[0] + switch { + case b&128 != 0: + // Indexed representation. + // High bit set? 
+ // http://http2.github.io/http2-spec/compression.html#rfc.section.6.1 + return d.parseFieldIndexed() + case b&192 == 64: + // 6.2.1 Literal Header Field with Incremental Indexing + // 0b10xxxxxx: top two bits are 10 + // http://http2.github.io/http2-spec/compression.html#rfc.section.6.2.1 + return d.parseFieldLiteral(6, indexedTrue) + case b&240 == 0: + // 6.2.2 Literal Header Field without Indexing + // 0b0000xxxx: top four bits are 0000 + // http://http2.github.io/http2-spec/compression.html#rfc.section.6.2.2 + return d.parseFieldLiteral(4, indexedFalse) + case b&240 == 16: + // 6.2.3 Literal Header Field never Indexed + // 0b0001xxxx: top four bits are 0001 + // http://http2.github.io/http2-spec/compression.html#rfc.section.6.2.3 + return d.parseFieldLiteral(4, indexedNever) + case b&224 == 32: + // 6.3 Dynamic Table Size Update + // Top three bits are '001'. + // http://http2.github.io/http2-spec/compression.html#rfc.section.6.3 + return d.parseDynamicTableSizeUpdate() + } + + return DecodingError{errors.New("invalid encoding")} +} + +// (same invariants and behavior as parseHeaderFieldRepr) +func (d *Decoder) parseFieldIndexed() error { + buf := d.buf + idx, buf, err := readVarInt(7, buf) + if err != nil { + return err + } + hf, ok := d.at(idx) + if !ok { + return DecodingError{InvalidIndexError(idx)} + } + d.buf = buf + return d.callEmit(HeaderField{Name: hf.Name, Value: hf.Value}) +} + +// (same invariants and behavior as parseHeaderFieldRepr) +func (d *Decoder) parseFieldLiteral(n uint8, it indexType) error { + buf := d.buf + nameIdx, buf, err := readVarInt(n, buf) + if err != nil { + return err + } + + var hf HeaderField + wantStr := d.emitEnabled || it.indexed() + if nameIdx > 0 { + ihf, ok := d.at(nameIdx) + if !ok { + return DecodingError{InvalidIndexError(nameIdx)} + } + hf.Name = ihf.Name + } else { + hf.Name, buf, err = d.readString(buf, wantStr) + if err != nil { + return err + } + } + hf.Value, buf, err = d.readString(buf, wantStr) + if err != nil { + return err + } + d.buf = buf + if it.indexed() { + d.dynTab.add(hf) + } + hf.Sensitive = it.sensitive() + return d.callEmit(hf) +} + +func (d *Decoder) callEmit(hf HeaderField) error { + if d.maxStrLen != 0 { + if len(hf.Name) > d.maxStrLen || len(hf.Value) > d.maxStrLen { + return ErrStringLength + } + } + if d.emitEnabled { + d.emit(hf) + } + return nil +} + +// (same invariants and behavior as parseHeaderFieldRepr) +func (d *Decoder) parseDynamicTableSizeUpdate() error { + buf := d.buf + size, buf, err := readVarInt(5, buf) + if err != nil { + return err + } + if size > uint64(d.dynTab.allowedMaxSize) { + return DecodingError{errors.New("dynamic table size update too large")} + } + d.dynTab.setMaxSize(uint32(size)) + d.buf = buf + return nil +} + +var errVarintOverflow = DecodingError{errors.New("varint integer overflow")} + +// readVarInt reads an unsigned variable length integer off the +// beginning of p. n is the parameter as described in +// http://http2.github.io/http2-spec/compression.html#rfc.section.5.1. +// +// n must always be between 1 and 8. +// +// The returned remain buffer is either a smaller suffix of p, or err != nil. +// The error is errNeedMore if p doesn't contain a complete integer. 
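Putting the Decoder pieces above together, a minimal round trip through the exported API, assuming the upstream golang.org/x/net/http2/hpack import path; the header values are illustrative:

package main

import (
	"bytes"
	"fmt"

	"golang.org/x/net/http2/hpack"
)

func main() {
	// Encode a block first so there is something to decode.
	var buf bytes.Buffer
	enc := hpack.NewEncoder(&buf)
	enc.WriteField(hpack.HeaderField{Name: ":status", Value: "200"})
	enc.WriteField(hpack.HeaderField{Name: "content-type", Value: "text/html"})

	// The emit callback runs once per decoded field, during Write.
	dec := hpack.NewDecoder(4096, func(f hpack.HeaderField) {
		fmt.Printf("%s = %q\n", f.Name, f.Value)
	})
	if _, err := dec.Write(buf.Bytes()); err != nil {
		panic(err)
	}
	// Close reports an error if the block ended mid-field.
	if err := dec.Close(); err != nil {
		panic(err)
	}
}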
+func readVarInt(n byte, p []byte) (i uint64, remain []byte, err error) { + if n < 1 || n > 8 { + panic("bad n") + } + if len(p) == 0 { + return 0, p, errNeedMore + } + i = uint64(p[0]) + if n < 8 { + i &= (1 << uint64(n)) - 1 + } + if i < (1< 0 { + b := p[0] + p = p[1:] + i += uint64(b&127) << m + if b&128 == 0 { + return i, p, nil + } + m += 7 + if m >= 63 { // TODO: proper overflow check. making this up. + return 0, origP, errVarintOverflow + } + } + return 0, origP, errNeedMore +} + +// readString decodes an hpack string from p. +// +// wantStr is whether s will be used. If false, decompression and +// []byte->string garbage are skipped if s will be ignored +// anyway. This does mean that huffman decoding errors for non-indexed +// strings past the MAX_HEADER_LIST_SIZE are ignored, but the server +// is returning an error anyway, and because they're not indexed, the error +// won't affect the decoding state. +func (d *Decoder) readString(p []byte, wantStr bool) (s string, remain []byte, err error) { + if len(p) == 0 { + return "", p, errNeedMore + } + isHuff := p[0]&128 != 0 + strLen, p, err := readVarInt(7, p) + if err != nil { + return "", p, err + } + if d.maxStrLen != 0 && strLen > uint64(d.maxStrLen) { + return "", nil, ErrStringLength + } + if uint64(len(p)) < strLen { + return "", p, errNeedMore + } + if !isHuff { + if wantStr { + s = string(p[:strLen]) + } + return s, p[strLen:], nil + } + + if wantStr { + buf := bufPool.Get().(*bytes.Buffer) + buf.Reset() // don't trust others + defer bufPool.Put(buf) + if err := huffmanDecode(buf, d.maxStrLen, p[:strLen]); err != nil { + buf.Reset() + return "", nil, err + } + s = buf.String() + buf.Reset() // be nice to GC + } + return s, p[strLen:], nil +} diff --git a/vendor/github.com/docker/distribution/vendor/golang.org/x/net/http2/hpack/huffman.go b/vendor/github.com/docker/distribution/vendor/golang.org/x/net/http2/hpack/huffman.go new file mode 100644 index 000000000..eb4b1f05c --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/golang.org/x/net/http2/hpack/huffman.go @@ -0,0 +1,190 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package hpack + +import ( + "bytes" + "errors" + "io" + "sync" +) + +var bufPool = sync.Pool{ + New: func() interface{} { return new(bytes.Buffer) }, +} + +// HuffmanDecode decodes the string in v and writes the expanded +// result to w, returning the number of bytes written to w and the +// Write call's return value. At most one Write call is made. +func HuffmanDecode(w io.Writer, v []byte) (int, error) { + buf := bufPool.Get().(*bytes.Buffer) + buf.Reset() + defer bufPool.Put(buf) + if err := huffmanDecode(buf, 0, v); err != nil { + return 0, err + } + return w.Write(buf.Bytes()) +} + +// HuffmanDecodeToString decodes the string in v. +func HuffmanDecodeToString(v []byte) (string, error) { + buf := bufPool.Get().(*bytes.Buffer) + buf.Reset() + defer bufPool.Put(buf) + if err := huffmanDecode(buf, 0, v); err != nil { + return "", err + } + return buf.String(), nil +} + +// ErrInvalidHuffman is returned for errors found decoding +// Huffman-encoded strings. +var ErrInvalidHuffman = errors.New("hpack: invalid Huffman-encoded data") + +// huffmanDecode decodes v to buf. +// If maxLen is greater than 0, attempts to write more to buf than +// maxLen bytes will return ErrStringLength. 
+func huffmanDecode(buf *bytes.Buffer, maxLen int, v []byte) error { + n := rootHuffmanNode + cur, nbits := uint(0), uint8(0) + for _, b := range v { + cur = cur<<8 | uint(b) + nbits += 8 + for nbits >= 8 { + idx := byte(cur >> (nbits - 8)) + n = n.children[idx] + if n == nil { + return ErrInvalidHuffman + } + if n.children == nil { + if maxLen != 0 && buf.Len() == maxLen { + return ErrStringLength + } + buf.WriteByte(n.sym) + nbits -= n.codeLen + n = rootHuffmanNode + } else { + nbits -= 8 + } + } + } + for nbits > 0 { + n = n.children[byte(cur<<(8-nbits))] + if n.children != nil || n.codeLen > nbits { + break + } + buf.WriteByte(n.sym) + nbits -= n.codeLen + n = rootHuffmanNode + } + return nil +} + +type node struct { + // children is non-nil for internal nodes + children []*node + + // The following are only valid if children is nil: + codeLen uint8 // number of bits that led to the output of sym + sym byte // output symbol +} + +func newInternalNode() *node { + return &node{children: make([]*node, 256)} +} + +var rootHuffmanNode = newInternalNode() + +func init() { + if len(huffmanCodes) != 256 { + panic("unexpected size") + } + for i, code := range huffmanCodes { + addDecoderNode(byte(i), code, huffmanCodeLen[i]) + } +} + +func addDecoderNode(sym byte, code uint32, codeLen uint8) { + cur := rootHuffmanNode + for codeLen > 8 { + codeLen -= 8 + i := uint8(code >> codeLen) + if cur.children[i] == nil { + cur.children[i] = newInternalNode() + } + cur = cur.children[i] + } + shift := 8 - codeLen + start, end := int(uint8(code<> (nbits - rembits)) + dst[len(dst)-1] |= t + } + + return dst +} + +// HuffmanEncodeLength returns the number of bytes required to encode +// s in Huffman codes. The result is round up to byte boundary. +func HuffmanEncodeLength(s string) uint64 { + n := uint64(0) + for i := 0; i < len(s); i++ { + n += uint64(huffmanCodeLen[s[i]]) + } + return (n + 7) / 8 +} + +// appendByteToHuffmanCode appends Huffman code for c to dst and +// returns the extended buffer and the remaining bits in the last +// element. The appending is not byte aligned and the remaining bits +// in the last element of dst is given in rembits. +func appendByteToHuffmanCode(dst []byte, rembits uint8, c byte) ([]byte, uint8) { + code := huffmanCodes[c] + nbits := huffmanCodeLen[c] + + for { + if rembits > nbits { + t := uint8(code << (rembits - nbits)) + dst[len(dst)-1] |= t + rembits -= nbits + break + } + + t := uint8(code >> (nbits - rembits)) + dst[len(dst)-1] |= t + + nbits -= rembits + rembits = 8 + + if nbits == 0 { + break + } + + dst = append(dst, 0) + } + + return dst, rembits +} diff --git a/vendor/github.com/docker/distribution/vendor/golang.org/x/net/http2/hpack/tables.go b/vendor/github.com/docker/distribution/vendor/golang.org/x/net/http2/hpack/tables.go new file mode 100644 index 000000000..b9283a023 --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/golang.org/x/net/http2/hpack/tables.go @@ -0,0 +1,352 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
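The Huffman helpers above are exported, so they can be exercised directly. A small sketch, assuming the upstream golang.org/x/net/http2/hpack import path; the expected bytes are the worked example from RFC 7541, Appendix C.4.1:

package main

import (
	"fmt"

	"golang.org/x/net/http2/hpack"
)

func main() {
	s := "www.example.com"

	// Per RFC 7541, Appendix C.4.1, this string Huffman-encodes into 12
	// bytes: f1e3 c2e5 f23a 6ba0 ab90 f4ff.
	fmt.Println("huffman length:", hpack.HuffmanEncodeLength(s)) // 12
	enc := hpack.AppendHuffmanString(nil, s)
	fmt.Printf("encoded: % x\n", enc)

	dec, err := hpack.HuffmanDecodeToString(enc)
	if err != nil {
		panic(err)
	}
	fmt.Println("decoded:", dec) // www.example.com
}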
+ +package hpack + +func pair(name, value string) HeaderField { + return HeaderField{Name: name, Value: value} +} + +// http://tools.ietf.org/html/draft-ietf-httpbis-header-compression-07#appendix-B +var staticTable = [...]HeaderField{ + pair(":authority", ""), // index 1 (1-based) + pair(":method", "GET"), + pair(":method", "POST"), + pair(":path", "/"), + pair(":path", "/index.html"), + pair(":scheme", "http"), + pair(":scheme", "https"), + pair(":status", "200"), + pair(":status", "204"), + pair(":status", "206"), + pair(":status", "304"), + pair(":status", "400"), + pair(":status", "404"), + pair(":status", "500"), + pair("accept-charset", ""), + pair("accept-encoding", "gzip, deflate"), + pair("accept-language", ""), + pair("accept-ranges", ""), + pair("accept", ""), + pair("access-control-allow-origin", ""), + pair("age", ""), + pair("allow", ""), + pair("authorization", ""), + pair("cache-control", ""), + pair("content-disposition", ""), + pair("content-encoding", ""), + pair("content-language", ""), + pair("content-length", ""), + pair("content-location", ""), + pair("content-range", ""), + pair("content-type", ""), + pair("cookie", ""), + pair("date", ""), + pair("etag", ""), + pair("expect", ""), + pair("expires", ""), + pair("from", ""), + pair("host", ""), + pair("if-match", ""), + pair("if-modified-since", ""), + pair("if-none-match", ""), + pair("if-range", ""), + pair("if-unmodified-since", ""), + pair("last-modified", ""), + pair("link", ""), + pair("location", ""), + pair("max-forwards", ""), + pair("proxy-authenticate", ""), + pair("proxy-authorization", ""), + pair("range", ""), + pair("referer", ""), + pair("refresh", ""), + pair("retry-after", ""), + pair("server", ""), + pair("set-cookie", ""), + pair("strict-transport-security", ""), + pair("transfer-encoding", ""), + pair("user-agent", ""), + pair("vary", ""), + pair("via", ""), + pair("www-authenticate", ""), +} + +var huffmanCodes = [256]uint32{ + 0x1ff8, + 0x7fffd8, + 0xfffffe2, + 0xfffffe3, + 0xfffffe4, + 0xfffffe5, + 0xfffffe6, + 0xfffffe7, + 0xfffffe8, + 0xffffea, + 0x3ffffffc, + 0xfffffe9, + 0xfffffea, + 0x3ffffffd, + 0xfffffeb, + 0xfffffec, + 0xfffffed, + 0xfffffee, + 0xfffffef, + 0xffffff0, + 0xffffff1, + 0xffffff2, + 0x3ffffffe, + 0xffffff3, + 0xffffff4, + 0xffffff5, + 0xffffff6, + 0xffffff7, + 0xffffff8, + 0xffffff9, + 0xffffffa, + 0xffffffb, + 0x14, + 0x3f8, + 0x3f9, + 0xffa, + 0x1ff9, + 0x15, + 0xf8, + 0x7fa, + 0x3fa, + 0x3fb, + 0xf9, + 0x7fb, + 0xfa, + 0x16, + 0x17, + 0x18, + 0x0, + 0x1, + 0x2, + 0x19, + 0x1a, + 0x1b, + 0x1c, + 0x1d, + 0x1e, + 0x1f, + 0x5c, + 0xfb, + 0x7ffc, + 0x20, + 0xffb, + 0x3fc, + 0x1ffa, + 0x21, + 0x5d, + 0x5e, + 0x5f, + 0x60, + 0x61, + 0x62, + 0x63, + 0x64, + 0x65, + 0x66, + 0x67, + 0x68, + 0x69, + 0x6a, + 0x6b, + 0x6c, + 0x6d, + 0x6e, + 0x6f, + 0x70, + 0x71, + 0x72, + 0xfc, + 0x73, + 0xfd, + 0x1ffb, + 0x7fff0, + 0x1ffc, + 0x3ffc, + 0x22, + 0x7ffd, + 0x3, + 0x23, + 0x4, + 0x24, + 0x5, + 0x25, + 0x26, + 0x27, + 0x6, + 0x74, + 0x75, + 0x28, + 0x29, + 0x2a, + 0x7, + 0x2b, + 0x76, + 0x2c, + 0x8, + 0x9, + 0x2d, + 0x77, + 0x78, + 0x79, + 0x7a, + 0x7b, + 0x7ffe, + 0x7fc, + 0x3ffd, + 0x1ffd, + 0xffffffc, + 0xfffe6, + 0x3fffd2, + 0xfffe7, + 0xfffe8, + 0x3fffd3, + 0x3fffd4, + 0x3fffd5, + 0x7fffd9, + 0x3fffd6, + 0x7fffda, + 0x7fffdb, + 0x7fffdc, + 0x7fffdd, + 0x7fffde, + 0xffffeb, + 0x7fffdf, + 0xffffec, + 0xffffed, + 0x3fffd7, + 0x7fffe0, + 0xffffee, + 0x7fffe1, + 0x7fffe2, + 0x7fffe3, + 0x7fffe4, + 0x1fffdc, + 0x3fffd8, + 0x7fffe5, + 0x3fffd9, + 0x7fffe6, + 0x7fffe7, + 0xffffef, + 
0x3fffda, + 0x1fffdd, + 0xfffe9, + 0x3fffdb, + 0x3fffdc, + 0x7fffe8, + 0x7fffe9, + 0x1fffde, + 0x7fffea, + 0x3fffdd, + 0x3fffde, + 0xfffff0, + 0x1fffdf, + 0x3fffdf, + 0x7fffeb, + 0x7fffec, + 0x1fffe0, + 0x1fffe1, + 0x3fffe0, + 0x1fffe2, + 0x7fffed, + 0x3fffe1, + 0x7fffee, + 0x7fffef, + 0xfffea, + 0x3fffe2, + 0x3fffe3, + 0x3fffe4, + 0x7ffff0, + 0x3fffe5, + 0x3fffe6, + 0x7ffff1, + 0x3ffffe0, + 0x3ffffe1, + 0xfffeb, + 0x7fff1, + 0x3fffe7, + 0x7ffff2, + 0x3fffe8, + 0x1ffffec, + 0x3ffffe2, + 0x3ffffe3, + 0x3ffffe4, + 0x7ffffde, + 0x7ffffdf, + 0x3ffffe5, + 0xfffff1, + 0x1ffffed, + 0x7fff2, + 0x1fffe3, + 0x3ffffe6, + 0x7ffffe0, + 0x7ffffe1, + 0x3ffffe7, + 0x7ffffe2, + 0xfffff2, + 0x1fffe4, + 0x1fffe5, + 0x3ffffe8, + 0x3ffffe9, + 0xffffffd, + 0x7ffffe3, + 0x7ffffe4, + 0x7ffffe5, + 0xfffec, + 0xfffff3, + 0xfffed, + 0x1fffe6, + 0x3fffe9, + 0x1fffe7, + 0x1fffe8, + 0x7ffff3, + 0x3fffea, + 0x3fffeb, + 0x1ffffee, + 0x1ffffef, + 0xfffff4, + 0xfffff5, + 0x3ffffea, + 0x7ffff4, + 0x3ffffeb, + 0x7ffffe6, + 0x3ffffec, + 0x3ffffed, + 0x7ffffe7, + 0x7ffffe8, + 0x7ffffe9, + 0x7ffffea, + 0x7ffffeb, + 0xffffffe, + 0x7ffffec, + 0x7ffffed, + 0x7ffffee, + 0x7ffffef, + 0x7fffff0, + 0x3ffffee, +} + +var huffmanCodeLen = [256]uint8{ + 13, 23, 28, 28, 28, 28, 28, 28, 28, 24, 30, 28, 28, 30, 28, 28, + 28, 28, 28, 28, 28, 28, 30, 28, 28, 28, 28, 28, 28, 28, 28, 28, + 6, 10, 10, 12, 13, 6, 8, 11, 10, 10, 8, 11, 8, 6, 6, 6, + 5, 5, 5, 6, 6, 6, 6, 6, 6, 6, 7, 8, 15, 6, 12, 10, + 13, 6, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, + 7, 7, 7, 7, 7, 7, 7, 7, 8, 7, 8, 13, 19, 13, 14, 6, + 15, 5, 6, 5, 6, 5, 6, 6, 6, 5, 7, 7, 6, 6, 6, 5, + 6, 7, 6, 5, 5, 6, 7, 7, 7, 7, 7, 15, 11, 14, 13, 28, + 20, 22, 20, 20, 22, 22, 22, 23, 22, 23, 23, 23, 23, 23, 24, 23, + 24, 24, 22, 23, 24, 23, 23, 23, 23, 21, 22, 23, 22, 23, 23, 24, + 22, 21, 20, 22, 22, 23, 23, 21, 23, 22, 22, 24, 21, 22, 23, 23, + 21, 21, 22, 21, 23, 22, 23, 23, 20, 22, 22, 22, 23, 22, 22, 23, + 26, 26, 20, 19, 22, 23, 22, 25, 26, 26, 26, 27, 27, 26, 24, 25, + 19, 21, 26, 27, 27, 26, 27, 24, 21, 21, 26, 26, 28, 27, 27, 27, + 20, 24, 20, 21, 22, 21, 21, 23, 22, 22, 25, 25, 24, 24, 26, 23, + 26, 27, 26, 26, 27, 27, 27, 27, 27, 28, 27, 27, 27, 27, 27, 26, +} diff --git a/vendor/github.com/docker/distribution/vendor/golang.org/x/net/http2/http2.go b/vendor/github.com/docker/distribution/vendor/golang.org/x/net/http2/http2.go new file mode 100644 index 000000000..0529b63e2 --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/golang.org/x/net/http2/http2.go @@ -0,0 +1,463 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package http2 implements the HTTP/2 protocol. +// +// This package is low-level and intended to be used directly by very +// few people. Most users will use it indirectly through the automatic +// use by the net/http package (from Go 1.6 and later). +// For use in earlier Go versions see ConfigureServer. (Transport support +// requires Go 1.6 or later) +// +// See https://http2.github.io/ for more information on HTTP/2. +// +// See https://http2.golang.org/ for a test server running this code. 
+package http2 + +import ( + "bufio" + "crypto/tls" + "errors" + "fmt" + "io" + "net/http" + "os" + "sort" + "strconv" + "strings" + "sync" +) + +var ( + VerboseLogs bool + logFrameWrites bool + logFrameReads bool +) + +func init() { + e := os.Getenv("GODEBUG") + if strings.Contains(e, "http2debug=1") { + VerboseLogs = true + } + if strings.Contains(e, "http2debug=2") { + VerboseLogs = true + logFrameWrites = true + logFrameReads = true + } +} + +const ( + // ClientPreface is the string that must be sent by new + // connections from clients. + ClientPreface = "PRI * HTTP/2.0\r\n\r\nSM\r\n\r\n" + + // SETTINGS_MAX_FRAME_SIZE default + // http://http2.github.io/http2-spec/#rfc.section.6.5.2 + initialMaxFrameSize = 16384 + + // NextProtoTLS is the NPN/ALPN protocol negotiated during + // HTTP/2's TLS setup. + NextProtoTLS = "h2" + + // http://http2.github.io/http2-spec/#SettingValues + initialHeaderTableSize = 4096 + + initialWindowSize = 65535 // 6.9.2 Initial Flow Control Window Size + + defaultMaxReadFrameSize = 1 << 20 +) + +var ( + clientPreface = []byte(ClientPreface) +) + +type streamState int + +const ( + stateIdle streamState = iota + stateOpen + stateHalfClosedLocal + stateHalfClosedRemote + stateResvLocal + stateResvRemote + stateClosed +) + +var stateName = [...]string{ + stateIdle: "Idle", + stateOpen: "Open", + stateHalfClosedLocal: "HalfClosedLocal", + stateHalfClosedRemote: "HalfClosedRemote", + stateResvLocal: "ResvLocal", + stateResvRemote: "ResvRemote", + stateClosed: "Closed", +} + +func (st streamState) String() string { + return stateName[st] +} + +// Setting is a setting parameter: which setting it is, and its value. +type Setting struct { + // ID is which setting is being set. + // See http://http2.github.io/http2-spec/#SettingValues + ID SettingID + + // Val is the value. + Val uint32 +} + +func (s Setting) String() string { + return fmt.Sprintf("[%v = %d]", s.ID, s.Val) +} + +// Valid reports whether the setting is valid. +func (s Setting) Valid() error { + // Limits and error codes from 6.5.2 Defined SETTINGS Parameters + switch s.ID { + case SettingEnablePush: + if s.Val != 1 && s.Val != 0 { + return ConnectionError(ErrCodeProtocol) + } + case SettingInitialWindowSize: + if s.Val > 1<<31-1 { + return ConnectionError(ErrCodeFlowControl) + } + case SettingMaxFrameSize: + if s.Val < 16384 || s.Val > 1<<24-1 { + return ConnectionError(ErrCodeProtocol) + } + } + return nil +} + +// A SettingID is an HTTP/2 setting as defined in +// http://http2.github.io/http2-spec/#iana-settings +type SettingID uint16 + +const ( + SettingHeaderTableSize SettingID = 0x1 + SettingEnablePush SettingID = 0x2 + SettingMaxConcurrentStreams SettingID = 0x3 + SettingInitialWindowSize SettingID = 0x4 + SettingMaxFrameSize SettingID = 0x5 + SettingMaxHeaderListSize SettingID = 0x6 +) + +var settingName = map[SettingID]string{ + SettingHeaderTableSize: "HEADER_TABLE_SIZE", + SettingEnablePush: "ENABLE_PUSH", + SettingMaxConcurrentStreams: "MAX_CONCURRENT_STREAMS", + SettingInitialWindowSize: "INITIAL_WINDOW_SIZE", + SettingMaxFrameSize: "MAX_FRAME_SIZE", + SettingMaxHeaderListSize: "MAX_HEADER_LIST_SIZE", +} + +func (s SettingID) String() string { + if v, ok := settingName[s]; ok { + return v + } + return fmt.Sprintf("UNKNOWN_SETTING_%d", uint16(s)) +} + +var ( + errInvalidHeaderFieldName = errors.New("http2: invalid header field name") + errInvalidHeaderFieldValue = errors.New("http2: invalid header field value") +) + +// validHeaderFieldName reports whether v is a valid header field name (key). 
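Setting.Valid above enforces the per-parameter limits from RFC 7540, Section 6.5.2. A quick sketch of what it accepts and rejects, assuming the upstream golang.org/x/net/http2 import path; the values are deliberately chosen to trip the checks:

package main

import (
	"fmt"

	"golang.org/x/net/http2"
)

func main() {
	settings := []http2.Setting{
		{ID: http2.SettingInitialWindowSize, Val: 65535}, // fine
		{ID: http2.SettingMaxFrameSize, Val: 1 << 30},    // above 2^24-1: protocol error
		{ID: http2.SettingEnablePush, Val: 2},            // only 0 or 1 are legal
	}
	for _, s := range settings {
		fmt.Printf("%v valid: %v\n", s, s.Valid())
	}
}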
+// RFC 7230 says: +// header-field = field-name ":" OWS field-value OWS +// field-name = token +// tchar = "!" / "#" / "$" / "%" / "&" / "'" / "*" / "+" / "-" / "." / +// "^" / "_" / " +// Further, http2 says: +// "Just as in HTTP/1.x, header field names are strings of ASCII +// characters that are compared in a case-insensitive +// fashion. However, header field names MUST be converted to +// lowercase prior to their encoding in HTTP/2. " +func validHeaderFieldName(v string) bool { + if len(v) == 0 { + return false + } + for _, r := range v { + if int(r) >= len(isTokenTable) || ('A' <= r && r <= 'Z') { + return false + } + if !isTokenTable[byte(r)] { + return false + } + } + return true +} + +// validHeaderFieldValue reports whether v is a valid header field value. +// +// RFC 7230 says: +// field-value = *( field-content / obs-fold ) +// obj-fold = N/A to http2, and deprecated +// field-content = field-vchar [ 1*( SP / HTAB ) field-vchar ] +// field-vchar = VCHAR / obs-text +// obs-text = %x80-FF +// VCHAR = "any visible [USASCII] character" +// +// http2 further says: "Similarly, HTTP/2 allows header field values +// that are not valid. While most of the values that can be encoded +// will not alter header field parsing, carriage return (CR, ASCII +// 0xd), line feed (LF, ASCII 0xa), and the zero character (NUL, ASCII +// 0x0) might be exploited by an attacker if they are translated +// verbatim. Any request or response that contains a character not +// permitted in a header field value MUST be treated as malformed +// (Section 8.1.2.6). Valid characters are defined by the +// field-content ABNF rule in Section 3.2 of [RFC7230]." +// +// This function does not (yet?) properly handle the rejection of +// strings that begin or end with SP or HTAB. +func validHeaderFieldValue(v string) bool { + for i := 0; i < len(v); i++ { + if b := v[i]; b < ' ' && b != '\t' || b == 0x7f { + return false + } + } + return true +} + +var httpCodeStringCommon = map[int]string{} // n -> strconv.Itoa(n) + +func init() { + for i := 100; i <= 999; i++ { + if v := http.StatusText(i); v != "" { + httpCodeStringCommon[i] = strconv.Itoa(i) + } + } +} + +func httpCodeString(code int) string { + if s, ok := httpCodeStringCommon[code]; ok { + return s + } + return strconv.Itoa(code) +} + +// from pkg io +type stringWriter interface { + WriteString(s string) (n int, err error) +} + +// A gate lets two goroutines coordinate their activities. +type gate chan struct{} + +func (g gate) Done() { g <- struct{}{} } +func (g gate) Wait() { <-g } + +// A closeWaiter is like a sync.WaitGroup but only goes 1 to 0 (open to closed). +type closeWaiter chan struct{} + +// Init makes a closeWaiter usable. +// It exists because so a closeWaiter value can be placed inside a +// larger struct and have the Mutex and Cond's memory in the same +// allocation. +func (cw *closeWaiter) Init() { + *cw = make(chan struct{}) +} + +// Close marks the closeWaiter as closed and unblocks any waiters. +func (cw closeWaiter) Close() { + close(cw) +} + +// Wait waits for the closeWaiter to become closed. +func (cw closeWaiter) Wait() { + <-cw +} + +// bufferedWriter is a buffered writer that writes to w. +// Its buffered writer is lazily allocated as needed, to minimize +// idle memory usage with many connections. 
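validHeaderFieldValue above is unexported, so as a standalone illustration of the rule it enforces (reject CR, LF, NUL and other control bytes that could be exploited if echoed verbatim), here is a sketch with safeFieldValue as a hypothetical name:

package main

import "fmt"

// safeFieldValue mirrors the validHeaderFieldValue check above: control
// characters other than HTAB, and DEL, must not appear in a field value.
func safeFieldValue(v string) bool {
	for i := 0; i < len(v); i++ {
		if b := v[i]; b < ' ' && b != '\t' || b == 0x7f {
			return false
		}
	}
	return true
}

func main() {
	fmt.Println(safeFieldValue("text/html; charset=utf-8")) // true
	fmt.Println(safeFieldValue("evil\r\nSet-Cookie: x=1"))  // false: CRLF injection
}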
+type bufferedWriter struct { + w io.Writer // immutable + bw *bufio.Writer // non-nil when data is buffered +} + +func newBufferedWriter(w io.Writer) *bufferedWriter { + return &bufferedWriter{w: w} +} + +var bufWriterPool = sync.Pool{ + New: func() interface{} { + // TODO: pick something better? this is a bit under + // (3 x typical 1500 byte MTU) at least. + return bufio.NewWriterSize(nil, 4<<10) + }, +} + +func (w *bufferedWriter) Write(p []byte) (n int, err error) { + if w.bw == nil { + bw := bufWriterPool.Get().(*bufio.Writer) + bw.Reset(w.w) + w.bw = bw + } + return w.bw.Write(p) +} + +func (w *bufferedWriter) Flush() error { + bw := w.bw + if bw == nil { + return nil + } + err := bw.Flush() + bw.Reset(nil) + bufWriterPool.Put(bw) + w.bw = nil + return err +} + +func mustUint31(v int32) uint32 { + if v < 0 || v > 2147483647 { + panic("out of range") + } + return uint32(v) +} + +// bodyAllowedForStatus reports whether a given response status code +// permits a body. See RFC2616, section 4.4. +func bodyAllowedForStatus(status int) bool { + switch { + case status >= 100 && status <= 199: + return false + case status == 204: + return false + case status == 304: + return false + } + return true +} + +type httpError struct { + msg string + timeout bool +} + +func (e *httpError) Error() string { return e.msg } +func (e *httpError) Timeout() bool { return e.timeout } +func (e *httpError) Temporary() bool { return true } + +var errTimeout error = &httpError{msg: "http2: timeout awaiting response headers", timeout: true} + +var isTokenTable = [127]bool{ + '!': true, + '#': true, + '$': true, + '%': true, + '&': true, + '\'': true, + '*': true, + '+': true, + '-': true, + '.': true, + '0': true, + '1': true, + '2': true, + '3': true, + '4': true, + '5': true, + '6': true, + '7': true, + '8': true, + '9': true, + 'A': true, + 'B': true, + 'C': true, + 'D': true, + 'E': true, + 'F': true, + 'G': true, + 'H': true, + 'I': true, + 'J': true, + 'K': true, + 'L': true, + 'M': true, + 'N': true, + 'O': true, + 'P': true, + 'Q': true, + 'R': true, + 'S': true, + 'T': true, + 'U': true, + 'W': true, + 'V': true, + 'X': true, + 'Y': true, + 'Z': true, + '^': true, + '_': true, + '`': true, + 'a': true, + 'b': true, + 'c': true, + 'd': true, + 'e': true, + 'f': true, + 'g': true, + 'h': true, + 'i': true, + 'j': true, + 'k': true, + 'l': true, + 'm': true, + 'n': true, + 'o': true, + 'p': true, + 'q': true, + 'r': true, + 's': true, + 't': true, + 'u': true, + 'v': true, + 'w': true, + 'x': true, + 'y': true, + 'z': true, + '|': true, + '~': true, +} + +type connectionStater interface { + ConnectionState() tls.ConnectionState +} + +var sorterPool = sync.Pool{New: func() interface{} { return new(sorter) }} + +type sorter struct { + v []string // owned by sorter +} + +func (s *sorter) Len() int { return len(s.v) } +func (s *sorter) Swap(i, j int) { s.v[i], s.v[j] = s.v[j], s.v[i] } +func (s *sorter) Less(i, j int) bool { return s.v[i] < s.v[j] } + +// Keys returns the sorted keys of h. +// +// The returned slice is only valid until s used again or returned to +// its pool. +func (s *sorter) Keys(h http.Header) []string { + keys := s.v[:0] + for k := range h { + keys = append(keys, k) + } + s.v = keys + sort.Sort(s) + return keys +} + +func (s *sorter) SortStrings(ss []string) { + // Our sorter works on s.v, which sorter owners, so + // stash it away while we sort the user's buffer. 
+ save := s.v + s.v = ss + sort.Sort(s) + s.v = save +} diff --git a/vendor/github.com/docker/distribution/vendor/golang.org/x/net/http2/not_go15.go b/vendor/github.com/docker/distribution/vendor/golang.org/x/net/http2/not_go15.go new file mode 100644 index 000000000..d0fa5c890 --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/golang.org/x/net/http2/not_go15.go @@ -0,0 +1,11 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !go1.5 + +package http2 + +import "net/http" + +func requestCancel(req *http.Request) <-chan struct{} { return nil } diff --git a/vendor/github.com/docker/distribution/vendor/golang.org/x/net/http2/not_go16.go b/vendor/github.com/docker/distribution/vendor/golang.org/x/net/http2/not_go16.go new file mode 100644 index 000000000..db53c5b8c --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/golang.org/x/net/http2/not_go16.go @@ -0,0 +1,13 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !go1.6 + +package http2 + +import "net/http" + +func configureTransport(t1 *http.Transport) (*Transport, error) { + return nil, errTransportVersion +} diff --git a/vendor/github.com/docker/distribution/vendor/golang.org/x/net/http2/pipe.go b/vendor/github.com/docker/distribution/vendor/golang.org/x/net/http2/pipe.go new file mode 100644 index 000000000..69446e7a3 --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/golang.org/x/net/http2/pipe.go @@ -0,0 +1,147 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package http2 + +import ( + "errors" + "io" + "sync" +) + +// pipe is a goroutine-safe io.Reader/io.Writer pair. It's like +// io.Pipe except there are no PipeReader/PipeWriter halves, and the +// underlying buffer is an interface. (io.Pipe is always unbuffered) +type pipe struct { + mu sync.Mutex + c sync.Cond // c.L lazily initialized to &p.mu + b pipeBuffer + err error // read error once empty. non-nil means closed. + breakErr error // immediate read error (caller doesn't see rest of b) + donec chan struct{} // closed on error + readFn func() // optional code to run in Read before error +} + +type pipeBuffer interface { + Len() int + io.Writer + io.Reader +} + +// Read waits until data is available and copies bytes +// from the buffer into p. +func (p *pipe) Read(d []byte) (n int, err error) { + p.mu.Lock() + defer p.mu.Unlock() + if p.c.L == nil { + p.c.L = &p.mu + } + for { + if p.breakErr != nil { + return 0, p.breakErr + } + if p.b.Len() > 0 { + return p.b.Read(d) + } + if p.err != nil { + if p.readFn != nil { + p.readFn() // e.g. copy trailers + p.readFn = nil // not sticky like p.err + } + return 0, p.err + } + p.c.Wait() + } +} + +var errClosedPipeWrite = errors.New("write on closed buffer") + +// Write copies bytes from p into the buffer and wakes a reader. +// It is an error to write more data than the buffer can hold. 
+func (p *pipe) Write(d []byte) (n int, err error) { + p.mu.Lock() + defer p.mu.Unlock() + if p.c.L == nil { + p.c.L = &p.mu + } + defer p.c.Signal() + if p.err != nil { + return 0, errClosedPipeWrite + } + return p.b.Write(d) +} + +// CloseWithError causes the next Read (waking up a current blocked +// Read if needed) to return the provided err after all data has been +// read. +// +// The error must be non-nil. +func (p *pipe) CloseWithError(err error) { p.closeWithError(&p.err, err, nil) } + +// BreakWithError causes the next Read (waking up a current blocked +// Read if needed) to return the provided err immediately, without +// waiting for unread data. +func (p *pipe) BreakWithError(err error) { p.closeWithError(&p.breakErr, err, nil) } + +// closeWithErrorAndCode is like CloseWithError but also sets some code to run +// in the caller's goroutine before returning the error. +func (p *pipe) closeWithErrorAndCode(err error, fn func()) { p.closeWithError(&p.err, err, fn) } + +func (p *pipe) closeWithError(dst *error, err error, fn func()) { + if err == nil { + panic("err must be non-nil") + } + p.mu.Lock() + defer p.mu.Unlock() + if p.c.L == nil { + p.c.L = &p.mu + } + defer p.c.Signal() + if *dst != nil { + // Already been done. + return + } + p.readFn = fn + *dst = err + p.closeDoneLocked() +} + +// requires p.mu be held. +func (p *pipe) closeDoneLocked() { + if p.donec == nil { + return + } + // Close if unclosed. This isn't racy since we always + // hold p.mu while closing. + select { + case <-p.donec: + default: + close(p.donec) + } +} + +// Err returns the error (if any) first set by BreakWithError or CloseWithError. +func (p *pipe) Err() error { + p.mu.Lock() + defer p.mu.Unlock() + if p.breakErr != nil { + return p.breakErr + } + return p.err +} + +// Done returns a channel which is closed if and when this pipe is closed +// with CloseWithError. +func (p *pipe) Done() <-chan struct{} { + p.mu.Lock() + defer p.mu.Unlock() + if p.donec == nil { + p.donec = make(chan struct{}) + if p.err != nil || p.breakErr != nil { + // Already hit an error. + p.closeDoneLocked() + } + } + return p.donec +} diff --git a/vendor/github.com/docker/distribution/vendor/golang.org/x/net/http2/server.go b/vendor/github.com/docker/distribution/vendor/golang.org/x/net/http2/server.go new file mode 100644 index 000000000..1e6980c31 --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/golang.org/x/net/http2/server.go @@ -0,0 +1,2178 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// TODO: replace all <-sc.doneServing with reads from the stream's cw +// instead, and make sure that on close we close all open +// streams. then remove doneServing? + +// TODO: re-audit GOAWAY support. Consider each incoming frame type and +// whether it should be ignored during graceful shutdown. + +// TODO: disconnect idle clients. GFE seems to do 4 minutes. make +// configurable? or maximum number of idle clients and remove the +// oldest? + +// TODO: turn off the serve goroutine when idle, so +// an idle conn only has the readFrames goroutine active. (which could +// also be optimized probably to pin less memory in crypto/tls). This +// would involve tracking when the serve goroutine is active (atomic +// int32 read/CAS probably?) and starting it up when frames arrive, +// and shutting it down when all handlers exit. 
the occasional PING +// packets could use time.AfterFunc to call sc.wakeStartServeLoop() +// (which is a no-op if already running) and then queue the PING write +// as normal. The serve loop would then exit in most cases (if no +// Handlers running) and not be woken up again until the PING packet +// returns. + +// TODO (maybe): add a mechanism for Handlers to going into +// half-closed-local mode (rw.(io.Closer) test?) but not exit their +// handler, and continue to be able to read from the +// Request.Body. This would be a somewhat semantic change from HTTP/1 +// (or at least what we expose in net/http), so I'd probably want to +// add it there too. For now, this package says that returning from +// the Handler ServeHTTP function means you're both done reading and +// done writing, without a way to stop just one or the other. + +package http2 + +import ( + "bufio" + "bytes" + "crypto/tls" + "errors" + "fmt" + "io" + "log" + "net" + "net/http" + "net/textproto" + "net/url" + "os" + "reflect" + "runtime" + "strconv" + "strings" + "sync" + "time" + + "golang.org/x/net/http2/hpack" +) + +const ( + prefaceTimeout = 10 * time.Second + firstSettingsTimeout = 2 * time.Second // should be in-flight with preface anyway + handlerChunkWriteSize = 4 << 10 + defaultMaxStreams = 250 // TODO: make this 100 as the GFE seems to? +) + +var ( + errClientDisconnected = errors.New("client disconnected") + errClosedBody = errors.New("body closed by handler") + errHandlerComplete = errors.New("http2: request body closed due to handler exiting") + errStreamClosed = errors.New("http2: stream closed") +) + +var responseWriterStatePool = sync.Pool{ + New: func() interface{} { + rws := &responseWriterState{} + rws.bw = bufio.NewWriterSize(chunkWriter{rws}, handlerChunkWriteSize) + return rws + }, +} + +// Test hooks. +var ( + testHookOnConn func() + testHookGetServerConn func(*serverConn) + testHookOnPanicMu *sync.Mutex // nil except in tests + testHookOnPanic func(sc *serverConn, panicVal interface{}) (rePanic bool) +) + +// Server is an HTTP/2 server. +type Server struct { + // MaxHandlers limits the number of http.Handler ServeHTTP goroutines + // which may run at a time over all connections. + // Negative or zero no limit. + // TODO: implement + MaxHandlers int + + // MaxConcurrentStreams optionally specifies the number of + // concurrent streams that each client may have open at a + // time. This is unrelated to the number of http.Handler goroutines + // which may be active globally, which is MaxHandlers. + // If zero, MaxConcurrentStreams defaults to at least 100, per + // the HTTP/2 spec's recommendations. + MaxConcurrentStreams uint32 + + // MaxReadFrameSize optionally specifies the largest frame + // this server is willing to read. A valid value is between + // 16k and 16M, inclusive. If zero or otherwise invalid, a + // default value is used. + MaxReadFrameSize uint32 + + // PermitProhibitedCipherSuites, if true, permits the use of + // cipher suites prohibited by the HTTP/2 spec. + PermitProhibitedCipherSuites bool +} + +func (s *Server) maxReadFrameSize() uint32 { + if v := s.MaxReadFrameSize; v >= minMaxFrameSize && v <= maxFrameSize { + return v + } + return defaultMaxReadFrameSize +} + +func (s *Server) maxConcurrentStreams() uint32 { + if v := s.MaxConcurrentStreams; v > 0 { + return v + } + return defaultMaxStreams +} + +// ConfigureServer adds HTTP/2 support to a net/http Server. +// +// The configuration conf may be nil. +// +// ConfigureServer must be called before s begins serving. 
+func ConfigureServer(s *http.Server, conf *Server) error { + if conf == nil { + conf = new(Server) + } + + if s.TLSConfig == nil { + s.TLSConfig = new(tls.Config) + } else if s.TLSConfig.CipherSuites != nil { + // If they already provided a CipherSuite list, return + // an error if it has a bad order or is missing + // ECDHE_RSA_WITH_AES_128_GCM_SHA256. + const requiredCipher = tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256 + haveRequired := false + sawBad := false + for i, cs := range s.TLSConfig.CipherSuites { + if cs == requiredCipher { + haveRequired = true + } + if isBadCipher(cs) { + sawBad = true + } else if sawBad { + return fmt.Errorf("http2: TLSConfig.CipherSuites index %d contains an HTTP/2-approved cipher suite (%#04x), but it comes after unapproved cipher suites. With this configuration, clients that don't support previous, approved cipher suites may be given an unapproved one and reject the connection.", i, cs) + } + } + if !haveRequired { + return fmt.Errorf("http2: TLSConfig.CipherSuites is missing HTTP/2-required TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256") + } + } + + // Note: not setting MinVersion to tls.VersionTLS12, + // as we don't want to interfere with HTTP/1.1 traffic + // on the user's server. We enforce TLS 1.2 later once + // we accept a connection. Ideally this should be done + // during next-proto selection, but using TLS <1.2 with + // HTTP/2 is still the client's bug. + + s.TLSConfig.PreferServerCipherSuites = true + + haveNPN := false + for _, p := range s.TLSConfig.NextProtos { + if p == NextProtoTLS { + haveNPN = true + break + } + } + if !haveNPN { + s.TLSConfig.NextProtos = append(s.TLSConfig.NextProtos, NextProtoTLS) + } + // h2-14 is temporary (as of 2015-03-05) while we wait for all browsers + // to switch to "h2". + s.TLSConfig.NextProtos = append(s.TLSConfig.NextProtos, "h2-14") + + if s.TLSNextProto == nil { + s.TLSNextProto = map[string]func(*http.Server, *tls.Conn, http.Handler){} + } + protoHandler := func(hs *http.Server, c *tls.Conn, h http.Handler) { + if testHookOnConn != nil { + testHookOnConn() + } + conf.ServeConn(c, &ServeConnOpts{ + Handler: h, + BaseConfig: hs, + }) + } + s.TLSNextProto[NextProtoTLS] = protoHandler + s.TLSNextProto["h2-14"] = protoHandler // temporary; see above. + return nil +} + +// ServeConnOpts are options for the Server.ServeConn method. +type ServeConnOpts struct { + // BaseConfig optionally sets the base configuration + // for values. If nil, defaults are used. + BaseConfig *http.Server + + // Handler specifies which handler to use for processing + // requests. If nil, BaseConfig.Handler is used. If BaseConfig + // or BaseConfig.Handler is nil, http.DefaultServeMux is used. + Handler http.Handler +} + +func (o *ServeConnOpts) baseConfig() *http.Server { + if o != nil && o.BaseConfig != nil { + return o.BaseConfig + } + return new(http.Server) +} + +func (o *ServeConnOpts) handler() http.Handler { + if o != nil { + if o.Handler != nil { + return o.Handler + } + if o.BaseConfig != nil && o.BaseConfig.Handler != nil { + return o.BaseConfig.Handler + } + } + return http.DefaultServeMux +} + +// ServeConn serves HTTP/2 requests on the provided connection and +// blocks until the connection is no longer readable. +// +// ServeConn starts speaking HTTP/2 assuming that c has not had any +// reads or writes. It writes its initial settings frame and expects +// to be able to read the preface and settings frame from the +// client. 
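A caller-side sketch (not part of this diff) of enabling HTTP/2 on an existing net/http server via ConfigureServer; the certificate paths are placeholders:

package main

import (
	"log"
	"net/http"

	"golang.org/x/net/http2"
)

func main() {
	srv := &http.Server{Addr: ":8443", Handler: http.DefaultServeMux}

	// Fills in TLS defaults and registers the "h2" TLSNextProto handler.
	if err := http2.ConfigureServer(srv, &http2.Server{MaxConcurrentStreams: 250}); err != nil {
		log.Fatal(err)
	}

	// cert.pem and key.pem are placeholder credential paths.
	log.Fatal(srv.ListenAndServeTLS("cert.pem", "key.pem"))
}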
If c has a ConnectionState method like a *tls.Conn, the +// ConnectionState is used to verify the TLS ciphersuite and to set +// the Request.TLS field in Handlers. +// +// ServeConn does not support h2c by itself. Any h2c support must be +// implemented in terms of providing a suitably-behaving net.Conn. +// +// The opts parameter is optional. If nil, default values are used. +func (s *Server) ServeConn(c net.Conn, opts *ServeConnOpts) { + sc := &serverConn{ + srv: s, + hs: opts.baseConfig(), + conn: c, + remoteAddrStr: c.RemoteAddr().String(), + bw: newBufferedWriter(c), + handler: opts.handler(), + streams: make(map[uint32]*stream), + readFrameCh: make(chan readFrameResult), + wantWriteFrameCh: make(chan frameWriteMsg, 8), + wroteFrameCh: make(chan frameWriteResult, 1), // buffered; one send in writeFrameAsync + bodyReadCh: make(chan bodyReadMsg), // buffering doesn't matter either way + doneServing: make(chan struct{}), + advMaxStreams: s.maxConcurrentStreams(), + writeSched: writeScheduler{ + maxFrameSize: initialMaxFrameSize, + }, + initialWindowSize: initialWindowSize, + headerTableSize: initialHeaderTableSize, + serveG: newGoroutineLock(), + pushEnabled: true, + } + sc.flow.add(initialWindowSize) + sc.inflow.add(initialWindowSize) + sc.hpackEncoder = hpack.NewEncoder(&sc.headerWriteBuf) + + fr := NewFramer(sc.bw, c) + fr.ReadMetaHeaders = hpack.NewDecoder(initialHeaderTableSize, nil) + fr.MaxHeaderListSize = sc.maxHeaderListSize() + fr.SetMaxReadFrameSize(s.maxReadFrameSize()) + sc.framer = fr + + if tc, ok := c.(connectionStater); ok { + sc.tlsState = new(tls.ConnectionState) + *sc.tlsState = tc.ConnectionState() + // 9.2 Use of TLS Features + // An implementation of HTTP/2 over TLS MUST use TLS + // 1.2 or higher with the restrictions on feature set + // and cipher suite described in this section. Due to + // implementation limitations, it might not be + // possible to fail TLS negotiation. An endpoint MUST + // immediately terminate an HTTP/2 connection that + // does not meet the TLS requirements described in + // this section with a connection error (Section + // 5.4.1) of type INADEQUATE_SECURITY. + if sc.tlsState.Version < tls.VersionTLS12 { + sc.rejectConn(ErrCodeInadequateSecurity, "TLS version too low") + return + } + + if sc.tlsState.ServerName == "" { + // Client must use SNI, but we don't enforce that anymore, + // since it was causing problems when connecting to bare IP + // addresses during development. + // + // TODO: optionally enforce? Or enforce at the time we receive + // a new request, and verify the the ServerName matches the :authority? + // But that precludes proxy situations, perhaps. + // + // So for now, do nothing here again. + } + + if !s.PermitProhibitedCipherSuites && isBadCipher(sc.tlsState.CipherSuite) { + // "Endpoints MAY choose to generate a connection error + // (Section 5.4.1) of type INADEQUATE_SECURITY if one of + // the prohibited cipher suites are negotiated." + // + // We choose that. In my opinion, the spec is weak + // here. It also says both parties must support at least + // TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256 so there's no + // excuses here. If we really must, we could allow an + // "AllowInsecureWeakCiphers" option on the server later. + // Let's see how it plays out first. 
+ sc.rejectConn(ErrCodeInadequateSecurity, fmt.Sprintf("Prohibited TLS 1.2 Cipher Suite: %x", sc.tlsState.CipherSuite)) + return + } + } + + if hook := testHookGetServerConn; hook != nil { + hook(sc) + } + sc.serve() +} + +// isBadCipher reports whether the cipher is blacklisted by the HTTP/2 spec. +func isBadCipher(cipher uint16) bool { + switch cipher { + case tls.TLS_RSA_WITH_RC4_128_SHA, + tls.TLS_RSA_WITH_3DES_EDE_CBC_SHA, + tls.TLS_RSA_WITH_AES_128_CBC_SHA, + tls.TLS_RSA_WITH_AES_256_CBC_SHA, + tls.TLS_ECDHE_ECDSA_WITH_RC4_128_SHA, + tls.TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA, + tls.TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA, + tls.TLS_ECDHE_RSA_WITH_RC4_128_SHA, + tls.TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA, + tls.TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA, + tls.TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA: + // Reject cipher suites from Appendix A. + // "This list includes those cipher suites that do not + // offer an ephemeral key exchange and those that are + // based on the TLS null, stream or block cipher type" + return true + default: + return false + } +} + +func (sc *serverConn) rejectConn(err ErrCode, debug string) { + sc.vlogf("http2: server rejecting conn: %v, %s", err, debug) + // ignoring errors. hanging up anyway. + sc.framer.WriteGoAway(0, err, []byte(debug)) + sc.bw.Flush() + sc.conn.Close() +} + +type serverConn struct { + // Immutable: + srv *Server + hs *http.Server + conn net.Conn + bw *bufferedWriter // writing to conn + handler http.Handler + framer *Framer + doneServing chan struct{} // closed when serverConn.serve ends + readFrameCh chan readFrameResult // written by serverConn.readFrames + wantWriteFrameCh chan frameWriteMsg // from handlers -> serve + wroteFrameCh chan frameWriteResult // from writeFrameAsync -> serve, tickles more frame writes + bodyReadCh chan bodyReadMsg // from handlers -> serve + testHookCh chan func(int) // code to run on the serve loop + flow flow // conn-wide (not stream-specific) outbound flow control + inflow flow // conn-wide inbound flow control + tlsState *tls.ConnectionState // shared by all handlers, like net/http + remoteAddrStr string + + // Everything following is owned by the serve loop; use serveG.check(): + serveG goroutineLock // used to verify funcs are on serve() + pushEnabled bool + sawFirstSettings bool // got the initial SETTINGS frame after the preface + needToSendSettingsAck bool + unackedSettings int // how many SETTINGS have we sent without ACKs? 
+ clientMaxStreams uint32 // SETTINGS_MAX_CONCURRENT_STREAMS from client (our PUSH_PROMISE limit) + advMaxStreams uint32 // our SETTINGS_MAX_CONCURRENT_STREAMS advertised the client + curOpenStreams uint32 // client's number of open streams + maxStreamID uint32 // max ever seen + streams map[uint32]*stream + initialWindowSize int32 + headerTableSize uint32 + peerMaxHeaderListSize uint32 // zero means unknown (default) + canonHeader map[string]string // http2-lower-case -> Go-Canonical-Case + writingFrame bool // started write goroutine but haven't heard back on wroteFrameCh + needsFrameFlush bool // last frame write wasn't a flush + writeSched writeScheduler + inGoAway bool // we've started to or sent GOAWAY + needToSendGoAway bool // we need to schedule a GOAWAY frame write + goAwayCode ErrCode + shutdownTimerCh <-chan time.Time // nil until used + shutdownTimer *time.Timer // nil until used + freeRequestBodyBuf []byte // if non-nil, a free initialWindowSize buffer for getRequestBodyBuf + + // Owned by the writeFrameAsync goroutine: + headerWriteBuf bytes.Buffer + hpackEncoder *hpack.Encoder +} + +func (sc *serverConn) maxHeaderListSize() uint32 { + n := sc.hs.MaxHeaderBytes + if n <= 0 { + n = http.DefaultMaxHeaderBytes + } + // http2's count is in a slightly different unit and includes 32 bytes per pair. + // So, take the net/http.Server value and pad it up a bit, assuming 10 headers. + const perFieldOverhead = 32 // per http2 spec + const typicalHeaders = 10 // conservative + return uint32(n + typicalHeaders*perFieldOverhead) +} + +// stream represents a stream. This is the minimal metadata needed by +// the serve goroutine. Most of the actual stream state is owned by +// the http.Handler's goroutine in the responseWriter. Because the +// responseWriter's responseWriterState is recycled at the end of a +// handler, this struct intentionally has no pointer to the +// *responseWriter{,State} itself, as the Handler ending nils out the +// responseWriter's state field. +type stream struct { + // immutable: + sc *serverConn + id uint32 + body *pipe // non-nil if expecting DATA frames + cw closeWaiter // closed wait stream transitions to closed state + + // owned by serverConn's serve loop: + bodyBytes int64 // body bytes seen so far + declBodyBytes int64 // or -1 if undeclared + flow flow // limits writing from Handler to client + inflow flow // what the client is allowed to POST/etc to us + parent *stream // or nil + numTrailerValues int64 + weight uint8 + state streamState + sentReset bool // only true once detached from streams map + gotReset bool // only true once detacted from streams map + gotTrailerHeader bool // HEADER frame for trailers was seen + reqBuf []byte + + trailer http.Header // accumulated trailers + reqTrailer http.Header // handler's Request.Trailer +} + +func (sc *serverConn) Framer() *Framer { return sc.framer } +func (sc *serverConn) CloseConn() error { return sc.conn.Close() } +func (sc *serverConn) Flush() error { return sc.bw.Flush() } +func (sc *serverConn) HeaderEncoder() (*hpack.Encoder, *bytes.Buffer) { + return sc.hpackEncoder, &sc.headerWriteBuf +} + +func (sc *serverConn) state(streamID uint32) (streamState, *stream) { + sc.serveG.check() + // http://http2.github.io/http2-spec/#rfc.section.5.1 + if st, ok := sc.streams[streamID]; ok { + return st.state, st + } + // "The first use of a new stream identifier implicitly closes all + // streams in the "idle" state that might have been initiated by + // that peer with a lower-valued stream identifier. 
For example, if + // a client sends a HEADERS frame on stream 7 without ever sending a + // frame on stream 5, then stream 5 transitions to the "closed" + // state when the first frame for stream 7 is sent or received." + if streamID <= sc.maxStreamID { + return stateClosed, nil + } + return stateIdle, nil +} + +// setConnState calls the net/http ConnState hook for this connection, if configured. +// Note that the net/http package does StateNew and StateClosed for us. +// There is currently no plan for StateHijacked or hijacking HTTP/2 connections. +func (sc *serverConn) setConnState(state http.ConnState) { + if sc.hs.ConnState != nil { + sc.hs.ConnState(sc.conn, state) + } +} + +func (sc *serverConn) vlogf(format string, args ...interface{}) { + if VerboseLogs { + sc.logf(format, args...) + } +} + +func (sc *serverConn) logf(format string, args ...interface{}) { + if lg := sc.hs.ErrorLog; lg != nil { + lg.Printf(format, args...) + } else { + log.Printf(format, args...) + } +} + +// errno returns v's underlying uintptr, else 0. +// +// TODO: remove this helper function once http2 can use build +// tags. See comment in isClosedConnError. +func errno(v error) uintptr { + if rv := reflect.ValueOf(v); rv.Kind() == reflect.Uintptr { + return uintptr(rv.Uint()) + } + return 0 +} + +// isClosedConnError reports whether err is an error from use of a closed +// network connection. +func isClosedConnError(err error) bool { + if err == nil { + return false + } + + // TODO: remove this string search and be more like the Windows + // case below. That might involve modifying the standard library + // to return better error types. + str := err.Error() + if strings.Contains(str, "use of closed network connection") { + return true + } + + // TODO(bradfitz): x/tools/cmd/bundle doesn't really support + // build tags, so I can't make an http2_windows.go file with + // Windows-specific stuff. Fix that and move this, once we + // have a way to bundle this into std's net/http somehow. + if runtime.GOOS == "windows" { + if oe, ok := err.(*net.OpError); ok && oe.Op == "read" { + if se, ok := oe.Err.(*os.SyscallError); ok && se.Syscall == "wsarecv" { + const WSAECONNABORTED = 10053 + const WSAECONNRESET = 10054 + if n := errno(se.Err); n == WSAECONNRESET || n == WSAECONNABORTED { + return true + } + } + } + } + return false +} + +func (sc *serverConn) condlogf(err error, format string, args ...interface{}) { + if err == nil { + return + } + if err == io.EOF || err == io.ErrUnexpectedEOF || isClosedConnError(err) { + // Boring, expected errors. + sc.vlogf(format, args...) + } else { + sc.logf(format, args...) + } +} + +func (sc *serverConn) canonicalHeader(v string) string { + sc.serveG.check() + cv, ok := commonCanonHeader[v] + if ok { + return cv + } + cv, ok = sc.canonHeader[v] + if ok { + return cv + } + if sc.canonHeader == nil { + sc.canonHeader = make(map[string]string) + } + cv = http.CanonicalHeaderKey(v) + sc.canonHeader[v] = cv + return cv +} + +type readFrameResult struct { + f Frame // valid until readMore is called + err error + + // readMore should be called once the consumer no longer needs or + // retains f. After readMore, f is invalid and more frames can be + // read. + readMore func() +} + +// readFrames is the loop that reads incoming frames. +// It takes care to only read one frame at a time, blocking until the +// consumer is done with the frame. +// It's run on its own goroutine. 
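readFrames below hands exactly one frame at a time to the serve goroutine and waits for readMore before reading the next frame. A minimal standalone sketch of that handoff pattern (item and produce are illustrative names, not the package's gate type):

package main

import "fmt"

// item carries a value plus a callback channel: the consumer closes done
// when it no longer needs the value, which unblocks the producer.
type item struct {
	val  int
	done chan struct{}
}

func produce(out chan<- item, stop <-chan struct{}) {
	defer close(out)
	for i := 0; i < 3; i++ {
		done := make(chan struct{})
		select {
		case out <- item{val: i, done: done}:
		case <-stop:
			return
		}
		select {
		case <-done: // consumer is finished; safe to produce the next item
		case <-stop:
			return
		}
	}
}

func main() {
	out := make(chan item)
	stop := make(chan struct{})
	go produce(out, stop)
	for it := range out {
		fmt.Println("processing", it.val)
		close(it.done) // the equivalent of calling readMore()
	}
}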
+func (sc *serverConn) readFrames() { + gate := make(gate) + gateDone := gate.Done + for { + f, err := sc.framer.ReadFrame() + select { + case sc.readFrameCh <- readFrameResult{f, err, gateDone}: + case <-sc.doneServing: + return + } + select { + case <-gate: + case <-sc.doneServing: + return + } + if terminalReadFrameError(err) { + return + } + } +} + +// frameWriteResult is the message passed from writeFrameAsync to the serve goroutine. +type frameWriteResult struct { + wm frameWriteMsg // what was written (or attempted) + err error // result of the writeFrame call +} + +// writeFrameAsync runs in its own goroutine and writes a single frame +// and then reports when it's done. +// At most one goroutine can be running writeFrameAsync at a time per +// serverConn. +func (sc *serverConn) writeFrameAsync(wm frameWriteMsg) { + err := wm.write.writeFrame(sc) + sc.wroteFrameCh <- frameWriteResult{wm, err} +} + +func (sc *serverConn) closeAllStreamsOnConnClose() { + sc.serveG.check() + for _, st := range sc.streams { + sc.closeStream(st, errClientDisconnected) + } +} + +func (sc *serverConn) stopShutdownTimer() { + sc.serveG.check() + if t := sc.shutdownTimer; t != nil { + t.Stop() + } +} + +func (sc *serverConn) notePanic() { + // Note: this is for serverConn.serve panicking, not http.Handler code. + if testHookOnPanicMu != nil { + testHookOnPanicMu.Lock() + defer testHookOnPanicMu.Unlock() + } + if testHookOnPanic != nil { + if e := recover(); e != nil { + if testHookOnPanic(sc, e) { + panic(e) + } + } + } +} + +func (sc *serverConn) serve() { + sc.serveG.check() + defer sc.notePanic() + defer sc.conn.Close() + defer sc.closeAllStreamsOnConnClose() + defer sc.stopShutdownTimer() + defer close(sc.doneServing) // unblocks handlers trying to send + + if VerboseLogs { + sc.vlogf("http2: server connection from %v on %p", sc.conn.RemoteAddr(), sc.hs) + } + + sc.writeFrame(frameWriteMsg{ + write: writeSettings{ + {SettingMaxFrameSize, sc.srv.maxReadFrameSize()}, + {SettingMaxConcurrentStreams, sc.advMaxStreams}, + {SettingMaxHeaderListSize, sc.maxHeaderListSize()}, + + // TODO: more actual settings, notably + // SettingInitialWindowSize, but then we also + // want to bump up the conn window size the + // same amount here right after the settings + }, + }) + sc.unackedSettings++ + + if err := sc.readPreface(); err != nil { + sc.condlogf(err, "http2: server: error reading preface from client %v: %v", sc.conn.RemoteAddr(), err) + return + } + // Now that we've got the preface, get us out of the + // "StateNew" state. We can't go directly to idle, though. + // Active means we read some data and anticipate a request. We'll + // do another Active when we get a HEADERS frame. 
+ sc.setConnState(http.StateActive) + sc.setConnState(http.StateIdle) + + go sc.readFrames() // closed by defer sc.conn.Close above + + settingsTimer := time.NewTimer(firstSettingsTimeout) + loopNum := 0 + for { + loopNum++ + select { + case wm := <-sc.wantWriteFrameCh: + sc.writeFrame(wm) + case res := <-sc.wroteFrameCh: + sc.wroteFrame(res) + case res := <-sc.readFrameCh: + if !sc.processFrameFromReader(res) { + return + } + res.readMore() + if settingsTimer.C != nil { + settingsTimer.Stop() + settingsTimer.C = nil + } + case m := <-sc.bodyReadCh: + sc.noteBodyRead(m.st, m.n) + case <-settingsTimer.C: + sc.logf("timeout waiting for SETTINGS frames from %v", sc.conn.RemoteAddr()) + return + case <-sc.shutdownTimerCh: + sc.vlogf("GOAWAY close timer fired; closing conn from %v", sc.conn.RemoteAddr()) + return + case fn := <-sc.testHookCh: + fn(loopNum) + } + } +} + +// readPreface reads the ClientPreface greeting from the peer +// or returns an error on timeout or an invalid greeting. +func (sc *serverConn) readPreface() error { + errc := make(chan error, 1) + go func() { + // Read the client preface + buf := make([]byte, len(ClientPreface)) + if _, err := io.ReadFull(sc.conn, buf); err != nil { + errc <- err + } else if !bytes.Equal(buf, clientPreface) { + errc <- fmt.Errorf("bogus greeting %q", buf) + } else { + errc <- nil + } + }() + timer := time.NewTimer(prefaceTimeout) // TODO: configurable on *Server? + defer timer.Stop() + select { + case <-timer.C: + return errors.New("timeout waiting for client preface") + case err := <-errc: + if err == nil { + if VerboseLogs { + sc.vlogf("http2: server: client %v said hello", sc.conn.RemoteAddr()) + } + } + return err + } +} + +var errChanPool = sync.Pool{ + New: func() interface{} { return make(chan error, 1) }, +} + +var writeDataPool = sync.Pool{ + New: func() interface{} { return new(writeData) }, +} + +// writeDataFromHandler writes DATA response frames from a handler on +// the given stream. +func (sc *serverConn) writeDataFromHandler(stream *stream, data []byte, endStream bool) error { + ch := errChanPool.Get().(chan error) + writeArg := writeDataPool.Get().(*writeData) + *writeArg = writeData{stream.id, data, endStream} + err := sc.writeFrameFromHandler(frameWriteMsg{ + write: writeArg, + stream: stream, + done: ch, + }) + if err != nil { + return err + } + var frameWriteDone bool // the frame write is done (successfully or not) + select { + case err = <-ch: + frameWriteDone = true + case <-sc.doneServing: + return errClientDisconnected + case <-stream.cw: + // If both ch and stream.cw were ready (as might + // happen on the final Write after an http.Handler + // ends), prefer the write result. Otherwise this + // might just be us successfully closing the stream. + // The writeFrameAsync and serve goroutines guarantee + // that the ch send will happen before the stream.cw + // close. + select { + case err = <-ch: + frameWriteDone = true + default: + return errStreamClosed + } + } + errChanPool.Put(ch) + if frameWriteDone { + writeDataPool.Put(writeArg) + } + return err +} + +// writeFrameFromHandler sends wm to sc.wantWriteFrameCh, but aborts +// if the connection has gone away. +// +// This must not be run from the serve goroutine itself, else it might +// deadlock writing to sc.wantWriteFrameCh (which is only mildly +// buffered and is read by serve itself). If you're on the serve +// goroutine, call writeFrame instead. 
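readPreface above runs the blocking read in its own goroutine and races it against a timer; a standalone sketch of the same shape (readWithTimeout is an illustrative name):

package main

import (
	"errors"
	"fmt"
	"io"
	"strings"
	"time"
)

// readWithTimeout reads exactly n bytes or gives up after d, without
// leaking the reading goroutine (errc is buffered for that reason).
func readWithTimeout(r io.Reader, n int, d time.Duration) ([]byte, error) {
	errc := make(chan error, 1)
	buf := make([]byte, n)
	go func() {
		_, err := io.ReadFull(r, buf)
		errc <- err
	}()
	timer := time.NewTimer(d)
	defer timer.Stop()
	select {
	case <-timer.C:
		return nil, errors.New("timeout waiting for data")
	case err := <-errc:
		return buf, err
	}
}

func main() {
	b, err := readWithTimeout(strings.NewReader("PRI * HTTP/2.0"), 3, time.Second)
	fmt.Printf("%q %v\n", b, err) // "PRI" <nil>
}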
+func (sc *serverConn) writeFrameFromHandler(wm frameWriteMsg) error { + sc.serveG.checkNotOn() // NOT + select { + case sc.wantWriteFrameCh <- wm: + return nil + case <-sc.doneServing: + // Serve loop is gone. + // Client has closed their connection to the server. + return errClientDisconnected + } +} + +// writeFrame schedules a frame to write and sends it if there's nothing +// already being written. +// +// There is no pushback here (the serve goroutine never blocks). It's +// the http.Handlers that block, waiting for their previous frames to +// make it onto the wire +// +// If you're not on the serve goroutine, use writeFrameFromHandler instead. +func (sc *serverConn) writeFrame(wm frameWriteMsg) { + sc.serveG.check() + sc.writeSched.add(wm) + sc.scheduleFrameWrite() +} + +// startFrameWrite starts a goroutine to write wm (in a separate +// goroutine since that might block on the network), and updates the +// serve goroutine's state about the world, updated from info in wm. +func (sc *serverConn) startFrameWrite(wm frameWriteMsg) { + sc.serveG.check() + if sc.writingFrame { + panic("internal error: can only be writing one frame at a time") + } + + st := wm.stream + if st != nil { + switch st.state { + case stateHalfClosedLocal: + panic("internal error: attempt to send frame on half-closed-local stream") + case stateClosed: + if st.sentReset || st.gotReset { + // Skip this frame. + sc.scheduleFrameWrite() + return + } + panic(fmt.Sprintf("internal error: attempt to send a write %v on a closed stream", wm)) + } + } + + sc.writingFrame = true + sc.needsFrameFlush = true + go sc.writeFrameAsync(wm) +} + +// errHandlerPanicked is the error given to any callers blocked in a read from +// Request.Body when the main goroutine panics. Since most handlers read in the +// the main ServeHTTP goroutine, this will show up rarely. +var errHandlerPanicked = errors.New("http2: handler panicked") + +// wroteFrame is called on the serve goroutine with the result of +// whatever happened on writeFrameAsync. +func (sc *serverConn) wroteFrame(res frameWriteResult) { + sc.serveG.check() + if !sc.writingFrame { + panic("internal error: expected to be already writing a frame") + } + sc.writingFrame = false + + wm := res.wm + st := wm.stream + + closeStream := endsStream(wm.write) + + if _, ok := wm.write.(handlerPanicRST); ok { + sc.closeStream(st, errHandlerPanicked) + } + + // Reply (if requested) to the blocked ServeHTTP goroutine. + if ch := wm.done; ch != nil { + select { + case ch <- res.err: + default: + panic(fmt.Sprintf("unbuffered done channel passed in for type %T", wm.write)) + } + } + wm.write = nil // prevent use (assume it's tainted after wm.done send) + + if closeStream { + if st == nil { + panic("internal error: expecting non-nil stream") + } + switch st.state { + case stateOpen: + // Here we would go to stateHalfClosedLocal in + // theory, but since our handler is done and + // the net/http package provides no mechanism + // for finishing writing to a ResponseWriter + // while still reading data (see possible TODO + // at top of this file), we go into closed + // state here anyway, after telling the peer + // we're hanging up on them. + st.state = stateHalfClosedLocal // won't last long, but necessary for closeStream via resetStream + errCancel := StreamError{st.id, ErrCodeCancel} + sc.resetStream(errCancel) + case stateHalfClosedRemote: + sc.closeStream(st, errHandlerComplete) + } + } + + sc.scheduleFrameWrite() +} + +// scheduleFrameWrite tickles the frame writing scheduler. 
+// +// If a frame is already being written, nothing happens. This will be called again +// when the frame is done being written. +// +// If a frame isn't being written we need to send one, the best frame +// to send is selected, preferring first things that aren't +// stream-specific (e.g. ACKing settings), and then finding the +// highest priority stream. +// +// If a frame isn't being written and there's nothing else to send, we +// flush the write buffer. +func (sc *serverConn) scheduleFrameWrite() { + sc.serveG.check() + if sc.writingFrame { + return + } + if sc.needToSendGoAway { + sc.needToSendGoAway = false + sc.startFrameWrite(frameWriteMsg{ + write: &writeGoAway{ + maxStreamID: sc.maxStreamID, + code: sc.goAwayCode, + }, + }) + return + } + if sc.needToSendSettingsAck { + sc.needToSendSettingsAck = false + sc.startFrameWrite(frameWriteMsg{write: writeSettingsAck{}}) + return + } + if !sc.inGoAway { + if wm, ok := sc.writeSched.take(); ok { + sc.startFrameWrite(wm) + return + } + } + if sc.needsFrameFlush { + sc.startFrameWrite(frameWriteMsg{write: flushFrameWriter{}}) + sc.needsFrameFlush = false // after startFrameWrite, since it sets this true + return + } +} + +func (sc *serverConn) goAway(code ErrCode) { + sc.serveG.check() + if sc.inGoAway { + return + } + if code != ErrCodeNo { + sc.shutDownIn(250 * time.Millisecond) + } else { + // TODO: configurable + sc.shutDownIn(1 * time.Second) + } + sc.inGoAway = true + sc.needToSendGoAway = true + sc.goAwayCode = code + sc.scheduleFrameWrite() +} + +func (sc *serverConn) shutDownIn(d time.Duration) { + sc.serveG.check() + sc.shutdownTimer = time.NewTimer(d) + sc.shutdownTimerCh = sc.shutdownTimer.C +} + +func (sc *serverConn) resetStream(se StreamError) { + sc.serveG.check() + sc.writeFrame(frameWriteMsg{write: se}) + if st, ok := sc.streams[se.StreamID]; ok { + st.sentReset = true + sc.closeStream(st, se) + } +} + +// processFrameFromReader processes the serve loop's read from readFrameCh from the +// frame-reading goroutine. +// processFrameFromReader returns whether the connection should be kept open. +func (sc *serverConn) processFrameFromReader(res readFrameResult) bool { + sc.serveG.check() + err := res.err + if err != nil { + if err == ErrFrameTooLarge { + sc.goAway(ErrCodeFrameSize) + return true // goAway will close the loop + } + clientGone := err == io.EOF || err == io.ErrUnexpectedEOF || isClosedConnError(err) + if clientGone { + // TODO: could we also get into this state if + // the peer does a half close + // (e.g. CloseWrite) because they're done + // sending frames but they're still wanting + // our open replies? Investigate. + // TODO: add CloseWrite to crypto/tls.Conn first + // so we have a way to test this? I suppose + // just for testing we could have a non-TLS mode. 
+ return false + } + } else { + f := res.f + if VerboseLogs { + sc.vlogf("http2: server read frame %v", summarizeFrame(f)) + } + err = sc.processFrame(f) + if err == nil { + return true + } + } + + switch ev := err.(type) { + case StreamError: + sc.resetStream(ev) + return true + case goAwayFlowError: + sc.goAway(ErrCodeFlowControl) + return true + case ConnectionError: + sc.logf("http2: server connection error from %v: %v", sc.conn.RemoteAddr(), ev) + sc.goAway(ErrCode(ev)) + return true // goAway will handle shutdown + default: + if res.err != nil { + sc.vlogf("http2: server closing client connection; error reading frame from client %s: %v", sc.conn.RemoteAddr(), err) + } else { + sc.logf("http2: server closing client connection: %v", err) + } + return false + } +} + +func (sc *serverConn) processFrame(f Frame) error { + sc.serveG.check() + + // First frame received must be SETTINGS. + if !sc.sawFirstSettings { + if _, ok := f.(*SettingsFrame); !ok { + return ConnectionError(ErrCodeProtocol) + } + sc.sawFirstSettings = true + } + + switch f := f.(type) { + case *SettingsFrame: + return sc.processSettings(f) + case *MetaHeadersFrame: + return sc.processHeaders(f) + case *WindowUpdateFrame: + return sc.processWindowUpdate(f) + case *PingFrame: + return sc.processPing(f) + case *DataFrame: + return sc.processData(f) + case *RSTStreamFrame: + return sc.processResetStream(f) + case *PriorityFrame: + return sc.processPriority(f) + case *PushPromiseFrame: + // A client cannot push. Thus, servers MUST treat the receipt of a PUSH_PROMISE + // frame as a connection error (Section 5.4.1) of type PROTOCOL_ERROR. + return ConnectionError(ErrCodeProtocol) + default: + sc.vlogf("http2: server ignoring frame: %v", f.Header()) + return nil + } +} + +func (sc *serverConn) processPing(f *PingFrame) error { + sc.serveG.check() + if f.IsAck() { + // 6.7 PING: " An endpoint MUST NOT respond to PING frames + // containing this flag." + return nil + } + if f.StreamID != 0 { + // "PING frames are not associated with any individual + // stream. If a PING frame is received with a stream + // identifier field value other than 0x0, the recipient MUST + // respond with a connection error (Section 5.4.1) of type + // PROTOCOL_ERROR." + return ConnectionError(ErrCodeProtocol) + } + sc.writeFrame(frameWriteMsg{write: writePingAck{f}}) + return nil +} + +func (sc *serverConn) processWindowUpdate(f *WindowUpdateFrame) error { + sc.serveG.check() + switch { + case f.StreamID != 0: // stream-level flow control + st := sc.streams[f.StreamID] + if st == nil { + // "WINDOW_UPDATE can be sent by a peer that has sent a + // frame bearing the END_STREAM flag. This means that a + // receiver could receive a WINDOW_UPDATE frame on a "half + // closed (remote)" or "closed" stream. A receiver MUST + // NOT treat this as an error, see Section 5.1." + return nil + } + if !st.flow.add(int32(f.Increment)) { + return StreamError{f.StreamID, ErrCodeFlowControl} + } + default: // connection-level flow control + if !sc.flow.add(int32(f.Increment)) { + return goAwayFlowError{} + } + } + sc.scheduleFrameWrite() + return nil +} + +func (sc *serverConn) processResetStream(f *RSTStreamFrame) error { + sc.serveG.check() + + state, st := sc.state(f.StreamID) + if state == stateIdle { + // 6.4 "RST_STREAM frames MUST NOT be sent for a + // stream in the "idle" state. If a RST_STREAM frame + // identifying an idle stream is received, the + // recipient MUST treat this as a connection error + // (Section 5.4.1) of type PROTOCOL_ERROR. 
+ return ConnectionError(ErrCodeProtocol) + } + if st != nil { + st.gotReset = true + sc.closeStream(st, StreamError{f.StreamID, f.ErrCode}) + } + return nil +} + +func (sc *serverConn) closeStream(st *stream, err error) { + sc.serveG.check() + if st.state == stateIdle || st.state == stateClosed { + panic(fmt.Sprintf("invariant; can't close stream in state %v", st.state)) + } + st.state = stateClosed + sc.curOpenStreams-- + if sc.curOpenStreams == 0 { + sc.setConnState(http.StateIdle) + } + delete(sc.streams, st.id) + if p := st.body; p != nil { + p.CloseWithError(err) + } + st.cw.Close() // signals Handler's CloseNotifier, unblocks writes, etc + sc.writeSched.forgetStream(st.id) + if st.reqBuf != nil { + // Stash this request body buffer (64k) away for reuse + // by a future POST/PUT/etc. + // + // TODO(bradfitz): share on the server? sync.Pool? + // Server requires locks and might hurt contention. + // sync.Pool might work, or might be worse, depending + // on goroutine CPU migrations. (get and put on + // separate CPUs). Maybe a mix of strategies. But + // this is an easy win for now. + sc.freeRequestBodyBuf = st.reqBuf + } +} + +func (sc *serverConn) processSettings(f *SettingsFrame) error { + sc.serveG.check() + if f.IsAck() { + sc.unackedSettings-- + if sc.unackedSettings < 0 { + // Why is the peer ACKing settings we never sent? + // The spec doesn't mention this case, but + // hang up on them anyway. + return ConnectionError(ErrCodeProtocol) + } + return nil + } + if err := f.ForeachSetting(sc.processSetting); err != nil { + return err + } + sc.needToSendSettingsAck = true + sc.scheduleFrameWrite() + return nil +} + +func (sc *serverConn) processSetting(s Setting) error { + sc.serveG.check() + if err := s.Valid(); err != nil { + return err + } + if VerboseLogs { + sc.vlogf("http2: server processing setting %v", s) + } + switch s.ID { + case SettingHeaderTableSize: + sc.headerTableSize = s.Val + sc.hpackEncoder.SetMaxDynamicTableSize(s.Val) + case SettingEnablePush: + sc.pushEnabled = s.Val != 0 + case SettingMaxConcurrentStreams: + sc.clientMaxStreams = s.Val + case SettingInitialWindowSize: + return sc.processSettingInitialWindowSize(s.Val) + case SettingMaxFrameSize: + sc.writeSched.maxFrameSize = s.Val + case SettingMaxHeaderListSize: + sc.peerMaxHeaderListSize = s.Val + default: + // Unknown setting: "An endpoint that receives a SETTINGS + // frame with any unknown or unsupported identifier MUST + // ignore that setting." + if VerboseLogs { + sc.vlogf("http2: server ignoring unknown setting %v", s) + } + } + return nil +} + +func (sc *serverConn) processSettingInitialWindowSize(val uint32) error { + sc.serveG.check() + // Note: val already validated to be within range by + // processSetting's Valid call. + + // "A SETTINGS frame can alter the initial flow control window + // size for all current streams. When the value of + // SETTINGS_INITIAL_WINDOW_SIZE changes, a receiver MUST + // adjust the size of all stream flow control windows that it + // maintains by the difference between the new value and the + // old value." + old := sc.initialWindowSize + sc.initialWindowSize = int32(val) + growth := sc.initialWindowSize - old // may be negative + for _, st := range sc.streams { + if !st.flow.add(growth) { + // 6.9.2 Initial Flow Control Window Size + // "An endpoint MUST treat a change to + // SETTINGS_INITIAL_WINDOW_SIZE that causes any flow + // control window to exceed the maximum size as a + // connection error (Section 5.4.1) of type + // FLOW_CONTROL_ERROR." 
+ return ConnectionError(ErrCodeFlowControl) + } + } + return nil +} + +func (sc *serverConn) processData(f *DataFrame) error { + sc.serveG.check() + // "If a DATA frame is received whose stream is not in "open" + // or "half closed (local)" state, the recipient MUST respond + // with a stream error (Section 5.4.2) of type STREAM_CLOSED." + id := f.Header().StreamID + st, ok := sc.streams[id] + if !ok || st.state != stateOpen || st.gotTrailerHeader { + // This includes sending a RST_STREAM if the stream is + // in stateHalfClosedLocal (which currently means that + // the http.Handler returned, so it's done reading & + // done writing). Try to stop the client from sending + // more DATA. + return StreamError{id, ErrCodeStreamClosed} + } + if st.body == nil { + panic("internal error: should have a body in this state") + } + data := f.Data() + + // Sender sending more than they'd declared? + if st.declBodyBytes != -1 && st.bodyBytes+int64(len(data)) > st.declBodyBytes { + st.body.CloseWithError(fmt.Errorf("sender tried to send more than declared Content-Length of %d bytes", st.declBodyBytes)) + return StreamError{id, ErrCodeStreamClosed} + } + if len(data) > 0 { + // Check whether the client has flow control quota. + if int(st.inflow.available()) < len(data) { + return StreamError{id, ErrCodeFlowControl} + } + st.inflow.take(int32(len(data))) + wrote, err := st.body.Write(data) + if err != nil { + return StreamError{id, ErrCodeStreamClosed} + } + if wrote != len(data) { + panic("internal error: bad Writer") + } + st.bodyBytes += int64(len(data)) + } + if f.StreamEnded() { + st.endStream() + } + return nil +} + +// endStream closes a Request.Body's pipe. It is called when a DATA +// frame says a request body is over (or after trailers). +func (st *stream) endStream() { + sc := st.sc + sc.serveG.check() + + if st.declBodyBytes != -1 && st.declBodyBytes != st.bodyBytes { + st.body.CloseWithError(fmt.Errorf("request declared a Content-Length of %d but only wrote %d bytes", + st.declBodyBytes, st.bodyBytes)) + } else { + st.body.closeWithErrorAndCode(io.EOF, st.copyTrailersToHandlerRequest) + st.body.CloseWithError(io.EOF) + } + st.state = stateHalfClosedRemote +} + +// copyTrailersToHandlerRequest is run in the Handler's goroutine in +// its Request.Body.Read just before it gets io.EOF. +func (st *stream) copyTrailersToHandlerRequest() { + for k, vv := range st.trailer { + if _, ok := st.reqTrailer[k]; ok { + // Only copy it over it was pre-declared. + st.reqTrailer[k] = vv + } + } +} + +func (sc *serverConn) processHeaders(f *MetaHeadersFrame) error { + sc.serveG.check() + id := f.Header().StreamID + if sc.inGoAway { + // Ignore. + return nil + } + // http://http2.github.io/http2-spec/#rfc.section.5.1.1 + // Streams initiated by a client MUST use odd-numbered stream + // identifiers. [...] An endpoint that receives an unexpected + // stream identifier MUST respond with a connection error + // (Section 5.4.1) of type PROTOCOL_ERROR. + if id%2 != 1 { + return ConnectionError(ErrCodeProtocol) + } + // A HEADERS frame can be used to create a new stream or + // send a trailer for an open one. If we already have a stream + // open, let it process its own HEADERS frame (trailers at this + // point, if it's valid). + st := sc.streams[f.Header().StreamID] + if st != nil { + return st.processTrailerHeaders(f) + } + + // [...] The identifier of a newly established stream MUST be + // numerically greater than all streams that the initiating + // endpoint has opened or reserved. [...] 
An endpoint that + // receives an unexpected stream identifier MUST respond with + // a connection error (Section 5.4.1) of type PROTOCOL_ERROR. + if id <= sc.maxStreamID { + return ConnectionError(ErrCodeProtocol) + } + sc.maxStreamID = id + + st = &stream{ + sc: sc, + id: id, + state: stateOpen, + } + if f.StreamEnded() { + st.state = stateHalfClosedRemote + } + st.cw.Init() + + st.flow.conn = &sc.flow // link to conn-level counter + st.flow.add(sc.initialWindowSize) + st.inflow.conn = &sc.inflow // link to conn-level counter + st.inflow.add(initialWindowSize) // TODO: update this when we send a higher initial window size in the initial settings + + sc.streams[id] = st + if f.HasPriority() { + adjustStreamPriority(sc.streams, st.id, f.Priority) + } + sc.curOpenStreams++ + if sc.curOpenStreams == 1 { + sc.setConnState(http.StateActive) + } + if sc.curOpenStreams > sc.advMaxStreams { + // "Endpoints MUST NOT exceed the limit set by their + // peer. An endpoint that receives a HEADERS frame + // that causes their advertised concurrent stream + // limit to be exceeded MUST treat this as a stream + // error (Section 5.4.2) of type PROTOCOL_ERROR or + // REFUSED_STREAM." + if sc.unackedSettings == 0 { + // They should know better. + return StreamError{st.id, ErrCodeProtocol} + } + // Assume it's a network race, where they just haven't + // received our last SETTINGS update. But actually + // this can't happen yet, because we don't yet provide + // a way for users to adjust server parameters at + // runtime. + return StreamError{st.id, ErrCodeRefusedStream} + } + + rw, req, err := sc.newWriterAndRequest(st, f) + if err != nil { + return err + } + st.reqTrailer = req.Trailer + if st.reqTrailer != nil { + st.trailer = make(http.Header) + } + st.body = req.Body.(*requestBody).pipe // may be nil + st.declBodyBytes = req.ContentLength + + handler := sc.handler.ServeHTTP + if f.Truncated { + // Their header list was too long. Send a 431 error. + handler = handleHeaderListTooLong + } + + go sc.runHandler(rw, req, handler) + return nil +} + +func (st *stream) processTrailerHeaders(f *MetaHeadersFrame) error { + sc := st.sc + sc.serveG.check() + if st.gotTrailerHeader { + return ConnectionError(ErrCodeProtocol) + } + st.gotTrailerHeader = true + if !f.StreamEnded() { + return StreamError{st.id, ErrCodeProtocol} + } + + if len(f.PseudoFields()) > 0 { + return StreamError{st.id, ErrCodeProtocol} + } + if st.trailer != nil { + for _, hf := range f.RegularFields() { + key := sc.canonicalHeader(hf.Name) + st.trailer[key] = append(st.trailer[key], hf.Value) + } + } + st.endStream() + return nil +} + +func (sc *serverConn) processPriority(f *PriorityFrame) error { + adjustStreamPriority(sc.streams, f.StreamID, f.PriorityParam) + return nil +} + +func adjustStreamPriority(streams map[uint32]*stream, streamID uint32, priority PriorityParam) { + st, ok := streams[streamID] + if !ok { + // TODO: not quite correct (this streamID might + // already exist in the dep tree, but be closed), but + // close enough for now. + return + } + st.weight = priority.Weight + parent := streams[priority.StreamDep] // might be nil + if parent == st { + // if client tries to set this stream to be the parent of itself + // ignore and keep going + return + } + + // section 5.3.3: If a stream is made dependent on one of its + // own dependencies, the formerly dependent stream is first + // moved to be dependent on the reprioritized stream's previous + // parent. The moved dependency retains its weight. 
+ for piter := parent; piter != nil; piter = piter.parent { + if piter == st { + parent.parent = st.parent + break + } + } + st.parent = parent + if priority.Exclusive && (st.parent != nil || priority.StreamDep == 0) { + for _, openStream := range streams { + if openStream != st && openStream.parent == st.parent { + openStream.parent = st + } + } + } +} + +func (sc *serverConn) newWriterAndRequest(st *stream, f *MetaHeadersFrame) (*responseWriter, *http.Request, error) { + sc.serveG.check() + + method := f.PseudoValue("method") + path := f.PseudoValue("path") + scheme := f.PseudoValue("scheme") + authority := f.PseudoValue("authority") + + isConnect := method == "CONNECT" + if isConnect { + if path != "" || scheme != "" || authority == "" { + return nil, nil, StreamError{f.StreamID, ErrCodeProtocol} + } + } else if method == "" || path == "" || + (scheme != "https" && scheme != "http") { + // See 8.1.2.6 Malformed Requests and Responses: + // + // Malformed requests or responses that are detected + // MUST be treated as a stream error (Section 5.4.2) + // of type PROTOCOL_ERROR." + // + // 8.1.2.3 Request Pseudo-Header Fields + // "All HTTP/2 requests MUST include exactly one valid + // value for the :method, :scheme, and :path + // pseudo-header fields" + return nil, nil, StreamError{f.StreamID, ErrCodeProtocol} + } + + bodyOpen := !f.StreamEnded() + if method == "HEAD" && bodyOpen { + // HEAD requests can't have bodies + return nil, nil, StreamError{f.StreamID, ErrCodeProtocol} + } + var tlsState *tls.ConnectionState // nil if not scheme https + + if scheme == "https" { + tlsState = sc.tlsState + } + + header := make(http.Header) + for _, hf := range f.RegularFields() { + header.Add(sc.canonicalHeader(hf.Name), hf.Value) + } + + if authority == "" { + authority = header.Get("Host") + } + needsContinue := header.Get("Expect") == "100-continue" + if needsContinue { + header.Del("Expect") + } + // Merge Cookie headers into one "; "-delimited value. + if cookies := header["Cookie"]; len(cookies) > 1 { + header.Set("Cookie", strings.Join(cookies, "; ")) + } + + // Setup Trailers + var trailer http.Header + for _, v := range header["Trailer"] { + for _, key := range strings.Split(v, ",") { + key = http.CanonicalHeaderKey(strings.TrimSpace(key)) + switch key { + case "Transfer-Encoding", "Trailer", "Content-Length": + // Bogus. (copy of http1 rules) + // Ignore. 
+ default: + if trailer == nil { + trailer = make(http.Header) + } + trailer[key] = nil + } + } + } + delete(header, "Trailer") + + body := &requestBody{ + conn: sc, + stream: st, + needsContinue: needsContinue, + } + var url_ *url.URL + var requestURI string + if isConnect { + url_ = &url.URL{Host: authority} + requestURI = authority // mimic HTTP/1 server behavior + } else { + var err error + url_, err = url.ParseRequestURI(path) + if err != nil { + return nil, nil, StreamError{f.StreamID, ErrCodeProtocol} + } + requestURI = path + } + req := &http.Request{ + Method: method, + URL: url_, + RemoteAddr: sc.remoteAddrStr, + Header: header, + RequestURI: requestURI, + Proto: "HTTP/2.0", + ProtoMajor: 2, + ProtoMinor: 0, + TLS: tlsState, + Host: authority, + Body: body, + Trailer: trailer, + } + if bodyOpen { + st.reqBuf = sc.getRequestBodyBuf() + body.pipe = &pipe{ + b: &fixedBuffer{buf: st.reqBuf}, + } + + if vv, ok := header["Content-Length"]; ok { + req.ContentLength, _ = strconv.ParseInt(vv[0], 10, 64) + } else { + req.ContentLength = -1 + } + } + + rws := responseWriterStatePool.Get().(*responseWriterState) + bwSave := rws.bw + *rws = responseWriterState{} // zero all the fields + rws.conn = sc + rws.bw = bwSave + rws.bw.Reset(chunkWriter{rws}) + rws.stream = st + rws.req = req + rws.body = body + + rw := &responseWriter{rws: rws} + return rw, req, nil +} + +func (sc *serverConn) getRequestBodyBuf() []byte { + sc.serveG.check() + if buf := sc.freeRequestBodyBuf; buf != nil { + sc.freeRequestBodyBuf = nil + return buf + } + return make([]byte, initialWindowSize) +} + +// Run on its own goroutine. +func (sc *serverConn) runHandler(rw *responseWriter, req *http.Request, handler func(http.ResponseWriter, *http.Request)) { + didPanic := true + defer func() { + if didPanic { + e := recover() + // Same as net/http: + const size = 64 << 10 + buf := make([]byte, size) + buf = buf[:runtime.Stack(buf, false)] + sc.writeFrameFromHandler(frameWriteMsg{ + write: handlerPanicRST{rw.rws.stream.id}, + stream: rw.rws.stream, + }) + sc.logf("http2: panic serving %v: %v\n%s", sc.conn.RemoteAddr(), e, buf) + return + } + rw.handlerDone() + }() + handler(rw, req) + didPanic = false +} + +func handleHeaderListTooLong(w http.ResponseWriter, r *http.Request) { + // 10.5.1 Limits on Header Block Size: + // .. "A server that receives a larger header block than it is + // willing to handle can send an HTTP 431 (Request Header Fields Too + // Large) status code" + const statusRequestHeaderFieldsTooLarge = 431 // only in Go 1.6+ + w.WriteHeader(statusRequestHeaderFieldsTooLarge) + io.WriteString(w, "

<h1>HTTP Error 431</h1><p>Request Header Field(s) Too Large</p>
") +} + +// called from handler goroutines. +// h may be nil. +func (sc *serverConn) writeHeaders(st *stream, headerData *writeResHeaders) error { + sc.serveG.checkNotOn() // NOT on + var errc chan error + if headerData.h != nil { + // If there's a header map (which we don't own), so we have to block on + // waiting for this frame to be written, so an http.Flush mid-handler + // writes out the correct value of keys, before a handler later potentially + // mutates it. + errc = errChanPool.Get().(chan error) + } + if err := sc.writeFrameFromHandler(frameWriteMsg{ + write: headerData, + stream: st, + done: errc, + }); err != nil { + return err + } + if errc != nil { + select { + case err := <-errc: + errChanPool.Put(errc) + return err + case <-sc.doneServing: + return errClientDisconnected + case <-st.cw: + return errStreamClosed + } + } + return nil +} + +// called from handler goroutines. +func (sc *serverConn) write100ContinueHeaders(st *stream) { + sc.writeFrameFromHandler(frameWriteMsg{ + write: write100ContinueHeadersFrame{st.id}, + stream: st, + }) +} + +// A bodyReadMsg tells the server loop that the http.Handler read n +// bytes of the DATA from the client on the given stream. +type bodyReadMsg struct { + st *stream + n int +} + +// called from handler goroutines. +// Notes that the handler for the given stream ID read n bytes of its body +// and schedules flow control tokens to be sent. +func (sc *serverConn) noteBodyReadFromHandler(st *stream, n int) { + sc.serveG.checkNotOn() // NOT on + select { + case sc.bodyReadCh <- bodyReadMsg{st, n}: + case <-sc.doneServing: + } +} + +func (sc *serverConn) noteBodyRead(st *stream, n int) { + sc.serveG.check() + sc.sendWindowUpdate(nil, n) // conn-level + if st.state != stateHalfClosedRemote && st.state != stateClosed { + // Don't send this WINDOW_UPDATE if the stream is closed + // remotely. + sc.sendWindowUpdate(st, n) + } +} + +// st may be nil for conn-level +func (sc *serverConn) sendWindowUpdate(st *stream, n int) { + sc.serveG.check() + // "The legal range for the increment to the flow control + // window is 1 to 2^31-1 (2,147,483,647) octets." + // A Go Read call on 64-bit machines could in theory read + // a larger Read than this. Very unlikely, but we handle it here + // rather than elsewhere for now. 
+ const maxUint31 = 1<<31 - 1 + for n >= maxUint31 { + sc.sendWindowUpdate32(st, maxUint31) + n -= maxUint31 + } + sc.sendWindowUpdate32(st, int32(n)) +} + +// st may be nil for conn-level +func (sc *serverConn) sendWindowUpdate32(st *stream, n int32) { + sc.serveG.check() + if n == 0 { + return + } + if n < 0 { + panic("negative update") + } + var streamID uint32 + if st != nil { + streamID = st.id + } + sc.writeFrame(frameWriteMsg{ + write: writeWindowUpdate{streamID: streamID, n: uint32(n)}, + stream: st, + }) + var ok bool + if st == nil { + ok = sc.inflow.add(n) + } else { + ok = st.inflow.add(n) + } + if !ok { + panic("internal error; sent too many window updates without decrements?") + } +} + +type requestBody struct { + stream *stream + conn *serverConn + closed bool + pipe *pipe // non-nil if we have a HTTP entity message body + needsContinue bool // need to send a 100-continue +} + +func (b *requestBody) Close() error { + if b.pipe != nil { + b.pipe.CloseWithError(errClosedBody) + } + b.closed = true + return nil +} + +func (b *requestBody) Read(p []byte) (n int, err error) { + if b.needsContinue { + b.needsContinue = false + b.conn.write100ContinueHeaders(b.stream) + } + if b.pipe == nil { + return 0, io.EOF + } + n, err = b.pipe.Read(p) + if n > 0 { + b.conn.noteBodyReadFromHandler(b.stream, n) + } + return +} + +// responseWriter is the http.ResponseWriter implementation. It's +// intentionally small (1 pointer wide) to minimize garbage. The +// responseWriterState pointer inside is zeroed at the end of a +// request (in handlerDone) and calls on the responseWriter thereafter +// simply crash (caller's mistake), but the much larger responseWriterState +// and buffers are reused between multiple requests. +type responseWriter struct { + rws *responseWriterState +} + +// Optional http.ResponseWriter interfaces implemented. +var ( + _ http.CloseNotifier = (*responseWriter)(nil) + _ http.Flusher = (*responseWriter)(nil) + _ stringWriter = (*responseWriter)(nil) +) + +type responseWriterState struct { + // immutable within a request: + stream *stream + req *http.Request + body *requestBody // to close at end of request, if DATA frames didn't + conn *serverConn + + // TODO: adjust buffer writing sizes based on server config, frame size updates from peer, etc + bw *bufio.Writer // writing to a chunkWriter{this *responseWriterState} + + // mutated by http.Handler goroutine: + handlerHeader http.Header // nil until called + snapHeader http.Header // snapshot of handlerHeader at WriteHeader time + trailers []string // set in writeChunk + status int // status code passed to WriteHeader + wroteHeader bool // WriteHeader called (explicitly or implicitly). Not necessarily sent to user yet. + sentHeader bool // have we sent the header frame? + handlerDone bool // handler has finished + + sentContentLen int64 // non-zero if handler set a Content-Length header + wroteBytes int64 + + closeNotifierMu sync.Mutex // guards closeNotifierCh + closeNotifierCh chan bool // nil until first used +} + +type chunkWriter struct{ rws *responseWriterState } + +func (cw chunkWriter) Write(p []byte) (n int, err error) { return cw.rws.writeChunk(p) } + +func (rws *responseWriterState) hasTrailers() bool { return len(rws.trailers) != 0 } + +// declareTrailer is called for each Trailer header when the +// response header is written. It notes that a header will need to be +// written in the trailers at the end of the response. 
+func (rws *responseWriterState) declareTrailer(k string) { + k = http.CanonicalHeaderKey(k) + switch k { + case "Transfer-Encoding", "Content-Length", "Trailer": + // Forbidden by RFC 2616 14.40. + return + } + if !strSliceContains(rws.trailers, k) { + rws.trailers = append(rws.trailers, k) + } +} + +// writeChunk writes chunks from the bufio.Writer. But because +// bufio.Writer may bypass its chunking, sometimes p may be +// arbitrarily large. +// +// writeChunk is also responsible (on the first chunk) for sending the +// HEADER response. +func (rws *responseWriterState) writeChunk(p []byte) (n int, err error) { + if !rws.wroteHeader { + rws.writeHeader(200) + } + + isHeadResp := rws.req.Method == "HEAD" + if !rws.sentHeader { + rws.sentHeader = true + var ctype, clen string + if clen = rws.snapHeader.Get("Content-Length"); clen != "" { + rws.snapHeader.Del("Content-Length") + clen64, err := strconv.ParseInt(clen, 10, 64) + if err == nil && clen64 >= 0 { + rws.sentContentLen = clen64 + } else { + clen = "" + } + } + if clen == "" && rws.handlerDone && bodyAllowedForStatus(rws.status) && (len(p) > 0 || !isHeadResp) { + clen = strconv.Itoa(len(p)) + } + _, hasContentType := rws.snapHeader["Content-Type"] + if !hasContentType && bodyAllowedForStatus(rws.status) { + ctype = http.DetectContentType(p) + } + var date string + if _, ok := rws.snapHeader["Date"]; !ok { + // TODO(bradfitz): be faster here, like net/http? measure. + date = time.Now().UTC().Format(http.TimeFormat) + } + + for _, v := range rws.snapHeader["Trailer"] { + foreachHeaderElement(v, rws.declareTrailer) + } + + endStream := (rws.handlerDone && !rws.hasTrailers() && len(p) == 0) || isHeadResp + err = rws.conn.writeHeaders(rws.stream, &writeResHeaders{ + streamID: rws.stream.id, + httpResCode: rws.status, + h: rws.snapHeader, + endStream: endStream, + contentType: ctype, + contentLength: clen, + date: date, + }) + if err != nil { + return 0, err + } + if endStream { + return 0, nil + } + } + if isHeadResp { + return len(p), nil + } + if len(p) == 0 && !rws.handlerDone { + return 0, nil + } + + if rws.handlerDone { + rws.promoteUndeclaredTrailers() + } + + endStream := rws.handlerDone && !rws.hasTrailers() + if len(p) > 0 || endStream { + // only send a 0 byte DATA frame if we're ending the stream. + if err := rws.conn.writeDataFromHandler(rws.stream, p, endStream); err != nil { + return 0, err + } + } + + if rws.handlerDone && rws.hasTrailers() { + err = rws.conn.writeHeaders(rws.stream, &writeResHeaders{ + streamID: rws.stream.id, + h: rws.handlerHeader, + trailers: rws.trailers, + endStream: true, + }) + return len(p), err + } + return len(p), nil +} + +// TrailerPrefix is a magic prefix for ResponseWriter.Header map keys +// that, if present, signals that the map entry is actually for +// the response trailers, and not the response headers. The prefix +// is stripped after the ServeHTTP call finishes and the values are +// sent in the trailers. +// +// This mechanism is intended only for trailers that are not known +// prior to the headers being written. If the set of trailers is fixed +// or known before the header is written, the normal Go trailers mechanism +// is preferred: +// https://golang.org/pkg/net/http/#ResponseWriter +// https://golang.org/pkg/net/http/#example_ResponseWriter_trailers +const TrailerPrefix = "Trailer:" + +// promoteUndeclaredTrailers permits http.Handlers to set trailers +// after the header has already been flushed. 
Because the Go +// ResponseWriter interface has no way to set Trailers (only the +// Header), and because we didn't want to expand the ResponseWriter +// interface, and because nobody used trailers, and because RFC 2616 +// says you SHOULD (but not must) predeclare any trailers in the +// header, the official ResponseWriter rules said trailers in Go must +// be predeclared, and then we reuse the same ResponseWriter.Header() +// map to mean both Headers and Trailers. When it's time to write the +// Trailers, we pick out the fields of Headers that were declared as +// trailers. That worked for a while, until we found the first major +// user of Trailers in the wild: gRPC (using them only over http2), +// and gRPC libraries permit setting trailers mid-stream without +// predeclarnig them. So: change of plans. We still permit the old +// way, but we also permit this hack: if a Header() key begins with +// "Trailer:", the suffix of that key is a Trailer. Because ':' is an +// invalid token byte anyway, there is no ambiguity. (And it's already +// filtered out) It's mildly hacky, but not terrible. +// +// This method runs after the Handler is done and promotes any Header +// fields to be trailers. +func (rws *responseWriterState) promoteUndeclaredTrailers() { + for k, vv := range rws.handlerHeader { + if !strings.HasPrefix(k, TrailerPrefix) { + continue + } + trailerKey := strings.TrimPrefix(k, TrailerPrefix) + rws.declareTrailer(trailerKey) + rws.handlerHeader[http.CanonicalHeaderKey(trailerKey)] = vv + } + + if len(rws.trailers) > 1 { + sorter := sorterPool.Get().(*sorter) + sorter.SortStrings(rws.trailers) + sorterPool.Put(sorter) + } +} + +func (w *responseWriter) Flush() { + rws := w.rws + if rws == nil { + panic("Header called after Handler finished") + } + if rws.bw.Buffered() > 0 { + if err := rws.bw.Flush(); err != nil { + // Ignore the error. The frame writer already knows. + return + } + } else { + // The bufio.Writer won't call chunkWriter.Write + // (writeChunk with zero bytes, so we have to do it + // ourselves to force the HTTP response header and/or + // final DATA frame (with END_STREAM) to be sent. 
+ rws.writeChunk(nil) + } +} + +func (w *responseWriter) CloseNotify() <-chan bool { + rws := w.rws + if rws == nil { + panic("CloseNotify called after Handler finished") + } + rws.closeNotifierMu.Lock() + ch := rws.closeNotifierCh + if ch == nil { + ch = make(chan bool, 1) + rws.closeNotifierCh = ch + go func() { + rws.stream.cw.Wait() // wait for close + ch <- true + }() + } + rws.closeNotifierMu.Unlock() + return ch +} + +func (w *responseWriter) Header() http.Header { + rws := w.rws + if rws == nil { + panic("Header called after Handler finished") + } + if rws.handlerHeader == nil { + rws.handlerHeader = make(http.Header) + } + return rws.handlerHeader +} + +func (w *responseWriter) WriteHeader(code int) { + rws := w.rws + if rws == nil { + panic("WriteHeader called after Handler finished") + } + rws.writeHeader(code) +} + +func (rws *responseWriterState) writeHeader(code int) { + if !rws.wroteHeader { + rws.wroteHeader = true + rws.status = code + if len(rws.handlerHeader) > 0 { + rws.snapHeader = cloneHeader(rws.handlerHeader) + } + } +} + +func cloneHeader(h http.Header) http.Header { + h2 := make(http.Header, len(h)) + for k, vv := range h { + vv2 := make([]string, len(vv)) + copy(vv2, vv) + h2[k] = vv2 + } + return h2 +} + +// The Life Of A Write is like this: +// +// * Handler calls w.Write or w.WriteString -> +// * -> rws.bw (*bufio.Writer) -> +// * (Handler migth call Flush) +// * -> chunkWriter{rws} +// * -> responseWriterState.writeChunk(p []byte) +// * -> responseWriterState.writeChunk (most of the magic; see comment there) +func (w *responseWriter) Write(p []byte) (n int, err error) { + return w.write(len(p), p, "") +} + +func (w *responseWriter) WriteString(s string) (n int, err error) { + return w.write(len(s), nil, s) +} + +// either dataB or dataS is non-zero. +func (w *responseWriter) write(lenData int, dataB []byte, dataS string) (n int, err error) { + rws := w.rws + if rws == nil { + panic("Write called after Handler finished") + } + if !rws.wroteHeader { + w.WriteHeader(200) + } + if !bodyAllowedForStatus(rws.status) { + return 0, http.ErrBodyNotAllowed + } + rws.wroteBytes += int64(len(dataB)) + int64(len(dataS)) // only one can be set + if rws.sentContentLen != 0 && rws.wroteBytes > rws.sentContentLen { + // TODO: send a RST_STREAM + return 0, errors.New("http2: handler wrote more than declared Content-Length") + } + + if dataB != nil { + return rws.bw.Write(dataB) + } else { + return rws.bw.WriteString(dataS) + } +} + +func (w *responseWriter) handlerDone() { + rws := w.rws + rws.handlerDone = true + w.Flush() + w.rws = nil + responseWriterStatePool.Put(rws) +} + +// foreachHeaderElement splits v according to the "#rule" construction +// in RFC 2616 section 2.1 and calls fn for each non-empty element. +func foreachHeaderElement(v string, fn func(string)) { + v = textproto.TrimString(v) + if v == "" { + return + } + if !strings.Contains(v, ",") { + fn(v) + return + } + for _, f := range strings.Split(v, ",") { + if f = textproto.TrimString(f); f != "" { + fn(f) + } + } +} diff --git a/vendor/github.com/docker/distribution/vendor/golang.org/x/net/http2/transport.go b/vendor/github.com/docker/distribution/vendor/golang.org/x/net/http2/transport.go new file mode 100644 index 000000000..7d558a4bd --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/golang.org/x/net/http2/transport.go @@ -0,0 +1,1666 @@ +// Copyright 2015 The Go Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Transport code. + +package http2 + +import ( + "bufio" + "bytes" + "compress/gzip" + "crypto/tls" + "errors" + "fmt" + "io" + "io/ioutil" + "log" + "net" + "net/http" + "sort" + "strconv" + "strings" + "sync" + "time" + + "golang.org/x/net/http2/hpack" +) + +const ( + // transportDefaultConnFlow is how many connection-level flow control + // tokens we give the server at start-up, past the default 64k. + transportDefaultConnFlow = 1 << 30 + + // transportDefaultStreamFlow is how many stream-level flow + // control tokens we announce to the peer, and how many bytes + // we buffer per stream. + transportDefaultStreamFlow = 4 << 20 + + // transportDefaultStreamMinRefresh is the minimum number of bytes we'll send + // a stream-level WINDOW_UPDATE for at a time. + transportDefaultStreamMinRefresh = 4 << 10 + + defaultUserAgent = "Go-http-client/2.0" +) + +// Transport is an HTTP/2 Transport. +// +// A Transport internally caches connections to servers. It is safe +// for concurrent use by multiple goroutines. +type Transport struct { + // DialTLS specifies an optional dial function for creating + // TLS connections for requests. + // + // If DialTLS is nil, tls.Dial is used. + // + // If the returned net.Conn has a ConnectionState method like tls.Conn, + // it will be used to set http.Response.TLS. + DialTLS func(network, addr string, cfg *tls.Config) (net.Conn, error) + + // TLSClientConfig specifies the TLS configuration to use with + // tls.Client. If nil, the default configuration is used. + TLSClientConfig *tls.Config + + // ConnPool optionally specifies an alternate connection pool to use. + // If nil, the default is used. + ConnPool ClientConnPool + + // DisableCompression, if true, prevents the Transport from + // requesting compression with an "Accept-Encoding: gzip" + // request header when the Request contains no existing + // Accept-Encoding value. If the Transport requests gzip on + // its own and gets a gzipped response, it's transparently + // decoded in the Response.Body. However, if the user + // explicitly requested gzip it is not automatically + // uncompressed. + DisableCompression bool + + // MaxHeaderListSize is the http2 SETTINGS_MAX_HEADER_LIST_SIZE to + // send in the initial settings frame. It is how many bytes + // of response headers are allow. Unlike the http2 spec, zero here + // means to use a default limit (currently 10MB). If you actually + // want to advertise an ulimited value to the peer, Transport + // interprets the highest possible value here (0xffffffff or 1<<32-1) + // to mean no limit. + MaxHeaderListSize uint32 + + // t1, if non-nil, is the standard library Transport using + // this transport. Its settings are used (but not its + // RoundTrip method, etc). + t1 *http.Transport + + connPoolOnce sync.Once + connPoolOrDef ClientConnPool // non-nil version of ConnPool +} + +func (t *Transport) maxHeaderListSize() uint32 { + if t.MaxHeaderListSize == 0 { + return 10 << 20 + } + if t.MaxHeaderListSize == 0xffffffff { + return 0 + } + return t.MaxHeaderListSize +} + +func (t *Transport) disableCompression() bool { + return t.DisableCompression || (t.t1 != nil && t.t1.DisableCompression) +} + +var errTransportVersion = errors.New("http2: ConfigureTransport is only supported starting at Go 1.6") + +// ConfigureTransport configures a net/http HTTP/1 Transport to use HTTP/2. 
+// It requires Go 1.6 or later and returns an error if the net/http package is too old +// or if t1 has already been HTTP/2-enabled. +func ConfigureTransport(t1 *http.Transport) error { + _, err := configureTransport(t1) // in configure_transport.go (go1.6) or not_go16.go + return err +} + +func (t *Transport) connPool() ClientConnPool { + t.connPoolOnce.Do(t.initConnPool) + return t.connPoolOrDef +} + +func (t *Transport) initConnPool() { + if t.ConnPool != nil { + t.connPoolOrDef = t.ConnPool + } else { + t.connPoolOrDef = &clientConnPool{t: t} + } +} + +// ClientConn is the state of a single HTTP/2 client connection to an +// HTTP/2 server. +type ClientConn struct { + t *Transport + tconn net.Conn // usually *tls.Conn, except specialized impls + tlsState *tls.ConnectionState // nil only for specialized impls + + // readLoop goroutine fields: + readerDone chan struct{} // closed on error + readerErr error // set before readerDone is closed + + mu sync.Mutex // guards following + cond *sync.Cond // hold mu; broadcast on flow/closed changes + flow flow // our conn-level flow control quota (cs.flow is per stream) + inflow flow // peer's conn-level flow control + closed bool + goAway *GoAwayFrame // if non-nil, the GoAwayFrame we received + streams map[uint32]*clientStream // client-initiated + nextStreamID uint32 + bw *bufio.Writer + br *bufio.Reader + fr *Framer + // Settings from peer: + maxFrameSize uint32 + maxConcurrentStreams uint32 + initialWindowSize uint32 + hbuf bytes.Buffer // HPACK encoder writes into this + henc *hpack.Encoder + freeBuf [][]byte + + wmu sync.Mutex // held while writing; acquire AFTER mu if holding both + werr error // first write error that has occurred +} + +// clientStream is the state for a single HTTP/2 stream. One of these +// is created for each Transport.RoundTrip call. +type clientStream struct { + cc *ClientConn + req *http.Request + ID uint32 + resc chan resAndError + bufPipe pipe // buffered pipe with the flow-controlled response payload + requestedGzip bool + + flow flow // guarded by cc.mu + inflow flow // guarded by cc.mu + bytesRemain int64 // -1 means unknown; owned by transportResponseBody.Read + readErr error // sticky read error; owned by transportResponseBody.Read + stopReqBody error // if non-nil, stop writing req body; guarded by cc.mu + + peerReset chan struct{} // closed on peer reset + resetErr error // populated before peerReset is closed + + done chan struct{} // closed when stream remove from cc.streams map; close calls guarded by cc.mu + + // owned by clientConnReadLoop: + pastHeaders bool // got first MetaHeadersFrame (actual headers) + pastTrailers bool // got optional second MetaHeadersFrame (trailers) + + trailer http.Header // accumulated trailers + resTrailer *http.Header // client's Response.Trailer +} + +// awaitRequestCancel runs in its own goroutine and waits for the user +// to either cancel a RoundTrip request (using the provided +// Request.Cancel channel), or for the request to be done (any way it +// might be removed from the cc.streams map: peer reset, successful +// completion, TCP connection breakage, etc) +func (cs *clientStream) awaitRequestCancel(cancel <-chan struct{}) { + if cancel == nil { + return + } + select { + case <-cancel: + cs.bufPipe.CloseWithError(errRequestCanceled) + cs.cc.writeStreamReset(cs.ID, ErrCodeCancel, nil) + case <-cs.done: + } +} + +// checkReset reports any error sent in a RST_STREAM frame by the +// server. 
+func (cs *clientStream) checkReset() error { + select { + case <-cs.peerReset: + return cs.resetErr + default: + return nil + } +} + +func (cs *clientStream) abortRequestBodyWrite(err error) { + if err == nil { + panic("nil error") + } + cc := cs.cc + cc.mu.Lock() + cs.stopReqBody = err + cc.cond.Broadcast() + cc.mu.Unlock() +} + +type stickyErrWriter struct { + w io.Writer + err *error +} + +func (sew stickyErrWriter) Write(p []byte) (n int, err error) { + if *sew.err != nil { + return 0, *sew.err + } + n, err = sew.w.Write(p) + *sew.err = err + return +} + +var ErrNoCachedConn = errors.New("http2: no cached connection was available") + +// RoundTripOpt are options for the Transport.RoundTripOpt method. +type RoundTripOpt struct { + // OnlyCachedConn controls whether RoundTripOpt may + // create a new TCP connection. If set true and + // no cached connection is available, RoundTripOpt + // will return ErrNoCachedConn. + OnlyCachedConn bool +} + +func (t *Transport) RoundTrip(req *http.Request) (*http.Response, error) { + return t.RoundTripOpt(req, RoundTripOpt{}) +} + +// authorityAddr returns a given authority (a host/IP, or host:port / ip:port) +// and returns a host:port. The port 443 is added if needed. +func authorityAddr(authority string) (addr string) { + if _, _, err := net.SplitHostPort(authority); err == nil { + return authority + } + return net.JoinHostPort(authority, "443") +} + +// RoundTripOpt is like RoundTrip, but takes options. +func (t *Transport) RoundTripOpt(req *http.Request, opt RoundTripOpt) (*http.Response, error) { + if req.URL.Scheme != "https" { + return nil, errors.New("http2: unsupported scheme") + } + + addr := authorityAddr(req.URL.Host) + for { + cc, err := t.connPool().GetClientConn(req, addr) + if err != nil { + t.vlogf("http2: Transport failed to get client conn for %s: %v", addr, err) + return nil, err + } + res, err := cc.RoundTrip(req) + if shouldRetryRequest(req, err) { + continue + } + if err != nil { + t.vlogf("RoundTrip failure: %v", err) + return nil, err + } + return res, nil + } +} + +// CloseIdleConnections closes any connections which were previously +// connected from previous requests but are now sitting idle. +// It does not interrupt any connections currently in use. +func (t *Transport) CloseIdleConnections() { + if cp, ok := t.connPool().(*clientConnPool); ok { + cp.closeIdleConnections() + } +} + +var ( + errClientConnClosed = errors.New("http2: client conn is closed") + errClientConnUnusable = errors.New("http2: client conn not usable") +) + +func shouldRetryRequest(req *http.Request, err error) bool { + // TODO: retry GET requests (no bodies) more aggressively, if shutdown + // before response. + return err == errClientConnUnusable +} + +func (t *Transport) dialClientConn(addr string) (*ClientConn, error) { + host, _, err := net.SplitHostPort(addr) + if err != nil { + return nil, err + } + tconn, err := t.dialTLS()("tcp", addr, t.newTLSConfig(host)) + if err != nil { + return nil, err + } + return t.NewClientConn(tconn) +} + +func (t *Transport) newTLSConfig(host string) *tls.Config { + cfg := new(tls.Config) + if t.TLSClientConfig != nil { + *cfg = *t.TLSClientConfig + } + if !strSliceContains(cfg.NextProtos, NextProtoTLS) { + cfg.NextProtos = append([]string{NextProtoTLS}, cfg.NextProtos...) 
+ } + if cfg.ServerName == "" { + cfg.ServerName = host + } + return cfg +} + +func (t *Transport) dialTLS() func(string, string, *tls.Config) (net.Conn, error) { + if t.DialTLS != nil { + return t.DialTLS + } + return t.dialTLSDefault +} + +func (t *Transport) dialTLSDefault(network, addr string, cfg *tls.Config) (net.Conn, error) { + cn, err := tls.Dial(network, addr, cfg) + if err != nil { + return nil, err + } + if err := cn.Handshake(); err != nil { + return nil, err + } + if !cfg.InsecureSkipVerify { + if err := cn.VerifyHostname(cfg.ServerName); err != nil { + return nil, err + } + } + state := cn.ConnectionState() + if p := state.NegotiatedProtocol; p != NextProtoTLS { + return nil, fmt.Errorf("http2: unexpected ALPN protocol %q; want %q", p, NextProtoTLS) + } + if !state.NegotiatedProtocolIsMutual { + return nil, errors.New("http2: could not negotiate protocol mutually") + } + return cn, nil +} + +// disableKeepAlives reports whether connections should be closed as +// soon as possible after handling the first request. +func (t *Transport) disableKeepAlives() bool { + return t.t1 != nil && t.t1.DisableKeepAlives +} + +func (t *Transport) NewClientConn(c net.Conn) (*ClientConn, error) { + if VerboseLogs { + t.vlogf("http2: Transport creating client conn to %v", c.RemoteAddr()) + } + if _, err := c.Write(clientPreface); err != nil { + t.vlogf("client preface write error: %v", err) + return nil, err + } + + cc := &ClientConn{ + t: t, + tconn: c, + readerDone: make(chan struct{}), + nextStreamID: 1, + maxFrameSize: 16 << 10, // spec default + initialWindowSize: 65535, // spec default + maxConcurrentStreams: 1000, // "infinite", per spec. 1000 seems good enough. + streams: make(map[uint32]*clientStream), + } + cc.cond = sync.NewCond(&cc.mu) + cc.flow.add(int32(initialWindowSize)) + + // TODO: adjust this writer size to account for frame size + + // MTU + crypto/tls record padding. + cc.bw = bufio.NewWriter(stickyErrWriter{c, &cc.werr}) + cc.br = bufio.NewReader(c) + cc.fr = NewFramer(cc.bw, cc.br) + cc.fr.ReadMetaHeaders = hpack.NewDecoder(initialHeaderTableSize, nil) + cc.fr.MaxHeaderListSize = t.maxHeaderListSize() + + // TODO: SetMaxDynamicTableSize, SetMaxDynamicTableSizeLimit on + // henc in response to SETTINGS frames? + cc.henc = hpack.NewEncoder(&cc.hbuf) + + if cs, ok := c.(connectionStater); ok { + state := cs.ConnectionState() + cc.tlsState = &state + } + + initialSettings := []Setting{ + {ID: SettingEnablePush, Val: 0}, + {ID: SettingInitialWindowSize, Val: transportDefaultStreamFlow}, + } + if max := t.maxHeaderListSize(); max != 0 { + initialSettings = append(initialSettings, Setting{ID: SettingMaxHeaderListSize, Val: max}) + } + cc.fr.WriteSettings(initialSettings...) + cc.fr.WriteWindowUpdate(0, transportDefaultConnFlow) + cc.inflow.add(transportDefaultConnFlow + initialWindowSize) + cc.bw.Flush() + if cc.werr != nil { + return nil, cc.werr + } + + // Read the obligatory SETTINGS frame + f, err := cc.fr.ReadFrame() + if err != nil { + return nil, err + } + sf, ok := f.(*SettingsFrame) + if !ok { + return nil, fmt.Errorf("expected settings frame, got: %T", f) + } + cc.fr.WriteSettingsAck() + cc.bw.Flush() + + sf.ForeachSetting(func(s Setting) error { + switch s.ID { + case SettingMaxFrameSize: + cc.maxFrameSize = s.Val + case SettingMaxConcurrentStreams: + cc.maxConcurrentStreams = s.Val + case SettingInitialWindowSize: + cc.initialWindowSize = s.Val + default: + // TODO(bradfitz): handle more; at least SETTINGS_HEADER_TABLE_SIZE? 
+ t.vlogf("Unhandled Setting: %v", s) + } + return nil + }) + + go cc.readLoop() + return cc, nil +} + +func (cc *ClientConn) setGoAway(f *GoAwayFrame) { + cc.mu.Lock() + defer cc.mu.Unlock() + cc.goAway = f +} + +func (cc *ClientConn) CanTakeNewRequest() bool { + cc.mu.Lock() + defer cc.mu.Unlock() + return cc.canTakeNewRequestLocked() +} + +func (cc *ClientConn) canTakeNewRequestLocked() bool { + return cc.goAway == nil && !cc.closed && + int64(len(cc.streams)+1) < int64(cc.maxConcurrentStreams) && + cc.nextStreamID < 2147483647 +} + +func (cc *ClientConn) closeIfIdle() { + cc.mu.Lock() + if len(cc.streams) > 0 { + cc.mu.Unlock() + return + } + cc.closed = true + // TODO: do clients send GOAWAY too? maybe? Just Close: + cc.mu.Unlock() + + cc.tconn.Close() +} + +const maxAllocFrameSize = 512 << 10 + +// frameBuffer returns a scratch buffer suitable for writing DATA frames. +// They're capped at the min of the peer's max frame size or 512KB +// (kinda arbitrarily), but definitely capped so we don't allocate 4GB +// bufers. +func (cc *ClientConn) frameScratchBuffer() []byte { + cc.mu.Lock() + size := cc.maxFrameSize + if size > maxAllocFrameSize { + size = maxAllocFrameSize + } + for i, buf := range cc.freeBuf { + if len(buf) >= int(size) { + cc.freeBuf[i] = nil + cc.mu.Unlock() + return buf[:size] + } + } + cc.mu.Unlock() + return make([]byte, size) +} + +func (cc *ClientConn) putFrameScratchBuffer(buf []byte) { + cc.mu.Lock() + defer cc.mu.Unlock() + const maxBufs = 4 // arbitrary; 4 concurrent requests per conn? investigate. + if len(cc.freeBuf) < maxBufs { + cc.freeBuf = append(cc.freeBuf, buf) + return + } + for i, old := range cc.freeBuf { + if old == nil { + cc.freeBuf[i] = buf + return + } + } + // forget about it. +} + +// errRequestCanceled is a copy of net/http's errRequestCanceled because it's not +// exported. At least they'll be DeepEqual for h1-vs-h2 comparisons tests. +var errRequestCanceled = errors.New("net/http: request canceled") + +func commaSeparatedTrailers(req *http.Request) (string, error) { + keys := make([]string, 0, len(req.Trailer)) + for k := range req.Trailer { + k = http.CanonicalHeaderKey(k) + switch k { + case "Transfer-Encoding", "Trailer", "Content-Length": + return "", &badStringError{"invalid Trailer key", k} + } + keys = append(keys, k) + } + if len(keys) > 0 { + sort.Strings(keys) + // TODO: could do better allocation-wise here, but trailers are rare, + // so being lazy for now. + return strings.Join(keys, ","), nil + } + return "", nil +} + +func (cc *ClientConn) responseHeaderTimeout() time.Duration { + if cc.t.t1 != nil { + return cc.t.t1.ResponseHeaderTimeout + } + // No way to do this (yet?) with just an http2.Transport. Probably + // no need. Request.Cancel this is the new way. We only need to support + // this for compatibility with the old http.Transport fields when + // we're doing transparent http2. + return 0 +} + +// checkConnHeaders checks whether req has any invalid connection-level headers. +// per RFC 7540 section 8.1.2.2: Connection-Specific Header Fields. +// Certain headers are special-cased as okay but not transmitted later. 
+func checkConnHeaders(req *http.Request) error { + if v := req.Header.Get("Upgrade"); v != "" { + return errors.New("http2: invalid Upgrade request header") + } + if v := req.Header.Get("Transfer-Encoding"); (v != "" && v != "chunked") || len(req.Header["Transfer-Encoding"]) > 1 { + return errors.New("http2: invalid Transfer-Encoding request header") + } + if v := req.Header.Get("Connection"); (v != "" && v != "close" && v != "keep-alive") || len(req.Header["Connection"]) > 1 { + return errors.New("http2: invalid Connection request header") + } + return nil +} + +func (cc *ClientConn) RoundTrip(req *http.Request) (*http.Response, error) { + if err := checkConnHeaders(req); err != nil { + return nil, err + } + + trailers, err := commaSeparatedTrailers(req) + if err != nil { + return nil, err + } + hasTrailers := trailers != "" + + var body io.Reader = req.Body + contentLen := req.ContentLength + if req.Body != nil && contentLen == 0 { + // Test to see if it's actually zero or just unset. + var buf [1]byte + n, rerr := io.ReadFull(body, buf[:]) + if rerr != nil && rerr != io.EOF { + contentLen = -1 + body = errorReader{rerr} + } else if n == 1 { + // Oh, guess there is data in this Body Reader after all. + // The ContentLength field just wasn't set. + // Stich the Body back together again, re-attaching our + // consumed byte. + contentLen = -1 + body = io.MultiReader(bytes.NewReader(buf[:]), body) + } else { + // Body is actually empty. + body = nil + } + } + + cc.mu.Lock() + if cc.closed || !cc.canTakeNewRequestLocked() { + cc.mu.Unlock() + return nil, errClientConnUnusable + } + + cs := cc.newStream() + cs.req = req + hasBody := body != nil + + // TODO(bradfitz): this is a copy of the logic in net/http. Unify somewhere? + if !cc.t.disableCompression() && + req.Header.Get("Accept-Encoding") == "" && + req.Header.Get("Range") == "" && + req.Method != "HEAD" { + // Request gzip only, not deflate. Deflate is ambiguous and + // not as universally supported anyway. + // See: http://www.gzip.org/zlib/zlib_faq.html#faq38 + // + // Note that we don't request this for HEAD requests, + // due to a bug in nginx: + // http://trac.nginx.org/nginx/ticket/358 + // https://golang.org/issue/5522 + // + // We don't request gzip if the request is for a range, since + // auto-decoding a portion of a gzipped document will just fail + // anyway. 
See https://golang.org/issue/8923 + cs.requestedGzip = true + } + + // we send: HEADERS{1}, CONTINUATION{0,} + DATA{0,} (DATA is + // sent by writeRequestBody below, along with any Trailers, + // again in form HEADERS{1}, CONTINUATION{0,}) + hdrs := cc.encodeHeaders(req, cs.requestedGzip, trailers, contentLen) + cc.wmu.Lock() + endStream := !hasBody && !hasTrailers + werr := cc.writeHeaders(cs.ID, endStream, hdrs) + cc.wmu.Unlock() + cc.mu.Unlock() + + if werr != nil { + if hasBody { + req.Body.Close() // per RoundTripper contract + } + cc.forgetStreamID(cs.ID) + // Don't bother sending a RST_STREAM (our write already failed; + // no need to keep writing) + return nil, werr + } + + var respHeaderTimer <-chan time.Time + var bodyCopyErrc chan error // result of body copy + if hasBody { + bodyCopyErrc = make(chan error, 1) + go func() { + bodyCopyErrc <- cs.writeRequestBody(body, req.Body) + }() + } else { + if d := cc.responseHeaderTimeout(); d != 0 { + timer := time.NewTimer(d) + defer timer.Stop() + respHeaderTimer = timer.C + } + } + + readLoopResCh := cs.resc + requestCanceledCh := requestCancel(req) + bodyWritten := false + + for { + select { + case re := <-readLoopResCh: + res := re.res + if re.err != nil || res.StatusCode > 299 { + // On error or status code 3xx, 4xx, 5xx, etc abort any + // ongoing write, assuming that the server doesn't care + // about our request body. If the server replied with 1xx or + // 2xx, however, then assume the server DOES potentially + // want our body (e.g. full-duplex streaming: + // golang.org/issue/13444). If it turns out the server + // doesn't, they'll RST_STREAM us soon enough. This is a + // heuristic to avoid adding knobs to Transport. Hopefully + // we can keep it. + cs.abortRequestBodyWrite(errStopReqBodyWrite) + } + if re.err != nil { + cc.forgetStreamID(cs.ID) + return nil, re.err + } + res.Request = req + res.TLS = cc.tlsState + return res, nil + case <-respHeaderTimer: + cc.forgetStreamID(cs.ID) + if !hasBody || bodyWritten { + cc.writeStreamReset(cs.ID, ErrCodeCancel, nil) + } else { + cs.abortRequestBodyWrite(errStopReqBodyWriteAndCancel) + } + return nil, errTimeout + case <-requestCanceledCh: + cc.forgetStreamID(cs.ID) + if !hasBody || bodyWritten { + cc.writeStreamReset(cs.ID, ErrCodeCancel, nil) + } else { + cs.abortRequestBodyWrite(errStopReqBodyWriteAndCancel) + } + return nil, errRequestCanceled + case <-cs.peerReset: + // processResetStream already removed the + // stream from the streams map; no need for + // forgetStreamID. 
+ return nil, cs.resetErr + case err := <-bodyCopyErrc: + if err != nil { + return nil, err + } + bodyWritten = true + if d := cc.responseHeaderTimeout(); d != 0 { + timer := time.NewTimer(d) + defer timer.Stop() + respHeaderTimer = timer.C + } + } + } +} + +// requires cc.wmu be held +func (cc *ClientConn) writeHeaders(streamID uint32, endStream bool, hdrs []byte) error { + first := true // first frame written (HEADERS is first, then CONTINUATION) + frameSize := int(cc.maxFrameSize) + for len(hdrs) > 0 && cc.werr == nil { + chunk := hdrs + if len(chunk) > frameSize { + chunk = chunk[:frameSize] + } + hdrs = hdrs[len(chunk):] + endHeaders := len(hdrs) == 0 + if first { + cc.fr.WriteHeaders(HeadersFrameParam{ + StreamID: streamID, + BlockFragment: chunk, + EndStream: endStream, + EndHeaders: endHeaders, + }) + first = false + } else { + cc.fr.WriteContinuation(streamID, endHeaders, chunk) + } + } + // TODO(bradfitz): this Flush could potentially block (as + // could the WriteHeaders call(s) above), which means they + // wouldn't respond to Request.Cancel being readable. That's + // rare, but this should probably be in a goroutine. + cc.bw.Flush() + return cc.werr +} + +// internal error values; they don't escape to callers +var ( + // abort request body write; don't send cancel + errStopReqBodyWrite = errors.New("http2: aborting request body write") + + // abort request body write, but send stream reset of cancel. + errStopReqBodyWriteAndCancel = errors.New("http2: canceling request") +) + +func (cs *clientStream) writeRequestBody(body io.Reader, bodyCloser io.Closer) (err error) { + cc := cs.cc + sentEnd := false // whether we sent the final DATA frame w/ END_STREAM + buf := cc.frameScratchBuffer() + defer cc.putFrameScratchBuffer(buf) + + defer func() { + // TODO: write h12Compare test showing whether + // Request.Body is closed by the Transport, + // and in multiple cases: server replies <=299 and >299 + // while still writing request body + cerr := bodyCloser.Close() + if err == nil { + err = cerr + } + }() + + req := cs.req + hasTrailers := req.Trailer != nil + + var sawEOF bool + for !sawEOF { + n, err := body.Read(buf) + if err == io.EOF { + sawEOF = true + err = nil + } else if err != nil { + return err + } + + remain := buf[:n] + for len(remain) > 0 && err == nil { + var allowed int32 + allowed, err = cs.awaitFlowControl(len(remain)) + switch { + case err == errStopReqBodyWrite: + return err + case err == errStopReqBodyWriteAndCancel: + cc.writeStreamReset(cs.ID, ErrCodeCancel, nil) + return err + case err != nil: + return err + } + cc.wmu.Lock() + data := remain[:allowed] + remain = remain[allowed:] + sentEnd = sawEOF && len(remain) == 0 && !hasTrailers + err = cc.fr.WriteData(cs.ID, sentEnd, data) + if err == nil { + // TODO(bradfitz): this flush is for latency, not bandwidth. + // Most requests won't need this. Make this opt-in or opt-out? + // Use some heuristic on the body type? Nagel-like timers? + // Based on 'n'? Only last chunk of this for loop, unless flow control + // tokens are low? For now, always: + err = cc.bw.Flush() + } + cc.wmu.Unlock() + } + if err != nil { + return err + } + } + + cc.wmu.Lock() + if !sentEnd { + var trls []byte + if hasTrailers { + cc.mu.Lock() + trls = cc.encodeTrailers(req) + cc.mu.Unlock() + } + + // Avoid forgetting to send an END_STREAM if the encoded + // trailers are 0 bytes. Both results produce and END_STREAM. 
+ if len(trls) > 0 { + err = cc.writeHeaders(cs.ID, true, trls) + } else { + err = cc.fr.WriteData(cs.ID, true, nil) + } + } + if ferr := cc.bw.Flush(); ferr != nil && err == nil { + err = ferr + } + cc.wmu.Unlock() + + return err +} + +// awaitFlowControl waits for [1, min(maxBytes, cc.cs.maxFrameSize)] flow +// control tokens from the server. +// It returns either the non-zero number of tokens taken or an error +// if the stream is dead. +func (cs *clientStream) awaitFlowControl(maxBytes int) (taken int32, err error) { + cc := cs.cc + cc.mu.Lock() + defer cc.mu.Unlock() + for { + if cc.closed { + return 0, errClientConnClosed + } + if cs.stopReqBody != nil { + return 0, cs.stopReqBody + } + if err := cs.checkReset(); err != nil { + return 0, err + } + if a := cs.flow.available(); a > 0 { + take := a + if int(take) > maxBytes { + + take = int32(maxBytes) // can't truncate int; take is int32 + } + if take > int32(cc.maxFrameSize) { + take = int32(cc.maxFrameSize) + } + cs.flow.take(take) + return take, nil + } + cc.cond.Wait() + } +} + +type badStringError struct { + what string + str string +} + +func (e *badStringError) Error() string { return fmt.Sprintf("%s %q", e.what, e.str) } + +// requires cc.mu be held. +func (cc *ClientConn) encodeHeaders(req *http.Request, addGzipHeader bool, trailers string, contentLength int64) []byte { + cc.hbuf.Reset() + + host := req.Host + if host == "" { + host = req.URL.Host + } + + // 8.1.2.3 Request Pseudo-Header Fields + // The :path pseudo-header field includes the path and query parts of the + // target URI (the path-absolute production and optionally a '?' character + // followed by the query production (see Sections 3.3 and 3.4 of + // [RFC3986]). + cc.writeHeader(":authority", host) + cc.writeHeader(":method", req.Method) + if req.Method != "CONNECT" { + cc.writeHeader(":path", req.URL.RequestURI()) + cc.writeHeader(":scheme", "https") + } + if trailers != "" { + cc.writeHeader("trailer", trailers) + } + + var didUA bool + for k, vv := range req.Header { + lowKey := strings.ToLower(k) + switch lowKey { + case "host", "content-length": + // Host is :authority, already sent. + // Content-Length is automatic, set below. + continue + case "connection", "proxy-connection", "transfer-encoding", "upgrade": + // Per 8.1.2.2 Connection-Specific Header + // Fields, don't send connection-specific + // fields. We deal with these earlier in + // RoundTrip, deciding whether they're + // error-worthy, but we don't want to mutate + // the user's *Request so at this point, just + // skip over them at this point. + continue + case "user-agent": + // Match Go's http1 behavior: at most one + // User-Agent. If set to nil or empty string, + // then omit it. Otherwise if not mentioned, + // include the default (below). + didUA = true + if len(vv) < 1 { + continue + } + vv = vv[:1] + if vv[0] == "" { + continue + } + } + for _, v := range vv { + cc.writeHeader(lowKey, v) + } + } + if shouldSendReqContentLength(req.Method, contentLength) { + cc.writeHeader("content-length", strconv.FormatInt(contentLength, 10)) + } + if addGzipHeader { + cc.writeHeader("accept-encoding", "gzip") + } + if !didUA { + cc.writeHeader("user-agent", defaultUserAgent) + } + return cc.hbuf.Bytes() +} + +// shouldSendReqContentLength reports whether the http2.Transport should send +// a "content-length" request header. This logic is basically a copy of the net/http +// transferWriter.shouldSendContentLength. 
+// The contentLength is the corrected contentLength (so 0 means actually 0, not unknown). +// -1 means unknown. +func shouldSendReqContentLength(method string, contentLength int64) bool { + if contentLength > 0 { + return true + } + if contentLength < 0 { + return false + } + // For zero bodies, whether we send a content-length depends on the method. + // It also kinda doesn't matter for http2 either way, with END_STREAM. + switch method { + case "POST", "PUT", "PATCH": + return true + default: + return false + } +} + +// requires cc.mu be held. +func (cc *ClientConn) encodeTrailers(req *http.Request) []byte { + cc.hbuf.Reset() + for k, vv := range req.Trailer { + // Transfer-Encoding, etc.. have already been filter at the + // start of RoundTrip + lowKey := strings.ToLower(k) + for _, v := range vv { + cc.writeHeader(lowKey, v) + } + } + return cc.hbuf.Bytes() +} + +func (cc *ClientConn) writeHeader(name, value string) { + if VerboseLogs { + log.Printf("http2: Transport encoding header %q = %q", name, value) + } + cc.henc.WriteField(hpack.HeaderField{Name: name, Value: value}) +} + +type resAndError struct { + res *http.Response + err error +} + +// requires cc.mu be held. +func (cc *ClientConn) newStream() *clientStream { + cs := &clientStream{ + cc: cc, + ID: cc.nextStreamID, + resc: make(chan resAndError, 1), + peerReset: make(chan struct{}), + done: make(chan struct{}), + } + cs.flow.add(int32(cc.initialWindowSize)) + cs.flow.setConnFlow(&cc.flow) + cs.inflow.add(transportDefaultStreamFlow) + cs.inflow.setConnFlow(&cc.inflow) + cc.nextStreamID += 2 + cc.streams[cs.ID] = cs + return cs +} + +func (cc *ClientConn) forgetStreamID(id uint32) { + cc.streamByID(id, true) +} + +func (cc *ClientConn) streamByID(id uint32, andRemove bool) *clientStream { + cc.mu.Lock() + defer cc.mu.Unlock() + cs := cc.streams[id] + if andRemove && cs != nil && !cc.closed { + delete(cc.streams, id) + close(cs.done) + } + return cs +} + +// clientConnReadLoop is the state owned by the clientConn's frame-reading readLoop. +type clientConnReadLoop struct { + cc *ClientConn + activeRes map[uint32]*clientStream // keyed by streamID + closeWhenIdle bool +} + +// readLoop runs in its own goroutine and reads and dispatches frames. +func (cc *ClientConn) readLoop() { + rl := &clientConnReadLoop{ + cc: cc, + activeRes: make(map[uint32]*clientStream), + } + + defer rl.cleanup() + cc.readerErr = rl.run() + if ce, ok := cc.readerErr.(ConnectionError); ok { + cc.wmu.Lock() + cc.fr.WriteGoAway(0, ErrCode(ce), nil) + cc.wmu.Unlock() + } +} + +func (rl *clientConnReadLoop) cleanup() { + cc := rl.cc + defer cc.tconn.Close() + defer cc.t.connPool().MarkDead(cc) + defer close(cc.readerDone) + + // Close any response bodies if the server closes prematurely. + // TODO: also do this if we've written the headers but not + // gotten a response yet. 
+ err := cc.readerErr + if err == io.EOF { + err = io.ErrUnexpectedEOF + } + cc.mu.Lock() + for _, cs := range rl.activeRes { + cs.bufPipe.CloseWithError(err) + } + for _, cs := range cc.streams { + select { + case cs.resc <- resAndError{err: err}: + default: + } + close(cs.done) + } + cc.closed = true + cc.cond.Broadcast() + cc.mu.Unlock() +} + +func (rl *clientConnReadLoop) run() error { + cc := rl.cc + rl.closeWhenIdle = cc.t.disableKeepAlives() + gotReply := false // ever saw a reply + for { + f, err := cc.fr.ReadFrame() + if err != nil { + cc.vlogf("Transport readFrame error: (%T) %v", err, err) + } + if se, ok := err.(StreamError); ok { + if cs := cc.streamByID(se.StreamID, true /*ended; remove it*/); cs != nil { + rl.endStreamError(cs, cc.fr.errDetail) + } + continue + } else if err != nil { + return err + } + if VerboseLogs { + cc.vlogf("http2: Transport received %s", summarizeFrame(f)) + } + maybeIdle := false // whether frame might transition us to idle + + switch f := f.(type) { + case *MetaHeadersFrame: + err = rl.processHeaders(f) + maybeIdle = true + gotReply = true + case *DataFrame: + err = rl.processData(f) + maybeIdle = true + case *GoAwayFrame: + err = rl.processGoAway(f) + maybeIdle = true + case *RSTStreamFrame: + err = rl.processResetStream(f) + maybeIdle = true + case *SettingsFrame: + err = rl.processSettings(f) + case *PushPromiseFrame: + err = rl.processPushPromise(f) + case *WindowUpdateFrame: + err = rl.processWindowUpdate(f) + case *PingFrame: + err = rl.processPing(f) + default: + cc.logf("Transport: unhandled response frame type %T", f) + } + if err != nil { + return err + } + if rl.closeWhenIdle && gotReply && maybeIdle && len(rl.activeRes) == 0 { + cc.closeIfIdle() + } + } +} + +func (rl *clientConnReadLoop) processHeaders(f *MetaHeadersFrame) error { + cc := rl.cc + cs := cc.streamByID(f.StreamID, f.StreamEnded()) + if cs == nil { + // We'd get here if we canceled a request while the + // server had its response still in flight. So if this + // was just something we canceled, ignore it. + return nil + } + if !cs.pastHeaders { + cs.pastHeaders = true + } else { + return rl.processTrailers(cs, f) + } + + res, err := rl.handleResponse(cs, f) + if err != nil { + if _, ok := err.(ConnectionError); ok { + return err + } + // Any other error type is a stream error. + cs.cc.writeStreamReset(f.StreamID, ErrCodeProtocol, err) + cs.resc <- resAndError{err: err} + return nil // return nil from process* funcs to keep conn alive + } + if res == nil { + // (nil, nil) special case. See handleResponse docs. + return nil + } + if res.Body != noBody { + rl.activeRes[cs.ID] = cs + } + cs.resTrailer = &res.Trailer + cs.resc <- resAndError{res: res} + return nil +} + +// may return error types nil, or ConnectionError. Any other error value +// is a StreamError of type ErrCodeProtocol. The returned error in that case +// is the detail. +// +// As a special case, handleResponse may return (nil, nil) to skip the +// frame (currently only used for 100 expect continue). This special +// case is going away after Issue 13851 is fixed. 
+func (rl *clientConnReadLoop) handleResponse(cs *clientStream, f *MetaHeadersFrame) (*http.Response, error) { + if f.Truncated { + return nil, errResponseHeaderListSize + } + + status := f.PseudoValue("status") + if status == "" { + return nil, errors.New("missing status pseudo header") + } + statusCode, err := strconv.Atoi(status) + if err != nil { + return nil, errors.New("malformed non-numeric status pseudo header") + } + + if statusCode == 100 { + // Just skip 100-continue response headers for now. + // TODO: golang.org/issue/13851 for doing it properly. + cs.pastHeaders = false // do it all again + return nil, nil + } + + header := make(http.Header) + res := &http.Response{ + Proto: "HTTP/2.0", + ProtoMajor: 2, + Header: header, + StatusCode: statusCode, + Status: status + " " + http.StatusText(statusCode), + } + for _, hf := range f.RegularFields() { + key := http.CanonicalHeaderKey(hf.Name) + if key == "Trailer" { + t := res.Trailer + if t == nil { + t = make(http.Header) + res.Trailer = t + } + foreachHeaderElement(hf.Value, func(v string) { + t[http.CanonicalHeaderKey(v)] = nil + }) + } else { + header[key] = append(header[key], hf.Value) + } + } + + streamEnded := f.StreamEnded() + if !streamEnded || cs.req.Method == "HEAD" { + res.ContentLength = -1 + if clens := res.Header["Content-Length"]; len(clens) == 1 { + if clen64, err := strconv.ParseInt(clens[0], 10, 64); err == nil { + res.ContentLength = clen64 + } else { + // TODO: care? unlike http/1, it won't mess up our framing, so it's + // more safe smuggling-wise to ignore. + } + } else if len(clens) > 1 { + // TODO: care? unlike http/1, it won't mess up our framing, so it's + // more safe smuggling-wise to ignore. + } + } + + if streamEnded { + res.Body = noBody + return res, nil + } + + buf := new(bytes.Buffer) // TODO(bradfitz): recycle this garbage + cs.bufPipe = pipe{b: buf} + cs.bytesRemain = res.ContentLength + res.Body = transportResponseBody{cs} + go cs.awaitRequestCancel(requestCancel(cs.req)) + + if cs.requestedGzip && res.Header.Get("Content-Encoding") == "gzip" { + res.Header.Del("Content-Encoding") + res.Header.Del("Content-Length") + res.ContentLength = -1 + res.Body = &gzipReader{body: res.Body} + } + return res, nil +} + +func (rl *clientConnReadLoop) processTrailers(cs *clientStream, f *MetaHeadersFrame) error { + if cs.pastTrailers { + // Too many HEADERS frames for this stream. + return ConnectionError(ErrCodeProtocol) + } + cs.pastTrailers = true + if !f.StreamEnded() { + // We expect that any headers for trailers also + // has END_STREAM. + return ConnectionError(ErrCodeProtocol) + } + if len(f.PseudoFields()) > 0 { + // No pseudo header fields are defined for trailers. + // TODO: ConnectionError might be overly harsh? Check. + return ConnectionError(ErrCodeProtocol) + } + + trailer := make(http.Header) + for _, hf := range f.RegularFields() { + key := http.CanonicalHeaderKey(hf.Name) + trailer[key] = append(trailer[key], hf.Value) + } + cs.trailer = trailer + + rl.endStream(cs) + return nil +} + +// transportResponseBody is the concrete type of Transport.RoundTrip's +// Response.Body. It is an io.ReadCloser. On Read, it reads from cs.body. +// On Close it sends RST_STREAM if EOF wasn't already seen. 
+type transportResponseBody struct { + cs *clientStream +} + +func (b transportResponseBody) Read(p []byte) (n int, err error) { + cs := b.cs + cc := cs.cc + + if cs.readErr != nil { + return 0, cs.readErr + } + n, err = b.cs.bufPipe.Read(p) + if cs.bytesRemain != -1 { + if int64(n) > cs.bytesRemain { + n = int(cs.bytesRemain) + if err == nil { + err = errors.New("net/http: server replied with more than declared Content-Length; truncated") + cc.writeStreamReset(cs.ID, ErrCodeProtocol, err) + } + cs.readErr = err + return int(cs.bytesRemain), err + } + cs.bytesRemain -= int64(n) + if err == io.EOF && cs.bytesRemain > 0 { + err = io.ErrUnexpectedEOF + cs.readErr = err + return n, err + } + } + if n == 0 { + // No flow control tokens to send back. + return + } + + cc.mu.Lock() + defer cc.mu.Unlock() + + var connAdd, streamAdd int32 + // Check the conn-level first, before the stream-level. + if v := cc.inflow.available(); v < transportDefaultConnFlow/2 { + connAdd = transportDefaultConnFlow - v + cc.inflow.add(connAdd) + } + if err == nil { // No need to refresh if the stream is over or failed. + if v := cs.inflow.available(); v < transportDefaultStreamFlow-transportDefaultStreamMinRefresh { + streamAdd = transportDefaultStreamFlow - v + cs.inflow.add(streamAdd) + } + } + if connAdd != 0 || streamAdd != 0 { + cc.wmu.Lock() + defer cc.wmu.Unlock() + if connAdd != 0 { + cc.fr.WriteWindowUpdate(0, mustUint31(connAdd)) + } + if streamAdd != 0 { + cc.fr.WriteWindowUpdate(cs.ID, mustUint31(streamAdd)) + } + cc.bw.Flush() + } + return +} + +var errClosedResponseBody = errors.New("http2: response body closed") + +func (b transportResponseBody) Close() error { + cs := b.cs + if cs.bufPipe.Err() != io.EOF { + // TODO: write test for this + cs.cc.writeStreamReset(cs.ID, ErrCodeCancel, nil) + } + cs.bufPipe.BreakWithError(errClosedResponseBody) + return nil +} + +func (rl *clientConnReadLoop) processData(f *DataFrame) error { + cc := rl.cc + cs := cc.streamByID(f.StreamID, f.StreamEnded()) + if cs == nil { + cc.mu.Lock() + neverSent := cc.nextStreamID + cc.mu.Unlock() + if f.StreamID >= neverSent { + // We never asked for this. + cc.logf("http2: Transport received unsolicited DATA frame; closing connection") + return ConnectionError(ErrCodeProtocol) + } + // We probably did ask for this, but canceled. Just ignore it. + // TODO: be stricter here? only silently ignore things which + // we canceled, but not things which were closed normally + // by the peer? Tough without accumulating too much state. + return nil + } + if data := f.Data(); len(data) > 0 { + if cs.bufPipe.b == nil { + // Data frame after it's already closed? + cc.logf("http2: Transport received DATA frame for closed stream; closing connection") + return ConnectionError(ErrCodeProtocol) + } + + // Check connection-level flow control. + cc.mu.Lock() + if cs.inflow.available() >= int32(len(data)) { + cs.inflow.take(int32(len(data))) + } else { + cc.mu.Unlock() + return ConnectionError(ErrCodeFlowControl) + } + cc.mu.Unlock() + + if _, err := cs.bufPipe.Write(data); err != nil { + rl.endStreamError(cs, err) + return err + } + } + + if f.StreamEnded() { + rl.endStream(cs) + } + return nil +} + +var errInvalidTrailers = errors.New("http2: invalid trailers") + +func (rl *clientConnReadLoop) endStream(cs *clientStream) { + // TODO: check that any declared content-length matches, like + // server.go's (*stream).endStream method. 
+ rl.endStreamError(cs, nil) +} + +func (rl *clientConnReadLoop) endStreamError(cs *clientStream, err error) { + var code func() + if err == nil { + err = io.EOF + code = cs.copyTrailers + } + cs.bufPipe.closeWithErrorAndCode(err, code) + delete(rl.activeRes, cs.ID) + if cs.req.Close || cs.req.Header.Get("Connection") == "close" { + rl.closeWhenIdle = true + } +} + +func (cs *clientStream) copyTrailers() { + for k, vv := range cs.trailer { + t := cs.resTrailer + if *t == nil { + *t = make(http.Header) + } + (*t)[k] = vv + } +} + +func (rl *clientConnReadLoop) processGoAway(f *GoAwayFrame) error { + cc := rl.cc + cc.t.connPool().MarkDead(cc) + if f.ErrCode != 0 { + // TODO: deal with GOAWAY more. particularly the error code + cc.vlogf("transport got GOAWAY with error code = %v", f.ErrCode) + } + cc.setGoAway(f) + return nil +} + +func (rl *clientConnReadLoop) processSettings(f *SettingsFrame) error { + cc := rl.cc + cc.mu.Lock() + defer cc.mu.Unlock() + return f.ForeachSetting(func(s Setting) error { + switch s.ID { + case SettingMaxFrameSize: + cc.maxFrameSize = s.Val + case SettingMaxConcurrentStreams: + cc.maxConcurrentStreams = s.Val + case SettingInitialWindowSize: + // TODO: error if this is too large. + + // TODO: adjust flow control of still-open + // frames by the difference of the old initial + // window size and this one. + cc.initialWindowSize = s.Val + default: + // TODO(bradfitz): handle more settings? SETTINGS_HEADER_TABLE_SIZE probably. + cc.vlogf("Unhandled Setting: %v", s) + } + return nil + }) +} + +func (rl *clientConnReadLoop) processWindowUpdate(f *WindowUpdateFrame) error { + cc := rl.cc + cs := cc.streamByID(f.StreamID, false) + if f.StreamID != 0 && cs == nil { + return nil + } + + cc.mu.Lock() + defer cc.mu.Unlock() + + fl := &cc.flow + if cs != nil { + fl = &cs.flow + } + if !fl.add(int32(f.Increment)) { + return ConnectionError(ErrCodeFlowControl) + } + cc.cond.Broadcast() + return nil +} + +func (rl *clientConnReadLoop) processResetStream(f *RSTStreamFrame) error { + cs := rl.cc.streamByID(f.StreamID, true) + if cs == nil { + // TODO: return error if server tries to RST_STEAM an idle stream + return nil + } + select { + case <-cs.peerReset: + // Already reset. + // This is the only goroutine + // which closes this, so there + // isn't a race. + default: + err := StreamError{cs.ID, f.ErrCode} + cs.resetErr = err + close(cs.peerReset) + cs.bufPipe.CloseWithError(err) + cs.cc.cond.Broadcast() // wake up checkReset via clientStream.awaitFlowControl + } + delete(rl.activeRes, cs.ID) + return nil +} + +func (rl *clientConnReadLoop) processPing(f *PingFrame) error { + if f.IsAck() { + // 6.7 PING: " An endpoint MUST NOT respond to PING frames + // containing this flag." + return nil + } + cc := rl.cc + cc.wmu.Lock() + defer cc.wmu.Unlock() + if err := cc.fr.WritePing(true, f.Data); err != nil { + return err + } + return cc.bw.Flush() +} + +func (rl *clientConnReadLoop) processPushPromise(f *PushPromiseFrame) error { + // We told the peer we don't want them. + // Spec says: + // "PUSH_PROMISE MUST NOT be sent if the SETTINGS_ENABLE_PUSH + // setting of the peer endpoint is set to 0. An endpoint that + // has set this setting and has received acknowledgement MUST + // treat the receipt of a PUSH_PROMISE frame as a connection + // error (Section 5.4.1) of type PROTOCOL_ERROR." + return ConnectionError(ErrCodeProtocol) +} + +func (cc *ClientConn) writeStreamReset(streamID uint32, code ErrCode, err error) { + // TODO: do something with err? 
send it as a debug frame to the peer? + // But that's only in GOAWAY. Invent a new frame type? Is there one already? + cc.wmu.Lock() + cc.fr.WriteRSTStream(streamID, code) + cc.bw.Flush() + cc.wmu.Unlock() +} + +var ( + errResponseHeaderListSize = errors.New("http2: response header list larger than advertised limit") + errPseudoTrailers = errors.New("http2: invalid pseudo header in trailers") +) + +func (cc *ClientConn) logf(format string, args ...interface{}) { + cc.t.logf(format, args...) +} + +func (cc *ClientConn) vlogf(format string, args ...interface{}) { + cc.t.vlogf(format, args...) +} + +func (t *Transport) vlogf(format string, args ...interface{}) { + if VerboseLogs { + t.logf(format, args...) + } +} + +func (t *Transport) logf(format string, args ...interface{}) { + log.Printf(format, args...) +} + +var noBody io.ReadCloser = ioutil.NopCloser(bytes.NewReader(nil)) + +func strSliceContains(ss []string, s string) bool { + for _, v := range ss { + if v == s { + return true + } + } + return false +} + +type erringRoundTripper struct{ err error } + +func (rt erringRoundTripper) RoundTrip(*http.Request) (*http.Response, error) { return nil, rt.err } + +// gzipReader wraps a response body so it can lazily +// call gzip.NewReader on the first call to Read +type gzipReader struct { + body io.ReadCloser // underlying Response.Body + zr *gzip.Reader // lazily-initialized gzip reader + zerr error // sticky error +} + +func (gz *gzipReader) Read(p []byte) (n int, err error) { + if gz.zerr != nil { + return 0, gz.zerr + } + if gz.zr == nil { + gz.zr, err = gzip.NewReader(gz.body) + if err != nil { + gz.zerr = err + return 0, err + } + } + return gz.zr.Read(p) +} + +func (gz *gzipReader) Close() error { + return gz.body.Close() +} + +type errorReader struct{ err error } + +func (r errorReader) Read(p []byte) (int, error) { return 0, r.err } diff --git a/vendor/github.com/docker/distribution/vendor/golang.org/x/net/http2/write.go b/vendor/github.com/docker/distribution/vendor/golang.org/x/net/http2/write.go new file mode 100644 index 000000000..0143b24cd --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/golang.org/x/net/http2/write.go @@ -0,0 +1,262 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package http2 + +import ( + "bytes" + "fmt" + "log" + "net/http" + "time" + + "golang.org/x/net/http2/hpack" +) + +// writeFramer is implemented by any type that is used to write frames. +type writeFramer interface { + writeFrame(writeContext) error +} + +// writeContext is the interface needed by the various frame writer +// types below. All the writeFrame methods below are scheduled via the +// frame writing scheduler (see writeScheduler in writesched.go). +// +// This interface is implemented by *serverConn. +// +// TODO: decide whether to a) use this in the client code (which didn't +// end up using this yet, because it has a simpler design, not +// currently implementing priorities), or b) delete this and +// make the server code a bit more concrete. +type writeContext interface { + Framer() *Framer + Flush() error + CloseConn() error + // HeaderEncoder returns an HPACK encoder that writes to the + // returned buffer. + HeaderEncoder() (*hpack.Encoder, *bytes.Buffer) +} + +// endsStream reports whether the given frame writer w will locally +// close the stream. 
+func endsStream(w writeFramer) bool { + switch v := w.(type) { + case *writeData: + return v.endStream + case *writeResHeaders: + return v.endStream + case nil: + // This can only happen if the caller reuses w after it's + // been intentionally nil'ed out to prevent use. Keep this + // here to catch future refactoring breaking it. + panic("endsStream called on nil writeFramer") + } + return false +} + +type flushFrameWriter struct{} + +func (flushFrameWriter) writeFrame(ctx writeContext) error { + return ctx.Flush() +} + +type writeSettings []Setting + +func (s writeSettings) writeFrame(ctx writeContext) error { + return ctx.Framer().WriteSettings([]Setting(s)...) +} + +type writeGoAway struct { + maxStreamID uint32 + code ErrCode +} + +func (p *writeGoAway) writeFrame(ctx writeContext) error { + err := ctx.Framer().WriteGoAway(p.maxStreamID, p.code, nil) + if p.code != 0 { + ctx.Flush() // ignore error: we're hanging up on them anyway + time.Sleep(50 * time.Millisecond) + ctx.CloseConn() + } + return err +} + +type writeData struct { + streamID uint32 + p []byte + endStream bool +} + +func (w *writeData) String() string { + return fmt.Sprintf("writeData(stream=%d, p=%d, endStream=%v)", w.streamID, len(w.p), w.endStream) +} + +func (w *writeData) writeFrame(ctx writeContext) error { + return ctx.Framer().WriteData(w.streamID, w.endStream, w.p) +} + +// handlerPanicRST is the message sent from handler goroutines when +// the handler panics. +type handlerPanicRST struct { + StreamID uint32 +} + +func (hp handlerPanicRST) writeFrame(ctx writeContext) error { + return ctx.Framer().WriteRSTStream(hp.StreamID, ErrCodeInternal) +} + +func (se StreamError) writeFrame(ctx writeContext) error { + return ctx.Framer().WriteRSTStream(se.StreamID, se.Code) +} + +type writePingAck struct{ pf *PingFrame } + +func (w writePingAck) writeFrame(ctx writeContext) error { + return ctx.Framer().WritePing(true, w.pf.Data) +} + +type writeSettingsAck struct{} + +func (writeSettingsAck) writeFrame(ctx writeContext) error { + return ctx.Framer().WriteSettingsAck() +} + +// writeResHeaders is a request to write a HEADERS and 0+ CONTINUATION frames +// for HTTP response headers or trailers from a server handler. +type writeResHeaders struct { + streamID uint32 + httpResCode int // 0 means no ":status" line + h http.Header // may be nil + trailers []string // if non-nil, which keys of h to write. nil means all. + endStream bool + + date string + contentType string + contentLength string +} + +func encKV(enc *hpack.Encoder, k, v string) { + if VerboseLogs { + log.Printf("http2: server encoding header %q = %q", k, v) + } + enc.WriteField(hpack.HeaderField{Name: k, Value: v}) +} + +func (w *writeResHeaders) writeFrame(ctx writeContext) error { + enc, buf := ctx.HeaderEncoder() + buf.Reset() + + if w.httpResCode != 0 { + encKV(enc, ":status", httpCodeString(w.httpResCode)) + } + + encodeHeaders(enc, w.h, w.trailers) + + if w.contentType != "" { + encKV(enc, "content-type", w.contentType) + } + if w.contentLength != "" { + encKV(enc, "content-length", w.contentLength) + } + if w.date != "" { + encKV(enc, "date", w.date) + } + + headerBlock := buf.Bytes() + if len(headerBlock) == 0 && w.trailers == nil { + panic("unexpected empty hpack") + } + + // For now we're lazy and just pick the minimum MAX_FRAME_SIZE + // that all peers must support (16KB). Later we could care + // more and send larger frames if the peer advertised it, but + // there's little point. 
Most headers are small anyway (so we + // generally won't have CONTINUATION frames), and extra frames + // only waste 9 bytes anyway. + const maxFrameSize = 16384 + + first := true + for len(headerBlock) > 0 { + frag := headerBlock + if len(frag) > maxFrameSize { + frag = frag[:maxFrameSize] + } + headerBlock = headerBlock[len(frag):] + endHeaders := len(headerBlock) == 0 + var err error + if first { + first = false + err = ctx.Framer().WriteHeaders(HeadersFrameParam{ + StreamID: w.streamID, + BlockFragment: frag, + EndStream: w.endStream, + EndHeaders: endHeaders, + }) + } else { + err = ctx.Framer().WriteContinuation(w.streamID, endHeaders, frag) + } + if err != nil { + return err + } + } + return nil +} + +type write100ContinueHeadersFrame struct { + streamID uint32 +} + +func (w write100ContinueHeadersFrame) writeFrame(ctx writeContext) error { + enc, buf := ctx.HeaderEncoder() + buf.Reset() + encKV(enc, ":status", "100") + return ctx.Framer().WriteHeaders(HeadersFrameParam{ + StreamID: w.streamID, + BlockFragment: buf.Bytes(), + EndStream: false, + EndHeaders: true, + }) +} + +type writeWindowUpdate struct { + streamID uint32 // or 0 for conn-level + n uint32 +} + +func (wu writeWindowUpdate) writeFrame(ctx writeContext) error { + return ctx.Framer().WriteWindowUpdate(wu.streamID, wu.n) +} + +func encodeHeaders(enc *hpack.Encoder, h http.Header, keys []string) { + if keys == nil { + sorter := sorterPool.Get().(*sorter) + // Using defer here, since the returned keys from the + // sorter.Keys method is only valid until the sorter + // is returned: + defer sorterPool.Put(sorter) + keys = sorter.Keys(h) + } + for _, k := range keys { + vv := h[k] + k = lowerHeader(k) + if !validHeaderFieldName(k) { + // TODO: return an error? golang.org/issue/14048 + // For now just omit it. + continue + } + isTE := k == "transfer-encoding" + for _, v := range vv { + if !validHeaderFieldValue(v) { + // TODO: return an error? golang.org/issue/14048 + // For now just omit it. + continue + } + // TODO: more of "8.1.2.2 Connection-Specific Header Fields" + if isTE && v != "trailers" { + continue + } + encKV(enc, k, v) + } + } +} diff --git a/vendor/github.com/docker/distribution/vendor/golang.org/x/net/http2/writesched.go b/vendor/github.com/docker/distribution/vendor/golang.org/x/net/http2/writesched.go new file mode 100644 index 000000000..c24316ce7 --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/golang.org/x/net/http2/writesched.go @@ -0,0 +1,283 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package http2 + +import "fmt" + +// frameWriteMsg is a request to write a frame. +type frameWriteMsg struct { + // write is the interface value that does the writing, once the + // writeScheduler (below) has decided to select this frame + // to write. The write functions are all defined in write.go. + write writeFramer + + stream *stream // used for prioritization. nil for non-stream frames. + + // done, if non-nil, must be a buffered channel with space for + // 1 message and is sent the return value from write (or an + // earlier error) when the frame has been written. 
+ done chan error +} + +// for debugging only: +func (wm frameWriteMsg) String() string { + var streamID uint32 + if wm.stream != nil { + streamID = wm.stream.id + } + var des string + if s, ok := wm.write.(fmt.Stringer); ok { + des = s.String() + } else { + des = fmt.Sprintf("%T", wm.write) + } + return fmt.Sprintf("[frameWriteMsg stream=%d, ch=%v, type: %v]", streamID, wm.done != nil, des) +} + +// writeScheduler tracks pending frames to write, priorities, and decides +// the next one to use. It is not thread-safe. +type writeScheduler struct { + // zero are frames not associated with a specific stream. + // They're sent before any stream-specific freams. + zero writeQueue + + // maxFrameSize is the maximum size of a DATA frame + // we'll write. Must be non-zero and between 16K-16M. + maxFrameSize uint32 + + // sq contains the stream-specific queues, keyed by stream ID. + // when a stream is idle, it's deleted from the map. + sq map[uint32]*writeQueue + + // canSend is a slice of memory that's reused between frame + // scheduling decisions to hold the list of writeQueues (from sq) + // which have enough flow control data to send. After canSend is + // built, the best is selected. + canSend []*writeQueue + + // pool of empty queues for reuse. + queuePool []*writeQueue +} + +func (ws *writeScheduler) putEmptyQueue(q *writeQueue) { + if len(q.s) != 0 { + panic("queue must be empty") + } + ws.queuePool = append(ws.queuePool, q) +} + +func (ws *writeScheduler) getEmptyQueue() *writeQueue { + ln := len(ws.queuePool) + if ln == 0 { + return new(writeQueue) + } + q := ws.queuePool[ln-1] + ws.queuePool = ws.queuePool[:ln-1] + return q +} + +func (ws *writeScheduler) empty() bool { return ws.zero.empty() && len(ws.sq) == 0 } + +func (ws *writeScheduler) add(wm frameWriteMsg) { + st := wm.stream + if st == nil { + ws.zero.push(wm) + } else { + ws.streamQueue(st.id).push(wm) + } +} + +func (ws *writeScheduler) streamQueue(streamID uint32) *writeQueue { + if q, ok := ws.sq[streamID]; ok { + return q + } + if ws.sq == nil { + ws.sq = make(map[uint32]*writeQueue) + } + q := ws.getEmptyQueue() + ws.sq[streamID] = q + return q +} + +// take returns the most important frame to write and removes it from the scheduler. +// It is illegal to call this if the scheduler is empty or if there are no connection-level +// flow control bytes available. +func (ws *writeScheduler) take() (wm frameWriteMsg, ok bool) { + if ws.maxFrameSize == 0 { + panic("internal error: ws.maxFrameSize not initialized or invalid") + } + + // If there any frames not associated with streams, prefer those first. + // These are usually SETTINGS, etc. + if !ws.zero.empty() { + return ws.zero.shift(), true + } + if len(ws.sq) == 0 { + return + } + + // Next, prioritize frames on streams that aren't DATA frames (no cost). + for id, q := range ws.sq { + if q.firstIsNoCost() { + return ws.takeFrom(id, q) + } + } + + // Now, all that remains are DATA frames with non-zero bytes to + // send. So pick the best one. + if len(ws.canSend) != 0 { + panic("should be empty") + } + for _, q := range ws.sq { + if n := ws.streamWritableBytes(q); n > 0 { + ws.canSend = append(ws.canSend, q) + } + } + if len(ws.canSend) == 0 { + return + } + defer ws.zeroCanSend() + + // TODO: find the best queue + q := ws.canSend[0] + + return ws.takeFrom(q.streamID(), q) +} + +// zeroCanSend is defered from take. 
+func (ws *writeScheduler) zeroCanSend() { + for i := range ws.canSend { + ws.canSend[i] = nil + } + ws.canSend = ws.canSend[:0] +} + +// streamWritableBytes returns the number of DATA bytes we could write +// from the given queue's stream, if this stream/queue were +// selected. It is an error to call this if q's head isn't a +// *writeData. +func (ws *writeScheduler) streamWritableBytes(q *writeQueue) int32 { + wm := q.head() + ret := wm.stream.flow.available() // max we can write + if ret == 0 { + return 0 + } + if int32(ws.maxFrameSize) < ret { + ret = int32(ws.maxFrameSize) + } + if ret == 0 { + panic("internal error: ws.maxFrameSize not initialized or invalid") + } + wd := wm.write.(*writeData) + if len(wd.p) < int(ret) { + ret = int32(len(wd.p)) + } + return ret +} + +func (ws *writeScheduler) takeFrom(id uint32, q *writeQueue) (wm frameWriteMsg, ok bool) { + wm = q.head() + // If the first item in this queue costs flow control tokens + // and we don't have enough, write as much as we can. + if wd, ok := wm.write.(*writeData); ok && len(wd.p) > 0 { + allowed := wm.stream.flow.available() // max we can write + if allowed == 0 { + // No quota available. Caller can try the next stream. + return frameWriteMsg{}, false + } + if int32(ws.maxFrameSize) < allowed { + allowed = int32(ws.maxFrameSize) + } + // TODO: further restrict the allowed size, because even if + // the peer says it's okay to write 16MB data frames, we might + // want to write smaller ones to properly weight competing + // streams' priorities. + + if len(wd.p) > int(allowed) { + wm.stream.flow.take(allowed) + chunk := wd.p[:allowed] + wd.p = wd.p[allowed:] + // Make up a new write message of a valid size, rather + // than shifting one off the queue. + return frameWriteMsg{ + stream: wm.stream, + write: &writeData{ + streamID: wd.streamID, + p: chunk, + // even if the original had endStream set, there + // arebytes remaining because len(wd.p) > allowed, + // so we know endStream is false: + endStream: false, + }, + // our caller is blocking on the final DATA frame, not + // these intermediates, so no need to wait: + done: nil, + }, true + } + wm.stream.flow.take(int32(len(wd.p))) + } + + q.shift() + if q.empty() { + ws.putEmptyQueue(q) + delete(ws.sq, id) + } + return wm, true +} + +func (ws *writeScheduler) forgetStream(id uint32) { + q, ok := ws.sq[id] + if !ok { + return + } + delete(ws.sq, id) + + // But keep it for others later. + for i := range q.s { + q.s[i] = frameWriteMsg{} + } + q.s = q.s[:0] + ws.putEmptyQueue(q) +} + +type writeQueue struct { + s []frameWriteMsg +} + +// streamID returns the stream ID for a non-empty stream-specific queue. +func (q *writeQueue) streamID() uint32 { return q.s[0].stream.id } + +func (q *writeQueue) empty() bool { return len(q.s) == 0 } + +func (q *writeQueue) push(wm frameWriteMsg) { + q.s = append(q.s, wm) +} + +// head returns the next item that would be removed by shift. +func (q *writeQueue) head() frameWriteMsg { + if len(q.s) == 0 { + panic("invalid use of queue") + } + return q.s[0] +} + +func (q *writeQueue) shift() frameWriteMsg { + if len(q.s) == 0 { + panic("invalid use of queue") + } + wm := q.s[0] + // TODO: less copy-happy queue. 
+ copy(q.s, q.s[1:]) + q.s[len(q.s)-1] = frameWriteMsg{} + q.s = q.s[:len(q.s)-1] + return wm +} + +func (q *writeQueue) firstIsNoCost() bool { + if df, ok := q.s[0].write.(*writeData); ok { + return len(df.p) == 0 + } + return true +} diff --git a/vendor/github.com/docker/distribution/vendor/golang.org/x/net/internal/timeseries/timeseries.go b/vendor/github.com/docker/distribution/vendor/golang.org/x/net/internal/timeseries/timeseries.go new file mode 100644 index 000000000..3f90b7300 --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/golang.org/x/net/internal/timeseries/timeseries.go @@ -0,0 +1,525 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package timeseries implements a time series structure for stats collection. +package timeseries + +import ( + "fmt" + "log" + "time" +) + +const ( + timeSeriesNumBuckets = 64 + minuteHourSeriesNumBuckets = 60 +) + +var timeSeriesResolutions = []time.Duration{ + 1 * time.Second, + 10 * time.Second, + 1 * time.Minute, + 10 * time.Minute, + 1 * time.Hour, + 6 * time.Hour, + 24 * time.Hour, // 1 day + 7 * 24 * time.Hour, // 1 week + 4 * 7 * 24 * time.Hour, // 4 weeks + 16 * 7 * 24 * time.Hour, // 16 weeks +} + +var minuteHourSeriesResolutions = []time.Duration{ + 1 * time.Second, + 1 * time.Minute, +} + +// An Observable is a kind of data that can be aggregated in a time series. +type Observable interface { + Multiply(ratio float64) // Multiplies the data in self by a given ratio + Add(other Observable) // Adds the data from a different observation to self + Clear() // Clears the observation so it can be reused. + CopyFrom(other Observable) // Copies the contents of a given observation to self +} + +// Float attaches the methods of Observable to a float64. +type Float float64 + +// NewFloat returns a Float. +func NewFloat() Observable { + f := Float(0) + return &f +} + +// String returns the float as a string. +func (f *Float) String() string { return fmt.Sprintf("%g", f.Value()) } + +// Value returns the float's value. +func (f *Float) Value() float64 { return float64(*f) } + +func (f *Float) Multiply(ratio float64) { *f *= Float(ratio) } + +func (f *Float) Add(other Observable) { + o := other.(*Float) + *f += *o +} + +func (f *Float) Clear() { *f = 0 } + +func (f *Float) CopyFrom(other Observable) { + o := other.(*Float) + *f = *o +} + +// A Clock tells the current time. +type Clock interface { + Time() time.Time +} + +type defaultClock int + +var defaultClockInstance defaultClock + +func (defaultClock) Time() time.Time { return time.Now() } + +// Information kept per level. Each level consists of a circular list of +// observations. The start of the level may be derived from end and the +// len(buckets) * sizeInMillis. 
+type tsLevel struct { + oldest int // index to oldest bucketed Observable + newest int // index to newest bucketed Observable + end time.Time // end timestamp for this level + size time.Duration // duration of the bucketed Observable + buckets []Observable // collections of observations + provider func() Observable // used for creating new Observable +} + +func (l *tsLevel) Clear() { + l.oldest = 0 + l.newest = len(l.buckets) - 1 + l.end = time.Time{} + for i := range l.buckets { + if l.buckets[i] != nil { + l.buckets[i].Clear() + l.buckets[i] = nil + } + } +} + +func (l *tsLevel) InitLevel(size time.Duration, numBuckets int, f func() Observable) { + l.size = size + l.provider = f + l.buckets = make([]Observable, numBuckets) +} + +// Keeps a sequence of levels. Each level is responsible for storing data at +// a given resolution. For example, the first level stores data at a one +// minute resolution while the second level stores data at a one hour +// resolution. + +// Each level is represented by a sequence of buckets. Each bucket spans an +// interval equal to the resolution of the level. New observations are added +// to the last bucket. +type timeSeries struct { + provider func() Observable // make more Observable + numBuckets int // number of buckets in each level + levels []*tsLevel // levels of bucketed Observable + lastAdd time.Time // time of last Observable tracked + total Observable // convenient aggregation of all Observable + clock Clock // Clock for getting current time + pending Observable // observations not yet bucketed + pendingTime time.Time // what time are we keeping in pending + dirty bool // if there are pending observations +} + +// init initializes a level according to the supplied criteria. +func (ts *timeSeries) init(resolutions []time.Duration, f func() Observable, numBuckets int, clock Clock) { + ts.provider = f + ts.numBuckets = numBuckets + ts.clock = clock + ts.levels = make([]*tsLevel, len(resolutions)) + + for i := range resolutions { + if i > 0 && resolutions[i-1] >= resolutions[i] { + log.Print("timeseries: resolutions must be monotonically increasing") + break + } + newLevel := new(tsLevel) + newLevel.InitLevel(resolutions[i], ts.numBuckets, ts.provider) + ts.levels[i] = newLevel + } + + ts.Clear() +} + +// Clear removes all observations from the time series. +func (ts *timeSeries) Clear() { + ts.lastAdd = time.Time{} + ts.total = ts.resetObservation(ts.total) + ts.pending = ts.resetObservation(ts.pending) + ts.pendingTime = time.Time{} + ts.dirty = false + + for i := range ts.levels { + ts.levels[i].Clear() + } +} + +// Add records an observation at the current time. +func (ts *timeSeries) Add(observation Observable) { + ts.AddWithTime(observation, ts.clock.Time()) +} + +// AddWithTime records an observation at the specified time. +func (ts *timeSeries) AddWithTime(observation Observable, t time.Time) { + + smallBucketDuration := ts.levels[0].size + + if t.After(ts.lastAdd) { + ts.lastAdd = t + } + + if t.After(ts.pendingTime) { + ts.advance(t) + ts.mergePendingUpdates() + ts.pendingTime = ts.levels[0].end + ts.pending.CopyFrom(observation) + ts.dirty = true + } else if t.After(ts.pendingTime.Add(-1 * smallBucketDuration)) { + // The observation is close enough to go into the pending bucket. + // This compensates for clock skewing and small scheduling delays + // by letting the update stay in the fast path. 
+ ts.pending.Add(observation) + ts.dirty = true + } else { + ts.mergeValue(observation, t) + } +} + +// mergeValue inserts the observation at the specified time in the past into all levels. +func (ts *timeSeries) mergeValue(observation Observable, t time.Time) { + for _, level := range ts.levels { + index := (ts.numBuckets - 1) - int(level.end.Sub(t)/level.size) + if 0 <= index && index < ts.numBuckets { + bucketNumber := (level.oldest + index) % ts.numBuckets + if level.buckets[bucketNumber] == nil { + level.buckets[bucketNumber] = level.provider() + } + level.buckets[bucketNumber].Add(observation) + } + } + ts.total.Add(observation) +} + +// mergePendingUpdates applies the pending updates into all levels. +func (ts *timeSeries) mergePendingUpdates() { + if ts.dirty { + ts.mergeValue(ts.pending, ts.pendingTime) + ts.pending = ts.resetObservation(ts.pending) + ts.dirty = false + } +} + +// advance cycles the buckets at each level until the latest bucket in +// each level can hold the time specified. +func (ts *timeSeries) advance(t time.Time) { + if !t.After(ts.levels[0].end) { + return + } + for i := 0; i < len(ts.levels); i++ { + level := ts.levels[i] + if !level.end.Before(t) { + break + } + + // If the time is sufficiently far, just clear the level and advance + // directly. + if !t.Before(level.end.Add(level.size * time.Duration(ts.numBuckets))) { + for _, b := range level.buckets { + ts.resetObservation(b) + } + level.end = time.Unix(0, (t.UnixNano()/level.size.Nanoseconds())*level.size.Nanoseconds()) + } + + for t.After(level.end) { + level.end = level.end.Add(level.size) + level.newest = level.oldest + level.oldest = (level.oldest + 1) % ts.numBuckets + ts.resetObservation(level.buckets[level.newest]) + } + + t = level.end + } +} + +// Latest returns the sum of the num latest buckets from the level. +func (ts *timeSeries) Latest(level, num int) Observable { + now := ts.clock.Time() + if ts.levels[0].end.Before(now) { + ts.advance(now) + } + + ts.mergePendingUpdates() + + result := ts.provider() + l := ts.levels[level] + index := l.newest + + for i := 0; i < num; i++ { + if l.buckets[index] != nil { + result.Add(l.buckets[index]) + } + if index == 0 { + index = ts.numBuckets + } + index-- + } + + return result +} + +// LatestBuckets returns a copy of the num latest buckets from level. +func (ts *timeSeries) LatestBuckets(level, num int) []Observable { + if level < 0 || level > len(ts.levels) { + log.Print("timeseries: bad level argument: ", level) + return nil + } + if num < 0 || num >= ts.numBuckets { + log.Print("timeseries: bad num argument: ", num) + return nil + } + + results := make([]Observable, num) + now := ts.clock.Time() + if ts.levels[0].end.Before(now) { + ts.advance(now) + } + + ts.mergePendingUpdates() + + l := ts.levels[level] + index := l.newest + + for i := 0; i < num; i++ { + result := ts.provider() + results[i] = result + if l.buckets[index] != nil { + result.CopyFrom(l.buckets[index]) + } + + if index == 0 { + index = ts.numBuckets + } + index -= 1 + } + return results +} + +// ScaleBy updates observations by scaling by factor. +func (ts *timeSeries) ScaleBy(factor float64) { + for _, l := range ts.levels { + for i := 0; i < ts.numBuckets; i++ { + l.buckets[i].Multiply(factor) + } + } + + ts.total.Multiply(factor) + ts.pending.Multiply(factor) +} + +// Range returns the sum of observations added over the specified time range. +// If start or finish times don't fall on bucket boundaries of the same +// level, then return values are approximate answers. 
+func (ts *timeSeries) Range(start, finish time.Time) Observable { + return ts.ComputeRange(start, finish, 1)[0] +} + +// Recent returns the sum of observations from the last delta. +func (ts *timeSeries) Recent(delta time.Duration) Observable { + now := ts.clock.Time() + return ts.Range(now.Add(-delta), now) +} + +// Total returns the total of all observations. +func (ts *timeSeries) Total() Observable { + ts.mergePendingUpdates() + return ts.total +} + +// ComputeRange computes a specified number of values into a slice using +// the observations recorded over the specified time period. The return +// values are approximate if the start or finish times don't fall on the +// bucket boundaries at the same level or if the number of buckets spanning +// the range is not an integral multiple of num. +func (ts *timeSeries) ComputeRange(start, finish time.Time, num int) []Observable { + if start.After(finish) { + log.Printf("timeseries: start > finish, %v>%v", start, finish) + return nil + } + + if num < 0 { + log.Printf("timeseries: num < 0, %v", num) + return nil + } + + results := make([]Observable, num) + + for _, l := range ts.levels { + if !start.Before(l.end.Add(-l.size * time.Duration(ts.numBuckets))) { + ts.extract(l, start, finish, num, results) + return results + } + } + + // Failed to find a level that covers the desired range. So just + // extract from the last level, even if it doesn't cover the entire + // desired range. + ts.extract(ts.levels[len(ts.levels)-1], start, finish, num, results) + + return results +} + +// RecentList returns the specified number of values in slice over the most +// recent time period of the specified range. +func (ts *timeSeries) RecentList(delta time.Duration, num int) []Observable { + if delta < 0 { + return nil + } + now := ts.clock.Time() + return ts.ComputeRange(now.Add(-delta), now, num) +} + +// extract returns a slice of specified number of observations from a given +// level over a given range. +func (ts *timeSeries) extract(l *tsLevel, start, finish time.Time, num int, results []Observable) { + ts.mergePendingUpdates() + + srcInterval := l.size + dstInterval := finish.Sub(start) / time.Duration(num) + dstStart := start + srcStart := l.end.Add(-srcInterval * time.Duration(ts.numBuckets)) + + srcIndex := 0 + + // Where should scanning start? + if dstStart.After(srcStart) { + advance := dstStart.Sub(srcStart) / srcInterval + srcIndex += int(advance) + srcStart = srcStart.Add(advance * srcInterval) + } + + // The i'th value is computed as show below. + // interval = (finish/start)/num + // i'th value = sum of observation in range + // [ start + i * interval, + // start + (i + 1) * interval ) + for i := 0; i < num; i++ { + results[i] = ts.resetObservation(results[i]) + dstEnd := dstStart.Add(dstInterval) + for srcIndex < ts.numBuckets && srcStart.Before(dstEnd) { + srcEnd := srcStart.Add(srcInterval) + if srcEnd.After(ts.lastAdd) { + srcEnd = ts.lastAdd + } + + if !srcEnd.Before(dstStart) { + srcValue := l.buckets[(srcIndex+l.oldest)%ts.numBuckets] + if !srcStart.Before(dstStart) && !srcEnd.After(dstEnd) { + // dst completely contains src. + if srcValue != nil { + results[i].Add(srcValue) + } + } else { + // dst partially overlaps src. 
+ overlapStart := maxTime(srcStart, dstStart) + overlapEnd := minTime(srcEnd, dstEnd) + base := srcEnd.Sub(srcStart) + fraction := overlapEnd.Sub(overlapStart).Seconds() / base.Seconds() + + used := ts.provider() + if srcValue != nil { + used.CopyFrom(srcValue) + } + used.Multiply(fraction) + results[i].Add(used) + } + + if srcEnd.After(dstEnd) { + break + } + } + srcIndex++ + srcStart = srcStart.Add(srcInterval) + } + dstStart = dstStart.Add(dstInterval) + } +} + +// resetObservation clears the content so the struct may be reused. +func (ts *timeSeries) resetObservation(observation Observable) Observable { + if observation == nil { + observation = ts.provider() + } else { + observation.Clear() + } + return observation +} + +// TimeSeries tracks data at granularities from 1 second to 16 weeks. +type TimeSeries struct { + timeSeries +} + +// NewTimeSeries creates a new TimeSeries using the function provided for creating new Observable. +func NewTimeSeries(f func() Observable) *TimeSeries { + return NewTimeSeriesWithClock(f, defaultClockInstance) +} + +// NewTimeSeriesWithClock creates a new TimeSeries using the function provided for creating new Observable and the clock for +// assigning timestamps. +func NewTimeSeriesWithClock(f func() Observable, clock Clock) *TimeSeries { + ts := new(TimeSeries) + ts.timeSeries.init(timeSeriesResolutions, f, timeSeriesNumBuckets, clock) + return ts +} + +// MinuteHourSeries tracks data at granularities of 1 minute and 1 hour. +type MinuteHourSeries struct { + timeSeries +} + +// NewMinuteHourSeries creates a new MinuteHourSeries using the function provided for creating new Observable. +func NewMinuteHourSeries(f func() Observable) *MinuteHourSeries { + return NewMinuteHourSeriesWithClock(f, defaultClockInstance) +} + +// NewMinuteHourSeriesWithClock creates a new MinuteHourSeries using the function provided for creating new Observable and the clock for +// assigning timestamps. +func NewMinuteHourSeriesWithClock(f func() Observable, clock Clock) *MinuteHourSeries { + ts := new(MinuteHourSeries) + ts.timeSeries.init(minuteHourSeriesResolutions, f, + minuteHourSeriesNumBuckets, clock) + return ts +} + +func (ts *MinuteHourSeries) Minute() Observable { + return ts.timeSeries.Latest(0, 60) +} + +func (ts *MinuteHourSeries) Hour() Observable { + return ts.timeSeries.Latest(1, 60) +} + +func minTime(a, b time.Time) time.Time { + if a.Before(b) { + return a + } + return b +} + +func maxTime(a, b time.Time) time.Time { + if a.After(b) { + return a + } + return b +} diff --git a/vendor/github.com/docker/distribution/vendor/golang.org/x/net/trace/events.go b/vendor/github.com/docker/distribution/vendor/golang.org/x/net/trace/events.go new file mode 100644 index 000000000..e66c7e328 --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/golang.org/x/net/trace/events.go @@ -0,0 +1,524 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package trace + +import ( + "bytes" + "fmt" + "html/template" + "io" + "log" + "net/http" + "runtime" + "sort" + "strconv" + "strings" + "sync" + "sync/atomic" + "text/tabwriter" + "time" +) + +var eventsTmpl = template.Must(template.New("events").Funcs(template.FuncMap{ + "elapsed": elapsed, + "trimSpace": strings.TrimSpace, +}).Parse(eventsHTML)) + +const maxEventsPerLog = 100 + +type bucket struct { + MaxErrAge time.Duration + String string +} + +var buckets = []bucket{ + {0, "total"}, + {10 * time.Second, "errs<10s"}, + {1 * time.Minute, "errs<1m"}, + {10 * time.Minute, "errs<10m"}, + {1 * time.Hour, "errs<1h"}, + {10 * time.Hour, "errs<10h"}, + {24000 * time.Hour, "errors"}, +} + +// RenderEvents renders the HTML page typically served at /debug/events. +// It does not do any auth checking; see AuthRequest for the default auth check +// used by the handler registered on http.DefaultServeMux. +// req may be nil. +func RenderEvents(w http.ResponseWriter, req *http.Request, sensitive bool) { + now := time.Now() + data := &struct { + Families []string // family names + Buckets []bucket + Counts [][]int // eventLog count per family/bucket + + // Set when a bucket has been selected. + Family string + Bucket int + EventLogs eventLogs + Expanded bool + }{ + Buckets: buckets, + } + + data.Families = make([]string, 0, len(families)) + famMu.RLock() + for name := range families { + data.Families = append(data.Families, name) + } + famMu.RUnlock() + sort.Strings(data.Families) + + // Count the number of eventLogs in each family for each error age. + data.Counts = make([][]int, len(data.Families)) + for i, name := range data.Families { + // TODO(sameer): move this loop under the family lock. + f := getEventFamily(name) + data.Counts[i] = make([]int, len(data.Buckets)) + for j, b := range data.Buckets { + data.Counts[i][j] = f.Count(now, b.MaxErrAge) + } + } + + if req != nil { + var ok bool + data.Family, data.Bucket, ok = parseEventsArgs(req) + if !ok { + // No-op + } else { + data.EventLogs = getEventFamily(data.Family).Copy(now, buckets[data.Bucket].MaxErrAge) + } + if data.EventLogs != nil { + defer data.EventLogs.Free() + sort.Sort(data.EventLogs) + } + if exp, err := strconv.ParseBool(req.FormValue("exp")); err == nil { + data.Expanded = exp + } + } + + famMu.RLock() + defer famMu.RUnlock() + if err := eventsTmpl.Execute(w, data); err != nil { + log.Printf("net/trace: Failed executing template: %v", err) + } +} + +func parseEventsArgs(req *http.Request) (fam string, b int, ok bool) { + fam, bStr := req.FormValue("fam"), req.FormValue("b") + if fam == "" || bStr == "" { + return "", 0, false + } + b, err := strconv.Atoi(bStr) + if err != nil || b < 0 || b >= len(buckets) { + return "", 0, false + } + return fam, b, true +} + +// An EventLog provides a log of events associated with a specific object. +type EventLog interface { + // Printf formats its arguments with fmt.Sprintf and adds the + // result to the event log. + Printf(format string, a ...interface{}) + + // Errorf is like Printf, but it marks this event as an error. + Errorf(format string, a ...interface{}) + + // Finish declares that this event log is complete. + // The event log should not be used after calling this method. + Finish() +} + +// NewEventLog returns a new EventLog with the specified family name +// and title. 
+func NewEventLog(family, title string) EventLog { + el := newEventLog() + el.ref() + el.Family, el.Title = family, title + el.Start = time.Now() + el.events = make([]logEntry, 0, maxEventsPerLog) + el.stack = make([]uintptr, 32) + n := runtime.Callers(2, el.stack) + el.stack = el.stack[:n] + + getEventFamily(family).add(el) + return el +} + +func (el *eventLog) Finish() { + getEventFamily(el.Family).remove(el) + el.unref() // matches ref in New +} + +var ( + famMu sync.RWMutex + families = make(map[string]*eventFamily) // family name => family +) + +func getEventFamily(fam string) *eventFamily { + famMu.Lock() + defer famMu.Unlock() + f := families[fam] + if f == nil { + f = &eventFamily{} + families[fam] = f + } + return f +} + +type eventFamily struct { + mu sync.RWMutex + eventLogs eventLogs +} + +func (f *eventFamily) add(el *eventLog) { + f.mu.Lock() + f.eventLogs = append(f.eventLogs, el) + f.mu.Unlock() +} + +func (f *eventFamily) remove(el *eventLog) { + f.mu.Lock() + defer f.mu.Unlock() + for i, el0 := range f.eventLogs { + if el == el0 { + copy(f.eventLogs[i:], f.eventLogs[i+1:]) + f.eventLogs = f.eventLogs[:len(f.eventLogs)-1] + return + } + } +} + +func (f *eventFamily) Count(now time.Time, maxErrAge time.Duration) (n int) { + f.mu.RLock() + defer f.mu.RUnlock() + for _, el := range f.eventLogs { + if el.hasRecentError(now, maxErrAge) { + n++ + } + } + return +} + +func (f *eventFamily) Copy(now time.Time, maxErrAge time.Duration) (els eventLogs) { + f.mu.RLock() + defer f.mu.RUnlock() + els = make(eventLogs, 0, len(f.eventLogs)) + for _, el := range f.eventLogs { + if el.hasRecentError(now, maxErrAge) { + el.ref() + els = append(els, el) + } + } + return +} + +type eventLogs []*eventLog + +// Free calls unref on each element of the list. +func (els eventLogs) Free() { + for _, el := range els { + el.unref() + } +} + +// eventLogs may be sorted in reverse chronological order. +func (els eventLogs) Len() int { return len(els) } +func (els eventLogs) Less(i, j int) bool { return els[i].Start.After(els[j].Start) } +func (els eventLogs) Swap(i, j int) { els[i], els[j] = els[j], els[i] } + +// A logEntry is a timestamped log entry in an event log. +type logEntry struct { + When time.Time + Elapsed time.Duration // since previous event in log + NewDay bool // whether this event is on a different day to the previous event + What string + IsErr bool +} + +// WhenString returns a string representation of the elapsed time of the event. +// It will include the date if midnight was crossed. +func (e logEntry) WhenString() string { + if e.NewDay { + return e.When.Format("2006/01/02 15:04:05.000000") + } + return e.When.Format("15:04:05.000000") +} + +// An eventLog represents an active event log. +type eventLog struct { + // Family is the top-level grouping of event logs to which this belongs. + Family string + + // Title is the title of this event log. + Title string + + // Timing information. + Start time.Time + + // Call stack where this event log was created. + stack []uintptr + + // Append-only sequence of events. + // + // TODO(sameer): change this to a ring buffer to avoid the array copy + // when we hit maxEventsPerLog. + mu sync.RWMutex + events []logEntry + LastErrorTime time.Time + discarded int + + refs int32 // how many buckets this is in +} + +func (el *eventLog) reset() { + // Clear all but the mutex. Mutexes may not be copied, even when unlocked. 
+ el.Family = "" + el.Title = "" + el.Start = time.Time{} + el.stack = nil + el.events = nil + el.LastErrorTime = time.Time{} + el.discarded = 0 + el.refs = 0 +} + +func (el *eventLog) hasRecentError(now time.Time, maxErrAge time.Duration) bool { + if maxErrAge == 0 { + return true + } + el.mu.RLock() + defer el.mu.RUnlock() + return now.Sub(el.LastErrorTime) < maxErrAge +} + +// delta returns the elapsed time since the last event or the log start, +// and whether it spans midnight. +// L >= el.mu +func (el *eventLog) delta(t time.Time) (time.Duration, bool) { + if len(el.events) == 0 { + return t.Sub(el.Start), false + } + prev := el.events[len(el.events)-1].When + return t.Sub(prev), prev.Day() != t.Day() + +} + +func (el *eventLog) Printf(format string, a ...interface{}) { + el.printf(false, format, a...) +} + +func (el *eventLog) Errorf(format string, a ...interface{}) { + el.printf(true, format, a...) +} + +func (el *eventLog) printf(isErr bool, format string, a ...interface{}) { + e := logEntry{When: time.Now(), IsErr: isErr, What: fmt.Sprintf(format, a...)} + el.mu.Lock() + e.Elapsed, e.NewDay = el.delta(e.When) + if len(el.events) < maxEventsPerLog { + el.events = append(el.events, e) + } else { + // Discard the oldest event. + if el.discarded == 0 { + // el.discarded starts at two to count for the event it + // is replacing, plus the next one that we are about to + // drop. + el.discarded = 2 + } else { + el.discarded++ + } + // TODO(sameer): if this causes allocations on a critical path, + // change eventLog.What to be a fmt.Stringer, as in trace.go. + el.events[0].What = fmt.Sprintf("(%d events discarded)", el.discarded) + // The timestamp of the discarded meta-event should be + // the time of the last event it is representing. + el.events[0].When = el.events[1].When + copy(el.events[1:], el.events[2:]) + el.events[maxEventsPerLog-1] = e + } + if e.IsErr { + el.LastErrorTime = e.When + } + el.mu.Unlock() +} + +func (el *eventLog) ref() { + atomic.AddInt32(&el.refs, 1) +} + +func (el *eventLog) unref() { + if atomic.AddInt32(&el.refs, -1) == 0 { + freeEventLog(el) + } +} + +func (el *eventLog) When() string { + return el.Start.Format("2006/01/02 15:04:05.000000") +} + +func (el *eventLog) ElapsedTime() string { + elapsed := time.Since(el.Start) + return fmt.Sprintf("%.6f", elapsed.Seconds()) +} + +func (el *eventLog) Stack() string { + buf := new(bytes.Buffer) + tw := tabwriter.NewWriter(buf, 1, 8, 1, '\t', 0) + printStackRecord(tw, el.stack) + tw.Flush() + return buf.String() +} + +// printStackRecord prints the function + source line information +// for a single stack trace. +// Adapted from runtime/pprof/pprof.go. +func printStackRecord(w io.Writer, stk []uintptr) { + for _, pc := range stk { + f := runtime.FuncForPC(pc) + if f == nil { + continue + } + file, line := f.FileLine(pc) + name := f.Name() + // Hide runtime.goexit and any runtime functions at the beginning. + if strings.HasPrefix(name, "runtime.") { + continue + } + fmt.Fprintf(w, "# %s\t%s:%d\n", name, file, line) + } +} + +func (el *eventLog) Events() []logEntry { + el.mu.RLock() + defer el.mu.RUnlock() + return el.events +} + +// freeEventLogs is a freelist of *eventLog +var freeEventLogs = make(chan *eventLog, 1000) + +// newEventLog returns a event log ready to use. +func newEventLog() *eventLog { + select { + case el := <-freeEventLogs: + return el + default: + return new(eventLog) + } +} + +// freeEventLog adds el to freeEventLogs if there's room. +// This is non-blocking. 
+func freeEventLog(el *eventLog) { + el.reset() + select { + case freeEventLogs <- el: + default: + } +} + +const eventsHTML = ` + + + events + + + + +

/debug/events

+ + + {{range $i, $fam := .Families}} + + + + {{range $j, $bucket := $.Buckets}} + {{$n := index $.Counts $i $j}} + + {{end}} + + {{end}} +
{{$fam}} + {{if $n}}{{end}} + [{{$n}} {{$bucket.String}}] + {{if $n}}{{end}} +
+ +{{if $.EventLogs}} +
+

Family: {{$.Family}}

+ +{{if $.Expanded}}{{end}} +[Summary]{{if $.Expanded}}{{end}} + +{{if not $.Expanded}}{{end}} +[Expanded]{{if not $.Expanded}}{{end}} + + + + {{range $el := $.EventLogs}} + + + + + {{if $.Expanded}} + + + + + + {{range $el.Events}} + + + + + + {{end}} + {{end}} + {{end}} +
WhenElapsed
{{$el.When}}{{$el.ElapsedTime}}{{$el.Title}} +
{{$el.Stack|trimSpace}}
{{.WhenString}}{{elapsed .Elapsed}}.{{if .IsErr}}E{{else}}.{{end}}. {{.What}}
+{{end}} + + +` diff --git a/vendor/github.com/docker/distribution/vendor/golang.org/x/net/trace/histogram.go b/vendor/github.com/docker/distribution/vendor/golang.org/x/net/trace/histogram.go new file mode 100644 index 000000000..bb42aa532 --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/golang.org/x/net/trace/histogram.go @@ -0,0 +1,356 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package trace + +// This file implements histogramming for RPC statistics collection. + +import ( + "bytes" + "fmt" + "html/template" + "log" + "math" + + "golang.org/x/net/internal/timeseries" +) + +const ( + bucketCount = 38 +) + +// histogram keeps counts of values in buckets that are spaced +// out in powers of 2: 0-1, 2-3, 4-7... +// histogram implements timeseries.Observable +type histogram struct { + sum int64 // running total of measurements + sumOfSquares float64 // square of running total + buckets []int64 // bucketed values for histogram + value int // holds a single value as an optimization + valueCount int64 // number of values recorded for single value +} + +// AddMeasurement records a value measurement observation to the histogram. +func (h *histogram) addMeasurement(value int64) { + // TODO: assert invariant + h.sum += value + h.sumOfSquares += float64(value) * float64(value) + + bucketIndex := getBucket(value) + + if h.valueCount == 0 || (h.valueCount > 0 && h.value == bucketIndex) { + h.value = bucketIndex + h.valueCount++ + } else { + h.allocateBuckets() + h.buckets[bucketIndex]++ + } +} + +func (h *histogram) allocateBuckets() { + if h.buckets == nil { + h.buckets = make([]int64, bucketCount) + h.buckets[h.value] = h.valueCount + h.value = 0 + h.valueCount = -1 + } +} + +func log2(i int64) int { + n := 0 + for ; i >= 0x100; i >>= 8 { + n += 8 + } + for ; i > 0; i >>= 1 { + n += 1 + } + return n +} + +func getBucket(i int64) (index int) { + index = log2(i) - 1 + if index < 0 { + index = 0 + } + if index >= bucketCount { + index = bucketCount - 1 + } + return +} + +// Total returns the number of recorded observations. +func (h *histogram) total() (total int64) { + if h.valueCount >= 0 { + total = h.valueCount + } + for _, val := range h.buckets { + total += int64(val) + } + return +} + +// Average returns the average value of recorded observations. +func (h *histogram) average() float64 { + t := h.total() + if t == 0 { + return 0 + } + return float64(h.sum) / float64(t) +} + +// Variance returns the variance of recorded observations. +func (h *histogram) variance() float64 { + t := float64(h.total()) + if t == 0 { + return 0 + } + s := float64(h.sum) / t + return h.sumOfSquares/t - s*s +} + +// StandardDeviation returns the standard deviation of recorded observations. +func (h *histogram) standardDeviation() float64 { + return math.Sqrt(h.variance()) +} + +// PercentileBoundary estimates the value that the given fraction of recorded +// observations are less than. +func (h *histogram) percentileBoundary(percentile float64) int64 { + total := h.total() + + // Corner cases (make sure result is strictly less than Total()) + if total == 0 { + return 0 + } else if total == 1 { + return int64(h.average()) + } + + percentOfTotal := round(float64(total) * percentile) + var runningTotal int64 + + for i := range h.buckets { + value := h.buckets[i] + runningTotal += value + if runningTotal == percentOfTotal { + // We hit an exact bucket boundary. 
If the next bucket has data, it is a + // good estimate of the value. If the bucket is empty, we interpolate the + // midpoint between the next bucket's boundary and the next non-zero + // bucket. If the remaining buckets are all empty, then we use the + // boundary for the next bucket as the estimate. + j := uint8(i + 1) + min := bucketBoundary(j) + if runningTotal < total { + for h.buckets[j] == 0 { + j++ + } + } + max := bucketBoundary(j) + return min + round(float64(max-min)/2) + } else if runningTotal > percentOfTotal { + // The value is in this bucket. Interpolate the value. + delta := runningTotal - percentOfTotal + percentBucket := float64(value-delta) / float64(value) + bucketMin := bucketBoundary(uint8(i)) + nextBucketMin := bucketBoundary(uint8(i + 1)) + bucketSize := nextBucketMin - bucketMin + return bucketMin + round(percentBucket*float64(bucketSize)) + } + } + return bucketBoundary(bucketCount - 1) +} + +// Median returns the estimated median of the observed values. +func (h *histogram) median() int64 { + return h.percentileBoundary(0.5) +} + +// Add adds other to h. +func (h *histogram) Add(other timeseries.Observable) { + o := other.(*histogram) + if o.valueCount == 0 { + // Other histogram is empty + } else if h.valueCount >= 0 && o.valueCount > 0 && h.value == o.value { + // Both have a single bucketed value, aggregate them + h.valueCount += o.valueCount + } else { + // Two different values necessitate buckets in this histogram + h.allocateBuckets() + if o.valueCount >= 0 { + h.buckets[o.value] += o.valueCount + } else { + for i := range h.buckets { + h.buckets[i] += o.buckets[i] + } + } + } + h.sumOfSquares += o.sumOfSquares + h.sum += o.sum +} + +// Clear resets the histogram to an empty state, removing all observed values. +func (h *histogram) Clear() { + h.buckets = nil + h.value = 0 + h.valueCount = 0 + h.sum = 0 + h.sumOfSquares = 0 +} + +// CopyFrom copies from other, which must be a *histogram, into h. +func (h *histogram) CopyFrom(other timeseries.Observable) { + o := other.(*histogram) + if o.valueCount == -1 { + h.allocateBuckets() + copy(h.buckets, o.buckets) + } + h.sum = o.sum + h.sumOfSquares = o.sumOfSquares + h.value = o.value + h.valueCount = o.valueCount +} + +// Multiply scales the histogram by the specified ratio. +func (h *histogram) Multiply(ratio float64) { + if h.valueCount == -1 { + for i := range h.buckets { + h.buckets[i] = int64(float64(h.buckets[i]) * ratio) + } + } else { + h.valueCount = int64(float64(h.valueCount) * ratio) + } + h.sum = int64(float64(h.sum) * ratio) + h.sumOfSquares = h.sumOfSquares * ratio +} + +// New creates a new histogram. +func (h *histogram) New() timeseries.Observable { + r := new(histogram) + r.Clear() + return r +} + +func (h *histogram) String() string { + return fmt.Sprintf("%d, %f, %d, %d, %v", + h.sum, h.sumOfSquares, h.value, h.valueCount, h.buckets) +} + +// round returns the closest int64 to the argument +func round(in float64) int64 { + return int64(math.Floor(in + 0.5)) +} + +// bucketBoundary returns the first value in the bucket. +func bucketBoundary(bucket uint8) int64 { + if bucket == 0 { + return 0 + } + return 1 << bucket +} + +// bucketData holds data about a specific bucket for use in distTmpl. +type bucketData struct { + Lower, Upper int64 + N int64 + Pct, CumulativePct float64 + GraphWidth int +} + +// data holds data about a Distribution for use in distTmpl. 
+type data struct { + Buckets []*bucketData + Count, Median int64 + Mean, StandardDeviation float64 +} + +// maxHTMLBarWidth is the maximum width of the HTML bar for visualizing buckets. +const maxHTMLBarWidth = 350.0 + +// newData returns data representing h for use in distTmpl. +func (h *histogram) newData() *data { + // Force the allocation of buckets to simplify the rendering implementation + h.allocateBuckets() + // We scale the bars on the right so that the largest bar is + // maxHTMLBarWidth pixels in width. + maxBucket := int64(0) + for _, n := range h.buckets { + if n > maxBucket { + maxBucket = n + } + } + total := h.total() + barsizeMult := maxHTMLBarWidth / float64(maxBucket) + var pctMult float64 + if total == 0 { + pctMult = 1.0 + } else { + pctMult = 100.0 / float64(total) + } + + buckets := make([]*bucketData, len(h.buckets)) + runningTotal := int64(0) + for i, n := range h.buckets { + if n == 0 { + continue + } + runningTotal += n + var upperBound int64 + if i < bucketCount-1 { + upperBound = bucketBoundary(uint8(i + 1)) + } else { + upperBound = math.MaxInt64 + } + buckets[i] = &bucketData{ + Lower: bucketBoundary(uint8(i)), + Upper: upperBound, + N: n, + Pct: float64(n) * pctMult, + CumulativePct: float64(runningTotal) * pctMult, + GraphWidth: int(float64(n) * barsizeMult), + } + } + return &data{ + Buckets: buckets, + Count: total, + Median: h.median(), + Mean: h.average(), + StandardDeviation: h.standardDeviation(), + } +} + +func (h *histogram) html() template.HTML { + buf := new(bytes.Buffer) + if err := distTmpl.Execute(buf, h.newData()); err != nil { + buf.Reset() + log.Printf("net/trace: couldn't execute template: %v", err) + } + return template.HTML(buf.String()) +} + +// Input: data +var distTmpl = template.Must(template.New("distTmpl").Parse(` + + + + + + + +
Count: {{.Count}}Mean: {{printf "%.0f" .Mean}}StdDev: {{printf "%.0f" .StandardDeviation}}Median: {{.Median}}
+
+ +{{range $b := .Buckets}} +{{if $b}} + + + + + + + + + +{{end}} +{{end}} +
[{{.Lower}},{{.Upper}}){{.N}}{{printf "%#.3f" .Pct}}%{{printf "%#.3f" .CumulativePct}}%
+`)) diff --git a/vendor/github.com/docker/distribution/vendor/golang.org/x/net/trace/trace.go b/vendor/github.com/docker/distribution/vendor/golang.org/x/net/trace/trace.go new file mode 100644 index 000000000..9ee193627 --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/golang.org/x/net/trace/trace.go @@ -0,0 +1,1062 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +/* +Package trace implements tracing of requests and long-lived objects. +It exports HTTP interfaces on /debug/requests and /debug/events. + +A trace.Trace provides tracing for short-lived objects, usually requests. +A request handler might be implemented like this: + + func fooHandler(w http.ResponseWriter, req *http.Request) { + tr := trace.New("mypkg.Foo", req.URL.Path) + defer tr.Finish() + ... + tr.LazyPrintf("some event %q happened", str) + ... + if err := somethingImportant(); err != nil { + tr.LazyPrintf("somethingImportant failed: %v", err) + tr.SetError() + } + } + +The /debug/requests HTTP endpoint organizes the traces by family, +errors, and duration. It also provides histogram of request duration +for each family. + +A trace.EventLog provides tracing for long-lived objects, such as RPC +connections. + + // A Fetcher fetches URL paths for a single domain. + type Fetcher struct { + domain string + events trace.EventLog + } + + func NewFetcher(domain string) *Fetcher { + return &Fetcher{ + domain, + trace.NewEventLog("mypkg.Fetcher", domain), + } + } + + func (f *Fetcher) Fetch(path string) (string, error) { + resp, err := http.Get("http://" + f.domain + "/" + path) + if err != nil { + f.events.Errorf("Get(%q) = %v", path, err) + return "", err + } + f.events.Printf("Get(%q) = %s", path, resp.Status) + ... + } + + func (f *Fetcher) Close() error { + f.events.Finish() + return nil + } + +The /debug/events HTTP endpoint organizes the event logs by family and +by time since the last error. The expanded view displays recent log +entries and the log's call stack. +*/ +package trace + +import ( + "bytes" + "fmt" + "html/template" + "io" + "log" + "net" + "net/http" + "runtime" + "sort" + "strconv" + "sync" + "sync/atomic" + "time" + + "golang.org/x/net/context" + "golang.org/x/net/internal/timeseries" +) + +// DebugUseAfterFinish controls whether to debug uses of Trace values after finishing. +// FOR DEBUGGING ONLY. This will slow down the program. +var DebugUseAfterFinish = false + +// AuthRequest determines whether a specific request is permitted to load the +// /debug/requests or /debug/events pages. +// +// It returns two bools; the first indicates whether the page may be viewed at all, +// and the second indicates whether sensitive events will be shown. +// +// AuthRequest may be replaced by a program to customise its authorisation requirements. +// +// The default AuthRequest function returns (true, true) iff the request comes from localhost/127.0.0.1/[::1]. +var AuthRequest = func(req *http.Request) (any, sensitive bool) { + // RemoteAddr is commonly in the form "IP" or "IP:port". + // If it is in the form "IP:port", split off the port. 
+ host, _, err := net.SplitHostPort(req.RemoteAddr) + if err != nil { + host = req.RemoteAddr + } + switch host { + case "localhost", "127.0.0.1", "::1": + return true, true + default: + return false, false + } +} + +func init() { + http.HandleFunc("/debug/requests", func(w http.ResponseWriter, req *http.Request) { + any, sensitive := AuthRequest(req) + if !any { + http.Error(w, "not allowed", http.StatusUnauthorized) + return + } + w.Header().Set("Content-Type", "text/html; charset=utf-8") + Render(w, req, sensitive) + }) + http.HandleFunc("/debug/events", func(w http.ResponseWriter, req *http.Request) { + any, sensitive := AuthRequest(req) + if !any { + http.Error(w, "not allowed", http.StatusUnauthorized) + return + } + w.Header().Set("Content-Type", "text/html; charset=utf-8") + RenderEvents(w, req, sensitive) + }) +} + +// Render renders the HTML page typically served at /debug/requests. +// It does not do any auth checking; see AuthRequest for the default auth check +// used by the handler registered on http.DefaultServeMux. +// req may be nil. +func Render(w io.Writer, req *http.Request, sensitive bool) { + data := &struct { + Families []string + ActiveTraceCount map[string]int + CompletedTraces map[string]*family + + // Set when a bucket has been selected. + Traces traceList + Family string + Bucket int + Expanded bool + Traced bool + Active bool + ShowSensitive bool // whether to show sensitive events + + Histogram template.HTML + HistogramWindow string // e.g. "last minute", "last hour", "all time" + + // If non-zero, the set of traces is a partial set, + // and this is the total number. + Total int + }{ + CompletedTraces: completedTraces, + } + + data.ShowSensitive = sensitive + if req != nil { + // Allow show_sensitive=0 to force hiding of sensitive data for testing. + // This only goes one way; you can't use show_sensitive=1 to see things. + if req.FormValue("show_sensitive") == "0" { + data.ShowSensitive = false + } + + if exp, err := strconv.ParseBool(req.FormValue("exp")); err == nil { + data.Expanded = exp + } + if exp, err := strconv.ParseBool(req.FormValue("rtraced")); err == nil { + data.Traced = exp + } + } + + completedMu.RLock() + data.Families = make([]string, 0, len(completedTraces)) + for fam := range completedTraces { + data.Families = append(data.Families, fam) + } + completedMu.RUnlock() + sort.Strings(data.Families) + + // We are careful here to minimize the time spent locking activeMu, + // since that lock is required every time an RPC starts and finishes. 
+ data.ActiveTraceCount = make(map[string]int, len(data.Families)) + activeMu.RLock() + for fam, s := range activeTraces { + data.ActiveTraceCount[fam] = s.Len() + } + activeMu.RUnlock() + + var ok bool + data.Family, data.Bucket, ok = parseArgs(req) + switch { + case !ok: + // No-op + case data.Bucket == -1: + data.Active = true + n := data.ActiveTraceCount[data.Family] + data.Traces = getActiveTraces(data.Family) + if len(data.Traces) < n { + data.Total = n + } + case data.Bucket < bucketsPerFamily: + if b := lookupBucket(data.Family, data.Bucket); b != nil { + data.Traces = b.Copy(data.Traced) + } + default: + if f := getFamily(data.Family, false); f != nil { + var obs timeseries.Observable + f.LatencyMu.RLock() + switch o := data.Bucket - bucketsPerFamily; o { + case 0: + obs = f.Latency.Minute() + data.HistogramWindow = "last minute" + case 1: + obs = f.Latency.Hour() + data.HistogramWindow = "last hour" + case 2: + obs = f.Latency.Total() + data.HistogramWindow = "all time" + } + f.LatencyMu.RUnlock() + if obs != nil { + data.Histogram = obs.(*histogram).html() + } + } + } + + if data.Traces != nil { + defer data.Traces.Free() + sort.Sort(data.Traces) + } + + completedMu.RLock() + defer completedMu.RUnlock() + if err := pageTmpl.ExecuteTemplate(w, "Page", data); err != nil { + log.Printf("net/trace: Failed executing template: %v", err) + } +} + +func parseArgs(req *http.Request) (fam string, b int, ok bool) { + if req == nil { + return "", 0, false + } + fam, bStr := req.FormValue("fam"), req.FormValue("b") + if fam == "" || bStr == "" { + return "", 0, false + } + b, err := strconv.Atoi(bStr) + if err != nil || b < -1 { + return "", 0, false + } + + return fam, b, true +} + +func lookupBucket(fam string, b int) *traceBucket { + f := getFamily(fam, false) + if f == nil || b < 0 || b >= len(f.Buckets) { + return nil + } + return f.Buckets[b] +} + +type contextKeyT string + +var contextKey = contextKeyT("golang.org/x/net/trace.Trace") + +// NewContext returns a copy of the parent context +// and associates it with a Trace. +func NewContext(ctx context.Context, tr Trace) context.Context { + return context.WithValue(ctx, contextKey, tr) +} + +// FromContext returns the Trace bound to the context, if any. +func FromContext(ctx context.Context) (tr Trace, ok bool) { + tr, ok = ctx.Value(contextKey).(Trace) + return +} + +// Trace represents an active request. +type Trace interface { + // LazyLog adds x to the event log. It will be evaluated each time the + // /debug/requests page is rendered. Any memory referenced by x will be + // pinned until the trace is finished and later discarded. + LazyLog(x fmt.Stringer, sensitive bool) + + // LazyPrintf evaluates its arguments with fmt.Sprintf each time the + // /debug/requests page is rendered. Any memory referenced by a will be + // pinned until the trace is finished and later discarded. + LazyPrintf(format string, a ...interface{}) + + // SetError declares that this trace resulted in an error. + SetError() + + // SetRecycler sets a recycler for the trace. + // f will be called for each event passed to LazyLog at a time when + // it is no longer required, whether while the trace is still active + // and the event is discarded, or when a completed trace is discarded. + SetRecycler(f func(interface{})) + + // SetTraceInfo sets the trace info for the trace. + // This is currently unused. + SetTraceInfo(traceID, spanID uint64) + + // SetMaxEvents sets the maximum number of events that will be stored + // in the trace. 
This has no effect if any events have already been + // added to the trace. + SetMaxEvents(m int) + + // Finish declares that this trace is complete. + // The trace should not be used after calling this method. + Finish() +} + +type lazySprintf struct { + format string + a []interface{} +} + +func (l *lazySprintf) String() string { + return fmt.Sprintf(l.format, l.a...) +} + +// New returns a new Trace with the specified family and title. +func New(family, title string) Trace { + tr := newTrace() + tr.ref() + tr.Family, tr.Title = family, title + tr.Start = time.Now() + tr.events = make([]event, 0, maxEventsPerTrace) + + activeMu.RLock() + s := activeTraces[tr.Family] + activeMu.RUnlock() + if s == nil { + activeMu.Lock() + s = activeTraces[tr.Family] // check again + if s == nil { + s = new(traceSet) + activeTraces[tr.Family] = s + } + activeMu.Unlock() + } + s.Add(tr) + + // Trigger allocation of the completed trace structure for this family. + // This will cause the family to be present in the request page during + // the first trace of this family. We don't care about the return value, + // nor is there any need for this to run inline, so we execute it in its + // own goroutine, but only if the family isn't allocated yet. + completedMu.RLock() + if _, ok := completedTraces[tr.Family]; !ok { + go allocFamily(tr.Family) + } + completedMu.RUnlock() + + return tr +} + +func (tr *trace) Finish() { + tr.Elapsed = time.Now().Sub(tr.Start) + if DebugUseAfterFinish { + buf := make([]byte, 4<<10) // 4 KB should be enough + n := runtime.Stack(buf, false) + tr.finishStack = buf[:n] + } + + activeMu.RLock() + m := activeTraces[tr.Family] + activeMu.RUnlock() + m.Remove(tr) + + f := getFamily(tr.Family, true) + for _, b := range f.Buckets { + if b.Cond.match(tr) { + b.Add(tr) + } + } + // Add a sample of elapsed time as microseconds to the family's timeseries + h := new(histogram) + h.addMeasurement(tr.Elapsed.Nanoseconds() / 1e3) + f.LatencyMu.Lock() + f.Latency.Add(h) + f.LatencyMu.Unlock() + + tr.unref() // matches ref in New +} + +const ( + bucketsPerFamily = 9 + tracesPerBucket = 10 + maxActiveTraces = 20 // Maximum number of active traces to show. + maxEventsPerTrace = 10 + numHistogramBuckets = 38 +) + +var ( + // The active traces. + activeMu sync.RWMutex + activeTraces = make(map[string]*traceSet) // family -> traces + + // Families of completed traces. + completedMu sync.RWMutex + completedTraces = make(map[string]*family) // family -> traces +) + +type traceSet struct { + mu sync.RWMutex + m map[*trace]bool + + // We could avoid the entire map scan in FirstN by having a slice of all the traces + // ordered by start time, and an index into that from the trace struct, with a periodic + // repack of the slice after enough traces finish; we could also use a skip list or similar. + // However, that would shift some of the expense from /debug/requests time to RPC time, + // which is probably the wrong trade-off. +} + +func (ts *traceSet) Len() int { + ts.mu.RLock() + defer ts.mu.RUnlock() + return len(ts.m) +} + +func (ts *traceSet) Add(tr *trace) { + ts.mu.Lock() + if ts.m == nil { + ts.m = make(map[*trace]bool) + } + ts.m[tr] = true + ts.mu.Unlock() +} + +func (ts *traceSet) Remove(tr *trace) { + ts.mu.Lock() + delete(ts.m, tr) + ts.mu.Unlock() +} + +// FirstN returns the first n traces ordered by time. 
+func (ts *traceSet) FirstN(n int) traceList { + ts.mu.RLock() + defer ts.mu.RUnlock() + + if n > len(ts.m) { + n = len(ts.m) + } + trl := make(traceList, 0, n) + + // Fast path for when no selectivity is needed. + if n == len(ts.m) { + for tr := range ts.m { + tr.ref() + trl = append(trl, tr) + } + sort.Sort(trl) + return trl + } + + // Pick the oldest n traces. + // This is inefficient. See the comment in the traceSet struct. + for tr := range ts.m { + // Put the first n traces into trl in the order they occur. + // When we have n, sort trl, and thereafter maintain its order. + if len(trl) < n { + tr.ref() + trl = append(trl, tr) + if len(trl) == n { + // This is guaranteed to happen exactly once during this loop. + sort.Sort(trl) + } + continue + } + if tr.Start.After(trl[n-1].Start) { + continue + } + + // Find where to insert this one. + tr.ref() + i := sort.Search(n, func(i int) bool { return trl[i].Start.After(tr.Start) }) + trl[n-1].unref() + copy(trl[i+1:], trl[i:]) + trl[i] = tr + } + + return trl +} + +func getActiveTraces(fam string) traceList { + activeMu.RLock() + s := activeTraces[fam] + activeMu.RUnlock() + if s == nil { + return nil + } + return s.FirstN(maxActiveTraces) +} + +func getFamily(fam string, allocNew bool) *family { + completedMu.RLock() + f := completedTraces[fam] + completedMu.RUnlock() + if f == nil && allocNew { + f = allocFamily(fam) + } + return f +} + +func allocFamily(fam string) *family { + completedMu.Lock() + defer completedMu.Unlock() + f := completedTraces[fam] + if f == nil { + f = newFamily() + completedTraces[fam] = f + } + return f +} + +// family represents a set of trace buckets and associated latency information. +type family struct { + // traces may occur in multiple buckets. + Buckets [bucketsPerFamily]*traceBucket + + // latency time series + LatencyMu sync.RWMutex + Latency *timeseries.MinuteHourSeries +} + +func newFamily() *family { + return &family{ + Buckets: [bucketsPerFamily]*traceBucket{ + {Cond: minCond(0)}, + {Cond: minCond(50 * time.Millisecond)}, + {Cond: minCond(100 * time.Millisecond)}, + {Cond: minCond(200 * time.Millisecond)}, + {Cond: minCond(500 * time.Millisecond)}, + {Cond: minCond(1 * time.Second)}, + {Cond: minCond(10 * time.Second)}, + {Cond: minCond(100 * time.Second)}, + {Cond: errorCond{}}, + }, + Latency: timeseries.NewMinuteHourSeries(func() timeseries.Observable { return new(histogram) }), + } +} + +// traceBucket represents a size-capped bucket of historic traces, +// along with a condition for a trace to belong to the bucket. +type traceBucket struct { + Cond cond + + // Ring buffer implementation of a fixed-size FIFO queue. + mu sync.RWMutex + buf [tracesPerBucket]*trace + start int // < tracesPerBucket + length int // <= tracesPerBucket +} + +func (b *traceBucket) Add(tr *trace) { + b.mu.Lock() + defer b.mu.Unlock() + + i := b.start + b.length + if i >= tracesPerBucket { + i -= tracesPerBucket + } + if b.length == tracesPerBucket { + // "Remove" an element from the bucket. + b.buf[i].unref() + b.start++ + if b.start == tracesPerBucket { + b.start = 0 + } + } + b.buf[i] = tr + if b.length < tracesPerBucket { + b.length++ + } + tr.ref() +} + +// Copy returns a copy of the traces in the bucket. +// If tracedOnly is true, only the traces with trace information will be returned. +// The logs will be ref'd before returning; the caller should call +// the Free method when it is done with them. +// TODO(dsymonds): keep track of traced requests in separate buckets. 
+func (b *traceBucket) Copy(tracedOnly bool) traceList { + b.mu.RLock() + defer b.mu.RUnlock() + + trl := make(traceList, 0, b.length) + for i, x := 0, b.start; i < b.length; i++ { + tr := b.buf[x] + if !tracedOnly || tr.spanID != 0 { + tr.ref() + trl = append(trl, tr) + } + x++ + if x == b.length { + x = 0 + } + } + return trl +} + +func (b *traceBucket) Empty() bool { + b.mu.RLock() + defer b.mu.RUnlock() + return b.length == 0 +} + +// cond represents a condition on a trace. +type cond interface { + match(t *trace) bool + String() string +} + +type minCond time.Duration + +func (m minCond) match(t *trace) bool { return t.Elapsed >= time.Duration(m) } +func (m minCond) String() string { return fmt.Sprintf("≥%gs", time.Duration(m).Seconds()) } + +type errorCond struct{} + +func (e errorCond) match(t *trace) bool { return t.IsError } +func (e errorCond) String() string { return "errors" } + +type traceList []*trace + +// Free calls unref on each element of the list. +func (trl traceList) Free() { + for _, t := range trl { + t.unref() + } +} + +// traceList may be sorted in reverse chronological order. +func (trl traceList) Len() int { return len(trl) } +func (trl traceList) Less(i, j int) bool { return trl[i].Start.After(trl[j].Start) } +func (trl traceList) Swap(i, j int) { trl[i], trl[j] = trl[j], trl[i] } + +// An event is a timestamped log entry in a trace. +type event struct { + When time.Time + Elapsed time.Duration // since previous event in trace + NewDay bool // whether this event is on a different day to the previous event + Recyclable bool // whether this event was passed via LazyLog + What interface{} // string or fmt.Stringer + Sensitive bool // whether this event contains sensitive information +} + +// WhenString returns a string representation of the elapsed time of the event. +// It will include the date if midnight was crossed. +func (e event) WhenString() string { + if e.NewDay { + return e.When.Format("2006/01/02 15:04:05.000000") + } + return e.When.Format("15:04:05.000000") +} + +// discarded represents a number of discarded events. +// It is stored as *discarded to make it easier to update in-place. +type discarded int + +func (d *discarded) String() string { + return fmt.Sprintf("(%d events discarded)", int(*d)) +} + +// trace represents an active or complete request, +// either sent or received by this program. +type trace struct { + // Family is the top-level grouping of traces to which this belongs. + Family string + + // Title is the title of this trace. + Title string + + // Timing information. + Start time.Time + Elapsed time.Duration // zero while active + + // Trace information if non-zero. + traceID uint64 + spanID uint64 + + // Whether this trace resulted in an error. + IsError bool + + // Append-only sequence of events (modulo discards). + mu sync.RWMutex + events []event + + refs int32 // how many buckets this is in + recycler func(interface{}) + disc discarded // scratch space to avoid allocation + + finishStack []byte // where finish was called, if DebugUseAfterFinish is set +} + +func (tr *trace) reset() { + // Clear all but the mutex. Mutexes may not be copied, even when unlocked. + tr.Family = "" + tr.Title = "" + tr.Start = time.Time{} + tr.Elapsed = 0 + tr.traceID = 0 + tr.spanID = 0 + tr.IsError = false + tr.events = nil + tr.refs = 0 + tr.recycler = nil + tr.disc = 0 + tr.finishStack = nil +} + +// delta returns the elapsed time since the last event or the trace start, +// and whether it spans midnight. 
+// L >= tr.mu +func (tr *trace) delta(t time.Time) (time.Duration, bool) { + if len(tr.events) == 0 { + return t.Sub(tr.Start), false + } + prev := tr.events[len(tr.events)-1].When + return t.Sub(prev), prev.Day() != t.Day() +} + +func (tr *trace) addEvent(x interface{}, recyclable, sensitive bool) { + if DebugUseAfterFinish && tr.finishStack != nil { + buf := make([]byte, 4<<10) // 4 KB should be enough + n := runtime.Stack(buf, false) + log.Printf("net/trace: trace used after finish:\nFinished at:\n%s\nUsed at:\n%s", tr.finishStack, buf[:n]) + } + + /* + NOTE TO DEBUGGERS + + If you are here because your program panicked in this code, + it is almost definitely the fault of code using this package, + and very unlikely to be the fault of this code. + + The most likely scenario is that some code elsewhere is using + a requestz.Trace after its Finish method is called. + You can temporarily set the DebugUseAfterFinish var + to help discover where that is; do not leave that var set, + since it makes this package much less efficient. + */ + + e := event{When: time.Now(), What: x, Recyclable: recyclable, Sensitive: sensitive} + tr.mu.Lock() + e.Elapsed, e.NewDay = tr.delta(e.When) + if len(tr.events) < cap(tr.events) { + tr.events = append(tr.events, e) + } else { + // Discard the middle events. + di := int((cap(tr.events) - 1) / 2) + if d, ok := tr.events[di].What.(*discarded); ok { + (*d)++ + } else { + // disc starts at two to count for the event it is replacing, + // plus the next one that we are about to drop. + tr.disc = 2 + if tr.recycler != nil && tr.events[di].Recyclable { + go tr.recycler(tr.events[di].What) + } + tr.events[di].What = &tr.disc + } + // The timestamp of the discarded meta-event should be + // the time of the last event it is representing. + tr.events[di].When = tr.events[di+1].When + + if tr.recycler != nil && tr.events[di+1].Recyclable { + go tr.recycler(tr.events[di+1].What) + } + copy(tr.events[di+1:], tr.events[di+2:]) + tr.events[cap(tr.events)-1] = e + } + tr.mu.Unlock() +} + +func (tr *trace) LazyLog(x fmt.Stringer, sensitive bool) { + tr.addEvent(x, true, sensitive) +} + +func (tr *trace) LazyPrintf(format string, a ...interface{}) { + tr.addEvent(&lazySprintf{format, a}, false, false) +} + +func (tr *trace) SetError() { tr.IsError = true } + +func (tr *trace) SetRecycler(f func(interface{})) { + tr.recycler = f +} + +func (tr *trace) SetTraceInfo(traceID, spanID uint64) { + tr.traceID, tr.spanID = traceID, spanID +} + +func (tr *trace) SetMaxEvents(m int) { + // Always keep at least three events: first, discarded count, last. + if len(tr.events) == 0 && m > 3 { + tr.events = make([]event, 0, m) + } +} + +func (tr *trace) ref() { + atomic.AddInt32(&tr.refs, 1) +} + +func (tr *trace) unref() { + if atomic.AddInt32(&tr.refs, -1) == 0 { + if tr.recycler != nil { + // freeTrace clears tr, so we hold tr.recycler and tr.events here. + go func(f func(interface{}), es []event) { + for _, e := range es { + if e.Recyclable { + f(e.What) + } + } + }(tr.recycler, tr.events) + } + + freeTrace(tr) + } +} + +func (tr *trace) When() string { + return tr.Start.Format("2006/01/02 15:04:05.000000") +} + +func (tr *trace) ElapsedTime() string { + t := tr.Elapsed + if t == 0 { + // Active trace. + t = time.Since(tr.Start) + } + return fmt.Sprintf("%.6f", t.Seconds()) +} + +func (tr *trace) Events() []event { + tr.mu.RLock() + defer tr.mu.RUnlock() + return tr.events +} + +var traceFreeList = make(chan *trace, 1000) // TODO(dsymonds): Use sync.Pool? 
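
As a quick illustration of the context helpers defined above (NewContext/FromContext), here is a minimal sketch of wiring a per-request Trace through a context; the handler, trace family, and listen address are illustrative assumptions, not part of this change:

package main

import (
	"log"
	"net/http"

	"golang.org/x/net/context"
	"golang.org/x/net/trace"
)

// handler creates a per-request trace, stores it in a context, and lets
// downstream helpers pick it up without passing the Trace explicitly.
func handler(w http.ResponseWriter, req *http.Request) {
	tr := trace.New("example.Handler", req.URL.Path)
	defer tr.Finish()

	doWork(trace.NewContext(context.Background(), tr))
}

func doWork(ctx context.Context) {
	if tr, ok := trace.FromContext(ctx); ok {
		// Formatting is deferred until /debug/requests is rendered.
		tr.LazyPrintf("doWork called")
	}
}

func main() {
	http.HandleFunc("/", handler)
	// The trace package's init registers /debug/requests and /debug/events.
	log.Fatal(http.ListenAndServe("localhost:8080", nil))
}
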
+ +// newTrace returns a trace ready to use. +func newTrace() *trace { + select { + case tr := <-traceFreeList: + return tr + default: + return new(trace) + } +} + +// freeTrace adds tr to traceFreeList if there's room. +// This is non-blocking. +func freeTrace(tr *trace) { + if DebugUseAfterFinish { + return // never reuse + } + tr.reset() + select { + case traceFreeList <- tr: + default: + } +} + +func elapsed(d time.Duration) string { + b := []byte(fmt.Sprintf("%.6f", d.Seconds())) + + // For subsecond durations, blank all zeros before decimal point, + // and all zeros between the decimal point and the first non-zero digit. + if d < time.Second { + dot := bytes.IndexByte(b, '.') + for i := 0; i < dot; i++ { + b[i] = ' ' + } + for i := dot + 1; i < len(b); i++ { + if b[i] == '0' { + b[i] = ' ' + } else { + break + } + } + } + + return string(b) +} + +var pageTmpl = template.Must(template.New("Page").Funcs(template.FuncMap{ + "elapsed": elapsed, + "add": func(a, b int) int { return a + b }, +}).Parse(pageHTML)) + +const pageHTML = ` +{{template "Prolog" .}} +{{template "StatusTable" .}} +{{template "Epilog" .}} + +{{define "Prolog"}} + + + /debug/requests + + + + +

/debug/requests

+{{end}} {{/* end of Prolog */}} + +{{define "StatusTable"}} + + {{range $fam := .Families}} + + + + {{$n := index $.ActiveTraceCount $fam}} + + + {{$f := index $.CompletedTraces $fam}} + {{range $i, $b := $f.Buckets}} + {{$empty := $b.Empty}} + + {{end}} + + {{$nb := len $f.Buckets}} + + + + + + {{end}} +
{{$fam}} + {{if $n}}{{end}} + [{{$n}} active] + {{if $n}}{{end}} + + {{if not $empty}}{{end}} + [{{.Cond}}] + {{if not $empty}}{{end}} + + [minute] + + [hour] + + [total] +
+{{end}} {{/* end of StatusTable */}} + +{{define "Epilog"}} +{{if $.Traces}} +
+

Family: {{$.Family}}

+ +{{if or $.Expanded $.Traced}} + [Normal/Summary] +{{else}} + [Normal/Summary] +{{end}} + +{{if or (not $.Expanded) $.Traced}} + [Normal/Expanded] +{{else}} + [Normal/Expanded] +{{end}} + +{{if not $.Active}} + {{if or $.Expanded (not $.Traced)}} + [Traced/Summary] + {{else}} + [Traced/Summary] + {{end}} + {{if or (not $.Expanded) (not $.Traced)}} + [Traced/Expanded] + {{else}} + [Traced/Expanded] + {{end}} +{{end}} + +{{if $.Total}} +

Showing {{len $.Traces}} of {{$.Total}} traces.

+{{end}} + + + + + {{range $tr := $.Traces}} + + + + + {{/* TODO: include traceID/spanID */}} + + {{if $.Expanded}} + {{range $tr.Events}} + + + + + + {{end}} + {{end}} + {{end}} +
+ {{if $.Active}}Active{{else}}Completed{{end}} Requests +
WhenElapsed (s)
{{$tr.When}}{{$tr.ElapsedTime}}{{$tr.Title}}
{{.WhenString}}{{elapsed .Elapsed}}{{if or $.ShowSensitive (not .Sensitive)}}... {{.What}}{{else}}[redacted]{{end}}
+{{end}} {{/* if $.Traces */}} + +{{if $.Histogram}} +

Latency (µs) of {{$.Family}} over {{$.HistogramWindow}}

+{{$.Histogram}} +{{end}} {{/* if $.Histogram */}} + + + +{{end}} {{/* end of Epilog */}} +` diff --git a/vendor/github.com/docker/distribution/vendor/golang.org/x/oauth2/.travis.yml b/vendor/github.com/docker/distribution/vendor/golang.org/x/oauth2/.travis.yml new file mode 100644 index 000000000..a035125c3 --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/golang.org/x/oauth2/.travis.yml @@ -0,0 +1,14 @@ +language: go + +go: + - 1.3 + - 1.4 + +install: + - export GOPATH="$HOME/gopath" + - mkdir -p "$GOPATH/src/golang.org/x" + - mv "$TRAVIS_BUILD_DIR" "$GOPATH/src/golang.org/x/oauth2" + - go get -v -t -d golang.org/x/oauth2/... + +script: + - go test -v golang.org/x/oauth2/... diff --git a/vendor/github.com/docker/distribution/vendor/golang.org/x/oauth2/AUTHORS b/vendor/github.com/docker/distribution/vendor/golang.org/x/oauth2/AUTHORS new file mode 100644 index 000000000..15167cd74 --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/golang.org/x/oauth2/AUTHORS @@ -0,0 +1,3 @@ +# This source code refers to The Go Authors for copyright purposes. +# The master list of authors is in the main Go distribution, +# visible at http://tip.golang.org/AUTHORS. diff --git a/vendor/github.com/docker/distribution/vendor/golang.org/x/oauth2/CONTRIBUTING.md b/vendor/github.com/docker/distribution/vendor/golang.org/x/oauth2/CONTRIBUTING.md new file mode 100644 index 000000000..46aa2b12d --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/golang.org/x/oauth2/CONTRIBUTING.md @@ -0,0 +1,31 @@ +# Contributing to Go + +Go is an open source project. + +It is the work of hundreds of contributors. We appreciate your help! + + +## Filing issues + +When [filing an issue](https://github.com/golang/oauth2/issues), make sure to answer these five questions: + +1. What version of Go are you using (`go version`)? +2. What operating system and processor architecture are you using? +3. What did you do? +4. What did you expect to see? +5. What did you see instead? + +General questions should go to the [golang-nuts mailing list](https://groups.google.com/group/golang-nuts) instead of the issue tracker. +The gophers there will answer or ask you to file an issue if you've tripped over a bug. + +## Contributing code + +Please read the [Contribution Guidelines](https://golang.org/doc/contribute.html) +before sending patches. + +**We do not accept GitHub pull requests** +(we use [Gerrit](https://code.google.com/p/gerrit/) instead for code review). + +Unless otherwise noted, the Go source files are distributed under +the BSD-style license found in the LICENSE file. + diff --git a/vendor/github.com/docker/distribution/vendor/golang.org/x/oauth2/CONTRIBUTORS b/vendor/github.com/docker/distribution/vendor/golang.org/x/oauth2/CONTRIBUTORS new file mode 100644 index 000000000..1c4577e96 --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/golang.org/x/oauth2/CONTRIBUTORS @@ -0,0 +1,3 @@ +# This source code was written by the Go contributors. +# The master list of contributors is in the main Go distribution, +# visible at http://tip.golang.org/CONTRIBUTORS. diff --git a/vendor/github.com/docker/distribution/vendor/golang.org/x/oauth2/LICENSE b/vendor/github.com/docker/distribution/vendor/golang.org/x/oauth2/LICENSE new file mode 100644 index 000000000..d02f24fd5 --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/golang.org/x/oauth2/LICENSE @@ -0,0 +1,27 @@ +Copyright (c) 2009 The oauth2 Authors. All rights reserved. 
+ +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/docker/distribution/vendor/golang.org/x/oauth2/README.md b/vendor/github.com/docker/distribution/vendor/golang.org/x/oauth2/README.md new file mode 100644 index 000000000..0d5141733 --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/golang.org/x/oauth2/README.md @@ -0,0 +1,64 @@ +# OAuth2 for Go + +[![Build Status](https://travis-ci.org/golang/oauth2.svg?branch=master)](https://travis-ci.org/golang/oauth2) + +oauth2 package contains a client implementation for OAuth 2.0 spec. + +## Installation + +~~~~ +go get golang.org/x/oauth2 +~~~~ + +See godoc for further documentation and examples. + +* [godoc.org/golang.org/x/oauth2](http://godoc.org/golang.org/x/oauth2) +* [godoc.org/golang.org/x/oauth2/google](http://godoc.org/golang.org/x/oauth2/google) + + +## App Engine + +In change 96e89be (March 2015) we removed the `oauth2.Context2` type in favor +of the [`context.Context`](https://golang.org/x/net/context#Context) type from +the `golang.org/x/net/context` package + +This means its no longer possible to use the "Classic App Engine" +`appengine.Context` type with the `oauth2` package. (You're using +Classic App Engine if you import the package `"appengine"`.) + +To work around this, you may use the new `"google.golang.org/appengine"` +package. This package has almost the same API as the `"appengine"` package, +but it can be fetched with `go get` and used on "Managed VMs" and well as +Classic App Engine. + +See the [new `appengine` package's readme](https://github.com/golang/appengine#updating-a-go-app-engine-app) +for information on updating your app. + +If you don't want to update your entire app to use the new App Engine packages, +you may use both sets of packages in parallel, using only the new packages +with the `oauth2` package. 
+ + import ( + "golang.org/x/net/context" + "golang.org/x/oauth2" + "golang.org/x/oauth2/google" + newappengine "google.golang.org/appengine" + newurlfetch "google.golang.org/appengine/urlfetch" + + "appengine" + ) + + func handler(w http.ResponseWriter, r *http.Request) { + var c appengine.Context = appengine.NewContext(r) + c.Infof("Logging a message with the old package") + + var ctx context.Context = newappengine.NewContext(r) + client := &http.Client{ + Transport: &oauth2.Transport{ + Source: google.AppEngineTokenSource(ctx, "scope"), + Base: &newurlfetch.Transport{Context: ctx}, + }, + } + client.Get("...") + } + diff --git a/vendor/github.com/docker/distribution/vendor/golang.org/x/oauth2/client_appengine.go b/vendor/github.com/docker/distribution/vendor/golang.org/x/oauth2/client_appengine.go new file mode 100644 index 000000000..8962c49d1 --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/golang.org/x/oauth2/client_appengine.go @@ -0,0 +1,25 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build appengine + +// App Engine hooks. + +package oauth2 + +import ( + "net/http" + + "golang.org/x/net/context" + "golang.org/x/oauth2/internal" + "google.golang.org/appengine/urlfetch" +) + +func init() { + internal.RegisterContextClientFunc(contextClientAppEngine) +} + +func contextClientAppEngine(ctx context.Context) (*http.Client, error) { + return urlfetch.Client(ctx), nil +} diff --git a/vendor/github.com/docker/distribution/vendor/golang.org/x/oauth2/google/appengine.go b/vendor/github.com/docker/distribution/vendor/golang.org/x/oauth2/google/appengine.go new file mode 100644 index 000000000..dc993efb5 --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/golang.org/x/oauth2/google/appengine.go @@ -0,0 +1,86 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package google + +import ( + "sort" + "strings" + "sync" + "time" + + "golang.org/x/net/context" + "golang.org/x/oauth2" +) + +// Set at init time by appenginevm_hook.go. If true, we are on App Engine Managed VMs. +var appengineVM bool + +// Set at init time by appengine_hook.go. If nil, we're not on App Engine. +var appengineTokenFunc func(c context.Context, scopes ...string) (token string, expiry time.Time, err error) + +// AppEngineTokenSource returns a token source that fetches tokens +// issued to the current App Engine application's service account. +// If you are implementing a 3-legged OAuth 2.0 flow on App Engine +// that involves user accounts, see oauth2.Config instead. +// +// The provided context must have come from appengine.NewContext. +func AppEngineTokenSource(ctx context.Context, scope ...string) oauth2.TokenSource { + if appengineTokenFunc == nil { + panic("google: AppEngineTokenSource can only be used on App Engine.") + } + scopes := append([]string{}, scope...) + sort.Strings(scopes) + return &appEngineTokenSource{ + ctx: ctx, + scopes: scopes, + key: strings.Join(scopes, " "), + } +} + +// aeTokens helps the fetched tokens to be reused until their expiration. 
+var ( + aeTokensMu sync.Mutex + aeTokens = make(map[string]*tokenLock) // key is space-separated scopes +) + +type tokenLock struct { + mu sync.Mutex // guards t; held while fetching or updating t + t *oauth2.Token +} + +type appEngineTokenSource struct { + ctx context.Context + scopes []string + key string // to aeTokens map; space-separated scopes +} + +func (ts *appEngineTokenSource) Token() (*oauth2.Token, error) { + if appengineTokenFunc == nil { + panic("google: AppEngineTokenSource can only be used on App Engine.") + } + + aeTokensMu.Lock() + tok, ok := aeTokens[ts.key] + if !ok { + tok = &tokenLock{} + aeTokens[ts.key] = tok + } + aeTokensMu.Unlock() + + tok.mu.Lock() + defer tok.mu.Unlock() + if tok.t.Valid() { + return tok.t, nil + } + access, exp, err := appengineTokenFunc(ts.ctx, ts.scopes...) + if err != nil { + return nil, err + } + tok.t = &oauth2.Token{ + AccessToken: access, + Expiry: exp, + } + return tok.t, nil +} diff --git a/vendor/github.com/docker/distribution/vendor/golang.org/x/oauth2/google/appengine_hook.go b/vendor/github.com/docker/distribution/vendor/golang.org/x/oauth2/google/appengine_hook.go new file mode 100644 index 000000000..4f42c8b34 --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/golang.org/x/oauth2/google/appengine_hook.go @@ -0,0 +1,13 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build appengine + +package google + +import "google.golang.org/appengine" + +func init() { + appengineTokenFunc = appengine.AccessToken +} diff --git a/vendor/github.com/docker/distribution/vendor/golang.org/x/oauth2/google/appenginevm_hook.go b/vendor/github.com/docker/distribution/vendor/golang.org/x/oauth2/google/appenginevm_hook.go new file mode 100644 index 000000000..633611cc3 --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/golang.org/x/oauth2/google/appenginevm_hook.go @@ -0,0 +1,14 @@ +// Copyright 2015 The oauth2 Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build appenginevm + +package google + +import "google.golang.org/appengine" + +func init() { + appengineVM = true + appengineTokenFunc = appengine.AccessToken +} diff --git a/vendor/github.com/docker/distribution/vendor/golang.org/x/oauth2/google/default.go b/vendor/github.com/docker/distribution/vendor/golang.org/x/oauth2/google/default.go new file mode 100644 index 000000000..b95236297 --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/golang.org/x/oauth2/google/default.go @@ -0,0 +1,155 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package google + +import ( + "encoding/json" + "errors" + "fmt" + "io/ioutil" + "net/http" + "os" + "path/filepath" + "runtime" + + "golang.org/x/net/context" + "golang.org/x/oauth2" + "golang.org/x/oauth2/jwt" + "google.golang.org/cloud/compute/metadata" +) + +// DefaultClient returns an HTTP Client that uses the +// DefaultTokenSource to obtain authentication credentials. +// +// This client should be used when developing services +// that run on Google App Engine or Google Compute Engine +// and use "Application Default Credentials." 
+// +// For more details, see: +// https://developers.google.com/accounts/docs/application-default-credentials +// +func DefaultClient(ctx context.Context, scope ...string) (*http.Client, error) { + ts, err := DefaultTokenSource(ctx, scope...) + if err != nil { + return nil, err + } + return oauth2.NewClient(ctx, ts), nil +} + +// DefaultTokenSource is a token source that uses +// "Application Default Credentials". +// +// It looks for credentials in the following places, +// preferring the first location found: +// +// 1. A JSON file whose path is specified by the +// GOOGLE_APPLICATION_CREDENTIALS environment variable. +// 2. A JSON file in a location known to the gcloud command-line tool. +// On Windows, this is %APPDATA%/gcloud/application_default_credentials.json. +// On other systems, $HOME/.config/gcloud/application_default_credentials.json. +// 3. On Google App Engine it uses the appengine.AccessToken function. +// 4. On Google Compute Engine and Google App Engine Managed VMs, it fetches +// credentials from the metadata server. +// (In this final case any provided scopes are ignored.) +// +// For more details, see: +// https://developers.google.com/accounts/docs/application-default-credentials +// +func DefaultTokenSource(ctx context.Context, scope ...string) (oauth2.TokenSource, error) { + // First, try the environment variable. + const envVar = "GOOGLE_APPLICATION_CREDENTIALS" + if filename := os.Getenv(envVar); filename != "" { + ts, err := tokenSourceFromFile(ctx, filename, scope) + if err != nil { + return nil, fmt.Errorf("google: error getting credentials using %v environment variable: %v", envVar, err) + } + return ts, nil + } + + // Second, try a well-known file. + filename := wellKnownFile() + _, err := os.Stat(filename) + if err == nil { + ts, err2 := tokenSourceFromFile(ctx, filename, scope) + if err2 == nil { + return ts, nil + } + err = err2 + } else if os.IsNotExist(err) { + err = nil // ignore this error + } + if err != nil { + return nil, fmt.Errorf("google: error getting credentials using well-known file (%v): %v", filename, err) + } + + // Third, if we're on Google App Engine use those credentials. + if appengineTokenFunc != nil && !appengineVM { + return AppEngineTokenSource(ctx, scope...), nil + } + + // Fourth, if we're on Google Compute Engine use the metadata server. + if metadata.OnGCE() { + return ComputeTokenSource(""), nil + } + + // None are found; return helpful error. + const url = "https://developers.google.com/accounts/docs/application-default-credentials" + return nil, fmt.Errorf("google: could not find default credentials. 
See %v for more information.", url) +} + +func wellKnownFile() string { + const f = "application_default_credentials.json" + if runtime.GOOS == "windows" { + return filepath.Join(os.Getenv("APPDATA"), "gcloud", f) + } + return filepath.Join(guessUnixHomeDir(), ".config", "gcloud", f) +} + +func tokenSourceFromFile(ctx context.Context, filename string, scopes []string) (oauth2.TokenSource, error) { + b, err := ioutil.ReadFile(filename) + if err != nil { + return nil, err + } + var d struct { + // Common fields + Type string + ClientID string `json:"client_id"` + + // User Credential fields + ClientSecret string `json:"client_secret"` + RefreshToken string `json:"refresh_token"` + + // Service Account fields + ClientEmail string `json:"client_email"` + PrivateKeyID string `json:"private_key_id"` + PrivateKey string `json:"private_key"` + } + if err := json.Unmarshal(b, &d); err != nil { + return nil, err + } + switch d.Type { + case "authorized_user": + cfg := &oauth2.Config{ + ClientID: d.ClientID, + ClientSecret: d.ClientSecret, + Scopes: append([]string{}, scopes...), // copy + Endpoint: Endpoint, + } + tok := &oauth2.Token{RefreshToken: d.RefreshToken} + return cfg.TokenSource(ctx, tok), nil + case "service_account": + cfg := &jwt.Config{ + Email: d.ClientEmail, + PrivateKey: []byte(d.PrivateKey), + Scopes: append([]string{}, scopes...), // copy + TokenURL: JWTTokenURL, + } + return cfg.TokenSource(ctx), nil + case "": + return nil, errors.New("missing 'type' field in credentials") + default: + return nil, fmt.Errorf("unknown credential type: %q", d.Type) + } +} diff --git a/vendor/github.com/docker/distribution/vendor/golang.org/x/oauth2/google/google.go b/vendor/github.com/docker/distribution/vendor/golang.org/x/oauth2/google/google.go new file mode 100644 index 000000000..0bed73866 --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/golang.org/x/oauth2/google/google.go @@ -0,0 +1,145 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package google provides support for making OAuth2 authorized and +// authenticated HTTP requests to Google APIs. +// It supports the Web server flow, client-side credentials, service accounts, +// Google Compute Engine service accounts, and Google App Engine service +// accounts. +// +// For more information, please read +// https://developers.google.com/accounts/docs/OAuth2 +// and +// https://developers.google.com/accounts/docs/application-default-credentials. +package google + +import ( + "encoding/json" + "errors" + "fmt" + "strings" + "time" + + "golang.org/x/oauth2" + "golang.org/x/oauth2/jwt" + "google.golang.org/cloud/compute/metadata" +) + +// Endpoint is Google's OAuth 2.0 endpoint. +var Endpoint = oauth2.Endpoint{ + AuthURL: "https://accounts.google.com/o/oauth2/auth", + TokenURL: "https://accounts.google.com/o/oauth2/token", +} + +// JWTTokenURL is Google's OAuth 2.0 token URL to use with the JWT flow. +const JWTTokenURL = "https://accounts.google.com/o/oauth2/token" + +// ConfigFromJSON uses a Google Developers Console client_credentials.json +// file to construct a config. +// client_credentials.json can be downloadable from https://console.developers.google.com, +// under "APIs & Auth" > "Credentials". Download the Web application credentials in the +// JSON format and provide the contents of the file as jsonKey. 
+func ConfigFromJSON(jsonKey []byte, scope ...string) (*oauth2.Config, error) { + type cred struct { + ClientID string `json:"client_id"` + ClientSecret string `json:"client_secret"` + RedirectURIs []string `json:"redirect_uris"` + AuthURI string `json:"auth_uri"` + TokenURI string `json:"token_uri"` + } + var j struct { + Web *cred `json:"web"` + Installed *cred `json:"installed"` + } + if err := json.Unmarshal(jsonKey, &j); err != nil { + return nil, err + } + var c *cred + switch { + case j.Web != nil: + c = j.Web + case j.Installed != nil: + c = j.Installed + default: + return nil, fmt.Errorf("oauth2/google: no credentials found") + } + if len(c.RedirectURIs) < 1 { + return nil, errors.New("oauth2/google: missing redirect URL in the client_credentials.json") + } + return &oauth2.Config{ + ClientID: c.ClientID, + ClientSecret: c.ClientSecret, + RedirectURL: c.RedirectURIs[0], + Scopes: scope, + Endpoint: oauth2.Endpoint{ + AuthURL: c.AuthURI, + TokenURL: c.TokenURI, + }, + }, nil +} + +// JWTConfigFromJSON uses a Google Developers service account JSON key file to read +// the credentials that authorize and authenticate the requests. +// Create a service account on "Credentials" page under "APIs & Auth" for your +// project at https://console.developers.google.com to download a JSON key file. +func JWTConfigFromJSON(jsonKey []byte, scope ...string) (*jwt.Config, error) { + var key struct { + Email string `json:"client_email"` + PrivateKey string `json:"private_key"` + } + if err := json.Unmarshal(jsonKey, &key); err != nil { + return nil, err + } + return &jwt.Config{ + Email: key.Email, + PrivateKey: []byte(key.PrivateKey), + Scopes: scope, + TokenURL: JWTTokenURL, + }, nil +} + +// ComputeTokenSource returns a token source that fetches access tokens +// from Google Compute Engine (GCE)'s metadata server. It's only valid to use +// this token source if your program is running on a GCE instance. +// If no account is specified, "default" is used. +// Further information about retrieving access tokens from the GCE metadata +// server can be found at https://cloud.google.com/compute/docs/authentication. 
+func ComputeTokenSource(account string) oauth2.TokenSource { + return oauth2.ReuseTokenSource(nil, computeSource{account: account}) +} + +type computeSource struct { + account string +} + +func (cs computeSource) Token() (*oauth2.Token, error) { + if !metadata.OnGCE() { + return nil, errors.New("oauth2/google: can't get a token from the metadata service; not running on GCE") + } + acct := cs.account + if acct == "" { + acct = "default" + } + tokenJSON, err := metadata.Get("instance/service-accounts/" + acct + "/token") + if err != nil { + return nil, err + } + var res struct { + AccessToken string `json:"access_token"` + ExpiresInSec int `json:"expires_in"` + TokenType string `json:"token_type"` + } + err = json.NewDecoder(strings.NewReader(tokenJSON)).Decode(&res) + if err != nil { + return nil, fmt.Errorf("oauth2/google: invalid token JSON from metadata: %v", err) + } + if res.ExpiresInSec == 0 || res.AccessToken == "" { + return nil, fmt.Errorf("oauth2/google: incomplete token received from metadata") + } + return &oauth2.Token{ + AccessToken: res.AccessToken, + TokenType: res.TokenType, + Expiry: time.Now().Add(time.Duration(res.ExpiresInSec) * time.Second), + }, nil +} diff --git a/vendor/github.com/docker/distribution/vendor/golang.org/x/oauth2/google/jwt.go b/vendor/github.com/docker/distribution/vendor/golang.org/x/oauth2/google/jwt.go new file mode 100644 index 000000000..b91991786 --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/golang.org/x/oauth2/google/jwt.go @@ -0,0 +1,71 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package google + +import ( + "crypto/rsa" + "fmt" + "time" + + "golang.org/x/oauth2" + "golang.org/x/oauth2/internal" + "golang.org/x/oauth2/jws" +) + +// JWTAccessTokenSourceFromJSON uses a Google Developers service account JSON +// key file to read the credentials that authorize and authenticate the +// requests, and returns a TokenSource that does not use any OAuth2 flow but +// instead creates a JWT and sends that as the access token. +// The audience is typically a URL that specifies the scope of the credentials. +// +// Note that this is not a standard OAuth flow, but rather an +// optimization supported by a few Google services. +// Unless you know otherwise, you should use JWTConfigFromJSON instead. 
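
To show how the credential helpers above fit together, here is a minimal sketch using DefaultClient for Application Default Credentials and JWTConfigFromJSON plus TokenSource for an explicit service-account key; the key path and scope URL are illustrative assumptions, not part of this change:

package main

import (
	"io/ioutil"
	"log"

	"golang.org/x/net/context"
	"golang.org/x/oauth2"
	"golang.org/x/oauth2/google"
)

func main() {
	ctx := context.Background()

	// Application Default Credentials: env var, well-known gcloud file,
	// App Engine, then the GCE metadata server, as described above.
	client, err := google.DefaultClient(ctx, "https://www.googleapis.com/auth/devstorage.read_only")
	if err != nil {
		log.Fatal(err)
	}
	_ = client // ordinary *http.Client with auto-refreshing tokens

	// Explicit service-account key file (path is an assumption for this sketch).
	keyJSON, err := ioutil.ReadFile("/path/to/key.json")
	if err != nil {
		log.Fatal(err)
	}
	conf, err := google.JWTConfigFromJSON(keyJSON, "https://www.googleapis.com/auth/devstorage.read_only")
	if err != nil {
		log.Fatal(err)
	}
	_ = oauth2.NewClient(ctx, conf.TokenSource(ctx))
}
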
+func JWTAccessTokenSourceFromJSON(jsonKey []byte, audience string) (oauth2.TokenSource, error) { + cfg, err := JWTConfigFromJSON(jsonKey) + if err != nil { + return nil, fmt.Errorf("google: could not parse JSON key: %v", err) + } + pk, err := internal.ParseKey(cfg.PrivateKey) + if err != nil { + return nil, fmt.Errorf("google: could not parse key: %v", err) + } + ts := &jwtAccessTokenSource{ + email: cfg.Email, + audience: audience, + pk: pk, + } + tok, err := ts.Token() + if err != nil { + return nil, err + } + return oauth2.ReuseTokenSource(tok, ts), nil +} + +type jwtAccessTokenSource struct { + email, audience string + pk *rsa.PrivateKey +} + +func (ts *jwtAccessTokenSource) Token() (*oauth2.Token, error) { + iat := time.Now() + exp := iat.Add(time.Hour) + cs := &jws.ClaimSet{ + Iss: ts.email, + Sub: ts.email, + Aud: ts.audience, + Iat: iat.Unix(), + Exp: exp.Unix(), + } + hdr := &jws.Header{ + Algorithm: "RS256", + Typ: "JWT", + } + msg, err := jws.Encode(hdr, cs, ts.pk) + if err != nil { + return nil, fmt.Errorf("google: could not encode JWT: %v", err) + } + return &oauth2.Token{AccessToken: msg, TokenType: "Bearer", Expiry: exp}, nil +} diff --git a/vendor/github.com/docker/distribution/vendor/golang.org/x/oauth2/google/sdk.go b/vendor/github.com/docker/distribution/vendor/golang.org/x/oauth2/google/sdk.go new file mode 100644 index 000000000..d29a3bb9b --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/golang.org/x/oauth2/google/sdk.go @@ -0,0 +1,168 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package google + +import ( + "encoding/json" + "errors" + "fmt" + "net/http" + "os" + "os/user" + "path/filepath" + "runtime" + "strings" + "time" + + "golang.org/x/net/context" + "golang.org/x/oauth2" + "golang.org/x/oauth2/internal" +) + +type sdkCredentials struct { + Data []struct { + Credential struct { + ClientID string `json:"client_id"` + ClientSecret string `json:"client_secret"` + AccessToken string `json:"access_token"` + RefreshToken string `json:"refresh_token"` + TokenExpiry *time.Time `json:"token_expiry"` + } `json:"credential"` + Key struct { + Account string `json:"account"` + Scope string `json:"scope"` + } `json:"key"` + } +} + +// An SDKConfig provides access to tokens from an account already +// authorized via the Google Cloud SDK. +type SDKConfig struct { + conf oauth2.Config + initialToken *oauth2.Token +} + +// NewSDKConfig creates an SDKConfig for the given Google Cloud SDK +// account. If account is empty, the account currently active in +// Google Cloud SDK properties is used. +// Google Cloud SDK credentials must be created by running `gcloud auth` +// before using this function. +// The Google Cloud SDK is available at https://cloud.google.com/sdk/. 
+func NewSDKConfig(account string) (*SDKConfig, error) { + configPath, err := sdkConfigPath() + if err != nil { + return nil, fmt.Errorf("oauth2/google: error getting SDK config path: %v", err) + } + credentialsPath := filepath.Join(configPath, "credentials") + f, err := os.Open(credentialsPath) + if err != nil { + return nil, fmt.Errorf("oauth2/google: failed to load SDK credentials: %v", err) + } + defer f.Close() + + var c sdkCredentials + if err := json.NewDecoder(f).Decode(&c); err != nil { + return nil, fmt.Errorf("oauth2/google: failed to decode SDK credentials from %q: %v", credentialsPath, err) + } + if len(c.Data) == 0 { + return nil, fmt.Errorf("oauth2/google: no credentials found in %q, run `gcloud auth login` to create one", credentialsPath) + } + if account == "" { + propertiesPath := filepath.Join(configPath, "properties") + f, err := os.Open(propertiesPath) + if err != nil { + return nil, fmt.Errorf("oauth2/google: failed to load SDK properties: %v", err) + } + defer f.Close() + ini, err := internal.ParseINI(f) + if err != nil { + return nil, fmt.Errorf("oauth2/google: failed to parse SDK properties %q: %v", propertiesPath, err) + } + core, ok := ini["core"] + if !ok { + return nil, fmt.Errorf("oauth2/google: failed to find [core] section in %v", ini) + } + active, ok := core["account"] + if !ok { + return nil, fmt.Errorf("oauth2/google: failed to find %q attribute in %v", "account", core) + } + account = active + } + + for _, d := range c.Data { + if account == "" || d.Key.Account == account { + if d.Credential.AccessToken == "" && d.Credential.RefreshToken == "" { + return nil, fmt.Errorf("oauth2/google: no token available for account %q", account) + } + var expiry time.Time + if d.Credential.TokenExpiry != nil { + expiry = *d.Credential.TokenExpiry + } + return &SDKConfig{ + conf: oauth2.Config{ + ClientID: d.Credential.ClientID, + ClientSecret: d.Credential.ClientSecret, + Scopes: strings.Split(d.Key.Scope, " "), + Endpoint: Endpoint, + RedirectURL: "oob", + }, + initialToken: &oauth2.Token{ + AccessToken: d.Credential.AccessToken, + RefreshToken: d.Credential.RefreshToken, + Expiry: expiry, + }, + }, nil + } + } + return nil, fmt.Errorf("oauth2/google: no such credentials for account %q", account) +} + +// Client returns an HTTP client using Google Cloud SDK credentials to +// authorize requests. The token will auto-refresh as necessary. The +// underlying http.RoundTripper will be obtained using the provided +// context. The returned client and its Transport should not be +// modified. +func (c *SDKConfig) Client(ctx context.Context) *http.Client { + return &http.Client{ + Transport: &oauth2.Transport{ + Source: c.TokenSource(ctx), + }, + } +} + +// TokenSource returns an oauth2.TokenSource that retrieve tokens from +// Google Cloud SDK credentials using the provided context. +// It will returns the current access token stored in the credentials, +// and refresh it when it expires, but it won't update the credentials +// with the new access token. +func (c *SDKConfig) TokenSource(ctx context.Context) oauth2.TokenSource { + return c.conf.TokenSource(ctx, c.initialToken) +} + +// Scopes are the OAuth 2.0 scopes the current account is authorized for. +func (c *SDKConfig) Scopes() []string { + return c.conf.Scopes +} + +// sdkConfigPath tries to guess where the gcloud config is located. +// It can be overridden during tests. 
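
For completeness, a minimal sketch of the gcloud-SDK credential path described above, assuming `gcloud auth login` has already been run on the machine; the empty account string simply selects the account currently active in the gcloud properties:

package main

import (
	"log"

	"golang.org/x/net/context"
	"golang.org/x/oauth2/google"
)

func main() {
	// An empty account name selects whatever account is active in the
	// gcloud properties file; `gcloud auth login` must have been run first.
	conf, err := google.NewSDKConfig("")
	if err != nil {
		log.Fatal(err)
	}
	client := conf.Client(context.Background()) // auto-refreshing *http.Client
	_ = client
	log.Printf("scopes authorized for the SDK account: %v", conf.Scopes())
}
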
+var sdkConfigPath = func() (string, error) { + if runtime.GOOS == "windows" { + return filepath.Join(os.Getenv("APPDATA"), "gcloud"), nil + } + homeDir := guessUnixHomeDir() + if homeDir == "" { + return "", errors.New("unable to get current user home directory: os/user lookup failed; $HOME is empty") + } + return filepath.Join(homeDir, ".config", "gcloud"), nil +} + +func guessUnixHomeDir() string { + usr, err := user.Current() + if err == nil { + return usr.HomeDir + } + return os.Getenv("HOME") +} diff --git a/vendor/github.com/docker/distribution/vendor/golang.org/x/oauth2/internal/oauth2.go b/vendor/github.com/docker/distribution/vendor/golang.org/x/oauth2/internal/oauth2.go new file mode 100644 index 000000000..fbe1028d6 --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/golang.org/x/oauth2/internal/oauth2.go @@ -0,0 +1,76 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package internal contains support packages for oauth2 package. +package internal + +import ( + "bufio" + "crypto/rsa" + "crypto/x509" + "encoding/pem" + "errors" + "fmt" + "io" + "strings" +) + +// ParseKey converts the binary contents of a private key file +// to an *rsa.PrivateKey. It detects whether the private key is in a +// PEM container or not. If so, it extracts the the private key +// from PEM container before conversion. It only supports PEM +// containers with no passphrase. +func ParseKey(key []byte) (*rsa.PrivateKey, error) { + block, _ := pem.Decode(key) + if block != nil { + key = block.Bytes + } + parsedKey, err := x509.ParsePKCS8PrivateKey(key) + if err != nil { + parsedKey, err = x509.ParsePKCS1PrivateKey(key) + if err != nil { + return nil, fmt.Errorf("private key should be a PEM or plain PKSC1 or PKCS8; parse error: %v", err) + } + } + parsed, ok := parsedKey.(*rsa.PrivateKey) + if !ok { + return nil, errors.New("private key is invalid") + } + return parsed, nil +} + +func ParseINI(ini io.Reader) (map[string]map[string]string, error) { + result := map[string]map[string]string{ + "": map[string]string{}, // root section + } + scanner := bufio.NewScanner(ini) + currentSection := "" + for scanner.Scan() { + line := strings.TrimSpace(scanner.Text()) + if strings.HasPrefix(line, ";") { + // comment. + continue + } + if strings.HasPrefix(line, "[") && strings.HasSuffix(line, "]") { + currentSection = strings.TrimSpace(line[1 : len(line)-1]) + result[currentSection] = map[string]string{} + continue + } + parts := strings.SplitN(line, "=", 2) + if len(parts) == 2 && parts[0] != "" { + result[currentSection][strings.TrimSpace(parts[0])] = strings.TrimSpace(parts[1]) + } + } + if err := scanner.Err(); err != nil { + return nil, fmt.Errorf("error scanning ini: %v", err) + } + return result, nil +} + +func CondVal(v string) []string { + if v == "" { + return nil + } + return []string{v} +} diff --git a/vendor/github.com/docker/distribution/vendor/golang.org/x/oauth2/internal/token.go b/vendor/github.com/docker/distribution/vendor/golang.org/x/oauth2/internal/token.go new file mode 100644 index 000000000..739a89bfe --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/golang.org/x/oauth2/internal/token.go @@ -0,0 +1,224 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package internal contains support packages for oauth2 package. 
+package internal + +import ( + "encoding/json" + "fmt" + "io" + "io/ioutil" + "mime" + "net/http" + "net/url" + "strconv" + "strings" + "time" + + "golang.org/x/net/context" +) + +// Token represents the crendentials used to authorize +// the requests to access protected resources on the OAuth 2.0 +// provider's backend. +// +// This type is a mirror of oauth2.Token and exists to break +// an otherwise-circular dependency. Other internal packages +// should convert this Token into an oauth2.Token before use. +type Token struct { + // AccessToken is the token that authorizes and authenticates + // the requests. + AccessToken string + + // TokenType is the type of token. + // The Type method returns either this or "Bearer", the default. + TokenType string + + // RefreshToken is a token that's used by the application + // (as opposed to the user) to refresh the access token + // if it expires. + RefreshToken string + + // Expiry is the optional expiration time of the access token. + // + // If zero, TokenSource implementations will reuse the same + // token forever and RefreshToken or equivalent + // mechanisms for that TokenSource will not be used. + Expiry time.Time + + // Raw optionally contains extra metadata from the server + // when updating a token. + Raw interface{} +} + +// tokenJSON is the struct representing the HTTP response from OAuth2 +// providers returning a token in JSON form. +type tokenJSON struct { + AccessToken string `json:"access_token"` + TokenType string `json:"token_type"` + RefreshToken string `json:"refresh_token"` + ExpiresIn expirationTime `json:"expires_in"` // at least PayPal returns string, while most return number + Expires expirationTime `json:"expires"` // broken Facebook spelling of expires_in +} + +func (e *tokenJSON) expiry() (t time.Time) { + if v := e.ExpiresIn; v != 0 { + return time.Now().Add(time.Duration(v) * time.Second) + } + if v := e.Expires; v != 0 { + return time.Now().Add(time.Duration(v) * time.Second) + } + return +} + +type expirationTime int32 + +func (e *expirationTime) UnmarshalJSON(b []byte) error { + var n json.Number + err := json.Unmarshal(b, &n) + if err != nil { + return err + } + i, err := n.Int64() + if err != nil { + return err + } + *e = expirationTime(i) + return nil +} + +var brokenAuthHeaderProviders = []string{ + "https://accounts.google.com/", + "https://api.dropbox.com/", + "https://api.instagram.com/", + "https://api.netatmo.net/", + "https://api.odnoklassniki.ru/", + "https://api.pushbullet.com/", + "https://api.soundcloud.com/", + "https://api.twitch.tv/", + "https://app.box.com/", + "https://connect.stripe.com/", + "https://login.microsoftonline.com/", + "https://login.salesforce.com/", + "https://oauth.sandbox.trainingpeaks.com/", + "https://oauth.trainingpeaks.com/", + "https://oauth.vk.com/", + "https://openapi.baidu.com/", + "https://slack.com/", + "https://test-sandbox.auth.corp.google.com", + "https://test.salesforce.com/", + "https://user.gini.net/", + "https://www.douban.com/", + "https://www.googleapis.com/", + "https://www.linkedin.com/", + "https://www.strava.com/oauth/", + "https://www.wunderlist.com/oauth/", + "https://api.patreon.com/", +} + +func RegisterBrokenAuthHeaderProvider(tokenURL string) { + brokenAuthHeaderProviders = append(brokenAuthHeaderProviders, tokenURL) +} + +// providerAuthHeaderWorks reports whether the OAuth2 server identified by the tokenURL +// implements the OAuth2 spec correctly +// See https://code.google.com/p/goauth2/issues/detail?id=31 for background. 
+// In summary: +// - Reddit only accepts client secret in the Authorization header +// - Dropbox accepts either it in URL param or Auth header, but not both. +// - Google only accepts URL param (not spec compliant?), not Auth header +// - Stripe only accepts client secret in Auth header with Bearer method, not Basic +func providerAuthHeaderWorks(tokenURL string) bool { + for _, s := range brokenAuthHeaderProviders { + if strings.HasPrefix(tokenURL, s) { + // Some sites fail to implement the OAuth2 spec fully. + return false + } + } + + // Assume the provider implements the spec properly + // otherwise. We can add more exceptions as they're + // discovered. We will _not_ be adding configurable hooks + // to this package to let users select server bugs. + return true +} + +func RetrieveToken(ctx context.Context, ClientID, ClientSecret, TokenURL string, v url.Values) (*Token, error) { + hc, err := ContextClient(ctx) + if err != nil { + return nil, err + } + v.Set("client_id", ClientID) + bustedAuth := !providerAuthHeaderWorks(TokenURL) + if bustedAuth && ClientSecret != "" { + v.Set("client_secret", ClientSecret) + } + req, err := http.NewRequest("POST", TokenURL, strings.NewReader(v.Encode())) + if err != nil { + return nil, err + } + req.Header.Set("Content-Type", "application/x-www-form-urlencoded") + if !bustedAuth { + req.SetBasicAuth(ClientID, ClientSecret) + } + r, err := hc.Do(req) + if err != nil { + return nil, err + } + defer r.Body.Close() + body, err := ioutil.ReadAll(io.LimitReader(r.Body, 1<<20)) + if err != nil { + return nil, fmt.Errorf("oauth2: cannot fetch token: %v", err) + } + if code := r.StatusCode; code < 200 || code > 299 { + return nil, fmt.Errorf("oauth2: cannot fetch token: %v\nResponse: %s", r.Status, body) + } + + var token *Token + content, _, _ := mime.ParseMediaType(r.Header.Get("Content-Type")) + switch content { + case "application/x-www-form-urlencoded", "text/plain": + vals, err := url.ParseQuery(string(body)) + if err != nil { + return nil, err + } + token = &Token{ + AccessToken: vals.Get("access_token"), + TokenType: vals.Get("token_type"), + RefreshToken: vals.Get("refresh_token"), + Raw: vals, + } + e := vals.Get("expires_in") + if e == "" { + // TODO(jbd): Facebook's OAuth2 implementation is broken and + // returns expires_in field in expires. Remove the fallback to expires, + // when Facebook fixes their implementation. + e = vals.Get("expires") + } + expires, _ := strconv.Atoi(e) + if expires != 0 { + token.Expiry = time.Now().Add(time.Duration(expires) * time.Second) + } + default: + var tj tokenJSON + if err = json.Unmarshal(body, &tj); err != nil { + return nil, err + } + token = &Token{ + AccessToken: tj.AccessToken, + TokenType: tj.TokenType, + RefreshToken: tj.RefreshToken, + Expiry: tj.expiry(), + Raw: make(map[string]interface{}), + } + json.Unmarshal(body, &token.Raw) // no error checks for optional fields + } + // Don't overwrite `RefreshToken` with an empty value + // if this was a token refreshing request. + if token.RefreshToken == "" { + token.RefreshToken = v.Get("refresh_token") + } + return token, nil +} diff --git a/vendor/github.com/docker/distribution/vendor/golang.org/x/oauth2/internal/transport.go b/vendor/github.com/docker/distribution/vendor/golang.org/x/oauth2/internal/transport.go new file mode 100644 index 000000000..f1f173e34 --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/golang.org/x/oauth2/internal/transport.go @@ -0,0 +1,69 @@ +// Copyright 2014 The Go Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package internal contains support packages for oauth2 package. +package internal + +import ( + "net/http" + + "golang.org/x/net/context" +) + +// HTTPClient is the context key to use with golang.org/x/net/context's +// WithValue function to associate an *http.Client value with a context. +var HTTPClient ContextKey + +// ContextKey is just an empty struct. It exists so HTTPClient can be +// an immutable public variable with a unique type. It's immutable +// because nobody else can create a ContextKey, being unexported. +type ContextKey struct{} + +// ContextClientFunc is a func which tries to return an *http.Client +// given a Context value. If it returns an error, the search stops +// with that error. If it returns (nil, nil), the search continues +// down the list of registered funcs. +type ContextClientFunc func(context.Context) (*http.Client, error) + +var contextClientFuncs []ContextClientFunc + +func RegisterContextClientFunc(fn ContextClientFunc) { + contextClientFuncs = append(contextClientFuncs, fn) +} + +func ContextClient(ctx context.Context) (*http.Client, error) { + if ctx != nil { + if hc, ok := ctx.Value(HTTPClient).(*http.Client); ok { + return hc, nil + } + } + for _, fn := range contextClientFuncs { + c, err := fn(ctx) + if err != nil { + return nil, err + } + if c != nil { + return c, nil + } + } + return http.DefaultClient, nil +} + +func ContextTransport(ctx context.Context) http.RoundTripper { + hc, err := ContextClient(ctx) + // This is a rare error case (somebody using nil on App Engine). + if err != nil { + return ErrorTransport{err} + } + return hc.Transport +} + +// ErrorTransport returns the specified error on RoundTrip. +// This RoundTripper should be used in rare error cases where +// error handling can be postponed to response handling time. +type ErrorTransport struct{ Err error } + +func (t ErrorTransport) RoundTrip(*http.Request) (*http.Response, error) { + return nil, t.Err +} diff --git a/vendor/github.com/docker/distribution/vendor/golang.org/x/oauth2/jws/jws.go b/vendor/github.com/docker/distribution/vendor/golang.org/x/oauth2/jws/jws.go new file mode 100644 index 000000000..b46edb27c --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/golang.org/x/oauth2/jws/jws.go @@ -0,0 +1,172 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package jws provides encoding and decoding utilities for +// signed JWS messages. +package jws + +import ( + "bytes" + "crypto" + "crypto/rand" + "crypto/rsa" + "crypto/sha256" + "encoding/base64" + "encoding/json" + "errors" + "fmt" + "strings" + "time" +) + +// ClaimSet contains information about the JWT signature including the +// permissions being requested (scopes), the target of the token, the issuer, +// the time the token was issued, and the lifetime of the token. +type ClaimSet struct { + Iss string `json:"iss"` // email address of the client_id of the application making the access token request + Scope string `json:"scope,omitempty"` // space-delimited list of the permissions the application requests + Aud string `json:"aud"` // descriptor of the intended target of the assertion (Optional). 
+ Exp int64 `json:"exp"` // the expiration time of the assertion (seconds since Unix epoch) + Iat int64 `json:"iat"` // the time the assertion was issued (seconds since Unix epoch) + Typ string `json:"typ,omitempty"` // token type (Optional). + + // Email for which the application is requesting delegated access (Optional). + Sub string `json:"sub,omitempty"` + + // The old name of Sub. Client keeps setting Prn to be + // complaint with legacy OAuth 2.0 providers. (Optional) + Prn string `json:"prn,omitempty"` + + // See http://tools.ietf.org/html/draft-jones-json-web-token-10#section-4.3 + // This array is marshalled using custom code (see (c *ClaimSet) encode()). + PrivateClaims map[string]interface{} `json:"-"` +} + +func (c *ClaimSet) encode() (string, error) { + // Reverting time back for machines whose time is not perfectly in sync. + // If client machine's time is in the future according + // to Google servers, an access token will not be issued. + now := time.Now().Add(-10 * time.Second) + if c.Iat == 0 { + c.Iat = now.Unix() + } + if c.Exp == 0 { + c.Exp = now.Add(time.Hour).Unix() + } + if c.Exp < c.Iat { + return "", fmt.Errorf("jws: invalid Exp = %v; must be later than Iat = %v", c.Exp, c.Iat) + } + + b, err := json.Marshal(c) + if err != nil { + return "", err + } + + if len(c.PrivateClaims) == 0 { + return base64Encode(b), nil + } + + // Marshal private claim set and then append it to b. + prv, err := json.Marshal(c.PrivateClaims) + if err != nil { + return "", fmt.Errorf("jws: invalid map of private claims %v", c.PrivateClaims) + } + + // Concatenate public and private claim JSON objects. + if !bytes.HasSuffix(b, []byte{'}'}) { + return "", fmt.Errorf("jws: invalid JSON %s", b) + } + if !bytes.HasPrefix(prv, []byte{'{'}) { + return "", fmt.Errorf("jws: invalid JSON %s", prv) + } + b[len(b)-1] = ',' // Replace closing curly brace with a comma. + b = append(b, prv[1:]...) // Append private claims. + return base64Encode(b), nil +} + +// Header represents the header for the signed JWS payloads. +type Header struct { + // The algorithm used for signature. + Algorithm string `json:"alg"` + + // Represents the token type. + Typ string `json:"typ"` +} + +func (h *Header) encode() (string, error) { + b, err := json.Marshal(h) + if err != nil { + return "", err + } + return base64Encode(b), nil +} + +// Decode decodes a claim set from a JWS payload. +func Decode(payload string) (*ClaimSet, error) { + // decode returned id token to get expiry + s := strings.Split(payload, ".") + if len(s) < 2 { + // TODO(jbd): Provide more context about the error. + return nil, errors.New("jws: invalid token received") + } + decoded, err := base64Decode(s[1]) + if err != nil { + return nil, err + } + c := &ClaimSet{} + err = json.NewDecoder(bytes.NewBuffer(decoded)).Decode(c) + return c, err +} + +// Signer returns a signature for the given data. +type Signer func(data []byte) (sig []byte, err error) + +// EncodeWithSigner encodes a header and claim set with the provided signer. +func EncodeWithSigner(header *Header, c *ClaimSet, sg Signer) (string, error) { + head, err := header.encode() + if err != nil { + return "", err + } + cs, err := c.encode() + if err != nil { + return "", err + } + ss := fmt.Sprintf("%s.%s", head, cs) + sig, err := sg([]byte(ss)) + if err != nil { + return "", err + } + return fmt.Sprintf("%s.%s", ss, base64Encode(sig)), nil +} + +// Encode encodes a signed JWS with provided header and claim set. 
+// This invokes EncodeWithSigner using crypto/rsa.SignPKCS1v15 with the given RSA private key. +func Encode(header *Header, c *ClaimSet, key *rsa.PrivateKey) (string, error) { + sg := func(data []byte) (sig []byte, err error) { + h := sha256.New() + h.Write([]byte(data)) + return rsa.SignPKCS1v15(rand.Reader, key, crypto.SHA256, h.Sum(nil)) + } + return EncodeWithSigner(header, c, sg) +} + +// base64Encode returns and Base64url encoded version of the input string with any +// trailing "=" stripped. +func base64Encode(b []byte) string { + return strings.TrimRight(base64.URLEncoding.EncodeToString(b), "=") +} + +// base64Decode decodes the Base64url encoded string +func base64Decode(s string) ([]byte, error) { + // add back missing padding + switch len(s) % 4 { + case 1: + s += "===" + case 2: + s += "==" + case 3: + s += "=" + } + return base64.URLEncoding.DecodeString(s) +} diff --git a/vendor/github.com/docker/distribution/vendor/golang.org/x/oauth2/jwt/jwt.go b/vendor/github.com/docker/distribution/vendor/golang.org/x/oauth2/jwt/jwt.go new file mode 100644 index 000000000..2ffad21a6 --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/golang.org/x/oauth2/jwt/jwt.go @@ -0,0 +1,153 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package jwt implements the OAuth 2.0 JSON Web Token flow, commonly +// known as "two-legged OAuth 2.0". +// +// See: https://tools.ietf.org/html/draft-ietf-oauth-jwt-bearer-12 +package jwt + +import ( + "encoding/json" + "fmt" + "io" + "io/ioutil" + "net/http" + "net/url" + "strings" + "time" + + "golang.org/x/net/context" + "golang.org/x/oauth2" + "golang.org/x/oauth2/internal" + "golang.org/x/oauth2/jws" +) + +var ( + defaultGrantType = "urn:ietf:params:oauth:grant-type:jwt-bearer" + defaultHeader = &jws.Header{Algorithm: "RS256", Typ: "JWT"} +) + +// Config is the configuration for using JWT to fetch tokens, +// commonly known as "two-legged OAuth 2.0". +type Config struct { + // Email is the OAuth client identifier used when communicating with + // the configured OAuth provider. + Email string + + // PrivateKey contains the contents of an RSA private key or the + // contents of a PEM file that contains a private key. The provided + // private key is used to sign JWT payloads. + // PEM containers with a passphrase are not supported. + // Use the following command to convert a PKCS 12 file into a PEM. + // + // $ openssl pkcs12 -in key.p12 -out key.pem -nodes + // + PrivateKey []byte + + // Subject is the optional user to impersonate. + Subject string + + // Scopes optionally specifies a list of requested permission scopes. + Scopes []string + + // TokenURL is the endpoint required to complete the 2-legged JWT flow. + TokenURL string + + // Expires optionally specifies how long the token is valid for. + Expires time.Duration +} + +// TokenSource returns a JWT TokenSource using the configuration +// in c and the HTTP client from the provided context. +func (c *Config) TokenSource(ctx context.Context) oauth2.TokenSource { + return oauth2.ReuseTokenSource(nil, jwtSource{ctx, c}) +} + +// Client returns an HTTP client wrapping the context's +// HTTP transport and adding Authorization headers with tokens +// obtained from c. +// +// The returned client and its Transport should not be modified. 
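For orientation, a minimal sketch of signing a claim set with the vendored jws package above; the generated RSA key stands in for a real service-account key, and the issuer, scope, and audience values are placeholders.

package main

import (
	"crypto/rand"
	"crypto/rsa"
	"fmt"
	"log"

	"golang.org/x/oauth2/jws"
)

func main() {
	// Stand-in for a service-account private key loaded from disk.
	key, err := rsa.GenerateKey(rand.Reader, 2048)
	if err != nil {
		log.Fatal(err)
	}
	header := &jws.Header{Algorithm: "RS256", Typ: "JWT"}
	claims := &jws.ClaimSet{
		Iss:   "service-account@example.com",
		Scope: "https://www.googleapis.com/auth/devstorage.read_only",
		Aud:   "https://accounts.google.com/o/oauth2/token",
	}
	token, err := jws.Encode(header, claims, key)
	if err != nil {
		log.Fatal(err)
	}
	// token is "header.claims.signature", each part base64url-encoded.
	fmt.Println(token[:20], "...")
}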
+func (c *Config) Client(ctx context.Context) *http.Client { + return oauth2.NewClient(ctx, c.TokenSource(ctx)) +} + +// jwtSource is a source that always does a signed JWT request for a token. +// It should typically be wrapped with a reuseTokenSource. +type jwtSource struct { + ctx context.Context + conf *Config +} + +func (js jwtSource) Token() (*oauth2.Token, error) { + pk, err := internal.ParseKey(js.conf.PrivateKey) + if err != nil { + return nil, err + } + hc := oauth2.NewClient(js.ctx, nil) + claimSet := &jws.ClaimSet{ + Iss: js.conf.Email, + Scope: strings.Join(js.conf.Scopes, " "), + Aud: js.conf.TokenURL, + } + if subject := js.conf.Subject; subject != "" { + claimSet.Sub = subject + // prn is the old name of sub. Keep setting it + // to be compatible with legacy OAuth 2.0 providers. + claimSet.Prn = subject + } + if t := js.conf.Expires; t > 0 { + claimSet.Exp = time.Now().Add(t).Unix() + } + payload, err := jws.Encode(defaultHeader, claimSet, pk) + if err != nil { + return nil, err + } + v := url.Values{} + v.Set("grant_type", defaultGrantType) + v.Set("assertion", payload) + resp, err := hc.PostForm(js.conf.TokenURL, v) + if err != nil { + return nil, fmt.Errorf("oauth2: cannot fetch token: %v", err) + } + defer resp.Body.Close() + body, err := ioutil.ReadAll(io.LimitReader(resp.Body, 1<<20)) + if err != nil { + return nil, fmt.Errorf("oauth2: cannot fetch token: %v", err) + } + if c := resp.StatusCode; c < 200 || c > 299 { + return nil, fmt.Errorf("oauth2: cannot fetch token: %v\nResponse: %s", resp.Status, body) + } + // tokenRes is the JSON response body. + var tokenRes struct { + AccessToken string `json:"access_token"` + TokenType string `json:"token_type"` + IDToken string `json:"id_token"` + ExpiresIn int64 `json:"expires_in"` // relative seconds from now + } + if err := json.Unmarshal(body, &tokenRes); err != nil { + return nil, fmt.Errorf("oauth2: cannot fetch token: %v", err) + } + token := &oauth2.Token{ + AccessToken: tokenRes.AccessToken, + TokenType: tokenRes.TokenType, + } + raw := make(map[string]interface{}) + json.Unmarshal(body, &raw) // no error checks for optional fields + token = token.WithExtra(raw) + + if secs := tokenRes.ExpiresIn; secs > 0 { + token.Expiry = time.Now().Add(time.Duration(secs) * time.Second) + } + if v := tokenRes.IDToken; v != "" { + // decode returned id token to get expiry + claimSet, err := jws.Decode(v) + if err != nil { + return nil, fmt.Errorf("oauth2: error decoding JWT token: %v", err) + } + token.Expiry = time.Unix(claimSet.Exp, 0) + } + return token, nil +} diff --git a/vendor/github.com/docker/distribution/vendor/golang.org/x/oauth2/oauth2.go b/vendor/github.com/docker/distribution/vendor/golang.org/x/oauth2/oauth2.go new file mode 100644 index 000000000..9b7b977da --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/golang.org/x/oauth2/oauth2.go @@ -0,0 +1,337 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package oauth2 provides support for making +// OAuth2 authorized and authenticated HTTP requests. +// It can additionally grant authorization with Bearer JWT. +package oauth2 + +import ( + "bytes" + "errors" + "net/http" + "net/url" + "strings" + "sync" + + "golang.org/x/net/context" + "golang.org/x/oauth2/internal" +) + +// NoContext is the default context you should supply if not using +// your own context.Context (see https://golang.org/x/net/context). 
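A hedged sketch of the two-legged flow the jwt.Config above implements; the key file name, email, scope, and token URL are assumptions for illustration and are not taken from this change.

package main

import (
	"io/ioutil"
	"log"

	"golang.org/x/oauth2"
	"golang.org/x/oauth2/jwt"
)

func main() {
	// Hypothetical un-encrypted PEM key for a service account.
	keyPEM, err := ioutil.ReadFile("service-account.pem")
	if err != nil {
		log.Fatal(err)
	}
	conf := &jwt.Config{
		Email:      "service-account@example.com",
		PrivateKey: keyPEM,
		Scopes:     []string{"https://www.googleapis.com/auth/devstorage.read_only"},
		TokenURL:   "https://accounts.google.com/o/oauth2/token",
	}
	// The returned client signs a fresh JWT whenever a new access token is needed.
	client := conf.Client(oauth2.NoContext)
	resp, err := client.Get("https://www.googleapis.com/storage/v1/b?project=my-project")
	if err != nil {
		log.Fatal(err)
	}
	defer resp.Body.Close()
	log.Println(resp.Status)
}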
+var NoContext = context.TODO() + +// RegisterBrokenAuthHeaderProvider registers an OAuth2 server +// identified by the tokenURL prefix as an OAuth2 implementation +// which doesn't support the HTTP Basic authentication +// scheme to authenticate with the authorization server. +// Once a server is registered, credentials (client_id and client_secret) +// will be passed as query parameters rather than being present +// in the Authorization header. +// See https://code.google.com/p/goauth2/issues/detail?id=31 for background. +func RegisterBrokenAuthHeaderProvider(tokenURL string) { + internal.RegisterBrokenAuthHeaderProvider(tokenURL) +} + +// Config describes a typical 3-legged OAuth2 flow, with both the +// client application information and the server's endpoint URLs. +type Config struct { + // ClientID is the application's ID. + ClientID string + + // ClientSecret is the application's secret. + ClientSecret string + + // Endpoint contains the resource server's token endpoint + // URLs. These are constants specific to each server and are + // often available via site-specific packages, such as + // google.Endpoint or github.Endpoint. + Endpoint Endpoint + + // RedirectURL is the URL to redirect users going through + // the OAuth flow, after the resource owner's URLs. + RedirectURL string + + // Scope specifies optional requested permissions. + Scopes []string +} + +// A TokenSource is anything that can return a token. +type TokenSource interface { + // Token returns a token or an error. + // Token must be safe for concurrent use by multiple goroutines. + // The returned Token must not be modified. + Token() (*Token, error) +} + +// Endpoint contains the OAuth 2.0 provider's authorization and token +// endpoint URLs. +type Endpoint struct { + AuthURL string + TokenURL string +} + +var ( + // AccessTypeOnline and AccessTypeOffline are options passed + // to the Options.AuthCodeURL method. They modify the + // "access_type" field that gets sent in the URL returned by + // AuthCodeURL. + // + // Online is the default if neither is specified. If your + // application needs to refresh access tokens when the user + // is not present at the browser, then use offline. This will + // result in your application obtaining a refresh token the + // first time your application exchanges an authorization + // code for a user. + AccessTypeOnline AuthCodeOption = SetAuthURLParam("access_type", "online") + AccessTypeOffline AuthCodeOption = SetAuthURLParam("access_type", "offline") + + // ApprovalForce forces the users to view the consent dialog + // and confirm the permissions request at the URL returned + // from AuthCodeURL, even if they've already done so. + ApprovalForce AuthCodeOption = SetAuthURLParam("approval_prompt", "force") +) + +// An AuthCodeOption is passed to Config.AuthCodeURL. +type AuthCodeOption interface { + setValue(url.Values) +} + +type setParam struct{ k, v string } + +func (p setParam) setValue(m url.Values) { m.Set(p.k, p.v) } + +// SetAuthURLParam builds an AuthCodeOption which passes key/value parameters +// to a provider's authorization endpoint. +func SetAuthURLParam(key, value string) AuthCodeOption { + return setParam{key, value} +} + +// AuthCodeURL returns a URL to OAuth 2.0 provider's consent page +// that asks for permissions for the required scopes explicitly. +// +// State is a token to protect the user from CSRF attacks. You must +// always provide a non-zero string and validate that it matches the +// the state query parameter on your redirect callback. 
+// See http://tools.ietf.org/html/rfc6749#section-10.12 for more info. +// +// Opts may include AccessTypeOnline or AccessTypeOffline, as well +// as ApprovalForce. +func (c *Config) AuthCodeURL(state string, opts ...AuthCodeOption) string { + var buf bytes.Buffer + buf.WriteString(c.Endpoint.AuthURL) + v := url.Values{ + "response_type": {"code"}, + "client_id": {c.ClientID}, + "redirect_uri": internal.CondVal(c.RedirectURL), + "scope": internal.CondVal(strings.Join(c.Scopes, " ")), + "state": internal.CondVal(state), + } + for _, opt := range opts { + opt.setValue(v) + } + if strings.Contains(c.Endpoint.AuthURL, "?") { + buf.WriteByte('&') + } else { + buf.WriteByte('?') + } + buf.WriteString(v.Encode()) + return buf.String() +} + +// PasswordCredentialsToken converts a resource owner username and password +// pair into a token. +// +// Per the RFC, this grant type should only be used "when there is a high +// degree of trust between the resource owner and the client (e.g., the client +// is part of the device operating system or a highly privileged application), +// and when other authorization grant types are not available." +// See https://tools.ietf.org/html/rfc6749#section-4.3 for more info. +// +// The HTTP client to use is derived from the context. +// If nil, http.DefaultClient is used. +func (c *Config) PasswordCredentialsToken(ctx context.Context, username, password string) (*Token, error) { + return retrieveToken(ctx, c, url.Values{ + "grant_type": {"password"}, + "username": {username}, + "password": {password}, + "scope": internal.CondVal(strings.Join(c.Scopes, " ")), + }) +} + +// Exchange converts an authorization code into a token. +// +// It is used after a resource provider redirects the user back +// to the Redirect URI (the URL obtained from AuthCodeURL). +// +// The HTTP client to use is derived from the context. +// If a client is not provided via the context, http.DefaultClient is used. +// +// The code will be in the *http.Request.FormValue("code"). Before +// calling Exchange, be sure to validate FormValue("state"). +func (c *Config) Exchange(ctx context.Context, code string) (*Token, error) { + return retrieveToken(ctx, c, url.Values{ + "grant_type": {"authorization_code"}, + "code": {code}, + "redirect_uri": internal.CondVal(c.RedirectURL), + "scope": internal.CondVal(strings.Join(c.Scopes, " ")), + }) +} + +// Client returns an HTTP client using the provided token. +// The token will auto-refresh as necessary. The underlying +// HTTP transport will be obtained using the provided context. +// The returned client and its Transport should not be modified. +func (c *Config) Client(ctx context.Context, t *Token) *http.Client { + return NewClient(ctx, c.TokenSource(ctx, t)) +} + +// TokenSource returns a TokenSource that returns t until t expires, +// automatically refreshing it as necessary using the provided context. +// +// Most users will use Config.Client instead. +func (c *Config) TokenSource(ctx context.Context, t *Token) TokenSource { + tkr := &tokenRefresher{ + ctx: ctx, + conf: c, + } + if t != nil { + tkr.refreshToken = t.RefreshToken + } + return &reuseTokenSource{ + t: t, + new: tkr, + } +} + +// tokenRefresher is a TokenSource that makes "grant_type"=="refresh_token" +// HTTP requests to renew a token using a RefreshToken. 
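For reference, a minimal sketch of the three-legged flow that Config, AuthCodeURL, Exchange, and Client above support; the client credentials, endpoint URLs, and scope are placeholders.

package main

import (
	"fmt"
	"log"

	"golang.org/x/net/context"
	"golang.org/x/oauth2"
)

func main() {
	conf := &oauth2.Config{
		ClientID:     "client-id",
		ClientSecret: "client-secret",
		RedirectURL:  "https://example.com/callback",
		Scopes:       []string{"repo"},
		Endpoint: oauth2.Endpoint{
			AuthURL:  "https://provider.example.com/oauth/authorize",
			TokenURL: "https://provider.example.com/oauth/token",
		},
	}

	// Step 1: send the user to the consent page; the state value guards against CSRF.
	url := conf.AuthCodeURL("random-state", oauth2.AccessTypeOffline)
	fmt.Println("Visit:", url)

	// Step 2: exchange the authorization code returned on the redirect for a token.
	var code string
	fmt.Scan(&code)
	tok, err := conf.Exchange(context.Background(), code)
	if err != nil {
		log.Fatal(err)
	}

	// Step 3: the returned client attaches and auto-refreshes the token.
	client := conf.Client(context.Background(), tok)
	_ = client
}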
+type tokenRefresher struct { + ctx context.Context // used to get HTTP requests + conf *Config + refreshToken string +} + +// WARNING: Token is not safe for concurrent access, as it +// updates the tokenRefresher's refreshToken field. +// Within this package, it is used by reuseTokenSource which +// synchronizes calls to this method with its own mutex. +func (tf *tokenRefresher) Token() (*Token, error) { + if tf.refreshToken == "" { + return nil, errors.New("oauth2: token expired and refresh token is not set") + } + + tk, err := retrieveToken(tf.ctx, tf.conf, url.Values{ + "grant_type": {"refresh_token"}, + "refresh_token": {tf.refreshToken}, + }) + + if err != nil { + return nil, err + } + if tf.refreshToken != tk.RefreshToken { + tf.refreshToken = tk.RefreshToken + } + return tk, err +} + +// reuseTokenSource is a TokenSource that holds a single token in memory +// and validates its expiry before each call to retrieve it with +// Token. If it's expired, it will be auto-refreshed using the +// new TokenSource. +type reuseTokenSource struct { + new TokenSource // called when t is expired. + + mu sync.Mutex // guards t + t *Token +} + +// Token returns the current token if it's still valid, else will +// refresh the current token (using r.Context for HTTP client +// information) and return the new one. +func (s *reuseTokenSource) Token() (*Token, error) { + s.mu.Lock() + defer s.mu.Unlock() + if s.t.Valid() { + return s.t, nil + } + t, err := s.new.Token() + if err != nil { + return nil, err + } + s.t = t + return t, nil +} + +// StaticTokenSource returns a TokenSource that always returns the same token. +// Because the provided token t is never refreshed, StaticTokenSource is only +// useful for tokens that never expire. +func StaticTokenSource(t *Token) TokenSource { + return staticTokenSource{t} +} + +// staticTokenSource is a TokenSource that always returns the same Token. +type staticTokenSource struct { + t *Token +} + +func (s staticTokenSource) Token() (*Token, error) { + return s.t, nil +} + +// HTTPClient is the context key to use with golang.org/x/net/context's +// WithValue function to associate an *http.Client value with a context. +var HTTPClient internal.ContextKey + +// NewClient creates an *http.Client from a Context and TokenSource. +// The returned client is not valid beyond the lifetime of the context. +// +// As a special case, if src is nil, a non-OAuth2 client is returned +// using the provided context. This exists to support related OAuth2 +// packages. +func NewClient(ctx context.Context, src TokenSource) *http.Client { + if src == nil { + c, err := internal.ContextClient(ctx) + if err != nil { + return &http.Client{Transport: internal.ErrorTransport{err}} + } + return c + } + return &http.Client{ + Transport: &Transport{ + Base: internal.ContextTransport(ctx), + Source: ReuseTokenSource(nil, src), + }, + } +} + +// ReuseTokenSource returns a TokenSource which repeatedly returns the +// same token as long as it's valid, starting with t. +// When its cached token is invalid, a new token is obtained from src. +// +// ReuseTokenSource is typically used to reuse tokens from a cache +// (such as a file on disk) between runs of a program, rather than +// obtaining new tokens unnecessarily. +// +// The initial token t may be nil, in which case the TokenSource is +// wrapped in a caching version if it isn't one already. This also +// means it's always safe to wrap ReuseTokenSource around any other +// TokenSource without adverse effects. 
+func ReuseTokenSource(t *Token, src TokenSource) TokenSource { + // Don't wrap a reuseTokenSource in itself. That would work, + // but cause an unnecessary number of mutex operations. + // Just build the equivalent one. + if rt, ok := src.(*reuseTokenSource); ok { + if t == nil { + // Just use it directly. + return rt + } + src = rt.new + } + return &reuseTokenSource{ + t: t, + new: src, + } +} diff --git a/vendor/github.com/docker/distribution/vendor/golang.org/x/oauth2/token.go b/vendor/github.com/docker/distribution/vendor/golang.org/x/oauth2/token.go new file mode 100644 index 000000000..7a3167f15 --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/golang.org/x/oauth2/token.go @@ -0,0 +1,158 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package oauth2 + +import ( + "net/http" + "net/url" + "strconv" + "strings" + "time" + + "golang.org/x/net/context" + "golang.org/x/oauth2/internal" +) + +// expiryDelta determines how earlier a token should be considered +// expired than its actual expiration time. It is used to avoid late +// expirations due to client-server time mismatches. +const expiryDelta = 10 * time.Second + +// Token represents the crendentials used to authorize +// the requests to access protected resources on the OAuth 2.0 +// provider's backend. +// +// Most users of this package should not access fields of Token +// directly. They're exported mostly for use by related packages +// implementing derivative OAuth2 flows. +type Token struct { + // AccessToken is the token that authorizes and authenticates + // the requests. + AccessToken string `json:"access_token"` + + // TokenType is the type of token. + // The Type method returns either this or "Bearer", the default. + TokenType string `json:"token_type,omitempty"` + + // RefreshToken is a token that's used by the application + // (as opposed to the user) to refresh the access token + // if it expires. + RefreshToken string `json:"refresh_token,omitempty"` + + // Expiry is the optional expiration time of the access token. + // + // If zero, TokenSource implementations will reuse the same + // token forever and RefreshToken or equivalent + // mechanisms for that TokenSource will not be used. + Expiry time.Time `json:"expiry,omitempty"` + + // raw optionally contains extra metadata from the server + // when updating a token. + raw interface{} +} + +// Type returns t.TokenType if non-empty, else "Bearer". +func (t *Token) Type() string { + if strings.EqualFold(t.TokenType, "bearer") { + return "Bearer" + } + if strings.EqualFold(t.TokenType, "mac") { + return "MAC" + } + if strings.EqualFold(t.TokenType, "basic") { + return "Basic" + } + if t.TokenType != "" { + return t.TokenType + } + return "Bearer" +} + +// SetAuthHeader sets the Authorization header to r using the access +// token in t. +// +// This method is unnecessary when using Transport or an HTTP Client +// returned by this package. +func (t *Token) SetAuthHeader(r *http.Request) { + r.Header.Set("Authorization", t.Type()+" "+t.AccessToken) +} + +// WithExtra returns a new Token that's a clone of t, but using the +// provided raw extra map. This is only intended for use by packages +// implementing derivative OAuth2 flows. +func (t *Token) WithExtra(extra interface{}) *Token { + t2 := new(Token) + *t2 = *t + t2.raw = extra + return t2 +} + +// Extra returns an extra field. 
+// Extra fields are key-value pairs returned by the server as a +// part of the token retrieval response. +func (t *Token) Extra(key string) interface{} { + if raw, ok := t.raw.(map[string]interface{}); ok { + return raw[key] + } + + vals, ok := t.raw.(url.Values) + if !ok { + return nil + } + + v := vals.Get(key) + switch s := strings.TrimSpace(v); strings.Count(s, ".") { + case 0: // Contains no "."; try to parse as int + if i, err := strconv.ParseInt(s, 10, 64); err == nil { + return i + } + case 1: // Contains a single "."; try to parse as float + if f, err := strconv.ParseFloat(s, 64); err == nil { + return f + } + } + + return v +} + +// expired reports whether the token is expired. +// t must be non-nil. +func (t *Token) expired() bool { + if t.Expiry.IsZero() { + return false + } + return t.Expiry.Add(-expiryDelta).Before(time.Now()) +} + +// Valid reports whether t is non-nil, has an AccessToken, and is not expired. +func (t *Token) Valid() bool { + return t != nil && t.AccessToken != "" && !t.expired() +} + +// tokenFromInternal maps an *internal.Token struct into +// a *Token struct. +func tokenFromInternal(t *internal.Token) *Token { + if t == nil { + return nil + } + return &Token{ + AccessToken: t.AccessToken, + TokenType: t.TokenType, + RefreshToken: t.RefreshToken, + Expiry: t.Expiry, + raw: t.Raw, + } +} + +// retrieveToken takes a *Config and uses that to retrieve an *internal.Token. +// This token is then mapped from *internal.Token into an *oauth2.Token which is returned along +// with an error.. +func retrieveToken(ctx context.Context, c *Config, v url.Values) (*Token, error) { + tk, err := internal.RetrieveToken(ctx, c.ClientID, c.ClientSecret, c.Endpoint.TokenURL, v) + if err != nil { + return nil, err + } + return tokenFromInternal(tk), nil +} diff --git a/vendor/github.com/docker/distribution/vendor/golang.org/x/oauth2/transport.go b/vendor/github.com/docker/distribution/vendor/golang.org/x/oauth2/transport.go new file mode 100644 index 000000000..92ac7e253 --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/golang.org/x/oauth2/transport.go @@ -0,0 +1,132 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package oauth2 + +import ( + "errors" + "io" + "net/http" + "sync" +) + +// Transport is an http.RoundTripper that makes OAuth 2.0 HTTP requests, +// wrapping a base RoundTripper and adding an Authorization header +// with a token from the supplied Sources. +// +// Transport is a low-level mechanism. Most code will use the +// higher-level Config.Client method instead. +type Transport struct { + // Source supplies the token to add to outgoing requests' + // Authorization headers. + Source TokenSource + + // Base is the base RoundTripper used to make HTTP requests. + // If nil, http.DefaultTransport is used. + Base http.RoundTripper + + mu sync.Mutex // guards modReq + modReq map[*http.Request]*http.Request // original -> modified +} + +// RoundTrip authorizes and authenticates the request with an +// access token. If no token exists or token is expired, +// tries to refresh/fetch a new token. 
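A small sketch of wiring a fixed token through the Transport type shown above via StaticTokenSource; the access token string and request URL are placeholders.

package main

import (
	"log"
	"net/http"

	"golang.org/x/oauth2"
)

func main() {
	src := oauth2.StaticTokenSource(&oauth2.Token{AccessToken: "example-access-token"})
	client := &http.Client{
		Transport: &oauth2.Transport{
			// Source supplies the token; the Transport adds
			// "Authorization: Bearer example-access-token" to each request.
			Source: src,
		},
	}
	resp, err := client.Get("https://api.example.com/v1/me")
	if err != nil {
		log.Fatal(err)
	}
	defer resp.Body.Close()
	log.Println(resp.Status)
}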
+func (t *Transport) RoundTrip(req *http.Request) (*http.Response, error) { + if t.Source == nil { + return nil, errors.New("oauth2: Transport's Source is nil") + } + token, err := t.Source.Token() + if err != nil { + return nil, err + } + + req2 := cloneRequest(req) // per RoundTripper contract + token.SetAuthHeader(req2) + t.setModReq(req, req2) + res, err := t.base().RoundTrip(req2) + if err != nil { + t.setModReq(req, nil) + return nil, err + } + res.Body = &onEOFReader{ + rc: res.Body, + fn: func() { t.setModReq(req, nil) }, + } + return res, nil +} + +// CancelRequest cancels an in-flight request by closing its connection. +func (t *Transport) CancelRequest(req *http.Request) { + type canceler interface { + CancelRequest(*http.Request) + } + if cr, ok := t.base().(canceler); ok { + t.mu.Lock() + modReq := t.modReq[req] + delete(t.modReq, req) + t.mu.Unlock() + cr.CancelRequest(modReq) + } +} + +func (t *Transport) base() http.RoundTripper { + if t.Base != nil { + return t.Base + } + return http.DefaultTransport +} + +func (t *Transport) setModReq(orig, mod *http.Request) { + t.mu.Lock() + defer t.mu.Unlock() + if t.modReq == nil { + t.modReq = make(map[*http.Request]*http.Request) + } + if mod == nil { + delete(t.modReq, orig) + } else { + t.modReq[orig] = mod + } +} + +// cloneRequest returns a clone of the provided *http.Request. +// The clone is a shallow copy of the struct and its Header map. +func cloneRequest(r *http.Request) *http.Request { + // shallow copy of the struct + r2 := new(http.Request) + *r2 = *r + // deep copy of the Header + r2.Header = make(http.Header, len(r.Header)) + for k, s := range r.Header { + r2.Header[k] = append([]string(nil), s...) + } + return r2 +} + +type onEOFReader struct { + rc io.ReadCloser + fn func() +} + +func (r *onEOFReader) Read(p []byte) (n int, err error) { + n, err = r.rc.Read(p) + if err == io.EOF { + r.runFunc() + } + return +} + +func (r *onEOFReader) Close() error { + err := r.rc.Close() + r.runFunc() + return err +} + +func (r *onEOFReader) runFunc() { + if fn := r.fn; fn != nil { + fn() + r.fn = nil + } +} diff --git a/vendor/github.com/docker/distribution/vendor/golang.org/x/time/LICENSE b/vendor/github.com/docker/distribution/vendor/golang.org/x/time/LICENSE new file mode 100644 index 000000000..6a66aea5e --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/golang.org/x/time/LICENSE @@ -0,0 +1,27 @@ +Copyright (c) 2009 The Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/docker/distribution/vendor/golang.org/x/time/PATENTS b/vendor/github.com/docker/distribution/vendor/golang.org/x/time/PATENTS new file mode 100644 index 000000000..733099041 --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/golang.org/x/time/PATENTS @@ -0,0 +1,22 @@ +Additional IP Rights Grant (Patents) + +"This implementation" means the copyrightable works distributed by +Google as part of the Go project. + +Google hereby grants to You a perpetual, worldwide, non-exclusive, +no-charge, royalty-free, irrevocable (except as stated in this section) +patent license to make, have made, use, offer to sell, sell, import, +transfer and otherwise run, modify and propagate the contents of this +implementation of Go, where such license applies only to those patent +claims, both currently owned or controlled by Google and acquired in +the future, licensable by Google that are necessarily infringed by this +implementation of Go. This grant does not include claims that would be +infringed only as a consequence of further modification of this +implementation. If you or your agent or exclusive licensee institute or +order or agree to the institution of patent litigation against any +entity (including a cross-claim or counterclaim in a lawsuit) alleging +that this implementation of Go or any code incorporated within this +implementation of Go constitutes direct or contributory patent +infringement, or inducement of patent infringement, then any patent +rights granted to you under this License for this implementation of Go +shall terminate as of the date such litigation is filed. diff --git a/vendor/github.com/docker/distribution/vendor/golang.org/x/time/rate/rate.go b/vendor/github.com/docker/distribution/vendor/golang.org/x/time/rate/rate.go new file mode 100644 index 000000000..2131b9217 --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/golang.org/x/time/rate/rate.go @@ -0,0 +1,368 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package rate provides a rate limiter. +package rate + +import ( + "fmt" + "math" + "sync" + "time" + + "golang.org/x/net/context" +) + +// Limit defines the maximum frequency of some events. +// Limit is represented as number of events per second. +// A zero Limit allows no events. +type Limit float64 + +// Inf is the infinite rate limit; it allows all events (even if burst is zero). +const Inf = Limit(math.MaxFloat64) + +// Every converts a minimum time interval between events to a Limit. +func Every(interval time.Duration) Limit { + if interval <= 0 { + return Inf + } + return 1 / Limit(interval.Seconds()) +} + +// A Limiter controls how frequently events are allowed to happen. +// It implements a "token bucket" of size b, initially full and refilled +// at rate r tokens per second. 
+// Informally, in any large enough time interval, the Limiter limits the +// rate to r tokens per second, with a maximum burst size of b events. +// As a special case, if r == Inf (the infinite rate), b is ignored. +// See https://en.wikipedia.org/wiki/Token_bucket for more about token buckets. +// +// The zero value is a valid Limiter, but it will reject all events. +// Use NewLimiter to create non-zero Limiters. +// +// Limiter has three main methods, Allow, Reserve, and Wait. +// Most callers should use Wait. +// +// Each of the three methods consumes a single token. +// They differ in their behavior when no token is available. +// If no token is available, Allow returns false. +// If no token is available, Reserve returns a reservation for a future token +// and the amount of time the caller must wait before using it. +// If no token is available, Wait blocks until one can be obtained +// or its associated context.Context is canceled. +// +// The methods AllowN, ReserveN, and WaitN consume n tokens. +type Limiter struct { + limit Limit + burst int + + mu sync.Mutex + tokens float64 + // last is the last time the limiter's tokens field was updated + last time.Time + // lastEvent is the latest time of a rate-limited event (past or future) + lastEvent time.Time +} + +// Limit returns the maximum overall event rate. +func (lim *Limiter) Limit() Limit { + lim.mu.Lock() + defer lim.mu.Unlock() + return lim.limit +} + +// Burst returns the maximum burst size. Burst is the maximum number of tokens +// that can be consumed in a single call to Allow, Reserve, or Wait, so higher +// Burst values allow more events to happen at once. +// A zero Burst allows no events, unless limit == Inf. +func (lim *Limiter) Burst() int { + return lim.burst +} + +// NewLimiter returns a new Limiter that allows events up to rate r and permits +// bursts of at most b tokens. +func NewLimiter(r Limit, b int) *Limiter { + return &Limiter{ + limit: r, + burst: b, + } +} + +// Allow is shorthand for AllowN(time.Now(), 1). +func (lim *Limiter) Allow() bool { + return lim.AllowN(time.Now(), 1) +} + +// AllowN reports whether n events may happen at time now. +// Use this method if you intend to drop / skip events that exceed the rate limit. +// Otherwise use Reserve or Wait. +func (lim *Limiter) AllowN(now time.Time, n int) bool { + return lim.reserveN(now, n, 0).ok +} + +// A Reservation holds information about events that are permitted by a Limiter to happen after a delay. +// A Reservation may be canceled, which may enable the Limiter to permit additional events. +type Reservation struct { + ok bool + lim *Limiter + tokens int + timeToAct time.Time + // This is the Limit at reservation time, it can change later. + limit Limit +} + +// OK returns whether the limiter can provide the requested number of tokens +// within the maximum wait time. If OK is false, Delay returns InfDuration, and +// Cancel does nothing. +func (r *Reservation) OK() bool { + return r.ok +} + +// Delay is shorthand for DelayFrom(time.Now()). +func (r *Reservation) Delay() time.Duration { + return r.DelayFrom(time.Now()) +} + +// InfDuration is the duration returned by Delay when a Reservation is not OK. +const InfDuration = time.Duration(1<<63 - 1) + +// DelayFrom returns the duration for which the reservation holder must wait +// before taking the reserved action. Zero duration means act immediately. +// InfDuration means the limiter cannot grant the tokens requested in this +// Reservation within the maximum wait time. 
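To make the Allow and Reserve call patterns described above concrete, a small sketch using a limiter of 2 events per second with a burst of 1:

package main

import (
	"fmt"
	"time"

	"golang.org/x/time/rate"
)

func main() {
	lim := rate.NewLimiter(rate.Limit(2), 1)

	// Allow: drop work that exceeds the limit.
	if lim.Allow() {
		fmt.Println("event allowed")
	}

	// Reserve: obtain a reservation and sleep until it may act.
	r := lim.Reserve()
	if r.OK() {
		time.Sleep(r.Delay())
		fmt.Println("reserved event may proceed now")
	}
}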
+func (r *Reservation) DelayFrom(now time.Time) time.Duration { + if !r.ok { + return InfDuration + } + delay := r.timeToAct.Sub(now) + if delay < 0 { + return 0 + } + return delay +} + +// Cancel is shorthand for CancelAt(time.Now()). +func (r *Reservation) Cancel() { + r.CancelAt(time.Now()) + return +} + +// CancelAt indicates that the reservation holder will not perform the reserved action +// and reverses the effects of this Reservation on the rate limit as much as possible, +// considering that other reservations may have already been made. +func (r *Reservation) CancelAt(now time.Time) { + if !r.ok { + return + } + + r.lim.mu.Lock() + defer r.lim.mu.Unlock() + + if r.lim.limit == Inf || r.tokens == 0 || r.timeToAct.Before(now) { + return + } + + // calculate tokens to restore + // The duration between lim.lastEvent and r.timeToAct tells us how many tokens were reserved + // after r was obtained. These tokens should not be restored. + restoreTokens := float64(r.tokens) - r.limit.tokensFromDuration(r.lim.lastEvent.Sub(r.timeToAct)) + if restoreTokens <= 0 { + return + } + // advance time to now + now, _, tokens := r.lim.advance(now) + // calculate new number of tokens + tokens += restoreTokens + if burst := float64(r.lim.burst); tokens > burst { + tokens = burst + } + // update state + r.lim.last = now + r.lim.tokens = tokens + if r.timeToAct == r.lim.lastEvent { + prevEvent := r.timeToAct.Add(r.limit.durationFromTokens(float64(-r.tokens))) + if !prevEvent.Before(now) { + r.lim.lastEvent = prevEvent + } + } + + return +} + +// Reserve is shorthand for ReserveN(time.Now(), 1). +func (lim *Limiter) Reserve() *Reservation { + return lim.ReserveN(time.Now(), 1) +} + +// ReserveN returns a Reservation that indicates how long the caller must wait before n events happen. +// The Limiter takes this Reservation into account when allowing future events. +// ReserveN returns false if n exceeds the Limiter's burst size. +// Usage example: +// r, ok := lim.ReserveN(time.Now(), 1) +// if !ok { +// // Not allowed to act! Did you remember to set lim.burst to be > 0 ? +// } +// time.Sleep(r.Delay()) +// Act() +// Use this method if you wish to wait and slow down in accordance with the rate limit without dropping events. +// If you need to respect a deadline or cancel the delay, use Wait instead. +// To drop or skip events exceeding rate limit, use Allow instead. +func (lim *Limiter) ReserveN(now time.Time, n int) *Reservation { + r := lim.reserveN(now, n, InfDuration) + return &r +} + +// Wait is shorthand for WaitN(ctx, 1). +func (lim *Limiter) Wait(ctx context.Context) (err error) { + return lim.WaitN(ctx, 1) +} + +// WaitN blocks until lim permits n events to happen. +// It returns an error if n exceeds the Limiter's burst size, the Context is +// canceled, or the expected wait time exceeds the Context's Deadline. +func (lim *Limiter) WaitN(ctx context.Context, n int) (err error) { + if n > lim.burst { + return fmt.Errorf("rate: Wait(n=%d) exceeds limiter's burst %d", n, lim.burst) + } + // Check if ctx is already cancelled + select { + case <-ctx.Done(): + return ctx.Err() + default: + } + // Determine wait limit + now := time.Now() + waitLimit := InfDuration + if deadline, ok := ctx.Deadline(); ok { + waitLimit = deadline.Sub(now) + } + // Reserve + r := lim.reserveN(now, n, waitLimit) + if !r.ok { + return fmt.Errorf("rate: Wait(n=%d) would exceed context deadline", n) + } + // Wait + t := time.NewTimer(r.DelayFrom(now)) + defer t.Stop() + select { + case <-t.C: + // We can proceed. 
+ return nil + case <-ctx.Done(): + // Context was canceled before we could proceed. Cancel the + // reservation, which may permit other events to proceed sooner. + r.Cancel() + return ctx.Err() + } +} + +// SetLimit is shorthand for SetLimitAt(time.Now(), newLimit). +func (lim *Limiter) SetLimit(newLimit Limit) { + lim.SetLimitAt(time.Now(), newLimit) +} + +// SetLimitAt sets a new Limit for the limiter. The new Limit, and Burst, may be violated +// or underutilized by those which reserved (using Reserve or Wait) but did not yet act +// before SetLimitAt was called. +func (lim *Limiter) SetLimitAt(now time.Time, newLimit Limit) { + lim.mu.Lock() + defer lim.mu.Unlock() + + now, _, tokens := lim.advance(now) + + lim.last = now + lim.tokens = tokens + lim.limit = newLimit +} + +// reserveN is a helper method for AllowN, ReserveN, and WaitN. +// maxFutureReserve specifies the maximum reservation wait duration allowed. +// reserveN returns Reservation, not *Reservation, to avoid allocation in AllowN and WaitN. +func (lim *Limiter) reserveN(now time.Time, n int, maxFutureReserve time.Duration) Reservation { + lim.mu.Lock() + defer lim.mu.Unlock() + + if lim.limit == Inf { + return Reservation{ + ok: true, + lim: lim, + tokens: n, + timeToAct: now, + } + } + + now, last, tokens := lim.advance(now) + + // Calculate the remaining number of tokens resulting from the request. + tokens -= float64(n) + + // Calculate the wait duration + var waitDuration time.Duration + if tokens < 0 { + waitDuration = lim.limit.durationFromTokens(-tokens) + } + + // Decide result + ok := n <= lim.burst && waitDuration <= maxFutureReserve + + // Prepare reservation + r := Reservation{ + ok: ok, + lim: lim, + limit: lim.limit, + } + if ok { + r.tokens = n + r.timeToAct = now.Add(waitDuration) + } + + // Update state + if ok { + lim.last = now + lim.tokens = tokens + lim.lastEvent = r.timeToAct + } else { + lim.last = last + } + + return r +} + +// advance calculates and returns an updated state for lim resulting from the passage of time. +// lim is not changed. +func (lim *Limiter) advance(now time.Time) (newNow time.Time, newLast time.Time, newTokens float64) { + last := lim.last + if now.Before(last) { + last = now + } + + // Avoid making delta overflow below when last is very old. + maxElapsed := lim.limit.durationFromTokens(float64(lim.burst) - lim.tokens) + elapsed := now.Sub(last) + if elapsed > maxElapsed { + elapsed = maxElapsed + } + + // Calculate the new number of tokens, due to time that passed. + delta := lim.limit.tokensFromDuration(elapsed) + tokens := lim.tokens + delta + if burst := float64(lim.burst); tokens > burst { + tokens = burst + } + + return now, last, tokens +} + +// durationFromTokens is a unit conversion function from the number of tokens to the duration +// of time it takes to accumulate them at a rate of limit tokens per second. +func (limit Limit) durationFromTokens(tokens float64) time.Duration { + seconds := tokens / float64(limit) + return time.Nanosecond * time.Duration(1e9*seconds) +} + +// tokensFromDuration is a unit conversion function from a time duration to the number of tokens +// which could be accumulated during that duration at a rate of limit tokens per second. 
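A sketch of the Wait path with a context deadline, matching the WaitN behaviour above; note that this vendored copy takes golang.org/x/net/context rather than the standard library context package.

package main

import (
	"fmt"
	"time"

	"golang.org/x/net/context"
	"golang.org/x/time/rate"
)

func main() {
	// At most one event every 200ms, burst of 1.
	lim := rate.NewLimiter(rate.Every(200*time.Millisecond), 1)
	ctx, cancel := context.WithTimeout(context.Background(), time.Second)
	defer cancel()

	for i := 0; i < 3; i++ {
		// Wait blocks until a token is available or the context deadline is hit.
		if err := lim.Wait(ctx); err != nil {
			fmt.Println("stopped:", err)
			return
		}
		fmt.Println("tick", i, time.Now().Format("15:04:05.000"))
	}
}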
+func (limit Limit) tokensFromDuration(d time.Duration) float64 { + return d.Seconds() * float64(limit) +} diff --git a/vendor/github.com/docker/distribution/vendor/google.golang.org/api/LICENSE b/vendor/github.com/docker/distribution/vendor/google.golang.org/api/LICENSE new file mode 100644 index 000000000..263aa7a0c --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/google.golang.org/api/LICENSE @@ -0,0 +1,27 @@ +Copyright (c) 2011 Google Inc. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/docker/distribution/vendor/google.golang.org/api/gensupport/backoff.go b/vendor/github.com/docker/distribution/vendor/google.golang.org/api/gensupport/backoff.go new file mode 100644 index 000000000..135614047 --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/google.golang.org/api/gensupport/backoff.go @@ -0,0 +1,46 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package gensupport + +import ( + "math/rand" + "time" +) + +type BackoffStrategy interface { + // Pause returns the duration of the next pause and true if the operation should be + // retried, or false if no further retries should be attempted. + Pause() (time.Duration, bool) + + // Reset restores the strategy to its initial state. + Reset() +} + +// ExponentialBackoff performs exponential backoff as per https://en.wikipedia.org/wiki/Exponential_backoff. +// The initial pause time is given by Base. +// Once the total pause time exceeds Max, Pause will indicate no further retries. +type ExponentialBackoff struct { + Base time.Duration + Max time.Duration + total time.Duration + n uint +} + +func (eb *ExponentialBackoff) Pause() (time.Duration, bool) { + if eb.total > eb.Max { + return 0, false + } + + // The next pause is selected from randomly from [0, 2^n * Base). 
+ d := time.Duration(rand.Int63n((1 << eb.n) * int64(eb.Base))) + eb.total += d + eb.n++ + return d, true +} + +func (eb *ExponentialBackoff) Reset() { + eb.n = 0 + eb.total = 0 +} diff --git a/vendor/github.com/docker/distribution/vendor/google.golang.org/api/gensupport/buffer.go b/vendor/github.com/docker/distribution/vendor/google.golang.org/api/gensupport/buffer.go new file mode 100644 index 000000000..4b8ec1424 --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/google.golang.org/api/gensupport/buffer.go @@ -0,0 +1,77 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package gensupport + +import ( + "bytes" + "io" + + "google.golang.org/api/googleapi" +) + +// ResumableBuffer buffers data from an io.Reader to support uploading media in retryable chunks. +type ResumableBuffer struct { + media io.Reader + + chunk []byte // The current chunk which is pending upload. The capacity is the chunk size. + err error // Any error generated when populating chunk by reading media. + + // The absolute position of chunk in the underlying media. + off int64 +} + +func NewResumableBuffer(media io.Reader, chunkSize int) *ResumableBuffer { + return &ResumableBuffer{media: media, chunk: make([]byte, 0, chunkSize)} +} + +// Chunk returns the current buffered chunk, the offset in the underlying media +// from which the chunk is drawn, and the size of the chunk. +// Successive calls to Chunk return the same chunk between calls to Next. +func (rb *ResumableBuffer) Chunk() (chunk io.Reader, off int64, size int, err error) { + // There may already be data in chunk if Next has not been called since the previous call to Chunk. + if rb.err == nil && len(rb.chunk) == 0 { + rb.err = rb.loadChunk() + } + return bytes.NewReader(rb.chunk), rb.off, len(rb.chunk), rb.err +} + +// loadChunk will read from media into chunk, up to the capacity of chunk. +func (rb *ResumableBuffer) loadChunk() error { + bufSize := cap(rb.chunk) + rb.chunk = rb.chunk[:bufSize] + + read := 0 + var err error + for err == nil && read < bufSize { + var n int + n, err = rb.media.Read(rb.chunk[read:]) + read += n + } + rb.chunk = rb.chunk[:read] + return err +} + +// Next advances to the next chunk, which will be returned by the next call to Chunk. +// Calls to Next without a corresponding prior call to Chunk will have no effect. +func (rb *ResumableBuffer) Next() { + rb.off += int64(len(rb.chunk)) + rb.chunk = rb.chunk[0:0] +} + +type readerTyper struct { + io.Reader + googleapi.ContentTyper +} + +// ReaderAtToReader adapts a ReaderAt to be used as a Reader. +// If ra implements googleapi.ContentTyper, then the returned reader +// will also implement googleapi.ContentTyper, delegating to ra. +func ReaderAtToReader(ra io.ReaderAt, size int64) io.Reader { + r := io.NewSectionReader(ra, 0, size) + if typer, ok := ra.(googleapi.ContentTyper); ok { + return readerTyper{r, typer} + } + return r +} diff --git a/vendor/github.com/docker/distribution/vendor/google.golang.org/api/gensupport/doc.go b/vendor/github.com/docker/distribution/vendor/google.golang.org/api/gensupport/doc.go new file mode 100644 index 000000000..752c4b411 --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/google.golang.org/api/gensupport/doc.go @@ -0,0 +1,10 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +// Package gensupport is an internal implementation detail used by code +// generated by the google-api-go-generator tool. +// +// This package may be modified at any time without regard for backwards +// compatibility. It should not be used directly by API users. +package gensupport diff --git a/vendor/github.com/docker/distribution/vendor/google.golang.org/api/gensupport/json.go b/vendor/github.com/docker/distribution/vendor/google.golang.org/api/gensupport/json.go new file mode 100644 index 000000000..dd7bcd2eb --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/google.golang.org/api/gensupport/json.go @@ -0,0 +1,172 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package gensupport + +import ( + "encoding/json" + "fmt" + "reflect" + "strings" +) + +// MarshalJSON returns a JSON encoding of schema containing only selected fields. +// A field is selected if: +// * it has a non-empty value, or +// * its field name is present in forceSendFields, and +// * it is not a nil pointer or nil interface. +// The JSON key for each selected field is taken from the field's json: struct tag. +func MarshalJSON(schema interface{}, forceSendFields []string) ([]byte, error) { + if len(forceSendFields) == 0 { + return json.Marshal(schema) + } + + mustInclude := make(map[string]struct{}) + for _, f := range forceSendFields { + mustInclude[f] = struct{}{} + } + + dataMap, err := schemaToMap(schema, mustInclude) + if err != nil { + return nil, err + } + return json.Marshal(dataMap) +} + +func schemaToMap(schema interface{}, mustInclude map[string]struct{}) (map[string]interface{}, error) { + m := make(map[string]interface{}) + s := reflect.ValueOf(schema) + st := s.Type() + + for i := 0; i < s.NumField(); i++ { + jsonTag := st.Field(i).Tag.Get("json") + if jsonTag == "" { + continue + } + tag, err := parseJSONTag(jsonTag) + if err != nil { + return nil, err + } + if tag.ignore { + continue + } + + v := s.Field(i) + f := st.Field(i) + if !includeField(v, f, mustInclude) { + continue + } + + // nil maps are treated as empty maps. + if f.Type.Kind() == reflect.Map && v.IsNil() { + m[tag.apiName] = map[string]string{} + continue + } + + // nil slices are treated as empty slices. + if f.Type.Kind() == reflect.Slice && v.IsNil() { + m[tag.apiName] = []bool{} + continue + } + + if tag.stringFormat { + m[tag.apiName] = formatAsString(v, f.Type.Kind()) + } else { + m[tag.apiName] = v.Interface() + } + } + return m, nil +} + +// formatAsString returns a string representation of v, dereferencing it first if possible. +func formatAsString(v reflect.Value, kind reflect.Kind) string { + if kind == reflect.Ptr && !v.IsNil() { + v = v.Elem() + } + + return fmt.Sprintf("%v", v.Interface()) +} + +// jsonTag represents a restricted version of the struct tag format used by encoding/json. +// It is used to describe the JSON encoding of fields in a Schema struct. +type jsonTag struct { + apiName string + stringFormat bool + ignore bool +} + +// parseJSONTag parses a restricted version of the struct tag format used by encoding/json. +// The format of the tag must match that generated by the Schema.writeSchemaStruct method +// in the api generator. 
+func parseJSONTag(val string) (jsonTag, error) { + if val == "-" { + return jsonTag{ignore: true}, nil + } + + var tag jsonTag + + i := strings.Index(val, ",") + if i == -1 || val[:i] == "" { + return tag, fmt.Errorf("malformed json tag: %s", val) + } + + tag = jsonTag{ + apiName: val[:i], + } + + switch val[i+1:] { + case "omitempty": + case "omitempty,string": + tag.stringFormat = true + default: + return tag, fmt.Errorf("malformed json tag: %s", val) + } + + return tag, nil +} + +// Reports whether the struct field "f" with value "v" should be included in JSON output. +func includeField(v reflect.Value, f reflect.StructField, mustInclude map[string]struct{}) bool { + // The regular JSON encoding of a nil pointer is "null", which means "delete this field". + // Therefore, we could enable field deletion by honoring pointer fields' presence in the mustInclude set. + // However, many fields are not pointers, so there would be no way to delete these fields. + // Rather than partially supporting field deletion, we ignore mustInclude for nil pointer fields. + // Deletion will be handled by a separate mechanism. + if f.Type.Kind() == reflect.Ptr && v.IsNil() { + return false + } + + // The "any" type is represented as an interface{}. If this interface + // is nil, there is no reasonable representation to send. We ignore + // these fields, for the same reasons as given above for pointers. + if f.Type.Kind() == reflect.Interface && v.IsNil() { + return false + } + + _, ok := mustInclude[f.Name] + return ok || !isEmptyValue(v) +} + +// isEmptyValue reports whether v is the empty value for its type. This +// implementation is based on that of the encoding/json package, but its +// correctness does not depend on it being identical. What's important is that +// this function return false in situations where v should not be sent as part +// of a PATCH operation. +func isEmptyValue(v reflect.Value) bool { + switch v.Kind() { + case reflect.Array, reflect.Map, reflect.Slice, reflect.String: + return v.Len() == 0 + case reflect.Bool: + return !v.Bool() + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return v.Int() == 0 + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + return v.Uint() == 0 + case reflect.Float32, reflect.Float64: + return v.Float() == 0 + case reflect.Interface, reflect.Ptr: + return v.IsNil() + } + return false +} diff --git a/vendor/github.com/docker/distribution/vendor/google.golang.org/api/gensupport/media.go b/vendor/github.com/docker/distribution/vendor/google.golang.org/api/gensupport/media.go new file mode 100644 index 000000000..817f46f5d --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/google.golang.org/api/gensupport/media.go @@ -0,0 +1,200 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package gensupport + +import ( + "fmt" + "io" + "io/ioutil" + "mime/multipart" + "net/http" + "net/textproto" + + "google.golang.org/api/googleapi" +) + +const sniffBuffSize = 512 + +func newContentSniffer(r io.Reader) *contentSniffer { + return &contentSniffer{r: r} +} + +// contentSniffer wraps a Reader, and reports the content type determined by sniffing up to 512 bytes from the Reader. +type contentSniffer struct { + r io.Reader + start []byte // buffer for the sniffed bytes. + err error // set to any error encountered while reading bytes to be sniffed. 
+ + ctype string // set on first sniff. + sniffed bool // set to true on first sniff. +} + +func (cs *contentSniffer) Read(p []byte) (n int, err error) { + // Ensure that the content type is sniffed before any data is consumed from Reader. + _, _ = cs.ContentType() + + if len(cs.start) > 0 { + n := copy(p, cs.start) + cs.start = cs.start[n:] + return n, nil + } + + // We may have read some bytes into start while sniffing, even if the read ended in an error. + // We should first return those bytes, then the error. + if cs.err != nil { + return 0, cs.err + } + + // Now we have handled all bytes that were buffered while sniffing. Now just delegate to the underlying reader. + return cs.r.Read(p) +} + +// ContentType returns the sniffed content type, and whether the content type was succesfully sniffed. +func (cs *contentSniffer) ContentType() (string, bool) { + if cs.sniffed { + return cs.ctype, cs.ctype != "" + } + cs.sniffed = true + // If ReadAll hits EOF, it returns err==nil. + cs.start, cs.err = ioutil.ReadAll(io.LimitReader(cs.r, sniffBuffSize)) + + // Don't try to detect the content type based on possibly incomplete data. + if cs.err != nil { + return "", false + } + + cs.ctype = http.DetectContentType(cs.start) + return cs.ctype, true +} + +// DetermineContentType determines the content type of the supplied reader. +// If the content type is already known, it can be specified via ctype. +// Otherwise, the content of media will be sniffed to determine the content type. +// If media implements googleapi.ContentTyper (deprecated), this will be used +// instead of sniffing the content. +// After calling DetectContentType the caller must not perform further reads on +// media, but rather read from the Reader that is returned. +func DetermineContentType(media io.Reader, ctype string) (io.Reader, string) { + // Note: callers could avoid calling DetectContentType if ctype != "", + // but doing the check inside this function reduces the amount of + // generated code. + if ctype != "" { + return media, ctype + } + + // For backwards compatability, allow clients to set content + // type by providing a ContentTyper for media. + if typer, ok := media.(googleapi.ContentTyper); ok { + return media, typer.ContentType() + } + + sniffer := newContentSniffer(media) + if ctype, ok := sniffer.ContentType(); ok { + return sniffer, ctype + } + // If content type could not be sniffed, reads from sniffer will eventually fail with an error. + return sniffer, "" +} + +type typeReader struct { + io.Reader + typ string +} + +// multipartReader combines the contents of multiple readers to creat a multipart/related HTTP body. +// Close must be called if reads from the multipartReader are abandoned before reaching EOF. 
+type multipartReader struct { + pr *io.PipeReader + pipeOpen bool + ctype string +} + +func newMultipartReader(parts []typeReader) *multipartReader { + mp := &multipartReader{pipeOpen: true} + var pw *io.PipeWriter + mp.pr, pw = io.Pipe() + mpw := multipart.NewWriter(pw) + mp.ctype = "multipart/related; boundary=" + mpw.Boundary() + go func() { + for _, part := range parts { + w, err := mpw.CreatePart(typeHeader(part.typ)) + if err != nil { + mpw.Close() + pw.CloseWithError(fmt.Errorf("googleapi: CreatePart failed: %v", err)) + return + } + _, err = io.Copy(w, part.Reader) + if err != nil { + mpw.Close() + pw.CloseWithError(fmt.Errorf("googleapi: Copy failed: %v", err)) + return + } + } + + mpw.Close() + pw.Close() + }() + return mp +} + +func (mp *multipartReader) Read(data []byte) (n int, err error) { + return mp.pr.Read(data) +} + +func (mp *multipartReader) Close() error { + if !mp.pipeOpen { + return nil + } + mp.pipeOpen = false + return mp.pr.Close() +} + +// CombineBodyMedia combines a json body with media content to create a multipart/related HTTP body. +// It returns a ReadCloser containing the combined body, and the overall "multipart/related" content type, with random boundary. +// +// The caller must call Close on the returned ReadCloser if reads are abandoned before reaching EOF. +func CombineBodyMedia(body io.Reader, bodyContentType string, media io.Reader, mediaContentType string) (io.ReadCloser, string) { + mp := newMultipartReader([]typeReader{ + {body, bodyContentType}, + {media, mediaContentType}, + }) + return mp, mp.ctype +} + +func typeHeader(contentType string) textproto.MIMEHeader { + h := make(textproto.MIMEHeader) + if contentType != "" { + h.Set("Content-Type", contentType) + } + return h +} + +// PrepareUpload determines whether the data in the supplied reader should be +// uploaded in a single request, or in sequential chunks. +// chunkSize is the size of the chunk that media should be split into. +// If chunkSize is non-zero and the contents of media do not fit in a single +// chunk (or there is an error reading media), then media will be returned as a +// ResumableBuffer. Otherwise, media will be returned as a Reader. +// +// After PrepareUpload has been called, media should no longer be used: the +// media content should be accessed via one of the return values. +func PrepareUpload(media io.Reader, chunkSize int) (io.Reader, + *ResumableBuffer) { + if chunkSize == 0 { // do not chunk + return media, nil + } + + rb := NewResumableBuffer(media, chunkSize) + rdr, _, _, err := rb.Chunk() + + if err == io.EOF { // we can upload this in a single request + return rdr, nil + } + // err might be a non-EOF error. If it is, the next call to rb.Chunk will + // return the same error. Returning a ResumableBuffer ensures that this error + // will be handled at some point. + + return nil, rb +} diff --git a/vendor/github.com/docker/distribution/vendor/google.golang.org/api/gensupport/params.go b/vendor/github.com/docker/distribution/vendor/google.golang.org/api/gensupport/params.go new file mode 100644 index 000000000..3b3c74396 --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/google.golang.org/api/gensupport/params.go @@ -0,0 +1,50 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package gensupport + +import ( + "net/url" + + "google.golang.org/api/googleapi" +) + +// URLParams is a simplified replacement for url.Values +// that safely builds up URL parameters for encoding. +type URLParams map[string][]string + +// Get returns the first value for the given key, or "". +func (u URLParams) Get(key string) string { + vs := u[key] + if len(vs) == 0 { + return "" + } + return vs[0] +} + +// Set sets the key to value. +// It replaces any existing values. +func (u URLParams) Set(key, value string) { + u[key] = []string{value} +} + +// SetMulti sets the key to an array of values. +// It replaces any existing values. +// Note that values must not be modified after calling SetMulti +// so the caller is responsible for making a copy if necessary. +func (u URLParams) SetMulti(key string, values []string) { + u[key] = values +} + +// Encode encodes the values into ``URL encoded'' form +// ("bar=baz&foo=quux") sorted by key. +func (u URLParams) Encode() string { + return url.Values(u).Encode() +} + +func SetOptions(u URLParams, opts ...googleapi.CallOption) { + for _, o := range opts { + u.Set(o.Get()) + } +} diff --git a/vendor/github.com/docker/distribution/vendor/google.golang.org/api/gensupport/resumable.go b/vendor/github.com/docker/distribution/vendor/google.golang.org/api/gensupport/resumable.go new file mode 100644 index 000000000..b3e774aa4 --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/google.golang.org/api/gensupport/resumable.go @@ -0,0 +1,198 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package gensupport + +import ( + "fmt" + "io" + "net/http" + "sync" + "time" + + "golang.org/x/net/context" + "golang.org/x/net/context/ctxhttp" +) + +const ( + // statusResumeIncomplete is the code returned by the Google uploader + // when the transfer is not yet complete. + statusResumeIncomplete = 308 + + // statusTooManyRequests is returned by the storage API if the + // per-project limits have been temporarily exceeded. The request + // should be retried. + // https://cloud.google.com/storage/docs/json_api/v1/status-codes#standardcodes + statusTooManyRequests = 429 +) + +// ResumableUpload is used by the generated APIs to provide resumable uploads. +// It is not used by developers directly. +type ResumableUpload struct { + Client *http.Client + // URI is the resumable resource destination provided by the server after specifying "&uploadType=resumable". + URI string + UserAgent string // User-Agent for header of the request + // Media is the object being uploaded. + Media *ResumableBuffer + // MediaType defines the media type, e.g. "image/jpeg". + MediaType string + + mu sync.Mutex // guards progress + progress int64 // number of bytes uploaded so far + + // Callback is an optional function that will be periodically called with the cumulative number of bytes uploaded. + Callback func(int64) + + // If not specified, a default exponential backoff strategy will be used. + Backoff BackoffStrategy +} + +// Progress returns the number of bytes uploaded at this point. +func (rx *ResumableUpload) Progress() int64 { + rx.mu.Lock() + defer rx.mu.Unlock() + return rx.progress +} + +// doUploadRequest performs a single HTTP request to upload data. +// off specifies the offset in rx.Media from which data is drawn. +// size is the number of bytes in data. +// final specifies whether data is the final chunk to be uploaded. 
+func (rx *ResumableUpload) doUploadRequest(ctx context.Context, data io.Reader, off, size int64, final bool) (*http.Response, error) { + req, err := http.NewRequest("POST", rx.URI, data) + if err != nil { + return nil, err + } + + req.ContentLength = size + var contentRange string + if final { + if size == 0 { + contentRange = fmt.Sprintf("bytes */%v", off) + } else { + contentRange = fmt.Sprintf("bytes %v-%v/%v", off, off+size-1, off+size) + } + } else { + contentRange = fmt.Sprintf("bytes %v-%v/*", off, off+size-1) + } + req.Header.Set("Content-Range", contentRange) + req.Header.Set("Content-Type", rx.MediaType) + req.Header.Set("User-Agent", rx.UserAgent) + return ctxhttp.Do(ctx, rx.Client, req) + +} + +// reportProgress calls a user-supplied callback to report upload progress. +// If old==updated, the callback is not called. +func (rx *ResumableUpload) reportProgress(old, updated int64) { + if updated-old == 0 { + return + } + rx.mu.Lock() + rx.progress = updated + rx.mu.Unlock() + if rx.Callback != nil { + rx.Callback(updated) + } +} + +// transferChunk performs a single HTTP request to upload a single chunk from rx.Media. +func (rx *ResumableUpload) transferChunk(ctx context.Context) (*http.Response, error) { + chunk, off, size, err := rx.Media.Chunk() + + done := err == io.EOF + if !done && err != nil { + return nil, err + } + + res, err := rx.doUploadRequest(ctx, chunk, off, int64(size), done) + if err != nil { + return res, err + } + + if res.StatusCode == statusResumeIncomplete || res.StatusCode == http.StatusOK { + rx.reportProgress(off, off+int64(size)) + } + + if res.StatusCode == statusResumeIncomplete { + rx.Media.Next() + } + return res, nil +} + +func contextDone(ctx context.Context) bool { + select { + case <-ctx.Done(): + return true + default: + return false + } +} + +// Upload starts the process of a resumable upload with a cancellable context. +// It retries using the provided back off strategy until cancelled or the +// strategy indicates to stop retrying. +// It is called from the auto-generated API code and is not visible to the user. +// rx is private to the auto-generated API code. +// Exactly one of resp or err will be nil. If resp is non-nil, the caller must call resp.Body.Close. +func (rx *ResumableUpload) Upload(ctx context.Context) (resp *http.Response, err error) { + var pause time.Duration + backoff := rx.Backoff + if backoff == nil { + backoff = DefaultBackoffStrategy() + } + + for { + // Ensure that we return in the case of cancelled context, even if pause is 0. + if contextDone(ctx) { + return nil, ctx.Err() + } + select { + case <-ctx.Done(): + return nil, ctx.Err() + case <-time.After(pause): + } + + resp, err = rx.transferChunk(ctx) + + var status int + if resp != nil { + status = resp.StatusCode + } + + // Check if we should retry the request. + if shouldRetry(status, err) { + var retry bool + pause, retry = backoff.Pause() + if retry { + if resp != nil && resp.Body != nil { + resp.Body.Close() + } + continue + } + } + + // If the chunk was uploaded successfully, but there's still + // more to go, upload the next chunk without any delay. + if status == statusResumeIncomplete { + pause = 0 + backoff.Reset() + resp.Body.Close() + continue + } + + // It's possible for err and resp to both be non-nil here, but we expose a simpler + // contract to our callers: exactly one of resp and err will be non-nil. This means + // that any response body must be closed here before returning a non-nil error. 
+ if err != nil { + if resp != nil && resp.Body != nil { + resp.Body.Close() + } + return nil, err + } + + return resp, nil + } +} diff --git a/vendor/github.com/docker/distribution/vendor/google.golang.org/api/gensupport/retry.go b/vendor/github.com/docker/distribution/vendor/google.golang.org/api/gensupport/retry.go new file mode 100644 index 000000000..7f83d1da9 --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/google.golang.org/api/gensupport/retry.go @@ -0,0 +1,77 @@ +package gensupport + +import ( + "io" + "net" + "net/http" + "time" + + "golang.org/x/net/context" +) + +// Retry invokes the given function, retrying it multiple times if the connection failed or +// the HTTP status response indicates the request should be attempted again. ctx may be nil. +func Retry(ctx context.Context, f func() (*http.Response, error), backoff BackoffStrategy) (*http.Response, error) { + for { + resp, err := f() + + var status int + if resp != nil { + status = resp.StatusCode + } + + // Return if we shouldn't retry. + pause, retry := backoff.Pause() + if !shouldRetry(status, err) || !retry { + return resp, err + } + + // Ensure the response body is closed, if any. + if resp != nil && resp.Body != nil { + resp.Body.Close() + } + + // Pause, but still listen to ctx.Done if context is not nil. + var done <-chan struct{} + if ctx != nil { + done = ctx.Done() + } + select { + case <-done: + return nil, ctx.Err() + case <-time.After(pause): + } + } +} + +// DefaultBackoffStrategy returns a default strategy to use for retrying failed upload requests. +func DefaultBackoffStrategy() BackoffStrategy { + return &ExponentialBackoff{ + Base: 250 * time.Millisecond, + Max: 16 * time.Second, + } +} + +// shouldRetry returns true if the HTTP response / error indicates that the +// request should be attempted again. +func shouldRetry(status int, err error) bool { + // Retry for 5xx response codes. + if 500 <= status && status < 600 { + return true + } + + // Retry on statusTooManyRequests{ + if status == statusTooManyRequests { + return true + } + + // Retry on unexpected EOFs and temporary network errors. + if err == io.ErrUnexpectedEOF { + return true + } + if err, ok := err.(net.Error); ok { + return err.Temporary() + } + + return false +} diff --git a/vendor/github.com/docker/distribution/vendor/google.golang.org/api/googleapi/googleapi.go b/vendor/github.com/docker/distribution/vendor/google.golang.org/api/googleapi/googleapi.go new file mode 100644 index 000000000..03e9acdd8 --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/google.golang.org/api/googleapi/googleapi.go @@ -0,0 +1,424 @@ +// Copyright 2011 Google Inc. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package googleapi contains the common code shared by all Google API +// libraries. +package googleapi + +import ( + "bytes" + "encoding/json" + "fmt" + "io" + "io/ioutil" + "net/http" + "net/url" + "strings" + + "google.golang.org/api/googleapi/internal/uritemplates" +) + +// ContentTyper is an interface for Readers which know (or would like +// to override) their Content-Type. If a media body doesn't implement +// ContentTyper, the type is sniffed from the content using +// http.DetectContentType. +type ContentTyper interface { + ContentType() string +} + +// A SizeReaderAt is a ReaderAt with a Size method. +// An io.SectionReader implements SizeReaderAt. 
+type SizeReaderAt interface { + io.ReaderAt + Size() int64 +} + +// ServerResponse is embedded in each Do response and +// provides the HTTP status code and header sent by the server. +type ServerResponse struct { + // HTTPStatusCode is the server's response status code. + // When using a resource method's Do call, this will always be in the 2xx range. + HTTPStatusCode int + // Header contains the response header fields from the server. + Header http.Header +} + +const ( + Version = "0.5" + + // UserAgent is the header string used to identify this package. + UserAgent = "google-api-go-client/" + Version + + // The default chunk size to use for resumable uplods if not specified by the user. + DefaultUploadChunkSize = 8 * 1024 * 1024 + + // The minimum chunk size that can be used for resumable uploads. All + // user-specified chunk sizes must be multiple of this value. + MinUploadChunkSize = 256 * 1024 +) + +// Error contains an error response from the server. +type Error struct { + // Code is the HTTP response status code and will always be populated. + Code int `json:"code"` + // Message is the server response message and is only populated when + // explicitly referenced by the JSON server response. + Message string `json:"message"` + // Body is the raw response returned by the server. + // It is often but not always JSON, depending on how the request fails. + Body string + // Header contains the response header fields from the server. + Header http.Header + + Errors []ErrorItem +} + +// ErrorItem is a detailed error code & message from the Google API frontend. +type ErrorItem struct { + // Reason is the typed error code. For example: "some_example". + Reason string `json:"reason"` + // Message is the human-readable description of the error. + Message string `json:"message"` +} + +func (e *Error) Error() string { + if len(e.Errors) == 0 && e.Message == "" { + return fmt.Sprintf("googleapi: got HTTP response code %d with body: %v", e.Code, e.Body) + } + var buf bytes.Buffer + fmt.Fprintf(&buf, "googleapi: Error %d: ", e.Code) + if e.Message != "" { + fmt.Fprintf(&buf, "%s", e.Message) + } + if len(e.Errors) == 0 { + return strings.TrimSpace(buf.String()) + } + if len(e.Errors) == 1 && e.Errors[0].Message == e.Message { + fmt.Fprintf(&buf, ", %s", e.Errors[0].Reason) + return buf.String() + } + fmt.Fprintln(&buf, "\nMore details:") + for _, v := range e.Errors { + fmt.Fprintf(&buf, "Reason: %s, Message: %s\n", v.Reason, v.Message) + } + return buf.String() +} + +type errorReply struct { + Error *Error `json:"error"` +} + +// CheckResponse returns an error (of type *Error) if the response +// status code is not 2xx. +func CheckResponse(res *http.Response) error { + if res.StatusCode >= 200 && res.StatusCode <= 299 { + return nil + } + slurp, err := ioutil.ReadAll(res.Body) + if err == nil { + jerr := new(errorReply) + err = json.Unmarshal(slurp, jerr) + if err == nil && jerr.Error != nil { + if jerr.Error.Code == 0 { + jerr.Error.Code = res.StatusCode + } + jerr.Error.Body = string(slurp) + return jerr.Error + } + } + return &Error{ + Code: res.StatusCode, + Body: string(slurp), + Header: res.Header, + } +} + +// IsNotModified reports whether err is the result of the +// server replying with http.StatusNotModified. +// Such error values are sometimes returned by "Do" methods +// on calls when If-None-Match is used. 
+func IsNotModified(err error) bool { + if err == nil { + return false + } + ae, ok := err.(*Error) + return ok && ae.Code == http.StatusNotModified +} + +// CheckMediaResponse returns an error (of type *Error) if the response +// status code is not 2xx. Unlike CheckResponse it does not assume the +// body is a JSON error document. +func CheckMediaResponse(res *http.Response) error { + if res.StatusCode >= 200 && res.StatusCode <= 299 { + return nil + } + slurp, _ := ioutil.ReadAll(io.LimitReader(res.Body, 1<<20)) + res.Body.Close() + return &Error{ + Code: res.StatusCode, + Body: string(slurp), + } +} + +type MarshalStyle bool + +var WithDataWrapper = MarshalStyle(true) +var WithoutDataWrapper = MarshalStyle(false) + +func (wrap MarshalStyle) JSONReader(v interface{}) (io.Reader, error) { + buf := new(bytes.Buffer) + if wrap { + buf.Write([]byte(`{"data": `)) + } + err := json.NewEncoder(buf).Encode(v) + if err != nil { + return nil, err + } + if wrap { + buf.Write([]byte(`}`)) + } + return buf, nil +} + +// endingWithErrorReader from r until it returns an error. If the +// final error from r is io.EOF and e is non-nil, e is used instead. +type endingWithErrorReader struct { + r io.Reader + e error +} + +func (er endingWithErrorReader) Read(p []byte) (n int, err error) { + n, err = er.r.Read(p) + if err == io.EOF && er.e != nil { + err = er.e + } + return +} + +// countingWriter counts the number of bytes it receives to write, but +// discards them. +type countingWriter struct { + n *int64 +} + +func (w countingWriter) Write(p []byte) (int, error) { + *w.n += int64(len(p)) + return len(p), nil +} + +// ProgressUpdater is a function that is called upon every progress update of a resumable upload. +// This is the only part of a resumable upload (from googleapi) that is usable by the developer. +// The remaining usable pieces of resumable uploads is exposed in each auto-generated API. +type ProgressUpdater func(current, total int64) + +type MediaOption interface { + setOptions(o *MediaOptions) +} + +type contentTypeOption string + +func (ct contentTypeOption) setOptions(o *MediaOptions) { + o.ContentType = string(ct) + if o.ContentType == "" { + o.ForceEmptyContentType = true + } +} + +// ContentType returns a MediaOption which sets the Content-Type header for media uploads. +// If ctype is empty, the Content-Type header will be omitted. +func ContentType(ctype string) MediaOption { + return contentTypeOption(ctype) +} + +type chunkSizeOption int + +func (cs chunkSizeOption) setOptions(o *MediaOptions) { + size := int(cs) + if size%MinUploadChunkSize != 0 { + size += MinUploadChunkSize - (size % MinUploadChunkSize) + } + o.ChunkSize = size +} + +// ChunkSize returns a MediaOption which sets the chunk size for media uploads. +// size will be rounded up to the nearest multiple of 256K. +// Media which contains fewer than size bytes will be uploaded in a single request. +// Media which contains size bytes or more will be uploaded in separate chunks. +// If size is zero, media will be uploaded in a single request. +func ChunkSize(size int) MediaOption { + return chunkSizeOption(size) +} + +// MediaOptions stores options for customizing media upload. It is not used by developers directly. +type MediaOptions struct { + ContentType string + ForceEmptyContentType bool + + ChunkSize int +} + +// ProcessMediaOptions stores options from opts in a MediaOptions. +// It is not used by developers directly. 
+func ProcessMediaOptions(opts []MediaOption) *MediaOptions { + mo := &MediaOptions{ChunkSize: DefaultUploadChunkSize} + for _, o := range opts { + o.setOptions(mo) + } + return mo +} + +func ResolveRelative(basestr, relstr string) string { + u, _ := url.Parse(basestr) + rel, _ := url.Parse(relstr) + u = u.ResolveReference(rel) + us := u.String() + us = strings.Replace(us, "%7B", "{", -1) + us = strings.Replace(us, "%7D", "}", -1) + return us +} + +// has4860Fix is whether this Go environment contains the fix for +// http://golang.org/issue/4860 +var has4860Fix bool + +// init initializes has4860Fix by checking the behavior of the net/http package. +func init() { + r := http.Request{ + URL: &url.URL{ + Scheme: "http", + Opaque: "//opaque", + }, + } + b := &bytes.Buffer{} + r.Write(b) + has4860Fix = bytes.HasPrefix(b.Bytes(), []byte("GET http")) +} + +// SetOpaque sets u.Opaque from u.Path such that HTTP requests to it +// don't alter any hex-escaped characters in u.Path. +func SetOpaque(u *url.URL) { + u.Opaque = "//" + u.Host + u.Path + if !has4860Fix { + u.Opaque = u.Scheme + ":" + u.Opaque + } +} + +// Expand subsitutes any {encoded} strings in the URL passed in using +// the map supplied. +// +// This calls SetOpaque to avoid encoding of the parameters in the URL path. +func Expand(u *url.URL, expansions map[string]string) { + expanded, err := uritemplates.Expand(u.Path, expansions) + if err == nil { + u.Path = expanded + SetOpaque(u) + } +} + +// CloseBody is used to close res.Body. +// Prior to calling Close, it also tries to Read a small amount to see an EOF. +// Not seeing an EOF can prevent HTTP Transports from reusing connections. +func CloseBody(res *http.Response) { + if res == nil || res.Body == nil { + return + } + // Justification for 3 byte reads: two for up to "\r\n" after + // a JSON/XML document, and then 1 to see EOF if we haven't yet. + // TODO(bradfitz): detect Go 1.3+ and skip these reads. + // See https://codereview.appspot.com/58240043 + // and https://codereview.appspot.com/49570044 + buf := make([]byte, 1) + for i := 0; i < 3; i++ { + _, err := res.Body.Read(buf) + if err != nil { + break + } + } + res.Body.Close() + +} + +// VariantType returns the type name of the given variant. +// If the map doesn't contain the named key or the value is not a []interface{}, "" is returned. +// This is used to support "variant" APIs that can return one of a number of different types. +func VariantType(t map[string]interface{}) string { + s, _ := t["type"].(string) + return s +} + +// ConvertVariant uses the JSON encoder/decoder to fill in the struct 'dst' with the fields found in variant 'v'. +// This is used to support "variant" APIs that can return one of a number of different types. +// It reports whether the conversion was successful. +func ConvertVariant(v map[string]interface{}, dst interface{}) bool { + var buf bytes.Buffer + err := json.NewEncoder(&buf).Encode(v) + if err != nil { + return false + } + return json.Unmarshal(buf.Bytes(), dst) == nil +} + +// A Field names a field to be retrieved with a partial response. +// See https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// +// Partial responses can dramatically reduce the amount of data that must be sent to your application. +// In order to request partial responses, you can specify the full list of fields +// that your application needs by adding the Fields option to your request. +// +// Field strings use camelCase with leading lower-case characters to identify fields within the response. 
+// +// For example, if your response has a "NextPageToken" and a slice of "Items" with "Id" fields, +// you could request just those fields like this: +// +// svc.Events.List().Fields("nextPageToken", "items/id").Do() +// +// or if you were also interested in each Item's "Updated" field, you can combine them like this: +// +// svc.Events.List().Fields("nextPageToken", "items(id,updated)").Do() +// +// More information about field formatting can be found here: +// https://developers.google.com/+/api/#fields-syntax +// +// Another way to find field names is through the Google API explorer: +// https://developers.google.com/apis-explorer/#p/ +type Field string + +// CombineFields combines fields into a single string. +func CombineFields(s []Field) string { + r := make([]string, len(s)) + for i, v := range s { + r[i] = string(v) + } + return strings.Join(r, ",") +} + +// A CallOption is an optional argument to an API call. +// It should be treated as an opaque value by users of Google APIs. +// +// A CallOption is something that configures an API call in a way that is +// not specific to that API; for instance, controlling the quota user for +// an API call is common across many APIs, and is thus a CallOption. +type CallOption interface { + Get() (key, value string) +} + +// QuotaUser returns a CallOption that will set the quota user for a call. +// The quota user can be used by server-side applications to control accounting. +// It can be an arbitrary string up to 40 characters, and will override UserIP +// if both are provided. +func QuotaUser(u string) CallOption { return quotaUser(u) } + +type quotaUser string + +func (q quotaUser) Get() (string, string) { return "quotaUser", string(q) } + +// UserIP returns a CallOption that will set the "userIp" parameter of a call. +// This should be the IP address of the originating request. +func UserIP(ip string) CallOption { return userIP(ip) } + +type userIP string + +func (i userIP) Get() (string, string) { return "userIp", string(i) } + +// TODO: Fields too diff --git a/vendor/github.com/docker/distribution/vendor/google.golang.org/api/googleapi/internal/uritemplates/LICENSE b/vendor/github.com/docker/distribution/vendor/google.golang.org/api/googleapi/internal/uritemplates/LICENSE new file mode 100644 index 000000000..de9c88cb6 --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/google.golang.org/api/googleapi/internal/uritemplates/LICENSE @@ -0,0 +1,18 @@ +Copyright (c) 2013 Joshua Tacoma + +Permission is hereby granted, free of charge, to any person obtaining a copy of +this software and associated documentation files (the "Software"), to deal in +the Software without restriction, including without limitation the rights to +use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of +the Software, and to permit persons to whom the Software is furnished to do so, +subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS +FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
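The googleapi and gensupport packages vendored above fit together when a generated client builds a request URL: each CallOption (QuotaUser, UserIP) exposes a key/value pair that gensupport.SetOptions copies into a URLParams map, and Field selectors are joined by CombineFields into a single partial-response string. A minimal sketch of that flow, outside the vendored tree and with an illustrative bucket path (not taken from the vendored code), might look like:

package main

import (
	"fmt"

	"google.golang.org/api/gensupport"
	"google.golang.org/api/googleapi"
)

func main() {
	params := make(gensupport.URLParams)

	// Request a partial response containing only the fields we care about.
	params.Set("fields", googleapi.CombineFields([]googleapi.Field{"nextPageToken", "items/id"}))

	// CallOptions expose Get() (key, value); SetOptions copies each pair into params.
	gensupport.SetOptions(params,
		googleapi.QuotaUser("example-user"), // hypothetical quota user
		googleapi.UserIP("203.0.113.1"),     // documentation-range IP
	)

	// ResolveRelative joins the service base URL with a method path,
	// preserving {bucket} for later template expansion.
	url := googleapi.ResolveRelative("https://www.googleapis.com/storage/v1/", "b/{bucket}/o")
	fmt.Println(url + "?" + params.Encode())
}

This is only an assumption-laden usage sketch; in practice these calls are emitted by the google-api-go-generator rather than written by hand, as the gensupport package documentation above notes.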
diff --git a/vendor/github.com/docker/distribution/vendor/google.golang.org/api/googleapi/internal/uritemplates/uritemplates.go b/vendor/github.com/docker/distribution/vendor/google.golang.org/api/googleapi/internal/uritemplates/uritemplates.go new file mode 100644 index 000000000..7c103ba13 --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/google.golang.org/api/googleapi/internal/uritemplates/uritemplates.go @@ -0,0 +1,220 @@ +// Copyright 2013 Joshua Tacoma. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package uritemplates is a level 3 implementation of RFC 6570 (URI +// Template, http://tools.ietf.org/html/rfc6570). +// uritemplates does not support composite values (in Go: slices or maps) +// and so does not qualify as a level 4 implementation. +package uritemplates + +import ( + "bytes" + "errors" + "regexp" + "strconv" + "strings" +) + +var ( + unreserved = regexp.MustCompile("[^A-Za-z0-9\\-._~]") + reserved = regexp.MustCompile("[^A-Za-z0-9\\-._~:/?#[\\]@!$&'()*+,;=]") + validname = regexp.MustCompile("^([A-Za-z0-9_\\.]|%[0-9A-Fa-f][0-9A-Fa-f])+$") + hex = []byte("0123456789ABCDEF") +) + +func pctEncode(src []byte) []byte { + dst := make([]byte, len(src)*3) + for i, b := range src { + buf := dst[i*3 : i*3+3] + buf[0] = 0x25 + buf[1] = hex[b/16] + buf[2] = hex[b%16] + } + return dst +} + +func escape(s string, allowReserved bool) string { + if allowReserved { + return string(reserved.ReplaceAllFunc([]byte(s), pctEncode)) + } + return string(unreserved.ReplaceAllFunc([]byte(s), pctEncode)) +} + +// A uriTemplate is a parsed representation of a URI template. +type uriTemplate struct { + raw string + parts []templatePart +} + +// parse parses a URI template string into a uriTemplate object. +func parse(rawTemplate string) (*uriTemplate, error) { + split := strings.Split(rawTemplate, "{") + parts := make([]templatePart, len(split)*2-1) + for i, s := range split { + if i == 0 { + if strings.Contains(s, "}") { + return nil, errors.New("unexpected }") + } + parts[i].raw = s + continue + } + subsplit := strings.Split(s, "}") + if len(subsplit) != 2 { + return nil, errors.New("malformed template") + } + expression := subsplit[0] + var err error + parts[i*2-1], err = parseExpression(expression) + if err != nil { + return nil, err + } + parts[i*2].raw = subsplit[1] + } + return &uriTemplate{ + raw: rawTemplate, + parts: parts, + }, nil +} + +type templatePart struct { + raw string + terms []templateTerm + first string + sep string + named bool + ifemp string + allowReserved bool +} + +type templateTerm struct { + name string + explode bool + truncate int +} + +func parseExpression(expression string) (result templatePart, err error) { + switch expression[0] { + case '+': + result.sep = "," + result.allowReserved = true + expression = expression[1:] + case '.': + result.first = "." + result.sep = "." + expression = expression[1:] + case '/': + result.first = "/" + result.sep = "/" + expression = expression[1:] + case ';': + result.first = ";" + result.sep = ";" + result.named = true + expression = expression[1:] + case '?': + result.first = "?" 
+ result.sep = "&" + result.named = true + result.ifemp = "=" + expression = expression[1:] + case '&': + result.first = "&" + result.sep = "&" + result.named = true + result.ifemp = "=" + expression = expression[1:] + case '#': + result.first = "#" + result.sep = "," + result.allowReserved = true + expression = expression[1:] + default: + result.sep = "," + } + rawterms := strings.Split(expression, ",") + result.terms = make([]templateTerm, len(rawterms)) + for i, raw := range rawterms { + result.terms[i], err = parseTerm(raw) + if err != nil { + break + } + } + return result, err +} + +func parseTerm(term string) (result templateTerm, err error) { + // TODO(djd): Remove "*" suffix parsing once we check that no APIs have + // mistakenly used that attribute. + if strings.HasSuffix(term, "*") { + result.explode = true + term = term[:len(term)-1] + } + split := strings.Split(term, ":") + if len(split) == 1 { + result.name = term + } else if len(split) == 2 { + result.name = split[0] + var parsed int64 + parsed, err = strconv.ParseInt(split[1], 10, 0) + result.truncate = int(parsed) + } else { + err = errors.New("multiple colons in same term") + } + if !validname.MatchString(result.name) { + err = errors.New("not a valid name: " + result.name) + } + if result.explode && result.truncate > 0 { + err = errors.New("both explode and prefix modifers on same term") + } + return result, err +} + +// Expand expands a URI template with a set of values to produce a string. +func (t *uriTemplate) Expand(values map[string]string) string { + var buf bytes.Buffer + for _, p := range t.parts { + p.expand(&buf, values) + } + return buf.String() +} + +func (tp *templatePart) expand(buf *bytes.Buffer, values map[string]string) { + if len(tp.raw) > 0 { + buf.WriteString(tp.raw) + return + } + var first = true + for _, term := range tp.terms { + value, exists := values[term.name] + if !exists { + continue + } + if first { + buf.WriteString(tp.first) + first = false + } else { + buf.WriteString(tp.sep) + } + tp.expandString(buf, term, value) + } +} + +func (tp *templatePart) expandName(buf *bytes.Buffer, name string, empty bool) { + if tp.named { + buf.WriteString(name) + if empty { + buf.WriteString(tp.ifemp) + } else { + buf.WriteString("=") + } + } +} + +func (tp *templatePart) expandString(buf *bytes.Buffer, t templateTerm, s string) { + if len(s) > t.truncate && t.truncate > 0 { + s = s[:t.truncate] + } + tp.expandName(buf, t.name, len(s) == 0) + buf.WriteString(escape(s, tp.allowReserved)) +} diff --git a/vendor/github.com/docker/distribution/vendor/google.golang.org/api/googleapi/internal/uritemplates/utils.go b/vendor/github.com/docker/distribution/vendor/google.golang.org/api/googleapi/internal/uritemplates/utils.go new file mode 100644 index 000000000..eff260a69 --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/google.golang.org/api/googleapi/internal/uritemplates/utils.go @@ -0,0 +1,13 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package uritemplates + +func Expand(path string, values map[string]string) (string, error) { + template, err := parse(path) + if err != nil { + return "", err + } + return template.Expand(values), nil +} diff --git a/vendor/github.com/docker/distribution/vendor/google.golang.org/api/googleapi/types.go b/vendor/github.com/docker/distribution/vendor/google.golang.org/api/googleapi/types.go new file mode 100644 index 000000000..a02b4b071 --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/google.golang.org/api/googleapi/types.go @@ -0,0 +1,182 @@ +// Copyright 2013 Google Inc. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package googleapi + +import ( + "encoding/json" + "strconv" +) + +// Int64s is a slice of int64s that marshal as quoted strings in JSON. +type Int64s []int64 + +func (q *Int64s) UnmarshalJSON(raw []byte) error { + *q = (*q)[:0] + var ss []string + if err := json.Unmarshal(raw, &ss); err != nil { + return err + } + for _, s := range ss { + v, err := strconv.ParseInt(s, 10, 64) + if err != nil { + return err + } + *q = append(*q, int64(v)) + } + return nil +} + +// Int32s is a slice of int32s that marshal as quoted strings in JSON. +type Int32s []int32 + +func (q *Int32s) UnmarshalJSON(raw []byte) error { + *q = (*q)[:0] + var ss []string + if err := json.Unmarshal(raw, &ss); err != nil { + return err + } + for _, s := range ss { + v, err := strconv.ParseInt(s, 10, 32) + if err != nil { + return err + } + *q = append(*q, int32(v)) + } + return nil +} + +// Uint64s is a slice of uint64s that marshal as quoted strings in JSON. +type Uint64s []uint64 + +func (q *Uint64s) UnmarshalJSON(raw []byte) error { + *q = (*q)[:0] + var ss []string + if err := json.Unmarshal(raw, &ss); err != nil { + return err + } + for _, s := range ss { + v, err := strconv.ParseUint(s, 10, 64) + if err != nil { + return err + } + *q = append(*q, uint64(v)) + } + return nil +} + +// Uint32s is a slice of uint32s that marshal as quoted strings in JSON. +type Uint32s []uint32 + +func (q *Uint32s) UnmarshalJSON(raw []byte) error { + *q = (*q)[:0] + var ss []string + if err := json.Unmarshal(raw, &ss); err != nil { + return err + } + for _, s := range ss { + v, err := strconv.ParseUint(s, 10, 32) + if err != nil { + return err + } + *q = append(*q, uint32(v)) + } + return nil +} + +// Float64s is a slice of float64s that marshal as quoted strings in JSON. 
+type Float64s []float64 + +func (q *Float64s) UnmarshalJSON(raw []byte) error { + *q = (*q)[:0] + var ss []string + if err := json.Unmarshal(raw, &ss); err != nil { + return err + } + for _, s := range ss { + v, err := strconv.ParseFloat(s, 64) + if err != nil { + return err + } + *q = append(*q, float64(v)) + } + return nil +} + +func quotedList(n int, fn func(dst []byte, i int) []byte) ([]byte, error) { + dst := make([]byte, 0, 2+n*10) // somewhat arbitrary + dst = append(dst, '[') + for i := 0; i < n; i++ { + if i > 0 { + dst = append(dst, ',') + } + dst = append(dst, '"') + dst = fn(dst, i) + dst = append(dst, '"') + } + dst = append(dst, ']') + return dst, nil +} + +func (s Int64s) MarshalJSON() ([]byte, error) { + return quotedList(len(s), func(dst []byte, i int) []byte { + return strconv.AppendInt(dst, s[i], 10) + }) +} + +func (s Int32s) MarshalJSON() ([]byte, error) { + return quotedList(len(s), func(dst []byte, i int) []byte { + return strconv.AppendInt(dst, int64(s[i]), 10) + }) +} + +func (s Uint64s) MarshalJSON() ([]byte, error) { + return quotedList(len(s), func(dst []byte, i int) []byte { + return strconv.AppendUint(dst, s[i], 10) + }) +} + +func (s Uint32s) MarshalJSON() ([]byte, error) { + return quotedList(len(s), func(dst []byte, i int) []byte { + return strconv.AppendUint(dst, uint64(s[i]), 10) + }) +} + +func (s Float64s) MarshalJSON() ([]byte, error) { + return quotedList(len(s), func(dst []byte, i int) []byte { + return strconv.AppendFloat(dst, s[i], 'g', -1, 64) + }) +} + +/* + * Helper routines for simplifying the creation of optional fields of basic type. + */ + +// Bool is a helper routine that allocates a new bool value +// to store v and returns a pointer to it. +func Bool(v bool) *bool { return &v } + +// Int32 is a helper routine that allocates a new int32 value +// to store v and returns a pointer to it. +func Int32(v int32) *int32 { return &v } + +// Int64 is a helper routine that allocates a new int64 value +// to store v and returns a pointer to it. +func Int64(v int64) *int64 { return &v } + +// Float64 is a helper routine that allocates a new float64 value +// to store v and returns a pointer to it. +func Float64(v float64) *float64 { return &v } + +// Uint32 is a helper routine that allocates a new uint32 value +// to store v and returns a pointer to it. +func Uint32(v uint32) *uint32 { return &v } + +// Uint64 is a helper routine that allocates a new uint64 value +// to store v and returns a pointer to it. +func Uint64(v uint64) *uint64 { return &v } + +// String is a helper routine that allocates a new string value +// to store v and returns a pointer to it. 
+func String(v string) *string { return &v } diff --git a/vendor/github.com/docker/distribution/vendor/google.golang.org/api/storage/v1/storage-api.json b/vendor/github.com/docker/distribution/vendor/google.golang.org/api/storage/v1/storage-api.json new file mode 100644 index 000000000..3768b4687 --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/google.golang.org/api/storage/v1/storage-api.json @@ -0,0 +1,2865 @@ +{ + "kind": "discovery#restDescription", + "etag": "\"bRFOOrZKfO9LweMbPqu0kcu6De8/KVPQfwGxQTBtH0g1kuij0C9i4uc\"", + "discoveryVersion": "v1", + "id": "storage:v1", + "name": "storage", + "version": "v1", + "revision": "20160304", + "title": "Cloud Storage JSON API", + "description": "Stores and retrieves potentially large, immutable data objects.", + "ownerDomain": "google.com", + "ownerName": "Google", + "icons": { + "x16": "https://www.google.com/images/icons/product/cloud_storage-16.png", + "x32": "https://www.google.com/images/icons/product/cloud_storage-32.png" + }, + "documentationLink": "https://developers.google.com/storage/docs/json_api/", + "labels": [ + "labs" + ], + "protocol": "rest", + "baseUrl": "https://www.googleapis.com/storage/v1/", + "basePath": "/storage/v1/", + "rootUrl": "https://www.googleapis.com/", + "servicePath": "storage/v1/", + "batchPath": "batch", + "parameters": { + "alt": { + "type": "string", + "description": "Data format for the response.", + "default": "json", + "enum": [ + "json" + ], + "enumDescriptions": [ + "Responses with Content-Type of application/json" + ], + "location": "query" + }, + "fields": { + "type": "string", + "description": "Selector specifying which fields to include in a partial response.", + "location": "query" + }, + "key": { + "type": "string", + "description": "API key. Your API key identifies your project and provides you with API access, quota, and reports. Required unless you provide an OAuth 2.0 token.", + "location": "query" + }, + "oauth_token": { + "type": "string", + "description": "OAuth 2.0 token for the current user.", + "location": "query" + }, + "prettyPrint": { + "type": "boolean", + "description": "Returns response with indentations and line breaks.", + "default": "true", + "location": "query" + }, + "quotaUser": { + "type": "string", + "description": "Available to use for quota purposes for server-side applications. Can be any arbitrary string assigned to a user, but should not exceed 40 characters. Overrides userIp if both are provided.", + "location": "query" + }, + "userIp": { + "type": "string", + "description": "IP address of the site where the request originates. 
Use this if you want to enforce per-user limits.", + "location": "query" + } + }, + "auth": { + "oauth2": { + "scopes": { + "https://www.googleapis.com/auth/cloud-platform": { + "description": "View and manage your data across Google Cloud Platform services" + }, + "https://www.googleapis.com/auth/cloud-platform.read-only": { + "description": "View your data across Google Cloud Platform services" + }, + "https://www.googleapis.com/auth/devstorage.full_control": { + "description": "Manage your data and permissions in Google Cloud Storage" + }, + "https://www.googleapis.com/auth/devstorage.read_only": { + "description": "View your data in Google Cloud Storage" + }, + "https://www.googleapis.com/auth/devstorage.read_write": { + "description": "Manage your data in Google Cloud Storage" + } + } + } + }, + "schemas": { + "Bucket": { + "id": "Bucket", + "type": "object", + "description": "A bucket.", + "properties": { + "acl": { + "type": "array", + "description": "Access controls on the bucket.", + "items": { + "$ref": "BucketAccessControl" + }, + "annotations": { + "required": [ + "storage.buckets.update" + ] + } + }, + "cors": { + "type": "array", + "description": "The bucket's Cross-Origin Resource Sharing (CORS) configuration.", + "items": { + "type": "object", + "properties": { + "maxAgeSeconds": { + "type": "integer", + "description": "The value, in seconds, to return in the Access-Control-Max-Age header used in preflight responses.", + "format": "int32" + }, + "method": { + "type": "array", + "description": "The list of HTTP methods on which to include CORS response headers, (GET, OPTIONS, POST, etc) Note: \"*\" is permitted in the list of methods, and means \"any method\".", + "items": { + "type": "string" + } + }, + "origin": { + "type": "array", + "description": "The list of Origins eligible to receive CORS response headers. Note: \"*\" is permitted in the list of origins, and means \"any Origin\".", + "items": { + "type": "string" + } + }, + "responseHeader": { + "type": "array", + "description": "The list of HTTP headers other than the simple response headers to give permission for the user-agent to share across domains.", + "items": { + "type": "string" + } + } + } + } + }, + "defaultObjectAcl": { + "type": "array", + "description": "Default access controls to apply to new objects when no ACL is provided.", + "items": { + "$ref": "ObjectAccessControl" + } + }, + "etag": { + "type": "string", + "description": "HTTP 1.1 Entity tag for the bucket." + }, + "id": { + "type": "string", + "description": "The ID of the bucket." + }, + "kind": { + "type": "string", + "description": "The kind of item this is. For buckets, this is always storage#bucket.", + "default": "storage#bucket" + }, + "lifecycle": { + "type": "object", + "description": "The bucket's lifecycle configuration. See lifecycle management for more information.", + "properties": { + "rule": { + "type": "array", + "description": "A lifecycle management rule, which is made of an action to take and the condition(s) under which the action will be taken.", + "items": { + "type": "object", + "properties": { + "action": { + "type": "object", + "description": "The action to take.", + "properties": { + "type": { + "type": "string", + "description": "Type of the action. Currently, only Delete is supported." + } + } + }, + "condition": { + "type": "object", + "description": "The condition(s) under which the action will be taken.", + "properties": { + "age": { + "type": "integer", + "description": "Age of an object (in days). 
This condition is satisfied when an object reaches the specified age.", + "format": "int32" + }, + "createdBefore": { + "type": "string", + "description": "A date in RFC 3339 format with only the date part (for instance, \"2013-01-15\"). This condition is satisfied when an object is created before midnight of the specified date in UTC.", + "format": "date" + }, + "isLive": { + "type": "boolean", + "description": "Relevant only for versioned objects. If the value is true, this condition matches live objects; if the value is false, it matches archived objects." + }, + "numNewerVersions": { + "type": "integer", + "description": "Relevant only for versioned objects. If the value is N, this condition is satisfied when there are at least N versions (including the live version) newer than this version of the object.", + "format": "int32" + } + } + } + } + } + } + } + }, + "location": { + "type": "string", + "description": "The location of the bucket. Object data for objects in the bucket resides in physical storage within this region. Defaults to US. See the developer's guide for the authoritative list." + }, + "logging": { + "type": "object", + "description": "The bucket's logging configuration, which defines the destination bucket and optional name prefix for the current bucket's logs.", + "properties": { + "logBucket": { + "type": "string", + "description": "The destination bucket where the current bucket's logs should be placed." + }, + "logObjectPrefix": { + "type": "string", + "description": "A prefix for log object names." + } + } + }, + "metageneration": { + "type": "string", + "description": "The metadata generation of this bucket.", + "format": "int64" + }, + "name": { + "type": "string", + "description": "The name of the bucket.", + "annotations": { + "required": [ + "storage.buckets.insert" + ] + } + }, + "owner": { + "type": "object", + "description": "The owner of the bucket. This is always the project team's owner group.", + "properties": { + "entity": { + "type": "string", + "description": "The entity, in the form project-owner-projectId." + }, + "entityId": { + "type": "string", + "description": "The ID for the entity." + } + } + }, + "projectNumber": { + "type": "string", + "description": "The project number of the project the bucket belongs to.", + "format": "uint64" + }, + "selfLink": { + "type": "string", + "description": "The URI of this bucket." + }, + "storageClass": { + "type": "string", + "description": "The bucket's storage class. This defines how objects in the bucket are stored and determines the SLA and the cost of storage. Values include STANDARD, NEARLINE and DURABLE_REDUCED_AVAILABILITY. Defaults to STANDARD. For more information, see storage classes." + }, + "timeCreated": { + "type": "string", + "description": "The creation time of the bucket in RFC 3339 format.", + "format": "date-time" + }, + "updated": { + "type": "string", + "description": "The modification time of the bucket in RFC 3339 format.", + "format": "date-time" + }, + "versioning": { + "type": "object", + "description": "The bucket's versioning configuration.", + "properties": { + "enabled": { + "type": "boolean", + "description": "While set to true, versioning is fully enabled for this bucket." + } + } + }, + "website": { + "type": "object", + "description": "The bucket's website configuration.", + "properties": { + "mainPageSuffix": { + "type": "string", + "description": "Behaves as the bucket's directory index where missing objects are treated as potential directories." 
+ }, + "notFoundPage": { + "type": "string", + "description": "The custom object to return when a requested resource is not found." + } + } + } + } + }, + "BucketAccessControl": { + "id": "BucketAccessControl", + "type": "object", + "description": "An access-control entry.", + "properties": { + "bucket": { + "type": "string", + "description": "The name of the bucket." + }, + "domain": { + "type": "string", + "description": "The domain associated with the entity, if any." + }, + "email": { + "type": "string", + "description": "The email address associated with the entity, if any." + }, + "entity": { + "type": "string", + "description": "The entity holding the permission, in one of the following forms: \n- user-userId \n- user-email \n- group-groupId \n- group-email \n- domain-domain \n- project-team-projectId \n- allUsers \n- allAuthenticatedUsers Examples: \n- The user liz@example.com would be user-liz@example.com. \n- The group example@googlegroups.com would be group-example@googlegroups.com. \n- To refer to all members of the Google Apps for Business domain example.com, the entity would be domain-example.com.", + "annotations": { + "required": [ + "storage.bucketAccessControls.insert" + ] + } + }, + "entityId": { + "type": "string", + "description": "The ID for the entity, if any." + }, + "etag": { + "type": "string", + "description": "HTTP 1.1 Entity tag for the access-control entry." + }, + "id": { + "type": "string", + "description": "The ID of the access-control entry." + }, + "kind": { + "type": "string", + "description": "The kind of item this is. For bucket access control entries, this is always storage#bucketAccessControl.", + "default": "storage#bucketAccessControl" + }, + "projectTeam": { + "type": "object", + "description": "The project team associated with the entity, if any.", + "properties": { + "projectNumber": { + "type": "string", + "description": "The project number." + }, + "team": { + "type": "string", + "description": "The team. Can be owners, editors, or viewers." + } + } + }, + "role": { + "type": "string", + "description": "The access permission for the entity. Can be READER, WRITER, or OWNER.", + "annotations": { + "required": [ + "storage.bucketAccessControls.insert" + ] + } + }, + "selfLink": { + "type": "string", + "description": "The link to this access-control entry." + } + } + }, + "BucketAccessControls": { + "id": "BucketAccessControls", + "type": "object", + "description": "An access-control list.", + "properties": { + "items": { + "type": "array", + "description": "The list of items.", + "items": { + "$ref": "BucketAccessControl" + } + }, + "kind": { + "type": "string", + "description": "The kind of item this is. For lists of bucket access control entries, this is always storage#bucketAccessControls.", + "default": "storage#bucketAccessControls" + } + } + }, + "Buckets": { + "id": "Buckets", + "type": "object", + "description": "A list of buckets.", + "properties": { + "items": { + "type": "array", + "description": "The list of items.", + "items": { + "$ref": "Bucket" + } + }, + "kind": { + "type": "string", + "description": "The kind of item this is. For lists of buckets, this is always storage#buckets.", + "default": "storage#buckets" + }, + "nextPageToken": { + "type": "string", + "description": "The continuation token, used to page through large result sets. Provide this value in a subsequent request to return the next page of results." 
+ } + } + }, + "Channel": { + "id": "Channel", + "type": "object", + "description": "An notification channel used to watch for resource changes.", + "properties": { + "address": { + "type": "string", + "description": "The address where notifications are delivered for this channel." + }, + "expiration": { + "type": "string", + "description": "Date and time of notification channel expiration, expressed as a Unix timestamp, in milliseconds. Optional.", + "format": "int64" + }, + "id": { + "type": "string", + "description": "A UUID or similar unique string that identifies this channel." + }, + "kind": { + "type": "string", + "description": "Identifies this as a notification channel used to watch for changes to a resource. Value: the fixed string \"api#channel\".", + "default": "api#channel" + }, + "params": { + "type": "object", + "description": "Additional parameters controlling delivery channel behavior. Optional.", + "additionalProperties": { + "type": "string", + "description": "Declares a new parameter by name." + } + }, + "payload": { + "type": "boolean", + "description": "A Boolean value to indicate whether payload is wanted. Optional." + }, + "resourceId": { + "type": "string", + "description": "An opaque ID that identifies the resource being watched on this channel. Stable across different API versions." + }, + "resourceUri": { + "type": "string", + "description": "A version-specific identifier for the watched resource." + }, + "token": { + "type": "string", + "description": "An arbitrary string delivered to the target address with each notification delivered over this channel. Optional." + }, + "type": { + "type": "string", + "description": "The type of delivery mechanism used for this channel." + } + } + }, + "ComposeRequest": { + "id": "ComposeRequest", + "type": "object", + "description": "A Compose request.", + "properties": { + "destination": { + "$ref": "Object", + "description": "Properties of the resulting object." + }, + "kind": { + "type": "string", + "description": "The kind of item this is.", + "default": "storage#composeRequest" + }, + "sourceObjects": { + "type": "array", + "description": "The list of source objects that will be concatenated into a single object.", + "items": { + "type": "object", + "properties": { + "generation": { + "type": "string", + "description": "The generation of this object to use as the source.", + "format": "int64" + }, + "name": { + "type": "string", + "description": "The source object's name. The source object's bucket is implicitly the destination bucket.", + "annotations": { + "required": [ + "storage.objects.compose" + ] + } + }, + "objectPreconditions": { + "type": "object", + "description": "Conditions that must be met for this operation to execute.", + "properties": { + "ifGenerationMatch": { + "type": "string", + "description": "Only perform the composition if the generation of the source object that would be used matches this value. 
If this value and a generation are both specified, they must be the same value or the call will fail.", + "format": "int64" + } + } + } + } + }, + "annotations": { + "required": [ + "storage.objects.compose" + ] + } + } + } + }, + "Object": { + "id": "Object", + "type": "object", + "description": "An object.", + "properties": { + "acl": { + "type": "array", + "description": "Access controls on the object.", + "items": { + "$ref": "ObjectAccessControl" + }, + "annotations": { + "required": [ + "storage.objects.update" + ] + } + }, + "bucket": { + "type": "string", + "description": "The name of the bucket containing this object." + }, + "cacheControl": { + "type": "string", + "description": "Cache-Control directive for the object data." + }, + "componentCount": { + "type": "integer", + "description": "Number of underlying components that make up this object. Components are accumulated by compose operations.", + "format": "int32" + }, + "contentDisposition": { + "type": "string", + "description": "Content-Disposition of the object data." + }, + "contentEncoding": { + "type": "string", + "description": "Content-Encoding of the object data." + }, + "contentLanguage": { + "type": "string", + "description": "Content-Language of the object data." + }, + "contentType": { + "type": "string", + "description": "Content-Type of the object data." + }, + "crc32c": { + "type": "string", + "description": "CRC32c checksum, as described in RFC 4960, Appendix B; encoded using base64 in big-endian byte order. For more information about using the CRC32c checksum, see Hashes and ETags: Best Practices." + }, + "customerEncryption": { + "type": "object", + "description": "Metadata of customer-supplied encryption key, if the object is encrypted by such a key.", + "properties": { + "encryptionAlgorithm": { + "type": "string", + "description": "The encryption algorithm." + }, + "keySha256": { + "type": "string", + "description": "SHA256 hash value of the encryption key." + } + } + }, + "etag": { + "type": "string", + "description": "HTTP 1.1 Entity tag for the object." + }, + "generation": { + "type": "string", + "description": "The content generation of this object. Used for object versioning.", + "format": "int64" + }, + "id": { + "type": "string", + "description": "The ID of the object." + }, + "kind": { + "type": "string", + "description": "The kind of item this is. For objects, this is always storage#object.", + "default": "storage#object" + }, + "md5Hash": { + "type": "string", + "description": "MD5 hash of the data; encoded using base64. For more information about using the MD5 hash, see Hashes and ETags: Best Practices." + }, + "mediaLink": { + "type": "string", + "description": "Media download link." + }, + "metadata": { + "type": "object", + "description": "User-provided metadata, in key/value pairs.", + "additionalProperties": { + "type": "string", + "description": "An individual metadata entry." + } + }, + "metageneration": { + "type": "string", + "description": "The version of the metadata for this object at this generation. Used for preconditions and for detecting changes in metadata. A metageneration number is only meaningful in the context of a particular generation of a particular object.", + "format": "int64" + }, + "name": { + "type": "string", + "description": "The name of this object. Required if not specified by URL parameter." + }, + "owner": { + "type": "object", + "description": "The owner of the object. 
This will always be the uploader of the object.", + "properties": { + "entity": { + "type": "string", + "description": "The entity, in the form user-userId." + }, + "entityId": { + "type": "string", + "description": "The ID for the entity." + } + } + }, + "selfLink": { + "type": "string", + "description": "The link to this object." + }, + "size": { + "type": "string", + "description": "Content-Length of the data in bytes.", + "format": "uint64" + }, + "storageClass": { + "type": "string", + "description": "Storage class of the object." + }, + "timeCreated": { + "type": "string", + "description": "The creation time of the object in RFC 3339 format.", + "format": "date-time" + }, + "timeDeleted": { + "type": "string", + "description": "The deletion time of the object in RFC 3339 format. Will be returned if and only if this version of the object has been deleted.", + "format": "date-time" + }, + "updated": { + "type": "string", + "description": "The modification time of the object metadata in RFC 3339 format.", + "format": "date-time" + } + } + }, + "ObjectAccessControl": { + "id": "ObjectAccessControl", + "type": "object", + "description": "An access-control entry.", + "properties": { + "bucket": { + "type": "string", + "description": "The name of the bucket." + }, + "domain": { + "type": "string", + "description": "The domain associated with the entity, if any." + }, + "email": { + "type": "string", + "description": "The email address associated with the entity, if any." + }, + "entity": { + "type": "string", + "description": "The entity holding the permission, in one of the following forms: \n- user-userId \n- user-email \n- group-groupId \n- group-email \n- domain-domain \n- project-team-projectId \n- allUsers \n- allAuthenticatedUsers Examples: \n- The user liz@example.com would be user-liz@example.com. \n- The group example@googlegroups.com would be group-example@googlegroups.com. \n- To refer to all members of the Google Apps for Business domain example.com, the entity would be domain-example.com." + }, + "entityId": { + "type": "string", + "description": "The ID for the entity, if any." + }, + "etag": { + "type": "string", + "description": "HTTP 1.1 Entity tag for the access-control entry." + }, + "generation": { + "type": "string", + "description": "The content generation of the object.", + "format": "int64" + }, + "id": { + "type": "string", + "description": "The ID of the access-control entry." + }, + "kind": { + "type": "string", + "description": "The kind of item this is. For object access control entries, this is always storage#objectAccessControl.", + "default": "storage#objectAccessControl" + }, + "object": { + "type": "string", + "description": "The name of the object." + }, + "projectTeam": { + "type": "object", + "description": "The project team associated with the entity, if any.", + "properties": { + "projectNumber": { + "type": "string", + "description": "The project number." + }, + "team": { + "type": "string", + "description": "The team. Can be owners, editors, or viewers." + } + } + }, + "role": { + "type": "string", + "description": "The access permission for the entity. Can be READER or OWNER." + }, + "selfLink": { + "type": "string", + "description": "The link to this access-control entry." 
+ } + } + }, + "ObjectAccessControls": { + "id": "ObjectAccessControls", + "type": "object", + "description": "An access-control list.", + "properties": { + "items": { + "type": "array", + "description": "The list of items.", + "items": { + "type": "any" + } + }, + "kind": { + "type": "string", + "description": "The kind of item this is. For lists of object access control entries, this is always storage#objectAccessControls.", + "default": "storage#objectAccessControls" + } + } + }, + "Objects": { + "id": "Objects", + "type": "object", + "description": "A list of objects.", + "properties": { + "items": { + "type": "array", + "description": "The list of items.", + "items": { + "$ref": "Object" + } + }, + "kind": { + "type": "string", + "description": "The kind of item this is. For lists of objects, this is always storage#objects.", + "default": "storage#objects" + }, + "nextPageToken": { + "type": "string", + "description": "The continuation token, used to page through large result sets. Provide this value in a subsequent request to return the next page of results." + }, + "prefixes": { + "type": "array", + "description": "The list of prefixes of objects matching-but-not-listed up to and including the requested delimiter.", + "items": { + "type": "string" + } + } + } + }, + "RewriteResponse": { + "id": "RewriteResponse", + "type": "object", + "description": "A rewrite response.", + "properties": { + "done": { + "type": "boolean", + "description": "true if the copy is finished; otherwise, false if the copy is in progress. This property is always present in the response." + }, + "kind": { + "type": "string", + "description": "The kind of item this is.", + "default": "storage#rewriteResponse" + }, + "objectSize": { + "type": "string", + "description": "The total size of the object being copied in bytes. This property is always present in the response.", + "format": "uint64" + }, + "resource": { + "$ref": "Object", + "description": "A resource containing the metadata for the copied-to object. This property is present in the response only when copying completes." + }, + "rewriteToken": { + "type": "string", + "description": "A token to use in subsequent requests to continue copying data. This token is present in the response only when there is more data to copy." + }, + "totalBytesRewritten": { + "type": "string", + "description": "The total bytes written so far, which can be used to provide a waiting user with a progress indicator. This property is always present in the response.", + "format": "uint64" + } + } + } + }, + "resources": { + "bucketAccessControls": { + "methods": { + "delete": { + "id": "storage.bucketAccessControls.delete", + "path": "b/{bucket}/acl/{entity}", + "httpMethod": "DELETE", + "description": "Permanently deletes the ACL entry for the specified entity on the specified bucket.", + "parameters": { + "bucket": { + "type": "string", + "description": "Name of a bucket.", + "required": true, + "location": "path" + }, + "entity": { + "type": "string", + "description": "The entity holding the permission. 
Can be user-userId, user-emailAddress, group-groupId, group-emailAddress, allUsers, or allAuthenticatedUsers.", + "required": true, + "location": "path" + } + }, + "parameterOrder": [ + "bucket", + "entity" + ], + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/devstorage.full_control" + ] + }, + "get": { + "id": "storage.bucketAccessControls.get", + "path": "b/{bucket}/acl/{entity}", + "httpMethod": "GET", + "description": "Returns the ACL entry for the specified entity on the specified bucket.", + "parameters": { + "bucket": { + "type": "string", + "description": "Name of a bucket.", + "required": true, + "location": "path" + }, + "entity": { + "type": "string", + "description": "The entity holding the permission. Can be user-userId, user-emailAddress, group-groupId, group-emailAddress, allUsers, or allAuthenticatedUsers.", + "required": true, + "location": "path" + } + }, + "parameterOrder": [ + "bucket", + "entity" + ], + "response": { + "$ref": "BucketAccessControl" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/devstorage.full_control" + ] + }, + "insert": { + "id": "storage.bucketAccessControls.insert", + "path": "b/{bucket}/acl", + "httpMethod": "POST", + "description": "Creates a new ACL entry on the specified bucket.", + "parameters": { + "bucket": { + "type": "string", + "description": "Name of a bucket.", + "required": true, + "location": "path" + } + }, + "parameterOrder": [ + "bucket" + ], + "request": { + "$ref": "BucketAccessControl" + }, + "response": { + "$ref": "BucketAccessControl" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/devstorage.full_control" + ] + }, + "list": { + "id": "storage.bucketAccessControls.list", + "path": "b/{bucket}/acl", + "httpMethod": "GET", + "description": "Retrieves ACL entries on the specified bucket.", + "parameters": { + "bucket": { + "type": "string", + "description": "Name of a bucket.", + "required": true, + "location": "path" + } + }, + "parameterOrder": [ + "bucket" + ], + "response": { + "$ref": "BucketAccessControls" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/devstorage.full_control" + ] + }, + "patch": { + "id": "storage.bucketAccessControls.patch", + "path": "b/{bucket}/acl/{entity}", + "httpMethod": "PATCH", + "description": "Updates an ACL entry on the specified bucket. This method supports patch semantics.", + "parameters": { + "bucket": { + "type": "string", + "description": "Name of a bucket.", + "required": true, + "location": "path" + }, + "entity": { + "type": "string", + "description": "The entity holding the permission. 
Can be user-userId, user-emailAddress, group-groupId, group-emailAddress, allUsers, or allAuthenticatedUsers.", + "required": true, + "location": "path" + } + }, + "parameterOrder": [ + "bucket", + "entity" + ], + "request": { + "$ref": "BucketAccessControl" + }, + "response": { + "$ref": "BucketAccessControl" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/devstorage.full_control" + ] + }, + "update": { + "id": "storage.bucketAccessControls.update", + "path": "b/{bucket}/acl/{entity}", + "httpMethod": "PUT", + "description": "Updates an ACL entry on the specified bucket.", + "parameters": { + "bucket": { + "type": "string", + "description": "Name of a bucket.", + "required": true, + "location": "path" + }, + "entity": { + "type": "string", + "description": "The entity holding the permission. Can be user-userId, user-emailAddress, group-groupId, group-emailAddress, allUsers, or allAuthenticatedUsers.", + "required": true, + "location": "path" + } + }, + "parameterOrder": [ + "bucket", + "entity" + ], + "request": { + "$ref": "BucketAccessControl" + }, + "response": { + "$ref": "BucketAccessControl" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/devstorage.full_control" + ] + } + } + }, + "buckets": { + "methods": { + "delete": { + "id": "storage.buckets.delete", + "path": "b/{bucket}", + "httpMethod": "DELETE", + "description": "Permanently deletes an empty bucket.", + "parameters": { + "bucket": { + "type": "string", + "description": "Name of a bucket.", + "required": true, + "location": "path" + }, + "ifMetagenerationMatch": { + "type": "string", + "description": "If set, only deletes the bucket if its metageneration matches this value.", + "format": "int64", + "location": "query" + }, + "ifMetagenerationNotMatch": { + "type": "string", + "description": "If set, only deletes the bucket if its metageneration does not match this value.", + "format": "int64", + "location": "query" + } + }, + "parameterOrder": [ + "bucket" + ], + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/devstorage.full_control", + "https://www.googleapis.com/auth/devstorage.read_write" + ] + }, + "get": { + "id": "storage.buckets.get", + "path": "b/{bucket}", + "httpMethod": "GET", + "description": "Returns metadata for the specified bucket.", + "parameters": { + "bucket": { + "type": "string", + "description": "Name of a bucket.", + "required": true, + "location": "path" + }, + "ifMetagenerationMatch": { + "type": "string", + "description": "Makes the return of the bucket metadata conditional on whether the bucket's current metageneration matches the given value.", + "format": "int64", + "location": "query" + }, + "ifMetagenerationNotMatch": { + "type": "string", + "description": "Makes the return of the bucket metadata conditional on whether the bucket's current metageneration does not match the given value.", + "format": "int64", + "location": "query" + }, + "projection": { + "type": "string", + "description": "Set of properties to return. Defaults to noAcl.", + "enum": [ + "full", + "noAcl" + ], + "enumDescriptions": [ + "Include all properties.", + "Omit acl and defaultObjectAcl properties." 
+ ], + "location": "query" + } + }, + "parameterOrder": [ + "bucket" + ], + "response": { + "$ref": "Bucket" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/cloud-platform.read-only", + "https://www.googleapis.com/auth/devstorage.full_control", + "https://www.googleapis.com/auth/devstorage.read_only", + "https://www.googleapis.com/auth/devstorage.read_write" + ] + }, + "insert": { + "id": "storage.buckets.insert", + "path": "b", + "httpMethod": "POST", + "description": "Creates a new bucket.", + "parameters": { + "predefinedAcl": { + "type": "string", + "description": "Apply a predefined set of access controls to this bucket.", + "enum": [ + "authenticatedRead", + "private", + "projectPrivate", + "publicRead", + "publicReadWrite" + ], + "enumDescriptions": [ + "Project team owners get OWNER access, and allAuthenticatedUsers get READER access.", + "Project team owners get OWNER access.", + "Project team members get access according to their roles.", + "Project team owners get OWNER access, and allUsers get READER access.", + "Project team owners get OWNER access, and allUsers get WRITER access." + ], + "location": "query" + }, + "predefinedDefaultObjectAcl": { + "type": "string", + "description": "Apply a predefined set of default object access controls to this bucket.", + "enum": [ + "authenticatedRead", + "bucketOwnerFullControl", + "bucketOwnerRead", + "private", + "projectPrivate", + "publicRead" + ], + "enumDescriptions": [ + "Object owner gets OWNER access, and allAuthenticatedUsers get READER access.", + "Object owner gets OWNER access, and project team owners get OWNER access.", + "Object owner gets OWNER access, and project team owners get READER access.", + "Object owner gets OWNER access.", + "Object owner gets OWNER access, and project team members get access according to their roles.", + "Object owner gets OWNER access, and allUsers get READER access." + ], + "location": "query" + }, + "project": { + "type": "string", + "description": "A valid API project identifier.", + "required": true, + "location": "query" + }, + "projection": { + "type": "string", + "description": "Set of properties to return. Defaults to noAcl, unless the bucket resource specifies acl or defaultObjectAcl properties, when it defaults to full.", + "enum": [ + "full", + "noAcl" + ], + "enumDescriptions": [ + "Include all properties.", + "Omit acl and defaultObjectAcl properties." 
+ ], + "location": "query" + } + }, + "parameterOrder": [ + "project" + ], + "request": { + "$ref": "Bucket" + }, + "response": { + "$ref": "Bucket" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/devstorage.full_control", + "https://www.googleapis.com/auth/devstorage.read_write" + ] + }, + "list": { + "id": "storage.buckets.list", + "path": "b", + "httpMethod": "GET", + "description": "Retrieves a list of buckets for a given project.", + "parameters": { + "maxResults": { + "type": "integer", + "description": "Maximum number of buckets to return.", + "format": "uint32", + "minimum": "0", + "location": "query" + }, + "pageToken": { + "type": "string", + "description": "A previously-returned page token representing part of the larger set of results to view.", + "location": "query" + }, + "prefix": { + "type": "string", + "description": "Filter results to buckets whose names begin with this prefix.", + "location": "query" + }, + "project": { + "type": "string", + "description": "A valid API project identifier.", + "required": true, + "location": "query" + }, + "projection": { + "type": "string", + "description": "Set of properties to return. Defaults to noAcl.", + "enum": [ + "full", + "noAcl" + ], + "enumDescriptions": [ + "Include all properties.", + "Omit acl and defaultObjectAcl properties." + ], + "location": "query" + } + }, + "parameterOrder": [ + "project" + ], + "response": { + "$ref": "Buckets" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/cloud-platform.read-only", + "https://www.googleapis.com/auth/devstorage.full_control", + "https://www.googleapis.com/auth/devstorage.read_only", + "https://www.googleapis.com/auth/devstorage.read_write" + ] + }, + "patch": { + "id": "storage.buckets.patch", + "path": "b/{bucket}", + "httpMethod": "PATCH", + "description": "Updates a bucket. This method supports patch semantics.", + "parameters": { + "bucket": { + "type": "string", + "description": "Name of a bucket.", + "required": true, + "location": "path" + }, + "ifMetagenerationMatch": { + "type": "string", + "description": "Makes the return of the bucket metadata conditional on whether the bucket's current metageneration matches the given value.", + "format": "int64", + "location": "query" + }, + "ifMetagenerationNotMatch": { + "type": "string", + "description": "Makes the return of the bucket metadata conditional on whether the bucket's current metageneration does not match the given value.", + "format": "int64", + "location": "query" + }, + "predefinedAcl": { + "type": "string", + "description": "Apply a predefined set of access controls to this bucket.", + "enum": [ + "authenticatedRead", + "private", + "projectPrivate", + "publicRead", + "publicReadWrite" + ], + "enumDescriptions": [ + "Project team owners get OWNER access, and allAuthenticatedUsers get READER access.", + "Project team owners get OWNER access.", + "Project team members get access according to their roles.", + "Project team owners get OWNER access, and allUsers get READER access.", + "Project team owners get OWNER access, and allUsers get WRITER access." 
+ ], + "location": "query" + }, + "predefinedDefaultObjectAcl": { + "type": "string", + "description": "Apply a predefined set of default object access controls to this bucket.", + "enum": [ + "authenticatedRead", + "bucketOwnerFullControl", + "bucketOwnerRead", + "private", + "projectPrivate", + "publicRead" + ], + "enumDescriptions": [ + "Object owner gets OWNER access, and allAuthenticatedUsers get READER access.", + "Object owner gets OWNER access, and project team owners get OWNER access.", + "Object owner gets OWNER access, and project team owners get READER access.", + "Object owner gets OWNER access.", + "Object owner gets OWNER access, and project team members get access according to their roles.", + "Object owner gets OWNER access, and allUsers get READER access." + ], + "location": "query" + }, + "projection": { + "type": "string", + "description": "Set of properties to return. Defaults to full.", + "enum": [ + "full", + "noAcl" + ], + "enumDescriptions": [ + "Include all properties.", + "Omit acl and defaultObjectAcl properties." + ], + "location": "query" + } + }, + "parameterOrder": [ + "bucket" + ], + "request": { + "$ref": "Bucket" + }, + "response": { + "$ref": "Bucket" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/devstorage.full_control", + "https://www.googleapis.com/auth/devstorage.read_write" + ] + }, + "update": { + "id": "storage.buckets.update", + "path": "b/{bucket}", + "httpMethod": "PUT", + "description": "Updates a bucket.", + "parameters": { + "bucket": { + "type": "string", + "description": "Name of a bucket.", + "required": true, + "location": "path" + }, + "ifMetagenerationMatch": { + "type": "string", + "description": "Makes the return of the bucket metadata conditional on whether the bucket's current metageneration matches the given value.", + "format": "int64", + "location": "query" + }, + "ifMetagenerationNotMatch": { + "type": "string", + "description": "Makes the return of the bucket metadata conditional on whether the bucket's current metageneration does not match the given value.", + "format": "int64", + "location": "query" + }, + "predefinedAcl": { + "type": "string", + "description": "Apply a predefined set of access controls to this bucket.", + "enum": [ + "authenticatedRead", + "private", + "projectPrivate", + "publicRead", + "publicReadWrite" + ], + "enumDescriptions": [ + "Project team owners get OWNER access, and allAuthenticatedUsers get READER access.", + "Project team owners get OWNER access.", + "Project team members get access according to their roles.", + "Project team owners get OWNER access, and allUsers get READER access.", + "Project team owners get OWNER access, and allUsers get WRITER access." + ], + "location": "query" + }, + "predefinedDefaultObjectAcl": { + "type": "string", + "description": "Apply a predefined set of default object access controls to this bucket.", + "enum": [ + "authenticatedRead", + "bucketOwnerFullControl", + "bucketOwnerRead", + "private", + "projectPrivate", + "publicRead" + ], + "enumDescriptions": [ + "Object owner gets OWNER access, and allAuthenticatedUsers get READER access.", + "Object owner gets OWNER access, and project team owners get OWNER access.", + "Object owner gets OWNER access, and project team owners get READER access.", + "Object owner gets OWNER access.", + "Object owner gets OWNER access, and project team members get access according to their roles.", + "Object owner gets OWNER access, and allUsers get READER access." 
+ ], + "location": "query" + }, + "projection": { + "type": "string", + "description": "Set of properties to return. Defaults to full.", + "enum": [ + "full", + "noAcl" + ], + "enumDescriptions": [ + "Include all properties.", + "Omit acl and defaultObjectAcl properties." + ], + "location": "query" + } + }, + "parameterOrder": [ + "bucket" + ], + "request": { + "$ref": "Bucket" + }, + "response": { + "$ref": "Bucket" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/devstorage.full_control", + "https://www.googleapis.com/auth/devstorage.read_write" + ] + } + } + }, + "channels": { + "methods": { + "stop": { + "id": "storage.channels.stop", + "path": "channels/stop", + "httpMethod": "POST", + "description": "Stop watching resources through this channel", + "request": { + "$ref": "Channel", + "parameterName": "resource" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/cloud-platform.read-only", + "https://www.googleapis.com/auth/devstorage.full_control", + "https://www.googleapis.com/auth/devstorage.read_only", + "https://www.googleapis.com/auth/devstorage.read_write" + ] + } + } + }, + "defaultObjectAccessControls": { + "methods": { + "delete": { + "id": "storage.defaultObjectAccessControls.delete", + "path": "b/{bucket}/defaultObjectAcl/{entity}", + "httpMethod": "DELETE", + "description": "Permanently deletes the default object ACL entry for the specified entity on the specified bucket.", + "parameters": { + "bucket": { + "type": "string", + "description": "Name of a bucket.", + "required": true, + "location": "path" + }, + "entity": { + "type": "string", + "description": "The entity holding the permission. Can be user-userId, user-emailAddress, group-groupId, group-emailAddress, allUsers, or allAuthenticatedUsers.", + "required": true, + "location": "path" + } + }, + "parameterOrder": [ + "bucket", + "entity" + ], + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/devstorage.full_control" + ] + }, + "get": { + "id": "storage.defaultObjectAccessControls.get", + "path": "b/{bucket}/defaultObjectAcl/{entity}", + "httpMethod": "GET", + "description": "Returns the default object ACL entry for the specified entity on the specified bucket.", + "parameters": { + "bucket": { + "type": "string", + "description": "Name of a bucket.", + "required": true, + "location": "path" + }, + "entity": { + "type": "string", + "description": "The entity holding the permission. 
Can be user-userId, user-emailAddress, group-groupId, group-emailAddress, allUsers, or allAuthenticatedUsers.", + "required": true, + "location": "path" + } + }, + "parameterOrder": [ + "bucket", + "entity" + ], + "response": { + "$ref": "ObjectAccessControl" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/devstorage.full_control" + ] + }, + "insert": { + "id": "storage.defaultObjectAccessControls.insert", + "path": "b/{bucket}/defaultObjectAcl", + "httpMethod": "POST", + "description": "Creates a new default object ACL entry on the specified bucket.", + "parameters": { + "bucket": { + "type": "string", + "description": "Name of a bucket.", + "required": true, + "location": "path" + } + }, + "parameterOrder": [ + "bucket" + ], + "request": { + "$ref": "ObjectAccessControl" + }, + "response": { + "$ref": "ObjectAccessControl" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/devstorage.full_control" + ] + }, + "list": { + "id": "storage.defaultObjectAccessControls.list", + "path": "b/{bucket}/defaultObjectAcl", + "httpMethod": "GET", + "description": "Retrieves default object ACL entries on the specified bucket.", + "parameters": { + "bucket": { + "type": "string", + "description": "Name of a bucket.", + "required": true, + "location": "path" + }, + "ifMetagenerationMatch": { + "type": "string", + "description": "If present, only return default ACL listing if the bucket's current metageneration matches this value.", + "format": "int64", + "location": "query" + }, + "ifMetagenerationNotMatch": { + "type": "string", + "description": "If present, only return default ACL listing if the bucket's current metageneration does not match the given value.", + "format": "int64", + "location": "query" + } + }, + "parameterOrder": [ + "bucket" + ], + "response": { + "$ref": "ObjectAccessControls" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/devstorage.full_control" + ] + }, + "patch": { + "id": "storage.defaultObjectAccessControls.patch", + "path": "b/{bucket}/defaultObjectAcl/{entity}", + "httpMethod": "PATCH", + "description": "Updates a default object ACL entry on the specified bucket. This method supports patch semantics.", + "parameters": { + "bucket": { + "type": "string", + "description": "Name of a bucket.", + "required": true, + "location": "path" + }, + "entity": { + "type": "string", + "description": "The entity holding the permission. Can be user-userId, user-emailAddress, group-groupId, group-emailAddress, allUsers, or allAuthenticatedUsers.", + "required": true, + "location": "path" + } + }, + "parameterOrder": [ + "bucket", + "entity" + ], + "request": { + "$ref": "ObjectAccessControl" + }, + "response": { + "$ref": "ObjectAccessControl" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/devstorage.full_control" + ] + }, + "update": { + "id": "storage.defaultObjectAccessControls.update", + "path": "b/{bucket}/defaultObjectAcl/{entity}", + "httpMethod": "PUT", + "description": "Updates a default object ACL entry on the specified bucket.", + "parameters": { + "bucket": { + "type": "string", + "description": "Name of a bucket.", + "required": true, + "location": "path" + }, + "entity": { + "type": "string", + "description": "The entity holding the permission. 
Can be user-userId, user-emailAddress, group-groupId, group-emailAddress, allUsers, or allAuthenticatedUsers.", + "required": true, + "location": "path" + } + }, + "parameterOrder": [ + "bucket", + "entity" + ], + "request": { + "$ref": "ObjectAccessControl" + }, + "response": { + "$ref": "ObjectAccessControl" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/devstorage.full_control" + ] + } + } + }, + "objectAccessControls": { + "methods": { + "delete": { + "id": "storage.objectAccessControls.delete", + "path": "b/{bucket}/o/{object}/acl/{entity}", + "httpMethod": "DELETE", + "description": "Permanently deletes the ACL entry for the specified entity on the specified object.", + "parameters": { + "bucket": { + "type": "string", + "description": "Name of a bucket.", + "required": true, + "location": "path" + }, + "entity": { + "type": "string", + "description": "The entity holding the permission. Can be user-userId, user-emailAddress, group-groupId, group-emailAddress, allUsers, or allAuthenticatedUsers.", + "required": true, + "location": "path" + }, + "generation": { + "type": "string", + "description": "If present, selects a specific revision of this object (as opposed to the latest version, the default).", + "format": "int64", + "location": "query" + }, + "object": { + "type": "string", + "description": "Name of the object. For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.", + "required": true, + "location": "path" + } + }, + "parameterOrder": [ + "bucket", + "object", + "entity" + ], + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/devstorage.full_control" + ] + }, + "get": { + "id": "storage.objectAccessControls.get", + "path": "b/{bucket}/o/{object}/acl/{entity}", + "httpMethod": "GET", + "description": "Returns the ACL entry for the specified entity on the specified object.", + "parameters": { + "bucket": { + "type": "string", + "description": "Name of a bucket.", + "required": true, + "location": "path" + }, + "entity": { + "type": "string", + "description": "The entity holding the permission. Can be user-userId, user-emailAddress, group-groupId, group-emailAddress, allUsers, or allAuthenticatedUsers.", + "required": true, + "location": "path" + }, + "generation": { + "type": "string", + "description": "If present, selects a specific revision of this object (as opposed to the latest version, the default).", + "format": "int64", + "location": "query" + }, + "object": { + "type": "string", + "description": "Name of the object. 
For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.", + "required": true, + "location": "path" + } + }, + "parameterOrder": [ + "bucket", + "object", + "entity" + ], + "response": { + "$ref": "ObjectAccessControl" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/devstorage.full_control" + ] + }, + "insert": { + "id": "storage.objectAccessControls.insert", + "path": "b/{bucket}/o/{object}/acl", + "httpMethod": "POST", + "description": "Creates a new ACL entry on the specified object.", + "parameters": { + "bucket": { + "type": "string", + "description": "Name of a bucket.", + "required": true, + "location": "path" + }, + "generation": { + "type": "string", + "description": "If present, selects a specific revision of this object (as opposed to the latest version, the default).", + "format": "int64", + "location": "query" + }, + "object": { + "type": "string", + "description": "Name of the object. For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.", + "required": true, + "location": "path" + } + }, + "parameterOrder": [ + "bucket", + "object" + ], + "request": { + "$ref": "ObjectAccessControl" + }, + "response": { + "$ref": "ObjectAccessControl" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/devstorage.full_control" + ] + }, + "list": { + "id": "storage.objectAccessControls.list", + "path": "b/{bucket}/o/{object}/acl", + "httpMethod": "GET", + "description": "Retrieves ACL entries on the specified object.", + "parameters": { + "bucket": { + "type": "string", + "description": "Name of a bucket.", + "required": true, + "location": "path" + }, + "generation": { + "type": "string", + "description": "If present, selects a specific revision of this object (as opposed to the latest version, the default).", + "format": "int64", + "location": "query" + }, + "object": { + "type": "string", + "description": "Name of the object. For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.", + "required": true, + "location": "path" + } + }, + "parameterOrder": [ + "bucket", + "object" + ], + "response": { + "$ref": "ObjectAccessControls" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/devstorage.full_control" + ] + }, + "patch": { + "id": "storage.objectAccessControls.patch", + "path": "b/{bucket}/o/{object}/acl/{entity}", + "httpMethod": "PATCH", + "description": "Updates an ACL entry on the specified object. This method supports patch semantics.", + "parameters": { + "bucket": { + "type": "string", + "description": "Name of a bucket.", + "required": true, + "location": "path" + }, + "entity": { + "type": "string", + "description": "The entity holding the permission. Can be user-userId, user-emailAddress, group-groupId, group-emailAddress, allUsers, or allAuthenticatedUsers.", + "required": true, + "location": "path" + }, + "generation": { + "type": "string", + "description": "If present, selects a specific revision of this object (as opposed to the latest version, the default).", + "format": "int64", + "location": "query" + }, + "object": { + "type": "string", + "description": "Name of the object. 
For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.", + "required": true, + "location": "path" + } + }, + "parameterOrder": [ + "bucket", + "object", + "entity" + ], + "request": { + "$ref": "ObjectAccessControl" + }, + "response": { + "$ref": "ObjectAccessControl" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/devstorage.full_control" + ] + }, + "update": { + "id": "storage.objectAccessControls.update", + "path": "b/{bucket}/o/{object}/acl/{entity}", + "httpMethod": "PUT", + "description": "Updates an ACL entry on the specified object.", + "parameters": { + "bucket": { + "type": "string", + "description": "Name of a bucket.", + "required": true, + "location": "path" + }, + "entity": { + "type": "string", + "description": "The entity holding the permission. Can be user-userId, user-emailAddress, group-groupId, group-emailAddress, allUsers, or allAuthenticatedUsers.", + "required": true, + "location": "path" + }, + "generation": { + "type": "string", + "description": "If present, selects a specific revision of this object (as opposed to the latest version, the default).", + "format": "int64", + "location": "query" + }, + "object": { + "type": "string", + "description": "Name of the object. For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.", + "required": true, + "location": "path" + } + }, + "parameterOrder": [ + "bucket", + "object", + "entity" + ], + "request": { + "$ref": "ObjectAccessControl" + }, + "response": { + "$ref": "ObjectAccessControl" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/devstorage.full_control" + ] + } + } + }, + "objects": { + "methods": { + "compose": { + "id": "storage.objects.compose", + "path": "b/{destinationBucket}/o/{destinationObject}/compose", + "httpMethod": "POST", + "description": "Concatenates a list of existing objects into a new object in the same bucket.", + "parameters": { + "destinationBucket": { + "type": "string", + "description": "Name of the bucket in which to store the new object.", + "required": true, + "location": "path" + }, + "destinationObject": { + "type": "string", + "description": "Name of the new object. For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.", + "required": true, + "location": "path" + }, + "destinationPredefinedAcl": { + "type": "string", + "description": "Apply a predefined set of access controls to the destination object.", + "enum": [ + "authenticatedRead", + "bucketOwnerFullControl", + "bucketOwnerRead", + "private", + "projectPrivate", + "publicRead" + ], + "enumDescriptions": [ + "Object owner gets OWNER access, and allAuthenticatedUsers get READER access.", + "Object owner gets OWNER access, and project team owners get OWNER access.", + "Object owner gets OWNER access, and project team owners get READER access.", + "Object owner gets OWNER access.", + "Object owner gets OWNER access, and project team members get access according to their roles.", + "Object owner gets OWNER access, and allUsers get READER access." 
+ ], + "location": "query" + }, + "ifGenerationMatch": { + "type": "string", + "description": "Makes the operation conditional on whether the object's current generation matches the given value.", + "format": "int64", + "location": "query" + }, + "ifMetagenerationMatch": { + "type": "string", + "description": "Makes the operation conditional on whether the object's current metageneration matches the given value.", + "format": "int64", + "location": "query" + } + }, + "parameterOrder": [ + "destinationBucket", + "destinationObject" + ], + "request": { + "$ref": "ComposeRequest" + }, + "response": { + "$ref": "Object" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/devstorage.full_control", + "https://www.googleapis.com/auth/devstorage.read_write" + ], + "supportsMediaDownload": true, + "useMediaDownloadService": true + }, + "copy": { + "id": "storage.objects.copy", + "path": "b/{sourceBucket}/o/{sourceObject}/copyTo/b/{destinationBucket}/o/{destinationObject}", + "httpMethod": "POST", + "description": "Copies a source object to a destination object. Optionally overrides metadata.", + "parameters": { + "destinationBucket": { + "type": "string", + "description": "Name of the bucket in which to store the new object. Overrides the provided object metadata's bucket value, if any.For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.", + "required": true, + "location": "path" + }, + "destinationObject": { + "type": "string", + "description": "Name of the new object. Required when the object metadata is not otherwise provided. Overrides the object metadata's name value, if any.", + "required": true, + "location": "path" + }, + "destinationPredefinedAcl": { + "type": "string", + "description": "Apply a predefined set of access controls to the destination object.", + "enum": [ + "authenticatedRead", + "bucketOwnerFullControl", + "bucketOwnerRead", + "private", + "projectPrivate", + "publicRead" + ], + "enumDescriptions": [ + "Object owner gets OWNER access, and allAuthenticatedUsers get READER access.", + "Object owner gets OWNER access, and project team owners get OWNER access.", + "Object owner gets OWNER access, and project team owners get READER access.", + "Object owner gets OWNER access.", + "Object owner gets OWNER access, and project team members get access according to their roles.", + "Object owner gets OWNER access, and allUsers get READER access." 
+ ], + "location": "query" + }, + "ifGenerationMatch": { + "type": "string", + "description": "Makes the operation conditional on whether the destination object's current generation matches the given value.", + "format": "int64", + "location": "query" + }, + "ifGenerationNotMatch": { + "type": "string", + "description": "Makes the operation conditional on whether the destination object's current generation does not match the given value.", + "format": "int64", + "location": "query" + }, + "ifMetagenerationMatch": { + "type": "string", + "description": "Makes the operation conditional on whether the destination object's current metageneration matches the given value.", + "format": "int64", + "location": "query" + }, + "ifMetagenerationNotMatch": { + "type": "string", + "description": "Makes the operation conditional on whether the destination object's current metageneration does not match the given value.", + "format": "int64", + "location": "query" + }, + "ifSourceGenerationMatch": { + "type": "string", + "description": "Makes the operation conditional on whether the source object's generation matches the given value.", + "format": "int64", + "location": "query" + }, + "ifSourceGenerationNotMatch": { + "type": "string", + "description": "Makes the operation conditional on whether the source object's generation does not match the given value.", + "format": "int64", + "location": "query" + }, + "ifSourceMetagenerationMatch": { + "type": "string", + "description": "Makes the operation conditional on whether the source object's current metageneration matches the given value.", + "format": "int64", + "location": "query" + }, + "ifSourceMetagenerationNotMatch": { + "type": "string", + "description": "Makes the operation conditional on whether the source object's current metageneration does not match the given value.", + "format": "int64", + "location": "query" + }, + "projection": { + "type": "string", + "description": "Set of properties to return. Defaults to noAcl, unless the object resource specifies the acl property, when it defaults to full.", + "enum": [ + "full", + "noAcl" + ], + "enumDescriptions": [ + "Include all properties.", + "Omit the acl property." + ], + "location": "query" + }, + "sourceBucket": { + "type": "string", + "description": "Name of the bucket in which to find the source object.", + "required": true, + "location": "path" + }, + "sourceGeneration": { + "type": "string", + "description": "If present, selects a specific revision of the source object (as opposed to the latest version, the default).", + "format": "int64", + "location": "query" + }, + "sourceObject": { + "type": "string", + "description": "Name of the source object. For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.", + "required": true, + "location": "path" + } + }, + "parameterOrder": [ + "sourceBucket", + "sourceObject", + "destinationBucket", + "destinationObject" + ], + "request": { + "$ref": "Object" + }, + "response": { + "$ref": "Object" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/devstorage.full_control", + "https://www.googleapis.com/auth/devstorage.read_write" + ], + "supportsMediaDownload": true, + "useMediaDownloadService": true + }, + "delete": { + "id": "storage.objects.delete", + "path": "b/{bucket}/o/{object}", + "httpMethod": "DELETE", + "description": "Deletes an object and its metadata. 
Deletions are permanent if versioning is not enabled for the bucket, or if the generation parameter is used.", + "parameters": { + "bucket": { + "type": "string", + "description": "Name of the bucket in which the object resides.", + "required": true, + "location": "path" + }, + "generation": { + "type": "string", + "description": "If present, permanently deletes a specific revision of this object (as opposed to the latest version, the default).", + "format": "int64", + "location": "query" + }, + "ifGenerationMatch": { + "type": "string", + "description": "Makes the operation conditional on whether the object's current generation matches the given value.", + "format": "int64", + "location": "query" + }, + "ifGenerationNotMatch": { + "type": "string", + "description": "Makes the operation conditional on whether the object's current generation does not match the given value.", + "format": "int64", + "location": "query" + }, + "ifMetagenerationMatch": { + "type": "string", + "description": "Makes the operation conditional on whether the object's current metageneration matches the given value.", + "format": "int64", + "location": "query" + }, + "ifMetagenerationNotMatch": { + "type": "string", + "description": "Makes the operation conditional on whether the object's current metageneration does not match the given value.", + "format": "int64", + "location": "query" + }, + "object": { + "type": "string", + "description": "Name of the object. For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.", + "required": true, + "location": "path" + } + }, + "parameterOrder": [ + "bucket", + "object" + ], + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/devstorage.full_control", + "https://www.googleapis.com/auth/devstorage.read_write" + ] + }, + "get": { + "id": "storage.objects.get", + "path": "b/{bucket}/o/{object}", + "httpMethod": "GET", + "description": "Retrieves an object or its metadata.", + "parameters": { + "bucket": { + "type": "string", + "description": "Name of the bucket in which the object resides.", + "required": true, + "location": "path" + }, + "generation": { + "type": "string", + "description": "If present, selects a specific revision of this object (as opposed to the latest version, the default).", + "format": "int64", + "location": "query" + }, + "ifGenerationMatch": { + "type": "string", + "description": "Makes the operation conditional on whether the object's generation matches the given value.", + "format": "int64", + "location": "query" + }, + "ifGenerationNotMatch": { + "type": "string", + "description": "Makes the operation conditional on whether the object's generation does not match the given value.", + "format": "int64", + "location": "query" + }, + "ifMetagenerationMatch": { + "type": "string", + "description": "Makes the operation conditional on whether the object's current metageneration matches the given value.", + "format": "int64", + "location": "query" + }, + "ifMetagenerationNotMatch": { + "type": "string", + "description": "Makes the operation conditional on whether the object's current metageneration does not match the given value.", + "format": "int64", + "location": "query" + }, + "object": { + "type": "string", + "description": "Name of the object. 
For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.", + "required": true, + "location": "path" + }, + "projection": { + "type": "string", + "description": "Set of properties to return. Defaults to noAcl.", + "enum": [ + "full", + "noAcl" + ], + "enumDescriptions": [ + "Include all properties.", + "Omit the acl property." + ], + "location": "query" + } + }, + "parameterOrder": [ + "bucket", + "object" + ], + "response": { + "$ref": "Object" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/cloud-platform.read-only", + "https://www.googleapis.com/auth/devstorage.full_control", + "https://www.googleapis.com/auth/devstorage.read_only", + "https://www.googleapis.com/auth/devstorage.read_write" + ], + "supportsMediaDownload": true, + "useMediaDownloadService": true + }, + "insert": { + "id": "storage.objects.insert", + "path": "b/{bucket}/o", + "httpMethod": "POST", + "description": "Stores a new object and metadata.", + "parameters": { + "bucket": { + "type": "string", + "description": "Name of the bucket in which to store the new object. Overrides the provided object metadata's bucket value, if any.", + "required": true, + "location": "path" + }, + "contentEncoding": { + "type": "string", + "description": "If set, sets the contentEncoding property of the final object to this value. Setting this parameter is equivalent to setting the contentEncoding metadata property. This can be useful when uploading an object with uploadType=media to indicate the encoding of the content being uploaded.", + "location": "query" + }, + "ifGenerationMatch": { + "type": "string", + "description": "Makes the operation conditional on whether the object's current generation matches the given value.", + "format": "int64", + "location": "query" + }, + "ifGenerationNotMatch": { + "type": "string", + "description": "Makes the operation conditional on whether the object's current generation does not match the given value.", + "format": "int64", + "location": "query" + }, + "ifMetagenerationMatch": { + "type": "string", + "description": "Makes the operation conditional on whether the object's current metageneration matches the given value.", + "format": "int64", + "location": "query" + }, + "ifMetagenerationNotMatch": { + "type": "string", + "description": "Makes the operation conditional on whether the object's current metageneration does not match the given value.", + "format": "int64", + "location": "query" + }, + "name": { + "type": "string", + "description": "Name of the object. Required when the object metadata is not otherwise provided. Overrides the object metadata's name value, if any. 
For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.", + "location": "query" + }, + "predefinedAcl": { + "type": "string", + "description": "Apply a predefined set of access controls to this object.", + "enum": [ + "authenticatedRead", + "bucketOwnerFullControl", + "bucketOwnerRead", + "private", + "projectPrivate", + "publicRead" + ], + "enumDescriptions": [ + "Object owner gets OWNER access, and allAuthenticatedUsers get READER access.", + "Object owner gets OWNER access, and project team owners get OWNER access.", + "Object owner gets OWNER access, and project team owners get READER access.", + "Object owner gets OWNER access.", + "Object owner gets OWNER access, and project team members get access according to their roles.", + "Object owner gets OWNER access, and allUsers get READER access." + ], + "location": "query" + }, + "projection": { + "type": "string", + "description": "Set of properties to return. Defaults to noAcl, unless the object resource specifies the acl property, when it defaults to full.", + "enum": [ + "full", + "noAcl" + ], + "enumDescriptions": [ + "Include all properties.", + "Omit the acl property." + ], + "location": "query" + } + }, + "parameterOrder": [ + "bucket" + ], + "request": { + "$ref": "Object" + }, + "response": { + "$ref": "Object" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/devstorage.full_control", + "https://www.googleapis.com/auth/devstorage.read_write" + ], + "supportsMediaDownload": true, + "useMediaDownloadService": true, + "supportsMediaUpload": true, + "mediaUpload": { + "accept": [ + "*/*" + ], + "protocols": { + "simple": { + "multipart": true, + "path": "/upload/storage/v1/b/{bucket}/o" + }, + "resumable": { + "multipart": true, + "path": "/resumable/upload/storage/v1/b/{bucket}/o" + } + } + } + }, + "list": { + "id": "storage.objects.list", + "path": "b/{bucket}/o", + "httpMethod": "GET", + "description": "Retrieves a list of objects matching the criteria.", + "parameters": { + "bucket": { + "type": "string", + "description": "Name of the bucket in which to look for objects.", + "required": true, + "location": "path" + }, + "delimiter": { + "type": "string", + "description": "Returns results in a directory-like mode. items will contain only objects whose names, aside from the prefix, do not contain delimiter. Objects whose names, aside from the prefix, contain delimiter will have their name, truncated after the delimiter, returned in prefixes. Duplicate prefixes are omitted.", + "location": "query" + }, + "maxResults": { + "type": "integer", + "description": "Maximum number of items plus prefixes to return. As duplicate prefixes are omitted, fewer total results may be returned than requested. The default value of this parameter is 1,000 items.", + "format": "uint32", + "minimum": "0", + "location": "query" + }, + "pageToken": { + "type": "string", + "description": "A previously-returned page token representing part of the larger set of results to view.", + "location": "query" + }, + "prefix": { + "type": "string", + "description": "Filter results to objects whose names begin with this prefix.", + "location": "query" + }, + "projection": { + "type": "string", + "description": "Set of properties to return. Defaults to noAcl.", + "enum": [ + "full", + "noAcl" + ], + "enumDescriptions": [ + "Include all properties.", + "Omit the acl property." 
+ ], + "location": "query" + }, + "versions": { + "type": "boolean", + "description": "If true, lists all versions of an object as distinct results. The default is false. For more information, see Object Versioning.", + "location": "query" + } + }, + "parameterOrder": [ + "bucket" + ], + "response": { + "$ref": "Objects" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/cloud-platform.read-only", + "https://www.googleapis.com/auth/devstorage.full_control", + "https://www.googleapis.com/auth/devstorage.read_only", + "https://www.googleapis.com/auth/devstorage.read_write" + ], + "supportsSubscription": true + }, + "patch": { + "id": "storage.objects.patch", + "path": "b/{bucket}/o/{object}", + "httpMethod": "PATCH", + "description": "Updates an object's metadata. This method supports patch semantics.", + "parameters": { + "bucket": { + "type": "string", + "description": "Name of the bucket in which the object resides.", + "required": true, + "location": "path" + }, + "generation": { + "type": "string", + "description": "If present, selects a specific revision of this object (as opposed to the latest version, the default).", + "format": "int64", + "location": "query" + }, + "ifGenerationMatch": { + "type": "string", + "description": "Makes the operation conditional on whether the object's current generation matches the given value.", + "format": "int64", + "location": "query" + }, + "ifGenerationNotMatch": { + "type": "string", + "description": "Makes the operation conditional on whether the object's current generation does not match the given value.", + "format": "int64", + "location": "query" + }, + "ifMetagenerationMatch": { + "type": "string", + "description": "Makes the operation conditional on whether the object's current metageneration matches the given value.", + "format": "int64", + "location": "query" + }, + "ifMetagenerationNotMatch": { + "type": "string", + "description": "Makes the operation conditional on whether the object's current metageneration does not match the given value.", + "format": "int64", + "location": "query" + }, + "object": { + "type": "string", + "description": "Name of the object. For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.", + "required": true, + "location": "path" + }, + "predefinedAcl": { + "type": "string", + "description": "Apply a predefined set of access controls to this object.", + "enum": [ + "authenticatedRead", + "bucketOwnerFullControl", + "bucketOwnerRead", + "private", + "projectPrivate", + "publicRead" + ], + "enumDescriptions": [ + "Object owner gets OWNER access, and allAuthenticatedUsers get READER access.", + "Object owner gets OWNER access, and project team owners get OWNER access.", + "Object owner gets OWNER access, and project team owners get READER access.", + "Object owner gets OWNER access.", + "Object owner gets OWNER access, and project team members get access according to their roles.", + "Object owner gets OWNER access, and allUsers get READER access." + ], + "location": "query" + }, + "projection": { + "type": "string", + "description": "Set of properties to return. Defaults to full.", + "enum": [ + "full", + "noAcl" + ], + "enumDescriptions": [ + "Include all properties.", + "Omit the acl property." 
+ ], + "location": "query" + } + }, + "parameterOrder": [ + "bucket", + "object" + ], + "request": { + "$ref": "Object" + }, + "response": { + "$ref": "Object" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/devstorage.full_control", + "https://www.googleapis.com/auth/devstorage.read_write" + ] + }, + "rewrite": { + "id": "storage.objects.rewrite", + "path": "b/{sourceBucket}/o/{sourceObject}/rewriteTo/b/{destinationBucket}/o/{destinationObject}", + "httpMethod": "POST", + "description": "Rewrites a source object to a destination object. Optionally overrides metadata.", + "parameters": { + "destinationBucket": { + "type": "string", + "description": "Name of the bucket in which to store the new object. Overrides the provided object metadata's bucket value, if any.", + "required": true, + "location": "path" + }, + "destinationObject": { + "type": "string", + "description": "Name of the new object. Required when the object metadata is not otherwise provided. Overrides the object metadata's name value, if any. For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.", + "required": true, + "location": "path" + }, + "destinationPredefinedAcl": { + "type": "string", + "description": "Apply a predefined set of access controls to the destination object.", + "enum": [ + "authenticatedRead", + "bucketOwnerFullControl", + "bucketOwnerRead", + "private", + "projectPrivate", + "publicRead" + ], + "enumDescriptions": [ + "Object owner gets OWNER access, and allAuthenticatedUsers get READER access.", + "Object owner gets OWNER access, and project team owners get OWNER access.", + "Object owner gets OWNER access, and project team owners get READER access.", + "Object owner gets OWNER access.", + "Object owner gets OWNER access, and project team members get access according to their roles.", + "Object owner gets OWNER access, and allUsers get READER access." 
+ ], + "location": "query" + }, + "ifGenerationMatch": { + "type": "string", + "description": "Makes the operation conditional on whether the destination object's current generation matches the given value.", + "format": "int64", + "location": "query" + }, + "ifGenerationNotMatch": { + "type": "string", + "description": "Makes the operation conditional on whether the destination object's current generation does not match the given value.", + "format": "int64", + "location": "query" + }, + "ifMetagenerationMatch": { + "type": "string", + "description": "Makes the operation conditional on whether the destination object's current metageneration matches the given value.", + "format": "int64", + "location": "query" + }, + "ifMetagenerationNotMatch": { + "type": "string", + "description": "Makes the operation conditional on whether the destination object's current metageneration does not match the given value.", + "format": "int64", + "location": "query" + }, + "ifSourceGenerationMatch": { + "type": "string", + "description": "Makes the operation conditional on whether the source object's generation matches the given value.", + "format": "int64", + "location": "query" + }, + "ifSourceGenerationNotMatch": { + "type": "string", + "description": "Makes the operation conditional on whether the source object's generation does not match the given value.", + "format": "int64", + "location": "query" + }, + "ifSourceMetagenerationMatch": { + "type": "string", + "description": "Makes the operation conditional on whether the source object's current metageneration matches the given value.", + "format": "int64", + "location": "query" + }, + "ifSourceMetagenerationNotMatch": { + "type": "string", + "description": "Makes the operation conditional on whether the source object's current metageneration does not match the given value.", + "format": "int64", + "location": "query" + }, + "maxBytesRewrittenPerCall": { + "type": "string", + "description": "The maximum number of bytes that will be rewritten per rewrite request. Most callers shouldn't need to specify this parameter - it is primarily in place to support testing. If specified the value must be an integral multiple of 1 MiB (1048576). Also, this only applies to requests where the source and destination span locations and/or storage classes. Finally, this value must not change across rewrite calls else you'll get an error that the rewriteToken is invalid.", + "format": "int64", + "location": "query" + }, + "projection": { + "type": "string", + "description": "Set of properties to return. Defaults to noAcl, unless the object resource specifies the acl property, when it defaults to full.", + "enum": [ + "full", + "noAcl" + ], + "enumDescriptions": [ + "Include all properties.", + "Omit the acl property." + ], + "location": "query" + }, + "rewriteToken": { + "type": "string", + "description": "Include this field (from the previous rewrite response) on each rewrite request after the first one, until the rewrite response 'done' flag is true. 
Calls that provide a rewriteToken can omit all other request fields, but if included those fields must match the values provided in the first rewrite request.", + "location": "query" + }, + "sourceBucket": { + "type": "string", + "description": "Name of the bucket in which to find the source object.", + "required": true, + "location": "path" + }, + "sourceGeneration": { + "type": "string", + "description": "If present, selects a specific revision of the source object (as opposed to the latest version, the default).", + "format": "int64", + "location": "query" + }, + "sourceObject": { + "type": "string", + "description": "Name of the source object. For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.", + "required": true, + "location": "path" + } + }, + "parameterOrder": [ + "sourceBucket", + "sourceObject", + "destinationBucket", + "destinationObject" + ], + "request": { + "$ref": "Object" + }, + "response": { + "$ref": "RewriteResponse" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/devstorage.full_control", + "https://www.googleapis.com/auth/devstorage.read_write" + ] + }, + "update": { + "id": "storage.objects.update", + "path": "b/{bucket}/o/{object}", + "httpMethod": "PUT", + "description": "Updates an object's metadata.", + "parameters": { + "bucket": { + "type": "string", + "description": "Name of the bucket in which the object resides.", + "required": true, + "location": "path" + }, + "generation": { + "type": "string", + "description": "If present, selects a specific revision of this object (as opposed to the latest version, the default).", + "format": "int64", + "location": "query" + }, + "ifGenerationMatch": { + "type": "string", + "description": "Makes the operation conditional on whether the object's current generation matches the given value.", + "format": "int64", + "location": "query" + }, + "ifGenerationNotMatch": { + "type": "string", + "description": "Makes the operation conditional on whether the object's current generation does not match the given value.", + "format": "int64", + "location": "query" + }, + "ifMetagenerationMatch": { + "type": "string", + "description": "Makes the operation conditional on whether the object's current metageneration matches the given value.", + "format": "int64", + "location": "query" + }, + "ifMetagenerationNotMatch": { + "type": "string", + "description": "Makes the operation conditional on whether the object's current metageneration does not match the given value.", + "format": "int64", + "location": "query" + }, + "object": { + "type": "string", + "description": "Name of the object. 
For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.", + "required": true, + "location": "path" + }, + "predefinedAcl": { + "type": "string", + "description": "Apply a predefined set of access controls to this object.", + "enum": [ + "authenticatedRead", + "bucketOwnerFullControl", + "bucketOwnerRead", + "private", + "projectPrivate", + "publicRead" + ], + "enumDescriptions": [ + "Object owner gets OWNER access, and allAuthenticatedUsers get READER access.", + "Object owner gets OWNER access, and project team owners get OWNER access.", + "Object owner gets OWNER access, and project team owners get READER access.", + "Object owner gets OWNER access.", + "Object owner gets OWNER access, and project team members get access according to their roles.", + "Object owner gets OWNER access, and allUsers get READER access." + ], + "location": "query" + }, + "projection": { + "type": "string", + "description": "Set of properties to return. Defaults to full.", + "enum": [ + "full", + "noAcl" + ], + "enumDescriptions": [ + "Include all properties.", + "Omit the acl property." + ], + "location": "query" + } + }, + "parameterOrder": [ + "bucket", + "object" + ], + "request": { + "$ref": "Object" + }, + "response": { + "$ref": "Object" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/devstorage.full_control", + "https://www.googleapis.com/auth/devstorage.read_write" + ], + "supportsMediaDownload": true, + "useMediaDownloadService": true + }, + "watchAll": { + "id": "storage.objects.watchAll", + "path": "b/{bucket}/o/watch", + "httpMethod": "POST", + "description": "Watch for changes on all objects in a bucket.", + "parameters": { + "bucket": { + "type": "string", + "description": "Name of the bucket in which to look for objects.", + "required": true, + "location": "path" + }, + "delimiter": { + "type": "string", + "description": "Returns results in a directory-like mode. items will contain only objects whose names, aside from the prefix, do not contain delimiter. Objects whose names, aside from the prefix, contain delimiter will have their name, truncated after the delimiter, returned in prefixes. Duplicate prefixes are omitted.", + "location": "query" + }, + "maxResults": { + "type": "integer", + "description": "Maximum number of items plus prefixes to return. As duplicate prefixes are omitted, fewer total results may be returned than requested. The default value of this parameter is 1,000 items.", + "format": "uint32", + "minimum": "0", + "location": "query" + }, + "pageToken": { + "type": "string", + "description": "A previously-returned page token representing part of the larger set of results to view.", + "location": "query" + }, + "prefix": { + "type": "string", + "description": "Filter results to objects whose names begin with this prefix.", + "location": "query" + }, + "projection": { + "type": "string", + "description": "Set of properties to return. Defaults to noAcl.", + "enum": [ + "full", + "noAcl" + ], + "enumDescriptions": [ + "Include all properties.", + "Omit the acl property." + ], + "location": "query" + }, + "versions": { + "type": "boolean", + "description": "If true, lists all versions of an object as distinct results. The default is false. 
For more information, see Object Versioning.", + "location": "query" + } + }, + "parameterOrder": [ + "bucket" + ], + "request": { + "$ref": "Channel", + "parameterName": "resource" + }, + "response": { + "$ref": "Channel" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/cloud-platform.read-only", + "https://www.googleapis.com/auth/devstorage.full_control", + "https://www.googleapis.com/auth/devstorage.read_only", + "https://www.googleapis.com/auth/devstorage.read_write" + ], + "supportsSubscription": true + } + } + } + } +} diff --git a/vendor/github.com/docker/distribution/vendor/google.golang.org/api/storage/v1/storage-gen.go b/vendor/github.com/docker/distribution/vendor/google.golang.org/api/storage/v1/storage-gen.go new file mode 100644 index 000000000..a29904439 --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/google.golang.org/api/storage/v1/storage-gen.go @@ -0,0 +1,7690 @@ +// Package storage provides access to the Cloud Storage JSON API. +// +// See https://developers.google.com/storage/docs/json_api/ +// +// Usage example: +// +// import "google.golang.org/api/storage/v1" +// ... +// storageService, err := storage.New(oauthHttpClient) +package storage + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + context "golang.org/x/net/context" + ctxhttp "golang.org/x/net/context/ctxhttp" + gensupport "google.golang.org/api/gensupport" + googleapi "google.golang.org/api/googleapi" + "io" + "net/http" + "net/url" + "strconv" + "strings" +) + +// Always reference these packages, just in case the auto-generated code +// below doesn't. +var _ = bytes.NewBuffer +var _ = strconv.Itoa +var _ = fmt.Sprintf +var _ = json.NewDecoder +var _ = io.Copy +var _ = url.Parse +var _ = gensupport.MarshalJSON +var _ = googleapi.Version +var _ = errors.New +var _ = strings.Replace +var _ = context.Canceled +var _ = ctxhttp.Do + +const apiId = "storage:v1" +const apiName = "storage" +const apiVersion = "v1" +const basePath = "https://www.googleapis.com/storage/v1/" + +// OAuth2 scopes used by this API. 
+const ( + // View and manage your data across Google Cloud Platform services + CloudPlatformScope = "https://www.googleapis.com/auth/cloud-platform" + + // View your data across Google Cloud Platform services + CloudPlatformReadOnlyScope = "https://www.googleapis.com/auth/cloud-platform.read-only" + + // Manage your data and permissions in Google Cloud Storage + DevstorageFullControlScope = "https://www.googleapis.com/auth/devstorage.full_control" + + // View your data in Google Cloud Storage + DevstorageReadOnlyScope = "https://www.googleapis.com/auth/devstorage.read_only" + + // Manage your data in Google Cloud Storage + DevstorageReadWriteScope = "https://www.googleapis.com/auth/devstorage.read_write" +) + +func New(client *http.Client) (*Service, error) { + if client == nil { + return nil, errors.New("client is nil") + } + s := &Service{client: client, BasePath: basePath} + s.BucketAccessControls = NewBucketAccessControlsService(s) + s.Buckets = NewBucketsService(s) + s.Channels = NewChannelsService(s) + s.DefaultObjectAccessControls = NewDefaultObjectAccessControlsService(s) + s.ObjectAccessControls = NewObjectAccessControlsService(s) + s.Objects = NewObjectsService(s) + return s, nil +} + +type Service struct { + client *http.Client + BasePath string // API endpoint base URL + UserAgent string // optional additional User-Agent fragment + + BucketAccessControls *BucketAccessControlsService + + Buckets *BucketsService + + Channels *ChannelsService + + DefaultObjectAccessControls *DefaultObjectAccessControlsService + + ObjectAccessControls *ObjectAccessControlsService + + Objects *ObjectsService +} + +func (s *Service) userAgent() string { + if s.UserAgent == "" { + return googleapi.UserAgent + } + return googleapi.UserAgent + " " + s.UserAgent +} + +func NewBucketAccessControlsService(s *Service) *BucketAccessControlsService { + rs := &BucketAccessControlsService{s: s} + return rs +} + +type BucketAccessControlsService struct { + s *Service +} + +func NewBucketsService(s *Service) *BucketsService { + rs := &BucketsService{s: s} + return rs +} + +type BucketsService struct { + s *Service +} + +func NewChannelsService(s *Service) *ChannelsService { + rs := &ChannelsService{s: s} + return rs +} + +type ChannelsService struct { + s *Service +} + +func NewDefaultObjectAccessControlsService(s *Service) *DefaultObjectAccessControlsService { + rs := &DefaultObjectAccessControlsService{s: s} + return rs +} + +type DefaultObjectAccessControlsService struct { + s *Service +} + +func NewObjectAccessControlsService(s *Service) *ObjectAccessControlsService { + rs := &ObjectAccessControlsService{s: s} + return rs +} + +type ObjectAccessControlsService struct { + s *Service +} + +func NewObjectsService(s *Service) *ObjectsService { + rs := &ObjectsService{s: s} + return rs +} + +type ObjectsService struct { + s *Service +} + +// Bucket: A bucket. +type Bucket struct { + // Acl: Access controls on the bucket. + Acl []*BucketAccessControl `json:"acl,omitempty"` + + // Cors: The bucket's Cross-Origin Resource Sharing (CORS) + // configuration. + Cors []*BucketCors `json:"cors,omitempty"` + + // DefaultObjectAcl: Default access controls to apply to new objects + // when no ACL is provided. + DefaultObjectAcl []*ObjectAccessControl `json:"defaultObjectAcl,omitempty"` + + // Etag: HTTP 1.1 Entity tag for the bucket. + Etag string `json:"etag,omitempty"` + + // Id: The ID of the bucket. + Id string `json:"id,omitempty"` + + // Kind: The kind of item this is. For buckets, this is always + // storage#bucket. 
+ Kind string `json:"kind,omitempty"` + + // Lifecycle: The bucket's lifecycle configuration. See lifecycle + // management for more information. + Lifecycle *BucketLifecycle `json:"lifecycle,omitempty"` + + // Location: The location of the bucket. Object data for objects in the + // bucket resides in physical storage within this region. Defaults to + // US. See the developer's guide for the authoritative list. + Location string `json:"location,omitempty"` + + // Logging: The bucket's logging configuration, which defines the + // destination bucket and optional name prefix for the current bucket's + // logs. + Logging *BucketLogging `json:"logging,omitempty"` + + // Metageneration: The metadata generation of this bucket. + Metageneration int64 `json:"metageneration,omitempty,string"` + + // Name: The name of the bucket. + Name string `json:"name,omitempty"` + + // Owner: The owner of the bucket. This is always the project team's + // owner group. + Owner *BucketOwner `json:"owner,omitempty"` + + // ProjectNumber: The project number of the project the bucket belongs + // to. + ProjectNumber uint64 `json:"projectNumber,omitempty,string"` + + // SelfLink: The URI of this bucket. + SelfLink string `json:"selfLink,omitempty"` + + // StorageClass: The bucket's storage class. This defines how objects in + // the bucket are stored and determines the SLA and the cost of storage. + // Values include STANDARD, NEARLINE and DURABLE_REDUCED_AVAILABILITY. + // Defaults to STANDARD. For more information, see storage classes. + StorageClass string `json:"storageClass,omitempty"` + + // TimeCreated: The creation time of the bucket in RFC 3339 format. + TimeCreated string `json:"timeCreated,omitempty"` + + // Updated: The modification time of the bucket in RFC 3339 format. + Updated string `json:"updated,omitempty"` + + // Versioning: The bucket's versioning configuration. + Versioning *BucketVersioning `json:"versioning,omitempty"` + + // Website: The bucket's website configuration. + Website *BucketWebsite `json:"website,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "Acl") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` +} + +func (s *Bucket) MarshalJSON() ([]byte, error) { + type noMethod Bucket + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields) +} + +type BucketCors struct { + // MaxAgeSeconds: The value, in seconds, to return in the + // Access-Control-Max-Age header used in preflight responses. + MaxAgeSeconds int64 `json:"maxAgeSeconds,omitempty"` + + // Method: The list of HTTP methods on which to include CORS response + // headers, (GET, OPTIONS, POST, etc) Note: "*" is permitted in the list + // of methods, and means "any method". + Method []string `json:"method,omitempty"` + + // Origin: The list of Origins eligible to receive CORS response + // headers. Note: "*" is permitted in the list of origins, and means + // "any Origin". 
+ Origin []string `json:"origin,omitempty"` + + // ResponseHeader: The list of HTTP headers other than the simple + // response headers to give permission for the user-agent to share + // across domains. + ResponseHeader []string `json:"responseHeader,omitempty"` + + // ForceSendFields is a list of field names (e.g. "MaxAgeSeconds") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` +} + +func (s *BucketCors) MarshalJSON() ([]byte, error) { + type noMethod BucketCors + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields) +} + +// BucketLifecycle: The bucket's lifecycle configuration. See lifecycle +// management for more information. +type BucketLifecycle struct { + // Rule: A lifecycle management rule, which is made of an action to take + // and the condition(s) under which the action will be taken. + Rule []*BucketLifecycleRule `json:"rule,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Rule") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` +} + +func (s *BucketLifecycle) MarshalJSON() ([]byte, error) { + type noMethod BucketLifecycle + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields) +} + +type BucketLifecycleRule struct { + // Action: The action to take. + Action *BucketLifecycleRuleAction `json:"action,omitempty"` + + // Condition: The condition(s) under which the action will be taken. + Condition *BucketLifecycleRuleCondition `json:"condition,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Action") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` +} + +func (s *BucketLifecycleRule) MarshalJSON() ([]byte, error) { + type noMethod BucketLifecycleRule + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields) +} + +// BucketLifecycleRuleAction: The action to take. +type BucketLifecycleRuleAction struct { + // Type: Type of the action. Currently, only Delete is supported. + Type string `json:"type,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Type") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. 
+ ForceSendFields []string `json:"-"` +} + +func (s *BucketLifecycleRuleAction) MarshalJSON() ([]byte, error) { + type noMethod BucketLifecycleRuleAction + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields) +} + +// BucketLifecycleRuleCondition: The condition(s) under which the action +// will be taken. +type BucketLifecycleRuleCondition struct { + // Age: Age of an object (in days). This condition is satisfied when an + // object reaches the specified age. + Age int64 `json:"age,omitempty"` + + // CreatedBefore: A date in RFC 3339 format with only the date part (for + // instance, "2013-01-15"). This condition is satisfied when an object + // is created before midnight of the specified date in UTC. + CreatedBefore string `json:"createdBefore,omitempty"` + + // IsLive: Relevant only for versioned objects. If the value is true, + // this condition matches live objects; if the value is false, it + // matches archived objects. + IsLive bool `json:"isLive,omitempty"` + + // NumNewerVersions: Relevant only for versioned objects. If the value + // is N, this condition is satisfied when there are at least N versions + // (including the live version) newer than this version of the object. + NumNewerVersions int64 `json:"numNewerVersions,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Age") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` +} + +func (s *BucketLifecycleRuleCondition) MarshalJSON() ([]byte, error) { + type noMethod BucketLifecycleRuleCondition + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields) +} + +// BucketLogging: The bucket's logging configuration, which defines the +// destination bucket and optional name prefix for the current bucket's +// logs. +type BucketLogging struct { + // LogBucket: The destination bucket where the current bucket's logs + // should be placed. + LogBucket string `json:"logBucket,omitempty"` + + // LogObjectPrefix: A prefix for log object names. + LogObjectPrefix string `json:"logObjectPrefix,omitempty"` + + // ForceSendFields is a list of field names (e.g. "LogBucket") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` +} + +func (s *BucketLogging) MarshalJSON() ([]byte, error) { + type noMethod BucketLogging + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields) +} + +// BucketOwner: The owner of the bucket. This is always the project +// team's owner group. +type BucketOwner struct { + // Entity: The entity, in the form project-owner-projectId. + Entity string `json:"entity,omitempty"` + + // EntityId: The ID for the entity. + EntityId string `json:"entityId,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Entity") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. 
However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` +} + +func (s *BucketOwner) MarshalJSON() ([]byte, error) { + type noMethod BucketOwner + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields) +} + +// BucketVersioning: The bucket's versioning configuration. +type BucketVersioning struct { + // Enabled: While set to true, versioning is fully enabled for this + // bucket. + Enabled bool `json:"enabled,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Enabled") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` +} + +func (s *BucketVersioning) MarshalJSON() ([]byte, error) { + type noMethod BucketVersioning + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields) +} + +// BucketWebsite: The bucket's website configuration. +type BucketWebsite struct { + // MainPageSuffix: Behaves as the bucket's directory index where missing + // objects are treated as potential directories. + MainPageSuffix string `json:"mainPageSuffix,omitempty"` + + // NotFoundPage: The custom object to return when a requested resource + // is not found. + NotFoundPage string `json:"notFoundPage,omitempty"` + + // ForceSendFields is a list of field names (e.g. "MainPageSuffix") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` +} + +func (s *BucketWebsite) MarshalJSON() ([]byte, error) { + type noMethod BucketWebsite + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields) +} + +// BucketAccessControl: An access-control entry. +type BucketAccessControl struct { + // Bucket: The name of the bucket. + Bucket string `json:"bucket,omitempty"` + + // Domain: The domain associated with the entity, if any. + Domain string `json:"domain,omitempty"` + + // Email: The email address associated with the entity, if any. + Email string `json:"email,omitempty"` + + // Entity: The entity holding the permission, in one of the following + // forms: + // - user-userId + // - user-email + // - group-groupId + // - group-email + // - domain-domain + // - project-team-projectId + // - allUsers + // - allAuthenticatedUsers Examples: + // - The user liz@example.com would be user-liz@example.com. + // - The group example@googlegroups.com would be + // group-example@googlegroups.com. + // - To refer to all members of the Google Apps for Business domain + // example.com, the entity would be domain-example.com. + Entity string `json:"entity,omitempty"` + + // EntityId: The ID for the entity, if any. + EntityId string `json:"entityId,omitempty"` + + // Etag: HTTP 1.1 Entity tag for the access-control entry. + Etag string `json:"etag,omitempty"` + + // Id: The ID of the access-control entry. 
+ Id string `json:"id,omitempty"` + + // Kind: The kind of item this is. For bucket access control entries, + // this is always storage#bucketAccessControl. + Kind string `json:"kind,omitempty"` + + // ProjectTeam: The project team associated with the entity, if any. + ProjectTeam *BucketAccessControlProjectTeam `json:"projectTeam,omitempty"` + + // Role: The access permission for the entity. Can be READER, WRITER, or + // OWNER. + Role string `json:"role,omitempty"` + + // SelfLink: The link to this access-control entry. + SelfLink string `json:"selfLink,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "Bucket") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` +} + +func (s *BucketAccessControl) MarshalJSON() ([]byte, error) { + type noMethod BucketAccessControl + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields) +} + +// BucketAccessControlProjectTeam: The project team associated with the +// entity, if any. +type BucketAccessControlProjectTeam struct { + // ProjectNumber: The project number. + ProjectNumber string `json:"projectNumber,omitempty"` + + // Team: The team. Can be owners, editors, or viewers. + Team string `json:"team,omitempty"` + + // ForceSendFields is a list of field names (e.g. "ProjectNumber") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` +} + +func (s *BucketAccessControlProjectTeam) MarshalJSON() ([]byte, error) { + type noMethod BucketAccessControlProjectTeam + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields) +} + +// BucketAccessControls: An access-control list. +type BucketAccessControls struct { + // Items: The list of items. + Items []*BucketAccessControl `json:"items,omitempty"` + + // Kind: The kind of item this is. For lists of bucket access control + // entries, this is always storage#bucketAccessControls. + Kind string `json:"kind,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "Items") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` +} + +func (s *BucketAccessControls) MarshalJSON() ([]byte, error) { + type noMethod BucketAccessControls + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields) +} + +// Buckets: A list of buckets. +type Buckets struct { + // Items: The list of items. 
+ Items []*Bucket `json:"items,omitempty"` + + // Kind: The kind of item this is. For lists of buckets, this is always + // storage#buckets. + Kind string `json:"kind,omitempty"` + + // NextPageToken: The continuation token, used to page through large + // result sets. Provide this value in a subsequent request to return the + // next page of results. + NextPageToken string `json:"nextPageToken,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "Items") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` +} + +func (s *Buckets) MarshalJSON() ([]byte, error) { + type noMethod Buckets + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields) +} + +// Channel: An notification channel used to watch for resource changes. +type Channel struct { + // Address: The address where notifications are delivered for this + // channel. + Address string `json:"address,omitempty"` + + // Expiration: Date and time of notification channel expiration, + // expressed as a Unix timestamp, in milliseconds. Optional. + Expiration int64 `json:"expiration,omitempty,string"` + + // Id: A UUID or similar unique string that identifies this channel. + Id string `json:"id,omitempty"` + + // Kind: Identifies this as a notification channel used to watch for + // changes to a resource. Value: the fixed string "api#channel". + Kind string `json:"kind,omitempty"` + + // Params: Additional parameters controlling delivery channel behavior. + // Optional. + Params map[string]string `json:"params,omitempty"` + + // Payload: A Boolean value to indicate whether payload is wanted. + // Optional. + Payload bool `json:"payload,omitempty"` + + // ResourceId: An opaque ID that identifies the resource being watched + // on this channel. Stable across different API versions. + ResourceId string `json:"resourceId,omitempty"` + + // ResourceUri: A version-specific identifier for the watched resource. + ResourceUri string `json:"resourceUri,omitempty"` + + // Token: An arbitrary string delivered to the target address with each + // notification delivered over this channel. Optional. + Token string `json:"token,omitempty"` + + // Type: The type of delivery mechanism used for this channel. + Type string `json:"type,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "Address") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` +} + +func (s *Channel) MarshalJSON() ([]byte, error) { + type noMethod Channel + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields) +} + +// ComposeRequest: A Compose request. 
+type ComposeRequest struct { + // Destination: Properties of the resulting object. + Destination *Object `json:"destination,omitempty"` + + // Kind: The kind of item this is. + Kind string `json:"kind,omitempty"` + + // SourceObjects: The list of source objects that will be concatenated + // into a single object. + SourceObjects []*ComposeRequestSourceObjects `json:"sourceObjects,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Destination") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` +} + +func (s *ComposeRequest) MarshalJSON() ([]byte, error) { + type noMethod ComposeRequest + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields) +} + +type ComposeRequestSourceObjects struct { + // Generation: The generation of this object to use as the source. + Generation int64 `json:"generation,omitempty,string"` + + // Name: The source object's name. The source object's bucket is + // implicitly the destination bucket. + Name string `json:"name,omitempty"` + + // ObjectPreconditions: Conditions that must be met for this operation + // to execute. + ObjectPreconditions *ComposeRequestSourceObjectsObjectPreconditions `json:"objectPreconditions,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Generation") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` +} + +func (s *ComposeRequestSourceObjects) MarshalJSON() ([]byte, error) { + type noMethod ComposeRequestSourceObjects + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields) +} + +// ComposeRequestSourceObjectsObjectPreconditions: Conditions that must +// be met for this operation to execute. +type ComposeRequestSourceObjectsObjectPreconditions struct { + // IfGenerationMatch: Only perform the composition if the generation of + // the source object that would be used matches this value. If this + // value and a generation are both specified, they must be the same + // value or the call will fail. + IfGenerationMatch int64 `json:"ifGenerationMatch,omitempty,string"` + + // ForceSendFields is a list of field names (e.g. "IfGenerationMatch") + // to unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` +} + +func (s *ComposeRequestSourceObjectsObjectPreconditions) MarshalJSON() ([]byte, error) { + type noMethod ComposeRequestSourceObjectsObjectPreconditions + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields) +} + +// Object: An object. +type Object struct { + // Acl: Access controls on the object. 
+ Acl []*ObjectAccessControl `json:"acl,omitempty"` + + // Bucket: The name of the bucket containing this object. + Bucket string `json:"bucket,omitempty"` + + // CacheControl: Cache-Control directive for the object data. + CacheControl string `json:"cacheControl,omitempty"` + + // ComponentCount: Number of underlying components that make up this + // object. Components are accumulated by compose operations. + ComponentCount int64 `json:"componentCount,omitempty"` + + // ContentDisposition: Content-Disposition of the object data. + ContentDisposition string `json:"contentDisposition,omitempty"` + + // ContentEncoding: Content-Encoding of the object data. + ContentEncoding string `json:"contentEncoding,omitempty"` + + // ContentLanguage: Content-Language of the object data. + ContentLanguage string `json:"contentLanguage,omitempty"` + + // ContentType: Content-Type of the object data. + ContentType string `json:"contentType,omitempty"` + + // Crc32c: CRC32c checksum, as described in RFC 4960, Appendix B; + // encoded using base64 in big-endian byte order. For more information + // about using the CRC32c checksum, see Hashes and ETags: Best + // Practices. + Crc32c string `json:"crc32c,omitempty"` + + // CustomerEncryption: Metadata of customer-supplied encryption key, if + // the object is encrypted by such a key. + CustomerEncryption *ObjectCustomerEncryption `json:"customerEncryption,omitempty"` + + // Etag: HTTP 1.1 Entity tag for the object. + Etag string `json:"etag,omitempty"` + + // Generation: The content generation of this object. Used for object + // versioning. + Generation int64 `json:"generation,omitempty,string"` + + // Id: The ID of the object. + Id string `json:"id,omitempty"` + + // Kind: The kind of item this is. For objects, this is always + // storage#object. + Kind string `json:"kind,omitempty"` + + // Md5Hash: MD5 hash of the data; encoded using base64. For more + // information about using the MD5 hash, see Hashes and ETags: Best + // Practices. + Md5Hash string `json:"md5Hash,omitempty"` + + // MediaLink: Media download link. + MediaLink string `json:"mediaLink,omitempty"` + + // Metadata: User-provided metadata, in key/value pairs. + Metadata map[string]string `json:"metadata,omitempty"` + + // Metageneration: The version of the metadata for this object at this + // generation. Used for preconditions and for detecting changes in + // metadata. A metageneration number is only meaningful in the context + // of a particular generation of a particular object. + Metageneration int64 `json:"metageneration,omitempty,string"` + + // Name: The name of this object. Required if not specified by URL + // parameter. + Name string `json:"name,omitempty"` + + // Owner: The owner of the object. This will always be the uploader of + // the object. + Owner *ObjectOwner `json:"owner,omitempty"` + + // SelfLink: The link to this object. + SelfLink string `json:"selfLink,omitempty"` + + // Size: Content-Length of the data in bytes. + Size uint64 `json:"size,omitempty,string"` + + // StorageClass: Storage class of the object. + StorageClass string `json:"storageClass,omitempty"` + + // TimeCreated: The creation time of the object in RFC 3339 format. + TimeCreated string `json:"timeCreated,omitempty"` + + // TimeDeleted: The deletion time of the object in RFC 3339 format. Will + // be returned if and only if this version of the object has been + // deleted. 
+ TimeDeleted string `json:"timeDeleted,omitempty"` + + // Updated: The modification time of the object metadata in RFC 3339 + // format. + Updated string `json:"updated,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "Acl") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` +} + +func (s *Object) MarshalJSON() ([]byte, error) { + type noMethod Object + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields) +} + +// ObjectCustomerEncryption: Metadata of customer-supplied encryption +// key, if the object is encrypted by such a key. +type ObjectCustomerEncryption struct { + // EncryptionAlgorithm: The encryption algorithm. + EncryptionAlgorithm string `json:"encryptionAlgorithm,omitempty"` + + // KeySha256: SHA256 hash value of the encryption key. + KeySha256 string `json:"keySha256,omitempty"` + + // ForceSendFields is a list of field names (e.g. "EncryptionAlgorithm") + // to unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` +} + +func (s *ObjectCustomerEncryption) MarshalJSON() ([]byte, error) { + type noMethod ObjectCustomerEncryption + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields) +} + +// ObjectOwner: The owner of the object. This will always be the +// uploader of the object. +type ObjectOwner struct { + // Entity: The entity, in the form user-userId. + Entity string `json:"entity,omitempty"` + + // EntityId: The ID for the entity. + EntityId string `json:"entityId,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Entity") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` +} + +func (s *ObjectOwner) MarshalJSON() ([]byte, error) { + type noMethod ObjectOwner + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields) +} + +// ObjectAccessControl: An access-control entry. +type ObjectAccessControl struct { + // Bucket: The name of the bucket. + Bucket string `json:"bucket,omitempty"` + + // Domain: The domain associated with the entity, if any. + Domain string `json:"domain,omitempty"` + + // Email: The email address associated with the entity, if any. 
+ Email string `json:"email,omitempty"` + + // Entity: The entity holding the permission, in one of the following + // forms: + // - user-userId + // - user-email + // - group-groupId + // - group-email + // - domain-domain + // - project-team-projectId + // - allUsers + // - allAuthenticatedUsers Examples: + // - The user liz@example.com would be user-liz@example.com. + // - The group example@googlegroups.com would be + // group-example@googlegroups.com. + // - To refer to all members of the Google Apps for Business domain + // example.com, the entity would be domain-example.com. + Entity string `json:"entity,omitempty"` + + // EntityId: The ID for the entity, if any. + EntityId string `json:"entityId,omitempty"` + + // Etag: HTTP 1.1 Entity tag for the access-control entry. + Etag string `json:"etag,omitempty"` + + // Generation: The content generation of the object. + Generation int64 `json:"generation,omitempty,string"` + + // Id: The ID of the access-control entry. + Id string `json:"id,omitempty"` + + // Kind: The kind of item this is. For object access control entries, + // this is always storage#objectAccessControl. + Kind string `json:"kind,omitempty"` + + // Object: The name of the object. + Object string `json:"object,omitempty"` + + // ProjectTeam: The project team associated with the entity, if any. + ProjectTeam *ObjectAccessControlProjectTeam `json:"projectTeam,omitempty"` + + // Role: The access permission for the entity. Can be READER or OWNER. + Role string `json:"role,omitempty"` + + // SelfLink: The link to this access-control entry. + SelfLink string `json:"selfLink,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "Bucket") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` +} + +func (s *ObjectAccessControl) MarshalJSON() ([]byte, error) { + type noMethod ObjectAccessControl + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields) +} + +// ObjectAccessControlProjectTeam: The project team associated with the +// entity, if any. +type ObjectAccessControlProjectTeam struct { + // ProjectNumber: The project number. + ProjectNumber string `json:"projectNumber,omitempty"` + + // Team: The team. Can be owners, editors, or viewers. + Team string `json:"team,omitempty"` + + // ForceSendFields is a list of field names (e.g. "ProjectNumber") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` +} + +func (s *ObjectAccessControlProjectTeam) MarshalJSON() ([]byte, error) { + type noMethod ObjectAccessControlProjectTeam + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields) +} + +// ObjectAccessControls: An access-control list. +type ObjectAccessControls struct { + // Items: The list of items. 
+ Items []interface{} `json:"items,omitempty"` + + // Kind: The kind of item this is. For lists of object access control + // entries, this is always storage#objectAccessControls. + Kind string `json:"kind,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "Items") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` +} + +func (s *ObjectAccessControls) MarshalJSON() ([]byte, error) { + type noMethod ObjectAccessControls + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields) +} + +// Objects: A list of objects. +type Objects struct { + // Items: The list of items. + Items []*Object `json:"items,omitempty"` + + // Kind: The kind of item this is. For lists of objects, this is always + // storage#objects. + Kind string `json:"kind,omitempty"` + + // NextPageToken: The continuation token, used to page through large + // result sets. Provide this value in a subsequent request to return the + // next page of results. + NextPageToken string `json:"nextPageToken,omitempty"` + + // Prefixes: The list of prefixes of objects matching-but-not-listed up + // to and including the requested delimiter. + Prefixes []string `json:"prefixes,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "Items") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` +} + +func (s *Objects) MarshalJSON() ([]byte, error) { + type noMethod Objects + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields) +} + +// RewriteResponse: A rewrite response. +type RewriteResponse struct { + // Done: true if the copy is finished; otherwise, false if the copy is + // in progress. This property is always present in the response. + Done bool `json:"done,omitempty"` + + // Kind: The kind of item this is. + Kind string `json:"kind,omitempty"` + + // ObjectSize: The total size of the object being copied in bytes. This + // property is always present in the response. + ObjectSize uint64 `json:"objectSize,omitempty,string"` + + // Resource: A resource containing the metadata for the copied-to + // object. This property is present in the response only when copying + // completes. + Resource *Object `json:"resource,omitempty"` + + // RewriteToken: A token to use in subsequent requests to continue + // copying data. This token is present in the response only when there + // is more data to copy. + RewriteToken string `json:"rewriteToken,omitempty"` + + // TotalBytesRewritten: The total bytes written so far, which can be + // used to provide a waiting user with a progress indicator. This + // property is always present in the response. 
+ TotalBytesRewritten uint64 `json:"totalBytesRewritten,omitempty,string"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "Done") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` +} + +func (s *RewriteResponse) MarshalJSON() ([]byte, error) { + type noMethod RewriteResponse + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields) +} + +// method id "storage.bucketAccessControls.delete": + +type BucketAccessControlsDeleteCall struct { + s *Service + bucket string + entity string + urlParams_ gensupport.URLParams + ctx_ context.Context +} + +// Delete: Permanently deletes the ACL entry for the specified entity on +// the specified bucket. +func (r *BucketAccessControlsService) Delete(bucket string, entity string) *BucketAccessControlsDeleteCall { + c := &BucketAccessControlsDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.bucket = bucket + c.entity = entity + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *BucketAccessControlsDeleteCall) Fields(s ...googleapi.Field) *BucketAccessControlsDeleteCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *BucketAccessControlsDeleteCall) Context(ctx context.Context) *BucketAccessControlsDeleteCall { + c.ctx_ = ctx + return c +} + +func (c *BucketAccessControlsDeleteCall) doRequest(alt string) (*http.Response, error) { + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/acl/{entity}") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("DELETE", urls, body) + googleapi.Expand(req.URL, map[string]string{ + "bucket": c.bucket, + "entity": c.entity, + }) + req.Header.Set("User-Agent", c.s.userAgent()) + if c.ctx_ != nil { + return ctxhttp.Do(c.ctx_, c.s.client, req) + } + return c.s.client.Do(req) +} + +// Do executes the "storage.bucketAccessControls.delete" call. +func (c *BucketAccessControlsDeleteCall) Do(opts ...googleapi.CallOption) error { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if err != nil { + return err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return err + } + return nil + // { + // "description": "Permanently deletes the ACL entry for the specified entity on the specified bucket.", + // "httpMethod": "DELETE", + // "id": "storage.bucketAccessControls.delete", + // "parameterOrder": [ + // "bucket", + // "entity" + // ], + // "parameters": { + // "bucket": { + // "description": "Name of a bucket.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "entity": { + // "description": "The entity holding the permission. 
Can be user-userId, user-emailAddress, group-groupId, group-emailAddress, allUsers, or allAuthenticatedUsers.", + // "location": "path", + // "required": true, + // "type": "string" + // } + // }, + // "path": "b/{bucket}/acl/{entity}", + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/devstorage.full_control" + // ] + // } + +} + +// method id "storage.bucketAccessControls.get": + +type BucketAccessControlsGetCall struct { + s *Service + bucket string + entity string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context +} + +// Get: Returns the ACL entry for the specified entity on the specified +// bucket. +func (r *BucketAccessControlsService) Get(bucket string, entity string) *BucketAccessControlsGetCall { + c := &BucketAccessControlsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.bucket = bucket + c.entity = entity + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *BucketAccessControlsGetCall) Fields(s ...googleapi.Field) *BucketAccessControlsGetCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *BucketAccessControlsGetCall) IfNoneMatch(entityTag string) *BucketAccessControlsGetCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *BucketAccessControlsGetCall) Context(ctx context.Context) *BucketAccessControlsGetCall { + c.ctx_ = ctx + return c +} + +func (c *BucketAccessControlsGetCall) doRequest(alt string) (*http.Response, error) { + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/acl/{entity}") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("GET", urls, body) + googleapi.Expand(req.URL, map[string]string{ + "bucket": c.bucket, + "entity": c.entity, + }) + req.Header.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + req.Header.Set("If-None-Match", c.ifNoneMatch_) + } + if c.ctx_ != nil { + return ctxhttp.Do(c.ctx_, c.s.client, req) + } + return c.s.client.Do(req) +} + +// Do executes the "storage.bucketAccessControls.get" call. +// Exactly one of *BucketAccessControl or error will be non-nil. Any +// non-2xx status code is an error. Response headers are in either +// *BucketAccessControl.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *BucketAccessControlsGetCall) Do(opts ...googleapi.CallOption) (*BucketAccessControl, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &BucketAccessControl{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Returns the ACL entry for the specified entity on the specified bucket.", + // "httpMethod": "GET", + // "id": "storage.bucketAccessControls.get", + // "parameterOrder": [ + // "bucket", + // "entity" + // ], + // "parameters": { + // "bucket": { + // "description": "Name of a bucket.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "entity": { + // "description": "The entity holding the permission. Can be user-userId, user-emailAddress, group-groupId, group-emailAddress, allUsers, or allAuthenticatedUsers.", + // "location": "path", + // "required": true, + // "type": "string" + // } + // }, + // "path": "b/{bucket}/acl/{entity}", + // "response": { + // "$ref": "BucketAccessControl" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/devstorage.full_control" + // ] + // } + +} + +// method id "storage.bucketAccessControls.insert": + +type BucketAccessControlsInsertCall struct { + s *Service + bucket string + bucketaccesscontrol *BucketAccessControl + urlParams_ gensupport.URLParams + ctx_ context.Context +} + +// Insert: Creates a new ACL entry on the specified bucket. +func (r *BucketAccessControlsService) Insert(bucket string, bucketaccesscontrol *BucketAccessControl) *BucketAccessControlsInsertCall { + c := &BucketAccessControlsInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.bucket = bucket + c.bucketaccesscontrol = bucketaccesscontrol + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *BucketAccessControlsInsertCall) Fields(s ...googleapi.Field) *BucketAccessControlsInsertCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *BucketAccessControlsInsertCall) Context(ctx context.Context) *BucketAccessControlsInsertCall { + c.ctx_ = ctx + return c +} + +func (c *BucketAccessControlsInsertCall) doRequest(alt string) (*http.Response, error) { + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.bucketaccesscontrol) + if err != nil { + return nil, err + } + ctype := "application/json" + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/acl") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("POST", urls, body) + googleapi.Expand(req.URL, map[string]string{ + "bucket": c.bucket, + }) + req.Header.Set("Content-Type", ctype) + req.Header.Set("User-Agent", c.s.userAgent()) + if c.ctx_ != nil { + return ctxhttp.Do(c.ctx_, c.s.client, req) + } + return c.s.client.Do(req) +} + +// Do executes the "storage.bucketAccessControls.insert" call. 
+// Exactly one of *BucketAccessControl or error will be non-nil. Any +// non-2xx status code is an error. Response headers are in either +// *BucketAccessControl.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *BucketAccessControlsInsertCall) Do(opts ...googleapi.CallOption) (*BucketAccessControl, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &BucketAccessControl{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Creates a new ACL entry on the specified bucket.", + // "httpMethod": "POST", + // "id": "storage.bucketAccessControls.insert", + // "parameterOrder": [ + // "bucket" + // ], + // "parameters": { + // "bucket": { + // "description": "Name of a bucket.", + // "location": "path", + // "required": true, + // "type": "string" + // } + // }, + // "path": "b/{bucket}/acl", + // "request": { + // "$ref": "BucketAccessControl" + // }, + // "response": { + // "$ref": "BucketAccessControl" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/devstorage.full_control" + // ] + // } + +} + +// method id "storage.bucketAccessControls.list": + +type BucketAccessControlsListCall struct { + s *Service + bucket string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context +} + +// List: Retrieves ACL entries on the specified bucket. +func (r *BucketAccessControlsService) List(bucket string) *BucketAccessControlsListCall { + c := &BucketAccessControlsListCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.bucket = bucket + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *BucketAccessControlsListCall) Fields(s ...googleapi.Field) *BucketAccessControlsListCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *BucketAccessControlsListCall) IfNoneMatch(entityTag string) *BucketAccessControlsListCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. 
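Editor's note: the IfNoneMatch and Do documentation above describes the conditional-read contract of these generated calls, which is easy to skim past. A minimal usage sketch of that pattern follows; it is illustrative only and not part of the vendored file, and the *http.Client, bucket, entity, and cached ETag values are assumptions, not values taken from this change.

// Hedged sketch of the conditional-read pattern documented above.
package example

import (
	"fmt"
	"net/http"

	"google.golang.org/api/googleapi"
	storage "google.golang.org/api/storage/v1"
)

// fetchACL re-reads a bucket ACL entry only if it changed since cachedEtag.
// bucket, entity and cachedEtag are hypothetical inputs.
func fetchACL(client *http.Client, bucket, entity, cachedEtag string) error {
	svc, err := storage.New(client) // client must already carry storage credentials
	if err != nil {
		return err
	}
	acl, err := svc.BucketAccessControls.Get(bucket, entity).
		IfNoneMatch(cachedEtag). // sends If-None-Match with the cached ETag
		Do()
	if googleapi.IsNotModified(err) {
		fmt.Println("ACL entry unchanged; keep the cached copy")
		return nil
	}
	if err != nil {
		return err
	}
	fmt.Printf("role=%s etag=%s\n", acl.Role, acl.Etag) // refresh the cache from acl.Etag
	return nil
}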
+func (c *BucketAccessControlsListCall) Context(ctx context.Context) *BucketAccessControlsListCall { + c.ctx_ = ctx + return c +} + +func (c *BucketAccessControlsListCall) doRequest(alt string) (*http.Response, error) { + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/acl") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("GET", urls, body) + googleapi.Expand(req.URL, map[string]string{ + "bucket": c.bucket, + }) + req.Header.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + req.Header.Set("If-None-Match", c.ifNoneMatch_) + } + if c.ctx_ != nil { + return ctxhttp.Do(c.ctx_, c.s.client, req) + } + return c.s.client.Do(req) +} + +// Do executes the "storage.bucketAccessControls.list" call. +// Exactly one of *BucketAccessControls or error will be non-nil. Any +// non-2xx status code is an error. Response headers are in either +// *BucketAccessControls.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *BucketAccessControlsListCall) Do(opts ...googleapi.CallOption) (*BucketAccessControls, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &BucketAccessControls{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Retrieves ACL entries on the specified bucket.", + // "httpMethod": "GET", + // "id": "storage.bucketAccessControls.list", + // "parameterOrder": [ + // "bucket" + // ], + // "parameters": { + // "bucket": { + // "description": "Name of a bucket.", + // "location": "path", + // "required": true, + // "type": "string" + // } + // }, + // "path": "b/{bucket}/acl", + // "response": { + // "$ref": "BucketAccessControls" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/devstorage.full_control" + // ] + // } + +} + +// method id "storage.bucketAccessControls.patch": + +type BucketAccessControlsPatchCall struct { + s *Service + bucket string + entity string + bucketaccesscontrol *BucketAccessControl + urlParams_ gensupport.URLParams + ctx_ context.Context +} + +// Patch: Updates an ACL entry on the specified bucket. This method +// supports patch semantics. +func (r *BucketAccessControlsService) Patch(bucket string, entity string, bucketaccesscontrol *BucketAccessControl) *BucketAccessControlsPatchCall { + c := &BucketAccessControlsPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.bucket = bucket + c.entity = entity + c.bucketaccesscontrol = bucketaccesscontrol + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. 
+func (c *BucketAccessControlsPatchCall) Fields(s ...googleapi.Field) *BucketAccessControlsPatchCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *BucketAccessControlsPatchCall) Context(ctx context.Context) *BucketAccessControlsPatchCall { + c.ctx_ = ctx + return c +} + +func (c *BucketAccessControlsPatchCall) doRequest(alt string) (*http.Response, error) { + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.bucketaccesscontrol) + if err != nil { + return nil, err + } + ctype := "application/json" + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/acl/{entity}") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("PATCH", urls, body) + googleapi.Expand(req.URL, map[string]string{ + "bucket": c.bucket, + "entity": c.entity, + }) + req.Header.Set("Content-Type", ctype) + req.Header.Set("User-Agent", c.s.userAgent()) + if c.ctx_ != nil { + return ctxhttp.Do(c.ctx_, c.s.client, req) + } + return c.s.client.Do(req) +} + +// Do executes the "storage.bucketAccessControls.patch" call. +// Exactly one of *BucketAccessControl or error will be non-nil. Any +// non-2xx status code is an error. Response headers are in either +// *BucketAccessControl.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *BucketAccessControlsPatchCall) Do(opts ...googleapi.CallOption) (*BucketAccessControl, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &BucketAccessControl{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Updates an ACL entry on the specified bucket. This method supports patch semantics.", + // "httpMethod": "PATCH", + // "id": "storage.bucketAccessControls.patch", + // "parameterOrder": [ + // "bucket", + // "entity" + // ], + // "parameters": { + // "bucket": { + // "description": "Name of a bucket.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "entity": { + // "description": "The entity holding the permission. 
Can be user-userId, user-emailAddress, group-groupId, group-emailAddress, allUsers, or allAuthenticatedUsers.", + // "location": "path", + // "required": true, + // "type": "string" + // } + // }, + // "path": "b/{bucket}/acl/{entity}", + // "request": { + // "$ref": "BucketAccessControl" + // }, + // "response": { + // "$ref": "BucketAccessControl" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/devstorage.full_control" + // ] + // } + +} + +// method id "storage.bucketAccessControls.update": + +type BucketAccessControlsUpdateCall struct { + s *Service + bucket string + entity string + bucketaccesscontrol *BucketAccessControl + urlParams_ gensupport.URLParams + ctx_ context.Context +} + +// Update: Updates an ACL entry on the specified bucket. +func (r *BucketAccessControlsService) Update(bucket string, entity string, bucketaccesscontrol *BucketAccessControl) *BucketAccessControlsUpdateCall { + c := &BucketAccessControlsUpdateCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.bucket = bucket + c.entity = entity + c.bucketaccesscontrol = bucketaccesscontrol + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *BucketAccessControlsUpdateCall) Fields(s ...googleapi.Field) *BucketAccessControlsUpdateCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *BucketAccessControlsUpdateCall) Context(ctx context.Context) *BucketAccessControlsUpdateCall { + c.ctx_ = ctx + return c +} + +func (c *BucketAccessControlsUpdateCall) doRequest(alt string) (*http.Response, error) { + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.bucketaccesscontrol) + if err != nil { + return nil, err + } + ctype := "application/json" + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/acl/{entity}") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("PUT", urls, body) + googleapi.Expand(req.URL, map[string]string{ + "bucket": c.bucket, + "entity": c.entity, + }) + req.Header.Set("Content-Type", ctype) + req.Header.Set("User-Agent", c.s.userAgent()) + if c.ctx_ != nil { + return ctxhttp.Do(c.ctx_, c.s.client, req) + } + return c.s.client.Do(req) +} + +// Do executes the "storage.bucketAccessControls.update" call. +// Exactly one of *BucketAccessControl or error will be non-nil. Any +// non-2xx status code is an error. Response headers are in either +// *BucketAccessControl.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *BucketAccessControlsUpdateCall) Do(opts ...googleapi.CallOption) (*BucketAccessControl, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &BucketAccessControl{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Updates an ACL entry on the specified bucket.", + // "httpMethod": "PUT", + // "id": "storage.bucketAccessControls.update", + // "parameterOrder": [ + // "bucket", + // "entity" + // ], + // "parameters": { + // "bucket": { + // "description": "Name of a bucket.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "entity": { + // "description": "The entity holding the permission. Can be user-userId, user-emailAddress, group-groupId, group-emailAddress, allUsers, or allAuthenticatedUsers.", + // "location": "path", + // "required": true, + // "type": "string" + // } + // }, + // "path": "b/{bucket}/acl/{entity}", + // "request": { + // "$ref": "BucketAccessControl" + // }, + // "response": { + // "$ref": "BucketAccessControl" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/devstorage.full_control" + // ] + // } + +} + +// method id "storage.buckets.delete": + +type BucketsDeleteCall struct { + s *Service + bucket string + urlParams_ gensupport.URLParams + ctx_ context.Context +} + +// Delete: Permanently deletes an empty bucket. +func (r *BucketsService) Delete(bucket string) *BucketsDeleteCall { + c := &BucketsDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.bucket = bucket + return c +} + +// IfMetagenerationMatch sets the optional parameter +// "ifMetagenerationMatch": If set, only deletes the bucket if its +// metageneration matches this value. +func (c *BucketsDeleteCall) IfMetagenerationMatch(ifMetagenerationMatch int64) *BucketsDeleteCall { + c.urlParams_.Set("ifMetagenerationMatch", fmt.Sprint(ifMetagenerationMatch)) + return c +} + +// IfMetagenerationNotMatch sets the optional parameter +// "ifMetagenerationNotMatch": If set, only deletes the bucket if its +// metageneration does not match this value. +func (c *BucketsDeleteCall) IfMetagenerationNotMatch(ifMetagenerationNotMatch int64) *BucketsDeleteCall { + c.urlParams_.Set("ifMetagenerationNotMatch", fmt.Sprint(ifMetagenerationNotMatch)) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *BucketsDeleteCall) Fields(s ...googleapi.Field) *BucketsDeleteCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *BucketsDeleteCall) Context(ctx context.Context) *BucketsDeleteCall { + c.ctx_ = ctx + return c +} + +func (c *BucketsDeleteCall) doRequest(alt string) (*http.Response, error) { + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}") + urls += "?" 
+ c.urlParams_.Encode() + req, _ := http.NewRequest("DELETE", urls, body) + googleapi.Expand(req.URL, map[string]string{ + "bucket": c.bucket, + }) + req.Header.Set("User-Agent", c.s.userAgent()) + if c.ctx_ != nil { + return ctxhttp.Do(c.ctx_, c.s.client, req) + } + return c.s.client.Do(req) +} + +// Do executes the "storage.buckets.delete" call. +func (c *BucketsDeleteCall) Do(opts ...googleapi.CallOption) error { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if err != nil { + return err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return err + } + return nil + // { + // "description": "Permanently deletes an empty bucket.", + // "httpMethod": "DELETE", + // "id": "storage.buckets.delete", + // "parameterOrder": [ + // "bucket" + // ], + // "parameters": { + // "bucket": { + // "description": "Name of a bucket.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "ifMetagenerationMatch": { + // "description": "If set, only deletes the bucket if its metageneration matches this value.", + // "format": "int64", + // "location": "query", + // "type": "string" + // }, + // "ifMetagenerationNotMatch": { + // "description": "If set, only deletes the bucket if its metageneration does not match this value.", + // "format": "int64", + // "location": "query", + // "type": "string" + // } + // }, + // "path": "b/{bucket}", + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/devstorage.full_control", + // "https://www.googleapis.com/auth/devstorage.read_write" + // ] + // } + +} + +// method id "storage.buckets.get": + +type BucketsGetCall struct { + s *Service + bucket string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context +} + +// Get: Returns metadata for the specified bucket. +func (r *BucketsService) Get(bucket string) *BucketsGetCall { + c := &BucketsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.bucket = bucket + return c +} + +// IfMetagenerationMatch sets the optional parameter +// "ifMetagenerationMatch": Makes the return of the bucket metadata +// conditional on whether the bucket's current metageneration matches +// the given value. +func (c *BucketsGetCall) IfMetagenerationMatch(ifMetagenerationMatch int64) *BucketsGetCall { + c.urlParams_.Set("ifMetagenerationMatch", fmt.Sprint(ifMetagenerationMatch)) + return c +} + +// IfMetagenerationNotMatch sets the optional parameter +// "ifMetagenerationNotMatch": Makes the return of the bucket metadata +// conditional on whether the bucket's current metageneration does not +// match the given value. +func (c *BucketsGetCall) IfMetagenerationNotMatch(ifMetagenerationNotMatch int64) *BucketsGetCall { + c.urlParams_.Set("ifMetagenerationNotMatch", fmt.Sprint(ifMetagenerationNotMatch)) + return c +} + +// Projection sets the optional parameter "projection": Set of +// properties to return. Defaults to noAcl. +// +// Possible values: +// "full" - Include all properties. +// "noAcl" - Omit acl and defaultObjectAcl properties. +func (c *BucketsGetCall) Projection(projection string) *BucketsGetCall { + c.urlParams_.Set("projection", projection) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. 
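Editor's note: the delete call above pairs naturally with the metageneration preconditions it documents, so a concurrent metadata change makes the delete fail instead of racing it. A hedged sketch, assuming an authenticated *http.Client and a previously observed metageneration value; it is not part of the vendored file.

// Hedged sketch of a precondition-guarded bucket delete.
package example

import (
	"net/http"

	storage "google.golang.org/api/storage/v1"
)

// deleteIfUnchanged removes bucket only when its metageneration still equals seen.
// bucket and seen are hypothetical inputs.
func deleteIfUnchanged(client *http.Client, bucket string, seen int64) error {
	svc, err := storage.New(client)
	if err != nil {
		return err
	}
	return svc.Buckets.Delete(bucket).
		IfMetagenerationMatch(seen). // fail with a precondition error if metadata changed
		Do()
}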
+func (c *BucketsGetCall) Fields(s ...googleapi.Field) *BucketsGetCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *BucketsGetCall) IfNoneMatch(entityTag string) *BucketsGetCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *BucketsGetCall) Context(ctx context.Context) *BucketsGetCall { + c.ctx_ = ctx + return c +} + +func (c *BucketsGetCall) doRequest(alt string) (*http.Response, error) { + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("GET", urls, body) + googleapi.Expand(req.URL, map[string]string{ + "bucket": c.bucket, + }) + req.Header.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + req.Header.Set("If-None-Match", c.ifNoneMatch_) + } + if c.ctx_ != nil { + return ctxhttp.Do(c.ctx_, c.s.client, req) + } + return c.s.client.Do(req) +} + +// Do executes the "storage.buckets.get" call. +// Exactly one of *Bucket or error will be non-nil. Any non-2xx status +// code is an error. Response headers are in either +// *Bucket.ServerResponse.Header or (if a response was returned at all) +// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to +// check whether the returned error was because http.StatusNotModified +// was returned. +func (c *BucketsGetCall) Do(opts ...googleapi.CallOption) (*Bucket, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Bucket{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Returns metadata for the specified bucket.", + // "httpMethod": "GET", + // "id": "storage.buckets.get", + // "parameterOrder": [ + // "bucket" + // ], + // "parameters": { + // "bucket": { + // "description": "Name of a bucket.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "ifMetagenerationMatch": { + // "description": "Makes the return of the bucket metadata conditional on whether the bucket's current metageneration matches the given value.", + // "format": "int64", + // "location": "query", + // "type": "string" + // }, + // "ifMetagenerationNotMatch": { + // "description": "Makes the return of the bucket metadata conditional on whether the bucket's current metageneration does not match the given value.", + // "format": "int64", + // "location": "query", + // "type": "string" + // }, + // "projection": { + // "description": "Set of properties to return. 
Defaults to noAcl.", + // "enum": [ + // "full", + // "noAcl" + // ], + // "enumDescriptions": [ + // "Include all properties.", + // "Omit acl and defaultObjectAcl properties." + // ], + // "location": "query", + // "type": "string" + // } + // }, + // "path": "b/{bucket}", + // "response": { + // "$ref": "Bucket" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/cloud-platform.read-only", + // "https://www.googleapis.com/auth/devstorage.full_control", + // "https://www.googleapis.com/auth/devstorage.read_only", + // "https://www.googleapis.com/auth/devstorage.read_write" + // ] + // } + +} + +// method id "storage.buckets.insert": + +type BucketsInsertCall struct { + s *Service + bucket *Bucket + urlParams_ gensupport.URLParams + ctx_ context.Context +} + +// Insert: Creates a new bucket. +func (r *BucketsService) Insert(projectid string, bucket *Bucket) *BucketsInsertCall { + c := &BucketsInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.urlParams_.Set("project", projectid) + c.bucket = bucket + return c +} + +// PredefinedAcl sets the optional parameter "predefinedAcl": Apply a +// predefined set of access controls to this bucket. +// +// Possible values: +// "authenticatedRead" - Project team owners get OWNER access, and +// allAuthenticatedUsers get READER access. +// "private" - Project team owners get OWNER access. +// "projectPrivate" - Project team members get access according to +// their roles. +// "publicRead" - Project team owners get OWNER access, and allUsers +// get READER access. +// "publicReadWrite" - Project team owners get OWNER access, and +// allUsers get WRITER access. +func (c *BucketsInsertCall) PredefinedAcl(predefinedAcl string) *BucketsInsertCall { + c.urlParams_.Set("predefinedAcl", predefinedAcl) + return c +} + +// PredefinedDefaultObjectAcl sets the optional parameter +// "predefinedDefaultObjectAcl": Apply a predefined set of default +// object access controls to this bucket. +// +// Possible values: +// "authenticatedRead" - Object owner gets OWNER access, and +// allAuthenticatedUsers get READER access. +// "bucketOwnerFullControl" - Object owner gets OWNER access, and +// project team owners get OWNER access. +// "bucketOwnerRead" - Object owner gets OWNER access, and project +// team owners get READER access. +// "private" - Object owner gets OWNER access. +// "projectPrivate" - Object owner gets OWNER access, and project team +// members get access according to their roles. +// "publicRead" - Object owner gets OWNER access, and allUsers get +// READER access. +func (c *BucketsInsertCall) PredefinedDefaultObjectAcl(predefinedDefaultObjectAcl string) *BucketsInsertCall { + c.urlParams_.Set("predefinedDefaultObjectAcl", predefinedDefaultObjectAcl) + return c +} + +// Projection sets the optional parameter "projection": Set of +// properties to return. Defaults to noAcl, unless the bucket resource +// specifies acl or defaultObjectAcl properties, when it defaults to +// full. +// +// Possible values: +// "full" - Include all properties. +// "noAcl" - Omit acl and defaultObjectAcl properties. +func (c *BucketsInsertCall) Projection(projection string) *BucketsInsertCall { + c.urlParams_.Set("projection", projection) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. 
+func (c *BucketsInsertCall) Fields(s ...googleapi.Field) *BucketsInsertCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *BucketsInsertCall) Context(ctx context.Context) *BucketsInsertCall { + c.ctx_ = ctx + return c +} + +func (c *BucketsInsertCall) doRequest(alt string) (*http.Response, error) { + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.bucket) + if err != nil { + return nil, err + } + ctype := "application/json" + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "b") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("POST", urls, body) + googleapi.SetOpaque(req.URL) + req.Header.Set("Content-Type", ctype) + req.Header.Set("User-Agent", c.s.userAgent()) + if c.ctx_ != nil { + return ctxhttp.Do(c.ctx_, c.s.client, req) + } + return c.s.client.Do(req) +} + +// Do executes the "storage.buckets.insert" call. +// Exactly one of *Bucket or error will be non-nil. Any non-2xx status +// code is an error. Response headers are in either +// *Bucket.ServerResponse.Header or (if a response was returned at all) +// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to +// check whether the returned error was because http.StatusNotModified +// was returned. +func (c *BucketsInsertCall) Do(opts ...googleapi.CallOption) (*Bucket, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Bucket{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Creates a new bucket.", + // "httpMethod": "POST", + // "id": "storage.buckets.insert", + // "parameterOrder": [ + // "project" + // ], + // "parameters": { + // "predefinedAcl": { + // "description": "Apply a predefined set of access controls to this bucket.", + // "enum": [ + // "authenticatedRead", + // "private", + // "projectPrivate", + // "publicRead", + // "publicReadWrite" + // ], + // "enumDescriptions": [ + // "Project team owners get OWNER access, and allAuthenticatedUsers get READER access.", + // "Project team owners get OWNER access.", + // "Project team members get access according to their roles.", + // "Project team owners get OWNER access, and allUsers get READER access.", + // "Project team owners get OWNER access, and allUsers get WRITER access." 
+ // ], + // "location": "query", + // "type": "string" + // }, + // "predefinedDefaultObjectAcl": { + // "description": "Apply a predefined set of default object access controls to this bucket.", + // "enum": [ + // "authenticatedRead", + // "bucketOwnerFullControl", + // "bucketOwnerRead", + // "private", + // "projectPrivate", + // "publicRead" + // ], + // "enumDescriptions": [ + // "Object owner gets OWNER access, and allAuthenticatedUsers get READER access.", + // "Object owner gets OWNER access, and project team owners get OWNER access.", + // "Object owner gets OWNER access, and project team owners get READER access.", + // "Object owner gets OWNER access.", + // "Object owner gets OWNER access, and project team members get access according to their roles.", + // "Object owner gets OWNER access, and allUsers get READER access." + // ], + // "location": "query", + // "type": "string" + // }, + // "project": { + // "description": "A valid API project identifier.", + // "location": "query", + // "required": true, + // "type": "string" + // }, + // "projection": { + // "description": "Set of properties to return. Defaults to noAcl, unless the bucket resource specifies acl or defaultObjectAcl properties, when it defaults to full.", + // "enum": [ + // "full", + // "noAcl" + // ], + // "enumDescriptions": [ + // "Include all properties.", + // "Omit acl and defaultObjectAcl properties." + // ], + // "location": "query", + // "type": "string" + // } + // }, + // "path": "b", + // "request": { + // "$ref": "Bucket" + // }, + // "response": { + // "$ref": "Bucket" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/devstorage.full_control", + // "https://www.googleapis.com/auth/devstorage.read_write" + // ] + // } + +} + +// method id "storage.buckets.list": + +type BucketsListCall struct { + s *Service + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context +} + +// List: Retrieves a list of buckets for a given project. +func (r *BucketsService) List(projectid string) *BucketsListCall { + c := &BucketsListCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.urlParams_.Set("project", projectid) + return c +} + +// MaxResults sets the optional parameter "maxResults": Maximum number +// of buckets to return. +func (c *BucketsListCall) MaxResults(maxResults int64) *BucketsListCall { + c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) + return c +} + +// PageToken sets the optional parameter "pageToken": A +// previously-returned page token representing part of the larger set of +// results to view. +func (c *BucketsListCall) PageToken(pageToken string) *BucketsListCall { + c.urlParams_.Set("pageToken", pageToken) + return c +} + +// Prefix sets the optional parameter "prefix": Filter results to +// buckets whose names begin with this prefix. +func (c *BucketsListCall) Prefix(prefix string) *BucketsListCall { + c.urlParams_.Set("prefix", prefix) + return c +} + +// Projection sets the optional parameter "projection": Set of +// properties to return. Defaults to noAcl. +// +// Possible values: +// "full" - Include all properties. +// "noAcl" - Omit acl and defaultObjectAcl properties. +func (c *BucketsListCall) Projection(projection string) *BucketsListCall { + c.urlParams_.Set("projection", projection) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. 
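Editor's note: the list call documented here returns one page at a time; the Pages helper defined a little further down in this file drives the NextPageToken loop for the caller. A sketch under the assumption of an authenticated *http.Client and a hypothetical projectID; it is illustrative only and not part of the vendored file.

// Hedged sketch of paged bucket listing via the Pages helper.
package example

import (
	"context"
	"fmt"
	"net/http"

	storage "google.golang.org/api/storage/v1"
)

// listAllBuckets prints every bucket name in projectID, 100 per page.
// projectID is a hypothetical input.
func listAllBuckets(ctx context.Context, client *http.Client, projectID string) error {
	svc, err := storage.New(client)
	if err != nil {
		return err
	}
	return svc.Buckets.List(projectID).MaxResults(100).Pages(ctx, func(page *storage.Buckets) error {
		for _, b := range page.Items {
			fmt.Println(b.Name)
		}
		return nil // returning a non-nil error here halts the iteration
	})
}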
+func (c *BucketsListCall) Fields(s ...googleapi.Field) *BucketsListCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *BucketsListCall) IfNoneMatch(entityTag string) *BucketsListCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *BucketsListCall) Context(ctx context.Context) *BucketsListCall { + c.ctx_ = ctx + return c +} + +func (c *BucketsListCall) doRequest(alt string) (*http.Response, error) { + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "b") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("GET", urls, body) + googleapi.SetOpaque(req.URL) + req.Header.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + req.Header.Set("If-None-Match", c.ifNoneMatch_) + } + if c.ctx_ != nil { + return ctxhttp.Do(c.ctx_, c.s.client, req) + } + return c.s.client.Do(req) +} + +// Do executes the "storage.buckets.list" call. +// Exactly one of *Buckets or error will be non-nil. Any non-2xx status +// code is an error. Response headers are in either +// *Buckets.ServerResponse.Header or (if a response was returned at all) +// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to +// check whether the returned error was because http.StatusNotModified +// was returned. +func (c *BucketsListCall) Do(opts ...googleapi.CallOption) (*Buckets, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Buckets{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Retrieves a list of buckets for a given project.", + // "httpMethod": "GET", + // "id": "storage.buckets.list", + // "parameterOrder": [ + // "project" + // ], + // "parameters": { + // "maxResults": { + // "description": "Maximum number of buckets to return.", + // "format": "uint32", + // "location": "query", + // "minimum": "0", + // "type": "integer" + // }, + // "pageToken": { + // "description": "A previously-returned page token representing part of the larger set of results to view.", + // "location": "query", + // "type": "string" + // }, + // "prefix": { + // "description": "Filter results to buckets whose names begin with this prefix.", + // "location": "query", + // "type": "string" + // }, + // "project": { + // "description": "A valid API project identifier.", + // "location": "query", + // "required": true, + // "type": "string" + // }, + // "projection": { + // "description": "Set of properties to return. 
Defaults to noAcl.", + // "enum": [ + // "full", + // "noAcl" + // ], + // "enumDescriptions": [ + // "Include all properties.", + // "Omit acl and defaultObjectAcl properties." + // ], + // "location": "query", + // "type": "string" + // } + // }, + // "path": "b", + // "response": { + // "$ref": "Buckets" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/cloud-platform.read-only", + // "https://www.googleapis.com/auth/devstorage.full_control", + // "https://www.googleapis.com/auth/devstorage.read_only", + // "https://www.googleapis.com/auth/devstorage.read_write" + // ] + // } + +} + +// Pages invokes f for each page of results. +// A non-nil error returned from f will halt the iteration. +// The provided context supersedes any context provided to the Context method. +func (c *BucketsListCall) Pages(ctx context.Context, f func(*Buckets) error) error { + c.ctx_ = ctx + defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point + for { + x, err := c.Do() + if err != nil { + return err + } + if err := f(x); err != nil { + return err + } + if x.NextPageToken == "" { + return nil + } + c.PageToken(x.NextPageToken) + } +} + +// method id "storage.buckets.patch": + +type BucketsPatchCall struct { + s *Service + bucket string + bucket2 *Bucket + urlParams_ gensupport.URLParams + ctx_ context.Context +} + +// Patch: Updates a bucket. This method supports patch semantics. +func (r *BucketsService) Patch(bucket string, bucket2 *Bucket) *BucketsPatchCall { + c := &BucketsPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.bucket = bucket + c.bucket2 = bucket2 + return c +} + +// IfMetagenerationMatch sets the optional parameter +// "ifMetagenerationMatch": Makes the return of the bucket metadata +// conditional on whether the bucket's current metageneration matches +// the given value. +func (c *BucketsPatchCall) IfMetagenerationMatch(ifMetagenerationMatch int64) *BucketsPatchCall { + c.urlParams_.Set("ifMetagenerationMatch", fmt.Sprint(ifMetagenerationMatch)) + return c +} + +// IfMetagenerationNotMatch sets the optional parameter +// "ifMetagenerationNotMatch": Makes the return of the bucket metadata +// conditional on whether the bucket's current metageneration does not +// match the given value. +func (c *BucketsPatchCall) IfMetagenerationNotMatch(ifMetagenerationNotMatch int64) *BucketsPatchCall { + c.urlParams_.Set("ifMetagenerationNotMatch", fmt.Sprint(ifMetagenerationNotMatch)) + return c +} + +// PredefinedAcl sets the optional parameter "predefinedAcl": Apply a +// predefined set of access controls to this bucket. +// +// Possible values: +// "authenticatedRead" - Project team owners get OWNER access, and +// allAuthenticatedUsers get READER access. +// "private" - Project team owners get OWNER access. +// "projectPrivate" - Project team members get access according to +// their roles. +// "publicRead" - Project team owners get OWNER access, and allUsers +// get READER access. +// "publicReadWrite" - Project team owners get OWNER access, and +// allUsers get WRITER access. +func (c *BucketsPatchCall) PredefinedAcl(predefinedAcl string) *BucketsPatchCall { + c.urlParams_.Set("predefinedAcl", predefinedAcl) + return c +} + +// PredefinedDefaultObjectAcl sets the optional parameter +// "predefinedDefaultObjectAcl": Apply a predefined set of default +// object access controls to this bucket. 
+// +// Possible values: +// "authenticatedRead" - Object owner gets OWNER access, and +// allAuthenticatedUsers get READER access. +// "bucketOwnerFullControl" - Object owner gets OWNER access, and +// project team owners get OWNER access. +// "bucketOwnerRead" - Object owner gets OWNER access, and project +// team owners get READER access. +// "private" - Object owner gets OWNER access. +// "projectPrivate" - Object owner gets OWNER access, and project team +// members get access according to their roles. +// "publicRead" - Object owner gets OWNER access, and allUsers get +// READER access. +func (c *BucketsPatchCall) PredefinedDefaultObjectAcl(predefinedDefaultObjectAcl string) *BucketsPatchCall { + c.urlParams_.Set("predefinedDefaultObjectAcl", predefinedDefaultObjectAcl) + return c +} + +// Projection sets the optional parameter "projection": Set of +// properties to return. Defaults to full. +// +// Possible values: +// "full" - Include all properties. +// "noAcl" - Omit acl and defaultObjectAcl properties. +func (c *BucketsPatchCall) Projection(projection string) *BucketsPatchCall { + c.urlParams_.Set("projection", projection) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *BucketsPatchCall) Fields(s ...googleapi.Field) *BucketsPatchCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *BucketsPatchCall) Context(ctx context.Context) *BucketsPatchCall { + c.ctx_ = ctx + return c +} + +func (c *BucketsPatchCall) doRequest(alt string) (*http.Response, error) { + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.bucket2) + if err != nil { + return nil, err + } + ctype := "application/json" + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("PATCH", urls, body) + googleapi.Expand(req.URL, map[string]string{ + "bucket": c.bucket, + }) + req.Header.Set("Content-Type", ctype) + req.Header.Set("User-Agent", c.s.userAgent()) + if c.ctx_ != nil { + return ctxhttp.Do(c.ctx_, c.s.client, req) + } + return c.s.client.Do(req) +} + +// Do executes the "storage.buckets.patch" call. +// Exactly one of *Bucket or error will be non-nil. Any non-2xx status +// code is an error. Response headers are in either +// *Bucket.ServerResponse.Header or (if a response was returned at all) +// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to +// check whether the returned error was because http.StatusNotModified +// was returned. +func (c *BucketsPatchCall) Do(opts ...googleapi.CallOption) (*Bucket, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Bucket{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Updates a bucket. This method supports patch semantics.", + // "httpMethod": "PATCH", + // "id": "storage.buckets.patch", + // "parameterOrder": [ + // "bucket" + // ], + // "parameters": { + // "bucket": { + // "description": "Name of a bucket.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "ifMetagenerationMatch": { + // "description": "Makes the return of the bucket metadata conditional on whether the bucket's current metageneration matches the given value.", + // "format": "int64", + // "location": "query", + // "type": "string" + // }, + // "ifMetagenerationNotMatch": { + // "description": "Makes the return of the bucket metadata conditional on whether the bucket's current metageneration does not match the given value.", + // "format": "int64", + // "location": "query", + // "type": "string" + // }, + // "predefinedAcl": { + // "description": "Apply a predefined set of access controls to this bucket.", + // "enum": [ + // "authenticatedRead", + // "private", + // "projectPrivate", + // "publicRead", + // "publicReadWrite" + // ], + // "enumDescriptions": [ + // "Project team owners get OWNER access, and allAuthenticatedUsers get READER access.", + // "Project team owners get OWNER access.", + // "Project team members get access according to their roles.", + // "Project team owners get OWNER access, and allUsers get READER access.", + // "Project team owners get OWNER access, and allUsers get WRITER access." + // ], + // "location": "query", + // "type": "string" + // }, + // "predefinedDefaultObjectAcl": { + // "description": "Apply a predefined set of default object access controls to this bucket.", + // "enum": [ + // "authenticatedRead", + // "bucketOwnerFullControl", + // "bucketOwnerRead", + // "private", + // "projectPrivate", + // "publicRead" + // ], + // "enumDescriptions": [ + // "Object owner gets OWNER access, and allAuthenticatedUsers get READER access.", + // "Object owner gets OWNER access, and project team owners get OWNER access.", + // "Object owner gets OWNER access, and project team owners get READER access.", + // "Object owner gets OWNER access.", + // "Object owner gets OWNER access, and project team members get access according to their roles.", + // "Object owner gets OWNER access, and allUsers get READER access." + // ], + // "location": "query", + // "type": "string" + // }, + // "projection": { + // "description": "Set of properties to return. Defaults to full.", + // "enum": [ + // "full", + // "noAcl" + // ], + // "enumDescriptions": [ + // "Include all properties.", + // "Omit acl and defaultObjectAcl properties." 
+ // ], + // "location": "query", + // "type": "string" + // } + // }, + // "path": "b/{bucket}", + // "request": { + // "$ref": "Bucket" + // }, + // "response": { + // "$ref": "Bucket" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/devstorage.full_control", + // "https://www.googleapis.com/auth/devstorage.read_write" + // ] + // } + +} + +// method id "storage.buckets.update": + +type BucketsUpdateCall struct { + s *Service + bucket string + bucket2 *Bucket + urlParams_ gensupport.URLParams + ctx_ context.Context +} + +// Update: Updates a bucket. +func (r *BucketsService) Update(bucket string, bucket2 *Bucket) *BucketsUpdateCall { + c := &BucketsUpdateCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.bucket = bucket + c.bucket2 = bucket2 + return c +} + +// IfMetagenerationMatch sets the optional parameter +// "ifMetagenerationMatch": Makes the return of the bucket metadata +// conditional on whether the bucket's current metageneration matches +// the given value. +func (c *BucketsUpdateCall) IfMetagenerationMatch(ifMetagenerationMatch int64) *BucketsUpdateCall { + c.urlParams_.Set("ifMetagenerationMatch", fmt.Sprint(ifMetagenerationMatch)) + return c +} + +// IfMetagenerationNotMatch sets the optional parameter +// "ifMetagenerationNotMatch": Makes the return of the bucket metadata +// conditional on whether the bucket's current metageneration does not +// match the given value. +func (c *BucketsUpdateCall) IfMetagenerationNotMatch(ifMetagenerationNotMatch int64) *BucketsUpdateCall { + c.urlParams_.Set("ifMetagenerationNotMatch", fmt.Sprint(ifMetagenerationNotMatch)) + return c +} + +// PredefinedAcl sets the optional parameter "predefinedAcl": Apply a +// predefined set of access controls to this bucket. +// +// Possible values: +// "authenticatedRead" - Project team owners get OWNER access, and +// allAuthenticatedUsers get READER access. +// "private" - Project team owners get OWNER access. +// "projectPrivate" - Project team members get access according to +// their roles. +// "publicRead" - Project team owners get OWNER access, and allUsers +// get READER access. +// "publicReadWrite" - Project team owners get OWNER access, and +// allUsers get WRITER access. +func (c *BucketsUpdateCall) PredefinedAcl(predefinedAcl string) *BucketsUpdateCall { + c.urlParams_.Set("predefinedAcl", predefinedAcl) + return c +} + +// PredefinedDefaultObjectAcl sets the optional parameter +// "predefinedDefaultObjectAcl": Apply a predefined set of default +// object access controls to this bucket. +// +// Possible values: +// "authenticatedRead" - Object owner gets OWNER access, and +// allAuthenticatedUsers get READER access. +// "bucketOwnerFullControl" - Object owner gets OWNER access, and +// project team owners get OWNER access. +// "bucketOwnerRead" - Object owner gets OWNER access, and project +// team owners get READER access. +// "private" - Object owner gets OWNER access. +// "projectPrivate" - Object owner gets OWNER access, and project team +// members get access according to their roles. +// "publicRead" - Object owner gets OWNER access, and allUsers get +// READER access. +func (c *BucketsUpdateCall) PredefinedDefaultObjectAcl(predefinedDefaultObjectAcl string) *BucketsUpdateCall { + c.urlParams_.Set("predefinedDefaultObjectAcl", predefinedDefaultObjectAcl) + return c +} + +// Projection sets the optional parameter "projection": Set of +// properties to return. Defaults to full. 
+// +// Possible values: +// "full" - Include all properties. +// "noAcl" - Omit acl and defaultObjectAcl properties. +func (c *BucketsUpdateCall) Projection(projection string) *BucketsUpdateCall { + c.urlParams_.Set("projection", projection) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *BucketsUpdateCall) Fields(s ...googleapi.Field) *BucketsUpdateCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *BucketsUpdateCall) Context(ctx context.Context) *BucketsUpdateCall { + c.ctx_ = ctx + return c +} + +func (c *BucketsUpdateCall) doRequest(alt string) (*http.Response, error) { + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.bucket2) + if err != nil { + return nil, err + } + ctype := "application/json" + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("PUT", urls, body) + googleapi.Expand(req.URL, map[string]string{ + "bucket": c.bucket, + }) + req.Header.Set("Content-Type", ctype) + req.Header.Set("User-Agent", c.s.userAgent()) + if c.ctx_ != nil { + return ctxhttp.Do(c.ctx_, c.s.client, req) + } + return c.s.client.Do(req) +} + +// Do executes the "storage.buckets.update" call. +// Exactly one of *Bucket or error will be non-nil. Any non-2xx status +// code is an error. Response headers are in either +// *Bucket.ServerResponse.Header or (if a response was returned at all) +// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to +// check whether the returned error was because http.StatusNotModified +// was returned. +func (c *BucketsUpdateCall) Do(opts ...googleapi.CallOption) (*Bucket, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
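+	// On success the body is decoded into a Bucket whose embedded ServerResponse
+	// records the response headers and HTTP status code.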
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Bucket{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Updates a bucket.", + // "httpMethod": "PUT", + // "id": "storage.buckets.update", + // "parameterOrder": [ + // "bucket" + // ], + // "parameters": { + // "bucket": { + // "description": "Name of a bucket.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "ifMetagenerationMatch": { + // "description": "Makes the return of the bucket metadata conditional on whether the bucket's current metageneration matches the given value.", + // "format": "int64", + // "location": "query", + // "type": "string" + // }, + // "ifMetagenerationNotMatch": { + // "description": "Makes the return of the bucket metadata conditional on whether the bucket's current metageneration does not match the given value.", + // "format": "int64", + // "location": "query", + // "type": "string" + // }, + // "predefinedAcl": { + // "description": "Apply a predefined set of access controls to this bucket.", + // "enum": [ + // "authenticatedRead", + // "private", + // "projectPrivate", + // "publicRead", + // "publicReadWrite" + // ], + // "enumDescriptions": [ + // "Project team owners get OWNER access, and allAuthenticatedUsers get READER access.", + // "Project team owners get OWNER access.", + // "Project team members get access according to their roles.", + // "Project team owners get OWNER access, and allUsers get READER access.", + // "Project team owners get OWNER access, and allUsers get WRITER access." + // ], + // "location": "query", + // "type": "string" + // }, + // "predefinedDefaultObjectAcl": { + // "description": "Apply a predefined set of default object access controls to this bucket.", + // "enum": [ + // "authenticatedRead", + // "bucketOwnerFullControl", + // "bucketOwnerRead", + // "private", + // "projectPrivate", + // "publicRead" + // ], + // "enumDescriptions": [ + // "Object owner gets OWNER access, and allAuthenticatedUsers get READER access.", + // "Object owner gets OWNER access, and project team owners get OWNER access.", + // "Object owner gets OWNER access, and project team owners get READER access.", + // "Object owner gets OWNER access.", + // "Object owner gets OWNER access, and project team members get access according to their roles.", + // "Object owner gets OWNER access, and allUsers get READER access." + // ], + // "location": "query", + // "type": "string" + // }, + // "projection": { + // "description": "Set of properties to return. Defaults to full.", + // "enum": [ + // "full", + // "noAcl" + // ], + // "enumDescriptions": [ + // "Include all properties.", + // "Omit acl and defaultObjectAcl properties." 
+ // ], + // "location": "query", + // "type": "string" + // } + // }, + // "path": "b/{bucket}", + // "request": { + // "$ref": "Bucket" + // }, + // "response": { + // "$ref": "Bucket" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/devstorage.full_control", + // "https://www.googleapis.com/auth/devstorage.read_write" + // ] + // } + +} + +// method id "storage.channels.stop": + +type ChannelsStopCall struct { + s *Service + channel *Channel + urlParams_ gensupport.URLParams + ctx_ context.Context +} + +// Stop: Stop watching resources through this channel +func (r *ChannelsService) Stop(channel *Channel) *ChannelsStopCall { + c := &ChannelsStopCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.channel = channel + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *ChannelsStopCall) Fields(s ...googleapi.Field) *ChannelsStopCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *ChannelsStopCall) Context(ctx context.Context) *ChannelsStopCall { + c.ctx_ = ctx + return c +} + +func (c *ChannelsStopCall) doRequest(alt string) (*http.Response, error) { + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.channel) + if err != nil { + return nil, err + } + ctype := "application/json" + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "channels/stop") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("POST", urls, body) + googleapi.SetOpaque(req.URL) + req.Header.Set("Content-Type", ctype) + req.Header.Set("User-Agent", c.s.userAgent()) + if c.ctx_ != nil { + return ctxhttp.Do(c.ctx_, c.s.client, req) + } + return c.s.client.Do(req) +} + +// Do executes the "storage.channels.stop" call. +func (c *ChannelsStopCall) Do(opts ...googleapi.CallOption) error { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if err != nil { + return err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return err + } + return nil + // { + // "description": "Stop watching resources through this channel", + // "httpMethod": "POST", + // "id": "storage.channels.stop", + // "path": "channels/stop", + // "request": { + // "$ref": "Channel", + // "parameterName": "resource" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/cloud-platform.read-only", + // "https://www.googleapis.com/auth/devstorage.full_control", + // "https://www.googleapis.com/auth/devstorage.read_only", + // "https://www.googleapis.com/auth/devstorage.read_write" + // ] + // } + +} + +// method id "storage.defaultObjectAccessControls.delete": + +type DefaultObjectAccessControlsDeleteCall struct { + s *Service + bucket string + entity string + urlParams_ gensupport.URLParams + ctx_ context.Context +} + +// Delete: Permanently deletes the default object ACL entry for the +// specified entity on the specified bucket. 
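+// The entity may be user-userId, user-emailAddress, group-groupId,
+// group-emailAddress, allUsers, or allAuthenticatedUsers.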
+func (r *DefaultObjectAccessControlsService) Delete(bucket string, entity string) *DefaultObjectAccessControlsDeleteCall { + c := &DefaultObjectAccessControlsDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.bucket = bucket + c.entity = entity + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *DefaultObjectAccessControlsDeleteCall) Fields(s ...googleapi.Field) *DefaultObjectAccessControlsDeleteCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *DefaultObjectAccessControlsDeleteCall) Context(ctx context.Context) *DefaultObjectAccessControlsDeleteCall { + c.ctx_ = ctx + return c +} + +func (c *DefaultObjectAccessControlsDeleteCall) doRequest(alt string) (*http.Response, error) { + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/defaultObjectAcl/{entity}") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("DELETE", urls, body) + googleapi.Expand(req.URL, map[string]string{ + "bucket": c.bucket, + "entity": c.entity, + }) + req.Header.Set("User-Agent", c.s.userAgent()) + if c.ctx_ != nil { + return ctxhttp.Do(c.ctx_, c.s.client, req) + } + return c.s.client.Do(req) +} + +// Do executes the "storage.defaultObjectAccessControls.delete" call. +func (c *DefaultObjectAccessControlsDeleteCall) Do(opts ...googleapi.CallOption) error { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if err != nil { + return err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return err + } + return nil + // { + // "description": "Permanently deletes the default object ACL entry for the specified entity on the specified bucket.", + // "httpMethod": "DELETE", + // "id": "storage.defaultObjectAccessControls.delete", + // "parameterOrder": [ + // "bucket", + // "entity" + // ], + // "parameters": { + // "bucket": { + // "description": "Name of a bucket.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "entity": { + // "description": "The entity holding the permission. Can be user-userId, user-emailAddress, group-groupId, group-emailAddress, allUsers, or allAuthenticatedUsers.", + // "location": "path", + // "required": true, + // "type": "string" + // } + // }, + // "path": "b/{bucket}/defaultObjectAcl/{entity}", + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/devstorage.full_control" + // ] + // } + +} + +// method id "storage.defaultObjectAccessControls.get": + +type DefaultObjectAccessControlsGetCall struct { + s *Service + bucket string + entity string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context +} + +// Get: Returns the default object ACL entry for the specified entity on +// the specified bucket. +func (r *DefaultObjectAccessControlsService) Get(bucket string, entity string) *DefaultObjectAccessControlsGetCall { + c := &DefaultObjectAccessControlsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.bucket = bucket + c.entity = entity + return c +} + +// Fields allows partial responses to be retrieved. 
See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *DefaultObjectAccessControlsGetCall) Fields(s ...googleapi.Field) *DefaultObjectAccessControlsGetCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *DefaultObjectAccessControlsGetCall) IfNoneMatch(entityTag string) *DefaultObjectAccessControlsGetCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *DefaultObjectAccessControlsGetCall) Context(ctx context.Context) *DefaultObjectAccessControlsGetCall { + c.ctx_ = ctx + return c +} + +func (c *DefaultObjectAccessControlsGetCall) doRequest(alt string) (*http.Response, error) { + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/defaultObjectAcl/{entity}") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("GET", urls, body) + googleapi.Expand(req.URL, map[string]string{ + "bucket": c.bucket, + "entity": c.entity, + }) + req.Header.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + req.Header.Set("If-None-Match", c.ifNoneMatch_) + } + if c.ctx_ != nil { + return ctxhttp.Do(c.ctx_, c.s.client, req) + } + return c.s.client.Do(req) +} + +// Do executes the "storage.defaultObjectAccessControls.get" call. +// Exactly one of *ObjectAccessControl or error will be non-nil. Any +// non-2xx status code is an error. Response headers are in either +// *ObjectAccessControl.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *DefaultObjectAccessControlsGetCall) Do(opts ...googleapi.CallOption) (*ObjectAccessControl, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &ObjectAccessControl{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Returns the default object ACL entry for the specified entity on the specified bucket.", + // "httpMethod": "GET", + // "id": "storage.defaultObjectAccessControls.get", + // "parameterOrder": [ + // "bucket", + // "entity" + // ], + // "parameters": { + // "bucket": { + // "description": "Name of a bucket.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "entity": { + // "description": "The entity holding the permission. 
Can be user-userId, user-emailAddress, group-groupId, group-emailAddress, allUsers, or allAuthenticatedUsers.", + // "location": "path", + // "required": true, + // "type": "string" + // } + // }, + // "path": "b/{bucket}/defaultObjectAcl/{entity}", + // "response": { + // "$ref": "ObjectAccessControl" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/devstorage.full_control" + // ] + // } + +} + +// method id "storage.defaultObjectAccessControls.insert": + +type DefaultObjectAccessControlsInsertCall struct { + s *Service + bucket string + objectaccesscontrol *ObjectAccessControl + urlParams_ gensupport.URLParams + ctx_ context.Context +} + +// Insert: Creates a new default object ACL entry on the specified +// bucket. +func (r *DefaultObjectAccessControlsService) Insert(bucket string, objectaccesscontrol *ObjectAccessControl) *DefaultObjectAccessControlsInsertCall { + c := &DefaultObjectAccessControlsInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.bucket = bucket + c.objectaccesscontrol = objectaccesscontrol + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *DefaultObjectAccessControlsInsertCall) Fields(s ...googleapi.Field) *DefaultObjectAccessControlsInsertCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *DefaultObjectAccessControlsInsertCall) Context(ctx context.Context) *DefaultObjectAccessControlsInsertCall { + c.ctx_ = ctx + return c +} + +func (c *DefaultObjectAccessControlsInsertCall) doRequest(alt string) (*http.Response, error) { + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.objectaccesscontrol) + if err != nil { + return nil, err + } + ctype := "application/json" + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/defaultObjectAcl") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("POST", urls, body) + googleapi.Expand(req.URL, map[string]string{ + "bucket": c.bucket, + }) + req.Header.Set("Content-Type", ctype) + req.Header.Set("User-Agent", c.s.userAgent()) + if c.ctx_ != nil { + return ctxhttp.Do(c.ctx_, c.s.client, req) + } + return c.s.client.Do(req) +} + +// Do executes the "storage.defaultObjectAccessControls.insert" call. +// Exactly one of *ObjectAccessControl or error will be non-nil. Any +// non-2xx status code is an error. Response headers are in either +// *ObjectAccessControl.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *DefaultObjectAccessControlsInsertCall) Do(opts ...googleapi.CallOption) (*ObjectAccessControl, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
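+	// doRequest JSON-encodes the ObjectAccessControl and POSTs it to
+	// b/{bucket}/defaultObjectAcl.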
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &ObjectAccessControl{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Creates a new default object ACL entry on the specified bucket.", + // "httpMethod": "POST", + // "id": "storage.defaultObjectAccessControls.insert", + // "parameterOrder": [ + // "bucket" + // ], + // "parameters": { + // "bucket": { + // "description": "Name of a bucket.", + // "location": "path", + // "required": true, + // "type": "string" + // } + // }, + // "path": "b/{bucket}/defaultObjectAcl", + // "request": { + // "$ref": "ObjectAccessControl" + // }, + // "response": { + // "$ref": "ObjectAccessControl" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/devstorage.full_control" + // ] + // } + +} + +// method id "storage.defaultObjectAccessControls.list": + +type DefaultObjectAccessControlsListCall struct { + s *Service + bucket string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context +} + +// List: Retrieves default object ACL entries on the specified bucket. +func (r *DefaultObjectAccessControlsService) List(bucket string) *DefaultObjectAccessControlsListCall { + c := &DefaultObjectAccessControlsListCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.bucket = bucket + return c +} + +// IfMetagenerationMatch sets the optional parameter +// "ifMetagenerationMatch": If present, only return default ACL listing +// if the bucket's current metageneration matches this value. +func (c *DefaultObjectAccessControlsListCall) IfMetagenerationMatch(ifMetagenerationMatch int64) *DefaultObjectAccessControlsListCall { + c.urlParams_.Set("ifMetagenerationMatch", fmt.Sprint(ifMetagenerationMatch)) + return c +} + +// IfMetagenerationNotMatch sets the optional parameter +// "ifMetagenerationNotMatch": If present, only return default ACL +// listing if the bucket's current metageneration does not match the +// given value. +func (c *DefaultObjectAccessControlsListCall) IfMetagenerationNotMatch(ifMetagenerationNotMatch int64) *DefaultObjectAccessControlsListCall { + c.urlParams_.Set("ifMetagenerationNotMatch", fmt.Sprint(ifMetagenerationNotMatch)) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *DefaultObjectAccessControlsListCall) Fields(s ...googleapi.Field) *DefaultObjectAccessControlsListCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. 
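+// The value is sent as the If-None-Match request header.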
+func (c *DefaultObjectAccessControlsListCall) IfNoneMatch(entityTag string) *DefaultObjectAccessControlsListCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *DefaultObjectAccessControlsListCall) Context(ctx context.Context) *DefaultObjectAccessControlsListCall { + c.ctx_ = ctx + return c +} + +func (c *DefaultObjectAccessControlsListCall) doRequest(alt string) (*http.Response, error) { + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/defaultObjectAcl") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("GET", urls, body) + googleapi.Expand(req.URL, map[string]string{ + "bucket": c.bucket, + }) + req.Header.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + req.Header.Set("If-None-Match", c.ifNoneMatch_) + } + if c.ctx_ != nil { + return ctxhttp.Do(c.ctx_, c.s.client, req) + } + return c.s.client.Do(req) +} + +// Do executes the "storage.defaultObjectAccessControls.list" call. +// Exactly one of *ObjectAccessControls or error will be non-nil. Any +// non-2xx status code is an error. Response headers are in either +// *ObjectAccessControls.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *DefaultObjectAccessControlsListCall) Do(opts ...googleapi.CallOption) (*ObjectAccessControls, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &ObjectAccessControls{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Retrieves default object ACL entries on the specified bucket.", + // "httpMethod": "GET", + // "id": "storage.defaultObjectAccessControls.list", + // "parameterOrder": [ + // "bucket" + // ], + // "parameters": { + // "bucket": { + // "description": "Name of a bucket.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "ifMetagenerationMatch": { + // "description": "If present, only return default ACL listing if the bucket's current metageneration matches this value.", + // "format": "int64", + // "location": "query", + // "type": "string" + // }, + // "ifMetagenerationNotMatch": { + // "description": "If present, only return default ACL listing if the bucket's current metageneration does not match the given value.", + // "format": "int64", + // "location": "query", + // "type": "string" + // } + // }, + // "path": "b/{bucket}/defaultObjectAcl", + // "response": { + // "$ref": "ObjectAccessControls" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/devstorage.full_control" + // ] + // } + +} + +// method id "storage.defaultObjectAccessControls.patch": + +type 
DefaultObjectAccessControlsPatchCall struct { + s *Service + bucket string + entity string + objectaccesscontrol *ObjectAccessControl + urlParams_ gensupport.URLParams + ctx_ context.Context +} + +// Patch: Updates a default object ACL entry on the specified bucket. +// This method supports patch semantics. +func (r *DefaultObjectAccessControlsService) Patch(bucket string, entity string, objectaccesscontrol *ObjectAccessControl) *DefaultObjectAccessControlsPatchCall { + c := &DefaultObjectAccessControlsPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.bucket = bucket + c.entity = entity + c.objectaccesscontrol = objectaccesscontrol + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *DefaultObjectAccessControlsPatchCall) Fields(s ...googleapi.Field) *DefaultObjectAccessControlsPatchCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *DefaultObjectAccessControlsPatchCall) Context(ctx context.Context) *DefaultObjectAccessControlsPatchCall { + c.ctx_ = ctx + return c +} + +func (c *DefaultObjectAccessControlsPatchCall) doRequest(alt string) (*http.Response, error) { + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.objectaccesscontrol) + if err != nil { + return nil, err + } + ctype := "application/json" + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/defaultObjectAcl/{entity}") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("PATCH", urls, body) + googleapi.Expand(req.URL, map[string]string{ + "bucket": c.bucket, + "entity": c.entity, + }) + req.Header.Set("Content-Type", ctype) + req.Header.Set("User-Agent", c.s.userAgent()) + if c.ctx_ != nil { + return ctxhttp.Do(c.ctx_, c.s.client, req) + } + return c.s.client.Do(req) +} + +// Do executes the "storage.defaultObjectAccessControls.patch" call. +// Exactly one of *ObjectAccessControl or error will be non-nil. Any +// non-2xx status code is an error. Response headers are in either +// *ObjectAccessControl.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *DefaultObjectAccessControlsPatchCall) Do(opts ...googleapi.CallOption) (*ObjectAccessControl, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &ObjectAccessControl{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Updates a default object ACL entry on the specified bucket. 
This method supports patch semantics.", + // "httpMethod": "PATCH", + // "id": "storage.defaultObjectAccessControls.patch", + // "parameterOrder": [ + // "bucket", + // "entity" + // ], + // "parameters": { + // "bucket": { + // "description": "Name of a bucket.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "entity": { + // "description": "The entity holding the permission. Can be user-userId, user-emailAddress, group-groupId, group-emailAddress, allUsers, or allAuthenticatedUsers.", + // "location": "path", + // "required": true, + // "type": "string" + // } + // }, + // "path": "b/{bucket}/defaultObjectAcl/{entity}", + // "request": { + // "$ref": "ObjectAccessControl" + // }, + // "response": { + // "$ref": "ObjectAccessControl" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/devstorage.full_control" + // ] + // } + +} + +// method id "storage.defaultObjectAccessControls.update": + +type DefaultObjectAccessControlsUpdateCall struct { + s *Service + bucket string + entity string + objectaccesscontrol *ObjectAccessControl + urlParams_ gensupport.URLParams + ctx_ context.Context +} + +// Update: Updates a default object ACL entry on the specified bucket. +func (r *DefaultObjectAccessControlsService) Update(bucket string, entity string, objectaccesscontrol *ObjectAccessControl) *DefaultObjectAccessControlsUpdateCall { + c := &DefaultObjectAccessControlsUpdateCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.bucket = bucket + c.entity = entity + c.objectaccesscontrol = objectaccesscontrol + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *DefaultObjectAccessControlsUpdateCall) Fields(s ...googleapi.Field) *DefaultObjectAccessControlsUpdateCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *DefaultObjectAccessControlsUpdateCall) Context(ctx context.Context) *DefaultObjectAccessControlsUpdateCall { + c.ctx_ = ctx + return c +} + +func (c *DefaultObjectAccessControlsUpdateCall) doRequest(alt string) (*http.Response, error) { + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.objectaccesscontrol) + if err != nil { + return nil, err + } + ctype := "application/json" + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/defaultObjectAcl/{entity}") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("PUT", urls, body) + googleapi.Expand(req.URL, map[string]string{ + "bucket": c.bucket, + "entity": c.entity, + }) + req.Header.Set("Content-Type", ctype) + req.Header.Set("User-Agent", c.s.userAgent()) + if c.ctx_ != nil { + return ctxhttp.Do(c.ctx_, c.s.client, req) + } + return c.s.client.Do(req) +} + +// Do executes the "storage.defaultObjectAccessControls.update" call. +// Exactly one of *ObjectAccessControl or error will be non-nil. Any +// non-2xx status code is an error. Response headers are in either +// *ObjectAccessControl.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. 
+func (c *DefaultObjectAccessControlsUpdateCall) Do(opts ...googleapi.CallOption) (*ObjectAccessControl, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &ObjectAccessControl{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Updates a default object ACL entry on the specified bucket.", + // "httpMethod": "PUT", + // "id": "storage.defaultObjectAccessControls.update", + // "parameterOrder": [ + // "bucket", + // "entity" + // ], + // "parameters": { + // "bucket": { + // "description": "Name of a bucket.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "entity": { + // "description": "The entity holding the permission. Can be user-userId, user-emailAddress, group-groupId, group-emailAddress, allUsers, or allAuthenticatedUsers.", + // "location": "path", + // "required": true, + // "type": "string" + // } + // }, + // "path": "b/{bucket}/defaultObjectAcl/{entity}", + // "request": { + // "$ref": "ObjectAccessControl" + // }, + // "response": { + // "$ref": "ObjectAccessControl" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/devstorage.full_control" + // ] + // } + +} + +// method id "storage.objectAccessControls.delete": + +type ObjectAccessControlsDeleteCall struct { + s *Service + bucket string + object string + entity string + urlParams_ gensupport.URLParams + ctx_ context.Context +} + +// Delete: Permanently deletes the ACL entry for the specified entity on +// the specified object. +func (r *ObjectAccessControlsService) Delete(bucket string, object string, entity string) *ObjectAccessControlsDeleteCall { + c := &ObjectAccessControlsDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.bucket = bucket + c.object = object + c.entity = entity + return c +} + +// Generation sets the optional parameter "generation": If present, +// selects a specific revision of this object (as opposed to the latest +// version, the default). +func (c *ObjectAccessControlsDeleteCall) Generation(generation int64) *ObjectAccessControlsDeleteCall { + c.urlParams_.Set("generation", fmt.Sprint(generation)) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *ObjectAccessControlsDeleteCall) Fields(s ...googleapi.Field) *ObjectAccessControlsDeleteCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. 
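+// The context is honored by issuing the request through ctxhttp.Do.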
+func (c *ObjectAccessControlsDeleteCall) Context(ctx context.Context) *ObjectAccessControlsDeleteCall { + c.ctx_ = ctx + return c +} + +func (c *ObjectAccessControlsDeleteCall) doRequest(alt string) (*http.Response, error) { + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/o/{object}/acl/{entity}") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("DELETE", urls, body) + googleapi.Expand(req.URL, map[string]string{ + "bucket": c.bucket, + "object": c.object, + "entity": c.entity, + }) + req.Header.Set("User-Agent", c.s.userAgent()) + if c.ctx_ != nil { + return ctxhttp.Do(c.ctx_, c.s.client, req) + } + return c.s.client.Do(req) +} + +// Do executes the "storage.objectAccessControls.delete" call. +func (c *ObjectAccessControlsDeleteCall) Do(opts ...googleapi.CallOption) error { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if err != nil { + return err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return err + } + return nil + // { + // "description": "Permanently deletes the ACL entry for the specified entity on the specified object.", + // "httpMethod": "DELETE", + // "id": "storage.objectAccessControls.delete", + // "parameterOrder": [ + // "bucket", + // "object", + // "entity" + // ], + // "parameters": { + // "bucket": { + // "description": "Name of a bucket.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "entity": { + // "description": "The entity holding the permission. Can be user-userId, user-emailAddress, group-groupId, group-emailAddress, allUsers, or allAuthenticatedUsers.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "generation": { + // "description": "If present, selects a specific revision of this object (as opposed to the latest version, the default).", + // "format": "int64", + // "location": "query", + // "type": "string" + // }, + // "object": { + // "description": "Name of the object. For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.", + // "location": "path", + // "required": true, + // "type": "string" + // } + // }, + // "path": "b/{bucket}/o/{object}/acl/{entity}", + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/devstorage.full_control" + // ] + // } + +} + +// method id "storage.objectAccessControls.get": + +type ObjectAccessControlsGetCall struct { + s *Service + bucket string + object string + entity string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context +} + +// Get: Returns the ACL entry for the specified entity on the specified +// object. +func (r *ObjectAccessControlsService) Get(bucket string, object string, entity string) *ObjectAccessControlsGetCall { + c := &ObjectAccessControlsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.bucket = bucket + c.object = object + c.entity = entity + return c +} + +// Generation sets the optional parameter "generation": If present, +// selects a specific revision of this object (as opposed to the latest +// version, the default). +func (c *ObjectAccessControlsGetCall) Generation(generation int64) *ObjectAccessControlsGetCall { + c.urlParams_.Set("generation", fmt.Sprint(generation)) + return c +} + +// Fields allows partial responses to be retrieved. 
See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *ObjectAccessControlsGetCall) Fields(s ...googleapi.Field) *ObjectAccessControlsGetCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *ObjectAccessControlsGetCall) IfNoneMatch(entityTag string) *ObjectAccessControlsGetCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *ObjectAccessControlsGetCall) Context(ctx context.Context) *ObjectAccessControlsGetCall { + c.ctx_ = ctx + return c +} + +func (c *ObjectAccessControlsGetCall) doRequest(alt string) (*http.Response, error) { + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/o/{object}/acl/{entity}") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("GET", urls, body) + googleapi.Expand(req.URL, map[string]string{ + "bucket": c.bucket, + "object": c.object, + "entity": c.entity, + }) + req.Header.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + req.Header.Set("If-None-Match", c.ifNoneMatch_) + } + if c.ctx_ != nil { + return ctxhttp.Do(c.ctx_, c.s.client, req) + } + return c.s.client.Do(req) +} + +// Do executes the "storage.objectAccessControls.get" call. +// Exactly one of *ObjectAccessControl or error will be non-nil. Any +// non-2xx status code is an error. Response headers are in either +// *ObjectAccessControl.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *ObjectAccessControlsGetCall) Do(opts ...googleapi.CallOption) (*ObjectAccessControl, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &ObjectAccessControl{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Returns the ACL entry for the specified entity on the specified object.", + // "httpMethod": "GET", + // "id": "storage.objectAccessControls.get", + // "parameterOrder": [ + // "bucket", + // "object", + // "entity" + // ], + // "parameters": { + // "bucket": { + // "description": "Name of a bucket.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "entity": { + // "description": "The entity holding the permission. 
Can be user-userId, user-emailAddress, group-groupId, group-emailAddress, allUsers, or allAuthenticatedUsers.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "generation": { + // "description": "If present, selects a specific revision of this object (as opposed to the latest version, the default).", + // "format": "int64", + // "location": "query", + // "type": "string" + // }, + // "object": { + // "description": "Name of the object. For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.", + // "location": "path", + // "required": true, + // "type": "string" + // } + // }, + // "path": "b/{bucket}/o/{object}/acl/{entity}", + // "response": { + // "$ref": "ObjectAccessControl" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/devstorage.full_control" + // ] + // } + +} + +// method id "storage.objectAccessControls.insert": + +type ObjectAccessControlsInsertCall struct { + s *Service + bucket string + object string + objectaccesscontrol *ObjectAccessControl + urlParams_ gensupport.URLParams + ctx_ context.Context +} + +// Insert: Creates a new ACL entry on the specified object. +func (r *ObjectAccessControlsService) Insert(bucket string, object string, objectaccesscontrol *ObjectAccessControl) *ObjectAccessControlsInsertCall { + c := &ObjectAccessControlsInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.bucket = bucket + c.object = object + c.objectaccesscontrol = objectaccesscontrol + return c +} + +// Generation sets the optional parameter "generation": If present, +// selects a specific revision of this object (as opposed to the latest +// version, the default). +func (c *ObjectAccessControlsInsertCall) Generation(generation int64) *ObjectAccessControlsInsertCall { + c.urlParams_.Set("generation", fmt.Sprint(generation)) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *ObjectAccessControlsInsertCall) Fields(s ...googleapi.Field) *ObjectAccessControlsInsertCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *ObjectAccessControlsInsertCall) Context(ctx context.Context) *ObjectAccessControlsInsertCall { + c.ctx_ = ctx + return c +} + +func (c *ObjectAccessControlsInsertCall) doRequest(alt string) (*http.Response, error) { + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.objectaccesscontrol) + if err != nil { + return nil, err + } + ctype := "application/json" + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/o/{object}/acl") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("POST", urls, body) + googleapi.Expand(req.URL, map[string]string{ + "bucket": c.bucket, + "object": c.object, + }) + req.Header.Set("Content-Type", ctype) + req.Header.Set("User-Agent", c.s.userAgent()) + if c.ctx_ != nil { + return ctxhttp.Do(c.ctx_, c.s.client, req) + } + return c.s.client.Do(req) +} + +// Do executes the "storage.objectAccessControls.insert" call. +// Exactly one of *ObjectAccessControl or error will be non-nil. Any +// non-2xx status code is an error. 
Response headers are in either +// *ObjectAccessControl.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *ObjectAccessControlsInsertCall) Do(opts ...googleapi.CallOption) (*ObjectAccessControl, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &ObjectAccessControl{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Creates a new ACL entry on the specified object.", + // "httpMethod": "POST", + // "id": "storage.objectAccessControls.insert", + // "parameterOrder": [ + // "bucket", + // "object" + // ], + // "parameters": { + // "bucket": { + // "description": "Name of a bucket.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "generation": { + // "description": "If present, selects a specific revision of this object (as opposed to the latest version, the default).", + // "format": "int64", + // "location": "query", + // "type": "string" + // }, + // "object": { + // "description": "Name of the object. For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.", + // "location": "path", + // "required": true, + // "type": "string" + // } + // }, + // "path": "b/{bucket}/o/{object}/acl", + // "request": { + // "$ref": "ObjectAccessControl" + // }, + // "response": { + // "$ref": "ObjectAccessControl" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/devstorage.full_control" + // ] + // } + +} + +// method id "storage.objectAccessControls.list": + +type ObjectAccessControlsListCall struct { + s *Service + bucket string + object string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context +} + +// List: Retrieves ACL entries on the specified object. +func (r *ObjectAccessControlsService) List(bucket string, object string) *ObjectAccessControlsListCall { + c := &ObjectAccessControlsListCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.bucket = bucket + c.object = object + return c +} + +// Generation sets the optional parameter "generation": If present, +// selects a specific revision of this object (as opposed to the latest +// version, the default). +func (c *ObjectAccessControlsListCall) Generation(generation int64) *ObjectAccessControlsListCall { + c.urlParams_.Set("generation", fmt.Sprint(generation)) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *ObjectAccessControlsListCall) Fields(s ...googleapi.Field) *ObjectAccessControlsListCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. 
This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *ObjectAccessControlsListCall) IfNoneMatch(entityTag string) *ObjectAccessControlsListCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *ObjectAccessControlsListCall) Context(ctx context.Context) *ObjectAccessControlsListCall { + c.ctx_ = ctx + return c +} + +func (c *ObjectAccessControlsListCall) doRequest(alt string) (*http.Response, error) { + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/o/{object}/acl") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("GET", urls, body) + googleapi.Expand(req.URL, map[string]string{ + "bucket": c.bucket, + "object": c.object, + }) + req.Header.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + req.Header.Set("If-None-Match", c.ifNoneMatch_) + } + if c.ctx_ != nil { + return ctxhttp.Do(c.ctx_, c.s.client, req) + } + return c.s.client.Do(req) +} + +// Do executes the "storage.objectAccessControls.list" call. +// Exactly one of *ObjectAccessControls or error will be non-nil. Any +// non-2xx status code is an error. Response headers are in either +// *ObjectAccessControls.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *ObjectAccessControlsListCall) Do(opts ...googleapi.CallOption) (*ObjectAccessControls, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &ObjectAccessControls{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Retrieves ACL entries on the specified object.", + // "httpMethod": "GET", + // "id": "storage.objectAccessControls.list", + // "parameterOrder": [ + // "bucket", + // "object" + // ], + // "parameters": { + // "bucket": { + // "description": "Name of a bucket.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "generation": { + // "description": "If present, selects a specific revision of this object (as opposed to the latest version, the default).", + // "format": "int64", + // "location": "query", + // "type": "string" + // }, + // "object": { + // "description": "Name of the object. 
For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.", + // "location": "path", + // "required": true, + // "type": "string" + // } + // }, + // "path": "b/{bucket}/o/{object}/acl", + // "response": { + // "$ref": "ObjectAccessControls" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/devstorage.full_control" + // ] + // } + +} + +// method id "storage.objectAccessControls.patch": + +type ObjectAccessControlsPatchCall struct { + s *Service + bucket string + object string + entity string + objectaccesscontrol *ObjectAccessControl + urlParams_ gensupport.URLParams + ctx_ context.Context +} + +// Patch: Updates an ACL entry on the specified object. This method +// supports patch semantics. +func (r *ObjectAccessControlsService) Patch(bucket string, object string, entity string, objectaccesscontrol *ObjectAccessControl) *ObjectAccessControlsPatchCall { + c := &ObjectAccessControlsPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.bucket = bucket + c.object = object + c.entity = entity + c.objectaccesscontrol = objectaccesscontrol + return c +} + +// Generation sets the optional parameter "generation": If present, +// selects a specific revision of this object (as opposed to the latest +// version, the default). +func (c *ObjectAccessControlsPatchCall) Generation(generation int64) *ObjectAccessControlsPatchCall { + c.urlParams_.Set("generation", fmt.Sprint(generation)) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *ObjectAccessControlsPatchCall) Fields(s ...googleapi.Field) *ObjectAccessControlsPatchCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *ObjectAccessControlsPatchCall) Context(ctx context.Context) *ObjectAccessControlsPatchCall { + c.ctx_ = ctx + return c +} + +func (c *ObjectAccessControlsPatchCall) doRequest(alt string) (*http.Response, error) { + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.objectaccesscontrol) + if err != nil { + return nil, err + } + ctype := "application/json" + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/o/{object}/acl/{entity}") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("PATCH", urls, body) + googleapi.Expand(req.URL, map[string]string{ + "bucket": c.bucket, + "object": c.object, + "entity": c.entity, + }) + req.Header.Set("Content-Type", ctype) + req.Header.Set("User-Agent", c.s.userAgent()) + if c.ctx_ != nil { + return ctxhttp.Do(c.ctx_, c.s.client, req) + } + return c.s.client.Do(req) +} + +// Do executes the "storage.objectAccessControls.patch" call. +// Exactly one of *ObjectAccessControl or error will be non-nil. Any +// non-2xx status code is an error. Response headers are in either +// *ObjectAccessControl.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *ObjectAccessControlsPatchCall) Do(opts ...googleapi.CallOption) (*ObjectAccessControl, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
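+	// Non-2xx responses are converted into errors by googleapi.CheckResponse below.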
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &ObjectAccessControl{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Updates an ACL entry on the specified object. This method supports patch semantics.", + // "httpMethod": "PATCH", + // "id": "storage.objectAccessControls.patch", + // "parameterOrder": [ + // "bucket", + // "object", + // "entity" + // ], + // "parameters": { + // "bucket": { + // "description": "Name of a bucket.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "entity": { + // "description": "The entity holding the permission. Can be user-userId, user-emailAddress, group-groupId, group-emailAddress, allUsers, or allAuthenticatedUsers.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "generation": { + // "description": "If present, selects a specific revision of this object (as opposed to the latest version, the default).", + // "format": "int64", + // "location": "query", + // "type": "string" + // }, + // "object": { + // "description": "Name of the object. For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.", + // "location": "path", + // "required": true, + // "type": "string" + // } + // }, + // "path": "b/{bucket}/o/{object}/acl/{entity}", + // "request": { + // "$ref": "ObjectAccessControl" + // }, + // "response": { + // "$ref": "ObjectAccessControl" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/devstorage.full_control" + // ] + // } + +} + +// method id "storage.objectAccessControls.update": + +type ObjectAccessControlsUpdateCall struct { + s *Service + bucket string + object string + entity string + objectaccesscontrol *ObjectAccessControl + urlParams_ gensupport.URLParams + ctx_ context.Context +} + +// Update: Updates an ACL entry on the specified object. +func (r *ObjectAccessControlsService) Update(bucket string, object string, entity string, objectaccesscontrol *ObjectAccessControl) *ObjectAccessControlsUpdateCall { + c := &ObjectAccessControlsUpdateCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.bucket = bucket + c.object = object + c.entity = entity + c.objectaccesscontrol = objectaccesscontrol + return c +} + +// Generation sets the optional parameter "generation": If present, +// selects a specific revision of this object (as opposed to the latest +// version, the default). +func (c *ObjectAccessControlsUpdateCall) Generation(generation int64) *ObjectAccessControlsUpdateCall { + c.urlParams_.Set("generation", fmt.Sprint(generation)) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. 
+func (c *ObjectAccessControlsUpdateCall) Fields(s ...googleapi.Field) *ObjectAccessControlsUpdateCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *ObjectAccessControlsUpdateCall) Context(ctx context.Context) *ObjectAccessControlsUpdateCall { + c.ctx_ = ctx + return c +} + +func (c *ObjectAccessControlsUpdateCall) doRequest(alt string) (*http.Response, error) { + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.objectaccesscontrol) + if err != nil { + return nil, err + } + ctype := "application/json" + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/o/{object}/acl/{entity}") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("PUT", urls, body) + googleapi.Expand(req.URL, map[string]string{ + "bucket": c.bucket, + "object": c.object, + "entity": c.entity, + }) + req.Header.Set("Content-Type", ctype) + req.Header.Set("User-Agent", c.s.userAgent()) + if c.ctx_ != nil { + return ctxhttp.Do(c.ctx_, c.s.client, req) + } + return c.s.client.Do(req) +} + +// Do executes the "storage.objectAccessControls.update" call. +// Exactly one of *ObjectAccessControl or error will be non-nil. Any +// non-2xx status code is an error. Response headers are in either +// *ObjectAccessControl.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *ObjectAccessControlsUpdateCall) Do(opts ...googleapi.CallOption) (*ObjectAccessControl, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &ObjectAccessControl{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Updates an ACL entry on the specified object.", + // "httpMethod": "PUT", + // "id": "storage.objectAccessControls.update", + // "parameterOrder": [ + // "bucket", + // "object", + // "entity" + // ], + // "parameters": { + // "bucket": { + // "description": "Name of a bucket.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "entity": { + // "description": "The entity holding the permission. Can be user-userId, user-emailAddress, group-groupId, group-emailAddress, allUsers, or allAuthenticatedUsers.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "generation": { + // "description": "If present, selects a specific revision of this object (as opposed to the latest version, the default).", + // "format": "int64", + // "location": "query", + // "type": "string" + // }, + // "object": { + // "description": "Name of the object. 
For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.", + // "location": "path", + // "required": true, + // "type": "string" + // } + // }, + // "path": "b/{bucket}/o/{object}/acl/{entity}", + // "request": { + // "$ref": "ObjectAccessControl" + // }, + // "response": { + // "$ref": "ObjectAccessControl" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/devstorage.full_control" + // ] + // } + +} + +// method id "storage.objects.compose": + +type ObjectsComposeCall struct { + s *Service + destinationBucket string + destinationObject string + composerequest *ComposeRequest + urlParams_ gensupport.URLParams + ctx_ context.Context +} + +// Compose: Concatenates a list of existing objects into a new object in +// the same bucket. +func (r *ObjectsService) Compose(destinationBucket string, destinationObject string, composerequest *ComposeRequest) *ObjectsComposeCall { + c := &ObjectsComposeCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.destinationBucket = destinationBucket + c.destinationObject = destinationObject + c.composerequest = composerequest + return c +} + +// DestinationPredefinedAcl sets the optional parameter +// "destinationPredefinedAcl": Apply a predefined set of access controls +// to the destination object. +// +// Possible values: +// "authenticatedRead" - Object owner gets OWNER access, and +// allAuthenticatedUsers get READER access. +// "bucketOwnerFullControl" - Object owner gets OWNER access, and +// project team owners get OWNER access. +// "bucketOwnerRead" - Object owner gets OWNER access, and project +// team owners get READER access. +// "private" - Object owner gets OWNER access. +// "projectPrivate" - Object owner gets OWNER access, and project team +// members get access according to their roles. +// "publicRead" - Object owner gets OWNER access, and allUsers get +// READER access. +func (c *ObjectsComposeCall) DestinationPredefinedAcl(destinationPredefinedAcl string) *ObjectsComposeCall { + c.urlParams_.Set("destinationPredefinedAcl", destinationPredefinedAcl) + return c +} + +// IfGenerationMatch sets the optional parameter "ifGenerationMatch": +// Makes the operation conditional on whether the object's current +// generation matches the given value. +func (c *ObjectsComposeCall) IfGenerationMatch(ifGenerationMatch int64) *ObjectsComposeCall { + c.urlParams_.Set("ifGenerationMatch", fmt.Sprint(ifGenerationMatch)) + return c +} + +// IfMetagenerationMatch sets the optional parameter +// "ifMetagenerationMatch": Makes the operation conditional on whether +// the object's current metageneration matches the given value. +func (c *ObjectsComposeCall) IfMetagenerationMatch(ifMetagenerationMatch int64) *ObjectsComposeCall { + c.urlParams_.Set("ifMetagenerationMatch", fmt.Sprint(ifMetagenerationMatch)) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *ObjectsComposeCall) Fields(s ...googleapi.Field) *ObjectsComposeCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do and Download +// methods. Any pending HTTP request will be aborted if the provided +// context is canceled. 
+func (c *ObjectsComposeCall) Context(ctx context.Context) *ObjectsComposeCall { + c.ctx_ = ctx + return c +} + +func (c *ObjectsComposeCall) doRequest(alt string) (*http.Response, error) { + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.composerequest) + if err != nil { + return nil, err + } + ctype := "application/json" + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "b/{destinationBucket}/o/{destinationObject}/compose") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("POST", urls, body) + googleapi.Expand(req.URL, map[string]string{ + "destinationBucket": c.destinationBucket, + "destinationObject": c.destinationObject, + }) + req.Header.Set("Content-Type", ctype) + req.Header.Set("User-Agent", c.s.userAgent()) + if c.ctx_ != nil { + return ctxhttp.Do(c.ctx_, c.s.client, req) + } + return c.s.client.Do(req) +} + +// Download fetches the API endpoint's "media" value, instead of the normal +// API response value. If the returned error is nil, the Response is guaranteed to +// have a 2xx status code. Callers must close the Response.Body as usual. +func (c *ObjectsComposeCall) Download(opts ...googleapi.CallOption) (*http.Response, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("media") + if err != nil { + return nil, err + } + if err := googleapi.CheckMediaResponse(res); err != nil { + res.Body.Close() + return nil, err + } + return res, nil +} + +// Do executes the "storage.objects.compose" call. +// Exactly one of *Object or error will be non-nil. Any non-2xx status +// code is an error. Response headers are in either +// *Object.ServerResponse.Header or (if a response was returned at all) +// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to +// check whether the returned error was because http.StatusNotModified +// was returned. +func (c *ObjectsComposeCall) Do(opts ...googleapi.CallOption) (*Object, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Object{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Concatenates a list of existing objects into a new object in the same bucket.", + // "httpMethod": "POST", + // "id": "storage.objects.compose", + // "parameterOrder": [ + // "destinationBucket", + // "destinationObject" + // ], + // "parameters": { + // "destinationBucket": { + // "description": "Name of the bucket in which to store the new object.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "destinationObject": { + // "description": "Name of the new object. 
For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "destinationPredefinedAcl": { + // "description": "Apply a predefined set of access controls to the destination object.", + // "enum": [ + // "authenticatedRead", + // "bucketOwnerFullControl", + // "bucketOwnerRead", + // "private", + // "projectPrivate", + // "publicRead" + // ], + // "enumDescriptions": [ + // "Object owner gets OWNER access, and allAuthenticatedUsers get READER access.", + // "Object owner gets OWNER access, and project team owners get OWNER access.", + // "Object owner gets OWNER access, and project team owners get READER access.", + // "Object owner gets OWNER access.", + // "Object owner gets OWNER access, and project team members get access according to their roles.", + // "Object owner gets OWNER access, and allUsers get READER access." + // ], + // "location": "query", + // "type": "string" + // }, + // "ifGenerationMatch": { + // "description": "Makes the operation conditional on whether the object's current generation matches the given value.", + // "format": "int64", + // "location": "query", + // "type": "string" + // }, + // "ifMetagenerationMatch": { + // "description": "Makes the operation conditional on whether the object's current metageneration matches the given value.", + // "format": "int64", + // "location": "query", + // "type": "string" + // } + // }, + // "path": "b/{destinationBucket}/o/{destinationObject}/compose", + // "request": { + // "$ref": "ComposeRequest" + // }, + // "response": { + // "$ref": "Object" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/devstorage.full_control", + // "https://www.googleapis.com/auth/devstorage.read_write" + // ], + // "supportsMediaDownload": true, + // "useMediaDownloadService": true + // } + +} + +// method id "storage.objects.copy": + +type ObjectsCopyCall struct { + s *Service + sourceBucket string + sourceObject string + destinationBucket string + destinationObject string + object *Object + urlParams_ gensupport.URLParams + ctx_ context.Context +} + +// Copy: Copies a source object to a destination object. Optionally +// overrides metadata. +func (r *ObjectsService) Copy(sourceBucket string, sourceObject string, destinationBucket string, destinationObject string, object *Object) *ObjectsCopyCall { + c := &ObjectsCopyCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.sourceBucket = sourceBucket + c.sourceObject = sourceObject + c.destinationBucket = destinationBucket + c.destinationObject = destinationObject + c.object = object + return c +} + +// DestinationPredefinedAcl sets the optional parameter +// "destinationPredefinedAcl": Apply a predefined set of access controls +// to the destination object. +// +// Possible values: +// "authenticatedRead" - Object owner gets OWNER access, and +// allAuthenticatedUsers get READER access. +// "bucketOwnerFullControl" - Object owner gets OWNER access, and +// project team owners get OWNER access. +// "bucketOwnerRead" - Object owner gets OWNER access, and project +// team owners get READER access. +// "private" - Object owner gets OWNER access. +// "projectPrivate" - Object owner gets OWNER access, and project team +// members get access according to their roles. +// "publicRead" - Object owner gets OWNER access, and allUsers get +// READER access. 
+func (c *ObjectsCopyCall) DestinationPredefinedAcl(destinationPredefinedAcl string) *ObjectsCopyCall { + c.urlParams_.Set("destinationPredefinedAcl", destinationPredefinedAcl) + return c +} + +// IfGenerationMatch sets the optional parameter "ifGenerationMatch": +// Makes the operation conditional on whether the destination object's +// current generation matches the given value. +func (c *ObjectsCopyCall) IfGenerationMatch(ifGenerationMatch int64) *ObjectsCopyCall { + c.urlParams_.Set("ifGenerationMatch", fmt.Sprint(ifGenerationMatch)) + return c +} + +// IfGenerationNotMatch sets the optional parameter +// "ifGenerationNotMatch": Makes the operation conditional on whether +// the destination object's current generation does not match the given +// value. +func (c *ObjectsCopyCall) IfGenerationNotMatch(ifGenerationNotMatch int64) *ObjectsCopyCall { + c.urlParams_.Set("ifGenerationNotMatch", fmt.Sprint(ifGenerationNotMatch)) + return c +} + +// IfMetagenerationMatch sets the optional parameter +// "ifMetagenerationMatch": Makes the operation conditional on whether +// the destination object's current metageneration matches the given +// value. +func (c *ObjectsCopyCall) IfMetagenerationMatch(ifMetagenerationMatch int64) *ObjectsCopyCall { + c.urlParams_.Set("ifMetagenerationMatch", fmt.Sprint(ifMetagenerationMatch)) + return c +} + +// IfMetagenerationNotMatch sets the optional parameter +// "ifMetagenerationNotMatch": Makes the operation conditional on +// whether the destination object's current metageneration does not +// match the given value. +func (c *ObjectsCopyCall) IfMetagenerationNotMatch(ifMetagenerationNotMatch int64) *ObjectsCopyCall { + c.urlParams_.Set("ifMetagenerationNotMatch", fmt.Sprint(ifMetagenerationNotMatch)) + return c +} + +// IfSourceGenerationMatch sets the optional parameter +// "ifSourceGenerationMatch": Makes the operation conditional on whether +// the source object's generation matches the given value. +func (c *ObjectsCopyCall) IfSourceGenerationMatch(ifSourceGenerationMatch int64) *ObjectsCopyCall { + c.urlParams_.Set("ifSourceGenerationMatch", fmt.Sprint(ifSourceGenerationMatch)) + return c +} + +// IfSourceGenerationNotMatch sets the optional parameter +// "ifSourceGenerationNotMatch": Makes the operation conditional on +// whether the source object's generation does not match the given +// value. +func (c *ObjectsCopyCall) IfSourceGenerationNotMatch(ifSourceGenerationNotMatch int64) *ObjectsCopyCall { + c.urlParams_.Set("ifSourceGenerationNotMatch", fmt.Sprint(ifSourceGenerationNotMatch)) + return c +} + +// IfSourceMetagenerationMatch sets the optional parameter +// "ifSourceMetagenerationMatch": Makes the operation conditional on +// whether the source object's current metageneration matches the given +// value. +func (c *ObjectsCopyCall) IfSourceMetagenerationMatch(ifSourceMetagenerationMatch int64) *ObjectsCopyCall { + c.urlParams_.Set("ifSourceMetagenerationMatch", fmt.Sprint(ifSourceMetagenerationMatch)) + return c +} + +// IfSourceMetagenerationNotMatch sets the optional parameter +// "ifSourceMetagenerationNotMatch": Makes the operation conditional on +// whether the source object's current metageneration does not match the +// given value. 
+func (c *ObjectsCopyCall) IfSourceMetagenerationNotMatch(ifSourceMetagenerationNotMatch int64) *ObjectsCopyCall { + c.urlParams_.Set("ifSourceMetagenerationNotMatch", fmt.Sprint(ifSourceMetagenerationNotMatch)) + return c +} + +// Projection sets the optional parameter "projection": Set of +// properties to return. Defaults to noAcl, unless the object resource +// specifies the acl property, when it defaults to full. +// +// Possible values: +// "full" - Include all properties. +// "noAcl" - Omit the acl property. +func (c *ObjectsCopyCall) Projection(projection string) *ObjectsCopyCall { + c.urlParams_.Set("projection", projection) + return c +} + +// SourceGeneration sets the optional parameter "sourceGeneration": If +// present, selects a specific revision of the source object (as opposed +// to the latest version, the default). +func (c *ObjectsCopyCall) SourceGeneration(sourceGeneration int64) *ObjectsCopyCall { + c.urlParams_.Set("sourceGeneration", fmt.Sprint(sourceGeneration)) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *ObjectsCopyCall) Fields(s ...googleapi.Field) *ObjectsCopyCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do and Download +// methods. Any pending HTTP request will be aborted if the provided +// context is canceled. +func (c *ObjectsCopyCall) Context(ctx context.Context) *ObjectsCopyCall { + c.ctx_ = ctx + return c +} + +func (c *ObjectsCopyCall) doRequest(alt string) (*http.Response, error) { + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.object) + if err != nil { + return nil, err + } + ctype := "application/json" + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "b/{sourceBucket}/o/{sourceObject}/copyTo/b/{destinationBucket}/o/{destinationObject}") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("POST", urls, body) + googleapi.Expand(req.URL, map[string]string{ + "sourceBucket": c.sourceBucket, + "sourceObject": c.sourceObject, + "destinationBucket": c.destinationBucket, + "destinationObject": c.destinationObject, + }) + req.Header.Set("Content-Type", ctype) + req.Header.Set("User-Agent", c.s.userAgent()) + if c.ctx_ != nil { + return ctxhttp.Do(c.ctx_, c.s.client, req) + } + return c.s.client.Do(req) +} + +// Download fetches the API endpoint's "media" value, instead of the normal +// API response value. If the returned error is nil, the Response is guaranteed to +// have a 2xx status code. Callers must close the Response.Body as usual. +func (c *ObjectsCopyCall) Download(opts ...googleapi.CallOption) (*http.Response, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("media") + if err != nil { + return nil, err + } + if err := googleapi.CheckMediaResponse(res); err != nil { + res.Body.Close() + return nil, err + } + return res, nil +} + +// Do executes the "storage.objects.copy" call. +// Exactly one of *Object or error will be non-nil. Any non-2xx status +// code is an error. Response headers are in either +// *Object.ServerResponse.Header or (if a response was returned at all) +// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to +// check whether the returned error was because http.StatusNotModified +// was returned. 
+func (c *ObjectsCopyCall) Do(opts ...googleapi.CallOption) (*Object, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Object{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Copies a source object to a destination object. Optionally overrides metadata.", + // "httpMethod": "POST", + // "id": "storage.objects.copy", + // "parameterOrder": [ + // "sourceBucket", + // "sourceObject", + // "destinationBucket", + // "destinationObject" + // ], + // "parameters": { + // "destinationBucket": { + // "description": "Name of the bucket in which to store the new object. Overrides the provided object metadata's bucket value, if any.For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "destinationObject": { + // "description": "Name of the new object. Required when the object metadata is not otherwise provided. Overrides the object metadata's name value, if any.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "destinationPredefinedAcl": { + // "description": "Apply a predefined set of access controls to the destination object.", + // "enum": [ + // "authenticatedRead", + // "bucketOwnerFullControl", + // "bucketOwnerRead", + // "private", + // "projectPrivate", + // "publicRead" + // ], + // "enumDescriptions": [ + // "Object owner gets OWNER access, and allAuthenticatedUsers get READER access.", + // "Object owner gets OWNER access, and project team owners get OWNER access.", + // "Object owner gets OWNER access, and project team owners get READER access.", + // "Object owner gets OWNER access.", + // "Object owner gets OWNER access, and project team members get access according to their roles.", + // "Object owner gets OWNER access, and allUsers get READER access." 
+ // ], + // "location": "query", + // "type": "string" + // }, + // "ifGenerationMatch": { + // "description": "Makes the operation conditional on whether the destination object's current generation matches the given value.", + // "format": "int64", + // "location": "query", + // "type": "string" + // }, + // "ifGenerationNotMatch": { + // "description": "Makes the operation conditional on whether the destination object's current generation does not match the given value.", + // "format": "int64", + // "location": "query", + // "type": "string" + // }, + // "ifMetagenerationMatch": { + // "description": "Makes the operation conditional on whether the destination object's current metageneration matches the given value.", + // "format": "int64", + // "location": "query", + // "type": "string" + // }, + // "ifMetagenerationNotMatch": { + // "description": "Makes the operation conditional on whether the destination object's current metageneration does not match the given value.", + // "format": "int64", + // "location": "query", + // "type": "string" + // }, + // "ifSourceGenerationMatch": { + // "description": "Makes the operation conditional on whether the source object's generation matches the given value.", + // "format": "int64", + // "location": "query", + // "type": "string" + // }, + // "ifSourceGenerationNotMatch": { + // "description": "Makes the operation conditional on whether the source object's generation does not match the given value.", + // "format": "int64", + // "location": "query", + // "type": "string" + // }, + // "ifSourceMetagenerationMatch": { + // "description": "Makes the operation conditional on whether the source object's current metageneration matches the given value.", + // "format": "int64", + // "location": "query", + // "type": "string" + // }, + // "ifSourceMetagenerationNotMatch": { + // "description": "Makes the operation conditional on whether the source object's current metageneration does not match the given value.", + // "format": "int64", + // "location": "query", + // "type": "string" + // }, + // "projection": { + // "description": "Set of properties to return. Defaults to noAcl, unless the object resource specifies the acl property, when it defaults to full.", + // "enum": [ + // "full", + // "noAcl" + // ], + // "enumDescriptions": [ + // "Include all properties.", + // "Omit the acl property." + // ], + // "location": "query", + // "type": "string" + // }, + // "sourceBucket": { + // "description": "Name of the bucket in which to find the source object.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "sourceGeneration": { + // "description": "If present, selects a specific revision of the source object (as opposed to the latest version, the default).", + // "format": "int64", + // "location": "query", + // "type": "string" + // }, + // "sourceObject": { + // "description": "Name of the source object. 
For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.", + // "location": "path", + // "required": true, + // "type": "string" + // } + // }, + // "path": "b/{sourceBucket}/o/{sourceObject}/copyTo/b/{destinationBucket}/o/{destinationObject}", + // "request": { + // "$ref": "Object" + // }, + // "response": { + // "$ref": "Object" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/devstorage.full_control", + // "https://www.googleapis.com/auth/devstorage.read_write" + // ], + // "supportsMediaDownload": true, + // "useMediaDownloadService": true + // } + +} + +// method id "storage.objects.delete": + +type ObjectsDeleteCall struct { + s *Service + bucket string + object string + urlParams_ gensupport.URLParams + ctx_ context.Context +} + +// Delete: Deletes an object and its metadata. Deletions are permanent +// if versioning is not enabled for the bucket, or if the generation +// parameter is used. +func (r *ObjectsService) Delete(bucket string, object string) *ObjectsDeleteCall { + c := &ObjectsDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.bucket = bucket + c.object = object + return c +} + +// Generation sets the optional parameter "generation": If present, +// permanently deletes a specific revision of this object (as opposed to +// the latest version, the default). +func (c *ObjectsDeleteCall) Generation(generation int64) *ObjectsDeleteCall { + c.urlParams_.Set("generation", fmt.Sprint(generation)) + return c +} + +// IfGenerationMatch sets the optional parameter "ifGenerationMatch": +// Makes the operation conditional on whether the object's current +// generation matches the given value. +func (c *ObjectsDeleteCall) IfGenerationMatch(ifGenerationMatch int64) *ObjectsDeleteCall { + c.urlParams_.Set("ifGenerationMatch", fmt.Sprint(ifGenerationMatch)) + return c +} + +// IfGenerationNotMatch sets the optional parameter +// "ifGenerationNotMatch": Makes the operation conditional on whether +// the object's current generation does not match the given value. +func (c *ObjectsDeleteCall) IfGenerationNotMatch(ifGenerationNotMatch int64) *ObjectsDeleteCall { + c.urlParams_.Set("ifGenerationNotMatch", fmt.Sprint(ifGenerationNotMatch)) + return c +} + +// IfMetagenerationMatch sets the optional parameter +// "ifMetagenerationMatch": Makes the operation conditional on whether +// the object's current metageneration matches the given value. +func (c *ObjectsDeleteCall) IfMetagenerationMatch(ifMetagenerationMatch int64) *ObjectsDeleteCall { + c.urlParams_.Set("ifMetagenerationMatch", fmt.Sprint(ifMetagenerationMatch)) + return c +} + +// IfMetagenerationNotMatch sets the optional parameter +// "ifMetagenerationNotMatch": Makes the operation conditional on +// whether the object's current metageneration does not match the given +// value. +func (c *ObjectsDeleteCall) IfMetagenerationNotMatch(ifMetagenerationNotMatch int64) *ObjectsDeleteCall { + c.urlParams_.Set("ifMetagenerationNotMatch", fmt.Sprint(ifMetagenerationNotMatch)) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *ObjectsDeleteCall) Fields(s ...googleapi.Field) *ObjectsDeleteCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. 
Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *ObjectsDeleteCall) Context(ctx context.Context) *ObjectsDeleteCall { + c.ctx_ = ctx + return c +} + +func (c *ObjectsDeleteCall) doRequest(alt string) (*http.Response, error) { + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/o/{object}") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("DELETE", urls, body) + googleapi.Expand(req.URL, map[string]string{ + "bucket": c.bucket, + "object": c.object, + }) + req.Header.Set("User-Agent", c.s.userAgent()) + if c.ctx_ != nil { + return ctxhttp.Do(c.ctx_, c.s.client, req) + } + return c.s.client.Do(req) +} + +// Do executes the "storage.objects.delete" call. +func (c *ObjectsDeleteCall) Do(opts ...googleapi.CallOption) error { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if err != nil { + return err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return err + } + return nil + // { + // "description": "Deletes an object and its metadata. Deletions are permanent if versioning is not enabled for the bucket, or if the generation parameter is used.", + // "httpMethod": "DELETE", + // "id": "storage.objects.delete", + // "parameterOrder": [ + // "bucket", + // "object" + // ], + // "parameters": { + // "bucket": { + // "description": "Name of the bucket in which the object resides.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "generation": { + // "description": "If present, permanently deletes a specific revision of this object (as opposed to the latest version, the default).", + // "format": "int64", + // "location": "query", + // "type": "string" + // }, + // "ifGenerationMatch": { + // "description": "Makes the operation conditional on whether the object's current generation matches the given value.", + // "format": "int64", + // "location": "query", + // "type": "string" + // }, + // "ifGenerationNotMatch": { + // "description": "Makes the operation conditional on whether the object's current generation does not match the given value.", + // "format": "int64", + // "location": "query", + // "type": "string" + // }, + // "ifMetagenerationMatch": { + // "description": "Makes the operation conditional on whether the object's current metageneration matches the given value.", + // "format": "int64", + // "location": "query", + // "type": "string" + // }, + // "ifMetagenerationNotMatch": { + // "description": "Makes the operation conditional on whether the object's current metageneration does not match the given value.", + // "format": "int64", + // "location": "query", + // "type": "string" + // }, + // "object": { + // "description": "Name of the object. For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.", + // "location": "path", + // "required": true, + // "type": "string" + // } + // }, + // "path": "b/{bucket}/o/{object}", + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/devstorage.full_control", + // "https://www.googleapis.com/auth/devstorage.read_write" + // ] + // } + +} + +// method id "storage.objects.get": + +type ObjectsGetCall struct { + s *Service + bucket string + object string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context +} + +// Get: Retrieves an object or its metadata. 
+func (r *ObjectsService) Get(bucket string, object string) *ObjectsGetCall { + c := &ObjectsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.bucket = bucket + c.object = object + return c +} + +// Generation sets the optional parameter "generation": If present, +// selects a specific revision of this object (as opposed to the latest +// version, the default). +func (c *ObjectsGetCall) Generation(generation int64) *ObjectsGetCall { + c.urlParams_.Set("generation", fmt.Sprint(generation)) + return c +} + +// IfGenerationMatch sets the optional parameter "ifGenerationMatch": +// Makes the operation conditional on whether the object's generation +// matches the given value. +func (c *ObjectsGetCall) IfGenerationMatch(ifGenerationMatch int64) *ObjectsGetCall { + c.urlParams_.Set("ifGenerationMatch", fmt.Sprint(ifGenerationMatch)) + return c +} + +// IfGenerationNotMatch sets the optional parameter +// "ifGenerationNotMatch": Makes the operation conditional on whether +// the object's generation does not match the given value. +func (c *ObjectsGetCall) IfGenerationNotMatch(ifGenerationNotMatch int64) *ObjectsGetCall { + c.urlParams_.Set("ifGenerationNotMatch", fmt.Sprint(ifGenerationNotMatch)) + return c +} + +// IfMetagenerationMatch sets the optional parameter +// "ifMetagenerationMatch": Makes the operation conditional on whether +// the object's current metageneration matches the given value. +func (c *ObjectsGetCall) IfMetagenerationMatch(ifMetagenerationMatch int64) *ObjectsGetCall { + c.urlParams_.Set("ifMetagenerationMatch", fmt.Sprint(ifMetagenerationMatch)) + return c +} + +// IfMetagenerationNotMatch sets the optional parameter +// "ifMetagenerationNotMatch": Makes the operation conditional on +// whether the object's current metageneration does not match the given +// value. +func (c *ObjectsGetCall) IfMetagenerationNotMatch(ifMetagenerationNotMatch int64) *ObjectsGetCall { + c.urlParams_.Set("ifMetagenerationNotMatch", fmt.Sprint(ifMetagenerationNotMatch)) + return c +} + +// Projection sets the optional parameter "projection": Set of +// properties to return. Defaults to noAcl. +// +// Possible values: +// "full" - Include all properties. +// "noAcl" - Omit the acl property. +func (c *ObjectsGetCall) Projection(projection string) *ObjectsGetCall { + c.urlParams_.Set("projection", projection) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *ObjectsGetCall) Fields(s ...googleapi.Field) *ObjectsGetCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *ObjectsGetCall) IfNoneMatch(entityTag string) *ObjectsGetCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do and Download +// methods. Any pending HTTP request will be aborted if the provided +// context is canceled. 
+func (c *ObjectsGetCall) Context(ctx context.Context) *ObjectsGetCall { + c.ctx_ = ctx + return c +} + +func (c *ObjectsGetCall) doRequest(alt string) (*http.Response, error) { + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/o/{object}") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("GET", urls, body) + googleapi.Expand(req.URL, map[string]string{ + "bucket": c.bucket, + "object": c.object, + }) + req.Header.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + req.Header.Set("If-None-Match", c.ifNoneMatch_) + } + if c.ctx_ != nil { + return ctxhttp.Do(c.ctx_, c.s.client, req) + } + return c.s.client.Do(req) +} + +// Download fetches the API endpoint's "media" value, instead of the normal +// API response value. If the returned error is nil, the Response is guaranteed to +// have a 2xx status code. Callers must close the Response.Body as usual. +func (c *ObjectsGetCall) Download(opts ...googleapi.CallOption) (*http.Response, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("media") + if err != nil { + return nil, err + } + if err := googleapi.CheckMediaResponse(res); err != nil { + res.Body.Close() + return nil, err + } + return res, nil +} + +// Do executes the "storage.objects.get" call. +// Exactly one of *Object or error will be non-nil. Any non-2xx status +// code is an error. Response headers are in either +// *Object.ServerResponse.Header or (if a response was returned at all) +// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to +// check whether the returned error was because http.StatusNotModified +// was returned. +func (c *ObjectsGetCall) Do(opts ...googleapi.CallOption) (*Object, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
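+	// A 304 Not Modified response (possible when IfNoneMatch was set) is
+	// surfaced as a *googleapi.Error; callers can detect it with
+	// googleapi.IsNotModified.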
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Object{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Retrieves an object or its metadata.", + // "httpMethod": "GET", + // "id": "storage.objects.get", + // "parameterOrder": [ + // "bucket", + // "object" + // ], + // "parameters": { + // "bucket": { + // "description": "Name of the bucket in which the object resides.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "generation": { + // "description": "If present, selects a specific revision of this object (as opposed to the latest version, the default).", + // "format": "int64", + // "location": "query", + // "type": "string" + // }, + // "ifGenerationMatch": { + // "description": "Makes the operation conditional on whether the object's generation matches the given value.", + // "format": "int64", + // "location": "query", + // "type": "string" + // }, + // "ifGenerationNotMatch": { + // "description": "Makes the operation conditional on whether the object's generation does not match the given value.", + // "format": "int64", + // "location": "query", + // "type": "string" + // }, + // "ifMetagenerationMatch": { + // "description": "Makes the operation conditional on whether the object's current metageneration matches the given value.", + // "format": "int64", + // "location": "query", + // "type": "string" + // }, + // "ifMetagenerationNotMatch": { + // "description": "Makes the operation conditional on whether the object's current metageneration does not match the given value.", + // "format": "int64", + // "location": "query", + // "type": "string" + // }, + // "object": { + // "description": "Name of the object. For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "projection": { + // "description": "Set of properties to return. Defaults to noAcl.", + // "enum": [ + // "full", + // "noAcl" + // ], + // "enumDescriptions": [ + // "Include all properties.", + // "Omit the acl property." + // ], + // "location": "query", + // "type": "string" + // } + // }, + // "path": "b/{bucket}/o/{object}", + // "response": { + // "$ref": "Object" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/cloud-platform.read-only", + // "https://www.googleapis.com/auth/devstorage.full_control", + // "https://www.googleapis.com/auth/devstorage.read_only", + // "https://www.googleapis.com/auth/devstorage.read_write" + // ], + // "supportsMediaDownload": true, + // "useMediaDownloadService": true + // } + +} + +// method id "storage.objects.insert": + +type ObjectsInsertCall struct { + s *Service + bucket string + object *Object + urlParams_ gensupport.URLParams + media_ io.Reader + resumableBuffer_ *gensupport.ResumableBuffer + mediaType_ string + mediaSize_ int64 // mediaSize, if known. Used only for calls to progressUpdater_. 
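+	// progressUpdater_, if set, is invoked after each uploaded chunk of a
+	// resumable upload with the bytes transferred so far and mediaSize_.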
+ progressUpdater_ googleapi.ProgressUpdater + ctx_ context.Context +} + +// Insert: Stores a new object and metadata. +func (r *ObjectsService) Insert(bucket string, object *Object) *ObjectsInsertCall { + c := &ObjectsInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.bucket = bucket + c.object = object + return c +} + +// ContentEncoding sets the optional parameter "contentEncoding": If +// set, sets the contentEncoding property of the final object to this +// value. Setting this parameter is equivalent to setting the +// contentEncoding metadata property. This can be useful when uploading +// an object with uploadType=media to indicate the encoding of the +// content being uploaded. +func (c *ObjectsInsertCall) ContentEncoding(contentEncoding string) *ObjectsInsertCall { + c.urlParams_.Set("contentEncoding", contentEncoding) + return c +} + +// IfGenerationMatch sets the optional parameter "ifGenerationMatch": +// Makes the operation conditional on whether the object's current +// generation matches the given value. +func (c *ObjectsInsertCall) IfGenerationMatch(ifGenerationMatch int64) *ObjectsInsertCall { + c.urlParams_.Set("ifGenerationMatch", fmt.Sprint(ifGenerationMatch)) + return c +} + +// IfGenerationNotMatch sets the optional parameter +// "ifGenerationNotMatch": Makes the operation conditional on whether +// the object's current generation does not match the given value. +func (c *ObjectsInsertCall) IfGenerationNotMatch(ifGenerationNotMatch int64) *ObjectsInsertCall { + c.urlParams_.Set("ifGenerationNotMatch", fmt.Sprint(ifGenerationNotMatch)) + return c +} + +// IfMetagenerationMatch sets the optional parameter +// "ifMetagenerationMatch": Makes the operation conditional on whether +// the object's current metageneration matches the given value. +func (c *ObjectsInsertCall) IfMetagenerationMatch(ifMetagenerationMatch int64) *ObjectsInsertCall { + c.urlParams_.Set("ifMetagenerationMatch", fmt.Sprint(ifMetagenerationMatch)) + return c +} + +// IfMetagenerationNotMatch sets the optional parameter +// "ifMetagenerationNotMatch": Makes the operation conditional on +// whether the object's current metageneration does not match the given +// value. +func (c *ObjectsInsertCall) IfMetagenerationNotMatch(ifMetagenerationNotMatch int64) *ObjectsInsertCall { + c.urlParams_.Set("ifMetagenerationNotMatch", fmt.Sprint(ifMetagenerationNotMatch)) + return c +} + +// Name sets the optional parameter "name": Name of the object. Required +// when the object metadata is not otherwise provided. Overrides the +// object metadata's name value, if any. For information about how to +// URL encode object names to be path safe, see Encoding URI Path Parts. +func (c *ObjectsInsertCall) Name(name string) *ObjectsInsertCall { + c.urlParams_.Set("name", name) + return c +} + +// PredefinedAcl sets the optional parameter "predefinedAcl": Apply a +// predefined set of access controls to this object. +// +// Possible values: +// "authenticatedRead" - Object owner gets OWNER access, and +// allAuthenticatedUsers get READER access. +// "bucketOwnerFullControl" - Object owner gets OWNER access, and +// project team owners get OWNER access. +// "bucketOwnerRead" - Object owner gets OWNER access, and project +// team owners get READER access. +// "private" - Object owner gets OWNER access. +// "projectPrivate" - Object owner gets OWNER access, and project team +// members get access according to their roles. +// "publicRead" - Object owner gets OWNER access, and allUsers get +// READER access. 
+func (c *ObjectsInsertCall) PredefinedAcl(predefinedAcl string) *ObjectsInsertCall { + c.urlParams_.Set("predefinedAcl", predefinedAcl) + return c +} + +// Projection sets the optional parameter "projection": Set of +// properties to return. Defaults to noAcl, unless the object resource +// specifies the acl property, when it defaults to full. +// +// Possible values: +// "full" - Include all properties. +// "noAcl" - Omit the acl property. +func (c *ObjectsInsertCall) Projection(projection string) *ObjectsInsertCall { + c.urlParams_.Set("projection", projection) + return c +} + +// Media specifies the media to upload in one or more chunks. The chunk +// size may be controlled by supplying a MediaOption generated by +// googleapi.ChunkSize. The chunk size defaults to +// googleapi.DefaultUploadChunkSize.The Content-Type header used in the +// upload request will be determined by sniffing the contents of r, +// unless a MediaOption generated by googleapi.ContentType is +// supplied. +// At most one of Media and ResumableMedia may be set. +func (c *ObjectsInsertCall) Media(r io.Reader, options ...googleapi.MediaOption) *ObjectsInsertCall { + opts := googleapi.ProcessMediaOptions(options) + chunkSize := opts.ChunkSize + if !opts.ForceEmptyContentType { + r, c.mediaType_ = gensupport.DetermineContentType(r, opts.ContentType) + } + c.media_, c.resumableBuffer_ = gensupport.PrepareUpload(r, chunkSize) + return c +} + +// ResumableMedia specifies the media to upload in chunks and can be +// canceled with ctx. +// +// Deprecated: use Media instead. +// +// At most one of Media and ResumableMedia may be set. mediaType +// identifies the MIME media type of the upload, such as "image/png". If +// mediaType is "", it will be auto-detected. The provided ctx will +// supersede any context previously provided to the Context method. +func (c *ObjectsInsertCall) ResumableMedia(ctx context.Context, r io.ReaderAt, size int64, mediaType string) *ObjectsInsertCall { + c.ctx_ = ctx + rdr := gensupport.ReaderAtToReader(r, size) + rdr, c.mediaType_ = gensupport.DetermineContentType(rdr, mediaType) + c.resumableBuffer_ = gensupport.NewResumableBuffer(rdr, googleapi.DefaultUploadChunkSize) + c.media_ = nil + c.mediaSize_ = size + return c +} + +// ProgressUpdater provides a callback function that will be called +// after every chunk. It should be a low-latency function in order to +// not slow down the upload operation. This should only be called when +// using ResumableMedia (as opposed to Media). +func (c *ObjectsInsertCall) ProgressUpdater(pu googleapi.ProgressUpdater) *ObjectsInsertCall { + c.progressUpdater_ = pu + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *ObjectsInsertCall) Fields(s ...googleapi.Field) *ObjectsInsertCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +// This context will supersede any context previously provided to the +// ResumableMedia method. 
+func (c *ObjectsInsertCall) Context(ctx context.Context) *ObjectsInsertCall { + c.ctx_ = ctx + return c +} + +func (c *ObjectsInsertCall) doRequest(alt string) (*http.Response, error) { + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.object) + if err != nil { + return nil, err + } + ctype := "application/json" + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/o") + if c.media_ != nil || c.resumableBuffer_ != nil { + urls = strings.Replace(urls, "https://www.googleapis.com/", "https://www.googleapis.com/upload/", 1) + protocol := "multipart" + if c.resumableBuffer_ != nil { + protocol = "resumable" + } + c.urlParams_.Set("uploadType", protocol) + } + urls += "?" + c.urlParams_.Encode() + if c.media_ != nil { + var combined io.ReadCloser + combined, ctype = gensupport.CombineBodyMedia(body, ctype, c.media_, c.mediaType_) + defer combined.Close() + body = combined + } + req, _ := http.NewRequest("POST", urls, body) + googleapi.Expand(req.URL, map[string]string{ + "bucket": c.bucket, + }) + if c.resumableBuffer_ != nil && c.mediaType_ != "" { + req.Header.Set("X-Upload-Content-Type", c.mediaType_) + } + req.Header.Set("Content-Type", ctype) + req.Header.Set("User-Agent", c.s.userAgent()) + if c.ctx_ != nil { + return ctxhttp.Do(c.ctx_, c.s.client, req) + } + return c.s.client.Do(req) +} + +// Do executes the "storage.objects.insert" call. +// Exactly one of *Object or error will be non-nil. Any non-2xx status +// code is an error. Response headers are in either +// *Object.ServerResponse.Header or (if a response was returned at all) +// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to +// check whether the returned error was because http.StatusNotModified +// was returned. +func (c *ObjectsInsertCall) Do(opts ...googleapi.CallOption) (*Object, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
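+	// The initial metadata/multipart request is retried using the default
+	// backoff strategy; a resumable upload is then continued below via the
+	// Location header returned by the server.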
+ res, err := gensupport.Retry(c.ctx_, func() (*http.Response, error) { + return c.doRequest("json") + }, gensupport.DefaultBackoffStrategy()) + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + if c.resumableBuffer_ != nil { + loc := res.Header.Get("Location") + rx := &gensupport.ResumableUpload{ + Client: c.s.client, + UserAgent: c.s.userAgent(), + URI: loc, + Media: c.resumableBuffer_, + MediaType: c.mediaType_, + Callback: func(curr int64) { + if c.progressUpdater_ != nil { + c.progressUpdater_(curr, c.mediaSize_) + } + }, + } + ctx := c.ctx_ + if ctx == nil { + ctx = context.TODO() + } + res, err = rx.Upload(ctx) + if err != nil { + return nil, err + } + defer res.Body.Close() + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + } + ret := &Object{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Stores a new object and metadata.", + // "httpMethod": "POST", + // "id": "storage.objects.insert", + // "mediaUpload": { + // "accept": [ + // "*/*" + // ], + // "protocols": { + // "resumable": { + // "multipart": true, + // "path": "/resumable/upload/storage/v1/b/{bucket}/o" + // }, + // "simple": { + // "multipart": true, + // "path": "/upload/storage/v1/b/{bucket}/o" + // } + // } + // }, + // "parameterOrder": [ + // "bucket" + // ], + // "parameters": { + // "bucket": { + // "description": "Name of the bucket in which to store the new object. Overrides the provided object metadata's bucket value, if any.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "contentEncoding": { + // "description": "If set, sets the contentEncoding property of the final object to this value. Setting this parameter is equivalent to setting the contentEncoding metadata property. This can be useful when uploading an object with uploadType=media to indicate the encoding of the content being uploaded.", + // "location": "query", + // "type": "string" + // }, + // "ifGenerationMatch": { + // "description": "Makes the operation conditional on whether the object's current generation matches the given value.", + // "format": "int64", + // "location": "query", + // "type": "string" + // }, + // "ifGenerationNotMatch": { + // "description": "Makes the operation conditional on whether the object's current generation does not match the given value.", + // "format": "int64", + // "location": "query", + // "type": "string" + // }, + // "ifMetagenerationMatch": { + // "description": "Makes the operation conditional on whether the object's current metageneration matches the given value.", + // "format": "int64", + // "location": "query", + // "type": "string" + // }, + // "ifMetagenerationNotMatch": { + // "description": "Makes the operation conditional on whether the object's current metageneration does not match the given value.", + // "format": "int64", + // "location": "query", + // "type": "string" + // }, + // "name": { + // "description": "Name of the object. Required when the object metadata is not otherwise provided. Overrides the object metadata's name value, if any. 
For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.", + // "location": "query", + // "type": "string" + // }, + // "predefinedAcl": { + // "description": "Apply a predefined set of access controls to this object.", + // "enum": [ + // "authenticatedRead", + // "bucketOwnerFullControl", + // "bucketOwnerRead", + // "private", + // "projectPrivate", + // "publicRead" + // ], + // "enumDescriptions": [ + // "Object owner gets OWNER access, and allAuthenticatedUsers get READER access.", + // "Object owner gets OWNER access, and project team owners get OWNER access.", + // "Object owner gets OWNER access, and project team owners get READER access.", + // "Object owner gets OWNER access.", + // "Object owner gets OWNER access, and project team members get access according to their roles.", + // "Object owner gets OWNER access, and allUsers get READER access." + // ], + // "location": "query", + // "type": "string" + // }, + // "projection": { + // "description": "Set of properties to return. Defaults to noAcl, unless the object resource specifies the acl property, when it defaults to full.", + // "enum": [ + // "full", + // "noAcl" + // ], + // "enumDescriptions": [ + // "Include all properties.", + // "Omit the acl property." + // ], + // "location": "query", + // "type": "string" + // } + // }, + // "path": "b/{bucket}/o", + // "request": { + // "$ref": "Object" + // }, + // "response": { + // "$ref": "Object" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/devstorage.full_control", + // "https://www.googleapis.com/auth/devstorage.read_write" + // ], + // "supportsMediaDownload": true, + // "supportsMediaUpload": true, + // "useMediaDownloadService": true + // } + +} + +// method id "storage.objects.list": + +type ObjectsListCall struct { + s *Service + bucket string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context +} + +// List: Retrieves a list of objects matching the criteria. +func (r *ObjectsService) List(bucket string) *ObjectsListCall { + c := &ObjectsListCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.bucket = bucket + return c +} + +// Delimiter sets the optional parameter "delimiter": Returns results in +// a directory-like mode. items will contain only objects whose names, +// aside from the prefix, do not contain delimiter. Objects whose names, +// aside from the prefix, contain delimiter will have their name, +// truncated after the delimiter, returned in prefixes. Duplicate +// prefixes are omitted. +func (c *ObjectsListCall) Delimiter(delimiter string) *ObjectsListCall { + c.urlParams_.Set("delimiter", delimiter) + return c +} + +// MaxResults sets the optional parameter "maxResults": Maximum number +// of items plus prefixes to return. As duplicate prefixes are omitted, +// fewer total results may be returned than requested. The default value +// of this parameter is 1,000 items. +func (c *ObjectsListCall) MaxResults(maxResults int64) *ObjectsListCall { + c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) + return c +} + +// PageToken sets the optional parameter "pageToken": A +// previously-returned page token representing part of the larger set of +// results to view. +func (c *ObjectsListCall) PageToken(pageToken string) *ObjectsListCall { + c.urlParams_.Set("pageToken", pageToken) + return c +} + +// Prefix sets the optional parameter "prefix": Filter results to +// objects whose names begin with this prefix. 
+func (c *ObjectsListCall) Prefix(prefix string) *ObjectsListCall { + c.urlParams_.Set("prefix", prefix) + return c +} + +// Projection sets the optional parameter "projection": Set of +// properties to return. Defaults to noAcl. +// +// Possible values: +// "full" - Include all properties. +// "noAcl" - Omit the acl property. +func (c *ObjectsListCall) Projection(projection string) *ObjectsListCall { + c.urlParams_.Set("projection", projection) + return c +} + +// Versions sets the optional parameter "versions": If true, lists all +// versions of an object as distinct results. The default is false. For +// more information, see Object Versioning. +func (c *ObjectsListCall) Versions(versions bool) *ObjectsListCall { + c.urlParams_.Set("versions", fmt.Sprint(versions)) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *ObjectsListCall) Fields(s ...googleapi.Field) *ObjectsListCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *ObjectsListCall) IfNoneMatch(entityTag string) *ObjectsListCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *ObjectsListCall) Context(ctx context.Context) *ObjectsListCall { + c.ctx_ = ctx + return c +} + +func (c *ObjectsListCall) doRequest(alt string) (*http.Response, error) { + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/o") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("GET", urls, body) + googleapi.Expand(req.URL, map[string]string{ + "bucket": c.bucket, + }) + req.Header.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + req.Header.Set("If-None-Match", c.ifNoneMatch_) + } + if c.ctx_ != nil { + return ctxhttp.Do(c.ctx_, c.s.client, req) + } + return c.s.client.Do(req) +} + +// Do executes the "storage.objects.list" call. +// Exactly one of *Objects or error will be non-nil. Any non-2xx status +// code is an error. Response headers are in either +// *Objects.ServerResponse.Header or (if a response was returned at all) +// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to +// check whether the returned error was because http.StatusNotModified +// was returned. +func (c *ObjectsListCall) Do(opts ...googleapi.CallOption) (*Objects, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Objects{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Retrieves a list of objects matching the criteria.", + // "httpMethod": "GET", + // "id": "storage.objects.list", + // "parameterOrder": [ + // "bucket" + // ], + // "parameters": { + // "bucket": { + // "description": "Name of the bucket in which to look for objects.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "delimiter": { + // "description": "Returns results in a directory-like mode. items will contain only objects whose names, aside from the prefix, do not contain delimiter. Objects whose names, aside from the prefix, contain delimiter will have their name, truncated after the delimiter, returned in prefixes. Duplicate prefixes are omitted.", + // "location": "query", + // "type": "string" + // }, + // "maxResults": { + // "description": "Maximum number of items plus prefixes to return. As duplicate prefixes are omitted, fewer total results may be returned than requested. The default value of this parameter is 1,000 items.", + // "format": "uint32", + // "location": "query", + // "minimum": "0", + // "type": "integer" + // }, + // "pageToken": { + // "description": "A previously-returned page token representing part of the larger set of results to view.", + // "location": "query", + // "type": "string" + // }, + // "prefix": { + // "description": "Filter results to objects whose names begin with this prefix.", + // "location": "query", + // "type": "string" + // }, + // "projection": { + // "description": "Set of properties to return. Defaults to noAcl.", + // "enum": [ + // "full", + // "noAcl" + // ], + // "enumDescriptions": [ + // "Include all properties.", + // "Omit the acl property." + // ], + // "location": "query", + // "type": "string" + // }, + // "versions": { + // "description": "If true, lists all versions of an object as distinct results. The default is false. For more information, see Object Versioning.", + // "location": "query", + // "type": "boolean" + // } + // }, + // "path": "b/{bucket}/o", + // "response": { + // "$ref": "Objects" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/cloud-platform.read-only", + // "https://www.googleapis.com/auth/devstorage.full_control", + // "https://www.googleapis.com/auth/devstorage.read_only", + // "https://www.googleapis.com/auth/devstorage.read_write" + // ], + // "supportsSubscription": true + // } + +} + +// Pages invokes f for each page of results. +// A non-nil error returned from f will halt the iteration. +// The provided context supersedes any context provided to the Context method. 
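+//
+// Illustrative sketch of the paging callback (the service value, context and
+// bucket name below are placeholders, not part of this package):
+//
+//    err := svc.Objects.List("my-bucket").Prefix("logs/").Pages(ctx, func(objs *Objects) error {
+//        for _, o := range objs.Items {
+//            fmt.Println(o.Name) // each page's objects are visited in turn
+//        }
+//        return nil
+//    })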
+func (c *ObjectsListCall) Pages(ctx context.Context, f func(*Objects) error) error { + c.ctx_ = ctx + defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point + for { + x, err := c.Do() + if err != nil { + return err + } + if err := f(x); err != nil { + return err + } + if x.NextPageToken == "" { + return nil + } + c.PageToken(x.NextPageToken) + } +} + +// method id "storage.objects.patch": + +type ObjectsPatchCall struct { + s *Service + bucket string + object string + object2 *Object + urlParams_ gensupport.URLParams + ctx_ context.Context +} + +// Patch: Updates an object's metadata. This method supports patch +// semantics. +func (r *ObjectsService) Patch(bucket string, object string, object2 *Object) *ObjectsPatchCall { + c := &ObjectsPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.bucket = bucket + c.object = object + c.object2 = object2 + return c +} + +// Generation sets the optional parameter "generation": If present, +// selects a specific revision of this object (as opposed to the latest +// version, the default). +func (c *ObjectsPatchCall) Generation(generation int64) *ObjectsPatchCall { + c.urlParams_.Set("generation", fmt.Sprint(generation)) + return c +} + +// IfGenerationMatch sets the optional parameter "ifGenerationMatch": +// Makes the operation conditional on whether the object's current +// generation matches the given value. +func (c *ObjectsPatchCall) IfGenerationMatch(ifGenerationMatch int64) *ObjectsPatchCall { + c.urlParams_.Set("ifGenerationMatch", fmt.Sprint(ifGenerationMatch)) + return c +} + +// IfGenerationNotMatch sets the optional parameter +// "ifGenerationNotMatch": Makes the operation conditional on whether +// the object's current generation does not match the given value. +func (c *ObjectsPatchCall) IfGenerationNotMatch(ifGenerationNotMatch int64) *ObjectsPatchCall { + c.urlParams_.Set("ifGenerationNotMatch", fmt.Sprint(ifGenerationNotMatch)) + return c +} + +// IfMetagenerationMatch sets the optional parameter +// "ifMetagenerationMatch": Makes the operation conditional on whether +// the object's current metageneration matches the given value. +func (c *ObjectsPatchCall) IfMetagenerationMatch(ifMetagenerationMatch int64) *ObjectsPatchCall { + c.urlParams_.Set("ifMetagenerationMatch", fmt.Sprint(ifMetagenerationMatch)) + return c +} + +// IfMetagenerationNotMatch sets the optional parameter +// "ifMetagenerationNotMatch": Makes the operation conditional on +// whether the object's current metageneration does not match the given +// value. +func (c *ObjectsPatchCall) IfMetagenerationNotMatch(ifMetagenerationNotMatch int64) *ObjectsPatchCall { + c.urlParams_.Set("ifMetagenerationNotMatch", fmt.Sprint(ifMetagenerationNotMatch)) + return c +} + +// PredefinedAcl sets the optional parameter "predefinedAcl": Apply a +// predefined set of access controls to this object. +// +// Possible values: +// "authenticatedRead" - Object owner gets OWNER access, and +// allAuthenticatedUsers get READER access. +// "bucketOwnerFullControl" - Object owner gets OWNER access, and +// project team owners get OWNER access. +// "bucketOwnerRead" - Object owner gets OWNER access, and project +// team owners get READER access. +// "private" - Object owner gets OWNER access. +// "projectPrivate" - Object owner gets OWNER access, and project team +// members get access according to their roles. +// "publicRead" - Object owner gets OWNER access, and allUsers get +// READER access. 
+func (c *ObjectsPatchCall) PredefinedAcl(predefinedAcl string) *ObjectsPatchCall { + c.urlParams_.Set("predefinedAcl", predefinedAcl) + return c +} + +// Projection sets the optional parameter "projection": Set of +// properties to return. Defaults to full. +// +// Possible values: +// "full" - Include all properties. +// "noAcl" - Omit the acl property. +func (c *ObjectsPatchCall) Projection(projection string) *ObjectsPatchCall { + c.urlParams_.Set("projection", projection) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *ObjectsPatchCall) Fields(s ...googleapi.Field) *ObjectsPatchCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *ObjectsPatchCall) Context(ctx context.Context) *ObjectsPatchCall { + c.ctx_ = ctx + return c +} + +func (c *ObjectsPatchCall) doRequest(alt string) (*http.Response, error) { + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.object2) + if err != nil { + return nil, err + } + ctype := "application/json" + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/o/{object}") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("PATCH", urls, body) + googleapi.Expand(req.URL, map[string]string{ + "bucket": c.bucket, + "object": c.object, + }) + req.Header.Set("Content-Type", ctype) + req.Header.Set("User-Agent", c.s.userAgent()) + if c.ctx_ != nil { + return ctxhttp.Do(c.ctx_, c.s.client, req) + } + return c.s.client.Do(req) +} + +// Do executes the "storage.objects.patch" call. +// Exactly one of *Object or error will be non-nil. Any non-2xx status +// code is an error. Response headers are in either +// *Object.ServerResponse.Header or (if a response was returned at all) +// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to +// check whether the returned error was because http.StatusNotModified +// was returned. +func (c *ObjectsPatchCall) Do(opts ...googleapi.CallOption) (*Object, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Object{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Updates an object's metadata. 
This method supports patch semantics.", + // "httpMethod": "PATCH", + // "id": "storage.objects.patch", + // "parameterOrder": [ + // "bucket", + // "object" + // ], + // "parameters": { + // "bucket": { + // "description": "Name of the bucket in which the object resides.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "generation": { + // "description": "If present, selects a specific revision of this object (as opposed to the latest version, the default).", + // "format": "int64", + // "location": "query", + // "type": "string" + // }, + // "ifGenerationMatch": { + // "description": "Makes the operation conditional on whether the object's current generation matches the given value.", + // "format": "int64", + // "location": "query", + // "type": "string" + // }, + // "ifGenerationNotMatch": { + // "description": "Makes the operation conditional on whether the object's current generation does not match the given value.", + // "format": "int64", + // "location": "query", + // "type": "string" + // }, + // "ifMetagenerationMatch": { + // "description": "Makes the operation conditional on whether the object's current metageneration matches the given value.", + // "format": "int64", + // "location": "query", + // "type": "string" + // }, + // "ifMetagenerationNotMatch": { + // "description": "Makes the operation conditional on whether the object's current metageneration does not match the given value.", + // "format": "int64", + // "location": "query", + // "type": "string" + // }, + // "object": { + // "description": "Name of the object. For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "predefinedAcl": { + // "description": "Apply a predefined set of access controls to this object.", + // "enum": [ + // "authenticatedRead", + // "bucketOwnerFullControl", + // "bucketOwnerRead", + // "private", + // "projectPrivate", + // "publicRead" + // ], + // "enumDescriptions": [ + // "Object owner gets OWNER access, and allAuthenticatedUsers get READER access.", + // "Object owner gets OWNER access, and project team owners get OWNER access.", + // "Object owner gets OWNER access, and project team owners get READER access.", + // "Object owner gets OWNER access.", + // "Object owner gets OWNER access, and project team members get access according to their roles.", + // "Object owner gets OWNER access, and allUsers get READER access." + // ], + // "location": "query", + // "type": "string" + // }, + // "projection": { + // "description": "Set of properties to return. Defaults to full.", + // "enum": [ + // "full", + // "noAcl" + // ], + // "enumDescriptions": [ + // "Include all properties.", + // "Omit the acl property." 
+ // ], + // "location": "query", + // "type": "string" + // } + // }, + // "path": "b/{bucket}/o/{object}", + // "request": { + // "$ref": "Object" + // }, + // "response": { + // "$ref": "Object" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/devstorage.full_control", + // "https://www.googleapis.com/auth/devstorage.read_write" + // ] + // } + +} + +// method id "storage.objects.rewrite": + +type ObjectsRewriteCall struct { + s *Service + sourceBucket string + sourceObject string + destinationBucket string + destinationObject string + object *Object + urlParams_ gensupport.URLParams + ctx_ context.Context +} + +// Rewrite: Rewrites a source object to a destination object. Optionally +// overrides metadata. +func (r *ObjectsService) Rewrite(sourceBucket string, sourceObject string, destinationBucket string, destinationObject string, object *Object) *ObjectsRewriteCall { + c := &ObjectsRewriteCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.sourceBucket = sourceBucket + c.sourceObject = sourceObject + c.destinationBucket = destinationBucket + c.destinationObject = destinationObject + c.object = object + return c +} + +// DestinationPredefinedAcl sets the optional parameter +// "destinationPredefinedAcl": Apply a predefined set of access controls +// to the destination object. +// +// Possible values: +// "authenticatedRead" - Object owner gets OWNER access, and +// allAuthenticatedUsers get READER access. +// "bucketOwnerFullControl" - Object owner gets OWNER access, and +// project team owners get OWNER access. +// "bucketOwnerRead" - Object owner gets OWNER access, and project +// team owners get READER access. +// "private" - Object owner gets OWNER access. +// "projectPrivate" - Object owner gets OWNER access, and project team +// members get access according to their roles. +// "publicRead" - Object owner gets OWNER access, and allUsers get +// READER access. +func (c *ObjectsRewriteCall) DestinationPredefinedAcl(destinationPredefinedAcl string) *ObjectsRewriteCall { + c.urlParams_.Set("destinationPredefinedAcl", destinationPredefinedAcl) + return c +} + +// IfGenerationMatch sets the optional parameter "ifGenerationMatch": +// Makes the operation conditional on whether the destination object's +// current generation matches the given value. +func (c *ObjectsRewriteCall) IfGenerationMatch(ifGenerationMatch int64) *ObjectsRewriteCall { + c.urlParams_.Set("ifGenerationMatch", fmt.Sprint(ifGenerationMatch)) + return c +} + +// IfGenerationNotMatch sets the optional parameter +// "ifGenerationNotMatch": Makes the operation conditional on whether +// the destination object's current generation does not match the given +// value. +func (c *ObjectsRewriteCall) IfGenerationNotMatch(ifGenerationNotMatch int64) *ObjectsRewriteCall { + c.urlParams_.Set("ifGenerationNotMatch", fmt.Sprint(ifGenerationNotMatch)) + return c +} + +// IfMetagenerationMatch sets the optional parameter +// "ifMetagenerationMatch": Makes the operation conditional on whether +// the destination object's current metageneration matches the given +// value. 
+func (c *ObjectsRewriteCall) IfMetagenerationMatch(ifMetagenerationMatch int64) *ObjectsRewriteCall { + c.urlParams_.Set("ifMetagenerationMatch", fmt.Sprint(ifMetagenerationMatch)) + return c +} + +// IfMetagenerationNotMatch sets the optional parameter +// "ifMetagenerationNotMatch": Makes the operation conditional on +// whether the destination object's current metageneration does not +// match the given value. +func (c *ObjectsRewriteCall) IfMetagenerationNotMatch(ifMetagenerationNotMatch int64) *ObjectsRewriteCall { + c.urlParams_.Set("ifMetagenerationNotMatch", fmt.Sprint(ifMetagenerationNotMatch)) + return c +} + +// IfSourceGenerationMatch sets the optional parameter +// "ifSourceGenerationMatch": Makes the operation conditional on whether +// the source object's generation matches the given value. +func (c *ObjectsRewriteCall) IfSourceGenerationMatch(ifSourceGenerationMatch int64) *ObjectsRewriteCall { + c.urlParams_.Set("ifSourceGenerationMatch", fmt.Sprint(ifSourceGenerationMatch)) + return c +} + +// IfSourceGenerationNotMatch sets the optional parameter +// "ifSourceGenerationNotMatch": Makes the operation conditional on +// whether the source object's generation does not match the given +// value. +func (c *ObjectsRewriteCall) IfSourceGenerationNotMatch(ifSourceGenerationNotMatch int64) *ObjectsRewriteCall { + c.urlParams_.Set("ifSourceGenerationNotMatch", fmt.Sprint(ifSourceGenerationNotMatch)) + return c +} + +// IfSourceMetagenerationMatch sets the optional parameter +// "ifSourceMetagenerationMatch": Makes the operation conditional on +// whether the source object's current metageneration matches the given +// value. +func (c *ObjectsRewriteCall) IfSourceMetagenerationMatch(ifSourceMetagenerationMatch int64) *ObjectsRewriteCall { + c.urlParams_.Set("ifSourceMetagenerationMatch", fmt.Sprint(ifSourceMetagenerationMatch)) + return c +} + +// IfSourceMetagenerationNotMatch sets the optional parameter +// "ifSourceMetagenerationNotMatch": Makes the operation conditional on +// whether the source object's current metageneration does not match the +// given value. +func (c *ObjectsRewriteCall) IfSourceMetagenerationNotMatch(ifSourceMetagenerationNotMatch int64) *ObjectsRewriteCall { + c.urlParams_.Set("ifSourceMetagenerationNotMatch", fmt.Sprint(ifSourceMetagenerationNotMatch)) + return c +} + +// MaxBytesRewrittenPerCall sets the optional parameter +// "maxBytesRewrittenPerCall": The maximum number of bytes that will be +// rewritten per rewrite request. Most callers shouldn't need to specify +// this parameter - it is primarily in place to support testing. If +// specified the value must be an integral multiple of 1 MiB (1048576). +// Also, this only applies to requests where the source and destination +// span locations and/or storage classes. Finally, this value must not +// change across rewrite calls else you'll get an error that the +// rewriteToken is invalid. +func (c *ObjectsRewriteCall) MaxBytesRewrittenPerCall(maxBytesRewrittenPerCall int64) *ObjectsRewriteCall { + c.urlParams_.Set("maxBytesRewrittenPerCall", fmt.Sprint(maxBytesRewrittenPerCall)) + return c +} + +// Projection sets the optional parameter "projection": Set of +// properties to return. Defaults to noAcl, unless the object resource +// specifies the acl property, when it defaults to full. +// +// Possible values: +// "full" - Include all properties. +// "noAcl" - Omit the acl property. 
+func (c *ObjectsRewriteCall) Projection(projection string) *ObjectsRewriteCall { + c.urlParams_.Set("projection", projection) + return c +} + +// RewriteToken sets the optional parameter "rewriteToken": Include this +// field (from the previous rewrite response) on each rewrite request +// after the first one, until the rewrite response 'done' flag is true. +// Calls that provide a rewriteToken can omit all other request fields, +// but if included those fields must match the values provided in the +// first rewrite request. +func (c *ObjectsRewriteCall) RewriteToken(rewriteToken string) *ObjectsRewriteCall { + c.urlParams_.Set("rewriteToken", rewriteToken) + return c +} + +// SourceGeneration sets the optional parameter "sourceGeneration": If +// present, selects a specific revision of the source object (as opposed +// to the latest version, the default). +func (c *ObjectsRewriteCall) SourceGeneration(sourceGeneration int64) *ObjectsRewriteCall { + c.urlParams_.Set("sourceGeneration", fmt.Sprint(sourceGeneration)) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *ObjectsRewriteCall) Fields(s ...googleapi.Field) *ObjectsRewriteCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *ObjectsRewriteCall) Context(ctx context.Context) *ObjectsRewriteCall { + c.ctx_ = ctx + return c +} + +func (c *ObjectsRewriteCall) doRequest(alt string) (*http.Response, error) { + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.object) + if err != nil { + return nil, err + } + ctype := "application/json" + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "b/{sourceBucket}/o/{sourceObject}/rewriteTo/b/{destinationBucket}/o/{destinationObject}") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("POST", urls, body) + googleapi.Expand(req.URL, map[string]string{ + "sourceBucket": c.sourceBucket, + "sourceObject": c.sourceObject, + "destinationBucket": c.destinationBucket, + "destinationObject": c.destinationObject, + }) + req.Header.Set("Content-Type", ctype) + req.Header.Set("User-Agent", c.s.userAgent()) + if c.ctx_ != nil { + return ctxhttp.Do(c.ctx_, c.s.client, req) + } + return c.s.client.Do(req) +} + +// Do executes the "storage.objects.rewrite" call. +// Exactly one of *RewriteResponse or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *RewriteResponse.ServerResponse.Header or (if a response was returned +// at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *ObjectsRewriteCall) Do(opts ...googleapi.CallOption) (*RewriteResponse, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &RewriteResponse{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Rewrites a source object to a destination object. Optionally overrides metadata.", + // "httpMethod": "POST", + // "id": "storage.objects.rewrite", + // "parameterOrder": [ + // "sourceBucket", + // "sourceObject", + // "destinationBucket", + // "destinationObject" + // ], + // "parameters": { + // "destinationBucket": { + // "description": "Name of the bucket in which to store the new object. Overrides the provided object metadata's bucket value, if any.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "destinationObject": { + // "description": "Name of the new object. Required when the object metadata is not otherwise provided. Overrides the object metadata's name value, if any. For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "destinationPredefinedAcl": { + // "description": "Apply a predefined set of access controls to the destination object.", + // "enum": [ + // "authenticatedRead", + // "bucketOwnerFullControl", + // "bucketOwnerRead", + // "private", + // "projectPrivate", + // "publicRead" + // ], + // "enumDescriptions": [ + // "Object owner gets OWNER access, and allAuthenticatedUsers get READER access.", + // "Object owner gets OWNER access, and project team owners get OWNER access.", + // "Object owner gets OWNER access, and project team owners get READER access.", + // "Object owner gets OWNER access.", + // "Object owner gets OWNER access, and project team members get access according to their roles.", + // "Object owner gets OWNER access, and allUsers get READER access." 
+ // ], + // "location": "query", + // "type": "string" + // }, + // "ifGenerationMatch": { + // "description": "Makes the operation conditional on whether the destination object's current generation matches the given value.", + // "format": "int64", + // "location": "query", + // "type": "string" + // }, + // "ifGenerationNotMatch": { + // "description": "Makes the operation conditional on whether the destination object's current generation does not match the given value.", + // "format": "int64", + // "location": "query", + // "type": "string" + // }, + // "ifMetagenerationMatch": { + // "description": "Makes the operation conditional on whether the destination object's current metageneration matches the given value.", + // "format": "int64", + // "location": "query", + // "type": "string" + // }, + // "ifMetagenerationNotMatch": { + // "description": "Makes the operation conditional on whether the destination object's current metageneration does not match the given value.", + // "format": "int64", + // "location": "query", + // "type": "string" + // }, + // "ifSourceGenerationMatch": { + // "description": "Makes the operation conditional on whether the source object's generation matches the given value.", + // "format": "int64", + // "location": "query", + // "type": "string" + // }, + // "ifSourceGenerationNotMatch": { + // "description": "Makes the operation conditional on whether the source object's generation does not match the given value.", + // "format": "int64", + // "location": "query", + // "type": "string" + // }, + // "ifSourceMetagenerationMatch": { + // "description": "Makes the operation conditional on whether the source object's current metageneration matches the given value.", + // "format": "int64", + // "location": "query", + // "type": "string" + // }, + // "ifSourceMetagenerationNotMatch": { + // "description": "Makes the operation conditional on whether the source object's current metageneration does not match the given value.", + // "format": "int64", + // "location": "query", + // "type": "string" + // }, + // "maxBytesRewrittenPerCall": { + // "description": "The maximum number of bytes that will be rewritten per rewrite request. Most callers shouldn't need to specify this parameter - it is primarily in place to support testing. If specified the value must be an integral multiple of 1 MiB (1048576). Also, this only applies to requests where the source and destination span locations and/or storage classes. Finally, this value must not change across rewrite calls else you'll get an error that the rewriteToken is invalid.", + // "format": "int64", + // "location": "query", + // "type": "string" + // }, + // "projection": { + // "description": "Set of properties to return. Defaults to noAcl, unless the object resource specifies the acl property, when it defaults to full.", + // "enum": [ + // "full", + // "noAcl" + // ], + // "enumDescriptions": [ + // "Include all properties.", + // "Omit the acl property." + // ], + // "location": "query", + // "type": "string" + // }, + // "rewriteToken": { + // "description": "Include this field (from the previous rewrite response) on each rewrite request after the first one, until the rewrite response 'done' flag is true. 
Calls that provide a rewriteToken can omit all other request fields, but if included those fields must match the values provided in the first rewrite request.", + // "location": "query", + // "type": "string" + // }, + // "sourceBucket": { + // "description": "Name of the bucket in which to find the source object.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "sourceGeneration": { + // "description": "If present, selects a specific revision of the source object (as opposed to the latest version, the default).", + // "format": "int64", + // "location": "query", + // "type": "string" + // }, + // "sourceObject": { + // "description": "Name of the source object. For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.", + // "location": "path", + // "required": true, + // "type": "string" + // } + // }, + // "path": "b/{sourceBucket}/o/{sourceObject}/rewriteTo/b/{destinationBucket}/o/{destinationObject}", + // "request": { + // "$ref": "Object" + // }, + // "response": { + // "$ref": "RewriteResponse" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/devstorage.full_control", + // "https://www.googleapis.com/auth/devstorage.read_write" + // ] + // } + +} + +// method id "storage.objects.update": + +type ObjectsUpdateCall struct { + s *Service + bucket string + object string + object2 *Object + urlParams_ gensupport.URLParams + ctx_ context.Context +} + +// Update: Updates an object's metadata. +func (r *ObjectsService) Update(bucket string, object string, object2 *Object) *ObjectsUpdateCall { + c := &ObjectsUpdateCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.bucket = bucket + c.object = object + c.object2 = object2 + return c +} + +// Generation sets the optional parameter "generation": If present, +// selects a specific revision of this object (as opposed to the latest +// version, the default). +func (c *ObjectsUpdateCall) Generation(generation int64) *ObjectsUpdateCall { + c.urlParams_.Set("generation", fmt.Sprint(generation)) + return c +} + +// IfGenerationMatch sets the optional parameter "ifGenerationMatch": +// Makes the operation conditional on whether the object's current +// generation matches the given value. +func (c *ObjectsUpdateCall) IfGenerationMatch(ifGenerationMatch int64) *ObjectsUpdateCall { + c.urlParams_.Set("ifGenerationMatch", fmt.Sprint(ifGenerationMatch)) + return c +} + +// IfGenerationNotMatch sets the optional parameter +// "ifGenerationNotMatch": Makes the operation conditional on whether +// the object's current generation does not match the given value. +func (c *ObjectsUpdateCall) IfGenerationNotMatch(ifGenerationNotMatch int64) *ObjectsUpdateCall { + c.urlParams_.Set("ifGenerationNotMatch", fmt.Sprint(ifGenerationNotMatch)) + return c +} + +// IfMetagenerationMatch sets the optional parameter +// "ifMetagenerationMatch": Makes the operation conditional on whether +// the object's current metageneration matches the given value. +func (c *ObjectsUpdateCall) IfMetagenerationMatch(ifMetagenerationMatch int64) *ObjectsUpdateCall { + c.urlParams_.Set("ifMetagenerationMatch", fmt.Sprint(ifMetagenerationMatch)) + return c +} + +// IfMetagenerationNotMatch sets the optional parameter +// "ifMetagenerationNotMatch": Makes the operation conditional on +// whether the object's current metageneration does not match the given +// value. 
+func (c *ObjectsUpdateCall) IfMetagenerationNotMatch(ifMetagenerationNotMatch int64) *ObjectsUpdateCall { + c.urlParams_.Set("ifMetagenerationNotMatch", fmt.Sprint(ifMetagenerationNotMatch)) + return c +} + +// PredefinedAcl sets the optional parameter "predefinedAcl": Apply a +// predefined set of access controls to this object. +// +// Possible values: +// "authenticatedRead" - Object owner gets OWNER access, and +// allAuthenticatedUsers get READER access. +// "bucketOwnerFullControl" - Object owner gets OWNER access, and +// project team owners get OWNER access. +// "bucketOwnerRead" - Object owner gets OWNER access, and project +// team owners get READER access. +// "private" - Object owner gets OWNER access. +// "projectPrivate" - Object owner gets OWNER access, and project team +// members get access according to their roles. +// "publicRead" - Object owner gets OWNER access, and allUsers get +// READER access. +func (c *ObjectsUpdateCall) PredefinedAcl(predefinedAcl string) *ObjectsUpdateCall { + c.urlParams_.Set("predefinedAcl", predefinedAcl) + return c +} + +// Projection sets the optional parameter "projection": Set of +// properties to return. Defaults to full. +// +// Possible values: +// "full" - Include all properties. +// "noAcl" - Omit the acl property. +func (c *ObjectsUpdateCall) Projection(projection string) *ObjectsUpdateCall { + c.urlParams_.Set("projection", projection) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *ObjectsUpdateCall) Fields(s ...googleapi.Field) *ObjectsUpdateCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do and Download +// methods. Any pending HTTP request will be aborted if the provided +// context is canceled. +func (c *ObjectsUpdateCall) Context(ctx context.Context) *ObjectsUpdateCall { + c.ctx_ = ctx + return c +} + +func (c *ObjectsUpdateCall) doRequest(alt string) (*http.Response, error) { + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.object2) + if err != nil { + return nil, err + } + ctype := "application/json" + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/o/{object}") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("PUT", urls, body) + googleapi.Expand(req.URL, map[string]string{ + "bucket": c.bucket, + "object": c.object, + }) + req.Header.Set("Content-Type", ctype) + req.Header.Set("User-Agent", c.s.userAgent()) + if c.ctx_ != nil { + return ctxhttp.Do(c.ctx_, c.s.client, req) + } + return c.s.client.Do(req) +} + +// Download fetches the API endpoint's "media" value, instead of the normal +// API response value. If the returned error is nil, the Response is guaranteed to +// have a 2xx status code. Callers must close the Response.Body as usual. +func (c *ObjectsUpdateCall) Download(opts ...googleapi.CallOption) (*http.Response, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("media") + if err != nil { + return nil, err + } + if err := googleapi.CheckMediaResponse(res); err != nil { + res.Body.Close() + return nil, err + } + return res, nil +} + +// Do executes the "storage.objects.update" call. +// Exactly one of *Object or error will be non-nil. Any non-2xx status +// code is an error. 
Response headers are in either +// *Object.ServerResponse.Header or (if a response was returned at all) +// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to +// check whether the returned error was because http.StatusNotModified +// was returned. +func (c *ObjectsUpdateCall) Do(opts ...googleapi.CallOption) (*Object, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Object{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Updates an object's metadata.", + // "httpMethod": "PUT", + // "id": "storage.objects.update", + // "parameterOrder": [ + // "bucket", + // "object" + // ], + // "parameters": { + // "bucket": { + // "description": "Name of the bucket in which the object resides.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "generation": { + // "description": "If present, selects a specific revision of this object (as opposed to the latest version, the default).", + // "format": "int64", + // "location": "query", + // "type": "string" + // }, + // "ifGenerationMatch": { + // "description": "Makes the operation conditional on whether the object's current generation matches the given value.", + // "format": "int64", + // "location": "query", + // "type": "string" + // }, + // "ifGenerationNotMatch": { + // "description": "Makes the operation conditional on whether the object's current generation does not match the given value.", + // "format": "int64", + // "location": "query", + // "type": "string" + // }, + // "ifMetagenerationMatch": { + // "description": "Makes the operation conditional on whether the object's current metageneration matches the given value.", + // "format": "int64", + // "location": "query", + // "type": "string" + // }, + // "ifMetagenerationNotMatch": { + // "description": "Makes the operation conditional on whether the object's current metageneration does not match the given value.", + // "format": "int64", + // "location": "query", + // "type": "string" + // }, + // "object": { + // "description": "Name of the object. 
For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "predefinedAcl": { + // "description": "Apply a predefined set of access controls to this object.", + // "enum": [ + // "authenticatedRead", + // "bucketOwnerFullControl", + // "bucketOwnerRead", + // "private", + // "projectPrivate", + // "publicRead" + // ], + // "enumDescriptions": [ + // "Object owner gets OWNER access, and allAuthenticatedUsers get READER access.", + // "Object owner gets OWNER access, and project team owners get OWNER access.", + // "Object owner gets OWNER access, and project team owners get READER access.", + // "Object owner gets OWNER access.", + // "Object owner gets OWNER access, and project team members get access according to their roles.", + // "Object owner gets OWNER access, and allUsers get READER access." + // ], + // "location": "query", + // "type": "string" + // }, + // "projection": { + // "description": "Set of properties to return. Defaults to full.", + // "enum": [ + // "full", + // "noAcl" + // ], + // "enumDescriptions": [ + // "Include all properties.", + // "Omit the acl property." + // ], + // "location": "query", + // "type": "string" + // } + // }, + // "path": "b/{bucket}/o/{object}", + // "request": { + // "$ref": "Object" + // }, + // "response": { + // "$ref": "Object" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/devstorage.full_control", + // "https://www.googleapis.com/auth/devstorage.read_write" + // ], + // "supportsMediaDownload": true, + // "useMediaDownloadService": true + // } + +} + +// method id "storage.objects.watchAll": + +type ObjectsWatchAllCall struct { + s *Service + bucket string + channel *Channel + urlParams_ gensupport.URLParams + ctx_ context.Context +} + +// WatchAll: Watch for changes on all objects in a bucket. +func (r *ObjectsService) WatchAll(bucket string, channel *Channel) *ObjectsWatchAllCall { + c := &ObjectsWatchAllCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.bucket = bucket + c.channel = channel + return c +} + +// Delimiter sets the optional parameter "delimiter": Returns results in +// a directory-like mode. items will contain only objects whose names, +// aside from the prefix, do not contain delimiter. Objects whose names, +// aside from the prefix, contain delimiter will have their name, +// truncated after the delimiter, returned in prefixes. Duplicate +// prefixes are omitted. +func (c *ObjectsWatchAllCall) Delimiter(delimiter string) *ObjectsWatchAllCall { + c.urlParams_.Set("delimiter", delimiter) + return c +} + +// MaxResults sets the optional parameter "maxResults": Maximum number +// of items plus prefixes to return. As duplicate prefixes are omitted, +// fewer total results may be returned than requested. The default value +// of this parameter is 1,000 items. +func (c *ObjectsWatchAllCall) MaxResults(maxResults int64) *ObjectsWatchAllCall { + c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) + return c +} + +// PageToken sets the optional parameter "pageToken": A +// previously-returned page token representing part of the larger set of +// results to view. +func (c *ObjectsWatchAllCall) PageToken(pageToken string) *ObjectsWatchAllCall { + c.urlParams_.Set("pageToken", pageToken) + return c +} + +// Prefix sets the optional parameter "prefix": Filter results to +// objects whose names begin with this prefix. 
+func (c *ObjectsWatchAllCall) Prefix(prefix string) *ObjectsWatchAllCall { + c.urlParams_.Set("prefix", prefix) + return c +} + +// Projection sets the optional parameter "projection": Set of +// properties to return. Defaults to noAcl. +// +// Possible values: +// "full" - Include all properties. +// "noAcl" - Omit the acl property. +func (c *ObjectsWatchAllCall) Projection(projection string) *ObjectsWatchAllCall { + c.urlParams_.Set("projection", projection) + return c +} + +// Versions sets the optional parameter "versions": If true, lists all +// versions of an object as distinct results. The default is false. For +// more information, see Object Versioning. +func (c *ObjectsWatchAllCall) Versions(versions bool) *ObjectsWatchAllCall { + c.urlParams_.Set("versions", fmt.Sprint(versions)) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *ObjectsWatchAllCall) Fields(s ...googleapi.Field) *ObjectsWatchAllCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *ObjectsWatchAllCall) Context(ctx context.Context) *ObjectsWatchAllCall { + c.ctx_ = ctx + return c +} + +func (c *ObjectsWatchAllCall) doRequest(alt string) (*http.Response, error) { + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.channel) + if err != nil { + return nil, err + } + ctype := "application/json" + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/o/watch") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("POST", urls, body) + googleapi.Expand(req.URL, map[string]string{ + "bucket": c.bucket, + }) + req.Header.Set("Content-Type", ctype) + req.Header.Set("User-Agent", c.s.userAgent()) + if c.ctx_ != nil { + return ctxhttp.Do(c.ctx_, c.s.client, req) + } + return c.s.client.Do(req) +} + +// Do executes the "storage.objects.watchAll" call. +// Exactly one of *Channel or error will be non-nil. Any non-2xx status +// code is an error. Response headers are in either +// *Channel.ServerResponse.Header or (if a response was returned at all) +// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to +// check whether the returned error was because http.StatusNotModified +// was returned. +func (c *ObjectsWatchAllCall) Do(opts ...googleapi.CallOption) (*Channel, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Channel{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Watch for changes on all objects in a bucket.", + // "httpMethod": "POST", + // "id": "storage.objects.watchAll", + // "parameterOrder": [ + // "bucket" + // ], + // "parameters": { + // "bucket": { + // "description": "Name of the bucket in which to look for objects.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "delimiter": { + // "description": "Returns results in a directory-like mode. items will contain only objects whose names, aside from the prefix, do not contain delimiter. Objects whose names, aside from the prefix, contain delimiter will have their name, truncated after the delimiter, returned in prefixes. Duplicate prefixes are omitted.", + // "location": "query", + // "type": "string" + // }, + // "maxResults": { + // "description": "Maximum number of items plus prefixes to return. As duplicate prefixes are omitted, fewer total results may be returned than requested. The default value of this parameter is 1,000 items.", + // "format": "uint32", + // "location": "query", + // "minimum": "0", + // "type": "integer" + // }, + // "pageToken": { + // "description": "A previously-returned page token representing part of the larger set of results to view.", + // "location": "query", + // "type": "string" + // }, + // "prefix": { + // "description": "Filter results to objects whose names begin with this prefix.", + // "location": "query", + // "type": "string" + // }, + // "projection": { + // "description": "Set of properties to return. Defaults to noAcl.", + // "enum": [ + // "full", + // "noAcl" + // ], + // "enumDescriptions": [ + // "Include all properties.", + // "Omit the acl property." + // ], + // "location": "query", + // "type": "string" + // }, + // "versions": { + // "description": "If true, lists all versions of an object as distinct results. The default is false. 
For more information, see Object Versioning.", + // "location": "query", + // "type": "boolean" + // } + // }, + // "path": "b/{bucket}/o/watch", + // "request": { + // "$ref": "Channel", + // "parameterName": "resource" + // }, + // "response": { + // "$ref": "Channel" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/cloud-platform.read-only", + // "https://www.googleapis.com/auth/devstorage.full_control", + // "https://www.googleapis.com/auth/devstorage.read_only", + // "https://www.googleapis.com/auth/devstorage.read_write" + // ], + // "supportsSubscription": true + // } + +} diff --git a/vendor/github.com/docker/distribution/vendor/google.golang.org/appengine/.travis.yml b/vendor/github.com/docker/distribution/vendor/google.golang.org/appengine/.travis.yml new file mode 100644 index 000000000..771520977 --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/google.golang.org/appengine/.travis.yml @@ -0,0 +1,18 @@ +language: go +sudo: false + +go: + - 1.4 + +install: + - go get -v -t -d google.golang.org/appengine/... + - mkdir sdk + - curl -o sdk.zip "https://storage.googleapis.com/appengine-sdks/featured/go_appengine_sdk_linux_amd64-1.9.24.zip" + - unzip sdk.zip -d sdk + - export APPENGINE_DEV_APPSERVER=$(pwd)/sdk/go_appengine/dev_appserver.py + +script: + - go version + - go test -v google.golang.org/appengine/... + - go test -v -race google.golang.org/appengine/... + - sdk/go_appengine/goapp test -v google.golang.org/appengine/... diff --git a/vendor/github.com/docker/distribution/vendor/google.golang.org/appengine/LICENSE b/vendor/github.com/docker/distribution/vendor/google.golang.org/appengine/LICENSE new file mode 100644 index 000000000..d64569567 --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/google.golang.org/appengine/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. 
+ + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/docker/distribution/vendor/google.golang.org/appengine/README.md b/vendor/github.com/docker/distribution/vendor/google.golang.org/appengine/README.md new file mode 100644 index 000000000..1dbb3341f --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/google.golang.org/appengine/README.md @@ -0,0 +1,73 @@ +# Go App Engine packages + +[![Build Status](https://travis-ci.org/golang/appengine.svg)](https://travis-ci.org/golang/appengine) + +This repository supports the Go runtime on App Engine, +including both classic App Engine and Managed VMs. +It provides APIs for interacting with App Engine services. +Its canonical import path is `google.golang.org/appengine`. + +See https://cloud.google.com/appengine/docs/go/ +for more information. + +File issue reports and feature requests on the [Google App Engine issue +tracker](https://code.google.com/p/googleappengine/issues/entry?template=Go%20defect). 
+ +## Directory structure +The top level directory of this repository is the `appengine` package. It +contains the +basic APIs (e.g. `appengine.NewContext`) that apply across APIs. Specific API +packages are in subdirectories (e.g. `datastore`). + +There is an `internal` subdirectory that contains service protocol buffers, +plus packages required for connectivity to make API calls. App Engine apps +should not directly import any package under `internal`. + +## Updating a Go App Engine app + +This section describes how to update a traditional Go App Engine app to use +these packages. + +### 1. Update YAML files (Managed VMs only) + +The `app.yaml` file (and YAML files for modules) should have these new lines added: +``` +vm: true +``` +See https://cloud.google.com/appengine/docs/go/modules/#Go_Instance_scaling_and_class for details. + +### 2. Update import paths + +The import paths for App Engine packages are now fully qualified, based at `google.golang.org/appengine`. +You will need to update your code to use import paths starting with that; for instance, +code importing `appengine/datastore` will now need to import `google.golang.org/appengine/datastore`. +You can do that manually, or by running this command to recursively update all Go source files in the current directory: +(may require GNU sed) +``` +sed -i '/"appengine/{s,"appengine,"google.golang.org/appengine,;s,appengine_,appengine/,}' \ + $(find . -name '*.go') +``` + +### 3. Update code using deprecated, removed or modified APIs + +Most App Engine services are available with exactly the same API. +A few APIs were cleaned up, and some are not available yet. +This list summarises the differences: + +* `appengine.Context` has been replaced with the `Context` type from `golang.org/x/net/context`. +* Logging methods that were on `appengine.Context` are now functions in `google.golang.org/appengine/log`. +* `appengine.Timeout` has been removed. Use `context.WithTimeout` instead. +* `appengine.Datacenter` now takes a `context.Context` argument. +* `datastore.PropertyLoadSaver` has been simplified to use slices in place of channels. +* `delay.Call` now returns an error. +* `search.FieldLoadSaver` now handles document metadata. +* `urlfetch.Transport` no longer has a Deadline field; set a deadline on the + `context.Context` instead. +* `aetest` no longer declares its own Context type, and uses the standard one instead. +* `taskqueue.QueueStats` no longer takes a maxTasks argument. That argument has been + deprecated and unused for a long time. +* `appengine.BackendHostname` and `appengine.BackendInstance` were for the deprecated backends feature. + Use `appengine.ModuleHostname`and `appengine.ModuleName` instead. +* Most of `appengine/file` and parts of `appengine/blobstore` are deprecated. + Use [Google Cloud Storage](https://godoc.org/google.golang.org/cloud/storage) instead. +* `appengine/socket` is not required on Managed VMs. Use the standard `net` package instead. diff --git a/vendor/github.com/docker/distribution/vendor/google.golang.org/appengine/appengine.go b/vendor/github.com/docker/distribution/vendor/google.golang.org/appengine/appengine.go new file mode 100644 index 000000000..be0b5f2bc --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/google.golang.org/appengine/appengine.go @@ -0,0 +1,76 @@ +// Copyright 2011 Google Inc. All rights reserved. +// Use of this source code is governed by the Apache 2.0 +// license that can be found in the LICENSE file. 
+ +// Package appengine provides basic functionality for Google App Engine. +// +// For more information on how to write Go apps for Google App Engine, see: +// https://cloud.google.com/appengine/docs/go/ +package appengine + +import ( + "net/http" + + "github.com/golang/protobuf/proto" + "golang.org/x/net/context" + + "google.golang.org/appengine/internal" +) + +// IsDevAppServer reports whether the App Engine app is running in the +// development App Server. +func IsDevAppServer() bool { + return internal.IsDevAppServer() +} + +// NewContext returns a context for an in-flight HTTP request. +// This function is cheap. +func NewContext(req *http.Request) context.Context { + return WithContext(context.Background(), req) +} + +// WithContext returns a copy of the parent context +// and associates it with an in-flight HTTP request. +// This function is cheap. +func WithContext(parent context.Context, req *http.Request) context.Context { + return internal.WithContext(parent, req) +} + +// TODO(dsymonds): Add a Call function here? Otherwise other packages can't access internal.Call. + +// BlobKey is a key for a blobstore blob. +// +// Conceptually, this type belongs in the blobstore package, but it lives in +// the appengine package to avoid a circular dependency: blobstore depends on +// datastore, and datastore needs to refer to the BlobKey type. +type BlobKey string + +// GeoPoint represents a location as latitude/longitude in degrees. +type GeoPoint struct { + Lat, Lng float64 +} + +// Valid returns whether a GeoPoint is within [-90, 90] latitude and [-180, 180] longitude. +func (g GeoPoint) Valid() bool { + return -90 <= g.Lat && g.Lat <= 90 && -180 <= g.Lng && g.Lng <= 180 +} + +// APICallFunc defines a function type for handling an API call. +// See WithCallOverride. +type APICallFunc func(ctx context.Context, service, method string, in, out proto.Message) error + +// WithAPICallFunc returns a copy of the parent context +// that will cause API calls to invoke f instead of their normal operation. +// +// This is intended for advanced users only. +func WithAPICallFunc(ctx context.Context, f APICallFunc) context.Context { + return internal.WithCallOverride(ctx, internal.CallOverrideFunc(f)) +} + +// APICall performs an API call. +// +// This is not intended for general use; it is exported for use in conjunction +// with WithAPICallFunc. +func APICall(ctx context.Context, service, method string, in, out proto.Message) error { + return internal.Call(ctx, service, method, in, out) +} diff --git a/vendor/github.com/docker/distribution/vendor/google.golang.org/appengine/appengine_vm.go b/vendor/github.com/docker/distribution/vendor/google.golang.org/appengine/appengine_vm.go new file mode 100644 index 000000000..2f7759067 --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/google.golang.org/appengine/appengine_vm.go @@ -0,0 +1,56 @@ +// Copyright 2015 Google Inc. All rights reserved. +// Use of this source code is governed by the Apache 2.0 +// license that can be found in the LICENSE file. + +// +build !appengine + +package appengine + +import ( + "golang.org/x/net/context" + + "google.golang.org/appengine/internal" +) + +// The comment below must not be changed. +// It is used by go-app-builder to recognise that this package has +// the Main function to use in the synthetic main. +// The gophers party all night; the rabbits provide the beats. + +// Main is the principal entry point for a Managed VMs app. 
+// It installs a trivial health checker if one isn't already registered, +// and starts listening on port 8080 (overridden by the $PORT environment +// variable). +// +// See https://cloud.google.com/appengine/docs/managed-vms/custom-runtimes#health_check_requests +// for details on how to do your own health checking. +// +// Main never returns. +// +// Main is designed so that the app's main package looks like this: +// +// package main +// +// import ( +// "google.golang.org/appengine" +// +// _ "myapp/package0" +// _ "myapp/package1" +// ) +// +// func main() { +// appengine.Main() +// } +// +// The "myapp/packageX" packages are expected to register HTTP handlers +// in their init functions. +func Main() { + internal.Main() +} + +// BackgroundContext returns a context not associated with a request. +// This should only be used when not servicing a request. +// This only works on Managed VMs. +func BackgroundContext() context.Context { + return internal.BackgroundContext() +} diff --git a/vendor/github.com/docker/distribution/vendor/google.golang.org/appengine/errors.go b/vendor/github.com/docker/distribution/vendor/google.golang.org/appengine/errors.go new file mode 100644 index 000000000..16d0772e2 --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/google.golang.org/appengine/errors.go @@ -0,0 +1,46 @@ +// Copyright 2011 Google Inc. All rights reserved. +// Use of this source code is governed by the Apache 2.0 +// license that can be found in the LICENSE file. + +// This file provides error functions for common API failure modes. + +package appengine + +import ( + "fmt" + + "google.golang.org/appengine/internal" +) + +// IsOverQuota reports whether err represents an API call failure +// due to insufficient available quota. +func IsOverQuota(err error) bool { + callErr, ok := err.(*internal.CallError) + return ok && callErr.Code == 4 +} + +// MultiError is returned by batch operations when there are errors with +// particular elements. Errors will be in a one-to-one correspondence with +// the input elements; successful elements will have a nil entry. +type MultiError []error + +func (m MultiError) Error() string { + s, n := "", 0 + for _, e := range m { + if e != nil { + if n == 0 { + s = e.Error() + } + n++ + } + } + switch n { + case 0: + return "(0 errors)" + case 1: + return s + case 2: + return s + " (and 1 other error)" + } + return fmt.Sprintf("%s (and %d other errors)", s, n-1) +} diff --git a/vendor/github.com/docker/distribution/vendor/google.golang.org/appengine/identity.go b/vendor/github.com/docker/distribution/vendor/google.golang.org/appengine/identity.go new file mode 100644 index 000000000..b8dcf8f36 --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/google.golang.org/appengine/identity.go @@ -0,0 +1,142 @@ +// Copyright 2011 Google Inc. All rights reserved. +// Use of this source code is governed by the Apache 2.0 +// license that can be found in the LICENSE file. + +package appengine + +import ( + "time" + + "golang.org/x/net/context" + + "google.golang.org/appengine/internal" + pb "google.golang.org/appengine/internal/app_identity" + modpb "google.golang.org/appengine/internal/modules" +) + +// AppID returns the application ID for the current application. +// The string will be a plain application ID (e.g. "appid"), with a +// domain prefix for custom domain deployments (e.g. "example.com:appid"). 
+func AppID(c context.Context) string { return internal.AppID(c) } + +// DefaultVersionHostname returns the standard hostname of the default version +// of the current application (e.g. "my-app.appspot.com"). This is suitable for +// use in constructing URLs. +func DefaultVersionHostname(c context.Context) string { + return internal.DefaultVersionHostname(c) +} + +// ModuleName returns the module name of the current instance. +func ModuleName(c context.Context) string { + return internal.ModuleName(c) +} + +// ModuleHostname returns a hostname of a module instance. +// If module is the empty string, it refers to the module of the current instance. +// If version is empty, it refers to the version of the current instance if valid, +// or the default version of the module of the current instance. +// If instance is empty, ModuleHostname returns the load-balancing hostname. +func ModuleHostname(c context.Context, module, version, instance string) (string, error) { + req := &modpb.GetHostnameRequest{} + if module != "" { + req.Module = &module + } + if version != "" { + req.Version = &version + } + if instance != "" { + req.Instance = &instance + } + res := &modpb.GetHostnameResponse{} + if err := internal.Call(c, "modules", "GetHostname", req, res); err != nil { + return "", err + } + return *res.Hostname, nil +} + +// VersionID returns the version ID for the current application. +// It will be of the form "X.Y", where X is specified in app.yaml, +// and Y is a number generated when each version of the app is uploaded. +// It does not include a module name. +func VersionID(c context.Context) string { return internal.VersionID(c) } + +// InstanceID returns a mostly-unique identifier for this instance. +func InstanceID() string { return internal.InstanceID() } + +// Datacenter returns an identifier for the datacenter that the instance is running in. +func Datacenter(c context.Context) string { return internal.Datacenter(c) } + +// ServerSoftware returns the App Engine release version. +// In production, it looks like "Google App Engine/X.Y.Z". +// In the development appserver, it looks like "Development/X.Y". +func ServerSoftware() string { return internal.ServerSoftware() } + +// RequestID returns a string that uniquely identifies the request. +func RequestID(c context.Context) string { return internal.RequestID(c) } + +// AccessToken generates an OAuth2 access token for the specified scopes on +// behalf of service account of this application. This token will expire after +// the returned time. +func AccessToken(c context.Context, scopes ...string) (token string, expiry time.Time, err error) { + req := &pb.GetAccessTokenRequest{Scope: scopes} + res := &pb.GetAccessTokenResponse{} + + err = internal.Call(c, "app_identity_service", "GetAccessToken", req, res) + if err != nil { + return "", time.Time{}, err + } + return res.GetAccessToken(), time.Unix(res.GetExpirationTime(), 0), nil +} + +// Certificate represents a public certificate for the app. +type Certificate struct { + KeyName string + Data []byte // PEM-encoded X.509 certificate +} + +// PublicCertificates retrieves the public certificates for the app. +// They can be used to verify a signature returned by SignBytes. 
+func PublicCertificates(c context.Context) ([]Certificate, error) { + req := &pb.GetPublicCertificateForAppRequest{} + res := &pb.GetPublicCertificateForAppResponse{} + if err := internal.Call(c, "app_identity_service", "GetPublicCertificatesForApp", req, res); err != nil { + return nil, err + } + var cs []Certificate + for _, pc := range res.PublicCertificateList { + cs = append(cs, Certificate{ + KeyName: pc.GetKeyName(), + Data: []byte(pc.GetX509CertificatePem()), + }) + } + return cs, nil +} + +// ServiceAccount returns a string representing the service account name, in +// the form of an email address (typically app_id@appspot.gserviceaccount.com). +func ServiceAccount(c context.Context) (string, error) { + req := &pb.GetServiceAccountNameRequest{} + res := &pb.GetServiceAccountNameResponse{} + + err := internal.Call(c, "app_identity_service", "GetServiceAccountName", req, res) + if err != nil { + return "", err + } + return res.GetServiceAccountName(), err +} + +// SignBytes signs bytes using a private key unique to your application. +func SignBytes(c context.Context, bytes []byte) (keyName string, signature []byte, err error) { + req := &pb.SignForAppRequest{BytesToSign: bytes} + res := &pb.SignForAppResponse{} + + if err := internal.Call(c, "app_identity_service", "SignForApp", req, res); err != nil { + return "", nil, err + } + return res.GetKeyName(), res.GetSignatureBytes(), nil +} + +func init() { + internal.RegisterErrorCodeMap("app_identity_service", pb.AppIdentityServiceError_ErrorCode_name) + internal.RegisterErrorCodeMap("modules", modpb.ModulesServiceError_ErrorCode_name) +} diff --git a/vendor/github.com/docker/distribution/vendor/google.golang.org/appengine/internal/api.go b/vendor/github.com/docker/distribution/vendor/google.golang.org/appengine/internal/api.go new file mode 100644 index 000000000..aa139d4d5 --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/google.golang.org/appengine/internal/api.go @@ -0,0 +1,640 @@ +// Copyright 2011 Google Inc. All rights reserved. +// Use of this source code is governed by the Apache 2.0 +// license that can be found in the LICENSE file. + +// +build !appengine + +package internal + +import ( + "bytes" + "errors" + "fmt" + "io/ioutil" + "log" + "net" + "net/http" + "net/url" + "os" + "runtime" + "strconv" + "strings" + "sync" + "sync/atomic" + "time" + + "github.com/golang/protobuf/proto" + netcontext "golang.org/x/net/context" + + basepb "google.golang.org/appengine/internal/base" + logpb "google.golang.org/appengine/internal/log" + remotepb "google.golang.org/appengine/internal/remote_api" +) + +const ( + apiPath = "/rpc_http" +) + +var ( + // Incoming headers. + ticketHeader = http.CanonicalHeaderKey("X-AppEngine-API-Ticket") + dapperHeader = http.CanonicalHeaderKey("X-Google-DapperTraceInfo") + traceHeader = http.CanonicalHeaderKey("X-Cloud-Trace-Context") + curNamespaceHeader = http.CanonicalHeaderKey("X-AppEngine-Current-Namespace") + userIPHeader = http.CanonicalHeaderKey("X-AppEngine-User-IP") + remoteAddrHeader = http.CanonicalHeaderKey("X-AppEngine-Remote-Addr") + + // Outgoing headers. 
+ apiEndpointHeader = http.CanonicalHeaderKey("X-Google-RPC-Service-Endpoint") + apiEndpointHeaderValue = []string{"app-engine-apis"} + apiMethodHeader = http.CanonicalHeaderKey("X-Google-RPC-Service-Method") + apiMethodHeaderValue = []string{"/VMRemoteAPI.CallRemoteAPI"} + apiDeadlineHeader = http.CanonicalHeaderKey("X-Google-RPC-Service-Deadline") + apiContentType = http.CanonicalHeaderKey("Content-Type") + apiContentTypeValue = []string{"application/octet-stream"} + logFlushHeader = http.CanonicalHeaderKey("X-AppEngine-Log-Flush-Count") + + apiHTTPClient = &http.Client{ + Transport: &http.Transport{ + Proxy: http.ProxyFromEnvironment, + Dial: limitDial, + }, + } +) + +func apiURL() *url.URL { + host, port := "appengine.googleapis.internal", "10001" + if h := os.Getenv("API_HOST"); h != "" { + host = h + } + if p := os.Getenv("API_PORT"); p != "" { + port = p + } + return &url.URL{ + Scheme: "http", + Host: host + ":" + port, + Path: apiPath, + } +} + +func handleHTTP(w http.ResponseWriter, r *http.Request) { + c := &context{ + req: r, + outHeader: w.Header(), + apiURL: apiURL(), + } + stopFlushing := make(chan int) + + ctxs.Lock() + ctxs.m[r] = c + ctxs.Unlock() + defer func() { + ctxs.Lock() + delete(ctxs.m, r) + ctxs.Unlock() + }() + + // Patch up RemoteAddr so it looks reasonable. + if addr := r.Header.Get(userIPHeader); addr != "" { + r.RemoteAddr = addr + } else if addr = r.Header.Get(remoteAddrHeader); addr != "" { + r.RemoteAddr = addr + } else { + // Should not normally reach here, but pick a sensible default anyway. + r.RemoteAddr = "127.0.0.1" + } + // The address in the headers will most likely be of these forms: + // 123.123.123.123 + // 2001:db8::1 + // net/http.Request.RemoteAddr is specified to be in "IP:port" form. + if _, _, err := net.SplitHostPort(r.RemoteAddr); err != nil { + // Assume the remote address is only a host; add a default port. + r.RemoteAddr = net.JoinHostPort(r.RemoteAddr, "80") + } + + // Start goroutine responsible for flushing app logs. + // This is done after adding c to ctx.m (and stopped before removing it) + // because flushing logs requires making an API call. + go c.logFlusher(stopFlushing) + + executeRequestSafely(c, r) + c.outHeader = nil // make sure header changes aren't respected any more + + stopFlushing <- 1 // any logging beyond this point will be dropped + + // Flush any pending logs asynchronously. + c.pendingLogs.Lock() + flushes := c.pendingLogs.flushes + if len(c.pendingLogs.lines) > 0 { + flushes++ + } + c.pendingLogs.Unlock() + go c.flushLog(false) + w.Header().Set(logFlushHeader, strconv.Itoa(flushes)) + + // Avoid nil Write call if c.Write is never called. + if c.outCode != 0 { + w.WriteHeader(c.outCode) + } + if c.outBody != nil { + w.Write(c.outBody) + } +} + +func executeRequestSafely(c *context, r *http.Request) { + defer func() { + if x := recover(); x != nil { + logf(c, 4, "%s", renderPanic(x)) // 4 == critical + c.outCode = 500 + } + }() + + http.DefaultServeMux.ServeHTTP(c, r) +} + +func renderPanic(x interface{}) string { + buf := make([]byte, 16<<10) // 16 KB should be plenty + buf = buf[:runtime.Stack(buf, false)] + + // Remove the first few stack frames: + // this func + // the recover closure in the caller + // That will root the stack trace at the site of the panic. 
+ const ( + skipStart = "internal.renderPanic" + skipFrames = 2 + ) + start := bytes.Index(buf, []byte(skipStart)) + p := start + for i := 0; i < skipFrames*2 && p+1 < len(buf); i++ { + p = bytes.IndexByte(buf[p+1:], '\n') + p + 1 + if p < 0 { + break + } + } + if p >= 0 { + // buf[start:p+1] is the block to remove. + // Copy buf[p+1:] over buf[start:] and shrink buf. + copy(buf[start:], buf[p+1:]) + buf = buf[:len(buf)-(p+1-start)] + } + + // Add panic heading. + head := fmt.Sprintf("panic: %v\n\n", x) + if len(head) > len(buf) { + // Extremely unlikely to happen. + return head + } + copy(buf[len(head):], buf) + copy(buf, head) + + return string(buf) +} + +var ctxs = struct { + sync.Mutex + m map[*http.Request]*context + bg *context // background context, lazily initialized + // dec is used by tests to decorate the netcontext.Context returned + // for a given request. This allows tests to add overrides (such as + // WithAppIDOverride) to the context. The map is nil outside tests. + dec map[*http.Request]func(netcontext.Context) netcontext.Context +}{ + m: make(map[*http.Request]*context), +} + +// context represents the context of an in-flight HTTP request. +// It implements the appengine.Context and http.ResponseWriter interfaces. +type context struct { + req *http.Request + + outCode int + outHeader http.Header + outBody []byte + + pendingLogs struct { + sync.Mutex + lines []*logpb.UserAppLogLine + flushes int + } + + apiURL *url.URL +} + +var contextKey = "holds a *context" + +func fromContext(ctx netcontext.Context) *context { + c, _ := ctx.Value(&contextKey).(*context) + return c +} + +func withContext(parent netcontext.Context, c *context) netcontext.Context { + ctx := netcontext.WithValue(parent, &contextKey, c) + if ns := c.req.Header.Get(curNamespaceHeader); ns != "" { + ctx = withNamespace(ctx, ns) + } + return ctx +} + +func toContext(c *context) netcontext.Context { + return withContext(netcontext.Background(), c) +} + +func IncomingHeaders(ctx netcontext.Context) http.Header { + if c := fromContext(ctx); c != nil { + return c.req.Header + } + return nil +} + +func WithContext(parent netcontext.Context, req *http.Request) netcontext.Context { + ctxs.Lock() + c := ctxs.m[req] + d := ctxs.dec[req] + ctxs.Unlock() + + if d != nil { + parent = d(parent) + } + + if c == nil { + // Someone passed in an http.Request that is not in-flight. + // We panic here rather than panicking at a later point + // so that stack traces will be more sensible. + log.Panic("appengine: NewContext passed an unknown http.Request") + } + return withContext(parent, c) +} + +func BackgroundContext() netcontext.Context { + ctxs.Lock() + defer ctxs.Unlock() + + if ctxs.bg != nil { + return toContext(ctxs.bg) + } + + // Compute background security ticket. + appID := partitionlessAppID() + escAppID := strings.Replace(strings.Replace(appID, ":", "_", -1), ".", "_", -1) + majVersion := VersionID(nil) + if i := strings.Index(majVersion, "."); i > 0 { + majVersion = majVersion[:i] + } + ticket := fmt.Sprintf("%s/%s.%s.%s", escAppID, ModuleName(nil), majVersion, InstanceID()) + + ctxs.bg = &context{ + req: &http.Request{ + Header: http.Header{ + ticketHeader: []string{ticket}, + }, + }, + apiURL: apiURL(), + } + + // TODO(dsymonds): Wire up the shutdown handler to do a final flush. + go ctxs.bg.logFlusher(make(chan int)) + + return toContext(ctxs.bg) +} + +// RegisterTestRequest registers the HTTP request req for testing, such that +// any API calls are sent to the provided URL. 
It returns a closure to delete +// the registration. +// It should only be used by aetest package. +func RegisterTestRequest(req *http.Request, apiURL *url.URL, decorate func(netcontext.Context) netcontext.Context) func() { + c := &context{ + req: req, + apiURL: apiURL, + } + ctxs.Lock() + defer ctxs.Unlock() + if _, ok := ctxs.m[req]; ok { + log.Panic("req already associated with context") + } + if _, ok := ctxs.dec[req]; ok { + log.Panic("req already associated with context") + } + if ctxs.dec == nil { + ctxs.dec = make(map[*http.Request]func(netcontext.Context) netcontext.Context) + } + ctxs.m[req] = c + ctxs.dec[req] = decorate + + return func() { + ctxs.Lock() + delete(ctxs.m, req) + delete(ctxs.dec, req) + ctxs.Unlock() + } +} + +var errTimeout = &CallError{ + Detail: "Deadline exceeded", + Code: int32(remotepb.RpcError_CANCELLED), + Timeout: true, +} + +func (c *context) Header() http.Header { return c.outHeader } + +// Copied from $GOROOT/src/pkg/net/http/transfer.go. Some response status +// codes do not permit a response body (nor response entity headers such as +// Content-Length, Content-Type, etc). +func bodyAllowedForStatus(status int) bool { + switch { + case status >= 100 && status <= 199: + return false + case status == 204: + return false + case status == 304: + return false + } + return true +} + +func (c *context) Write(b []byte) (int, error) { + if c.outCode == 0 { + c.WriteHeader(http.StatusOK) + } + if len(b) > 0 && !bodyAllowedForStatus(c.outCode) { + return 0, http.ErrBodyNotAllowed + } + c.outBody = append(c.outBody, b...) + return len(b), nil +} + +func (c *context) WriteHeader(code int) { + if c.outCode != 0 { + logf(c, 3, "WriteHeader called multiple times on request.") // error level + return + } + c.outCode = code +} + +func (c *context) post(body []byte, timeout time.Duration) (b []byte, err error) { + hreq := &http.Request{ + Method: "POST", + URL: c.apiURL, + Header: http.Header{ + apiEndpointHeader: apiEndpointHeaderValue, + apiMethodHeader: apiMethodHeaderValue, + apiContentType: apiContentTypeValue, + apiDeadlineHeader: []string{strconv.FormatFloat(timeout.Seconds(), 'f', -1, 64)}, + }, + Body: ioutil.NopCloser(bytes.NewReader(body)), + ContentLength: int64(len(body)), + Host: c.apiURL.Host, + } + if info := c.req.Header.Get(dapperHeader); info != "" { + hreq.Header.Set(dapperHeader, info) + } + if info := c.req.Header.Get(traceHeader); info != "" { + hreq.Header.Set(traceHeader, info) + } + + tr := apiHTTPClient.Transport.(*http.Transport) + + var timedOut int32 // atomic; set to 1 if timed out + t := time.AfterFunc(timeout, func() { + atomic.StoreInt32(&timedOut, 1) + tr.CancelRequest(hreq) + }) + defer t.Stop() + defer func() { + // Check if timeout was exceeded. 
+ if atomic.LoadInt32(&timedOut) != 0 { + err = errTimeout + } + }() + + hresp, err := apiHTTPClient.Do(hreq) + if err != nil { + return nil, &CallError{ + Detail: fmt.Sprintf("service bridge HTTP failed: %v", err), + Code: int32(remotepb.RpcError_UNKNOWN), + } + } + defer hresp.Body.Close() + hrespBody, err := ioutil.ReadAll(hresp.Body) + if hresp.StatusCode != 200 { + return nil, &CallError{ + Detail: fmt.Sprintf("service bridge returned HTTP %d (%q)", hresp.StatusCode, hrespBody), + Code: int32(remotepb.RpcError_UNKNOWN), + } + } + if err != nil { + return nil, &CallError{ + Detail: fmt.Sprintf("service bridge response bad: %v", err), + Code: int32(remotepb.RpcError_UNKNOWN), + } + } + return hrespBody, nil +} + +func Call(ctx netcontext.Context, service, method string, in, out proto.Message) error { + if f, ctx, ok := callOverrideFromContext(ctx); ok { + return f(ctx, service, method, in, out) + } + + // Handle already-done contexts quickly. + select { + case <-ctx.Done(): + return ctx.Err() + default: + } + + c := fromContext(ctx) + if c == nil { + // Give a good error message rather than a panic lower down. + return errors.New("not an App Engine context") + } + + // Apply transaction modifications if we're in a transaction. + if t := transactionFromContext(ctx); t != nil { + if t.finished { + return errors.New("transaction context has expired") + } + applyTransaction(in, &t.transaction) + } + + // Default RPC timeout is 60s. + timeout := 60 * time.Second + if deadline, ok := ctx.Deadline(); ok { + timeout = deadline.Sub(time.Now()) + } + + data, err := proto.Marshal(in) + if err != nil { + return err + } + + ticket := c.req.Header.Get(ticketHeader) + req := &remotepb.Request{ + ServiceName: &service, + Method: &method, + Request: data, + RequestId: &ticket, + } + hreqBody, err := proto.Marshal(req) + if err != nil { + return err + } + + hrespBody, err := c.post(hreqBody, timeout) + if err != nil { + return err + } + + res := &remotepb.Response{} + if err := proto.Unmarshal(hrespBody, res); err != nil { + return err + } + if res.RpcError != nil { + ce := &CallError{ + Detail: res.RpcError.GetDetail(), + Code: *res.RpcError.Code, + } + switch remotepb.RpcError_ErrorCode(ce.Code) { + case remotepb.RpcError_CANCELLED, remotepb.RpcError_DEADLINE_EXCEEDED: + ce.Timeout = true + } + return ce + } + if res.ApplicationError != nil { + return &APIError{ + Service: *req.ServiceName, + Detail: res.ApplicationError.GetDetail(), + Code: *res.ApplicationError.Code, + } + } + if res.Exception != nil || res.JavaException != nil { + // This shouldn't happen, but let's be defensive. + return &CallError{ + Detail: "service bridge returned exception", + Code: int32(remotepb.RpcError_UNKNOWN), + } + } + return proto.Unmarshal(res.Response, out) +} + +func (c *context) Request() *http.Request { + return c.req +} + +func (c *context) addLogLine(ll *logpb.UserAppLogLine) { + // Truncate long log lines. + // TODO(dsymonds): Check if this is still necessary. + const lim = 8 << 10 + if len(*ll.Message) > lim { + suffix := fmt.Sprintf("...(length %d)", len(*ll.Message)) + ll.Message = proto.String((*ll.Message)[:lim-len(suffix)] + suffix) + } + + c.pendingLogs.Lock() + c.pendingLogs.lines = append(c.pendingLogs.lines, ll) + c.pendingLogs.Unlock() +} + +var logLevelName = map[int64]string{ + 0: "DEBUG", + 1: "INFO", + 2: "WARNING", + 3: "ERROR", + 4: "CRITICAL", +} + +func logf(c *context, level int64, format string, args ...interface{}) { + s := fmt.Sprintf(format, args...) 
+ s = strings.TrimRight(s, "\n") // Remove any trailing newline characters. + c.addLogLine(&logpb.UserAppLogLine{ + TimestampUsec: proto.Int64(time.Now().UnixNano() / 1e3), + Level: &level, + Message: &s, + }) + log.Print(logLevelName[level] + ": " + s) +} + +// flushLog attempts to flush any pending logs to the appserver. +// It should not be called concurrently. +func (c *context) flushLog(force bool) (flushed bool) { + c.pendingLogs.Lock() + // Grab up to 30 MB. We can get away with up to 32 MB, but let's be cautious. + n, rem := 0, 30<<20 + for ; n < len(c.pendingLogs.lines); n++ { + ll := c.pendingLogs.lines[n] + // Each log line will require about 3 bytes of overhead. + nb := proto.Size(ll) + 3 + if nb > rem { + break + } + rem -= nb + } + lines := c.pendingLogs.lines[:n] + c.pendingLogs.lines = c.pendingLogs.lines[n:] + c.pendingLogs.Unlock() + + if len(lines) == 0 && !force { + // Nothing to flush. + return false + } + + rescueLogs := false + defer func() { + if rescueLogs { + c.pendingLogs.Lock() + c.pendingLogs.lines = append(lines, c.pendingLogs.lines...) + c.pendingLogs.Unlock() + } + }() + + buf, err := proto.Marshal(&logpb.UserAppLogGroup{ + LogLine: lines, + }) + if err != nil { + log.Printf("internal.flushLog: marshaling UserAppLogGroup: %v", err) + rescueLogs = true + return false + } + + req := &logpb.FlushRequest{ + Logs: buf, + } + res := &basepb.VoidProto{} + c.pendingLogs.Lock() + c.pendingLogs.flushes++ + c.pendingLogs.Unlock() + if err := Call(toContext(c), "logservice", "Flush", req, res); err != nil { + log.Printf("internal.flushLog: Flush RPC: %v", err) + rescueLogs = true + return false + } + return true +} + +const ( + // Log flushing parameters. + flushInterval = 1 * time.Second + forceFlushInterval = 60 * time.Second +) + +func (c *context) logFlusher(stop <-chan int) { + lastFlush := time.Now() + tick := time.NewTicker(flushInterval) + for { + select { + case <-stop: + // Request finished. + tick.Stop() + return + case <-tick.C: + force := time.Now().Sub(lastFlush) > forceFlushInterval + if c.flushLog(force) { + lastFlush = time.Now() + } + } + } +} + +func ContextForTesting(req *http.Request) netcontext.Context { + return toContext(&context{req: req}) +} diff --git a/vendor/github.com/docker/distribution/vendor/google.golang.org/appengine/internal/api_classic.go b/vendor/github.com/docker/distribution/vendor/google.golang.org/appengine/internal/api_classic.go new file mode 100644 index 000000000..1c072e9db --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/google.golang.org/appengine/internal/api_classic.go @@ -0,0 +1,133 @@ +// Copyright 2015 Google Inc. All rights reserved. +// Use of this source code is governed by the Apache 2.0 +// license that can be found in the LICENSE file. + +// +build appengine + +package internal + +import ( + "errors" + "net/http" + "time" + + "appengine" + "appengine_internal" + basepb "appengine_internal/base" + + "github.com/golang/protobuf/proto" + netcontext "golang.org/x/net/context" +) + +var contextKey = "holds an appengine.Context" + +func fromContext(ctx netcontext.Context) appengine.Context { + c, _ := ctx.Value(&contextKey).(appengine.Context) + return c +} + +// This is only for classic App Engine adapters. 
+func ClassicContextFromContext(ctx netcontext.Context) appengine.Context { + return fromContext(ctx) +} + +func withContext(parent netcontext.Context, c appengine.Context) netcontext.Context { + ctx := netcontext.WithValue(parent, &contextKey, c) + + s := &basepb.StringProto{} + c.Call("__go__", "GetNamespace", &basepb.VoidProto{}, s, nil) + if ns := s.GetValue(); ns != "" { + ctx = NamespacedContext(ctx, ns) + } + + return ctx +} + +func IncomingHeaders(ctx netcontext.Context) http.Header { + if c := fromContext(ctx); c != nil { + if req, ok := c.Request().(*http.Request); ok { + return req.Header + } + } + return nil +} + +func WithContext(parent netcontext.Context, req *http.Request) netcontext.Context { + c := appengine.NewContext(req) + return withContext(parent, c) +} + +func Call(ctx netcontext.Context, service, method string, in, out proto.Message) error { + if f, ctx, ok := callOverrideFromContext(ctx); ok { + return f(ctx, service, method, in, out) + } + + // Handle already-done contexts quickly. + select { + case <-ctx.Done(): + return ctx.Err() + default: + } + + c := fromContext(ctx) + if c == nil { + // Give a good error message rather than a panic lower down. + return errors.New("not an App Engine context") + } + + // Apply transaction modifications if we're in a transaction. + if t := transactionFromContext(ctx); t != nil { + if t.finished { + return errors.New("transaction context has expired") + } + applyTransaction(in, &t.transaction) + } + + var opts *appengine_internal.CallOptions + if d, ok := ctx.Deadline(); ok { + opts = &appengine_internal.CallOptions{ + Timeout: d.Sub(time.Now()), + } + } + + err := c.Call(service, method, in, out, opts) + switch v := err.(type) { + case *appengine_internal.APIError: + return &APIError{ + Service: v.Service, + Detail: v.Detail, + Code: v.Code, + } + case *appengine_internal.CallError: + return &CallError{ + Detail: v.Detail, + Code: v.Code, + Timeout: v.Timeout, + } + } + return err +} + +func handleHTTP(w http.ResponseWriter, r *http.Request) { + panic("handleHTTP called; this should be impossible") +} + +func logf(c appengine.Context, level int64, format string, args ...interface{}) { + var fn func(format string, args ...interface{}) + switch level { + case 0: + fn = c.Debugf + case 1: + fn = c.Infof + case 2: + fn = c.Warningf + case 3: + fn = c.Errorf + case 4: + fn = c.Criticalf + default: + // This shouldn't happen. + fn = c.Criticalf + } + fn(format, args...) +} diff --git a/vendor/github.com/docker/distribution/vendor/google.golang.org/appengine/internal/api_common.go b/vendor/github.com/docker/distribution/vendor/google.golang.org/appengine/internal/api_common.go new file mode 100644 index 000000000..ec5383e66 --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/google.golang.org/appengine/internal/api_common.go @@ -0,0 +1,101 @@ +// Copyright 2015 Google Inc. All rights reserved. +// Use of this source code is governed by the Apache 2.0 +// license that can be found in the LICENSE file. + +package internal + +import ( + "github.com/golang/protobuf/proto" + netcontext "golang.org/x/net/context" +) + +type CallOverrideFunc func(ctx netcontext.Context, service, method string, in, out proto.Message) error + +var callOverrideKey = "holds []CallOverrideFunc" + +func WithCallOverride(ctx netcontext.Context, f CallOverrideFunc) netcontext.Context { + // We avoid appending to any existing call override + // so we don't risk overwriting a popped stack below. 
+ var cofs []CallOverrideFunc + if uf, ok := ctx.Value(&callOverrideKey).([]CallOverrideFunc); ok { + cofs = append(cofs, uf...) + } + cofs = append(cofs, f) + return netcontext.WithValue(ctx, &callOverrideKey, cofs) +} + +func callOverrideFromContext(ctx netcontext.Context) (CallOverrideFunc, netcontext.Context, bool) { + cofs, _ := ctx.Value(&callOverrideKey).([]CallOverrideFunc) + if len(cofs) == 0 { + return nil, nil, false + } + // We found a list of overrides; grab the last, and reconstitute a + // context that will hide it. + f := cofs[len(cofs)-1] + ctx = netcontext.WithValue(ctx, &callOverrideKey, cofs[:len(cofs)-1]) + return f, ctx, true +} + +type logOverrideFunc func(level int64, format string, args ...interface{}) + +var logOverrideKey = "holds a logOverrideFunc" + +func WithLogOverride(ctx netcontext.Context, f logOverrideFunc) netcontext.Context { + return netcontext.WithValue(ctx, &logOverrideKey, f) +} + +var appIDOverrideKey = "holds a string, being the full app ID" + +func WithAppIDOverride(ctx netcontext.Context, appID string) netcontext.Context { + return netcontext.WithValue(ctx, &appIDOverrideKey, appID) +} + +var namespaceKey = "holds the namespace string" + +func withNamespace(ctx netcontext.Context, ns string) netcontext.Context { + return netcontext.WithValue(ctx, &namespaceKey, ns) +} + +func NamespaceFromContext(ctx netcontext.Context) string { + // If there's no namespace, return the empty string. + ns, _ := ctx.Value(&namespaceKey).(string) + return ns +} + +// FullyQualifiedAppID returns the fully-qualified application ID. +// This may contain a partition prefix (e.g. "s~" for High Replication apps), +// or a domain prefix (e.g. "example.com:"). +func FullyQualifiedAppID(ctx netcontext.Context) string { + if id, ok := ctx.Value(&appIDOverrideKey).(string); ok { + return id + } + return fullyQualifiedAppID(ctx) +} + +func Logf(ctx netcontext.Context, level int64, format string, args ...interface{}) { + if f, ok := ctx.Value(&logOverrideKey).(logOverrideFunc); ok { + f(level, format, args...) + return + } + logf(fromContext(ctx), level, format, args...) +} + +// NamespacedContext wraps a Context to support namespaces. +func NamespacedContext(ctx netcontext.Context, namespace string) netcontext.Context { + n := &namespacedContext{ + namespace: namespace, + } + return withNamespace(WithCallOverride(ctx, n.call), namespace) +} + +type namespacedContext struct { + namespace string +} + +func (n *namespacedContext) call(ctx netcontext.Context, service, method string, in, out proto.Message) error { + // Apply any namespace mods. + if mod, ok := NamespaceMods[service]; ok { + mod(in, n.namespace) + } + return Call(ctx, service, method, in, out) +} diff --git a/vendor/github.com/docker/distribution/vendor/google.golang.org/appengine/internal/app_id.go b/vendor/github.com/docker/distribution/vendor/google.golang.org/appengine/internal/app_id.go new file mode 100644 index 000000000..11df8c07b --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/google.golang.org/appengine/internal/app_id.go @@ -0,0 +1,28 @@ +// Copyright 2011 Google Inc. All rights reserved. +// Use of this source code is governed by the Apache 2.0 +// license that can be found in the LICENSE file. 
+ +package internal + +import ( + "strings" +) + +func parseFullAppID(appid string) (partition, domain, displayID string) { + if i := strings.Index(appid, "~"); i != -1 { + partition, appid = appid[:i], appid[i+1:] + } + if i := strings.Index(appid, ":"); i != -1 { + domain, appid = appid[:i], appid[i+1:] + } + return partition, domain, appid +} + +// appID returns "appid" or "domain.com:appid". +func appID(fullAppID string) string { + _, dom, dis := parseFullAppID(fullAppID) + if dom != "" { + return dom + ":" + dis + } + return dis +} diff --git a/vendor/github.com/docker/distribution/vendor/google.golang.org/appengine/internal/app_identity/app_identity_service.pb.go b/vendor/github.com/docker/distribution/vendor/google.golang.org/appengine/internal/app_identity/app_identity_service.pb.go new file mode 100644 index 000000000..87d9701b8 --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/google.golang.org/appengine/internal/app_identity/app_identity_service.pb.go @@ -0,0 +1,296 @@ +// Code generated by protoc-gen-go. +// source: google.golang.org/appengine/internal/app_identity/app_identity_service.proto +// DO NOT EDIT! + +/* +Package app_identity is a generated protocol buffer package. + +It is generated from these files: + google.golang.org/appengine/internal/app_identity/app_identity_service.proto + +It has these top-level messages: + AppIdentityServiceError + SignForAppRequest + SignForAppResponse + GetPublicCertificateForAppRequest + PublicCertificate + GetPublicCertificateForAppResponse + GetServiceAccountNameRequest + GetServiceAccountNameResponse + GetAccessTokenRequest + GetAccessTokenResponse + GetDefaultGcsBucketNameRequest + GetDefaultGcsBucketNameResponse +*/ +package app_identity + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" + +// Reference imports to suppress errors if they are not otherwise used. 
+var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +type AppIdentityServiceError_ErrorCode int32 + +const ( + AppIdentityServiceError_SUCCESS AppIdentityServiceError_ErrorCode = 0 + AppIdentityServiceError_UNKNOWN_SCOPE AppIdentityServiceError_ErrorCode = 9 + AppIdentityServiceError_BLOB_TOO_LARGE AppIdentityServiceError_ErrorCode = 1000 + AppIdentityServiceError_DEADLINE_EXCEEDED AppIdentityServiceError_ErrorCode = 1001 + AppIdentityServiceError_NOT_A_VALID_APP AppIdentityServiceError_ErrorCode = 1002 + AppIdentityServiceError_UNKNOWN_ERROR AppIdentityServiceError_ErrorCode = 1003 + AppIdentityServiceError_NOT_ALLOWED AppIdentityServiceError_ErrorCode = 1005 + AppIdentityServiceError_NOT_IMPLEMENTED AppIdentityServiceError_ErrorCode = 1006 +) + +var AppIdentityServiceError_ErrorCode_name = map[int32]string{ + 0: "SUCCESS", + 9: "UNKNOWN_SCOPE", + 1000: "BLOB_TOO_LARGE", + 1001: "DEADLINE_EXCEEDED", + 1002: "NOT_A_VALID_APP", + 1003: "UNKNOWN_ERROR", + 1005: "NOT_ALLOWED", + 1006: "NOT_IMPLEMENTED", +} +var AppIdentityServiceError_ErrorCode_value = map[string]int32{ + "SUCCESS": 0, + "UNKNOWN_SCOPE": 9, + "BLOB_TOO_LARGE": 1000, + "DEADLINE_EXCEEDED": 1001, + "NOT_A_VALID_APP": 1002, + "UNKNOWN_ERROR": 1003, + "NOT_ALLOWED": 1005, + "NOT_IMPLEMENTED": 1006, +} + +func (x AppIdentityServiceError_ErrorCode) Enum() *AppIdentityServiceError_ErrorCode { + p := new(AppIdentityServiceError_ErrorCode) + *p = x + return p +} +func (x AppIdentityServiceError_ErrorCode) String() string { + return proto.EnumName(AppIdentityServiceError_ErrorCode_name, int32(x)) +} +func (x *AppIdentityServiceError_ErrorCode) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(AppIdentityServiceError_ErrorCode_value, data, "AppIdentityServiceError_ErrorCode") + if err != nil { + return err + } + *x = AppIdentityServiceError_ErrorCode(value) + return nil +} + +type AppIdentityServiceError struct { + XXX_unrecognized []byte `json:"-"` +} + +func (m *AppIdentityServiceError) Reset() { *m = AppIdentityServiceError{} } +func (m *AppIdentityServiceError) String() string { return proto.CompactTextString(m) } +func (*AppIdentityServiceError) ProtoMessage() {} + +type SignForAppRequest struct { + BytesToSign []byte `protobuf:"bytes,1,opt,name=bytes_to_sign" json:"bytes_to_sign,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *SignForAppRequest) Reset() { *m = SignForAppRequest{} } +func (m *SignForAppRequest) String() string { return proto.CompactTextString(m) } +func (*SignForAppRequest) ProtoMessage() {} + +func (m *SignForAppRequest) GetBytesToSign() []byte { + if m != nil { + return m.BytesToSign + } + return nil +} + +type SignForAppResponse struct { + KeyName *string `protobuf:"bytes,1,opt,name=key_name" json:"key_name,omitempty"` + SignatureBytes []byte `protobuf:"bytes,2,opt,name=signature_bytes" json:"signature_bytes,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *SignForAppResponse) Reset() { *m = SignForAppResponse{} } +func (m *SignForAppResponse) String() string { return proto.CompactTextString(m) } +func (*SignForAppResponse) ProtoMessage() {} + +func (m *SignForAppResponse) GetKeyName() string { + if m != nil && m.KeyName != nil { + return *m.KeyName + } + return "" +} + +func (m *SignForAppResponse) GetSignatureBytes() []byte { + if m != nil { + return m.SignatureBytes + } + return nil +} + +type GetPublicCertificateForAppRequest struct { + XXX_unrecognized []byte `json:"-"` +} + +func (m *GetPublicCertificateForAppRequest) Reset() { *m = 
GetPublicCertificateForAppRequest{} } +func (m *GetPublicCertificateForAppRequest) String() string { return proto.CompactTextString(m) } +func (*GetPublicCertificateForAppRequest) ProtoMessage() {} + +type PublicCertificate struct { + KeyName *string `protobuf:"bytes,1,opt,name=key_name" json:"key_name,omitempty"` + X509CertificatePem *string `protobuf:"bytes,2,opt,name=x509_certificate_pem" json:"x509_certificate_pem,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *PublicCertificate) Reset() { *m = PublicCertificate{} } +func (m *PublicCertificate) String() string { return proto.CompactTextString(m) } +func (*PublicCertificate) ProtoMessage() {} + +func (m *PublicCertificate) GetKeyName() string { + if m != nil && m.KeyName != nil { + return *m.KeyName + } + return "" +} + +func (m *PublicCertificate) GetX509CertificatePem() string { + if m != nil && m.X509CertificatePem != nil { + return *m.X509CertificatePem + } + return "" +} + +type GetPublicCertificateForAppResponse struct { + PublicCertificateList []*PublicCertificate `protobuf:"bytes,1,rep,name=public_certificate_list" json:"public_certificate_list,omitempty"` + MaxClientCacheTimeInSecond *int64 `protobuf:"varint,2,opt,name=max_client_cache_time_in_second" json:"max_client_cache_time_in_second,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *GetPublicCertificateForAppResponse) Reset() { *m = GetPublicCertificateForAppResponse{} } +func (m *GetPublicCertificateForAppResponse) String() string { return proto.CompactTextString(m) } +func (*GetPublicCertificateForAppResponse) ProtoMessage() {} + +func (m *GetPublicCertificateForAppResponse) GetPublicCertificateList() []*PublicCertificate { + if m != nil { + return m.PublicCertificateList + } + return nil +} + +func (m *GetPublicCertificateForAppResponse) GetMaxClientCacheTimeInSecond() int64 { + if m != nil && m.MaxClientCacheTimeInSecond != nil { + return *m.MaxClientCacheTimeInSecond + } + return 0 +} + +type GetServiceAccountNameRequest struct { + XXX_unrecognized []byte `json:"-"` +} + +func (m *GetServiceAccountNameRequest) Reset() { *m = GetServiceAccountNameRequest{} } +func (m *GetServiceAccountNameRequest) String() string { return proto.CompactTextString(m) } +func (*GetServiceAccountNameRequest) ProtoMessage() {} + +type GetServiceAccountNameResponse struct { + ServiceAccountName *string `protobuf:"bytes,1,opt,name=service_account_name" json:"service_account_name,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *GetServiceAccountNameResponse) Reset() { *m = GetServiceAccountNameResponse{} } +func (m *GetServiceAccountNameResponse) String() string { return proto.CompactTextString(m) } +func (*GetServiceAccountNameResponse) ProtoMessage() {} + +func (m *GetServiceAccountNameResponse) GetServiceAccountName() string { + if m != nil && m.ServiceAccountName != nil { + return *m.ServiceAccountName + } + return "" +} + +type GetAccessTokenRequest struct { + Scope []string `protobuf:"bytes,1,rep,name=scope" json:"scope,omitempty"` + ServiceAccountId *int64 `protobuf:"varint,2,opt,name=service_account_id" json:"service_account_id,omitempty"` + ServiceAccountName *string `protobuf:"bytes,3,opt,name=service_account_name" json:"service_account_name,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *GetAccessTokenRequest) Reset() { *m = GetAccessTokenRequest{} } +func (m *GetAccessTokenRequest) String() string { return proto.CompactTextString(m) } +func (*GetAccessTokenRequest) ProtoMessage() {} + +func (m *GetAccessTokenRequest) 
GetScope() []string { + if m != nil { + return m.Scope + } + return nil +} + +func (m *GetAccessTokenRequest) GetServiceAccountId() int64 { + if m != nil && m.ServiceAccountId != nil { + return *m.ServiceAccountId + } + return 0 +} + +func (m *GetAccessTokenRequest) GetServiceAccountName() string { + if m != nil && m.ServiceAccountName != nil { + return *m.ServiceAccountName + } + return "" +} + +type GetAccessTokenResponse struct { + AccessToken *string `protobuf:"bytes,1,opt,name=access_token" json:"access_token,omitempty"` + ExpirationTime *int64 `protobuf:"varint,2,opt,name=expiration_time" json:"expiration_time,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *GetAccessTokenResponse) Reset() { *m = GetAccessTokenResponse{} } +func (m *GetAccessTokenResponse) String() string { return proto.CompactTextString(m) } +func (*GetAccessTokenResponse) ProtoMessage() {} + +func (m *GetAccessTokenResponse) GetAccessToken() string { + if m != nil && m.AccessToken != nil { + return *m.AccessToken + } + return "" +} + +func (m *GetAccessTokenResponse) GetExpirationTime() int64 { + if m != nil && m.ExpirationTime != nil { + return *m.ExpirationTime + } + return 0 +} + +type GetDefaultGcsBucketNameRequest struct { + XXX_unrecognized []byte `json:"-"` +} + +func (m *GetDefaultGcsBucketNameRequest) Reset() { *m = GetDefaultGcsBucketNameRequest{} } +func (m *GetDefaultGcsBucketNameRequest) String() string { return proto.CompactTextString(m) } +func (*GetDefaultGcsBucketNameRequest) ProtoMessage() {} + +type GetDefaultGcsBucketNameResponse struct { + DefaultGcsBucketName *string `protobuf:"bytes,1,opt,name=default_gcs_bucket_name" json:"default_gcs_bucket_name,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *GetDefaultGcsBucketNameResponse) Reset() { *m = GetDefaultGcsBucketNameResponse{} } +func (m *GetDefaultGcsBucketNameResponse) String() string { return proto.CompactTextString(m) } +func (*GetDefaultGcsBucketNameResponse) ProtoMessage() {} + +func (m *GetDefaultGcsBucketNameResponse) GetDefaultGcsBucketName() string { + if m != nil && m.DefaultGcsBucketName != nil { + return *m.DefaultGcsBucketName + } + return "" +} + +func init() { +} diff --git a/vendor/github.com/docker/distribution/vendor/google.golang.org/appengine/internal/app_identity/app_identity_service.proto b/vendor/github.com/docker/distribution/vendor/google.golang.org/appengine/internal/app_identity/app_identity_service.proto new file mode 100644 index 000000000..19610ca5b --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/google.golang.org/appengine/internal/app_identity/app_identity_service.proto @@ -0,0 +1,64 @@ +syntax = "proto2"; +option go_package = "app_identity"; + +package appengine; + +message AppIdentityServiceError { + enum ErrorCode { + SUCCESS = 0; + UNKNOWN_SCOPE = 9; + BLOB_TOO_LARGE = 1000; + DEADLINE_EXCEEDED = 1001; + NOT_A_VALID_APP = 1002; + UNKNOWN_ERROR = 1003; + NOT_ALLOWED = 1005; + NOT_IMPLEMENTED = 1006; + } +} + +message SignForAppRequest { + optional bytes bytes_to_sign = 1; +} + +message SignForAppResponse { + optional string key_name = 1; + optional bytes signature_bytes = 2; +} + +message GetPublicCertificateForAppRequest { +} + +message PublicCertificate { + optional string key_name = 1; + optional string x509_certificate_pem = 2; +} + +message GetPublicCertificateForAppResponse { + repeated PublicCertificate public_certificate_list = 1; + optional int64 max_client_cache_time_in_second = 2; +} + +message GetServiceAccountNameRequest { +} + +message 
GetServiceAccountNameResponse { + optional string service_account_name = 1; +} + +message GetAccessTokenRequest { + repeated string scope = 1; + optional int64 service_account_id = 2; + optional string service_account_name = 3; +} + +message GetAccessTokenResponse { + optional string access_token = 1; + optional int64 expiration_time = 2; +} + +message GetDefaultGcsBucketNameRequest { +} + +message GetDefaultGcsBucketNameResponse { + optional string default_gcs_bucket_name = 1; +} diff --git a/vendor/github.com/docker/distribution/vendor/google.golang.org/appengine/internal/base/api_base.pb.go b/vendor/github.com/docker/distribution/vendor/google.golang.org/appengine/internal/base/api_base.pb.go new file mode 100644 index 000000000..36a195650 --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/google.golang.org/appengine/internal/base/api_base.pb.go @@ -0,0 +1,133 @@ +// Code generated by protoc-gen-go. +// source: google.golang.org/appengine/internal/base/api_base.proto +// DO NOT EDIT! + +/* +Package base is a generated protocol buffer package. + +It is generated from these files: + google.golang.org/appengine/internal/base/api_base.proto + +It has these top-level messages: + StringProto + Integer32Proto + Integer64Proto + BoolProto + DoubleProto + BytesProto + VoidProto +*/ +package base + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +type StringProto struct { + Value *string `protobuf:"bytes,1,req,name=value" json:"value,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *StringProto) Reset() { *m = StringProto{} } +func (m *StringProto) String() string { return proto.CompactTextString(m) } +func (*StringProto) ProtoMessage() {} + +func (m *StringProto) GetValue() string { + if m != nil && m.Value != nil { + return *m.Value + } + return "" +} + +type Integer32Proto struct { + Value *int32 `protobuf:"varint,1,req,name=value" json:"value,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *Integer32Proto) Reset() { *m = Integer32Proto{} } +func (m *Integer32Proto) String() string { return proto.CompactTextString(m) } +func (*Integer32Proto) ProtoMessage() {} + +func (m *Integer32Proto) GetValue() int32 { + if m != nil && m.Value != nil { + return *m.Value + } + return 0 +} + +type Integer64Proto struct { + Value *int64 `protobuf:"varint,1,req,name=value" json:"value,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *Integer64Proto) Reset() { *m = Integer64Proto{} } +func (m *Integer64Proto) String() string { return proto.CompactTextString(m) } +func (*Integer64Proto) ProtoMessage() {} + +func (m *Integer64Proto) GetValue() int64 { + if m != nil && m.Value != nil { + return *m.Value + } + return 0 +} + +type BoolProto struct { + Value *bool `protobuf:"varint,1,req,name=value" json:"value,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *BoolProto) Reset() { *m = BoolProto{} } +func (m *BoolProto) String() string { return proto.CompactTextString(m) } +func (*BoolProto) ProtoMessage() {} + +func (m *BoolProto) GetValue() bool { + if m != nil && m.Value != nil { + return *m.Value + } + return false +} + +type DoubleProto struct { + Value *float64 `protobuf:"fixed64,1,req,name=value" json:"value,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *DoubleProto) Reset() { *m = DoubleProto{} } +func (m *DoubleProto) String() string { return 
proto.CompactTextString(m) } +func (*DoubleProto) ProtoMessage() {} + +func (m *DoubleProto) GetValue() float64 { + if m != nil && m.Value != nil { + return *m.Value + } + return 0 +} + +type BytesProto struct { + Value []byte `protobuf:"bytes,1,req,name=value" json:"value,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *BytesProto) Reset() { *m = BytesProto{} } +func (m *BytesProto) String() string { return proto.CompactTextString(m) } +func (*BytesProto) ProtoMessage() {} + +func (m *BytesProto) GetValue() []byte { + if m != nil { + return m.Value + } + return nil +} + +type VoidProto struct { + XXX_unrecognized []byte `json:"-"` +} + +func (m *VoidProto) Reset() { *m = VoidProto{} } +func (m *VoidProto) String() string { return proto.CompactTextString(m) } +func (*VoidProto) ProtoMessage() {} diff --git a/vendor/github.com/docker/distribution/vendor/google.golang.org/appengine/internal/base/api_base.proto b/vendor/github.com/docker/distribution/vendor/google.golang.org/appengine/internal/base/api_base.proto new file mode 100644 index 000000000..56cd7a3ca --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/google.golang.org/appengine/internal/base/api_base.proto @@ -0,0 +1,33 @@ +// Built-in base types for API calls. Primarily useful as return types. + +syntax = "proto2"; +option go_package = "base"; + +package appengine.base; + +message StringProto { + required string value = 1; +} + +message Integer32Proto { + required int32 value = 1; +} + +message Integer64Proto { + required int64 value = 1; +} + +message BoolProto { + required bool value = 1; +} + +message DoubleProto { + required double value = 1; +} + +message BytesProto { + required bytes value = 1 [ctype=CORD]; +} + +message VoidProto { +} diff --git a/vendor/github.com/docker/distribution/vendor/google.golang.org/appengine/internal/datastore/datastore_v3.pb.go b/vendor/github.com/docker/distribution/vendor/google.golang.org/appengine/internal/datastore/datastore_v3.pb.go new file mode 100644 index 000000000..8613cb731 --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/google.golang.org/appengine/internal/datastore/datastore_v3.pb.go @@ -0,0 +1,2778 @@ +// Code generated by protoc-gen-go. +// source: google.golang.org/appengine/internal/datastore/datastore_v3.proto +// DO NOT EDIT! + +/* +Package datastore is a generated protocol buffer package. + +It is generated from these files: + google.golang.org/appengine/internal/datastore/datastore_v3.proto + +It has these top-level messages: + Action + PropertyValue + Property + Path + Reference + User + EntityProto + CompositeProperty + Index + CompositeIndex + IndexPostfix + IndexPosition + Snapshot + InternalHeader + Transaction + Query + CompiledQuery + CompiledCursor + Cursor + Error + Cost + GetRequest + GetResponse + PutRequest + PutResponse + TouchRequest + TouchResponse + DeleteRequest + DeleteResponse + NextRequest + QueryResult + AllocateIdsRequest + AllocateIdsResponse + CompositeIndices + AddActionsRequest + AddActionsResponse + BeginTransactionRequest + CommitResponse +*/ +package datastore + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" + +// Reference imports to suppress errors if they are not otherwise used. 
+var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +type Property_Meaning int32 + +const ( + Property_NO_MEANING Property_Meaning = 0 + Property_BLOB Property_Meaning = 14 + Property_TEXT Property_Meaning = 15 + Property_BYTESTRING Property_Meaning = 16 + Property_ATOM_CATEGORY Property_Meaning = 1 + Property_ATOM_LINK Property_Meaning = 2 + Property_ATOM_TITLE Property_Meaning = 3 + Property_ATOM_CONTENT Property_Meaning = 4 + Property_ATOM_SUMMARY Property_Meaning = 5 + Property_ATOM_AUTHOR Property_Meaning = 6 + Property_GD_WHEN Property_Meaning = 7 + Property_GD_EMAIL Property_Meaning = 8 + Property_GEORSS_POINT Property_Meaning = 9 + Property_GD_IM Property_Meaning = 10 + Property_GD_PHONENUMBER Property_Meaning = 11 + Property_GD_POSTALADDRESS Property_Meaning = 12 + Property_GD_RATING Property_Meaning = 13 + Property_BLOBKEY Property_Meaning = 17 + Property_ENTITY_PROTO Property_Meaning = 19 + Property_INDEX_VALUE Property_Meaning = 18 +) + +var Property_Meaning_name = map[int32]string{ + 0: "NO_MEANING", + 14: "BLOB", + 15: "TEXT", + 16: "BYTESTRING", + 1: "ATOM_CATEGORY", + 2: "ATOM_LINK", + 3: "ATOM_TITLE", + 4: "ATOM_CONTENT", + 5: "ATOM_SUMMARY", + 6: "ATOM_AUTHOR", + 7: "GD_WHEN", + 8: "GD_EMAIL", + 9: "GEORSS_POINT", + 10: "GD_IM", + 11: "GD_PHONENUMBER", + 12: "GD_POSTALADDRESS", + 13: "GD_RATING", + 17: "BLOBKEY", + 19: "ENTITY_PROTO", + 18: "INDEX_VALUE", +} +var Property_Meaning_value = map[string]int32{ + "NO_MEANING": 0, + "BLOB": 14, + "TEXT": 15, + "BYTESTRING": 16, + "ATOM_CATEGORY": 1, + "ATOM_LINK": 2, + "ATOM_TITLE": 3, + "ATOM_CONTENT": 4, + "ATOM_SUMMARY": 5, + "ATOM_AUTHOR": 6, + "GD_WHEN": 7, + "GD_EMAIL": 8, + "GEORSS_POINT": 9, + "GD_IM": 10, + "GD_PHONENUMBER": 11, + "GD_POSTALADDRESS": 12, + "GD_RATING": 13, + "BLOBKEY": 17, + "ENTITY_PROTO": 19, + "INDEX_VALUE": 18, +} + +func (x Property_Meaning) Enum() *Property_Meaning { + p := new(Property_Meaning) + *p = x + return p +} +func (x Property_Meaning) String() string { + return proto.EnumName(Property_Meaning_name, int32(x)) +} +func (x *Property_Meaning) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(Property_Meaning_value, data, "Property_Meaning") + if err != nil { + return err + } + *x = Property_Meaning(value) + return nil +} + +type Property_FtsTokenizationOption int32 + +const ( + Property_HTML Property_FtsTokenizationOption = 1 + Property_ATOM Property_FtsTokenizationOption = 2 +) + +var Property_FtsTokenizationOption_name = map[int32]string{ + 1: "HTML", + 2: "ATOM", +} +var Property_FtsTokenizationOption_value = map[string]int32{ + "HTML": 1, + "ATOM": 2, +} + +func (x Property_FtsTokenizationOption) Enum() *Property_FtsTokenizationOption { + p := new(Property_FtsTokenizationOption) + *p = x + return p +} +func (x Property_FtsTokenizationOption) String() string { + return proto.EnumName(Property_FtsTokenizationOption_name, int32(x)) +} +func (x *Property_FtsTokenizationOption) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(Property_FtsTokenizationOption_value, data, "Property_FtsTokenizationOption") + if err != nil { + return err + } + *x = Property_FtsTokenizationOption(value) + return nil +} + +type EntityProto_Kind int32 + +const ( + EntityProto_GD_CONTACT EntityProto_Kind = 1 + EntityProto_GD_EVENT EntityProto_Kind = 2 + EntityProto_GD_MESSAGE EntityProto_Kind = 3 +) + +var EntityProto_Kind_name = map[int32]string{ + 1: "GD_CONTACT", + 2: "GD_EVENT", + 3: "GD_MESSAGE", +} +var EntityProto_Kind_value = map[string]int32{ + 
"GD_CONTACT": 1, + "GD_EVENT": 2, + "GD_MESSAGE": 3, +} + +func (x EntityProto_Kind) Enum() *EntityProto_Kind { + p := new(EntityProto_Kind) + *p = x + return p +} +func (x EntityProto_Kind) String() string { + return proto.EnumName(EntityProto_Kind_name, int32(x)) +} +func (x *EntityProto_Kind) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(EntityProto_Kind_value, data, "EntityProto_Kind") + if err != nil { + return err + } + *x = EntityProto_Kind(value) + return nil +} + +type Index_Property_Direction int32 + +const ( + Index_Property_ASCENDING Index_Property_Direction = 1 + Index_Property_DESCENDING Index_Property_Direction = 2 +) + +var Index_Property_Direction_name = map[int32]string{ + 1: "ASCENDING", + 2: "DESCENDING", +} +var Index_Property_Direction_value = map[string]int32{ + "ASCENDING": 1, + "DESCENDING": 2, +} + +func (x Index_Property_Direction) Enum() *Index_Property_Direction { + p := new(Index_Property_Direction) + *p = x + return p +} +func (x Index_Property_Direction) String() string { + return proto.EnumName(Index_Property_Direction_name, int32(x)) +} +func (x *Index_Property_Direction) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(Index_Property_Direction_value, data, "Index_Property_Direction") + if err != nil { + return err + } + *x = Index_Property_Direction(value) + return nil +} + +type CompositeIndex_State int32 + +const ( + CompositeIndex_WRITE_ONLY CompositeIndex_State = 1 + CompositeIndex_READ_WRITE CompositeIndex_State = 2 + CompositeIndex_DELETED CompositeIndex_State = 3 + CompositeIndex_ERROR CompositeIndex_State = 4 +) + +var CompositeIndex_State_name = map[int32]string{ + 1: "WRITE_ONLY", + 2: "READ_WRITE", + 3: "DELETED", + 4: "ERROR", +} +var CompositeIndex_State_value = map[string]int32{ + "WRITE_ONLY": 1, + "READ_WRITE": 2, + "DELETED": 3, + "ERROR": 4, +} + +func (x CompositeIndex_State) Enum() *CompositeIndex_State { + p := new(CompositeIndex_State) + *p = x + return p +} +func (x CompositeIndex_State) String() string { + return proto.EnumName(CompositeIndex_State_name, int32(x)) +} +func (x *CompositeIndex_State) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(CompositeIndex_State_value, data, "CompositeIndex_State") + if err != nil { + return err + } + *x = CompositeIndex_State(value) + return nil +} + +type Snapshot_Status int32 + +const ( + Snapshot_INACTIVE Snapshot_Status = 0 + Snapshot_ACTIVE Snapshot_Status = 1 +) + +var Snapshot_Status_name = map[int32]string{ + 0: "INACTIVE", + 1: "ACTIVE", +} +var Snapshot_Status_value = map[string]int32{ + "INACTIVE": 0, + "ACTIVE": 1, +} + +func (x Snapshot_Status) Enum() *Snapshot_Status { + p := new(Snapshot_Status) + *p = x + return p +} +func (x Snapshot_Status) String() string { + return proto.EnumName(Snapshot_Status_name, int32(x)) +} +func (x *Snapshot_Status) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(Snapshot_Status_value, data, "Snapshot_Status") + if err != nil { + return err + } + *x = Snapshot_Status(value) + return nil +} + +type Query_Hint int32 + +const ( + Query_ORDER_FIRST Query_Hint = 1 + Query_ANCESTOR_FIRST Query_Hint = 2 + Query_FILTER_FIRST Query_Hint = 3 +) + +var Query_Hint_name = map[int32]string{ + 1: "ORDER_FIRST", + 2: "ANCESTOR_FIRST", + 3: "FILTER_FIRST", +} +var Query_Hint_value = map[string]int32{ + "ORDER_FIRST": 1, + "ANCESTOR_FIRST": 2, + "FILTER_FIRST": 3, +} + +func (x Query_Hint) Enum() *Query_Hint { + p := new(Query_Hint) + *p = x + return p +} 
+func (x Query_Hint) String() string { + return proto.EnumName(Query_Hint_name, int32(x)) +} +func (x *Query_Hint) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(Query_Hint_value, data, "Query_Hint") + if err != nil { + return err + } + *x = Query_Hint(value) + return nil +} + +type Query_Filter_Operator int32 + +const ( + Query_Filter_LESS_THAN Query_Filter_Operator = 1 + Query_Filter_LESS_THAN_OR_EQUAL Query_Filter_Operator = 2 + Query_Filter_GREATER_THAN Query_Filter_Operator = 3 + Query_Filter_GREATER_THAN_OR_EQUAL Query_Filter_Operator = 4 + Query_Filter_EQUAL Query_Filter_Operator = 5 + Query_Filter_IN Query_Filter_Operator = 6 + Query_Filter_EXISTS Query_Filter_Operator = 7 +) + +var Query_Filter_Operator_name = map[int32]string{ + 1: "LESS_THAN", + 2: "LESS_THAN_OR_EQUAL", + 3: "GREATER_THAN", + 4: "GREATER_THAN_OR_EQUAL", + 5: "EQUAL", + 6: "IN", + 7: "EXISTS", +} +var Query_Filter_Operator_value = map[string]int32{ + "LESS_THAN": 1, + "LESS_THAN_OR_EQUAL": 2, + "GREATER_THAN": 3, + "GREATER_THAN_OR_EQUAL": 4, + "EQUAL": 5, + "IN": 6, + "EXISTS": 7, +} + +func (x Query_Filter_Operator) Enum() *Query_Filter_Operator { + p := new(Query_Filter_Operator) + *p = x + return p +} +func (x Query_Filter_Operator) String() string { + return proto.EnumName(Query_Filter_Operator_name, int32(x)) +} +func (x *Query_Filter_Operator) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(Query_Filter_Operator_value, data, "Query_Filter_Operator") + if err != nil { + return err + } + *x = Query_Filter_Operator(value) + return nil +} + +type Query_Order_Direction int32 + +const ( + Query_Order_ASCENDING Query_Order_Direction = 1 + Query_Order_DESCENDING Query_Order_Direction = 2 +) + +var Query_Order_Direction_name = map[int32]string{ + 1: "ASCENDING", + 2: "DESCENDING", +} +var Query_Order_Direction_value = map[string]int32{ + "ASCENDING": 1, + "DESCENDING": 2, +} + +func (x Query_Order_Direction) Enum() *Query_Order_Direction { + p := new(Query_Order_Direction) + *p = x + return p +} +func (x Query_Order_Direction) String() string { + return proto.EnumName(Query_Order_Direction_name, int32(x)) +} +func (x *Query_Order_Direction) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(Query_Order_Direction_value, data, "Query_Order_Direction") + if err != nil { + return err + } + *x = Query_Order_Direction(value) + return nil +} + +type Error_ErrorCode int32 + +const ( + Error_BAD_REQUEST Error_ErrorCode = 1 + Error_CONCURRENT_TRANSACTION Error_ErrorCode = 2 + Error_INTERNAL_ERROR Error_ErrorCode = 3 + Error_NEED_INDEX Error_ErrorCode = 4 + Error_TIMEOUT Error_ErrorCode = 5 + Error_PERMISSION_DENIED Error_ErrorCode = 6 + Error_BIGTABLE_ERROR Error_ErrorCode = 7 + Error_COMMITTED_BUT_STILL_APPLYING Error_ErrorCode = 8 + Error_CAPABILITY_DISABLED Error_ErrorCode = 9 + Error_TRY_ALTERNATE_BACKEND Error_ErrorCode = 10 + Error_SAFE_TIME_TOO_OLD Error_ErrorCode = 11 +) + +var Error_ErrorCode_name = map[int32]string{ + 1: "BAD_REQUEST", + 2: "CONCURRENT_TRANSACTION", + 3: "INTERNAL_ERROR", + 4: "NEED_INDEX", + 5: "TIMEOUT", + 6: "PERMISSION_DENIED", + 7: "BIGTABLE_ERROR", + 8: "COMMITTED_BUT_STILL_APPLYING", + 9: "CAPABILITY_DISABLED", + 10: "TRY_ALTERNATE_BACKEND", + 11: "SAFE_TIME_TOO_OLD", +} +var Error_ErrorCode_value = map[string]int32{ + "BAD_REQUEST": 1, + "CONCURRENT_TRANSACTION": 2, + "INTERNAL_ERROR": 3, + "NEED_INDEX": 4, + "TIMEOUT": 5, + "PERMISSION_DENIED": 6, + "BIGTABLE_ERROR": 7, + "COMMITTED_BUT_STILL_APPLYING": 8, + 
"CAPABILITY_DISABLED": 9, + "TRY_ALTERNATE_BACKEND": 10, + "SAFE_TIME_TOO_OLD": 11, +} + +func (x Error_ErrorCode) Enum() *Error_ErrorCode { + p := new(Error_ErrorCode) + *p = x + return p +} +func (x Error_ErrorCode) String() string { + return proto.EnumName(Error_ErrorCode_name, int32(x)) +} +func (x *Error_ErrorCode) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(Error_ErrorCode_value, data, "Error_ErrorCode") + if err != nil { + return err + } + *x = Error_ErrorCode(value) + return nil +} + +type PutRequest_AutoIdPolicy int32 + +const ( + PutRequest_CURRENT PutRequest_AutoIdPolicy = 0 + PutRequest_SEQUENTIAL PutRequest_AutoIdPolicy = 1 +) + +var PutRequest_AutoIdPolicy_name = map[int32]string{ + 0: "CURRENT", + 1: "SEQUENTIAL", +} +var PutRequest_AutoIdPolicy_value = map[string]int32{ + "CURRENT": 0, + "SEQUENTIAL": 1, +} + +func (x PutRequest_AutoIdPolicy) Enum() *PutRequest_AutoIdPolicy { + p := new(PutRequest_AutoIdPolicy) + *p = x + return p +} +func (x PutRequest_AutoIdPolicy) String() string { + return proto.EnumName(PutRequest_AutoIdPolicy_name, int32(x)) +} +func (x *PutRequest_AutoIdPolicy) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(PutRequest_AutoIdPolicy_value, data, "PutRequest_AutoIdPolicy") + if err != nil { + return err + } + *x = PutRequest_AutoIdPolicy(value) + return nil +} + +type Action struct { + XXX_unrecognized []byte `json:"-"` +} + +func (m *Action) Reset() { *m = Action{} } +func (m *Action) String() string { return proto.CompactTextString(m) } +func (*Action) ProtoMessage() {} + +type PropertyValue struct { + Int64Value *int64 `protobuf:"varint,1,opt,name=int64Value" json:"int64Value,omitempty"` + BooleanValue *bool `protobuf:"varint,2,opt,name=booleanValue" json:"booleanValue,omitempty"` + StringValue *string `protobuf:"bytes,3,opt,name=stringValue" json:"stringValue,omitempty"` + DoubleValue *float64 `protobuf:"fixed64,4,opt,name=doubleValue" json:"doubleValue,omitempty"` + Pointvalue *PropertyValue_PointValue `protobuf:"group,5,opt,name=PointValue" json:"pointvalue,omitempty"` + Uservalue *PropertyValue_UserValue `protobuf:"group,8,opt,name=UserValue" json:"uservalue,omitempty"` + Referencevalue *PropertyValue_ReferenceValue `protobuf:"group,12,opt,name=ReferenceValue" json:"referencevalue,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *PropertyValue) Reset() { *m = PropertyValue{} } +func (m *PropertyValue) String() string { return proto.CompactTextString(m) } +func (*PropertyValue) ProtoMessage() {} + +func (m *PropertyValue) GetInt64Value() int64 { + if m != nil && m.Int64Value != nil { + return *m.Int64Value + } + return 0 +} + +func (m *PropertyValue) GetBooleanValue() bool { + if m != nil && m.BooleanValue != nil { + return *m.BooleanValue + } + return false +} + +func (m *PropertyValue) GetStringValue() string { + if m != nil && m.StringValue != nil { + return *m.StringValue + } + return "" +} + +func (m *PropertyValue) GetDoubleValue() float64 { + if m != nil && m.DoubleValue != nil { + return *m.DoubleValue + } + return 0 +} + +func (m *PropertyValue) GetPointvalue() *PropertyValue_PointValue { + if m != nil { + return m.Pointvalue + } + return nil +} + +func (m *PropertyValue) GetUservalue() *PropertyValue_UserValue { + if m != nil { + return m.Uservalue + } + return nil +} + +func (m *PropertyValue) GetReferencevalue() *PropertyValue_ReferenceValue { + if m != nil { + return m.Referencevalue + } + return nil +} + +type PropertyValue_PointValue struct { + X *float64 
`protobuf:"fixed64,6,req,name=x" json:"x,omitempty"` + Y *float64 `protobuf:"fixed64,7,req,name=y" json:"y,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *PropertyValue_PointValue) Reset() { *m = PropertyValue_PointValue{} } +func (m *PropertyValue_PointValue) String() string { return proto.CompactTextString(m) } +func (*PropertyValue_PointValue) ProtoMessage() {} + +func (m *PropertyValue_PointValue) GetX() float64 { + if m != nil && m.X != nil { + return *m.X + } + return 0 +} + +func (m *PropertyValue_PointValue) GetY() float64 { + if m != nil && m.Y != nil { + return *m.Y + } + return 0 +} + +type PropertyValue_UserValue struct { + Email *string `protobuf:"bytes,9,req,name=email" json:"email,omitempty"` + AuthDomain *string `protobuf:"bytes,10,req,name=auth_domain" json:"auth_domain,omitempty"` + Nickname *string `protobuf:"bytes,11,opt,name=nickname" json:"nickname,omitempty"` + FederatedIdentity *string `protobuf:"bytes,21,opt,name=federated_identity" json:"federated_identity,omitempty"` + FederatedProvider *string `protobuf:"bytes,22,opt,name=federated_provider" json:"federated_provider,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *PropertyValue_UserValue) Reset() { *m = PropertyValue_UserValue{} } +func (m *PropertyValue_UserValue) String() string { return proto.CompactTextString(m) } +func (*PropertyValue_UserValue) ProtoMessage() {} + +func (m *PropertyValue_UserValue) GetEmail() string { + if m != nil && m.Email != nil { + return *m.Email + } + return "" +} + +func (m *PropertyValue_UserValue) GetAuthDomain() string { + if m != nil && m.AuthDomain != nil { + return *m.AuthDomain + } + return "" +} + +func (m *PropertyValue_UserValue) GetNickname() string { + if m != nil && m.Nickname != nil { + return *m.Nickname + } + return "" +} + +func (m *PropertyValue_UserValue) GetFederatedIdentity() string { + if m != nil && m.FederatedIdentity != nil { + return *m.FederatedIdentity + } + return "" +} + +func (m *PropertyValue_UserValue) GetFederatedProvider() string { + if m != nil && m.FederatedProvider != nil { + return *m.FederatedProvider + } + return "" +} + +type PropertyValue_ReferenceValue struct { + App *string `protobuf:"bytes,13,req,name=app" json:"app,omitempty"` + NameSpace *string `protobuf:"bytes,20,opt,name=name_space" json:"name_space,omitempty"` + Pathelement []*PropertyValue_ReferenceValue_PathElement `protobuf:"group,14,rep,name=PathElement" json:"pathelement,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *PropertyValue_ReferenceValue) Reset() { *m = PropertyValue_ReferenceValue{} } +func (m *PropertyValue_ReferenceValue) String() string { return proto.CompactTextString(m) } +func (*PropertyValue_ReferenceValue) ProtoMessage() {} + +func (m *PropertyValue_ReferenceValue) GetApp() string { + if m != nil && m.App != nil { + return *m.App + } + return "" +} + +func (m *PropertyValue_ReferenceValue) GetNameSpace() string { + if m != nil && m.NameSpace != nil { + return *m.NameSpace + } + return "" +} + +func (m *PropertyValue_ReferenceValue) GetPathelement() []*PropertyValue_ReferenceValue_PathElement { + if m != nil { + return m.Pathelement + } + return nil +} + +type PropertyValue_ReferenceValue_PathElement struct { + Type *string `protobuf:"bytes,15,req,name=type" json:"type,omitempty"` + Id *int64 `protobuf:"varint,16,opt,name=id" json:"id,omitempty"` + Name *string `protobuf:"bytes,17,opt,name=name" json:"name,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *PropertyValue_ReferenceValue_PathElement) 
Reset() { + *m = PropertyValue_ReferenceValue_PathElement{} +} +func (m *PropertyValue_ReferenceValue_PathElement) String() string { return proto.CompactTextString(m) } +func (*PropertyValue_ReferenceValue_PathElement) ProtoMessage() {} + +func (m *PropertyValue_ReferenceValue_PathElement) GetType() string { + if m != nil && m.Type != nil { + return *m.Type + } + return "" +} + +func (m *PropertyValue_ReferenceValue_PathElement) GetId() int64 { + if m != nil && m.Id != nil { + return *m.Id + } + return 0 +} + +func (m *PropertyValue_ReferenceValue_PathElement) GetName() string { + if m != nil && m.Name != nil { + return *m.Name + } + return "" +} + +type Property struct { + Meaning *Property_Meaning `protobuf:"varint,1,opt,name=meaning,enum=appengine.Property_Meaning,def=0" json:"meaning,omitempty"` + MeaningUri *string `protobuf:"bytes,2,opt,name=meaning_uri" json:"meaning_uri,omitempty"` + Name *string `protobuf:"bytes,3,req,name=name" json:"name,omitempty"` + Value *PropertyValue `protobuf:"bytes,5,req,name=value" json:"value,omitempty"` + Multiple *bool `protobuf:"varint,4,req,name=multiple" json:"multiple,omitempty"` + Searchable *bool `protobuf:"varint,6,opt,name=searchable,def=0" json:"searchable,omitempty"` + FtsTokenizationOption *Property_FtsTokenizationOption `protobuf:"varint,8,opt,name=fts_tokenization_option,enum=appengine.Property_FtsTokenizationOption" json:"fts_tokenization_option,omitempty"` + Locale *string `protobuf:"bytes,9,opt,name=locale,def=en" json:"locale,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *Property) Reset() { *m = Property{} } +func (m *Property) String() string { return proto.CompactTextString(m) } +func (*Property) ProtoMessage() {} + +const Default_Property_Meaning Property_Meaning = Property_NO_MEANING +const Default_Property_Searchable bool = false +const Default_Property_Locale string = "en" + +func (m *Property) GetMeaning() Property_Meaning { + if m != nil && m.Meaning != nil { + return *m.Meaning + } + return Default_Property_Meaning +} + +func (m *Property) GetMeaningUri() string { + if m != nil && m.MeaningUri != nil { + return *m.MeaningUri + } + return "" +} + +func (m *Property) GetName() string { + if m != nil && m.Name != nil { + return *m.Name + } + return "" +} + +func (m *Property) GetValue() *PropertyValue { + if m != nil { + return m.Value + } + return nil +} + +func (m *Property) GetMultiple() bool { + if m != nil && m.Multiple != nil { + return *m.Multiple + } + return false +} + +func (m *Property) GetSearchable() bool { + if m != nil && m.Searchable != nil { + return *m.Searchable + } + return Default_Property_Searchable +} + +func (m *Property) GetFtsTokenizationOption() Property_FtsTokenizationOption { + if m != nil && m.FtsTokenizationOption != nil { + return *m.FtsTokenizationOption + } + return Property_HTML +} + +func (m *Property) GetLocale() string { + if m != nil && m.Locale != nil { + return *m.Locale + } + return Default_Property_Locale +} + +type Path struct { + Element []*Path_Element `protobuf:"group,1,rep,name=Element" json:"element,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *Path) Reset() { *m = Path{} } +func (m *Path) String() string { return proto.CompactTextString(m) } +func (*Path) ProtoMessage() {} + +func (m *Path) GetElement() []*Path_Element { + if m != nil { + return m.Element + } + return nil +} + +type Path_Element struct { + Type *string `protobuf:"bytes,2,req,name=type" json:"type,omitempty"` + Id *int64 `protobuf:"varint,3,opt,name=id" json:"id,omitempty"` + 
Name *string `protobuf:"bytes,4,opt,name=name" json:"name,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *Path_Element) Reset() { *m = Path_Element{} } +func (m *Path_Element) String() string { return proto.CompactTextString(m) } +func (*Path_Element) ProtoMessage() {} + +func (m *Path_Element) GetType() string { + if m != nil && m.Type != nil { + return *m.Type + } + return "" +} + +func (m *Path_Element) GetId() int64 { + if m != nil && m.Id != nil { + return *m.Id + } + return 0 +} + +func (m *Path_Element) GetName() string { + if m != nil && m.Name != nil { + return *m.Name + } + return "" +} + +type Reference struct { + App *string `protobuf:"bytes,13,req,name=app" json:"app,omitempty"` + NameSpace *string `protobuf:"bytes,20,opt,name=name_space" json:"name_space,omitempty"` + Path *Path `protobuf:"bytes,14,req,name=path" json:"path,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *Reference) Reset() { *m = Reference{} } +func (m *Reference) String() string { return proto.CompactTextString(m) } +func (*Reference) ProtoMessage() {} + +func (m *Reference) GetApp() string { + if m != nil && m.App != nil { + return *m.App + } + return "" +} + +func (m *Reference) GetNameSpace() string { + if m != nil && m.NameSpace != nil { + return *m.NameSpace + } + return "" +} + +func (m *Reference) GetPath() *Path { + if m != nil { + return m.Path + } + return nil +} + +type User struct { + Email *string `protobuf:"bytes,1,req,name=email" json:"email,omitempty"` + AuthDomain *string `protobuf:"bytes,2,req,name=auth_domain" json:"auth_domain,omitempty"` + Nickname *string `protobuf:"bytes,3,opt,name=nickname" json:"nickname,omitempty"` + FederatedIdentity *string `protobuf:"bytes,6,opt,name=federated_identity" json:"federated_identity,omitempty"` + FederatedProvider *string `protobuf:"bytes,7,opt,name=federated_provider" json:"federated_provider,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *User) Reset() { *m = User{} } +func (m *User) String() string { return proto.CompactTextString(m) } +func (*User) ProtoMessage() {} + +func (m *User) GetEmail() string { + if m != nil && m.Email != nil { + return *m.Email + } + return "" +} + +func (m *User) GetAuthDomain() string { + if m != nil && m.AuthDomain != nil { + return *m.AuthDomain + } + return "" +} + +func (m *User) GetNickname() string { + if m != nil && m.Nickname != nil { + return *m.Nickname + } + return "" +} + +func (m *User) GetFederatedIdentity() string { + if m != nil && m.FederatedIdentity != nil { + return *m.FederatedIdentity + } + return "" +} + +func (m *User) GetFederatedProvider() string { + if m != nil && m.FederatedProvider != nil { + return *m.FederatedProvider + } + return "" +} + +type EntityProto struct { + Key *Reference `protobuf:"bytes,13,req,name=key" json:"key,omitempty"` + EntityGroup *Path `protobuf:"bytes,16,req,name=entity_group" json:"entity_group,omitempty"` + Owner *User `protobuf:"bytes,17,opt,name=owner" json:"owner,omitempty"` + Kind *EntityProto_Kind `protobuf:"varint,4,opt,name=kind,enum=appengine.EntityProto_Kind" json:"kind,omitempty"` + KindUri *string `protobuf:"bytes,5,opt,name=kind_uri" json:"kind_uri,omitempty"` + Property []*Property `protobuf:"bytes,14,rep,name=property" json:"property,omitempty"` + RawProperty []*Property `protobuf:"bytes,15,rep,name=raw_property" json:"raw_property,omitempty"` + Rank *int32 `protobuf:"varint,18,opt,name=rank" json:"rank,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *EntityProto) Reset() { *m = 
EntityProto{} } +func (m *EntityProto) String() string { return proto.CompactTextString(m) } +func (*EntityProto) ProtoMessage() {} + +func (m *EntityProto) GetKey() *Reference { + if m != nil { + return m.Key + } + return nil +} + +func (m *EntityProto) GetEntityGroup() *Path { + if m != nil { + return m.EntityGroup + } + return nil +} + +func (m *EntityProto) GetOwner() *User { + if m != nil { + return m.Owner + } + return nil +} + +func (m *EntityProto) GetKind() EntityProto_Kind { + if m != nil && m.Kind != nil { + return *m.Kind + } + return EntityProto_GD_CONTACT +} + +func (m *EntityProto) GetKindUri() string { + if m != nil && m.KindUri != nil { + return *m.KindUri + } + return "" +} + +func (m *EntityProto) GetProperty() []*Property { + if m != nil { + return m.Property + } + return nil +} + +func (m *EntityProto) GetRawProperty() []*Property { + if m != nil { + return m.RawProperty + } + return nil +} + +func (m *EntityProto) GetRank() int32 { + if m != nil && m.Rank != nil { + return *m.Rank + } + return 0 +} + +type CompositeProperty struct { + IndexId *int64 `protobuf:"varint,1,req,name=index_id" json:"index_id,omitempty"` + Value []string `protobuf:"bytes,2,rep,name=value" json:"value,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *CompositeProperty) Reset() { *m = CompositeProperty{} } +func (m *CompositeProperty) String() string { return proto.CompactTextString(m) } +func (*CompositeProperty) ProtoMessage() {} + +func (m *CompositeProperty) GetIndexId() int64 { + if m != nil && m.IndexId != nil { + return *m.IndexId + } + return 0 +} + +func (m *CompositeProperty) GetValue() []string { + if m != nil { + return m.Value + } + return nil +} + +type Index struct { + EntityType *string `protobuf:"bytes,1,req,name=entity_type" json:"entity_type,omitempty"` + Ancestor *bool `protobuf:"varint,5,req,name=ancestor" json:"ancestor,omitempty"` + Property []*Index_Property `protobuf:"group,2,rep,name=Property" json:"property,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *Index) Reset() { *m = Index{} } +func (m *Index) String() string { return proto.CompactTextString(m) } +func (*Index) ProtoMessage() {} + +func (m *Index) GetEntityType() string { + if m != nil && m.EntityType != nil { + return *m.EntityType + } + return "" +} + +func (m *Index) GetAncestor() bool { + if m != nil && m.Ancestor != nil { + return *m.Ancestor + } + return false +} + +func (m *Index) GetProperty() []*Index_Property { + if m != nil { + return m.Property + } + return nil +} + +type Index_Property struct { + Name *string `protobuf:"bytes,3,req,name=name" json:"name,omitempty"` + Direction *Index_Property_Direction `protobuf:"varint,4,opt,name=direction,enum=appengine.Index_Property_Direction,def=1" json:"direction,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *Index_Property) Reset() { *m = Index_Property{} } +func (m *Index_Property) String() string { return proto.CompactTextString(m) } +func (*Index_Property) ProtoMessage() {} + +const Default_Index_Property_Direction Index_Property_Direction = Index_Property_ASCENDING + +func (m *Index_Property) GetName() string { + if m != nil && m.Name != nil { + return *m.Name + } + return "" +} + +func (m *Index_Property) GetDirection() Index_Property_Direction { + if m != nil && m.Direction != nil { + return *m.Direction + } + return Default_Index_Property_Direction +} + +type CompositeIndex struct { + AppId *string `protobuf:"bytes,1,req,name=app_id" json:"app_id,omitempty"` + Id *int64 `protobuf:"varint,2,req,name=id" 
json:"id,omitempty"` + Definition *Index `protobuf:"bytes,3,req,name=definition" json:"definition,omitempty"` + State *CompositeIndex_State `protobuf:"varint,4,req,name=state,enum=appengine.CompositeIndex_State" json:"state,omitempty"` + OnlyUseIfRequired *bool `protobuf:"varint,6,opt,name=only_use_if_required,def=0" json:"only_use_if_required,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *CompositeIndex) Reset() { *m = CompositeIndex{} } +func (m *CompositeIndex) String() string { return proto.CompactTextString(m) } +func (*CompositeIndex) ProtoMessage() {} + +const Default_CompositeIndex_OnlyUseIfRequired bool = false + +func (m *CompositeIndex) GetAppId() string { + if m != nil && m.AppId != nil { + return *m.AppId + } + return "" +} + +func (m *CompositeIndex) GetId() int64 { + if m != nil && m.Id != nil { + return *m.Id + } + return 0 +} + +func (m *CompositeIndex) GetDefinition() *Index { + if m != nil { + return m.Definition + } + return nil +} + +func (m *CompositeIndex) GetState() CompositeIndex_State { + if m != nil && m.State != nil { + return *m.State + } + return CompositeIndex_WRITE_ONLY +} + +func (m *CompositeIndex) GetOnlyUseIfRequired() bool { + if m != nil && m.OnlyUseIfRequired != nil { + return *m.OnlyUseIfRequired + } + return Default_CompositeIndex_OnlyUseIfRequired +} + +type IndexPostfix struct { + IndexValue []*IndexPostfix_IndexValue `protobuf:"bytes,1,rep,name=index_value" json:"index_value,omitempty"` + Key *Reference `protobuf:"bytes,2,opt,name=key" json:"key,omitempty"` + Before *bool `protobuf:"varint,3,opt,name=before,def=1" json:"before,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *IndexPostfix) Reset() { *m = IndexPostfix{} } +func (m *IndexPostfix) String() string { return proto.CompactTextString(m) } +func (*IndexPostfix) ProtoMessage() {} + +const Default_IndexPostfix_Before bool = true + +func (m *IndexPostfix) GetIndexValue() []*IndexPostfix_IndexValue { + if m != nil { + return m.IndexValue + } + return nil +} + +func (m *IndexPostfix) GetKey() *Reference { + if m != nil { + return m.Key + } + return nil +} + +func (m *IndexPostfix) GetBefore() bool { + if m != nil && m.Before != nil { + return *m.Before + } + return Default_IndexPostfix_Before +} + +type IndexPostfix_IndexValue struct { + PropertyName *string `protobuf:"bytes,1,req,name=property_name" json:"property_name,omitempty"` + Value *PropertyValue `protobuf:"bytes,2,req,name=value" json:"value,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *IndexPostfix_IndexValue) Reset() { *m = IndexPostfix_IndexValue{} } +func (m *IndexPostfix_IndexValue) String() string { return proto.CompactTextString(m) } +func (*IndexPostfix_IndexValue) ProtoMessage() {} + +func (m *IndexPostfix_IndexValue) GetPropertyName() string { + if m != nil && m.PropertyName != nil { + return *m.PropertyName + } + return "" +} + +func (m *IndexPostfix_IndexValue) GetValue() *PropertyValue { + if m != nil { + return m.Value + } + return nil +} + +type IndexPosition struct { + Key *string `protobuf:"bytes,1,opt,name=key" json:"key,omitempty"` + Before *bool `protobuf:"varint,2,opt,name=before,def=1" json:"before,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *IndexPosition) Reset() { *m = IndexPosition{} } +func (m *IndexPosition) String() string { return proto.CompactTextString(m) } +func (*IndexPosition) ProtoMessage() {} + +const Default_IndexPosition_Before bool = true + +func (m *IndexPosition) GetKey() string { + if m != nil && m.Key != nil { + return 
*m.Key + } + return "" +} + +func (m *IndexPosition) GetBefore() bool { + if m != nil && m.Before != nil { + return *m.Before + } + return Default_IndexPosition_Before +} + +type Snapshot struct { + Ts *int64 `protobuf:"varint,1,req,name=ts" json:"ts,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *Snapshot) Reset() { *m = Snapshot{} } +func (m *Snapshot) String() string { return proto.CompactTextString(m) } +func (*Snapshot) ProtoMessage() {} + +func (m *Snapshot) GetTs() int64 { + if m != nil && m.Ts != nil { + return *m.Ts + } + return 0 +} + +type InternalHeader struct { + Qos *string `protobuf:"bytes,1,opt,name=qos" json:"qos,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *InternalHeader) Reset() { *m = InternalHeader{} } +func (m *InternalHeader) String() string { return proto.CompactTextString(m) } +func (*InternalHeader) ProtoMessage() {} + +func (m *InternalHeader) GetQos() string { + if m != nil && m.Qos != nil { + return *m.Qos + } + return "" +} + +type Transaction struct { + Header *InternalHeader `protobuf:"bytes,4,opt,name=header" json:"header,omitempty"` + Handle *uint64 `protobuf:"fixed64,1,req,name=handle" json:"handle,omitempty"` + App *string `protobuf:"bytes,2,req,name=app" json:"app,omitempty"` + MarkChanges *bool `protobuf:"varint,3,opt,name=mark_changes,def=0" json:"mark_changes,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *Transaction) Reset() { *m = Transaction{} } +func (m *Transaction) String() string { return proto.CompactTextString(m) } +func (*Transaction) ProtoMessage() {} + +const Default_Transaction_MarkChanges bool = false + +func (m *Transaction) GetHeader() *InternalHeader { + if m != nil { + return m.Header + } + return nil +} + +func (m *Transaction) GetHandle() uint64 { + if m != nil && m.Handle != nil { + return *m.Handle + } + return 0 +} + +func (m *Transaction) GetApp() string { + if m != nil && m.App != nil { + return *m.App + } + return "" +} + +func (m *Transaction) GetMarkChanges() bool { + if m != nil && m.MarkChanges != nil { + return *m.MarkChanges + } + return Default_Transaction_MarkChanges +} + +type Query struct { + Header *InternalHeader `protobuf:"bytes,39,opt,name=header" json:"header,omitempty"` + App *string `protobuf:"bytes,1,req,name=app" json:"app,omitempty"` + NameSpace *string `protobuf:"bytes,29,opt,name=name_space" json:"name_space,omitempty"` + Kind *string `protobuf:"bytes,3,opt,name=kind" json:"kind,omitempty"` + Ancestor *Reference `protobuf:"bytes,17,opt,name=ancestor" json:"ancestor,omitempty"` + Filter []*Query_Filter `protobuf:"group,4,rep,name=Filter" json:"filter,omitempty"` + SearchQuery *string `protobuf:"bytes,8,opt,name=search_query" json:"search_query,omitempty"` + Order []*Query_Order `protobuf:"group,9,rep,name=Order" json:"order,omitempty"` + Hint *Query_Hint `protobuf:"varint,18,opt,name=hint,enum=appengine.Query_Hint" json:"hint,omitempty"` + Count *int32 `protobuf:"varint,23,opt,name=count" json:"count,omitempty"` + Offset *int32 `protobuf:"varint,12,opt,name=offset,def=0" json:"offset,omitempty"` + Limit *int32 `protobuf:"varint,16,opt,name=limit" json:"limit,omitempty"` + CompiledCursor *CompiledCursor `protobuf:"bytes,30,opt,name=compiled_cursor" json:"compiled_cursor,omitempty"` + EndCompiledCursor *CompiledCursor `protobuf:"bytes,31,opt,name=end_compiled_cursor" json:"end_compiled_cursor,omitempty"` + CompositeIndex []*CompositeIndex `protobuf:"bytes,19,rep,name=composite_index" json:"composite_index,omitempty"` + RequirePerfectPlan *bool 
`protobuf:"varint,20,opt,name=require_perfect_plan,def=0" json:"require_perfect_plan,omitempty"` + KeysOnly *bool `protobuf:"varint,21,opt,name=keys_only,def=0" json:"keys_only,omitempty"` + Transaction *Transaction `protobuf:"bytes,22,opt,name=transaction" json:"transaction,omitempty"` + Compile *bool `protobuf:"varint,25,opt,name=compile,def=0" json:"compile,omitempty"` + FailoverMs *int64 `protobuf:"varint,26,opt,name=failover_ms" json:"failover_ms,omitempty"` + Strong *bool `protobuf:"varint,32,opt,name=strong" json:"strong,omitempty"` + PropertyName []string `protobuf:"bytes,33,rep,name=property_name" json:"property_name,omitempty"` + GroupByPropertyName []string `protobuf:"bytes,34,rep,name=group_by_property_name" json:"group_by_property_name,omitempty"` + Distinct *bool `protobuf:"varint,24,opt,name=distinct" json:"distinct,omitempty"` + MinSafeTimeSeconds *int64 `protobuf:"varint,35,opt,name=min_safe_time_seconds" json:"min_safe_time_seconds,omitempty"` + SafeReplicaName []string `protobuf:"bytes,36,rep,name=safe_replica_name" json:"safe_replica_name,omitempty"` + PersistOffset *bool `protobuf:"varint,37,opt,name=persist_offset,def=0" json:"persist_offset,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *Query) Reset() { *m = Query{} } +func (m *Query) String() string { return proto.CompactTextString(m) } +func (*Query) ProtoMessage() {} + +const Default_Query_Offset int32 = 0 +const Default_Query_RequirePerfectPlan bool = false +const Default_Query_KeysOnly bool = false +const Default_Query_Compile bool = false +const Default_Query_PersistOffset bool = false + +func (m *Query) GetHeader() *InternalHeader { + if m != nil { + return m.Header + } + return nil +} + +func (m *Query) GetApp() string { + if m != nil && m.App != nil { + return *m.App + } + return "" +} + +func (m *Query) GetNameSpace() string { + if m != nil && m.NameSpace != nil { + return *m.NameSpace + } + return "" +} + +func (m *Query) GetKind() string { + if m != nil && m.Kind != nil { + return *m.Kind + } + return "" +} + +func (m *Query) GetAncestor() *Reference { + if m != nil { + return m.Ancestor + } + return nil +} + +func (m *Query) GetFilter() []*Query_Filter { + if m != nil { + return m.Filter + } + return nil +} + +func (m *Query) GetSearchQuery() string { + if m != nil && m.SearchQuery != nil { + return *m.SearchQuery + } + return "" +} + +func (m *Query) GetOrder() []*Query_Order { + if m != nil { + return m.Order + } + return nil +} + +func (m *Query) GetHint() Query_Hint { + if m != nil && m.Hint != nil { + return *m.Hint + } + return Query_ORDER_FIRST +} + +func (m *Query) GetCount() int32 { + if m != nil && m.Count != nil { + return *m.Count + } + return 0 +} + +func (m *Query) GetOffset() int32 { + if m != nil && m.Offset != nil { + return *m.Offset + } + return Default_Query_Offset +} + +func (m *Query) GetLimit() int32 { + if m != nil && m.Limit != nil { + return *m.Limit + } + return 0 +} + +func (m *Query) GetCompiledCursor() *CompiledCursor { + if m != nil { + return m.CompiledCursor + } + return nil +} + +func (m *Query) GetEndCompiledCursor() *CompiledCursor { + if m != nil { + return m.EndCompiledCursor + } + return nil +} + +func (m *Query) GetCompositeIndex() []*CompositeIndex { + if m != nil { + return m.CompositeIndex + } + return nil +} + +func (m *Query) GetRequirePerfectPlan() bool { + if m != nil && m.RequirePerfectPlan != nil { + return *m.RequirePerfectPlan + } + return Default_Query_RequirePerfectPlan +} + +func (m *Query) GetKeysOnly() bool { + if m != nil && 
m.KeysOnly != nil { + return *m.KeysOnly + } + return Default_Query_KeysOnly +} + +func (m *Query) GetTransaction() *Transaction { + if m != nil { + return m.Transaction + } + return nil +} + +func (m *Query) GetCompile() bool { + if m != nil && m.Compile != nil { + return *m.Compile + } + return Default_Query_Compile +} + +func (m *Query) GetFailoverMs() int64 { + if m != nil && m.FailoverMs != nil { + return *m.FailoverMs + } + return 0 +} + +func (m *Query) GetStrong() bool { + if m != nil && m.Strong != nil { + return *m.Strong + } + return false +} + +func (m *Query) GetPropertyName() []string { + if m != nil { + return m.PropertyName + } + return nil +} + +func (m *Query) GetGroupByPropertyName() []string { + if m != nil { + return m.GroupByPropertyName + } + return nil +} + +func (m *Query) GetDistinct() bool { + if m != nil && m.Distinct != nil { + return *m.Distinct + } + return false +} + +func (m *Query) GetMinSafeTimeSeconds() int64 { + if m != nil && m.MinSafeTimeSeconds != nil { + return *m.MinSafeTimeSeconds + } + return 0 +} + +func (m *Query) GetSafeReplicaName() []string { + if m != nil { + return m.SafeReplicaName + } + return nil +} + +func (m *Query) GetPersistOffset() bool { + if m != nil && m.PersistOffset != nil { + return *m.PersistOffset + } + return Default_Query_PersistOffset +} + +type Query_Filter struct { + Op *Query_Filter_Operator `protobuf:"varint,6,req,name=op,enum=appengine.Query_Filter_Operator" json:"op,omitempty"` + Property []*Property `protobuf:"bytes,14,rep,name=property" json:"property,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *Query_Filter) Reset() { *m = Query_Filter{} } +func (m *Query_Filter) String() string { return proto.CompactTextString(m) } +func (*Query_Filter) ProtoMessage() {} + +func (m *Query_Filter) GetOp() Query_Filter_Operator { + if m != nil && m.Op != nil { + return *m.Op + } + return Query_Filter_LESS_THAN +} + +func (m *Query_Filter) GetProperty() []*Property { + if m != nil { + return m.Property + } + return nil +} + +type Query_Order struct { + Property *string `protobuf:"bytes,10,req,name=property" json:"property,omitempty"` + Direction *Query_Order_Direction `protobuf:"varint,11,opt,name=direction,enum=appengine.Query_Order_Direction,def=1" json:"direction,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *Query_Order) Reset() { *m = Query_Order{} } +func (m *Query_Order) String() string { return proto.CompactTextString(m) } +func (*Query_Order) ProtoMessage() {} + +const Default_Query_Order_Direction Query_Order_Direction = Query_Order_ASCENDING + +func (m *Query_Order) GetProperty() string { + if m != nil && m.Property != nil { + return *m.Property + } + return "" +} + +func (m *Query_Order) GetDirection() Query_Order_Direction { + if m != nil && m.Direction != nil { + return *m.Direction + } + return Default_Query_Order_Direction +} + +type CompiledQuery struct { + Primaryscan *CompiledQuery_PrimaryScan `protobuf:"group,1,req,name=PrimaryScan" json:"primaryscan,omitempty"` + Mergejoinscan []*CompiledQuery_MergeJoinScan `protobuf:"group,7,rep,name=MergeJoinScan" json:"mergejoinscan,omitempty"` + IndexDef *Index `protobuf:"bytes,21,opt,name=index_def" json:"index_def,omitempty"` + Offset *int32 `protobuf:"varint,10,opt,name=offset,def=0" json:"offset,omitempty"` + Limit *int32 `protobuf:"varint,11,opt,name=limit" json:"limit,omitempty"` + KeysOnly *bool `protobuf:"varint,12,req,name=keys_only" json:"keys_only,omitempty"` + PropertyName []string `protobuf:"bytes,24,rep,name=property_name" 
json:"property_name,omitempty"` + DistinctInfixSize *int32 `protobuf:"varint,25,opt,name=distinct_infix_size" json:"distinct_infix_size,omitempty"` + Entityfilter *CompiledQuery_EntityFilter `protobuf:"group,13,opt,name=EntityFilter" json:"entityfilter,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *CompiledQuery) Reset() { *m = CompiledQuery{} } +func (m *CompiledQuery) String() string { return proto.CompactTextString(m) } +func (*CompiledQuery) ProtoMessage() {} + +const Default_CompiledQuery_Offset int32 = 0 + +func (m *CompiledQuery) GetPrimaryscan() *CompiledQuery_PrimaryScan { + if m != nil { + return m.Primaryscan + } + return nil +} + +func (m *CompiledQuery) GetMergejoinscan() []*CompiledQuery_MergeJoinScan { + if m != nil { + return m.Mergejoinscan + } + return nil +} + +func (m *CompiledQuery) GetIndexDef() *Index { + if m != nil { + return m.IndexDef + } + return nil +} + +func (m *CompiledQuery) GetOffset() int32 { + if m != nil && m.Offset != nil { + return *m.Offset + } + return Default_CompiledQuery_Offset +} + +func (m *CompiledQuery) GetLimit() int32 { + if m != nil && m.Limit != nil { + return *m.Limit + } + return 0 +} + +func (m *CompiledQuery) GetKeysOnly() bool { + if m != nil && m.KeysOnly != nil { + return *m.KeysOnly + } + return false +} + +func (m *CompiledQuery) GetPropertyName() []string { + if m != nil { + return m.PropertyName + } + return nil +} + +func (m *CompiledQuery) GetDistinctInfixSize() int32 { + if m != nil && m.DistinctInfixSize != nil { + return *m.DistinctInfixSize + } + return 0 +} + +func (m *CompiledQuery) GetEntityfilter() *CompiledQuery_EntityFilter { + if m != nil { + return m.Entityfilter + } + return nil +} + +type CompiledQuery_PrimaryScan struct { + IndexName *string `protobuf:"bytes,2,opt,name=index_name" json:"index_name,omitempty"` + StartKey *string `protobuf:"bytes,3,opt,name=start_key" json:"start_key,omitempty"` + StartInclusive *bool `protobuf:"varint,4,opt,name=start_inclusive" json:"start_inclusive,omitempty"` + EndKey *string `protobuf:"bytes,5,opt,name=end_key" json:"end_key,omitempty"` + EndInclusive *bool `protobuf:"varint,6,opt,name=end_inclusive" json:"end_inclusive,omitempty"` + StartPostfixValue []string `protobuf:"bytes,22,rep,name=start_postfix_value" json:"start_postfix_value,omitempty"` + EndPostfixValue []string `protobuf:"bytes,23,rep,name=end_postfix_value" json:"end_postfix_value,omitempty"` + EndUnappliedLogTimestampUs *int64 `protobuf:"varint,19,opt,name=end_unapplied_log_timestamp_us" json:"end_unapplied_log_timestamp_us,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *CompiledQuery_PrimaryScan) Reset() { *m = CompiledQuery_PrimaryScan{} } +func (m *CompiledQuery_PrimaryScan) String() string { return proto.CompactTextString(m) } +func (*CompiledQuery_PrimaryScan) ProtoMessage() {} + +func (m *CompiledQuery_PrimaryScan) GetIndexName() string { + if m != nil && m.IndexName != nil { + return *m.IndexName + } + return "" +} + +func (m *CompiledQuery_PrimaryScan) GetStartKey() string { + if m != nil && m.StartKey != nil { + return *m.StartKey + } + return "" +} + +func (m *CompiledQuery_PrimaryScan) GetStartInclusive() bool { + if m != nil && m.StartInclusive != nil { + return *m.StartInclusive + } + return false +} + +func (m *CompiledQuery_PrimaryScan) GetEndKey() string { + if m != nil && m.EndKey != nil { + return *m.EndKey + } + return "" +} + +func (m *CompiledQuery_PrimaryScan) GetEndInclusive() bool { + if m != nil && m.EndInclusive != nil { + return *m.EndInclusive + } + 
return false +} + +func (m *CompiledQuery_PrimaryScan) GetStartPostfixValue() []string { + if m != nil { + return m.StartPostfixValue + } + return nil +} + +func (m *CompiledQuery_PrimaryScan) GetEndPostfixValue() []string { + if m != nil { + return m.EndPostfixValue + } + return nil +} + +func (m *CompiledQuery_PrimaryScan) GetEndUnappliedLogTimestampUs() int64 { + if m != nil && m.EndUnappliedLogTimestampUs != nil { + return *m.EndUnappliedLogTimestampUs + } + return 0 +} + +type CompiledQuery_MergeJoinScan struct { + IndexName *string `protobuf:"bytes,8,req,name=index_name" json:"index_name,omitempty"` + PrefixValue []string `protobuf:"bytes,9,rep,name=prefix_value" json:"prefix_value,omitempty"` + ValuePrefix *bool `protobuf:"varint,20,opt,name=value_prefix,def=0" json:"value_prefix,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *CompiledQuery_MergeJoinScan) Reset() { *m = CompiledQuery_MergeJoinScan{} } +func (m *CompiledQuery_MergeJoinScan) String() string { return proto.CompactTextString(m) } +func (*CompiledQuery_MergeJoinScan) ProtoMessage() {} + +const Default_CompiledQuery_MergeJoinScan_ValuePrefix bool = false + +func (m *CompiledQuery_MergeJoinScan) GetIndexName() string { + if m != nil && m.IndexName != nil { + return *m.IndexName + } + return "" +} + +func (m *CompiledQuery_MergeJoinScan) GetPrefixValue() []string { + if m != nil { + return m.PrefixValue + } + return nil +} + +func (m *CompiledQuery_MergeJoinScan) GetValuePrefix() bool { + if m != nil && m.ValuePrefix != nil { + return *m.ValuePrefix + } + return Default_CompiledQuery_MergeJoinScan_ValuePrefix +} + +type CompiledQuery_EntityFilter struct { + Distinct *bool `protobuf:"varint,14,opt,name=distinct,def=0" json:"distinct,omitempty"` + Kind *string `protobuf:"bytes,17,opt,name=kind" json:"kind,omitempty"` + Ancestor *Reference `protobuf:"bytes,18,opt,name=ancestor" json:"ancestor,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *CompiledQuery_EntityFilter) Reset() { *m = CompiledQuery_EntityFilter{} } +func (m *CompiledQuery_EntityFilter) String() string { return proto.CompactTextString(m) } +func (*CompiledQuery_EntityFilter) ProtoMessage() {} + +const Default_CompiledQuery_EntityFilter_Distinct bool = false + +func (m *CompiledQuery_EntityFilter) GetDistinct() bool { + if m != nil && m.Distinct != nil { + return *m.Distinct + } + return Default_CompiledQuery_EntityFilter_Distinct +} + +func (m *CompiledQuery_EntityFilter) GetKind() string { + if m != nil && m.Kind != nil { + return *m.Kind + } + return "" +} + +func (m *CompiledQuery_EntityFilter) GetAncestor() *Reference { + if m != nil { + return m.Ancestor + } + return nil +} + +type CompiledCursor struct { + Position *CompiledCursor_Position `protobuf:"group,2,opt,name=Position" json:"position,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *CompiledCursor) Reset() { *m = CompiledCursor{} } +func (m *CompiledCursor) String() string { return proto.CompactTextString(m) } +func (*CompiledCursor) ProtoMessage() {} + +func (m *CompiledCursor) GetPosition() *CompiledCursor_Position { + if m != nil { + return m.Position + } + return nil +} + +type CompiledCursor_Position struct { + StartKey *string `protobuf:"bytes,27,opt,name=start_key" json:"start_key,omitempty"` + Indexvalue []*CompiledCursor_Position_IndexValue `protobuf:"group,29,rep,name=IndexValue" json:"indexvalue,omitempty"` + Key *Reference `protobuf:"bytes,32,opt,name=key" json:"key,omitempty"` + StartInclusive *bool 
`protobuf:"varint,28,opt,name=start_inclusive,def=1" json:"start_inclusive,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *CompiledCursor_Position) Reset() { *m = CompiledCursor_Position{} } +func (m *CompiledCursor_Position) String() string { return proto.CompactTextString(m) } +func (*CompiledCursor_Position) ProtoMessage() {} + +const Default_CompiledCursor_Position_StartInclusive bool = true + +func (m *CompiledCursor_Position) GetStartKey() string { + if m != nil && m.StartKey != nil { + return *m.StartKey + } + return "" +} + +func (m *CompiledCursor_Position) GetIndexvalue() []*CompiledCursor_Position_IndexValue { + if m != nil { + return m.Indexvalue + } + return nil +} + +func (m *CompiledCursor_Position) GetKey() *Reference { + if m != nil { + return m.Key + } + return nil +} + +func (m *CompiledCursor_Position) GetStartInclusive() bool { + if m != nil && m.StartInclusive != nil { + return *m.StartInclusive + } + return Default_CompiledCursor_Position_StartInclusive +} + +type CompiledCursor_Position_IndexValue struct { + Property *string `protobuf:"bytes,30,opt,name=property" json:"property,omitempty"` + Value *PropertyValue `protobuf:"bytes,31,req,name=value" json:"value,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *CompiledCursor_Position_IndexValue) Reset() { *m = CompiledCursor_Position_IndexValue{} } +func (m *CompiledCursor_Position_IndexValue) String() string { return proto.CompactTextString(m) } +func (*CompiledCursor_Position_IndexValue) ProtoMessage() {} + +func (m *CompiledCursor_Position_IndexValue) GetProperty() string { + if m != nil && m.Property != nil { + return *m.Property + } + return "" +} + +func (m *CompiledCursor_Position_IndexValue) GetValue() *PropertyValue { + if m != nil { + return m.Value + } + return nil +} + +type Cursor struct { + Cursor *uint64 `protobuf:"fixed64,1,req,name=cursor" json:"cursor,omitempty"` + App *string `protobuf:"bytes,2,opt,name=app" json:"app,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *Cursor) Reset() { *m = Cursor{} } +func (m *Cursor) String() string { return proto.CompactTextString(m) } +func (*Cursor) ProtoMessage() {} + +func (m *Cursor) GetCursor() uint64 { + if m != nil && m.Cursor != nil { + return *m.Cursor + } + return 0 +} + +func (m *Cursor) GetApp() string { + if m != nil && m.App != nil { + return *m.App + } + return "" +} + +type Error struct { + XXX_unrecognized []byte `json:"-"` +} + +func (m *Error) Reset() { *m = Error{} } +func (m *Error) String() string { return proto.CompactTextString(m) } +func (*Error) ProtoMessage() {} + +type Cost struct { + IndexWrites *int32 `protobuf:"varint,1,opt,name=index_writes" json:"index_writes,omitempty"` + IndexWriteBytes *int32 `protobuf:"varint,2,opt,name=index_write_bytes" json:"index_write_bytes,omitempty"` + EntityWrites *int32 `protobuf:"varint,3,opt,name=entity_writes" json:"entity_writes,omitempty"` + EntityWriteBytes *int32 `protobuf:"varint,4,opt,name=entity_write_bytes" json:"entity_write_bytes,omitempty"` + Commitcost *Cost_CommitCost `protobuf:"group,5,opt,name=CommitCost" json:"commitcost,omitempty"` + ApproximateStorageDelta *int32 `protobuf:"varint,8,opt,name=approximate_storage_delta" json:"approximate_storage_delta,omitempty"` + IdSequenceUpdates *int32 `protobuf:"varint,9,opt,name=id_sequence_updates" json:"id_sequence_updates,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *Cost) Reset() { *m = Cost{} } +func (m *Cost) String() string { return proto.CompactTextString(m) } +func (*Cost) 
ProtoMessage() {} + +func (m *Cost) GetIndexWrites() int32 { + if m != nil && m.IndexWrites != nil { + return *m.IndexWrites + } + return 0 +} + +func (m *Cost) GetIndexWriteBytes() int32 { + if m != nil && m.IndexWriteBytes != nil { + return *m.IndexWriteBytes + } + return 0 +} + +func (m *Cost) GetEntityWrites() int32 { + if m != nil && m.EntityWrites != nil { + return *m.EntityWrites + } + return 0 +} + +func (m *Cost) GetEntityWriteBytes() int32 { + if m != nil && m.EntityWriteBytes != nil { + return *m.EntityWriteBytes + } + return 0 +} + +func (m *Cost) GetCommitcost() *Cost_CommitCost { + if m != nil { + return m.Commitcost + } + return nil +} + +func (m *Cost) GetApproximateStorageDelta() int32 { + if m != nil && m.ApproximateStorageDelta != nil { + return *m.ApproximateStorageDelta + } + return 0 +} + +func (m *Cost) GetIdSequenceUpdates() int32 { + if m != nil && m.IdSequenceUpdates != nil { + return *m.IdSequenceUpdates + } + return 0 +} + +type Cost_CommitCost struct { + RequestedEntityPuts *int32 `protobuf:"varint,6,opt,name=requested_entity_puts" json:"requested_entity_puts,omitempty"` + RequestedEntityDeletes *int32 `protobuf:"varint,7,opt,name=requested_entity_deletes" json:"requested_entity_deletes,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *Cost_CommitCost) Reset() { *m = Cost_CommitCost{} } +func (m *Cost_CommitCost) String() string { return proto.CompactTextString(m) } +func (*Cost_CommitCost) ProtoMessage() {} + +func (m *Cost_CommitCost) GetRequestedEntityPuts() int32 { + if m != nil && m.RequestedEntityPuts != nil { + return *m.RequestedEntityPuts + } + return 0 +} + +func (m *Cost_CommitCost) GetRequestedEntityDeletes() int32 { + if m != nil && m.RequestedEntityDeletes != nil { + return *m.RequestedEntityDeletes + } + return 0 +} + +type GetRequest struct { + Header *InternalHeader `protobuf:"bytes,6,opt,name=header" json:"header,omitempty"` + Key []*Reference `protobuf:"bytes,1,rep,name=key" json:"key,omitempty"` + Transaction *Transaction `protobuf:"bytes,2,opt,name=transaction" json:"transaction,omitempty"` + FailoverMs *int64 `protobuf:"varint,3,opt,name=failover_ms" json:"failover_ms,omitempty"` + Strong *bool `protobuf:"varint,4,opt,name=strong" json:"strong,omitempty"` + AllowDeferred *bool `protobuf:"varint,5,opt,name=allow_deferred,def=0" json:"allow_deferred,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *GetRequest) Reset() { *m = GetRequest{} } +func (m *GetRequest) String() string { return proto.CompactTextString(m) } +func (*GetRequest) ProtoMessage() {} + +const Default_GetRequest_AllowDeferred bool = false + +func (m *GetRequest) GetHeader() *InternalHeader { + if m != nil { + return m.Header + } + return nil +} + +func (m *GetRequest) GetKey() []*Reference { + if m != nil { + return m.Key + } + return nil +} + +func (m *GetRequest) GetTransaction() *Transaction { + if m != nil { + return m.Transaction + } + return nil +} + +func (m *GetRequest) GetFailoverMs() int64 { + if m != nil && m.FailoverMs != nil { + return *m.FailoverMs + } + return 0 +} + +func (m *GetRequest) GetStrong() bool { + if m != nil && m.Strong != nil { + return *m.Strong + } + return false +} + +func (m *GetRequest) GetAllowDeferred() bool { + if m != nil && m.AllowDeferred != nil { + return *m.AllowDeferred + } + return Default_GetRequest_AllowDeferred +} + +type GetResponse struct { + Entity []*GetResponse_Entity `protobuf:"group,1,rep,name=Entity" json:"entity,omitempty"` + Deferred []*Reference `protobuf:"bytes,5,rep,name=deferred" 
json:"deferred,omitempty"` + InOrder *bool `protobuf:"varint,6,opt,name=in_order,def=1" json:"in_order,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *GetResponse) Reset() { *m = GetResponse{} } +func (m *GetResponse) String() string { return proto.CompactTextString(m) } +func (*GetResponse) ProtoMessage() {} + +const Default_GetResponse_InOrder bool = true + +func (m *GetResponse) GetEntity() []*GetResponse_Entity { + if m != nil { + return m.Entity + } + return nil +} + +func (m *GetResponse) GetDeferred() []*Reference { + if m != nil { + return m.Deferred + } + return nil +} + +func (m *GetResponse) GetInOrder() bool { + if m != nil && m.InOrder != nil { + return *m.InOrder + } + return Default_GetResponse_InOrder +} + +type GetResponse_Entity struct { + Entity *EntityProto `protobuf:"bytes,2,opt,name=entity" json:"entity,omitempty"` + Key *Reference `protobuf:"bytes,4,opt,name=key" json:"key,omitempty"` + Version *int64 `protobuf:"varint,3,opt,name=version" json:"version,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *GetResponse_Entity) Reset() { *m = GetResponse_Entity{} } +func (m *GetResponse_Entity) String() string { return proto.CompactTextString(m) } +func (*GetResponse_Entity) ProtoMessage() {} + +func (m *GetResponse_Entity) GetEntity() *EntityProto { + if m != nil { + return m.Entity + } + return nil +} + +func (m *GetResponse_Entity) GetKey() *Reference { + if m != nil { + return m.Key + } + return nil +} + +func (m *GetResponse_Entity) GetVersion() int64 { + if m != nil && m.Version != nil { + return *m.Version + } + return 0 +} + +type PutRequest struct { + Header *InternalHeader `protobuf:"bytes,11,opt,name=header" json:"header,omitempty"` + Entity []*EntityProto `protobuf:"bytes,1,rep,name=entity" json:"entity,omitempty"` + Transaction *Transaction `protobuf:"bytes,2,opt,name=transaction" json:"transaction,omitempty"` + CompositeIndex []*CompositeIndex `protobuf:"bytes,3,rep,name=composite_index" json:"composite_index,omitempty"` + Trusted *bool `protobuf:"varint,4,opt,name=trusted,def=0" json:"trusted,omitempty"` + Force *bool `protobuf:"varint,7,opt,name=force,def=0" json:"force,omitempty"` + MarkChanges *bool `protobuf:"varint,8,opt,name=mark_changes,def=0" json:"mark_changes,omitempty"` + Snapshot []*Snapshot `protobuf:"bytes,9,rep,name=snapshot" json:"snapshot,omitempty"` + AutoIdPolicy *PutRequest_AutoIdPolicy `protobuf:"varint,10,opt,name=auto_id_policy,enum=appengine.PutRequest_AutoIdPolicy,def=0" json:"auto_id_policy,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *PutRequest) Reset() { *m = PutRequest{} } +func (m *PutRequest) String() string { return proto.CompactTextString(m) } +func (*PutRequest) ProtoMessage() {} + +const Default_PutRequest_Trusted bool = false +const Default_PutRequest_Force bool = false +const Default_PutRequest_MarkChanges bool = false +const Default_PutRequest_AutoIdPolicy PutRequest_AutoIdPolicy = PutRequest_CURRENT + +func (m *PutRequest) GetHeader() *InternalHeader { + if m != nil { + return m.Header + } + return nil +} + +func (m *PutRequest) GetEntity() []*EntityProto { + if m != nil { + return m.Entity + } + return nil +} + +func (m *PutRequest) GetTransaction() *Transaction { + if m != nil { + return m.Transaction + } + return nil +} + +func (m *PutRequest) GetCompositeIndex() []*CompositeIndex { + if m != nil { + return m.CompositeIndex + } + return nil +} + +func (m *PutRequest) GetTrusted() bool { + if m != nil && m.Trusted != nil { + return *m.Trusted + } + return 
Default_PutRequest_Trusted +} + +func (m *PutRequest) GetForce() bool { + if m != nil && m.Force != nil { + return *m.Force + } + return Default_PutRequest_Force +} + +func (m *PutRequest) GetMarkChanges() bool { + if m != nil && m.MarkChanges != nil { + return *m.MarkChanges + } + return Default_PutRequest_MarkChanges +} + +func (m *PutRequest) GetSnapshot() []*Snapshot { + if m != nil { + return m.Snapshot + } + return nil +} + +func (m *PutRequest) GetAutoIdPolicy() PutRequest_AutoIdPolicy { + if m != nil && m.AutoIdPolicy != nil { + return *m.AutoIdPolicy + } + return Default_PutRequest_AutoIdPolicy +} + +type PutResponse struct { + Key []*Reference `protobuf:"bytes,1,rep,name=key" json:"key,omitempty"` + Cost *Cost `protobuf:"bytes,2,opt,name=cost" json:"cost,omitempty"` + Version []int64 `protobuf:"varint,3,rep,name=version" json:"version,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *PutResponse) Reset() { *m = PutResponse{} } +func (m *PutResponse) String() string { return proto.CompactTextString(m) } +func (*PutResponse) ProtoMessage() {} + +func (m *PutResponse) GetKey() []*Reference { + if m != nil { + return m.Key + } + return nil +} + +func (m *PutResponse) GetCost() *Cost { + if m != nil { + return m.Cost + } + return nil +} + +func (m *PutResponse) GetVersion() []int64 { + if m != nil { + return m.Version + } + return nil +} + +type TouchRequest struct { + Header *InternalHeader `protobuf:"bytes,10,opt,name=header" json:"header,omitempty"` + Key []*Reference `protobuf:"bytes,1,rep,name=key" json:"key,omitempty"` + CompositeIndex []*CompositeIndex `protobuf:"bytes,2,rep,name=composite_index" json:"composite_index,omitempty"` + Force *bool `protobuf:"varint,3,opt,name=force,def=0" json:"force,omitempty"` + Snapshot []*Snapshot `protobuf:"bytes,9,rep,name=snapshot" json:"snapshot,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *TouchRequest) Reset() { *m = TouchRequest{} } +func (m *TouchRequest) String() string { return proto.CompactTextString(m) } +func (*TouchRequest) ProtoMessage() {} + +const Default_TouchRequest_Force bool = false + +func (m *TouchRequest) GetHeader() *InternalHeader { + if m != nil { + return m.Header + } + return nil +} + +func (m *TouchRequest) GetKey() []*Reference { + if m != nil { + return m.Key + } + return nil +} + +func (m *TouchRequest) GetCompositeIndex() []*CompositeIndex { + if m != nil { + return m.CompositeIndex + } + return nil +} + +func (m *TouchRequest) GetForce() bool { + if m != nil && m.Force != nil { + return *m.Force + } + return Default_TouchRequest_Force +} + +func (m *TouchRequest) GetSnapshot() []*Snapshot { + if m != nil { + return m.Snapshot + } + return nil +} + +type TouchResponse struct { + Cost *Cost `protobuf:"bytes,1,opt,name=cost" json:"cost,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *TouchResponse) Reset() { *m = TouchResponse{} } +func (m *TouchResponse) String() string { return proto.CompactTextString(m) } +func (*TouchResponse) ProtoMessage() {} + +func (m *TouchResponse) GetCost() *Cost { + if m != nil { + return m.Cost + } + return nil +} + +type DeleteRequest struct { + Header *InternalHeader `protobuf:"bytes,10,opt,name=header" json:"header,omitempty"` + Key []*Reference `protobuf:"bytes,6,rep,name=key" json:"key,omitempty"` + Transaction *Transaction `protobuf:"bytes,5,opt,name=transaction" json:"transaction,omitempty"` + Trusted *bool `protobuf:"varint,4,opt,name=trusted,def=0" json:"trusted,omitempty"` + Force *bool `protobuf:"varint,7,opt,name=force,def=0" 
json:"force,omitempty"` + MarkChanges *bool `protobuf:"varint,8,opt,name=mark_changes,def=0" json:"mark_changes,omitempty"` + Snapshot []*Snapshot `protobuf:"bytes,9,rep,name=snapshot" json:"snapshot,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *DeleteRequest) Reset() { *m = DeleteRequest{} } +func (m *DeleteRequest) String() string { return proto.CompactTextString(m) } +func (*DeleteRequest) ProtoMessage() {} + +const Default_DeleteRequest_Trusted bool = false +const Default_DeleteRequest_Force bool = false +const Default_DeleteRequest_MarkChanges bool = false + +func (m *DeleteRequest) GetHeader() *InternalHeader { + if m != nil { + return m.Header + } + return nil +} + +func (m *DeleteRequest) GetKey() []*Reference { + if m != nil { + return m.Key + } + return nil +} + +func (m *DeleteRequest) GetTransaction() *Transaction { + if m != nil { + return m.Transaction + } + return nil +} + +func (m *DeleteRequest) GetTrusted() bool { + if m != nil && m.Trusted != nil { + return *m.Trusted + } + return Default_DeleteRequest_Trusted +} + +func (m *DeleteRequest) GetForce() bool { + if m != nil && m.Force != nil { + return *m.Force + } + return Default_DeleteRequest_Force +} + +func (m *DeleteRequest) GetMarkChanges() bool { + if m != nil && m.MarkChanges != nil { + return *m.MarkChanges + } + return Default_DeleteRequest_MarkChanges +} + +func (m *DeleteRequest) GetSnapshot() []*Snapshot { + if m != nil { + return m.Snapshot + } + return nil +} + +type DeleteResponse struct { + Cost *Cost `protobuf:"bytes,1,opt,name=cost" json:"cost,omitempty"` + Version []int64 `protobuf:"varint,3,rep,name=version" json:"version,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *DeleteResponse) Reset() { *m = DeleteResponse{} } +func (m *DeleteResponse) String() string { return proto.CompactTextString(m) } +func (*DeleteResponse) ProtoMessage() {} + +func (m *DeleteResponse) GetCost() *Cost { + if m != nil { + return m.Cost + } + return nil +} + +func (m *DeleteResponse) GetVersion() []int64 { + if m != nil { + return m.Version + } + return nil +} + +type NextRequest struct { + Header *InternalHeader `protobuf:"bytes,5,opt,name=header" json:"header,omitempty"` + Cursor *Cursor `protobuf:"bytes,1,req,name=cursor" json:"cursor,omitempty"` + Count *int32 `protobuf:"varint,2,opt,name=count" json:"count,omitempty"` + Offset *int32 `protobuf:"varint,4,opt,name=offset,def=0" json:"offset,omitempty"` + Compile *bool `protobuf:"varint,3,opt,name=compile,def=0" json:"compile,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *NextRequest) Reset() { *m = NextRequest{} } +func (m *NextRequest) String() string { return proto.CompactTextString(m) } +func (*NextRequest) ProtoMessage() {} + +const Default_NextRequest_Offset int32 = 0 +const Default_NextRequest_Compile bool = false + +func (m *NextRequest) GetHeader() *InternalHeader { + if m != nil { + return m.Header + } + return nil +} + +func (m *NextRequest) GetCursor() *Cursor { + if m != nil { + return m.Cursor + } + return nil +} + +func (m *NextRequest) GetCount() int32 { + if m != nil && m.Count != nil { + return *m.Count + } + return 0 +} + +func (m *NextRequest) GetOffset() int32 { + if m != nil && m.Offset != nil { + return *m.Offset + } + return Default_NextRequest_Offset +} + +func (m *NextRequest) GetCompile() bool { + if m != nil && m.Compile != nil { + return *m.Compile + } + return Default_NextRequest_Compile +} + +type QueryResult struct { + Cursor *Cursor `protobuf:"bytes,1,opt,name=cursor" 
json:"cursor,omitempty"` + Result []*EntityProto `protobuf:"bytes,2,rep,name=result" json:"result,omitempty"` + SkippedResults *int32 `protobuf:"varint,7,opt,name=skipped_results" json:"skipped_results,omitempty"` + MoreResults *bool `protobuf:"varint,3,req,name=more_results" json:"more_results,omitempty"` + KeysOnly *bool `protobuf:"varint,4,opt,name=keys_only" json:"keys_only,omitempty"` + IndexOnly *bool `protobuf:"varint,9,opt,name=index_only" json:"index_only,omitempty"` + SmallOps *bool `protobuf:"varint,10,opt,name=small_ops" json:"small_ops,omitempty"` + CompiledQuery *CompiledQuery `protobuf:"bytes,5,opt,name=compiled_query" json:"compiled_query,omitempty"` + CompiledCursor *CompiledCursor `protobuf:"bytes,6,opt,name=compiled_cursor" json:"compiled_cursor,omitempty"` + Index []*CompositeIndex `protobuf:"bytes,8,rep,name=index" json:"index,omitempty"` + Version []int64 `protobuf:"varint,11,rep,name=version" json:"version,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *QueryResult) Reset() { *m = QueryResult{} } +func (m *QueryResult) String() string { return proto.CompactTextString(m) } +func (*QueryResult) ProtoMessage() {} + +func (m *QueryResult) GetCursor() *Cursor { + if m != nil { + return m.Cursor + } + return nil +} + +func (m *QueryResult) GetResult() []*EntityProto { + if m != nil { + return m.Result + } + return nil +} + +func (m *QueryResult) GetSkippedResults() int32 { + if m != nil && m.SkippedResults != nil { + return *m.SkippedResults + } + return 0 +} + +func (m *QueryResult) GetMoreResults() bool { + if m != nil && m.MoreResults != nil { + return *m.MoreResults + } + return false +} + +func (m *QueryResult) GetKeysOnly() bool { + if m != nil && m.KeysOnly != nil { + return *m.KeysOnly + } + return false +} + +func (m *QueryResult) GetIndexOnly() bool { + if m != nil && m.IndexOnly != nil { + return *m.IndexOnly + } + return false +} + +func (m *QueryResult) GetSmallOps() bool { + if m != nil && m.SmallOps != nil { + return *m.SmallOps + } + return false +} + +func (m *QueryResult) GetCompiledQuery() *CompiledQuery { + if m != nil { + return m.CompiledQuery + } + return nil +} + +func (m *QueryResult) GetCompiledCursor() *CompiledCursor { + if m != nil { + return m.CompiledCursor + } + return nil +} + +func (m *QueryResult) GetIndex() []*CompositeIndex { + if m != nil { + return m.Index + } + return nil +} + +func (m *QueryResult) GetVersion() []int64 { + if m != nil { + return m.Version + } + return nil +} + +type AllocateIdsRequest struct { + Header *InternalHeader `protobuf:"bytes,4,opt,name=header" json:"header,omitempty"` + ModelKey *Reference `protobuf:"bytes,1,opt,name=model_key" json:"model_key,omitempty"` + Size *int64 `protobuf:"varint,2,opt,name=size" json:"size,omitempty"` + Max *int64 `protobuf:"varint,3,opt,name=max" json:"max,omitempty"` + Reserve []*Reference `protobuf:"bytes,5,rep,name=reserve" json:"reserve,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *AllocateIdsRequest) Reset() { *m = AllocateIdsRequest{} } +func (m *AllocateIdsRequest) String() string { return proto.CompactTextString(m) } +func (*AllocateIdsRequest) ProtoMessage() {} + +func (m *AllocateIdsRequest) GetHeader() *InternalHeader { + if m != nil { + return m.Header + } + return nil +} + +func (m *AllocateIdsRequest) GetModelKey() *Reference { + if m != nil { + return m.ModelKey + } + return nil +} + +func (m *AllocateIdsRequest) GetSize() int64 { + if m != nil && m.Size != nil { + return *m.Size + } + return 0 +} + +func (m *AllocateIdsRequest) 
GetMax() int64 { + if m != nil && m.Max != nil { + return *m.Max + } + return 0 +} + +func (m *AllocateIdsRequest) GetReserve() []*Reference { + if m != nil { + return m.Reserve + } + return nil +} + +type AllocateIdsResponse struct { + Start *int64 `protobuf:"varint,1,req,name=start" json:"start,omitempty"` + End *int64 `protobuf:"varint,2,req,name=end" json:"end,omitempty"` + Cost *Cost `protobuf:"bytes,3,opt,name=cost" json:"cost,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *AllocateIdsResponse) Reset() { *m = AllocateIdsResponse{} } +func (m *AllocateIdsResponse) String() string { return proto.CompactTextString(m) } +func (*AllocateIdsResponse) ProtoMessage() {} + +func (m *AllocateIdsResponse) GetStart() int64 { + if m != nil && m.Start != nil { + return *m.Start + } + return 0 +} + +func (m *AllocateIdsResponse) GetEnd() int64 { + if m != nil && m.End != nil { + return *m.End + } + return 0 +} + +func (m *AllocateIdsResponse) GetCost() *Cost { + if m != nil { + return m.Cost + } + return nil +} + +type CompositeIndices struct { + Index []*CompositeIndex `protobuf:"bytes,1,rep,name=index" json:"index,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *CompositeIndices) Reset() { *m = CompositeIndices{} } +func (m *CompositeIndices) String() string { return proto.CompactTextString(m) } +func (*CompositeIndices) ProtoMessage() {} + +func (m *CompositeIndices) GetIndex() []*CompositeIndex { + if m != nil { + return m.Index + } + return nil +} + +type AddActionsRequest struct { + Header *InternalHeader `protobuf:"bytes,3,opt,name=header" json:"header,omitempty"` + Transaction *Transaction `protobuf:"bytes,1,req,name=transaction" json:"transaction,omitempty"` + Action []*Action `protobuf:"bytes,2,rep,name=action" json:"action,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *AddActionsRequest) Reset() { *m = AddActionsRequest{} } +func (m *AddActionsRequest) String() string { return proto.CompactTextString(m) } +func (*AddActionsRequest) ProtoMessage() {} + +func (m *AddActionsRequest) GetHeader() *InternalHeader { + if m != nil { + return m.Header + } + return nil +} + +func (m *AddActionsRequest) GetTransaction() *Transaction { + if m != nil { + return m.Transaction + } + return nil +} + +func (m *AddActionsRequest) GetAction() []*Action { + if m != nil { + return m.Action + } + return nil +} + +type AddActionsResponse struct { + XXX_unrecognized []byte `json:"-"` +} + +func (m *AddActionsResponse) Reset() { *m = AddActionsResponse{} } +func (m *AddActionsResponse) String() string { return proto.CompactTextString(m) } +func (*AddActionsResponse) ProtoMessage() {} + +type BeginTransactionRequest struct { + Header *InternalHeader `protobuf:"bytes,3,opt,name=header" json:"header,omitempty"` + App *string `protobuf:"bytes,1,req,name=app" json:"app,omitempty"` + AllowMultipleEg *bool `protobuf:"varint,2,opt,name=allow_multiple_eg,def=0" json:"allow_multiple_eg,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *BeginTransactionRequest) Reset() { *m = BeginTransactionRequest{} } +func (m *BeginTransactionRequest) String() string { return proto.CompactTextString(m) } +func (*BeginTransactionRequest) ProtoMessage() {} + +const Default_BeginTransactionRequest_AllowMultipleEg bool = false + +func (m *BeginTransactionRequest) GetHeader() *InternalHeader { + if m != nil { + return m.Header + } + return nil +} + +func (m *BeginTransactionRequest) GetApp() string { + if m != nil && m.App != nil { + return *m.App + } + return "" +} + +func (m 
*BeginTransactionRequest) GetAllowMultipleEg() bool { + if m != nil && m.AllowMultipleEg != nil { + return *m.AllowMultipleEg + } + return Default_BeginTransactionRequest_AllowMultipleEg +} + +type CommitResponse struct { + Cost *Cost `protobuf:"bytes,1,opt,name=cost" json:"cost,omitempty"` + Version []*CommitResponse_Version `protobuf:"group,3,rep,name=Version" json:"version,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *CommitResponse) Reset() { *m = CommitResponse{} } +func (m *CommitResponse) String() string { return proto.CompactTextString(m) } +func (*CommitResponse) ProtoMessage() {} + +func (m *CommitResponse) GetCost() *Cost { + if m != nil { + return m.Cost + } + return nil +} + +func (m *CommitResponse) GetVersion() []*CommitResponse_Version { + if m != nil { + return m.Version + } + return nil +} + +type CommitResponse_Version struct { + RootEntityKey *Reference `protobuf:"bytes,4,req,name=root_entity_key" json:"root_entity_key,omitempty"` + Version *int64 `protobuf:"varint,5,req,name=version" json:"version,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *CommitResponse_Version) Reset() { *m = CommitResponse_Version{} } +func (m *CommitResponse_Version) String() string { return proto.CompactTextString(m) } +func (*CommitResponse_Version) ProtoMessage() {} + +func (m *CommitResponse_Version) GetRootEntityKey() *Reference { + if m != nil { + return m.RootEntityKey + } + return nil +} + +func (m *CommitResponse_Version) GetVersion() int64 { + if m != nil && m.Version != nil { + return *m.Version + } + return 0 +} + +func init() { +} diff --git a/vendor/github.com/docker/distribution/vendor/google.golang.org/appengine/internal/datastore/datastore_v3.proto b/vendor/github.com/docker/distribution/vendor/google.golang.org/appengine/internal/datastore/datastore_v3.proto new file mode 100644 index 000000000..e76f126ff --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/google.golang.org/appengine/internal/datastore/datastore_v3.proto @@ -0,0 +1,541 @@ +syntax = "proto2"; +option go_package = "datastore"; + +package appengine; + +message Action{} + +message PropertyValue { + optional int64 int64Value = 1; + optional bool booleanValue = 2; + optional string stringValue = 3; + optional double doubleValue = 4; + + optional group PointValue = 5 { + required double x = 6; + required double y = 7; + } + + optional group UserValue = 8 { + required string email = 9; + required string auth_domain = 10; + optional string nickname = 11; + optional string federated_identity = 21; + optional string federated_provider = 22; + } + + optional group ReferenceValue = 12 { + required string app = 13; + optional string name_space = 20; + repeated group PathElement = 14 { + required string type = 15; + optional int64 id = 16; + optional string name = 17; + } + } +} + +message Property { + enum Meaning { + NO_MEANING = 0; + BLOB = 14; + TEXT = 15; + BYTESTRING = 16; + + ATOM_CATEGORY = 1; + ATOM_LINK = 2; + ATOM_TITLE = 3; + ATOM_CONTENT = 4; + ATOM_SUMMARY = 5; + ATOM_AUTHOR = 6; + + GD_WHEN = 7; + GD_EMAIL = 8; + GEORSS_POINT = 9; + GD_IM = 10; + + GD_PHONENUMBER = 11; + GD_POSTALADDRESS = 12; + + GD_RATING = 13; + + BLOBKEY = 17; + ENTITY_PROTO = 19; + + INDEX_VALUE = 18; + }; + + optional Meaning meaning = 1 [default = NO_MEANING]; + optional string meaning_uri = 2; + + required string name = 3; + + required PropertyValue value = 5; + + required bool multiple = 4; + + optional bool searchable = 6 [default=false]; + + enum FtsTokenizationOption { + HTML = 1; + 
ATOM = 2; + } + + optional FtsTokenizationOption fts_tokenization_option = 8; + + optional string locale = 9 [default = "en"]; +} + +message Path { + repeated group Element = 1 { + required string type = 2; + optional int64 id = 3; + optional string name = 4; + } +} + +message Reference { + required string app = 13; + optional string name_space = 20; + required Path path = 14; +} + +message User { + required string email = 1; + required string auth_domain = 2; + optional string nickname = 3; + optional string federated_identity = 6; + optional string federated_provider = 7; +} + +message EntityProto { + required Reference key = 13; + required Path entity_group = 16; + optional User owner = 17; + + enum Kind { + GD_CONTACT = 1; + GD_EVENT = 2; + GD_MESSAGE = 3; + } + optional Kind kind = 4; + optional string kind_uri = 5; + + repeated Property property = 14; + repeated Property raw_property = 15; + + optional int32 rank = 18; +} + +message CompositeProperty { + required int64 index_id = 1; + repeated string value = 2; +} + +message Index { + required string entity_type = 1; + required bool ancestor = 5; + repeated group Property = 2 { + required string name = 3; + enum Direction { + ASCENDING = 1; + DESCENDING = 2; + } + optional Direction direction = 4 [default = ASCENDING]; + } +} + +message CompositeIndex { + required string app_id = 1; + required int64 id = 2; + required Index definition = 3; + + enum State { + WRITE_ONLY = 1; + READ_WRITE = 2; + DELETED = 3; + ERROR = 4; + } + required State state = 4; + + optional bool only_use_if_required = 6 [default = false]; +} + +message IndexPostfix { + message IndexValue { + required string property_name = 1; + required PropertyValue value = 2; + } + + repeated IndexValue index_value = 1; + + optional Reference key = 2; + + optional bool before = 3 [default=true]; +} + +message IndexPosition { + optional string key = 1; + + optional bool before = 2 [default=true]; +} + +message Snapshot { + enum Status { + INACTIVE = 0; + ACTIVE = 1; + } + + required int64 ts = 1; +} + +message InternalHeader { + optional string qos = 1; +} + +message Transaction { + optional InternalHeader header = 4; + required fixed64 handle = 1; + required string app = 2; + optional bool mark_changes = 3 [default = false]; +} + +message Query { + optional InternalHeader header = 39; + + required string app = 1; + optional string name_space = 29; + + optional string kind = 3; + optional Reference ancestor = 17; + + repeated group Filter = 4 { + enum Operator { + LESS_THAN = 1; + LESS_THAN_OR_EQUAL = 2; + GREATER_THAN = 3; + GREATER_THAN_OR_EQUAL = 4; + EQUAL = 5; + IN = 6; + EXISTS = 7; + } + + required Operator op = 6; + repeated Property property = 14; + } + + optional string search_query = 8; + + repeated group Order = 9 { + enum Direction { + ASCENDING = 1; + DESCENDING = 2; + } + + required string property = 10; + optional Direction direction = 11 [default = ASCENDING]; + } + + enum Hint { + ORDER_FIRST = 1; + ANCESTOR_FIRST = 2; + FILTER_FIRST = 3; + } + optional Hint hint = 18; + + optional int32 count = 23; + + optional int32 offset = 12 [default = 0]; + + optional int32 limit = 16; + + optional CompiledCursor compiled_cursor = 30; + optional CompiledCursor end_compiled_cursor = 31; + + repeated CompositeIndex composite_index = 19; + + optional bool require_perfect_plan = 20 [default = false]; + + optional bool keys_only = 21 [default = false]; + + optional Transaction transaction = 22; + + optional bool compile = 25 [default = false]; + + optional int64 failover_ms 
= 26; + + optional bool strong = 32; + + repeated string property_name = 33; + + repeated string group_by_property_name = 34; + + optional bool distinct = 24; + + optional int64 min_safe_time_seconds = 35; + + repeated string safe_replica_name = 36; + + optional bool persist_offset = 37 [default=false]; +} + +message CompiledQuery { + required group PrimaryScan = 1 { + optional string index_name = 2; + + optional string start_key = 3; + optional bool start_inclusive = 4; + optional string end_key = 5; + optional bool end_inclusive = 6; + + repeated string start_postfix_value = 22; + repeated string end_postfix_value = 23; + + optional int64 end_unapplied_log_timestamp_us = 19; + } + + repeated group MergeJoinScan = 7 { + required string index_name = 8; + + repeated string prefix_value = 9; + + optional bool value_prefix = 20 [default=false]; + } + + optional Index index_def = 21; + + optional int32 offset = 10 [default = 0]; + + optional int32 limit = 11; + + required bool keys_only = 12; + + repeated string property_name = 24; + + optional int32 distinct_infix_size = 25; + + optional group EntityFilter = 13 { + optional bool distinct = 14 [default=false]; + + optional string kind = 17; + optional Reference ancestor = 18; + } +} + +message CompiledCursor { + optional group Position = 2 { + optional string start_key = 27; + + repeated group IndexValue = 29 { + optional string property = 30; + required PropertyValue value = 31; + } + + optional Reference key = 32; + + optional bool start_inclusive = 28 [default=true]; + } +} + +message Cursor { + required fixed64 cursor = 1; + + optional string app = 2; +} + +message Error { + enum ErrorCode { + BAD_REQUEST = 1; + CONCURRENT_TRANSACTION = 2; + INTERNAL_ERROR = 3; + NEED_INDEX = 4; + TIMEOUT = 5; + PERMISSION_DENIED = 6; + BIGTABLE_ERROR = 7; + COMMITTED_BUT_STILL_APPLYING = 8; + CAPABILITY_DISABLED = 9; + TRY_ALTERNATE_BACKEND = 10; + SAFE_TIME_TOO_OLD = 11; + } +} + +message Cost { + optional int32 index_writes = 1; + optional int32 index_write_bytes = 2; + optional int32 entity_writes = 3; + optional int32 entity_write_bytes = 4; + optional group CommitCost = 5 { + optional int32 requested_entity_puts = 6; + optional int32 requested_entity_deletes = 7; + }; + optional int32 approximate_storage_delta = 8; + optional int32 id_sequence_updates = 9; +} + +message GetRequest { + optional InternalHeader header = 6; + + repeated Reference key = 1; + optional Transaction transaction = 2; + + optional int64 failover_ms = 3; + + optional bool strong = 4; + + optional bool allow_deferred = 5 [default=false]; +} + +message GetResponse { + repeated group Entity = 1 { + optional EntityProto entity = 2; + optional Reference key = 4; + + optional int64 version = 3; + } + + repeated Reference deferred = 5; + + optional bool in_order = 6 [default=true]; +} + +message PutRequest { + optional InternalHeader header = 11; + + repeated EntityProto entity = 1; + optional Transaction transaction = 2; + repeated CompositeIndex composite_index = 3; + + optional bool trusted = 4 [default = false]; + + optional bool force = 7 [default = false]; + + optional bool mark_changes = 8 [default = false]; + repeated Snapshot snapshot = 9; + + enum AutoIdPolicy { + CURRENT = 0; + SEQUENTIAL = 1; + } + optional AutoIdPolicy auto_id_policy = 10 [default = CURRENT]; +} + +message PutResponse { + repeated Reference key = 1; + optional Cost cost = 2; + repeated int64 version = 3; +} + +message TouchRequest { + optional InternalHeader header = 10; + + repeated Reference key = 1; + 
repeated CompositeIndex composite_index = 2; + optional bool force = 3 [default = false]; + repeated Snapshot snapshot = 9; +} + +message TouchResponse { + optional Cost cost = 1; +} + +message DeleteRequest { + optional InternalHeader header = 10; + + repeated Reference key = 6; + optional Transaction transaction = 5; + + optional bool trusted = 4 [default = false]; + + optional bool force = 7 [default = false]; + + optional bool mark_changes = 8 [default = false]; + repeated Snapshot snapshot = 9; +} + +message DeleteResponse { + optional Cost cost = 1; + repeated int64 version = 3; +} + +message NextRequest { + optional InternalHeader header = 5; + + required Cursor cursor = 1; + optional int32 count = 2; + + optional int32 offset = 4 [default = 0]; + + optional bool compile = 3 [default = false]; +} + +message QueryResult { + optional Cursor cursor = 1; + + repeated EntityProto result = 2; + + optional int32 skipped_results = 7; + + required bool more_results = 3; + + optional bool keys_only = 4; + + optional bool index_only = 9; + + optional bool small_ops = 10; + + optional CompiledQuery compiled_query = 5; + + optional CompiledCursor compiled_cursor = 6; + + repeated CompositeIndex index = 8; + + repeated int64 version = 11; +} + +message AllocateIdsRequest { + optional InternalHeader header = 4; + + optional Reference model_key = 1; + + optional int64 size = 2; + + optional int64 max = 3; + + repeated Reference reserve = 5; +} + +message AllocateIdsResponse { + required int64 start = 1; + required int64 end = 2; + optional Cost cost = 3; +} + +message CompositeIndices { + repeated CompositeIndex index = 1; +} + +message AddActionsRequest { + optional InternalHeader header = 3; + + required Transaction transaction = 1; + repeated Action action = 2; +} + +message AddActionsResponse { +} + +message BeginTransactionRequest { + optional InternalHeader header = 3; + + required string app = 1; + optional bool allow_multiple_eg = 2 [default = false]; +} + +message CommitResponse { + optional Cost cost = 1; + + repeated group Version = 3 { + required Reference root_entity_key = 4; + required int64 version = 5; + } +} diff --git a/vendor/github.com/docker/distribution/vendor/google.golang.org/appengine/internal/identity.go b/vendor/github.com/docker/distribution/vendor/google.golang.org/appengine/internal/identity.go new file mode 100644 index 000000000..d538701ab --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/google.golang.org/appengine/internal/identity.go @@ -0,0 +1,14 @@ +// Copyright 2011 Google Inc. All rights reserved. +// Use of this source code is governed by the Apache 2.0 +// license that can be found in the LICENSE file. + +package internal + +import netcontext "golang.org/x/net/context" + +// These functions are implementations of the wrapper functions +// in ../appengine/identity.go. See that file for commentary. + +func AppID(c netcontext.Context) string { + return appID(FullyQualifiedAppID(c)) +} diff --git a/vendor/github.com/docker/distribution/vendor/google.golang.org/appengine/internal/identity_classic.go b/vendor/github.com/docker/distribution/vendor/google.golang.org/appengine/internal/identity_classic.go new file mode 100644 index 000000000..e6b9227c5 --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/google.golang.org/appengine/internal/identity_classic.go @@ -0,0 +1,27 @@ +// Copyright 2015 Google Inc. All rights reserved. +// Use of this source code is governed by the Apache 2.0 +// license that can be found in the LICENSE file. 
+ +// +build appengine + +package internal + +import ( + "appengine" + + netcontext "golang.org/x/net/context" +) + +func DefaultVersionHostname(ctx netcontext.Context) string { + return appengine.DefaultVersionHostname(fromContext(ctx)) +} + +func RequestID(ctx netcontext.Context) string { return appengine.RequestID(fromContext(ctx)) } +func Datacenter(_ netcontext.Context) string { return appengine.Datacenter() } +func ServerSoftware() string { return appengine.ServerSoftware() } +func ModuleName(ctx netcontext.Context) string { return appengine.ModuleName(fromContext(ctx)) } +func VersionID(ctx netcontext.Context) string { return appengine.VersionID(fromContext(ctx)) } +func InstanceID() string { return appengine.InstanceID() } +func IsDevAppServer() bool { return appengine.IsDevAppServer() } + +func fullyQualifiedAppID(ctx netcontext.Context) string { return fromContext(ctx).FullyQualifiedAppID() } diff --git a/vendor/github.com/docker/distribution/vendor/google.golang.org/appengine/internal/identity_vm.go b/vendor/github.com/docker/distribution/vendor/google.golang.org/appengine/internal/identity_vm.go new file mode 100644 index 000000000..ebe68b785 --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/google.golang.org/appengine/internal/identity_vm.go @@ -0,0 +1,97 @@ +// Copyright 2011 Google Inc. All rights reserved. +// Use of this source code is governed by the Apache 2.0 +// license that can be found in the LICENSE file. + +// +build !appengine + +package internal + +import ( + "net/http" + "os" + + netcontext "golang.org/x/net/context" +) + +// These functions are implementations of the wrapper functions +// in ../appengine/identity.go. See that file for commentary. + +const ( + hDefaultVersionHostname = "X-AppEngine-Default-Version-Hostname" + hRequestLogId = "X-AppEngine-Request-Log-Id" + hDatacenter = "X-AppEngine-Datacenter" +) + +func ctxHeaders(ctx netcontext.Context) http.Header { + return fromContext(ctx).Request().Header +} + +func DefaultVersionHostname(ctx netcontext.Context) string { + return ctxHeaders(ctx).Get(hDefaultVersionHostname) +} + +func RequestID(ctx netcontext.Context) string { + return ctxHeaders(ctx).Get(hRequestLogId) +} + +func Datacenter(ctx netcontext.Context) string { + return ctxHeaders(ctx).Get(hDatacenter) +} + +func ServerSoftware() string { + // TODO(dsymonds): Remove fallback when we've verified this. + if s := os.Getenv("SERVER_SOFTWARE"); s != "" { + return s + } + return "Google App Engine/1.x.x" +} + +// TODO(dsymonds): Remove the metadata fetches. + +func ModuleName(_ netcontext.Context) string { + if s := os.Getenv("GAE_MODULE_NAME"); s != "" { + return s + } + return string(mustGetMetadata("instance/attributes/gae_backend_name")) +} + +func VersionID(_ netcontext.Context) string { + if s1, s2 := os.Getenv("GAE_MODULE_VERSION"), os.Getenv("GAE_MINOR_VERSION"); s1 != "" && s2 != "" { + return s1 + "." + s2 + } + return string(mustGetMetadata("instance/attributes/gae_backend_version")) + "." + string(mustGetMetadata("instance/attributes/gae_backend_minor_version")) +} + +func InstanceID() string { + if s := os.Getenv("GAE_MODULE_INSTANCE"); s != "" { + return s + } + return string(mustGetMetadata("instance/attributes/gae_backend_instance")) +} + +func partitionlessAppID() string { + // gae_project has everything except the partition prefix. 
+ appID := os.Getenv("GAE_LONG_APP_ID") + if appID == "" { + appID = string(mustGetMetadata("instance/attributes/gae_project")) + } + return appID +} + +func fullyQualifiedAppID(_ netcontext.Context) string { + appID := partitionlessAppID() + + part := os.Getenv("GAE_PARTITION") + if part == "" { + part = string(mustGetMetadata("instance/attributes/gae_partition")) + } + + if part != "" { + appID = part + "~" + appID + } + return appID +} + +func IsDevAppServer() bool { + return os.Getenv("RUN_WITH_DEVAPPSERVER") != "" +} diff --git a/vendor/github.com/docker/distribution/vendor/google.golang.org/appengine/internal/internal.go b/vendor/github.com/docker/distribution/vendor/google.golang.org/appengine/internal/internal.go new file mode 100644 index 000000000..66e8d7686 --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/google.golang.org/appengine/internal/internal.go @@ -0,0 +1,144 @@ +// Copyright 2011 Google Inc. All rights reserved. +// Use of this source code is governed by the Apache 2.0 +// license that can be found in the LICENSE file. + +// Package internal provides support for package appengine. +// +// Programs should not use this package directly. Its API is not stable. +// Use packages appengine and appengine/* instead. +package internal + +import ( + "fmt" + "io" + "log" + "net/http" + "net/url" + "os" + + "github.com/golang/protobuf/proto" + + remotepb "google.golang.org/appengine/internal/remote_api" +) + +// errorCodeMaps is a map of service name to the error code map for the service. +var errorCodeMaps = make(map[string]map[int32]string) + +// RegisterErrorCodeMap is called from API implementations to register their +// error code map. This should only be called from init functions. +func RegisterErrorCodeMap(service string, m map[int32]string) { + errorCodeMaps[service] = m +} + +type timeoutCodeKey struct { + service string + code int32 +} + +// timeoutCodes is the set of service+code pairs that represent timeouts. +var timeoutCodes = make(map[timeoutCodeKey]bool) + +func RegisterTimeoutErrorCode(service string, code int32) { + timeoutCodes[timeoutCodeKey{service, code}] = true +} + +// APIError is the type returned by appengine.Context's Call method +// when an API call fails in an API-specific way. This may be, for instance, +// a taskqueue API call failing with TaskQueueServiceError::UNKNOWN_QUEUE. +type APIError struct { + Service string + Detail string + Code int32 // API-specific error code +} + +func (e *APIError) Error() string { + if e.Code == 0 { + if e.Detail == "" { + return "APIError " + } + return e.Detail + } + s := fmt.Sprintf("API error %d", e.Code) + if m, ok := errorCodeMaps[e.Service]; ok { + s += " (" + e.Service + ": " + m[e.Code] + ")" + } else { + // Shouldn't happen, but provide a bit more detail if it does. + s = e.Service + " " + s + } + if e.Detail != "" { + s += ": " + e.Detail + } + return s +} + +func (e *APIError) IsTimeout() bool { + return timeoutCodes[timeoutCodeKey{e.Service, e.Code}] +} + +// CallError is the type returned by appengine.Context's Call method when an +// API call fails in a generic way, such as RpcError::CAPABILITY_DISABLED. +type CallError struct { + Detail string + Code int32 + // TODO: Remove this if we get a distinguishable error code. 
+ Timeout bool +} + +func (e *CallError) Error() string { + var msg string + switch remotepb.RpcError_ErrorCode(e.Code) { + case remotepb.RpcError_UNKNOWN: + return e.Detail + case remotepb.RpcError_OVER_QUOTA: + msg = "Over quota" + case remotepb.RpcError_CAPABILITY_DISABLED: + msg = "Capability disabled" + case remotepb.RpcError_CANCELLED: + msg = "Canceled" + default: + msg = fmt.Sprintf("Call error %d", e.Code) + } + s := msg + ": " + e.Detail + if e.Timeout { + s += " (timeout)" + } + return s +} + +func (e *CallError) IsTimeout() bool { + return e.Timeout +} + +func Main() { + installHealthChecker(http.DefaultServeMux) + + port := "8080" + if s := os.Getenv("PORT"); s != "" { + port = s + } + + if err := http.ListenAndServe(":"+port, http.HandlerFunc(handleHTTP)); err != nil { + log.Fatalf("http.ListenAndServe: %v", err) + } +} + +func installHealthChecker(mux *http.ServeMux) { + // If no health check handler has been installed by this point, add a trivial one. + const healthPath = "/_ah/health" + hreq := &http.Request{ + Method: "GET", + URL: &url.URL{ + Path: healthPath, + }, + } + if _, pat := mux.Handler(hreq); pat != healthPath { + mux.HandleFunc(healthPath, func(w http.ResponseWriter, r *http.Request) { + io.WriteString(w, "ok") + }) + } +} + +// NamespaceMods is a map from API service to a function that will mutate an RPC request to attach a namespace. +// The function should be prepared to be called on the same message more than once; it should only modify the +// RPC request the first time. +var NamespaceMods = make(map[string]func(m proto.Message, namespace string)) diff --git a/vendor/github.com/docker/distribution/vendor/google.golang.org/appengine/internal/log/log_service.pb.go b/vendor/github.com/docker/distribution/vendor/google.golang.org/appengine/internal/log/log_service.pb.go new file mode 100644 index 000000000..20c595be3 --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/google.golang.org/appengine/internal/log/log_service.pb.go @@ -0,0 +1,899 @@ +// Code generated by protoc-gen-go. +// source: google.golang.org/appengine/internal/log/log_service.proto +// DO NOT EDIT! + +/* +Package log is a generated protocol buffer package. + +It is generated from these files: + google.golang.org/appengine/internal/log/log_service.proto + +It has these top-level messages: + LogServiceError + UserAppLogLine + UserAppLogGroup + FlushRequest + SetStatusRequest + LogOffset + LogLine + RequestLog + LogModuleVersion + LogReadRequest + LogReadResponse + LogUsageRecord + LogUsageRequest + LogUsageResponse +*/ +package log + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" + +// Reference imports to suppress errors if they are not otherwise used. 
+var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +type LogServiceError_ErrorCode int32 + +const ( + LogServiceError_OK LogServiceError_ErrorCode = 0 + LogServiceError_INVALID_REQUEST LogServiceError_ErrorCode = 1 + LogServiceError_STORAGE_ERROR LogServiceError_ErrorCode = 2 +) + +var LogServiceError_ErrorCode_name = map[int32]string{ + 0: "OK", + 1: "INVALID_REQUEST", + 2: "STORAGE_ERROR", +} +var LogServiceError_ErrorCode_value = map[string]int32{ + "OK": 0, + "INVALID_REQUEST": 1, + "STORAGE_ERROR": 2, +} + +func (x LogServiceError_ErrorCode) Enum() *LogServiceError_ErrorCode { + p := new(LogServiceError_ErrorCode) + *p = x + return p +} +func (x LogServiceError_ErrorCode) String() string { + return proto.EnumName(LogServiceError_ErrorCode_name, int32(x)) +} +func (x *LogServiceError_ErrorCode) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(LogServiceError_ErrorCode_value, data, "LogServiceError_ErrorCode") + if err != nil { + return err + } + *x = LogServiceError_ErrorCode(value) + return nil +} + +type LogServiceError struct { + XXX_unrecognized []byte `json:"-"` +} + +func (m *LogServiceError) Reset() { *m = LogServiceError{} } +func (m *LogServiceError) String() string { return proto.CompactTextString(m) } +func (*LogServiceError) ProtoMessage() {} + +type UserAppLogLine struct { + TimestampUsec *int64 `protobuf:"varint,1,req,name=timestamp_usec" json:"timestamp_usec,omitempty"` + Level *int64 `protobuf:"varint,2,req,name=level" json:"level,omitempty"` + Message *string `protobuf:"bytes,3,req,name=message" json:"message,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *UserAppLogLine) Reset() { *m = UserAppLogLine{} } +func (m *UserAppLogLine) String() string { return proto.CompactTextString(m) } +func (*UserAppLogLine) ProtoMessage() {} + +func (m *UserAppLogLine) GetTimestampUsec() int64 { + if m != nil && m.TimestampUsec != nil { + return *m.TimestampUsec + } + return 0 +} + +func (m *UserAppLogLine) GetLevel() int64 { + if m != nil && m.Level != nil { + return *m.Level + } + return 0 +} + +func (m *UserAppLogLine) GetMessage() string { + if m != nil && m.Message != nil { + return *m.Message + } + return "" +} + +type UserAppLogGroup struct { + LogLine []*UserAppLogLine `protobuf:"bytes,2,rep,name=log_line" json:"log_line,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *UserAppLogGroup) Reset() { *m = UserAppLogGroup{} } +func (m *UserAppLogGroup) String() string { return proto.CompactTextString(m) } +func (*UserAppLogGroup) ProtoMessage() {} + +func (m *UserAppLogGroup) GetLogLine() []*UserAppLogLine { + if m != nil { + return m.LogLine + } + return nil +} + +type FlushRequest struct { + Logs []byte `protobuf:"bytes,1,opt,name=logs" json:"logs,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *FlushRequest) Reset() { *m = FlushRequest{} } +func (m *FlushRequest) String() string { return proto.CompactTextString(m) } +func (*FlushRequest) ProtoMessage() {} + +func (m *FlushRequest) GetLogs() []byte { + if m != nil { + return m.Logs + } + return nil +} + +type SetStatusRequest struct { + Status *string `protobuf:"bytes,1,req,name=status" json:"status,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *SetStatusRequest) Reset() { *m = SetStatusRequest{} } +func (m *SetStatusRequest) String() string { return proto.CompactTextString(m) } +func (*SetStatusRequest) ProtoMessage() {} + +func (m *SetStatusRequest) GetStatus() string { + if m != nil && m.Status != nil { + return *m.Status + } + 
return "" +} + +type LogOffset struct { + RequestId []byte `protobuf:"bytes,1,opt,name=request_id" json:"request_id,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *LogOffset) Reset() { *m = LogOffset{} } +func (m *LogOffset) String() string { return proto.CompactTextString(m) } +func (*LogOffset) ProtoMessage() {} + +func (m *LogOffset) GetRequestId() []byte { + if m != nil { + return m.RequestId + } + return nil +} + +type LogLine struct { + Time *int64 `protobuf:"varint,1,req,name=time" json:"time,omitempty"` + Level *int32 `protobuf:"varint,2,req,name=level" json:"level,omitempty"` + LogMessage *string `protobuf:"bytes,3,req,name=log_message" json:"log_message,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *LogLine) Reset() { *m = LogLine{} } +func (m *LogLine) String() string { return proto.CompactTextString(m) } +func (*LogLine) ProtoMessage() {} + +func (m *LogLine) GetTime() int64 { + if m != nil && m.Time != nil { + return *m.Time + } + return 0 +} + +func (m *LogLine) GetLevel() int32 { + if m != nil && m.Level != nil { + return *m.Level + } + return 0 +} + +func (m *LogLine) GetLogMessage() string { + if m != nil && m.LogMessage != nil { + return *m.LogMessage + } + return "" +} + +type RequestLog struct { + AppId *string `protobuf:"bytes,1,req,name=app_id" json:"app_id,omitempty"` + ModuleId *string `protobuf:"bytes,37,opt,name=module_id,def=default" json:"module_id,omitempty"` + VersionId *string `protobuf:"bytes,2,req,name=version_id" json:"version_id,omitempty"` + RequestId []byte `protobuf:"bytes,3,req,name=request_id" json:"request_id,omitempty"` + Offset *LogOffset `protobuf:"bytes,35,opt,name=offset" json:"offset,omitempty"` + Ip *string `protobuf:"bytes,4,req,name=ip" json:"ip,omitempty"` + Nickname *string `protobuf:"bytes,5,opt,name=nickname" json:"nickname,omitempty"` + StartTime *int64 `protobuf:"varint,6,req,name=start_time" json:"start_time,omitempty"` + EndTime *int64 `protobuf:"varint,7,req,name=end_time" json:"end_time,omitempty"` + Latency *int64 `protobuf:"varint,8,req,name=latency" json:"latency,omitempty"` + Mcycles *int64 `protobuf:"varint,9,req,name=mcycles" json:"mcycles,omitempty"` + Method *string `protobuf:"bytes,10,req,name=method" json:"method,omitempty"` + Resource *string `protobuf:"bytes,11,req,name=resource" json:"resource,omitempty"` + HttpVersion *string `protobuf:"bytes,12,req,name=http_version" json:"http_version,omitempty"` + Status *int32 `protobuf:"varint,13,req,name=status" json:"status,omitempty"` + ResponseSize *int64 `protobuf:"varint,14,req,name=response_size" json:"response_size,omitempty"` + Referrer *string `protobuf:"bytes,15,opt,name=referrer" json:"referrer,omitempty"` + UserAgent *string `protobuf:"bytes,16,opt,name=user_agent" json:"user_agent,omitempty"` + UrlMapEntry *string `protobuf:"bytes,17,req,name=url_map_entry" json:"url_map_entry,omitempty"` + Combined *string `protobuf:"bytes,18,req,name=combined" json:"combined,omitempty"` + ApiMcycles *int64 `protobuf:"varint,19,opt,name=api_mcycles" json:"api_mcycles,omitempty"` + Host *string `protobuf:"bytes,20,opt,name=host" json:"host,omitempty"` + Cost *float64 `protobuf:"fixed64,21,opt,name=cost" json:"cost,omitempty"` + TaskQueueName *string `protobuf:"bytes,22,opt,name=task_queue_name" json:"task_queue_name,omitempty"` + TaskName *string `protobuf:"bytes,23,opt,name=task_name" json:"task_name,omitempty"` + WasLoadingRequest *bool `protobuf:"varint,24,opt,name=was_loading_request" json:"was_loading_request,omitempty"` + PendingTime *int64 
`protobuf:"varint,25,opt,name=pending_time" json:"pending_time,omitempty"` + ReplicaIndex *int32 `protobuf:"varint,26,opt,name=replica_index,def=-1" json:"replica_index,omitempty"` + Finished *bool `protobuf:"varint,27,opt,name=finished,def=1" json:"finished,omitempty"` + CloneKey []byte `protobuf:"bytes,28,opt,name=clone_key" json:"clone_key,omitempty"` + Line []*LogLine `protobuf:"bytes,29,rep,name=line" json:"line,omitempty"` + LinesIncomplete *bool `protobuf:"varint,36,opt,name=lines_incomplete" json:"lines_incomplete,omitempty"` + AppEngineRelease []byte `protobuf:"bytes,38,opt,name=app_engine_release" json:"app_engine_release,omitempty"` + ExitReason *int32 `protobuf:"varint,30,opt,name=exit_reason" json:"exit_reason,omitempty"` + WasThrottledForTime *bool `protobuf:"varint,31,opt,name=was_throttled_for_time" json:"was_throttled_for_time,omitempty"` + WasThrottledForRequests *bool `protobuf:"varint,32,opt,name=was_throttled_for_requests" json:"was_throttled_for_requests,omitempty"` + ThrottledTime *int64 `protobuf:"varint,33,opt,name=throttled_time" json:"throttled_time,omitempty"` + ServerName []byte `protobuf:"bytes,34,opt,name=server_name" json:"server_name,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *RequestLog) Reset() { *m = RequestLog{} } +func (m *RequestLog) String() string { return proto.CompactTextString(m) } +func (*RequestLog) ProtoMessage() {} + +const Default_RequestLog_ModuleId string = "default" +const Default_RequestLog_ReplicaIndex int32 = -1 +const Default_RequestLog_Finished bool = true + +func (m *RequestLog) GetAppId() string { + if m != nil && m.AppId != nil { + return *m.AppId + } + return "" +} + +func (m *RequestLog) GetModuleId() string { + if m != nil && m.ModuleId != nil { + return *m.ModuleId + } + return Default_RequestLog_ModuleId +} + +func (m *RequestLog) GetVersionId() string { + if m != nil && m.VersionId != nil { + return *m.VersionId + } + return "" +} + +func (m *RequestLog) GetRequestId() []byte { + if m != nil { + return m.RequestId + } + return nil +} + +func (m *RequestLog) GetOffset() *LogOffset { + if m != nil { + return m.Offset + } + return nil +} + +func (m *RequestLog) GetIp() string { + if m != nil && m.Ip != nil { + return *m.Ip + } + return "" +} + +func (m *RequestLog) GetNickname() string { + if m != nil && m.Nickname != nil { + return *m.Nickname + } + return "" +} + +func (m *RequestLog) GetStartTime() int64 { + if m != nil && m.StartTime != nil { + return *m.StartTime + } + return 0 +} + +func (m *RequestLog) GetEndTime() int64 { + if m != nil && m.EndTime != nil { + return *m.EndTime + } + return 0 +} + +func (m *RequestLog) GetLatency() int64 { + if m != nil && m.Latency != nil { + return *m.Latency + } + return 0 +} + +func (m *RequestLog) GetMcycles() int64 { + if m != nil && m.Mcycles != nil { + return *m.Mcycles + } + return 0 +} + +func (m *RequestLog) GetMethod() string { + if m != nil && m.Method != nil { + return *m.Method + } + return "" +} + +func (m *RequestLog) GetResource() string { + if m != nil && m.Resource != nil { + return *m.Resource + } + return "" +} + +func (m *RequestLog) GetHttpVersion() string { + if m != nil && m.HttpVersion != nil { + return *m.HttpVersion + } + return "" +} + +func (m *RequestLog) GetStatus() int32 { + if m != nil && m.Status != nil { + return *m.Status + } + return 0 +} + +func (m *RequestLog) GetResponseSize() int64 { + if m != nil && m.ResponseSize != nil { + return *m.ResponseSize + } + return 0 +} + +func (m *RequestLog) GetReferrer() string { + if m != nil 
&& m.Referrer != nil { + return *m.Referrer + } + return "" +} + +func (m *RequestLog) GetUserAgent() string { + if m != nil && m.UserAgent != nil { + return *m.UserAgent + } + return "" +} + +func (m *RequestLog) GetUrlMapEntry() string { + if m != nil && m.UrlMapEntry != nil { + return *m.UrlMapEntry + } + return "" +} + +func (m *RequestLog) GetCombined() string { + if m != nil && m.Combined != nil { + return *m.Combined + } + return "" +} + +func (m *RequestLog) GetApiMcycles() int64 { + if m != nil && m.ApiMcycles != nil { + return *m.ApiMcycles + } + return 0 +} + +func (m *RequestLog) GetHost() string { + if m != nil && m.Host != nil { + return *m.Host + } + return "" +} + +func (m *RequestLog) GetCost() float64 { + if m != nil && m.Cost != nil { + return *m.Cost + } + return 0 +} + +func (m *RequestLog) GetTaskQueueName() string { + if m != nil && m.TaskQueueName != nil { + return *m.TaskQueueName + } + return "" +} + +func (m *RequestLog) GetTaskName() string { + if m != nil && m.TaskName != nil { + return *m.TaskName + } + return "" +} + +func (m *RequestLog) GetWasLoadingRequest() bool { + if m != nil && m.WasLoadingRequest != nil { + return *m.WasLoadingRequest + } + return false +} + +func (m *RequestLog) GetPendingTime() int64 { + if m != nil && m.PendingTime != nil { + return *m.PendingTime + } + return 0 +} + +func (m *RequestLog) GetReplicaIndex() int32 { + if m != nil && m.ReplicaIndex != nil { + return *m.ReplicaIndex + } + return Default_RequestLog_ReplicaIndex +} + +func (m *RequestLog) GetFinished() bool { + if m != nil && m.Finished != nil { + return *m.Finished + } + return Default_RequestLog_Finished +} + +func (m *RequestLog) GetCloneKey() []byte { + if m != nil { + return m.CloneKey + } + return nil +} + +func (m *RequestLog) GetLine() []*LogLine { + if m != nil { + return m.Line + } + return nil +} + +func (m *RequestLog) GetLinesIncomplete() bool { + if m != nil && m.LinesIncomplete != nil { + return *m.LinesIncomplete + } + return false +} + +func (m *RequestLog) GetAppEngineRelease() []byte { + if m != nil { + return m.AppEngineRelease + } + return nil +} + +func (m *RequestLog) GetExitReason() int32 { + if m != nil && m.ExitReason != nil { + return *m.ExitReason + } + return 0 +} + +func (m *RequestLog) GetWasThrottledForTime() bool { + if m != nil && m.WasThrottledForTime != nil { + return *m.WasThrottledForTime + } + return false +} + +func (m *RequestLog) GetWasThrottledForRequests() bool { + if m != nil && m.WasThrottledForRequests != nil { + return *m.WasThrottledForRequests + } + return false +} + +func (m *RequestLog) GetThrottledTime() int64 { + if m != nil && m.ThrottledTime != nil { + return *m.ThrottledTime + } + return 0 +} + +func (m *RequestLog) GetServerName() []byte { + if m != nil { + return m.ServerName + } + return nil +} + +type LogModuleVersion struct { + ModuleId *string `protobuf:"bytes,1,opt,name=module_id,def=default" json:"module_id,omitempty"` + VersionId *string `protobuf:"bytes,2,opt,name=version_id" json:"version_id,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *LogModuleVersion) Reset() { *m = LogModuleVersion{} } +func (m *LogModuleVersion) String() string { return proto.CompactTextString(m) } +func (*LogModuleVersion) ProtoMessage() {} + +const Default_LogModuleVersion_ModuleId string = "default" + +func (m *LogModuleVersion) GetModuleId() string { + if m != nil && m.ModuleId != nil { + return *m.ModuleId + } + return Default_LogModuleVersion_ModuleId +} + +func (m *LogModuleVersion) GetVersionId() string { + 
if m != nil && m.VersionId != nil { + return *m.VersionId + } + return "" +} + +type LogReadRequest struct { + AppId *string `protobuf:"bytes,1,req,name=app_id" json:"app_id,omitempty"` + VersionId []string `protobuf:"bytes,2,rep,name=version_id" json:"version_id,omitempty"` + ModuleVersion []*LogModuleVersion `protobuf:"bytes,19,rep,name=module_version" json:"module_version,omitempty"` + StartTime *int64 `protobuf:"varint,3,opt,name=start_time" json:"start_time,omitempty"` + EndTime *int64 `protobuf:"varint,4,opt,name=end_time" json:"end_time,omitempty"` + Offset *LogOffset `protobuf:"bytes,5,opt,name=offset" json:"offset,omitempty"` + RequestId [][]byte `protobuf:"bytes,6,rep,name=request_id" json:"request_id,omitempty"` + MinimumLogLevel *int32 `protobuf:"varint,7,opt,name=minimum_log_level" json:"minimum_log_level,omitempty"` + IncludeIncomplete *bool `protobuf:"varint,8,opt,name=include_incomplete" json:"include_incomplete,omitempty"` + Count *int64 `protobuf:"varint,9,opt,name=count" json:"count,omitempty"` + CombinedLogRegex *string `protobuf:"bytes,14,opt,name=combined_log_regex" json:"combined_log_regex,omitempty"` + HostRegex *string `protobuf:"bytes,15,opt,name=host_regex" json:"host_regex,omitempty"` + ReplicaIndex *int32 `protobuf:"varint,16,opt,name=replica_index" json:"replica_index,omitempty"` + IncludeAppLogs *bool `protobuf:"varint,10,opt,name=include_app_logs" json:"include_app_logs,omitempty"` + AppLogsPerRequest *int32 `protobuf:"varint,17,opt,name=app_logs_per_request" json:"app_logs_per_request,omitempty"` + IncludeHost *bool `protobuf:"varint,11,opt,name=include_host" json:"include_host,omitempty"` + IncludeAll *bool `protobuf:"varint,12,opt,name=include_all" json:"include_all,omitempty"` + CacheIterator *bool `protobuf:"varint,13,opt,name=cache_iterator" json:"cache_iterator,omitempty"` + NumShards *int32 `protobuf:"varint,18,opt,name=num_shards" json:"num_shards,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *LogReadRequest) Reset() { *m = LogReadRequest{} } +func (m *LogReadRequest) String() string { return proto.CompactTextString(m) } +func (*LogReadRequest) ProtoMessage() {} + +func (m *LogReadRequest) GetAppId() string { + if m != nil && m.AppId != nil { + return *m.AppId + } + return "" +} + +func (m *LogReadRequest) GetVersionId() []string { + if m != nil { + return m.VersionId + } + return nil +} + +func (m *LogReadRequest) GetModuleVersion() []*LogModuleVersion { + if m != nil { + return m.ModuleVersion + } + return nil +} + +func (m *LogReadRequest) GetStartTime() int64 { + if m != nil && m.StartTime != nil { + return *m.StartTime + } + return 0 +} + +func (m *LogReadRequest) GetEndTime() int64 { + if m != nil && m.EndTime != nil { + return *m.EndTime + } + return 0 +} + +func (m *LogReadRequest) GetOffset() *LogOffset { + if m != nil { + return m.Offset + } + return nil +} + +func (m *LogReadRequest) GetRequestId() [][]byte { + if m != nil { + return m.RequestId + } + return nil +} + +func (m *LogReadRequest) GetMinimumLogLevel() int32 { + if m != nil && m.MinimumLogLevel != nil { + return *m.MinimumLogLevel + } + return 0 +} + +func (m *LogReadRequest) GetIncludeIncomplete() bool { + if m != nil && m.IncludeIncomplete != nil { + return *m.IncludeIncomplete + } + return false +} + +func (m *LogReadRequest) GetCount() int64 { + if m != nil && m.Count != nil { + return *m.Count + } + return 0 +} + +func (m *LogReadRequest) GetCombinedLogRegex() string { + if m != nil && m.CombinedLogRegex != nil { + return *m.CombinedLogRegex + } + return 
"" +} + +func (m *LogReadRequest) GetHostRegex() string { + if m != nil && m.HostRegex != nil { + return *m.HostRegex + } + return "" +} + +func (m *LogReadRequest) GetReplicaIndex() int32 { + if m != nil && m.ReplicaIndex != nil { + return *m.ReplicaIndex + } + return 0 +} + +func (m *LogReadRequest) GetIncludeAppLogs() bool { + if m != nil && m.IncludeAppLogs != nil { + return *m.IncludeAppLogs + } + return false +} + +func (m *LogReadRequest) GetAppLogsPerRequest() int32 { + if m != nil && m.AppLogsPerRequest != nil { + return *m.AppLogsPerRequest + } + return 0 +} + +func (m *LogReadRequest) GetIncludeHost() bool { + if m != nil && m.IncludeHost != nil { + return *m.IncludeHost + } + return false +} + +func (m *LogReadRequest) GetIncludeAll() bool { + if m != nil && m.IncludeAll != nil { + return *m.IncludeAll + } + return false +} + +func (m *LogReadRequest) GetCacheIterator() bool { + if m != nil && m.CacheIterator != nil { + return *m.CacheIterator + } + return false +} + +func (m *LogReadRequest) GetNumShards() int32 { + if m != nil && m.NumShards != nil { + return *m.NumShards + } + return 0 +} + +type LogReadResponse struct { + Log []*RequestLog `protobuf:"bytes,1,rep,name=log" json:"log,omitempty"` + Offset *LogOffset `protobuf:"bytes,2,opt,name=offset" json:"offset,omitempty"` + LastEndTime *int64 `protobuf:"varint,3,opt,name=last_end_time" json:"last_end_time,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *LogReadResponse) Reset() { *m = LogReadResponse{} } +func (m *LogReadResponse) String() string { return proto.CompactTextString(m) } +func (*LogReadResponse) ProtoMessage() {} + +func (m *LogReadResponse) GetLog() []*RequestLog { + if m != nil { + return m.Log + } + return nil +} + +func (m *LogReadResponse) GetOffset() *LogOffset { + if m != nil { + return m.Offset + } + return nil +} + +func (m *LogReadResponse) GetLastEndTime() int64 { + if m != nil && m.LastEndTime != nil { + return *m.LastEndTime + } + return 0 +} + +type LogUsageRecord struct { + VersionId *string `protobuf:"bytes,1,opt,name=version_id" json:"version_id,omitempty"` + StartTime *int32 `protobuf:"varint,2,opt,name=start_time" json:"start_time,omitempty"` + EndTime *int32 `protobuf:"varint,3,opt,name=end_time" json:"end_time,omitempty"` + Count *int64 `protobuf:"varint,4,opt,name=count" json:"count,omitempty"` + TotalSize *int64 `protobuf:"varint,5,opt,name=total_size" json:"total_size,omitempty"` + Records *int32 `protobuf:"varint,6,opt,name=records" json:"records,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *LogUsageRecord) Reset() { *m = LogUsageRecord{} } +func (m *LogUsageRecord) String() string { return proto.CompactTextString(m) } +func (*LogUsageRecord) ProtoMessage() {} + +func (m *LogUsageRecord) GetVersionId() string { + if m != nil && m.VersionId != nil { + return *m.VersionId + } + return "" +} + +func (m *LogUsageRecord) GetStartTime() int32 { + if m != nil && m.StartTime != nil { + return *m.StartTime + } + return 0 +} + +func (m *LogUsageRecord) GetEndTime() int32 { + if m != nil && m.EndTime != nil { + return *m.EndTime + } + return 0 +} + +func (m *LogUsageRecord) GetCount() int64 { + if m != nil && m.Count != nil { + return *m.Count + } + return 0 +} + +func (m *LogUsageRecord) GetTotalSize() int64 { + if m != nil && m.TotalSize != nil { + return *m.TotalSize + } + return 0 +} + +func (m *LogUsageRecord) GetRecords() int32 { + if m != nil && m.Records != nil { + return *m.Records + } + return 0 +} + +type LogUsageRequest struct { + AppId *string 
`protobuf:"bytes,1,req,name=app_id" json:"app_id,omitempty"` + VersionId []string `protobuf:"bytes,2,rep,name=version_id" json:"version_id,omitempty"` + StartTime *int32 `protobuf:"varint,3,opt,name=start_time" json:"start_time,omitempty"` + EndTime *int32 `protobuf:"varint,4,opt,name=end_time" json:"end_time,omitempty"` + ResolutionHours *uint32 `protobuf:"varint,5,opt,name=resolution_hours,def=1" json:"resolution_hours,omitempty"` + CombineVersions *bool `protobuf:"varint,6,opt,name=combine_versions" json:"combine_versions,omitempty"` + UsageVersion *int32 `protobuf:"varint,7,opt,name=usage_version" json:"usage_version,omitempty"` + VersionsOnly *bool `protobuf:"varint,8,opt,name=versions_only" json:"versions_only,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *LogUsageRequest) Reset() { *m = LogUsageRequest{} } +func (m *LogUsageRequest) String() string { return proto.CompactTextString(m) } +func (*LogUsageRequest) ProtoMessage() {} + +const Default_LogUsageRequest_ResolutionHours uint32 = 1 + +func (m *LogUsageRequest) GetAppId() string { + if m != nil && m.AppId != nil { + return *m.AppId + } + return "" +} + +func (m *LogUsageRequest) GetVersionId() []string { + if m != nil { + return m.VersionId + } + return nil +} + +func (m *LogUsageRequest) GetStartTime() int32 { + if m != nil && m.StartTime != nil { + return *m.StartTime + } + return 0 +} + +func (m *LogUsageRequest) GetEndTime() int32 { + if m != nil && m.EndTime != nil { + return *m.EndTime + } + return 0 +} + +func (m *LogUsageRequest) GetResolutionHours() uint32 { + if m != nil && m.ResolutionHours != nil { + return *m.ResolutionHours + } + return Default_LogUsageRequest_ResolutionHours +} + +func (m *LogUsageRequest) GetCombineVersions() bool { + if m != nil && m.CombineVersions != nil { + return *m.CombineVersions + } + return false +} + +func (m *LogUsageRequest) GetUsageVersion() int32 { + if m != nil && m.UsageVersion != nil { + return *m.UsageVersion + } + return 0 +} + +func (m *LogUsageRequest) GetVersionsOnly() bool { + if m != nil && m.VersionsOnly != nil { + return *m.VersionsOnly + } + return false +} + +type LogUsageResponse struct { + Usage []*LogUsageRecord `protobuf:"bytes,1,rep,name=usage" json:"usage,omitempty"` + Summary *LogUsageRecord `protobuf:"bytes,2,opt,name=summary" json:"summary,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *LogUsageResponse) Reset() { *m = LogUsageResponse{} } +func (m *LogUsageResponse) String() string { return proto.CompactTextString(m) } +func (*LogUsageResponse) ProtoMessage() {} + +func (m *LogUsageResponse) GetUsage() []*LogUsageRecord { + if m != nil { + return m.Usage + } + return nil +} + +func (m *LogUsageResponse) GetSummary() *LogUsageRecord { + if m != nil { + return m.Summary + } + return nil +} + +func init() { +} diff --git a/vendor/github.com/docker/distribution/vendor/google.golang.org/appengine/internal/log/log_service.proto b/vendor/github.com/docker/distribution/vendor/google.golang.org/appengine/internal/log/log_service.proto new file mode 100644 index 000000000..8981dc475 --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/google.golang.org/appengine/internal/log/log_service.proto @@ -0,0 +1,150 @@ +syntax = "proto2"; +option go_package = "log"; + +package appengine; + +message LogServiceError { + enum ErrorCode { + OK = 0; + INVALID_REQUEST = 1; + STORAGE_ERROR = 2; + } +} + +message UserAppLogLine { + required int64 timestamp_usec = 1; + required int64 level = 2; + required string message = 3; +} + +message 
UserAppLogGroup { + repeated UserAppLogLine log_line = 2; +} + +message FlushRequest { + optional bytes logs = 1; +} + +message SetStatusRequest { + required string status = 1; +} + + +message LogOffset { + optional bytes request_id = 1; +} + +message LogLine { + required int64 time = 1; + required int32 level = 2; + required string log_message = 3; +} + +message RequestLog { + required string app_id = 1; + optional string module_id = 37 [default="default"]; + required string version_id = 2; + required bytes request_id = 3; + optional LogOffset offset = 35; + required string ip = 4; + optional string nickname = 5; + required int64 start_time = 6; + required int64 end_time = 7; + required int64 latency = 8; + required int64 mcycles = 9; + required string method = 10; + required string resource = 11; + required string http_version = 12; + required int32 status = 13; + required int64 response_size = 14; + optional string referrer = 15; + optional string user_agent = 16; + required string url_map_entry = 17; + required string combined = 18; + optional int64 api_mcycles = 19; + optional string host = 20; + optional double cost = 21; + + optional string task_queue_name = 22; + optional string task_name = 23; + + optional bool was_loading_request = 24; + optional int64 pending_time = 25; + optional int32 replica_index = 26 [default = -1]; + optional bool finished = 27 [default = true]; + optional bytes clone_key = 28; + + repeated LogLine line = 29; + + optional bool lines_incomplete = 36; + optional bytes app_engine_release = 38; + + optional int32 exit_reason = 30; + optional bool was_throttled_for_time = 31; + optional bool was_throttled_for_requests = 32; + optional int64 throttled_time = 33; + + optional bytes server_name = 34; +} + +message LogModuleVersion { + optional string module_id = 1 [default="default"]; + optional string version_id = 2; +} + +message LogReadRequest { + required string app_id = 1; + repeated string version_id = 2; + repeated LogModuleVersion module_version = 19; + + optional int64 start_time = 3; + optional int64 end_time = 4; + optional LogOffset offset = 5; + repeated bytes request_id = 6; + + optional int32 minimum_log_level = 7; + optional bool include_incomplete = 8; + optional int64 count = 9; + + optional string combined_log_regex = 14; + optional string host_regex = 15; + optional int32 replica_index = 16; + + optional bool include_app_logs = 10; + optional int32 app_logs_per_request = 17; + optional bool include_host = 11; + optional bool include_all = 12; + optional bool cache_iterator = 13; + optional int32 num_shards = 18; +} + +message LogReadResponse { + repeated RequestLog log = 1; + optional LogOffset offset = 2; + optional int64 last_end_time = 3; +} + +message LogUsageRecord { + optional string version_id = 1; + optional int32 start_time = 2; + optional int32 end_time = 3; + optional int64 count = 4; + optional int64 total_size = 5; + optional int32 records = 6; +} + +message LogUsageRequest { + required string app_id = 1; + repeated string version_id = 2; + optional int32 start_time = 3; + optional int32 end_time = 4; + optional uint32 resolution_hours = 5 [default = 1]; + optional bool combine_versions = 6; + optional int32 usage_version = 7; + optional bool versions_only = 8; +} + +message LogUsageResponse { + repeated LogUsageRecord usage = 1; + optional LogUsageRecord summary = 2; +} diff --git a/vendor/github.com/docker/distribution/vendor/google.golang.org/appengine/internal/metadata.go 
b/vendor/github.com/docker/distribution/vendor/google.golang.org/appengine/internal/metadata.go new file mode 100644 index 000000000..9cc1f71d1 --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/google.golang.org/appengine/internal/metadata.go @@ -0,0 +1,61 @@ +// Copyright 2014 Google Inc. All rights reserved. +// Use of this source code is governed by the Apache 2.0 +// license that can be found in the LICENSE file. + +package internal + +// This file has code for accessing metadata. +// +// References: +// https://cloud.google.com/compute/docs/metadata + +import ( + "fmt" + "io/ioutil" + "log" + "net/http" + "net/url" +) + +const ( + metadataHost = "metadata" + metadataPath = "/computeMetadata/v1/" +) + +var ( + metadataRequestHeaders = http.Header{ + "Metadata-Flavor": []string{"Google"}, + } +) + +// TODO(dsymonds): Do we need to support default values, like Python? +func mustGetMetadata(key string) []byte { + b, err := getMetadata(key) + if err != nil { + log.Fatalf("Metadata fetch failed: %v", err) + } + return b +} + +func getMetadata(key string) ([]byte, error) { + // TODO(dsymonds): May need to use url.Parse to support keys with query args. + req := &http.Request{ + Method: "GET", + URL: &url.URL{ + Scheme: "http", + Host: metadataHost, + Path: metadataPath + key, + }, + Header: metadataRequestHeaders, + Host: metadataHost, + } + resp, err := http.DefaultClient.Do(req) + if err != nil { + return nil, err + } + defer resp.Body.Close() + if resp.StatusCode != 200 { + return nil, fmt.Errorf("metadata server returned HTTP %d", resp.StatusCode) + } + return ioutil.ReadAll(resp.Body) +} diff --git a/vendor/github.com/docker/distribution/vendor/google.golang.org/appengine/internal/modules/modules_service.pb.go b/vendor/github.com/docker/distribution/vendor/google.golang.org/appengine/internal/modules/modules_service.pb.go new file mode 100644 index 000000000..a0145ed31 --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/google.golang.org/appengine/internal/modules/modules_service.pb.go @@ -0,0 +1,375 @@ +// Code generated by protoc-gen-go. +// source: google.golang.org/appengine/internal/modules/modules_service.proto +// DO NOT EDIT! + +/* +Package modules is a generated protocol buffer package. + +It is generated from these files: + google.golang.org/appengine/internal/modules/modules_service.proto + +It has these top-level messages: + ModulesServiceError + GetModulesRequest + GetModulesResponse + GetVersionsRequest + GetVersionsResponse + GetDefaultVersionRequest + GetDefaultVersionResponse + GetNumInstancesRequest + GetNumInstancesResponse + SetNumInstancesRequest + SetNumInstancesResponse + StartModuleRequest + StartModuleResponse + StopModuleRequest + StopModuleResponse + GetHostnameRequest + GetHostnameResponse +*/ +package modules + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" + +// Reference imports to suppress errors if they are not otherwise used. 
+var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +type ModulesServiceError_ErrorCode int32 + +const ( + ModulesServiceError_OK ModulesServiceError_ErrorCode = 0 + ModulesServiceError_INVALID_MODULE ModulesServiceError_ErrorCode = 1 + ModulesServiceError_INVALID_VERSION ModulesServiceError_ErrorCode = 2 + ModulesServiceError_INVALID_INSTANCES ModulesServiceError_ErrorCode = 3 + ModulesServiceError_TRANSIENT_ERROR ModulesServiceError_ErrorCode = 4 + ModulesServiceError_UNEXPECTED_STATE ModulesServiceError_ErrorCode = 5 +) + +var ModulesServiceError_ErrorCode_name = map[int32]string{ + 0: "OK", + 1: "INVALID_MODULE", + 2: "INVALID_VERSION", + 3: "INVALID_INSTANCES", + 4: "TRANSIENT_ERROR", + 5: "UNEXPECTED_STATE", +} +var ModulesServiceError_ErrorCode_value = map[string]int32{ + "OK": 0, + "INVALID_MODULE": 1, + "INVALID_VERSION": 2, + "INVALID_INSTANCES": 3, + "TRANSIENT_ERROR": 4, + "UNEXPECTED_STATE": 5, +} + +func (x ModulesServiceError_ErrorCode) Enum() *ModulesServiceError_ErrorCode { + p := new(ModulesServiceError_ErrorCode) + *p = x + return p +} +func (x ModulesServiceError_ErrorCode) String() string { + return proto.EnumName(ModulesServiceError_ErrorCode_name, int32(x)) +} +func (x *ModulesServiceError_ErrorCode) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(ModulesServiceError_ErrorCode_value, data, "ModulesServiceError_ErrorCode") + if err != nil { + return err + } + *x = ModulesServiceError_ErrorCode(value) + return nil +} + +type ModulesServiceError struct { + XXX_unrecognized []byte `json:"-"` +} + +func (m *ModulesServiceError) Reset() { *m = ModulesServiceError{} } +func (m *ModulesServiceError) String() string { return proto.CompactTextString(m) } +func (*ModulesServiceError) ProtoMessage() {} + +type GetModulesRequest struct { + XXX_unrecognized []byte `json:"-"` +} + +func (m *GetModulesRequest) Reset() { *m = GetModulesRequest{} } +func (m *GetModulesRequest) String() string { return proto.CompactTextString(m) } +func (*GetModulesRequest) ProtoMessage() {} + +type GetModulesResponse struct { + Module []string `protobuf:"bytes,1,rep,name=module" json:"module,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *GetModulesResponse) Reset() { *m = GetModulesResponse{} } +func (m *GetModulesResponse) String() string { return proto.CompactTextString(m) } +func (*GetModulesResponse) ProtoMessage() {} + +func (m *GetModulesResponse) GetModule() []string { + if m != nil { + return m.Module + } + return nil +} + +type GetVersionsRequest struct { + Module *string `protobuf:"bytes,1,opt,name=module" json:"module,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *GetVersionsRequest) Reset() { *m = GetVersionsRequest{} } +func (m *GetVersionsRequest) String() string { return proto.CompactTextString(m) } +func (*GetVersionsRequest) ProtoMessage() {} + +func (m *GetVersionsRequest) GetModule() string { + if m != nil && m.Module != nil { + return *m.Module + } + return "" +} + +type GetVersionsResponse struct { + Version []string `protobuf:"bytes,1,rep,name=version" json:"version,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *GetVersionsResponse) Reset() { *m = GetVersionsResponse{} } +func (m *GetVersionsResponse) String() string { return proto.CompactTextString(m) } +func (*GetVersionsResponse) ProtoMessage() {} + +func (m *GetVersionsResponse) GetVersion() []string { + if m != nil { + return m.Version + } + return nil +} + +type GetDefaultVersionRequest struct { + Module *string 
`protobuf:"bytes,1,opt,name=module" json:"module,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *GetDefaultVersionRequest) Reset() { *m = GetDefaultVersionRequest{} } +func (m *GetDefaultVersionRequest) String() string { return proto.CompactTextString(m) } +func (*GetDefaultVersionRequest) ProtoMessage() {} + +func (m *GetDefaultVersionRequest) GetModule() string { + if m != nil && m.Module != nil { + return *m.Module + } + return "" +} + +type GetDefaultVersionResponse struct { + Version *string `protobuf:"bytes,1,req,name=version" json:"version,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *GetDefaultVersionResponse) Reset() { *m = GetDefaultVersionResponse{} } +func (m *GetDefaultVersionResponse) String() string { return proto.CompactTextString(m) } +func (*GetDefaultVersionResponse) ProtoMessage() {} + +func (m *GetDefaultVersionResponse) GetVersion() string { + if m != nil && m.Version != nil { + return *m.Version + } + return "" +} + +type GetNumInstancesRequest struct { + Module *string `protobuf:"bytes,1,opt,name=module" json:"module,omitempty"` + Version *string `protobuf:"bytes,2,opt,name=version" json:"version,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *GetNumInstancesRequest) Reset() { *m = GetNumInstancesRequest{} } +func (m *GetNumInstancesRequest) String() string { return proto.CompactTextString(m) } +func (*GetNumInstancesRequest) ProtoMessage() {} + +func (m *GetNumInstancesRequest) GetModule() string { + if m != nil && m.Module != nil { + return *m.Module + } + return "" +} + +func (m *GetNumInstancesRequest) GetVersion() string { + if m != nil && m.Version != nil { + return *m.Version + } + return "" +} + +type GetNumInstancesResponse struct { + Instances *int64 `protobuf:"varint,1,req,name=instances" json:"instances,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *GetNumInstancesResponse) Reset() { *m = GetNumInstancesResponse{} } +func (m *GetNumInstancesResponse) String() string { return proto.CompactTextString(m) } +func (*GetNumInstancesResponse) ProtoMessage() {} + +func (m *GetNumInstancesResponse) GetInstances() int64 { + if m != nil && m.Instances != nil { + return *m.Instances + } + return 0 +} + +type SetNumInstancesRequest struct { + Module *string `protobuf:"bytes,1,opt,name=module" json:"module,omitempty"` + Version *string `protobuf:"bytes,2,opt,name=version" json:"version,omitempty"` + Instances *int64 `protobuf:"varint,3,req,name=instances" json:"instances,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *SetNumInstancesRequest) Reset() { *m = SetNumInstancesRequest{} } +func (m *SetNumInstancesRequest) String() string { return proto.CompactTextString(m) } +func (*SetNumInstancesRequest) ProtoMessage() {} + +func (m *SetNumInstancesRequest) GetModule() string { + if m != nil && m.Module != nil { + return *m.Module + } + return "" +} + +func (m *SetNumInstancesRequest) GetVersion() string { + if m != nil && m.Version != nil { + return *m.Version + } + return "" +} + +func (m *SetNumInstancesRequest) GetInstances() int64 { + if m != nil && m.Instances != nil { + return *m.Instances + } + return 0 +} + +type SetNumInstancesResponse struct { + XXX_unrecognized []byte `json:"-"` +} + +func (m *SetNumInstancesResponse) Reset() { *m = SetNumInstancesResponse{} } +func (m *SetNumInstancesResponse) String() string { return proto.CompactTextString(m) } +func (*SetNumInstancesResponse) ProtoMessage() {} + +type StartModuleRequest struct { + Module *string 
`protobuf:"bytes,1,req,name=module" json:"module,omitempty"` + Version *string `protobuf:"bytes,2,req,name=version" json:"version,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *StartModuleRequest) Reset() { *m = StartModuleRequest{} } +func (m *StartModuleRequest) String() string { return proto.CompactTextString(m) } +func (*StartModuleRequest) ProtoMessage() {} + +func (m *StartModuleRequest) GetModule() string { + if m != nil && m.Module != nil { + return *m.Module + } + return "" +} + +func (m *StartModuleRequest) GetVersion() string { + if m != nil && m.Version != nil { + return *m.Version + } + return "" +} + +type StartModuleResponse struct { + XXX_unrecognized []byte `json:"-"` +} + +func (m *StartModuleResponse) Reset() { *m = StartModuleResponse{} } +func (m *StartModuleResponse) String() string { return proto.CompactTextString(m) } +func (*StartModuleResponse) ProtoMessage() {} + +type StopModuleRequest struct { + Module *string `protobuf:"bytes,1,opt,name=module" json:"module,omitempty"` + Version *string `protobuf:"bytes,2,opt,name=version" json:"version,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *StopModuleRequest) Reset() { *m = StopModuleRequest{} } +func (m *StopModuleRequest) String() string { return proto.CompactTextString(m) } +func (*StopModuleRequest) ProtoMessage() {} + +func (m *StopModuleRequest) GetModule() string { + if m != nil && m.Module != nil { + return *m.Module + } + return "" +} + +func (m *StopModuleRequest) GetVersion() string { + if m != nil && m.Version != nil { + return *m.Version + } + return "" +} + +type StopModuleResponse struct { + XXX_unrecognized []byte `json:"-"` +} + +func (m *StopModuleResponse) Reset() { *m = StopModuleResponse{} } +func (m *StopModuleResponse) String() string { return proto.CompactTextString(m) } +func (*StopModuleResponse) ProtoMessage() {} + +type GetHostnameRequest struct { + Module *string `protobuf:"bytes,1,opt,name=module" json:"module,omitempty"` + Version *string `protobuf:"bytes,2,opt,name=version" json:"version,omitempty"` + Instance *string `protobuf:"bytes,3,opt,name=instance" json:"instance,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *GetHostnameRequest) Reset() { *m = GetHostnameRequest{} } +func (m *GetHostnameRequest) String() string { return proto.CompactTextString(m) } +func (*GetHostnameRequest) ProtoMessage() {} + +func (m *GetHostnameRequest) GetModule() string { + if m != nil && m.Module != nil { + return *m.Module + } + return "" +} + +func (m *GetHostnameRequest) GetVersion() string { + if m != nil && m.Version != nil { + return *m.Version + } + return "" +} + +func (m *GetHostnameRequest) GetInstance() string { + if m != nil && m.Instance != nil { + return *m.Instance + } + return "" +} + +type GetHostnameResponse struct { + Hostname *string `protobuf:"bytes,1,req,name=hostname" json:"hostname,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *GetHostnameResponse) Reset() { *m = GetHostnameResponse{} } +func (m *GetHostnameResponse) String() string { return proto.CompactTextString(m) } +func (*GetHostnameResponse) ProtoMessage() {} + +func (m *GetHostnameResponse) GetHostname() string { + if m != nil && m.Hostname != nil { + return *m.Hostname + } + return "" +} + +func init() { +} diff --git a/vendor/github.com/docker/distribution/vendor/google.golang.org/appengine/internal/modules/modules_service.proto b/vendor/github.com/docker/distribution/vendor/google.golang.org/appengine/internal/modules/modules_service.proto new file mode 
100644 index 000000000..d29f0065a --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/google.golang.org/appengine/internal/modules/modules_service.proto @@ -0,0 +1,80 @@ +syntax = "proto2"; +option go_package = "modules"; + +package appengine; + +message ModulesServiceError { + enum ErrorCode { + OK = 0; + INVALID_MODULE = 1; + INVALID_VERSION = 2; + INVALID_INSTANCES = 3; + TRANSIENT_ERROR = 4; + UNEXPECTED_STATE = 5; + } +} + +message GetModulesRequest { +} + +message GetModulesResponse { + repeated string module = 1; +} + +message GetVersionsRequest { + optional string module = 1; +} + +message GetVersionsResponse { + repeated string version = 1; +} + +message GetDefaultVersionRequest { + optional string module = 1; +} + +message GetDefaultVersionResponse { + required string version = 1; +} + +message GetNumInstancesRequest { + optional string module = 1; + optional string version = 2; +} + +message GetNumInstancesResponse { + required int64 instances = 1; +} + +message SetNumInstancesRequest { + optional string module = 1; + optional string version = 2; + required int64 instances = 3; +} + +message SetNumInstancesResponse {} + +message StartModuleRequest { + required string module = 1; + required string version = 2; +} + +message StartModuleResponse {} + +message StopModuleRequest { + optional string module = 1; + optional string version = 2; +} + +message StopModuleResponse {} + +message GetHostnameRequest { + optional string module = 1; + optional string version = 2; + optional string instance = 3; +} + +message GetHostnameResponse { + required string hostname = 1; +} + diff --git a/vendor/github.com/docker/distribution/vendor/google.golang.org/appengine/internal/net.go b/vendor/github.com/docker/distribution/vendor/google.golang.org/appengine/internal/net.go new file mode 100644 index 000000000..3b94cf0c6 --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/google.golang.org/appengine/internal/net.go @@ -0,0 +1,56 @@ +// Copyright 2014 Google Inc. All rights reserved. +// Use of this source code is governed by the Apache 2.0 +// license that can be found in the LICENSE file. + +package internal + +// This file implements a network dialer that limits the number of concurrent connections. +// It is only used for API calls. + +import ( + "log" + "net" + "runtime" + "sync" + "time" +) + +var limitSem = make(chan int, 100) // TODO(dsymonds): Use environment variable. + +func limitRelease() { + // non-blocking + select { + case <-limitSem: + default: + // This should not normally happen. + log.Print("appengine: unbalanced limitSem release!") + } +} + +func limitDial(network, addr string) (net.Conn, error) { + limitSem <- 1 + + // Dial with a timeout in case the API host is MIA. + // The connection should normally be very fast. 
+ conn, err := net.DialTimeout(network, addr, 500*time.Millisecond) + if err != nil { + limitRelease() + return nil, err + } + lc := &limitConn{Conn: conn} + runtime.SetFinalizer(lc, (*limitConn).Close) // shouldn't usually be required + return lc, nil +} + +type limitConn struct { + close sync.Once + net.Conn +} + +func (lc *limitConn) Close() error { + defer lc.close.Do(func() { + limitRelease() + runtime.SetFinalizer(lc, nil) + }) + return lc.Conn.Close() +} diff --git a/vendor/github.com/docker/distribution/vendor/google.golang.org/appengine/internal/regen.sh b/vendor/github.com/docker/distribution/vendor/google.golang.org/appengine/internal/regen.sh new file mode 100644 index 000000000..2fdb546a6 --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/google.golang.org/appengine/internal/regen.sh @@ -0,0 +1,40 @@ +#!/bin/bash -e +# +# This script rebuilds the generated code for the protocol buffers. +# To run this you will need protoc and goprotobuf installed; +# see https://github.com/golang/protobuf for instructions. + +PKG=google.golang.org/appengine + +function die() { + echo 1>&2 $* + exit 1 +} + +# Sanity check that the right tools are accessible. +for tool in go protoc protoc-gen-go; do + q=$(which $tool) || die "didn't find $tool" + echo 1>&2 "$tool: $q" +done + +echo -n 1>&2 "finding package dir... " +pkgdir=$(go list -f '{{.Dir}}' $PKG) +echo 1>&2 $pkgdir +base=$(echo $pkgdir | sed "s,/$PKG\$,,") +echo 1>&2 "base: $base" +cd $base + +# Run protoc once per package. +for dir in $(find $PKG/internal -name '*.proto' | xargs dirname | sort | uniq); do + echo 1>&2 "* $dir" + protoc --go_out=. $dir/*.proto +done + +for f in $(find $PKG/internal -name '*.pb.go'); do + # Remove proto.RegisterEnum calls. + # These cause duplicate registration panics when these packages + # are used on classic App Engine. proto.RegisterEnum only affects + # parsing the text format; we don't care about that. + # https://code.google.com/p/googleappengine/issues/detail?id=11670#c17 + sed -i '/proto.RegisterEnum/d' $f +done diff --git a/vendor/github.com/docker/distribution/vendor/google.golang.org/appengine/internal/remote_api/remote_api.pb.go b/vendor/github.com/docker/distribution/vendor/google.golang.org/appengine/internal/remote_api/remote_api.pb.go new file mode 100644 index 000000000..526bd39e6 --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/google.golang.org/appengine/internal/remote_api/remote_api.pb.go @@ -0,0 +1,231 @@ +// Code generated by protoc-gen-go. +// source: google.golang.org/appengine/internal/remote_api/remote_api.proto +// DO NOT EDIT! + +/* +Package remote_api is a generated protocol buffer package. + +It is generated from these files: + google.golang.org/appengine/internal/remote_api/remote_api.proto + +It has these top-level messages: + Request + ApplicationError + RpcError + Response +*/ +package remote_api + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" + +// Reference imports to suppress errors if they are not otherwise used. 
+var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +type RpcError_ErrorCode int32 + +const ( + RpcError_UNKNOWN RpcError_ErrorCode = 0 + RpcError_CALL_NOT_FOUND RpcError_ErrorCode = 1 + RpcError_PARSE_ERROR RpcError_ErrorCode = 2 + RpcError_SECURITY_VIOLATION RpcError_ErrorCode = 3 + RpcError_OVER_QUOTA RpcError_ErrorCode = 4 + RpcError_REQUEST_TOO_LARGE RpcError_ErrorCode = 5 + RpcError_CAPABILITY_DISABLED RpcError_ErrorCode = 6 + RpcError_FEATURE_DISABLED RpcError_ErrorCode = 7 + RpcError_BAD_REQUEST RpcError_ErrorCode = 8 + RpcError_RESPONSE_TOO_LARGE RpcError_ErrorCode = 9 + RpcError_CANCELLED RpcError_ErrorCode = 10 + RpcError_REPLAY_ERROR RpcError_ErrorCode = 11 + RpcError_DEADLINE_EXCEEDED RpcError_ErrorCode = 12 +) + +var RpcError_ErrorCode_name = map[int32]string{ + 0: "UNKNOWN", + 1: "CALL_NOT_FOUND", + 2: "PARSE_ERROR", + 3: "SECURITY_VIOLATION", + 4: "OVER_QUOTA", + 5: "REQUEST_TOO_LARGE", + 6: "CAPABILITY_DISABLED", + 7: "FEATURE_DISABLED", + 8: "BAD_REQUEST", + 9: "RESPONSE_TOO_LARGE", + 10: "CANCELLED", + 11: "REPLAY_ERROR", + 12: "DEADLINE_EXCEEDED", +} +var RpcError_ErrorCode_value = map[string]int32{ + "UNKNOWN": 0, + "CALL_NOT_FOUND": 1, + "PARSE_ERROR": 2, + "SECURITY_VIOLATION": 3, + "OVER_QUOTA": 4, + "REQUEST_TOO_LARGE": 5, + "CAPABILITY_DISABLED": 6, + "FEATURE_DISABLED": 7, + "BAD_REQUEST": 8, + "RESPONSE_TOO_LARGE": 9, + "CANCELLED": 10, + "REPLAY_ERROR": 11, + "DEADLINE_EXCEEDED": 12, +} + +func (x RpcError_ErrorCode) Enum() *RpcError_ErrorCode { + p := new(RpcError_ErrorCode) + *p = x + return p +} +func (x RpcError_ErrorCode) String() string { + return proto.EnumName(RpcError_ErrorCode_name, int32(x)) +} +func (x *RpcError_ErrorCode) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(RpcError_ErrorCode_value, data, "RpcError_ErrorCode") + if err != nil { + return err + } + *x = RpcError_ErrorCode(value) + return nil +} + +type Request struct { + ServiceName *string `protobuf:"bytes,2,req,name=service_name" json:"service_name,omitempty"` + Method *string `protobuf:"bytes,3,req,name=method" json:"method,omitempty"` + Request []byte `protobuf:"bytes,4,req,name=request" json:"request,omitempty"` + RequestId *string `protobuf:"bytes,5,opt,name=request_id" json:"request_id,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *Request) Reset() { *m = Request{} } +func (m *Request) String() string { return proto.CompactTextString(m) } +func (*Request) ProtoMessage() {} + +func (m *Request) GetServiceName() string { + if m != nil && m.ServiceName != nil { + return *m.ServiceName + } + return "" +} + +func (m *Request) GetMethod() string { + if m != nil && m.Method != nil { + return *m.Method + } + return "" +} + +func (m *Request) GetRequest() []byte { + if m != nil { + return m.Request + } + return nil +} + +func (m *Request) GetRequestId() string { + if m != nil && m.RequestId != nil { + return *m.RequestId + } + return "" +} + +type ApplicationError struct { + Code *int32 `protobuf:"varint,1,req,name=code" json:"code,omitempty"` + Detail *string `protobuf:"bytes,2,req,name=detail" json:"detail,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *ApplicationError) Reset() { *m = ApplicationError{} } +func (m *ApplicationError) String() string { return proto.CompactTextString(m) } +func (*ApplicationError) ProtoMessage() {} + +func (m *ApplicationError) GetCode() int32 { + if m != nil && m.Code != nil { + return *m.Code + } + return 0 +} + +func (m *ApplicationError) GetDetail() string { + if m != nil && m.Detail 
!= nil { + return *m.Detail + } + return "" +} + +type RpcError struct { + Code *int32 `protobuf:"varint,1,req,name=code" json:"code,omitempty"` + Detail *string `protobuf:"bytes,2,opt,name=detail" json:"detail,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *RpcError) Reset() { *m = RpcError{} } +func (m *RpcError) String() string { return proto.CompactTextString(m) } +func (*RpcError) ProtoMessage() {} + +func (m *RpcError) GetCode() int32 { + if m != nil && m.Code != nil { + return *m.Code + } + return 0 +} + +func (m *RpcError) GetDetail() string { + if m != nil && m.Detail != nil { + return *m.Detail + } + return "" +} + +type Response struct { + Response []byte `protobuf:"bytes,1,opt,name=response" json:"response,omitempty"` + Exception []byte `protobuf:"bytes,2,opt,name=exception" json:"exception,omitempty"` + ApplicationError *ApplicationError `protobuf:"bytes,3,opt,name=application_error" json:"application_error,omitempty"` + JavaException []byte `protobuf:"bytes,4,opt,name=java_exception" json:"java_exception,omitempty"` + RpcError *RpcError `protobuf:"bytes,5,opt,name=rpc_error" json:"rpc_error,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *Response) Reset() { *m = Response{} } +func (m *Response) String() string { return proto.CompactTextString(m) } +func (*Response) ProtoMessage() {} + +func (m *Response) GetResponse() []byte { + if m != nil { + return m.Response + } + return nil +} + +func (m *Response) GetException() []byte { + if m != nil { + return m.Exception + } + return nil +} + +func (m *Response) GetApplicationError() *ApplicationError { + if m != nil { + return m.ApplicationError + } + return nil +} + +func (m *Response) GetJavaException() []byte { + if m != nil { + return m.JavaException + } + return nil +} + +func (m *Response) GetRpcError() *RpcError { + if m != nil { + return m.RpcError + } + return nil +} + +func init() { +} diff --git a/vendor/github.com/docker/distribution/vendor/google.golang.org/appengine/internal/remote_api/remote_api.proto b/vendor/github.com/docker/distribution/vendor/google.golang.org/appengine/internal/remote_api/remote_api.proto new file mode 100644 index 000000000..f21763a4e --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/google.golang.org/appengine/internal/remote_api/remote_api.proto @@ -0,0 +1,44 @@ +syntax = "proto2"; +option go_package = "remote_api"; + +package remote_api; + +message Request { + required string service_name = 2; + required string method = 3; + required bytes request = 4; + optional string request_id = 5; +} + +message ApplicationError { + required int32 code = 1; + required string detail = 2; +} + +message RpcError { + enum ErrorCode { + UNKNOWN = 0; + CALL_NOT_FOUND = 1; + PARSE_ERROR = 2; + SECURITY_VIOLATION = 3; + OVER_QUOTA = 4; + REQUEST_TOO_LARGE = 5; + CAPABILITY_DISABLED = 6; + FEATURE_DISABLED = 7; + BAD_REQUEST = 8; + RESPONSE_TOO_LARGE = 9; + CANCELLED = 10; + REPLAY_ERROR = 11; + DEADLINE_EXCEEDED = 12; + } + required int32 code = 1; + optional string detail = 2; +} + +message Response { + optional bytes response = 1; + optional bytes exception = 2; + optional ApplicationError application_error = 3; + optional bytes java_exception = 4; + optional RpcError rpc_error = 5; +} diff --git a/vendor/github.com/docker/distribution/vendor/google.golang.org/appengine/internal/transaction.go b/vendor/github.com/docker/distribution/vendor/google.golang.org/appengine/internal/transaction.go new file mode 100644 index 000000000..28a6d1812 --- /dev/null +++ 
b/vendor/github.com/docker/distribution/vendor/google.golang.org/appengine/internal/transaction.go @@ -0,0 +1,107 @@ +// Copyright 2014 Google Inc. All rights reserved. +// Use of this source code is governed by the Apache 2.0 +// license that can be found in the LICENSE file. + +package internal + +// This file implements hooks for applying datastore transactions. + +import ( + "errors" + "reflect" + + "github.com/golang/protobuf/proto" + netcontext "golang.org/x/net/context" + + basepb "google.golang.org/appengine/internal/base" + pb "google.golang.org/appengine/internal/datastore" +) + +var transactionSetters = make(map[reflect.Type]reflect.Value) + +// RegisterTransactionSetter registers a function that sets transaction information +// in a protocol buffer message. f should be a function with two arguments, +// the first being a protocol buffer type, and the second being *datastore.Transaction. +func RegisterTransactionSetter(f interface{}) { + v := reflect.ValueOf(f) + transactionSetters[v.Type().In(0)] = v +} + +// applyTransaction applies the transaction t to message pb +// by using the relevant setter passed to RegisterTransactionSetter. +func applyTransaction(pb proto.Message, t *pb.Transaction) { + v := reflect.ValueOf(pb) + if f, ok := transactionSetters[v.Type()]; ok { + f.Call([]reflect.Value{v, reflect.ValueOf(t)}) + } +} + +var transactionKey = "used for *Transaction" + +func transactionFromContext(ctx netcontext.Context) *transaction { + t, _ := ctx.Value(&transactionKey).(*transaction) + return t +} + +func withTransaction(ctx netcontext.Context, t *transaction) netcontext.Context { + return netcontext.WithValue(ctx, &transactionKey, t) +} + +type transaction struct { + transaction pb.Transaction + finished bool +} + +var ErrConcurrentTransaction = errors.New("internal: concurrent transaction") + +func RunTransactionOnce(c netcontext.Context, f func(netcontext.Context) error, xg bool) error { + if transactionFromContext(c) != nil { + return errors.New("nested transactions are not supported") + } + + // Begin the transaction. + t := &transaction{} + req := &pb.BeginTransactionRequest{ + App: proto.String(FullyQualifiedAppID(c)), + } + if xg { + req.AllowMultipleEg = proto.Bool(true) + } + if err := Call(c, "datastore_v3", "BeginTransaction", req, &t.transaction); err != nil { + return err + } + + // Call f, rolling back the transaction if f returns a non-nil error, or panics. + // The panic is not recovered. + defer func() { + if t.finished { + return + } + t.finished = true + // Ignore the error return value, since we are already returning a non-nil + // error (or we're panicking). + Call(c, "datastore_v3", "Rollback", &t.transaction, &basepb.VoidProto{}) + }() + if err := f(withTransaction(c, t)); err != nil { + return err + } + t.finished = true + + // Commit the transaction. + res := &pb.CommitResponse{} + err := Call(c, "datastore_v3", "Commit", &t.transaction, res) + if ae, ok := err.(*APIError); ok { + /* TODO: restore this conditional + if appengine.IsDevAppServer() { + */ + // The Python Dev AppServer raises an ApplicationError with error code 2 (which is + // Error.CONCURRENT_TRANSACTION) and message "Concurrency exception.". + if ae.Code == int32(pb.Error_BAD_REQUEST) && ae.Detail == "ApplicationError: 2 Concurrency exception." 
{ + return ErrConcurrentTransaction + } + if ae.Code == int32(pb.Error_CONCURRENT_TRANSACTION) { + return ErrConcurrentTransaction + } + } + return err +} diff --git a/vendor/github.com/docker/distribution/vendor/google.golang.org/appengine/namespace.go b/vendor/github.com/docker/distribution/vendor/google.golang.org/appengine/namespace.go new file mode 100644 index 000000000..21860ca08 --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/google.golang.org/appengine/namespace.go @@ -0,0 +1,25 @@ +// Copyright 2012 Google Inc. All rights reserved. +// Use of this source code is governed by the Apache 2.0 +// license that can be found in the LICENSE file. + +package appengine + +import ( + "fmt" + "regexp" + + "golang.org/x/net/context" + + "google.golang.org/appengine/internal" +) + +// Namespace returns a replacement context that operates within the given namespace. +func Namespace(c context.Context, namespace string) (context.Context, error) { + if !validNamespace.MatchString(namespace) { + return nil, fmt.Errorf("appengine: namespace %q does not match /%s/", namespace, validNamespace) + } + return internal.NamespacedContext(c, namespace), nil +} + +// validNamespace matches valid namespace names. +var validNamespace = regexp.MustCompile(`^[0-9A-Za-z._-]{0,100}$`) diff --git a/vendor/github.com/docker/distribution/vendor/google.golang.org/appengine/timeout.go b/vendor/github.com/docker/distribution/vendor/google.golang.org/appengine/timeout.go new file mode 100644 index 000000000..05642a992 --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/google.golang.org/appengine/timeout.go @@ -0,0 +1,20 @@ +// Copyright 2013 Google Inc. All rights reserved. +// Use of this source code is governed by the Apache 2.0 +// license that can be found in the LICENSE file. + +package appengine + +import "golang.org/x/net/context" + +// IsTimeoutError reports whether err is a timeout error. +func IsTimeoutError(err error) bool { + if err == context.DeadlineExceeded { + return true + } + if t, ok := err.(interface { + IsTimeout() bool + }); ok { + return t.IsTimeout() + } + return false +} diff --git a/vendor/github.com/docker/distribution/vendor/google.golang.org/cloud/.travis.yml b/vendor/github.com/docker/distribution/vendor/google.golang.org/cloud/.travis.yml new file mode 100644 index 000000000..c037df0de --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/google.golang.org/cloud/.travis.yml @@ -0,0 +1,11 @@ +sudo: false +language: go +go: +- 1.4 +- 1.5 +install: +- go get -v google.golang.org/cloud/... +script: +- openssl aes-256-cbc -K $encrypted_912ff8fa81ad_key -iv $encrypted_912ff8fa81ad_iv -in key.json.enc -out key.json -d +- GCLOUD_TESTS_GOLANG_PROJECT_ID="dulcet-port-762" GCLOUD_TESTS_GOLANG_KEY="$(pwd)/key.json" + go test -v -tags=integration google.golang.org/cloud/... diff --git a/vendor/github.com/docker/distribution/vendor/google.golang.org/cloud/AUTHORS b/vendor/github.com/docker/distribution/vendor/google.golang.org/cloud/AUTHORS new file mode 100644 index 000000000..3da443dc9 --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/google.golang.org/cloud/AUTHORS @@ -0,0 +1,12 @@ +# This is the official list of cloud authors for copyright purposes. +# This file is distinct from the CONTRIBUTORS files. +# See the latter for an explanation. + +# Names should be added to this file as: +# Name or Organization +# The email address is not required for organizations. + +Google Inc. +Palm Stone Games, Inc. 
+Péter Szilágyi +Tyler Treat diff --git a/vendor/github.com/docker/distribution/vendor/google.golang.org/cloud/CONTRIBUTING.md b/vendor/github.com/docker/distribution/vendor/google.golang.org/cloud/CONTRIBUTING.md new file mode 100644 index 000000000..9a1cab287 --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/google.golang.org/cloud/CONTRIBUTING.md @@ -0,0 +1,114 @@ +# Contributing + +1. Sign one of the contributor license agreements below. +1. `go get golang.org/x/review/git-codereview` to install the code reviewing tool. +1. Get the cloud package by running `go get -d google.golang.org/cloud`. + 1. If you have already checked out the source, make sure that the remote git + origin is https://code.googlesource.com/gocloud: + + git remote set-url origin https://code.googlesource.com/gocloud +1. Make changes and create a change by running `git codereview change `, +provide a commit message, and use `git codereview mail` to create a Gerrit CL. +1. Keep amending to the change and mail as you receive feedback. + +## Integration Tests + +In addition to the unit tests, you may run the integration test suite. + +To run the integration tests, you will need to create and configure a project +in the Google Developers Console. Once you create a project, set the +following environment variables to be able to run against the actual APIs. + +- **GCLOUD_TESTS_GOLANG_PROJECT_ID**: Developers Console project's ID (e.g. bamboo-shift-455) +- **GCLOUD_TESTS_GOLANG_KEY**: The path to the JSON key file. + +Create a storage bucket with the same name as the project id set in **GCLOUD_TESTS_GOLANG_PROJECT_ID**. +The storage integration test will create and delete some objects in this bucket. + +Install the [gcloud command-line tool][gcloudcli] on your machine and use it +to create the indexes used in the datastore integration tests, which are +found in `datastore/testdata/index.yaml`: + +From the project's root directory: + +``` sh +# Install the app component +$ gcloud components update app + +# Set the default project in your env +$ gcloud config set project $GCLOUD_TESTS_GOLANG_PROJECT_ID + +# Authenticate the gcloud tool with your account +$ gcloud auth login + +# Create the indexes +$ gcloud preview datastore create-indexes datastore/testdata/index.yaml + +``` + +You can run the integration tests by running: + +``` sh +$ go test -v -tags=integration google.golang.org/cloud/... +``` + +## Contributor License Agreements + +Before we can accept your pull requests, you'll need to sign a Contributor +License Agreement (CLA): + +- **If you are an individual writing original source code** and **you own the + intellectual property**, then you'll need to sign an [individual CLA][indvcla]. +- **If you work for a company that wants to allow you to contribute your work**, +then you'll need to sign a [corporate CLA][corpcla]. + +You can sign these electronically (just scroll to the bottom). After that, +we'll be able to accept your pull requests. + +## Contributor Code of Conduct + +As contributors and maintainers of this project, +and in the interest of fostering an open and welcoming community, +we pledge to respect all people who contribute through reporting issues, +posting feature requests, updating documentation, +submitting pull requests or patches, and other activities.
+ +We are committed to making participation in this project +a harassment-free experience for everyone, +regardless of level of experience, gender, gender identity and expression, +sexual orientation, disability, personal appearance, +body size, race, ethnicity, age, religion, or nationality. + +Examples of unacceptable behavior by participants include: + +* The use of sexualized language or imagery +* Personal attacks +* Trolling or insulting/derogatory comments +* Public or private harassment +* Publishing other's private information, +such as physical or electronic +addresses, without explicit permission +* Other unethical or unprofessional conduct. + +Project maintainers have the right and responsibility to remove, edit, or reject +comments, commits, code, wiki edits, issues, and other contributions +that are not aligned to this Code of Conduct. +By adopting this Code of Conduct, +project maintainers commit themselves to fairly and consistently +applying these principles to every aspect of managing this project. +Project maintainers who do not follow or enforce the Code of Conduct +may be permanently removed from the project team. + +This code of conduct applies both within project spaces and in public spaces +when an individual is representing the project or its community. + +Instances of abusive, harassing, or otherwise unacceptable behavior +may be reported by opening an issue +or contacting one or more of the project maintainers. + +This Code of Conduct is adapted from the [Contributor Covenant](http://contributor-covenant.org), version 1.2.0, +available at [http://contributor-covenant.org/version/1/2/0/](http://contributor-covenant.org/version/1/2/0/) + +[gcloudcli]: https://developers.google.com/cloud/sdk/gcloud/ +[indvcla]: https://developers.google.com/open-source/cla/individual +[corpcla]: https://developers.google.com/open-source/cla/corporate diff --git a/vendor/github.com/docker/distribution/vendor/google.golang.org/cloud/CONTRIBUTORS b/vendor/github.com/docker/distribution/vendor/google.golang.org/cloud/CONTRIBUTORS new file mode 100644 index 000000000..475ac6a66 --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/google.golang.org/cloud/CONTRIBUTORS @@ -0,0 +1,24 @@ +# People who have agreed to one of the CLAs and can contribute patches. +# The AUTHORS file lists the copyright holders; this file +# lists people. For example, Google employees are listed here +# but not in AUTHORS, because Google holds the copyright. +# +# https://developers.google.com/open-source/cla/individual +# https://developers.google.com/open-source/cla/corporate +# +# Names should be added to this file as: +# Name + +# Keep the list alphabetically sorted. + +Andrew Gerrand +Brad Fitzpatrick +Burcu Dogan +Dave Day +David Symonds +Glenn Lewis +Johan Euphrosine +Luna Duclos +Michael McGreevy +Péter Szilágyi +Tyler Treat diff --git a/vendor/github.com/docker/distribution/vendor/google.golang.org/cloud/LICENSE b/vendor/github.com/docker/distribution/vendor/google.golang.org/cloud/LICENSE new file mode 100644 index 000000000..a4c5efd82 --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/google.golang.org/cloud/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. 
+ + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright 2014 Google Inc. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
diff --git a/vendor/github.com/docker/distribution/vendor/google.golang.org/cloud/README.md b/vendor/github.com/docker/distribution/vendor/google.golang.org/cloud/README.md new file mode 100644 index 000000000..10d3995d5 --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/google.golang.org/cloud/README.md @@ -0,0 +1,135 @@ +# Google Cloud for Go + +[![Build Status](https://travis-ci.org/GoogleCloudPlatform/gcloud-golang.svg?branch=master)](https://travis-ci.org/GoogleCloudPlatform/gcloud-golang) + +**NOTE:** These packages are experimental, and may occasionally make +backwards-incompatible changes. + +**NOTE:** Github repo is a mirror of [https://code.googlesource.com/gocloud](https://code.googlesource.com/gocloud). + +Go packages for Google Cloud Platform services. Supported APIs include: + + * Google Cloud Datastore + * Google Cloud Storage + * Google Cloud Pub/Sub + * Google Cloud Container Engine + +``` go +import "google.golang.org/cloud" +``` + +Documentation and examples are available at +[https://godoc.org/google.golang.org/cloud](https://godoc.org/google.golang.org/cloud). + +## Authorization + +Authorization, throughout the package, is delegated to the godoc.org/golang.org/x/oauth2. +Refer to the [godoc documentation](https://godoc.org/golang.org/x/oauth2) +for examples on using oauth2 with the Cloud package. + +## Google Cloud Datastore + +[Google Cloud Datastore][cloud-datastore] ([docs][cloud-datastore-docs]) is a fully +managed, schemaless database for storing non-relational data. Cloud Datastore +automatically scales with your users and supports ACID transactions, high availability +of reads and writes, strong consistency for reads and ancestor queries, and eventual +consistency for all other queries. + +Follow the [activation instructions][cloud-datastore-activation] to use the Google +Cloud Datastore API with your project. + +[https://godoc.org/google.golang.org/cloud/datastore](https://godoc.org/google.golang.org/cloud/datastore) + + +```go +type Post struct { + Title string + Body string `datastore:",noindex"` + PublishedAt time.Time +} +keys := []*datastore.Key{ + datastore.NewKey(ctx, "Post", "post1", 0, nil), + datastore.NewKey(ctx, "Post", "post2", 0, nil), +} +posts := []*Post{ + {Title: "Post 1", Body: "...", PublishedAt: time.Now()}, + {Title: "Post 2", Body: "...", PublishedAt: time.Now()}, +} +if _, err := datastore.PutMulti(ctx, keys, posts); err != nil { + log.Println(err) +} +``` + +## Google Cloud Storage + +[Google Cloud Storage][cloud-storage] ([docs][cloud-storage-docs]) allows you to store +data on Google infrastructure with very high reliability, performance and availability, +and can be used to distribute large data objects to users via direct download. + +[https://godoc.org/google.golang.org/cloud/storage](https://godoc.org/google.golang.org/cloud/storage) + + +```go +// Read the object1 from bucket. +rc, err := storage.NewReader(ctx, "bucket", "object1") +if err != nil { + log.Fatal(err) +} +slurp, err := ioutil.ReadAll(rc) +rc.Close() +if err != nil { + log.Fatal(err) +} +``` + +## Google Cloud Pub/Sub (Alpha) + +> Google Cloud Pub/Sub is in **Alpha status**. As a result, it might change in +> backward-incompatible ways and is not recommended for production use. It is not +> subject to any SLA or deprecation policy. + +[Google Cloud Pub/Sub][cloud-pubsub] ([docs][cloud-pubsub-docs]) allows you to connect +your services with reliable, many-to-many, asynchronous messaging hosted on Google's +infrastructure. 
Cloud Pub/Sub automatically scales as you need it and provides a foundation +for building your own robust, global services. + +[https://godoc.org/google.golang.org/cloud/pubsub](https://godoc.org/google.golang.org/cloud/pubsub) + + +```go +// Publish "hello world" on topic1. +msgIDs, err := pubsub.Publish(ctx, "topic1", &pubsub.Message{ + Data: []byte("hello world"), +}) +if err != nil { + log.Println(err) +} +// Pull messages via subscription1. +msgs, err := pubsub.Pull(ctx, "subscription1", 1) +if err != nil { + log.Println(err) +} +``` + +## Contributing + +Contributions are welcome. Please, see the +[CONTRIBUTING](https://github.com/GoogleCloudPlatform/gcloud-golang/blob/master/CONTRIBUTING.md) +document for details. We're using Gerrit for our code reviews. Please don't open pull +requests against this repo, new pull requests will be automatically closed. + +Please note that this project is released with a Contributor Code of Conduct. +By participating in this project you agree to abide by its terms. +See [Contributor Code of Conduct](https://github.com/GoogleCloudPlatform/gcloud-golang/blob/master/CONTRIBUTING.md#contributor-code-of-conduct) +for more information. + +[cloud-datastore]: https://cloud.google.com/datastore/ +[cloud-datastore-docs]: https://cloud.google.com/datastore/docs +[cloud-datastore-activation]: https://cloud.google.com/datastore/docs/activate + +[cloud-pubsub]: https://cloud.google.com/pubsub/ +[cloud-pubsub-docs]: https://cloud.google.com/pubsub/docs + +[cloud-storage]: https://cloud.google.com/storage/ +[cloud-storage-docs]: https://cloud.google.com/storage/docs/overview +[cloud-storage-create-bucket]: https://cloud.google.com/storage/docs/cloud-console#_creatingbuckets diff --git a/vendor/github.com/docker/distribution/vendor/google.golang.org/cloud/cloud.go b/vendor/github.com/docker/distribution/vendor/google.golang.org/cloud/cloud.go new file mode 100644 index 000000000..a634b0552 --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/google.golang.org/cloud/cloud.go @@ -0,0 +1,49 @@ +// Copyright 2014 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package cloud contains Google Cloud Platform APIs related types +// and common functions. +package cloud + +import ( + "net/http" + + "golang.org/x/net/context" + "google.golang.org/cloud/internal" +) + +// NewContext returns a new context that uses the provided http.Client. +// Provided http.Client is responsible to authorize and authenticate +// the requests made to the Google Cloud APIs. +// It mutates the client's original Transport to append the cloud +// package's user-agent to the outgoing requests. +// You can obtain the project ID from the Google Developers Console, +// https://console.developers.google.com. 
+func NewContext(projID string, c *http.Client) context.Context { + if c == nil { + panic("invalid nil *http.Client passed to NewContext") + } + return WithContext(context.Background(), projID, c) +} + +// WithContext returns a new context in a similar way NewContext does, +// but initiates the new context with the specified parent. +func WithContext(parent context.Context, projID string, c *http.Client) context.Context { + // TODO(bradfitz): delete internal.Transport. It's too wrappy for what it does. + // Do User-Agent some other way. + if _, ok := c.Transport.(*internal.Transport); !ok { + c.Transport = &internal.Transport{Base: c.Transport} + } + return internal.WithContext(parent, projID, c) +} diff --git a/vendor/github.com/docker/distribution/vendor/google.golang.org/cloud/compute/metadata/metadata.go b/vendor/github.com/docker/distribution/vendor/google.golang.org/cloud/compute/metadata/metadata.go new file mode 100644 index 000000000..3dd684e08 --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/google.golang.org/cloud/compute/metadata/metadata.go @@ -0,0 +1,327 @@ +// Copyright 2014 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package metadata provides access to Google Compute Engine (GCE) +// metadata and API service accounts. +// +// This package is a wrapper around the GCE metadata service, +// as documented at https://developers.google.com/compute/docs/metadata. +package metadata + +import ( + "encoding/json" + "fmt" + "io/ioutil" + "net" + "net/http" + "net/url" + "os" + "strings" + "sync" + "time" + + "google.golang.org/cloud/internal" +) + +type cachedValue struct { + k string + trim bool + mu sync.Mutex + v string +} + +var ( + projID = &cachedValue{k: "project/project-id", trim: true} + projNum = &cachedValue{k: "project/numeric-project-id", trim: true} + instID = &cachedValue{k: "instance/id", trim: true} +) + +var metaClient = &http.Client{ + Transport: &internal.Transport{ + Base: &http.Transport{ + Dial: (&net.Dialer{ + Timeout: 750 * time.Millisecond, + KeepAlive: 30 * time.Second, + }).Dial, + ResponseHeaderTimeout: 750 * time.Millisecond, + }, + }, +} + +// NotDefinedError is returned when requested metadata is not defined. +// +// The underlying string is the suffix after "/computeMetadata/v1/". +// +// This error is not returned if the value is defined to be the empty +// string. +type NotDefinedError string + +func (suffix NotDefinedError) Error() string { + return fmt.Sprintf("metadata: GCE metadata %q not defined", string(suffix)) +} + +// Get returns a value from the metadata service. +// The suffix is appended to "http://${GCE_METADATA_HOST}/computeMetadata/v1/". +// +// If the GCE_METADATA_HOST environment variable is not defined, a default of +// 169.254.169.254 will be used instead. +// +// If the requested metadata is not defined, the returned error will +// be of type NotDefinedError. 
+func Get(suffix string) (string, error) { + val, _, err := getETag(suffix) + return val, err +} + +// getETag returns a value from the metadata service as well as the associated +// ETag. This func is otherwise equivalent to Get. +func getETag(suffix string) (value, etag string, err error) { + // Using a fixed IP makes it very difficult to spoof the metadata service in + // a container, which is an important use-case for local testing of cloud + // deployments. To enable spoofing of the metadata service, the environment + // variable GCE_METADATA_HOST is first inspected to decide where metadata + // requests shall go. + host := os.Getenv("GCE_METADATA_HOST") + if host == "" { + // Using 169.254.169.254 instead of "metadata" here because Go + // binaries built with the "netgo" tag and without cgo won't + // know the search suffix for "metadata" is + // ".google.internal", and this IP address is documented as + // being stable anyway. + host = "169.254.169.254" + } + url := "http://" + host + "/computeMetadata/v1/" + suffix + req, _ := http.NewRequest("GET", url, nil) + req.Header.Set("Metadata-Flavor", "Google") + res, err := metaClient.Do(req) + if err != nil { + return "", "", err + } + defer res.Body.Close() + if res.StatusCode == http.StatusNotFound { + return "", "", NotDefinedError(suffix) + } + if res.StatusCode != 200 { + return "", "", fmt.Errorf("status code %d trying to fetch %s", res.StatusCode, url) + } + all, err := ioutil.ReadAll(res.Body) + if err != nil { + return "", "", err + } + return string(all), res.Header.Get("Etag"), nil +} + +func getTrimmed(suffix string) (s string, err error) { + s, err = Get(suffix) + s = strings.TrimSpace(s) + return +} + +func (c *cachedValue) get() (v string, err error) { + defer c.mu.Unlock() + c.mu.Lock() + if c.v != "" { + return c.v, nil + } + if c.trim { + v, err = getTrimmed(c.k) + } else { + v, err = Get(c.k) + } + if err == nil { + c.v = v + } + return +} + +var onGCE struct { + sync.Mutex + set bool + v bool +} + +// OnGCE reports whether this process is running on Google Compute Engine. +func OnGCE() bool { + defer onGCE.Unlock() + onGCE.Lock() + if onGCE.set { + return onGCE.v + } + onGCE.set = true + + // We use the DNS name of the metadata service here instead of the IP address + // because we expect that to fail faster in the not-on-GCE case. + res, err := metaClient.Get("http://metadata.google.internal") + if err != nil { + return false + } + onGCE.v = res.Header.Get("Metadata-Flavor") == "Google" + return onGCE.v +} + +// Subscribe subscribes to a value from the metadata service. +// The suffix is appended to "http://${GCE_METADATA_HOST}/computeMetadata/v1/". +// +// Subscribe calls fn with the latest metadata value indicated by the provided +// suffix. If the metadata value is deleted, fn is called with the empty string +// and ok false. Subscribe blocks until fn returns a non-nil error or the value +// is deleted. Subscribe returns the error value returned from the last call to +// fn, which may be nil when ok == false. +func Subscribe(suffix string, fn func(v string, ok bool) error) error { + const failedSubscribeSleep = time.Second * 5 + + // First check to see if the metadata value exists at all. 
+ val, lastETag, err := getETag(suffix) + if err != nil { + return err + } + + if err := fn(val, true); err != nil { + return err + } + + ok := true + suffix += "?wait_for_change=true&last_etag=" + for { + val, etag, err := getETag(suffix + url.QueryEscape(lastETag)) + if err != nil { + if _, deleted := err.(NotDefinedError); !deleted { + time.Sleep(failedSubscribeSleep) + continue // Retry on other errors. + } + ok = false + } + lastETag = etag + + if err := fn(val, ok); err != nil || !ok { + return err + } + } +} + +// ProjectID returns the current instance's project ID string. +func ProjectID() (string, error) { return projID.get() } + +// NumericProjectID returns the current instance's numeric project ID. +func NumericProjectID() (string, error) { return projNum.get() } + +// InternalIP returns the instance's primary internal IP address. +func InternalIP() (string, error) { + return getTrimmed("instance/network-interfaces/0/ip") +} + +// ExternalIP returns the instance's primary external (public) IP address. +func ExternalIP() (string, error) { + return getTrimmed("instance/network-interfaces/0/access-configs/0/external-ip") +} + +// Hostname returns the instance's hostname. This will be of the form +// ".c..internal". +func Hostname() (string, error) { + return getTrimmed("instance/hostname") +} + +// InstanceTags returns the list of user-defined instance tags, +// assigned when initially creating a GCE instance. +func InstanceTags() ([]string, error) { + var s []string + j, err := Get("instance/tags") + if err != nil { + return nil, err + } + if err := json.NewDecoder(strings.NewReader(j)).Decode(&s); err != nil { + return nil, err + } + return s, nil +} + +// InstanceID returns the current VM's numeric instance ID. +func InstanceID() (string, error) { + return instID.get() +} + +// InstanceName returns the current VM's instance ID string. +func InstanceName() (string, error) { + host, err := Hostname() + if err != nil { + return "", err + } + return strings.Split(host, ".")[0], nil +} + +// Zone returns the current VM's zone, such as "us-central1-b". +func Zone() (string, error) { + zone, err := getTrimmed("instance/zone") + // zone is of the form "projects//zones/". + if err != nil { + return "", err + } + return zone[strings.LastIndex(zone, "/")+1:], nil +} + +// InstanceAttributes returns the list of user-defined attributes, +// assigned when initially creating a GCE VM instance. The value of an +// attribute can be obtained with InstanceAttributeValue. +func InstanceAttributes() ([]string, error) { return lines("instance/attributes/") } + +// ProjectAttributes returns the list of user-defined attributes +// applying to the project as a whole, not just this VM. The value of +// an attribute can be obtained with ProjectAttributeValue. +func ProjectAttributes() ([]string, error) { return lines("project/attributes/") } + +func lines(suffix string) ([]string, error) { + j, err := Get(suffix) + if err != nil { + return nil, err + } + s := strings.Split(strings.TrimSpace(j), "\n") + for i := range s { + s[i] = strings.TrimSpace(s[i]) + } + return s, nil +} + +// InstanceAttributeValue returns the value of the provided VM +// instance attribute. +// +// If the requested attribute is not defined, the returned error will +// be of type NotDefinedError. +// +// InstanceAttributeValue may return ("", nil) if the attribute was +// defined to be the empty string. 
+func InstanceAttributeValue(attr string) (string, error) { + return Get("instance/attributes/" + attr) +} + +// ProjectAttributeValue returns the value of the provided +// project attribute. +// +// If the requested attribute is not defined, the returned error will +// be of type NotDefinedError. +// +// ProjectAttributeValue may return ("", nil) if the attribute was +// defined to be the empty string. +func ProjectAttributeValue(attr string) (string, error) { + return Get("project/attributes/" + attr) +} + +// Scopes returns the service account scopes for the given account. +// The account may be empty or the string "default" to use the instance's +// main account. +func Scopes(serviceAccount string) ([]string, error) { + if serviceAccount == "" { + serviceAccount = "default" + } + return lines("instance/service-accounts/" + serviceAccount + "/scopes") +} diff --git a/vendor/github.com/docker/distribution/vendor/google.golang.org/cloud/internal/cloud.go b/vendor/github.com/docker/distribution/vendor/google.golang.org/cloud/internal/cloud.go new file mode 100644 index 000000000..8b0db1b5d --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/google.golang.org/cloud/internal/cloud.go @@ -0,0 +1,128 @@ +// Copyright 2014 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package internal provides support for the cloud packages. +// +// Users should not import this package directly. +package internal + +import ( + "fmt" + "net/http" + "sync" + + "golang.org/x/net/context" +) + +type contextKey struct{} + +func WithContext(parent context.Context, projID string, c *http.Client) context.Context { + if c == nil { + panic("nil *http.Client passed to WithContext") + } + if projID == "" { + panic("empty project ID passed to WithContext") + } + return context.WithValue(parent, contextKey{}, &cloudContext{ + ProjectID: projID, + HTTPClient: c, + }) +} + +const userAgent = "gcloud-golang/0.1" + +type cloudContext struct { + ProjectID string + HTTPClient *http.Client + + mu sync.Mutex // guards svc + svc map[string]interface{} // e.g. "storage" => *rawStorage.Service +} + +// Service returns the result of the fill function if it's never been +// called before for the given name (which is assumed to be an API +// service name, like "datastore"). If it has already been cached, the fill +// func is not run. +// It's safe for concurrent use by multiple goroutines. +func Service(ctx context.Context, name string, fill func(*http.Client) interface{}) interface{} { + return cc(ctx).service(name, fill) +} + +func (c *cloudContext) service(name string, fill func(*http.Client) interface{}) interface{} { + c.mu.Lock() + defer c.mu.Unlock() + + if c.svc == nil { + c.svc = make(map[string]interface{}) + } else if v, ok := c.svc[name]; ok { + return v + } + v := fill(c.HTTPClient) + c.svc[name] = v + return v +} + +// Transport is an http.RoundTripper that appends +// Google Cloud client's user-agent to the original +// request's user-agent header. 
+type Transport struct { + // Base represents the actual http.RoundTripper + // the requests will be delegated to. + Base http.RoundTripper +} + +// RoundTrip appends a user-agent to the existing user-agent +// header and delegates the request to the base http.RoundTripper. +func (t *Transport) RoundTrip(req *http.Request) (*http.Response, error) { + req = cloneRequest(req) + ua := req.Header.Get("User-Agent") + if ua == "" { + ua = userAgent + } else { + ua = fmt.Sprintf("%s %s", ua, userAgent) + } + req.Header.Set("User-Agent", ua) + return t.Base.RoundTrip(req) +} + +// cloneRequest returns a clone of the provided *http.Request. +// The clone is a shallow copy of the struct and its Header map. +func cloneRequest(r *http.Request) *http.Request { + // shallow copy of the struct + r2 := new(http.Request) + *r2 = *r + // deep copy of the Header + r2.Header = make(http.Header) + for k, s := range r.Header { + r2.Header[k] = s + } + return r2 +} + +func ProjID(ctx context.Context) string { + return cc(ctx).ProjectID +} + +func HTTPClient(ctx context.Context) *http.Client { + return cc(ctx).HTTPClient +} + +// cc returns the internal *cloudContext (cc) state for a context.Context. +// It panics if the user did it wrong. +func cc(ctx context.Context) *cloudContext { + if c, ok := ctx.Value(contextKey{}).(*cloudContext); ok { + return c + } + panic("invalid context.Context type; it should be created with cloud.NewContext") +} diff --git a/vendor/github.com/docker/distribution/vendor/google.golang.org/cloud/internal/opts/option.go b/vendor/github.com/docker/distribution/vendor/google.golang.org/cloud/internal/opts/option.go new file mode 100644 index 000000000..c5ccf4f56 --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/google.golang.org/cloud/internal/opts/option.go @@ -0,0 +1,24 @@ +// Package opts holds the DialOpts struct, configurable by +// cloud.ClientOptions to set up transports for cloud packages. +// +// This is a separate page to prevent cycles between the core +// cloud packages. +package opts + +import ( + "net/http" + + "golang.org/x/oauth2" + "google.golang.org/grpc" +) + +type DialOpt struct { + Endpoint string + Scopes []string + UserAgent string + + TokenSource oauth2.TokenSource + + HTTPClient *http.Client + GRPCClient *grpc.ClientConn +} diff --git a/vendor/github.com/docker/distribution/vendor/google.golang.org/cloud/key.json.enc b/vendor/github.com/docker/distribution/vendor/google.golang.org/cloud/key.json.enc new file mode 100644 index 000000000..2f673a84b Binary files /dev/null and b/vendor/github.com/docker/distribution/vendor/google.golang.org/cloud/key.json.enc differ diff --git a/vendor/github.com/docker/distribution/vendor/google.golang.org/cloud/option.go b/vendor/github.com/docker/distribution/vendor/google.golang.org/cloud/option.go new file mode 100644 index 000000000..d4a5aea29 --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/google.golang.org/cloud/option.go @@ -0,0 +1,100 @@ +// Copyright 2015 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +package cloud + +import ( + "net/http" + + "golang.org/x/oauth2" + "google.golang.org/cloud/internal/opts" + "google.golang.org/grpc" +) + +// ClientOption is used when construct clients for each cloud service. +type ClientOption interface { + // Resolve configures the given DialOpts for this option. + Resolve(*opts.DialOpt) +} + +// WithTokenSource returns a ClientOption that specifies an OAuth2 token +// source to be used as the basis for authentication. +func WithTokenSource(s oauth2.TokenSource) ClientOption { + return withTokenSource{s} +} + +type withTokenSource struct{ ts oauth2.TokenSource } + +func (w withTokenSource) Resolve(o *opts.DialOpt) { + o.TokenSource = w.ts +} + +// WithEndpoint returns a ClientOption that overrides the default endpoint +// to be used for a service. +func WithEndpoint(url string) ClientOption { + return withEndpoint(url) +} + +type withEndpoint string + +func (w withEndpoint) Resolve(o *opts.DialOpt) { + o.Endpoint = string(w) +} + +// WithScopes returns a ClientOption that overrides the default OAuth2 scopes +// to be used for a service. +func WithScopes(scope ...string) ClientOption { + return withScopes(scope) +} + +type withScopes []string + +func (w withScopes) Resolve(o *opts.DialOpt) { + o.Scopes = []string(w) +} + +// WithUserAgent returns a ClientOption that sets the User-Agent. +func WithUserAgent(ua string) ClientOption { + return withUA(ua) +} + +type withUA string + +func (w withUA) Resolve(o *opts.DialOpt) { o.UserAgent = string(w) } + +// WithBaseHTTP returns a ClientOption that specifies the HTTP client to +// use as the basis of communications. This option may only be used with +// services that support HTTP as their communication transport. +func WithBaseHTTP(client *http.Client) ClientOption { + return withBaseHTTP{client} +} + +type withBaseHTTP struct{ client *http.Client } + +func (w withBaseHTTP) Resolve(o *opts.DialOpt) { + o.HTTPClient = w.client +} + +// WithBaseGRPC returns a ClientOption that specifies the GRPC client +// connection to use as the basis of communications. This option many only be +// used with services that support HRPC as their communication transport. +func WithBaseGRPC(client *grpc.ClientConn) ClientOption { + return withBaseGRPC{client} +} + +type withBaseGRPC struct{ client *grpc.ClientConn } + +func (w withBaseGRPC) Resolve(o *opts.DialOpt) { + o.GRPCClient = w.client +} diff --git a/vendor/github.com/docker/distribution/vendor/google.golang.org/cloud/storage/acl.go b/vendor/github.com/docker/distribution/vendor/google.golang.org/cloud/storage/acl.go new file mode 100644 index 000000000..71c5800a8 --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/google.golang.org/cloud/storage/acl.go @@ -0,0 +1,176 @@ +// Copyright 2014 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package storage + +import ( + "fmt" + + "golang.org/x/net/context" + raw "google.golang.org/api/storage/v1" +) + +// ACLRole is the the access permission for the entity. +type ACLRole string + +const ( + RoleOwner ACLRole = "OWNER" + RoleReader ACLRole = "READER" +) + +// ACLEntity is an entity holding an ACL permission. +// +// It could be in the form of: +// "user-", "user-","group-", "group-", +// "domain-" and "project-team-". +// +// Or one of the predefined constants: AllUsers, AllAuthenticatedUsers. +type ACLEntity string + +const ( + AllUsers ACLEntity = "allUsers" + AllAuthenticatedUsers ACLEntity = "allAuthenticatedUsers" +) + +// ACLRule represents an access control list rule entry for a Google Cloud Storage object or bucket. +// A bucket is a Google Cloud Storage container whose name is globally unique and contains zero or +// more objects. An object is a blob of data that is stored in a bucket. +type ACLRule struct { + // Entity identifies the entity holding the current rule's permissions. + Entity ACLEntity + + // Role is the the access permission for the entity. + Role ACLRole +} + +// DefaultACL returns the default object ACL entries for the named bucket. +func DefaultACL(ctx context.Context, bucket string) ([]ACLRule, error) { + acls, err := rawService(ctx).DefaultObjectAccessControls.List(bucket).Context(ctx).Do() + if err != nil { + return nil, fmt.Errorf("storage: error listing default object ACL for bucket %q: %v", bucket, err) + } + r := make([]ACLRule, 0, len(acls.Items)) + for _, v := range acls.Items { + if m, ok := v.(map[string]interface{}); ok { + entity, ok1 := m["entity"].(string) + role, ok2 := m["role"].(string) + if ok1 && ok2 { + r = append(r, ACLRule{Entity: ACLEntity(entity), Role: ACLRole(role)}) + } + } + } + return r, nil +} + +// PutDefaultACLRule saves the named default object ACL entity with the provided role for the named bucket. +func PutDefaultACLRule(ctx context.Context, bucket string, entity ACLEntity, role ACLRole) error { + acl := &raw.ObjectAccessControl{ + Bucket: bucket, + Entity: string(entity), + Role: string(role), + } + _, err := rawService(ctx).DefaultObjectAccessControls.Update(bucket, string(entity), acl).Context(ctx).Do() + if err != nil { + return fmt.Errorf("storage: error updating default ACL rule for bucket %q, entity %q: %v", bucket, entity, err) + } + return nil +} + +// DeleteDefaultACLRule deletes the named default ACL entity for the named bucket. +func DeleteDefaultACLRule(ctx context.Context, bucket string, entity ACLEntity) error { + err := rawService(ctx).DefaultObjectAccessControls.Delete(bucket, string(entity)).Context(ctx).Do() + if err != nil { + return fmt.Errorf("storage: error deleting default ACL rule for bucket %q, entity %q: %v", bucket, entity, err) + } + return nil +} + +// BucketACL returns the ACL entries for the named bucket. +func BucketACL(ctx context.Context, bucket string) ([]ACLRule, error) { + acls, err := rawService(ctx).BucketAccessControls.List(bucket).Context(ctx).Do() + if err != nil { + return nil, fmt.Errorf("storage: error listing bucket ACL for bucket %q: %v", bucket, err) + } + r := make([]ACLRule, len(acls.Items)) + for i, v := range acls.Items { + r[i].Entity = ACLEntity(v.Entity) + r[i].Role = ACLRole(v.Role) + } + return r, nil +} + +// PutBucketACLRule saves the named ACL entity with the provided role for the named bucket. 
+func PutBucketACLRule(ctx context.Context, bucket string, entity ACLEntity, role ACLRole) error { + acl := &raw.BucketAccessControl{ + Bucket: bucket, + Entity: string(entity), + Role: string(role), + } + _, err := rawService(ctx).BucketAccessControls.Update(bucket, string(entity), acl).Context(ctx).Do() + if err != nil { + return fmt.Errorf("storage: error updating bucket ACL rule for bucket %q, entity %q: %v", bucket, entity, err) + } + return nil +} + +// DeleteBucketACLRule deletes the named ACL entity for the named bucket. +func DeleteBucketACLRule(ctx context.Context, bucket string, entity ACLEntity) error { + err := rawService(ctx).BucketAccessControls.Delete(bucket, string(entity)).Context(ctx).Do() + if err != nil { + return fmt.Errorf("storage: error deleting bucket ACL rule for bucket %q, entity %q: %v", bucket, entity, err) + } + return nil +} + +// ACL returns the ACL entries for the named object. +func ACL(ctx context.Context, bucket, object string) ([]ACLRule, error) { + acls, err := rawService(ctx).ObjectAccessControls.List(bucket, object).Context(ctx).Do() + if err != nil { + return nil, fmt.Errorf("storage: error listing object ACL for bucket %q, file %q: %v", bucket, object, err) + } + r := make([]ACLRule, 0, len(acls.Items)) + for _, v := range acls.Items { + if m, ok := v.(map[string]interface{}); ok { + entity, ok1 := m["entity"].(string) + role, ok2 := m["role"].(string) + if ok1 && ok2 { + r = append(r, ACLRule{Entity: ACLEntity(entity), Role: ACLRole(role)}) + } + } + } + return r, nil +} + +// PutACLRule saves the named ACL entity with the provided role for the named object. +func PutACLRule(ctx context.Context, bucket, object string, entity ACLEntity, role ACLRole) error { + acl := &raw.ObjectAccessControl{ + Bucket: bucket, + Entity: string(entity), + Role: string(role), + } + _, err := rawService(ctx).ObjectAccessControls.Update(bucket, object, string(entity), acl).Context(ctx).Do() + if err != nil { + return fmt.Errorf("storage: error updating object ACL rule for bucket %q, file %q, entity %q: %v", bucket, object, entity, err) + } + return nil +} + +// DeleteACLRule deletes the named ACL entity for the named object. +func DeleteACLRule(ctx context.Context, bucket, object string, entity ACLEntity) error { + err := rawService(ctx).ObjectAccessControls.Delete(bucket, object, string(entity)).Context(ctx).Do() + if err != nil { + return fmt.Errorf("storage: error deleting object ACL rule for bucket %q, file %q, entity %q: %v", bucket, object, entity, err) + } + return nil +} diff --git a/vendor/github.com/docker/distribution/vendor/google.golang.org/cloud/storage/storage.go b/vendor/github.com/docker/distribution/vendor/google.golang.org/cloud/storage/storage.go new file mode 100644 index 000000000..bf22c6aea --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/google.golang.org/cloud/storage/storage.go @@ -0,0 +1,350 @@ +// Copyright 2014 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +// Package storage contains a Google Cloud Storage client. +// +// This package is experimental and may make backwards-incompatible changes. +package storage + +import ( + "crypto" + "crypto/rand" + "crypto/rsa" + "crypto/sha256" + "crypto/x509" + "encoding/base64" + "encoding/pem" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "strings" + "time" + + "google.golang.org/cloud/internal" + + "golang.org/x/net/context" + "google.golang.org/api/googleapi" + raw "google.golang.org/api/storage/v1" +) + +var ( + ErrBucketNotExist = errors.New("storage: bucket doesn't exist") + ErrObjectNotExist = errors.New("storage: object doesn't exist") +) + +const ( + // ScopeFullControl grants permissions to manage your + // data and permissions in Google Cloud Storage. + ScopeFullControl = raw.DevstorageFullControlScope + + // ScopeReadOnly grants permissions to + // view your data in Google Cloud Storage. + ScopeReadOnly = raw.DevstorageReadOnlyScope + + // ScopeReadWrite grants permissions to manage your + // data in Google Cloud Storage. + ScopeReadWrite = raw.DevstorageReadWriteScope +) + +// TODO(jbd): Add storage.buckets.list. +// TODO(jbd): Add storage.buckets.insert. +// TODO(jbd): Add storage.buckets.update. +// TODO(jbd): Add storage.buckets.delete. + +// TODO(jbd): Add storage.objects.watch. + +// BucketInfo returns the metadata for the specified bucket. +func BucketInfo(ctx context.Context, name string) (*Bucket, error) { + resp, err := rawService(ctx).Buckets.Get(name).Projection("full").Context(ctx).Do() + if e, ok := err.(*googleapi.Error); ok && e.Code == http.StatusNotFound { + return nil, ErrBucketNotExist + } + if err != nil { + return nil, err + } + return newBucket(resp), nil +} + +// ListObjects lists objects from the bucket. You can specify a query +// to filter the results. If q is nil, no filtering is applied. +func ListObjects(ctx context.Context, bucket string, q *Query) (*Objects, error) { + c := rawService(ctx).Objects.List(bucket) + c.Projection("full") + if q != nil { + c.Delimiter(q.Delimiter) + c.Prefix(q.Prefix) + c.Versions(q.Versions) + c.PageToken(q.Cursor) + if q.MaxResults > 0 { + c.MaxResults(int64(q.MaxResults)) + } + } + resp, err := c.Context(ctx).Do() + if err != nil { + return nil, err + } + objects := &Objects{ + Results: make([]*Object, len(resp.Items)), + Prefixes: make([]string, len(resp.Prefixes)), + } + for i, item := range resp.Items { + objects.Results[i] = newObject(item) + } + for i, prefix := range resp.Prefixes { + objects.Prefixes[i] = prefix + } + if resp.NextPageToken != "" { + next := Query{} + if q != nil { + // keep the other filtering + // criteria if there is a query + next = *q + } + next.Cursor = resp.NextPageToken + objects.Next = &next + } + return objects, nil +} + +// SignedURLOptions allows you to restrict the access to the signed URL. +type SignedURLOptions struct { + // GoogleAccessID represents the authorizer of the signed URL generation. + // It is typically the Google service account client email address from + // the Google Developers Console in the form of "xxx@developer.gserviceaccount.com". + // Required. + GoogleAccessID string + + // PrivateKey is the Google service account private key. It is obtainable + // from the Google Developers Console. + // At https://console.developers.google.com/project//apiui/credential, + // create a service account client ID or reuse one of your existing service account + // credentials. Click on the "Generate new P12 key" to generate and download + // a new private key. 
Once you download the P12 file, use the following command + // to convert it into a PEM file. + // + // $ openssl pkcs12 -in key.p12 -passin pass:notasecret -out key.pem -nodes + // + // Provide the contents of the PEM file as a byte slice. + // Required. + PrivateKey []byte + + // Method is the HTTP method to be used with the signed URL. + // Signed URLs can be used with GET, HEAD, PUT, and DELETE requests. + // Required. + Method string + + // Expires is the expiration time on the signed URL. It must be + // a datetime in the future. + // Required. + Expires time.Time + + // ContentType is the content type header the client must provide + // to use the generated signed URL. + // Optional. + ContentType string + + // Headers is a list of extention headers the client must provide + // in order to use the generated signed URL. + // Optional. + Headers []string + + // MD5 is the base64 encoded MD5 checksum of the file. + // If provided, the client should provide the exact value on the request + // header in order to use the signed URL. + // Optional. + MD5 []byte +} + +// SignedURL returns a URL for the specified object. Signed URLs allow +// the users access to a restricted resource for a limited time without having a +// Google account or signing in. For more information about the signed +// URLs, see https://cloud.google.com/storage/docs/accesscontrol#Signed-URLs. +func SignedURL(bucket, name string, opts *SignedURLOptions) (string, error) { + if opts == nil { + return "", errors.New("storage: missing required SignedURLOptions") + } + if opts.GoogleAccessID == "" || opts.PrivateKey == nil { + return "", errors.New("storage: missing required credentials to generate a signed URL") + } + if opts.Method == "" { + return "", errors.New("storage: missing required method option") + } + if opts.Expires.IsZero() { + return "", errors.New("storage: missing required expires option") + } + key, err := parseKey(opts.PrivateKey) + if err != nil { + return "", err + } + h := sha256.New() + fmt.Fprintf(h, "%s\n", opts.Method) + fmt.Fprintf(h, "%s\n", opts.MD5) + fmt.Fprintf(h, "%s\n", opts.ContentType) + fmt.Fprintf(h, "%d\n", opts.Expires.Unix()) + fmt.Fprintf(h, "%s", strings.Join(opts.Headers, "\n")) + fmt.Fprintf(h, "/%s/%s", bucket, name) + b, err := rsa.SignPKCS1v15( + rand.Reader, + key, + crypto.SHA256, + h.Sum(nil), + ) + if err != nil { + return "", err + } + encoded := base64.StdEncoding.EncodeToString(b) + u := &url.URL{ + Scheme: "https", + Host: "storage.googleapis.com", + Path: fmt.Sprintf("/%s/%s", bucket, name), + } + q := u.Query() + q.Set("GoogleAccessId", opts.GoogleAccessID) + q.Set("Expires", fmt.Sprintf("%d", opts.Expires.Unix())) + q.Set("Signature", string(encoded)) + u.RawQuery = q.Encode() + return u.String(), nil +} + +// StatObject returns meta information about the specified object. +func StatObject(ctx context.Context, bucket, name string) (*Object, error) { + o, err := rawService(ctx).Objects.Get(bucket, name).Projection("full").Context(ctx).Do() + if e, ok := err.(*googleapi.Error); ok && e.Code == http.StatusNotFound { + return nil, ErrObjectNotExist + } + if err != nil { + return nil, err + } + return newObject(o), nil +} + +// UpdateAttrs updates an object with the provided attributes. +// All zero-value attributes are ignored. 
+func UpdateAttrs(ctx context.Context, bucket, name string, attrs ObjectAttrs) (*Object, error) { + o, err := rawService(ctx).Objects.Patch(bucket, name, attrs.toRawObject(bucket)).Projection("full").Context(ctx).Do() + if e, ok := err.(*googleapi.Error); ok && e.Code == http.StatusNotFound { + return nil, ErrObjectNotExist + } + if err != nil { + return nil, err + } + return newObject(o), nil +} + +// DeleteObject deletes the single specified object. +func DeleteObject(ctx context.Context, bucket, name string) error { + return rawService(ctx).Objects.Delete(bucket, name).Context(ctx).Do() +} + +// CopyObject copies the source object to the destination. +// The copied object's attributes are overwritten by attrs if non-nil. +func CopyObject(ctx context.Context, srcBucket, srcName string, destBucket, destName string, attrs *ObjectAttrs) (*Object, error) { + if srcBucket == "" || destBucket == "" { + return nil, errors.New("storage: srcBucket and destBucket must both be non-empty") + } + if srcName == "" || destName == "" { + return nil, errors.New("storage: srcName and destName must be non-empty") + } + var rawObject *raw.Object + if attrs != nil { + attrs.Name = destName + if attrs.ContentType == "" { + return nil, errors.New("storage: attrs.ContentType must be non-empty") + } + rawObject = attrs.toRawObject(destBucket) + } + o, err := rawService(ctx).Objects.Copy( + srcBucket, srcName, destBucket, destName, rawObject).Projection("full").Context(ctx).Do() + if err != nil { + return nil, err + } + return newObject(o), nil +} + +// NewReader creates a new io.ReadCloser to read the contents +// of the object. +func NewReader(ctx context.Context, bucket, name string) (io.ReadCloser, error) { + hc := internal.HTTPClient(ctx) + u := &url.URL{ + Scheme: "https", + Host: "storage.googleapis.com", + Path: fmt.Sprintf("/%s/%s", bucket, name), + } + res, err := hc.Get(u.String()) + if err != nil { + return nil, err + } + if res.StatusCode == http.StatusNotFound { + res.Body.Close() + return nil, ErrObjectNotExist + } + if res.StatusCode < 200 || res.StatusCode > 299 { + res.Body.Close() + return res.Body, fmt.Errorf("storage: can't read object %v/%v, status code: %v", bucket, name, res.Status) + } + return res.Body, nil +} + +// NewWriter returns a storage Writer that writes to the GCS object +// identified by the specified name. +// If such an object doesn't exist, it creates one. +// Attributes can be set on the object by modifying the returned Writer's +// ObjectAttrs field before the first call to Write. The name parameter to this +// function is ignored if the Name field of the ObjectAttrs field is set to a +// non-empty string. +// +// It is the caller's responsibility to call Close when writing is done. +// +// The object is not available and any previous object with the same +// name is not replaced on Cloud Storage until Close is called. +func NewWriter(ctx context.Context, bucket, name string) *Writer { + return &Writer{ + ctx: ctx, + bucket: bucket, + name: name, + donec: make(chan struct{}), + } +} + +func rawService(ctx context.Context) *raw.Service { + return internal.Service(ctx, "storage", func(hc *http.Client) interface{} { + svc, _ := raw.New(hc) + return svc + }).(*raw.Service) +} + +// parseKey converts the binary contents of a private key file +// to an *rsa.PrivateKey. It detects whether the private key is in a +// PEM container or not. If so, it extracts the the private key +// from PEM container before conversion. It only supports PEM +// containers with no passphrase. 
+func parseKey(key []byte) (*rsa.PrivateKey, error) { + if block, _ := pem.Decode(key); block != nil { + key = block.Bytes + } + parsedKey, err := x509.ParsePKCS8PrivateKey(key) + if err != nil { + parsedKey, err = x509.ParsePKCS1PrivateKey(key) + if err != nil { + return nil, err + } + } + parsed, ok := parsedKey.(*rsa.PrivateKey) + if !ok { + return nil, errors.New("oauth2: private key is invalid") + } + return parsed, nil +} diff --git a/vendor/github.com/docker/distribution/vendor/google.golang.org/cloud/storage/types.go b/vendor/github.com/docker/distribution/vendor/google.golang.org/cloud/storage/types.go new file mode 100644 index 000000000..060deb6ad --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/google.golang.org/cloud/storage/types.go @@ -0,0 +1,417 @@ +// Copyright 2014 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package storage + +import ( + "encoding/base64" + "io" + "sync" + "time" + + "golang.org/x/net/context" + raw "google.golang.org/api/storage/v1" +) + +// Bucket represents a Google Cloud Storage bucket. +type Bucket struct { + // Name is the name of the bucket. + Name string + + // ACL is the list of access control rules on the bucket. + ACL []ACLRule + + // DefaultObjectACL is the list of access controls to + // apply to new objects when no object ACL is provided. + DefaultObjectACL []ACLRule + + // Location is the location of the bucket. It defaults to "US". + Location string + + // Metageneration is the metadata generation of the bucket. + // Read-only. + Metageneration int64 + + // StorageClass is the storage class of the bucket. This defines + // how objects in the bucket are stored and determines the SLA + // and the cost of storage. Typical values are "STANDARD" and + // "DURABLE_REDUCED_AVAILABILITY". Defaults to "STANDARD". + StorageClass string + + // Created is the creation time of the bucket. + // Read-only. + Created time.Time +} + +func newBucket(b *raw.Bucket) *Bucket { + if b == nil { + return nil + } + bucket := &Bucket{ + Name: b.Name, + Location: b.Location, + Metageneration: b.Metageneration, + StorageClass: b.StorageClass, + Created: convertTime(b.TimeCreated), + } + acl := make([]ACLRule, len(b.Acl)) + for i, rule := range b.Acl { + acl[i] = ACLRule{ + Entity: ACLEntity(rule.Entity), + Role: ACLRole(rule.Role), + } + } + bucket.ACL = acl + objACL := make([]ACLRule, len(b.DefaultObjectAcl)) + for i, rule := range b.DefaultObjectAcl { + objACL[i] = ACLRule{ + Entity: ACLEntity(rule.Entity), + Role: ACLRole(rule.Role), + } + } + bucket.DefaultObjectACL = objACL + return bucket +} + +// ObjectAttrs is the user-editable object attributes. +type ObjectAttrs struct { + // Name is the name of the object. + Name string + + // ContentType is the MIME type of the object's content. + // Optional. + ContentType string + + // ContentLanguage is the optional RFC 1766 Content-Language of + // the object's content sent in response headers. 
+ ContentLanguage string + + // ContentEncoding is the optional Content-Encoding of the object + // sent it the response headers. + ContentEncoding string + + // CacheControl is the optional Cache-Control header of the object + // sent in the response headers. + CacheControl string + + // ContentDisposition is the optional Content-Disposition header of the object + // sent in the response headers. + ContentDisposition string + + // ACL is the list of access control rules for the object. + // Optional. If nil or empty, existing ACL rules are preserved. + ACL []ACLRule + + // Metadata represents user-provided metadata, in key/value pairs. + // It can be nil if the current metadata values needs to preserved. + Metadata map[string]string +} + +func (o ObjectAttrs) toRawObject(bucket string) *raw.Object { + var acl []*raw.ObjectAccessControl + if len(o.ACL) > 0 { + acl = make([]*raw.ObjectAccessControl, len(o.ACL)) + for i, rule := range o.ACL { + acl[i] = &raw.ObjectAccessControl{ + Entity: string(rule.Entity), + Role: string(rule.Role), + } + } + } + return &raw.Object{ + Bucket: bucket, + Name: o.Name, + ContentType: o.ContentType, + ContentEncoding: o.ContentEncoding, + ContentLanguage: o.ContentLanguage, + CacheControl: o.CacheControl, + ContentDisposition: o.ContentDisposition, + Acl: acl, + Metadata: o.Metadata, + } +} + +// Object represents a Google Cloud Storage (GCS) object. +type Object struct { + // Bucket is the name of the bucket containing this GCS object. + Bucket string + + // Name is the name of the object within the bucket. + Name string + + // ContentType is the MIME type of the object's content. + ContentType string + + // ContentLanguage is the content language of the object's content. + ContentLanguage string + + // CacheControl is the Cache-Control header to be sent in the response + // headers when serving the object data. + CacheControl string + + // ACL is the list of access control rules for the object. + ACL []ACLRule + + // Owner is the owner of the object. + // + // If non-zero, it is in the form of "user-". + Owner string + + // Size is the length of the object's content. + Size int64 + + // ContentEncoding is the encoding of the object's content. + ContentEncoding string + + // MD5 is the MD5 hash of the object's content. + MD5 []byte + + // CRC32C is the CRC32 checksum of the object's content using + // the Castagnoli93 polynomial. + CRC32C uint32 + + // MediaLink is an URL to the object's content. + MediaLink string + + // Metadata represents user-provided metadata, in key/value pairs. + // It can be nil if no metadata is provided. + Metadata map[string]string + + // Generation is the generation number of the object's content. + Generation int64 + + // MetaGeneration is the version of the metadata for this + // object at this generation. This field is used for preconditions + // and for detecting changes in metadata. A metageneration number + // is only meaningful in the context of a particular generation + // of a particular object. + MetaGeneration int64 + + // StorageClass is the storage class of the bucket. + // This value defines how objects in the bucket are stored and + // determines the SLA and the cost of storage. Typical values are + // "STANDARD" and "DURABLE_REDUCED_AVAILABILITY". + // It defaults to "STANDARD". + StorageClass string + + // Deleted is the time the object was deleted. + // If not deleted, it is the zero value. + Deleted time.Time + + // Updated is the creation or modification time of the object. 
+ // For buckets with versioning enabled, changing an object's + // metadata does not change this property. + Updated time.Time +} + +// convertTime converts a time in RFC3339 format to time.Time. +// If any error occurs in parsing, the zero-value time.Time is silently returned. +func convertTime(t string) time.Time { + var r time.Time + if t != "" { + r, _ = time.Parse(time.RFC3339, t) + } + return r +} + +func newObject(o *raw.Object) *Object { + if o == nil { + return nil + } + acl := make([]ACLRule, len(o.Acl)) + for i, rule := range o.Acl { + acl[i] = ACLRule{ + Entity: ACLEntity(rule.Entity), + Role: ACLRole(rule.Role), + } + } + owner := "" + if o.Owner != nil { + owner = o.Owner.Entity + } + md5, _ := base64.StdEncoding.DecodeString(o.Md5Hash) + var crc32c uint32 + d, err := base64.StdEncoding.DecodeString(o.Crc32c) + if err == nil && len(d) == 4 { + crc32c = uint32(d[0])<<24 + uint32(d[1])<<16 + uint32(d[2])<<8 + uint32(d[3]) + } + return &Object{ + Bucket: o.Bucket, + Name: o.Name, + ContentType: o.ContentType, + ContentLanguage: o.ContentLanguage, + CacheControl: o.CacheControl, + ACL: acl, + Owner: owner, + ContentEncoding: o.ContentEncoding, + Size: int64(o.Size), + MD5: md5, + CRC32C: crc32c, + MediaLink: o.MediaLink, + Metadata: o.Metadata, + Generation: o.Generation, + MetaGeneration: o.Metageneration, + StorageClass: o.StorageClass, + Deleted: convertTime(o.TimeDeleted), + Updated: convertTime(o.Updated), + } +} + +// Query represents a query to filter objects from a bucket. +type Query struct { + // Delimiter returns results in a directory-like fashion. + // Results will contain only objects whose names, aside from the + // prefix, do not contain delimiter. Objects whose names, + // aside from the prefix, contain delimiter will have their name, + // truncated after the delimiter, returned in prefixes. + // Duplicate prefixes are omitted. + // Optional. + Delimiter string + + // Prefix is the prefix filter to query objects + // whose names begin with this prefix. + // Optional. + Prefix string + + // Versions indicates whether multiple versions of the same + // object will be included in the results. + Versions bool + + // Cursor is a previously-returned page token + // representing part of the larger set of results to view. + // Optional. + Cursor string + + // MaxResults is the maximum number of items plus prefixes + // to return. As duplicate prefixes are omitted, + // fewer total results may be returned than requested. + // The default page limit is used if it is negative or zero. + MaxResults int +} + +// Objects represents a list of objects returned from +// a bucket look-p request and a query to retrieve more +// objects from the next pages. +type Objects struct { + // Results represent a list of object results. + Results []*Object + + // Next is the continuation query to retrieve more + // results with the same filtering criteria. If there + // are no more results to retrieve, it is nil. + Next *Query + + // Prefixes represents prefixes of objects + // matching-but-not-listed up to and including + // the requested delimiter. + Prefixes []string +} + +// contentTyper implements ContentTyper to enable an +// io.ReadCloser to specify its MIME type. +type contentTyper struct { + io.Reader + t string +} + +func (c *contentTyper) ContentType() string { + return c.t +} + +// A Writer writes a Cloud Storage object. +type Writer struct { + // ObjectAttrs are optional attributes to set on the object. Any attributes + // must be initialized before the first Write call. 
Nil or zero-valued + // attributes are ignored. + ObjectAttrs + + ctx context.Context + bucket string + name string + + once sync.Once + + opened bool + r io.Reader + pw *io.PipeWriter + + donec chan struct{} // closed after err and obj are set. + err error + obj *Object +} + +func (w *Writer) open() { + attrs := w.ObjectAttrs + // Always set the name, otherwise the backend + // rejects the request and responds with an HTTP 400. + if attrs.Name == "" { + attrs.Name = w.name + } + pr, pw := io.Pipe() + w.r = &contentTyper{pr, attrs.ContentType} + w.pw = pw + w.opened = true + + go func() { + resp, err := rawService(w.ctx).Objects.Insert( + w.bucket, attrs.toRawObject(w.bucket)).Media(w.r).Projection("full").Context(w.ctx).Do() + w.err = err + if err == nil { + w.obj = newObject(resp) + } else { + pr.CloseWithError(w.err) + } + close(w.donec) + }() +} + +// Write appends to w. +func (w *Writer) Write(p []byte) (n int, err error) { + if w.err != nil { + return 0, w.err + } + if !w.opened { + w.open() + } + return w.pw.Write(p) +} + +// Close completes the write operation and flushes any buffered data. +// If Close doesn't return an error, metadata about the written object +// can be retrieved by calling Object. +func (w *Writer) Close() error { + if !w.opened { + w.open() + } + if err := w.pw.Close(); err != nil { + return err + } + <-w.donec + return w.err +} + +// CloseWithError aborts the write operation with the provided error. +// CloseWithError always returns nil. +func (w *Writer) CloseWithError(err error) error { + if !w.opened { + return nil + } + return w.pw.CloseWithError(err) +} + +// Object returns metadata about a successfully-written object. +// It's only valid to call it after Close returns nil. +func (w *Writer) Object() *Object { + return w.obj +} diff --git a/vendor/github.com/docker/distribution/vendor/google.golang.org/grpc/.travis.yml b/vendor/github.com/docker/distribution/vendor/google.golang.org/grpc/.travis.yml new file mode 100644 index 000000000..055d6641c --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/google.golang.org/grpc/.travis.yml @@ -0,0 +1,13 @@ +language: go + +before_install: + - go get github.com/axw/gocov/gocov + - go get github.com/mattn/goveralls + - go get golang.org/x/tools/cmd/cover + +install: + - mkdir -p "$GOPATH/src/google.golang.org" + - mv "$TRAVIS_BUILD_DIR" "$GOPATH/src/google.golang.org/grpc" + +script: + - make test testrace diff --git a/vendor/github.com/docker/distribution/vendor/google.golang.org/grpc/CONTRIBUTING.md b/vendor/github.com/docker/distribution/vendor/google.golang.org/grpc/CONTRIBUTING.md new file mode 100644 index 000000000..407d384a7 --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/google.golang.org/grpc/CONTRIBUTING.md @@ -0,0 +1,23 @@ +# How to contribute + +We definitely welcome patches and contribution to grpc! Here is some guideline +and information about how to do so. + +## Getting started + +### Legal requirements + +In order to protect both you and ourselves, you will need to sign the +[Contributor License Agreement](https://cla.developers.google.com/clas). + +### Filing Issues +When filing an issue, make sure to answer these five questions: + +1. What version of Go are you using (`go version`)? +2. What operating system and processor architecture are you using? +3. What did you do? +4. What did you expect to see? +5. What did you see instead? 
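
Reviewer note on the vendored storage.Writer above: it streams the upload through an io.Pipe and only surfaces the resulting Object after Close. A minimal sketch of the calling pattern; the import path and the way the Writer is obtained are assumptions, since the package's constructor is not part of this hunk:

```go
package example

import (
	"io"

	// Import path assumed; this is the cloud storage package vendored in this diff.
	storage "google.golang.org/cloud/storage"
)

// upload streams src into w and returns the written object's metadata.
// Obtaining the Writer itself (client/bucket handles) is outside this hunk,
// so it is passed in ready to use.
func upload(w *storage.Writer, src io.Reader) (*storage.Object, error) {
	w.ContentType = "text/plain" // ObjectAttrs must be set before the first Write
	if _, err := io.Copy(w, src); err != nil {
		w.CloseWithError(err) // aborts the pipe; the in-flight Objects.Insert fails
		return nil, err
	}
	if err := w.Close(); err != nil { // flush and wait for the insert response
		return nil, err
	}
	return w.Object(), nil // only valid once Close has returned nil
}
```
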
+ +### Contributing code +Unless otherwise noted, the Go source files are distributed under the BSD-style license found in the LICENSE file. diff --git a/vendor/github.com/docker/distribution/vendor/google.golang.org/grpc/LICENSE b/vendor/github.com/docker/distribution/vendor/google.golang.org/grpc/LICENSE new file mode 100644 index 000000000..f4988b450 --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/google.golang.org/grpc/LICENSE @@ -0,0 +1,28 @@ +Copyright 2014, Google Inc. +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/docker/distribution/vendor/google.golang.org/grpc/Makefile b/vendor/github.com/docker/distribution/vendor/google.golang.org/grpc/Makefile new file mode 100644 index 000000000..12e84e4e5 --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/google.golang.org/grpc/Makefile @@ -0,0 +1,50 @@ +.PHONY: \ + all \ + deps \ + updatedeps \ + testdeps \ + updatetestdeps \ + build \ + proto \ + test \ + testrace \ + clean \ + +all: test testrace + +deps: + go get -d -v google.golang.org/grpc/... + +updatedeps: + go get -d -v -u -f google.golang.org/grpc/... + +testdeps: + go get -d -v -t google.golang.org/grpc/... + +updatetestdeps: + go get -d -v -t -u -f google.golang.org/grpc/... + +build: deps + go build google.golang.org/grpc/... + +proto: + @ if ! which protoc > /dev/null; then \ + echo "error: protoc not installed" >&2; \ + exit 1; \ + fi + go get -v github.com/golang/protobuf/protoc-gen-go + for file in $$(git ls-files '*.proto'); do \ + protoc -I $$(dirname $$file) --go_out=plugins=grpc:$$(dirname $$file) $$file; \ + done + +test: testdeps + go test -v -cpu 1,4 google.golang.org/grpc/... + +testrace: testdeps + go test -v -race -cpu 1,4 google.golang.org/grpc/... + +clean: + go clean google.golang.org/grpc/... 
+ +coverage: testdeps + ./coverage.sh --coveralls diff --git a/vendor/github.com/docker/distribution/vendor/google.golang.org/grpc/PATENTS b/vendor/github.com/docker/distribution/vendor/google.golang.org/grpc/PATENTS new file mode 100644 index 000000000..619f9dbfe --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/google.golang.org/grpc/PATENTS @@ -0,0 +1,22 @@ +Additional IP Rights Grant (Patents) + +"This implementation" means the copyrightable works distributed by +Google as part of the GRPC project. + +Google hereby grants to You a perpetual, worldwide, non-exclusive, +no-charge, royalty-free, irrevocable (except as stated in this section) +patent license to make, have made, use, offer to sell, sell, import, +transfer and otherwise run, modify and propagate the contents of this +implementation of GRPC, where such license applies only to those patent +claims, both currently owned or controlled by Google and acquired in +the future, licensable by Google that are necessarily infringed by this +implementation of GRPC. This grant does not include claims that would be +infringed only as a consequence of further modification of this +implementation. If you or your agent or exclusive licensee institute or +order or agree to the institution of patent litigation against any +entity (including a cross-claim or counterclaim in a lawsuit) alleging +that this implementation of GRPC or any code incorporated within this +implementation of GRPC constitutes direct or contributory patent +infringement, or inducement of patent infringement, then any patent +rights granted to you under this License for this implementation of GRPC +shall terminate as of the date such litigation is filed. diff --git a/vendor/github.com/docker/distribution/vendor/google.golang.org/grpc/README.md b/vendor/github.com/docker/distribution/vendor/google.golang.org/grpc/README.md new file mode 100644 index 000000000..37b05f095 --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/google.golang.org/grpc/README.md @@ -0,0 +1,32 @@ +#gRPC-Go + +[![Build Status](https://travis-ci.org/grpc/grpc-go.svg)](https://travis-ci.org/grpc/grpc-go) [![GoDoc](https://godoc.org/google.golang.org/grpc?status.svg)](https://godoc.org/google.golang.org/grpc) + +The Go implementation of [gRPC](http://www.grpc.io/): A high performance, open source, general RPC framework that puts mobile and HTTP/2 first. For more information see the [gRPC Quick Start](http://www.grpc.io/docs/) guide. + +Installation +------------ + +To install this package, you need to install Go 1.4 or above and setup your Go workspace on your computer. The simplest way to install the library is to run: + +``` +$ go get google.golang.org/grpc +``` + +Prerequisites +------------- + +This requires Go 1.4 or above. + +Constraints +----------- +The grpc package should only depend on standard Go packages and a small number of exceptions. If your contribution introduces new dependencies which are NOT in the [list](http://godoc.org/google.golang.org/grpc?imports), you need a discussion with gRPC-Go authors and consultants. + +Documentation +------------- +See [API documentation](https://godoc.org/google.golang.org/grpc) for package and API descriptions and find examples in the [examples directory](examples/). 
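
A minimal sketch of how a caller opens a connection through the Dial/DialOption API defined in the vendored clientconn.go below; the target address and user agent are placeholders:

```go
package main

import (
	"log"
	"time"

	"google.golang.org/grpc"
)

func main() {
	// Dial in this vendored version does not take a context; timeouts are a DialOption.
	conn, err := grpc.Dial("localhost:50051",
		grpc.WithInsecure(),                 // no transport security; required unless credentials are set
		grpc.WithTimeout(5*time.Second),     // overall connect timeout
		grpc.WithUserAgent("example-agent"), // appended to the transport's user agent
	)
	if err != nil {
		log.Fatalf("dial: %v", err)
	}
	defer conn.Close()
}
```
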
+ +Status +------ +Beta release + diff --git a/vendor/github.com/docker/distribution/vendor/google.golang.org/grpc/call.go b/vendor/github.com/docker/distribution/vendor/google.golang.org/grpc/call.go new file mode 100644 index 000000000..504a6e18a --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/google.golang.org/grpc/call.go @@ -0,0 +1,190 @@ +/* + * + * Copyright 2014, Google Inc. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following disclaimer + * in the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Google Inc. nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + */ + +package grpc + +import ( + "bytes" + "io" + "time" + + "golang.org/x/net/context" + "golang.org/x/net/trace" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/transport" +) + +// recvResponse receives and parses an RPC response. +// On error, it returns the error and indicates whether the call should be retried. +// +// TODO(zhaoq): Check whether the received message sequence is valid. +func recvResponse(dopts dialOptions, t transport.ClientTransport, c *callInfo, stream *transport.Stream, reply interface{}) error { + // Try to acquire header metadata from the server if there is any. + var err error + c.headerMD, err = stream.Header() + if err != nil { + return err + } + p := &parser{r: stream} + for { + if err = recv(p, dopts.codec, stream, dopts.dc, reply); err != nil { + if err == io.EOF { + break + } + return err + } + } + c.trailerMD = stream.Trailer() + return nil +} + +// sendRequest writes out various information of an RPC such as Context and Message. 
+func sendRequest(ctx context.Context, codec Codec, compressor Compressor, callHdr *transport.CallHdr, t transport.ClientTransport, args interface{}, opts *transport.Options) (_ *transport.Stream, err error) { + stream, err := t.NewStream(ctx, callHdr) + if err != nil { + return nil, err + } + defer func() { + if err != nil { + if _, ok := err.(transport.ConnectionError); !ok { + t.CloseStream(stream, err) + } + } + }() + var cbuf *bytes.Buffer + if compressor != nil { + cbuf = new(bytes.Buffer) + } + outBuf, err := encode(codec, args, compressor, cbuf) + if err != nil { + return nil, transport.StreamErrorf(codes.Internal, "grpc: %v", err) + } + err = t.Write(stream, outBuf, opts) + if err != nil { + return nil, err + } + // Sent successfully. + return stream, nil +} + +// Invoke is called by the generated code. It sends the RPC request on the +// wire and returns after response is received. +func Invoke(ctx context.Context, method string, args, reply interface{}, cc *ClientConn, opts ...CallOption) (err error) { + var c callInfo + for _, o := range opts { + if err := o.before(&c); err != nil { + return toRPCErr(err) + } + } + defer func() { + for _, o := range opts { + o.after(&c) + } + }() + if EnableTracing { + c.traceInfo.tr = trace.New("grpc.Sent."+methodFamily(method), method) + defer c.traceInfo.tr.Finish() + c.traceInfo.firstLine.client = true + if deadline, ok := ctx.Deadline(); ok { + c.traceInfo.firstLine.deadline = deadline.Sub(time.Now()) + } + c.traceInfo.tr.LazyLog(&c.traceInfo.firstLine, false) + // TODO(dsymonds): Arrange for c.traceInfo.firstLine.remoteAddr to be set. + defer func() { + if err != nil { + c.traceInfo.tr.LazyLog(&fmtStringer{"%v", []interface{}{err}}, true) + c.traceInfo.tr.SetError() + } + }() + } + topts := &transport.Options{ + Last: true, + Delay: false, + } + var ( + lastErr error // record the error that happened + ) + for { + var ( + err error + t transport.ClientTransport + stream *transport.Stream + ) + // TODO(zhaoq): Need a formal spec of retry strategy for non-failfast rpcs. + if lastErr != nil && c.failFast { + return toRPCErr(lastErr) + } + callHdr := &transport.CallHdr{ + Host: cc.authority, + Method: method, + } + if cc.dopts.cp != nil { + callHdr.SendCompress = cc.dopts.cp.Type() + } + t, err = cc.dopts.picker.Pick(ctx) + if err != nil { + if lastErr != nil { + // This was a retry; return the error from the last attempt. 
+ return toRPCErr(lastErr) + } + return toRPCErr(err) + } + if c.traceInfo.tr != nil { + c.traceInfo.tr.LazyLog(&payload{sent: true, msg: args}, true) + } + stream, err = sendRequest(ctx, cc.dopts.codec, cc.dopts.cp, callHdr, t, args, topts) + if err != nil { + if _, ok := err.(transport.ConnectionError); ok { + lastErr = err + continue + } + if lastErr != nil { + return toRPCErr(lastErr) + } + return toRPCErr(err) + } + // Receive the response + lastErr = recvResponse(cc.dopts, t, &c, stream, reply) + if _, ok := lastErr.(transport.ConnectionError); ok { + continue + } + if c.traceInfo.tr != nil { + c.traceInfo.tr.LazyLog(&payload{sent: false, msg: reply}, true) + } + t.CloseStream(stream, lastErr) + if lastErr != nil { + return toRPCErr(lastErr) + } + return Errorf(stream.StatusCode(), stream.StatusDesc()) + } +} diff --git a/vendor/github.com/docker/distribution/vendor/google.golang.org/grpc/clientconn.go b/vendor/github.com/docker/distribution/vendor/google.golang.org/grpc/clientconn.go new file mode 100644 index 000000000..e2264236f --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/google.golang.org/grpc/clientconn.go @@ -0,0 +1,590 @@ +/* + * + * Copyright 2014, Google Inc. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following disclaimer + * in the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Google Inc. nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + */ + +package grpc + +import ( + "errors" + "fmt" + "net" + "strings" + "sync" + "time" + + "golang.org/x/net/context" + "golang.org/x/net/trace" + "google.golang.org/grpc/credentials" + "google.golang.org/grpc/grpclog" + "google.golang.org/grpc/transport" +) + +var ( + // ErrUnspecTarget indicates that the target address is unspecified. + ErrUnspecTarget = errors.New("grpc: target is unspecified") + // ErrNoTransportSecurity indicates that there is no transport security + // being set for ClientConn. Users should either set one or explicitly + // call WithInsecure DialOption to disable security. 
+ ErrNoTransportSecurity = errors.New("grpc: no transport security set (use grpc.WithInsecure() explicitly or set credentials)") + // ErrCredentialsMisuse indicates that users want to transmit security information + // (e.g., oauth2 token) which requires secure connection on an insecure + // connection. + ErrCredentialsMisuse = errors.New("grpc: the credentials require transport level security (use grpc.WithTransportAuthenticator() to set)") + // ErrClientConnClosing indicates that the operation is illegal because + // the session is closing. + ErrClientConnClosing = errors.New("grpc: the client connection is closing") + // ErrClientConnTimeout indicates that the connection could not be + // established or re-established within the specified timeout. + ErrClientConnTimeout = errors.New("grpc: timed out trying to connect") + // minimum time to give a connection to complete + minConnectTimeout = 20 * time.Second +) + +// dialOptions configure a Dial call. dialOptions are set by the DialOption +// values passed to Dial. +type dialOptions struct { + codec Codec + cp Compressor + dc Decompressor + picker Picker + block bool + insecure bool + copts transport.ConnectOptions +} + +// DialOption configures how we set up the connection. +type DialOption func(*dialOptions) + +// WithCodec returns a DialOption which sets a codec for message marshaling and unmarshaling. +func WithCodec(c Codec) DialOption { + return func(o *dialOptions) { + o.codec = c + } +} + +// WithCompressor returns a DialOption which sets a CompressorGenerator for generating message +// compressor. +func WithCompressor(cp Compressor) DialOption { + return func(o *dialOptions) { + o.cp = cp + } +} + +// WithDecompressor returns a DialOption which sets a DecompressorGenerator for generating +// message decompressor. +func WithDecompressor(dc Decompressor) DialOption { + return func(o *dialOptions) { + o.dc = dc + } +} + +// WithPicker returns a DialOption which sets a picker for connection selection. +func WithPicker(p Picker) DialOption { + return func(o *dialOptions) { + o.picker = p + } +} + +// WithBlock returns a DialOption which makes caller of Dial blocks until the underlying +// connection is up. Without this, Dial returns immediately and connecting the server +// happens in background. +func WithBlock() DialOption { + return func(o *dialOptions) { + o.block = true + } +} + +// WithInsecure returns a DialOption which disables transport security for this ClientConn. +// Note that transport security is required unless WithInsecure is set. +func WithInsecure() DialOption { + return func(o *dialOptions) { + o.insecure = true + } +} + +// WithTransportCredentials returns a DialOption which configures a +// connection level security credentials (e.g., TLS/SSL). +func WithTransportCredentials(creds credentials.TransportAuthenticator) DialOption { + return func(o *dialOptions) { + o.copts.AuthOptions = append(o.copts.AuthOptions, creds) + } +} + +// WithPerRPCCredentials returns a DialOption which sets +// credentials which will place auth state on each outbound RPC. +func WithPerRPCCredentials(creds credentials.Credentials) DialOption { + return func(o *dialOptions) { + o.copts.AuthOptions = append(o.copts.AuthOptions, creds) + } +} + +// WithTimeout returns a DialOption that configures a timeout for dialing a client connection. 
+func WithTimeout(d time.Duration) DialOption { + return func(o *dialOptions) { + o.copts.Timeout = d + } +} + +// WithDialer returns a DialOption that specifies a function to use for dialing network addresses. +func WithDialer(f func(addr string, timeout time.Duration) (net.Conn, error)) DialOption { + return func(o *dialOptions) { + o.copts.Dialer = f + } +} + +// WithUserAgent returns a DialOption that specifies a user agent string for all the RPCs. +func WithUserAgent(s string) DialOption { + return func(o *dialOptions) { + o.copts.UserAgent = s + } +} + +// Dial creates a client connection the given target. +func Dial(target string, opts ...DialOption) (*ClientConn, error) { + cc := &ClientConn{ + target: target, + } + for _, opt := range opts { + opt(&cc.dopts) + } + if cc.dopts.codec == nil { + // Set the default codec. + cc.dopts.codec = protoCodec{} + } + if cc.dopts.picker == nil { + cc.dopts.picker = &unicastPicker{ + target: target, + } + } + if err := cc.dopts.picker.Init(cc); err != nil { + return nil, err + } + colonPos := strings.LastIndex(target, ":") + if colonPos == -1 { + colonPos = len(target) + } + cc.authority = target[:colonPos] + return cc, nil +} + +// ConnectivityState indicates the state of a client connection. +type ConnectivityState int + +const ( + // Idle indicates the ClientConn is idle. + Idle ConnectivityState = iota + // Connecting indicates the ClienConn is connecting. + Connecting + // Ready indicates the ClientConn is ready for work. + Ready + // TransientFailure indicates the ClientConn has seen a failure but expects to recover. + TransientFailure + // Shutdown indicates the ClientConn has started shutting down. + Shutdown +) + +func (s ConnectivityState) String() string { + switch s { + case Idle: + return "IDLE" + case Connecting: + return "CONNECTING" + case Ready: + return "READY" + case TransientFailure: + return "TRANSIENT_FAILURE" + case Shutdown: + return "SHUTDOWN" + default: + panic(fmt.Sprintf("unknown connectivity state: %d", s)) + } +} + +// ClientConn represents a client connection to an RPC service. +type ClientConn struct { + target string + authority string + dopts dialOptions +} + +// State returns the connectivity state of cc. +// This is EXPERIMENTAL API. +func (cc *ClientConn) State() (ConnectivityState, error) { + return cc.dopts.picker.State() +} + +// WaitForStateChange blocks until the state changes to something other than the sourceState. +// It returns the new state or error. +// This is EXPERIMENTAL API. +func (cc *ClientConn) WaitForStateChange(ctx context.Context, sourceState ConnectivityState) (ConnectivityState, error) { + return cc.dopts.picker.WaitForStateChange(ctx, sourceState) +} + +// Close starts to tear down the ClientConn. +func (cc *ClientConn) Close() error { + return cc.dopts.picker.Close() +} + +// Conn is a client connection to a single destination. +type Conn struct { + target string + dopts dialOptions + resetChan chan int + shutdownChan chan struct{} + events trace.EventLog + + mu sync.Mutex + state ConnectivityState + stateCV *sync.Cond + // ready is closed and becomes nil when a new transport is up or failed + // due to timeout. + ready chan struct{} + transport transport.ClientTransport +} + +// NewConn creates a Conn. 
+func NewConn(cc *ClientConn) (*Conn, error) { + if cc.target == "" { + return nil, ErrUnspecTarget + } + c := &Conn{ + target: cc.target, + dopts: cc.dopts, + resetChan: make(chan int, 1), + shutdownChan: make(chan struct{}), + } + if EnableTracing { + c.events = trace.NewEventLog("grpc.ClientConn", c.target) + } + if !c.dopts.insecure { + var ok bool + for _, cd := range c.dopts.copts.AuthOptions { + if _, ok := cd.(credentials.TransportAuthenticator); !ok { + continue + } + ok = true + } + if !ok { + return nil, ErrNoTransportSecurity + } + } else { + for _, cd := range c.dopts.copts.AuthOptions { + if cd.RequireTransportSecurity() { + return nil, ErrCredentialsMisuse + } + } + } + c.stateCV = sync.NewCond(&c.mu) + if c.dopts.block { + if err := c.resetTransport(false); err != nil { + c.Close() + return nil, err + } + // Start to monitor the error status of transport. + go c.transportMonitor() + } else { + // Start a goroutine connecting to the server asynchronously. + go func() { + if err := c.resetTransport(false); err != nil { + grpclog.Printf("Failed to dial %s: %v; please retry.", c.target, err) + c.Close() + return + } + c.transportMonitor() + }() + } + return c, nil +} + +// printf records an event in cc's event log, unless cc has been closed. +// REQUIRES cc.mu is held. +func (cc *Conn) printf(format string, a ...interface{}) { + if cc.events != nil { + cc.events.Printf(format, a...) + } +} + +// errorf records an error in cc's event log, unless cc has been closed. +// REQUIRES cc.mu is held. +func (cc *Conn) errorf(format string, a ...interface{}) { + if cc.events != nil { + cc.events.Errorf(format, a...) + } +} + +// State returns the connectivity state of the Conn +func (cc *Conn) State() ConnectivityState { + cc.mu.Lock() + defer cc.mu.Unlock() + return cc.state +} + +// WaitForStateChange blocks until the state changes to something other than the sourceState. +func (cc *Conn) WaitForStateChange(ctx context.Context, sourceState ConnectivityState) (ConnectivityState, error) { + cc.mu.Lock() + defer cc.mu.Unlock() + if sourceState != cc.state { + return cc.state, nil + } + done := make(chan struct{}) + var err error + go func() { + select { + case <-ctx.Done(): + cc.mu.Lock() + err = ctx.Err() + cc.stateCV.Broadcast() + cc.mu.Unlock() + case <-done: + } + }() + defer close(done) + for sourceState == cc.state { + cc.stateCV.Wait() + if err != nil { + return cc.state, err + } + } + return cc.state, nil +} + +// NotifyReset tries to signal the underlying transport needs to be reset due to +// for example a name resolution change in flight. +func (cc *Conn) NotifyReset() { + select { + case cc.resetChan <- 0: + default: + } +} + +func (cc *Conn) resetTransport(closeTransport bool) error { + var retries int + start := time.Now() + for { + cc.mu.Lock() + cc.printf("connecting") + if cc.state == Shutdown { + // cc.Close() has been invoked. + cc.mu.Unlock() + return ErrClientConnClosing + } + cc.state = Connecting + cc.stateCV.Broadcast() + cc.mu.Unlock() + if closeTransport { + cc.transport.Close() + } + // Adjust timeout for the current try. 
+ copts := cc.dopts.copts + if copts.Timeout < 0 { + cc.Close() + return ErrClientConnTimeout + } + if copts.Timeout > 0 { + copts.Timeout -= time.Since(start) + if copts.Timeout <= 0 { + cc.Close() + return ErrClientConnTimeout + } + } + sleepTime := backoff(retries) + timeout := sleepTime + if timeout < minConnectTimeout { + timeout = minConnectTimeout + } + if copts.Timeout == 0 || copts.Timeout > timeout { + copts.Timeout = timeout + } + connectTime := time.Now() + addr, err := cc.dopts.picker.PickAddr() + var newTransport transport.ClientTransport + if err == nil { + newTransport, err = transport.NewClientTransport(addr, &copts) + } + if err != nil { + cc.mu.Lock() + if cc.state == Shutdown { + // cc.Close() has been invoked. + cc.mu.Unlock() + return ErrClientConnClosing + } + cc.errorf("transient failure: %v", err) + cc.state = TransientFailure + cc.stateCV.Broadcast() + if cc.ready != nil { + close(cc.ready) + cc.ready = nil + } + cc.mu.Unlock() + sleepTime -= time.Since(connectTime) + if sleepTime < 0 { + sleepTime = 0 + } + // Fail early before falling into sleep. + if cc.dopts.copts.Timeout > 0 && cc.dopts.copts.Timeout < sleepTime+time.Since(start) { + cc.mu.Lock() + cc.errorf("connection timeout") + cc.mu.Unlock() + cc.Close() + return ErrClientConnTimeout + } + closeTransport = false + time.Sleep(sleepTime) + retries++ + grpclog.Printf("grpc: Conn.resetTransport failed to create client transport: %v; Reconnecting to %q", err, cc.target) + continue + } + cc.mu.Lock() + cc.printf("ready") + if cc.state == Shutdown { + // cc.Close() has been invoked. + cc.mu.Unlock() + newTransport.Close() + return ErrClientConnClosing + } + cc.state = Ready + cc.stateCV.Broadcast() + cc.transport = newTransport + if cc.ready != nil { + close(cc.ready) + cc.ready = nil + } + cc.mu.Unlock() + return nil + } +} + +func (cc *Conn) reconnect() bool { + cc.mu.Lock() + if cc.state == Shutdown { + // cc.Close() has been invoked. + cc.mu.Unlock() + return false + } + cc.state = TransientFailure + cc.stateCV.Broadcast() + cc.mu.Unlock() + if err := cc.resetTransport(true); err != nil { + // The ClientConn is closing. + cc.mu.Lock() + cc.printf("transport exiting: %v", err) + cc.mu.Unlock() + grpclog.Printf("grpc: Conn.transportMonitor exits due to: %v", err) + return false + } + return true +} + +// Run in a goroutine to track the error in transport and create the +// new transport if an error happens. It returns when the channel is closing. +func (cc *Conn) transportMonitor() { + for { + select { + // shutdownChan is needed to detect the teardown when + // the ClientConn is idle (i.e., no RPC in flight). + case <-cc.shutdownChan: + return + case <-cc.resetChan: + if !cc.reconnect() { + return + } + case <-cc.transport.Error(): + if !cc.reconnect() { + return + } + // Tries to drain reset signal if there is any since it is out-dated. + select { + case <-cc.resetChan: + default: + } + } + } +} + +// Wait blocks until i) the new transport is up or ii) ctx is done or iii) cc is closed. +func (cc *Conn) Wait(ctx context.Context) (transport.ClientTransport, error) { + for { + cc.mu.Lock() + switch { + case cc.state == Shutdown: + cc.mu.Unlock() + return nil, ErrClientConnClosing + case cc.state == Ready: + ct := cc.transport + cc.mu.Unlock() + return ct, nil + default: + ready := cc.ready + if ready == nil { + ready = make(chan struct{}) + cc.ready = ready + } + cc.mu.Unlock() + select { + case <-ctx.Done(): + return nil, transport.ContextErr(ctx.Err()) + // Wait until the new transport is ready or failed. 
+ case <-ready: + } + } + } +} + +// Close starts to tear down the Conn. Returns ErrClientConnClosing if +// it has been closed (mostly due to dial time-out). +// TODO(zhaoq): Make this synchronous to avoid unbounded memory consumption in +// some edge cases (e.g., the caller opens and closes many ClientConn's in a +// tight loop. +func (cc *Conn) Close() error { + cc.mu.Lock() + defer cc.mu.Unlock() + if cc.state == Shutdown { + return ErrClientConnClosing + } + cc.state = Shutdown + cc.stateCV.Broadcast() + if cc.events != nil { + cc.events.Finish() + cc.events = nil + } + if cc.ready != nil { + close(cc.ready) + cc.ready = nil + } + if cc.transport != nil { + cc.transport.Close() + } + if cc.shutdownChan != nil { + close(cc.shutdownChan) + } + return nil +} diff --git a/vendor/github.com/docker/distribution/vendor/google.golang.org/grpc/codegen.sh b/vendor/github.com/docker/distribution/vendor/google.golang.org/grpc/codegen.sh new file mode 100644 index 000000000..b00948884 --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/google.golang.org/grpc/codegen.sh @@ -0,0 +1,17 @@ +#!/bin/bash + +# This script serves as an example to demonstrate how to generate the gRPC-Go +# interface and the related messages from .proto file. +# +# It assumes the installation of i) Google proto buffer compiler at +# https://github.com/google/protobuf (after v2.6.1) and ii) the Go codegen +# plugin at https://github.com/golang/protobuf (after 2015-02-20). If you have +# not, please install them first. +# +# We recommend running this script at $GOPATH/src. +# +# If this is not what you need, feel free to make your own scripts. Again, this +# script is for demonstration purpose. +# +proto=$1 +protoc --go_out=plugins=grpc:. $proto diff --git a/vendor/github.com/docker/distribution/vendor/google.golang.org/grpc/codes/code_string.go b/vendor/github.com/docker/distribution/vendor/google.golang.org/grpc/codes/code_string.go new file mode 100644 index 000000000..e6762d084 --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/google.golang.org/grpc/codes/code_string.go @@ -0,0 +1,16 @@ +// generated by stringer -type=Code; DO NOT EDIT + +package codes + +import "fmt" + +const _Code_name = "OKCanceledUnknownInvalidArgumentDeadlineExceededNotFoundAlreadyExistsPermissionDeniedResourceExhaustedFailedPreconditionAbortedOutOfRangeUnimplementedInternalUnavailableDataLossUnauthenticated" + +var _Code_index = [...]uint8{0, 2, 10, 17, 32, 48, 56, 69, 85, 102, 120, 127, 137, 150, 158, 169, 177, 192} + +func (i Code) String() string { + if i+1 >= Code(len(_Code_index)) { + return fmt.Sprintf("Code(%d)", i) + } + return _Code_name[_Code_index[i]:_Code_index[i+1]] +} diff --git a/vendor/github.com/docker/distribution/vendor/google.golang.org/grpc/codes/codes.go b/vendor/github.com/docker/distribution/vendor/google.golang.org/grpc/codes/codes.go new file mode 100644 index 000000000..37c5b860b --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/google.golang.org/grpc/codes/codes.go @@ -0,0 +1,159 @@ +/* + * + * Copyright 2014, Google Inc. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. 
+ * * Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following disclaimer + * in the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Google Inc. nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + */ + +// Package codes defines the canonical error codes used by gRPC. It is +// consistent across various languages. +package codes + +// A Code is an unsigned 32-bit error code as defined in the gRPC spec. +type Code uint32 + +//go:generate stringer -type=Code + +const ( + // OK is returned on success. + OK Code = 0 + + // Canceled indicates the operation was cancelled (typically by the caller). + Canceled Code = 1 + + // Unknown error. An example of where this error may be returned is + // if a Status value received from another address space belongs to + // an error-space that is not known in this address space. Also + // errors raised by APIs that do not return enough error information + // may be converted to this error. + Unknown Code = 2 + + // InvalidArgument indicates client specified an invalid argument. + // Note that this differs from FailedPrecondition. It indicates arguments + // that are problematic regardless of the state of the system + // (e.g., a malformed file name). + InvalidArgument Code = 3 + + // DeadlineExceeded means operation expired before completion. + // For operations that change the state of the system, this error may be + // returned even if the operation has completed successfully. For + // example, a successful response from a server could have been delayed + // long enough for the deadline to expire. + DeadlineExceeded Code = 4 + + // NotFound means some requested entity (e.g., file or directory) was + // not found. + NotFound Code = 5 + + // AlreadyExists means an attempt to create an entity failed because one + // already exists. + AlreadyExists Code = 6 + + // PermissionDenied indicates the caller does not have permission to + // execute the specified operation. It must not be used for rejections + // caused by exhausting some resource (use ResourceExhausted + // instead for those errors). It must not be + // used if the caller cannot be identified (use Unauthenticated + // instead for those errors). + PermissionDenied Code = 7 + + // Unauthenticated indicates the request does not have valid + // authentication credentials for the operation. + Unauthenticated Code = 16 + + // ResourceExhausted indicates some resource has been exhausted, perhaps + // a per-user quota, or perhaps the entire file system is out of space. 
+ ResourceExhausted Code = 8 + + // FailedPrecondition indicates operation was rejected because the + // system is not in a state required for the operation's execution. + // For example, directory to be deleted may be non-empty, an rmdir + // operation is applied to a non-directory, etc. + // + // A litmus test that may help a service implementor in deciding + // between FailedPrecondition, Aborted, and Unavailable: + // (a) Use Unavailable if the client can retry just the failing call. + // (b) Use Aborted if the client should retry at a higher-level + // (e.g., restarting a read-modify-write sequence). + // (c) Use FailedPrecondition if the client should not retry until + // the system state has been explicitly fixed. E.g., if an "rmdir" + // fails because the directory is non-empty, FailedPrecondition + // should be returned since the client should not retry unless + // they have first fixed up the directory by deleting files from it. + // (d) Use FailedPrecondition if the client performs conditional + // REST Get/Update/Delete on a resource and the resource on the + // server does not match the condition. E.g., conflicting + // read-modify-write on the same resource. + FailedPrecondition Code = 9 + + // Aborted indicates the operation was aborted, typically due to a + // concurrency issue like sequencer check failures, transaction aborts, + // etc. + // + // See litmus test above for deciding between FailedPrecondition, + // Aborted, and Unavailable. + Aborted Code = 10 + + // OutOfRange means operation was attempted past the valid range. + // E.g., seeking or reading past end of file. + // + // Unlike InvalidArgument, this error indicates a problem that may + // be fixed if the system state changes. For example, a 32-bit file + // system will generate InvalidArgument if asked to read at an + // offset that is not in the range [0,2^32-1], but it will generate + // OutOfRange if asked to read from an offset past the current + // file size. + // + // There is a fair bit of overlap between FailedPrecondition and + // OutOfRange. We recommend using OutOfRange (the more specific + // error) when it applies so that callers who are iterating through + // a space can easily look for an OutOfRange error to detect when + // they are done. + OutOfRange Code = 11 + + // Unimplemented indicates operation is not implemented or not + // supported/enabled in this service. + Unimplemented Code = 12 + + // Internal errors. Means some invariants expected by underlying + // system has been broken. If you see one of these errors, + // something is very broken. + Internal Code = 13 + + // Unavailable indicates the service is currently unavailable. + // This is a most likely a transient condition and may be corrected + // by retrying with a backoff. + // + // See litmus test above for deciding between FailedPrecondition, + // Aborted, and Unavailable. + Unavailable Code = 14 + + // DataLoss indicates unrecoverable data loss or corruption. 
+ DataLoss Code = 15 +) diff --git a/vendor/github.com/docker/distribution/vendor/google.golang.org/grpc/coverage.sh b/vendor/github.com/docker/distribution/vendor/google.golang.org/grpc/coverage.sh new file mode 100644 index 000000000..120235374 --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/google.golang.org/grpc/coverage.sh @@ -0,0 +1,47 @@ +#!/bin/bash + +set -e + +workdir=.cover +profile="$workdir/cover.out" +mode=set +end2endtest="google.golang.org/grpc/test" + +generate_cover_data() { + rm -rf "$workdir" + mkdir "$workdir" + + for pkg in "$@"; do + if [ $pkg == "google.golang.org/grpc" -o $pkg == "google.golang.org/grpc/transport" -o $pkg == "google.golang.org/grpc/metadata" -o $pkg == "google.golang.org/grpc/credentials" ] + then + f="$workdir/$(echo $pkg | tr / -)" + go test -covermode="$mode" -coverprofile="$f.cover" "$pkg" + go test -covermode="$mode" -coverpkg "$pkg" -coverprofile="$f.e2e.cover" "$end2endtest" + fi + done + + echo "mode: $mode" >"$profile" + grep -h -v "^mode:" "$workdir"/*.cover >>"$profile" +} + +show_cover_report() { + go tool cover -${1}="$profile" +} + +push_to_coveralls() { + goveralls -coverprofile="$profile" +} + +generate_cover_data $(go list ./...) +show_cover_report func +case "$1" in +"") + ;; +--html) + show_cover_report html ;; +--coveralls) + push_to_coveralls ;; +*) + echo >&2 "error: invalid option: $1" ;; +esac +rm -rf "$workdir" diff --git a/vendor/github.com/docker/distribution/vendor/google.golang.org/grpc/credentials/credentials.go b/vendor/github.com/docker/distribution/vendor/google.golang.org/grpc/credentials/credentials.go new file mode 100644 index 000000000..0b0b89b6a --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/google.golang.org/grpc/credentials/credentials.go @@ -0,0 +1,226 @@ +/* + * + * Copyright 2014, Google Inc. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following disclaimer + * in the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Google Inc. nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
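
The codes package vendored above defines the canonical gRPC status codes, and its comments give a litmus test for when a caller should retry the same call (Unavailable), retry at a higher level (Aborted), or fix state first (FailedPrecondition). A purely illustrative sketch of that classification; the mapping below is an editorial reading of those comments, not part of the vendored API:

```go
package example

import "google.golang.org/grpc/codes"

// retryable reports whether the same call is worth retrying with backoff,
// following the guidance in the vendored codes.go comments.
func retryable(c codes.Code) bool {
	switch c {
	case codes.Unavailable, codes.ResourceExhausted, codes.DeadlineExceeded:
		return true // likely transient; retry with backoff
	case codes.Aborted:
		return false // per the vendored comments, retry at a higher level instead
	case codes.FailedPrecondition, codes.InvalidArgument, codes.PermissionDenied,
		codes.Unauthenticated, codes.NotFound, codes.AlreadyExists:
		return false // fix the request, credentials, or state first
	default:
		return false
	}
}
```
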
+ * + */ + +// Package credentials implements various credentials supported by gRPC library, +// which encapsulate all the state needed by a client to authenticate with a +// server and make various assertions, e.g., about the client's identity, role, +// or whether it is authorized to make a particular call. +package credentials + +import ( + "crypto/tls" + "crypto/x509" + "fmt" + "io/ioutil" + "net" + "strings" + "time" + + "golang.org/x/net/context" +) + +var ( + // alpnProtoStr are the specified application level protocols for gRPC. + alpnProtoStr = []string{"h2"} +) + +// Credentials defines the common interface all supported credentials must +// implement. +type Credentials interface { + // GetRequestMetadata gets the current request metadata, refreshing + // tokens if required. This should be called by the transport layer on + // each request, and the data should be populated in headers or other + // context. uri is the URI of the entry point for the request. When + // supported by the underlying implementation, ctx can be used for + // timeout and cancellation. + // TODO(zhaoq): Define the set of the qualified keys instead of leaving + // it as an arbitrary string. + GetRequestMetadata(ctx context.Context, uri ...string) (map[string]string, error) + // RequireTransportSecurity indicates whether the credentails requires + // transport security. + RequireTransportSecurity() bool +} + +// ProtocolInfo provides information regarding the gRPC wire protocol version, +// security protocol, security protocol version in use, etc. +type ProtocolInfo struct { + // ProtocolVersion is the gRPC wire protocol version. + ProtocolVersion string + // SecurityProtocol is the security protocol in use. + SecurityProtocol string + // SecurityVersion is the security protocol version. + SecurityVersion string +} + +// AuthInfo defines the common interface for the auth information the users are interested in. +type AuthInfo interface { + AuthType() string +} + +// TransportAuthenticator defines the common interface for all the live gRPC wire +// protocols and supported transport security protocols (e.g., TLS, SSL). +type TransportAuthenticator interface { + // ClientHandshake does the authentication handshake specified by the corresponding + // authentication protocol on rawConn for clients. It returns the authenticated + // connection and the corresponding auth information about the connection. + ClientHandshake(addr string, rawConn net.Conn, timeout time.Duration) (net.Conn, AuthInfo, error) + // ServerHandshake does the authentication handshake for servers. It returns + // the authenticated connection and the corresponding auth information about + // the connection. + ServerHandshake(rawConn net.Conn) (net.Conn, AuthInfo, error) + // Info provides the ProtocolInfo of this TransportAuthenticator. + Info() ProtocolInfo + Credentials +} + +// TLSInfo contains the auth information for a TLS authenticated connection. +// It implements the AuthInfo interface. +type TLSInfo struct { + State tls.ConnectionState +} + +func (t TLSInfo) AuthType() string { + return "tls" +} + +// tlsCreds is the credentials required for authenticating a connection using TLS. +type tlsCreds struct { + // TLS configuration + config tls.Config +} + +func (c tlsCreds) Info() ProtocolInfo { + return ProtocolInfo{ + SecurityProtocol: "tls", + SecurityVersion: "1.2", + } +} + +// GetRequestMetadata returns nil, nil since TLS credentials does not have +// metadata. 
+func (c *tlsCreds) GetRequestMetadata(ctx context.Context, uri ...string) (map[string]string, error) { + return nil, nil +} + +func (c *tlsCreds) RequireTransportSecurity() bool { + return true +} + +type timeoutError struct{} + +func (timeoutError) Error() string { return "credentials: Dial timed out" } +func (timeoutError) Timeout() bool { return true } +func (timeoutError) Temporary() bool { return true } + +func (c *tlsCreds) ClientHandshake(addr string, rawConn net.Conn, timeout time.Duration) (_ net.Conn, _ AuthInfo, err error) { + // borrow some code from tls.DialWithDialer + var errChannel chan error + if timeout != 0 { + errChannel = make(chan error, 2) + time.AfterFunc(timeout, func() { + errChannel <- timeoutError{} + }) + } + if c.config.ServerName == "" { + colonPos := strings.LastIndex(addr, ":") + if colonPos == -1 { + colonPos = len(addr) + } + c.config.ServerName = addr[:colonPos] + } + conn := tls.Client(rawConn, &c.config) + if timeout == 0 { + err = conn.Handshake() + } else { + go func() { + errChannel <- conn.Handshake() + }() + err = <-errChannel + } + if err != nil { + rawConn.Close() + return nil, nil, err + } + // TODO(zhaoq): Omit the auth info for client now. It is more for + // information than anything else. + return conn, nil, nil +} + +func (c *tlsCreds) ServerHandshake(rawConn net.Conn) (net.Conn, AuthInfo, error) { + conn := tls.Server(rawConn, &c.config) + if err := conn.Handshake(); err != nil { + rawConn.Close() + return nil, nil, err + } + return conn, TLSInfo{conn.ConnectionState()}, nil +} + +// NewTLS uses c to construct a TransportAuthenticator based on TLS. +func NewTLS(c *tls.Config) TransportAuthenticator { + tc := &tlsCreds{*c} + tc.config.NextProtos = alpnProtoStr + return tc +} + +// NewClientTLSFromCert constructs a TLS from the input certificate for client. +func NewClientTLSFromCert(cp *x509.CertPool, serverName string) TransportAuthenticator { + return NewTLS(&tls.Config{ServerName: serverName, RootCAs: cp}) +} + +// NewClientTLSFromFile constructs a TLS from the input certificate file for client. +func NewClientTLSFromFile(certFile, serverName string) (TransportAuthenticator, error) { + b, err := ioutil.ReadFile(certFile) + if err != nil { + return nil, err + } + cp := x509.NewCertPool() + if !cp.AppendCertsFromPEM(b) { + return nil, fmt.Errorf("credentials: failed to append certificates") + } + return NewTLS(&tls.Config{ServerName: serverName, RootCAs: cp}), nil +} + +// NewServerTLSFromCert constructs a TLS from the input certificate for server. +func NewServerTLSFromCert(cert *tls.Certificate) TransportAuthenticator { + return NewTLS(&tls.Config{Certificates: []tls.Certificate{*cert}}) +} + +// NewServerTLSFromFile constructs a TLS from the input certificate file and key +// file for server. +func NewServerTLSFromFile(certFile, keyFile string) (TransportAuthenticator, error) { + cert, err := tls.LoadX509KeyPair(certFile, keyFile) + if err != nil { + return nil, err + } + return NewTLS(&tls.Config{Certificates: []tls.Certificate{cert}}), nil +} diff --git a/vendor/github.com/docker/distribution/vendor/google.golang.org/grpc/doc.go b/vendor/github.com/docker/distribution/vendor/google.golang.org/grpc/doc.go new file mode 100644 index 000000000..b4c0e740e --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/google.golang.org/grpc/doc.go @@ -0,0 +1,6 @@ +/* +Package grpc implements an RPC system called gRPC. + +See www.grpc.io for more information about gRPC. 
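
The TLS helpers above (NewClientTLSFromFile and friends) feed straight into the DialOptions from the vendored clientconn.go. A hedged sketch of the client side; the certificate path and host name are placeholders:

```go
package example

import (
	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials"
)

// tlsDial builds a TransportAuthenticator from a CA bundle on disk and uses it
// to satisfy the "transport security required" check in the vendored clientconn.go.
func tlsDial() (*grpc.ClientConn, error) {
	creds, err := credentials.NewClientTLSFromFile("ca.pem", "registry.example.com")
	if err != nil {
		return nil, err
	}
	return grpc.Dial("registry.example.com:443", grpc.WithTransportCredentials(creds))
}
```
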
+*/ +package grpc diff --git a/vendor/github.com/docker/distribution/vendor/google.golang.org/grpc/grpclog/logger.go b/vendor/github.com/docker/distribution/vendor/google.golang.org/grpc/grpclog/logger.go new file mode 100644 index 000000000..2cc09be48 --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/google.golang.org/grpc/grpclog/logger.go @@ -0,0 +1,93 @@ +/* + * + * Copyright 2015, Google Inc. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following disclaimer + * in the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Google Inc. nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + */ + +/* +Package grpclog defines logging for grpc. +*/ +package grpclog + +import ( + "log" + "os" +) + +// Use golang's standard logger by default. +// Access is not mutex-protected: do not modify except in init() +// functions. +var logger Logger = log.New(os.Stderr, "", log.LstdFlags) + +// Logger mimics golang's standard Logger as an interface. +type Logger interface { + Fatal(args ...interface{}) + Fatalf(format string, args ...interface{}) + Fatalln(args ...interface{}) + Print(args ...interface{}) + Printf(format string, args ...interface{}) + Println(args ...interface{}) +} + +// SetLogger sets the logger that is used in grpc. Call only from +// init() functions. +func SetLogger(l Logger) { + logger = l +} + +// Fatal is equivalent to Print() followed by a call to os.Exit() with a non-zero exit code. +func Fatal(args ...interface{}) { + logger.Fatal(args...) +} + +// Fatalf is equivalent to Printf() followed by a call to os.Exit() with a non-zero exit code. +func Fatalf(format string, args ...interface{}) { + logger.Fatalf(format, args...) +} + +// Fatalln is equivalent to Println() followed by a call to os.Exit()) with a non-zero exit code. +func Fatalln(args ...interface{}) { + logger.Fatalln(args...) +} + +// Print prints to the logger. Arguments are handled in the manner of fmt.Print. +func Print(args ...interface{}) { + logger.Print(args...) +} + +// Printf prints to the logger. Arguments are handled in the manner of fmt.Printf. +func Printf(format string, args ...interface{}) { + logger.Printf(format, args...) +} + +// Println prints to the logger. 
Arguments are handled in the manner of fmt.Println. +func Println(args ...interface{}) { + logger.Println(args...) +} diff --git a/vendor/github.com/docker/distribution/vendor/google.golang.org/grpc/internal/internal.go b/vendor/github.com/docker/distribution/vendor/google.golang.org/grpc/internal/internal.go new file mode 100644 index 000000000..5489143a8 --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/google.golang.org/grpc/internal/internal.go @@ -0,0 +1,49 @@ +/* + * Copyright 2016, Google Inc. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following disclaimer + * in the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Google Inc. nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + */ + +// Package internal contains gRPC-internal code for testing, to avoid polluting +// the godoc of the top-level grpc package. +package internal + +// TestingCloseConns closes all existing transports but keeps +// grpcServer.lis accepting new connections. +// +// The provided grpcServer must be of type *grpc.Server. It is untyped +// for circular dependency reasons. +var TestingCloseConns func(grpcServer interface{}) + +// TestingUseHandlerImpl enables the http.Handler-based server implementation. +// It must be called before Serve and requires TLS credentials. +// +// The provided grpcServer must be of type *grpc.Server. It is untyped +// for circular dependency reasons. +var TestingUseHandlerImpl func(grpcServer interface{}) diff --git a/vendor/github.com/docker/distribution/vendor/google.golang.org/grpc/metadata/metadata.go b/vendor/github.com/docker/distribution/vendor/google.golang.org/grpc/metadata/metadata.go new file mode 100644 index 000000000..58469ddd3 --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/google.golang.org/grpc/metadata/metadata.go @@ -0,0 +1,134 @@ +/* + * + * Copyright 2014, Google Inc. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. 
+ * * Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following disclaimer + * in the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Google Inc. nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + */ + +// Package metadata define the structure of the metadata supported by gRPC library. +package metadata + +import ( + "encoding/base64" + "fmt" + "strings" + + "golang.org/x/net/context" +) + +const ( + binHdrSuffix = "-bin" +) + +// encodeKeyValue encodes key and value qualified for transmission via gRPC. +// Transmitting binary headers violates HTTP/2 spec. +// TODO(zhaoq): Maybe check if k is ASCII also. +func encodeKeyValue(k, v string) (string, string) { + k = strings.ToLower(k) + if strings.HasSuffix(k, binHdrSuffix) { + val := base64.StdEncoding.EncodeToString([]byte(v)) + v = string(val) + } + return k, v +} + +// DecodeKeyValue returns the original key and value corresponding to the +// encoded data in k, v. +func DecodeKeyValue(k, v string) (string, string, error) { + if !strings.HasSuffix(k, binHdrSuffix) { + return k, v, nil + } + val, err := base64.StdEncoding.DecodeString(v) + if err != nil { + return "", "", err + } + return k, string(val), nil +} + +// MD is a mapping from metadata keys to values. Users should use the following +// two convenience functions New and Pairs to generate MD. +type MD map[string][]string + +// New creates a MD from given key-value map. +func New(m map[string]string) MD { + md := MD{} + for k, v := range m { + key, val := encodeKeyValue(k, v) + md[key] = append(md[key], val) + } + return md +} + +// Pairs returns an MD formed by the mapping of key, value ... +// Pairs panics if len(kv) is odd. +func Pairs(kv ...string) MD { + if len(kv)%2 == 1 { + panic(fmt.Sprintf("metadata: Pairs got the odd number of input pairs for metadata: %d", len(kv))) + } + md := MD{} + var k string + for i, s := range kv { + if i%2 == 0 { + k = s + continue + } + key, val := encodeKeyValue(k, s) + md[key] = append(md[key], val) + } + return md +} + +// Len returns the number of items in md. +func (md MD) Len() int { + return len(md) +} + +// Copy returns a copy of md. +func (md MD) Copy() MD { + out := MD{} + for k, v := range md { + for _, i := range v { + out[k] = append(out[k], i) + } + } + return out +} + +type mdKey struct{} + +// NewContext creates a new context with md attached. +func NewContext(ctx context.Context, md MD) context.Context { + return context.WithValue(ctx, mdKey{}, md) +} + +// FromContext returns the MD in ctx if it exists. 
+func FromContext(ctx context.Context) (md MD, ok bool) { + md, ok = ctx.Value(mdKey{}).(MD) + return +} diff --git a/vendor/github.com/docker/distribution/vendor/google.golang.org/grpc/naming/naming.go b/vendor/github.com/docker/distribution/vendor/google.golang.org/grpc/naming/naming.go new file mode 100644 index 000000000..06605607c --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/google.golang.org/grpc/naming/naming.go @@ -0,0 +1,73 @@ +/* + * + * Copyright 2014, Google Inc. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following disclaimer + * in the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Google Inc. nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + */ + +// Package naming defines the naming API and related data structures for gRPC. +// The interface is EXPERIMENTAL and may be suject to change. +package naming + +// Operation defines the corresponding operations for a name resolution change. +type Operation uint8 + +const ( + // Add indicates a new address is added. + Add Operation = iota + // Delete indicates an exisiting address is deleted. + Delete +) + +// Update defines a name resolution update. Notice that it is not valid having both +// empty string Addr and nil Metadata in an Update. +type Update struct { + // Op indicates the operation of the update. + Op Operation + // Addr is the updated address. It is empty string if there is no address update. + Addr string + // Metadata is the updated metadata. It is nil if there is no metadata update. + // Metadata is not required for a custom naming implementation. + Metadata interface{} +} + +// Resolver creates a Watcher for a target to track its resolution changes. +type Resolver interface { + // Resolve creates a Watcher for target. + Resolve(target string) (Watcher, error) +} + +// Watcher watches for the updates on the specified target. +type Watcher interface { + // Next blocks until an update or error happens. It may return one or more + // updates. The first call should get the full set of the results. + Next() ([]*Update, error) + // Close closes the Watcher. 
+ Close() +} diff --git a/vendor/github.com/docker/distribution/vendor/google.golang.org/grpc/peer/peer.go b/vendor/github.com/docker/distribution/vendor/google.golang.org/grpc/peer/peer.go new file mode 100644 index 000000000..bfa6205ba --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/google.golang.org/grpc/peer/peer.go @@ -0,0 +1,65 @@ +/* + * + * Copyright 2014, Google Inc. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following disclaimer + * in the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Google Inc. nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + */ + +// Package peer defines various peer information associated with RPCs and +// corresponding utils. +package peer + +import ( + "net" + + "golang.org/x/net/context" + "google.golang.org/grpc/credentials" +) + +// Peer contains the information of the peer for an RPC. +type Peer struct { + // Addr is the peer address. + Addr net.Addr + // AuthInfo is the authentication information of the transport. + // It is nil if there is no transport security being used. + AuthInfo credentials.AuthInfo +} + +type peerKey struct{} + +// NewContext creates a new context with peer information attached. +func NewContext(ctx context.Context, p *Peer) context.Context { + return context.WithValue(ctx, peerKey{}, p) +} + +// FromContext returns the peer information in ctx if it exists. +func FromContext(ctx context.Context) (p *Peer, ok bool) { + p, ok = ctx.Value(peerKey{}).(*Peer) + return +} diff --git a/vendor/github.com/docker/distribution/vendor/google.golang.org/grpc/picker.go b/vendor/github.com/docker/distribution/vendor/google.golang.org/grpc/picker.go new file mode 100644 index 000000000..50f315b44 --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/google.golang.org/grpc/picker.go @@ -0,0 +1,243 @@ +/* + * + * Copyright 2014, Google Inc. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. 
+ * * Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following disclaimer + * in the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Google Inc. nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + */ + +package grpc + +import ( + "container/list" + "fmt" + "sync" + + "golang.org/x/net/context" + "google.golang.org/grpc/grpclog" + "google.golang.org/grpc/naming" + "google.golang.org/grpc/transport" +) + +// Picker picks a Conn for RPC requests. +// This is EXPERIMENTAL and please do not implement your own Picker for now. +type Picker interface { + // Init does initial processing for the Picker, e.g., initiate some connections. + Init(cc *ClientConn) error + // Pick blocks until either a transport.ClientTransport is ready for the upcoming RPC + // or some error happens. + Pick(ctx context.Context) (transport.ClientTransport, error) + // PickAddr picks a peer address for connecting. This will be called repeated for + // connecting/reconnecting. + PickAddr() (string, error) + // State returns the connectivity state of the underlying connections. + State() (ConnectivityState, error) + // WaitForStateChange blocks until the state changes to something other than + // the sourceState. It returns the new state or error. + WaitForStateChange(ctx context.Context, sourceState ConnectivityState) (ConnectivityState, error) + // Close closes all the Conn's owned by this Picker. + Close() error +} + +// unicastPicker is the default Picker which is used when there is no custom Picker +// specified by users. It always picks the same Conn. +type unicastPicker struct { + target string + conn *Conn +} + +func (p *unicastPicker) Init(cc *ClientConn) error { + c, err := NewConn(cc) + if err != nil { + return err + } + p.conn = c + return nil +} + +func (p *unicastPicker) Pick(ctx context.Context) (transport.ClientTransport, error) { + return p.conn.Wait(ctx) +} + +func (p *unicastPicker) PickAddr() (string, error) { + return p.target, nil +} + +func (p *unicastPicker) State() (ConnectivityState, error) { + return p.conn.State(), nil +} + +func (p *unicastPicker) WaitForStateChange(ctx context.Context, sourceState ConnectivityState) (ConnectivityState, error) { + return p.conn.WaitForStateChange(ctx, sourceState) +} + +func (p *unicastPicker) Close() error { + if p.conn != nil { + return p.conn.Close() + } + return nil +} + +// unicastNamingPicker picks an address from a name resolver to set up the connection. 
+type unicastNamingPicker struct { + cc *ClientConn + resolver naming.Resolver + watcher naming.Watcher + mu sync.Mutex + // The list of the addresses are obtained from watcher. + addrs *list.List + // It tracks the current picked addr by PickAddr(). The next PickAddr may + // push it forward on addrs. + pickedAddr *list.Element + conn *Conn +} + +// NewUnicastNamingPicker creates a Picker to pick addresses from a name resolver +// to connect. +func NewUnicastNamingPicker(r naming.Resolver) Picker { + return &unicastNamingPicker{ + resolver: r, + addrs: list.New(), + } +} + +type addrInfo struct { + addr string + // Set to true if this addrInfo needs to be deleted in the next PickAddrr() call. + deleting bool +} + +// processUpdates calls Watcher.Next() once and processes the obtained updates. +func (p *unicastNamingPicker) processUpdates() error { + updates, err := p.watcher.Next() + if err != nil { + return err + } + for _, update := range updates { + switch update.Op { + case naming.Add: + p.mu.Lock() + p.addrs.PushBack(&addrInfo{ + addr: update.Addr, + }) + p.mu.Unlock() + // Initial connection setup + if p.conn == nil { + conn, err := NewConn(p.cc) + if err != nil { + return err + } + p.conn = conn + } + case naming.Delete: + p.mu.Lock() + for e := p.addrs.Front(); e != nil; e = e.Next() { + if update.Addr == e.Value.(*addrInfo).addr { + if e == p.pickedAddr { + // Do not remove the element now if it is the current picked + // one. We leave the deletion to the next PickAddr() call. + e.Value.(*addrInfo).deleting = true + // Notify Conn to close it. All the live RPCs on this connection + // will be aborted. + p.conn.NotifyReset() + } else { + p.addrs.Remove(e) + } + } + } + p.mu.Unlock() + default: + grpclog.Println("Unknown update.Op ", update.Op) + } + } + return nil +} + +// monitor runs in a standalone goroutine to keep watching name resolution updates until the watcher +// is closed. +func (p *unicastNamingPicker) monitor() { + for { + if err := p.processUpdates(); err != nil { + return + } + } +} + +func (p *unicastNamingPicker) Init(cc *ClientConn) error { + w, err := p.resolver.Resolve(cc.target) + if err != nil { + return err + } + p.watcher = w + p.cc = cc + // Get the initial name resolution. 
+ if err := p.processUpdates(); err != nil { + return err + } + go p.monitor() + return nil +} + +func (p *unicastNamingPicker) Pick(ctx context.Context) (transport.ClientTransport, error) { + return p.conn.Wait(ctx) +} + +func (p *unicastNamingPicker) PickAddr() (string, error) { + p.mu.Lock() + defer p.mu.Unlock() + if p.pickedAddr == nil { + p.pickedAddr = p.addrs.Front() + } else { + pa := p.pickedAddr + p.pickedAddr = pa.Next() + if pa.Value.(*addrInfo).deleting { + p.addrs.Remove(pa) + } + if p.pickedAddr == nil { + p.pickedAddr = p.addrs.Front() + } + } + if p.pickedAddr == nil { + return "", fmt.Errorf("there is no address available to pick") + } + return p.pickedAddr.Value.(*addrInfo).addr, nil +} + +func (p *unicastNamingPicker) State() (ConnectivityState, error) { + return 0, fmt.Errorf("State() is not supported for unicastNamingPicker") +} + +func (p *unicastNamingPicker) WaitForStateChange(ctx context.Context, sourceState ConnectivityState) (ConnectivityState, error) { + return 0, fmt.Errorf("WaitForStateChange is not supported for unicastNamingPciker") +} + +func (p *unicastNamingPicker) Close() error { + p.watcher.Close() + p.conn.Close() + return nil +} diff --git a/vendor/github.com/docker/distribution/vendor/google.golang.org/grpc/rpc_util.go b/vendor/github.com/docker/distribution/vendor/google.golang.org/grpc/rpc_util.go new file mode 100644 index 000000000..96c790bed --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/google.golang.org/grpc/rpc_util.go @@ -0,0 +1,452 @@ +/* + * + * Copyright 2014, Google Inc. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following disclaimer + * in the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Google Inc. nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + */ + +package grpc + +import ( + "bytes" + "compress/gzip" + "encoding/binary" + "fmt" + "io" + "io/ioutil" + "math" + "math/rand" + "os" + "time" + + "github.com/golang/protobuf/proto" + "golang.org/x/net/context" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/metadata" + "google.golang.org/grpc/transport" +) + +// Codec defines the interface gRPC uses to encode and decode messages. 
+type Codec interface { + // Marshal returns the wire format of v. + Marshal(v interface{}) ([]byte, error) + // Unmarshal parses the wire format into v. + Unmarshal(data []byte, v interface{}) error + // String returns the name of the Codec implementation. The returned + // string will be used as part of content type in transmission. + String() string +} + +// protoCodec is a Codec implemetation with protobuf. It is the default codec for gRPC. +type protoCodec struct{} + +func (protoCodec) Marshal(v interface{}) ([]byte, error) { + return proto.Marshal(v.(proto.Message)) +} + +func (protoCodec) Unmarshal(data []byte, v interface{}) error { + return proto.Unmarshal(data, v.(proto.Message)) +} + +func (protoCodec) String() string { + return "proto" +} + +// Compressor defines the interface gRPC uses to compress a message. +type Compressor interface { + // Do compresses p into w. + Do(w io.Writer, p []byte) error + // Type returns the compression algorithm the Compressor uses. + Type() string +} + +// NewGZIPCompressor creates a Compressor based on GZIP. +func NewGZIPCompressor() Compressor { + return &gzipCompressor{} +} + +type gzipCompressor struct { +} + +func (c *gzipCompressor) Do(w io.Writer, p []byte) error { + z := gzip.NewWriter(w) + if _, err := z.Write(p); err != nil { + return err + } + return z.Close() +} + +func (c *gzipCompressor) Type() string { + return "gzip" +} + +// Decompressor defines the interface gRPC uses to decompress a message. +type Decompressor interface { + // Do reads the data from r and uncompress them. + Do(r io.Reader) ([]byte, error) + // Type returns the compression algorithm the Decompressor uses. + Type() string +} + +type gzipDecompressor struct { +} + +// NewGZIPDecompressor creates a Decompressor based on GZIP. +func NewGZIPDecompressor() Decompressor { + return &gzipDecompressor{} +} + +func (d *gzipDecompressor) Do(r io.Reader) ([]byte, error) { + z, err := gzip.NewReader(r) + if err != nil { + return nil, err + } + defer z.Close() + return ioutil.ReadAll(z) +} + +func (d *gzipDecompressor) Type() string { + return "gzip" +} + +// callInfo contains all related configuration and information about an RPC. +type callInfo struct { + failFast bool + headerMD metadata.MD + trailerMD metadata.MD + traceInfo traceInfo // in trace.go +} + +// CallOption configures a Call before it starts or extracts information from +// a Call after it completes. +type CallOption interface { + // before is called before the call is sent to any server. If before + // returns a non-nil error, the RPC fails with that error. + before(*callInfo) error + + // after is called after the call has completed. after cannot return an + // error, so any failures should be reported via output parameters. + after(*callInfo) +} + +type beforeCall func(c *callInfo) error + +func (o beforeCall) before(c *callInfo) error { return o(c) } +func (o beforeCall) after(c *callInfo) {} + +type afterCall func(c *callInfo) + +func (o afterCall) before(c *callInfo) error { return nil } +func (o afterCall) after(c *callInfo) { o(c) } + +// Header returns a CallOptions that retrieves the header metadata +// for a unary RPC. +func Header(md *metadata.MD) CallOption { + return afterCall(func(c *callInfo) { + *md = c.headerMD + }) +} + +// Trailer returns a CallOptions that retrieves the trailer metadata +// for a unary RPC. +func Trailer(md *metadata.MD) CallOption { + return afterCall(func(c *callInfo) { + *md = c.trailerMD + }) +} + +// The format of the payload: compressed or not? 
+type payloadFormat uint8 + +const ( + compressionNone payloadFormat = iota // no compression + compressionMade +) + +// parser reads complelete gRPC messages from the underlying reader. +type parser struct { + // r is the underlying reader. + // See the comment on recvMsg for the permissible + // error types. + r io.Reader + + // The header of a gRPC message. Find more detail + // at http://www.grpc.io/docs/guides/wire.html. + header [5]byte +} + +// recvMsg reads a complete gRPC message from the stream. +// +// It returns the message and its payload (compression/encoding) +// format. The caller owns the returned msg memory. +// +// If there is an error, possible values are: +// * io.EOF, when no messages remain +// * io.ErrUnexpectedEOF +// * of type transport.ConnectionError +// * of type transport.StreamError +// No other error values or types must be returned, which also means +// that the underlying io.Reader must not return an incompatible +// error. +func (p *parser) recvMsg() (pf payloadFormat, msg []byte, err error) { + if _, err := io.ReadFull(p.r, p.header[:]); err != nil { + return 0, nil, err + } + + pf = payloadFormat(p.header[0]) + length := binary.BigEndian.Uint32(p.header[1:]) + + if length == 0 { + return pf, nil, nil + } + // TODO(bradfitz,zhaoq): garbage. reuse buffer after proto decoding instead + // of making it for each message: + msg = make([]byte, int(length)) + if _, err := io.ReadFull(p.r, msg); err != nil { + if err == io.EOF { + err = io.ErrUnexpectedEOF + } + return 0, nil, err + } + return pf, msg, nil +} + +// encode serializes msg and prepends the message header. If msg is nil, it +// generates the message header of 0 message length. +func encode(c Codec, msg interface{}, cp Compressor, cbuf *bytes.Buffer) ([]byte, error) { + var b []byte + var length uint + if msg != nil { + var err error + // TODO(zhaoq): optimize to reduce memory alloc and copying. 
+ b, err = c.Marshal(msg) + if err != nil { + return nil, err + } + if cp != nil { + if err := cp.Do(cbuf, b); err != nil { + return nil, err + } + b = cbuf.Bytes() + } + length = uint(len(b)) + } + if length > math.MaxUint32 { + return nil, Errorf(codes.InvalidArgument, "grpc: message too large (%d bytes)", length) + } + + const ( + payloadLen = 1 + sizeLen = 4 + ) + + var buf = make([]byte, payloadLen+sizeLen+len(b)) + + // Write payload format + if cp == nil { + buf[0] = byte(compressionNone) + } else { + buf[0] = byte(compressionMade) + } + // Write length of b into buf + binary.BigEndian.PutUint32(buf[1:], uint32(length)) + // Copy encoded msg to buf + copy(buf[5:], b) + + return buf, nil +} + +func checkRecvPayload(pf payloadFormat, recvCompress string, dc Decompressor) error { + switch pf { + case compressionNone: + case compressionMade: + if recvCompress == "" { + return transport.StreamErrorf(codes.InvalidArgument, "grpc: invalid grpc-encoding %q with compression enabled", recvCompress) + } + if dc == nil || recvCompress != dc.Type() { + return transport.StreamErrorf(codes.InvalidArgument, "grpc: Decompressor is not installed for grpc-encoding %q", recvCompress) + } + default: + return transport.StreamErrorf(codes.InvalidArgument, "grpc: received unexpected payload format %d", pf) + } + return nil +} + +func recv(p *parser, c Codec, s *transport.Stream, dc Decompressor, m interface{}) error { + pf, d, err := p.recvMsg() + if err != nil { + return err + } + if err := checkRecvPayload(pf, s.RecvCompress(), dc); err != nil { + return err + } + if pf == compressionMade { + d, err = dc.Do(bytes.NewReader(d)) + if err != nil { + return transport.StreamErrorf(codes.Internal, "grpc: failed to decompress the received message %v", err) + } + } + if err := c.Unmarshal(d, m); err != nil { + return transport.StreamErrorf(codes.Internal, "grpc: failed to unmarshal the received message %v", err) + } + return nil +} + +// rpcError defines the status from an RPC. +type rpcError struct { + code codes.Code + desc string +} + +func (e rpcError) Error() string { + return fmt.Sprintf("rpc error: code = %d desc = %q", e.code, e.desc) +} + +// Code returns the error code for err if it was produced by the rpc system. +// Otherwise, it returns codes.Unknown. +func Code(err error) codes.Code { + if err == nil { + return codes.OK + } + if e, ok := err.(rpcError); ok { + return e.code + } + return codes.Unknown +} + +// ErrorDesc returns the error description of err if it was produced by the rpc system. +// Otherwise, it returns err.Error() or empty string when err is nil. +func ErrorDesc(err error) string { + if err == nil { + return "" + } + if e, ok := err.(rpcError); ok { + return e.desc + } + return err.Error() +} + +// Errorf returns an error containing an error code and a description; +// Errorf returns nil if c is OK. +func Errorf(c codes.Code, format string, a ...interface{}) error { + if c == codes.OK { + return nil + } + return rpcError{ + code: c, + desc: fmt.Sprintf(format, a...), + } +} + +// toRPCErr converts an error into a rpcError. +func toRPCErr(err error) error { + switch e := err.(type) { + case rpcError: + return err + case transport.StreamError: + return rpcError{ + code: e.Code, + desc: e.Desc, + } + case transport.ConnectionError: + return rpcError{ + code: codes.Internal, + desc: e.Desc, + } + } + return Errorf(codes.Unknown, "%v", err) +} + +// convertCode converts a standard Go error into its canonical code. 
Note that +// this is only used to translate the error returned by the server applications. +func convertCode(err error) codes.Code { + switch err { + case nil: + return codes.OK + case io.EOF: + return codes.OutOfRange + case io.ErrClosedPipe, io.ErrNoProgress, io.ErrShortBuffer, io.ErrShortWrite, io.ErrUnexpectedEOF: + return codes.FailedPrecondition + case os.ErrInvalid: + return codes.InvalidArgument + case context.Canceled: + return codes.Canceled + case context.DeadlineExceeded: + return codes.DeadlineExceeded + } + switch { + case os.IsExist(err): + return codes.AlreadyExists + case os.IsNotExist(err): + return codes.NotFound + case os.IsPermission(err): + return codes.PermissionDenied + } + return codes.Unknown +} + +const ( + // how long to wait after the first failure before retrying + baseDelay = 1.0 * time.Second + // upper bound of backoff delay + maxDelay = 120 * time.Second + // backoff increases by this factor on each retry + backoffFactor = 1.6 + // backoff is randomized downwards by this factor + backoffJitter = 0.2 +) + +func backoff(retries int) (t time.Duration) { + if retries == 0 { + return baseDelay + } + backoff, max := float64(baseDelay), float64(maxDelay) + for backoff < max && retries > 0 { + backoff *= backoffFactor + retries-- + } + if backoff > max { + backoff = max + } + // Randomize backoff delays so that if a cluster of requests start at + // the same time, they won't operate in lockstep. + backoff *= 1 + backoffJitter*(rand.Float64()*2-1) + if backoff < 0 { + return 0 + } + return time.Duration(backoff) +} + +// SupportPackageIsVersion1 is referenced from generated protocol buffer files +// to assert that that code is compatible with this version of the grpc package. +// +// This constant may be renamed in the future if a change in the generated code +// requires a synchronised update of grpc-go and protoc-gen-go. This constant +// should not be referenced from any other code. +const SupportPackageIsVersion1 = true diff --git a/vendor/github.com/docker/distribution/vendor/google.golang.org/grpc/server.go b/vendor/github.com/docker/distribution/vendor/google.golang.org/grpc/server.go new file mode 100644 index 000000000..bdf68a0fc --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/google.golang.org/grpc/server.go @@ -0,0 +1,746 @@ +/* + * + * Copyright 2014, Google Inc. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following disclaimer + * in the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Google Inc. nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + */ + +package grpc + +import ( + "bytes" + "errors" + "fmt" + "io" + "net" + "net/http" + "reflect" + "runtime" + "strings" + "sync" + "time" + + "golang.org/x/net/context" + "golang.org/x/net/http2" + "golang.org/x/net/trace" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/credentials" + "google.golang.org/grpc/grpclog" + "google.golang.org/grpc/internal" + "google.golang.org/grpc/metadata" + "google.golang.org/grpc/transport" +) + +type methodHandler func(srv interface{}, ctx context.Context, dec func(interface{}) error) (interface{}, error) + +// MethodDesc represents an RPC service's method specification. +type MethodDesc struct { + MethodName string + Handler methodHandler +} + +// ServiceDesc represents an RPC service's specification. +type ServiceDesc struct { + ServiceName string + // The pointer to the service interface. Used to check whether the user + // provided implementation satisfies the interface requirements. + HandlerType interface{} + Methods []MethodDesc + Streams []StreamDesc +} + +// service consists of the information of the server serving this service and +// the methods in this service. +type service struct { + server interface{} // the server for service methods + md map[string]*MethodDesc + sd map[string]*StreamDesc +} + +// Server is a gRPC server to serve RPC requests. +type Server struct { + opts options + + mu sync.Mutex // guards following + lis map[net.Listener]bool + conns map[io.Closer]bool + m map[string]*service // service name -> service info + events trace.EventLog +} + +type options struct { + creds credentials.Credentials + codec Codec + cp Compressor + dc Decompressor + maxConcurrentStreams uint32 + useHandlerImpl bool // use http.Handler-based server +} + +// A ServerOption sets options. +type ServerOption func(*options) + +// CustomCodec returns a ServerOption that sets a codec for message marshaling and unmarshaling. +func CustomCodec(codec Codec) ServerOption { + return func(o *options) { + o.codec = codec + } +} + +func RPCCompressor(cp Compressor) ServerOption { + return func(o *options) { + o.cp = cp + } +} + +func RPCDecompressor(dc Decompressor) ServerOption { + return func(o *options) { + o.dc = dc + } +} + +// MaxConcurrentStreams returns a ServerOption that will apply a limit on the number +// of concurrent streams to each ServerTransport. +func MaxConcurrentStreams(n uint32) ServerOption { + return func(o *options) { + o.maxConcurrentStreams = n + } +} + +// Creds returns a ServerOption that sets credentials for server connections. +func Creds(c credentials.Credentials) ServerOption { + return func(o *options) { + o.creds = c + } +} + +// NewServer creates a gRPC server which has no service registered and has not +// started to accept requests yet. +func NewServer(opt ...ServerOption) *Server { + var opts options + for _, o := range opt { + o(&opts) + } + if opts.codec == nil { + // Set the default codec. 
+ opts.codec = protoCodec{} + } + s := &Server{ + lis: make(map[net.Listener]bool), + opts: opts, + conns: make(map[io.Closer]bool), + m: make(map[string]*service), + } + if EnableTracing { + _, file, line, _ := runtime.Caller(1) + s.events = trace.NewEventLog("grpc.Server", fmt.Sprintf("%s:%d", file, line)) + } + return s +} + +// printf records an event in s's event log, unless s has been stopped. +// REQUIRES s.mu is held. +func (s *Server) printf(format string, a ...interface{}) { + if s.events != nil { + s.events.Printf(format, a...) + } +} + +// errorf records an error in s's event log, unless s has been stopped. +// REQUIRES s.mu is held. +func (s *Server) errorf(format string, a ...interface{}) { + if s.events != nil { + s.events.Errorf(format, a...) + } +} + +// RegisterService register a service and its implementation to the gRPC +// server. Called from the IDL generated code. This must be called before +// invoking Serve. +func (s *Server) RegisterService(sd *ServiceDesc, ss interface{}) { + ht := reflect.TypeOf(sd.HandlerType).Elem() + st := reflect.TypeOf(ss) + if !st.Implements(ht) { + grpclog.Fatalf("grpc: Server.RegisterService found the handler of type %v that does not satisfy %v", st, ht) + } + s.register(sd, ss) +} + +func (s *Server) register(sd *ServiceDesc, ss interface{}) { + s.mu.Lock() + defer s.mu.Unlock() + s.printf("RegisterService(%q)", sd.ServiceName) + if _, ok := s.m[sd.ServiceName]; ok { + grpclog.Fatalf("grpc: Server.RegisterService found duplicate service registration for %q", sd.ServiceName) + } + srv := &service{ + server: ss, + md: make(map[string]*MethodDesc), + sd: make(map[string]*StreamDesc), + } + for i := range sd.Methods { + d := &sd.Methods[i] + srv.md[d.MethodName] = d + } + for i := range sd.Streams { + d := &sd.Streams[i] + srv.sd[d.StreamName] = d + } + s.m[sd.ServiceName] = srv +} + +var ( + // ErrServerStopped indicates that the operation is now illegal because of + // the server being stopped. + ErrServerStopped = errors.New("grpc: the server has been stopped") +) + +func (s *Server) useTransportAuthenticator(rawConn net.Conn) (net.Conn, credentials.AuthInfo, error) { + creds, ok := s.opts.creds.(credentials.TransportAuthenticator) + if !ok { + return rawConn, nil, nil + } + return creds.ServerHandshake(rawConn) +} + +// Serve accepts incoming connections on the listener lis, creating a new +// ServerTransport and service goroutine for each. The service goroutines +// read gRPC requests and then call the registered handlers to reply to them. +// Service returns when lis.Accept fails. +func (s *Server) Serve(lis net.Listener) error { + s.mu.Lock() + s.printf("serving") + if s.lis == nil { + s.mu.Unlock() + return ErrServerStopped + } + s.lis[lis] = true + s.mu.Unlock() + defer func() { + lis.Close() + s.mu.Lock() + delete(s.lis, lis) + s.mu.Unlock() + }() + for { + rawConn, err := lis.Accept() + if err != nil { + s.mu.Lock() + s.printf("done serving; Accept = %v", err) + s.mu.Unlock() + return err + } + // Start a new goroutine to deal with rawConn + // so we don't stall this Accept loop goroutine. + go s.handleRawConn(rawConn) + } +} + +// handleRawConn is run in its own goroutine and handles a just-accepted +// connection that has not had any I/O performed on it yet. 
+func (s *Server) handleRawConn(rawConn net.Conn) { + conn, authInfo, err := s.useTransportAuthenticator(rawConn) + if err != nil { + s.mu.Lock() + s.errorf("ServerHandshake(%q) failed: %v", rawConn.RemoteAddr(), err) + s.mu.Unlock() + grpclog.Printf("grpc: Server.Serve failed to complete security handshake from %q: %v", rawConn.RemoteAddr(), err) + rawConn.Close() + return + } + + s.mu.Lock() + if s.conns == nil { + s.mu.Unlock() + conn.Close() + return + } + s.mu.Unlock() + + if s.opts.useHandlerImpl { + s.serveUsingHandler(conn) + } else { + s.serveNewHTTP2Transport(conn, authInfo) + } +} + +// serveNewHTTP2Transport sets up a new http/2 transport (using the +// gRPC http2 server transport in transport/http2_server.go) and +// serves streams on it. +// This is run in its own goroutine (it does network I/O in +// transport.NewServerTransport). +func (s *Server) serveNewHTTP2Transport(c net.Conn, authInfo credentials.AuthInfo) { + st, err := transport.NewServerTransport("http2", c, s.opts.maxConcurrentStreams, authInfo) + if err != nil { + s.mu.Lock() + s.errorf("NewServerTransport(%q) failed: %v", c.RemoteAddr(), err) + s.mu.Unlock() + c.Close() + grpclog.Println("grpc: Server.Serve failed to create ServerTransport: ", err) + return + } + if !s.addConn(st) { + st.Close() + return + } + s.serveStreams(st) +} + +func (s *Server) serveStreams(st transport.ServerTransport) { + defer s.removeConn(st) + defer st.Close() + var wg sync.WaitGroup + st.HandleStreams(func(stream *transport.Stream) { + wg.Add(1) + go func() { + defer wg.Done() + s.handleStream(st, stream, s.traceInfo(st, stream)) + }() + }) + wg.Wait() +} + +var _ http.Handler = (*Server)(nil) + +// serveUsingHandler is called from handleRawConn when s is configured +// to handle requests via the http.Handler interface. It sets up a +// net/http.Server to handle the just-accepted conn. The http.Server +// is configured to route all incoming requests (all HTTP/2 streams) +// to ServeHTTP, which creates a new ServerTransport for each stream. +// serveUsingHandler blocks until conn closes. +// +// This codepath is only used when Server.TestingUseHandlerImpl has +// been configured. This lets the end2end tests exercise the ServeHTTP +// method as one of the environment types. +// +// conn is the *tls.Conn that's already been authenticated. +func (s *Server) serveUsingHandler(conn net.Conn) { + if !s.addConn(conn) { + conn.Close() + return + } + defer s.removeConn(conn) + h2s := &http2.Server{ + MaxConcurrentStreams: s.opts.maxConcurrentStreams, + } + h2s.ServeConn(conn, &http2.ServeConnOpts{ + Handler: s, + }) +} + +func (s *Server) ServeHTTP(w http.ResponseWriter, r *http.Request) { + st, err := transport.NewServerHandlerTransport(w, r) + if err != nil { + http.Error(w, err.Error(), http.StatusInternalServerError) + return + } + if !s.addConn(st) { + st.Close() + return + } + defer s.removeConn(st) + s.serveStreams(st) +} + +// traceInfo returns a traceInfo and associates it with stream, if tracing is enabled. +// If tracing is not enabled, it returns nil. 
+func (s *Server) traceInfo(st transport.ServerTransport, stream *transport.Stream) (trInfo *traceInfo) { + if !EnableTracing { + return nil + } + trInfo = &traceInfo{ + tr: trace.New("grpc.Recv."+methodFamily(stream.Method()), stream.Method()), + } + trInfo.firstLine.client = false + trInfo.firstLine.remoteAddr = st.RemoteAddr() + stream.TraceContext(trInfo.tr) + if dl, ok := stream.Context().Deadline(); ok { + trInfo.firstLine.deadline = dl.Sub(time.Now()) + } + return trInfo +} + +func (s *Server) addConn(c io.Closer) bool { + s.mu.Lock() + defer s.mu.Unlock() + if s.conns == nil { + return false + } + s.conns[c] = true + return true +} + +func (s *Server) removeConn(c io.Closer) { + s.mu.Lock() + defer s.mu.Unlock() + if s.conns != nil { + delete(s.conns, c) + } +} + +func (s *Server) sendResponse(t transport.ServerTransport, stream *transport.Stream, msg interface{}, cp Compressor, opts *transport.Options) error { + var cbuf *bytes.Buffer + if cp != nil { + cbuf = new(bytes.Buffer) + } + p, err := encode(s.opts.codec, msg, cp, cbuf) + if err != nil { + // This typically indicates a fatal issue (e.g., memory + // corruption or hardware faults) the application program + // cannot handle. + // + // TODO(zhaoq): There exist other options also such as only closing the + // faulty stream locally and remotely (Other streams can keep going). Find + // the optimal option. + grpclog.Fatalf("grpc: Server failed to encode response %v", err) + } + return t.Write(stream, p, opts) +} + +func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport.Stream, srv *service, md *MethodDesc, trInfo *traceInfo) (err error) { + if trInfo != nil { + defer trInfo.tr.Finish() + trInfo.firstLine.client = false + trInfo.tr.LazyLog(&trInfo.firstLine, false) + defer func() { + if err != nil && err != io.EOF { + trInfo.tr.LazyLog(&fmtStringer{"%v", []interface{}{err}}, true) + trInfo.tr.SetError() + } + }() + } + p := &parser{r: stream} + for { + pf, req, err := p.recvMsg() + if err == io.EOF { + // The entire stream is done (for unary RPC only). + return err + } + if err == io.ErrUnexpectedEOF { + err = transport.StreamError{Code: codes.Internal, Desc: "io.ErrUnexpectedEOF"} + } + if err != nil { + switch err := err.(type) { + case transport.ConnectionError: + // Nothing to do here. 
+ case transport.StreamError: + if err := t.WriteStatus(stream, err.Code, err.Desc); err != nil { + grpclog.Printf("grpc: Server.processUnaryRPC failed to write status %v", err) + } + default: + panic(fmt.Sprintf("grpc: Unexpected error (%T) from recvMsg: %v", err, err)) + } + return err + } + + if err := checkRecvPayload(pf, stream.RecvCompress(), s.opts.dc); err != nil { + switch err := err.(type) { + case transport.StreamError: + if err := t.WriteStatus(stream, err.Code, err.Desc); err != nil { + grpclog.Printf("grpc: Server.processUnaryRPC failed to write status %v", err) + } + default: + if err := t.WriteStatus(stream, codes.Internal, err.Error()); err != nil { + grpclog.Printf("grpc: Server.processUnaryRPC failed to write status %v", err) + } + + } + return err + } + statusCode := codes.OK + statusDesc := "" + df := func(v interface{}) error { + if pf == compressionMade { + var err error + req, err = s.opts.dc.Do(bytes.NewReader(req)) + if err != nil { + if err := t.WriteStatus(stream, codes.Internal, err.Error()); err != nil { + grpclog.Printf("grpc: Server.processUnaryRPC failed to write status %v", err) + } + return err + } + } + if err := s.opts.codec.Unmarshal(req, v); err != nil { + return err + } + if trInfo != nil { + trInfo.tr.LazyLog(&payload{sent: false, msg: v}, true) + } + return nil + } + reply, appErr := md.Handler(srv.server, stream.Context(), df) + if appErr != nil { + if err, ok := appErr.(rpcError); ok { + statusCode = err.code + statusDesc = err.desc + } else { + statusCode = convertCode(appErr) + statusDesc = appErr.Error() + } + if trInfo != nil && statusCode != codes.OK { + trInfo.tr.LazyLog(stringer(statusDesc), true) + trInfo.tr.SetError() + } + if err := t.WriteStatus(stream, statusCode, statusDesc); err != nil { + grpclog.Printf("grpc: Server.processUnaryRPC failed to write status: %v", err) + return err + } + return nil + } + if trInfo != nil { + trInfo.tr.LazyLog(stringer("OK"), false) + } + opts := &transport.Options{ + Last: true, + Delay: false, + } + if s.opts.cp != nil { + stream.SetSendCompress(s.opts.cp.Type()) + } + if err := s.sendResponse(t, stream, reply, s.opts.cp, opts); err != nil { + switch err := err.(type) { + case transport.ConnectionError: + // Nothing to do here. 
+ case transport.StreamError: + statusCode = err.Code + statusDesc = err.Desc + default: + statusCode = codes.Unknown + statusDesc = err.Error() + } + return err + } + if trInfo != nil { + trInfo.tr.LazyLog(&payload{sent: true, msg: reply}, true) + } + return t.WriteStatus(stream, statusCode, statusDesc) + } +} + +func (s *Server) processStreamingRPC(t transport.ServerTransport, stream *transport.Stream, srv *service, sd *StreamDesc, trInfo *traceInfo) (err error) { + if s.opts.cp != nil { + stream.SetSendCompress(s.opts.cp.Type()) + } + ss := &serverStream{ + t: t, + s: stream, + p: &parser{r: stream}, + codec: s.opts.codec, + cp: s.opts.cp, + dc: s.opts.dc, + trInfo: trInfo, + } + if ss.cp != nil { + ss.cbuf = new(bytes.Buffer) + } + if trInfo != nil { + trInfo.tr.LazyLog(&trInfo.firstLine, false) + defer func() { + ss.mu.Lock() + if err != nil && err != io.EOF { + ss.trInfo.tr.LazyLog(&fmtStringer{"%v", []interface{}{err}}, true) + ss.trInfo.tr.SetError() + } + ss.trInfo.tr.Finish() + ss.trInfo.tr = nil + ss.mu.Unlock() + }() + } + if appErr := sd.Handler(srv.server, ss); appErr != nil { + if err, ok := appErr.(rpcError); ok { + ss.statusCode = err.code + ss.statusDesc = err.desc + } else if err, ok := appErr.(transport.StreamError); ok { + ss.statusCode = err.Code + ss.statusDesc = err.Desc + } else { + ss.statusCode = convertCode(appErr) + ss.statusDesc = appErr.Error() + } + } + if trInfo != nil { + ss.mu.Lock() + if ss.statusCode != codes.OK { + ss.trInfo.tr.LazyLog(stringer(ss.statusDesc), true) + ss.trInfo.tr.SetError() + } else { + ss.trInfo.tr.LazyLog(stringer("OK"), false) + } + ss.mu.Unlock() + } + return t.WriteStatus(ss.s, ss.statusCode, ss.statusDesc) + +} + +func (s *Server) handleStream(t transport.ServerTransport, stream *transport.Stream, trInfo *traceInfo) { + sm := stream.Method() + if sm != "" && sm[0] == '/' { + sm = sm[1:] + } + pos := strings.LastIndex(sm, "/") + if pos == -1 { + if trInfo != nil { + trInfo.tr.LazyLog(&fmtStringer{"Malformed method name %q", []interface{}{sm}}, true) + trInfo.tr.SetError() + } + if err := t.WriteStatus(stream, codes.InvalidArgument, fmt.Sprintf("malformed method name: %q", stream.Method())); err != nil { + if trInfo != nil { + trInfo.tr.LazyLog(&fmtStringer{"%v", []interface{}{err}}, true) + trInfo.tr.SetError() + } + grpclog.Printf("grpc: Server.handleStream failed to write status: %v", err) + } + if trInfo != nil { + trInfo.tr.Finish() + } + return + } + service := sm[:pos] + method := sm[pos+1:] + srv, ok := s.m[service] + if !ok { + if trInfo != nil { + trInfo.tr.LazyLog(&fmtStringer{"Unknown service %v", []interface{}{service}}, true) + trInfo.tr.SetError() + } + if err := t.WriteStatus(stream, codes.Unimplemented, fmt.Sprintf("unknown service %v", service)); err != nil { + if trInfo != nil { + trInfo.tr.LazyLog(&fmtStringer{"%v", []interface{}{err}}, true) + trInfo.tr.SetError() + } + grpclog.Printf("grpc: Server.handleStream failed to write status: %v", err) + } + if trInfo != nil { + trInfo.tr.Finish() + } + return + } + // Unary RPC or Streaming RPC? 
+ if md, ok := srv.md[method]; ok { + s.processUnaryRPC(t, stream, srv, md, trInfo) + return + } + if sd, ok := srv.sd[method]; ok { + s.processStreamingRPC(t, stream, srv, sd, trInfo) + return + } + if trInfo != nil { + trInfo.tr.LazyLog(&fmtStringer{"Unknown method %v", []interface{}{method}}, true) + trInfo.tr.SetError() + } + if err := t.WriteStatus(stream, codes.Unimplemented, fmt.Sprintf("unknown method %v", method)); err != nil { + if trInfo != nil { + trInfo.tr.LazyLog(&fmtStringer{"%v", []interface{}{err}}, true) + trInfo.tr.SetError() + } + grpclog.Printf("grpc: Server.handleStream failed to write status: %v", err) + } + if trInfo != nil { + trInfo.tr.Finish() + } +} + +// Stop stops the gRPC server. It immediately closes all open +// connections and listeners. +// It cancels all active RPCs on the server side and the corresponding +// pending RPCs on the client side will get notified by connection +// errors. +func (s *Server) Stop() { + s.mu.Lock() + listeners := s.lis + s.lis = nil + cs := s.conns + s.conns = nil + s.mu.Unlock() + + for lis := range listeners { + lis.Close() + } + for c := range cs { + c.Close() + } + + s.mu.Lock() + if s.events != nil { + s.events.Finish() + s.events = nil + } + s.mu.Unlock() +} + +func init() { + internal.TestingCloseConns = func(arg interface{}) { + arg.(*Server).testingCloseConns() + } + internal.TestingUseHandlerImpl = func(arg interface{}) { + arg.(*Server).opts.useHandlerImpl = true + } +} + +// testingCloseConns closes all existing transports but keeps s.lis +// accepting new connections. +func (s *Server) testingCloseConns() { + s.mu.Lock() + for c := range s.conns { + c.Close() + delete(s.conns, c) + } + s.mu.Unlock() +} + +// SendHeader sends header metadata. It may be called at most once from a unary +// RPC handler. The ctx is the RPC handler's Context or one derived from it. +func SendHeader(ctx context.Context, md metadata.MD) error { + if md.Len() == 0 { + return nil + } + stream, ok := transport.StreamFromContext(ctx) + if !ok { + return fmt.Errorf("grpc: failed to fetch the stream from the context %v", ctx) + } + t := stream.ServerTransport() + if t == nil { + grpclog.Fatalf("grpc: SendHeader: %v has no ServerTransport to send header metadata.", stream) + } + return t.WriteHeader(stream, md) +} + +// SetTrailer sets the trailer metadata that will be sent when an RPC returns. +// It may be called at most once from a unary RPC handler. The ctx is the RPC +// handler's Context or one derived from it. +func SetTrailer(ctx context.Context, md metadata.MD) error { + if md.Len() == 0 { + return nil + } + stream, ok := transport.StreamFromContext(ctx) + if !ok { + return fmt.Errorf("grpc: failed to fetch the stream from the context %v", ctx) + } + return stream.SetTrailer(md) +} diff --git a/vendor/github.com/docker/distribution/vendor/google.golang.org/grpc/stream.go b/vendor/github.com/docker/distribution/vendor/google.golang.org/grpc/stream.go new file mode 100644 index 000000000..dba7f6c42 --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/google.golang.org/grpc/stream.go @@ -0,0 +1,411 @@ +/* + * + * Copyright 2014, Google Inc. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. 
+ * * Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following disclaimer + * in the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Google Inc. nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + */ + +package grpc + +import ( + "bytes" + "errors" + "io" + "sync" + "time" + + "golang.org/x/net/context" + "golang.org/x/net/trace" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/metadata" + "google.golang.org/grpc/transport" +) + +type streamHandler func(srv interface{}, stream ServerStream) error + +// StreamDesc represents a streaming RPC service's method specification. +type StreamDesc struct { + StreamName string + Handler streamHandler + + // At least one of these is true. + ServerStreams bool + ClientStreams bool +} + +// Stream defines the common interface a client or server stream has to satisfy. +type Stream interface { + // Context returns the context for this stream. + Context() context.Context + // SendMsg blocks until it sends m, the stream is done or the stream + // breaks. + // On error, it aborts the stream and returns an RPC status on client + // side. On server side, it simply returns the error to the caller. + // SendMsg is called by generated code. + SendMsg(m interface{}) error + // RecvMsg blocks until it receives a message or the stream is + // done. On client side, it returns io.EOF when the stream is done. On + // any other error, it aborts the stream and returns an RPC status. On + // server side, it simply returns the error to the caller. + RecvMsg(m interface{}) error +} + +// ClientStream defines the interface a client stream has to satify. +type ClientStream interface { + // Header returns the header metedata received from the server if there + // is any. It blocks if the metadata is not ready to read. + Header() (metadata.MD, error) + // Trailer returns the trailer metadata from the server. It must be called + // after stream.Recv() returns non-nil error (including io.EOF) for + // bi-directional streaming and server streaming or stream.CloseAndRecv() + // returns for client streaming in order to receive trailer metadata if + // present. Otherwise, it could returns an empty MD even though trailer + // is present. + Trailer() metadata.MD + // CloseSend closes the send direction of the stream. It closes the stream + // when non-nil error is met. + CloseSend() error + Stream +} + +// NewClientStream creates a new Stream for the client side. This is called +// by generated code. 
+func NewClientStream(ctx context.Context, desc *StreamDesc, cc *ClientConn, method string, opts ...CallOption) (ClientStream, error) { + var ( + t transport.ClientTransport + err error + ) + t, err = cc.dopts.picker.Pick(ctx) + if err != nil { + return nil, toRPCErr(err) + } + // TODO(zhaoq): CallOption is omitted. Add support when it is needed. + callHdr := &transport.CallHdr{ + Host: cc.authority, + Method: method, + Flush: desc.ServerStreams && desc.ClientStreams, + } + if cc.dopts.cp != nil { + callHdr.SendCompress = cc.dopts.cp.Type() + } + cs := &clientStream{ + desc: desc, + codec: cc.dopts.codec, + cp: cc.dopts.cp, + dc: cc.dopts.dc, + tracing: EnableTracing, + } + if cc.dopts.cp != nil { + callHdr.SendCompress = cc.dopts.cp.Type() + cs.cbuf = new(bytes.Buffer) + } + if cs.tracing { + cs.trInfo.tr = trace.New("grpc.Sent."+methodFamily(method), method) + cs.trInfo.firstLine.client = true + if deadline, ok := ctx.Deadline(); ok { + cs.trInfo.firstLine.deadline = deadline.Sub(time.Now()) + } + cs.trInfo.tr.LazyLog(&cs.trInfo.firstLine, false) + ctx = trace.NewContext(ctx, cs.trInfo.tr) + } + s, err := t.NewStream(ctx, callHdr) + if err != nil { + cs.finish(err) + return nil, toRPCErr(err) + } + cs.t = t + cs.s = s + cs.p = &parser{r: s} + // Listen on ctx.Done() to detect cancellation when there is no pending + // I/O operations on this stream. + go func() { + select { + case <-t.Error(): + // Incur transport error, simply exit. + case <-s.Context().Done(): + err := s.Context().Err() + cs.finish(err) + cs.closeTransportStream(transport.ContextErr(err)) + } + }() + return cs, nil +} + +// clientStream implements a client side Stream. +type clientStream struct { + t transport.ClientTransport + s *transport.Stream + p *parser + desc *StreamDesc + codec Codec + cp Compressor + cbuf *bytes.Buffer + dc Decompressor + + tracing bool // set to EnableTracing when the clientStream is created. + + mu sync.Mutex + closed bool + // trInfo.tr is set when the clientStream is created (if EnableTracing is true), + // and is set to nil when the clientStream's finish method is called. + trInfo traceInfo +} + +func (cs *clientStream) Context() context.Context { + return cs.s.Context() +} + +func (cs *clientStream) Header() (metadata.MD, error) { + m, err := cs.s.Header() + if err != nil { + if _, ok := err.(transport.ConnectionError); !ok { + cs.closeTransportStream(err) + } + } + return m, err +} + +func (cs *clientStream) Trailer() metadata.MD { + return cs.s.Trailer() +} + +func (cs *clientStream) SendMsg(m interface{}) (err error) { + if cs.tracing { + cs.mu.Lock() + if cs.trInfo.tr != nil { + cs.trInfo.tr.LazyLog(&payload{sent: true, msg: m}, true) + } + cs.mu.Unlock() + } + defer func() { + if err != nil { + cs.finish(err) + } + if err == nil || err == io.EOF { + return + } + if _, ok := err.(transport.ConnectionError); !ok { + cs.closeTransportStream(err) + } + err = toRPCErr(err) + }() + out, err := encode(cs.codec, m, cs.cp, cs.cbuf) + defer func() { + if cs.cbuf != nil { + cs.cbuf.Reset() + } + }() + if err != nil { + return transport.StreamErrorf(codes.Internal, "grpc: %v", err) + } + return cs.t.Write(cs.s, out, &transport.Options{Last: false}) +} + +func (cs *clientStream) RecvMsg(m interface{}) (err error) { + err = recv(cs.p, cs.codec, cs.s, cs.dc, m) + defer func() { + // err != nil indicates the termination of the stream. 
+ if err != nil { + cs.finish(err) + } + }() + if err == nil { + if cs.tracing { + cs.mu.Lock() + if cs.trInfo.tr != nil { + cs.trInfo.tr.LazyLog(&payload{sent: false, msg: m}, true) + } + cs.mu.Unlock() + } + if !cs.desc.ClientStreams || cs.desc.ServerStreams { + return + } + // Special handling for client streaming rpc. + err = recv(cs.p, cs.codec, cs.s, cs.dc, m) + cs.closeTransportStream(err) + if err == nil { + return toRPCErr(errors.New("grpc: client streaming protocol violation: get , want ")) + } + if err == io.EOF { + if cs.s.StatusCode() == codes.OK { + cs.finish(err) + return nil + } + return Errorf(cs.s.StatusCode(), cs.s.StatusDesc()) + } + return toRPCErr(err) + } + if _, ok := err.(transport.ConnectionError); !ok { + cs.closeTransportStream(err) + } + if err == io.EOF { + if cs.s.StatusCode() == codes.OK { + // Returns io.EOF to indicate the end of the stream. + return + } + return Errorf(cs.s.StatusCode(), cs.s.StatusDesc()) + } + return toRPCErr(err) +} + +func (cs *clientStream) CloseSend() (err error) { + err = cs.t.Write(cs.s, nil, &transport.Options{Last: true}) + defer func() { + if err != nil { + cs.finish(err) + } + }() + if err == nil || err == io.EOF { + return + } + if _, ok := err.(transport.ConnectionError); !ok { + cs.closeTransportStream(err) + } + err = toRPCErr(err) + return +} + +func (cs *clientStream) closeTransportStream(err error) { + cs.mu.Lock() + if cs.closed { + cs.mu.Unlock() + return + } + cs.closed = true + cs.mu.Unlock() + cs.t.CloseStream(cs.s, err) +} + +func (cs *clientStream) finish(err error) { + if !cs.tracing { + return + } + cs.mu.Lock() + defer cs.mu.Unlock() + if cs.trInfo.tr != nil { + if err == nil || err == io.EOF { + cs.trInfo.tr.LazyPrintf("RPC: [OK]") + } else { + cs.trInfo.tr.LazyPrintf("RPC: [%v]", err) + cs.trInfo.tr.SetError() + } + cs.trInfo.tr.Finish() + cs.trInfo.tr = nil + } +} + +// ServerStream defines the interface a server stream has to satisfy. +type ServerStream interface { + // SendHeader sends the header metadata. It should not be called + // after SendProto. It fails if called multiple times or if + // called after SendProto. + SendHeader(metadata.MD) error + // SetTrailer sets the trailer metadata which will be sent with the + // RPC status. + SetTrailer(metadata.MD) + Stream +} + +// serverStream implements a server side Stream. +type serverStream struct { + t transport.ServerTransport + s *transport.Stream + p *parser + codec Codec + cp Compressor + dc Decompressor + cbuf *bytes.Buffer + statusCode codes.Code + statusDesc string + trInfo *traceInfo + + mu sync.Mutex // protects trInfo.tr after the service handler runs. 
+} + +func (ss *serverStream) Context() context.Context { + return ss.s.Context() +} + +func (ss *serverStream) SendHeader(md metadata.MD) error { + return ss.t.WriteHeader(ss.s, md) +} + +func (ss *serverStream) SetTrailer(md metadata.MD) { + if md.Len() == 0 { + return + } + ss.s.SetTrailer(md) + return +} + +func (ss *serverStream) SendMsg(m interface{}) (err error) { + defer func() { + if ss.trInfo != nil { + ss.mu.Lock() + if ss.trInfo.tr != nil { + if err == nil { + ss.trInfo.tr.LazyLog(&payload{sent: true, msg: m}, true) + } else { + ss.trInfo.tr.LazyLog(&fmtStringer{"%v", []interface{}{err}}, true) + ss.trInfo.tr.SetError() + } + } + ss.mu.Unlock() + } + }() + out, err := encode(ss.codec, m, ss.cp, ss.cbuf) + defer func() { + if ss.cbuf != nil { + ss.cbuf.Reset() + } + }() + if err != nil { + err = transport.StreamErrorf(codes.Internal, "grpc: %v", err) + return err + } + return ss.t.Write(ss.s, out, &transport.Options{Last: false}) +} + +func (ss *serverStream) RecvMsg(m interface{}) (err error) { + defer func() { + if ss.trInfo != nil { + ss.mu.Lock() + if ss.trInfo.tr != nil { + if err == nil { + ss.trInfo.tr.LazyLog(&payload{sent: false, msg: m}, true) + } else if err != io.EOF { + ss.trInfo.tr.LazyLog(&fmtStringer{"%v", []interface{}{err}}, true) + ss.trInfo.tr.SetError() + } + } + ss.mu.Unlock() + } + }() + return recv(ss.p, ss.codec, ss.s, ss.dc, m) +} diff --git a/vendor/github.com/docker/distribution/vendor/google.golang.org/grpc/trace.go b/vendor/github.com/docker/distribution/vendor/google.golang.org/grpc/trace.go new file mode 100644 index 000000000..cde04fbfc --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/google.golang.org/grpc/trace.go @@ -0,0 +1,120 @@ +/* + * + * Copyright 2015, Google Inc. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following disclaimer + * in the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Google Inc. nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + */ + +package grpc + +import ( + "bytes" + "fmt" + "io" + "net" + "strings" + "time" + + "golang.org/x/net/trace" +) + +// EnableTracing controls whether to trace RPCs using the golang.org/x/net/trace package. 
+// This should only be set before any RPCs are sent or received by this program. +var EnableTracing = true + +// methodFamily returns the trace family for the given method. +// It turns "/pkg.Service/GetFoo" into "pkg.Service". +func methodFamily(m string) string { + m = strings.TrimPrefix(m, "/") // remove leading slash + if i := strings.Index(m, "/"); i >= 0 { + m = m[:i] // remove everything from second slash + } + if i := strings.LastIndex(m, "."); i >= 0 { + m = m[i+1:] // cut down to last dotted component + } + return m +} + +// traceInfo contains tracing information for an RPC. +type traceInfo struct { + tr trace.Trace + firstLine firstLine +} + +// firstLine is the first line of an RPC trace. +type firstLine struct { + client bool // whether this is a client (outgoing) RPC + remoteAddr net.Addr + deadline time.Duration // may be zero +} + +func (f *firstLine) String() string { + var line bytes.Buffer + io.WriteString(&line, "RPC: ") + if f.client { + io.WriteString(&line, "to") + } else { + io.WriteString(&line, "from") + } + fmt.Fprintf(&line, " %v deadline:", f.remoteAddr) + if f.deadline != 0 { + fmt.Fprint(&line, f.deadline) + } else { + io.WriteString(&line, "none") + } + return line.String() +} + +// payload represents an RPC request or response payload. +type payload struct { + sent bool // whether this is an outgoing payload + msg interface{} // e.g. a proto.Message + // TODO(dsymonds): add stringifying info to codec, and limit how much we hold here? +} + +func (p payload) String() string { + if p.sent { + return fmt.Sprintf("sent: %v", p.msg) + } else { + return fmt.Sprintf("recv: %v", p.msg) + } +} + +type fmtStringer struct { + format string + a []interface{} +} + +func (f *fmtStringer) String() string { + return fmt.Sprintf(f.format, f.a...) +} + +type stringer string + +func (s stringer) String() string { return string(s) } diff --git a/vendor/github.com/docker/distribution/vendor/google.golang.org/grpc/transport/control.go b/vendor/github.com/docker/distribution/vendor/google.golang.org/grpc/transport/control.go new file mode 100644 index 000000000..f6b38a5a6 --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/google.golang.org/grpc/transport/control.go @@ -0,0 +1,260 @@ +/* + * + * Copyright 2014, Google Inc. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following disclaimer + * in the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Google Inc. nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. 
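// Illustrative sketch (assumes the process also runs an HTTP listener): with
// EnableTracing left at its default of true, the golang.org/x/net/trace package
// registers /debug/requests and /debug/events on http.DefaultServeMux, so the
// per-RPC traces recorded above can be inspected by serving that mux:
//
//	go func() {
//		log.Println(http.ListenAndServe("localhost:6060", nil))
//	}()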
IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + */ + +package transport + +import ( + "fmt" + "sync" + + "golang.org/x/net/http2" +) + +const ( + // The default value of flow control window size in HTTP2 spec. + defaultWindowSize = 65535 + // The initial window size for flow control. + initialWindowSize = defaultWindowSize // for an RPC + initialConnWindowSize = defaultWindowSize * 16 // for a connection +) + +// The following defines various control items which could flow through +// the control buffer of transport. They represent different aspects of +// control tasks, e.g., flow control, settings, streaming resetting, etc. +type windowUpdate struct { + streamID uint32 + increment uint32 +} + +func (windowUpdate) isItem() bool { + return true +} + +type settings struct { + ack bool + ss []http2.Setting +} + +func (settings) isItem() bool { + return true +} + +type resetStream struct { + streamID uint32 + code http2.ErrCode +} + +func (resetStream) isItem() bool { + return true +} + +type flushIO struct { +} + +func (flushIO) isItem() bool { + return true +} + +type ping struct { + ack bool + data [8]byte +} + +func (ping) isItem() bool { + return true +} + +// quotaPool is a pool which accumulates the quota and sends it to acquire() +// when it is available. +type quotaPool struct { + c chan int + + mu sync.Mutex + quota int +} + +// newQuotaPool creates a quotaPool which has quota q available to consume. +func newQuotaPool(q int) *quotaPool { + qb := "aPool{ + c: make(chan int, 1), + } + if q > 0 { + qb.c <- q + } else { + qb.quota = q + } + return qb +} + +// add adds n to the available quota and tries to send it on acquire. +func (qb *quotaPool) add(n int) { + qb.mu.Lock() + defer qb.mu.Unlock() + qb.quota += n + if qb.quota <= 0 { + return + } + select { + case qb.c <- qb.quota: + qb.quota = 0 + default: + } +} + +// cancel cancels the pending quota sent on acquire, if any. +func (qb *quotaPool) cancel() { + qb.mu.Lock() + defer qb.mu.Unlock() + select { + case n := <-qb.c: + qb.quota += n + default: + } +} + +// reset cancels the pending quota sent on acquired, incremented by v and sends +// it back on acquire. +func (qb *quotaPool) reset(v int) { + qb.mu.Lock() + defer qb.mu.Unlock() + select { + case n := <-qb.c: + qb.quota += n + default: + } + qb.quota += v + if qb.quota <= 0 { + return + } + select { + case qb.c <- qb.quota: + qb.quota = 0 + default: + } +} + +// acquire returns the channel on which available quota amounts are sent. +func (qb *quotaPool) acquire() <-chan int { + return qb.c +} + +// inFlow deals with inbound flow control +type inFlow struct { + // The inbound flow control limit for pending data. + limit uint32 + // conn points to the shared connection-level inFlow that is shared + // by all streams on that conn. It is nil for the inFlow on the conn + // directly. + conn *inFlow + + mu sync.Mutex + // pendingData is the overall data which have been received but not been + // consumed by applications. 
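// Illustrative sketch of the acquire/add hand-off that quotaPool implements
// (the same pattern the HTTP/2 client transport's Write path uses): the entire
// available quota is delivered on the channel, the consumer keeps what it
// needs and returns the rest:
//
//	qp := newQuotaPool(64)
//	need := 10
//	got := <-qp.acquire() // receives all 64
//	if got > need {
//		qp.add(got - need) // hand the surplus back for other writers
//		got = need
//	}
//	// ... send `got` bytes ...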
+ pendingData uint32 + // The amount of data the application has consumed but grpc has not sent + // window update for them. Used to reduce window update frequency. + pendingUpdate uint32 +} + +// onData is invoked when some data frame is received. It increments not only its +// own pendingData but also that of the associated connection-level flow. +func (f *inFlow) onData(n uint32) error { + if n == 0 { + return nil + } + f.mu.Lock() + defer f.mu.Unlock() + if f.pendingData+f.pendingUpdate+n > f.limit { + return fmt.Errorf("recieved %d-bytes data exceeding the limit %d bytes", f.pendingData+f.pendingUpdate+n, f.limit) + } + if f.conn != nil { + if err := f.conn.onData(n); err != nil { + return ConnectionErrorf("%v", err) + } + } + f.pendingData += n + return nil +} + +// connOnRead updates the connection level states when the application consumes data. +func (f *inFlow) connOnRead(n uint32) uint32 { + if n == 0 || f.conn != nil { + return 0 + } + f.mu.Lock() + defer f.mu.Unlock() + f.pendingData -= n + f.pendingUpdate += n + if f.pendingUpdate >= f.limit/4 { + ret := f.pendingUpdate + f.pendingUpdate = 0 + return ret + } + return 0 +} + +// onRead is invoked when the application reads the data. It returns the window updates +// for both stream and connection level. +func (f *inFlow) onRead(n uint32) (swu, cwu uint32) { + if n == 0 { + return + } + f.mu.Lock() + defer f.mu.Unlock() + if f.pendingData == 0 { + // pendingData has been adjusted by restoreConn. + return + } + f.pendingData -= n + f.pendingUpdate += n + if f.pendingUpdate >= f.limit/4 { + swu = f.pendingUpdate + f.pendingUpdate = 0 + } + cwu = f.conn.connOnRead(n) + return +} + +// restoreConn is invoked when a stream is terminated. It removes its stake in +// the connection-level flow and resets its own state. +func (f *inFlow) restoreConn() uint32 { + if f.conn == nil { + return 0 + } + f.mu.Lock() + defer f.mu.Unlock() + n := f.pendingData + f.pendingData = 0 + f.pendingUpdate = 0 + return f.conn.connOnRead(n) +} diff --git a/vendor/github.com/docker/distribution/vendor/google.golang.org/grpc/transport/handler_server.go b/vendor/github.com/docker/distribution/vendor/google.golang.org/grpc/transport/handler_server.go new file mode 100644 index 000000000..d7e18a0b6 --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/google.golang.org/grpc/transport/handler_server.go @@ -0,0 +1,377 @@ +/* + * Copyright 2016, Google Inc. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following disclaimer + * in the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Google Inc. nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + */ + +// This file is the implementation of a gRPC server using HTTP/2 which +// uses the standard Go http2 Server implementation (via the +// http.Handler interface), rather than speaking low-level HTTP/2 +// frames itself. It is the implementation of *grpc.Server.ServeHTTP. + +package transport + +import ( + "errors" + "fmt" + "io" + "net" + "net/http" + "strings" + "sync" + "time" + + "golang.org/x/net/context" + "golang.org/x/net/http2" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/credentials" + "google.golang.org/grpc/metadata" + "google.golang.org/grpc/peer" +) + +// NewServerHandlerTransport returns a ServerTransport handling gRPC +// from inside an http.Handler. It requires that the http Server +// supports HTTP/2. +func NewServerHandlerTransport(w http.ResponseWriter, r *http.Request) (ServerTransport, error) { + if r.ProtoMajor != 2 { + return nil, errors.New("gRPC requires HTTP/2") + } + if r.Method != "POST" { + return nil, errors.New("invalid gRPC request method") + } + if !strings.Contains(r.Header.Get("Content-Type"), "application/grpc") { + return nil, errors.New("invalid gRPC request content-type") + } + if _, ok := w.(http.Flusher); !ok { + return nil, errors.New("gRPC requires a ResponseWriter supporting http.Flusher") + } + if _, ok := w.(http.CloseNotifier); !ok { + return nil, errors.New("gRPC requires a ResponseWriter supporting http.CloseNotifier") + } + + st := &serverHandlerTransport{ + rw: w, + req: r, + closedCh: make(chan struct{}), + writes: make(chan func()), + } + + if v := r.Header.Get("grpc-timeout"); v != "" { + to, err := timeoutDecode(v) + if err != nil { + return nil, StreamErrorf(codes.Internal, "malformed time-out: %v", err) + } + st.timeoutSet = true + st.timeout = to + } + + var metakv []string + for k, vv := range r.Header { + k = strings.ToLower(k) + if isReservedHeader(k) { + continue + } + for _, v := range vv { + if k == "user-agent" { + // user-agent is special. Copying logic of http_util.go. + if i := strings.LastIndex(v, " "); i == -1 { + // There is no application user agent string being set + continue + } else { + v = v[:i] + } + } + metakv = append(metakv, k, v) + + } + } + st.headerMD = metadata.Pairs(metakv...) + + return st, nil +} + +// serverHandlerTransport is an implementation of ServerTransport +// which replies to exactly one gRPC request (exactly one HTTP request), +// using the net/http.Handler interface. This http.Handler is guaranteed +// at this point to be speaking over HTTP/2, so it's able to speak valid +// gRPC. +type serverHandlerTransport struct { + rw http.ResponseWriter + req *http.Request + timeoutSet bool + timeout time.Duration + didCommonHeaders bool + + headerMD metadata.MD + + closeOnce sync.Once + closedCh chan struct{} // closed on Close + + // writes is a channel of code to run serialized in the + // ServeHTTP (HandleStreams) goroutine. The channel is closed + // when WriteStatus is called. 
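// Illustrative sketch (hypothetical certificate paths): this transport backs
// (*grpc.Server).ServeHTTP, so a grpc.Server can be mounted on any net/http
// server that negotiates HTTP/2, for example a plain TLS listener:
//
//	srv := grpc.NewServer()
//	httpSrv := &http.Server{Addr: ":8443", Handler: srv}
//	log.Fatal(httpSrv.ListenAndServeTLS("cert.pem", "key.pem"))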
+ writes chan func() +} + +func (ht *serverHandlerTransport) Close() error { + ht.closeOnce.Do(ht.closeCloseChanOnce) + return nil +} + +func (ht *serverHandlerTransport) closeCloseChanOnce() { close(ht.closedCh) } + +func (ht *serverHandlerTransport) RemoteAddr() net.Addr { return strAddr(ht.req.RemoteAddr) } + +// strAddr is a net.Addr backed by either a TCP "ip:port" string, or +// the empty string if unknown. +type strAddr string + +func (a strAddr) Network() string { + if a != "" { + // Per the documentation on net/http.Request.RemoteAddr, if this is + // set, it's set to the IP:port of the peer (hence, TCP): + // https://golang.org/pkg/net/http/#Request + // + // If we want to support Unix sockets later, we can + // add our own grpc-specific convention within the + // grpc codebase to set RemoteAddr to a different + // format, or probably better: we can attach it to the + // context and use that from serverHandlerTransport.RemoteAddr. + return "tcp" + } + return "" +} + +func (a strAddr) String() string { return string(a) } + +// do runs fn in the ServeHTTP goroutine. +func (ht *serverHandlerTransport) do(fn func()) error { + select { + case ht.writes <- fn: + return nil + case <-ht.closedCh: + return ErrConnClosing + } +} + +func (ht *serverHandlerTransport) WriteStatus(s *Stream, statusCode codes.Code, statusDesc string) error { + err := ht.do(func() { + ht.writeCommonHeaders(s) + + // And flush, in case no header or body has been sent yet. + // This forces a separation of headers and trailers if this is the + // first call (for example, in end2end tests's TestNoService). + ht.rw.(http.Flusher).Flush() + + h := ht.rw.Header() + h.Set("Grpc-Status", fmt.Sprintf("%d", statusCode)) + if statusDesc != "" { + h.Set("Grpc-Message", statusDesc) + } + if md := s.Trailer(); len(md) > 0 { + for k, vv := range md { + for _, v := range vv { + // http2 ResponseWriter mechanism to + // send undeclared Trailers after the + // headers have possibly been written. + h.Add(http2.TrailerPrefix+k, v) + } + } + } + }) + close(ht.writes) + return err +} + +// writeCommonHeaders sets common headers on the first write +// call (Write, WriteHeader, or WriteStatus). +func (ht *serverHandlerTransport) writeCommonHeaders(s *Stream) { + if ht.didCommonHeaders { + return + } + ht.didCommonHeaders = true + + h := ht.rw.Header() + h["Date"] = nil // suppress Date to make tests happy; TODO: restore + h.Set("Content-Type", "application/grpc") + + // Predeclare trailers we'll set later in WriteStatus (after the body). + // This is a SHOULD in the HTTP RFC, and the way you add (known) + // Trailers per the net/http.ResponseWriter contract. 
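// Illustrative sketch of the net/http trailer contract referenced here
// (standalone handler, hypothetical values): names listed in the "Trailer"
// header before the body may be set after the body is written and are then
// emitted as HTTP trailers rather than headers:
//
//	func handler(w http.ResponseWriter, r *http.Request) {
//		w.Header().Add("Trailer", "Grpc-Status")
//		w.Write([]byte("body"))
//		w.Header().Set("Grpc-Status", "0") // sent as a trailer
//	}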
+ // See https://golang.org/pkg/net/http/#ResponseWriter + // and https://golang.org/pkg/net/http/#example_ResponseWriter_trailers + h.Add("Trailer", "Grpc-Status") + h.Add("Trailer", "Grpc-Message") + + if s.sendCompress != "" { + h.Set("Grpc-Encoding", s.sendCompress) + } +} + +func (ht *serverHandlerTransport) Write(s *Stream, data []byte, opts *Options) error { + return ht.do(func() { + ht.writeCommonHeaders(s) + ht.rw.Write(data) + if !opts.Delay { + ht.rw.(http.Flusher).Flush() + } + }) +} + +func (ht *serverHandlerTransport) WriteHeader(s *Stream, md metadata.MD) error { + return ht.do(func() { + ht.writeCommonHeaders(s) + h := ht.rw.Header() + for k, vv := range md { + for _, v := range vv { + h.Add(k, v) + } + } + ht.rw.WriteHeader(200) + ht.rw.(http.Flusher).Flush() + }) +} + +func (ht *serverHandlerTransport) HandleStreams(startStream func(*Stream)) { + // With this transport type there will be exactly 1 stream: this HTTP request. + + var ctx context.Context + var cancel context.CancelFunc + if ht.timeoutSet { + ctx, cancel = context.WithTimeout(context.Background(), ht.timeout) + } else { + ctx, cancel = context.WithCancel(context.Background()) + } + + // requestOver is closed when either the request's context is done + // or the status has been written via WriteStatus. + requestOver := make(chan struct{}) + + // clientGone receives a single value if peer is gone, either + // because the underlying connection is dead or because the + // peer sends an http2 RST_STREAM. + clientGone := ht.rw.(http.CloseNotifier).CloseNotify() + go func() { + select { + case <-requestOver: + return + case <-ht.closedCh: + case <-clientGone: + } + cancel() + }() + + req := ht.req + + s := &Stream{ + id: 0, // irrelevant + windowHandler: func(int) {}, // nothing + cancel: cancel, + buf: newRecvBuffer(), + st: ht, + method: req.URL.Path, + recvCompress: req.Header.Get("grpc-encoding"), + } + pr := &peer.Peer{ + Addr: ht.RemoteAddr(), + } + if req.TLS != nil { + pr.AuthInfo = credentials.TLSInfo{*req.TLS} + } + ctx = metadata.NewContext(ctx, ht.headerMD) + ctx = peer.NewContext(ctx, pr) + s.ctx = newContextWithStream(ctx, s) + s.dec = &recvBufferReader{ctx: s.ctx, recv: s.buf} + + // readerDone is closed when the Body.Read-ing goroutine exits. + readerDone := make(chan struct{}) + go func() { + defer close(readerDone) + for { + buf := make([]byte, 1024) // TODO: minimize garbage, optimize recvBuffer code/ownership + n, err := req.Body.Read(buf) + if n > 0 { + s.buf.put(&recvMsg{data: buf[:n]}) + } + if err != nil { + s.buf.put(&recvMsg{err: mapRecvMsgError(err)}) + return + } + } + }() + + // startStream is provided by the *grpc.Server's serveStreams. + // It starts a goroutine serving s and exits immediately. + // The goroutine that is started is the one that then calls + // into ht, calling WriteHeader, Write, WriteStatus, Close, etc. + startStream(s) + + ht.runStream() + close(requestOver) + + // Wait for reading goroutine to finish. + req.Body.Close() + <-readerDone +} + +func (ht *serverHandlerTransport) runStream() { + for { + select { + case fn, ok := <-ht.writes: + if !ok { + return + } + fn() + case <-ht.closedCh: + return + } + } +} + +// mapRecvMsgError returns the non-nil err into the appropriate +// error value as expected by callers of *grpc.parser.recvMsg. 
+// In particular, in can only be: +// * io.EOF +// * io.ErrUnexpectedEOF +// * of type transport.ConnectionError +// * of type transport.StreamError +func mapRecvMsgError(err error) error { + if err == io.EOF || err == io.ErrUnexpectedEOF { + return err + } + if se, ok := err.(http2.StreamError); ok { + if code, ok := http2ErrConvTab[se.Code]; ok { + return StreamError{ + Code: code, + Desc: se.Error(), + } + } + } + return ConnectionError{Desc: err.Error()} +} diff --git a/vendor/github.com/docker/distribution/vendor/google.golang.org/grpc/transport/http2_client.go b/vendor/github.com/docker/distribution/vendor/google.golang.org/grpc/transport/http2_client.go new file mode 100644 index 000000000..66fabbba7 --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/google.golang.org/grpc/transport/http2_client.go @@ -0,0 +1,879 @@ +/* + * + * Copyright 2014, Google Inc. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following disclaimer + * in the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Google Inc. nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + */ + +package transport + +import ( + "bytes" + "errors" + "io" + "math" + "net" + "strings" + "sync" + "time" + + "golang.org/x/net/context" + "golang.org/x/net/http2" + "golang.org/x/net/http2/hpack" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/credentials" + "google.golang.org/grpc/grpclog" + "google.golang.org/grpc/metadata" + "google.golang.org/grpc/peer" +) + +// http2Client implements the ClientTransport interface with HTTP2. +type http2Client struct { + target string // server name/addr + userAgent string + conn net.Conn // underlying communication channel + authInfo credentials.AuthInfo // auth info about the connection + nextID uint32 // the next stream ID to be used + + // writableChan synchronizes write access to the transport. + // A writer acquires the write lock by sending a value on writableChan + // and releases it by receiving from writableChan. + writableChan chan int + // shutdownChan is closed when Close is called. + // Blocking operations should select on shutdownChan to avoid + // blocking forever after Close. + // TODO(zhaoq): Maybe have a channel context? 
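// Illustrative sketch of how these two channels cooperate in the code below
// (acquire the write lock by receiving, release by sending, abort if the
// transport shuts down while waiting):
//
//	select {
//	case <-t.writableChan: // write lock acquired
//		// ... write frames via t.framer ...
//		t.writableChan <- 0 // release
//	case <-t.shutdownChan:
//		return ErrConnClosing
//	}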
+ shutdownChan chan struct{} + // errorChan is closed to notify the I/O error to the caller. + errorChan chan struct{} + + framer *framer + hBuf *bytes.Buffer // the buffer for HPACK encoding + hEnc *hpack.Encoder // HPACK encoder + + // controlBuf delivers all the control related tasks (e.g., window + // updates, reset streams, and various settings) to the controller. + controlBuf *recvBuffer + fc *inFlow + // sendQuotaPool provides flow control to outbound message. + sendQuotaPool *quotaPool + // streamsQuota limits the max number of concurrent streams. + streamsQuota *quotaPool + + // The scheme used: https if TLS is on, http otherwise. + scheme string + + authCreds []credentials.Credentials + + mu sync.Mutex // guard the following variables + state transportState // the state of underlying connection + activeStreams map[uint32]*Stream + // The max number of concurrent streams + maxStreams int + // the per-stream outbound flow control window size set by the peer. + streamSendQuota uint32 +} + +// newHTTP2Client constructs a connected ClientTransport to addr based on HTTP2 +// and starts to receive messages on it. Non-nil error returns if construction +// fails. +func newHTTP2Client(addr string, opts *ConnectOptions) (_ ClientTransport, err error) { + if opts.Dialer == nil { + // Set the default Dialer. + opts.Dialer = func(addr string, timeout time.Duration) (net.Conn, error) { + return net.DialTimeout("tcp", addr, timeout) + } + } + scheme := "http" + startT := time.Now() + timeout := opts.Timeout + conn, connErr := opts.Dialer(addr, timeout) + if connErr != nil { + return nil, ConnectionErrorf("transport: %v", connErr) + } + var authInfo credentials.AuthInfo + for _, c := range opts.AuthOptions { + if ccreds, ok := c.(credentials.TransportAuthenticator); ok { + scheme = "https" + // TODO(zhaoq): Now the first TransportAuthenticator is used if there are + // multiple ones provided. Revisit this if it is not appropriate. Probably + // place the ClientTransport construction into a separate function to make + // things clear. + if timeout > 0 { + timeout -= time.Since(startT) + } + conn, authInfo, connErr = ccreds.ClientHandshake(addr, conn, timeout) + break + } + } + if connErr != nil { + return nil, ConnectionErrorf("transport: %v", connErr) + } + defer func() { + if err != nil { + conn.Close() + } + }() + // Send connection preface to server. + n, err := conn.Write(clientPreface) + if err != nil { + return nil, ConnectionErrorf("transport: %v", err) + } + if n != len(clientPreface) { + return nil, ConnectionErrorf("transport: preface mismatch, wrote %d bytes; want %d", n, len(clientPreface)) + } + framer := newFramer(conn) + if initialWindowSize != defaultWindowSize { + err = framer.writeSettings(true, http2.Setting{http2.SettingInitialWindowSize, uint32(initialWindowSize)}) + } else { + err = framer.writeSettings(true) + } + if err != nil { + return nil, ConnectionErrorf("transport: %v", err) + } + // Adjust the connection flow control window if needed. + if delta := uint32(initialConnWindowSize - defaultWindowSize); delta > 0 { + if err := framer.writeWindowUpdate(true, 0, delta); err != nil { + return nil, ConnectionErrorf("transport: %v", err) + } + } + ua := primaryUA + if opts.UserAgent != "" { + ua = opts.UserAgent + " " + ua + } + var buf bytes.Buffer + t := &http2Client{ + target: addr, + userAgent: ua, + conn: conn, + authInfo: authInfo, + // The client initiated stream id is odd starting from 1. 
+ nextID: 1, + writableChan: make(chan int, 1), + shutdownChan: make(chan struct{}), + errorChan: make(chan struct{}), + framer: framer, + hBuf: &buf, + hEnc: hpack.NewEncoder(&buf), + controlBuf: newRecvBuffer(), + fc: &inFlow{limit: initialConnWindowSize}, + sendQuotaPool: newQuotaPool(defaultWindowSize), + scheme: scheme, + state: reachable, + activeStreams: make(map[uint32]*Stream), + authCreds: opts.AuthOptions, + maxStreams: math.MaxInt32, + streamSendQuota: defaultWindowSize, + } + go t.controller() + t.writableChan <- 0 + // Start the reader goroutine for incoming message. The threading model + // on receiving is that each transport has a dedicated goroutine which + // reads HTTP2 frame from network. Then it dispatches the frame to the + // corresponding stream entity. + go t.reader() + return t, nil +} + +func (t *http2Client) newStream(ctx context.Context, callHdr *CallHdr) *Stream { + fc := &inFlow{ + limit: initialWindowSize, + conn: t.fc, + } + // TODO(zhaoq): Handle uint32 overflow of Stream.id. + s := &Stream{ + id: t.nextID, + method: callHdr.Method, + sendCompress: callHdr.SendCompress, + buf: newRecvBuffer(), + fc: fc, + sendQuotaPool: newQuotaPool(int(t.streamSendQuota)), + headerChan: make(chan struct{}), + } + t.nextID += 2 + s.windowHandler = func(n int) { + t.updateWindow(s, uint32(n)) + } + // Make a stream be able to cancel the pending operations by itself. + s.ctx, s.cancel = context.WithCancel(ctx) + s.dec = &recvBufferReader{ + ctx: s.ctx, + recv: s.buf, + } + return s +} + +// NewStream creates a stream and register it into the transport as "active" +// streams. +func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (_ *Stream, err error) { + // Record the timeout value on the context. + var timeout time.Duration + if dl, ok := ctx.Deadline(); ok { + timeout = dl.Sub(time.Now()) + if timeout <= 0 { + return nil, ContextErr(context.DeadlineExceeded) + } + } + pr := &peer.Peer{ + Addr: t.conn.RemoteAddr(), + } + // Attach Auth info if there is any. + if t.authInfo != nil { + pr.AuthInfo = t.authInfo + } + ctx = peer.NewContext(ctx, pr) + authData := make(map[string]string) + for _, c := range t.authCreds { + // Construct URI required to get auth request metadata. + var port string + if pos := strings.LastIndex(t.target, ":"); pos != -1 { + // Omit port if it is the default one. + if t.target[pos+1:] != "443" { + port = ":" + t.target[pos+1:] + } + } + pos := strings.LastIndex(callHdr.Method, "/") + if pos == -1 { + return nil, StreamErrorf(codes.InvalidArgument, "transport: malformed method name: %q", callHdr.Method) + } + audience := "https://" + callHdr.Host + port + callHdr.Method[:pos] + data, err := c.GetRequestMetadata(ctx, audience) + if err != nil { + return nil, StreamErrorf(codes.InvalidArgument, "transport: %v", err) + } + for k, v := range data { + authData[k] = v + } + } + t.mu.Lock() + if t.state != reachable { + t.mu.Unlock() + return nil, ErrConnClosing + } + checkStreamsQuota := t.streamsQuota != nil + t.mu.Unlock() + if checkStreamsQuota { + sq, err := wait(ctx, t.shutdownChan, t.streamsQuota.acquire()) + if err != nil { + return nil, err + } + // Returns the quota balance back. + if sq > 1 { + t.streamsQuota.add(sq - 1) + } + } + if _, err := wait(ctx, t.shutdownChan, t.writableChan); err != nil { + // t.streamsQuota will be updated when t.CloseStream is invoked. 
+ return nil, err + } + t.mu.Lock() + if t.state != reachable { + t.mu.Unlock() + return nil, ErrConnClosing + } + s := t.newStream(ctx, callHdr) + t.activeStreams[s.id] = s + + // This stream is not counted when applySetings(...) initialize t.streamsQuota. + // Reset t.streamsQuota to the right value. + var reset bool + if !checkStreamsQuota && t.streamsQuota != nil { + reset = true + } + t.mu.Unlock() + if reset { + t.streamsQuota.reset(-1) + } + + // HPACK encodes various headers. Note that once WriteField(...) is + // called, the corresponding headers/continuation frame has to be sent + // because hpack.Encoder is stateful. + t.hBuf.Reset() + t.hEnc.WriteField(hpack.HeaderField{Name: ":method", Value: "POST"}) + t.hEnc.WriteField(hpack.HeaderField{Name: ":scheme", Value: t.scheme}) + t.hEnc.WriteField(hpack.HeaderField{Name: ":path", Value: callHdr.Method}) + t.hEnc.WriteField(hpack.HeaderField{Name: ":authority", Value: callHdr.Host}) + t.hEnc.WriteField(hpack.HeaderField{Name: "content-type", Value: "application/grpc"}) + t.hEnc.WriteField(hpack.HeaderField{Name: "user-agent", Value: t.userAgent}) + t.hEnc.WriteField(hpack.HeaderField{Name: "te", Value: "trailers"}) + + if callHdr.SendCompress != "" { + t.hEnc.WriteField(hpack.HeaderField{Name: "grpc-encoding", Value: callHdr.SendCompress}) + } + if timeout > 0 { + t.hEnc.WriteField(hpack.HeaderField{Name: "grpc-timeout", Value: timeoutEncode(timeout)}) + } + for k, v := range authData { + t.hEnc.WriteField(hpack.HeaderField{Name: k, Value: v}) + } + var ( + hasMD bool + endHeaders bool + ) + if md, ok := metadata.FromContext(ctx); ok { + hasMD = true + for k, v := range md { + for _, entry := range v { + t.hEnc.WriteField(hpack.HeaderField{Name: k, Value: entry}) + } + } + } + first := true + // Sends the headers in a single batch even when they span multiple frames. + for !endHeaders { + size := t.hBuf.Len() + if size > http2MaxFrameLen { + size = http2MaxFrameLen + } else { + endHeaders = true + } + var flush bool + if endHeaders && (hasMD || callHdr.Flush) { + flush = true + } + if first { + // Sends a HeadersFrame to server to start a new stream. + p := http2.HeadersFrameParam{ + StreamID: s.id, + BlockFragment: t.hBuf.Next(size), + EndStream: false, + EndHeaders: endHeaders, + } + // Do a force flush for the buffered frames iff it is the last headers frame + // and there is header metadata to be sent. Otherwise, there is flushing until + // the corresponding data frame is written. + err = t.framer.writeHeaders(flush, p) + first = false + } else { + // Sends Continuation frames for the leftover headers. + err = t.framer.writeContinuation(flush, s.id, endHeaders, t.hBuf.Next(size)) + } + if err != nil { + t.notifyError(err) + return nil, ConnectionErrorf("transport: %v", err) + } + } + t.writableChan <- 0 + return s, nil +} + +// CloseStream clears the footprint of a stream when the stream is not needed any more. +// This must not be executed in reader's goroutine. +func (t *http2Client) CloseStream(s *Stream, err error) { + var updateStreams bool + t.mu.Lock() + if t.streamsQuota != nil { + updateStreams = true + } + delete(t.activeStreams, s.id) + t.mu.Unlock() + if updateStreams { + t.streamsQuota.add(1) + } + // In case stream sending and receiving are invoked in separate + // goroutines (e.g., bi-directional streaming), the caller needs + // to call cancel on the stream to interrupt the blocking on + // other goroutines. 
+ s.cancel() + s.mu.Lock() + if q := s.fc.restoreConn(); q > 0 { + t.controlBuf.put(&windowUpdate{0, q}) + } + if s.state == streamDone { + s.mu.Unlock() + return + } + if !s.headerDone { + close(s.headerChan) + s.headerDone = true + } + s.state = streamDone + s.mu.Unlock() + if _, ok := err.(StreamError); ok { + t.controlBuf.put(&resetStream{s.id, http2.ErrCodeCancel}) + } +} + +// Close kicks off the shutdown process of the transport. This should be called +// only once on a transport. Once it is called, the transport should not be +// accessed any more. +func (t *http2Client) Close() (err error) { + t.mu.Lock() + if t.state == closing { + t.mu.Unlock() + return errors.New("transport: Close() was already called") + } + t.state = closing + t.mu.Unlock() + close(t.shutdownChan) + err = t.conn.Close() + t.mu.Lock() + streams := t.activeStreams + t.activeStreams = nil + t.mu.Unlock() + // Notify all active streams. + for _, s := range streams { + s.mu.Lock() + if !s.headerDone { + close(s.headerChan) + s.headerDone = true + } + s.mu.Unlock() + s.write(recvMsg{err: ErrConnClosing}) + } + return +} + +// Write formats the data into HTTP2 data frame(s) and sends it out. The caller +// should proceed only if Write returns nil. +// TODO(zhaoq): opts.Delay is ignored in this implementation. Support it later +// if it improves the performance. +func (t *http2Client) Write(s *Stream, data []byte, opts *Options) error { + r := bytes.NewBuffer(data) + for { + var p []byte + if r.Len() > 0 { + size := http2MaxFrameLen + s.sendQuotaPool.add(0) + // Wait until the stream has some quota to send the data. + sq, err := wait(s.ctx, t.shutdownChan, s.sendQuotaPool.acquire()) + if err != nil { + return err + } + t.sendQuotaPool.add(0) + // Wait until the transport has some quota to send the data. + tq, err := wait(s.ctx, t.shutdownChan, t.sendQuotaPool.acquire()) + if err != nil { + if _, ok := err.(StreamError); ok { + t.sendQuotaPool.cancel() + } + return err + } + if sq < size { + size = sq + } + if tq < size { + size = tq + } + p = r.Next(size) + ps := len(p) + if ps < sq { + // Overbooked stream quota. Return it back. + s.sendQuotaPool.add(sq - ps) + } + if ps < tq { + // Overbooked transport quota. Return it back. + t.sendQuotaPool.add(tq - ps) + } + } + var ( + endStream bool + forceFlush bool + ) + if opts.Last && r.Len() == 0 { + endStream = true + } + // Indicate there is a writer who is about to write a data frame. + t.framer.adjustNumWriters(1) + // Got some quota. Try to acquire writing privilege on the transport. + if _, err := wait(s.ctx, t.shutdownChan, t.writableChan); err != nil { + if t.framer.adjustNumWriters(-1) == 0 { + // This writer is the last one in this batch and has the + // responsibility to flush the buffered frames. It queues + // a flush request to controlBuf instead of flushing directly + // in order to avoid the race with other writing or flushing. + t.controlBuf.put(&flushIO{}) + } + return err + } + if r.Len() == 0 && t.framer.adjustNumWriters(0) == 1 { + // Do a force flush iff this is last frame for the entire gRPC message + // and the caller is the only writer at this moment. + forceFlush = true + } + // If WriteData fails, all the pending streams will be handled + // by http2Client.Close(). No explicit CloseStream() needs to be + // invoked. 
+ if err := t.framer.writeData(forceFlush, s.id, endStream, p); err != nil { + t.notifyError(err) + return ConnectionErrorf("transport: %v", err) + } + if t.framer.adjustNumWriters(-1) == 0 { + t.framer.flushWrite() + } + t.writableChan <- 0 + if r.Len() == 0 { + break + } + } + if !opts.Last { + return nil + } + s.mu.Lock() + if s.state != streamDone { + if s.state == streamReadDone { + s.state = streamDone + } else { + s.state = streamWriteDone + } + } + s.mu.Unlock() + return nil +} + +func (t *http2Client) getStream(f http2.Frame) (*Stream, bool) { + t.mu.Lock() + defer t.mu.Unlock() + s, ok := t.activeStreams[f.Header().StreamID] + return s, ok +} + +// updateWindow adjusts the inbound quota for the stream and the transport. +// Window updates will deliver to the controller for sending when +// the cumulative quota exceeds the corresponding threshold. +func (t *http2Client) updateWindow(s *Stream, n uint32) { + swu, cwu := s.fc.onRead(n) + if swu > 0 { + t.controlBuf.put(&windowUpdate{s.id, swu}) + } + if cwu > 0 { + t.controlBuf.put(&windowUpdate{0, cwu}) + } +} + +func (t *http2Client) handleData(f *http2.DataFrame) { + // Select the right stream to dispatch. + s, ok := t.getStream(f) + if !ok { + return + } + size := len(f.Data()) + if size > 0 { + if err := s.fc.onData(uint32(size)); err != nil { + if _, ok := err.(ConnectionError); ok { + t.notifyError(err) + return + } + s.mu.Lock() + if s.state == streamDone { + s.mu.Unlock() + return + } + s.state = streamDone + s.statusCode = codes.Internal + s.statusDesc = err.Error() + s.mu.Unlock() + s.write(recvMsg{err: io.EOF}) + t.controlBuf.put(&resetStream{s.id, http2.ErrCodeFlowControl}) + return + } + // TODO(bradfitz, zhaoq): A copy is required here because there is no + // guarantee f.Data() is consumed before the arrival of next frame. + // Can this copy be eliminated? + data := make([]byte, size) + copy(data, f.Data()) + s.write(recvMsg{data: data}) + } + // The server has closed the stream without sending trailers. Record that + // the read direction is closed, and set the status appropriately. + if f.FrameHeader.Flags.Has(http2.FlagDataEndStream) { + s.mu.Lock() + if s.state == streamWriteDone { + s.state = streamDone + } else { + s.state = streamReadDone + } + s.statusCode = codes.Internal + s.statusDesc = "server closed the stream without sending trailers" + s.mu.Unlock() + s.write(recvMsg{err: io.EOF}) + } +} + +func (t *http2Client) handleRSTStream(f *http2.RSTStreamFrame) { + s, ok := t.getStream(f) + if !ok { + return + } + s.mu.Lock() + if s.state == streamDone { + s.mu.Unlock() + return + } + s.state = streamDone + if !s.headerDone { + close(s.headerChan) + s.headerDone = true + } + s.statusCode, ok = http2ErrConvTab[http2.ErrCode(f.ErrCode)] + if !ok { + grpclog.Println("transport: http2Client.handleRSTStream found no mapped gRPC status for the received http2 error ", f.ErrCode) + } + s.mu.Unlock() + s.write(recvMsg{err: io.EOF}) +} + +func (t *http2Client) handleSettings(f *http2.SettingsFrame) { + if f.IsAck() { + return + } + var ss []http2.Setting + f.ForeachSetting(func(s http2.Setting) error { + ss = append(ss, s) + return nil + }) + // The settings will be applied once the ack is sent. 
+ t.controlBuf.put(&settings{ack: true, ss: ss}) +} + +func (t *http2Client) handlePing(f *http2.PingFrame) { + pingAck := &ping{ack: true} + copy(pingAck.data[:], f.Data[:]) + t.controlBuf.put(pingAck) +} + +func (t *http2Client) handleGoAway(f *http2.GoAwayFrame) { + // TODO(zhaoq): GoAwayFrame handler to be implemented +} + +func (t *http2Client) handleWindowUpdate(f *http2.WindowUpdateFrame) { + id := f.Header().StreamID + incr := f.Increment + if id == 0 { + t.sendQuotaPool.add(int(incr)) + return + } + if s, ok := t.getStream(f); ok { + s.sendQuotaPool.add(int(incr)) + } +} + +// operateHeaders takes action on the decoded headers. +func (t *http2Client) operateHeaders(frame *http2.MetaHeadersFrame) { + s, ok := t.getStream(frame) + if !ok { + return + } + var state decodeState + for _, hf := range frame.Fields { + state.processHeaderField(hf) + } + if state.err != nil { + s.write(recvMsg{err: state.err}) + // Something wrong. Stops reading even when there is remaining. + return + } + + endStream := frame.StreamEnded() + + s.mu.Lock() + if !endStream { + s.recvCompress = state.encoding + } + if !s.headerDone { + if !endStream && len(state.mdata) > 0 { + s.header = state.mdata + } + close(s.headerChan) + s.headerDone = true + } + if !endStream || s.state == streamDone { + s.mu.Unlock() + return + } + + if len(state.mdata) > 0 { + s.trailer = state.mdata + } + s.state = streamDone + s.statusCode = state.statusCode + s.statusDesc = state.statusDesc + s.mu.Unlock() + + s.write(recvMsg{err: io.EOF}) +} + +func handleMalformedHTTP2(s *Stream, err http2.StreamError) { + s.mu.Lock() + if !s.headerDone { + close(s.headerChan) + s.headerDone = true + } + s.mu.Unlock() + s.write(recvMsg{err: StreamErrorf(http2ErrConvTab[err.Code], "%v", err)}) +} + +// reader runs as a separate goroutine in charge of reading data from network +// connection. +// +// TODO(zhaoq): currently one reader per transport. Investigate whether this is +// optimal. +// TODO(zhaoq): Check the validity of the incoming frame sequence. +func (t *http2Client) reader() { + // Check the validity of server preface. + frame, err := t.framer.readFrame() + if err != nil { + t.notifyError(err) + return + } + sf, ok := frame.(*http2.SettingsFrame) + if !ok { + t.notifyError(err) + return + } + t.handleSettings(sf) + + // loop to keep reading incoming messages on this transport. + for { + frame, err := t.framer.readFrame() + if err != nil { + // Abort an active stream if the http2.Framer returns a + // http2.StreamError. This can happen only if the server's response + // is malformed http2. + if se, ok := err.(http2.StreamError); ok { + t.mu.Lock() + s := t.activeStreams[se.StreamID] + t.mu.Unlock() + if s != nil { + handleMalformedHTTP2(s, se) + } + continue + } else { + // Transport error. 
+ t.notifyError(err) + return + } + } + switch frame := frame.(type) { + case *http2.MetaHeadersFrame: + t.operateHeaders(frame) + case *http2.DataFrame: + t.handleData(frame) + case *http2.RSTStreamFrame: + t.handleRSTStream(frame) + case *http2.SettingsFrame: + t.handleSettings(frame) + case *http2.PingFrame: + t.handlePing(frame) + case *http2.GoAwayFrame: + t.handleGoAway(frame) + case *http2.WindowUpdateFrame: + t.handleWindowUpdate(frame) + default: + grpclog.Printf("transport: http2Client.reader got unhandled frame type %v.", frame) + } + } +} + +func (t *http2Client) applySettings(ss []http2.Setting) { + for _, s := range ss { + switch s.ID { + case http2.SettingMaxConcurrentStreams: + // TODO(zhaoq): This is a hack to avoid significant refactoring of the + // code to deal with the unrealistic int32 overflow. Probably will try + // to find a better way to handle this later. + if s.Val > math.MaxInt32 { + s.Val = math.MaxInt32 + } + t.mu.Lock() + reset := t.streamsQuota != nil + if !reset { + t.streamsQuota = newQuotaPool(int(s.Val) - len(t.activeStreams)) + } + ms := t.maxStreams + t.maxStreams = int(s.Val) + t.mu.Unlock() + if reset { + t.streamsQuota.reset(int(s.Val) - ms) + } + case http2.SettingInitialWindowSize: + t.mu.Lock() + for _, stream := range t.activeStreams { + // Adjust the sending quota for each stream. + stream.sendQuotaPool.reset(int(s.Val - t.streamSendQuota)) + } + t.streamSendQuota = s.Val + t.mu.Unlock() + } + } +} + +// controller running in a separate goroutine takes charge of sending control +// frames (e.g., window update, reset stream, setting, etc.) to the server. +func (t *http2Client) controller() { + for { + select { + case i := <-t.controlBuf.get(): + t.controlBuf.load() + select { + case <-t.writableChan: + switch i := i.(type) { + case *windowUpdate: + t.framer.writeWindowUpdate(true, i.streamID, i.increment) + case *settings: + if i.ack { + t.framer.writeSettingsAck(true) + t.applySettings(i.ss) + } else { + t.framer.writeSettings(true, i.ss...) + } + case *resetStream: + t.framer.writeRSTStream(true, i.streamID, i.code) + case *flushIO: + t.framer.flushWrite() + case *ping: + t.framer.writePing(true, i.ack, i.data) + default: + grpclog.Printf("transport: http2Client.controller got unexpected item type %v\n", i) + } + t.writableChan <- 0 + continue + case <-t.shutdownChan: + return + } + case <-t.shutdownChan: + return + } + } +} + +func (t *http2Client) Error() <-chan struct{} { + return t.errorChan +} + +func (t *http2Client) notifyError(err error) { + t.mu.Lock() + defer t.mu.Unlock() + // make sure t.errorChan is closed only once. + if t.state == reachable { + t.state = unreachable + close(t.errorChan) + grpclog.Printf("transport: http2Client.notifyError got notified that the client transport was broken %v.", err) + } +} diff --git a/vendor/github.com/docker/distribution/vendor/google.golang.org/grpc/transport/http2_server.go b/vendor/github.com/docker/distribution/vendor/google.golang.org/grpc/transport/http2_server.go new file mode 100644 index 000000000..cec441cfc --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/google.golang.org/grpc/transport/http2_server.go @@ -0,0 +1,691 @@ +/* + * + * Copyright 2014, Google Inc. + * All rights reserved. 
+ * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following disclaimer + * in the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Google Inc. nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + */ + +package transport + +import ( + "bytes" + "errors" + "io" + "math" + "net" + "strconv" + "sync" + + "golang.org/x/net/context" + "golang.org/x/net/http2" + "golang.org/x/net/http2/hpack" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/credentials" + "google.golang.org/grpc/grpclog" + "google.golang.org/grpc/metadata" + "google.golang.org/grpc/peer" +) + +// ErrIllegalHeaderWrite indicates that setting header is illegal because of +// the stream's state. +var ErrIllegalHeaderWrite = errors.New("transport: the stream is done or WriteHeader was already called") + +// http2Server implements the ServerTransport interface with HTTP2. +type http2Server struct { + conn net.Conn + maxStreamID uint32 // max stream ID ever seen + authInfo credentials.AuthInfo // auth info about the connection + // writableChan synchronizes write access to the transport. + // A writer acquires the write lock by receiving a value on writableChan + // and releases it by sending on writableChan. + writableChan chan int + // shutdownChan is closed when Close is called. + // Blocking operations should select on shutdownChan to avoid + // blocking forever after Close. + shutdownChan chan struct{} + framer *framer + hBuf *bytes.Buffer // the buffer for HPACK encoding + hEnc *hpack.Encoder // HPACK encoder + + // The max number of concurrent streams. + maxStreams uint32 + // controlBuf delivers all the control related tasks (e.g., window + // updates, reset streams, and various settings) to the controller. + controlBuf *recvBuffer + fc *inFlow + // sendQuotaPool provides flow control to outbound message. + sendQuotaPool *quotaPool + + mu sync.Mutex // guard the following + state transportState + activeStreams map[uint32]*Stream + // the per-stream outbound flow control window size set by the peer. + streamSendQuota uint32 +} + +// newHTTP2Server constructs a ServerTransport based on HTTP2. ConnectionError is +// returned if something goes wrong. 
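// Illustrative sketch of the user-facing path that ends up here (the generated
// registration call is hypothetical): grpc.Server builds one of these
// transports per accepted connection, and maxStreams corresponds to the
// MaxConcurrentStreams server option:
//
//	lis, err := net.Listen("tcp", ":50051")
//	if err != nil {
//		log.Fatal(err)
//	}
//	srv := grpc.NewServer(grpc.MaxConcurrentStreams(128))
//	pb.RegisterThingsServer(srv, &thingsServer{}) // hypothetical generated code
//	log.Fatal(srv.Serve(lis))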
+func newHTTP2Server(conn net.Conn, maxStreams uint32, authInfo credentials.AuthInfo) (_ ServerTransport, err error) { + framer := newFramer(conn) + // Send initial settings as connection preface to client. + var settings []http2.Setting + // TODO(zhaoq): Have a better way to signal "no limit" because 0 is + // permitted in the HTTP2 spec. + if maxStreams == 0 { + maxStreams = math.MaxUint32 + } else { + settings = append(settings, http2.Setting{http2.SettingMaxConcurrentStreams, maxStreams}) + } + if initialWindowSize != defaultWindowSize { + settings = append(settings, http2.Setting{http2.SettingInitialWindowSize, uint32(initialWindowSize)}) + } + if err := framer.writeSettings(true, settings...); err != nil { + return nil, ConnectionErrorf("transport: %v", err) + } + // Adjust the connection flow control window if needed. + if delta := uint32(initialConnWindowSize - defaultWindowSize); delta > 0 { + if err := framer.writeWindowUpdate(true, 0, delta); err != nil { + return nil, ConnectionErrorf("transport: %v", err) + } + } + var buf bytes.Buffer + t := &http2Server{ + conn: conn, + authInfo: authInfo, + framer: framer, + hBuf: &buf, + hEnc: hpack.NewEncoder(&buf), + maxStreams: maxStreams, + controlBuf: newRecvBuffer(), + fc: &inFlow{limit: initialConnWindowSize}, + sendQuotaPool: newQuotaPool(defaultWindowSize), + state: reachable, + writableChan: make(chan int, 1), + shutdownChan: make(chan struct{}), + activeStreams: make(map[uint32]*Stream), + streamSendQuota: defaultWindowSize, + } + go t.controller() + t.writableChan <- 0 + return t, nil +} + +// operateHeader takes action on the decoded headers. +func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func(*Stream)) { + buf := newRecvBuffer() + fc := &inFlow{ + limit: initialWindowSize, + conn: t.fc, + } + s := &Stream{ + id: frame.Header().StreamID, + st: t, + buf: buf, + fc: fc, + } + + var state decodeState + for _, hf := range frame.Fields { + state.processHeaderField(hf) + } + if err := state.err; err != nil { + if se, ok := err.(StreamError); ok { + t.controlBuf.put(&resetStream{s.id, statusCodeConvTab[se.Code]}) + } + return + } + + if frame.StreamEnded() { + // s is just created by the caller. No lock needed. + s.state = streamReadDone + } + s.recvCompress = state.encoding + if state.timeoutSet { + s.ctx, s.cancel = context.WithTimeout(context.TODO(), state.timeout) + } else { + s.ctx, s.cancel = context.WithCancel(context.TODO()) + } + pr := &peer.Peer{ + Addr: t.conn.RemoteAddr(), + } + // Attach Auth info if there is any. + if t.authInfo != nil { + pr.AuthInfo = t.authInfo + } + s.ctx = peer.NewContext(s.ctx, pr) + // Cache the current stream to the context so that the server application + // can find out. Required when the server wants to send some metadata + // back to the client (unary call only). + s.ctx = newContextWithStream(s.ctx, s) + // Attach the received metadata to the context. 
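// Illustrative sketch (hypothetical service method): the metadata attached to
// s.ctx here is what a handler later retrieves through the same metadata package:
//
//	func (s *thingsServer) Get(ctx context.Context, req *pb.GetRequest) (*pb.Thing, error) {
//		if md, ok := metadata.FromContext(ctx); ok {
//			_ = md["authorization"] // header values, keys are lower-cased
//		}
//		// ...
//	}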
+ if len(state.mdata) > 0 { + s.ctx = metadata.NewContext(s.ctx, state.mdata) + } + + s.dec = &recvBufferReader{ + ctx: s.ctx, + recv: s.buf, + } + s.recvCompress = state.encoding + s.method = state.method + t.mu.Lock() + if t.state != reachable { + t.mu.Unlock() + return + } + if uint32(len(t.activeStreams)) >= t.maxStreams { + t.mu.Unlock() + t.controlBuf.put(&resetStream{s.id, http2.ErrCodeRefusedStream}) + return + } + s.sendQuotaPool = newQuotaPool(int(t.streamSendQuota)) + t.activeStreams[s.id] = s + t.mu.Unlock() + s.windowHandler = func(n int) { + t.updateWindow(s, uint32(n)) + } + handle(s) +} + +// HandleStreams receives incoming streams using the given handler. This is +// typically run in a separate goroutine. +func (t *http2Server) HandleStreams(handle func(*Stream)) { + // Check the validity of client preface. + preface := make([]byte, len(clientPreface)) + if _, err := io.ReadFull(t.conn, preface); err != nil { + grpclog.Printf("transport: http2Server.HandleStreams failed to receive the preface from client: %v", err) + t.Close() + return + } + if !bytes.Equal(preface, clientPreface) { + grpclog.Printf("transport: http2Server.HandleStreams received bogus greeting from client: %q", preface) + t.Close() + return + } + + frame, err := t.framer.readFrame() + if err != nil { + grpclog.Printf("transport: http2Server.HandleStreams failed to read frame: %v", err) + t.Close() + return + } + sf, ok := frame.(*http2.SettingsFrame) + if !ok { + grpclog.Printf("transport: http2Server.HandleStreams saw invalid preface type %T from client", frame) + t.Close() + return + } + t.handleSettings(sf) + + for { + frame, err := t.framer.readFrame() + if err != nil { + t.Close() + return + } + switch frame := frame.(type) { + case *http2.MetaHeadersFrame: + id := frame.Header().StreamID + if id%2 != 1 || id <= t.maxStreamID { + // illegal gRPC stream id. + grpclog.Println("transport: http2Server.HandleStreams received an illegal stream id: ", id) + t.Close() + break + } + t.maxStreamID = id + t.operateHeaders(frame, handle) + case *http2.DataFrame: + t.handleData(frame) + case *http2.RSTStreamFrame: + t.handleRSTStream(frame) + case *http2.SettingsFrame: + t.handleSettings(frame) + case *http2.PingFrame: + t.handlePing(frame) + case *http2.WindowUpdateFrame: + t.handleWindowUpdate(frame) + case *http2.GoAwayFrame: + break + default: + grpclog.Printf("transport: http2Server.HandleStreams found unhandled frame type %v.", frame) + } + } +} + +func (t *http2Server) getStream(f http2.Frame) (*Stream, bool) { + t.mu.Lock() + defer t.mu.Unlock() + if t.activeStreams == nil { + // The transport is closing. + return nil, false + } + s, ok := t.activeStreams[f.Header().StreamID] + if !ok { + // The stream is already done. + return nil, false + } + return s, true +} + +// updateWindow adjusts the inbound quota for the stream and the transport. +// Window updates will deliver to the controller for sending when +// the cumulative quota exceeds the corresponding threshold. +func (t *http2Server) updateWindow(s *Stream, n uint32) { + swu, cwu := s.fc.onRead(n) + if swu > 0 { + t.controlBuf.put(&windowUpdate{s.id, swu}) + } + if cwu > 0 { + t.controlBuf.put(&windowUpdate{0, cwu}) + } +} + +func (t *http2Server) handleData(f *http2.DataFrame) { + // Select the right stream to dispatch. 
+ s, ok := t.getStream(f) + if !ok { + return + } + size := len(f.Data()) + if size > 0 { + if err := s.fc.onData(uint32(size)); err != nil { + if _, ok := err.(ConnectionError); ok { + grpclog.Printf("transport: http2Server %v", err) + t.Close() + return + } + t.closeStream(s) + t.controlBuf.put(&resetStream{s.id, http2.ErrCodeFlowControl}) + return + } + // TODO(bradfitz, zhaoq): A copy is required here because there is no + // guarantee f.Data() is consumed before the arrival of next frame. + // Can this copy be eliminated? + data := make([]byte, size) + copy(data, f.Data()) + s.write(recvMsg{data: data}) + } + if f.Header().Flags.Has(http2.FlagDataEndStream) { + // Received the end of stream from the client. + s.mu.Lock() + if s.state != streamDone { + if s.state == streamWriteDone { + s.state = streamDone + } else { + s.state = streamReadDone + } + } + s.mu.Unlock() + s.write(recvMsg{err: io.EOF}) + } +} + +func (t *http2Server) handleRSTStream(f *http2.RSTStreamFrame) { + s, ok := t.getStream(f) + if !ok { + return + } + t.closeStream(s) +} + +func (t *http2Server) handleSettings(f *http2.SettingsFrame) { + if f.IsAck() { + return + } + var ss []http2.Setting + f.ForeachSetting(func(s http2.Setting) error { + ss = append(ss, s) + return nil + }) + // The settings will be applied once the ack is sent. + t.controlBuf.put(&settings{ack: true, ss: ss}) +} + +func (t *http2Server) handlePing(f *http2.PingFrame) { + pingAck := &ping{ack: true} + copy(pingAck.data[:], f.Data[:]) + t.controlBuf.put(pingAck) +} + +func (t *http2Server) handleWindowUpdate(f *http2.WindowUpdateFrame) { + id := f.Header().StreamID + incr := f.Increment + if id == 0 { + t.sendQuotaPool.add(int(incr)) + return + } + if s, ok := t.getStream(f); ok { + s.sendQuotaPool.add(int(incr)) + } +} + +func (t *http2Server) writeHeaders(s *Stream, b *bytes.Buffer, endStream bool) error { + first := true + endHeaders := false + var err error + // Sends the headers in a single batch. + for !endHeaders { + size := t.hBuf.Len() + if size > http2MaxFrameLen { + size = http2MaxFrameLen + } else { + endHeaders = true + } + if first { + p := http2.HeadersFrameParam{ + StreamID: s.id, + BlockFragment: b.Next(size), + EndStream: endStream, + EndHeaders: endHeaders, + } + err = t.framer.writeHeaders(endHeaders, p) + first = false + } else { + err = t.framer.writeContinuation(endHeaders, s.id, endHeaders, b.Next(size)) + } + if err != nil { + t.Close() + return ConnectionErrorf("transport: %v", err) + } + } + return nil +} + +// WriteHeader sends the header metedata md back to the client. +func (t *http2Server) WriteHeader(s *Stream, md metadata.MD) error { + s.mu.Lock() + if s.headerOk || s.state == streamDone { + s.mu.Unlock() + return ErrIllegalHeaderWrite + } + s.headerOk = true + s.mu.Unlock() + if _, err := wait(s.ctx, t.shutdownChan, t.writableChan); err != nil { + return err + } + t.hBuf.Reset() + t.hEnc.WriteField(hpack.HeaderField{Name: ":status", Value: "200"}) + t.hEnc.WriteField(hpack.HeaderField{Name: "content-type", Value: "application/grpc"}) + if s.sendCompress != "" { + t.hEnc.WriteField(hpack.HeaderField{Name: "grpc-encoding", Value: s.sendCompress}) + } + for k, v := range md { + for _, entry := range v { + t.hEnc.WriteField(hpack.HeaderField{Name: k, Value: entry}) + } + } + if err := t.writeHeaders(s, t.hBuf, false); err != nil { + return err + } + t.writableChan <- 0 + return nil +} + +// WriteStatus sends stream status to the client and terminates the stream. 
+// There is no further I/O operations being able to perform on this stream. +// TODO(zhaoq): Now it indicates the end of entire stream. Revisit if early +// OK is adopted. +func (t *http2Server) WriteStatus(s *Stream, statusCode codes.Code, statusDesc string) error { + var headersSent bool + s.mu.Lock() + if s.state == streamDone { + s.mu.Unlock() + return nil + } + if s.headerOk { + headersSent = true + } + s.mu.Unlock() + if _, err := wait(s.ctx, t.shutdownChan, t.writableChan); err != nil { + return err + } + t.hBuf.Reset() + if !headersSent { + t.hEnc.WriteField(hpack.HeaderField{Name: ":status", Value: "200"}) + t.hEnc.WriteField(hpack.HeaderField{Name: "content-type", Value: "application/grpc"}) + } + t.hEnc.WriteField( + hpack.HeaderField{ + Name: "grpc-status", + Value: strconv.Itoa(int(statusCode)), + }) + t.hEnc.WriteField(hpack.HeaderField{Name: "grpc-message", Value: statusDesc}) + // Attach the trailer metadata. + for k, v := range s.trailer { + for _, entry := range v { + t.hEnc.WriteField(hpack.HeaderField{Name: k, Value: entry}) + } + } + if err := t.writeHeaders(s, t.hBuf, true); err != nil { + t.Close() + return err + } + t.closeStream(s) + t.writableChan <- 0 + return nil +} + +// Write converts the data into HTTP2 data frame and sends it out. Non-nil error +// is returns if it fails (e.g., framing error, transport error). +func (t *http2Server) Write(s *Stream, data []byte, opts *Options) error { + // TODO(zhaoq): Support multi-writers for a single stream. + var writeHeaderFrame bool + s.mu.Lock() + if !s.headerOk { + writeHeaderFrame = true + s.headerOk = true + } + s.mu.Unlock() + if writeHeaderFrame { + if _, err := wait(s.ctx, t.shutdownChan, t.writableChan); err != nil { + return err + } + t.hBuf.Reset() + t.hEnc.WriteField(hpack.HeaderField{Name: ":status", Value: "200"}) + t.hEnc.WriteField(hpack.HeaderField{Name: "content-type", Value: "application/grpc"}) + if s.sendCompress != "" { + t.hEnc.WriteField(hpack.HeaderField{Name: "grpc-encoding", Value: s.sendCompress}) + } + p := http2.HeadersFrameParam{ + StreamID: s.id, + BlockFragment: t.hBuf.Bytes(), + EndHeaders: true, + } + if err := t.framer.writeHeaders(false, p); err != nil { + t.Close() + return ConnectionErrorf("transport: %v", err) + } + t.writableChan <- 0 + } + r := bytes.NewBuffer(data) + for { + if r.Len() == 0 { + return nil + } + size := http2MaxFrameLen + s.sendQuotaPool.add(0) + // Wait until the stream has some quota to send the data. + sq, err := wait(s.ctx, t.shutdownChan, s.sendQuotaPool.acquire()) + if err != nil { + return err + } + t.sendQuotaPool.add(0) + // Wait until the transport has some quota to send the data. + tq, err := wait(s.ctx, t.shutdownChan, t.sendQuotaPool.acquire()) + if err != nil { + if _, ok := err.(StreamError); ok { + t.sendQuotaPool.cancel() + } + return err + } + if sq < size { + size = sq + } + if tq < size { + size = tq + } + p := r.Next(size) + ps := len(p) + if ps < sq { + // Overbooked stream quota. Return it back. + s.sendQuotaPool.add(sq - ps) + } + if ps < tq { + // Overbooked transport quota. Return it back. + t.sendQuotaPool.add(tq - ps) + } + t.framer.adjustNumWriters(1) + // Got some quota. Try to acquire writing privilege on the + // transport. + if _, err := wait(s.ctx, t.shutdownChan, t.writableChan); err != nil { + if t.framer.adjustNumWriters(-1) == 0 { + // This writer is the last one in this batch and has the + // responsibility to flush the buffered frames. 
It queues + // a flush request to controlBuf instead of flushing directly + // in order to avoid the race with other writing or flushing. + t.controlBuf.put(&flushIO{}) + } + return err + } + var forceFlush bool + if r.Len() == 0 && t.framer.adjustNumWriters(0) == 1 && !opts.Last { + forceFlush = true + } + if err := t.framer.writeData(forceFlush, s.id, false, p); err != nil { + t.Close() + return ConnectionErrorf("transport: %v", err) + } + if t.framer.adjustNumWriters(-1) == 0 { + t.framer.flushWrite() + } + t.writableChan <- 0 + } + +} + +func (t *http2Server) applySettings(ss []http2.Setting) { + for _, s := range ss { + if s.ID == http2.SettingInitialWindowSize { + t.mu.Lock() + defer t.mu.Unlock() + for _, stream := range t.activeStreams { + stream.sendQuotaPool.reset(int(s.Val - t.streamSendQuota)) + } + t.streamSendQuota = s.Val + } + + } +} + +// controller running in a separate goroutine takes charge of sending control +// frames (e.g., window update, reset stream, setting, etc.) to the server. +func (t *http2Server) controller() { + for { + select { + case i := <-t.controlBuf.get(): + t.controlBuf.load() + select { + case <-t.writableChan: + switch i := i.(type) { + case *windowUpdate: + t.framer.writeWindowUpdate(true, i.streamID, i.increment) + case *settings: + if i.ack { + t.framer.writeSettingsAck(true) + t.applySettings(i.ss) + } else { + t.framer.writeSettings(true, i.ss...) + } + case *resetStream: + t.framer.writeRSTStream(true, i.streamID, i.code) + case *flushIO: + t.framer.flushWrite() + case *ping: + t.framer.writePing(true, i.ack, i.data) + default: + grpclog.Printf("transport: http2Server.controller got unexpected item type %v\n", i) + } + t.writableChan <- 0 + continue + case <-t.shutdownChan: + return + } + case <-t.shutdownChan: + return + } + } +} + +// Close starts shutting down the http2Server transport. +// TODO(zhaoq): Now the destruction is not blocked on any pending streams. This +// could cause some resource issue. Revisit this later. +func (t *http2Server) Close() (err error) { + t.mu.Lock() + if t.state == closing { + t.mu.Unlock() + return errors.New("transport: Close() was already called") + } + t.state = closing + streams := t.activeStreams + t.activeStreams = nil + t.mu.Unlock() + close(t.shutdownChan) + err = t.conn.Close() + // Cancel all active streams. + for _, s := range streams { + s.cancel() + } + return +} + +// closeStream clears the footprint of a stream when the stream is not needed +// any more. +func (t *http2Server) closeStream(s *Stream) { + t.mu.Lock() + delete(t.activeStreams, s.id) + t.mu.Unlock() + if q := s.fc.restoreConn(); q > 0 { + t.controlBuf.put(&windowUpdate{0, q}) + } + s.mu.Lock() + if s.state == streamDone { + s.mu.Unlock() + return + } + s.state = streamDone + s.mu.Unlock() + // In case stream sending and receiving are invoked in separate + // goroutines (e.g., bi-directional streaming), cancel needs to be + // called to interrupt the potential blocking on other goroutines. + s.cancel() +} + +func (t *http2Server) RemoteAddr() net.Addr { + return t.conn.RemoteAddr() +} diff --git a/vendor/github.com/docker/distribution/vendor/google.golang.org/grpc/transport/http_util.go b/vendor/github.com/docker/distribution/vendor/google.golang.org/grpc/transport/http_util.go new file mode 100644 index 000000000..6aabcd4a3 --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/google.golang.org/grpc/transport/http_util.go @@ -0,0 +1,406 @@ +/* + * + * Copyright 2014, Google Inc. + * All rights reserved. 
+ * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following disclaimer + * in the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Google Inc. nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + */ + +package transport + +import ( + "bufio" + "fmt" + "io" + "net" + "strconv" + "strings" + "sync/atomic" + "time" + + "golang.org/x/net/http2" + "golang.org/x/net/http2/hpack" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/grpclog" + "google.golang.org/grpc/metadata" +) + +const ( + // The primary user agent + primaryUA = "grpc-go/0.11" + // http2MaxFrameLen specifies the max length of a HTTP2 frame. + http2MaxFrameLen = 16384 // 16KB frame + // http://http2.github.io/http2-spec/#SettingValues + http2InitHeaderTableSize = 4096 + // http2IOBufSize specifies the buffer size for sending frames. + http2IOBufSize = 32 * 1024 +) + +var ( + clientPreface = []byte(http2.ClientPreface) + http2ErrConvTab = map[http2.ErrCode]codes.Code{ + http2.ErrCodeNo: codes.Internal, + http2.ErrCodeProtocol: codes.Internal, + http2.ErrCodeInternal: codes.Internal, + http2.ErrCodeFlowControl: codes.ResourceExhausted, + http2.ErrCodeSettingsTimeout: codes.Internal, + http2.ErrCodeFrameSize: codes.Internal, + http2.ErrCodeRefusedStream: codes.Unavailable, + http2.ErrCodeCancel: codes.Canceled, + http2.ErrCodeCompression: codes.Internal, + http2.ErrCodeConnect: codes.Internal, + http2.ErrCodeEnhanceYourCalm: codes.ResourceExhausted, + http2.ErrCodeInadequateSecurity: codes.PermissionDenied, + http2.ErrCodeHTTP11Required: codes.FailedPrecondition, + } + statusCodeConvTab = map[codes.Code]http2.ErrCode{ + codes.Internal: http2.ErrCodeInternal, + codes.Canceled: http2.ErrCodeCancel, + codes.Unavailable: http2.ErrCodeRefusedStream, + codes.ResourceExhausted: http2.ErrCodeEnhanceYourCalm, + codes.PermissionDenied: http2.ErrCodeInadequateSecurity, + } +) + +// Records the states during HPACK decoding. Must be reset once the +// decoding of the entire headers are finished. +type decodeState struct { + err error // first error encountered decoding + + encoding string + // statusCode caches the stream status received from the trailer + // the server sent. Client side only. 
+ statusCode codes.Code + statusDesc string + // Server side only fields. + timeoutSet bool + timeout time.Duration + method string + // key-value metadata map from the peer. + mdata map[string][]string +} + +// isReservedHeader checks whether hdr belongs to HTTP2 headers +// reserved by gRPC protocol. Any other headers are classified as the +// user-specified metadata. +func isReservedHeader(hdr string) bool { + if hdr != "" && hdr[0] == ':' { + return true + } + switch hdr { + case "content-type", + "grpc-message-type", + "grpc-encoding", + "grpc-message", + "grpc-status", + "grpc-timeout", + "te": + return true + default: + return false + } +} + +func (d *decodeState) setErr(err error) { + if d.err == nil { + d.err = err + } +} + +func (d *decodeState) processHeaderField(f hpack.HeaderField) { + switch f.Name { + case "content-type": + if !strings.Contains(f.Value, "application/grpc") { + d.setErr(StreamErrorf(codes.FailedPrecondition, "transport: received the unexpected content-type %q", f.Value)) + return + } + case "grpc-encoding": + d.encoding = f.Value + case "grpc-status": + code, err := strconv.Atoi(f.Value) + if err != nil { + d.setErr(StreamErrorf(codes.Internal, "transport: malformed grpc-status: %v", err)) + return + } + d.statusCode = codes.Code(code) + case "grpc-message": + d.statusDesc = f.Value + case "grpc-timeout": + d.timeoutSet = true + var err error + d.timeout, err = timeoutDecode(f.Value) + if err != nil { + d.setErr(StreamErrorf(codes.Internal, "transport: malformed time-out: %v", err)) + return + } + case ":path": + d.method = f.Value + default: + if !isReservedHeader(f.Name) { + if f.Name == "user-agent" { + i := strings.LastIndex(f.Value, " ") + if i == -1 { + // There is no application user agent string being set. + return + } + // Extract the application user agent string. + f.Value = f.Value[:i] + } + if d.mdata == nil { + d.mdata = make(map[string][]string) + } + k, v, err := metadata.DecodeKeyValue(f.Name, f.Value) + if err != nil { + grpclog.Printf("Failed to decode (%q, %q): %v", f.Name, f.Value, err) + return + } + d.mdata[k] = append(d.mdata[k], v) + } + } +} + +type timeoutUnit uint8 + +const ( + hour timeoutUnit = 'H' + minute timeoutUnit = 'M' + second timeoutUnit = 'S' + millisecond timeoutUnit = 'm' + microsecond timeoutUnit = 'u' + nanosecond timeoutUnit = 'n' +) + +func timeoutUnitToDuration(u timeoutUnit) (d time.Duration, ok bool) { + switch u { + case hour: + return time.Hour, true + case minute: + return time.Minute, true + case second: + return time.Second, true + case millisecond: + return time.Millisecond, true + case microsecond: + return time.Microsecond, true + case nanosecond: + return time.Nanosecond, true + default: + } + return +} + +const maxTimeoutValue int64 = 100000000 - 1 + +// div does integer division and round-up the result. Note that this is +// equivalent to (d+r-1)/r but has less chance to overflow. +func div(d, r time.Duration) int64 { + if m := d % r; m > 0 { + return int64(d/r + 1) + } + return int64(d / r) +} + +// TODO(zhaoq): It is the simplistic and not bandwidth efficient. Improve it. 
+func timeoutEncode(t time.Duration) string { + if d := div(t, time.Nanosecond); d <= maxTimeoutValue { + return strconv.FormatInt(d, 10) + "n" + } + if d := div(t, time.Microsecond); d <= maxTimeoutValue { + return strconv.FormatInt(d, 10) + "u" + } + if d := div(t, time.Millisecond); d <= maxTimeoutValue { + return strconv.FormatInt(d, 10) + "m" + } + if d := div(t, time.Second); d <= maxTimeoutValue { + return strconv.FormatInt(d, 10) + "S" + } + if d := div(t, time.Minute); d <= maxTimeoutValue { + return strconv.FormatInt(d, 10) + "M" + } + // Note that maxTimeoutValue * time.Hour > MaxInt64. + return strconv.FormatInt(div(t, time.Hour), 10) + "H" +} + +func timeoutDecode(s string) (time.Duration, error) { + size := len(s) + if size < 2 { + return 0, fmt.Errorf("transport: timeout string is too short: %q", s) + } + unit := timeoutUnit(s[size-1]) + d, ok := timeoutUnitToDuration(unit) + if !ok { + return 0, fmt.Errorf("transport: timeout unit is not recognized: %q", s) + } + t, err := strconv.ParseInt(s[:size-1], 10, 64) + if err != nil { + return 0, err + } + return d * time.Duration(t), nil +} + +type framer struct { + numWriters int32 + reader io.Reader + writer *bufio.Writer + fr *http2.Framer +} + +func newFramer(conn net.Conn) *framer { + f := &framer{ + reader: bufio.NewReaderSize(conn, http2IOBufSize), + writer: bufio.NewWriterSize(conn, http2IOBufSize), + } + f.fr = http2.NewFramer(f.writer, f.reader) + f.fr.ReadMetaHeaders = hpack.NewDecoder(http2InitHeaderTableSize, nil) + return f +} + +func (f *framer) adjustNumWriters(i int32) int32 { + return atomic.AddInt32(&f.numWriters, i) +} + +// The following writeXXX functions can only be called when the caller gets +// unblocked from writableChan channel (i.e., owns the privilege to write). 
+ +func (f *framer) writeContinuation(forceFlush bool, streamID uint32, endHeaders bool, headerBlockFragment []byte) error { + if err := f.fr.WriteContinuation(streamID, endHeaders, headerBlockFragment); err != nil { + return err + } + if forceFlush { + return f.writer.Flush() + } + return nil +} + +func (f *framer) writeData(forceFlush bool, streamID uint32, endStream bool, data []byte) error { + if err := f.fr.WriteData(streamID, endStream, data); err != nil { + return err + } + if forceFlush { + return f.writer.Flush() + } + return nil +} + +func (f *framer) writeGoAway(forceFlush bool, maxStreamID uint32, code http2.ErrCode, debugData []byte) error { + if err := f.fr.WriteGoAway(maxStreamID, code, debugData); err != nil { + return err + } + if forceFlush { + return f.writer.Flush() + } + return nil +} + +func (f *framer) writeHeaders(forceFlush bool, p http2.HeadersFrameParam) error { + if err := f.fr.WriteHeaders(p); err != nil { + return err + } + if forceFlush { + return f.writer.Flush() + } + return nil +} + +func (f *framer) writePing(forceFlush, ack bool, data [8]byte) error { + if err := f.fr.WritePing(ack, data); err != nil { + return err + } + if forceFlush { + return f.writer.Flush() + } + return nil +} + +func (f *framer) writePriority(forceFlush bool, streamID uint32, p http2.PriorityParam) error { + if err := f.fr.WritePriority(streamID, p); err != nil { + return err + } + if forceFlush { + return f.writer.Flush() + } + return nil +} + +func (f *framer) writePushPromise(forceFlush bool, p http2.PushPromiseParam) error { + if err := f.fr.WritePushPromise(p); err != nil { + return err + } + if forceFlush { + return f.writer.Flush() + } + return nil +} + +func (f *framer) writeRSTStream(forceFlush bool, streamID uint32, code http2.ErrCode) error { + if err := f.fr.WriteRSTStream(streamID, code); err != nil { + return err + } + if forceFlush { + return f.writer.Flush() + } + return nil +} + +func (f *framer) writeSettings(forceFlush bool, settings ...http2.Setting) error { + if err := f.fr.WriteSettings(settings...); err != nil { + return err + } + if forceFlush { + return f.writer.Flush() + } + return nil +} + +func (f *framer) writeSettingsAck(forceFlush bool) error { + if err := f.fr.WriteSettingsAck(); err != nil { + return err + } + if forceFlush { + return f.writer.Flush() + } + return nil +} + +func (f *framer) writeWindowUpdate(forceFlush bool, streamID, incr uint32) error { + if err := f.fr.WriteWindowUpdate(streamID, incr); err != nil { + return err + } + if forceFlush { + return f.writer.Flush() + } + return nil +} + +func (f *framer) flushWrite() error { + return f.writer.Flush() +} + +func (f *framer) readFrame() (http2.Frame, error) { + return f.fr.ReadFrame() +} diff --git a/vendor/github.com/docker/distribution/vendor/google.golang.org/grpc/transport/transport.go b/vendor/github.com/docker/distribution/vendor/google.golang.org/grpc/transport/transport.go new file mode 100644 index 000000000..f027cae55 --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/google.golang.org/grpc/transport/transport.go @@ -0,0 +1,508 @@ +/* + * + * Copyright 2014, Google Inc. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. 
+ * * Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following disclaimer + * in the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Google Inc. nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + */ + +/* +Package transport defines and implements message oriented communication channel +to complete various transactions (e.g., an RPC). +*/ +package transport + +import ( + "bytes" + "errors" + "fmt" + "io" + "net" + "sync" + "time" + + "golang.org/x/net/context" + "golang.org/x/net/trace" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/credentials" + "google.golang.org/grpc/metadata" +) + +// recvMsg represents the received msg from the transport. All transport +// protocol specific info has been removed. +type recvMsg struct { + data []byte + // nil: received some data + // io.EOF: stream is completed. data is nil. + // other non-nil error: transport failure. data is nil. + err error +} + +func (recvMsg) isItem() bool { + return true +} + +// All items in an out of a recvBuffer should be the same type. +type item interface { + isItem() bool +} + +// recvBuffer is an unbounded channel of item. +type recvBuffer struct { + c chan item + mu sync.Mutex + backlog []item +} + +func newRecvBuffer() *recvBuffer { + b := &recvBuffer{ + c: make(chan item, 1), + } + return b +} + +func (b *recvBuffer) put(r item) { + b.mu.Lock() + defer b.mu.Unlock() + b.backlog = append(b.backlog, r) + select { + case b.c <- b.backlog[0]: + b.backlog = b.backlog[1:] + default: + } +} + +func (b *recvBuffer) load() { + b.mu.Lock() + defer b.mu.Unlock() + if len(b.backlog) > 0 { + select { + case b.c <- b.backlog[0]: + b.backlog = b.backlog[1:] + default: + } + } +} + +// get returns the channel that receives an item in the buffer. +// +// Upon receipt of an item, the caller should call load to send another +// item onto the channel if there is any. +func (b *recvBuffer) get() <-chan item { + return b.c +} + +// recvBufferReader implements io.Reader interface to read the data from +// recvBuffer. +type recvBufferReader struct { + ctx context.Context + recv *recvBuffer + last *bytes.Reader // Stores the remaining data in the previous calls. + err error +} + +// Read reads the next len(p) bytes from last. If last is drained, it tries to +// read additional data from recv. It blocks if there no additional data available +// in recv. If Read returns any non-nil error, it will continue to return that error. 
+func (r *recvBufferReader) Read(p []byte) (n int, err error) { + if r.err != nil { + return 0, r.err + } + defer func() { r.err = err }() + if r.last != nil && r.last.Len() > 0 { + // Read remaining data left in last call. + return r.last.Read(p) + } + select { + case <-r.ctx.Done(): + return 0, ContextErr(r.ctx.Err()) + case i := <-r.recv.get(): + r.recv.load() + m := i.(*recvMsg) + if m.err != nil { + return 0, m.err + } + r.last = bytes.NewReader(m.data) + return r.last.Read(p) + } +} + +type streamState uint8 + +const ( + streamActive streamState = iota + streamWriteDone // EndStream sent + streamReadDone // EndStream received + streamDone // sendDone and recvDone or RSTStreamFrame is sent or received. +) + +// Stream represents an RPC in the transport layer. +type Stream struct { + id uint32 + // nil for client side Stream. + st ServerTransport + // ctx is the associated context of the stream. + ctx context.Context + cancel context.CancelFunc + // method records the associated RPC method of the stream. + method string + recvCompress string + sendCompress string + buf *recvBuffer + dec io.Reader + fc *inFlow + recvQuota uint32 + // The accumulated inbound quota pending for window update. + updateQuota uint32 + // The handler to control the window update procedure for both this + // particular stream and the associated transport. + windowHandler func(int) + + sendQuotaPool *quotaPool + // Close headerChan to indicate the end of reception of header metadata. + headerChan chan struct{} + // header caches the received header metadata. + header metadata.MD + // The key-value map of trailer metadata. + trailer metadata.MD + + mu sync.RWMutex // guard the following + // headerOK becomes true from the first header is about to send. + headerOk bool + state streamState + // true iff headerChan is closed. Used to avoid closing headerChan + // multiple times. + headerDone bool + // the status received from the server. + statusCode codes.Code + statusDesc string +} + +// RecvCompress returns the compression algorithm applied to the inbound +// message. It is empty string if there is no compression applied. +func (s *Stream) RecvCompress() string { + return s.recvCompress +} + +// SetSendCompress sets the compression algorithm to the stream. +func (s *Stream) SetSendCompress(str string) { + s.sendCompress = str +} + +// Header acquires the key-value pairs of header metadata once it +// is available. It blocks until i) the metadata is ready or ii) there is no +// header metadata or iii) the stream is cancelled/expired. +func (s *Stream) Header() (metadata.MD, error) { + select { + case <-s.ctx.Done(): + return nil, ContextErr(s.ctx.Err()) + case <-s.headerChan: + return s.header.Copy(), nil + } +} + +// Trailer returns the cached trailer metedata. Note that if it is not called +// after the entire stream is done, it could return an empty MD. Client +// side only. +func (s *Stream) Trailer() metadata.MD { + s.mu.RLock() + defer s.mu.RUnlock() + return s.trailer.Copy() +} + +// ServerTransport returns the underlying ServerTransport for the stream. +// The client side stream always returns nil. +func (s *Stream) ServerTransport() ServerTransport { + return s.st +} + +// Context returns the context of the stream. +func (s *Stream) Context() context.Context { + return s.ctx +} + +// TraceContext recreates the context of s with a trace.Trace. +func (s *Stream) TraceContext(tr trace.Trace) { + s.ctx = trace.NewContext(s.ctx, tr) +} + +// Method returns the method for the stream. 
+func (s *Stream) Method() string { + return s.method +} + +// StatusCode returns statusCode received from the server. +func (s *Stream) StatusCode() codes.Code { + return s.statusCode +} + +// StatusDesc returns statusDesc received from the server. +func (s *Stream) StatusDesc() string { + return s.statusDesc +} + +// ErrIllegalTrailerSet indicates that the trailer has already been set or it +// is too late to do so. +var ErrIllegalTrailerSet = errors.New("transport: trailer has been set") + +// SetTrailer sets the trailer metadata which will be sent with the RPC status +// by the server. This can only be called at most once. Server side only. +func (s *Stream) SetTrailer(md metadata.MD) error { + s.mu.Lock() + defer s.mu.Unlock() + if s.trailer != nil { + return ErrIllegalTrailerSet + } + s.trailer = md.Copy() + return nil +} + +func (s *Stream) write(m recvMsg) { + s.buf.put(&m) +} + +// Read reads all the data available for this Stream from the transport and +// passes them into the decoder, which converts them into a gRPC message stream. +// The error is io.EOF when the stream is done or another non-nil error if +// the stream broke. +func (s *Stream) Read(p []byte) (n int, err error) { + n, err = s.dec.Read(p) + if err != nil { + return + } + s.windowHandler(n) + return +} + +// The key to save transport.Stream in the context. +type streamKey struct{} + +// newContextWithStream creates a new context from ctx and attaches stream +// to it. +func newContextWithStream(ctx context.Context, stream *Stream) context.Context { + return context.WithValue(ctx, streamKey{}, stream) +} + +// StreamFromContext returns the stream saved in ctx. +func StreamFromContext(ctx context.Context) (s *Stream, ok bool) { + s, ok = ctx.Value(streamKey{}).(*Stream) + return +} + +// state of transport +type transportState int + +const ( + reachable transportState = iota + unreachable + closing +) + +// NewServerTransport creates a ServerTransport with conn or non-nil error +// if it fails. +func NewServerTransport(protocol string, conn net.Conn, maxStreams uint32, authInfo credentials.AuthInfo) (ServerTransport, error) { + return newHTTP2Server(conn, maxStreams, authInfo) +} + +// ConnectOptions covers all relevant options for dialing a server. +type ConnectOptions struct { + // UserAgent is the application user agent. + UserAgent string + // Dialer specifies how to dial a network address. + Dialer func(string, time.Duration) (net.Conn, error) + // AuthOptions stores the credentials required to setup a client connection and/or issue RPCs. + AuthOptions []credentials.Credentials + // Timeout specifies the timeout for dialing a client connection. + Timeout time.Duration +} + +// NewClientTransport establishes the transport with the required ConnectOptions +// and returns it to the caller. +func NewClientTransport(target string, opts *ConnectOptions) (ClientTransport, error) { + return newHTTP2Client(target, opts) +} + +// Options provides additional hints and information for message +// transmission. +type Options struct { + // Last indicates whether this write is the last piece for + // this stream. + Last bool + + // Delay is a hint to the transport implementation for whether + // the data could be buffered for a batching write. The + // Transport implementation may ignore the hint. + Delay bool +} + +// CallHdr carries the information of a particular RPC. +type CallHdr struct { + // Host specifies the peer's host. + Host string + + // Method specifies the operation to perform. 
+ Method string + + // RecvCompress specifies the compression algorithm applied on + // inbound messages. + RecvCompress string + + // SendCompress specifies the compression algorithm applied on + // outbound message. + SendCompress string + + // Flush indicates whether a new stream command should be sent + // to the peer without waiting for the first data. This is + // only a hint. The transport may modify the flush decision + // for performance purposes. + Flush bool +} + +// ClientTransport is the common interface for all gRPC client-side transport +// implementations. +type ClientTransport interface { + // Close tears down this transport. Once it returns, the transport + // should not be accessed any more. The caller must make sure this + // is called only once. + Close() error + + // Write sends the data for the given stream. A nil stream indicates + // the write is to be performed on the transport as a whole. + Write(s *Stream, data []byte, opts *Options) error + + // NewStream creates a Stream for an RPC. + NewStream(ctx context.Context, callHdr *CallHdr) (*Stream, error) + + // CloseStream clears the footprint of a stream when the stream is + // not needed any more. The err indicates the error incurred when + // CloseStream is called. Must be called when a stream is finished + // unless the associated transport is closing. + CloseStream(stream *Stream, err error) + + // Error returns a channel that is closed when some I/O error + // happens. Typically the caller should have a goroutine to monitor + // this in order to take action (e.g., close the current transport + // and create a new one) in error case. It should not return nil + // once the transport is initiated. + Error() <-chan struct{} +} + +// ServerTransport is the common interface for all gRPC server-side transport +// implementations. +// +// Methods may be called concurrently from multiple goroutines, but +// Write methods for a given Stream will be called serially. +type ServerTransport interface { + // HandleStreams receives incoming streams using the given handler. + HandleStreams(func(*Stream)) + + // WriteHeader sends the header metadata for the given stream. + // WriteHeader may not be called on all streams. + WriteHeader(s *Stream, md metadata.MD) error + + // Write sends the data for the given stream. + // Write may not be called on all streams. + Write(s *Stream, data []byte, opts *Options) error + + // WriteStatus sends the status of a stream to the client. + // WriteStatus is the final call made on a stream and always + // occurs. + WriteStatus(s *Stream, statusCode codes.Code, statusDesc string) error + + // Close tears down the transport. Once it is called, the transport + // should not be accessed any more. All the pending streams and their + // handlers will be terminated asynchronously. + Close() error + + // RemoteAddr returns the remote network address. + RemoteAddr() net.Addr +} + +// StreamErrorf creates an StreamError with the specified error code and description. +func StreamErrorf(c codes.Code, format string, a ...interface{}) StreamError { + return StreamError{ + Code: c, + Desc: fmt.Sprintf(format, a...), + } +} + +// ConnectionErrorf creates an ConnectionError with the specified error description. +func ConnectionErrorf(format string, a ...interface{}) ConnectionError { + return ConnectionError{ + Desc: fmt.Sprintf(format, a...), + } +} + +// ConnectionError is an error that results in the termination of the +// entire connection and the retry of all the active streams. 
+type ConnectionError struct { + Desc string +} + +func (e ConnectionError) Error() string { + return fmt.Sprintf("connection error: desc = %q", e.Desc) +} + +// Define some common ConnectionErrors. +var ErrConnClosing = ConnectionError{Desc: "transport is closing"} + +// StreamError is an error that only affects one stream within a connection. +type StreamError struct { + Code codes.Code + Desc string +} + +func (e StreamError) Error() string { + return fmt.Sprintf("stream error: code = %d desc = %q", e.Code, e.Desc) +} + +// ContextErr converts the error from context package into a StreamError. +func ContextErr(err error) StreamError { + switch err { + case context.DeadlineExceeded: + return StreamErrorf(codes.DeadlineExceeded, "%v", err) + case context.Canceled: + return StreamErrorf(codes.Canceled, "%v", err) + } + panic(fmt.Sprintf("Unexpected error from context packet: %v", err)) +} + +// wait blocks until it can receive from ctx.Done, closing, or proceed. +// If it receives from ctx.Done, it returns 0, the StreamError for ctx.Err. +// If it receives from closing, it returns 0, ErrConnClosing. +// If it receives from proceed, it returns the received integer, nil. +func wait(ctx context.Context, closing <-chan struct{}, proceed <-chan int) (int, error) { + select { + case <-ctx.Done(): + return 0, ContextErr(ctx.Err()) + case <-closing: + return 0, ErrConnClosing + case i := <-proceed: + return i, nil + } +} diff --git a/vendor/github.com/docker/distribution/vendor/gopkg.in/check.v1/.gitignore b/vendor/github.com/docker/distribution/vendor/gopkg.in/check.v1/.gitignore new file mode 100644 index 000000000..191a5360b --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/gopkg.in/check.v1/.gitignore @@ -0,0 +1,4 @@ +_* +*.swp +*.[568] +[568].out diff --git a/vendor/github.com/docker/distribution/vendor/gopkg.in/check.v1/LICENSE b/vendor/github.com/docker/distribution/vendor/gopkg.in/check.v1/LICENSE new file mode 100644 index 000000000..545cf2d33 --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/gopkg.in/check.v1/LICENSE @@ -0,0 +1,25 @@ +Gocheck - A rich testing framework for Go + +Copyright (c) 2010-2013 Gustavo Niemeyer + +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + +1. Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. +2. Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR +ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
diff --git a/vendor/github.com/docker/distribution/vendor/gopkg.in/check.v1/README.md b/vendor/github.com/docker/distribution/vendor/gopkg.in/check.v1/README.md new file mode 100644 index 000000000..0ca9e5726 --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/gopkg.in/check.v1/README.md @@ -0,0 +1,20 @@ +Instructions +============ + +Install the package with: + + go get gopkg.in/check.v1 + +Import it with: + + import "gopkg.in/check.v1" + +and use _check_ as the package name inside the code. + +For more details, visit the project page: + +* http://labix.org/gocheck + +and the API documentation: + +* https://gopkg.in/check.v1 diff --git a/vendor/github.com/docker/distribution/vendor/gopkg.in/check.v1/TODO b/vendor/github.com/docker/distribution/vendor/gopkg.in/check.v1/TODO new file mode 100644 index 000000000..33498270e --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/gopkg.in/check.v1/TODO @@ -0,0 +1,2 @@ +- Assert(slice, Contains, item) +- Parallel test support diff --git a/vendor/github.com/docker/distribution/vendor/gopkg.in/check.v1/benchmark.go b/vendor/github.com/docker/distribution/vendor/gopkg.in/check.v1/benchmark.go new file mode 100644 index 000000000..48cb8c811 --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/gopkg.in/check.v1/benchmark.go @@ -0,0 +1,163 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package check + +import ( + "fmt" + "runtime" + "time" +) + +var memStats runtime.MemStats + +// testingB is a type passed to Benchmark functions to manage benchmark +// timing and to specify the number of iterations to run. +type timer struct { + start time.Time // Time test or benchmark started + duration time.Duration + N int + bytes int64 + timerOn bool + benchTime time.Duration + // The initial states of memStats.Mallocs and memStats.TotalAlloc. + startAllocs uint64 + startBytes uint64 + // The net total of this test after being run. + netAllocs uint64 + netBytes uint64 +} + +// StartTimer starts timing a test. This function is called automatically +// before a benchmark starts, but it can also used to resume timing after +// a call to StopTimer. +func (c *C) StartTimer() { + if !c.timerOn { + c.start = time.Now() + c.timerOn = true + + runtime.ReadMemStats(&memStats) + c.startAllocs = memStats.Mallocs + c.startBytes = memStats.TotalAlloc + } +} + +// StopTimer stops timing a test. This can be used to pause the timer +// while performing complex initialization that you don't +// want to measure. +func (c *C) StopTimer() { + if c.timerOn { + c.duration += time.Now().Sub(c.start) + c.timerOn = false + runtime.ReadMemStats(&memStats) + c.netAllocs += memStats.Mallocs - c.startAllocs + c.netBytes += memStats.TotalAlloc - c.startBytes + } +} + +// ResetTimer sets the elapsed benchmark time to zero. +// It does not affect whether the timer is running. +func (c *C) ResetTimer() { + if c.timerOn { + c.start = time.Now() + runtime.ReadMemStats(&memStats) + c.startAllocs = memStats.Mallocs + c.startBytes = memStats.TotalAlloc + } + c.duration = 0 + c.netAllocs = 0 + c.netBytes = 0 +} + +// SetBytes informs the number of bytes that the benchmark processes +// on each iteration. If this is called in a benchmark it will also +// report MB/s. 
+func (c *C) SetBytes(n int64) { + c.bytes = n +} + +func (c *C) nsPerOp() int64 { + if c.N <= 0 { + return 0 + } + return c.duration.Nanoseconds() / int64(c.N) +} + +func (c *C) mbPerSec() float64 { + if c.bytes <= 0 || c.duration <= 0 || c.N <= 0 { + return 0 + } + return (float64(c.bytes) * float64(c.N) / 1e6) / c.duration.Seconds() +} + +func (c *C) timerString() string { + if c.N <= 0 { + return fmt.Sprintf("%3.3fs", float64(c.duration.Nanoseconds())/1e9) + } + mbs := c.mbPerSec() + mb := "" + if mbs != 0 { + mb = fmt.Sprintf("\t%7.2f MB/s", mbs) + } + nsop := c.nsPerOp() + ns := fmt.Sprintf("%10d ns/op", nsop) + if c.N > 0 && nsop < 100 { + // The format specifiers here make sure that + // the ones digits line up for all three possible formats. + if nsop < 10 { + ns = fmt.Sprintf("%13.2f ns/op", float64(c.duration.Nanoseconds())/float64(c.N)) + } else { + ns = fmt.Sprintf("%12.1f ns/op", float64(c.duration.Nanoseconds())/float64(c.N)) + } + } + memStats := "" + if c.benchMem { + allocedBytes := fmt.Sprintf("%8d B/op", int64(c.netBytes)/int64(c.N)) + allocs := fmt.Sprintf("%8d allocs/op", int64(c.netAllocs)/int64(c.N)) + memStats = fmt.Sprintf("\t%s\t%s", allocedBytes, allocs) + } + return fmt.Sprintf("%8d\t%s%s%s", c.N, ns, mb, memStats) +} + +func min(x, y int) int { + if x > y { + return y + } + return x +} + +func max(x, y int) int { + if x < y { + return y + } + return x +} + +// roundDown10 rounds a number down to the nearest power of 10. +func roundDown10(n int) int { + var tens = 0 + // tens = floor(log_10(n)) + for n > 10 { + n = n / 10 + tens++ + } + // result = 10^tens + result := 1 + for i := 0; i < tens; i++ { + result *= 10 + } + return result +} + +// roundUp rounds x up to a number of the form [1eX, 2eX, 5eX]. +func roundUp(n int) int { + base := roundDown10(n) + if n < (2 * base) { + return 2 * base + } + if n < (5 * base) { + return 5 * base + } + return 10 * base +} diff --git a/vendor/github.com/docker/distribution/vendor/gopkg.in/check.v1/check.go b/vendor/github.com/docker/distribution/vendor/gopkg.in/check.v1/check.go new file mode 100644 index 000000000..ca8c0f92d --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/gopkg.in/check.v1/check.go @@ -0,0 +1,945 @@ +// Package check is a rich testing extension for Go's testing package. +// +// For details about the project, see: +// +// http://labix.org/gocheck +// +package check + +import ( + "bytes" + "errors" + "fmt" + "io" + "math/rand" + "os" + "path" + "path/filepath" + "reflect" + "regexp" + "runtime" + "strconv" + "strings" + "sync" + "time" +) + +// ----------------------------------------------------------------------- +// Internal type which deals with suite method calling. + +const ( + fixtureKd = iota + testKd +) + +type funcKind int + +const ( + succeededSt = iota + failedSt + skippedSt + panickedSt + fixturePanickedSt + missedSt +) + +type funcStatus int + +// A method value can't reach its own Method structure. +type methodType struct { + reflect.Value + Info reflect.Method +} + +func newMethod(receiver reflect.Value, i int) *methodType { + return &methodType{receiver.Method(i), receiver.Type().Method(i)} +} + +func (method *methodType) PC() uintptr { + return method.Info.Func.Pointer() +} + +func (method *methodType) suiteName() string { + t := method.Info.Type.In(0) + if t.Kind() == reflect.Ptr { + t = t.Elem() + } + return t.Name() +} + +func (method *methodType) String() string { + return method.suiteName() + "." 
+ method.Info.Name +} + +func (method *methodType) matches(re *regexp.Regexp) bool { + return (re.MatchString(method.Info.Name) || + re.MatchString(method.suiteName()) || + re.MatchString(method.String())) +} + +type C struct { + method *methodType + kind funcKind + testName string + status funcStatus + logb *logger + logw io.Writer + done chan *C + reason string + mustFail bool + tempDir *tempDir + benchMem bool + startTime time.Time + timer +} + +func (c *C) stopNow() { + runtime.Goexit() +} + +// logger is a concurrency safe byte.Buffer +type logger struct { + sync.Mutex + writer bytes.Buffer +} + +func (l *logger) Write(buf []byte) (int, error) { + l.Lock() + defer l.Unlock() + return l.writer.Write(buf) +} + +func (l *logger) WriteTo(w io.Writer) (int64, error) { + l.Lock() + defer l.Unlock() + return l.writer.WriteTo(w) +} + +func (l *logger) String() string { + l.Lock() + defer l.Unlock() + return l.writer.String() +} + +// ----------------------------------------------------------------------- +// Handling of temporary files and directories. + +type tempDir struct { + sync.Mutex + path string + counter int +} + +func (td *tempDir) newPath() string { + td.Lock() + defer td.Unlock() + if td.path == "" { + var err error + for i := 0; i != 100; i++ { + path := fmt.Sprintf("%s%ccheck-%d", os.TempDir(), os.PathSeparator, rand.Int()) + if err = os.Mkdir(path, 0700); err == nil { + td.path = path + break + } + } + if td.path == "" { + panic("Couldn't create temporary directory: " + err.Error()) + } + } + result := filepath.Join(td.path, strconv.Itoa(td.counter)) + td.counter += 1 + return result +} + +func (td *tempDir) removeAll() { + td.Lock() + defer td.Unlock() + if td.path != "" { + err := os.RemoveAll(td.path) + if err != nil { + fmt.Fprintf(os.Stderr, "WARNING: Error cleaning up temporaries: "+err.Error()) + } + } +} + +// Create a new temporary directory which is automatically removed after +// the suite finishes running. +func (c *C) MkDir() string { + path := c.tempDir.newPath() + if err := os.Mkdir(path, 0700); err != nil { + panic(fmt.Sprintf("Couldn't create temporary directory %s: %s", path, err.Error())) + } + return path +} + +// ----------------------------------------------------------------------- +// Low-level logging functions. + +func (c *C) log(args ...interface{}) { + c.writeLog([]byte(fmt.Sprint(args...) + "\n")) +} + +func (c *C) logf(format string, args ...interface{}) { + c.writeLog([]byte(fmt.Sprintf(format+"\n", args...))) +} + +func (c *C) logNewLine() { + c.writeLog([]byte{'\n'}) +} + +func (c *C) writeLog(buf []byte) { + c.logb.Write(buf) + if c.logw != nil { + c.logw.Write(buf) + } +} + +func hasStringOrError(x interface{}) (ok bool) { + _, ok = x.(fmt.Stringer) + if ok { + return + } + _, ok = x.(error) + return +} + +func (c *C) logValue(label string, value interface{}) { + if label == "" { + if hasStringOrError(value) { + c.logf("... %#v (%q)", value, value) + } else { + c.logf("... %#v", value) + } + } else if value == nil { + c.logf("... %s = nil", label) + } else { + if hasStringOrError(value) { + fv := fmt.Sprintf("%#v", value) + qv := fmt.Sprintf("%q", value) + if fv != qv { + c.logf("... %s %s = %s (%s)", label, reflect.TypeOf(value), fv, qv) + return + } + } + if s, ok := value.(string); ok && isMultiLine(s) { + c.logf(`... %s %s = "" +`, label, reflect.TypeOf(value)) + c.logMultiLine(s) + } else { + c.logf("... 
%s %s = %#v", label, reflect.TypeOf(value), value) + } + } +} + +func (c *C) logMultiLine(s string) { + b := make([]byte, 0, len(s)*2) + i := 0 + n := len(s) + for i < n { + j := i + 1 + for j < n && s[j-1] != '\n' { + j++ + } + b = append(b, "... "...) + b = strconv.AppendQuote(b, s[i:j]) + if j < n { + b = append(b, " +"...) + } + b = append(b, '\n') + i = j + } + c.writeLog(b) +} + +func isMultiLine(s string) bool { + for i := 0; i+1 < len(s); i++ { + if s[i] == '\n' { + return true + } + } + return false +} + +func (c *C) logString(issue string) { + c.log("... ", issue) +} + +func (c *C) logCaller(skip int) { + // This is a bit heavier than it ought to be. + skip += 1 // Our own frame. + pc, callerFile, callerLine, ok := runtime.Caller(skip) + if !ok { + return + } + var testFile string + var testLine int + testFunc := runtime.FuncForPC(c.method.PC()) + if runtime.FuncForPC(pc) != testFunc { + for { + skip += 1 + if pc, file, line, ok := runtime.Caller(skip); ok { + // Note that the test line may be different on + // distinct calls for the same test. Showing + // the "internal" line is helpful when debugging. + if runtime.FuncForPC(pc) == testFunc { + testFile, testLine = file, line + break + } + } else { + break + } + } + } + if testFile != "" && (testFile != callerFile || testLine != callerLine) { + c.logCode(testFile, testLine) + } + c.logCode(callerFile, callerLine) +} + +func (c *C) logCode(path string, line int) { + c.logf("%s:%d:", nicePath(path), line) + code, err := printLine(path, line) + if code == "" { + code = "..." // XXX Open the file and take the raw line. + if err != nil { + code += err.Error() + } + } + c.log(indent(code, " ")) +} + +var valueGo = filepath.Join("reflect", "value.go") +var asmGo = filepath.Join("runtime", "asm_") + +func (c *C) logPanic(skip int, value interface{}) { + skip++ // Our own frame. + initialSkip := skip + for ; ; skip++ { + if pc, file, line, ok := runtime.Caller(skip); ok { + if skip == initialSkip { + c.logf("... Panic: %s (PC=0x%X)\n", value, pc) + } + name := niceFuncName(pc) + path := nicePath(file) + if strings.Contains(path, "/gopkg.in/check.v") { + continue + } + if name == "Value.call" && strings.HasSuffix(path, valueGo) { + continue + } + if name == "call16" && strings.Contains(path, asmGo) { + continue + } + c.logf("%s:%d\n in %s", nicePath(file), line, name) + } else { + break + } + } +} + +func (c *C) logSoftPanic(issue string) { + c.log("... Panic: ", issue) +} + +func (c *C) logArgPanic(method *methodType, expectedType string) { + c.logf("... Panic: %s argument should be %s", + niceFuncName(method.PC()), expectedType) +} + +// ----------------------------------------------------------------------- +// Some simple formatting helpers. 
+ +var initWD, initWDErr = os.Getwd() + +func init() { + if initWDErr == nil { + initWD = strings.Replace(initWD, "\\", "/", -1) + "/" + } +} + +func nicePath(path string) string { + if initWDErr == nil { + if strings.HasPrefix(path, initWD) { + return path[len(initWD):] + } + } + return path +} + +func niceFuncPath(pc uintptr) string { + function := runtime.FuncForPC(pc) + if function != nil { + filename, line := function.FileLine(pc) + return fmt.Sprintf("%s:%d", nicePath(filename), line) + } + return "" +} + +func niceFuncName(pc uintptr) string { + function := runtime.FuncForPC(pc) + if function != nil { + name := path.Base(function.Name()) + if i := strings.Index(name, "."); i > 0 { + name = name[i+1:] + } + if strings.HasPrefix(name, "(*") { + if i := strings.Index(name, ")"); i > 0 { + name = name[2:i] + name[i+1:] + } + } + if i := strings.LastIndex(name, ".*"); i != -1 { + name = name[:i] + "." + name[i+2:] + } + if i := strings.LastIndex(name, "·"); i != -1 { + name = name[:i] + "." + name[i+2:] + } + return name + } + return "" +} + +// ----------------------------------------------------------------------- +// Result tracker to aggregate call results. + +type Result struct { + Succeeded int + Failed int + Skipped int + Panicked int + FixturePanicked int + ExpectedFailures int + Missed int // Not even tried to run, related to a panic in the fixture. + RunError error // Houston, we've got a problem. + WorkDir string // If KeepWorkDir is true +} + +type resultTracker struct { + result Result + _lastWasProblem bool + _waiting int + _missed int + _expectChan chan *C + _doneChan chan *C + _stopChan chan bool +} + +func newResultTracker() *resultTracker { + return &resultTracker{_expectChan: make(chan *C), // Synchronous + _doneChan: make(chan *C, 32), // Asynchronous + _stopChan: make(chan bool)} // Synchronous +} + +func (tracker *resultTracker) start() { + go tracker._loopRoutine() +} + +func (tracker *resultTracker) waitAndStop() { + <-tracker._stopChan +} + +func (tracker *resultTracker) expectCall(c *C) { + tracker._expectChan <- c +} + +func (tracker *resultTracker) callDone(c *C) { + tracker._doneChan <- c +} + +func (tracker *resultTracker) _loopRoutine() { + for { + var c *C + if tracker._waiting > 0 { + // Calls still running. Can't stop. + select { + // XXX Reindent this (not now to make diff clear) + case c = <-tracker._expectChan: + tracker._waiting += 1 + case c = <-tracker._doneChan: + tracker._waiting -= 1 + switch c.status { + case succeededSt: + if c.kind == testKd { + if c.mustFail { + tracker.result.ExpectedFailures++ + } else { + tracker.result.Succeeded++ + } + } + case failedSt: + tracker.result.Failed++ + case panickedSt: + if c.kind == fixtureKd { + tracker.result.FixturePanicked++ + } else { + tracker.result.Panicked++ + } + case fixturePanickedSt: + // Track it as missed, since the panic + // was on the fixture, not on the test. + tracker.result.Missed++ + case missedSt: + tracker.result.Missed++ + case skippedSt: + if c.kind == testKd { + tracker.result.Skipped++ + } + } + } + } else { + // No calls. Can stop, but no done calls here. + select { + case tracker._stopChan <- true: + return + case c = <-tracker._expectChan: + tracker._waiting += 1 + case c = <-tracker._doneChan: + panic("Tracker got an unexpected done call.") + } + } + } +} + +// ----------------------------------------------------------------------- +// The underlying suite runner. 
+ +type suiteRunner struct { + suite interface{} + setUpSuite, tearDownSuite *methodType + setUpTest, tearDownTest *methodType + tests []*methodType + tracker *resultTracker + tempDir *tempDir + keepDir bool + output *outputWriter + reportedProblemLast bool + benchTime time.Duration + benchMem bool +} + +type RunConf struct { + Output io.Writer + Stream bool + Verbose bool + Filter string + Benchmark bool + BenchmarkTime time.Duration // Defaults to 1 second + BenchmarkMem bool + KeepWorkDir bool +} + +// Create a new suiteRunner able to run all methods in the given suite. +func newSuiteRunner(suite interface{}, runConf *RunConf) *suiteRunner { + var conf RunConf + if runConf != nil { + conf = *runConf + } + if conf.Output == nil { + conf.Output = os.Stdout + } + if conf.Benchmark { + conf.Verbose = true + } + + suiteType := reflect.TypeOf(suite) + suiteNumMethods := suiteType.NumMethod() + suiteValue := reflect.ValueOf(suite) + + runner := &suiteRunner{ + suite: suite, + output: newOutputWriter(conf.Output, conf.Stream, conf.Verbose), + tracker: newResultTracker(), + benchTime: conf.BenchmarkTime, + benchMem: conf.BenchmarkMem, + tempDir: &tempDir{}, + keepDir: conf.KeepWorkDir, + tests: make([]*methodType, 0, suiteNumMethods), + } + if runner.benchTime == 0 { + runner.benchTime = 1 * time.Second + } + + var filterRegexp *regexp.Regexp + if conf.Filter != "" { + if regexp, err := regexp.Compile(conf.Filter); err != nil { + msg := "Bad filter expression: " + err.Error() + runner.tracker.result.RunError = errors.New(msg) + return runner + } else { + filterRegexp = regexp + } + } + + for i := 0; i != suiteNumMethods; i++ { + method := newMethod(suiteValue, i) + switch method.Info.Name { + case "SetUpSuite": + runner.setUpSuite = method + case "TearDownSuite": + runner.tearDownSuite = method + case "SetUpTest": + runner.setUpTest = method + case "TearDownTest": + runner.tearDownTest = method + default: + prefix := "Test" + if conf.Benchmark { + prefix = "Benchmark" + } + if !strings.HasPrefix(method.Info.Name, prefix) { + continue + } + if filterRegexp == nil || method.matches(filterRegexp) { + runner.tests = append(runner.tests, method) + } + } + } + return runner +} + +// Run all methods in the given suite. +func (runner *suiteRunner) run() *Result { + if runner.tracker.result.RunError == nil && len(runner.tests) > 0 { + runner.tracker.start() + if runner.checkFixtureArgs() { + c := runner.runFixture(runner.setUpSuite, "", nil) + if c == nil || c.status == succeededSt { + for i := 0; i != len(runner.tests); i++ { + c := runner.runTest(runner.tests[i]) + if c.status == fixturePanickedSt { + runner.skipTests(missedSt, runner.tests[i+1:]) + break + } + } + } else if c != nil && c.status == skippedSt { + runner.skipTests(skippedSt, runner.tests) + } else { + runner.skipTests(missedSt, runner.tests) + } + runner.runFixture(runner.tearDownSuite, "", nil) + } else { + runner.skipTests(missedSt, runner.tests) + } + runner.tracker.waitAndStop() + if runner.keepDir { + runner.tracker.result.WorkDir = runner.tempDir.path + } else { + runner.tempDir.removeAll() + } + } + return &runner.tracker.result +} + +// Create a call object with the given suite method, and fork a +// goroutine with the provided dispatcher for running it. 
+func (runner *suiteRunner) forkCall(method *methodType, kind funcKind, testName string, logb *logger, dispatcher func(c *C)) *C { + var logw io.Writer + if runner.output.Stream { + logw = runner.output + } + if logb == nil { + logb = new(logger) + } + c := &C{ + method: method, + kind: kind, + testName: testName, + logb: logb, + logw: logw, + tempDir: runner.tempDir, + done: make(chan *C, 1), + timer: timer{benchTime: runner.benchTime}, + startTime: time.Now(), + benchMem: runner.benchMem, + } + runner.tracker.expectCall(c) + go (func() { + runner.reportCallStarted(c) + defer runner.callDone(c) + dispatcher(c) + })() + return c +} + +// Same as forkCall(), but wait for call to finish before returning. +func (runner *suiteRunner) runFunc(method *methodType, kind funcKind, testName string, logb *logger, dispatcher func(c *C)) *C { + c := runner.forkCall(method, kind, testName, logb, dispatcher) + <-c.done + return c +} + +// Handle a finished call. If there were any panics, update the call status +// accordingly. Then, mark the call as done and report to the tracker. +func (runner *suiteRunner) callDone(c *C) { + value := recover() + if value != nil { + switch v := value.(type) { + case *fixturePanic: + if v.status == skippedSt { + c.status = skippedSt + } else { + c.logSoftPanic("Fixture has panicked (see related PANIC)") + c.status = fixturePanickedSt + } + default: + c.logPanic(1, value) + c.status = panickedSt + } + } + if c.mustFail { + switch c.status { + case failedSt: + c.status = succeededSt + case succeededSt: + c.status = failedSt + c.logString("Error: Test succeeded, but was expected to fail") + c.logString("Reason: " + c.reason) + } + } + + runner.reportCallDone(c) + c.done <- c +} + +// Runs a fixture call synchronously. The fixture will still be run in a +// goroutine like all suite methods, but this method will not return +// while the fixture goroutine is not done, because the fixture must be +// run in a desired order. +func (runner *suiteRunner) runFixture(method *methodType, testName string, logb *logger) *C { + if method != nil { + c := runner.runFunc(method, fixtureKd, testName, logb, func(c *C) { + c.ResetTimer() + c.StartTimer() + defer c.StopTimer() + c.method.Call([]reflect.Value{reflect.ValueOf(c)}) + }) + return c + } + return nil +} + +// Run the fixture method with runFixture(), but panic with a fixturePanic{} +// in case the fixture method panics. This makes it easier to track the +// fixture panic together with other call panics within forkTest(). +func (runner *suiteRunner) runFixtureWithPanic(method *methodType, testName string, logb *logger, skipped *bool) *C { + if skipped != nil && *skipped { + return nil + } + c := runner.runFixture(method, testName, logb) + if c != nil && c.status != succeededSt { + if skipped != nil { + *skipped = c.status == skippedSt + } + panic(&fixturePanic{c.status, method}) + } + return c +} + +type fixturePanic struct { + status funcStatus + method *methodType +} + +// Run the suite test method, together with the test-specific fixture, +// asynchronously. 
+func (runner *suiteRunner) forkTest(method *methodType) *C { + testName := method.String() + return runner.forkCall(method, testKd, testName, nil, func(c *C) { + var skipped bool + defer runner.runFixtureWithPanic(runner.tearDownTest, testName, nil, &skipped) + defer c.StopTimer() + benchN := 1 + for { + runner.runFixtureWithPanic(runner.setUpTest, testName, c.logb, &skipped) + mt := c.method.Type() + if mt.NumIn() != 1 || mt.In(0) != reflect.TypeOf(c) { + // Rather than a plain panic, provide a more helpful message when + // the argument type is incorrect. + c.status = panickedSt + c.logArgPanic(c.method, "*check.C") + return + } + if strings.HasPrefix(c.method.Info.Name, "Test") { + c.ResetTimer() + c.StartTimer() + c.method.Call([]reflect.Value{reflect.ValueOf(c)}) + return + } + if !strings.HasPrefix(c.method.Info.Name, "Benchmark") { + panic("unexpected method prefix: " + c.method.Info.Name) + } + + runtime.GC() + c.N = benchN + c.ResetTimer() + c.StartTimer() + c.method.Call([]reflect.Value{reflect.ValueOf(c)}) + c.StopTimer() + if c.status != succeededSt || c.duration >= c.benchTime || benchN >= 1e9 { + return + } + perOpN := int(1e9) + if c.nsPerOp() != 0 { + perOpN = int(c.benchTime.Nanoseconds() / c.nsPerOp()) + } + + // Logic taken from the stock testing package: + // - Run more iterations than we think we'll need for a second (1.5x). + // - Don't grow too fast in case we had timing errors previously. + // - Be sure to run at least one more than last time. + benchN = max(min(perOpN+perOpN/2, 100*benchN), benchN+1) + benchN = roundUp(benchN) + + skipped = true // Don't run the deferred one if this panics. + runner.runFixtureWithPanic(runner.tearDownTest, testName, nil, nil) + skipped = false + } + }) +} + +// Same as forkTest(), but wait for the test to finish before returning. +func (runner *suiteRunner) runTest(method *methodType) *C { + c := runner.forkTest(method) + <-c.done + return c +} + +// Helper to mark tests as skipped or missed. A bit heavy for what +// it does, but it enables homogeneous handling of tracking, including +// nice verbose output. +func (runner *suiteRunner) skipTests(status funcStatus, methods []*methodType) { + for _, method := range methods { + runner.runFunc(method, testKd, "", nil, func(c *C) { + c.status = status + }) + } +} + +// Verify if the fixture arguments are *check.C. In case of errors, +// log the error as a panic in the fixture method call, and return false. 
+func (runner *suiteRunner) checkFixtureArgs() bool { + succeeded := true + argType := reflect.TypeOf(&C{}) + for _, method := range []*methodType{runner.setUpSuite, runner.tearDownSuite, runner.setUpTest, runner.tearDownTest} { + if method != nil { + mt := method.Type() + if mt.NumIn() != 1 || mt.In(0) != argType { + succeeded = false + runner.runFunc(method, fixtureKd, "", nil, func(c *C) { + c.logArgPanic(method, "*check.C") + c.status = panickedSt + }) + } + } + } + return succeeded +} + +func (runner *suiteRunner) reportCallStarted(c *C) { + runner.output.WriteCallStarted("START", c) +} + +func (runner *suiteRunner) reportCallDone(c *C) { + runner.tracker.callDone(c) + switch c.status { + case succeededSt: + if c.mustFail { + runner.output.WriteCallSuccess("FAIL EXPECTED", c) + } else { + runner.output.WriteCallSuccess("PASS", c) + } + case skippedSt: + runner.output.WriteCallSuccess("SKIP", c) + case failedSt: + runner.output.WriteCallProblem("FAIL", c) + case panickedSt: + runner.output.WriteCallProblem("PANIC", c) + case fixturePanickedSt: + // That's a testKd call reporting that its fixture + // has panicked. The fixture call which caused the + // panic itself was tracked above. We'll report to + // aid debugging. + runner.output.WriteCallProblem("PANIC", c) + case missedSt: + runner.output.WriteCallSuccess("MISS", c) + } +} + +// ----------------------------------------------------------------------- +// Output writer manages atomic output writing according to settings. + +type outputWriter struct { + m sync.Mutex + writer io.Writer + wroteCallProblemLast bool + Stream bool + Verbose bool +} + +func newOutputWriter(writer io.Writer, stream, verbose bool) *outputWriter { + return &outputWriter{writer: writer, Stream: stream, Verbose: verbose} +} + +func (ow *outputWriter) Write(content []byte) (n int, err error) { + ow.m.Lock() + n, err = ow.writer.Write(content) + ow.m.Unlock() + return +} + +func (ow *outputWriter) WriteCallStarted(label string, c *C) { + if ow.Stream { + header := renderCallHeader(label, c, "", "\n") + ow.m.Lock() + ow.writer.Write([]byte(header)) + ow.m.Unlock() + } +} + +func (ow *outputWriter) WriteCallProblem(label string, c *C) { + var prefix string + if !ow.Stream { + prefix = "\n-----------------------------------" + + "-----------------------------------\n" + } + header := renderCallHeader(label, c, prefix, "\n\n") + ow.m.Lock() + ow.wroteCallProblemLast = true + ow.writer.Write([]byte(header)) + if !ow.Stream { + c.logb.WriteTo(ow.writer) + } + ow.m.Unlock() +} + +func (ow *outputWriter) WriteCallSuccess(label string, c *C) { + if ow.Stream || (ow.Verbose && c.kind == testKd) { + // TODO Use a buffer here. + var suffix string + if c.reason != "" { + suffix = " (" + c.reason + ")" + } + if c.status == succeededSt { + suffix += "\t" + c.timerString() + } + suffix += "\n" + if ow.Stream { + suffix += "\n" + } + header := renderCallHeader(label, c, "", suffix) + ow.m.Lock() + // Resist temptation of using line as prefix above due to race. 
+ if !ow.Stream && ow.wroteCallProblemLast { + header = "\n-----------------------------------" + + "-----------------------------------\n" + + header + } + ow.wroteCallProblemLast = false + ow.writer.Write([]byte(header)) + ow.m.Unlock() + } +} + +func renderCallHeader(label string, c *C, prefix, suffix string) string { + pc := c.method.PC() + return fmt.Sprintf("%s%s: %s: %s%s", prefix, label, niceFuncPath(pc), + niceFuncName(pc), suffix) +} diff --git a/vendor/github.com/docker/distribution/vendor/gopkg.in/check.v1/checkers.go b/vendor/github.com/docker/distribution/vendor/gopkg.in/check.v1/checkers.go new file mode 100644 index 000000000..bac338729 --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/gopkg.in/check.v1/checkers.go @@ -0,0 +1,458 @@ +package check + +import ( + "fmt" + "reflect" + "regexp" +) + +// ----------------------------------------------------------------------- +// CommentInterface and Commentf helper, to attach extra information to checks. + +type comment struct { + format string + args []interface{} +} + +// Commentf returns an infomational value to use with Assert or Check calls. +// If the checker test fails, the provided arguments will be passed to +// fmt.Sprintf, and will be presented next to the logged failure. +// +// For example: +// +// c.Assert(v, Equals, 42, Commentf("Iteration #%d failed.", i)) +// +// Note that if the comment is constant, a better option is to +// simply use a normal comment right above or next to the line, as +// it will also get printed with any errors: +// +// c.Assert(l, Equals, 8192) // Ensure buffer size is correct (bug #123) +// +func Commentf(format string, args ...interface{}) CommentInterface { + return &comment{format, args} +} + +// CommentInterface must be implemented by types that attach extra +// information to failed checks. See the Commentf function for details. +type CommentInterface interface { + CheckCommentString() string +} + +func (c *comment) CheckCommentString() string { + return fmt.Sprintf(c.format, c.args...) +} + +// ----------------------------------------------------------------------- +// The Checker interface. + +// The Checker interface must be provided by checkers used with +// the Assert and Check verification methods. +type Checker interface { + Info() *CheckerInfo + Check(params []interface{}, names []string) (result bool, error string) +} + +// See the Checker interface. +type CheckerInfo struct { + Name string + Params []string +} + +func (info *CheckerInfo) Info() *CheckerInfo { + return info +} + +// ----------------------------------------------------------------------- +// Not checker logic inverter. + +// The Not checker inverts the logic of the provided checker. The +// resulting checker will succeed where the original one failed, and +// vice-versa. +// +// For example: +// +// c.Assert(a, Not(Equals), b) +// +func Not(checker Checker) Checker { + return ¬Checker{checker} +} + +type notChecker struct { + sub Checker +} + +func (checker *notChecker) Info() *CheckerInfo { + info := *checker.sub.Info() + info.Name = "Not(" + info.Name + ")" + return &info +} + +func (checker *notChecker) Check(params []interface{}, names []string) (result bool, error string) { + result, error = checker.sub.Check(params, names) + result = !result + return +} + +// ----------------------------------------------------------------------- +// IsNil checker. + +type isNilChecker struct { + *CheckerInfo +} + +// The IsNil checker tests whether the obtained value is nil. 
+// +// For example: +// +// c.Assert(err, IsNil) +// +var IsNil Checker = &isNilChecker{ + &CheckerInfo{Name: "IsNil", Params: []string{"value"}}, +} + +func (checker *isNilChecker) Check(params []interface{}, names []string) (result bool, error string) { + return isNil(params[0]), "" +} + +func isNil(obtained interface{}) (result bool) { + if obtained == nil { + result = true + } else { + switch v := reflect.ValueOf(obtained); v.Kind() { + case reflect.Chan, reflect.Func, reflect.Interface, reflect.Map, reflect.Ptr, reflect.Slice: + return v.IsNil() + } + } + return +} + +// ----------------------------------------------------------------------- +// NotNil checker. Alias for Not(IsNil), since it's so common. + +type notNilChecker struct { + *CheckerInfo +} + +// The NotNil checker verifies that the obtained value is not nil. +// +// For example: +// +// c.Assert(iface, NotNil) +// +// This is an alias for Not(IsNil), made available since it's a +// fairly common check. +// +var NotNil Checker = ¬NilChecker{ + &CheckerInfo{Name: "NotNil", Params: []string{"value"}}, +} + +func (checker *notNilChecker) Check(params []interface{}, names []string) (result bool, error string) { + return !isNil(params[0]), "" +} + +// ----------------------------------------------------------------------- +// Equals checker. + +type equalsChecker struct { + *CheckerInfo +} + +// The Equals checker verifies that the obtained value is equal to +// the expected value, according to usual Go semantics for ==. +// +// For example: +// +// c.Assert(value, Equals, 42) +// +var Equals Checker = &equalsChecker{ + &CheckerInfo{Name: "Equals", Params: []string{"obtained", "expected"}}, +} + +func (checker *equalsChecker) Check(params []interface{}, names []string) (result bool, error string) { + defer func() { + if v := recover(); v != nil { + result = false + error = fmt.Sprint(v) + } + }() + return params[0] == params[1], "" +} + +// ----------------------------------------------------------------------- +// DeepEquals checker. + +type deepEqualsChecker struct { + *CheckerInfo +} + +// The DeepEquals checker verifies that the obtained value is deep-equal to +// the expected value. The check will work correctly even when facing +// slices, interfaces, and values of different types (which always fail +// the test). +// +// For example: +// +// c.Assert(value, DeepEquals, 42) +// c.Assert(array, DeepEquals, []string{"hi", "there"}) +// +var DeepEquals Checker = &deepEqualsChecker{ + &CheckerInfo{Name: "DeepEquals", Params: []string{"obtained", "expected"}}, +} + +func (checker *deepEqualsChecker) Check(params []interface{}, names []string) (result bool, error string) { + return reflect.DeepEqual(params[0], params[1]), "" +} + +// ----------------------------------------------------------------------- +// HasLen checker. + +type hasLenChecker struct { + *CheckerInfo +} + +// The HasLen checker verifies that the obtained value has the +// provided length. In many cases this is superior to using Equals +// in conjuction with the len function because in case the check +// fails the value itself will be printed, instead of its length, +// providing more details for figuring the problem. 
+// +// For example: +// +// c.Assert(list, HasLen, 5) +// +var HasLen Checker = &hasLenChecker{ + &CheckerInfo{Name: "HasLen", Params: []string{"obtained", "n"}}, +} + +func (checker *hasLenChecker) Check(params []interface{}, names []string) (result bool, error string) { + n, ok := params[1].(int) + if !ok { + return false, "n must be an int" + } + value := reflect.ValueOf(params[0]) + switch value.Kind() { + case reflect.Map, reflect.Array, reflect.Slice, reflect.Chan, reflect.String: + default: + return false, "obtained value type has no length" + } + return value.Len() == n, "" +} + +// ----------------------------------------------------------------------- +// ErrorMatches checker. + +type errorMatchesChecker struct { + *CheckerInfo +} + +// The ErrorMatches checker verifies that the error value +// is non nil and matches the regular expression provided. +// +// For example: +// +// c.Assert(err, ErrorMatches, "perm.*denied") +// +var ErrorMatches Checker = errorMatchesChecker{ + &CheckerInfo{Name: "ErrorMatches", Params: []string{"value", "regex"}}, +} + +func (checker errorMatchesChecker) Check(params []interface{}, names []string) (result bool, errStr string) { + if params[0] == nil { + return false, "Error value is nil" + } + err, ok := params[0].(error) + if !ok { + return false, "Value is not an error" + } + params[0] = err.Error() + names[0] = "error" + return matches(params[0], params[1]) +} + +// ----------------------------------------------------------------------- +// Matches checker. + +type matchesChecker struct { + *CheckerInfo +} + +// The Matches checker verifies that the string provided as the obtained +// value (or the string resulting from obtained.String()) matches the +// regular expression provided. +// +// For example: +// +// c.Assert(err, Matches, "perm.*denied") +// +var Matches Checker = &matchesChecker{ + &CheckerInfo{Name: "Matches", Params: []string{"value", "regex"}}, +} + +func (checker *matchesChecker) Check(params []interface{}, names []string) (result bool, error string) { + return matches(params[0], params[1]) +} + +func matches(value, regex interface{}) (result bool, error string) { + reStr, ok := regex.(string) + if !ok { + return false, "Regex must be a string" + } + valueStr, valueIsStr := value.(string) + if !valueIsStr { + if valueWithStr, valueHasStr := value.(fmt.Stringer); valueHasStr { + valueStr, valueIsStr = valueWithStr.String(), true + } + } + if valueIsStr { + matches, err := regexp.MatchString("^"+reStr+"$", valueStr) + if err != nil { + return false, "Can't compile regex: " + err.Error() + } + return matches, "" + } + return false, "Obtained value is not a string and has no .String()" +} + +// ----------------------------------------------------------------------- +// Panics checker. + +type panicsChecker struct { + *CheckerInfo +} + +// The Panics checker verifies that calling the provided zero-argument +// function will cause a panic which is deep-equal to the provided value. +// +// For example: +// +// c.Assert(func() { f(1, 2) }, Panics, &SomeErrorType{"BOOM"}). +// +// +var Panics Checker = &panicsChecker{ + &CheckerInfo{Name: "Panics", Params: []string{"function", "expected"}}, +} + +func (checker *panicsChecker) Check(params []interface{}, names []string) (result bool, error string) { + f := reflect.ValueOf(params[0]) + if f.Kind() != reflect.Func || f.Type().NumIn() != 0 { + return false, "Function must take zero arguments" + } + defer func() { + // If the function has not panicked, then don't do the check. 
+ if error != "" { + return + } + params[0] = recover() + names[0] = "panic" + result = reflect.DeepEqual(params[0], params[1]) + }() + f.Call(nil) + return false, "Function has not panicked" +} + +type panicMatchesChecker struct { + *CheckerInfo +} + +// The PanicMatches checker verifies that calling the provided zero-argument +// function will cause a panic with an error value matching +// the regular expression provided. +// +// For example: +// +// c.Assert(func() { f(1, 2) }, PanicMatches, `open.*: no such file or directory`). +// +// +var PanicMatches Checker = &panicMatchesChecker{ + &CheckerInfo{Name: "PanicMatches", Params: []string{"function", "expected"}}, +} + +func (checker *panicMatchesChecker) Check(params []interface{}, names []string) (result bool, errmsg string) { + f := reflect.ValueOf(params[0]) + if f.Kind() != reflect.Func || f.Type().NumIn() != 0 { + return false, "Function must take zero arguments" + } + defer func() { + // If the function has not panicked, then don't do the check. + if errmsg != "" { + return + } + obtained := recover() + names[0] = "panic" + if e, ok := obtained.(error); ok { + params[0] = e.Error() + } else if _, ok := obtained.(string); ok { + params[0] = obtained + } else { + errmsg = "Panic value is not a string or an error" + return + } + result, errmsg = matches(params[0], params[1]) + }() + f.Call(nil) + return false, "Function has not panicked" +} + +// ----------------------------------------------------------------------- +// FitsTypeOf checker. + +type fitsTypeChecker struct { + *CheckerInfo +} + +// The FitsTypeOf checker verifies that the obtained value is +// assignable to a variable with the same type as the provided +// sample value. +// +// For example: +// +// c.Assert(value, FitsTypeOf, int64(0)) +// c.Assert(value, FitsTypeOf, os.Error(nil)) +// +var FitsTypeOf Checker = &fitsTypeChecker{ + &CheckerInfo{Name: "FitsTypeOf", Params: []string{"obtained", "sample"}}, +} + +func (checker *fitsTypeChecker) Check(params []interface{}, names []string) (result bool, error string) { + obtained := reflect.ValueOf(params[0]) + sample := reflect.ValueOf(params[1]) + if !obtained.IsValid() { + return false, "" + } + if !sample.IsValid() { + return false, "Invalid sample value" + } + return obtained.Type().AssignableTo(sample.Type()), "" +} + +// ----------------------------------------------------------------------- +// Implements checker. + +type implementsChecker struct { + *CheckerInfo +} + +// The Implements checker verifies that the obtained value +// implements the interface specified via a pointer to an interface +// variable. 
+// +// For example: +// +// var e os.Error +// c.Assert(err, Implements, &e) +// +var Implements Checker = &implementsChecker{ + &CheckerInfo{Name: "Implements", Params: []string{"obtained", "ifaceptr"}}, +} + +func (checker *implementsChecker) Check(params []interface{}, names []string) (result bool, error string) { + obtained := reflect.ValueOf(params[0]) + ifaceptr := reflect.ValueOf(params[1]) + if !obtained.IsValid() { + return false, "" + } + if !ifaceptr.IsValid() || ifaceptr.Kind() != reflect.Ptr || ifaceptr.Elem().Kind() != reflect.Interface { + return false, "ifaceptr should be a pointer to an interface variable" + } + return obtained.Type().Implements(ifaceptr.Elem().Type()), "" +} diff --git a/vendor/github.com/docker/distribution/vendor/gopkg.in/check.v1/helpers.go b/vendor/github.com/docker/distribution/vendor/gopkg.in/check.v1/helpers.go new file mode 100644 index 000000000..4b6c26da4 --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/gopkg.in/check.v1/helpers.go @@ -0,0 +1,231 @@ +package check + +import ( + "fmt" + "strings" + "time" +) + +// TestName returns the current test name in the form "SuiteName.TestName" +func (c *C) TestName() string { + return c.testName +} + +// ----------------------------------------------------------------------- +// Basic succeeding/failing logic. + +// Failed returns whether the currently running test has already failed. +func (c *C) Failed() bool { + return c.status == failedSt +} + +// Fail marks the currently running test as failed. +// +// Something ought to have been previously logged so the developer can tell +// what went wrong. The higher level helper functions will fail the test +// and do the logging properly. +func (c *C) Fail() { + c.status = failedSt +} + +// FailNow marks the currently running test as failed and stops running it. +// Something ought to have been previously logged so the developer can tell +// what went wrong. The higher level helper functions will fail the test +// and do the logging properly. +func (c *C) FailNow() { + c.Fail() + c.stopNow() +} + +// Succeed marks the currently running test as succeeded, undoing any +// previous failures. +func (c *C) Succeed() { + c.status = succeededSt +} + +// SucceedNow marks the currently running test as succeeded, undoing any +// previous failures, and stops running the test. +func (c *C) SucceedNow() { + c.Succeed() + c.stopNow() +} + +// ExpectFailure informs that the running test is knowingly broken for +// the provided reason. If the test does not fail, an error will be reported +// to raise attention to this fact. This method is useful to temporarily +// disable tests which cover well known problems until a better time to +// fix the problem is found, without forgetting about the fact that a +// failure still exists. +func (c *C) ExpectFailure(reason string) { + if reason == "" { + panic("Missing reason why the test is expected to fail") + } + c.mustFail = true + c.reason = reason +} + +// Skip skips the running test for the provided reason. If run from within +// SetUpTest, the individual test being set up will be skipped, and if run +// from within SetUpSuite, the whole suite is skipped. +func (c *C) Skip(reason string) { + if reason == "" { + panic("Missing reason why the test is being skipped") + } + c.reason = reason + c.status = skippedSt + c.stopNow() +} + +// ----------------------------------------------------------------------- +// Basic logging. + +// GetTestLog returns the current test error output. 
+func (c *C) GetTestLog() string { + return c.logb.String() +} + +// Log logs some information into the test error output. +// The provided arguments are assembled together into a string with fmt.Sprint. +func (c *C) Log(args ...interface{}) { + c.log(args...) +} + +// Log logs some information into the test error output. +// The provided arguments are assembled together into a string with fmt.Sprintf. +func (c *C) Logf(format string, args ...interface{}) { + c.logf(format, args...) +} + +// Output enables *C to be used as a logger in functions that require only +// the minimum interface of *log.Logger. +func (c *C) Output(calldepth int, s string) error { + d := time.Now().Sub(c.startTime) + msec := d / time.Millisecond + sec := d / time.Second + min := d / time.Minute + + c.Logf("[LOG] %d:%02d.%03d %s", min, sec%60, msec%1000, s) + return nil +} + +// Error logs an error into the test error output and marks the test as failed. +// The provided arguments are assembled together into a string with fmt.Sprint. +func (c *C) Error(args ...interface{}) { + c.logCaller(1) + c.logString(fmt.Sprint("Error: ", fmt.Sprint(args...))) + c.logNewLine() + c.Fail() +} + +// Errorf logs an error into the test error output and marks the test as failed. +// The provided arguments are assembled together into a string with fmt.Sprintf. +func (c *C) Errorf(format string, args ...interface{}) { + c.logCaller(1) + c.logString(fmt.Sprintf("Error: "+format, args...)) + c.logNewLine() + c.Fail() +} + +// Fatal logs an error into the test error output, marks the test as failed, and +// stops the test execution. The provided arguments are assembled together into +// a string with fmt.Sprint. +func (c *C) Fatal(args ...interface{}) { + c.logCaller(1) + c.logString(fmt.Sprint("Error: ", fmt.Sprint(args...))) + c.logNewLine() + c.FailNow() +} + +// Fatlaf logs an error into the test error output, marks the test as failed, and +// stops the test execution. The provided arguments are assembled together into +// a string with fmt.Sprintf. +func (c *C) Fatalf(format string, args ...interface{}) { + c.logCaller(1) + c.logString(fmt.Sprint("Error: ", fmt.Sprintf(format, args...))) + c.logNewLine() + c.FailNow() +} + +// ----------------------------------------------------------------------- +// Generic checks and assertions based on checkers. + +// Check verifies if the first value matches the expected value according +// to the provided checker. If they do not match, an error is logged, the +// test is marked as failed, and the test execution continues. +// +// Some checkers may not need the expected argument (e.g. IsNil). +// +// Extra arguments provided to the function are logged next to the reported +// problem when the matching fails. +func (c *C) Check(obtained interface{}, checker Checker, args ...interface{}) bool { + return c.internalCheck("Check", obtained, checker, args...) +} + +// Assert ensures that the first value matches the expected value according +// to the provided checker. If they do not match, an error is logged, the +// test is marked as failed, and the test execution stops. +// +// Some checkers may not need the expected argument (e.g. IsNil). +// +// Extra arguments provided to the function are logged next to the reported +// problem when the matching fails. +func (c *C) Assert(obtained interface{}, checker Checker, args ...interface{}) { + if !c.internalCheck("Assert", obtained, checker, args...) 
{ + c.stopNow() + } +} + +func (c *C) internalCheck(funcName string, obtained interface{}, checker Checker, args ...interface{}) bool { + if checker == nil { + c.logCaller(2) + c.logString(fmt.Sprintf("%s(obtained, nil!?, ...):", funcName)) + c.logString("Oops.. you've provided a nil checker!") + c.logNewLine() + c.Fail() + return false + } + + // If the last argument is a bug info, extract it out. + var comment CommentInterface + if len(args) > 0 { + if c, ok := args[len(args)-1].(CommentInterface); ok { + comment = c + args = args[:len(args)-1] + } + } + + params := append([]interface{}{obtained}, args...) + info := checker.Info() + + if len(params) != len(info.Params) { + names := append([]string{info.Params[0], info.Name}, info.Params[1:]...) + c.logCaller(2) + c.logString(fmt.Sprintf("%s(%s):", funcName, strings.Join(names, ", "))) + c.logString(fmt.Sprintf("Wrong number of parameters for %s: want %d, got %d", info.Name, len(names), len(params)+1)) + c.logNewLine() + c.Fail() + return false + } + + // Copy since it may be mutated by Check. + names := append([]string{}, info.Params...) + + // Do the actual check. + result, error := checker.Check(params, names) + if !result || error != "" { + c.logCaller(2) + for i := 0; i != len(params); i++ { + c.logValue(names[i], params[i]) + } + if comment != nil { + c.logString(comment.CheckCommentString()) + } + if error != "" { + c.logString(error) + } + c.logNewLine() + c.Fail() + return false + } + return true +} diff --git a/vendor/github.com/docker/distribution/vendor/gopkg.in/check.v1/printer.go b/vendor/github.com/docker/distribution/vendor/gopkg.in/check.v1/printer.go new file mode 100644 index 000000000..e0f7557b5 --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/gopkg.in/check.v1/printer.go @@ -0,0 +1,168 @@ +package check + +import ( + "bytes" + "go/ast" + "go/parser" + "go/printer" + "go/token" + "os" +) + +func indent(s, with string) (r string) { + eol := true + for i := 0; i != len(s); i++ { + c := s[i] + switch { + case eol && c == '\n' || c == '\r': + case c == '\n' || c == '\r': + eol = true + case eol: + eol = false + s = s[:i] + with + s[i:] + i += len(with) + } + } + return s +} + +func printLine(filename string, line int) (string, error) { + fset := token.NewFileSet() + file, err := os.Open(filename) + if err != nil { + return "", err + } + fnode, err := parser.ParseFile(fset, filename, file, parser.ParseComments) + if err != nil { + return "", err + } + config := &printer.Config{Mode: printer.UseSpaces, Tabwidth: 4} + lp := &linePrinter{fset: fset, fnode: fnode, line: line, config: config} + ast.Walk(lp, fnode) + result := lp.output.Bytes() + // Comments leave \n at the end. 
+ n := len(result) + for n > 0 && result[n-1] == '\n' { + n-- + } + return string(result[:n]), nil +} + +type linePrinter struct { + config *printer.Config + fset *token.FileSet + fnode *ast.File + line int + output bytes.Buffer + stmt ast.Stmt +} + +func (lp *linePrinter) emit() bool { + if lp.stmt != nil { + lp.trim(lp.stmt) + lp.printWithComments(lp.stmt) + lp.stmt = nil + return true + } + return false +} + +func (lp *linePrinter) printWithComments(n ast.Node) { + nfirst := lp.fset.Position(n.Pos()).Line + nlast := lp.fset.Position(n.End()).Line + for _, g := range lp.fnode.Comments { + cfirst := lp.fset.Position(g.Pos()).Line + clast := lp.fset.Position(g.End()).Line + if clast == nfirst-1 && lp.fset.Position(n.Pos()).Column == lp.fset.Position(g.Pos()).Column { + for _, c := range g.List { + lp.output.WriteString(c.Text) + lp.output.WriteByte('\n') + } + } + if cfirst >= nfirst && cfirst <= nlast && n.End() <= g.List[0].Slash { + // The printer will not include the comment if it starts past + // the node itself. Trick it into printing by overlapping the + // slash with the end of the statement. + g.List[0].Slash = n.End() - 1 + } + } + node := &printer.CommentedNode{n, lp.fnode.Comments} + lp.config.Fprint(&lp.output, lp.fset, node) +} + +func (lp *linePrinter) Visit(n ast.Node) (w ast.Visitor) { + if n == nil { + if lp.output.Len() == 0 { + lp.emit() + } + return nil + } + first := lp.fset.Position(n.Pos()).Line + last := lp.fset.Position(n.End()).Line + if first <= lp.line && last >= lp.line { + // Print the innermost statement containing the line. + if stmt, ok := n.(ast.Stmt); ok { + if _, ok := n.(*ast.BlockStmt); !ok { + lp.stmt = stmt + } + } + if first == lp.line && lp.emit() { + return nil + } + return lp + } + return nil +} + +func (lp *linePrinter) trim(n ast.Node) bool { + stmt, ok := n.(ast.Stmt) + if !ok { + return true + } + line := lp.fset.Position(n.Pos()).Line + if line != lp.line { + return false + } + switch stmt := stmt.(type) { + case *ast.IfStmt: + stmt.Body = lp.trimBlock(stmt.Body) + case *ast.SwitchStmt: + stmt.Body = lp.trimBlock(stmt.Body) + case *ast.TypeSwitchStmt: + stmt.Body = lp.trimBlock(stmt.Body) + case *ast.CaseClause: + stmt.Body = lp.trimList(stmt.Body) + case *ast.CommClause: + stmt.Body = lp.trimList(stmt.Body) + case *ast.BlockStmt: + stmt.List = lp.trimList(stmt.List) + } + return true +} + +func (lp *linePrinter) trimBlock(stmt *ast.BlockStmt) *ast.BlockStmt { + if !lp.trim(stmt) { + return lp.emptyBlock(stmt) + } + stmt.Rbrace = stmt.Lbrace + return stmt +} + +func (lp *linePrinter) trimList(stmts []ast.Stmt) []ast.Stmt { + for i := 0; i != len(stmts); i++ { + if !lp.trim(stmts[i]) { + stmts[i] = lp.emptyStmt(stmts[i]) + break + } + } + return stmts +} + +func (lp *linePrinter) emptyStmt(n ast.Node) *ast.ExprStmt { + return &ast.ExprStmt{&ast.Ellipsis{n.Pos(), nil}} +} + +func (lp *linePrinter) emptyBlock(n ast.Node) *ast.BlockStmt { + p := n.Pos() + return &ast.BlockStmt{p, []ast.Stmt{lp.emptyStmt(n)}, p} +} diff --git a/vendor/github.com/docker/distribution/vendor/gopkg.in/check.v1/run.go b/vendor/github.com/docker/distribution/vendor/gopkg.in/check.v1/run.go new file mode 100644 index 000000000..da8fd7987 --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/gopkg.in/check.v1/run.go @@ -0,0 +1,175 @@ +package check + +import ( + "bufio" + "flag" + "fmt" + "os" + "testing" + "time" +) + +// ----------------------------------------------------------------------- +// Test suite registry. 
+ +var allSuites []interface{} + +// Suite registers the given value as a test suite to be run. Any methods +// starting with the Test prefix in the given value will be considered as +// a test method. +func Suite(suite interface{}) interface{} { + allSuites = append(allSuites, suite) + return suite +} + +// ----------------------------------------------------------------------- +// Public running interface. + +var ( + oldFilterFlag = flag.String("gocheck.f", "", "Regular expression selecting which tests and/or suites to run") + oldVerboseFlag = flag.Bool("gocheck.v", false, "Verbose mode") + oldStreamFlag = flag.Bool("gocheck.vv", false, "Super verbose mode (disables output caching)") + oldBenchFlag = flag.Bool("gocheck.b", false, "Run benchmarks") + oldBenchTime = flag.Duration("gocheck.btime", 1*time.Second, "approximate run time for each benchmark") + oldListFlag = flag.Bool("gocheck.list", false, "List the names of all tests that will be run") + oldWorkFlag = flag.Bool("gocheck.work", false, "Display and do not remove the test working directory") + + newFilterFlag = flag.String("check.f", "", "Regular expression selecting which tests and/or suites to run") + newVerboseFlag = flag.Bool("check.v", false, "Verbose mode") + newStreamFlag = flag.Bool("check.vv", false, "Super verbose mode (disables output caching)") + newBenchFlag = flag.Bool("check.b", false, "Run benchmarks") + newBenchTime = flag.Duration("check.btime", 1*time.Second, "approximate run time for each benchmark") + newBenchMem = flag.Bool("check.bmem", false, "Report memory benchmarks") + newListFlag = flag.Bool("check.list", false, "List the names of all tests that will be run") + newWorkFlag = flag.Bool("check.work", false, "Display and do not remove the test working directory") +) + +// TestingT runs all test suites registered with the Suite function, +// printing results to stdout, and reporting any failures back to +// the "testing" package. +func TestingT(testingT *testing.T) { + benchTime := *newBenchTime + if benchTime == 1*time.Second { + benchTime = *oldBenchTime + } + conf := &RunConf{ + Filter: *oldFilterFlag + *newFilterFlag, + Verbose: *oldVerboseFlag || *newVerboseFlag, + Stream: *oldStreamFlag || *newStreamFlag, + Benchmark: *oldBenchFlag || *newBenchFlag, + BenchmarkTime: benchTime, + BenchmarkMem: *newBenchMem, + KeepWorkDir: *oldWorkFlag || *newWorkFlag, + } + if *oldListFlag || *newListFlag { + w := bufio.NewWriter(os.Stdout) + for _, name := range ListAll(conf) { + fmt.Fprintln(w, name) + } + w.Flush() + return + } + result := RunAll(conf) + println(result.String()) + if !result.Passed() { + testingT.Fail() + } +} + +// RunAll runs all test suites registered with the Suite function, using the +// provided run configuration. +func RunAll(runConf *RunConf) *Result { + result := Result{} + for _, suite := range allSuites { + result.Add(Run(suite, runConf)) + } + return &result +} + +// Run runs the provided test suite using the provided run configuration. +func Run(suite interface{}, runConf *RunConf) *Result { + runner := newSuiteRunner(suite, runConf) + return runner.run() +} + +// ListAll returns the names of all the test functions registered with the +// Suite function that will be run with the provided run configuration. +func ListAll(runConf *RunConf) []string { + var names []string + for _, suite := range allSuites { + names = append(names, List(suite, runConf)...) 
+ } + return names +} + +// List returns the names of the test functions in the given +// suite that will be run with the provided run configuration. +func List(suite interface{}, runConf *RunConf) []string { + var names []string + runner := newSuiteRunner(suite, runConf) + for _, t := range runner.tests { + names = append(names, t.String()) + } + return names +} + +// ----------------------------------------------------------------------- +// Result methods. + +func (r *Result) Add(other *Result) { + r.Succeeded += other.Succeeded + r.Skipped += other.Skipped + r.Failed += other.Failed + r.Panicked += other.Panicked + r.FixturePanicked += other.FixturePanicked + r.ExpectedFailures += other.ExpectedFailures + r.Missed += other.Missed + if r.WorkDir != "" && other.WorkDir != "" { + r.WorkDir += ":" + other.WorkDir + } else if other.WorkDir != "" { + r.WorkDir = other.WorkDir + } +} + +func (r *Result) Passed() bool { + return (r.Failed == 0 && r.Panicked == 0 && + r.FixturePanicked == 0 && r.Missed == 0 && + r.RunError == nil) +} + +func (r *Result) String() string { + if r.RunError != nil { + return "ERROR: " + r.RunError.Error() + } + + var value string + if r.Failed == 0 && r.Panicked == 0 && r.FixturePanicked == 0 && + r.Missed == 0 { + value = "OK: " + } else { + value = "OOPS: " + } + value += fmt.Sprintf("%d passed", r.Succeeded) + if r.Skipped != 0 { + value += fmt.Sprintf(", %d skipped", r.Skipped) + } + if r.ExpectedFailures != 0 { + value += fmt.Sprintf(", %d expected failures", r.ExpectedFailures) + } + if r.Failed != 0 { + value += fmt.Sprintf(", %d FAILED", r.Failed) + } + if r.Panicked != 0 { + value += fmt.Sprintf(", %d PANICKED", r.Panicked) + } + if r.FixturePanicked != 0 { + value += fmt.Sprintf(", %d FIXTURE-PANICKED", r.FixturePanicked) + } + if r.Missed != 0 { + value += fmt.Sprintf(", %d MISSED", r.Missed) + } + if r.WorkDir != "" { + value += "\nWORK=" + r.WorkDir + } + return value +} diff --git a/vendor/github.com/docker/distribution/vendor/gopkg.in/yaml.v2/LICENSE b/vendor/github.com/docker/distribution/vendor/gopkg.in/yaml.v2/LICENSE new file mode 100644 index 000000000..a68e67f01 --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/gopkg.in/yaml.v2/LICENSE @@ -0,0 +1,188 @@ + +Copyright (c) 2011-2014 - Canonical Inc. + +This software is licensed under the LGPLv3, included below. + +As a special exception to the GNU Lesser General Public License version 3 +("LGPL3"), the copyright holders of this Library give you permission to +convey to a third party a Combined Work that links statically or dynamically +to this Library without providing any Minimal Corresponding Source or +Minimal Application Code as set out in 4d or providing the installation +information set out in section 4e, provided that you comply with the other +provisions of LGPL3 and provided that you meet, for the Application the +terms and conditions of the license(s) which apply to the Application. + +Except as stated in this special exception, the provisions of LGPL3 will +continue to comply in full to this Library. If you modify this Library, you +may apply this exception to your version of this Library, but you are not +obliged to do so. If you do not wish to do so, delete this exception +statement from your version. This exception does not (and cannot) modify any +license terms which apply to the Application, with which you must still +comply. + + + GNU LESSER GENERAL PUBLIC LICENSE + Version 3, 29 June 2007 + + Copyright (C) 2007 Free Software Foundation, Inc. 
+ Everyone is permitted to copy and distribute verbatim copies + of this license document, but changing it is not allowed. + + + This version of the GNU Lesser General Public License incorporates +the terms and conditions of version 3 of the GNU General Public +License, supplemented by the additional permissions listed below. + + 0. Additional Definitions. + + As used herein, "this License" refers to version 3 of the GNU Lesser +General Public License, and the "GNU GPL" refers to version 3 of the GNU +General Public License. + + "The Library" refers to a covered work governed by this License, +other than an Application or a Combined Work as defined below. + + An "Application" is any work that makes use of an interface provided +by the Library, but which is not otherwise based on the Library. +Defining a subclass of a class defined by the Library is deemed a mode +of using an interface provided by the Library. + + A "Combined Work" is a work produced by combining or linking an +Application with the Library. The particular version of the Library +with which the Combined Work was made is also called the "Linked +Version". + + The "Minimal Corresponding Source" for a Combined Work means the +Corresponding Source for the Combined Work, excluding any source code +for portions of the Combined Work that, considered in isolation, are +based on the Application, and not on the Linked Version. + + The "Corresponding Application Code" for a Combined Work means the +object code and/or source code for the Application, including any data +and utility programs needed for reproducing the Combined Work from the +Application, but excluding the System Libraries of the Combined Work. + + 1. Exception to Section 3 of the GNU GPL. + + You may convey a covered work under sections 3 and 4 of this License +without being bound by section 3 of the GNU GPL. + + 2. Conveying Modified Versions. + + If you modify a copy of the Library, and, in your modifications, a +facility refers to a function or data to be supplied by an Application +that uses the facility (other than as an argument passed when the +facility is invoked), then you may convey a copy of the modified +version: + + a) under this License, provided that you make a good faith effort to + ensure that, in the event an Application does not supply the + function or data, the facility still operates, and performs + whatever part of its purpose remains meaningful, or + + b) under the GNU GPL, with none of the additional permissions of + this License applicable to that copy. + + 3. Object Code Incorporating Material from Library Header Files. + + The object code form of an Application may incorporate material from +a header file that is part of the Library. You may convey such object +code under terms of your choice, provided that, if the incorporated +material is not limited to numerical parameters, data structure +layouts and accessors, or small macros, inline functions and templates +(ten or fewer lines in length), you do both of the following: + + a) Give prominent notice with each copy of the object code that the + Library is used in it and that the Library and its use are + covered by this License. + + b) Accompany the object code with a copy of the GNU GPL and this license + document. + + 4. Combined Works. 
+ + You may convey a Combined Work under terms of your choice that, +taken together, effectively do not restrict modification of the +portions of the Library contained in the Combined Work and reverse +engineering for debugging such modifications, if you also do each of +the following: + + a) Give prominent notice with each copy of the Combined Work that + the Library is used in it and that the Library and its use are + covered by this License. + + b) Accompany the Combined Work with a copy of the GNU GPL and this license + document. + + c) For a Combined Work that displays copyright notices during + execution, include the copyright notice for the Library among + these notices, as well as a reference directing the user to the + copies of the GNU GPL and this license document. + + d) Do one of the following: + + 0) Convey the Minimal Corresponding Source under the terms of this + License, and the Corresponding Application Code in a form + suitable for, and under terms that permit, the user to + recombine or relink the Application with a modified version of + the Linked Version to produce a modified Combined Work, in the + manner specified by section 6 of the GNU GPL for conveying + Corresponding Source. + + 1) Use a suitable shared library mechanism for linking with the + Library. A suitable mechanism is one that (a) uses at run time + a copy of the Library already present on the user's computer + system, and (b) will operate properly with a modified version + of the Library that is interface-compatible with the Linked + Version. + + e) Provide Installation Information, but only if you would otherwise + be required to provide such information under section 6 of the + GNU GPL, and only to the extent that such information is + necessary to install and execute a modified version of the + Combined Work produced by recombining or relinking the + Application with a modified version of the Linked Version. (If + you use option 4d0, the Installation Information must accompany + the Minimal Corresponding Source and Corresponding Application + Code. If you use option 4d1, you must provide the Installation + Information in the manner specified by section 6 of the GNU GPL + for conveying Corresponding Source.) + + 5. Combined Libraries. + + You may place library facilities that are a work based on the +Library side by side in a single library together with other library +facilities that are not Applications and are not covered by this +License, and convey such a combined library under terms of your +choice, if you do both of the following: + + a) Accompany the combined library with a copy of the same work based + on the Library, uncombined with any other library facilities, + conveyed under the terms of this License. + + b) Give prominent notice with the combined library that part of it + is a work based on the Library, and explaining where to find the + accompanying uncombined form of the same work. + + 6. Revised Versions of the GNU Lesser General Public License. + + The Free Software Foundation may publish revised and/or new versions +of the GNU Lesser General Public License from time to time. Such new +versions will be similar in spirit to the present version, but may +differ in detail to address new problems or concerns. + + Each version is given a distinguishing version number. 
If the +Library as you received it specifies that a certain numbered version +of the GNU Lesser General Public License "or any later version" +applies to it, you have the option of following the terms and +conditions either of that published version or of any later version +published by the Free Software Foundation. If the Library as you +received it does not specify a version number of the GNU Lesser +General Public License, you may choose any version of the GNU Lesser +General Public License ever published by the Free Software Foundation. + + If the Library as you received it specifies that a proxy can decide +whether future versions of the GNU Lesser General Public License shall +apply, that proxy's public statement of acceptance of any version is +permanent authorization for you to choose that version for the +Library. diff --git a/vendor/github.com/docker/distribution/vendor/gopkg.in/yaml.v2/LICENSE.libyaml b/vendor/github.com/docker/distribution/vendor/gopkg.in/yaml.v2/LICENSE.libyaml new file mode 100644 index 000000000..8da58fbf6 --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/gopkg.in/yaml.v2/LICENSE.libyaml @@ -0,0 +1,31 @@ +The following files were ported to Go from C files of libyaml, and thus +are still covered by their original copyright and license: + + apic.go + emitterc.go + parserc.go + readerc.go + scannerc.go + writerc.go + yamlh.go + yamlprivateh.go + +Copyright (c) 2006 Kirill Simonov + +Permission is hereby granted, free of charge, to any person obtaining a copy of +this software and associated documentation files (the "Software"), to deal in +the Software without restriction, including without limitation the rights to +use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies +of the Software, and to permit persons to whom the Software is furnished to do +so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/vendor/github.com/docker/distribution/vendor/gopkg.in/yaml.v2/README.md b/vendor/github.com/docker/distribution/vendor/gopkg.in/yaml.v2/README.md new file mode 100644 index 000000000..d6c919e60 --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/gopkg.in/yaml.v2/README.md @@ -0,0 +1,128 @@ +# YAML support for the Go language + +Introduction +------------ + +The yaml package enables Go programs to comfortably encode and decode YAML +values. It was developed within [Canonical](https://www.canonical.com) as +part of the [juju](https://juju.ubuntu.com) project, and is based on a +pure Go port of the well-known [libyaml](http://pyyaml.org/wiki/LibYAML) +C library to parse and generate YAML data quickly and reliably. + +Compatibility +------------- + +The yaml package supports most of YAML 1.1 and 1.2, including support for +anchors, tags, map merging, etc. Multi-document unmarshalling is not yet +implemented, and base-60 floats from YAML 1.1 are purposefully not +supported since they're a poor design and are gone in YAML 1.2. 
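As an illustration of the anchor and map-merging support mentioned above, the following sketch decodes a document that uses an anchor (`&defaults`) and a merge key (`<<`). The document layout and field names are hypothetical; only the `Unmarshal` call demonstrated in the package's Example section below is assumed:

```Go
package main

import (
	"fmt"
	"log"

	"gopkg.in/yaml.v2"
)

// "development" inherits adapter and host from the "defaults" anchor
// via the YAML merge key (<<).
var doc = `
defaults: &defaults
  adapter: postgres
  host: localhost
development:
  <<: *defaults
  database: dev_db
`

func main() {
	m := make(map[string]map[string]string)
	if err := yaml.Unmarshal([]byte(doc), &m); err != nil {
		log.Fatalf("error: %v", err)
	}
	// Merge keys are resolved while decoding, so the merged mapping
	// contains adapter, host and database.
	fmt.Println(m["development"]["adapter"], m["development"]["database"])
}
```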
+ +Installation and usage +---------------------- + +The import path for the package is *gopkg.in/yaml.v2*. + +To install it, run: + + go get gopkg.in/yaml.v2 + +API documentation +----------------- + +If opened in a browser, the import path itself leads to the API documentation: + + * [https://gopkg.in/yaml.v2](https://gopkg.in/yaml.v2) + +API stability +------------- + +The package API for yaml v2 will remain stable as described in [gopkg.in](https://gopkg.in). + + +License +------- + +The yaml package is licensed under the LGPL with an exception that allows it to be linked statically. Please see the LICENSE file for details. + + +Example +------- + +```Go +package main + +import ( + "fmt" + "log" + + "gopkg.in/yaml.v2" +) + +var data = ` +a: Easy! +b: + c: 2 + d: [3, 4] +` + +type T struct { + A string + B struct{C int; D []int ",flow"} +} + +func main() { + t := T{} + + err := yaml.Unmarshal([]byte(data), &t) + if err != nil { + log.Fatalf("error: %v", err) + } + fmt.Printf("--- t:\n%v\n\n", t) + + d, err := yaml.Marshal(&t) + if err != nil { + log.Fatalf("error: %v", err) + } + fmt.Printf("--- t dump:\n%s\n\n", string(d)) + + m := make(map[interface{}]interface{}) + + err = yaml.Unmarshal([]byte(data), &m) + if err != nil { + log.Fatalf("error: %v", err) + } + fmt.Printf("--- m:\n%v\n\n", m) + + d, err = yaml.Marshal(&m) + if err != nil { + log.Fatalf("error: %v", err) + } + fmt.Printf("--- m dump:\n%s\n\n", string(d)) +} +``` + +This example will generate the following output: + +``` +--- t: +{Easy! {2 [3 4]}} + +--- t dump: +a: Easy! +b: + c: 2 + d: [3, 4] + + +--- m: +map[a:Easy! b:map[c:2 d:[3 4]]] + +--- m dump: +a: Easy! +b: + c: 2 + d: + - 3 + - 4 +``` + diff --git a/vendor/github.com/docker/distribution/vendor/gopkg.in/yaml.v2/apic.go b/vendor/github.com/docker/distribution/vendor/gopkg.in/yaml.v2/apic.go new file mode 100644 index 000000000..95ec014e8 --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/gopkg.in/yaml.v2/apic.go @@ -0,0 +1,742 @@ +package yaml + +import ( + "io" + "os" +) + +func yaml_insert_token(parser *yaml_parser_t, pos int, token *yaml_token_t) { + //fmt.Println("yaml_insert_token", "pos:", pos, "typ:", token.typ, "head:", parser.tokens_head, "len:", len(parser.tokens)) + + // Check if we can move the queue at the beginning of the buffer. + if parser.tokens_head > 0 && len(parser.tokens) == cap(parser.tokens) { + if parser.tokens_head != len(parser.tokens) { + copy(parser.tokens, parser.tokens[parser.tokens_head:]) + } + parser.tokens = parser.tokens[:len(parser.tokens)-parser.tokens_head] + parser.tokens_head = 0 + } + parser.tokens = append(parser.tokens, *token) + if pos < 0 { + return + } + copy(parser.tokens[parser.tokens_head+pos+1:], parser.tokens[parser.tokens_head+pos:]) + parser.tokens[parser.tokens_head+pos] = *token +} + +// Create a new parser object. +func yaml_parser_initialize(parser *yaml_parser_t) bool { + *parser = yaml_parser_t{ + raw_buffer: make([]byte, 0, input_raw_buffer_size), + buffer: make([]byte, 0, input_buffer_size), + } + return true +} + +// Destroy a parser object. +func yaml_parser_delete(parser *yaml_parser_t) { + *parser = yaml_parser_t{} +} + +// String read handler. +func yaml_string_read_handler(parser *yaml_parser_t, buffer []byte) (n int, err error) { + if parser.input_pos == len(parser.input) { + return 0, io.EOF + } + n = copy(buffer, parser.input[parser.input_pos:]) + parser.input_pos += n + return n, nil +} + +// File read handler. 
+func yaml_file_read_handler(parser *yaml_parser_t, buffer []byte) (n int, err error) { + return parser.input_file.Read(buffer) +} + +// Set a string input. +func yaml_parser_set_input_string(parser *yaml_parser_t, input []byte) { + if parser.read_handler != nil { + panic("must set the input source only once") + } + parser.read_handler = yaml_string_read_handler + parser.input = input + parser.input_pos = 0 +} + +// Set a file input. +func yaml_parser_set_input_file(parser *yaml_parser_t, file *os.File) { + if parser.read_handler != nil { + panic("must set the input source only once") + } + parser.read_handler = yaml_file_read_handler + parser.input_file = file +} + +// Set the source encoding. +func yaml_parser_set_encoding(parser *yaml_parser_t, encoding yaml_encoding_t) { + if parser.encoding != yaml_ANY_ENCODING { + panic("must set the encoding only once") + } + parser.encoding = encoding +} + +// Create a new emitter object. +func yaml_emitter_initialize(emitter *yaml_emitter_t) bool { + *emitter = yaml_emitter_t{ + buffer: make([]byte, output_buffer_size), + raw_buffer: make([]byte, 0, output_raw_buffer_size), + states: make([]yaml_emitter_state_t, 0, initial_stack_size), + events: make([]yaml_event_t, 0, initial_queue_size), + } + return true +} + +// Destroy an emitter object. +func yaml_emitter_delete(emitter *yaml_emitter_t) { + *emitter = yaml_emitter_t{} +} + +// String write handler. +func yaml_string_write_handler(emitter *yaml_emitter_t, buffer []byte) error { + *emitter.output_buffer = append(*emitter.output_buffer, buffer...) + return nil +} + +// File write handler. +func yaml_file_write_handler(emitter *yaml_emitter_t, buffer []byte) error { + _, err := emitter.output_file.Write(buffer) + return err +} + +// Set a string output. +func yaml_emitter_set_output_string(emitter *yaml_emitter_t, output_buffer *[]byte) { + if emitter.write_handler != nil { + panic("must set the output target only once") + } + emitter.write_handler = yaml_string_write_handler + emitter.output_buffer = output_buffer +} + +// Set a file output. +func yaml_emitter_set_output_file(emitter *yaml_emitter_t, file io.Writer) { + if emitter.write_handler != nil { + panic("must set the output target only once") + } + emitter.write_handler = yaml_file_write_handler + emitter.output_file = file +} + +// Set the output encoding. +func yaml_emitter_set_encoding(emitter *yaml_emitter_t, encoding yaml_encoding_t) { + if emitter.encoding != yaml_ANY_ENCODING { + panic("must set the output encoding only once") + } + emitter.encoding = encoding +} + +// Set the canonical output style. +func yaml_emitter_set_canonical(emitter *yaml_emitter_t, canonical bool) { + emitter.canonical = canonical +} + +//// Set the indentation increment. +func yaml_emitter_set_indent(emitter *yaml_emitter_t, indent int) { + if indent < 2 || indent > 9 { + indent = 2 + } + emitter.best_indent = indent +} + +// Set the preferred line width. +func yaml_emitter_set_width(emitter *yaml_emitter_t, width int) { + if width < 0 { + width = -1 + } + emitter.best_width = width +} + +// Set if unescaped non-ASCII characters are allowed. +func yaml_emitter_set_unicode(emitter *yaml_emitter_t, unicode bool) { + emitter.unicode = unicode +} + +// Set the preferred line break character. +func yaml_emitter_set_break(emitter *yaml_emitter_t, line_break yaml_break_t) { + emitter.line_break = line_break +} + +///* +// * Destroy a token object. 
+// */ +// +//YAML_DECLARE(void) +//yaml_token_delete(yaml_token_t *token) +//{ +// assert(token); // Non-NULL token object expected. +// +// switch (token.type) +// { +// case YAML_TAG_DIRECTIVE_TOKEN: +// yaml_free(token.data.tag_directive.handle); +// yaml_free(token.data.tag_directive.prefix); +// break; +// +// case YAML_ALIAS_TOKEN: +// yaml_free(token.data.alias.value); +// break; +// +// case YAML_ANCHOR_TOKEN: +// yaml_free(token.data.anchor.value); +// break; +// +// case YAML_TAG_TOKEN: +// yaml_free(token.data.tag.handle); +// yaml_free(token.data.tag.suffix); +// break; +// +// case YAML_SCALAR_TOKEN: +// yaml_free(token.data.scalar.value); +// break; +// +// default: +// break; +// } +// +// memset(token, 0, sizeof(yaml_token_t)); +//} +// +///* +// * Check if a string is a valid UTF-8 sequence. +// * +// * Check 'reader.c' for more details on UTF-8 encoding. +// */ +// +//static int +//yaml_check_utf8(yaml_char_t *start, size_t length) +//{ +// yaml_char_t *end = start+length; +// yaml_char_t *pointer = start; +// +// while (pointer < end) { +// unsigned char octet; +// unsigned int width; +// unsigned int value; +// size_t k; +// +// octet = pointer[0]; +// width = (octet & 0x80) == 0x00 ? 1 : +// (octet & 0xE0) == 0xC0 ? 2 : +// (octet & 0xF0) == 0xE0 ? 3 : +// (octet & 0xF8) == 0xF0 ? 4 : 0; +// value = (octet & 0x80) == 0x00 ? octet & 0x7F : +// (octet & 0xE0) == 0xC0 ? octet & 0x1F : +// (octet & 0xF0) == 0xE0 ? octet & 0x0F : +// (octet & 0xF8) == 0xF0 ? octet & 0x07 : 0; +// if (!width) return 0; +// if (pointer+width > end) return 0; +// for (k = 1; k < width; k ++) { +// octet = pointer[k]; +// if ((octet & 0xC0) != 0x80) return 0; +// value = (value << 6) + (octet & 0x3F); +// } +// if (!((width == 1) || +// (width == 2 && value >= 0x80) || +// (width == 3 && value >= 0x800) || +// (width == 4 && value >= 0x10000))) return 0; +// +// pointer += width; +// } +// +// return 1; +//} +// + +// Create STREAM-START. +func yaml_stream_start_event_initialize(event *yaml_event_t, encoding yaml_encoding_t) bool { + *event = yaml_event_t{ + typ: yaml_STREAM_START_EVENT, + encoding: encoding, + } + return true +} + +// Create STREAM-END. +func yaml_stream_end_event_initialize(event *yaml_event_t) bool { + *event = yaml_event_t{ + typ: yaml_STREAM_END_EVENT, + } + return true +} + +// Create DOCUMENT-START. +func yaml_document_start_event_initialize(event *yaml_event_t, version_directive *yaml_version_directive_t, + tag_directives []yaml_tag_directive_t, implicit bool) bool { + *event = yaml_event_t{ + typ: yaml_DOCUMENT_START_EVENT, + version_directive: version_directive, + tag_directives: tag_directives, + implicit: implicit, + } + return true +} + +// Create DOCUMENT-END. +func yaml_document_end_event_initialize(event *yaml_event_t, implicit bool) bool { + *event = yaml_event_t{ + typ: yaml_DOCUMENT_END_EVENT, + implicit: implicit, + } + return true +} + +///* +// * Create ALIAS. +// */ +// +//YAML_DECLARE(int) +//yaml_alias_event_initialize(event *yaml_event_t, anchor *yaml_char_t) +//{ +// mark yaml_mark_t = { 0, 0, 0 } +// anchor_copy *yaml_char_t = NULL +// +// assert(event) // Non-NULL event object is expected. +// assert(anchor) // Non-NULL anchor is expected. +// +// if (!yaml_check_utf8(anchor, strlen((char *)anchor))) return 0 +// +// anchor_copy = yaml_strdup(anchor) +// if (!anchor_copy) +// return 0 +// +// ALIAS_EVENT_INIT(*event, anchor_copy, mark, mark) +// +// return 1 +//} + +// Create SCALAR. 
+func yaml_scalar_event_initialize(event *yaml_event_t, anchor, tag, value []byte, plain_implicit, quoted_implicit bool, style yaml_scalar_style_t) bool { + *event = yaml_event_t{ + typ: yaml_SCALAR_EVENT, + anchor: anchor, + tag: tag, + value: value, + implicit: plain_implicit, + quoted_implicit: quoted_implicit, + style: yaml_style_t(style), + } + return true +} + +// Create SEQUENCE-START. +func yaml_sequence_start_event_initialize(event *yaml_event_t, anchor, tag []byte, implicit bool, style yaml_sequence_style_t) bool { + *event = yaml_event_t{ + typ: yaml_SEQUENCE_START_EVENT, + anchor: anchor, + tag: tag, + implicit: implicit, + style: yaml_style_t(style), + } + return true +} + +// Create SEQUENCE-END. +func yaml_sequence_end_event_initialize(event *yaml_event_t) bool { + *event = yaml_event_t{ + typ: yaml_SEQUENCE_END_EVENT, + } + return true +} + +// Create MAPPING-START. +func yaml_mapping_start_event_initialize(event *yaml_event_t, anchor, tag []byte, implicit bool, style yaml_mapping_style_t) bool { + *event = yaml_event_t{ + typ: yaml_MAPPING_START_EVENT, + anchor: anchor, + tag: tag, + implicit: implicit, + style: yaml_style_t(style), + } + return true +} + +// Create MAPPING-END. +func yaml_mapping_end_event_initialize(event *yaml_event_t) bool { + *event = yaml_event_t{ + typ: yaml_MAPPING_END_EVENT, + } + return true +} + +// Destroy an event object. +func yaml_event_delete(event *yaml_event_t) { + *event = yaml_event_t{} +} + +///* +// * Create a document object. +// */ +// +//YAML_DECLARE(int) +//yaml_document_initialize(document *yaml_document_t, +// version_directive *yaml_version_directive_t, +// tag_directives_start *yaml_tag_directive_t, +// tag_directives_end *yaml_tag_directive_t, +// start_implicit int, end_implicit int) +//{ +// struct { +// error yaml_error_type_t +// } context +// struct { +// start *yaml_node_t +// end *yaml_node_t +// top *yaml_node_t +// } nodes = { NULL, NULL, NULL } +// version_directive_copy *yaml_version_directive_t = NULL +// struct { +// start *yaml_tag_directive_t +// end *yaml_tag_directive_t +// top *yaml_tag_directive_t +// } tag_directives_copy = { NULL, NULL, NULL } +// value yaml_tag_directive_t = { NULL, NULL } +// mark yaml_mark_t = { 0, 0, 0 } +// +// assert(document) // Non-NULL document object is expected. +// assert((tag_directives_start && tag_directives_end) || +// (tag_directives_start == tag_directives_end)) +// // Valid tag directives are expected. 
+// +// if (!STACK_INIT(&context, nodes, INITIAL_STACK_SIZE)) goto error +// +// if (version_directive) { +// version_directive_copy = yaml_malloc(sizeof(yaml_version_directive_t)) +// if (!version_directive_copy) goto error +// version_directive_copy.major = version_directive.major +// version_directive_copy.minor = version_directive.minor +// } +// +// if (tag_directives_start != tag_directives_end) { +// tag_directive *yaml_tag_directive_t +// if (!STACK_INIT(&context, tag_directives_copy, INITIAL_STACK_SIZE)) +// goto error +// for (tag_directive = tag_directives_start +// tag_directive != tag_directives_end; tag_directive ++) { +// assert(tag_directive.handle) +// assert(tag_directive.prefix) +// if (!yaml_check_utf8(tag_directive.handle, +// strlen((char *)tag_directive.handle))) +// goto error +// if (!yaml_check_utf8(tag_directive.prefix, +// strlen((char *)tag_directive.prefix))) +// goto error +// value.handle = yaml_strdup(tag_directive.handle) +// value.prefix = yaml_strdup(tag_directive.prefix) +// if (!value.handle || !value.prefix) goto error +// if (!PUSH(&context, tag_directives_copy, value)) +// goto error +// value.handle = NULL +// value.prefix = NULL +// } +// } +// +// DOCUMENT_INIT(*document, nodes.start, nodes.end, version_directive_copy, +// tag_directives_copy.start, tag_directives_copy.top, +// start_implicit, end_implicit, mark, mark) +// +// return 1 +// +//error: +// STACK_DEL(&context, nodes) +// yaml_free(version_directive_copy) +// while (!STACK_EMPTY(&context, tag_directives_copy)) { +// value yaml_tag_directive_t = POP(&context, tag_directives_copy) +// yaml_free(value.handle) +// yaml_free(value.prefix) +// } +// STACK_DEL(&context, tag_directives_copy) +// yaml_free(value.handle) +// yaml_free(value.prefix) +// +// return 0 +//} +// +///* +// * Destroy a document object. +// */ +// +//YAML_DECLARE(void) +//yaml_document_delete(document *yaml_document_t) +//{ +// struct { +// error yaml_error_type_t +// } context +// tag_directive *yaml_tag_directive_t +// +// context.error = YAML_NO_ERROR // Eliminate a compliler warning. +// +// assert(document) // Non-NULL document object is expected. +// +// while (!STACK_EMPTY(&context, document.nodes)) { +// node yaml_node_t = POP(&context, document.nodes) +// yaml_free(node.tag) +// switch (node.type) { +// case YAML_SCALAR_NODE: +// yaml_free(node.data.scalar.value) +// break +// case YAML_SEQUENCE_NODE: +// STACK_DEL(&context, node.data.sequence.items) +// break +// case YAML_MAPPING_NODE: +// STACK_DEL(&context, node.data.mapping.pairs) +// break +// default: +// assert(0) // Should not happen. +// } +// } +// STACK_DEL(&context, document.nodes) +// +// yaml_free(document.version_directive) +// for (tag_directive = document.tag_directives.start +// tag_directive != document.tag_directives.end +// tag_directive++) { +// yaml_free(tag_directive.handle) +// yaml_free(tag_directive.prefix) +// } +// yaml_free(document.tag_directives.start) +// +// memset(document, 0, sizeof(yaml_document_t)) +//} +// +///** +// * Get a document node. +// */ +// +//YAML_DECLARE(yaml_node_t *) +//yaml_document_get_node(document *yaml_document_t, index int) +//{ +// assert(document) // Non-NULL document object is expected. +// +// if (index > 0 && document.nodes.start + index <= document.nodes.top) { +// return document.nodes.start + index - 1 +// } +// return NULL +//} +// +///** +// * Get the root object. 
+// */ +// +//YAML_DECLARE(yaml_node_t *) +//yaml_document_get_root_node(document *yaml_document_t) +//{ +// assert(document) // Non-NULL document object is expected. +// +// if (document.nodes.top != document.nodes.start) { +// return document.nodes.start +// } +// return NULL +//} +// +///* +// * Add a scalar node to a document. +// */ +// +//YAML_DECLARE(int) +//yaml_document_add_scalar(document *yaml_document_t, +// tag *yaml_char_t, value *yaml_char_t, length int, +// style yaml_scalar_style_t) +//{ +// struct { +// error yaml_error_type_t +// } context +// mark yaml_mark_t = { 0, 0, 0 } +// tag_copy *yaml_char_t = NULL +// value_copy *yaml_char_t = NULL +// node yaml_node_t +// +// assert(document) // Non-NULL document object is expected. +// assert(value) // Non-NULL value is expected. +// +// if (!tag) { +// tag = (yaml_char_t *)YAML_DEFAULT_SCALAR_TAG +// } +// +// if (!yaml_check_utf8(tag, strlen((char *)tag))) goto error +// tag_copy = yaml_strdup(tag) +// if (!tag_copy) goto error +// +// if (length < 0) { +// length = strlen((char *)value) +// } +// +// if (!yaml_check_utf8(value, length)) goto error +// value_copy = yaml_malloc(length+1) +// if (!value_copy) goto error +// memcpy(value_copy, value, length) +// value_copy[length] = '\0' +// +// SCALAR_NODE_INIT(node, tag_copy, value_copy, length, style, mark, mark) +// if (!PUSH(&context, document.nodes, node)) goto error +// +// return document.nodes.top - document.nodes.start +// +//error: +// yaml_free(tag_copy) +// yaml_free(value_copy) +// +// return 0 +//} +// +///* +// * Add a sequence node to a document. +// */ +// +//YAML_DECLARE(int) +//yaml_document_add_sequence(document *yaml_document_t, +// tag *yaml_char_t, style yaml_sequence_style_t) +//{ +// struct { +// error yaml_error_type_t +// } context +// mark yaml_mark_t = { 0, 0, 0 } +// tag_copy *yaml_char_t = NULL +// struct { +// start *yaml_node_item_t +// end *yaml_node_item_t +// top *yaml_node_item_t +// } items = { NULL, NULL, NULL } +// node yaml_node_t +// +// assert(document) // Non-NULL document object is expected. +// +// if (!tag) { +// tag = (yaml_char_t *)YAML_DEFAULT_SEQUENCE_TAG +// } +// +// if (!yaml_check_utf8(tag, strlen((char *)tag))) goto error +// tag_copy = yaml_strdup(tag) +// if (!tag_copy) goto error +// +// if (!STACK_INIT(&context, items, INITIAL_STACK_SIZE)) goto error +// +// SEQUENCE_NODE_INIT(node, tag_copy, items.start, items.end, +// style, mark, mark) +// if (!PUSH(&context, document.nodes, node)) goto error +// +// return document.nodes.top - document.nodes.start +// +//error: +// STACK_DEL(&context, items) +// yaml_free(tag_copy) +// +// return 0 +//} +// +///* +// * Add a mapping node to a document. +// */ +// +//YAML_DECLARE(int) +//yaml_document_add_mapping(document *yaml_document_t, +// tag *yaml_char_t, style yaml_mapping_style_t) +//{ +// struct { +// error yaml_error_type_t +// } context +// mark yaml_mark_t = { 0, 0, 0 } +// tag_copy *yaml_char_t = NULL +// struct { +// start *yaml_node_pair_t +// end *yaml_node_pair_t +// top *yaml_node_pair_t +// } pairs = { NULL, NULL, NULL } +// node yaml_node_t +// +// assert(document) // Non-NULL document object is expected. 
+// +// if (!tag) { +// tag = (yaml_char_t *)YAML_DEFAULT_MAPPING_TAG +// } +// +// if (!yaml_check_utf8(tag, strlen((char *)tag))) goto error +// tag_copy = yaml_strdup(tag) +// if (!tag_copy) goto error +// +// if (!STACK_INIT(&context, pairs, INITIAL_STACK_SIZE)) goto error +// +// MAPPING_NODE_INIT(node, tag_copy, pairs.start, pairs.end, +// style, mark, mark) +// if (!PUSH(&context, document.nodes, node)) goto error +// +// return document.nodes.top - document.nodes.start +// +//error: +// STACK_DEL(&context, pairs) +// yaml_free(tag_copy) +// +// return 0 +//} +// +///* +// * Append an item to a sequence node. +// */ +// +//YAML_DECLARE(int) +//yaml_document_append_sequence_item(document *yaml_document_t, +// sequence int, item int) +//{ +// struct { +// error yaml_error_type_t +// } context +// +// assert(document) // Non-NULL document is required. +// assert(sequence > 0 +// && document.nodes.start + sequence <= document.nodes.top) +// // Valid sequence id is required. +// assert(document.nodes.start[sequence-1].type == YAML_SEQUENCE_NODE) +// // A sequence node is required. +// assert(item > 0 && document.nodes.start + item <= document.nodes.top) +// // Valid item id is required. +// +// if (!PUSH(&context, +// document.nodes.start[sequence-1].data.sequence.items, item)) +// return 0 +// +// return 1 +//} +// +///* +// * Append a pair of a key and a value to a mapping node. +// */ +// +//YAML_DECLARE(int) +//yaml_document_append_mapping_pair(document *yaml_document_t, +// mapping int, key int, value int) +//{ +// struct { +// error yaml_error_type_t +// } context +// +// pair yaml_node_pair_t +// +// assert(document) // Non-NULL document is required. +// assert(mapping > 0 +// && document.nodes.start + mapping <= document.nodes.top) +// // Valid mapping id is required. +// assert(document.nodes.start[mapping-1].type == YAML_MAPPING_NODE) +// // A mapping node is required. +// assert(key > 0 && document.nodes.start + key <= document.nodes.top) +// // Valid key id is required. +// assert(value > 0 && document.nodes.start + value <= document.nodes.top) +// // Valid value id is required. +// +// pair.key = key +// pair.value = value +// +// if (!PUSH(&context, +// document.nodes.start[mapping-1].data.mapping.pairs, pair)) +// return 0 +// +// return 1 +//} +// +// diff --git a/vendor/github.com/docker/distribution/vendor/gopkg.in/yaml.v2/decode.go b/vendor/github.com/docker/distribution/vendor/gopkg.in/yaml.v2/decode.go new file mode 100644 index 000000000..ec9d27101 --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/gopkg.in/yaml.v2/decode.go @@ -0,0 +1,667 @@ +package yaml + +import ( + "encoding" + "encoding/base64" + "fmt" + "math" + "reflect" + "strconv" + "time" +) + +const ( + documentNode = 1 << iota + mappingNode + sequenceNode + scalarNode + aliasNode +) + +type node struct { + kind int + line, column int + tag string + value string + implicit bool + children []*node + anchors map[string]*node +} + +// ---------------------------------------------------------------------------- +// Parser, produces a node tree out of a libyaml event stream. 
+ +type parser struct { + parser yaml_parser_t + event yaml_event_t + doc *node +} + +func newParser(b []byte) *parser { + p := parser{} + if !yaml_parser_initialize(&p.parser) { + panic("failed to initialize YAML emitter") + } + + if len(b) == 0 { + b = []byte{'\n'} + } + + yaml_parser_set_input_string(&p.parser, b) + + p.skip() + if p.event.typ != yaml_STREAM_START_EVENT { + panic("expected stream start event, got " + strconv.Itoa(int(p.event.typ))) + } + p.skip() + return &p +} + +func (p *parser) destroy() { + if p.event.typ != yaml_NO_EVENT { + yaml_event_delete(&p.event) + } + yaml_parser_delete(&p.parser) +} + +func (p *parser) skip() { + if p.event.typ != yaml_NO_EVENT { + if p.event.typ == yaml_STREAM_END_EVENT { + failf("attempted to go past the end of stream; corrupted value?") + } + yaml_event_delete(&p.event) + } + if !yaml_parser_parse(&p.parser, &p.event) { + p.fail() + } +} + +func (p *parser) fail() { + var where string + var line int + if p.parser.problem_mark.line != 0 { + line = p.parser.problem_mark.line + } else if p.parser.context_mark.line != 0 { + line = p.parser.context_mark.line + } + if line != 0 { + where = "line " + strconv.Itoa(line) + ": " + } + var msg string + if len(p.parser.problem) > 0 { + msg = p.parser.problem + } else { + msg = "unknown problem parsing YAML content" + } + failf("%s%s", where, msg) +} + +func (p *parser) anchor(n *node, anchor []byte) { + if anchor != nil { + p.doc.anchors[string(anchor)] = n + } +} + +func (p *parser) parse() *node { + switch p.event.typ { + case yaml_SCALAR_EVENT: + return p.scalar() + case yaml_ALIAS_EVENT: + return p.alias() + case yaml_MAPPING_START_EVENT: + return p.mapping() + case yaml_SEQUENCE_START_EVENT: + return p.sequence() + case yaml_DOCUMENT_START_EVENT: + return p.document() + case yaml_STREAM_END_EVENT: + // Happens when attempting to decode an empty buffer. + return nil + default: + panic("attempted to parse unknown event: " + strconv.Itoa(int(p.event.typ))) + } + panic("unreachable") +} + +func (p *parser) node(kind int) *node { + return &node{ + kind: kind, + line: p.event.start_mark.line, + column: p.event.start_mark.column, + } +} + +func (p *parser) document() *node { + n := p.node(documentNode) + n.anchors = make(map[string]*node) + p.doc = n + p.skip() + n.children = append(n.children, p.parse()) + if p.event.typ != yaml_DOCUMENT_END_EVENT { + panic("expected end of document event but got " + strconv.Itoa(int(p.event.typ))) + } + p.skip() + return n +} + +func (p *parser) alias() *node { + n := p.node(aliasNode) + n.value = string(p.event.anchor) + p.skip() + return n +} + +func (p *parser) scalar() *node { + n := p.node(scalarNode) + n.value = string(p.event.value) + n.tag = string(p.event.tag) + n.implicit = p.event.implicit + p.anchor(n, p.event.anchor) + p.skip() + return n +} + +func (p *parser) sequence() *node { + n := p.node(sequenceNode) + p.anchor(n, p.event.anchor) + p.skip() + for p.event.typ != yaml_SEQUENCE_END_EVENT { + n.children = append(n.children, p.parse()) + } + p.skip() + return n +} + +func (p *parser) mapping() *node { + n := p.node(mappingNode) + p.anchor(n, p.event.anchor) + p.skip() + for p.event.typ != yaml_MAPPING_END_EVENT { + n.children = append(n.children, p.parse(), p.parse()) + } + p.skip() + return n +} + +// ---------------------------------------------------------------------------- +// Decoder, unmarshals a node into a provided value. 
+ +type decoder struct { + doc *node + aliases map[string]bool + mapType reflect.Type + terrors []string +} + +var ( + mapItemType = reflect.TypeOf(MapItem{}) + durationType = reflect.TypeOf(time.Duration(0)) + defaultMapType = reflect.TypeOf(map[interface{}]interface{}{}) + ifaceType = defaultMapType.Elem() +) + +func newDecoder() *decoder { + d := &decoder{mapType: defaultMapType} + d.aliases = make(map[string]bool) + return d +} + +func (d *decoder) terror(n *node, tag string, out reflect.Value) { + if n.tag != "" { + tag = n.tag + } + value := n.value + if tag != yaml_SEQ_TAG && tag != yaml_MAP_TAG { + if len(value) > 10 { + value = " `" + value[:7] + "...`" + } else { + value = " `" + value + "`" + } + } + d.terrors = append(d.terrors, fmt.Sprintf("line %d: cannot unmarshal %s%s into %s", n.line+1, shortTag(tag), value, out.Type())) +} + +func (d *decoder) callUnmarshaler(n *node, u Unmarshaler) (good bool) { + terrlen := len(d.terrors) + err := u.UnmarshalYAML(func(v interface{}) (err error) { + defer handleErr(&err) + d.unmarshal(n, reflect.ValueOf(v)) + if len(d.terrors) > terrlen { + issues := d.terrors[terrlen:] + d.terrors = d.terrors[:terrlen] + return &TypeError{issues} + } + return nil + }) + if e, ok := err.(*TypeError); ok { + d.terrors = append(d.terrors, e.Errors...) + return false + } + if err != nil { + fail(err) + } + return true +} + +// d.prepare initializes and dereferences pointers and calls UnmarshalYAML +// if a value is found to implement it. +// It returns the initialized and dereferenced out value, whether +// unmarshalling was already done by UnmarshalYAML, and if so whether +// its types unmarshalled appropriately. +// +// If n holds a null value, prepare returns before doing anything. +func (d *decoder) prepare(n *node, out reflect.Value) (newout reflect.Value, unmarshaled, good bool) { + if n.tag == yaml_NULL_TAG || n.kind == scalarNode && n.tag == "" && (n.value == "null" || n.value == "") { + return out, false, false + } + again := true + for again { + again = false + if out.Kind() == reflect.Ptr { + if out.IsNil() { + out.Set(reflect.New(out.Type().Elem())) + } + out = out.Elem() + again = true + } + if out.CanAddr() { + if u, ok := out.Addr().Interface().(Unmarshaler); ok { + good = d.callUnmarshaler(n, u) + return out, true, good + } + } + } + return out, false, false +} + +func (d *decoder) unmarshal(n *node, out reflect.Value) (good bool) { + switch n.kind { + case documentNode: + return d.document(n, out) + case aliasNode: + return d.alias(n, out) + } + out, unmarshaled, good := d.prepare(n, out) + if unmarshaled { + return good + } + switch n.kind { + case scalarNode: + good = d.scalar(n, out) + case mappingNode: + good = d.mapping(n, out) + case sequenceNode: + good = d.sequence(n, out) + default: + panic("internal error: unknown node kind: " + strconv.Itoa(n.kind)) + } + return good +} + +func (d *decoder) document(n *node, out reflect.Value) (good bool) { + if len(n.children) == 1 { + d.doc = n + d.unmarshal(n.children[0], out) + return true + } + return false +} + +func (d *decoder) alias(n *node, out reflect.Value) (good bool) { + an, ok := d.doc.anchors[n.value] + if !ok { + failf("unknown anchor '%s' referenced", n.value) + } + if d.aliases[n.value] { + failf("anchor '%s' value contains itself", n.value) + } + d.aliases[n.value] = true + good = d.unmarshal(an, out) + delete(d.aliases, n.value) + return good +} + +var zeroValue reflect.Value + +func resetMap(out reflect.Value) { + for _, k := range out.MapKeys() { + out.SetMapIndex(k, 
zeroValue) + } +} + +func (d *decoder) scalar(n *node, out reflect.Value) (good bool) { + var tag string + var resolved interface{} + if n.tag == "" && !n.implicit { + tag = yaml_STR_TAG + resolved = n.value + } else { + tag, resolved = resolve(n.tag, n.value) + if tag == yaml_BINARY_TAG { + data, err := base64.StdEncoding.DecodeString(resolved.(string)) + if err != nil { + failf("!!binary value contains invalid base64 data") + } + resolved = string(data) + } + } + if resolved == nil { + if out.Kind() == reflect.Map && !out.CanAddr() { + resetMap(out) + } else { + out.Set(reflect.Zero(out.Type())) + } + return true + } + if s, ok := resolved.(string); ok && out.CanAddr() { + if u, ok := out.Addr().Interface().(encoding.TextUnmarshaler); ok { + err := u.UnmarshalText([]byte(s)) + if err != nil { + fail(err) + } + return true + } + } + switch out.Kind() { + case reflect.String: + if tag == yaml_BINARY_TAG { + out.SetString(resolved.(string)) + good = true + } else if resolved != nil { + out.SetString(n.value) + good = true + } + case reflect.Interface: + if resolved == nil { + out.Set(reflect.Zero(out.Type())) + } else { + out.Set(reflect.ValueOf(resolved)) + } + good = true + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + switch resolved := resolved.(type) { + case int: + if !out.OverflowInt(int64(resolved)) { + out.SetInt(int64(resolved)) + good = true + } + case int64: + if !out.OverflowInt(resolved) { + out.SetInt(resolved) + good = true + } + case uint64: + if resolved <= math.MaxInt64 && !out.OverflowInt(int64(resolved)) { + out.SetInt(int64(resolved)) + good = true + } + case float64: + if resolved <= math.MaxInt64 && !out.OverflowInt(int64(resolved)) { + out.SetInt(int64(resolved)) + good = true + } + case string: + if out.Type() == durationType { + d, err := time.ParseDuration(resolved) + if err == nil { + out.SetInt(int64(d)) + good = true + } + } + } + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + switch resolved := resolved.(type) { + case int: + if resolved >= 0 && !out.OverflowUint(uint64(resolved)) { + out.SetUint(uint64(resolved)) + good = true + } + case int64: + if resolved >= 0 && !out.OverflowUint(uint64(resolved)) { + out.SetUint(uint64(resolved)) + good = true + } + case uint64: + if !out.OverflowUint(uint64(resolved)) { + out.SetUint(uint64(resolved)) + good = true + } + case float64: + if resolved <= math.MaxUint64 && !out.OverflowUint(uint64(resolved)) { + out.SetUint(uint64(resolved)) + good = true + } + } + case reflect.Bool: + switch resolved := resolved.(type) { + case bool: + out.SetBool(resolved) + good = true + } + case reflect.Float32, reflect.Float64: + switch resolved := resolved.(type) { + case int: + out.SetFloat(float64(resolved)) + good = true + case int64: + out.SetFloat(float64(resolved)) + good = true + case uint64: + out.SetFloat(float64(resolved)) + good = true + case float64: + out.SetFloat(resolved) + good = true + } + case reflect.Ptr: + if out.Type().Elem() == reflect.TypeOf(resolved) { + // TODO DOes this make sense? When is out a Ptr except when decoding a nil value? 
+ elem := reflect.New(out.Type().Elem()) + elem.Elem().Set(reflect.ValueOf(resolved)) + out.Set(elem) + good = true + } + } + if !good { + d.terror(n, tag, out) + } + return good +} + +func settableValueOf(i interface{}) reflect.Value { + v := reflect.ValueOf(i) + sv := reflect.New(v.Type()).Elem() + sv.Set(v) + return sv +} + +func (d *decoder) sequence(n *node, out reflect.Value) (good bool) { + l := len(n.children) + + var iface reflect.Value + switch out.Kind() { + case reflect.Slice: + out.Set(reflect.MakeSlice(out.Type(), l, l)) + case reflect.Interface: + // No type hints. Will have to use a generic sequence. + iface = out + out = settableValueOf(make([]interface{}, l)) + default: + d.terror(n, yaml_SEQ_TAG, out) + return false + } + et := out.Type().Elem() + + j := 0 + for i := 0; i < l; i++ { + e := reflect.New(et).Elem() + if ok := d.unmarshal(n.children[i], e); ok { + out.Index(j).Set(e) + j++ + } + } + out.Set(out.Slice(0, j)) + if iface.IsValid() { + iface.Set(out) + } + return true +} + +func (d *decoder) mapping(n *node, out reflect.Value) (good bool) { + switch out.Kind() { + case reflect.Struct: + return d.mappingStruct(n, out) + case reflect.Slice: + return d.mappingSlice(n, out) + case reflect.Map: + // okay + case reflect.Interface: + if d.mapType.Kind() == reflect.Map { + iface := out + out = reflect.MakeMap(d.mapType) + iface.Set(out) + } else { + slicev := reflect.New(d.mapType).Elem() + if !d.mappingSlice(n, slicev) { + return false + } + out.Set(slicev) + return true + } + default: + d.terror(n, yaml_MAP_TAG, out) + return false + } + outt := out.Type() + kt := outt.Key() + et := outt.Elem() + + mapType := d.mapType + if outt.Key() == ifaceType && outt.Elem() == ifaceType { + d.mapType = outt + } + + if out.IsNil() { + out.Set(reflect.MakeMap(outt)) + } + l := len(n.children) + for i := 0; i < l; i += 2 { + if isMerge(n.children[i]) { + d.merge(n.children[i+1], out) + continue + } + k := reflect.New(kt).Elem() + if d.unmarshal(n.children[i], k) { + kkind := k.Kind() + if kkind == reflect.Interface { + kkind = k.Elem().Kind() + } + if kkind == reflect.Map || kkind == reflect.Slice { + failf("invalid map key: %#v", k.Interface()) + } + e := reflect.New(et).Elem() + if d.unmarshal(n.children[i+1], e) { + out.SetMapIndex(k, e) + } + } + } + d.mapType = mapType + return true +} + +func (d *decoder) mappingSlice(n *node, out reflect.Value) (good bool) { + outt := out.Type() + if outt.Elem() != mapItemType { + d.terror(n, yaml_MAP_TAG, out) + return false + } + + mapType := d.mapType + d.mapType = outt + + var slice []MapItem + var l = len(n.children) + for i := 0; i < l; i += 2 { + if isMerge(n.children[i]) { + d.merge(n.children[i+1], out) + continue + } + item := MapItem{} + k := reflect.ValueOf(&item.Key).Elem() + if d.unmarshal(n.children[i], k) { + v := reflect.ValueOf(&item.Value).Elem() + if d.unmarshal(n.children[i+1], v) { + slice = append(slice, item) + } + } + } + out.Set(reflect.ValueOf(slice)) + d.mapType = mapType + return true +} + +func (d *decoder) mappingStruct(n *node, out reflect.Value) (good bool) { + sinfo, err := getStructInfo(out.Type()) + if err != nil { + panic(err) + } + name := settableValueOf("") + l := len(n.children) + for i := 0; i < l; i += 2 { + ni := n.children[i] + if isMerge(ni) { + d.merge(n.children[i+1], out) + continue + } + if !d.unmarshal(ni, name) { + continue + } + if info, ok := sinfo.FieldsMap[name.String()]; ok { + var field reflect.Value + if info.Inline == nil { + field = out.Field(info.Num) + } else { + field = 
out.FieldByIndex(info.Inline) + } + d.unmarshal(n.children[i+1], field) + } + } + return true +} + +func failWantMap() { + failf("map merge requires map or sequence of maps as the value") +} + +func (d *decoder) merge(n *node, out reflect.Value) { + switch n.kind { + case mappingNode: + d.unmarshal(n, out) + case aliasNode: + an, ok := d.doc.anchors[n.value] + if ok && an.kind != mappingNode { + failWantMap() + } + d.unmarshal(n, out) + case sequenceNode: + // Step backwards as earlier nodes take precedence. + for i := len(n.children) - 1; i >= 0; i-- { + ni := n.children[i] + if ni.kind == aliasNode { + an, ok := d.doc.anchors[ni.value] + if ok && an.kind != mappingNode { + failWantMap() + } + } else if ni.kind != mappingNode { + failWantMap() + } + d.unmarshal(ni, out) + } + default: + failWantMap() + } +} + +func isMerge(n *node) bool { + return n.kind == scalarNode && n.value == "<<" && (n.implicit == true || n.tag == yaml_MERGE_TAG) +} diff --git a/vendor/github.com/docker/distribution/vendor/gopkg.in/yaml.v2/emitterc.go b/vendor/github.com/docker/distribution/vendor/gopkg.in/yaml.v2/emitterc.go new file mode 100644 index 000000000..9b3dc4a43 --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/gopkg.in/yaml.v2/emitterc.go @@ -0,0 +1,1685 @@ +package yaml + +import ( + "bytes" +) + +// Flush the buffer if needed. +func flush(emitter *yaml_emitter_t) bool { + if emitter.buffer_pos+5 >= len(emitter.buffer) { + return yaml_emitter_flush(emitter) + } + return true +} + +// Put a character to the output buffer. +func put(emitter *yaml_emitter_t, value byte) bool { + if emitter.buffer_pos+5 >= len(emitter.buffer) && !yaml_emitter_flush(emitter) { + return false + } + emitter.buffer[emitter.buffer_pos] = value + emitter.buffer_pos++ + emitter.column++ + return true +} + +// Put a line break to the output buffer. +func put_break(emitter *yaml_emitter_t) bool { + if emitter.buffer_pos+5 >= len(emitter.buffer) && !yaml_emitter_flush(emitter) { + return false + } + switch emitter.line_break { + case yaml_CR_BREAK: + emitter.buffer[emitter.buffer_pos] = '\r' + emitter.buffer_pos += 1 + case yaml_LN_BREAK: + emitter.buffer[emitter.buffer_pos] = '\n' + emitter.buffer_pos += 1 + case yaml_CRLN_BREAK: + emitter.buffer[emitter.buffer_pos+0] = '\r' + emitter.buffer[emitter.buffer_pos+1] = '\n' + emitter.buffer_pos += 2 + default: + panic("unknown line break setting") + } + emitter.column = 0 + emitter.line++ + return true +} + +// Copy a character from a string into buffer. +func write(emitter *yaml_emitter_t, s []byte, i *int) bool { + if emitter.buffer_pos+5 >= len(emitter.buffer) && !yaml_emitter_flush(emitter) { + return false + } + p := emitter.buffer_pos + w := width(s[*i]) + switch w { + case 4: + emitter.buffer[p+3] = s[*i+3] + fallthrough + case 3: + emitter.buffer[p+2] = s[*i+2] + fallthrough + case 2: + emitter.buffer[p+1] = s[*i+1] + fallthrough + case 1: + emitter.buffer[p+0] = s[*i+0] + default: + panic("unknown character width") + } + emitter.column++ + emitter.buffer_pos += w + *i += w + return true +} + +// Write a whole string into buffer. +func write_all(emitter *yaml_emitter_t, s []byte) bool { + for i := 0; i < len(s); { + if !write(emitter, s, &i) { + return false + } + } + return true +} + +// Copy a line break character from a string into buffer. 
+func write_break(emitter *yaml_emitter_t, s []byte, i *int) bool { + if s[*i] == '\n' { + if !put_break(emitter) { + return false + } + *i++ + } else { + if !write(emitter, s, i) { + return false + } + emitter.column = 0 + emitter.line++ + } + return true +} + +// Set an emitter error and return false. +func yaml_emitter_set_emitter_error(emitter *yaml_emitter_t, problem string) bool { + emitter.error = yaml_EMITTER_ERROR + emitter.problem = problem + return false +} + +// Emit an event. +func yaml_emitter_emit(emitter *yaml_emitter_t, event *yaml_event_t) bool { + emitter.events = append(emitter.events, *event) + for !yaml_emitter_need_more_events(emitter) { + event := &emitter.events[emitter.events_head] + if !yaml_emitter_analyze_event(emitter, event) { + return false + } + if !yaml_emitter_state_machine(emitter, event) { + return false + } + yaml_event_delete(event) + emitter.events_head++ + } + return true +} + +// Check if we need to accumulate more events before emitting. +// +// We accumulate extra +// - 1 event for DOCUMENT-START +// - 2 events for SEQUENCE-START +// - 3 events for MAPPING-START +// +func yaml_emitter_need_more_events(emitter *yaml_emitter_t) bool { + if emitter.events_head == len(emitter.events) { + return true + } + var accumulate int + switch emitter.events[emitter.events_head].typ { + case yaml_DOCUMENT_START_EVENT: + accumulate = 1 + break + case yaml_SEQUENCE_START_EVENT: + accumulate = 2 + break + case yaml_MAPPING_START_EVENT: + accumulate = 3 + break + default: + return false + } + if len(emitter.events)-emitter.events_head > accumulate { + return false + } + var level int + for i := emitter.events_head; i < len(emitter.events); i++ { + switch emitter.events[i].typ { + case yaml_STREAM_START_EVENT, yaml_DOCUMENT_START_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT: + level++ + case yaml_STREAM_END_EVENT, yaml_DOCUMENT_END_EVENT, yaml_SEQUENCE_END_EVENT, yaml_MAPPING_END_EVENT: + level-- + } + if level == 0 { + return false + } + } + return true +} + +// Append a directive to the directives stack. +func yaml_emitter_append_tag_directive(emitter *yaml_emitter_t, value *yaml_tag_directive_t, allow_duplicates bool) bool { + for i := 0; i < len(emitter.tag_directives); i++ { + if bytes.Equal(value.handle, emitter.tag_directives[i].handle) { + if allow_duplicates { + return true + } + return yaml_emitter_set_emitter_error(emitter, "duplicate %TAG directive") + } + } + + // [Go] Do we actually need to copy this given garbage collection + // and the lack of deallocating destructors? + tag_copy := yaml_tag_directive_t{ + handle: make([]byte, len(value.handle)), + prefix: make([]byte, len(value.prefix)), + } + copy(tag_copy.handle, value.handle) + copy(tag_copy.prefix, value.prefix) + emitter.tag_directives = append(emitter.tag_directives, tag_copy) + return true +} + +// Increase the indentation level. +func yaml_emitter_increase_indent(emitter *yaml_emitter_t, flow, indentless bool) bool { + emitter.indents = append(emitter.indents, emitter.indent) + if emitter.indent < 0 { + if flow { + emitter.indent = emitter.best_indent + } else { + emitter.indent = 0 + } + } else if !indentless { + emitter.indent += emitter.best_indent + } + return true +} + +// State dispatcher. 
+func yaml_emitter_state_machine(emitter *yaml_emitter_t, event *yaml_event_t) bool { + switch emitter.state { + default: + case yaml_EMIT_STREAM_START_STATE: + return yaml_emitter_emit_stream_start(emitter, event) + + case yaml_EMIT_FIRST_DOCUMENT_START_STATE: + return yaml_emitter_emit_document_start(emitter, event, true) + + case yaml_EMIT_DOCUMENT_START_STATE: + return yaml_emitter_emit_document_start(emitter, event, false) + + case yaml_EMIT_DOCUMENT_CONTENT_STATE: + return yaml_emitter_emit_document_content(emitter, event) + + case yaml_EMIT_DOCUMENT_END_STATE: + return yaml_emitter_emit_document_end(emitter, event) + + case yaml_EMIT_FLOW_SEQUENCE_FIRST_ITEM_STATE: + return yaml_emitter_emit_flow_sequence_item(emitter, event, true) + + case yaml_EMIT_FLOW_SEQUENCE_ITEM_STATE: + return yaml_emitter_emit_flow_sequence_item(emitter, event, false) + + case yaml_EMIT_FLOW_MAPPING_FIRST_KEY_STATE: + return yaml_emitter_emit_flow_mapping_key(emitter, event, true) + + case yaml_EMIT_FLOW_MAPPING_KEY_STATE: + return yaml_emitter_emit_flow_mapping_key(emitter, event, false) + + case yaml_EMIT_FLOW_MAPPING_SIMPLE_VALUE_STATE: + return yaml_emitter_emit_flow_mapping_value(emitter, event, true) + + case yaml_EMIT_FLOW_MAPPING_VALUE_STATE: + return yaml_emitter_emit_flow_mapping_value(emitter, event, false) + + case yaml_EMIT_BLOCK_SEQUENCE_FIRST_ITEM_STATE: + return yaml_emitter_emit_block_sequence_item(emitter, event, true) + + case yaml_EMIT_BLOCK_SEQUENCE_ITEM_STATE: + return yaml_emitter_emit_block_sequence_item(emitter, event, false) + + case yaml_EMIT_BLOCK_MAPPING_FIRST_KEY_STATE: + return yaml_emitter_emit_block_mapping_key(emitter, event, true) + + case yaml_EMIT_BLOCK_MAPPING_KEY_STATE: + return yaml_emitter_emit_block_mapping_key(emitter, event, false) + + case yaml_EMIT_BLOCK_MAPPING_SIMPLE_VALUE_STATE: + return yaml_emitter_emit_block_mapping_value(emitter, event, true) + + case yaml_EMIT_BLOCK_MAPPING_VALUE_STATE: + return yaml_emitter_emit_block_mapping_value(emitter, event, false) + + case yaml_EMIT_END_STATE: + return yaml_emitter_set_emitter_error(emitter, "expected nothing after STREAM-END") + } + panic("invalid emitter state") +} + +// Expect STREAM-START. +func yaml_emitter_emit_stream_start(emitter *yaml_emitter_t, event *yaml_event_t) bool { + if event.typ != yaml_STREAM_START_EVENT { + return yaml_emitter_set_emitter_error(emitter, "expected STREAM-START") + } + if emitter.encoding == yaml_ANY_ENCODING { + emitter.encoding = event.encoding + if emitter.encoding == yaml_ANY_ENCODING { + emitter.encoding = yaml_UTF8_ENCODING + } + } + if emitter.best_indent < 2 || emitter.best_indent > 9 { + emitter.best_indent = 2 + } + if emitter.best_width >= 0 && emitter.best_width <= emitter.best_indent*2 { + emitter.best_width = 80 + } + if emitter.best_width < 0 { + emitter.best_width = 1<<31 - 1 + } + if emitter.line_break == yaml_ANY_BREAK { + emitter.line_break = yaml_LN_BREAK + } + + emitter.indent = -1 + emitter.line = 0 + emitter.column = 0 + emitter.whitespace = true + emitter.indention = true + + if emitter.encoding != yaml_UTF8_ENCODING { + if !yaml_emitter_write_bom(emitter) { + return false + } + } + emitter.state = yaml_EMIT_FIRST_DOCUMENT_START_STATE + return true +} + +// Expect DOCUMENT-START or STREAM-END. 
+func yaml_emitter_emit_document_start(emitter *yaml_emitter_t, event *yaml_event_t, first bool) bool { + + if event.typ == yaml_DOCUMENT_START_EVENT { + + if event.version_directive != nil { + if !yaml_emitter_analyze_version_directive(emitter, event.version_directive) { + return false + } + } + + for i := 0; i < len(event.tag_directives); i++ { + tag_directive := &event.tag_directives[i] + if !yaml_emitter_analyze_tag_directive(emitter, tag_directive) { + return false + } + if !yaml_emitter_append_tag_directive(emitter, tag_directive, false) { + return false + } + } + + for i := 0; i < len(default_tag_directives); i++ { + tag_directive := &default_tag_directives[i] + if !yaml_emitter_append_tag_directive(emitter, tag_directive, true) { + return false + } + } + + implicit := event.implicit + if !first || emitter.canonical { + implicit = false + } + + if emitter.open_ended && (event.version_directive != nil || len(event.tag_directives) > 0) { + if !yaml_emitter_write_indicator(emitter, []byte("..."), true, false, false) { + return false + } + if !yaml_emitter_write_indent(emitter) { + return false + } + } + + if event.version_directive != nil { + implicit = false + if !yaml_emitter_write_indicator(emitter, []byte("%YAML"), true, false, false) { + return false + } + if !yaml_emitter_write_indicator(emitter, []byte("1.1"), true, false, false) { + return false + } + if !yaml_emitter_write_indent(emitter) { + return false + } + } + + if len(event.tag_directives) > 0 { + implicit = false + for i := 0; i < len(event.tag_directives); i++ { + tag_directive := &event.tag_directives[i] + if !yaml_emitter_write_indicator(emitter, []byte("%TAG"), true, false, false) { + return false + } + if !yaml_emitter_write_tag_handle(emitter, tag_directive.handle) { + return false + } + if !yaml_emitter_write_tag_content(emitter, tag_directive.prefix, true) { + return false + } + if !yaml_emitter_write_indent(emitter) { + return false + } + } + } + + if yaml_emitter_check_empty_document(emitter) { + implicit = false + } + if !implicit { + if !yaml_emitter_write_indent(emitter) { + return false + } + if !yaml_emitter_write_indicator(emitter, []byte("---"), true, false, false) { + return false + } + if emitter.canonical { + if !yaml_emitter_write_indent(emitter) { + return false + } + } + } + + emitter.state = yaml_EMIT_DOCUMENT_CONTENT_STATE + return true + } + + if event.typ == yaml_STREAM_END_EVENT { + if emitter.open_ended { + if !yaml_emitter_write_indicator(emitter, []byte("..."), true, false, false) { + return false + } + if !yaml_emitter_write_indent(emitter) { + return false + } + } + if !yaml_emitter_flush(emitter) { + return false + } + emitter.state = yaml_EMIT_END_STATE + return true + } + + return yaml_emitter_set_emitter_error(emitter, "expected DOCUMENT-START or STREAM-END") +} + +// Expect the root node. +func yaml_emitter_emit_document_content(emitter *yaml_emitter_t, event *yaml_event_t) bool { + emitter.states = append(emitter.states, yaml_EMIT_DOCUMENT_END_STATE) + return yaml_emitter_emit_node(emitter, event, true, false, false, false) +} + +// Expect DOCUMENT-END. +func yaml_emitter_emit_document_end(emitter *yaml_emitter_t, event *yaml_event_t) bool { + if event.typ != yaml_DOCUMENT_END_EVENT { + return yaml_emitter_set_emitter_error(emitter, "expected DOCUMENT-END") + } + if !yaml_emitter_write_indent(emitter) { + return false + } + if !event.implicit { + // [Go] Allocate the slice elsewhere. 
+ if !yaml_emitter_write_indicator(emitter, []byte("..."), true, false, false) { + return false + } + if !yaml_emitter_write_indent(emitter) { + return false + } + } + if !yaml_emitter_flush(emitter) { + return false + } + emitter.state = yaml_EMIT_DOCUMENT_START_STATE + emitter.tag_directives = emitter.tag_directives[:0] + return true +} + +// Expect a flow item node. +func yaml_emitter_emit_flow_sequence_item(emitter *yaml_emitter_t, event *yaml_event_t, first bool) bool { + if first { + if !yaml_emitter_write_indicator(emitter, []byte{'['}, true, true, false) { + return false + } + if !yaml_emitter_increase_indent(emitter, true, false) { + return false + } + emitter.flow_level++ + } + + if event.typ == yaml_SEQUENCE_END_EVENT { + emitter.flow_level-- + emitter.indent = emitter.indents[len(emitter.indents)-1] + emitter.indents = emitter.indents[:len(emitter.indents)-1] + if emitter.canonical && !first { + if !yaml_emitter_write_indicator(emitter, []byte{','}, false, false, false) { + return false + } + if !yaml_emitter_write_indent(emitter) { + return false + } + } + if !yaml_emitter_write_indicator(emitter, []byte{']'}, false, false, false) { + return false + } + emitter.state = emitter.states[len(emitter.states)-1] + emitter.states = emitter.states[:len(emitter.states)-1] + + return true + } + + if !first { + if !yaml_emitter_write_indicator(emitter, []byte{','}, false, false, false) { + return false + } + } + + if emitter.canonical || emitter.column > emitter.best_width { + if !yaml_emitter_write_indent(emitter) { + return false + } + } + emitter.states = append(emitter.states, yaml_EMIT_FLOW_SEQUENCE_ITEM_STATE) + return yaml_emitter_emit_node(emitter, event, false, true, false, false) +} + +// Expect a flow key node. +func yaml_emitter_emit_flow_mapping_key(emitter *yaml_emitter_t, event *yaml_event_t, first bool) bool { + if first { + if !yaml_emitter_write_indicator(emitter, []byte{'{'}, true, true, false) { + return false + } + if !yaml_emitter_increase_indent(emitter, true, false) { + return false + } + emitter.flow_level++ + } + + if event.typ == yaml_MAPPING_END_EVENT { + emitter.flow_level-- + emitter.indent = emitter.indents[len(emitter.indents)-1] + emitter.indents = emitter.indents[:len(emitter.indents)-1] + if emitter.canonical && !first { + if !yaml_emitter_write_indicator(emitter, []byte{','}, false, false, false) { + return false + } + if !yaml_emitter_write_indent(emitter) { + return false + } + } + if !yaml_emitter_write_indicator(emitter, []byte{'}'}, false, false, false) { + return false + } + emitter.state = emitter.states[len(emitter.states)-1] + emitter.states = emitter.states[:len(emitter.states)-1] + return true + } + + if !first { + if !yaml_emitter_write_indicator(emitter, []byte{','}, false, false, false) { + return false + } + } + if emitter.canonical || emitter.column > emitter.best_width { + if !yaml_emitter_write_indent(emitter) { + return false + } + } + + if !emitter.canonical && yaml_emitter_check_simple_key(emitter) { + emitter.states = append(emitter.states, yaml_EMIT_FLOW_MAPPING_SIMPLE_VALUE_STATE) + return yaml_emitter_emit_node(emitter, event, false, false, true, true) + } + if !yaml_emitter_write_indicator(emitter, []byte{'?'}, true, false, false) { + return false + } + emitter.states = append(emitter.states, yaml_EMIT_FLOW_MAPPING_VALUE_STATE) + return yaml_emitter_emit_node(emitter, event, false, false, true, false) +} + +// Expect a flow value node. 
+func yaml_emitter_emit_flow_mapping_value(emitter *yaml_emitter_t, event *yaml_event_t, simple bool) bool { + if simple { + if !yaml_emitter_write_indicator(emitter, []byte{':'}, false, false, false) { + return false + } + } else { + if emitter.canonical || emitter.column > emitter.best_width { + if !yaml_emitter_write_indent(emitter) { + return false + } + } + if !yaml_emitter_write_indicator(emitter, []byte{':'}, true, false, false) { + return false + } + } + emitter.states = append(emitter.states, yaml_EMIT_FLOW_MAPPING_KEY_STATE) + return yaml_emitter_emit_node(emitter, event, false, false, true, false) +} + +// Expect a block item node. +func yaml_emitter_emit_block_sequence_item(emitter *yaml_emitter_t, event *yaml_event_t, first bool) bool { + if first { + if !yaml_emitter_increase_indent(emitter, false, emitter.mapping_context && !emitter.indention) { + return false + } + } + if event.typ == yaml_SEQUENCE_END_EVENT { + emitter.indent = emitter.indents[len(emitter.indents)-1] + emitter.indents = emitter.indents[:len(emitter.indents)-1] + emitter.state = emitter.states[len(emitter.states)-1] + emitter.states = emitter.states[:len(emitter.states)-1] + return true + } + if !yaml_emitter_write_indent(emitter) { + return false + } + if !yaml_emitter_write_indicator(emitter, []byte{'-'}, true, false, true) { + return false + } + emitter.states = append(emitter.states, yaml_EMIT_BLOCK_SEQUENCE_ITEM_STATE) + return yaml_emitter_emit_node(emitter, event, false, true, false, false) +} + +// Expect a block key node. +func yaml_emitter_emit_block_mapping_key(emitter *yaml_emitter_t, event *yaml_event_t, first bool) bool { + if first { + if !yaml_emitter_increase_indent(emitter, false, false) { + return false + } + } + if event.typ == yaml_MAPPING_END_EVENT { + emitter.indent = emitter.indents[len(emitter.indents)-1] + emitter.indents = emitter.indents[:len(emitter.indents)-1] + emitter.state = emitter.states[len(emitter.states)-1] + emitter.states = emitter.states[:len(emitter.states)-1] + return true + } + if !yaml_emitter_write_indent(emitter) { + return false + } + if yaml_emitter_check_simple_key(emitter) { + emitter.states = append(emitter.states, yaml_EMIT_BLOCK_MAPPING_SIMPLE_VALUE_STATE) + return yaml_emitter_emit_node(emitter, event, false, false, true, true) + } + if !yaml_emitter_write_indicator(emitter, []byte{'?'}, true, false, true) { + return false + } + emitter.states = append(emitter.states, yaml_EMIT_BLOCK_MAPPING_VALUE_STATE) + return yaml_emitter_emit_node(emitter, event, false, false, true, false) +} + +// Expect a block value node. +func yaml_emitter_emit_block_mapping_value(emitter *yaml_emitter_t, event *yaml_event_t, simple bool) bool { + if simple { + if !yaml_emitter_write_indicator(emitter, []byte{':'}, false, false, false) { + return false + } + } else { + if !yaml_emitter_write_indent(emitter) { + return false + } + if !yaml_emitter_write_indicator(emitter, []byte{':'}, true, false, true) { + return false + } + } + emitter.states = append(emitter.states, yaml_EMIT_BLOCK_MAPPING_KEY_STATE) + return yaml_emitter_emit_node(emitter, event, false, false, true, false) +} + +// Expect a node. 
+func yaml_emitter_emit_node(emitter *yaml_emitter_t, event *yaml_event_t, + root bool, sequence bool, mapping bool, simple_key bool) bool { + + emitter.root_context = root + emitter.sequence_context = sequence + emitter.mapping_context = mapping + emitter.simple_key_context = simple_key + + switch event.typ { + case yaml_ALIAS_EVENT: + return yaml_emitter_emit_alias(emitter, event) + case yaml_SCALAR_EVENT: + return yaml_emitter_emit_scalar(emitter, event) + case yaml_SEQUENCE_START_EVENT: + return yaml_emitter_emit_sequence_start(emitter, event) + case yaml_MAPPING_START_EVENT: + return yaml_emitter_emit_mapping_start(emitter, event) + default: + return yaml_emitter_set_emitter_error(emitter, + "expected SCALAR, SEQUENCE-START, MAPPING-START, or ALIAS") + } + return false +} + +// Expect ALIAS. +func yaml_emitter_emit_alias(emitter *yaml_emitter_t, event *yaml_event_t) bool { + if !yaml_emitter_process_anchor(emitter) { + return false + } + emitter.state = emitter.states[len(emitter.states)-1] + emitter.states = emitter.states[:len(emitter.states)-1] + return true +} + +// Expect SCALAR. +func yaml_emitter_emit_scalar(emitter *yaml_emitter_t, event *yaml_event_t) bool { + if !yaml_emitter_select_scalar_style(emitter, event) { + return false + } + if !yaml_emitter_process_anchor(emitter) { + return false + } + if !yaml_emitter_process_tag(emitter) { + return false + } + if !yaml_emitter_increase_indent(emitter, true, false) { + return false + } + if !yaml_emitter_process_scalar(emitter) { + return false + } + emitter.indent = emitter.indents[len(emitter.indents)-1] + emitter.indents = emitter.indents[:len(emitter.indents)-1] + emitter.state = emitter.states[len(emitter.states)-1] + emitter.states = emitter.states[:len(emitter.states)-1] + return true +} + +// Expect SEQUENCE-START. +func yaml_emitter_emit_sequence_start(emitter *yaml_emitter_t, event *yaml_event_t) bool { + if !yaml_emitter_process_anchor(emitter) { + return false + } + if !yaml_emitter_process_tag(emitter) { + return false + } + if emitter.flow_level > 0 || emitter.canonical || event.sequence_style() == yaml_FLOW_SEQUENCE_STYLE || + yaml_emitter_check_empty_sequence(emitter) { + emitter.state = yaml_EMIT_FLOW_SEQUENCE_FIRST_ITEM_STATE + } else { + emitter.state = yaml_EMIT_BLOCK_SEQUENCE_FIRST_ITEM_STATE + } + return true +} + +// Expect MAPPING-START. +func yaml_emitter_emit_mapping_start(emitter *yaml_emitter_t, event *yaml_event_t) bool { + if !yaml_emitter_process_anchor(emitter) { + return false + } + if !yaml_emitter_process_tag(emitter) { + return false + } + if emitter.flow_level > 0 || emitter.canonical || event.mapping_style() == yaml_FLOW_MAPPING_STYLE || + yaml_emitter_check_empty_mapping(emitter) { + emitter.state = yaml_EMIT_FLOW_MAPPING_FIRST_KEY_STATE + } else { + emitter.state = yaml_EMIT_BLOCK_MAPPING_FIRST_KEY_STATE + } + return true +} + +// Check if the document content is an empty scalar. +func yaml_emitter_check_empty_document(emitter *yaml_emitter_t) bool { + return false // [Go] Huh? +} + +// Check if the next events represent an empty sequence. +func yaml_emitter_check_empty_sequence(emitter *yaml_emitter_t) bool { + if len(emitter.events)-emitter.events_head < 2 { + return false + } + return emitter.events[emitter.events_head].typ == yaml_SEQUENCE_START_EVENT && + emitter.events[emitter.events_head+1].typ == yaml_SEQUENCE_END_EVENT +} + +// Check if the next events represent an empty mapping. 
+func yaml_emitter_check_empty_mapping(emitter *yaml_emitter_t) bool { + if len(emitter.events)-emitter.events_head < 2 { + return false + } + return emitter.events[emitter.events_head].typ == yaml_MAPPING_START_EVENT && + emitter.events[emitter.events_head+1].typ == yaml_MAPPING_END_EVENT +} + +// Check if the next node can be expressed as a simple key. +func yaml_emitter_check_simple_key(emitter *yaml_emitter_t) bool { + length := 0 + switch emitter.events[emitter.events_head].typ { + case yaml_ALIAS_EVENT: + length += len(emitter.anchor_data.anchor) + case yaml_SCALAR_EVENT: + if emitter.scalar_data.multiline { + return false + } + length += len(emitter.anchor_data.anchor) + + len(emitter.tag_data.handle) + + len(emitter.tag_data.suffix) + + len(emitter.scalar_data.value) + case yaml_SEQUENCE_START_EVENT: + if !yaml_emitter_check_empty_sequence(emitter) { + return false + } + length += len(emitter.anchor_data.anchor) + + len(emitter.tag_data.handle) + + len(emitter.tag_data.suffix) + case yaml_MAPPING_START_EVENT: + if !yaml_emitter_check_empty_mapping(emitter) { + return false + } + length += len(emitter.anchor_data.anchor) + + len(emitter.tag_data.handle) + + len(emitter.tag_data.suffix) + default: + return false + } + return length <= 128 +} + +// Determine an acceptable scalar style. +func yaml_emitter_select_scalar_style(emitter *yaml_emitter_t, event *yaml_event_t) bool { + + no_tag := len(emitter.tag_data.handle) == 0 && len(emitter.tag_data.suffix) == 0 + if no_tag && !event.implicit && !event.quoted_implicit { + return yaml_emitter_set_emitter_error(emitter, "neither tag nor implicit flags are specified") + } + + style := event.scalar_style() + if style == yaml_ANY_SCALAR_STYLE { + style = yaml_PLAIN_SCALAR_STYLE + } + if emitter.canonical { + style = yaml_DOUBLE_QUOTED_SCALAR_STYLE + } + if emitter.simple_key_context && emitter.scalar_data.multiline { + style = yaml_DOUBLE_QUOTED_SCALAR_STYLE + } + + if style == yaml_PLAIN_SCALAR_STYLE { + if emitter.flow_level > 0 && !emitter.scalar_data.flow_plain_allowed || + emitter.flow_level == 0 && !emitter.scalar_data.block_plain_allowed { + style = yaml_SINGLE_QUOTED_SCALAR_STYLE + } + if len(emitter.scalar_data.value) == 0 && (emitter.flow_level > 0 || emitter.simple_key_context) { + style = yaml_SINGLE_QUOTED_SCALAR_STYLE + } + if no_tag && !event.implicit { + style = yaml_SINGLE_QUOTED_SCALAR_STYLE + } + } + if style == yaml_SINGLE_QUOTED_SCALAR_STYLE { + if !emitter.scalar_data.single_quoted_allowed { + style = yaml_DOUBLE_QUOTED_SCALAR_STYLE + } + } + if style == yaml_LITERAL_SCALAR_STYLE || style == yaml_FOLDED_SCALAR_STYLE { + if !emitter.scalar_data.block_allowed || emitter.flow_level > 0 || emitter.simple_key_context { + style = yaml_DOUBLE_QUOTED_SCALAR_STYLE + } + } + + if no_tag && !event.quoted_implicit && style != yaml_PLAIN_SCALAR_STYLE { + emitter.tag_data.handle = []byte{'!'} + } + emitter.scalar_data.style = style + return true +} + +// Write an achor. +func yaml_emitter_process_anchor(emitter *yaml_emitter_t) bool { + if emitter.anchor_data.anchor == nil { + return true + } + c := []byte{'&'} + if emitter.anchor_data.alias { + c[0] = '*' + } + if !yaml_emitter_write_indicator(emitter, c, true, false, false) { + return false + } + return yaml_emitter_write_anchor(emitter, emitter.anchor_data.anchor) +} + +// Write a tag. 
+func yaml_emitter_process_tag(emitter *yaml_emitter_t) bool { + if len(emitter.tag_data.handle) == 0 && len(emitter.tag_data.suffix) == 0 { + return true + } + if len(emitter.tag_data.handle) > 0 { + if !yaml_emitter_write_tag_handle(emitter, emitter.tag_data.handle) { + return false + } + if len(emitter.tag_data.suffix) > 0 { + if !yaml_emitter_write_tag_content(emitter, emitter.tag_data.suffix, false) { + return false + } + } + } else { + // [Go] Allocate these slices elsewhere. + if !yaml_emitter_write_indicator(emitter, []byte("!<"), true, false, false) { + return false + } + if !yaml_emitter_write_tag_content(emitter, emitter.tag_data.suffix, false) { + return false + } + if !yaml_emitter_write_indicator(emitter, []byte{'>'}, false, false, false) { + return false + } + } + return true +} + +// Write a scalar. +func yaml_emitter_process_scalar(emitter *yaml_emitter_t) bool { + switch emitter.scalar_data.style { + case yaml_PLAIN_SCALAR_STYLE: + return yaml_emitter_write_plain_scalar(emitter, emitter.scalar_data.value, !emitter.simple_key_context) + + case yaml_SINGLE_QUOTED_SCALAR_STYLE: + return yaml_emitter_write_single_quoted_scalar(emitter, emitter.scalar_data.value, !emitter.simple_key_context) + + case yaml_DOUBLE_QUOTED_SCALAR_STYLE: + return yaml_emitter_write_double_quoted_scalar(emitter, emitter.scalar_data.value, !emitter.simple_key_context) + + case yaml_LITERAL_SCALAR_STYLE: + return yaml_emitter_write_literal_scalar(emitter, emitter.scalar_data.value) + + case yaml_FOLDED_SCALAR_STYLE: + return yaml_emitter_write_folded_scalar(emitter, emitter.scalar_data.value) + } + panic("unknown scalar style") +} + +// Check if a %YAML directive is valid. +func yaml_emitter_analyze_version_directive(emitter *yaml_emitter_t, version_directive *yaml_version_directive_t) bool { + if version_directive.major != 1 || version_directive.minor != 1 { + return yaml_emitter_set_emitter_error(emitter, "incompatible %YAML directive") + } + return true +} + +// Check if a %TAG directive is valid. +func yaml_emitter_analyze_tag_directive(emitter *yaml_emitter_t, tag_directive *yaml_tag_directive_t) bool { + handle := tag_directive.handle + prefix := tag_directive.prefix + if len(handle) == 0 { + return yaml_emitter_set_emitter_error(emitter, "tag handle must not be empty") + } + if handle[0] != '!' { + return yaml_emitter_set_emitter_error(emitter, "tag handle must start with '!'") + } + if handle[len(handle)-1] != '!' { + return yaml_emitter_set_emitter_error(emitter, "tag handle must end with '!'") + } + for i := 1; i < len(handle)-1; i += width(handle[i]) { + if !is_alpha(handle, i) { + return yaml_emitter_set_emitter_error(emitter, "tag handle must contain alphanumerical characters only") + } + } + if len(prefix) == 0 { + return yaml_emitter_set_emitter_error(emitter, "tag prefix must not be empty") + } + return true +} + +// Check if an anchor is valid. 
+func yaml_emitter_analyze_anchor(emitter *yaml_emitter_t, anchor []byte, alias bool) bool { + if len(anchor) == 0 { + problem := "anchor value must not be empty" + if alias { + problem = "alias value must not be empty" + } + return yaml_emitter_set_emitter_error(emitter, problem) + } + for i := 0; i < len(anchor); i += width(anchor[i]) { + if !is_alpha(anchor, i) { + problem := "anchor value must contain alphanumerical characters only" + if alias { + problem = "alias value must contain alphanumerical characters only" + } + return yaml_emitter_set_emitter_error(emitter, problem) + } + } + emitter.anchor_data.anchor = anchor + emitter.anchor_data.alias = alias + return true +} + +// Check if a tag is valid. +func yaml_emitter_analyze_tag(emitter *yaml_emitter_t, tag []byte) bool { + if len(tag) == 0 { + return yaml_emitter_set_emitter_error(emitter, "tag value must not be empty") + } + for i := 0; i < len(emitter.tag_directives); i++ { + tag_directive := &emitter.tag_directives[i] + if bytes.HasPrefix(tag, tag_directive.prefix) { + emitter.tag_data.handle = tag_directive.handle + emitter.tag_data.suffix = tag[len(tag_directive.prefix):] + return true + } + } + emitter.tag_data.suffix = tag + return true +} + +// Check if a scalar is valid. +func yaml_emitter_analyze_scalar(emitter *yaml_emitter_t, value []byte) bool { + var ( + block_indicators = false + flow_indicators = false + line_breaks = false + special_characters = false + + leading_space = false + leading_break = false + trailing_space = false + trailing_break = false + break_space = false + space_break = false + + preceeded_by_whitespace = false + followed_by_whitespace = false + previous_space = false + previous_break = false + ) + + emitter.scalar_data.value = value + + if len(value) == 0 { + emitter.scalar_data.multiline = false + emitter.scalar_data.flow_plain_allowed = false + emitter.scalar_data.block_plain_allowed = true + emitter.scalar_data.single_quoted_allowed = true + emitter.scalar_data.block_allowed = false + return true + } + + if len(value) >= 3 && ((value[0] == '-' && value[1] == '-' && value[2] == '-') || (value[0] == '.' && value[1] == '.' 
&& value[2] == '.')) { + block_indicators = true + flow_indicators = true + } + + preceeded_by_whitespace = true + for i, w := 0, 0; i < len(value); i += w { + w = width(value[0]) + followed_by_whitespace = i+w >= len(value) || is_blank(value, i+w) + + if i == 0 { + switch value[i] { + case '#', ',', '[', ']', '{', '}', '&', '*', '!', '|', '>', '\'', '"', '%', '@', '`': + flow_indicators = true + block_indicators = true + case '?', ':': + flow_indicators = true + if followed_by_whitespace { + block_indicators = true + } + case '-': + if followed_by_whitespace { + flow_indicators = true + block_indicators = true + } + } + } else { + switch value[i] { + case ',', '?', '[', ']', '{', '}': + flow_indicators = true + case ':': + flow_indicators = true + if followed_by_whitespace { + block_indicators = true + } + case '#': + if preceeded_by_whitespace { + flow_indicators = true + block_indicators = true + } + } + } + + if !is_printable(value, i) || !is_ascii(value, i) && !emitter.unicode { + special_characters = true + } + if is_space(value, i) { + if i == 0 { + leading_space = true + } + if i+width(value[i]) == len(value) { + trailing_space = true + } + if previous_break { + break_space = true + } + previous_space = true + previous_break = false + } else if is_break(value, i) { + line_breaks = true + if i == 0 { + leading_break = true + } + if i+width(value[i]) == len(value) { + trailing_break = true + } + if previous_space { + space_break = true + } + previous_space = false + previous_break = true + } else { + previous_space = false + previous_break = false + } + + // [Go]: Why 'z'? Couldn't be the end of the string as that's the loop condition. + preceeded_by_whitespace = is_blankz(value, i) + } + + emitter.scalar_data.multiline = line_breaks + emitter.scalar_data.flow_plain_allowed = true + emitter.scalar_data.block_plain_allowed = true + emitter.scalar_data.single_quoted_allowed = true + emitter.scalar_data.block_allowed = true + + if leading_space || leading_break || trailing_space || trailing_break { + emitter.scalar_data.flow_plain_allowed = false + emitter.scalar_data.block_plain_allowed = false + } + if trailing_space { + emitter.scalar_data.block_allowed = false + } + if break_space { + emitter.scalar_data.flow_plain_allowed = false + emitter.scalar_data.block_plain_allowed = false + emitter.scalar_data.single_quoted_allowed = false + } + if space_break || special_characters { + emitter.scalar_data.flow_plain_allowed = false + emitter.scalar_data.block_plain_allowed = false + emitter.scalar_data.single_quoted_allowed = false + emitter.scalar_data.block_allowed = false + } + if line_breaks { + emitter.scalar_data.flow_plain_allowed = false + emitter.scalar_data.block_plain_allowed = false + } + if flow_indicators { + emitter.scalar_data.flow_plain_allowed = false + } + if block_indicators { + emitter.scalar_data.block_plain_allowed = false + } + return true +} + +// Check if the event data is valid. 
+func yaml_emitter_analyze_event(emitter *yaml_emitter_t, event *yaml_event_t) bool { + + emitter.anchor_data.anchor = nil + emitter.tag_data.handle = nil + emitter.tag_data.suffix = nil + emitter.scalar_data.value = nil + + switch event.typ { + case yaml_ALIAS_EVENT: + if !yaml_emitter_analyze_anchor(emitter, event.anchor, true) { + return false + } + + case yaml_SCALAR_EVENT: + if len(event.anchor) > 0 { + if !yaml_emitter_analyze_anchor(emitter, event.anchor, false) { + return false + } + } + if len(event.tag) > 0 && (emitter.canonical || (!event.implicit && !event.quoted_implicit)) { + if !yaml_emitter_analyze_tag(emitter, event.tag) { + return false + } + } + if !yaml_emitter_analyze_scalar(emitter, event.value) { + return false + } + + case yaml_SEQUENCE_START_EVENT: + if len(event.anchor) > 0 { + if !yaml_emitter_analyze_anchor(emitter, event.anchor, false) { + return false + } + } + if len(event.tag) > 0 && (emitter.canonical || !event.implicit) { + if !yaml_emitter_analyze_tag(emitter, event.tag) { + return false + } + } + + case yaml_MAPPING_START_EVENT: + if len(event.anchor) > 0 { + if !yaml_emitter_analyze_anchor(emitter, event.anchor, false) { + return false + } + } + if len(event.tag) > 0 && (emitter.canonical || !event.implicit) { + if !yaml_emitter_analyze_tag(emitter, event.tag) { + return false + } + } + } + return true +} + +// Write the BOM character. +func yaml_emitter_write_bom(emitter *yaml_emitter_t) bool { + if !flush(emitter) { + return false + } + pos := emitter.buffer_pos + emitter.buffer[pos+0] = '\xEF' + emitter.buffer[pos+1] = '\xBB' + emitter.buffer[pos+2] = '\xBF' + emitter.buffer_pos += 3 + return true +} + +func yaml_emitter_write_indent(emitter *yaml_emitter_t) bool { + indent := emitter.indent + if indent < 0 { + indent = 0 + } + if !emitter.indention || emitter.column > indent || (emitter.column == indent && !emitter.whitespace) { + if !put_break(emitter) { + return false + } + } + for emitter.column < indent { + if !put(emitter, ' ') { + return false + } + } + emitter.whitespace = true + emitter.indention = true + return true +} + +func yaml_emitter_write_indicator(emitter *yaml_emitter_t, indicator []byte, need_whitespace, is_whitespace, is_indention bool) bool { + if need_whitespace && !emitter.whitespace { + if !put(emitter, ' ') { + return false + } + } + if !write_all(emitter, indicator) { + return false + } + emitter.whitespace = is_whitespace + emitter.indention = (emitter.indention && is_indention) + emitter.open_ended = false + return true +} + +func yaml_emitter_write_anchor(emitter *yaml_emitter_t, value []byte) bool { + if !write_all(emitter, value) { + return false + } + emitter.whitespace = false + emitter.indention = false + return true +} + +func yaml_emitter_write_tag_handle(emitter *yaml_emitter_t, value []byte) bool { + if !emitter.whitespace { + if !put(emitter, ' ') { + return false + } + } + if !write_all(emitter, value) { + return false + } + emitter.whitespace = false + emitter.indention = false + return true +} + +func yaml_emitter_write_tag_content(emitter *yaml_emitter_t, value []byte, need_whitespace bool) bool { + if need_whitespace && !emitter.whitespace { + if !put(emitter, ' ') { + return false + } + } + for i := 0; i < len(value); { + var must_write bool + switch value[i] { + case ';', '/', '?', ':', '@', '&', '=', '+', '$', ',', '_', '.', '~', '*', '\'', '(', ')', '[', ']': + must_write = true + default: + must_write = is_alpha(value, i) + } + if must_write { + if !write(emitter, value, &i) { + return false + } + } 
else { + w := width(value[i]) + for k := 0; k < w; k++ { + octet := value[i] + i++ + if !put(emitter, '%') { + return false + } + + c := octet >> 4 + if c < 10 { + c += '0' + } else { + c += 'A' - 10 + } + if !put(emitter, c) { + return false + } + + c = octet & 0x0f + if c < 10 { + c += '0' + } else { + c += 'A' - 10 + } + if !put(emitter, c) { + return false + } + } + } + } + emitter.whitespace = false + emitter.indention = false + return true +} + +func yaml_emitter_write_plain_scalar(emitter *yaml_emitter_t, value []byte, allow_breaks bool) bool { + if !emitter.whitespace { + if !put(emitter, ' ') { + return false + } + } + + spaces := false + breaks := false + for i := 0; i < len(value); { + if is_space(value, i) { + if allow_breaks && !spaces && emitter.column > emitter.best_width && !is_space(value, i+1) { + if !yaml_emitter_write_indent(emitter) { + return false + } + i += width(value[i]) + } else { + if !write(emitter, value, &i) { + return false + } + } + spaces = true + } else if is_break(value, i) { + if !breaks && value[i] == '\n' { + if !put_break(emitter) { + return false + } + } + if !write_break(emitter, value, &i) { + return false + } + emitter.indention = true + breaks = true + } else { + if breaks { + if !yaml_emitter_write_indent(emitter) { + return false + } + } + if !write(emitter, value, &i) { + return false + } + emitter.indention = false + spaces = false + breaks = false + } + } + + emitter.whitespace = false + emitter.indention = false + if emitter.root_context { + emitter.open_ended = true + } + + return true +} + +func yaml_emitter_write_single_quoted_scalar(emitter *yaml_emitter_t, value []byte, allow_breaks bool) bool { + + if !yaml_emitter_write_indicator(emitter, []byte{'\''}, true, false, false) { + return false + } + + spaces := false + breaks := false + for i := 0; i < len(value); { + if is_space(value, i) { + if allow_breaks && !spaces && emitter.column > emitter.best_width && i > 0 && i < len(value)-1 && !is_space(value, i+1) { + if !yaml_emitter_write_indent(emitter) { + return false + } + i += width(value[i]) + } else { + if !write(emitter, value, &i) { + return false + } + } + spaces = true + } else if is_break(value, i) { + if !breaks && value[i] == '\n' { + if !put_break(emitter) { + return false + } + } + if !write_break(emitter, value, &i) { + return false + } + emitter.indention = true + breaks = true + } else { + if breaks { + if !yaml_emitter_write_indent(emitter) { + return false + } + } + if value[i] == '\'' { + if !put(emitter, '\'') { + return false + } + } + if !write(emitter, value, &i) { + return false + } + emitter.indention = false + spaces = false + breaks = false + } + } + if !yaml_emitter_write_indicator(emitter, []byte{'\''}, false, false, false) { + return false + } + emitter.whitespace = false + emitter.indention = false + return true +} + +func yaml_emitter_write_double_quoted_scalar(emitter *yaml_emitter_t, value []byte, allow_breaks bool) bool { + spaces := false + if !yaml_emitter_write_indicator(emitter, []byte{'"'}, true, false, false) { + return false + } + + for i := 0; i < len(value); { + if !is_printable(value, i) || (!emitter.unicode && !is_ascii(value, i)) || + is_bom(value, i) || is_break(value, i) || + value[i] == '"' || value[i] == '\\' { + + octet := value[i] + + var w int + var v rune + switch { + case octet&0x80 == 0x00: + w, v = 1, rune(octet&0x7F) + case octet&0xE0 == 0xC0: + w, v = 2, rune(octet&0x1F) + case octet&0xF0 == 0xE0: + w, v = 3, rune(octet&0x0F) + case octet&0xF8 == 0xF0: + w, v = 4, 
rune(octet&0x07) + } + for k := 1; k < w; k++ { + octet = value[i+k] + v = (v << 6) + (rune(octet) & 0x3F) + } + i += w + + if !put(emitter, '\\') { + return false + } + + var ok bool + switch v { + case 0x00: + ok = put(emitter, '0') + case 0x07: + ok = put(emitter, 'a') + case 0x08: + ok = put(emitter, 'b') + case 0x09: + ok = put(emitter, 't') + case 0x0A: + ok = put(emitter, 'n') + case 0x0b: + ok = put(emitter, 'v') + case 0x0c: + ok = put(emitter, 'f') + case 0x0d: + ok = put(emitter, 'r') + case 0x1b: + ok = put(emitter, 'e') + case 0x22: + ok = put(emitter, '"') + case 0x5c: + ok = put(emitter, '\\') + case 0x85: + ok = put(emitter, 'N') + case 0xA0: + ok = put(emitter, '_') + case 0x2028: + ok = put(emitter, 'L') + case 0x2029: + ok = put(emitter, 'P') + default: + if v <= 0xFF { + ok = put(emitter, 'x') + w = 2 + } else if v <= 0xFFFF { + ok = put(emitter, 'u') + w = 4 + } else { + ok = put(emitter, 'U') + w = 8 + } + for k := (w - 1) * 4; ok && k >= 0; k -= 4 { + digit := byte((v >> uint(k)) & 0x0F) + if digit < 10 { + ok = put(emitter, digit+'0') + } else { + ok = put(emitter, digit+'A'-10) + } + } + } + if !ok { + return false + } + spaces = false + } else if is_space(value, i) { + if allow_breaks && !spaces && emitter.column > emitter.best_width && i > 0 && i < len(value)-1 { + if !yaml_emitter_write_indent(emitter) { + return false + } + if is_space(value, i+1) { + if !put(emitter, '\\') { + return false + } + } + i += width(value[i]) + } else if !write(emitter, value, &i) { + return false + } + spaces = true + } else { + if !write(emitter, value, &i) { + return false + } + spaces = false + } + } + if !yaml_emitter_write_indicator(emitter, []byte{'"'}, false, false, false) { + return false + } + emitter.whitespace = false + emitter.indention = false + return true +} + +func yaml_emitter_write_block_scalar_hints(emitter *yaml_emitter_t, value []byte) bool { + if is_space(value, 0) || is_break(value, 0) { + indent_hint := []byte{'0' + byte(emitter.best_indent)} + if !yaml_emitter_write_indicator(emitter, indent_hint, false, false, false) { + return false + } + } + + emitter.open_ended = false + + var chomp_hint [1]byte + if len(value) == 0 { + chomp_hint[0] = '-' + } else { + i := len(value) - 1 + for value[i]&0xC0 == 0x80 { + i-- + } + if !is_break(value, i) { + chomp_hint[0] = '-' + } else if i == 0 { + chomp_hint[0] = '+' + emitter.open_ended = true + } else { + i-- + for value[i]&0xC0 == 0x80 { + i-- + } + if is_break(value, i) { + chomp_hint[0] = '+' + emitter.open_ended = true + } + } + } + if chomp_hint[0] != 0 { + if !yaml_emitter_write_indicator(emitter, chomp_hint[:], false, false, false) { + return false + } + } + return true +} + +func yaml_emitter_write_literal_scalar(emitter *yaml_emitter_t, value []byte) bool { + if !yaml_emitter_write_indicator(emitter, []byte{'|'}, true, false, false) { + return false + } + if !yaml_emitter_write_block_scalar_hints(emitter, value) { + return false + } + if !put_break(emitter) { + return false + } + emitter.indention = true + emitter.whitespace = true + breaks := true + for i := 0; i < len(value); { + if is_break(value, i) { + if !write_break(emitter, value, &i) { + return false + } + emitter.indention = true + breaks = true + } else { + if breaks { + if !yaml_emitter_write_indent(emitter) { + return false + } + } + if !write(emitter, value, &i) { + return false + } + emitter.indention = false + breaks = false + } + } + + return true +} + +func yaml_emitter_write_folded_scalar(emitter *yaml_emitter_t, value []byte) bool { + if 
!yaml_emitter_write_indicator(emitter, []byte{'>'}, true, false, false) { + return false + } + if !yaml_emitter_write_block_scalar_hints(emitter, value) { + return false + } + + if !put_break(emitter) { + return false + } + emitter.indention = true + emitter.whitespace = true + + breaks := true + leading_spaces := true + for i := 0; i < len(value); { + if is_break(value, i) { + if !breaks && !leading_spaces && value[i] == '\n' { + k := 0 + for is_break(value, k) { + k += width(value[k]) + } + if !is_blankz(value, k) { + if !put_break(emitter) { + return false + } + } + } + if !write_break(emitter, value, &i) { + return false + } + emitter.indention = true + breaks = true + } else { + if breaks { + if !yaml_emitter_write_indent(emitter) { + return false + } + leading_spaces = is_blank(value, i) + } + if !breaks && is_space(value, i) && !is_space(value, i+1) && emitter.column > emitter.best_width { + if !yaml_emitter_write_indent(emitter) { + return false + } + i += width(value[i]) + } else { + if !write(emitter, value, &i) { + return false + } + } + emitter.indention = false + breaks = false + } + } + return true +} diff --git a/vendor/github.com/docker/distribution/vendor/gopkg.in/yaml.v2/encode.go b/vendor/github.com/docker/distribution/vendor/gopkg.in/yaml.v2/encode.go new file mode 100644 index 000000000..b7edc799d --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/gopkg.in/yaml.v2/encode.go @@ -0,0 +1,289 @@ +package yaml + +import ( + "encoding" + "reflect" + "regexp" + "sort" + "strconv" + "strings" + "time" +) + +type encoder struct { + emitter yaml_emitter_t + event yaml_event_t + out []byte + flow bool +} + +func newEncoder() (e *encoder) { + e = &encoder{} + e.must(yaml_emitter_initialize(&e.emitter)) + yaml_emitter_set_output_string(&e.emitter, &e.out) + yaml_emitter_set_unicode(&e.emitter, true) + e.must(yaml_stream_start_event_initialize(&e.event, yaml_UTF8_ENCODING)) + e.emit() + e.must(yaml_document_start_event_initialize(&e.event, nil, nil, true)) + e.emit() + return e +} + +func (e *encoder) finish() { + e.must(yaml_document_end_event_initialize(&e.event, true)) + e.emit() + e.emitter.open_ended = false + e.must(yaml_stream_end_event_initialize(&e.event)) + e.emit() +} + +func (e *encoder) destroy() { + yaml_emitter_delete(&e.emitter) +} + +func (e *encoder) emit() { + // This will internally delete the e.event value. 
+ if !yaml_emitter_emit(&e.emitter, &e.event) && e.event.typ != yaml_DOCUMENT_END_EVENT && e.event.typ != yaml_STREAM_END_EVENT { + e.must(false) + } +} + +func (e *encoder) must(ok bool) { + if !ok { + msg := e.emitter.problem + if msg == "" { + msg = "unknown problem generating YAML content" + } + failf("%s", msg) + } +} + +func (e *encoder) marshal(tag string, in reflect.Value) { + if !in.IsValid() { + e.nilv() + return + } + iface := in.Interface() + if m, ok := iface.(Marshaler); ok { + v, err := m.MarshalYAML() + if err != nil { + fail(err) + } + if v == nil { + e.nilv() + return + } + in = reflect.ValueOf(v) + } else if m, ok := iface.(encoding.TextMarshaler); ok { + text, err := m.MarshalText() + if err != nil { + fail(err) + } + in = reflect.ValueOf(string(text)) + } + switch in.Kind() { + case reflect.Interface: + if in.IsNil() { + e.nilv() + } else { + e.marshal(tag, in.Elem()) + } + case reflect.Map: + e.mapv(tag, in) + case reflect.Ptr: + if in.IsNil() { + e.nilv() + } else { + e.marshal(tag, in.Elem()) + } + case reflect.Struct: + e.structv(tag, in) + case reflect.Slice: + if in.Type().Elem() == mapItemType { + e.itemsv(tag, in) + } else { + e.slicev(tag, in) + } + case reflect.String: + e.stringv(tag, in) + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + if in.Type() == durationType { + e.stringv(tag, reflect.ValueOf(iface.(time.Duration).String())) + } else { + e.intv(tag, in) + } + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + e.uintv(tag, in) + case reflect.Float32, reflect.Float64: + e.floatv(tag, in) + case reflect.Bool: + e.boolv(tag, in) + default: + panic("cannot marshal type: " + in.Type().String()) + } +} + +func (e *encoder) mapv(tag string, in reflect.Value) { + e.mappingv(tag, func() { + keys := keyList(in.MapKeys()) + sort.Sort(keys) + for _, k := range keys { + e.marshal("", k) + e.marshal("", in.MapIndex(k)) + } + }) +} + +func (e *encoder) itemsv(tag string, in reflect.Value) { + e.mappingv(tag, func() { + slice := in.Convert(reflect.TypeOf([]MapItem{})).Interface().([]MapItem) + for _, item := range slice { + e.marshal("", reflect.ValueOf(item.Key)) + e.marshal("", reflect.ValueOf(item.Value)) + } + }) +} + +func (e *encoder) structv(tag string, in reflect.Value) { + sinfo, err := getStructInfo(in.Type()) + if err != nil { + panic(err) + } + e.mappingv(tag, func() { + for _, info := range sinfo.FieldsList { + var value reflect.Value + if info.Inline == nil { + value = in.Field(info.Num) + } else { + value = in.FieldByIndex(info.Inline) + } + if info.OmitEmpty && isZero(value) { + continue + } + e.marshal("", reflect.ValueOf(info.Key)) + e.flow = info.Flow + e.marshal("", value) + } + }) +} + +func (e *encoder) mappingv(tag string, f func()) { + implicit := tag == "" + style := yaml_BLOCK_MAPPING_STYLE + if e.flow { + e.flow = false + style = yaml_FLOW_MAPPING_STYLE + } + e.must(yaml_mapping_start_event_initialize(&e.event, nil, []byte(tag), implicit, style)) + e.emit() + f() + e.must(yaml_mapping_end_event_initialize(&e.event)) + e.emit() +} + +func (e *encoder) slicev(tag string, in reflect.Value) { + implicit := tag == "" + style := yaml_BLOCK_SEQUENCE_STYLE + if e.flow { + e.flow = false + style = yaml_FLOW_SEQUENCE_STYLE + } + e.must(yaml_sequence_start_event_initialize(&e.event, nil, []byte(tag), implicit, style)) + e.emit() + n := in.Len() + for i := 0; i < n; i++ { + e.marshal("", in.Index(i)) + } + e.must(yaml_sequence_end_event_initialize(&e.event)) + e.emit() +} 
+ +// isBase60 returns whether s is in base 60 notation as defined in YAML 1.1. +// +// The base 60 float notation in YAML 1.1 is a terrible idea and is unsupported +// in YAML 1.2 and by this package, but these should be marshalled quoted for +// the time being for compatibility with other parsers. +func isBase60Float(s string) (result bool) { + // Fast path. + if s == "" { + return false + } + c := s[0] + if !(c == '+' || c == '-' || c >= '0' && c <= '9') || strings.IndexByte(s, ':') < 0 { + return false + } + // Do the full match. + return base60float.MatchString(s) +} + +// From http://yaml.org/type/float.html, except the regular expression there +// is bogus. In practice parsers do not enforce the "\.[0-9_]*" suffix. +var base60float = regexp.MustCompile(`^[-+]?[0-9][0-9_]*(?::[0-5]?[0-9])+(?:\.[0-9_]*)?$`) + +func (e *encoder) stringv(tag string, in reflect.Value) { + var style yaml_scalar_style_t + s := in.String() + rtag, rs := resolve("", s) + if rtag == yaml_BINARY_TAG { + if tag == "" || tag == yaml_STR_TAG { + tag = rtag + s = rs.(string) + } else if tag == yaml_BINARY_TAG { + failf("explicitly tagged !!binary data must be base64-encoded") + } else { + failf("cannot marshal invalid UTF-8 data as %s", shortTag(tag)) + } + } + if tag == "" && (rtag != yaml_STR_TAG || isBase60Float(s)) { + style = yaml_DOUBLE_QUOTED_SCALAR_STYLE + } else if strings.Contains(s, "\n") { + style = yaml_LITERAL_SCALAR_STYLE + } else { + style = yaml_PLAIN_SCALAR_STYLE + } + e.emitScalar(s, "", tag, style) +} + +func (e *encoder) boolv(tag string, in reflect.Value) { + var s string + if in.Bool() { + s = "true" + } else { + s = "false" + } + e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE) +} + +func (e *encoder) intv(tag string, in reflect.Value) { + s := strconv.FormatInt(in.Int(), 10) + e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE) +} + +func (e *encoder) uintv(tag string, in reflect.Value) { + s := strconv.FormatUint(in.Uint(), 10) + e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE) +} + +func (e *encoder) floatv(tag string, in reflect.Value) { + // FIXME: Handle 64 bits here. + s := strconv.FormatFloat(float64(in.Float()), 'g', -1, 32) + switch s { + case "+Inf": + s = ".inf" + case "-Inf": + s = "-.inf" + case "NaN": + s = ".nan" + } + e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE) +} + +func (e *encoder) nilv() { + e.emitScalar("null", "", "", yaml_PLAIN_SCALAR_STYLE) +} + +func (e *encoder) emitScalar(value, anchor, tag string, style yaml_scalar_style_t) { + implicit := tag == "" + e.must(yaml_scalar_event_initialize(&e.event, []byte(anchor), []byte(tag), []byte(value), implicit, implicit, style)) + e.emit() +} diff --git a/vendor/github.com/docker/distribution/vendor/gopkg.in/yaml.v2/parserc.go b/vendor/github.com/docker/distribution/vendor/gopkg.in/yaml.v2/parserc.go new file mode 100644 index 000000000..0a7037ad1 --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/gopkg.in/yaml.v2/parserc.go @@ -0,0 +1,1096 @@ +package yaml + +import ( + "bytes" +) + +// The parser implements the following grammar: +// +// stream ::= STREAM-START implicit_document? explicit_document* STREAM-END +// implicit_document ::= block_node DOCUMENT-END* +// explicit_document ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END* +// block_node_or_indentless_sequence ::= +// ALIAS +// | properties (block_content | indentless_block_sequence)? +// | block_content +// | indentless_block_sequence +// block_node ::= ALIAS +// | properties block_content? 
+// | block_content +// flow_node ::= ALIAS +// | properties flow_content? +// | flow_content +// properties ::= TAG ANCHOR? | ANCHOR TAG? +// block_content ::= block_collection | flow_collection | SCALAR +// flow_content ::= flow_collection | SCALAR +// block_collection ::= block_sequence | block_mapping +// flow_collection ::= flow_sequence | flow_mapping +// block_sequence ::= BLOCK-SEQUENCE-START (BLOCK-ENTRY block_node?)* BLOCK-END +// indentless_sequence ::= (BLOCK-ENTRY block_node?)+ +// block_mapping ::= BLOCK-MAPPING_START +// ((KEY block_node_or_indentless_sequence?)? +// (VALUE block_node_or_indentless_sequence?)?)* +// BLOCK-END +// flow_sequence ::= FLOW-SEQUENCE-START +// (flow_sequence_entry FLOW-ENTRY)* +// flow_sequence_entry? +// FLOW-SEQUENCE-END +// flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? +// flow_mapping ::= FLOW-MAPPING-START +// (flow_mapping_entry FLOW-ENTRY)* +// flow_mapping_entry? +// FLOW-MAPPING-END +// flow_mapping_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? + +// Peek the next token in the token queue. +func peek_token(parser *yaml_parser_t) *yaml_token_t { + if parser.token_available || yaml_parser_fetch_more_tokens(parser) { + return &parser.tokens[parser.tokens_head] + } + return nil +} + +// Remove the next token from the queue (must be called after peek_token). +func skip_token(parser *yaml_parser_t) { + parser.token_available = false + parser.tokens_parsed++ + parser.stream_end_produced = parser.tokens[parser.tokens_head].typ == yaml_STREAM_END_TOKEN + parser.tokens_head++ +} + +// Get the next event. +func yaml_parser_parse(parser *yaml_parser_t, event *yaml_event_t) bool { + // Erase the event object. + *event = yaml_event_t{} + + // No events after the end of the stream or error. + if parser.stream_end_produced || parser.error != yaml_NO_ERROR || parser.state == yaml_PARSE_END_STATE { + return true + } + + // Generate the next event. + return yaml_parser_state_machine(parser, event) +} + +// Set parser error. +func yaml_parser_set_parser_error(parser *yaml_parser_t, problem string, problem_mark yaml_mark_t) bool { + parser.error = yaml_PARSER_ERROR + parser.problem = problem + parser.problem_mark = problem_mark + return false +} + +func yaml_parser_set_parser_error_context(parser *yaml_parser_t, context string, context_mark yaml_mark_t, problem string, problem_mark yaml_mark_t) bool { + parser.error = yaml_PARSER_ERROR + parser.context = context + parser.context_mark = context_mark + parser.problem = problem + parser.problem_mark = problem_mark + return false +} + +// State dispatcher. 
+func yaml_parser_state_machine(parser *yaml_parser_t, event *yaml_event_t) bool { + //trace("yaml_parser_state_machine", "state:", parser.state.String()) + + switch parser.state { + case yaml_PARSE_STREAM_START_STATE: + return yaml_parser_parse_stream_start(parser, event) + + case yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE: + return yaml_parser_parse_document_start(parser, event, true) + + case yaml_PARSE_DOCUMENT_START_STATE: + return yaml_parser_parse_document_start(parser, event, false) + + case yaml_PARSE_DOCUMENT_CONTENT_STATE: + return yaml_parser_parse_document_content(parser, event) + + case yaml_PARSE_DOCUMENT_END_STATE: + return yaml_parser_parse_document_end(parser, event) + + case yaml_PARSE_BLOCK_NODE_STATE: + return yaml_parser_parse_node(parser, event, true, false) + + case yaml_PARSE_BLOCK_NODE_OR_INDENTLESS_SEQUENCE_STATE: + return yaml_parser_parse_node(parser, event, true, true) + + case yaml_PARSE_FLOW_NODE_STATE: + return yaml_parser_parse_node(parser, event, false, false) + + case yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE: + return yaml_parser_parse_block_sequence_entry(parser, event, true) + + case yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE: + return yaml_parser_parse_block_sequence_entry(parser, event, false) + + case yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE: + return yaml_parser_parse_indentless_sequence_entry(parser, event) + + case yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE: + return yaml_parser_parse_block_mapping_key(parser, event, true) + + case yaml_PARSE_BLOCK_MAPPING_KEY_STATE: + return yaml_parser_parse_block_mapping_key(parser, event, false) + + case yaml_PARSE_BLOCK_MAPPING_VALUE_STATE: + return yaml_parser_parse_block_mapping_value(parser, event) + + case yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE: + return yaml_parser_parse_flow_sequence_entry(parser, event, true) + + case yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE: + return yaml_parser_parse_flow_sequence_entry(parser, event, false) + + case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE: + return yaml_parser_parse_flow_sequence_entry_mapping_key(parser, event) + + case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE: + return yaml_parser_parse_flow_sequence_entry_mapping_value(parser, event) + + case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE: + return yaml_parser_parse_flow_sequence_entry_mapping_end(parser, event) + + case yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE: + return yaml_parser_parse_flow_mapping_key(parser, event, true) + + case yaml_PARSE_FLOW_MAPPING_KEY_STATE: + return yaml_parser_parse_flow_mapping_key(parser, event, false) + + case yaml_PARSE_FLOW_MAPPING_VALUE_STATE: + return yaml_parser_parse_flow_mapping_value(parser, event, false) + + case yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE: + return yaml_parser_parse_flow_mapping_value(parser, event, true) + + default: + panic("invalid parser state") + } + return false +} + +// Parse the production: +// stream ::= STREAM-START implicit_document? 
explicit_document* STREAM-END +// ************ +func yaml_parser_parse_stream_start(parser *yaml_parser_t, event *yaml_event_t) bool { + token := peek_token(parser) + if token == nil { + return false + } + if token.typ != yaml_STREAM_START_TOKEN { + return yaml_parser_set_parser_error(parser, "did not find expected ", token.start_mark) + } + parser.state = yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE + *event = yaml_event_t{ + typ: yaml_STREAM_START_EVENT, + start_mark: token.start_mark, + end_mark: token.end_mark, + encoding: token.encoding, + } + skip_token(parser) + return true +} + +// Parse the productions: +// implicit_document ::= block_node DOCUMENT-END* +// * +// explicit_document ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END* +// ************************* +func yaml_parser_parse_document_start(parser *yaml_parser_t, event *yaml_event_t, implicit bool) bool { + + token := peek_token(parser) + if token == nil { + return false + } + + // Parse extra document end indicators. + if !implicit { + for token.typ == yaml_DOCUMENT_END_TOKEN { + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + } + } + + if implicit && token.typ != yaml_VERSION_DIRECTIVE_TOKEN && + token.typ != yaml_TAG_DIRECTIVE_TOKEN && + token.typ != yaml_DOCUMENT_START_TOKEN && + token.typ != yaml_STREAM_END_TOKEN { + // Parse an implicit document. + if !yaml_parser_process_directives(parser, nil, nil) { + return false + } + parser.states = append(parser.states, yaml_PARSE_DOCUMENT_END_STATE) + parser.state = yaml_PARSE_BLOCK_NODE_STATE + + *event = yaml_event_t{ + typ: yaml_DOCUMENT_START_EVENT, + start_mark: token.start_mark, + end_mark: token.end_mark, + } + + } else if token.typ != yaml_STREAM_END_TOKEN { + // Parse an explicit document. + var version_directive *yaml_version_directive_t + var tag_directives []yaml_tag_directive_t + start_mark := token.start_mark + if !yaml_parser_process_directives(parser, &version_directive, &tag_directives) { + return false + } + token = peek_token(parser) + if token == nil { + return false + } + if token.typ != yaml_DOCUMENT_START_TOKEN { + yaml_parser_set_parser_error(parser, + "did not find expected ", token.start_mark) + return false + } + parser.states = append(parser.states, yaml_PARSE_DOCUMENT_END_STATE) + parser.state = yaml_PARSE_DOCUMENT_CONTENT_STATE + end_mark := token.end_mark + + *event = yaml_event_t{ + typ: yaml_DOCUMENT_START_EVENT, + start_mark: start_mark, + end_mark: end_mark, + version_directive: version_directive, + tag_directives: tag_directives, + implicit: false, + } + skip_token(parser) + + } else { + // Parse the stream end. + parser.state = yaml_PARSE_END_STATE + *event = yaml_event_t{ + typ: yaml_STREAM_END_EVENT, + start_mark: token.start_mark, + end_mark: token.end_mark, + } + skip_token(parser) + } + + return true +} + +// Parse the productions: +// explicit_document ::= DIRECTIVE* DOCUMENT-START block_node? 
DOCUMENT-END* +// *********** +// +func yaml_parser_parse_document_content(parser *yaml_parser_t, event *yaml_event_t) bool { + token := peek_token(parser) + if token == nil { + return false + } + if token.typ == yaml_VERSION_DIRECTIVE_TOKEN || + token.typ == yaml_TAG_DIRECTIVE_TOKEN || + token.typ == yaml_DOCUMENT_START_TOKEN || + token.typ == yaml_DOCUMENT_END_TOKEN || + token.typ == yaml_STREAM_END_TOKEN { + parser.state = parser.states[len(parser.states)-1] + parser.states = parser.states[:len(parser.states)-1] + return yaml_parser_process_empty_scalar(parser, event, + token.start_mark) + } + return yaml_parser_parse_node(parser, event, true, false) +} + +// Parse the productions: +// implicit_document ::= block_node DOCUMENT-END* +// ************* +// explicit_document ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END* +// +func yaml_parser_parse_document_end(parser *yaml_parser_t, event *yaml_event_t) bool { + token := peek_token(parser) + if token == nil { + return false + } + + start_mark := token.start_mark + end_mark := token.start_mark + + implicit := true + if token.typ == yaml_DOCUMENT_END_TOKEN { + end_mark = token.end_mark + skip_token(parser) + implicit = false + } + + parser.tag_directives = parser.tag_directives[:0] + + parser.state = yaml_PARSE_DOCUMENT_START_STATE + *event = yaml_event_t{ + typ: yaml_DOCUMENT_END_EVENT, + start_mark: start_mark, + end_mark: end_mark, + implicit: implicit, + } + return true +} + +// Parse the productions: +// block_node_or_indentless_sequence ::= +// ALIAS +// ***** +// | properties (block_content | indentless_block_sequence)? +// ********** * +// | block_content | indentless_block_sequence +// * +// block_node ::= ALIAS +// ***** +// | properties block_content? +// ********** * +// | block_content +// * +// flow_node ::= ALIAS +// ***** +// | properties flow_content? +// ********** * +// | flow_content +// * +// properties ::= TAG ANCHOR? | ANCHOR TAG? 
+// ************************* +// block_content ::= block_collection | flow_collection | SCALAR +// ****** +// flow_content ::= flow_collection | SCALAR +// ****** +func yaml_parser_parse_node(parser *yaml_parser_t, event *yaml_event_t, block, indentless_sequence bool) bool { + //defer trace("yaml_parser_parse_node", "block:", block, "indentless_sequence:", indentless_sequence)() + + token := peek_token(parser) + if token == nil { + return false + } + + if token.typ == yaml_ALIAS_TOKEN { + parser.state = parser.states[len(parser.states)-1] + parser.states = parser.states[:len(parser.states)-1] + *event = yaml_event_t{ + typ: yaml_ALIAS_EVENT, + start_mark: token.start_mark, + end_mark: token.end_mark, + anchor: token.value, + } + skip_token(parser) + return true + } + + start_mark := token.start_mark + end_mark := token.start_mark + + var tag_token bool + var tag_handle, tag_suffix, anchor []byte + var tag_mark yaml_mark_t + if token.typ == yaml_ANCHOR_TOKEN { + anchor = token.value + start_mark = token.start_mark + end_mark = token.end_mark + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + if token.typ == yaml_TAG_TOKEN { + tag_token = true + tag_handle = token.value + tag_suffix = token.suffix + tag_mark = token.start_mark + end_mark = token.end_mark + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + } + } else if token.typ == yaml_TAG_TOKEN { + tag_token = true + tag_handle = token.value + tag_suffix = token.suffix + start_mark = token.start_mark + tag_mark = token.start_mark + end_mark = token.end_mark + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + if token.typ == yaml_ANCHOR_TOKEN { + anchor = token.value + end_mark = token.end_mark + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + } + } + + var tag []byte + if tag_token { + if len(tag_handle) == 0 { + tag = tag_suffix + tag_suffix = nil + } else { + for i := range parser.tag_directives { + if bytes.Equal(parser.tag_directives[i].handle, tag_handle) { + tag = append([]byte(nil), parser.tag_directives[i].prefix...) + tag = append(tag, tag_suffix...) 
+ break + } + } + if len(tag) == 0 { + yaml_parser_set_parser_error_context(parser, + "while parsing a node", start_mark, + "found undefined tag handle", tag_mark) + return false + } + } + } + + implicit := len(tag) == 0 + if indentless_sequence && token.typ == yaml_BLOCK_ENTRY_TOKEN { + end_mark = token.end_mark + parser.state = yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE + *event = yaml_event_t{ + typ: yaml_SEQUENCE_START_EVENT, + start_mark: start_mark, + end_mark: end_mark, + anchor: anchor, + tag: tag, + implicit: implicit, + style: yaml_style_t(yaml_BLOCK_SEQUENCE_STYLE), + } + return true + } + if token.typ == yaml_SCALAR_TOKEN { + var plain_implicit, quoted_implicit bool + end_mark = token.end_mark + if (len(tag) == 0 && token.style == yaml_PLAIN_SCALAR_STYLE) || (len(tag) == 1 && tag[0] == '!') { + plain_implicit = true + } else if len(tag) == 0 { + quoted_implicit = true + } + parser.state = parser.states[len(parser.states)-1] + parser.states = parser.states[:len(parser.states)-1] + + *event = yaml_event_t{ + typ: yaml_SCALAR_EVENT, + start_mark: start_mark, + end_mark: end_mark, + anchor: anchor, + tag: tag, + value: token.value, + implicit: plain_implicit, + quoted_implicit: quoted_implicit, + style: yaml_style_t(token.style), + } + skip_token(parser) + return true + } + if token.typ == yaml_FLOW_SEQUENCE_START_TOKEN { + // [Go] Some of the events below can be merged as they differ only on style. + end_mark = token.end_mark + parser.state = yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE + *event = yaml_event_t{ + typ: yaml_SEQUENCE_START_EVENT, + start_mark: start_mark, + end_mark: end_mark, + anchor: anchor, + tag: tag, + implicit: implicit, + style: yaml_style_t(yaml_FLOW_SEQUENCE_STYLE), + } + return true + } + if token.typ == yaml_FLOW_MAPPING_START_TOKEN { + end_mark = token.end_mark + parser.state = yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE + *event = yaml_event_t{ + typ: yaml_MAPPING_START_EVENT, + start_mark: start_mark, + end_mark: end_mark, + anchor: anchor, + tag: tag, + implicit: implicit, + style: yaml_style_t(yaml_FLOW_MAPPING_STYLE), + } + return true + } + if block && token.typ == yaml_BLOCK_SEQUENCE_START_TOKEN { + end_mark = token.end_mark + parser.state = yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE + *event = yaml_event_t{ + typ: yaml_SEQUENCE_START_EVENT, + start_mark: start_mark, + end_mark: end_mark, + anchor: anchor, + tag: tag, + implicit: implicit, + style: yaml_style_t(yaml_BLOCK_SEQUENCE_STYLE), + } + return true + } + if block && token.typ == yaml_BLOCK_MAPPING_START_TOKEN { + end_mark = token.end_mark + parser.state = yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE + *event = yaml_event_t{ + typ: yaml_MAPPING_START_EVENT, + start_mark: start_mark, + end_mark: end_mark, + anchor: anchor, + tag: tag, + implicit: implicit, + style: yaml_style_t(yaml_BLOCK_MAPPING_STYLE), + } + return true + } + if len(anchor) > 0 || len(tag) > 0 { + parser.state = parser.states[len(parser.states)-1] + parser.states = parser.states[:len(parser.states)-1] + + *event = yaml_event_t{ + typ: yaml_SCALAR_EVENT, + start_mark: start_mark, + end_mark: end_mark, + anchor: anchor, + tag: tag, + implicit: implicit, + quoted_implicit: false, + style: yaml_style_t(yaml_PLAIN_SCALAR_STYLE), + } + return true + } + + context := "while parsing a flow node" + if block { + context = "while parsing a block node" + } + yaml_parser_set_parser_error_context(parser, context, start_mark, + "did not find expected node content", token.start_mark) + return false +} + +// Parse the productions: +// 
block_sequence ::= BLOCK-SEQUENCE-START (BLOCK-ENTRY block_node?)* BLOCK-END +// ******************** *********** * ********* +// +func yaml_parser_parse_block_sequence_entry(parser *yaml_parser_t, event *yaml_event_t, first bool) bool { + if first { + token := peek_token(parser) + parser.marks = append(parser.marks, token.start_mark) + skip_token(parser) + } + + token := peek_token(parser) + if token == nil { + return false + } + + if token.typ == yaml_BLOCK_ENTRY_TOKEN { + mark := token.end_mark + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + if token.typ != yaml_BLOCK_ENTRY_TOKEN && token.typ != yaml_BLOCK_END_TOKEN { + parser.states = append(parser.states, yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE) + return yaml_parser_parse_node(parser, event, true, false) + } else { + parser.state = yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE + return yaml_parser_process_empty_scalar(parser, event, mark) + } + } + if token.typ == yaml_BLOCK_END_TOKEN { + parser.state = parser.states[len(parser.states)-1] + parser.states = parser.states[:len(parser.states)-1] + parser.marks = parser.marks[:len(parser.marks)-1] + + *event = yaml_event_t{ + typ: yaml_SEQUENCE_END_EVENT, + start_mark: token.start_mark, + end_mark: token.end_mark, + } + + skip_token(parser) + return true + } + + context_mark := parser.marks[len(parser.marks)-1] + parser.marks = parser.marks[:len(parser.marks)-1] + return yaml_parser_set_parser_error_context(parser, + "while parsing a block collection", context_mark, + "did not find expected '-' indicator", token.start_mark) +} + +// Parse the productions: +// indentless_sequence ::= (BLOCK-ENTRY block_node?)+ +// *********** * +func yaml_parser_parse_indentless_sequence_entry(parser *yaml_parser_t, event *yaml_event_t) bool { + token := peek_token(parser) + if token == nil { + return false + } + + if token.typ == yaml_BLOCK_ENTRY_TOKEN { + mark := token.end_mark + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + if token.typ != yaml_BLOCK_ENTRY_TOKEN && + token.typ != yaml_KEY_TOKEN && + token.typ != yaml_VALUE_TOKEN && + token.typ != yaml_BLOCK_END_TOKEN { + parser.states = append(parser.states, yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE) + return yaml_parser_parse_node(parser, event, true, false) + } + parser.state = yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE + return yaml_parser_process_empty_scalar(parser, event, mark) + } + parser.state = parser.states[len(parser.states)-1] + parser.states = parser.states[:len(parser.states)-1] + + *event = yaml_event_t{ + typ: yaml_SEQUENCE_END_EVENT, + start_mark: token.start_mark, + end_mark: token.start_mark, // [Go] Shouldn't this be token.end_mark? + } + return true +} + +// Parse the productions: +// block_mapping ::= BLOCK-MAPPING_START +// ******************* +// ((KEY block_node_or_indentless_sequence?)? 
+// *** * +// (VALUE block_node_or_indentless_sequence?)?)* +// +// BLOCK-END +// ********* +// +func yaml_parser_parse_block_mapping_key(parser *yaml_parser_t, event *yaml_event_t, first bool) bool { + if first { + token := peek_token(parser) + parser.marks = append(parser.marks, token.start_mark) + skip_token(parser) + } + + token := peek_token(parser) + if token == nil { + return false + } + + if token.typ == yaml_KEY_TOKEN { + mark := token.end_mark + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + if token.typ != yaml_KEY_TOKEN && + token.typ != yaml_VALUE_TOKEN && + token.typ != yaml_BLOCK_END_TOKEN { + parser.states = append(parser.states, yaml_PARSE_BLOCK_MAPPING_VALUE_STATE) + return yaml_parser_parse_node(parser, event, true, true) + } else { + parser.state = yaml_PARSE_BLOCK_MAPPING_VALUE_STATE + return yaml_parser_process_empty_scalar(parser, event, mark) + } + } else if token.typ == yaml_BLOCK_END_TOKEN { + parser.state = parser.states[len(parser.states)-1] + parser.states = parser.states[:len(parser.states)-1] + parser.marks = parser.marks[:len(parser.marks)-1] + *event = yaml_event_t{ + typ: yaml_MAPPING_END_EVENT, + start_mark: token.start_mark, + end_mark: token.end_mark, + } + skip_token(parser) + return true + } + + context_mark := parser.marks[len(parser.marks)-1] + parser.marks = parser.marks[:len(parser.marks)-1] + return yaml_parser_set_parser_error_context(parser, + "while parsing a block mapping", context_mark, + "did not find expected key", token.start_mark) +} + +// Parse the productions: +// block_mapping ::= BLOCK-MAPPING_START +// +// ((KEY block_node_or_indentless_sequence?)? +// +// (VALUE block_node_or_indentless_sequence?)?)* +// ***** * +// BLOCK-END +// +// +func yaml_parser_parse_block_mapping_value(parser *yaml_parser_t, event *yaml_event_t) bool { + token := peek_token(parser) + if token == nil { + return false + } + if token.typ == yaml_VALUE_TOKEN { + mark := token.end_mark + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + if token.typ != yaml_KEY_TOKEN && + token.typ != yaml_VALUE_TOKEN && + token.typ != yaml_BLOCK_END_TOKEN { + parser.states = append(parser.states, yaml_PARSE_BLOCK_MAPPING_KEY_STATE) + return yaml_parser_parse_node(parser, event, true, true) + } + parser.state = yaml_PARSE_BLOCK_MAPPING_KEY_STATE + return yaml_parser_process_empty_scalar(parser, event, mark) + } + parser.state = yaml_PARSE_BLOCK_MAPPING_KEY_STATE + return yaml_parser_process_empty_scalar(parser, event, token.start_mark) +} + +// Parse the productions: +// flow_sequence ::= FLOW-SEQUENCE-START +// ******************* +// (flow_sequence_entry FLOW-ENTRY)* +// * ********** +// flow_sequence_entry? +// * +// FLOW-SEQUENCE-END +// ***************** +// flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? 
+// * +// +func yaml_parser_parse_flow_sequence_entry(parser *yaml_parser_t, event *yaml_event_t, first bool) bool { + if first { + token := peek_token(parser) + parser.marks = append(parser.marks, token.start_mark) + skip_token(parser) + } + token := peek_token(parser) + if token == nil { + return false + } + if token.typ != yaml_FLOW_SEQUENCE_END_TOKEN { + if !first { + if token.typ == yaml_FLOW_ENTRY_TOKEN { + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + } else { + context_mark := parser.marks[len(parser.marks)-1] + parser.marks = parser.marks[:len(parser.marks)-1] + return yaml_parser_set_parser_error_context(parser, + "while parsing a flow sequence", context_mark, + "did not find expected ',' or ']'", token.start_mark) + } + } + + if token.typ == yaml_KEY_TOKEN { + parser.state = yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE + *event = yaml_event_t{ + typ: yaml_MAPPING_START_EVENT, + start_mark: token.start_mark, + end_mark: token.end_mark, + implicit: true, + style: yaml_style_t(yaml_FLOW_MAPPING_STYLE), + } + skip_token(parser) + return true + } else if token.typ != yaml_FLOW_SEQUENCE_END_TOKEN { + parser.states = append(parser.states, yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE) + return yaml_parser_parse_node(parser, event, false, false) + } + } + + parser.state = parser.states[len(parser.states)-1] + parser.states = parser.states[:len(parser.states)-1] + parser.marks = parser.marks[:len(parser.marks)-1] + + *event = yaml_event_t{ + typ: yaml_SEQUENCE_END_EVENT, + start_mark: token.start_mark, + end_mark: token.end_mark, + } + + skip_token(parser) + return true +} + +// +// Parse the productions: +// flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? +// *** * +// +func yaml_parser_parse_flow_sequence_entry_mapping_key(parser *yaml_parser_t, event *yaml_event_t) bool { + token := peek_token(parser) + if token == nil { + return false + } + if token.typ != yaml_VALUE_TOKEN && + token.typ != yaml_FLOW_ENTRY_TOKEN && + token.typ != yaml_FLOW_SEQUENCE_END_TOKEN { + parser.states = append(parser.states, yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE) + return yaml_parser_parse_node(parser, event, false, false) + } + mark := token.end_mark + skip_token(parser) + parser.state = yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE + return yaml_parser_process_empty_scalar(parser, event, mark) +} + +// Parse the productions: +// flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? +// ***** * +// +func yaml_parser_parse_flow_sequence_entry_mapping_value(parser *yaml_parser_t, event *yaml_event_t) bool { + token := peek_token(parser) + if token == nil { + return false + } + if token.typ == yaml_VALUE_TOKEN { + skip_token(parser) + token := peek_token(parser) + if token == nil { + return false + } + if token.typ != yaml_FLOW_ENTRY_TOKEN && token.typ != yaml_FLOW_SEQUENCE_END_TOKEN { + parser.states = append(parser.states, yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE) + return yaml_parser_parse_node(parser, event, false, false) + } + } + parser.state = yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE + return yaml_parser_process_empty_scalar(parser, event, token.start_mark) +} + +// Parse the productions: +// flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? 
+// * +// +func yaml_parser_parse_flow_sequence_entry_mapping_end(parser *yaml_parser_t, event *yaml_event_t) bool { + token := peek_token(parser) + if token == nil { + return false + } + parser.state = yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE + *event = yaml_event_t{ + typ: yaml_MAPPING_END_EVENT, + start_mark: token.start_mark, + end_mark: token.start_mark, // [Go] Shouldn't this be end_mark? + } + return true +} + +// Parse the productions: +// flow_mapping ::= FLOW-MAPPING-START +// ****************** +// (flow_mapping_entry FLOW-ENTRY)* +// * ********** +// flow_mapping_entry? +// ****************** +// FLOW-MAPPING-END +// **************** +// flow_mapping_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? +// * *** * +// +func yaml_parser_parse_flow_mapping_key(parser *yaml_parser_t, event *yaml_event_t, first bool) bool { + if first { + token := peek_token(parser) + parser.marks = append(parser.marks, token.start_mark) + skip_token(parser) + } + + token := peek_token(parser) + if token == nil { + return false + } + + if token.typ != yaml_FLOW_MAPPING_END_TOKEN { + if !first { + if token.typ == yaml_FLOW_ENTRY_TOKEN { + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + } else { + context_mark := parser.marks[len(parser.marks)-1] + parser.marks = parser.marks[:len(parser.marks)-1] + return yaml_parser_set_parser_error_context(parser, + "while parsing a flow mapping", context_mark, + "did not find expected ',' or '}'", token.start_mark) + } + } + + if token.typ == yaml_KEY_TOKEN { + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + if token.typ != yaml_VALUE_TOKEN && + token.typ != yaml_FLOW_ENTRY_TOKEN && + token.typ != yaml_FLOW_MAPPING_END_TOKEN { + parser.states = append(parser.states, yaml_PARSE_FLOW_MAPPING_VALUE_STATE) + return yaml_parser_parse_node(parser, event, false, false) + } else { + parser.state = yaml_PARSE_FLOW_MAPPING_VALUE_STATE + return yaml_parser_process_empty_scalar(parser, event, token.start_mark) + } + } else if token.typ != yaml_FLOW_MAPPING_END_TOKEN { + parser.states = append(parser.states, yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE) + return yaml_parser_parse_node(parser, event, false, false) + } + } + + parser.state = parser.states[len(parser.states)-1] + parser.states = parser.states[:len(parser.states)-1] + parser.marks = parser.marks[:len(parser.marks)-1] + *event = yaml_event_t{ + typ: yaml_MAPPING_END_EVENT, + start_mark: token.start_mark, + end_mark: token.end_mark, + } + skip_token(parser) + return true +} + +// Parse the productions: +// flow_mapping_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? +// * ***** * +// +func yaml_parser_parse_flow_mapping_value(parser *yaml_parser_t, event *yaml_event_t, empty bool) bool { + token := peek_token(parser) + if token == nil { + return false + } + if empty { + parser.state = yaml_PARSE_FLOW_MAPPING_KEY_STATE + return yaml_parser_process_empty_scalar(parser, event, token.start_mark) + } + if token.typ == yaml_VALUE_TOKEN { + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + if token.typ != yaml_FLOW_ENTRY_TOKEN && token.typ != yaml_FLOW_MAPPING_END_TOKEN { + parser.states = append(parser.states, yaml_PARSE_FLOW_MAPPING_KEY_STATE) + return yaml_parser_parse_node(parser, event, false, false) + } + } + parser.state = yaml_PARSE_FLOW_MAPPING_KEY_STATE + return yaml_parser_process_empty_scalar(parser, event, token.start_mark) +} + +// Generate an empty scalar event. 
+func yaml_parser_process_empty_scalar(parser *yaml_parser_t, event *yaml_event_t, mark yaml_mark_t) bool { + *event = yaml_event_t{ + typ: yaml_SCALAR_EVENT, + start_mark: mark, + end_mark: mark, + value: nil, // Empty + implicit: true, + style: yaml_style_t(yaml_PLAIN_SCALAR_STYLE), + } + return true +} + +var default_tag_directives = []yaml_tag_directive_t{ + {[]byte("!"), []byte("!")}, + {[]byte("!!"), []byte("tag:yaml.org,2002:")}, +} + +// Parse directives. +func yaml_parser_process_directives(parser *yaml_parser_t, + version_directive_ref **yaml_version_directive_t, + tag_directives_ref *[]yaml_tag_directive_t) bool { + + var version_directive *yaml_version_directive_t + var tag_directives []yaml_tag_directive_t + + token := peek_token(parser) + if token == nil { + return false + } + + for token.typ == yaml_VERSION_DIRECTIVE_TOKEN || token.typ == yaml_TAG_DIRECTIVE_TOKEN { + if token.typ == yaml_VERSION_DIRECTIVE_TOKEN { + if version_directive != nil { + yaml_parser_set_parser_error(parser, + "found duplicate %YAML directive", token.start_mark) + return false + } + if token.major != 1 || token.minor != 1 { + yaml_parser_set_parser_error(parser, + "found incompatible YAML document", token.start_mark) + return false + } + version_directive = &yaml_version_directive_t{ + major: token.major, + minor: token.minor, + } + } else if token.typ == yaml_TAG_DIRECTIVE_TOKEN { + value := yaml_tag_directive_t{ + handle: token.value, + prefix: token.prefix, + } + if !yaml_parser_append_tag_directive(parser, value, false, token.start_mark) { + return false + } + tag_directives = append(tag_directives, value) + } + + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + } + + for i := range default_tag_directives { + if !yaml_parser_append_tag_directive(parser, default_tag_directives[i], true, token.start_mark) { + return false + } + } + + if version_directive_ref != nil { + *version_directive_ref = version_directive + } + if tag_directives_ref != nil { + *tag_directives_ref = tag_directives + } + return true +} + +// Append a tag directive to the directives stack. +func yaml_parser_append_tag_directive(parser *yaml_parser_t, value yaml_tag_directive_t, allow_duplicates bool, mark yaml_mark_t) bool { + for i := range parser.tag_directives { + if bytes.Equal(value.handle, parser.tag_directives[i].handle) { + if allow_duplicates { + return true + } + return yaml_parser_set_parser_error(parser, "found duplicate %TAG directive", mark) + } + } + + // [Go] I suspect the copy is unnecessary. This was likely done + // because there was no way to track ownership of the data. + value_copy := yaml_tag_directive_t{ + handle: make([]byte, len(value.handle)), + prefix: make([]byte, len(value.prefix)), + } + copy(value_copy.handle, value.handle) + copy(value_copy.prefix, value.prefix) + parser.tag_directives = append(parser.tag_directives, value_copy) + return true +} diff --git a/vendor/github.com/docker/distribution/vendor/gopkg.in/yaml.v2/readerc.go b/vendor/github.com/docker/distribution/vendor/gopkg.in/yaml.v2/readerc.go new file mode 100644 index 000000000..d5fb09727 --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/gopkg.in/yaml.v2/readerc.go @@ -0,0 +1,391 @@ +package yaml + +import ( + "io" +) + +// Set the reader error and return 0. 
+func yaml_parser_set_reader_error(parser *yaml_parser_t, problem string, offset int, value int) bool { + parser.error = yaml_READER_ERROR + parser.problem = problem + parser.problem_offset = offset + parser.problem_value = value + return false +} + +// Byte order marks. +const ( + bom_UTF8 = "\xef\xbb\xbf" + bom_UTF16LE = "\xff\xfe" + bom_UTF16BE = "\xfe\xff" +) + +// Determine the input stream encoding by checking the BOM symbol. If no BOM is +// found, the UTF-8 encoding is assumed. Return 1 on success, 0 on failure. +func yaml_parser_determine_encoding(parser *yaml_parser_t) bool { + // Ensure that we had enough bytes in the raw buffer. + for !parser.eof && len(parser.raw_buffer)-parser.raw_buffer_pos < 3 { + if !yaml_parser_update_raw_buffer(parser) { + return false + } + } + + // Determine the encoding. + buf := parser.raw_buffer + pos := parser.raw_buffer_pos + avail := len(buf) - pos + if avail >= 2 && buf[pos] == bom_UTF16LE[0] && buf[pos+1] == bom_UTF16LE[1] { + parser.encoding = yaml_UTF16LE_ENCODING + parser.raw_buffer_pos += 2 + parser.offset += 2 + } else if avail >= 2 && buf[pos] == bom_UTF16BE[0] && buf[pos+1] == bom_UTF16BE[1] { + parser.encoding = yaml_UTF16BE_ENCODING + parser.raw_buffer_pos += 2 + parser.offset += 2 + } else if avail >= 3 && buf[pos] == bom_UTF8[0] && buf[pos+1] == bom_UTF8[1] && buf[pos+2] == bom_UTF8[2] { + parser.encoding = yaml_UTF8_ENCODING + parser.raw_buffer_pos += 3 + parser.offset += 3 + } else { + parser.encoding = yaml_UTF8_ENCODING + } + return true +} + +// Update the raw buffer. +func yaml_parser_update_raw_buffer(parser *yaml_parser_t) bool { + size_read := 0 + + // Return if the raw buffer is full. + if parser.raw_buffer_pos == 0 && len(parser.raw_buffer) == cap(parser.raw_buffer) { + return true + } + + // Return on EOF. + if parser.eof { + return true + } + + // Move the remaining bytes in the raw buffer to the beginning. + if parser.raw_buffer_pos > 0 && parser.raw_buffer_pos < len(parser.raw_buffer) { + copy(parser.raw_buffer, parser.raw_buffer[parser.raw_buffer_pos:]) + } + parser.raw_buffer = parser.raw_buffer[:len(parser.raw_buffer)-parser.raw_buffer_pos] + parser.raw_buffer_pos = 0 + + // Call the read handler to fill the buffer. + size_read, err := parser.read_handler(parser, parser.raw_buffer[len(parser.raw_buffer):cap(parser.raw_buffer)]) + parser.raw_buffer = parser.raw_buffer[:len(parser.raw_buffer)+size_read] + if err == io.EOF { + parser.eof = true + } else if err != nil { + return yaml_parser_set_reader_error(parser, "input error: "+err.Error(), parser.offset, -1) + } + return true +} + +// Ensure that the buffer contains at least `length` characters. +// Return true on success, false on failure. +// +// The length is supposed to be significantly less that the buffer size. +func yaml_parser_update_buffer(parser *yaml_parser_t, length int) bool { + if parser.read_handler == nil { + panic("read handler must be set") + } + + // If the EOF flag is set and the raw buffer is empty, do nothing. + if parser.eof && parser.raw_buffer_pos == len(parser.raw_buffer) { + return true + } + + // Return if the buffer contains enough characters. + if parser.unread >= length { + return true + } + + // Determine the input encoding if it is not known yet. + if parser.encoding == yaml_ANY_ENCODING { + if !yaml_parser_determine_encoding(parser) { + return false + } + } + + // Move the unread characters to the beginning of the buffer. 
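+ // parser.buffer holds characters already transcoded to UTF-8, while
+ // raw_buffer still holds encoded input bytes. Everything from buffer_pos
+ // onwards has not been consumed by the scanner yet, so it is shifted to
+ // the front before further input is decoded behind it.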
+ buffer_len := len(parser.buffer) + if parser.buffer_pos > 0 && parser.buffer_pos < buffer_len { + copy(parser.buffer, parser.buffer[parser.buffer_pos:]) + buffer_len -= parser.buffer_pos + parser.buffer_pos = 0 + } else if parser.buffer_pos == buffer_len { + buffer_len = 0 + parser.buffer_pos = 0 + } + + // Open the whole buffer for writing, and cut it before returning. + parser.buffer = parser.buffer[:cap(parser.buffer)] + + // Fill the buffer until it has enough characters. + first := true + for parser.unread < length { + + // Fill the raw buffer if necessary. + if !first || parser.raw_buffer_pos == len(parser.raw_buffer) { + if !yaml_parser_update_raw_buffer(parser) { + parser.buffer = parser.buffer[:buffer_len] + return false + } + } + first = false + + // Decode the raw buffer. + inner: + for parser.raw_buffer_pos != len(parser.raw_buffer) { + var value rune + var width int + + raw_unread := len(parser.raw_buffer) - parser.raw_buffer_pos + + // Decode the next character. + switch parser.encoding { + case yaml_UTF8_ENCODING: + // Decode a UTF-8 character. Check RFC 3629 + // (http://www.ietf.org/rfc/rfc3629.txt) for more details. + // + // The following table (taken from the RFC) is used for + // decoding. + // + // Char. number range | UTF-8 octet sequence + // (hexadecimal) | (binary) + // --------------------+------------------------------------ + // 0000 0000-0000 007F | 0xxxxxxx + // 0000 0080-0000 07FF | 110xxxxx 10xxxxxx + // 0000 0800-0000 FFFF | 1110xxxx 10xxxxxx 10xxxxxx + // 0001 0000-0010 FFFF | 11110xxx 10xxxxxx 10xxxxxx 10xxxxxx + // + // Additionally, the characters in the range 0xD800-0xDFFF + // are prohibited as they are reserved for use with UTF-16 + // surrogate pairs. + + // Determine the length of the UTF-8 sequence. + octet := parser.raw_buffer[parser.raw_buffer_pos] + switch { + case octet&0x80 == 0x00: + width = 1 + case octet&0xE0 == 0xC0: + width = 2 + case octet&0xF0 == 0xE0: + width = 3 + case octet&0xF8 == 0xF0: + width = 4 + default: + // The leading octet is invalid. + return yaml_parser_set_reader_error(parser, + "invalid leading UTF-8 octet", + parser.offset, int(octet)) + } + + // Check if the raw buffer contains an incomplete character. + if width > raw_unread { + if parser.eof { + return yaml_parser_set_reader_error(parser, + "incomplete UTF-8 octet sequence", + parser.offset, -1) + } + break inner + } + + // Decode the leading octet. + switch { + case octet&0x80 == 0x00: + value = rune(octet & 0x7F) + case octet&0xE0 == 0xC0: + value = rune(octet & 0x1F) + case octet&0xF0 == 0xE0: + value = rune(octet & 0x0F) + case octet&0xF8 == 0xF0: + value = rune(octet & 0x07) + default: + value = 0 + } + + // Check and decode the trailing octets. + for k := 1; k < width; k++ { + octet = parser.raw_buffer[parser.raw_buffer_pos+k] + + // Check if the octet is valid. + if (octet & 0xC0) != 0x80 { + return yaml_parser_set_reader_error(parser, + "invalid trailing UTF-8 octet", + parser.offset+k, int(octet)) + } + + // Decode the octet. + value = (value << 6) + rune(octet&0x3F) + } + + // Check the length of the sequence against the value. + switch { + case width == 1: + case width == 2 && value >= 0x80: + case width == 3 && value >= 0x800: + case width == 4 && value >= 0x10000: + default: + return yaml_parser_set_reader_error(parser, + "invalid length of a UTF-8 sequence", + parser.offset, -1) + } + + // Check the range of the value. 
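+ // Code points U+D800-U+DFFF are reserved for UTF-16 surrogate pairs and
+ // must never appear as decoded characters; values above U+10FFFF fall
+ // outside the Unicode range.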
+ if value >= 0xD800 && value <= 0xDFFF || value > 0x10FFFF { + return yaml_parser_set_reader_error(parser, + "invalid Unicode character", + parser.offset, int(value)) + } + + case yaml_UTF16LE_ENCODING, yaml_UTF16BE_ENCODING: + var low, high int + if parser.encoding == yaml_UTF16LE_ENCODING { + low, high = 0, 1 + } else { + high, low = 1, 0 + } + + // The UTF-16 encoding is not as simple as one might + // naively think. Check RFC 2781 + // (http://www.ietf.org/rfc/rfc2781.txt). + // + // Normally, two subsequent bytes describe a Unicode + // character. However a special technique (called a + // surrogate pair) is used for specifying character + // values larger than 0xFFFF. + // + // A surrogate pair consists of two pseudo-characters: + // high surrogate area (0xD800-0xDBFF) + // low surrogate area (0xDC00-0xDFFF) + // + // The following formulas are used for decoding + // and encoding characters using surrogate pairs: + // + // U = U' + 0x10000 (0x01 00 00 <= U <= 0x10 FF FF) + // U' = yyyyyyyyyyxxxxxxxxxx (0 <= U' <= 0x0F FF FF) + // W1 = 110110yyyyyyyyyy + // W2 = 110111xxxxxxxxxx + // + // where U is the character value, W1 is the high surrogate + // area, W2 is the low surrogate area. + + // Check for incomplete UTF-16 character. + if raw_unread < 2 { + if parser.eof { + return yaml_parser_set_reader_error(parser, + "incomplete UTF-16 character", + parser.offset, -1) + } + break inner + } + + // Get the character. + value = rune(parser.raw_buffer[parser.raw_buffer_pos+low]) + + (rune(parser.raw_buffer[parser.raw_buffer_pos+high]) << 8) + + // Check for unexpected low surrogate area. + if value&0xFC00 == 0xDC00 { + return yaml_parser_set_reader_error(parser, + "unexpected low surrogate area", + parser.offset, int(value)) + } + + // Check for a high surrogate area. + if value&0xFC00 == 0xD800 { + width = 4 + + // Check for incomplete surrogate pair. + if raw_unread < 4 { + if parser.eof { + return yaml_parser_set_reader_error(parser, + "incomplete UTF-16 surrogate pair", + parser.offset, -1) + } + break inner + } + + // Get the next character. + value2 := rune(parser.raw_buffer[parser.raw_buffer_pos+low+2]) + + (rune(parser.raw_buffer[parser.raw_buffer_pos+high+2]) << 8) + + // Check for a low surrogate area. + if value2&0xFC00 != 0xDC00 { + return yaml_parser_set_reader_error(parser, + "expected low surrogate area", + parser.offset+2, int(value2)) + } + + // Generate the value of the surrogate pair. + value = 0x10000 + ((value & 0x3FF) << 10) + (value2 & 0x3FF) + } else { + width = 2 + } + + default: + panic("impossible") + } + + // Check if the character is in the allowed range: + // #x9 | #xA | #xD | [#x20-#x7E] (8 bit) + // | #x85 | [#xA0-#xD7FF] | [#xE000-#xFFFD] (16 bit) + // | [#x10000-#x10FFFF] (32 bit) + switch { + case value == 0x09: + case value == 0x0A: + case value == 0x0D: + case value >= 0x20 && value <= 0x7E: + case value == 0x85: + case value >= 0xA0 && value <= 0xD7FF: + case value >= 0xE000 && value <= 0xFFFD: + case value >= 0x10000 && value <= 0x10FFFF: + default: + return yaml_parser_set_reader_error(parser, + "control characters are not allowed", + parser.offset, int(value)) + } + + // Move the raw pointers. + parser.raw_buffer_pos += width + parser.offset += width + + // Finally put the character into the buffer. + if value <= 0x7F { + // 0000 0000-0000 007F . 0xxxxxxx + parser.buffer[buffer_len+0] = byte(value) + } else if value <= 0x7FF { + // 0000 0080-0000 07FF . 
110xxxxx 10xxxxxx + parser.buffer[buffer_len+0] = byte(0xC0 + (value >> 6)) + parser.buffer[buffer_len+1] = byte(0x80 + (value & 0x3F)) + } else if value <= 0xFFFF { + // 0000 0800-0000 FFFF . 1110xxxx 10xxxxxx 10xxxxxx + parser.buffer[buffer_len+0] = byte(0xE0 + (value >> 12)) + parser.buffer[buffer_len+1] = byte(0x80 + ((value >> 6) & 0x3F)) + parser.buffer[buffer_len+2] = byte(0x80 + (value & 0x3F)) + } else { + // 0001 0000-0010 FFFF . 11110xxx 10xxxxxx 10xxxxxx 10xxxxxx + parser.buffer[buffer_len+0] = byte(0xF0 + (value >> 18)) + parser.buffer[buffer_len+1] = byte(0x80 + ((value >> 12) & 0x3F)) + parser.buffer[buffer_len+2] = byte(0x80 + ((value >> 6) & 0x3F)) + parser.buffer[buffer_len+3] = byte(0x80 + (value & 0x3F)) + } + buffer_len += width + + parser.unread++ + } + + // On EOF, put NUL into the buffer and return. + if parser.eof { + parser.buffer[buffer_len] = 0 + buffer_len++ + parser.unread++ + break + } + } + parser.buffer = parser.buffer[:buffer_len] + return true +} diff --git a/vendor/github.com/docker/distribution/vendor/gopkg.in/yaml.v2/resolve.go b/vendor/github.com/docker/distribution/vendor/gopkg.in/yaml.v2/resolve.go new file mode 100644 index 000000000..93a863274 --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/gopkg.in/yaml.v2/resolve.go @@ -0,0 +1,203 @@ +package yaml + +import ( + "encoding/base64" + "math" + "strconv" + "strings" + "unicode/utf8" +) + +type resolveMapItem struct { + value interface{} + tag string +} + +var resolveTable = make([]byte, 256) +var resolveMap = make(map[string]resolveMapItem) + +func init() { + t := resolveTable + t[int('+')] = 'S' // Sign + t[int('-')] = 'S' + for _, c := range "0123456789" { + t[int(c)] = 'D' // Digit + } + for _, c := range "yYnNtTfFoO~" { + t[int(c)] = 'M' // In map + } + t[int('.')] = '.' // Float (potentially in map) + + var resolveMapList = []struct { + v interface{} + tag string + l []string + }{ + {true, yaml_BOOL_TAG, []string{"y", "Y", "yes", "Yes", "YES"}}, + {true, yaml_BOOL_TAG, []string{"true", "True", "TRUE"}}, + {true, yaml_BOOL_TAG, []string{"on", "On", "ON"}}, + {false, yaml_BOOL_TAG, []string{"n", "N", "no", "No", "NO"}}, + {false, yaml_BOOL_TAG, []string{"false", "False", "FALSE"}}, + {false, yaml_BOOL_TAG, []string{"off", "Off", "OFF"}}, + {nil, yaml_NULL_TAG, []string{"", "~", "null", "Null", "NULL"}}, + {math.NaN(), yaml_FLOAT_TAG, []string{".nan", ".NaN", ".NAN"}}, + {math.Inf(+1), yaml_FLOAT_TAG, []string{".inf", ".Inf", ".INF"}}, + {math.Inf(+1), yaml_FLOAT_TAG, []string{"+.inf", "+.Inf", "+.INF"}}, + {math.Inf(-1), yaml_FLOAT_TAG, []string{"-.inf", "-.Inf", "-.INF"}}, + {"<<", yaml_MERGE_TAG, []string{"<<"}}, + } + + m := resolveMap + for _, item := range resolveMapList { + for _, s := range item.l { + m[s] = resolveMapItem{item.v, item.tag} + } + } +} + +const longTagPrefix = "tag:yaml.org,2002:" + +func shortTag(tag string) string { + // TODO This can easily be made faster and produce less garbage. + if strings.HasPrefix(tag, longTagPrefix) { + return "!!" 
+ tag[len(longTagPrefix):] + } + return tag +} + +func longTag(tag string) string { + if strings.HasPrefix(tag, "!!") { + return longTagPrefix + tag[2:] + } + return tag +} + +func resolvableTag(tag string) bool { + switch tag { + case "", yaml_STR_TAG, yaml_BOOL_TAG, yaml_INT_TAG, yaml_FLOAT_TAG, yaml_NULL_TAG: + return true + } + return false +} + +func resolve(tag string, in string) (rtag string, out interface{}) { + if !resolvableTag(tag) { + return tag, in + } + + defer func() { + switch tag { + case "", rtag, yaml_STR_TAG, yaml_BINARY_TAG: + return + } + failf("cannot decode %s `%s` as a %s", shortTag(rtag), in, shortTag(tag)) + }() + + // Any data is accepted as a !!str or !!binary. + // Otherwise, the prefix is enough of a hint about what it might be. + hint := byte('N') + if in != "" { + hint = resolveTable[in[0]] + } + if hint != 0 && tag != yaml_STR_TAG && tag != yaml_BINARY_TAG { + // Handle things we can lookup in a map. + if item, ok := resolveMap[in]; ok { + return item.tag, item.value + } + + // Base 60 floats are a bad idea, were dropped in YAML 1.2, and + // are purposefully unsupported here. They're still quoted on + // the way out for compatibility with other parser, though. + + switch hint { + case 'M': + // We've already checked the map above. + + case '.': + // Not in the map, so maybe a normal float. + floatv, err := strconv.ParseFloat(in, 64) + if err == nil { + return yaml_FLOAT_TAG, floatv + } + + case 'D', 'S': + // Int, float, or timestamp. + plain := strings.Replace(in, "_", "", -1) + intv, err := strconv.ParseInt(plain, 0, 64) + if err == nil { + if intv == int64(int(intv)) { + return yaml_INT_TAG, int(intv) + } else { + return yaml_INT_TAG, intv + } + } + uintv, err := strconv.ParseUint(plain, 0, 64) + if err == nil { + return yaml_INT_TAG, uintv + } + floatv, err := strconv.ParseFloat(plain, 64) + if err == nil { + return yaml_FLOAT_TAG, floatv + } + if strings.HasPrefix(plain, "0b") { + intv, err := strconv.ParseInt(plain[2:], 2, 64) + if err == nil { + if intv == int64(int(intv)) { + return yaml_INT_TAG, int(intv) + } else { + return yaml_INT_TAG, intv + } + } + uintv, err := strconv.ParseUint(plain[2:], 2, 64) + if err == nil { + return yaml_INT_TAG, uintv + } + } else if strings.HasPrefix(plain, "-0b") { + intv, err := strconv.ParseInt(plain[3:], 2, 64) + if err == nil { + if intv == int64(int(intv)) { + return yaml_INT_TAG, -int(intv) + } else { + return yaml_INT_TAG, -intv + } + } + } + // XXX Handle timestamps here. + + default: + panic("resolveTable item not yet handled: " + string(rune(hint)) + " (with " + in + ")") + } + } + if tag == yaml_BINARY_TAG { + return yaml_BINARY_TAG, in + } + if utf8.ValidString(in) { + return yaml_STR_TAG, in + } + return yaml_BINARY_TAG, encodeBase64(in) +} + +// encodeBase64 encodes s as base64 that is broken up into multiple lines +// as appropriate for the resulting length. 
+func encodeBase64(s string) string { + const lineLen = 70 + encLen := base64.StdEncoding.EncodedLen(len(s)) + lines := encLen/lineLen + 1 + buf := make([]byte, encLen*2+lines) + in := buf[0:encLen] + out := buf[encLen:] + base64.StdEncoding.Encode(in, []byte(s)) + k := 0 + for i := 0; i < len(in); i += lineLen { + j := i + lineLen + if j > len(in) { + j = len(in) + } + k += copy(out[k:], in[i:j]) + if lines > 1 { + out[k] = '\n' + k++ + } + } + return string(out[:k]) +} diff --git a/vendor/github.com/docker/distribution/vendor/gopkg.in/yaml.v2/scannerc.go b/vendor/github.com/docker/distribution/vendor/gopkg.in/yaml.v2/scannerc.go new file mode 100644 index 000000000..fe93b190c --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/gopkg.in/yaml.v2/scannerc.go @@ -0,0 +1,2710 @@ +package yaml + +import ( + "bytes" + "fmt" +) + +// Introduction +// ************ +// +// The following notes assume that you are familiar with the YAML specification +// (http://yaml.org/spec/cvs/current.html). We mostly follow it, although in +// some cases we are less restrictive that it requires. +// +// The process of transforming a YAML stream into a sequence of events is +// divided on two steps: Scanning and Parsing. +// +// The Scanner transforms the input stream into a sequence of tokens, while the +// parser transform the sequence of tokens produced by the Scanner into a +// sequence of parsing events. +// +// The Scanner is rather clever and complicated. The Parser, on the contrary, +// is a straightforward implementation of a recursive-descendant parser (or, +// LL(1) parser, as it is usually called). +// +// Actually there are two issues of Scanning that might be called "clever", the +// rest is quite straightforward. The issues are "block collection start" and +// "simple keys". Both issues are explained below in details. +// +// Here the Scanning step is explained and implemented. We start with the list +// of all the tokens produced by the Scanner together with short descriptions. +// +// Now, tokens: +// +// STREAM-START(encoding) # The stream start. +// STREAM-END # The stream end. +// VERSION-DIRECTIVE(major,minor) # The '%YAML' directive. +// TAG-DIRECTIVE(handle,prefix) # The '%TAG' directive. +// DOCUMENT-START # '---' +// DOCUMENT-END # '...' +// BLOCK-SEQUENCE-START # Indentation increase denoting a block +// BLOCK-MAPPING-START # sequence or a block mapping. +// BLOCK-END # Indentation decrease. +// FLOW-SEQUENCE-START # '[' +// FLOW-SEQUENCE-END # ']' +// BLOCK-SEQUENCE-START # '{' +// BLOCK-SEQUENCE-END # '}' +// BLOCK-ENTRY # '-' +// FLOW-ENTRY # ',' +// KEY # '?' or nothing (simple keys). +// VALUE # ':' +// ALIAS(anchor) # '*anchor' +// ANCHOR(anchor) # '&anchor' +// TAG(handle,suffix) # '!handle!suffix' +// SCALAR(value,style) # A scalar. +// +// The following two tokens are "virtual" tokens denoting the beginning and the +// end of the stream: +// +// STREAM-START(encoding) +// STREAM-END +// +// We pass the information about the input stream encoding with the +// STREAM-START token. +// +// The next two tokens are responsible for tags: +// +// VERSION-DIRECTIVE(major,minor) +// TAG-DIRECTIVE(handle,prefix) +// +// Example: +// +// %YAML 1.1 +// %TAG ! !foo +// %TAG !yaml! 
tag:yaml.org,2002: +// --- +// +// The correspoding sequence of tokens: +// +// STREAM-START(utf-8) +// VERSION-DIRECTIVE(1,1) +// TAG-DIRECTIVE("!","!foo") +// TAG-DIRECTIVE("!yaml","tag:yaml.org,2002:") +// DOCUMENT-START +// STREAM-END +// +// Note that the VERSION-DIRECTIVE and TAG-DIRECTIVE tokens occupy a whole +// line. +// +// The document start and end indicators are represented by: +// +// DOCUMENT-START +// DOCUMENT-END +// +// Note that if a YAML stream contains an implicit document (without '---' +// and '...' indicators), no DOCUMENT-START and DOCUMENT-END tokens will be +// produced. +// +// In the following examples, we present whole documents together with the +// produced tokens. +// +// 1. An implicit document: +// +// 'a scalar' +// +// Tokens: +// +// STREAM-START(utf-8) +// SCALAR("a scalar",single-quoted) +// STREAM-END +// +// 2. An explicit document: +// +// --- +// 'a scalar' +// ... +// +// Tokens: +// +// STREAM-START(utf-8) +// DOCUMENT-START +// SCALAR("a scalar",single-quoted) +// DOCUMENT-END +// STREAM-END +// +// 3. Several documents in a stream: +// +// 'a scalar' +// --- +// 'another scalar' +// --- +// 'yet another scalar' +// +// Tokens: +// +// STREAM-START(utf-8) +// SCALAR("a scalar",single-quoted) +// DOCUMENT-START +// SCALAR("another scalar",single-quoted) +// DOCUMENT-START +// SCALAR("yet another scalar",single-quoted) +// STREAM-END +// +// We have already introduced the SCALAR token above. The following tokens are +// used to describe aliases, anchors, tag, and scalars: +// +// ALIAS(anchor) +// ANCHOR(anchor) +// TAG(handle,suffix) +// SCALAR(value,style) +// +// The following series of examples illustrate the usage of these tokens: +// +// 1. A recursive sequence: +// +// &A [ *A ] +// +// Tokens: +// +// STREAM-START(utf-8) +// ANCHOR("A") +// FLOW-SEQUENCE-START +// ALIAS("A") +// FLOW-SEQUENCE-END +// STREAM-END +// +// 2. A tagged scalar: +// +// !!float "3.14" # A good approximation. +// +// Tokens: +// +// STREAM-START(utf-8) +// TAG("!!","float") +// SCALAR("3.14",double-quoted) +// STREAM-END +// +// 3. Various scalar styles: +// +// --- # Implicit empty plain scalars do not produce tokens. +// --- a plain scalar +// --- 'a single-quoted scalar' +// --- "a double-quoted scalar" +// --- |- +// a literal scalar +// --- >- +// a folded +// scalar +// +// Tokens: +// +// STREAM-START(utf-8) +// DOCUMENT-START +// DOCUMENT-START +// SCALAR("a plain scalar",plain) +// DOCUMENT-START +// SCALAR("a single-quoted scalar",single-quoted) +// DOCUMENT-START +// SCALAR("a double-quoted scalar",double-quoted) +// DOCUMENT-START +// SCALAR("a literal scalar",literal) +// DOCUMENT-START +// SCALAR("a folded scalar",folded) +// STREAM-END +// +// Now it's time to review collection-related tokens. We will start with +// flow collections: +// +// FLOW-SEQUENCE-START +// FLOW-SEQUENCE-END +// FLOW-MAPPING-START +// FLOW-MAPPING-END +// FLOW-ENTRY +// KEY +// VALUE +// +// The tokens FLOW-SEQUENCE-START, FLOW-SEQUENCE-END, FLOW-MAPPING-START, and +// FLOW-MAPPING-END represent the indicators '[', ']', '{', and '}' +// correspondingly. FLOW-ENTRY represent the ',' indicator. Finally the +// indicators '?' and ':', which are used for denoting mapping keys and values, +// are represented by the KEY and VALUE tokens. +// +// The following examples show flow collections: +// +// 1. 
A flow sequence: +// +// [item 1, item 2, item 3] +// +// Tokens: +// +// STREAM-START(utf-8) +// FLOW-SEQUENCE-START +// SCALAR("item 1",plain) +// FLOW-ENTRY +// SCALAR("item 2",plain) +// FLOW-ENTRY +// SCALAR("item 3",plain) +// FLOW-SEQUENCE-END +// STREAM-END +// +// 2. A flow mapping: +// +// { +// a simple key: a value, # Note that the KEY token is produced. +// ? a complex key: another value, +// } +// +// Tokens: +// +// STREAM-START(utf-8) +// FLOW-MAPPING-START +// KEY +// SCALAR("a simple key",plain) +// VALUE +// SCALAR("a value",plain) +// FLOW-ENTRY +// KEY +// SCALAR("a complex key",plain) +// VALUE +// SCALAR("another value",plain) +// FLOW-ENTRY +// FLOW-MAPPING-END +// STREAM-END +// +// A simple key is a key which is not denoted by the '?' indicator. Note that +// the Scanner still produce the KEY token whenever it encounters a simple key. +// +// For scanning block collections, the following tokens are used (note that we +// repeat KEY and VALUE here): +// +// BLOCK-SEQUENCE-START +// BLOCK-MAPPING-START +// BLOCK-END +// BLOCK-ENTRY +// KEY +// VALUE +// +// The tokens BLOCK-SEQUENCE-START and BLOCK-MAPPING-START denote indentation +// increase that precedes a block collection (cf. the INDENT token in Python). +// The token BLOCK-END denote indentation decrease that ends a block collection +// (cf. the DEDENT token in Python). However YAML has some syntax pecularities +// that makes detections of these tokens more complex. +// +// The tokens BLOCK-ENTRY, KEY, and VALUE are used to represent the indicators +// '-', '?', and ':' correspondingly. +// +// The following examples show how the tokens BLOCK-SEQUENCE-START, +// BLOCK-MAPPING-START, and BLOCK-END are emitted by the Scanner: +// +// 1. Block sequences: +// +// - item 1 +// - item 2 +// - +// - item 3.1 +// - item 3.2 +// - +// key 1: value 1 +// key 2: value 2 +// +// Tokens: +// +// STREAM-START(utf-8) +// BLOCK-SEQUENCE-START +// BLOCK-ENTRY +// SCALAR("item 1",plain) +// BLOCK-ENTRY +// SCALAR("item 2",plain) +// BLOCK-ENTRY +// BLOCK-SEQUENCE-START +// BLOCK-ENTRY +// SCALAR("item 3.1",plain) +// BLOCK-ENTRY +// SCALAR("item 3.2",plain) +// BLOCK-END +// BLOCK-ENTRY +// BLOCK-MAPPING-START +// KEY +// SCALAR("key 1",plain) +// VALUE +// SCALAR("value 1",plain) +// KEY +// SCALAR("key 2",plain) +// VALUE +// SCALAR("value 2",plain) +// BLOCK-END +// BLOCK-END +// STREAM-END +// +// 2. Block mappings: +// +// a simple key: a value # The KEY token is produced here. +// ? a complex key +// : another value +// a mapping: +// key 1: value 1 +// key 2: value 2 +// a sequence: +// - item 1 +// - item 2 +// +// Tokens: +// +// STREAM-START(utf-8) +// BLOCK-MAPPING-START +// KEY +// SCALAR("a simple key",plain) +// VALUE +// SCALAR("a value",plain) +// KEY +// SCALAR("a complex key",plain) +// VALUE +// SCALAR("another value",plain) +// KEY +// SCALAR("a mapping",plain) +// BLOCK-MAPPING-START +// KEY +// SCALAR("key 1",plain) +// VALUE +// SCALAR("value 1",plain) +// KEY +// SCALAR("key 2",plain) +// VALUE +// SCALAR("value 2",plain) +// BLOCK-END +// KEY +// SCALAR("a sequence",plain) +// VALUE +// BLOCK-SEQUENCE-START +// BLOCK-ENTRY +// SCALAR("item 1",plain) +// BLOCK-ENTRY +// SCALAR("item 2",plain) +// BLOCK-END +// BLOCK-END +// STREAM-END +// +// YAML does not always require to start a new block collection from a new +// line. If the current line contains only '-', '?', and ':' indicators, a new +// block collection may start at the current line. The following examples +// illustrate this case: +// +// 1. 
Collections in a sequence: +// +// - - item 1 +// - item 2 +// - key 1: value 1 +// key 2: value 2 +// - ? complex key +// : complex value +// +// Tokens: +// +// STREAM-START(utf-8) +// BLOCK-SEQUENCE-START +// BLOCK-ENTRY +// BLOCK-SEQUENCE-START +// BLOCK-ENTRY +// SCALAR("item 1",plain) +// BLOCK-ENTRY +// SCALAR("item 2",plain) +// BLOCK-END +// BLOCK-ENTRY +// BLOCK-MAPPING-START +// KEY +// SCALAR("key 1",plain) +// VALUE +// SCALAR("value 1",plain) +// KEY +// SCALAR("key 2",plain) +// VALUE +// SCALAR("value 2",plain) +// BLOCK-END +// BLOCK-ENTRY +// BLOCK-MAPPING-START +// KEY +// SCALAR("complex key") +// VALUE +// SCALAR("complex value") +// BLOCK-END +// BLOCK-END +// STREAM-END +// +// 2. Collections in a mapping: +// +// ? a sequence +// : - item 1 +// - item 2 +// ? a mapping +// : key 1: value 1 +// key 2: value 2 +// +// Tokens: +// +// STREAM-START(utf-8) +// BLOCK-MAPPING-START +// KEY +// SCALAR("a sequence",plain) +// VALUE +// BLOCK-SEQUENCE-START +// BLOCK-ENTRY +// SCALAR("item 1",plain) +// BLOCK-ENTRY +// SCALAR("item 2",plain) +// BLOCK-END +// KEY +// SCALAR("a mapping",plain) +// VALUE +// BLOCK-MAPPING-START +// KEY +// SCALAR("key 1",plain) +// VALUE +// SCALAR("value 1",plain) +// KEY +// SCALAR("key 2",plain) +// VALUE +// SCALAR("value 2",plain) +// BLOCK-END +// BLOCK-END +// STREAM-END +// +// YAML also permits non-indented sequences if they are included into a block +// mapping. In this case, the token BLOCK-SEQUENCE-START is not produced: +// +// key: +// - item 1 # BLOCK-SEQUENCE-START is NOT produced here. +// - item 2 +// +// Tokens: +// +// STREAM-START(utf-8) +// BLOCK-MAPPING-START +// KEY +// SCALAR("key",plain) +// VALUE +// BLOCK-ENTRY +// SCALAR("item 1",plain) +// BLOCK-ENTRY +// SCALAR("item 2",plain) +// BLOCK-END +// + +// Ensure that the buffer contains the required number of characters. +// Return true on success, false on failure (reader error or memory error). +func cache(parser *yaml_parser_t, length int) bool { + // [Go] This was inlined: !cache(A, B) -> unread < B && !update(A, B) + return parser.unread >= length || yaml_parser_update_buffer(parser, length) +} + +// Advance the buffer pointer. +func skip(parser *yaml_parser_t) { + parser.mark.index++ + parser.mark.column++ + parser.unread-- + parser.buffer_pos += width(parser.buffer[parser.buffer_pos]) +} + +func skip_line(parser *yaml_parser_t) { + if is_crlf(parser.buffer, parser.buffer_pos) { + parser.mark.index += 2 + parser.mark.column = 0 + parser.mark.line++ + parser.unread -= 2 + parser.buffer_pos += 2 + } else if is_break(parser.buffer, parser.buffer_pos) { + parser.mark.index++ + parser.mark.column = 0 + parser.mark.line++ + parser.unread-- + parser.buffer_pos += width(parser.buffer[parser.buffer_pos]) + } +} + +// Copy a character to a string buffer and advance pointers. +func read(parser *yaml_parser_t, s []byte) []byte { + w := width(parser.buffer[parser.buffer_pos]) + if w == 0 { + panic("invalid character sequence") + } + if len(s) == 0 { + s = make([]byte, 0, 32) + } + if w == 1 && len(s)+w <= cap(s) { + s = s[:len(s)+1] + s[len(s)-1] = parser.buffer[parser.buffer_pos] + parser.buffer_pos++ + } else { + s = append(s, parser.buffer[parser.buffer_pos:parser.buffer_pos+w]...) + parser.buffer_pos += w + } + parser.mark.index++ + parser.mark.column++ + parser.unread-- + return s +} + +// Copy a line break character to a string buffer and advance pointers. 
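+// The recognized breaks are CR LF, a lone CR or LF, NEL (0xC2 0x85) and the
+// Unicode line/paragraph separators (0xE2 0x80 0xA8/0xA9); all but the last
+// two are normalized to a single '\n' in the output.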
+func read_line(parser *yaml_parser_t, s []byte) []byte { + buf := parser.buffer + pos := parser.buffer_pos + switch { + case buf[pos] == '\r' && buf[pos+1] == '\n': + // CR LF . LF + s = append(s, '\n') + parser.buffer_pos += 2 + parser.mark.index++ + parser.unread-- + case buf[pos] == '\r' || buf[pos] == '\n': + // CR|LF . LF + s = append(s, '\n') + parser.buffer_pos += 1 + case buf[pos] == '\xC2' && buf[pos+1] == '\x85': + // NEL . LF + s = append(s, '\n') + parser.buffer_pos += 2 + case buf[pos] == '\xE2' && buf[pos+1] == '\x80' && (buf[pos+2] == '\xA8' || buf[pos+2] == '\xA9'): + // LS|PS . LS|PS + s = append(s, buf[parser.buffer_pos:pos+3]...) + parser.buffer_pos += 3 + default: + return s + } + parser.mark.index++ + parser.mark.column = 0 + parser.mark.line++ + parser.unread-- + return s +} + +// Get the next token. +func yaml_parser_scan(parser *yaml_parser_t, token *yaml_token_t) bool { + // Erase the token object. + *token = yaml_token_t{} // [Go] Is this necessary? + + // No tokens after STREAM-END or error. + if parser.stream_end_produced || parser.error != yaml_NO_ERROR { + return true + } + + // Ensure that the tokens queue contains enough tokens. + if !parser.token_available { + if !yaml_parser_fetch_more_tokens(parser) { + return false + } + } + + // Fetch the next token from the queue. + *token = parser.tokens[parser.tokens_head] + parser.tokens_head++ + parser.tokens_parsed++ + parser.token_available = false + + if token.typ == yaml_STREAM_END_TOKEN { + parser.stream_end_produced = true + } + return true +} + +// Set the scanner error and return false. +func yaml_parser_set_scanner_error(parser *yaml_parser_t, context string, context_mark yaml_mark_t, problem string) bool { + parser.error = yaml_SCANNER_ERROR + parser.context = context + parser.context_mark = context_mark + parser.problem = problem + parser.problem_mark = parser.mark + return false +} + +func yaml_parser_set_scanner_tag_error(parser *yaml_parser_t, directive bool, context_mark yaml_mark_t, problem string) bool { + context := "while parsing a tag" + if directive { + context = "while parsing a %TAG directive" + } + return yaml_parser_set_scanner_error(parser, context, context_mark, "did not find URI escaped octet") +} + +func trace(args ...interface{}) func() { + pargs := append([]interface{}{"+++"}, args...) + fmt.Println(pargs...) + pargs = append([]interface{}{"---"}, args...) + return func() { fmt.Println(pargs...) } +} + +// Ensure that the tokens queue contains at least one token which can be +// returned to the Parser. +func yaml_parser_fetch_more_tokens(parser *yaml_parser_t) bool { + // While we need more tokens to fetch, do it. + for { + // Check if we really need to fetch more tokens. + need_more_tokens := false + + if parser.tokens_head == len(parser.tokens) { + // Queue is empty. + need_more_tokens = true + } else { + // Check if any potential simple key may occupy the head position. + if !yaml_parser_stale_simple_keys(parser) { + return false + } + + for i := range parser.simple_keys { + simple_key := &parser.simple_keys[i] + if simple_key.possible && simple_key.token_number == parser.tokens_parsed { + need_more_tokens = true + break + } + } + } + + // We are finished. + if !need_more_tokens { + break + } + // Fetch the next token. + if !yaml_parser_fetch_next_token(parser) { + return false + } + } + + parser.token_available = true + return true +} + +// The dispatcher for token fetchers. 
+func yaml_parser_fetch_next_token(parser *yaml_parser_t) bool { + // Ensure that the buffer is initialized. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + + // Check if we just started scanning. Fetch STREAM-START then. + if !parser.stream_start_produced { + return yaml_parser_fetch_stream_start(parser) + } + + // Eat whitespaces and comments until we reach the next token. + if !yaml_parser_scan_to_next_token(parser) { + return false + } + + // Remove obsolete potential simple keys. + if !yaml_parser_stale_simple_keys(parser) { + return false + } + + // Check the indentation level against the current column. + if !yaml_parser_unroll_indent(parser, parser.mark.column) { + return false + } + + // Ensure that the buffer contains at least 4 characters. 4 is the length + // of the longest indicators ('--- ' and '... '). + if parser.unread < 4 && !yaml_parser_update_buffer(parser, 4) { + return false + } + + // Is it the end of the stream? + if is_z(parser.buffer, parser.buffer_pos) { + return yaml_parser_fetch_stream_end(parser) + } + + // Is it a directive? + if parser.mark.column == 0 && parser.buffer[parser.buffer_pos] == '%' { + return yaml_parser_fetch_directive(parser) + } + + buf := parser.buffer + pos := parser.buffer_pos + + // Is it the document start indicator? + if parser.mark.column == 0 && buf[pos] == '-' && buf[pos+1] == '-' && buf[pos+2] == '-' && is_blankz(buf, pos+3) { + return yaml_parser_fetch_document_indicator(parser, yaml_DOCUMENT_START_TOKEN) + } + + // Is it the document end indicator? + if parser.mark.column == 0 && buf[pos] == '.' && buf[pos+1] == '.' && buf[pos+2] == '.' && is_blankz(buf, pos+3) { + return yaml_parser_fetch_document_indicator(parser, yaml_DOCUMENT_END_TOKEN) + } + + // Is it the flow sequence start indicator? + if buf[pos] == '[' { + return yaml_parser_fetch_flow_collection_start(parser, yaml_FLOW_SEQUENCE_START_TOKEN) + } + + // Is it the flow mapping start indicator? + if parser.buffer[parser.buffer_pos] == '{' { + return yaml_parser_fetch_flow_collection_start(parser, yaml_FLOW_MAPPING_START_TOKEN) + } + + // Is it the flow sequence end indicator? + if parser.buffer[parser.buffer_pos] == ']' { + return yaml_parser_fetch_flow_collection_end(parser, + yaml_FLOW_SEQUENCE_END_TOKEN) + } + + // Is it the flow mapping end indicator? + if parser.buffer[parser.buffer_pos] == '}' { + return yaml_parser_fetch_flow_collection_end(parser, + yaml_FLOW_MAPPING_END_TOKEN) + } + + // Is it the flow entry indicator? + if parser.buffer[parser.buffer_pos] == ',' { + return yaml_parser_fetch_flow_entry(parser) + } + + // Is it the block entry indicator? + if parser.buffer[parser.buffer_pos] == '-' && is_blankz(parser.buffer, parser.buffer_pos+1) { + return yaml_parser_fetch_block_entry(parser) + } + + // Is it the key indicator? + if parser.buffer[parser.buffer_pos] == '?' && (parser.flow_level > 0 || is_blankz(parser.buffer, parser.buffer_pos+1)) { + return yaml_parser_fetch_key(parser) + } + + // Is it the value indicator? + if parser.buffer[parser.buffer_pos] == ':' && (parser.flow_level > 0 || is_blankz(parser.buffer, parser.buffer_pos+1)) { + return yaml_parser_fetch_value(parser) + } + + // Is it an alias? + if parser.buffer[parser.buffer_pos] == '*' { + return yaml_parser_fetch_anchor(parser, yaml_ALIAS_TOKEN) + } + + // Is it an anchor? + if parser.buffer[parser.buffer_pos] == '&' { + return yaml_parser_fetch_anchor(parser, yaml_ANCHOR_TOKEN) + } + + // Is it a tag? + if parser.buffer[parser.buffer_pos] == '!' 
{ + return yaml_parser_fetch_tag(parser) + } + + // Is it a literal scalar? + if parser.buffer[parser.buffer_pos] == '|' && parser.flow_level == 0 { + return yaml_parser_fetch_block_scalar(parser, true) + } + + // Is it a folded scalar? + if parser.buffer[parser.buffer_pos] == '>' && parser.flow_level == 0 { + return yaml_parser_fetch_block_scalar(parser, false) + } + + // Is it a single-quoted scalar? + if parser.buffer[parser.buffer_pos] == '\'' { + return yaml_parser_fetch_flow_scalar(parser, true) + } + + // Is it a double-quoted scalar? + if parser.buffer[parser.buffer_pos] == '"' { + return yaml_parser_fetch_flow_scalar(parser, false) + } + + // Is it a plain scalar? + // + // A plain scalar may start with any non-blank characters except + // + // '-', '?', ':', ',', '[', ']', '{', '}', + // '#', '&', '*', '!', '|', '>', '\'', '\"', + // '%', '@', '`'. + // + // In the block context (and, for the '-' indicator, in the flow context + // too), it may also start with the characters + // + // '-', '?', ':' + // + // if it is followed by a non-space character. + // + // The last rule is more restrictive than the specification requires. + // [Go] Make this logic more reasonable. + //switch parser.buffer[parser.buffer_pos] { + //case '-', '?', ':', ',', '?', '-', ',', ':', ']', '[', '}', '{', '&', '#', '!', '*', '>', '|', '"', '\'', '@', '%', '-', '`': + //} + if !(is_blankz(parser.buffer, parser.buffer_pos) || parser.buffer[parser.buffer_pos] == '-' || + parser.buffer[parser.buffer_pos] == '?' || parser.buffer[parser.buffer_pos] == ':' || + parser.buffer[parser.buffer_pos] == ',' || parser.buffer[parser.buffer_pos] == '[' || + parser.buffer[parser.buffer_pos] == ']' || parser.buffer[parser.buffer_pos] == '{' || + parser.buffer[parser.buffer_pos] == '}' || parser.buffer[parser.buffer_pos] == '#' || + parser.buffer[parser.buffer_pos] == '&' || parser.buffer[parser.buffer_pos] == '*' || + parser.buffer[parser.buffer_pos] == '!' || parser.buffer[parser.buffer_pos] == '|' || + parser.buffer[parser.buffer_pos] == '>' || parser.buffer[parser.buffer_pos] == '\'' || + parser.buffer[parser.buffer_pos] == '"' || parser.buffer[parser.buffer_pos] == '%' || + parser.buffer[parser.buffer_pos] == '@' || parser.buffer[parser.buffer_pos] == '`') || + (parser.buffer[parser.buffer_pos] == '-' && !is_blank(parser.buffer, parser.buffer_pos+1)) || + (parser.flow_level == 0 && + (parser.buffer[parser.buffer_pos] == '?' || parser.buffer[parser.buffer_pos] == ':') && + !is_blankz(parser.buffer, parser.buffer_pos+1)) { + return yaml_parser_fetch_plain_scalar(parser) + } + + // If we don't determine the token type so far, it is an error. + return yaml_parser_set_scanner_error(parser, + "while scanning for the next token", parser.mark, + "found character that cannot start any token") +} + +// Check the list of potential simple keys and remove the positions that +// cannot contain simple keys anymore. +func yaml_parser_stale_simple_keys(parser *yaml_parser_t) bool { + // Check for a potential simple key for each flow level. + for i := range parser.simple_keys { + simple_key := &parser.simple_keys[i] + + // The specification requires that a simple key + // + // - is limited to a single line, + // - is shorter than 1024 characters. + if simple_key.possible && (simple_key.mark.line < parser.mark.line || simple_key.mark.index+1024 < parser.mark.index) { + + // Check if the potential simple key to be removed is required. 
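+ // A required simple key is one that would start a block mapping entry at
+ // the current indentation level; if it goes stale before its ':' is seen,
+ // the document is malformed and scanning must fail rather than silently
+ // dropping the key.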
+ if simple_key.required { + return yaml_parser_set_scanner_error(parser, + "while scanning a simple key", simple_key.mark, + "could not find expected ':'") + } + simple_key.possible = false + } + } + return true +} + +// Check if a simple key may start at the current position and add it if +// needed. +func yaml_parser_save_simple_key(parser *yaml_parser_t) bool { + // A simple key is required at the current position if the scanner is in + // the block context and the current column coincides with the indentation + // level. + + required := parser.flow_level == 0 && parser.indent == parser.mark.column + + // A simple key is required only when it is the first token in the current + // line. Therefore it is always allowed. But we add a check anyway. + if required && !parser.simple_key_allowed { + panic("should not happen") + } + + // + // If the current position may start a simple key, save it. + // + if parser.simple_key_allowed { + simple_key := yaml_simple_key_t{ + possible: true, + required: required, + token_number: parser.tokens_parsed + (len(parser.tokens) - parser.tokens_head), + } + simple_key.mark = parser.mark + + if !yaml_parser_remove_simple_key(parser) { + return false + } + parser.simple_keys[len(parser.simple_keys)-1] = simple_key + } + return true +} + +// Remove a potential simple key at the current flow level. +func yaml_parser_remove_simple_key(parser *yaml_parser_t) bool { + i := len(parser.simple_keys) - 1 + if parser.simple_keys[i].possible { + // If the key is required, it is an error. + if parser.simple_keys[i].required { + return yaml_parser_set_scanner_error(parser, + "while scanning a simple key", parser.simple_keys[i].mark, + "could not find expected ':'") + } + } + // Remove the key from the stack. + parser.simple_keys[i].possible = false + return true +} + +// Increase the flow level and resize the simple key list if needed. +func yaml_parser_increase_flow_level(parser *yaml_parser_t) bool { + // Reset the simple key on the next level. + parser.simple_keys = append(parser.simple_keys, yaml_simple_key_t{}) + + // Increase the flow level. + parser.flow_level++ + return true +} + +// Decrease the flow level. +func yaml_parser_decrease_flow_level(parser *yaml_parser_t) bool { + if parser.flow_level > 0 { + parser.flow_level-- + parser.simple_keys = parser.simple_keys[:len(parser.simple_keys)-1] + } + return true +} + +// Push the current indentation level to the stack and set the new level +// the current column is greater than the indentation level. In this case, +// append or insert the specified token into the token queue. +func yaml_parser_roll_indent(parser *yaml_parser_t, column, number int, typ yaml_token_type_t, mark yaml_mark_t) bool { + // In the flow context, do nothing. + if parser.flow_level > 0 { + return true + } + + if parser.indent < column { + // Push the current indentation level to the stack and set the new + // indentation level. + parser.indents = append(parser.indents, parser.indent) + parser.indent = column + + // Create a token and insert it into the queue. + token := yaml_token_t{ + typ: typ, + start_mark: mark, + end_mark: mark, + } + if number > -1 { + number -= parser.tokens_parsed + } + yaml_insert_token(parser, number, &token) + } + return true +} + +// Pop indentation levels from the indents stack until the current level +// becomes less or equal to the column. For each intendation level, append +// the BLOCK-END token. +func yaml_parser_unroll_indent(parser *yaml_parser_t, column int) bool { + // In the flow context, do nothing. 
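+ // Indentation only delimits structure in the block context; inside flow
+ // collections the brackets and braces define the nesting, so the indent
+ // stack is left untouched.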
+ if parser.flow_level > 0 { + return true + } + + // Loop through the intendation levels in the stack. + for parser.indent > column { + // Create a token and append it to the queue. + token := yaml_token_t{ + typ: yaml_BLOCK_END_TOKEN, + start_mark: parser.mark, + end_mark: parser.mark, + } + yaml_insert_token(parser, -1, &token) + + // Pop the indentation level. + parser.indent = parser.indents[len(parser.indents)-1] + parser.indents = parser.indents[:len(parser.indents)-1] + } + return true +} + +// Initialize the scanner and produce the STREAM-START token. +func yaml_parser_fetch_stream_start(parser *yaml_parser_t) bool { + + // Set the initial indentation. + parser.indent = -1 + + // Initialize the simple key stack. + parser.simple_keys = append(parser.simple_keys, yaml_simple_key_t{}) + + // A simple key is allowed at the beginning of the stream. + parser.simple_key_allowed = true + + // We have started. + parser.stream_start_produced = true + + // Create the STREAM-START token and append it to the queue. + token := yaml_token_t{ + typ: yaml_STREAM_START_TOKEN, + start_mark: parser.mark, + end_mark: parser.mark, + encoding: parser.encoding, + } + yaml_insert_token(parser, -1, &token) + return true +} + +// Produce the STREAM-END token and shut down the scanner. +func yaml_parser_fetch_stream_end(parser *yaml_parser_t) bool { + + // Force new line. + if parser.mark.column != 0 { + parser.mark.column = 0 + parser.mark.line++ + } + + // Reset the indentation level. + if !yaml_parser_unroll_indent(parser, -1) { + return false + } + + // Reset simple keys. + if !yaml_parser_remove_simple_key(parser) { + return false + } + + parser.simple_key_allowed = false + + // Create the STREAM-END token and append it to the queue. + token := yaml_token_t{ + typ: yaml_STREAM_END_TOKEN, + start_mark: parser.mark, + end_mark: parser.mark, + } + yaml_insert_token(parser, -1, &token) + return true +} + +// Produce a VERSION-DIRECTIVE or TAG-DIRECTIVE token. +func yaml_parser_fetch_directive(parser *yaml_parser_t) bool { + // Reset the indentation level. + if !yaml_parser_unroll_indent(parser, -1) { + return false + } + + // Reset simple keys. + if !yaml_parser_remove_simple_key(parser) { + return false + } + + parser.simple_key_allowed = false + + // Create the YAML-DIRECTIVE or TAG-DIRECTIVE token. + token := yaml_token_t{} + if !yaml_parser_scan_directive(parser, &token) { + return false + } + // Append the token to the queue. + yaml_insert_token(parser, -1, &token) + return true +} + +// Produce the DOCUMENT-START or DOCUMENT-END token. +func yaml_parser_fetch_document_indicator(parser *yaml_parser_t, typ yaml_token_type_t) bool { + // Reset the indentation level. + if !yaml_parser_unroll_indent(parser, -1) { + return false + } + + // Reset simple keys. + if !yaml_parser_remove_simple_key(parser) { + return false + } + + parser.simple_key_allowed = false + + // Consume the token. + start_mark := parser.mark + + skip(parser) + skip(parser) + skip(parser) + + end_mark := parser.mark + + // Create the DOCUMENT-START or DOCUMENT-END token. + token := yaml_token_t{ + typ: typ, + start_mark: start_mark, + end_mark: end_mark, + } + // Append the token to the queue. + yaml_insert_token(parser, -1, &token) + return true +} + +// Produce the FLOW-SEQUENCE-START or FLOW-MAPPING-START token. +func yaml_parser_fetch_flow_collection_start(parser *yaml_parser_t, typ yaml_token_type_t) bool { + // The indicators '[' and '{' may start a simple key. 
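+ // For example, in "[a, b]: value" the flow sequence itself is the key of
+ // a block mapping, so the position of '[' must be remembered as a
+ // potential simple key.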
+ if !yaml_parser_save_simple_key(parser) { + return false + } + + // Increase the flow level. + if !yaml_parser_increase_flow_level(parser) { + return false + } + + // A simple key may follow the indicators '[' and '{'. + parser.simple_key_allowed = true + + // Consume the token. + start_mark := parser.mark + skip(parser) + end_mark := parser.mark + + // Create the FLOW-SEQUENCE-START of FLOW-MAPPING-START token. + token := yaml_token_t{ + typ: typ, + start_mark: start_mark, + end_mark: end_mark, + } + // Append the token to the queue. + yaml_insert_token(parser, -1, &token) + return true +} + +// Produce the FLOW-SEQUENCE-END or FLOW-MAPPING-END token. +func yaml_parser_fetch_flow_collection_end(parser *yaml_parser_t, typ yaml_token_type_t) bool { + // Reset any potential simple key on the current flow level. + if !yaml_parser_remove_simple_key(parser) { + return false + } + + // Decrease the flow level. + if !yaml_parser_decrease_flow_level(parser) { + return false + } + + // No simple keys after the indicators ']' and '}'. + parser.simple_key_allowed = false + + // Consume the token. + + start_mark := parser.mark + skip(parser) + end_mark := parser.mark + + // Create the FLOW-SEQUENCE-END of FLOW-MAPPING-END token. + token := yaml_token_t{ + typ: typ, + start_mark: start_mark, + end_mark: end_mark, + } + // Append the token to the queue. + yaml_insert_token(parser, -1, &token) + return true +} + +// Produce the FLOW-ENTRY token. +func yaml_parser_fetch_flow_entry(parser *yaml_parser_t) bool { + // Reset any potential simple keys on the current flow level. + if !yaml_parser_remove_simple_key(parser) { + return false + } + + // Simple keys are allowed after ','. + parser.simple_key_allowed = true + + // Consume the token. + start_mark := parser.mark + skip(parser) + end_mark := parser.mark + + // Create the FLOW-ENTRY token and append it to the queue. + token := yaml_token_t{ + typ: yaml_FLOW_ENTRY_TOKEN, + start_mark: start_mark, + end_mark: end_mark, + } + yaml_insert_token(parser, -1, &token) + return true +} + +// Produce the BLOCK-ENTRY token. +func yaml_parser_fetch_block_entry(parser *yaml_parser_t) bool { + // Check if the scanner is in the block context. + if parser.flow_level == 0 { + // Check if we are allowed to start a new entry. + if !parser.simple_key_allowed { + return yaml_parser_set_scanner_error(parser, "", parser.mark, + "block sequence entries are not allowed in this context") + } + // Add the BLOCK-SEQUENCE-START token if needed. + if !yaml_parser_roll_indent(parser, parser.mark.column, -1, yaml_BLOCK_SEQUENCE_START_TOKEN, parser.mark) { + return false + } + } else { + // It is an error for the '-' indicator to occur in the flow context, + // but we let the Parser detect and report about it because the Parser + // is able to point to the context. + } + + // Reset any potential simple keys on the current flow level. + if !yaml_parser_remove_simple_key(parser) { + return false + } + + // Simple keys are allowed after '-'. + parser.simple_key_allowed = true + + // Consume the token. + start_mark := parser.mark + skip(parser) + end_mark := parser.mark + + // Create the BLOCK-ENTRY token and append it to the queue. + token := yaml_token_t{ + typ: yaml_BLOCK_ENTRY_TOKEN, + start_mark: start_mark, + end_mark: end_mark, + } + yaml_insert_token(parser, -1, &token) + return true +} + +// Produce the KEY token. +func yaml_parser_fetch_key(parser *yaml_parser_t) bool { + + // In the block context, additional checks are required. 
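+ // An explicit '?' key may begin a new block mapping, so a
+ // BLOCK-MAPPING-START token is rolled in when the current column increases
+ // the indentation level; in the flow context the enclosing flow collection
+ // already provides the structure.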
+ if parser.flow_level == 0 { + // Check if we are allowed to start a new key (not nessesary simple). + if !parser.simple_key_allowed { + return yaml_parser_set_scanner_error(parser, "", parser.mark, + "mapping keys are not allowed in this context") + } + // Add the BLOCK-MAPPING-START token if needed. + if !yaml_parser_roll_indent(parser, parser.mark.column, -1, yaml_BLOCK_MAPPING_START_TOKEN, parser.mark) { + return false + } + } + + // Reset any potential simple keys on the current flow level. + if !yaml_parser_remove_simple_key(parser) { + return false + } + + // Simple keys are allowed after '?' in the block context. + parser.simple_key_allowed = parser.flow_level == 0 + + // Consume the token. + start_mark := parser.mark + skip(parser) + end_mark := parser.mark + + // Create the KEY token and append it to the queue. + token := yaml_token_t{ + typ: yaml_KEY_TOKEN, + start_mark: start_mark, + end_mark: end_mark, + } + yaml_insert_token(parser, -1, &token) + return true +} + +// Produce the VALUE token. +func yaml_parser_fetch_value(parser *yaml_parser_t) bool { + + simple_key := &parser.simple_keys[len(parser.simple_keys)-1] + + // Have we found a simple key? + if simple_key.possible { + // Create the KEY token and insert it into the queue. + token := yaml_token_t{ + typ: yaml_KEY_TOKEN, + start_mark: simple_key.mark, + end_mark: simple_key.mark, + } + yaml_insert_token(parser, simple_key.token_number-parser.tokens_parsed, &token) + + // In the block context, we may need to add the BLOCK-MAPPING-START token. + if !yaml_parser_roll_indent(parser, simple_key.mark.column, + simple_key.token_number, + yaml_BLOCK_MAPPING_START_TOKEN, simple_key.mark) { + return false + } + + // Remove the simple key. + simple_key.possible = false + + // A simple key cannot follow another simple key. + parser.simple_key_allowed = false + + } else { + // The ':' indicator follows a complex key. + + // In the block context, extra checks are required. + if parser.flow_level == 0 { + + // Check if we are allowed to start a complex value. + if !parser.simple_key_allowed { + return yaml_parser_set_scanner_error(parser, "", parser.mark, + "mapping values are not allowed in this context") + } + + // Add the BLOCK-MAPPING-START token if needed. + if !yaml_parser_roll_indent(parser, parser.mark.column, -1, yaml_BLOCK_MAPPING_START_TOKEN, parser.mark) { + return false + } + } + + // Simple keys after ':' are allowed in the block context. + parser.simple_key_allowed = parser.flow_level == 0 + } + + // Consume the token. + start_mark := parser.mark + skip(parser) + end_mark := parser.mark + + // Create the VALUE token and append it to the queue. + token := yaml_token_t{ + typ: yaml_VALUE_TOKEN, + start_mark: start_mark, + end_mark: end_mark, + } + yaml_insert_token(parser, -1, &token) + return true +} + +// Produce the ALIAS or ANCHOR token. +func yaml_parser_fetch_anchor(parser *yaml_parser_t, typ yaml_token_type_t) bool { + // An anchor or an alias could be a simple key. + if !yaml_parser_save_simple_key(parser) { + return false + } + + // A simple key cannot follow an anchor or an alias. + parser.simple_key_allowed = false + + // Create the ALIAS or ANCHOR token and append it to the queue. + var token yaml_token_t + if !yaml_parser_scan_anchor(parser, &token, typ) { + return false + } + yaml_insert_token(parser, -1, &token) + return true +} + +// Produce the TAG token. +func yaml_parser_fetch_tag(parser *yaml_parser_t) bool { + // A tag could be a simple key. 
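+ // For example, in "!!str 5: value" the key node begins at its tag, so the
+ // position of '!' is saved as a potential simple key before the tag itself
+ // is scanned.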
+ if !yaml_parser_save_simple_key(parser) { + return false + } + + // A simple key cannot follow a tag. + parser.simple_key_allowed = false + + // Create the TAG token and append it to the queue. + var token yaml_token_t + if !yaml_parser_scan_tag(parser, &token) { + return false + } + yaml_insert_token(parser, -1, &token) + return true +} + +// Produce the SCALAR(...,literal) or SCALAR(...,folded) tokens. +func yaml_parser_fetch_block_scalar(parser *yaml_parser_t, literal bool) bool { + // Remove any potential simple keys. + if !yaml_parser_remove_simple_key(parser) { + return false + } + + // A simple key may follow a block scalar. + parser.simple_key_allowed = true + + // Create the SCALAR token and append it to the queue. + var token yaml_token_t + if !yaml_parser_scan_block_scalar(parser, &token, literal) { + return false + } + yaml_insert_token(parser, -1, &token) + return true +} + +// Produce the SCALAR(...,single-quoted) or SCALAR(...,double-quoted) tokens. +func yaml_parser_fetch_flow_scalar(parser *yaml_parser_t, single bool) bool { + // A plain scalar could be a simple key. + if !yaml_parser_save_simple_key(parser) { + return false + } + + // A simple key cannot follow a flow scalar. + parser.simple_key_allowed = false + + // Create the SCALAR token and append it to the queue. + var token yaml_token_t + if !yaml_parser_scan_flow_scalar(parser, &token, single) { + return false + } + yaml_insert_token(parser, -1, &token) + return true +} + +// Produce the SCALAR(...,plain) token. +func yaml_parser_fetch_plain_scalar(parser *yaml_parser_t) bool { + // A plain scalar could be a simple key. + if !yaml_parser_save_simple_key(parser) { + return false + } + + // A simple key cannot follow a flow scalar. + parser.simple_key_allowed = false + + // Create the SCALAR token and append it to the queue. + var token yaml_token_t + if !yaml_parser_scan_plain_scalar(parser, &token) { + return false + } + yaml_insert_token(parser, -1, &token) + return true +} + +// Eat whitespaces and comments until the next token is found. +func yaml_parser_scan_to_next_token(parser *yaml_parser_t) bool { + + // Until the next token is not found. + for { + // Allow the BOM mark to start a line. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + if parser.mark.column == 0 && is_bom(parser.buffer, parser.buffer_pos) { + skip(parser) + } + + // Eat whitespaces. + // Tabs are allowed: + // - in the flow context + // - in the block context, but not at the beginning of the line or + // after '-', '?', or ':' (complex value). + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + + for parser.buffer[parser.buffer_pos] == ' ' || ((parser.flow_level > 0 || !parser.simple_key_allowed) && parser.buffer[parser.buffer_pos] == '\t') { + skip(parser) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + + // Eat a comment until a line break. + if parser.buffer[parser.buffer_pos] == '#' { + for !is_breakz(parser.buffer, parser.buffer_pos) { + skip(parser) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + } + + // If it is a line break, eat it. + if is_break(parser.buffer, parser.buffer_pos) { + if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { + return false + } + skip_line(parser) + + // In the block context, a new line may start a simple key. + if parser.flow_level == 0 { + parser.simple_key_allowed = true + } + } else { + break // We have found a token. 
+ } + } + + return true +} + +// Scan a YAML-DIRECTIVE or TAG-DIRECTIVE token. +// +// Scope: +// %YAML 1.1 # a comment \n +// ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +// %TAG !yaml! tag:yaml.org,2002: \n +// ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +// +func yaml_parser_scan_directive(parser *yaml_parser_t, token *yaml_token_t) bool { + // Eat '%'. + start_mark := parser.mark + skip(parser) + + // Scan the directive name. + var name []byte + if !yaml_parser_scan_directive_name(parser, start_mark, &name) { + return false + } + + // Is it a YAML directive? + if bytes.Equal(name, []byte("YAML")) { + // Scan the VERSION directive value. + var major, minor int8 + if !yaml_parser_scan_version_directive_value(parser, start_mark, &major, &minor) { + return false + } + end_mark := parser.mark + + // Create a VERSION-DIRECTIVE token. + *token = yaml_token_t{ + typ: yaml_VERSION_DIRECTIVE_TOKEN, + start_mark: start_mark, + end_mark: end_mark, + major: major, + minor: minor, + } + + // Is it a TAG directive? + } else if bytes.Equal(name, []byte("TAG")) { + // Scan the TAG directive value. + var handle, prefix []byte + if !yaml_parser_scan_tag_directive_value(parser, start_mark, &handle, &prefix) { + return false + } + end_mark := parser.mark + + // Create a TAG-DIRECTIVE token. + *token = yaml_token_t{ + typ: yaml_TAG_DIRECTIVE_TOKEN, + start_mark: start_mark, + end_mark: end_mark, + value: handle, + prefix: prefix, + } + + // Unknown directive. + } else { + yaml_parser_set_scanner_error(parser, "while scanning a directive", + start_mark, "found uknown directive name") + return false + } + + // Eat the rest of the line including any comments. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + + for is_blank(parser.buffer, parser.buffer_pos) { + skip(parser) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + + if parser.buffer[parser.buffer_pos] == '#' { + for !is_breakz(parser.buffer, parser.buffer_pos) { + skip(parser) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + } + + // Check if we are at the end of the line. + if !is_breakz(parser.buffer, parser.buffer_pos) { + yaml_parser_set_scanner_error(parser, "while scanning a directive", + start_mark, "did not find expected comment or line break") + return false + } + + // Eat a line break. + if is_break(parser.buffer, parser.buffer_pos) { + if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { + return false + } + skip_line(parser) + } + + return true +} + +// Scan the directive name. +// +// Scope: +// %YAML 1.1 # a comment \n +// ^^^^ +// %TAG !yaml! tag:yaml.org,2002: \n +// ^^^ +// +func yaml_parser_scan_directive_name(parser *yaml_parser_t, start_mark yaml_mark_t, name *[]byte) bool { + // Consume the directive name. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + + var s []byte + for is_alpha(parser.buffer, parser.buffer_pos) { + s = read(parser, s) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + + // Check if the name is empty. + if len(s) == 0 { + yaml_parser_set_scanner_error(parser, "while scanning a directive", + start_mark, "could not find expected directive name") + return false + } + + // Check for an blank character after the name. 
+ if !is_blankz(parser.buffer, parser.buffer_pos) { + yaml_parser_set_scanner_error(parser, "while scanning a directive", + start_mark, "found unexpected non-alphabetical character") + return false + } + *name = s + return true +} + +// Scan the value of VERSION-DIRECTIVE. +// +// Scope: +// %YAML 1.1 # a comment \n +// ^^^^^^ +func yaml_parser_scan_version_directive_value(parser *yaml_parser_t, start_mark yaml_mark_t, major, minor *int8) bool { + // Eat whitespaces. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + for is_blank(parser.buffer, parser.buffer_pos) { + skip(parser) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + + // Consume the major version number. + if !yaml_parser_scan_version_directive_number(parser, start_mark, major) { + return false + } + + // Eat '.'. + if parser.buffer[parser.buffer_pos] != '.' { + return yaml_parser_set_scanner_error(parser, "while scanning a %YAML directive", + start_mark, "did not find expected digit or '.' character") + } + + skip(parser) + + // Consume the minor version number. + if !yaml_parser_scan_version_directive_number(parser, start_mark, minor) { + return false + } + return true +} + +const max_number_length = 2 + +// Scan the version number of VERSION-DIRECTIVE. +// +// Scope: +// %YAML 1.1 # a comment \n +// ^ +// %YAML 1.1 # a comment \n +// ^ +func yaml_parser_scan_version_directive_number(parser *yaml_parser_t, start_mark yaml_mark_t, number *int8) bool { + + // Repeat while the next character is digit. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + var value, length int8 + for is_digit(parser.buffer, parser.buffer_pos) { + // Check if the number is too long. + length++ + if length > max_number_length { + return yaml_parser_set_scanner_error(parser, "while scanning a %YAML directive", + start_mark, "found extremely long version number") + } + value = value*10 + int8(as_digit(parser.buffer, parser.buffer_pos)) + skip(parser) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + + // Check if the number was present. + if length == 0 { + return yaml_parser_set_scanner_error(parser, "while scanning a %YAML directive", + start_mark, "did not find expected version number") + } + *number = value + return true +} + +// Scan the value of a TAG-DIRECTIVE token. +// +// Scope: +// %TAG !yaml! tag:yaml.org,2002: \n +// ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +// +func yaml_parser_scan_tag_directive_value(parser *yaml_parser_t, start_mark yaml_mark_t, handle, prefix *[]byte) bool { + var handle_value, prefix_value []byte + + // Eat whitespaces. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + + for is_blank(parser.buffer, parser.buffer_pos) { + skip(parser) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + + // Scan a handle. + if !yaml_parser_scan_tag_handle(parser, true, start_mark, &handle_value) { + return false + } + + // Expect a whitespace. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + if !is_blank(parser.buffer, parser.buffer_pos) { + yaml_parser_set_scanner_error(parser, "while scanning a %TAG directive", + start_mark, "did not find expected whitespace") + return false + } + + // Eat whitespaces. + for is_blank(parser.buffer, parser.buffer_pos) { + skip(parser) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + + // Scan a prefix. 
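+ // (The prefix is read with the generic tag URI scanner below: it accepts
+ // URI characters and decodes '%'-escaped octets, validating them as
+ // UTF-8 sequences.)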
+ if !yaml_parser_scan_tag_uri(parser, true, nil, start_mark, &prefix_value) { + return false + } + + // Expect a whitespace or line break. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + if !is_blankz(parser.buffer, parser.buffer_pos) { + yaml_parser_set_scanner_error(parser, "while scanning a %TAG directive", + start_mark, "did not find expected whitespace or line break") + return false + } + + *handle = handle_value + *prefix = prefix_value + return true +} + +func yaml_parser_scan_anchor(parser *yaml_parser_t, token *yaml_token_t, typ yaml_token_type_t) bool { + var s []byte + + // Eat the indicator character. + start_mark := parser.mark + skip(parser) + + // Consume the value. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + + for is_alpha(parser.buffer, parser.buffer_pos) { + s = read(parser, s) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + + end_mark := parser.mark + + /* + * Check if length of the anchor is greater than 0 and it is followed by + * a whitespace character or one of the indicators: + * + * '?', ':', ',', ']', '}', '%', '@', '`'. + */ + + if len(s) == 0 || + !(is_blankz(parser.buffer, parser.buffer_pos) || parser.buffer[parser.buffer_pos] == '?' || + parser.buffer[parser.buffer_pos] == ':' || parser.buffer[parser.buffer_pos] == ',' || + parser.buffer[parser.buffer_pos] == ']' || parser.buffer[parser.buffer_pos] == '}' || + parser.buffer[parser.buffer_pos] == '%' || parser.buffer[parser.buffer_pos] == '@' || + parser.buffer[parser.buffer_pos] == '`') { + context := "while scanning an alias" + if typ == yaml_ANCHOR_TOKEN { + context = "while scanning an anchor" + } + yaml_parser_set_scanner_error(parser, context, start_mark, + "did not find expected alphabetic or numeric character") + return false + } + + // Create a token. + *token = yaml_token_t{ + typ: typ, + start_mark: start_mark, + end_mark: end_mark, + value: s, + } + + return true +} + +/* + * Scan a TAG token. + */ + +func yaml_parser_scan_tag(parser *yaml_parser_t, token *yaml_token_t) bool { + var handle, suffix []byte + + start_mark := parser.mark + + // Check if the tag is in the canonical form. + if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { + return false + } + + if parser.buffer[parser.buffer_pos+1] == '<' { + // Keep the handle as '' + + // Eat '!<' + skip(parser) + skip(parser) + + // Consume the tag value. + if !yaml_parser_scan_tag_uri(parser, false, nil, start_mark, &suffix) { + return false + } + + // Check for '>' and eat it. + if parser.buffer[parser.buffer_pos] != '>' { + yaml_parser_set_scanner_error(parser, "while scanning a tag", + start_mark, "did not find the expected '>'") + return false + } + + skip(parser) + } else { + // The tag has either the '!suffix' or the '!handle!suffix' form. + + // First, try to scan a handle. + if !yaml_parser_scan_tag_handle(parser, false, start_mark, &handle) { + return false + } + + // Check if it is, indeed, handle. + if handle[0] == '!' && len(handle) > 1 && handle[len(handle)-1] == '!' { + // Scan the suffix now. + if !yaml_parser_scan_tag_uri(parser, false, nil, start_mark, &suffix) { + return false + } + } else { + // It wasn't a handle after all. Scan the rest of the tag. + if !yaml_parser_scan_tag_uri(parser, false, handle, start_mark, &suffix) { + return false + } + + // Set the handle to '!'. + handle = []byte{'!'} + + // A special case: the '!' tag. Set the handle to '' and the + // suffix to '!'. 
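+ // (The swap below turns handle == "!" / suffix == "" into
+ // handle == "" / suffix == "!".)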
+ if len(suffix) == 0 { + handle, suffix = suffix, handle + } + } + } + + // Check the character which ends the tag. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + if !is_blankz(parser.buffer, parser.buffer_pos) { + yaml_parser_set_scanner_error(parser, "while scanning a tag", + start_mark, "did not find expected whitespace or line break") + return false + } + + end_mark := parser.mark + + // Create a token. + *token = yaml_token_t{ + typ: yaml_TAG_TOKEN, + start_mark: start_mark, + end_mark: end_mark, + value: handle, + suffix: suffix, + } + return true +} + +// Scan a tag handle. +func yaml_parser_scan_tag_handle(parser *yaml_parser_t, directive bool, start_mark yaml_mark_t, handle *[]byte) bool { + // Check the initial '!' character. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + if parser.buffer[parser.buffer_pos] != '!' { + yaml_parser_set_scanner_tag_error(parser, directive, + start_mark, "did not find expected '!'") + return false + } + + var s []byte + + // Copy the '!' character. + s = read(parser, s) + + // Copy all subsequent alphabetical and numerical characters. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + for is_alpha(parser.buffer, parser.buffer_pos) { + s = read(parser, s) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + + // Check if the trailing character is '!' and copy it. + if parser.buffer[parser.buffer_pos] == '!' { + s = read(parser, s) + } else { + // It's either the '!' tag or not really a tag handle. If it's a %TAG + // directive, it's an error. If it's a tag token, it must be a part of URI. + if directive && !(s[0] == '!' && s[1] == 0) { + yaml_parser_set_scanner_tag_error(parser, directive, + start_mark, "did not find expected '!'") + return false + } + } + + *handle = s + return true +} + +// Scan a tag. +func yaml_parser_scan_tag_uri(parser *yaml_parser_t, directive bool, head []byte, start_mark yaml_mark_t, uri *[]byte) bool { + //size_t length = head ? strlen((char *)head) : 0 + var s []byte + + // Copy the head if needed. + // + // Note that we don't copy the leading '!' character. + if len(head) > 1 { + s = append(s, head[1:]...) + } + + // Scan the tag. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + + // The set of characters that may appear in URI is as follows: + // + // '0'-'9', 'A'-'Z', 'a'-'z', '_', '-', ';', '/', '?', ':', '@', '&', + // '=', '+', '$', ',', '.', '!', '~', '*', '\'', '(', ')', '[', ']', + // '%'. + // [Go] Convert this into more reasonable logic. + for is_alpha(parser.buffer, parser.buffer_pos) || parser.buffer[parser.buffer_pos] == ';' || + parser.buffer[parser.buffer_pos] == '/' || parser.buffer[parser.buffer_pos] == '?' || + parser.buffer[parser.buffer_pos] == ':' || parser.buffer[parser.buffer_pos] == '@' || + parser.buffer[parser.buffer_pos] == '&' || parser.buffer[parser.buffer_pos] == '=' || + parser.buffer[parser.buffer_pos] == '+' || parser.buffer[parser.buffer_pos] == '$' || + parser.buffer[parser.buffer_pos] == ',' || parser.buffer[parser.buffer_pos] == '.' || + parser.buffer[parser.buffer_pos] == '!' 
|| parser.buffer[parser.buffer_pos] == '~' || + parser.buffer[parser.buffer_pos] == '*' || parser.buffer[parser.buffer_pos] == '\'' || + parser.buffer[parser.buffer_pos] == '(' || parser.buffer[parser.buffer_pos] == ')' || + parser.buffer[parser.buffer_pos] == '[' || parser.buffer[parser.buffer_pos] == ']' || + parser.buffer[parser.buffer_pos] == '%' { + // Check if it is a URI-escape sequence. + if parser.buffer[parser.buffer_pos] == '%' { + if !yaml_parser_scan_uri_escapes(parser, directive, start_mark, &s) { + return false + } + } else { + s = read(parser, s) + } + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + + // Check if the tag is non-empty. + if len(s) == 0 { + yaml_parser_set_scanner_tag_error(parser, directive, + start_mark, "did not find expected tag URI") + return false + } + *uri = s + return true +} + +// Decode an URI-escape sequence corresponding to a single UTF-8 character. +func yaml_parser_scan_uri_escapes(parser *yaml_parser_t, directive bool, start_mark yaml_mark_t, s *[]byte) bool { + + // Decode the required number of characters. + w := 1024 + for w > 0 { + // Check for a URI-escaped octet. + if parser.unread < 3 && !yaml_parser_update_buffer(parser, 3) { + return false + } + + if !(parser.buffer[parser.buffer_pos] == '%' && + is_hex(parser.buffer, parser.buffer_pos+1) && + is_hex(parser.buffer, parser.buffer_pos+2)) { + return yaml_parser_set_scanner_tag_error(parser, directive, + start_mark, "did not find URI escaped octet") + } + + // Get the octet. + octet := byte((as_hex(parser.buffer, parser.buffer_pos+1) << 4) + as_hex(parser.buffer, parser.buffer_pos+2)) + + // If it is the leading octet, determine the length of the UTF-8 sequence. + if w == 1024 { + w = width(octet) + if w == 0 { + return yaml_parser_set_scanner_tag_error(parser, directive, + start_mark, "found an incorrect leading UTF-8 octet") + } + } else { + // Check if the trailing octet is correct. + if octet&0xC0 != 0x80 { + return yaml_parser_set_scanner_tag_error(parser, directive, + start_mark, "found an incorrect trailing UTF-8 octet") + } + } + + // Copy the octet and move the pointers. + *s = append(*s, octet) + skip(parser) + skip(parser) + skip(parser) + w-- + } + return true +} + +// Scan a block scalar. +func yaml_parser_scan_block_scalar(parser *yaml_parser_t, token *yaml_token_t, literal bool) bool { + // Eat the indicator '|' or '>'. + start_mark := parser.mark + skip(parser) + + // Scan the additional block scalar indicators. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + + // Check for a chomping indicator. + var chomping, increment int + if parser.buffer[parser.buffer_pos] == '+' || parser.buffer[parser.buffer_pos] == '-' { + // Set the chomping method and eat the indicator. + if parser.buffer[parser.buffer_pos] == '+' { + chomping = +1 + } else { + chomping = -1 + } + skip(parser) + + // Check for an indentation indicator. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + if is_digit(parser.buffer, parser.buffer_pos) { + // Check that the intendation is greater than 0. + if parser.buffer[parser.buffer_pos] == '0' { + yaml_parser_set_scanner_error(parser, "while scanning a block scalar", + start_mark, "found an intendation indicator equal to 0") + return false + } + + // Get the intendation level and eat the indicator. 
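+ // (The indicator is a single non-zero digit, as in the headers "|2" or
+ // ">+1"; further below it is added to the current indentation level to
+ // fix the content indentation.)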
+ increment = as_digit(parser.buffer, parser.buffer_pos) + skip(parser) + } + + } else if is_digit(parser.buffer, parser.buffer_pos) { + // Do the same as above, but in the opposite order. + + if parser.buffer[parser.buffer_pos] == '0' { + yaml_parser_set_scanner_error(parser, "while scanning a block scalar", + start_mark, "found an intendation indicator equal to 0") + return false + } + increment = as_digit(parser.buffer, parser.buffer_pos) + skip(parser) + + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + if parser.buffer[parser.buffer_pos] == '+' || parser.buffer[parser.buffer_pos] == '-' { + if parser.buffer[parser.buffer_pos] == '+' { + chomping = +1 + } else { + chomping = -1 + } + skip(parser) + } + } + + // Eat whitespaces and comments to the end of the line. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + for is_blank(parser.buffer, parser.buffer_pos) { + skip(parser) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + if parser.buffer[parser.buffer_pos] == '#' { + for !is_breakz(parser.buffer, parser.buffer_pos) { + skip(parser) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + } + + // Check if we are at the end of the line. + if !is_breakz(parser.buffer, parser.buffer_pos) { + yaml_parser_set_scanner_error(parser, "while scanning a block scalar", + start_mark, "did not find expected comment or line break") + return false + } + + // Eat a line break. + if is_break(parser.buffer, parser.buffer_pos) { + if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { + return false + } + skip_line(parser) + } + + end_mark := parser.mark + + // Set the intendation level if it was specified. + var indent int + if increment > 0 { + if parser.indent >= 0 { + indent = parser.indent + increment + } else { + indent = increment + } + } + + // Scan the leading line breaks and determine the indentation level if needed. + var s, leading_break, trailing_breaks []byte + if !yaml_parser_scan_block_scalar_breaks(parser, &indent, &trailing_breaks, start_mark, &end_mark) { + return false + } + + // Scan the block scalar content. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + var leading_blank, trailing_blank bool + for parser.mark.column == indent && !is_z(parser.buffer, parser.buffer_pos) { + // We are at the beginning of a non-empty line. + + // Is it a trailing whitespace? + trailing_blank = is_blank(parser.buffer, parser.buffer_pos) + + // Check if we need to fold the leading line break. + if !literal && !leading_blank && !trailing_blank && len(leading_break) > 0 && leading_break[0] == '\n' { + // Do we need to join the lines by space? + if len(trailing_breaks) == 0 { + s = append(s, ' ') + } + } else { + s = append(s, leading_break...) + } + leading_break = leading_break[:0] + + // Append the remaining line breaks. + s = append(s, trailing_breaks...) + trailing_breaks = trailing_breaks[:0] + + // Is it a leading whitespace? + leading_blank = is_blank(parser.buffer, parser.buffer_pos) + + // Consume the current line. + for !is_breakz(parser.buffer, parser.buffer_pos) { + s = read(parser, s) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + + // Consume the line break. 
+ if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { + return false + } + + leading_break = read_line(parser, leading_break) + + // Eat the following intendation spaces and line breaks. + if !yaml_parser_scan_block_scalar_breaks(parser, &indent, &trailing_breaks, start_mark, &end_mark) { + return false + } + } + + // Chomp the tail. + if chomping != -1 { + s = append(s, leading_break...) + } + if chomping == 1 { + s = append(s, trailing_breaks...) + } + + // Create a token. + *token = yaml_token_t{ + typ: yaml_SCALAR_TOKEN, + start_mark: start_mark, + end_mark: end_mark, + value: s, + style: yaml_LITERAL_SCALAR_STYLE, + } + if !literal { + token.style = yaml_FOLDED_SCALAR_STYLE + } + return true +} + +// Scan intendation spaces and line breaks for a block scalar. Determine the +// intendation level if needed. +func yaml_parser_scan_block_scalar_breaks(parser *yaml_parser_t, indent *int, breaks *[]byte, start_mark yaml_mark_t, end_mark *yaml_mark_t) bool { + *end_mark = parser.mark + + // Eat the intendation spaces and line breaks. + max_indent := 0 + for { + // Eat the intendation spaces. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + for (*indent == 0 || parser.mark.column < *indent) && is_space(parser.buffer, parser.buffer_pos) { + skip(parser) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + if parser.mark.column > max_indent { + max_indent = parser.mark.column + } + + // Check for a tab character messing the intendation. + if (*indent == 0 || parser.mark.column < *indent) && is_tab(parser.buffer, parser.buffer_pos) { + return yaml_parser_set_scanner_error(parser, "while scanning a block scalar", + start_mark, "found a tab character where an intendation space is expected") + } + + // Have we found a non-empty line? + if !is_break(parser.buffer, parser.buffer_pos) { + break + } + + // Consume the line break. + if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { + return false + } + // [Go] Should really be returning breaks instead. + *breaks = read_line(parser, *breaks) + *end_mark = parser.mark + } + + // Determine the indentation level if needed. + if *indent == 0 { + *indent = max_indent + if *indent < parser.indent+1 { + *indent = parser.indent + 1 + } + if *indent < 1 { + *indent = 1 + } + } + return true +} + +// Scan a quoted scalar. +func yaml_parser_scan_flow_scalar(parser *yaml_parser_t, token *yaml_token_t, single bool) bool { + // Eat the left quote. + start_mark := parser.mark + skip(parser) + + // Consume the content of the quoted scalar. + var s, leading_break, trailing_breaks, whitespaces []byte + for { + // Check that there are no document indicators at the beginning of the line. + if parser.unread < 4 && !yaml_parser_update_buffer(parser, 4) { + return false + } + + if parser.mark.column == 0 && + ((parser.buffer[parser.buffer_pos+0] == '-' && + parser.buffer[parser.buffer_pos+1] == '-' && + parser.buffer[parser.buffer_pos+2] == '-') || + (parser.buffer[parser.buffer_pos+0] == '.' && + parser.buffer[parser.buffer_pos+1] == '.' && + parser.buffer[parser.buffer_pos+2] == '.')) && + is_blankz(parser.buffer, parser.buffer_pos+3) { + yaml_parser_set_scanner_error(parser, "while scanning a quoted scalar", + start_mark, "found unexpected document indicator") + return false + } + + // Check for EOF. 
+ if is_z(parser.buffer, parser.buffer_pos) { + yaml_parser_set_scanner_error(parser, "while scanning a quoted scalar", + start_mark, "found unexpected end of stream") + return false + } + + // Consume non-blank characters. + leading_blanks := false + for !is_blankz(parser.buffer, parser.buffer_pos) { + if single && parser.buffer[parser.buffer_pos] == '\'' && parser.buffer[parser.buffer_pos+1] == '\'' { + // Is is an escaped single quote. + s = append(s, '\'') + skip(parser) + skip(parser) + + } else if single && parser.buffer[parser.buffer_pos] == '\'' { + // It is a right single quote. + break + } else if !single && parser.buffer[parser.buffer_pos] == '"' { + // It is a right double quote. + break + + } else if !single && parser.buffer[parser.buffer_pos] == '\\' && is_break(parser.buffer, parser.buffer_pos+1) { + // It is an escaped line break. + if parser.unread < 3 && !yaml_parser_update_buffer(parser, 3) { + return false + } + skip(parser) + skip_line(parser) + leading_blanks = true + break + + } else if !single && parser.buffer[parser.buffer_pos] == '\\' { + // It is an escape sequence. + code_length := 0 + + // Check the escape character. + switch parser.buffer[parser.buffer_pos+1] { + case '0': + s = append(s, 0) + case 'a': + s = append(s, '\x07') + case 'b': + s = append(s, '\x08') + case 't', '\t': + s = append(s, '\x09') + case 'n': + s = append(s, '\x0A') + case 'v': + s = append(s, '\x0B') + case 'f': + s = append(s, '\x0C') + case 'r': + s = append(s, '\x0D') + case 'e': + s = append(s, '\x1B') + case ' ': + s = append(s, '\x20') + case '"': + s = append(s, '"') + case '\'': + s = append(s, '\'') + case '\\': + s = append(s, '\\') + case 'N': // NEL (#x85) + s = append(s, '\xC2') + s = append(s, '\x85') + case '_': // #xA0 + s = append(s, '\xC2') + s = append(s, '\xA0') + case 'L': // LS (#x2028) + s = append(s, '\xE2') + s = append(s, '\x80') + s = append(s, '\xA8') + case 'P': // PS (#x2029) + s = append(s, '\xE2') + s = append(s, '\x80') + s = append(s, '\xA9') + case 'x': + code_length = 2 + case 'u': + code_length = 4 + case 'U': + code_length = 8 + default: + yaml_parser_set_scanner_error(parser, "while parsing a quoted scalar", + start_mark, "found unknown escape character") + return false + } + + skip(parser) + skip(parser) + + // Consume an arbitrary escape code. + if code_length > 0 { + var value int + + // Scan the character value. + if parser.unread < code_length && !yaml_parser_update_buffer(parser, code_length) { + return false + } + for k := 0; k < code_length; k++ { + if !is_hex(parser.buffer, parser.buffer_pos+k) { + yaml_parser_set_scanner_error(parser, "while parsing a quoted scalar", + start_mark, "did not find expected hexdecimal number") + return false + } + value = (value << 4) + as_hex(parser.buffer, parser.buffer_pos+k) + } + + // Check the value and write the character. 
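+ // (Surrogate halves U+D800-U+DFFF and values above U+10FFFF are rejected;
+ // any other code point is appended as 1-4 UTF-8 bytes according to its range.)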
+ if (value >= 0xD800 && value <= 0xDFFF) || value > 0x10FFFF { + yaml_parser_set_scanner_error(parser, "while parsing a quoted scalar", + start_mark, "found invalid Unicode character escape code") + return false + } + if value <= 0x7F { + s = append(s, byte(value)) + } else if value <= 0x7FF { + s = append(s, byte(0xC0+(value>>6))) + s = append(s, byte(0x80+(value&0x3F))) + } else if value <= 0xFFFF { + s = append(s, byte(0xE0+(value>>12))) + s = append(s, byte(0x80+((value>>6)&0x3F))) + s = append(s, byte(0x80+(value&0x3F))) + } else { + s = append(s, byte(0xF0+(value>>18))) + s = append(s, byte(0x80+((value>>12)&0x3F))) + s = append(s, byte(0x80+((value>>6)&0x3F))) + s = append(s, byte(0x80+(value&0x3F))) + } + + // Advance the pointer. + for k := 0; k < code_length; k++ { + skip(parser) + } + } + } else { + // It is a non-escaped non-blank character. + s = read(parser, s) + } + if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { + return false + } + } + + // Check if we are at the end of the scalar. + if single { + if parser.buffer[parser.buffer_pos] == '\'' { + break + } + } else { + if parser.buffer[parser.buffer_pos] == '"' { + break + } + } + + // Consume blank characters. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + + for is_blank(parser.buffer, parser.buffer_pos) || is_break(parser.buffer, parser.buffer_pos) { + if is_blank(parser.buffer, parser.buffer_pos) { + // Consume a space or a tab character. + if !leading_blanks { + whitespaces = read(parser, whitespaces) + } else { + skip(parser) + } + } else { + if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { + return false + } + + // Check if it is a first line break. + if !leading_blanks { + whitespaces = whitespaces[:0] + leading_break = read_line(parser, leading_break) + leading_blanks = true + } else { + trailing_breaks = read_line(parser, trailing_breaks) + } + } + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + + // Join the whitespaces or fold line breaks. + if leading_blanks { + // Do we need to fold line breaks? + if len(leading_break) > 0 && leading_break[0] == '\n' { + if len(trailing_breaks) == 0 { + s = append(s, ' ') + } else { + s = append(s, trailing_breaks...) + } + } else { + s = append(s, leading_break...) + s = append(s, trailing_breaks...) + } + trailing_breaks = trailing_breaks[:0] + leading_break = leading_break[:0] + } else { + s = append(s, whitespaces...) + whitespaces = whitespaces[:0] + } + } + + // Eat the right quote. + skip(parser) + end_mark := parser.mark + + // Create a token. + *token = yaml_token_t{ + typ: yaml_SCALAR_TOKEN, + start_mark: start_mark, + end_mark: end_mark, + value: s, + style: yaml_SINGLE_QUOTED_SCALAR_STYLE, + } + if !single { + token.style = yaml_DOUBLE_QUOTED_SCALAR_STYLE + } + return true +} + +// Scan a plain scalar. +func yaml_parser_scan_plain_scalar(parser *yaml_parser_t, token *yaml_token_t) bool { + + var s, leading_break, trailing_breaks, whitespaces []byte + var leading_blanks bool + var indent = parser.indent + 1 + + start_mark := parser.mark + end_mark := parser.mark + + // Consume the content of the plain scalar. + for { + // Check for a document indicator. + if parser.unread < 4 && !yaml_parser_update_buffer(parser, 4) { + return false + } + if parser.mark.column == 0 && + ((parser.buffer[parser.buffer_pos+0] == '-' && + parser.buffer[parser.buffer_pos+1] == '-' && + parser.buffer[parser.buffer_pos+2] == '-') || + (parser.buffer[parser.buffer_pos+0] == '.' 
&& + parser.buffer[parser.buffer_pos+1] == '.' && + parser.buffer[parser.buffer_pos+2] == '.')) && + is_blankz(parser.buffer, parser.buffer_pos+3) { + break + } + + // Check for a comment. + if parser.buffer[parser.buffer_pos] == '#' { + break + } + + // Consume non-blank characters. + for !is_blankz(parser.buffer, parser.buffer_pos) { + + // Check for 'x:x' in the flow context. TODO: Fix the test "spec-08-13". + if parser.flow_level > 0 && + parser.buffer[parser.buffer_pos] == ':' && + !is_blankz(parser.buffer, parser.buffer_pos+1) { + yaml_parser_set_scanner_error(parser, "while scanning a plain scalar", + start_mark, "found unexpected ':'") + return false + } + + // Check for indicators that may end a plain scalar. + if (parser.buffer[parser.buffer_pos] == ':' && is_blankz(parser.buffer, parser.buffer_pos+1)) || + (parser.flow_level > 0 && + (parser.buffer[parser.buffer_pos] == ',' || parser.buffer[parser.buffer_pos] == ':' || + parser.buffer[parser.buffer_pos] == '?' || parser.buffer[parser.buffer_pos] == '[' || + parser.buffer[parser.buffer_pos] == ']' || parser.buffer[parser.buffer_pos] == '{' || + parser.buffer[parser.buffer_pos] == '}')) { + break + } + + // Check if we need to join whitespaces and breaks. + if leading_blanks || len(whitespaces) > 0 { + if leading_blanks { + // Do we need to fold line breaks? + if leading_break[0] == '\n' { + if len(trailing_breaks) == 0 { + s = append(s, ' ') + } else { + s = append(s, trailing_breaks...) + } + } else { + s = append(s, leading_break...) + s = append(s, trailing_breaks...) + } + trailing_breaks = trailing_breaks[:0] + leading_break = leading_break[:0] + leading_blanks = false + } else { + s = append(s, whitespaces...) + whitespaces = whitespaces[:0] + } + } + + // Copy the character. + s = read(parser, s) + + end_mark = parser.mark + if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { + return false + } + } + + // Is it the end? + if !(is_blank(parser.buffer, parser.buffer_pos) || is_break(parser.buffer, parser.buffer_pos)) { + break + } + + // Consume blank characters. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + + for is_blank(parser.buffer, parser.buffer_pos) || is_break(parser.buffer, parser.buffer_pos) { + if is_blank(parser.buffer, parser.buffer_pos) { + + // Check for tab character that abuse intendation. + if leading_blanks && parser.mark.column < indent && is_tab(parser.buffer, parser.buffer_pos) { + yaml_parser_set_scanner_error(parser, "while scanning a plain scalar", + start_mark, "found a tab character that violate intendation") + return false + } + + // Consume a space or a tab character. + if !leading_blanks { + whitespaces = read(parser, whitespaces) + } else { + skip(parser) + } + } else { + if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { + return false + } + + // Check if it is a first line break. + if !leading_blanks { + whitespaces = whitespaces[:0] + leading_break = read_line(parser, leading_break) + leading_blanks = true + } else { + trailing_breaks = read_line(parser, trailing_breaks) + } + } + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + + // Check intendation level. + if parser.flow_level == 0 && parser.mark.column < indent { + break + } + } + + // Create a token. + *token = yaml_token_t{ + typ: yaml_SCALAR_TOKEN, + start_mark: start_mark, + end_mark: end_mark, + value: s, + style: yaml_PLAIN_SCALAR_STYLE, + } + + // Note that we change the 'simple_key_allowed' flag. 
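+ // (If the scalar was terminated after consuming one or more line breaks,
+ // the following line may start a new simple key, so the flag is re-enabled.)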
+ if leading_blanks { + parser.simple_key_allowed = true + } + return true +} diff --git a/vendor/github.com/docker/distribution/vendor/gopkg.in/yaml.v2/sorter.go b/vendor/github.com/docker/distribution/vendor/gopkg.in/yaml.v2/sorter.go new file mode 100644 index 000000000..5958822f9 --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/gopkg.in/yaml.v2/sorter.go @@ -0,0 +1,104 @@ +package yaml + +import ( + "reflect" + "unicode" +) + +type keyList []reflect.Value + +func (l keyList) Len() int { return len(l) } +func (l keyList) Swap(i, j int) { l[i], l[j] = l[j], l[i] } +func (l keyList) Less(i, j int) bool { + a := l[i] + b := l[j] + ak := a.Kind() + bk := b.Kind() + for (ak == reflect.Interface || ak == reflect.Ptr) && !a.IsNil() { + a = a.Elem() + ak = a.Kind() + } + for (bk == reflect.Interface || bk == reflect.Ptr) && !b.IsNil() { + b = b.Elem() + bk = b.Kind() + } + af, aok := keyFloat(a) + bf, bok := keyFloat(b) + if aok && bok { + if af != bf { + return af < bf + } + if ak != bk { + return ak < bk + } + return numLess(a, b) + } + if ak != reflect.String || bk != reflect.String { + return ak < bk + } + ar, br := []rune(a.String()), []rune(b.String()) + for i := 0; i < len(ar) && i < len(br); i++ { + if ar[i] == br[i] { + continue + } + al := unicode.IsLetter(ar[i]) + bl := unicode.IsLetter(br[i]) + if al && bl { + return ar[i] < br[i] + } + if al || bl { + return bl + } + var ai, bi int + var an, bn int64 + for ai = i; ai < len(ar) && unicode.IsDigit(ar[ai]); ai++ { + an = an*10 + int64(ar[ai]-'0') + } + for bi = i; bi < len(br) && unicode.IsDigit(br[bi]); bi++ { + bn = bn*10 + int64(br[bi]-'0') + } + if an != bn { + return an < bn + } + if ai != bi { + return ai < bi + } + return ar[i] < br[i] + } + return len(ar) < len(br) +} + +// keyFloat returns a float value for v if it is a number/bool +// and whether it is a number/bool or not. +func keyFloat(v reflect.Value) (f float64, ok bool) { + switch v.Kind() { + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return float64(v.Int()), true + case reflect.Float32, reflect.Float64: + return v.Float(), true + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + return float64(v.Uint()), true + case reflect.Bool: + if v.Bool() { + return 1, true + } + return 0, true + } + return 0, false +} + +// numLess returns whether a < b. +// a and b must necessarily have the same kind. +func numLess(a, b reflect.Value) bool { + switch a.Kind() { + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return a.Int() < b.Int() + case reflect.Float32, reflect.Float64: + return a.Float() < b.Float() + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + return a.Uint() < b.Uint() + case reflect.Bool: + return !a.Bool() && b.Bool() + } + panic("not a number") +} diff --git a/vendor/github.com/docker/distribution/vendor/gopkg.in/yaml.v2/writerc.go b/vendor/github.com/docker/distribution/vendor/gopkg.in/yaml.v2/writerc.go new file mode 100644 index 000000000..190362f25 --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/gopkg.in/yaml.v2/writerc.go @@ -0,0 +1,89 @@ +package yaml + +// Set the writer error and return false. +func yaml_emitter_set_writer_error(emitter *yaml_emitter_t, problem string) bool { + emitter.error = yaml_WRITER_ERROR + emitter.problem = problem + return false +} + +// Flush the output buffer. 
+func yaml_emitter_flush(emitter *yaml_emitter_t) bool { + if emitter.write_handler == nil { + panic("write handler not set") + } + + // Check if the buffer is empty. + if emitter.buffer_pos == 0 { + return true + } + + // If the output encoding is UTF-8, we don't need to recode the buffer. + if emitter.encoding == yaml_UTF8_ENCODING { + if err := emitter.write_handler(emitter, emitter.buffer[:emitter.buffer_pos]); err != nil { + return yaml_emitter_set_writer_error(emitter, "write error: "+err.Error()) + } + emitter.buffer_pos = 0 + return true + } + + // Recode the buffer into the raw buffer. + var low, high int + if emitter.encoding == yaml_UTF16LE_ENCODING { + low, high = 0, 1 + } else { + high, low = 1, 0 + } + + pos := 0 + for pos < emitter.buffer_pos { + // See the "reader.c" code for more details on UTF-8 encoding. Note + // that we assume that the buffer contains a valid UTF-8 sequence. + + // Read the next UTF-8 character. + octet := emitter.buffer[pos] + + var w int + var value rune + switch { + case octet&0x80 == 0x00: + w, value = 1, rune(octet&0x7F) + case octet&0xE0 == 0xC0: + w, value = 2, rune(octet&0x1F) + case octet&0xF0 == 0xE0: + w, value = 3, rune(octet&0x0F) + case octet&0xF8 == 0xF0: + w, value = 4, rune(octet&0x07) + } + for k := 1; k < w; k++ { + octet = emitter.buffer[pos+k] + value = (value << 6) + (rune(octet) & 0x3F) + } + pos += w + + // Write the character. + if value < 0x10000 { + var b [2]byte + b[high] = byte(value >> 8) + b[low] = byte(value & 0xFF) + emitter.raw_buffer = append(emitter.raw_buffer, b[0], b[1]) + } else { + // Write the character using a surrogate pair (check "reader.c"). + var b [4]byte + value -= 0x10000 + b[high] = byte(0xD8 + (value >> 18)) + b[low] = byte((value >> 10) & 0xFF) + b[high+2] = byte(0xDC + ((value >> 8) & 0xFF)) + b[low+2] = byte(value & 0xFF) + emitter.raw_buffer = append(emitter.raw_buffer, b[0], b[1], b[2], b[3]) + } + } + + // Write the raw buffer. + if err := emitter.write_handler(emitter, emitter.raw_buffer); err != nil { + return yaml_emitter_set_writer_error(emitter, "write error: "+err.Error()) + } + emitter.buffer_pos = 0 + emitter.raw_buffer = emitter.raw_buffer[:0] + return true +} diff --git a/vendor/github.com/docker/distribution/vendor/gopkg.in/yaml.v2/yaml.go b/vendor/github.com/docker/distribution/vendor/gopkg.in/yaml.v2/yaml.go new file mode 100644 index 000000000..e3e01edc9 --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/gopkg.in/yaml.v2/yaml.go @@ -0,0 +1,344 @@ +// Package yaml implements YAML support for the Go language. +// +// Source code and other details for the project are available at GitHub: +// +// https://github.com/go-yaml/yaml +// +package yaml + +import ( + "errors" + "fmt" + "reflect" + "strings" + "sync" +) + +// MapSlice encodes and decodes as a YAML map. +// The order of keys is preserved when encoding and decoding. +type MapSlice []MapItem + +// MapItem is an item in a MapSlice. +type MapItem struct { + Key, Value interface{} +} + +// The Unmarshaler interface may be implemented by types to customize their +// behavior when being unmarshaled from a YAML document. The UnmarshalYAML +// method receives a function that may be called to unmarshal the original +// YAML value into a field or variable. It is safe to call the unmarshal +// function parameter more than once if necessary. 
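+//
+// As a rough sketch (the Duration type and its string format are
+// illustrative, not part of this package), a custom unmarshaler could
+// look like:
+//
+// type Duration time.Duration
+//
+// func (d *Duration) UnmarshalYAML(unmarshal func(interface{}) error) error {
+// var s string
+// if err := unmarshal(&s); err != nil {
+// return err
+// }
+// v, err := time.ParseDuration(s)
+// if err != nil {
+// return err
+// }
+// *d = Duration(v)
+// return nil
+// }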
+type Unmarshaler interface {
+ UnmarshalYAML(unmarshal func(interface{}) error) error
+}
+
+// The Marshaler interface may be implemented by types to customize their
+// behavior when being marshaled into a YAML document. The returned value
+// is marshaled in place of the original value implementing Marshaler.
+//
+// If an error is returned by MarshalYAML, the marshaling procedure stops
+// and returns with the provided error.
+type Marshaler interface {
+ MarshalYAML() (interface{}, error)
+}
+
+// Unmarshal decodes the first document found within the in byte slice
+// and assigns decoded values into the out value.
+//
+// Maps and pointers (to a struct, string, int, etc) are accepted as out
+// values. If an internal pointer within a struct is not initialized,
+// the yaml package will initialize it if necessary for unmarshalling
+// the provided data. The out parameter must not be nil.
+//
+// The type of the decoded values should be compatible with the respective
+// values in out. If one or more values cannot be decoded due to type
+// mismatches, decoding continues partially until the end of the YAML
+// content, and a *yaml.TypeError is returned with details for all
+// missed values.
+//
+// Struct fields are only unmarshalled if they are exported (have an
+// upper case first letter), and are unmarshalled using the field name
+// lowercased as the default key. Custom keys may be defined via the
+// "yaml" name in the field tag: the content preceding the first comma
+// is used as the key, and the following comma-separated options are
+// used to tweak the marshalling process (see Marshal).
+// Conflicting names result in a runtime error.
+//
+// For example:
+//
+// type T struct {
+// F int `yaml:"a,omitempty"`
+// B int
+// }
+// var t T
+// yaml.Unmarshal([]byte("a: 1\nb: 2"), &t)
+//
+// See the documentation of Marshal for the format of tags and a list of
+// supported tag options.
+//
+func Unmarshal(in []byte, out interface{}) (err error) {
+ defer handleErr(&err)
+ d := newDecoder()
+ p := newParser(in)
+ defer p.destroy()
+ node := p.parse()
+ if node != nil {
+ v := reflect.ValueOf(out)
+ if v.Kind() == reflect.Ptr && !v.IsNil() {
+ v = v.Elem()
+ }
+ d.unmarshal(node, v)
+ }
+ if len(d.terrors) > 0 {
+ return &TypeError{d.terrors}
+ }
+ return nil
+}
+
+// Marshal serializes the value provided into a YAML document. The structure
+// of the generated document will reflect the structure of the value itself.
+// Maps and pointers (to struct, string, int, etc) are accepted as the in value.
+//
+// Struct fields are only marshalled if they are exported (have an upper case
+// first letter), and are marshalled using the field name lowercased as the
+// default key. Custom keys may be defined via the "yaml" name in the field
+// tag: the content preceding the first comma is used as the key, and the
+// following comma-separated options are used to tweak the marshalling process.
+// Conflicting names result in a runtime error.
+//
+// The field tag format accepted is:
+//
+// `(...) yaml:"[<key>][,<flag1>[,<flag2>]]" (...)`
+//
+// The following flags are currently supported:
+//
+// omitempty Only include the field if it's not set to the zero
+// value for the type or to empty slices or maps.
+// Does not apply to zero valued structs.
+//
+// flow Marshal using a flow style (useful for structs,
+// sequences and maps).
+//
+// inline Inline the struct it's applied to, so its fields
+// are processed as if they were part of the outer
+// struct.
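+//
+// For instance (a sketch; the Base and Item types are illustrative), an
+// embedded struct tagged ",inline" is flattened into its parent:
+//
+// type Base struct {
+// ID string
+// }
+//
+// type Item struct {
+// Base `yaml:",inline"`
+// Name string
+// }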
+// +// In addition, if the key is "-", the field is ignored. +// +// For example: +// +// type T struct { +// F int "a,omitempty" +// B int +// } +// yaml.Marshal(&T{B: 2}) // Returns "b: 2\n" +// yaml.Marshal(&T{F: 1}} // Returns "a: 1\nb: 0\n" +// +func Marshal(in interface{}) (out []byte, err error) { + defer handleErr(&err) + e := newEncoder() + defer e.destroy() + e.marshal("", reflect.ValueOf(in)) + e.finish() + out = e.out + return +} + +func handleErr(err *error) { + if v := recover(); v != nil { + if e, ok := v.(yamlError); ok { + *err = e.err + } else { + panic(v) + } + } +} + +type yamlError struct { + err error +} + +func fail(err error) { + panic(yamlError{err}) +} + +func failf(format string, args ...interface{}) { + panic(yamlError{fmt.Errorf("yaml: "+format, args...)}) +} + +// A TypeError is returned by Unmarshal when one or more fields in +// the YAML document cannot be properly decoded into the requested +// types. When this error is returned, the value is still +// unmarshaled partially. +type TypeError struct { + Errors []string +} + +func (e *TypeError) Error() string { + return fmt.Sprintf("yaml: unmarshal errors:\n %s", strings.Join(e.Errors, "\n ")) +} + +// -------------------------------------------------------------------------- +// Maintain a mapping of keys to structure field indexes + +// The code in this section was copied from mgo/bson. + +// structInfo holds details for the serialization of fields of +// a given struct. +type structInfo struct { + FieldsMap map[string]fieldInfo + FieldsList []fieldInfo + + // InlineMap is the number of the field in the struct that + // contains an ,inline map, or -1 if there's none. + InlineMap int +} + +type fieldInfo struct { + Key string + Num int + OmitEmpty bool + Flow bool + + // Inline holds the field index if the field is part of an inlined struct. + Inline []int +} + +var structMap = make(map[reflect.Type]*structInfo) +var fieldMapMutex sync.RWMutex + +func getStructInfo(st reflect.Type) (*structInfo, error) { + fieldMapMutex.RLock() + sinfo, found := structMap[st] + fieldMapMutex.RUnlock() + if found { + return sinfo, nil + } + + n := st.NumField() + fieldsMap := make(map[string]fieldInfo) + fieldsList := make([]fieldInfo, 0, n) + inlineMap := -1 + for i := 0; i != n; i++ { + field := st.Field(i) + if field.PkgPath != "" { + continue // Private field + } + + info := fieldInfo{Num: i} + + tag := field.Tag.Get("yaml") + if tag == "" && strings.Index(string(field.Tag), ":") < 0 { + tag = string(field.Tag) + } + if tag == "-" { + continue + } + + inline := false + fields := strings.Split(tag, ",") + if len(fields) > 1 { + for _, flag := range fields[1:] { + switch flag { + case "omitempty": + info.OmitEmpty = true + case "flow": + info.Flow = true + case "inline": + inline = true + default: + return nil, errors.New(fmt.Sprintf("Unsupported flag %q in tag %q of type %s", flag, tag, st)) + } + } + tag = fields[0] + } + + if inline { + switch field.Type.Kind() { + // TODO: Implement support for inline maps. 
+ //case reflect.Map: + // if inlineMap >= 0 { + // return nil, errors.New("Multiple ,inline maps in struct " + st.String()) + // } + // if field.Type.Key() != reflect.TypeOf("") { + // return nil, errors.New("Option ,inline needs a map with string keys in struct " + st.String()) + // } + // inlineMap = info.Num + case reflect.Struct: + sinfo, err := getStructInfo(field.Type) + if err != nil { + return nil, err + } + for _, finfo := range sinfo.FieldsList { + if _, found := fieldsMap[finfo.Key]; found { + msg := "Duplicated key '" + finfo.Key + "' in struct " + st.String() + return nil, errors.New(msg) + } + if finfo.Inline == nil { + finfo.Inline = []int{i, finfo.Num} + } else { + finfo.Inline = append([]int{i}, finfo.Inline...) + } + fieldsMap[finfo.Key] = finfo + fieldsList = append(fieldsList, finfo) + } + default: + //return nil, errors.New("Option ,inline needs a struct value or map field") + return nil, errors.New("Option ,inline needs a struct value field") + } + continue + } + + if tag != "" { + info.Key = tag + } else { + info.Key = strings.ToLower(field.Name) + } + + if _, found = fieldsMap[info.Key]; found { + msg := "Duplicated key '" + info.Key + "' in struct " + st.String() + return nil, errors.New(msg) + } + + fieldsList = append(fieldsList, info) + fieldsMap[info.Key] = info + } + + sinfo = &structInfo{fieldsMap, fieldsList, inlineMap} + + fieldMapMutex.Lock() + structMap[st] = sinfo + fieldMapMutex.Unlock() + return sinfo, nil +} + +func isZero(v reflect.Value) bool { + switch v.Kind() { + case reflect.String: + return len(v.String()) == 0 + case reflect.Interface, reflect.Ptr: + return v.IsNil() + case reflect.Slice: + return v.Len() == 0 + case reflect.Map: + return v.Len() == 0 + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return v.Int() == 0 + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + return v.Uint() == 0 + case reflect.Bool: + return !v.Bool() + case reflect.Struct: + vt := v.Type() + for i := v.NumField()-1; i >= 0; i-- { + if vt.Field(i).PkgPath != "" { + continue // Private field + } + if !isZero(v.Field(i)) { + return false + } + } + return true + } + return false +} diff --git a/vendor/github.com/docker/distribution/vendor/gopkg.in/yaml.v2/yamlh.go b/vendor/github.com/docker/distribution/vendor/gopkg.in/yaml.v2/yamlh.go new file mode 100644 index 000000000..d60a6b6b0 --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/gopkg.in/yaml.v2/yamlh.go @@ -0,0 +1,716 @@ +package yaml + +import ( + "io" +) + +// The version directive data. +type yaml_version_directive_t struct { + major int8 // The major version number. + minor int8 // The minor version number. +} + +// The tag directive data. +type yaml_tag_directive_t struct { + handle []byte // The tag handle. + prefix []byte // The tag prefix. +} + +type yaml_encoding_t int + +// The stream encoding. +const ( + // Let the parser choose the encoding. + yaml_ANY_ENCODING yaml_encoding_t = iota + + yaml_UTF8_ENCODING // The default UTF-8 encoding. + yaml_UTF16LE_ENCODING // The UTF-16-LE encoding with BOM. + yaml_UTF16BE_ENCODING // The UTF-16-BE encoding with BOM. +) + +type yaml_break_t int + +// Line break types. +const ( + // Let the parser choose the break type. + yaml_ANY_BREAK yaml_break_t = iota + + yaml_CR_BREAK // Use CR for line breaks (Mac style). + yaml_LN_BREAK // Use LN for line breaks (Unix style). + yaml_CRLN_BREAK // Use CR LN for line breaks (DOS style). 
+) + +type yaml_error_type_t int + +// Many bad things could happen with the parser and emitter. +const ( + // No error is produced. + yaml_NO_ERROR yaml_error_type_t = iota + + yaml_MEMORY_ERROR // Cannot allocate or reallocate a block of memory. + yaml_READER_ERROR // Cannot read or decode the input stream. + yaml_SCANNER_ERROR // Cannot scan the input stream. + yaml_PARSER_ERROR // Cannot parse the input stream. + yaml_COMPOSER_ERROR // Cannot compose a YAML document. + yaml_WRITER_ERROR // Cannot write to the output stream. + yaml_EMITTER_ERROR // Cannot emit a YAML stream. +) + +// The pointer position. +type yaml_mark_t struct { + index int // The position index. + line int // The position line. + column int // The position column. +} + +// Node Styles + +type yaml_style_t int8 + +type yaml_scalar_style_t yaml_style_t + +// Scalar styles. +const ( + // Let the emitter choose the style. + yaml_ANY_SCALAR_STYLE yaml_scalar_style_t = iota + + yaml_PLAIN_SCALAR_STYLE // The plain scalar style. + yaml_SINGLE_QUOTED_SCALAR_STYLE // The single-quoted scalar style. + yaml_DOUBLE_QUOTED_SCALAR_STYLE // The double-quoted scalar style. + yaml_LITERAL_SCALAR_STYLE // The literal scalar style. + yaml_FOLDED_SCALAR_STYLE // The folded scalar style. +) + +type yaml_sequence_style_t yaml_style_t + +// Sequence styles. +const ( + // Let the emitter choose the style. + yaml_ANY_SEQUENCE_STYLE yaml_sequence_style_t = iota + + yaml_BLOCK_SEQUENCE_STYLE // The block sequence style. + yaml_FLOW_SEQUENCE_STYLE // The flow sequence style. +) + +type yaml_mapping_style_t yaml_style_t + +// Mapping styles. +const ( + // Let the emitter choose the style. + yaml_ANY_MAPPING_STYLE yaml_mapping_style_t = iota + + yaml_BLOCK_MAPPING_STYLE // The block mapping style. + yaml_FLOW_MAPPING_STYLE // The flow mapping style. +) + +// Tokens + +type yaml_token_type_t int + +// Token types. +const ( + // An empty token. + yaml_NO_TOKEN yaml_token_type_t = iota + + yaml_STREAM_START_TOKEN // A STREAM-START token. + yaml_STREAM_END_TOKEN // A STREAM-END token. + + yaml_VERSION_DIRECTIVE_TOKEN // A VERSION-DIRECTIVE token. + yaml_TAG_DIRECTIVE_TOKEN // A TAG-DIRECTIVE token. + yaml_DOCUMENT_START_TOKEN // A DOCUMENT-START token. + yaml_DOCUMENT_END_TOKEN // A DOCUMENT-END token. + + yaml_BLOCK_SEQUENCE_START_TOKEN // A BLOCK-SEQUENCE-START token. + yaml_BLOCK_MAPPING_START_TOKEN // A BLOCK-SEQUENCE-END token. + yaml_BLOCK_END_TOKEN // A BLOCK-END token. + + yaml_FLOW_SEQUENCE_START_TOKEN // A FLOW-SEQUENCE-START token. + yaml_FLOW_SEQUENCE_END_TOKEN // A FLOW-SEQUENCE-END token. + yaml_FLOW_MAPPING_START_TOKEN // A FLOW-MAPPING-START token. + yaml_FLOW_MAPPING_END_TOKEN // A FLOW-MAPPING-END token. + + yaml_BLOCK_ENTRY_TOKEN // A BLOCK-ENTRY token. + yaml_FLOW_ENTRY_TOKEN // A FLOW-ENTRY token. + yaml_KEY_TOKEN // A KEY token. + yaml_VALUE_TOKEN // A VALUE token. + + yaml_ALIAS_TOKEN // An ALIAS token. + yaml_ANCHOR_TOKEN // An ANCHOR token. + yaml_TAG_TOKEN // A TAG token. + yaml_SCALAR_TOKEN // A SCALAR token. 
+) + +func (tt yaml_token_type_t) String() string { + switch tt { + case yaml_NO_TOKEN: + return "yaml_NO_TOKEN" + case yaml_STREAM_START_TOKEN: + return "yaml_STREAM_START_TOKEN" + case yaml_STREAM_END_TOKEN: + return "yaml_STREAM_END_TOKEN" + case yaml_VERSION_DIRECTIVE_TOKEN: + return "yaml_VERSION_DIRECTIVE_TOKEN" + case yaml_TAG_DIRECTIVE_TOKEN: + return "yaml_TAG_DIRECTIVE_TOKEN" + case yaml_DOCUMENT_START_TOKEN: + return "yaml_DOCUMENT_START_TOKEN" + case yaml_DOCUMENT_END_TOKEN: + return "yaml_DOCUMENT_END_TOKEN" + case yaml_BLOCK_SEQUENCE_START_TOKEN: + return "yaml_BLOCK_SEQUENCE_START_TOKEN" + case yaml_BLOCK_MAPPING_START_TOKEN: + return "yaml_BLOCK_MAPPING_START_TOKEN" + case yaml_BLOCK_END_TOKEN: + return "yaml_BLOCK_END_TOKEN" + case yaml_FLOW_SEQUENCE_START_TOKEN: + return "yaml_FLOW_SEQUENCE_START_TOKEN" + case yaml_FLOW_SEQUENCE_END_TOKEN: + return "yaml_FLOW_SEQUENCE_END_TOKEN" + case yaml_FLOW_MAPPING_START_TOKEN: + return "yaml_FLOW_MAPPING_START_TOKEN" + case yaml_FLOW_MAPPING_END_TOKEN: + return "yaml_FLOW_MAPPING_END_TOKEN" + case yaml_BLOCK_ENTRY_TOKEN: + return "yaml_BLOCK_ENTRY_TOKEN" + case yaml_FLOW_ENTRY_TOKEN: + return "yaml_FLOW_ENTRY_TOKEN" + case yaml_KEY_TOKEN: + return "yaml_KEY_TOKEN" + case yaml_VALUE_TOKEN: + return "yaml_VALUE_TOKEN" + case yaml_ALIAS_TOKEN: + return "yaml_ALIAS_TOKEN" + case yaml_ANCHOR_TOKEN: + return "yaml_ANCHOR_TOKEN" + case yaml_TAG_TOKEN: + return "yaml_TAG_TOKEN" + case yaml_SCALAR_TOKEN: + return "yaml_SCALAR_TOKEN" + } + return "" +} + +// The token structure. +type yaml_token_t struct { + // The token type. + typ yaml_token_type_t + + // The start/end of the token. + start_mark, end_mark yaml_mark_t + + // The stream encoding (for yaml_STREAM_START_TOKEN). + encoding yaml_encoding_t + + // The alias/anchor/scalar value or tag/tag directive handle + // (for yaml_ALIAS_TOKEN, yaml_ANCHOR_TOKEN, yaml_SCALAR_TOKEN, yaml_TAG_TOKEN, yaml_TAG_DIRECTIVE_TOKEN). + value []byte + + // The tag suffix (for yaml_TAG_TOKEN). + suffix []byte + + // The tag directive prefix (for yaml_TAG_DIRECTIVE_TOKEN). + prefix []byte + + // The scalar style (for yaml_SCALAR_TOKEN). + style yaml_scalar_style_t + + // The version directive major/minor (for yaml_VERSION_DIRECTIVE_TOKEN). + major, minor int8 +} + +// Events + +type yaml_event_type_t int8 + +// Event types. +const ( + // An empty event. + yaml_NO_EVENT yaml_event_type_t = iota + + yaml_STREAM_START_EVENT // A STREAM-START event. + yaml_STREAM_END_EVENT // A STREAM-END event. + yaml_DOCUMENT_START_EVENT // A DOCUMENT-START event. + yaml_DOCUMENT_END_EVENT // A DOCUMENT-END event. + yaml_ALIAS_EVENT // An ALIAS event. + yaml_SCALAR_EVENT // A SCALAR event. + yaml_SEQUENCE_START_EVENT // A SEQUENCE-START event. + yaml_SEQUENCE_END_EVENT // A SEQUENCE-END event. + yaml_MAPPING_START_EVENT // A MAPPING-START event. + yaml_MAPPING_END_EVENT // A MAPPING-END event. +) + +// The event structure. +type yaml_event_t struct { + + // The event type. + typ yaml_event_type_t + + // The start and end of the event. + start_mark, end_mark yaml_mark_t + + // The document encoding (for yaml_STREAM_START_EVENT). + encoding yaml_encoding_t + + // The version directive (for yaml_DOCUMENT_START_EVENT). + version_directive *yaml_version_directive_t + + // The list of tag directives (for yaml_DOCUMENT_START_EVENT). + tag_directives []yaml_tag_directive_t + + // The anchor (for yaml_SCALAR_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT, yaml_ALIAS_EVENT). 
+ anchor []byte + + // The tag (for yaml_SCALAR_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT). + tag []byte + + // The scalar value (for yaml_SCALAR_EVENT). + value []byte + + // Is the document start/end indicator implicit, or the tag optional? + // (for yaml_DOCUMENT_START_EVENT, yaml_DOCUMENT_END_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT, yaml_SCALAR_EVENT). + implicit bool + + // Is the tag optional for any non-plain style? (for yaml_SCALAR_EVENT). + quoted_implicit bool + + // The style (for yaml_SCALAR_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT). + style yaml_style_t +} + +func (e *yaml_event_t) scalar_style() yaml_scalar_style_t { return yaml_scalar_style_t(e.style) } +func (e *yaml_event_t) sequence_style() yaml_sequence_style_t { return yaml_sequence_style_t(e.style) } +func (e *yaml_event_t) mapping_style() yaml_mapping_style_t { return yaml_mapping_style_t(e.style) } + +// Nodes + +const ( + yaml_NULL_TAG = "tag:yaml.org,2002:null" // The tag !!null with the only possible value: null. + yaml_BOOL_TAG = "tag:yaml.org,2002:bool" // The tag !!bool with the values: true and false. + yaml_STR_TAG = "tag:yaml.org,2002:str" // The tag !!str for string values. + yaml_INT_TAG = "tag:yaml.org,2002:int" // The tag !!int for integer values. + yaml_FLOAT_TAG = "tag:yaml.org,2002:float" // The tag !!float for float values. + yaml_TIMESTAMP_TAG = "tag:yaml.org,2002:timestamp" // The tag !!timestamp for date and time values. + + yaml_SEQ_TAG = "tag:yaml.org,2002:seq" // The tag !!seq is used to denote sequences. + yaml_MAP_TAG = "tag:yaml.org,2002:map" // The tag !!map is used to denote mapping. + + // Not in original libyaml. + yaml_BINARY_TAG = "tag:yaml.org,2002:binary" + yaml_MERGE_TAG = "tag:yaml.org,2002:merge" + + yaml_DEFAULT_SCALAR_TAG = yaml_STR_TAG // The default scalar tag is !!str. + yaml_DEFAULT_SEQUENCE_TAG = yaml_SEQ_TAG // The default sequence tag is !!seq. + yaml_DEFAULT_MAPPING_TAG = yaml_MAP_TAG // The default mapping tag is !!map. +) + +type yaml_node_type_t int + +// Node types. +const ( + // An empty node. + yaml_NO_NODE yaml_node_type_t = iota + + yaml_SCALAR_NODE // A scalar node. + yaml_SEQUENCE_NODE // A sequence node. + yaml_MAPPING_NODE // A mapping node. +) + +// An element of a sequence node. +type yaml_node_item_t int + +// An element of a mapping node. +type yaml_node_pair_t struct { + key int // The key of the element. + value int // The value of the element. +} + +// The node structure. +type yaml_node_t struct { + typ yaml_node_type_t // The node type. + tag []byte // The node tag. + + // The node data. + + // The scalar parameters (for yaml_SCALAR_NODE). + scalar struct { + value []byte // The scalar value. + length int // The length of the scalar value. + style yaml_scalar_style_t // The scalar style. + } + + // The sequence parameters (for YAML_SEQUENCE_NODE). + sequence struct { + items_data []yaml_node_item_t // The stack of sequence items. + style yaml_sequence_style_t // The sequence style. + } + + // The mapping parameters (for yaml_MAPPING_NODE). + mapping struct { + pairs_data []yaml_node_pair_t // The stack of mapping pairs (key, value). + pairs_start *yaml_node_pair_t // The beginning of the stack. + pairs_end *yaml_node_pair_t // The end of the stack. + pairs_top *yaml_node_pair_t // The top of the stack. + style yaml_mapping_style_t // The mapping style. + } + + start_mark yaml_mark_t // The beginning of the node. + end_mark yaml_mark_t // The end of the node. + +} + +// The document structure. 
+type yaml_document_t struct { + + // The document nodes. + nodes []yaml_node_t + + // The version directive. + version_directive *yaml_version_directive_t + + // The list of tag directives. + tag_directives_data []yaml_tag_directive_t + tag_directives_start int // The beginning of the tag directives list. + tag_directives_end int // The end of the tag directives list. + + start_implicit int // Is the document start indicator implicit? + end_implicit int // Is the document end indicator implicit? + + // The start/end of the document. + start_mark, end_mark yaml_mark_t +} + +// The prototype of a read handler. +// +// The read handler is called when the parser needs to read more bytes from the +// source. The handler should write not more than size bytes to the buffer. +// The number of written bytes should be set to the size_read variable. +// +// [in,out] data A pointer to an application data specified by +// yaml_parser_set_input(). +// [out] buffer The buffer to write the data from the source. +// [in] size The size of the buffer. +// [out] size_read The actual number of bytes read from the source. +// +// On success, the handler should return 1. If the handler failed, +// the returned value should be 0. On EOF, the handler should set the +// size_read to 0 and return 1. +type yaml_read_handler_t func(parser *yaml_parser_t, buffer []byte) (n int, err error) + +// This structure holds information about a potential simple key. +type yaml_simple_key_t struct { + possible bool // Is a simple key possible? + required bool // Is a simple key required? + token_number int // The number of the token. + mark yaml_mark_t // The position mark. +} + +// The states of the parser. +type yaml_parser_state_t int + +const ( + yaml_PARSE_STREAM_START_STATE yaml_parser_state_t = iota + + yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE // Expect the beginning of an implicit document. + yaml_PARSE_DOCUMENT_START_STATE // Expect DOCUMENT-START. + yaml_PARSE_DOCUMENT_CONTENT_STATE // Expect the content of a document. + yaml_PARSE_DOCUMENT_END_STATE // Expect DOCUMENT-END. + yaml_PARSE_BLOCK_NODE_STATE // Expect a block node. + yaml_PARSE_BLOCK_NODE_OR_INDENTLESS_SEQUENCE_STATE // Expect a block node or indentless sequence. + yaml_PARSE_FLOW_NODE_STATE // Expect a flow node. + yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE // Expect the first entry of a block sequence. + yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE // Expect an entry of a block sequence. + yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE // Expect an entry of an indentless sequence. + yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE // Expect the first key of a block mapping. + yaml_PARSE_BLOCK_MAPPING_KEY_STATE // Expect a block mapping key. + yaml_PARSE_BLOCK_MAPPING_VALUE_STATE // Expect a block mapping value. + yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE // Expect the first entry of a flow sequence. + yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE // Expect an entry of a flow sequence. + yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE // Expect a key of an ordered mapping. + yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE // Expect a value of an ordered mapping. + yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE // Expect the and of an ordered mapping entry. + yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE // Expect the first key of a flow mapping. + yaml_PARSE_FLOW_MAPPING_KEY_STATE // Expect a key of a flow mapping. + yaml_PARSE_FLOW_MAPPING_VALUE_STATE // Expect a value of a flow mapping. + yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE // Expect an empty value of a flow mapping. 
+ yaml_PARSE_END_STATE // Expect nothing. +) + +func (ps yaml_parser_state_t) String() string { + switch ps { + case yaml_PARSE_STREAM_START_STATE: + return "yaml_PARSE_STREAM_START_STATE" + case yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE: + return "yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE" + case yaml_PARSE_DOCUMENT_START_STATE: + return "yaml_PARSE_DOCUMENT_START_STATE" + case yaml_PARSE_DOCUMENT_CONTENT_STATE: + return "yaml_PARSE_DOCUMENT_CONTENT_STATE" + case yaml_PARSE_DOCUMENT_END_STATE: + return "yaml_PARSE_DOCUMENT_END_STATE" + case yaml_PARSE_BLOCK_NODE_STATE: + return "yaml_PARSE_BLOCK_NODE_STATE" + case yaml_PARSE_BLOCK_NODE_OR_INDENTLESS_SEQUENCE_STATE: + return "yaml_PARSE_BLOCK_NODE_OR_INDENTLESS_SEQUENCE_STATE" + case yaml_PARSE_FLOW_NODE_STATE: + return "yaml_PARSE_FLOW_NODE_STATE" + case yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE: + return "yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE" + case yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE: + return "yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE" + case yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE: + return "yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE" + case yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE: + return "yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE" + case yaml_PARSE_BLOCK_MAPPING_KEY_STATE: + return "yaml_PARSE_BLOCK_MAPPING_KEY_STATE" + case yaml_PARSE_BLOCK_MAPPING_VALUE_STATE: + return "yaml_PARSE_BLOCK_MAPPING_VALUE_STATE" + case yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE: + return "yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE" + case yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE: + return "yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE" + case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE: + return "yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE" + case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE: + return "yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE" + case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE: + return "yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE" + case yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE: + return "yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE" + case yaml_PARSE_FLOW_MAPPING_KEY_STATE: + return "yaml_PARSE_FLOW_MAPPING_KEY_STATE" + case yaml_PARSE_FLOW_MAPPING_VALUE_STATE: + return "yaml_PARSE_FLOW_MAPPING_VALUE_STATE" + case yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE: + return "yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE" + case yaml_PARSE_END_STATE: + return "yaml_PARSE_END_STATE" + } + return "" +} + +// This structure holds aliases data. +type yaml_alias_data_t struct { + anchor []byte // The anchor. + index int // The node id. + mark yaml_mark_t // The anchor mark. +} + +// The parser structure. +// +// All members are internal. Manage the structure using the +// yaml_parser_ family of functions. +type yaml_parser_t struct { + + // Error handling + + error yaml_error_type_t // Error type. + + problem string // Error description. + + // The byte about which the problem occured. + problem_offset int + problem_value int + problem_mark yaml_mark_t + + // The error context. + context string + context_mark yaml_mark_t + + // Reader stuff + + read_handler yaml_read_handler_t // Read handler. + + input_file io.Reader // File input data. + input []byte // String input data. + input_pos int + + eof bool // EOF flag + + buffer []byte // The working buffer. + buffer_pos int // The current position of the buffer. + + unread int // The number of unread characters in the buffer. + + raw_buffer []byte // The raw buffer. + raw_buffer_pos int // The current position of the buffer. + + encoding yaml_encoding_t // The input encoding. 
+ + offset int // The offset of the current position (in bytes). + mark yaml_mark_t // The mark of the current position. + + // Scanner stuff + + stream_start_produced bool // Have we started to scan the input stream? + stream_end_produced bool // Have we reached the end of the input stream? + + flow_level int // The number of unclosed '[' and '{' indicators. + + tokens []yaml_token_t // The tokens queue. + tokens_head int // The head of the tokens queue. + tokens_parsed int // The number of tokens fetched from the queue. + token_available bool // Does the tokens queue contain a token ready for dequeueing. + + indent int // The current indentation level. + indents []int // The indentation levels stack. + + simple_key_allowed bool // May a simple key occur at the current position? + simple_keys []yaml_simple_key_t // The stack of simple keys. + + // Parser stuff + + state yaml_parser_state_t // The current parser state. + states []yaml_parser_state_t // The parser states stack. + marks []yaml_mark_t // The stack of marks. + tag_directives []yaml_tag_directive_t // The list of TAG directives. + + // Dumper stuff + + aliases []yaml_alias_data_t // The alias data. + + document *yaml_document_t // The currently parsed document. +} + +// Emitter Definitions + +// The prototype of a write handler. +// +// The write handler is called when the emitter needs to flush the accumulated +// characters to the output. The handler should write @a size bytes of the +// @a buffer to the output. +// +// @param[in,out] data A pointer to an application data specified by +// yaml_emitter_set_output(). +// @param[in] buffer The buffer with bytes to be written. +// @param[in] size The size of the buffer. +// +// @returns On success, the handler should return @c 1. If the handler failed, +// the returned value should be @c 0. +// +type yaml_write_handler_t func(emitter *yaml_emitter_t, buffer []byte) error + +type yaml_emitter_state_t int + +// The emitter states. +const ( + // Expect STREAM-START. + yaml_EMIT_STREAM_START_STATE yaml_emitter_state_t = iota + + yaml_EMIT_FIRST_DOCUMENT_START_STATE // Expect the first DOCUMENT-START or STREAM-END. + yaml_EMIT_DOCUMENT_START_STATE // Expect DOCUMENT-START or STREAM-END. + yaml_EMIT_DOCUMENT_CONTENT_STATE // Expect the content of a document. + yaml_EMIT_DOCUMENT_END_STATE // Expect DOCUMENT-END. + yaml_EMIT_FLOW_SEQUENCE_FIRST_ITEM_STATE // Expect the first item of a flow sequence. + yaml_EMIT_FLOW_SEQUENCE_ITEM_STATE // Expect an item of a flow sequence. + yaml_EMIT_FLOW_MAPPING_FIRST_KEY_STATE // Expect the first key of a flow mapping. + yaml_EMIT_FLOW_MAPPING_KEY_STATE // Expect a key of a flow mapping. + yaml_EMIT_FLOW_MAPPING_SIMPLE_VALUE_STATE // Expect a value for a simple key of a flow mapping. + yaml_EMIT_FLOW_MAPPING_VALUE_STATE // Expect a value of a flow mapping. + yaml_EMIT_BLOCK_SEQUENCE_FIRST_ITEM_STATE // Expect the first item of a block sequence. + yaml_EMIT_BLOCK_SEQUENCE_ITEM_STATE // Expect an item of a block sequence. + yaml_EMIT_BLOCK_MAPPING_FIRST_KEY_STATE // Expect the first key of a block mapping. + yaml_EMIT_BLOCK_MAPPING_KEY_STATE // Expect the key of a block mapping. + yaml_EMIT_BLOCK_MAPPING_SIMPLE_VALUE_STATE // Expect a value for a simple key of a block mapping. + yaml_EMIT_BLOCK_MAPPING_VALUE_STATE // Expect a value of a block mapping. + yaml_EMIT_END_STATE // Expect nothing. +) + +// The emitter structure. +// +// All members are internal. Manage the structure using the @c yaml_emitter_ +// family of functions. 
+type yaml_emitter_t struct { + + // Error handling + + error yaml_error_type_t // Error type. + problem string // Error description. + + // Writer stuff + + write_handler yaml_write_handler_t // Write handler. + + output_buffer *[]byte // String output data. + output_file io.Writer // File output data. + + buffer []byte // The working buffer. + buffer_pos int // The current position of the buffer. + + raw_buffer []byte // The raw buffer. + raw_buffer_pos int // The current position of the buffer. + + encoding yaml_encoding_t // The stream encoding. + + // Emitter stuff + + canonical bool // If the output is in the canonical style? + best_indent int // The number of indentation spaces. + best_width int // The preferred width of the output lines. + unicode bool // Allow unescaped non-ASCII characters? + line_break yaml_break_t // The preferred line break. + + state yaml_emitter_state_t // The current emitter state. + states []yaml_emitter_state_t // The stack of states. + + events []yaml_event_t // The event queue. + events_head int // The head of the event queue. + + indents []int // The stack of indentation levels. + + tag_directives []yaml_tag_directive_t // The list of tag directives. + + indent int // The current indentation level. + + flow_level int // The current flow level. + + root_context bool // Is it the document root context? + sequence_context bool // Is it a sequence context? + mapping_context bool // Is it a mapping context? + simple_key_context bool // Is it a simple mapping key context? + + line int // The current line. + column int // The current column. + whitespace bool // If the last character was a whitespace? + indention bool // If the last character was an indentation character (' ', '-', '?', ':')? + open_ended bool // If an explicit document end is required? + + // Anchor analysis. + anchor_data struct { + anchor []byte // The anchor value. + alias bool // Is it an alias? + } + + // Tag analysis. + tag_data struct { + handle []byte // The tag handle. + suffix []byte // The tag suffix. + } + + // Scalar analysis. + scalar_data struct { + value []byte // The scalar value. + multiline bool // Does the scalar contain line breaks? + flow_plain_allowed bool // Can the scalar be expessed in the flow plain style? + block_plain_allowed bool // Can the scalar be expressed in the block plain style? + single_quoted_allowed bool // Can the scalar be expressed in the single quoted style? + block_allowed bool // Can the scalar be expressed in the literal or folded styles? + style yaml_scalar_style_t // The output style. + } + + // Dumper stuff + + opened bool // If the stream was already opened? + closed bool // If the stream was already closed? + + // The information associated with the document nodes. + anchors *struct { + references int // The number of references. + anchor int // The anchor id. + serialized bool // If the node has been emitted? + } + + last_anchor_id int // The last assigned anchor id. + + document *yaml_document_t // The currently emitted document. +} diff --git a/vendor/github.com/docker/distribution/vendor/gopkg.in/yaml.v2/yamlprivateh.go b/vendor/github.com/docker/distribution/vendor/gopkg.in/yaml.v2/yamlprivateh.go new file mode 100644 index 000000000..8110ce3c3 --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/gopkg.in/yaml.v2/yamlprivateh.go @@ -0,0 +1,173 @@ +package yaml + +const ( + // The size of the input raw buffer. + input_raw_buffer_size = 512 + + // The size of the input buffer. 
+ // It should be possible to decode the whole raw buffer. + input_buffer_size = input_raw_buffer_size * 3 + + // The size of the output buffer. + output_buffer_size = 128 + + // The size of the output raw buffer. + // It should be possible to encode the whole output buffer. + output_raw_buffer_size = (output_buffer_size*2 + 2) + + // The size of other stacks and queues. + initial_stack_size = 16 + initial_queue_size = 16 + initial_string_size = 16 +) + +// Check if the character at the specified position is an alphabetical +// character, a digit, '_', or '-'. +func is_alpha(b []byte, i int) bool { + return b[i] >= '0' && b[i] <= '9' || b[i] >= 'A' && b[i] <= 'Z' || b[i] >= 'a' && b[i] <= 'z' || b[i] == '_' || b[i] == '-' +} + +// Check if the character at the specified position is a digit. +func is_digit(b []byte, i int) bool { + return b[i] >= '0' && b[i] <= '9' +} + +// Get the value of a digit. +func as_digit(b []byte, i int) int { + return int(b[i]) - '0' +} + +// Check if the character at the specified position is a hex-digit. +func is_hex(b []byte, i int) bool { + return b[i] >= '0' && b[i] <= '9' || b[i] >= 'A' && b[i] <= 'F' || b[i] >= 'a' && b[i] <= 'f' +} + +// Get the value of a hex-digit. +func as_hex(b []byte, i int) int { + bi := b[i] + if bi >= 'A' && bi <= 'F' { + return int(bi) - 'A' + 10 + } + if bi >= 'a' && bi <= 'f' { + return int(bi) - 'a' + 10 + } + return int(bi) - '0' +} + +// Check if the character is ASCII. +func is_ascii(b []byte, i int) bool { + return b[i] <= 0x7F +} + +// Check if the character at the start of the buffer can be printed unescaped. +func is_printable(b []byte, i int) bool { + return ((b[i] == 0x0A) || // . == #x0A + (b[i] >= 0x20 && b[i] <= 0x7E) || // #x20 <= . <= #x7E + (b[i] == 0xC2 && b[i+1] >= 0xA0) || // #0xA0 <= . <= #xD7FF + (b[i] > 0xC2 && b[i] < 0xED) || + (b[i] == 0xED && b[i+1] < 0xA0) || + (b[i] == 0xEE) || + (b[i] == 0xEF && // #xE000 <= . <= #xFFFD + !(b[i+1] == 0xBB && b[i+2] == 0xBF) && // && . != #xFEFF + !(b[i+1] == 0xBF && (b[i+2] == 0xBE || b[i+2] == 0xBF)))) +} + +// Check if the character at the specified position is NUL. +func is_z(b []byte, i int) bool { + return b[i] == 0x00 +} + +// Check if the beginning of the buffer is a BOM. +func is_bom(b []byte, i int) bool { + return b[0] == 0xEF && b[1] == 0xBB && b[2] == 0xBF +} + +// Check if the character at the specified position is space. +func is_space(b []byte, i int) bool { + return b[i] == ' ' +} + +// Check if the character at the specified position is tab. +func is_tab(b []byte, i int) bool { + return b[i] == '\t' +} + +// Check if the character at the specified position is blank (space or tab). +func is_blank(b []byte, i int) bool { + //return is_space(b, i) || is_tab(b, i) + return b[i] == ' ' || b[i] == '\t' +} + +// Check if the character at the specified position is a line break. +func is_break(b []byte, i int) bool { + return (b[i] == '\r' || // CR (#xD) + b[i] == '\n' || // LF (#xA) + b[i] == 0xC2 && b[i+1] == 0x85 || // NEL (#x85) + b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA8 || // LS (#x2028) + b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA9) // PS (#x2029) +} + +func is_crlf(b []byte, i int) bool { + return b[i] == '\r' && b[i+1] == '\n' +} + +// Check if the character is a line break or NUL. 
+func is_breakz(b []byte, i int) bool { + //return is_break(b, i) || is_z(b, i) + return ( // is_break: + b[i] == '\r' || // CR (#xD) + b[i] == '\n' || // LF (#xA) + b[i] == 0xC2 && b[i+1] == 0x85 || // NEL (#x85) + b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA8 || // LS (#x2028) + b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA9 || // PS (#x2029) + // is_z: + b[i] == 0) +} + +// Check if the character is a line break, space, or NUL. +func is_spacez(b []byte, i int) bool { + //return is_space(b, i) || is_breakz(b, i) + return ( // is_space: + b[i] == ' ' || + // is_breakz: + b[i] == '\r' || // CR (#xD) + b[i] == '\n' || // LF (#xA) + b[i] == 0xC2 && b[i+1] == 0x85 || // NEL (#x85) + b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA8 || // LS (#x2028) + b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA9 || // PS (#x2029) + b[i] == 0) +} + +// Check if the character is a line break, space, tab, or NUL. +func is_blankz(b []byte, i int) bool { + //return is_blank(b, i) || is_breakz(b, i) + return ( // is_blank: + b[i] == ' ' || b[i] == '\t' || + // is_breakz: + b[i] == '\r' || // CR (#xD) + b[i] == '\n' || // LF (#xA) + b[i] == 0xC2 && b[i+1] == 0x85 || // NEL (#x85) + b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA8 || // LS (#x2028) + b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA9 || // PS (#x2029) + b[i] == 0) +} + +// Determine the width of the character. +func width(b byte) int { + // Don't replace these by a switch without first + // confirming that it is being inlined. + if b&0x80 == 0x00 { + return 1 + } + if b&0xE0 == 0xC0 { + return 2 + } + if b&0xF0 == 0xE0 { + return 3 + } + if b&0xF8 == 0xF0 { + return 4 + } + return 0 + +} diff --git a/vendor/github.com/docker/distribution/vendor/rsc.io/letsencrypt/LICENSE b/vendor/github.com/docker/distribution/vendor/rsc.io/letsencrypt/LICENSE new file mode 100644 index 000000000..6a66aea5e --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/rsc.io/letsencrypt/LICENSE @@ -0,0 +1,27 @@ +Copyright (c) 2009 The Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
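Note on the character-classification helpers added in yamlprivateh.go above (is_alpha, is_break, is_blankz, width, and friends): they operate on raw UTF-8 bytes rather than decoded runes, which is why the multi-byte checks compare lead and continuation bytes directly and why width derives a sequence length from the lead byte alone. As a rough sketch for comparison only (not part of the vendored file; the standard library call utf8.RuneLen is used here purely for illustration), the same lead-byte logic can be checked like this:

    package main

    import (
        "fmt"
        "unicode/utf8"
    )

    // width mirrors the vendored helper: it derives the encoded length of a
    // UTF-8 sequence from its lead byte alone.
    func width(b byte) int {
        switch {
        case b&0x80 == 0x00:
            return 1 // 0xxxxxxx: ASCII
        case b&0xE0 == 0xC0:
            return 2 // 110xxxxx: 2-byte sequence
        case b&0xF0 == 0xE0:
            return 3 // 1110xxxx: 3-byte sequence
        case b&0xF8 == 0xF0:
            return 4 // 11110xxx: 4-byte sequence
        }
        return 0 // continuation or invalid lead byte
    }

    func main() {
        for _, s := range []string{"a", "é", "√", "🙂"} {
            // width of the lead byte agrees with the rune's encoded length
            fmt.Println(s, width(s[0]), utf8.RuneLen([]rune(s)[0]))
        }
    }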
diff --git a/vendor/github.com/docker/distribution/vendor/rsc.io/letsencrypt/README b/vendor/github.com/docker/distribution/vendor/rsc.io/letsencrypt/README new file mode 100644 index 000000000..98a875f37 --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/rsc.io/letsencrypt/README @@ -0,0 +1,152 @@ +package letsencrypt // import "rsc.io/letsencrypt" + +Package letsencrypt obtains TLS certificates from LetsEncrypt.org. + +LetsEncrypt.org is a service that issues free SSL/TLS certificates to +servers that can prove control over the given domain's DNS records or the +servers pointed at by those records. + + +Quick Start + +A complete HTTP/HTTPS web server using TLS certificates from +LetsEncrypt.org, redirecting all HTTP access to HTTPS, and maintaining TLS +certificates in a file letsencrypt.cache across server restarts. + + package main + + import ( + "fmt" + "log" + "net/http" + "rsc.io/letsencrypt" + ) + + func main() { + http.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) { + fmt.Fprintf(w, "Hello, TLS!\n") + }) + var m letsencrypt.Manager + if err := m.CacheFile("letsencrypt.cache"); err != nil { + log.Fatal(err) + } + log.Fatal(m.Serve()) + } + + +Overview + +The fundamental type in this package is the Manager, which manages obtaining +and refreshing a collection of TLS certificates, typically for use by an +HTTPS server. The example above shows the most basic use of a Manager. The +use can be customized by calling additional methods of the Manager. + + +Registration + +A Manager m registers anonymously with LetsEncrypt.org, including agreeing +to the letsencrypt.org terms of service, the first time it needs to obtain a +certificate. To register with a particular email address and with the option +of a prompt for agreement with the terms of service, call m.Register. + + +GetCertificate + +The Manager's GetCertificate method returns certificates from the Manager's +cache, filling the cache by requesting certificates from LetsEncrypt.org. In +this way, a server with a tls.Config.GetCertificate set to m.GetCertificate +will demand load a certificate for any host name it serves. To force loading +of certificates ahead of time, install m.GetCertificate as before but then +call m.Cert for each host name. + +A Manager can only obtain a certificate for a given host name if it can +prove control of that host name to LetsEncrypt.org. By default it proves +control by answering an HTTPS-based challenge: when the LetsEncrypt.org +servers connect to the named host on port 443 (HTTPS), the TLS SNI handshake +must use m.GetCertificate to obtain a per-host certificate. The most common +way to satisfy this requirement is for the host name to resolve to the IP +address of a (single) computer running m.ServeHTTPS, or at least running a +Go TLS server with tls.Config.GetCertificate set to m.GetCertificate. +However, other configurations are possible. For example, a group of machines +could use an implementation of tls.Config.GetCertificate that cached +certificates but handled cache misses by making RPCs to a Manager m on an +elected leader machine. + +In typical usage, then, the setting of tls.Config.GetCertificate to +m.GetCertificate serves two purposes: it provides certificates to the TLS +server for ordinary serving, and it also answers challenges to prove +ownership of the domains in order to obtain those certificates. + +To force the loading of a certificate for a given host into the Manager's +cache, use m.Cert. 
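For concreteness, a minimal sketch of that wiring (assuming the host names are known up front; srv and the example host names are illustrative, not part of this package):

    var m letsencrypt.Manager
    srv := &http.Server{
        Addr: ":https",
        TLSConfig: &tls.Config{
            GetCertificate: m.GetCertificate, // serves certificates and answers challenges
        },
    }
    // Optionally force-load certificates instead of waiting for the first TLS handshake.
    for _, host := range []string{"example.com", "www.example.com"} {
        if _, err := m.Cert(host); err != nil {
            log.Printf("pre-loading certificate for %s: %v", host, err)
        }
    }
    log.Fatal(srv.ListenAndServeTLS("", ""))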
+ + +Persistent Storage + +If a server always starts with a zero Manager m, the server effectively +fetches a new certificate for each of its host name from LetsEncrypt.org on +each restart. This is unfortunate both because the server cannot start if +LetsEncrypt.org is unavailable and because LetsEncrypt.org limits how often +it will issue a certificate for a given host name (at time of writing, the +limit is 5 per week for a given host name). To save server state proactively +to a cache file and to reload the server state from that same file when +creating a new manager, call m.CacheFile with the name of the file to use. + +For alternate storage uses, m.Marshal returns the current state of the +Manager as an opaque string, m.Unmarshal sets the state of the Manager using +a string previously returned by m.Marshal (usually a different m), and +m.Watch returns a channel that receives notifications about state changes. + + +Limits + +To avoid hitting basic rate limits on LetsEncrypt.org, a given Manager +limits all its interactions to at most one request every minute, with an +initial allowed burst of 20 requests. + +By default, if GetCertificate is asked for a certificate it does not have, +it will in turn ask LetsEncrypt.org for that certificate. This opens a +potential attack where attackers connect to a server by IP address and +pretend to be asking for an incorrect host name. Then GetCertificate will +attempt to obtain a certificate for that host, incorrectly, eventually +hitting LetsEncrypt.org's rate limit for certificate requests and making it +impossible to obtain actual certificates. Because servers hold certificates +for months at a time, however, an attack would need to be sustained over a +time period of at least a month in order to cause real problems. + +To mitigate this kind of attack, a given Manager limits itself to an average +of one certificate request for a new host every three hours, with an initial +allowed burst of up to 20 requests. Long-running servers will therefore stay +within the LetsEncrypt.org limit of 300 failed requests per month. +Certificate refreshes are not subject to this limit. + +To eliminate the attack entirely, call m.SetHosts to enumerate the exact set +of hosts that are allowed in certificate requests. + + +Web Servers + +The basic requirement for use of a Manager is that there be an HTTPS server +running on port 443 and calling m.GetCertificate to obtain TLS certificates. +Using standard primitives, the way to do this is: + + srv := &http.Server{ + Addr: ":https", + TLSConfig: &tls.Config{ + GetCertificate: m.GetCertificate, + }, + } + srv.ListenAndServeTLS("", "") + +However, this pattern of serving HTTPS with demand-loaded TLS certificates +comes up enough to wrap into a single method m.ServeHTTPS. + +Similarly, many HTTPS servers prefer to redirect HTTP clients to the HTTPS +URLs. That functionality is provided by RedirectHTTP. + +The combination of serving HTTPS with demand-loaded TLS certificates and +serving HTTPS redirects to HTTP clients is provided by m.Serve, as used in +the original example above. + +func RedirectHTTP(w http.ResponseWriter, r *http.Request) +type Manager struct { ... } diff --git a/vendor/github.com/docker/distribution/vendor/rsc.io/letsencrypt/lets.go b/vendor/github.com/docker/distribution/vendor/rsc.io/letsencrypt/lets.go new file mode 100644 index 000000000..3a8453630 --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/rsc.io/letsencrypt/lets.go @@ -0,0 +1,753 @@ +// Copyright 2016 The Go Authors. 
All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package letsencrypt obtains TLS certificates from LetsEncrypt.org. +// +// LetsEncrypt.org is a service that issues free SSL/TLS certificates to servers +// that can prove control over the given domain's DNS records or +// the servers pointed at by those records. +// +// Quick Start +// +// A complete HTTP/HTTPS web server using TLS certificates from LetsEncrypt.org, +// redirecting all HTTP access to HTTPS, and maintaining TLS certificates in a file +// letsencrypt.cache across server restarts. +// +// package main +// +// import ( +// "fmt" +// "log" +// "net/http" +// "rsc.io/letsencrypt" +// ) +// +// func main() { +// http.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) { +// fmt.Fprintf(w, "Hello, TLS!\n") +// }) +// var m letsencrypt.Manager +// if err := m.CacheFile("letsencrypt.cache"); err != nil { +// log.Fatal(err) +// } +// log.Fatal(m.Serve()) +// } +// +// Overview +// +// The fundamental type in this package is the Manager, which +// manages obtaining and refreshing a collection of TLS certificates, +// typically for use by an HTTPS server. +// The example above shows the most basic use of a Manager. +// The use can be customized by calling additional methods of the Manager. +// +// Registration +// +// A Manager m registers anonymously with LetsEncrypt.org, including agreeing to +// the letsencrypt.org terms of service, the first time it needs to obtain a certificate. +// To register with a particular email address and with the option of a +// prompt for agreement with the terms of service, call m.Register. +// +// GetCertificate +// +// The Manager's GetCertificate method returns certificates +// from the Manager's cache, filling the cache by requesting certificates +// from LetsEncrypt.org. In this way, a server with a tls.Config.GetCertificate +// set to m.GetCertificate will demand load a certificate for any host name +// it serves. To force loading of certificates ahead of time, install m.GetCertificate +// as before but then call m.Cert for each host name. +// +// A Manager can only obtain a certificate for a given host name if it can prove +// control of that host name to LetsEncrypt.org. By default it proves control by +// answering an HTTPS-based challenge: when +// the LetsEncrypt.org servers connect to the named host on port 443 (HTTPS), +// the TLS SNI handshake must use m.GetCertificate to obtain a per-host certificate. +// The most common way to satisfy this requirement is for the host name to +// resolve to the IP address of a (single) computer running m.ServeHTTPS, +// or at least running a Go TLS server with tls.Config.GetCertificate set to m.GetCertificate. +// However, other configurations are possible. For example, a group of machines +// could use an implementation of tls.Config.GetCertificate that cached +// certificates but handled cache misses by making RPCs to a Manager m +// on an elected leader machine. +// +// In typical usage, then, the setting of tls.Config.GetCertificate to m.GetCertificate +// serves two purposes: it provides certificates to the TLS server for ordinary serving, +// and it also answers challenges to prove ownership of the domains in order to +// obtain those certificates. +// +// To force the loading of a certificate for a given host into the Manager's cache, +// use m.Cert. 
+// +// Persistent Storage +// +// If a server always starts with a zero Manager m, the server effectively fetches +// a new certificate for each of its host name from LetsEncrypt.org on each restart. +// This is unfortunate both because the server cannot start if LetsEncrypt.org is +// unavailable and because LetsEncrypt.org limits how often it will issue a certificate +// for a given host name (at time of writing, the limit is 5 per week for a given host name). +// To save server state proactively to a cache file and to reload the server state from +// that same file when creating a new manager, call m.CacheFile with the name of +// the file to use. +// +// For alternate storage uses, m.Marshal returns the current state of the Manager +// as an opaque string, m.Unmarshal sets the state of the Manager using a string +// previously returned by m.Marshal (usually a different m), and m.Watch returns +// a channel that receives notifications about state changes. +// +// Limits +// +// To avoid hitting basic rate limits on LetsEncrypt.org, a given Manager limits all its +// interactions to at most one request every minute, with an initial allowed burst of +// 20 requests. +// +// By default, if GetCertificate is asked for a certificate it does not have, it will in turn +// ask LetsEncrypt.org for that certificate. This opens a potential attack where attackers +// connect to a server by IP address and pretend to be asking for an incorrect host name. +// Then GetCertificate will attempt to obtain a certificate for that host, incorrectly, +// eventually hitting LetsEncrypt.org's rate limit for certificate requests and making it +// impossible to obtain actual certificates. Because servers hold certificates for months +// at a time, however, an attack would need to be sustained over a time period +// of at least a month in order to cause real problems. +// +// To mitigate this kind of attack, a given Manager limits +// itself to an average of one certificate request for a new host every three hours, +// with an initial allowed burst of up to 20 requests. +// Long-running servers will therefore stay +// within the LetsEncrypt.org limit of 300 failed requests per month. +// Certificate refreshes are not subject to this limit. +// +// To eliminate the attack entirely, call m.SetHosts to enumerate the exact set +// of hosts that are allowed in certificate requests. +// +// Web Servers +// +// The basic requirement for use of a Manager is that there be an HTTPS server +// running on port 443 and calling m.GetCertificate to obtain TLS certificates. +// Using standard primitives, the way to do this is: +// +// srv := &http.Server{ +// Addr: ":https", +// TLSConfig: &tls.Config{ +// GetCertificate: m.GetCertificate, +// }, +// } +// srv.ListenAndServeTLS("", "") +// +// However, this pattern of serving HTTPS with demand-loaded TLS certificates +// comes up enough to wrap into a single method m.ServeHTTPS. +// +// Similarly, many HTTPS servers prefer to redirect HTTP clients to the HTTPS URLs. +// That functionality is provided by RedirectHTTP. +// +// The combination of serving HTTPS with demand-loaded TLS certificates and +// serving HTTPS redirects to HTTP clients is provided by m.Serve, as used in +// the original example above. 
+// +package letsencrypt + +import ( + "crypto" + "crypto/ecdsa" + "crypto/elliptic" + "crypto/rand" + "crypto/tls" + "crypto/x509" + "encoding/json" + "encoding/pem" + "fmt" + "io/ioutil" + "log" + "net" + "net/http" + "os" + "strings" + "sync" + "time" + + "golang.org/x/net/context" + "golang.org/x/time/rate" + + "github.com/xenolf/lego/acme" +) + +const letsEncryptURL = "https://acme-v01.api.letsencrypt.org/directory" +const debug = false + +// A Manager m takes care of obtaining and refreshing a collection of TLS certificates +// obtained by LetsEncrypt.org. +// The zero Manager is not yet registered with LetsEncrypt.org and has no TLS certificates +// but is nonetheless ready for use. +// See the package comment for an overview of how to use a Manager. +type Manager struct { + mu sync.Mutex + state state + rateLimit *rate.Limiter + newHostLimit *rate.Limiter + certCache map[string]*cacheEntry + certTokens map[string]*tls.Certificate + watchChan chan struct{} +} + +// Serve runs an HTTP/HTTPS web server using TLS certificates obtained by the manager. +// The HTTP server redirects all requests to the HTTPS server. +// The HTTPS server obtains TLS certificates as needed and responds to requests +// by invoking http.DefaultServeMux. +// +// Serve does not return unitil the HTTPS server fails to start or else stops. +// Either way, Serve can only return a non-nil error, never nil. +func (m *Manager) Serve() error { + l, err := net.Listen("tcp", ":http") + if err != nil { + return err + } + defer l.Close() + go http.Serve(l, http.HandlerFunc(RedirectHTTP)) + + return m.ServeHTTPS() +} + +// ServeHTTPS runs an HTTPS web server using TLS certificates obtained by the manager. +// The HTTPS server obtains TLS certificates as needed and responds to requests +// by invoking http.DefaultServeMux. +// ServeHTTPS does not return unitil the HTTPS server fails to start or else stops. +// Either way, ServeHTTPS can only return a non-nil error, never nil. +func (m *Manager) ServeHTTPS() error { + srv := &http.Server{ + Addr: ":https", + TLSConfig: &tls.Config{ + GetCertificate: m.GetCertificate, + }, + } + return srv.ListenAndServeTLS("", "") +} + +// RedirectHTTP is an HTTP handler (suitable for use with http.HandleFunc) +// that responds to all requests by redirecting to the same URL served over HTTPS. +// It should only be invoked for requests received over HTTP. +func RedirectHTTP(w http.ResponseWriter, r *http.Request) { + if r.TLS != nil || r.Host == "" { + http.Error(w, "not found", 404) + } + + u := r.URL + u.Host = r.Host + u.Scheme = "https" + http.Redirect(w, r, u.String(), 302) +} + +// state is the serializable state for the Manager. +// It also implements acme.User. 
+type state struct { + Email string + Reg *acme.RegistrationResource + Key string + key *ecdsa.PrivateKey + Hosts []string + Certs map[string]stateCert +} + +func (s *state) GetEmail() string { return s.Email } +func (s *state) GetRegistration() *acme.RegistrationResource { return s.Reg } +func (s *state) GetPrivateKey() crypto.PrivateKey { return s.key } + +type stateCert struct { + Cert string + Key string +} + +func (cert stateCert) toTLS() (*tls.Certificate, error) { + c, err := tls.X509KeyPair([]byte(cert.Cert), []byte(cert.Key)) + if err != nil { + return nil, err + } + return &c, err +} + +type cacheEntry struct { + host string + m *Manager + + mu sync.Mutex + cert *tls.Certificate + timeout time.Time + refreshing bool + err error +} + +func (m *Manager) init() { + m.mu.Lock() + if m.certCache == nil { + m.rateLimit = rate.NewLimiter(rate.Every(1*time.Minute), 20) + m.newHostLimit = rate.NewLimiter(rate.Every(3*time.Hour), 20) + m.certCache = map[string]*cacheEntry{} + m.certTokens = map[string]*tls.Certificate{} + m.watchChan = make(chan struct{}, 1) + m.watchChan <- struct{}{} + } + m.mu.Unlock() +} + +// Watch returns the manager's watch channel, +// which delivers a notification after every time the +// manager's state (as exposed by Marshal and Unmarshal) changes. +// All calls to Watch return the same watch channel. +// +// The watch channel includes notifications about changes +// before the first call to Watch, so that in the pattern below, +// the range loop executes once immediately, saving +// the result of setup (along with any background updates that +// may have raced in quickly). +// +// m := new(letsencrypt.Manager) +// setup(m) +// go backgroundUpdates(m) +// for range m.Watch() { +// save(m.Marshal()) +// } +// +func (m *Manager) Watch() <-chan struct{} { + m.init() + m.updated() + return m.watchChan +} + +func (m *Manager) updated() { + select { + case m.watchChan <- struct{}{}: + default: + } +} + +func (m *Manager) CacheFile(name string) error { + f, err := os.OpenFile(name, os.O_RDWR|os.O_CREATE, 0600) + if err != nil { + return err + } + f.Close() + data, err := ioutil.ReadFile(name) + if err != nil { + return err + } + if len(data) > 0 { + if err := m.Unmarshal(string(data)); err != nil { + return err + } + } + go func() { + for range m.Watch() { + err := ioutil.WriteFile(name, []byte(m.Marshal()), 0600) + if err != nil { + log.Printf("writing letsencrypt cache: %v", err) + } + } + }() + return nil +} + +// Registered reports whether the manager has registered with letsencrypt.org yet. +func (m *Manager) Registered() bool { + m.init() + m.mu.Lock() + defer m.mu.Unlock() + return m.registered() +} + +func (m *Manager) registered() bool { + return m.state.Reg != nil && m.state.Reg.Body.Agreement != "" +} + +// Register registers the manager with letsencrypt.org, using the given email address. +// Registration may require agreeing to the letsencrypt.org terms of service. +// If so, Register calls prompt(url) where url is the URL of the terms of service. +// Prompt should report whether the caller agrees to the terms. +// A nil prompt func is taken to mean that the user always agrees. +// The email address is sent to LetsEncrypt.org but otherwise unchecked; +// it can be omitted by passing the empty string. +// +// Calling Register is only required to make sure registration uses a +// particular email address or to insert an explicit prompt into the +// registration sequence. 
If the manager is not registered, it will +// automatically register with no email address and automatic +// agreement to the terms of service at the first call to Cert or GetCertificate. +func (m *Manager) Register(email string, prompt func(string) bool) error { + m.init() + m.mu.Lock() + defer m.mu.Unlock() + + return m.register(email, prompt) +} + +func (m *Manager) register(email string, prompt func(string) bool) error { + if m.registered() { + return fmt.Errorf("already registered") + } + m.state.Email = email + if m.state.key == nil { + key, err := newKey() + if err != nil { + return fmt.Errorf("generating key: %v", err) + } + Key, err := marshalKey(key) + if err != nil { + return fmt.Errorf("generating key: %v", err) + } + m.state.key = key + m.state.Key = string(Key) + } + + c, err := acme.NewClient(letsEncryptURL, &m.state, acme.EC256) + if err != nil { + return fmt.Errorf("create client: %v", err) + } + reg, err := c.Register() + if err != nil { + return fmt.Errorf("register: %v", err) + } + + m.state.Reg = reg + if reg.Body.Agreement == "" { + if prompt != nil && !prompt(reg.TosURL) { + return fmt.Errorf("did not agree to TOS") + } + if err := c.AgreeToTOS(); err != nil { + return fmt.Errorf("agreeing to TOS: %v", err) + } + } + + m.updated() + + return nil +} + +// Marshal returns an encoding of the manager's state, +// suitable for writing to disk and reloading by calling Unmarshal. +// The state includes registration status, the configured host list +// from SetHosts, and all known certificates, including their private +// cryptographic keys. +// Consequently, the state should be kept private. +func (m *Manager) Marshal() string { + m.init() + js, err := json.MarshalIndent(&m.state, "", "\t") + if err != nil { + panic("unexpected json.Marshal failure") + } + return string(js) +} + +// Unmarshal restores the state encoded by a previous call to Marshal +// (perhaps on a different Manager in a different program). +func (m *Manager) Unmarshal(enc string) error { + m.init() + var st state + if err := json.Unmarshal([]byte(enc), &st); err != nil { + return err + } + if st.Key != "" { + key, err := unmarshalKey(st.Key) + if err != nil { + return err + } + st.key = key + } + m.state = st + for host, cert := range m.state.Certs { + c, err := cert.toTLS() + if err != nil { + log.Printf("letsencrypt: ignoring entry for %s: %v", host, err) + continue + } + m.certCache[host] = &cacheEntry{host: host, m: m, cert: c} + } + m.updated() + return nil +} + +// SetHosts sets the manager's list of known host names. +// If the list is non-nil, the manager will only ever attempt to acquire +// certificates for host names on the list. +// If the list is nil, the manager does not restrict the hosts it will +// ask for certificates for. +func (m *Manager) SetHosts(hosts []string) { + m.init() + m.mu.Lock() + m.state.Hosts = append(m.state.Hosts[:0], hosts...) + m.mu.Unlock() + m.updated() +} + +// GetCertificate can be placed a tls.Config's GetCertificate field to make +// the TLS server use Let's Encrypt certificates. +// Each time a client connects to the TLS server expecting a new host name, +// the TLS server's call to GetCertificate will trigger an exchange with the +// Let's Encrypt servers to obtain that certificate, subject to the manager rate limits. 
+// +// As noted in the Manager's documentation comment, +// to obtain a certificate for a given host name, that name +// must resolve to a computer running a TLS server on port 443 +// that obtains TLS SNI certificates by calling m.GetCertificate. +// In the standard usage, then, installing m.GetCertificate in the tls.Config +// both automatically provisions the TLS certificates needed for +// ordinary HTTPS service and answers the challenges from LetsEncrypt.org. +func (m *Manager) GetCertificate(clientHello *tls.ClientHelloInfo) (*tls.Certificate, error) { + m.init() + + host := clientHello.ServerName + + if debug { + log.Printf("GetCertificate %s", host) + } + + if strings.HasSuffix(host, ".acme.invalid") { + m.mu.Lock() + cert := m.certTokens[host] + m.mu.Unlock() + if cert == nil { + return nil, fmt.Errorf("unknown host") + } + return cert, nil + } + + return m.Cert(host) +} + +// Cert returns the certificate for the given host name, obtaining a new one if necessary. +// +// As noted in the documentation for Manager and for the GetCertificate method, +// obtaining a certificate requires that m.GetCertificate be associated with host. +// In most servers, simply starting a TLS server with a configuration referring +// to m.GetCertificate is sufficient, and Cert need not be called. +// +// The main use of Cert is to force the manager to obtain a certificate +// for a particular host name ahead of time. +func (m *Manager) Cert(host string) (*tls.Certificate, error) { + host = strings.ToLower(host) + if debug { + log.Printf("Cert %s", host) + } + + m.init() + m.mu.Lock() + if !m.registered() { + m.register("", nil) + } + + ok := false + if m.state.Hosts == nil { + ok = true + } else { + for _, h := range m.state.Hosts { + if host == h { + ok = true + break + } + } + } + if !ok { + m.mu.Unlock() + return nil, fmt.Errorf("unknown host") + } + + // Otherwise look in our cert cache. 
+ entry, ok := m.certCache[host] + if !ok { + r := m.rateLimit.Reserve() + ok := r.OK() + if ok { + ok = m.newHostLimit.Allow() + if !ok { + r.Cancel() + } + } + if !ok { + m.mu.Unlock() + return nil, fmt.Errorf("rate limited") + } + entry = &cacheEntry{host: host, m: m} + m.certCache[host] = entry + } + m.mu.Unlock() + + entry.mu.Lock() + defer entry.mu.Unlock() + entry.init() + if entry.err != nil { + return nil, entry.err + } + return entry.cert, nil +} + +func (e *cacheEntry) init() { + if e.err != nil && time.Now().Before(e.timeout) { + return + } + if e.cert != nil { + if e.timeout.IsZero() { + t, err := certRefreshTime(e.cert) + if err != nil { + e.err = err + e.timeout = time.Now().Add(1 * time.Minute) + e.cert = nil + return + } + e.timeout = t + } + if time.Now().After(e.timeout) && !e.refreshing { + e.refreshing = true + go e.refresh() + } + return + } + + cert, refreshTime, err := e.m.verify(e.host) + e.m.mu.Lock() + e.m.certCache[e.host] = e + e.m.mu.Unlock() + e.install(cert, refreshTime, err) +} + +func (e *cacheEntry) install(cert *tls.Certificate, refreshTime time.Time, err error) { + e.cert = nil + e.timeout = time.Time{} + e.err = nil + + if err != nil { + e.err = err + e.timeout = time.Now().Add(1 * time.Minute) + return + } + + e.cert = cert + e.timeout = refreshTime +} + +func (e *cacheEntry) refresh() { + e.m.rateLimit.Wait(context.Background()) + cert, refreshTime, err := e.m.verify(e.host) + + e.mu.Lock() + defer e.mu.Unlock() + e.refreshing = false + if err == nil { + e.install(cert, refreshTime, nil) + } +} + +func (m *Manager) verify(host string) (cert *tls.Certificate, refreshTime time.Time, err error) { + c, err := acme.NewClient(letsEncryptURL, &m.state, acme.EC256) + if err != nil { + return + } + if err = c.SetChallengeProvider(acme.TLSSNI01, tlsProvider{m}); err != nil { + return + } + c.SetChallengeProvider(acme.TLSSNI01, tlsProvider{m}) + c.ExcludeChallenges([]acme.Challenge{acme.HTTP01}) + acmeCert, errmap := c.ObtainCertificate([]string{host}, true, nil) + if len(errmap) > 0 { + if debug { + log.Printf("ObtainCertificate %v => %v", host, errmap) + } + err = fmt.Errorf("%v", errmap) + return + } + entryCert := stateCert{ + Cert: string(acmeCert.Certificate), + Key: string(acmeCert.PrivateKey), + } + cert, err = entryCert.toTLS() + if err != nil { + if debug { + log.Printf("ObtainCertificate %v toTLS failure: %v", host, err) + } + err = err + return + } + if refreshTime, err = certRefreshTime(cert); err != nil { + return + } + + m.mu.Lock() + if m.state.Certs == nil { + m.state.Certs = make(map[string]stateCert) + } + m.state.Certs[host] = entryCert + m.mu.Unlock() + m.updated() + + return cert, refreshTime, nil +} + +func certRefreshTime(cert *tls.Certificate) (time.Time, error) { + xc, err := x509.ParseCertificate(cert.Certificate[0]) + if err != nil { + if debug { + log.Printf("ObtainCertificate to X.509 failure: %v", err) + } + return time.Time{}, err + } + t := xc.NotBefore.Add(xc.NotAfter.Sub(xc.NotBefore) / 2) + monthEarly := xc.NotAfter.Add(-30 * 24 * time.Hour) + if t.Before(monthEarly) { + t = monthEarly + } + return t, nil +} + +// tlsProvider implements acme.ChallengeProvider for TLS handshake challenges. 
+type tlsProvider struct { + m *Manager +} + +func (p tlsProvider) Present(domain, token, keyAuth string) error { + cert, dom, err := acme.TLSSNI01ChallengeCertDomain(keyAuth) + if err != nil { + return err + } + + p.m.mu.Lock() + p.m.certTokens[dom] = &cert + p.m.mu.Unlock() + + return nil +} + +func (p tlsProvider) CleanUp(domain, token, keyAuth string) error { + _, dom, err := acme.TLSSNI01ChallengeCertDomain(keyAuth) + if err != nil { + return err + } + + p.m.mu.Lock() + delete(p.m.certTokens, dom) + p.m.mu.Unlock() + + return nil +} + +func marshalKey(key *ecdsa.PrivateKey) ([]byte, error) { + data, err := x509.MarshalECPrivateKey(key) + if err != nil { + return nil, err + } + return pem.EncodeToMemory(&pem.Block{Type: "EC PRIVATE KEY", Bytes: data}), nil +} + +func unmarshalKey(text string) (*ecdsa.PrivateKey, error) { + b, _ := pem.Decode([]byte(text)) + if b == nil { + return nil, fmt.Errorf("unmarshalKey: missing key") + } + if b.Type != "EC PRIVATE KEY" { + return nil, fmt.Errorf("unmarshalKey: found %q, not %q", b.Type, "EC PRIVATE KEY") + } + k, err := x509.ParseECPrivateKey(b.Bytes) + if err != nil { + return nil, fmt.Errorf("unmarshalKey: %v", err) + } + return k, nil +} + +func newKey() (*ecdsa.PrivateKey, error) { + return ecdsa.GenerateKey(elliptic.P384(), rand.Reader) +} diff --git a/vendor/github.com/docker/distribution/vendor/rsc.io/letsencrypt/vendor/github.com/xenolf/lego/LICENSE b/vendor/github.com/docker/distribution/vendor/rsc.io/letsencrypt/vendor/github.com/xenolf/lego/LICENSE new file mode 100644 index 000000000..17460b716 --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/rsc.io/letsencrypt/vendor/github.com/xenolf/lego/LICENSE @@ -0,0 +1,21 @@ +The MIT License (MIT) + +Copyright (c) 2015 Sebastian Erhart + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/vendor/github.com/docker/distribution/vendor/rsc.io/letsencrypt/vendor/github.com/xenolf/lego/acme/challenges.go b/vendor/github.com/docker/distribution/vendor/rsc.io/letsencrypt/vendor/github.com/xenolf/lego/acme/challenges.go new file mode 100644 index 000000000..857900507 --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/rsc.io/letsencrypt/vendor/github.com/xenolf/lego/acme/challenges.go @@ -0,0 +1,16 @@ +package acme + +// Challenge is a string that identifies a particular type and version of ACME challenge. 
+type Challenge string + +const ( + // HTTP01 is the "http-01" ACME challenge https://github.com/ietf-wg-acme/acme/blob/master/draft-ietf-acme-acme.md#http + // Note: HTTP01ChallengePath returns the URL path to fulfill this challenge + HTTP01 = Challenge("http-01") + // TLSSNI01 is the "tls-sni-01" ACME challenge https://github.com/ietf-wg-acme/acme/blob/master/draft-ietf-acme-acme.md#tls-with-server-name-indication-tls-sni + // Note: TLSSNI01ChallengeCert returns a certificate to fulfill this challenge + TLSSNI01 = Challenge("tls-sni-01") + // DNS01 is the "dns-01" ACME challenge https://github.com/ietf-wg-acme/acme/blob/master/draft-ietf-acme-acme.md#dns + // Note: DNS01Record returns a DNS record which will fulfill this challenge + DNS01 = Challenge("dns-01") +) diff --git a/vendor/github.com/docker/distribution/vendor/rsc.io/letsencrypt/vendor/github.com/xenolf/lego/acme/client.go b/vendor/github.com/docker/distribution/vendor/rsc.io/letsencrypt/vendor/github.com/xenolf/lego/acme/client.go new file mode 100644 index 000000000..16e4cbe00 --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/rsc.io/letsencrypt/vendor/github.com/xenolf/lego/acme/client.go @@ -0,0 +1,638 @@ +// Package acme implements the ACME protocol for Let's Encrypt and other conforming providers. +package acme + +import ( + "crypto" + "crypto/x509" + "encoding/base64" + "encoding/json" + "errors" + "fmt" + "io/ioutil" + "log" + "net" + "regexp" + "strconv" + "strings" + "time" +) + +var ( + // Logger is an optional custom logger. + Logger *log.Logger +) + +// logf writes a log entry. It uses Logger if not +// nil, otherwise it uses the default log.Logger. +func logf(format string, args ...interface{}) { + if Logger != nil { + Logger.Printf(format, args...) + } else { + log.Printf(format, args...) + } +} + +// User interface is to be implemented by users of this library. +// It is used by the client type to get user specific information. +type User interface { + GetEmail() string + GetRegistration() *RegistrationResource + GetPrivateKey() crypto.PrivateKey +} + +// Interface for all challenge solvers to implement. +type solver interface { + Solve(challenge challenge, domain string) error +} + +type validateFunc func(j *jws, domain, uri string, chlng challenge) error + +// Client is the user-friendy way to ACME +type Client struct { + directory directory + user User + jws *jws + keyType KeyType + issuerCert []byte + solvers map[Challenge]solver +} + +// NewClient creates a new ACME client on behalf of the user. The client will depend on +// the ACME directory located at caDirURL for the rest of its actions. It will +// generate private keys for certificates of size keyBits. +func NewClient(caDirURL string, user User, keyType KeyType) (*Client, error) { + privKey := user.GetPrivateKey() + if privKey == nil { + return nil, errors.New("private key was nil") + } + + var dir directory + if _, err := getJSON(caDirURL, &dir); err != nil { + return nil, fmt.Errorf("get directory at '%s': %v", caDirURL, err) + } + + if dir.NewRegURL == "" { + return nil, errors.New("directory missing new registration URL") + } + if dir.NewAuthzURL == "" { + return nil, errors.New("directory missing new authz URL") + } + if dir.NewCertURL == "" { + return nil, errors.New("directory missing new certificate URL") + } + if dir.RevokeCertURL == "" { + return nil, errors.New("directory missing revoke certificate URL") + } + + jws := &jws{privKey: privKey, directoryURL: caDirURL} + + // REVIEW: best possibility? 
+ // Add all available solvers with the right index as per ACME + // spec to this map. Otherwise they won`t be found. + solvers := make(map[Challenge]solver) + solvers[HTTP01] = &httpChallenge{jws: jws, validate: validate, provider: &HTTPProviderServer{}} + solvers[TLSSNI01] = &tlsSNIChallenge{jws: jws, validate: validate, provider: &TLSProviderServer{}} + + return &Client{directory: dir, user: user, jws: jws, keyType: keyType, solvers: solvers}, nil +} + +// SetChallengeProvider specifies a custom provider that will make the solution available +func (c *Client) SetChallengeProvider(challenge Challenge, p ChallengeProvider) error { + switch challenge { + case HTTP01: + c.solvers[challenge] = &httpChallenge{jws: c.jws, validate: validate, provider: p} + case TLSSNI01: + c.solvers[challenge] = &tlsSNIChallenge{jws: c.jws, validate: validate, provider: p} + default: + return fmt.Errorf("Unknown challenge %v", challenge) + } + return nil +} + +// SetHTTPAddress specifies a custom interface:port to be used for HTTP based challenges. +// If this option is not used, the default port 80 and all interfaces will be used. +// To only specify a port and no interface use the ":port" notation. +func (c *Client) SetHTTPAddress(iface string) error { + host, port, err := net.SplitHostPort(iface) + if err != nil { + return err + } + + if chlng, ok := c.solvers[HTTP01]; ok { + chlng.(*httpChallenge).provider = NewHTTPProviderServer(host, port) + } + + return nil +} + +// SetTLSAddress specifies a custom interface:port to be used for TLS based challenges. +// If this option is not used, the default port 443 and all interfaces will be used. +// To only specify a port and no interface use the ":port" notation. +func (c *Client) SetTLSAddress(iface string) error { + host, port, err := net.SplitHostPort(iface) + if err != nil { + return err + } + + if chlng, ok := c.solvers[TLSSNI01]; ok { + chlng.(*tlsSNIChallenge).provider = NewTLSProviderServer(host, port) + } + return nil +} + +// ExcludeChallenges explicitly removes challenges from the pool for solving. +func (c *Client) ExcludeChallenges(challenges []Challenge) { + // Loop through all challenges and delete the requested one if found. + for _, challenge := range challenges { + delete(c.solvers, challenge) + } +} + +// Register the current account to the ACME server. +func (c *Client) Register() (*RegistrationResource, error) { + if c == nil || c.user == nil { + return nil, errors.New("acme: cannot register a nil client or user") + } + logf("[INFO] acme: Registering account for %s", c.user.GetEmail()) + + regMsg := registrationMessage{ + Resource: "new-reg", + } + if c.user.GetEmail() != "" { + regMsg.Contact = []string{"mailto:" + c.user.GetEmail()} + } else { + regMsg.Contact = []string{} + } + + var serverReg Registration + hdr, err := postJSON(c.jws, c.directory.NewRegURL, regMsg, &serverReg) + if err != nil { + return nil, err + } + + reg := &RegistrationResource{Body: serverReg} + + links := parseLinks(hdr["Link"]) + reg.URI = hdr.Get("Location") + if links["terms-of-service"] != "" { + reg.TosURL = links["terms-of-service"] + } + + if links["next"] != "" { + reg.NewAuthzURL = links["next"] + } else { + return nil, errors.New("acme: The server did not return 'next' link to proceed") + } + + return reg, nil +} + +// AgreeToTOS updates the Client registration and sends the agreement to +// the server. 
+func (c *Client) AgreeToTOS() error { + reg := c.user.GetRegistration() + + reg.Body.Agreement = c.user.GetRegistration().TosURL + reg.Body.Resource = "reg" + _, err := postJSON(c.jws, c.user.GetRegistration().URI, c.user.GetRegistration().Body, nil) + return err +} + +// ObtainCertificate tries to obtain a single certificate using all domains passed into it. +// The first domain in domains is used for the CommonName field of the certificate, all other +// domains are added using the Subject Alternate Names extension. A new private key is generated +// for every invocation of this function. If you do not want that you can supply your own private key +// in the privKey parameter. If this parameter is non-nil it will be used instead of generating a new one. +// If bundle is true, the []byte contains both the issuer certificate and +// your issued certificate as a bundle. +// This function will never return a partial certificate. If one domain in the list fails, +// the whole certificate will fail. +func (c *Client) ObtainCertificate(domains []string, bundle bool, privKey crypto.PrivateKey) (CertificateResource, map[string]error) { + if bundle { + logf("[INFO][%s] acme: Obtaining bundled SAN certificate", strings.Join(domains, ", ")) + } else { + logf("[INFO][%s] acme: Obtaining SAN certificate", strings.Join(domains, ", ")) + } + + challenges, failures := c.getChallenges(domains) + // If any challenge fails - return. Do not generate partial SAN certificates. + if len(failures) > 0 { + return CertificateResource{}, failures + } + + errs := c.solveChallenges(challenges) + // If any challenge fails - return. Do not generate partial SAN certificates. + if len(errs) > 0 { + return CertificateResource{}, errs + } + + logf("[INFO][%s] acme: Validations succeeded; requesting certificates", strings.Join(domains, ", ")) + + cert, err := c.requestCertificate(challenges, bundle, privKey) + if err != nil { + for _, chln := range challenges { + failures[chln.Domain] = err + } + } + + return cert, failures +} + +// RevokeCertificate takes a PEM encoded certificate or bundle and tries to revoke it at the CA. +func (c *Client) RevokeCertificate(certificate []byte) error { + certificates, err := parsePEMBundle(certificate) + if err != nil { + return err + } + + x509Cert := certificates[0] + if x509Cert.IsCA { + return fmt.Errorf("Certificate bundle starts with a CA certificate") + } + + encodedCert := base64.URLEncoding.EncodeToString(x509Cert.Raw) + + _, err = postJSON(c.jws, c.directory.RevokeCertURL, revokeCertMessage{Resource: "revoke-cert", Certificate: encodedCert}, nil) + return err +} + +// RenewCertificate takes a CertificateResource and tries to renew the certificate. +// If the renewal process succeeds, the new certificate will ge returned in a new CertResource. +// Please be aware that this function will return a new certificate in ANY case that is not an error. +// If the server does not provide us with a new cert on a GET request to the CertURL +// this function will start a new-cert flow where a new certificate gets generated. +// If bundle is true, the []byte contains both the issuer certificate and +// your issued certificate as a bundle. +// For private key reuse the PrivateKey property of the passed in CertificateResource should be non-nil. +func (c *Client) RenewCertificate(cert CertificateResource, bundle bool) (CertificateResource, error) { + // Input certificate is PEM encoded. Decode it here as we may need the decoded + // cert later on in the renewal process. 
The input may be a bundle or a single certificate. + certificates, err := parsePEMBundle(cert.Certificate) + if err != nil { + return CertificateResource{}, err + } + + x509Cert := certificates[0] + if x509Cert.IsCA { + return CertificateResource{}, fmt.Errorf("[%s] Certificate bundle starts with a CA certificate", cert.Domain) + } + + // This is just meant to be informal for the user. + timeLeft := x509Cert.NotAfter.Sub(time.Now().UTC()) + logf("[INFO][%s] acme: Trying renewal with %d hours remaining", cert.Domain, int(timeLeft.Hours())) + + // The first step of renewal is to check if we get a renewed cert + // directly from the cert URL. + resp, err := httpGet(cert.CertURL) + if err != nil { + return CertificateResource{}, err + } + defer resp.Body.Close() + serverCertBytes, err := ioutil.ReadAll(resp.Body) + if err != nil { + return CertificateResource{}, err + } + + serverCert, err := x509.ParseCertificate(serverCertBytes) + if err != nil { + return CertificateResource{}, err + } + + // If the server responds with a different certificate we are effectively renewed. + // TODO: Further test if we can actually use the new certificate (Our private key works) + if !x509Cert.Equal(serverCert) { + logf("[INFO][%s] acme: Server responded with renewed certificate", cert.Domain) + issuedCert := pemEncode(derCertificateBytes(serverCertBytes)) + // If bundle is true, we want to return a certificate bundle. + // To do this, we need the issuer certificate. + if bundle { + // The issuer certificate link is always supplied via an "up" link + // in the response headers of a new certificate. + links := parseLinks(resp.Header["Link"]) + issuerCert, err := c.getIssuerCertificate(links["up"]) + if err != nil { + // If we fail to acquire the issuer cert, return the issued certificate - do not fail. + logf("[ERROR][%s] acme: Could not bundle issuer certificate: %v", cert.Domain, err) + } else { + // Success - append the issuer cert to the issued cert. + issuerCert = pemEncode(derCertificateBytes(issuerCert)) + issuedCert = append(issuedCert, issuerCert...) + } + } + + cert.Certificate = issuedCert + return cert, nil + } + + var privKey crypto.PrivateKey + if cert.PrivateKey != nil { + privKey, err = parsePEMPrivateKey(cert.PrivateKey) + if err != nil { + return CertificateResource{}, err + } + } + + var domains []string + var failures map[string]error + // check for SAN certificate + if len(x509Cert.DNSNames) > 1 { + domains = append(domains, x509Cert.Subject.CommonName) + for _, sanDomain := range x509Cert.DNSNames { + if sanDomain == x509Cert.Subject.CommonName { + continue + } + domains = append(domains, sanDomain) + } + } else { + domains = append(domains, x509Cert.Subject.CommonName) + } + + newCert, failures := c.ObtainCertificate(domains, bundle, privKey) + return newCert, failures[cert.Domain] +} + +// Looks through the challenge combinations to find a solvable match. +// Then solves the challenges in series and returns. +func (c *Client) solveChallenges(challenges []authorizationResource) map[string]error { + // loop through the resources, basically through the domains. + failures := make(map[string]error) + for _, authz := range challenges { + // no solvers - no solving + if solvers := c.chooseSolvers(authz.Body, authz.Domain); solvers != nil { + for i, solver := range solvers { + // TODO: do not immediately fail if one domain fails to validate. 
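+			// Each solver presents its challenge response, asks the ACME server to validate it,
+			// and cleans up afterwards; any per-domain failure recorded here causes
+			// ObtainCertificate to abort rather than issue a partial certificate.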
+ err := solver.Solve(authz.Body.Challenges[i], authz.Domain) + if err != nil { + failures[authz.Domain] = err + } + } + } else { + failures[authz.Domain] = fmt.Errorf("[%s] acme: Could not determine solvers", authz.Domain) + } + } + + return failures +} + +// Checks all combinations from the server and returns an array of +// solvers which should get executed in series. +func (c *Client) chooseSolvers(auth authorization, domain string) map[int]solver { + for _, combination := range auth.Combinations { + solvers := make(map[int]solver) + for _, idx := range combination { + if solver, ok := c.solvers[auth.Challenges[idx].Type]; ok { + solvers[idx] = solver + } else { + logf("[INFO][%s] acme: Could not find solver for: %s", domain, auth.Challenges[idx].Type) + } + } + + // If we can solve the whole combination, return the solvers + if len(solvers) == len(combination) { + return solvers + } + } + return nil +} + +// Get the challenges needed to proof our identifier to the ACME server. +func (c *Client) getChallenges(domains []string) ([]authorizationResource, map[string]error) { + resc, errc := make(chan authorizationResource), make(chan domainError) + + for _, domain := range domains { + go func(domain string) { + authMsg := authorization{Resource: "new-authz", Identifier: identifier{Type: "dns", Value: domain}} + var authz authorization + hdr, err := postJSON(c.jws, c.user.GetRegistration().NewAuthzURL, authMsg, &authz) + if err != nil { + errc <- domainError{Domain: domain, Error: err} + return + } + + links := parseLinks(hdr["Link"]) + if links["next"] == "" { + logf("[ERROR][%s] acme: Server did not provide next link to proceed", domain) + return + } + + resc <- authorizationResource{Body: authz, NewCertURL: links["next"], AuthURL: hdr.Get("Location"), Domain: domain} + }(domain) + } + + responses := make(map[string]authorizationResource) + failures := make(map[string]error) + for i := 0; i < len(domains); i++ { + select { + case res := <-resc: + responses[res.Domain] = res + case err := <-errc: + failures[err.Domain] = err.Error + } + } + + challenges := make([]authorizationResource, 0, len(responses)) + for _, domain := range domains { + if challenge, ok := responses[domain]; ok { + challenges = append(challenges, challenge) + } + } + + close(resc) + close(errc) + + return challenges, failures +} + +func (c *Client) requestCertificate(authz []authorizationResource, bundle bool, privKey crypto.PrivateKey) (CertificateResource, error) { + if len(authz) == 0 { + return CertificateResource{}, errors.New("Passed no authorizations to requestCertificate!") + } + + commonName := authz[0] + var err error + if privKey == nil { + privKey, err = generatePrivateKey(c.keyType) + if err != nil { + return CertificateResource{}, err + } + } + + var san []string + var authURLs []string + for _, auth := range authz[1:] { + san = append(san, auth.Domain) + authURLs = append(authURLs, auth.AuthURL) + } + + // TODO: should the CSR be customizable? 
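+	// The CSR uses the first authorization's domain as the CommonName and the
+	// remaining domains as Subject Alternative Names.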
+ csr, err := generateCsr(privKey, commonName.Domain, san) + if err != nil { + return CertificateResource{}, err + } + + csrString := base64.URLEncoding.EncodeToString(csr) + jsonBytes, err := json.Marshal(csrMessage{Resource: "new-cert", Csr: csrString, Authorizations: authURLs}) + if err != nil { + return CertificateResource{}, err + } + + resp, err := c.jws.post(commonName.NewCertURL, jsonBytes) + if err != nil { + return CertificateResource{}, err + } + + privateKeyPem := pemEncode(privKey) + cerRes := CertificateResource{ + Domain: commonName.Domain, + CertURL: resp.Header.Get("Location"), + PrivateKey: privateKeyPem} + + for { + switch resp.StatusCode { + case 201, 202: + cert, err := ioutil.ReadAll(limitReader(resp.Body, 1024*1024)) + resp.Body.Close() + if err != nil { + return CertificateResource{}, err + } + + // The server returns a body with a length of zero if the + // certificate was not ready at the time this request completed. + // Otherwise the body is the certificate. + if len(cert) > 0 { + + cerRes.CertStableURL = resp.Header.Get("Content-Location") + cerRes.AccountRef = c.user.GetRegistration().URI + + issuedCert := pemEncode(derCertificateBytes(cert)) + // If bundle is true, we want to return a certificate bundle. + // To do this, we need the issuer certificate. + if bundle { + // The issuer certificate link is always supplied via an "up" link + // in the response headers of a new certificate. + links := parseLinks(resp.Header["Link"]) + issuerCert, err := c.getIssuerCertificate(links["up"]) + if err != nil { + // If we fail to acquire the issuer cert, return the issued certificate - do not fail. + logf("[WARNING][%s] acme: Could not bundle issuer certificate: %v", commonName.Domain, err) + } else { + // Success - append the issuer cert to the issued cert. + issuerCert = pemEncode(derCertificateBytes(issuerCert)) + issuedCert = append(issuedCert, issuerCert...) + } + } + + cerRes.Certificate = issuedCert + logf("[INFO][%s] Server responded with a certificate.", commonName.Domain) + return cerRes, nil + } + + // The certificate was granted but is not yet issued. + // Check retry-after and loop. + ra := resp.Header.Get("Retry-After") + retryAfter, err := strconv.Atoi(ra) + if err != nil { + return CertificateResource{}, err + } + + logf("[INFO][%s] acme: Server responded with status 202; retrying after %ds", commonName.Domain, retryAfter) + time.Sleep(time.Duration(retryAfter) * time.Second) + + break + default: + return CertificateResource{}, handleHTTPError(resp) + } + + resp, err = httpGet(cerRes.CertURL) + if err != nil { + return CertificateResource{}, err + } + } +} + +// getIssuerCertificate requests the issuer certificate and caches it for +// subsequent requests. 
+func (c *Client) getIssuerCertificate(url string) ([]byte, error) { + logf("[INFO] acme: Requesting issuer cert from %s", url) + if c.issuerCert != nil { + return c.issuerCert, nil + } + + resp, err := httpGet(url) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + issuerBytes, err := ioutil.ReadAll(limitReader(resp.Body, 1024*1024)) + if err != nil { + return nil, err + } + + _, err = x509.ParseCertificate(issuerBytes) + if err != nil { + return nil, err + } + + c.issuerCert = issuerBytes + return issuerBytes, err +} + +func parseLinks(links []string) map[string]string { + aBrkt := regexp.MustCompile("[<>]") + slver := regexp.MustCompile("(.+) *= *\"(.+)\"") + linkMap := make(map[string]string) + + for _, link := range links { + + link = aBrkt.ReplaceAllString(link, "") + parts := strings.Split(link, ";") + + matches := slver.FindStringSubmatch(parts[1]) + if len(matches) > 0 { + linkMap[matches[2]] = parts[0] + } + } + + return linkMap +} + +// validate makes the ACME server start validating a +// challenge response, only returning once it is done. +func validate(j *jws, domain, uri string, chlng challenge) error { + var challengeResponse challenge + + hdr, err := postJSON(j, uri, chlng, &challengeResponse) + if err != nil { + return err + } + + // After the path is sent, the ACME server will access our server. + // Repeatedly check the server for an updated status on our request. + for { + switch challengeResponse.Status { + case "valid": + logf("[INFO][%s] The server validated our request", domain) + return nil + case "pending": + break + case "invalid": + return handleChallengeError(challengeResponse) + default: + return errors.New("The server returned an unexpected state.") + } + + ra, err := strconv.Atoi(hdr.Get("Retry-After")) + if err != nil { + // The ACME server MUST return a Retry-After. + // If it doesn't, we'll just poll hard. 
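+			// Fall back to polling roughly once per second.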
+ ra = 1 + } + time.Sleep(time.Duration(ra) * time.Second) + + hdr, err = getJSON(uri, &challengeResponse) + if err != nil { + return err + } + } +} diff --git a/vendor/github.com/docker/distribution/vendor/rsc.io/letsencrypt/vendor/github.com/xenolf/lego/acme/client_test.go b/vendor/github.com/docker/distribution/vendor/rsc.io/letsencrypt/vendor/github.com/xenolf/lego/acme/client_test.go new file mode 100644 index 000000000..e309554f3 --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/rsc.io/letsencrypt/vendor/github.com/xenolf/lego/acme/client_test.go @@ -0,0 +1,198 @@ +package acme + +import ( + "crypto" + "crypto/rand" + "crypto/rsa" + "encoding/json" + "net" + "net/http" + "net/http/httptest" + "strings" + "testing" +) + +func TestNewClient(t *testing.T) { + keyBits := 32 // small value keeps test fast + keyType := RSA2048 + key, err := rsa.GenerateKey(rand.Reader, keyBits) + if err != nil { + t.Fatal("Could not generate test key:", err) + } + user := mockUser{ + email: "test@test.com", + regres: new(RegistrationResource), + privatekey: key, + } + + ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + data, _ := json.Marshal(directory{NewAuthzURL: "http://test", NewCertURL: "http://test", NewRegURL: "http://test", RevokeCertURL: "http://test"}) + w.Write(data) + })) + + client, err := NewClient(ts.URL, user, keyType) + if err != nil { + t.Fatalf("Could not create client: %v", err) + } + + if client.jws == nil { + t.Fatalf("Expected client.jws to not be nil") + } + if expected, actual := key, client.jws.privKey; actual != expected { + t.Errorf("Expected jws.privKey to be %p but was %p", expected, actual) + } + + if client.keyType != keyType { + t.Errorf("Expected keyType to be %s but was %s", keyType, client.keyType) + } + + if expected, actual := 2, len(client.solvers); actual != expected { + t.Fatalf("Expected %d solver(s), got %d", expected, actual) + } +} + +func TestClientOptPort(t *testing.T) { + keyBits := 32 // small value keeps test fast + key, err := rsa.GenerateKey(rand.Reader, keyBits) + if err != nil { + t.Fatal("Could not generate test key:", err) + } + user := mockUser{ + email: "test@test.com", + regres: new(RegistrationResource), + privatekey: key, + } + + ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + data, _ := json.Marshal(directory{NewAuthzURL: "http://test", NewCertURL: "http://test", NewRegURL: "http://test", RevokeCertURL: "http://test"}) + w.Write(data) + })) + + optPort := "1234" + optHost := "" + client, err := NewClient(ts.URL, user, RSA2048) + if err != nil { + t.Fatalf("Could not create client: %v", err) + } + client.SetHTTPAddress(net.JoinHostPort(optHost, optPort)) + client.SetTLSAddress(net.JoinHostPort(optHost, optPort)) + + httpSolver, ok := client.solvers[HTTP01].(*httpChallenge) + if !ok { + t.Fatal("Expected http-01 solver to be httpChallenge type") + } + if httpSolver.jws != client.jws { + t.Error("Expected http-01 to have same jws as client") + } + if got := httpSolver.provider.(*HTTPProviderServer).port; got != optPort { + t.Errorf("Expected http-01 to have port %s but was %s", optPort, got) + } + if got := httpSolver.provider.(*HTTPProviderServer).iface; got != optHost { + t.Errorf("Expected http-01 to have iface %s but was %s", optHost, got) + } + + httpsSolver, ok := client.solvers[TLSSNI01].(*tlsSNIChallenge) + if !ok { + t.Fatal("Expected tls-sni-01 solver to be httpChallenge type") + } + if httpsSolver.jws != client.jws { + t.Error("Expected 
tls-sni-01 to have same jws as client") + } + if got := httpsSolver.provider.(*TLSProviderServer).port; got != optPort { + t.Errorf("Expected tls-sni-01 to have port %s but was %s", optPort, got) + } + if got := httpsSolver.provider.(*TLSProviderServer).iface; got != optHost { + t.Errorf("Expected tls-sni-01 to have port %s but was %s", optHost, got) + } + + // test setting different host + optHost = "127.0.0.1" + client.SetHTTPAddress(net.JoinHostPort(optHost, optPort)) + client.SetTLSAddress(net.JoinHostPort(optHost, optPort)) + + if got := httpSolver.provider.(*HTTPProviderServer).iface; got != optHost { + t.Errorf("Expected http-01 to have iface %s but was %s", optHost, got) + } + if got := httpsSolver.provider.(*TLSProviderServer).port; got != optPort { + t.Errorf("Expected tls-sni-01 to have port %s but was %s", optPort, got) + } +} + +func TestValidate(t *testing.T) { + var statuses []string + ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + // Minimal stub ACME server for validation. + w.Header().Add("Replay-Nonce", "12345") + w.Header().Add("Retry-After", "0") + switch r.Method { + case "HEAD": + case "POST": + st := statuses[0] + statuses = statuses[1:] + writeJSONResponse(w, &challenge{Type: "http-01", Status: st, URI: "http://example.com/", Token: "token"}) + + case "GET": + st := statuses[0] + statuses = statuses[1:] + writeJSONResponse(w, &challenge{Type: "http-01", Status: st, URI: "http://example.com/", Token: "token"}) + + default: + http.Error(w, r.Method, http.StatusMethodNotAllowed) + } + })) + defer ts.Close() + + privKey, _ := rsa.GenerateKey(rand.Reader, 512) + j := &jws{privKey: privKey, directoryURL: ts.URL} + + tsts := []struct { + name string + statuses []string + want string + }{ + {"POST-unexpected", []string{"weird"}, "unexpected"}, + {"POST-valid", []string{"valid"}, ""}, + {"POST-invalid", []string{"invalid"}, "Error Detail"}, + {"GET-unexpected", []string{"pending", "weird"}, "unexpected"}, + {"GET-valid", []string{"pending", "valid"}, ""}, + {"GET-invalid", []string{"pending", "invalid"}, "Error Detail"}, + } + + for _, tst := range tsts { + statuses = tst.statuses + if err := validate(j, "example.com", ts.URL, challenge{Type: "http-01", Token: "token"}); err == nil && tst.want != "" { + t.Errorf("[%s] validate: got error %v, want something with %q", tst.name, err, tst.want) + } else if err != nil && !strings.Contains(err.Error(), tst.want) { + t.Errorf("[%s] validate: got error %v, want something with %q", tst.name, err, tst.want) + } + } +} + +// writeJSONResponse marshals the body as JSON and writes it to the response. +func writeJSONResponse(w http.ResponseWriter, body interface{}) { + bs, err := json.Marshal(body) + if err != nil { + http.Error(w, err.Error(), http.StatusInternalServerError) + return + } + + w.Header().Set("Content-Type", "application/json") + if _, err := w.Write(bs); err != nil { + http.Error(w, err.Error(), http.StatusInternalServerError) + } +} + +// stubValidate is like validate, except it does nothing. 
+func stubValidate(j *jws, domain, uri string, chlng challenge) error { + return nil +} + +type mockUser struct { + email string + regres *RegistrationResource + privatekey *rsa.PrivateKey +} + +func (u mockUser) GetEmail() string { return u.email } +func (u mockUser) GetRegistration() *RegistrationResource { return u.regres } +func (u mockUser) GetPrivateKey() crypto.PrivateKey { return u.privatekey } diff --git a/vendor/github.com/docker/distribution/vendor/rsc.io/letsencrypt/vendor/github.com/xenolf/lego/acme/crypto.go b/vendor/github.com/docker/distribution/vendor/rsc.io/letsencrypt/vendor/github.com/xenolf/lego/acme/crypto.go new file mode 100644 index 000000000..fc20442f7 --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/rsc.io/letsencrypt/vendor/github.com/xenolf/lego/acme/crypto.go @@ -0,0 +1,323 @@ +package acme + +import ( + "bytes" + "crypto" + "crypto/ecdsa" + "crypto/elliptic" + "crypto/rand" + "crypto/rsa" + "crypto/x509" + "crypto/x509/pkix" + "encoding/base64" + "encoding/pem" + "errors" + "fmt" + "io" + "io/ioutil" + "math/big" + "net/http" + "strings" + "time" + + "golang.org/x/crypto/ocsp" +) + +// KeyType represents the key algo as well as the key size or curve to use. +type KeyType string +type derCertificateBytes []byte + +// Constants for all key types we support. +const ( + EC256 = KeyType("P256") + EC384 = KeyType("P384") + RSA2048 = KeyType("2048") + RSA4096 = KeyType("4096") + RSA8192 = KeyType("8192") +) + +const ( + // OCSPGood means that the certificate is valid. + OCSPGood = ocsp.Good + // OCSPRevoked means that the certificate has been deliberately revoked. + OCSPRevoked = ocsp.Revoked + // OCSPUnknown means that the OCSP responder doesn't know about the certificate. + OCSPUnknown = ocsp.Unknown + // OCSPServerFailed means that the OCSP responder failed to process the request. + OCSPServerFailed = ocsp.ServerFailed +) + +// GetOCSPForCert takes a PEM encoded cert or cert bundle returning the raw OCSP response, +// the parsed response, and an error, if any. The returned []byte can be passed directly +// into the OCSPStaple property of a tls.Certificate. If the bundle only contains the +// issued certificate, this function will try to get the issuer certificate from the +// IssuingCertificateURL in the certificate. If the []byte and/or ocsp.Response return +// values are nil, the OCSP status may be assumed OCSPUnknown. +func GetOCSPForCert(bundle []byte) ([]byte, *ocsp.Response, error) { + certificates, err := parsePEMBundle(bundle) + if err != nil { + return nil, nil, err + } + + // We expect the certificate slice to be ordered downwards the chain. + // SRV CRT -> CA. We need to pull the leaf and issuer certs out of it, + // which should always be the first two certificates. If there's no + // OCSP server listed in the leaf cert, there's nothing to do. And if + // we have only one certificate so far, we need to get the issuer cert. + issuedCert := certificates[0] + if len(issuedCert.OCSPServer) == 0 { + return nil, nil, errors.New("no OCSP server specified in cert") + } + if len(certificates) == 1 { + // TODO: build fallback. If this fails, check the remaining array entries. 
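+		// Fetch the issuer certificate from the URL embedded in the issued
+		// certificate so the OCSP request below can be built.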
+ if len(issuedCert.IssuingCertificateURL) == 0 { + return nil, nil, errors.New("no issuing certificate URL") + } + + resp, err := httpGet(issuedCert.IssuingCertificateURL[0]) + if err != nil { + return nil, nil, err + } + defer resp.Body.Close() + + issuerBytes, err := ioutil.ReadAll(limitReader(resp.Body, 1024*1024)) + if err != nil { + return nil, nil, err + } + + issuerCert, err := x509.ParseCertificate(issuerBytes) + if err != nil { + return nil, nil, err + } + + // Insert it into the slice on position 0 + // We want it ordered right SRV CRT -> CA + certificates = append(certificates, issuerCert) + } + issuerCert := certificates[1] + + // Finally kick off the OCSP request. + ocspReq, err := ocsp.CreateRequest(issuedCert, issuerCert, nil) + if err != nil { + return nil, nil, err + } + + reader := bytes.NewReader(ocspReq) + req, err := httpPost(issuedCert.OCSPServer[0], "application/ocsp-request", reader) + if err != nil { + return nil, nil, err + } + defer req.Body.Close() + + ocspResBytes, err := ioutil.ReadAll(limitReader(req.Body, 1024*1024)) + ocspRes, err := ocsp.ParseResponse(ocspResBytes, issuerCert) + if err != nil { + return nil, nil, err + } + + if ocspRes.Certificate == nil { + err = ocspRes.CheckSignatureFrom(issuerCert) + if err != nil { + return nil, nil, err + } + } + + return ocspResBytes, ocspRes, nil +} + +func getKeyAuthorization(token string, key interface{}) (string, error) { + var publicKey crypto.PublicKey + switch k := key.(type) { + case *ecdsa.PrivateKey: + publicKey = k.Public() + case *rsa.PrivateKey: + publicKey = k.Public() + } + + // Generate the Key Authorization for the challenge + jwk := keyAsJWK(publicKey) + if jwk == nil { + return "", errors.New("Could not generate JWK from key.") + } + thumbBytes, err := jwk.Thumbprint(crypto.SHA256) + if err != nil { + return "", err + } + + // unpad the base64URL + keyThumb := base64.URLEncoding.EncodeToString(thumbBytes) + index := strings.Index(keyThumb, "=") + if index != -1 { + keyThumb = keyThumb[:index] + } + + return token + "." + keyThumb, nil +} + +// parsePEMBundle parses a certificate bundle from top to bottom and returns +// a slice of x509 certificates. This function will error if no certificates are found. 
+func parsePEMBundle(bundle []byte) ([]*x509.Certificate, error) { + var certificates []*x509.Certificate + var certDERBlock *pem.Block + + for { + certDERBlock, bundle = pem.Decode(bundle) + if certDERBlock == nil { + break + } + + if certDERBlock.Type == "CERTIFICATE" { + cert, err := x509.ParseCertificate(certDERBlock.Bytes) + if err != nil { + return nil, err + } + certificates = append(certificates, cert) + } + } + + if len(certificates) == 0 { + return nil, errors.New("No certificates were found while parsing the bundle.") + } + + return certificates, nil +} + +func parsePEMPrivateKey(key []byte) (crypto.PrivateKey, error) { + keyBlock, _ := pem.Decode(key) + + switch keyBlock.Type { + case "RSA PRIVATE KEY": + return x509.ParsePKCS1PrivateKey(keyBlock.Bytes) + case "EC PRIVATE KEY": + return x509.ParseECPrivateKey(keyBlock.Bytes) + default: + return nil, errors.New("Unknown PEM header value") + } +} + +func generatePrivateKey(keyType KeyType) (crypto.PrivateKey, error) { + + switch keyType { + case EC256: + return ecdsa.GenerateKey(elliptic.P256(), rand.Reader) + case EC384: + return ecdsa.GenerateKey(elliptic.P384(), rand.Reader) + case RSA2048: + return rsa.GenerateKey(rand.Reader, 2048) + case RSA4096: + return rsa.GenerateKey(rand.Reader, 4096) + case RSA8192: + return rsa.GenerateKey(rand.Reader, 8192) + } + + return nil, fmt.Errorf("Invalid KeyType: %s", keyType) +} + +func generateCsr(privateKey crypto.PrivateKey, domain string, san []string) ([]byte, error) { + template := x509.CertificateRequest{ + Subject: pkix.Name{ + CommonName: domain, + }, + } + + if len(san) > 0 { + template.DNSNames = san + } + + return x509.CreateCertificateRequest(rand.Reader, &template, privateKey) +} + +func pemEncode(data interface{}) []byte { + var pemBlock *pem.Block + switch key := data.(type) { + case *ecdsa.PrivateKey: + keyBytes, _ := x509.MarshalECPrivateKey(key) + pemBlock = &pem.Block{Type: "EC PRIVATE KEY", Bytes: keyBytes} + case *rsa.PrivateKey: + pemBlock = &pem.Block{Type: "RSA PRIVATE KEY", Bytes: x509.MarshalPKCS1PrivateKey(key)} + break + case derCertificateBytes: + pemBlock = &pem.Block{Type: "CERTIFICATE", Bytes: []byte(data.(derCertificateBytes))} + } + + return pem.EncodeToMemory(pemBlock) +} + +func pemDecode(data []byte) (*pem.Block, error) { + pemBlock, _ := pem.Decode(data) + if pemBlock == nil { + return nil, fmt.Errorf("Pem decode did not yield a valid block. Is the certificate in the right format?") + } + + return pemBlock, nil +} + +func pemDecodeTox509(pem []byte) (*x509.Certificate, error) { + pemBlock, err := pemDecode(pem) + if pemBlock == nil { + return nil, err + } + + return x509.ParseCertificate(pemBlock.Bytes) +} + +// GetPEMCertExpiration returns the "NotAfter" date of a PEM encoded certificate. +// The certificate has to be PEM encoded. Any other encodings like DER will fail. +func GetPEMCertExpiration(cert []byte) (time.Time, error) { + pemBlock, err := pemDecode(cert) + if pemBlock == nil { + return time.Time{}, err + } + + return getCertExpiration(pemBlock.Bytes) +} + +// getCertExpiration returns the "NotAfter" date of a DER encoded certificate. 
+func getCertExpiration(cert []byte) (time.Time, error) { + pCert, err := x509.ParseCertificate(cert) + if err != nil { + return time.Time{}, err + } + + return pCert.NotAfter, nil +} + +func generatePemCert(privKey *rsa.PrivateKey, domain string) ([]byte, error) { + derBytes, err := generateDerCert(privKey, time.Time{}, domain) + if err != nil { + return nil, err + } + + return pem.EncodeToMemory(&pem.Block{Type: "CERTIFICATE", Bytes: derBytes}), nil +} + +func generateDerCert(privKey *rsa.PrivateKey, expiration time.Time, domain string) ([]byte, error) { + serialNumberLimit := new(big.Int).Lsh(big.NewInt(1), 128) + serialNumber, err := rand.Int(rand.Reader, serialNumberLimit) + if err != nil { + return nil, err + } + + if expiration.IsZero() { + expiration = time.Now().Add(365) + } + + template := x509.Certificate{ + SerialNumber: serialNumber, + Subject: pkix.Name{ + CommonName: "ACME Challenge TEMP", + }, + NotBefore: time.Now(), + NotAfter: expiration, + + KeyUsage: x509.KeyUsageKeyEncipherment, + BasicConstraintsValid: true, + DNSNames: []string{domain}, + } + + return x509.CreateCertificate(rand.Reader, &template, &template, &privKey.PublicKey, privKey) +} + +func limitReader(rd io.ReadCloser, numBytes int64) io.ReadCloser { + return http.MaxBytesReader(nil, rd, numBytes) +} diff --git a/vendor/github.com/docker/distribution/vendor/rsc.io/letsencrypt/vendor/github.com/xenolf/lego/acme/crypto_test.go b/vendor/github.com/docker/distribution/vendor/rsc.io/letsencrypt/vendor/github.com/xenolf/lego/acme/crypto_test.go new file mode 100644 index 000000000..d2fc5088b --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/rsc.io/letsencrypt/vendor/github.com/xenolf/lego/acme/crypto_test.go @@ -0,0 +1,93 @@ +package acme + +import ( + "bytes" + "crypto/rand" + "crypto/rsa" + "testing" + "time" +) + +func TestGeneratePrivateKey(t *testing.T) { + key, err := generatePrivateKey(RSA2048) + if err != nil { + t.Error("Error generating private key:", err) + } + if key == nil { + t.Error("Expected key to not be nil, but it was") + } +} + +func TestGenerateCSR(t *testing.T) { + key, err := rsa.GenerateKey(rand.Reader, 512) + if err != nil { + t.Fatal("Error generating private key:", err) + } + + csr, err := generateCsr(key, "fizz.buzz", nil) + if err != nil { + t.Error("Error generating CSR:", err) + } + if csr == nil || len(csr) == 0 { + t.Error("Expected CSR with data, but it was nil or length 0") + } +} + +func TestPEMEncode(t *testing.T) { + buf := bytes.NewBufferString("TestingRSAIsSoMuchFun") + + reader := MockRandReader{b: buf} + key, err := rsa.GenerateKey(reader, 32) + if err != nil { + t.Fatal("Error generating private key:", err) + } + + data := pemEncode(key) + + if data == nil { + t.Fatal("Expected result to not be nil, but it was") + } + if len(data) != 127 { + t.Errorf("Expected PEM encoding to be length 127, but it was %d", len(data)) + } +} + +func TestPEMCertExpiration(t *testing.T) { + privKey, err := generatePrivateKey(RSA2048) + if err != nil { + t.Fatal("Error generating private key:", err) + } + + expiration := time.Now().Add(365) + expiration = expiration.Round(time.Second) + certBytes, err := generateDerCert(privKey.(*rsa.PrivateKey), expiration, "test.com") + if err != nil { + t.Fatal("Error generating cert:", err) + } + + buf := bytes.NewBufferString("TestingRSAIsSoMuchFun") + + // Some random string should return an error. 
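+	// buf holds an arbitrary string rather than PEM data, so decoding is expected to fail.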
+ if ctime, err := GetPEMCertExpiration(buf.Bytes()); err == nil { + t.Errorf("Expected getCertExpiration to return an error for garbage string but returned %v", ctime) + } + + // A DER encoded certificate should return an error. + if _, err := GetPEMCertExpiration(certBytes); err == nil { + t.Errorf("Expected getCertExpiration to return an error for DER certificates but returned none.") + } + + // A PEM encoded certificate should work ok. + pemCert := pemEncode(derCertificateBytes(certBytes)) + if ctime, err := GetPEMCertExpiration(pemCert); err != nil || !ctime.Equal(expiration.UTC()) { + t.Errorf("Expected getCertExpiration to return %v but returned %v. Error: %v", expiration, ctime, err) + } +} + +type MockRandReader struct { + b *bytes.Buffer +} + +func (r MockRandReader) Read(p []byte) (int, error) { + return r.b.Read(p) +} diff --git a/vendor/github.com/docker/distribution/vendor/rsc.io/letsencrypt/vendor/github.com/xenolf/lego/acme/error.go b/vendor/github.com/docker/distribution/vendor/rsc.io/letsencrypt/vendor/github.com/xenolf/lego/acme/error.go new file mode 100644 index 000000000..b32561a3a --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/rsc.io/letsencrypt/vendor/github.com/xenolf/lego/acme/error.go @@ -0,0 +1,73 @@ +package acme + +import ( + "encoding/json" + "fmt" + "net/http" + "strings" +) + +const ( + tosAgreementError = "Must agree to subscriber agreement before any further actions" +) + +// RemoteError is the base type for all errors specific to the ACME protocol. +type RemoteError struct { + StatusCode int `json:"status,omitempty"` + Type string `json:"type"` + Detail string `json:"detail"` +} + +func (e RemoteError) Error() string { + return fmt.Sprintf("acme: Error %d - %s - %s", e.StatusCode, e.Type, e.Detail) +} + +// TOSError represents the error which is returned if the user needs to +// accept the TOS. +// TODO: include the new TOS url if we can somehow obtain it. 
+type TOSError struct { + RemoteError +} + +type domainError struct { + Domain string + Error error +} + +type challengeError struct { + RemoteError + records []validationRecord +} + +func (c challengeError) Error() string { + + var errStr string + for _, validation := range c.records { + errStr = errStr + fmt.Sprintf("\tValidation for %s:%s\n\tResolved to:\n\t\t%s\n\tUsed: %s\n\n", + validation.Hostname, validation.Port, strings.Join(validation.ResolvedAddresses, "\n\t\t"), validation.UsedAddress) + } + + return fmt.Sprintf("%s\nError Detail:\n%s", c.RemoteError.Error(), errStr) +} + +func handleHTTPError(resp *http.Response) error { + var errorDetail RemoteError + decoder := json.NewDecoder(resp.Body) + err := decoder.Decode(&errorDetail) + if err != nil { + return err + } + + errorDetail.StatusCode = resp.StatusCode + + // Check for errors we handle specifically + if errorDetail.StatusCode == http.StatusForbidden && errorDetail.Detail == tosAgreementError { + return TOSError{errorDetail} + } + + return errorDetail +} + +func handleChallengeError(chlng challenge) error { + return challengeError{chlng.Error, chlng.ValidationRecords} +} diff --git a/vendor/github.com/docker/distribution/vendor/rsc.io/letsencrypt/vendor/github.com/xenolf/lego/acme/http.go b/vendor/github.com/docker/distribution/vendor/rsc.io/letsencrypt/vendor/github.com/xenolf/lego/acme/http.go new file mode 100644 index 000000000..410aead6d --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/rsc.io/letsencrypt/vendor/github.com/xenolf/lego/acme/http.go @@ -0,0 +1,117 @@ +package acme + +import ( + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "runtime" + "strings" + "time" +) + +// UserAgent (if non-empty) will be tacked onto the User-Agent string in requests. +var UserAgent string + +// defaultClient is an HTTP client with a reasonable timeout value. +var defaultClient = http.Client{Timeout: 10 * time.Second} + +const ( + // defaultGoUserAgent is the Go HTTP package user agent string. Too + // bad it isn't exported. If it changes, we should update it here, too. + defaultGoUserAgent = "Go-http-client/1.1" + + // ourUserAgent is the User-Agent of this underlying library package. + ourUserAgent = "xenolf-acme" +) + +// httpHead performs a HEAD request with a proper User-Agent string. +// The response body (resp.Body) is already closed when this function returns. +func httpHead(url string) (resp *http.Response, err error) { + req, err := http.NewRequest("HEAD", url, nil) + if err != nil { + return nil, err + } + + req.Header.Set("User-Agent", userAgent()) + + resp, err = defaultClient.Do(req) + if err != nil { + return resp, err + } + resp.Body.Close() + return resp, err +} + +// httpPost performs a POST request with a proper User-Agent string. +// Callers should close resp.Body when done reading from it. +func httpPost(url string, bodyType string, body io.Reader) (resp *http.Response, err error) { + req, err := http.NewRequest("POST", url, body) + if err != nil { + return nil, err + } + req.Header.Set("Content-Type", bodyType) + req.Header.Set("User-Agent", userAgent()) + + return defaultClient.Do(req) +} + +// httpGet performs a GET request with a proper User-Agent string. +// Callers should close resp.Body when done reading from it. 
+func httpGet(url string) (resp *http.Response, err error) { + req, err := http.NewRequest("GET", url, nil) + if err != nil { + return nil, err + } + req.Header.Set("User-Agent", userAgent()) + + return defaultClient.Do(req) +} + +// getJSON performs an HTTP GET request and parses the response body +// as JSON, into the provided respBody object. +func getJSON(uri string, respBody interface{}) (http.Header, error) { + resp, err := httpGet(uri) + if err != nil { + return nil, fmt.Errorf("failed to get %q: %v", uri, err) + } + defer resp.Body.Close() + + if resp.StatusCode >= http.StatusBadRequest { + return resp.Header, handleHTTPError(resp) + } + + return resp.Header, json.NewDecoder(resp.Body).Decode(respBody) +} + +// postJSON performs an HTTP POST request and parses the response body +// as JSON, into the provided respBody object. +func postJSON(j *jws, uri string, reqBody, respBody interface{}) (http.Header, error) { + jsonBytes, err := json.Marshal(reqBody) + if err != nil { + return nil, errors.New("Failed to marshal network message...") + } + + resp, err := j.post(uri, jsonBytes) + if err != nil { + return nil, fmt.Errorf("Failed to post JWS message. -> %v", err) + } + defer resp.Body.Close() + + if resp.StatusCode >= http.StatusBadRequest { + return resp.Header, handleHTTPError(resp) + } + + if respBody == nil { + return resp.Header, nil + } + + return resp.Header, json.NewDecoder(resp.Body).Decode(respBody) +} + +// userAgent builds and returns the User-Agent string to use in requests. +func userAgent() string { + ua := fmt.Sprintf("%s (%s; %s) %s %s", defaultGoUserAgent, runtime.GOOS, runtime.GOARCH, ourUserAgent, UserAgent) + return strings.TrimSpace(ua) +} diff --git a/vendor/github.com/docker/distribution/vendor/rsc.io/letsencrypt/vendor/github.com/xenolf/lego/acme/http_challenge.go b/vendor/github.com/docker/distribution/vendor/rsc.io/letsencrypt/vendor/github.com/xenolf/lego/acme/http_challenge.go new file mode 100644 index 000000000..95cb1fd81 --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/rsc.io/letsencrypt/vendor/github.com/xenolf/lego/acme/http_challenge.go @@ -0,0 +1,41 @@ +package acme + +import ( + "fmt" + "log" +) + +type httpChallenge struct { + jws *jws + validate validateFunc + provider ChallengeProvider +} + +// HTTP01ChallengePath returns the URL path for the `http-01` challenge +func HTTP01ChallengePath(token string) string { + return "/.well-known/acme-challenge/" + token +} + +func (s *httpChallenge) Solve(chlng challenge, domain string) error { + + logf("[INFO][%s] acme: Trying to solve HTTP-01", domain) + + // Generate the Key Authorization for the challenge + keyAuth, err := getKeyAuthorization(chlng.Token, s.jws.privKey) + if err != nil { + return err + } + + err = s.provider.Present(domain, chlng.Token, keyAuth) + if err != nil { + return fmt.Errorf("[%s] error presenting token: %v", domain, err) + } + defer func() { + err := s.provider.CleanUp(domain, chlng.Token, keyAuth) + if err != nil { + log.Printf("[%s] error cleaning up: %v", domain, err) + } + }() + + return s.validate(s.jws, domain, chlng.URI, challenge{Resource: "challenge", Type: chlng.Type, Token: chlng.Token, KeyAuthorization: keyAuth}) +} diff --git a/vendor/github.com/docker/distribution/vendor/rsc.io/letsencrypt/vendor/github.com/xenolf/lego/acme/http_challenge_server.go b/vendor/github.com/docker/distribution/vendor/rsc.io/letsencrypt/vendor/github.com/xenolf/lego/acme/http_challenge_server.go new file mode 100644 index 000000000..42541380c --- /dev/null +++ 
b/vendor/github.com/docker/distribution/vendor/rsc.io/letsencrypt/vendor/github.com/xenolf/lego/acme/http_challenge_server.go @@ -0,0 +1,79 @@ +package acme + +import ( + "fmt" + "net" + "net/http" + "strings" +) + +// HTTPProviderServer implements ChallengeProvider for `http-01` challenge +// It may be instantiated without using the NewHTTPProviderServer function if +// you want only to use the default values. +type HTTPProviderServer struct { + iface string + port string + done chan bool + listener net.Listener +} + +// NewHTTPProviderServer creates a new HTTPProviderServer on the selected interface and port. +// Setting iface and / or port to an empty string will make the server fall back to +// the "any" interface and port 80 respectively. +func NewHTTPProviderServer(iface, port string) *HTTPProviderServer { + return &HTTPProviderServer{iface: iface, port: port} +} + +// Present starts a web server and makes the token available at `HTTP01ChallengePath(token)` for web requests. +func (s *HTTPProviderServer) Present(domain, token, keyAuth string) error { + if s.port == "" { + s.port = "80" + } + + var err error + s.listener, err = net.Listen("tcp", net.JoinHostPort(s.iface, s.port)) + if err != nil { + return fmt.Errorf("Could not start HTTP server for challenge -> %v", err) + } + + s.done = make(chan bool) + go s.serve(domain, token, keyAuth) + return nil +} + +// CleanUp closes the HTTP server and removes the token from `HTTP01ChallengePath(token)` +func (s *HTTPProviderServer) CleanUp(domain, token, keyAuth string) error { + if s.listener == nil { + return nil + } + s.listener.Close() + <-s.done + return nil +} + +func (s *HTTPProviderServer) serve(domain, token, keyAuth string) { + path := HTTP01ChallengePath(token) + + // The handler validates the HOST header and request type. + // For validation it then writes the token the server returned with the challenge + mux := http.NewServeMux() + mux.HandleFunc(path, func(w http.ResponseWriter, r *http.Request) { + if strings.HasPrefix(r.Host, domain) && r.Method == "GET" { + w.Header().Add("Content-Type", "text/plain") + w.Write([]byte(keyAuth)) + logf("[INFO][%s] Served key authentication", domain) + } else { + logf("[INFO] Received request for domain %s with method %s", r.Host, r.Method) + w.Write([]byte("TEST")) + } + }) + + httpServer := &http.Server{ + Handler: mux, + } + // Once httpServer is shut down we don't want any lingering + // connections, so disable KeepAlives. 
+ httpServer.SetKeepAlivesEnabled(false) + httpServer.Serve(s.listener) + s.done <- true +} diff --git a/vendor/github.com/docker/distribution/vendor/rsc.io/letsencrypt/vendor/github.com/xenolf/lego/acme/http_challenge_test.go b/vendor/github.com/docker/distribution/vendor/rsc.io/letsencrypt/vendor/github.com/xenolf/lego/acme/http_challenge_test.go new file mode 100644 index 000000000..fdd8f4d27 --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/rsc.io/letsencrypt/vendor/github.com/xenolf/lego/acme/http_challenge_test.go @@ -0,0 +1,57 @@ +package acme + +import ( + "crypto/rand" + "crypto/rsa" + "io/ioutil" + "strings" + "testing" +) + +func TestHTTPChallenge(t *testing.T) { + privKey, _ := rsa.GenerateKey(rand.Reader, 512) + j := &jws{privKey: privKey} + clientChallenge := challenge{Type: HTTP01, Token: "http1"} + mockValidate := func(_ *jws, _, _ string, chlng challenge) error { + uri := "http://localhost:23457/.well-known/acme-challenge/" + chlng.Token + resp, err := httpGet(uri) + if err != nil { + return err + } + defer resp.Body.Close() + + if want := "text/plain"; resp.Header.Get("Content-Type") != want { + t.Errorf("Get(%q) Content-Type: got %q, want %q", uri, resp.Header.Get("Content-Type"), want) + } + + body, err := ioutil.ReadAll(resp.Body) + if err != nil { + return err + } + bodyStr := string(body) + + if bodyStr != chlng.KeyAuthorization { + t.Errorf("Get(%q) Body: got %q, want %q", uri, bodyStr, chlng.KeyAuthorization) + } + + return nil + } + solver := &httpChallenge{jws: j, validate: mockValidate, provider: &HTTPProviderServer{port: "23457"}} + + if err := solver.Solve(clientChallenge, "localhost:23457"); err != nil { + t.Errorf("Solve error: got %v, want nil", err) + } +} + +func TestHTTPChallengeInvalidPort(t *testing.T) { + privKey, _ := rsa.GenerateKey(rand.Reader, 128) + j := &jws{privKey: privKey} + clientChallenge := challenge{Type: HTTP01, Token: "http2"} + solver := &httpChallenge{jws: j, validate: stubValidate, provider: &HTTPProviderServer{port: "123456"}} + + if err := solver.Solve(clientChallenge, "localhost:123456"); err == nil { + t.Errorf("Solve error: got %v, want error", err) + } else if want := "invalid port 123456"; !strings.HasSuffix(err.Error(), want) { + t.Errorf("Solve error: got %q, want suffix %q", err.Error(), want) + } +} diff --git a/vendor/github.com/docker/distribution/vendor/rsc.io/letsencrypt/vendor/github.com/xenolf/lego/acme/http_test.go b/vendor/github.com/docker/distribution/vendor/rsc.io/letsencrypt/vendor/github.com/xenolf/lego/acme/http_test.go new file mode 100644 index 000000000..33a48a331 --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/rsc.io/letsencrypt/vendor/github.com/xenolf/lego/acme/http_test.go @@ -0,0 +1,100 @@ +package acme + +import ( + "net/http" + "net/http/httptest" + "strings" + "testing" +) + +func TestHTTPHeadUserAgent(t *testing.T) { + var ua, method string + ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + ua = r.Header.Get("User-Agent") + method = r.Method + })) + defer ts.Close() + + _, err := httpHead(ts.URL) + if err != nil { + t.Fatal(err) + } + + if method != "HEAD" { + t.Errorf("Expected method to be HEAD, got %s", method) + } + if !strings.Contains(ua, ourUserAgent) { + t.Errorf("Expected User-Agent to contain '%s', got: '%s'", ourUserAgent, ua) + } +} + +func TestHTTPGetUserAgent(t *testing.T) { + var ua, method string + ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + ua = 
r.Header.Get("User-Agent") + method = r.Method + })) + defer ts.Close() + + res, err := httpGet(ts.URL) + if err != nil { + t.Fatal(err) + } + res.Body.Close() + + if method != "GET" { + t.Errorf("Expected method to be GET, got %s", method) + } + if !strings.Contains(ua, ourUserAgent) { + t.Errorf("Expected User-Agent to contain '%s', got: '%s'", ourUserAgent, ua) + } +} + +func TestHTTPPostUserAgent(t *testing.T) { + var ua, method string + ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + ua = r.Header.Get("User-Agent") + method = r.Method + })) + defer ts.Close() + + res, err := httpPost(ts.URL, "text/plain", strings.NewReader("falalalala")) + if err != nil { + t.Fatal(err) + } + res.Body.Close() + + if method != "POST" { + t.Errorf("Expected method to be POST, got %s", method) + } + if !strings.Contains(ua, ourUserAgent) { + t.Errorf("Expected User-Agent to contain '%s', got: '%s'", ourUserAgent, ua) + } +} + +func TestUserAgent(t *testing.T) { + ua := userAgent() + + if !strings.Contains(ua, defaultGoUserAgent) { + t.Errorf("Expected UA to contain %s, got '%s'", defaultGoUserAgent, ua) + } + if !strings.Contains(ua, ourUserAgent) { + t.Errorf("Expected UA to contain %s, got '%s'", ourUserAgent, ua) + } + if strings.HasSuffix(ua, " ") { + t.Errorf("UA should not have trailing spaces; got '%s'", ua) + } + + // customize the UA by appending a value + UserAgent = "MyApp/1.2.3" + ua = userAgent() + if !strings.Contains(ua, defaultGoUserAgent) { + t.Errorf("Expected UA to contain %s, got '%s'", defaultGoUserAgent, ua) + } + if !strings.Contains(ua, ourUserAgent) { + t.Errorf("Expected UA to contain %s, got '%s'", ourUserAgent, ua) + } + if !strings.Contains(ua, UserAgent) { + t.Errorf("Expected custom UA to contain %s, got '%s'", UserAgent, ua) + } +} diff --git a/vendor/github.com/docker/distribution/vendor/rsc.io/letsencrypt/vendor/github.com/xenolf/lego/acme/jws.go b/vendor/github.com/docker/distribution/vendor/rsc.io/letsencrypt/vendor/github.com/xenolf/lego/acme/jws.go new file mode 100644 index 000000000..8435d0cfc --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/rsc.io/letsencrypt/vendor/github.com/xenolf/lego/acme/jws.go @@ -0,0 +1,107 @@ +package acme + +import ( + "bytes" + "crypto" + "crypto/ecdsa" + "crypto/elliptic" + "crypto/rsa" + "fmt" + "net/http" + + "gopkg.in/square/go-jose.v1" +) + +type jws struct { + directoryURL string + privKey crypto.PrivateKey + nonces []string +} + +func keyAsJWK(key interface{}) *jose.JsonWebKey { + switch k := key.(type) { + case *ecdsa.PublicKey: + return &jose.JsonWebKey{Key: k, Algorithm: "EC"} + case *rsa.PublicKey: + return &jose.JsonWebKey{Key: k, Algorithm: "RSA"} + + default: + return nil + } +} + +// Posts a JWS signed message to the specified URL +func (j *jws) post(url string, content []byte) (*http.Response, error) { + signedContent, err := j.signContent(content) + if err != nil { + return nil, err + } + + resp, err := httpPost(url, "application/jose+json", bytes.NewBuffer([]byte(signedContent.FullSerialize()))) + if err != nil { + return nil, err + } + + j.getNonceFromResponse(resp) + + return resp, err +} + +func (j *jws) signContent(content []byte) (*jose.JsonWebSignature, error) { + + var alg jose.SignatureAlgorithm + switch k := j.privKey.(type) { + case *rsa.PrivateKey: + alg = jose.RS256 + case *ecdsa.PrivateKey: + if k.Curve == elliptic.P256() { + alg = jose.ES256 + } else if k.Curve == elliptic.P384() { + alg = jose.ES384 + } + } + + signer, err := jose.NewSigner(alg, 
j.privKey) + if err != nil { + return nil, err + } + signer.SetNonceSource(j) + + signed, err := signer.Sign(content) + if err != nil { + return nil, err + } + return signed, nil +} + +func (j *jws) getNonceFromResponse(resp *http.Response) error { + nonce := resp.Header.Get("Replay-Nonce") + if nonce == "" { + return fmt.Errorf("Server did not respond with a proper nonce header.") + } + + j.nonces = append(j.nonces, nonce) + return nil +} + +func (j *jws) getNonce() error { + resp, err := httpHead(j.directoryURL) + if err != nil { + return err + } + + return j.getNonceFromResponse(resp) +} + +func (j *jws) Nonce() (string, error) { + nonce := "" + if len(j.nonces) == 0 { + err := j.getNonce() + if err != nil { + return nonce, err + } + } + + nonce, j.nonces = j.nonces[len(j.nonces)-1], j.nonces[:len(j.nonces)-1] + return nonce, nil +} diff --git a/vendor/github.com/docker/distribution/vendor/rsc.io/letsencrypt/vendor/github.com/xenolf/lego/acme/messages.go b/vendor/github.com/docker/distribution/vendor/rsc.io/letsencrypt/vendor/github.com/xenolf/lego/acme/messages.go new file mode 100644 index 000000000..d1fac9200 --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/rsc.io/letsencrypt/vendor/github.com/xenolf/lego/acme/messages.go @@ -0,0 +1,115 @@ +package acme + +import ( + "time" + + "gopkg.in/square/go-jose.v1" +) + +type directory struct { + NewAuthzURL string `json:"new-authz"` + NewCertURL string `json:"new-cert"` + NewRegURL string `json:"new-reg"` + RevokeCertURL string `json:"revoke-cert"` +} + +type recoveryKeyMessage struct { + Length int `json:"length,omitempty"` + Client jose.JsonWebKey `json:"client,omitempty"` + Server jose.JsonWebKey `json:"client,omitempty"` +} + +type registrationMessage struct { + Resource string `json:"resource"` + Contact []string `json:"contact"` + // RecoveryKey recoveryKeyMessage `json:"recoveryKey,omitempty"` +} + +// Registration is returned by the ACME server after the registration +// The client implementation should save this registration somewhere. +type Registration struct { + Resource string `json:"resource,omitempty"` + ID int `json:"id"` + Key jose.JsonWebKey `json:"key"` + Contact []string `json:"contact"` + Agreement string `json:"agreement,omitempty"` + Authorizations string `json:"authorizations,omitempty"` + Certificates string `json:"certificates,omitempty"` + // RecoveryKey recoveryKeyMessage `json:"recoveryKey,omitempty"` +} + +// RegistrationResource represents all important informations about a registration +// of which the client needs to keep track itself. 
+type RegistrationResource struct { + Body Registration `json:"body,omitempty"` + URI string `json:"uri,omitempty"` + NewAuthzURL string `json:"new_authzr_uri,omitempty"` + TosURL string `json:"terms_of_service,omitempty"` +} + +type authorizationResource struct { + Body authorization + Domain string + NewCertURL string + AuthURL string +} + +type authorization struct { + Resource string `json:"resource,omitempty"` + Identifier identifier `json:"identifier"` + Status string `json:"status,omitempty"` + Expires time.Time `json:"expires,omitempty"` + Challenges []challenge `json:"challenges,omitempty"` + Combinations [][]int `json:"combinations,omitempty"` +} + +type identifier struct { + Type string `json:"type"` + Value string `json:"value"` +} + +type validationRecord struct { + URI string `json:"url,omitempty"` + Hostname string `json:"hostname,omitempty"` + Port string `json:"port,omitempty"` + ResolvedAddresses []string `json:"addressesResolved,omitempty"` + UsedAddress string `json:"addressUsed,omitempty"` +} + +type challenge struct { + Resource string `json:"resource,omitempty"` + Type Challenge `json:"type,omitempty"` + Status string `json:"status,omitempty"` + URI string `json:"uri,omitempty"` + Token string `json:"token,omitempty"` + KeyAuthorization string `json:"keyAuthorization,omitempty"` + TLS bool `json:"tls,omitempty"` + Iterations int `json:"n,omitempty"` + Error RemoteError `json:"error,omitempty"` + ValidationRecords []validationRecord `json:"validationRecord,omitempty"` +} + +type csrMessage struct { + Resource string `json:"resource,omitempty"` + Csr string `json:"csr"` + Authorizations []string `json:"authorizations"` +} + +type revokeCertMessage struct { + Resource string `json:"resource"` + Certificate string `json:"certificate"` +} + +// CertificateResource represents a CA issued certificate. +// PrivateKey and Certificate are both already PEM encoded +// and can be directly written to disk. Certificate may +// be a certificate bundle, depending on the options supplied +// to create it. +type CertificateResource struct { + Domain string `json:"domain"` + CertURL string `json:"certUrl"` + CertStableURL string `json:"certStableUrl"` + AccountRef string `json:"accountRef,omitempty"` + PrivateKey []byte `json:"-"` + Certificate []byte `json:"-"` +} diff --git a/vendor/github.com/docker/distribution/vendor/rsc.io/letsencrypt/vendor/github.com/xenolf/lego/acme/provider.go b/vendor/github.com/docker/distribution/vendor/rsc.io/letsencrypt/vendor/github.com/xenolf/lego/acme/provider.go new file mode 100644 index 000000000..d177ff07a --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/rsc.io/letsencrypt/vendor/github.com/xenolf/lego/acme/provider.go @@ -0,0 +1,28 @@ +package acme + +import "time" + +// ChallengeProvider enables implementing a custom challenge +// provider. Present presents the solution to a challenge available to +// be solved. CleanUp will be called by the challenge if Present ends +// in a non-error state. +type ChallengeProvider interface { + Present(domain, token, keyAuth string) error + CleanUp(domain, token, keyAuth string) error +} + +// ChallengeProviderTimeout allows for implementing a +// ChallengeProvider where an unusually long timeout is required when +// waiting for an ACME challenge to be satisfied, such as when +// checking for DNS record progagation. If an implementor of a +// ChallengeProvider provides a Timeout method, then the return values +// of the Timeout method will be used when appropriate by the acme +// package. 
The interval value is the time between checks. +// +// The default values used for timeout and interval are 60 seconds and +// 2 seconds respectively. These are used when no Timeout method is +// defined for the ChallengeProvider. +type ChallengeProviderTimeout interface { + ChallengeProvider + Timeout() (timeout, interval time.Duration) +} diff --git a/vendor/github.com/docker/distribution/vendor/rsc.io/letsencrypt/vendor/github.com/xenolf/lego/acme/tls_sni_challenge.go b/vendor/github.com/docker/distribution/vendor/rsc.io/letsencrypt/vendor/github.com/xenolf/lego/acme/tls_sni_challenge.go new file mode 100644 index 000000000..f184b17a5 --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/rsc.io/letsencrypt/vendor/github.com/xenolf/lego/acme/tls_sni_challenge.go @@ -0,0 +1,73 @@ +package acme + +import ( + "crypto/rsa" + "crypto/sha256" + "crypto/tls" + "encoding/hex" + "fmt" + "log" +) + +type tlsSNIChallenge struct { + jws *jws + validate validateFunc + provider ChallengeProvider +} + +func (t *tlsSNIChallenge) Solve(chlng challenge, domain string) error { + // FIXME: https://github.com/ietf-wg-acme/acme/pull/22 + // Currently we implement this challenge to track boulder, not the current spec! + + logf("[INFO][%s] acme: Trying to solve TLS-SNI-01", domain) + + // Generate the Key Authorization for the challenge + keyAuth, err := getKeyAuthorization(chlng.Token, t.jws.privKey) + if err != nil { + return err + } + + err = t.provider.Present(domain, chlng.Token, keyAuth) + if err != nil { + return fmt.Errorf("[%s] error presenting token: %v", domain, err) + } + defer func() { + err := t.provider.CleanUp(domain, chlng.Token, keyAuth) + if err != nil { + log.Printf("[%s] error cleaning up: %v", domain, err) + } + }() + return t.validate(t.jws, domain, chlng.URI, challenge{Resource: "challenge", Type: chlng.Type, Token: chlng.Token, KeyAuthorization: keyAuth}) +} + +// TLSSNI01ChallengeCert returns a certificate and target domain for the `tls-sni-01` challenge +func TLSSNI01ChallengeCertDomain(keyAuth string) (tls.Certificate, string, error) { + // generate a new RSA key for the certificates + tempPrivKey, err := generatePrivateKey(RSA2048) + if err != nil { + return tls.Certificate{}, "", err + } + rsaPrivKey := tempPrivKey.(*rsa.PrivateKey) + rsaPrivPEM := pemEncode(rsaPrivKey) + + zBytes := sha256.Sum256([]byte(keyAuth)) + z := hex.EncodeToString(zBytes[:sha256.Size]) + domain := fmt.Sprintf("%s.%s.acme.invalid", z[:32], z[32:]) + tempCertPEM, err := generatePemCert(rsaPrivKey, domain) + if err != nil { + return tls.Certificate{}, "", err + } + + certificate, err := tls.X509KeyPair(tempCertPEM, rsaPrivPEM) + if err != nil { + return tls.Certificate{}, "", err + } + + return certificate, domain, nil +} + +// TLSSNI01ChallengeCert returns a certificate for the `tls-sni-01` challenge +func TLSSNI01ChallengeCert(keyAuth string) (tls.Certificate, error) { + cert, _, err := TLSSNI01ChallengeCertDomain(keyAuth) + return cert, err +} diff --git a/vendor/github.com/docker/distribution/vendor/rsc.io/letsencrypt/vendor/github.com/xenolf/lego/acme/tls_sni_challenge_server.go b/vendor/github.com/docker/distribution/vendor/rsc.io/letsencrypt/vendor/github.com/xenolf/lego/acme/tls_sni_challenge_server.go new file mode 100644 index 000000000..faaf16f6b --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/rsc.io/letsencrypt/vendor/github.com/xenolf/lego/acme/tls_sni_challenge_server.go @@ -0,0 +1,62 @@ +package acme + +import ( + "crypto/tls" + "fmt" + "net" + "net/http" +) + +// 
TLSProviderServer implements ChallengeProvider for `TLS-SNI-01` challenge +// It may be instantiated without using the NewTLSProviderServer function if +// you want only to use the default values. +type TLSProviderServer struct { + iface string + port string + done chan bool + listener net.Listener +} + +// NewTLSProviderServer creates a new TLSProviderServer on the selected interface and port. +// Setting iface and / or port to an empty string will make the server fall back to +// the "any" interface and port 443 respectively. +func NewTLSProviderServer(iface, port string) *TLSProviderServer { + return &TLSProviderServer{iface: iface, port: port} +} + +// Present makes the keyAuth available as a cert +func (s *TLSProviderServer) Present(domain, token, keyAuth string) error { + if s.port == "" { + s.port = "443" + } + + cert, err := TLSSNI01ChallengeCert(keyAuth) + if err != nil { + return err + } + + tlsConf := new(tls.Config) + tlsConf.Certificates = []tls.Certificate{cert} + + s.listener, err = tls.Listen("tcp", net.JoinHostPort(s.iface, s.port), tlsConf) + if err != nil { + return fmt.Errorf("Could not start HTTPS server for challenge -> %v", err) + } + + s.done = make(chan bool) + go func() { + http.Serve(s.listener, nil) + s.done <- true + }() + return nil +} + +// CleanUp closes the HTTP server. +func (s *TLSProviderServer) CleanUp(domain, token, keyAuth string) error { + if s.listener == nil { + return nil + } + s.listener.Close() + <-s.done + return nil +} diff --git a/vendor/github.com/docker/distribution/vendor/rsc.io/letsencrypt/vendor/github.com/xenolf/lego/acme/tls_sni_challenge_test.go b/vendor/github.com/docker/distribution/vendor/rsc.io/letsencrypt/vendor/github.com/xenolf/lego/acme/tls_sni_challenge_test.go new file mode 100644 index 000000000..3aec74565 --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/rsc.io/letsencrypt/vendor/github.com/xenolf/lego/acme/tls_sni_challenge_test.go @@ -0,0 +1,65 @@ +package acme + +import ( + "crypto/rand" + "crypto/rsa" + "crypto/sha256" + "crypto/tls" + "encoding/hex" + "fmt" + "strings" + "testing" +) + +func TestTLSSNIChallenge(t *testing.T) { + privKey, _ := rsa.GenerateKey(rand.Reader, 512) + j := &jws{privKey: privKey} + clientChallenge := challenge{Type: TLSSNI01, Token: "tlssni1"} + mockValidate := func(_ *jws, _, _ string, chlng challenge) error { + conn, err := tls.Dial("tcp", "localhost:23457", &tls.Config{ + InsecureSkipVerify: true, + }) + if err != nil { + t.Errorf("Expected to connect to challenge server without an error. 
%s", err.Error()) + } + + // Expect the server to only return one certificate + connState := conn.ConnectionState() + if count := len(connState.PeerCertificates); count != 1 { + t.Errorf("Expected the challenge server to return exactly one certificate but got %d", count) + } + + remoteCert := connState.PeerCertificates[0] + if count := len(remoteCert.DNSNames); count != 1 { + t.Errorf("Expected the challenge certificate to have exactly one DNSNames entry but had %d", count) + } + + zBytes := sha256.Sum256([]byte(chlng.KeyAuthorization)) + z := hex.EncodeToString(zBytes[:sha256.Size]) + domain := fmt.Sprintf("%s.%s.acme.invalid", z[:32], z[32:]) + + if remoteCert.DNSNames[0] != domain { + t.Errorf("Expected the challenge certificate DNSName to match %s but was %s", domain, remoteCert.DNSNames[0]) + } + + return nil + } + solver := &tlsSNIChallenge{jws: j, validate: mockValidate, provider: &TLSProviderServer{port: "23457"}} + + if err := solver.Solve(clientChallenge, "localhost:23457"); err != nil { + t.Errorf("Solve error: got %v, want nil", err) + } +} + +func TestTLSSNIChallengeInvalidPort(t *testing.T) { + privKey, _ := rsa.GenerateKey(rand.Reader, 128) + j := &jws{privKey: privKey} + clientChallenge := challenge{Type: TLSSNI01, Token: "tlssni2"} + solver := &tlsSNIChallenge{jws: j, validate: stubValidate, provider: &TLSProviderServer{port: "123456"}} + + if err := solver.Solve(clientChallenge, "localhost:123456"); err == nil { + t.Errorf("Solve error: got %v, want error", err) + } else if want := "invalid port 123456"; !strings.HasSuffix(err.Error(), want) { + t.Errorf("Solve error: got %q, want suffix %q", err.Error(), want) + } +} diff --git a/vendor/github.com/docker/distribution/vendor/rsc.io/letsencrypt/vendor/github.com/xenolf/lego/acme/utils.go b/vendor/github.com/docker/distribution/vendor/rsc.io/letsencrypt/vendor/github.com/xenolf/lego/acme/utils.go new file mode 100644 index 000000000..2fa0db304 --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/rsc.io/letsencrypt/vendor/github.com/xenolf/lego/acme/utils.go @@ -0,0 +1,29 @@ +package acme + +import ( + "fmt" + "time" +) + +// WaitFor polls the given function 'f', once every 'interval', up to 'timeout'. +func WaitFor(timeout, interval time.Duration, f func() (bool, error)) error { + var lastErr string + timeup := time.After(timeout) + for { + select { + case <-timeup: + return fmt.Errorf("Time limit exceeded. 
Last error: %s", lastErr) + default: + } + + stop, err := f() + if stop { + return nil + } + if err != nil { + lastErr = err.Error() + } + + time.Sleep(interval) + } +} diff --git a/vendor/github.com/docker/distribution/vendor/rsc.io/letsencrypt/vendor/github.com/xenolf/lego/acme/utils_test.go b/vendor/github.com/docker/distribution/vendor/rsc.io/letsencrypt/vendor/github.com/xenolf/lego/acme/utils_test.go new file mode 100644 index 000000000..158af4116 --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/rsc.io/letsencrypt/vendor/github.com/xenolf/lego/acme/utils_test.go @@ -0,0 +1,26 @@ +package acme + +import ( + "testing" + "time" +) + +func TestWaitForTimeout(t *testing.T) { + c := make(chan error) + go func() { + err := WaitFor(3*time.Second, 1*time.Second, func() (bool, error) { + return false, nil + }) + c <- err + }() + + timeout := time.After(4 * time.Second) + select { + case <-timeout: + t.Fatal("timeout exceeded") + case err := <-c: + if err == nil { + t.Errorf("expected timeout error; got %v", err) + } + } +} diff --git a/vendor/github.com/docker/distribution/vendor/rsc.io/letsencrypt/vendor/gopkg.in/square/go-jose.v1/BUG-BOUNTY.md b/vendor/github.com/docker/distribution/vendor/rsc.io/letsencrypt/vendor/gopkg.in/square/go-jose.v1/BUG-BOUNTY.md new file mode 100644 index 000000000..97e61dbb6 --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/rsc.io/letsencrypt/vendor/gopkg.in/square/go-jose.v1/BUG-BOUNTY.md @@ -0,0 +1,10 @@ +Serious about security +====================== + +Square recognizes the important contributions the security research community +can make. We therefore encourage reporting security issues with the code +contained in this repository. + +If you believe you have discovered a security vulnerability, please follow the +guidelines at . + diff --git a/vendor/github.com/docker/distribution/vendor/rsc.io/letsencrypt/vendor/gopkg.in/square/go-jose.v1/CONTRIBUTING.md b/vendor/github.com/docker/distribution/vendor/rsc.io/letsencrypt/vendor/gopkg.in/square/go-jose.v1/CONTRIBUTING.md new file mode 100644 index 000000000..61b183651 --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/rsc.io/letsencrypt/vendor/gopkg.in/square/go-jose.v1/CONTRIBUTING.md @@ -0,0 +1,14 @@ +# Contributing + +If you would like to contribute code to go-jose you can do so through GitHub by +forking the repository and sending a pull request. + +When submitting code, please make every effort to follow existing conventions +and style in order to keep the code as readable as possible. Please also make +sure all tests pass by running `go test`, and format your code with `go fmt`. +We also recommend using `golint` and `errcheck`. + +Before your code can be accepted into the project you must also sign the +[Individual Contributor License Agreement][1]. + + [1]: https://spreadsheets.google.com/spreadsheet/viewform?formkey=dDViT2xzUHAwRkI3X3k5Z0lQM091OGc6MQ&ndplr=1 diff --git a/vendor/github.com/docker/distribution/vendor/rsc.io/letsencrypt/vendor/gopkg.in/square/go-jose.v1/LICENSE b/vendor/github.com/docker/distribution/vendor/rsc.io/letsencrypt/vendor/gopkg.in/square/go-jose.v1/LICENSE new file mode 100644 index 000000000..d64569567 --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/rsc.io/letsencrypt/vendor/gopkg.in/square/go-jose.v1/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. 
+ + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
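The acme package vendored above (pulled in via docker/distribution's rsc.io/letsencrypt dependency) exposes two small extension points that the surrounding files rely on: the ChallengeProvider / ChallengeProviderTimeout interfaces from provider.go and the WaitFor polling helper from utils.go. The sketch below is illustrative only and is not part of this change set; it assumes the upstream import path github.com/xenolf/lego/acme, and fakeDNSProvider together with its printed behaviour are invented placeholders showing how a provider can opt in to a longer polling window than the documented 60-second / 2-second defaults.

```go
// Illustrative sketch only, not part of this PR. fakeDNSProvider is a
// hypothetical name; the interfaces and WaitFor come from the vendored
// acme package shown above (assumed import path: github.com/xenolf/lego/acme).
package main

import (
	"fmt"
	"time"

	"github.com/xenolf/lego/acme"
)

type fakeDNSProvider struct{}

// Present would create the record that proves control of the domain.
func (p *fakeDNSProvider) Present(domain, token, keyAuth string) error {
	fmt.Printf("would create TXT record for %s\n", domain)
	return nil
}

// CleanUp removes whatever Present created; the challenge calls it only
// when Present ended without error.
func (p *fakeDNSProvider) CleanUp(domain, token, keyAuth string) error {
	fmt.Printf("would remove TXT record for %s\n", domain)
	return nil
}

// Timeout overrides the package defaults (60s total, 2s between checks),
// which is useful when DNS propagation is slow.
func (p *fakeDNSProvider) Timeout() (timeout, interval time.Duration) {
	return 5 * time.Minute, 10 * time.Second
}

// Compile-time checks that the provider satisfies both interfaces.
var (
	_ acme.ChallengeProvider        = (*fakeDNSProvider)(nil)
	_ acme.ChallengeProviderTimeout = (*fakeDNSProvider)(nil)
)

func main() {
	// WaitFor polls the callback every interval until it returns true,
	// an error is remembered as the last error, and the timeout aborts.
	err := acme.WaitFor(30*time.Second, 2*time.Second, func() (bool, error) {
		// e.g. check that the challenge record has propagated
		return true, nil
	})
	fmt.Println("wait result:", err)
}
```

Within this repository the package exists only under the nested vendor directory, so nothing outside docker/distribution's letsencrypt support would import it directly; the sketch merely shows the contract the vendored code defines.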
diff --git a/vendor/github.com/docker/distribution/vendor/rsc.io/letsencrypt/vendor/gopkg.in/square/go-jose.v1/README.md b/vendor/github.com/docker/distribution/vendor/rsc.io/letsencrypt/vendor/gopkg.in/square/go-jose.v1/README.md new file mode 100644 index 000000000..fd859da7a --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/rsc.io/letsencrypt/vendor/gopkg.in/square/go-jose.v1/README.md @@ -0,0 +1,209 @@ +# Go JOSE + +[![godoc](http://img.shields.io/badge/godoc-reference-blue.svg?style=flat)](https://godoc.org/gopkg.in/square/go-jose.v1) [![license](http://img.shields.io/badge/license-apache_2.0-red.svg?style=flat)](https://raw.githubusercontent.com/square/go-jose/master/LICENSE) [![build](https://travis-ci.org/square/go-jose.svg?branch=master)](https://travis-ci.org/square/go-jose) [![coverage](https://coveralls.io/repos/github/square/go-jose/badge.svg?branch=master)](https://coveralls.io/r/square/go-jose) + +Package jose aims to provide an implementation of the Javascript Object Signing +and Encryption set of standards. For the moment, it mainly focuses on encryption +and signing based on the JSON Web Encryption and JSON Web Signature standards. + +**Disclaimer**: This library contains encryption software that is subject to +the U.S. Export Administration Regulations. You may not export, re-export, +transfer or download this code or any part of it in violation of any United +States law, directive or regulation. In particular this software may not be +exported or re-exported in any form or on any media to Iran, North Sudan, +Syria, Cuba, or North Korea, or to denied persons or entities mentioned on any +US maintained blocked list. + +## Overview + +The implementation follows the +[JSON Web Encryption](http://dx.doi.org/10.17487/RFC7516) +standard (RFC 7516) and +[JSON Web Signature](http://dx.doi.org/10.17487/RFC7515) +standard (RFC 7515). Tables of supported algorithms are shown below. +The library supports both the compact and full serialization formats, and has +optional support for multiple recipients. It also comes with a small +command-line utility +([`jose-util`](https://github.com/square/go-jose/tree/master/jose-util)) +for dealing with JOSE messages in a shell. + +**Note**: We use a forked version of the `encoding/json` package from the Go +standard library which uses case-sensitive matching for member names (instead +of [case-insensitive matching](https://www.ietf.org/mail-archive/web/json/current/msg03763.html)). +This is to avoid differences in interpretation of messages between go-jose and +libraries in other languages. If you do not like this behavior, you can use the +`std_json` build tag to disable it (though we do not recommend doing so). + +### Versions + +We use [gopkg.in](https://gopkg.in) for versioning. + +[Version 1](https://gopkg.in/square/go-jose.v1) is the current stable version: + + import "gopkg.in/square/go-jose.v1" + +The interface for [go-jose.v1](https://gopkg.in/square/go-jose.v1) will remain +backwards compatible. We're currently sketching out ideas for a new version, to +clean up the interface a bit. If you have ideas or feature requests [please let +us know](https://github.com/square/go-jose/issues/64)! + +### Supported algorithms + +See below for a table of supported algorithms. Algorithm identifiers match +the names in the +[JSON Web Algorithms](http://dx.doi.org/10.17487/RFC7518) +standard where possible. The +[Godoc reference](https://godoc.org/github.com/square/go-jose#pkg-constants) +has a list of constants. 
+ + Key encryption | Algorithm identifier(s) + :------------------------- | :------------------------------ + RSA-PKCS#1v1.5 | RSA1_5 + RSA-OAEP | RSA-OAEP, RSA-OAEP-256 + AES key wrap | A128KW, A192KW, A256KW + AES-GCM key wrap | A128GCMKW, A192GCMKW, A256GCMKW + ECDH-ES + AES key wrap | ECDH-ES+A128KW, ECDH-ES+A192KW, ECDH-ES+A256KW + ECDH-ES (direct) | ECDH-ES1 + Direct encryption | dir1 + +1. Not supported in multi-recipient mode + + Signing / MAC | Algorithm identifier(s) + :------------------------- | :------------------------------ + RSASSA-PKCS#1v1.5 | RS256, RS384, RS512 + RSASSA-PSS | PS256, PS384, PS512 + HMAC | HS256, HS384, HS512 + ECDSA | ES256, ES384, ES512 + + Content encryption | Algorithm identifier(s) + :------------------------- | :------------------------------ + AES-CBC+HMAC | A128CBC-HS256, A192CBC-HS384, A256CBC-HS512 + AES-GCM | A128GCM, A192GCM, A256GCM + + Compression | Algorithm identifiers(s) + :------------------------- | ------------------------------- + DEFLATE (RFC 1951) | DEF + +### Supported key types + +See below for a table of supported key types. These are understood by the +library, and can be passed to corresponding functions such as `NewEncrypter` or +`NewSigner`. Note that if you are creating a new encrypter or signer with a +JsonWebKey, the key id of the JsonWebKey (if present) will be added to any +resulting messages. + + Algorithm(s) | Corresponding types + :------------------------- | ------------------------------- + RSA | *[rsa.PublicKey](http://golang.org/pkg/crypto/rsa/#PublicKey), *[rsa.PrivateKey](http://golang.org/pkg/crypto/rsa/#PrivateKey), *[jose.JsonWebKey](https://godoc.org/github.com/square/go-jose#JsonWebKey) + ECDH, ECDSA | *[ecdsa.PublicKey](http://golang.org/pkg/crypto/ecdsa/#PublicKey), *[ecdsa.PrivateKey](http://golang.org/pkg/crypto/ecdsa/#PrivateKey), *[jose.JsonWebKey](https://godoc.org/github.com/square/go-jose#JsonWebKey) + AES, HMAC | []byte, *[jose.JsonWebKey](https://godoc.org/github.com/square/go-jose#JsonWebKey) + +## Examples + +Encryption/decryption example using RSA: + +```Go +// Generate a public/private key pair to use for this example. The library +// also provides two utility functions (LoadPublicKey and LoadPrivateKey) +// that can be used to load keys from PEM/DER-encoded data. +privateKey, err := rsa.GenerateKey(rand.Reader, 2048) +if err != nil { + panic(err) +} + +// Instantiate an encrypter using RSA-OAEP with AES128-GCM. An error would +// indicate that the selected algorithm(s) are not currently supported. +publicKey := &privateKey.PublicKey +encrypter, err := NewEncrypter(RSA_OAEP, A128GCM, publicKey) +if err != nil { + panic(err) +} + +// Encrypt a sample plaintext. Calling the encrypter returns an encrypted +// JWE object, which can then be serialized for output afterwards. An error +// would indicate a problem in an underlying cryptographic primitive. +var plaintext = []byte("Lorem ipsum dolor sit amet") +object, err := encrypter.Encrypt(plaintext) +if err != nil { + panic(err) +} + +// Serialize the encrypted object using the full serialization format. +// Alternatively you can also use the compact format here by calling +// object.CompactSerialize() instead. +serialized := object.FullSerialize() + +// Parse the serialized, encrypted JWE object. An error would indicate that +// the given input did not represent a valid message. +object, err = ParseEncrypted(serialized) +if err != nil { + panic(err) +} + +// Now we can decrypt and get back our original plaintext. 
An error here +// would indicate the the message failed to decrypt, e.g. because the auth +// tag was broken or the message was tampered with. +decrypted, err := object.Decrypt(privateKey) +if err != nil { + panic(err) +} + +fmt.Printf(string(decrypted)) +// output: Lorem ipsum dolor sit amet +``` + +Signing/verification example using RSA: + +```Go +// Generate a public/private key pair to use for this example. The library +// also provides two utility functions (LoadPublicKey and LoadPrivateKey) +// that can be used to load keys from PEM/DER-encoded data. +privateKey, err := rsa.GenerateKey(rand.Reader, 2048) +if err != nil { + panic(err) +} + +// Instantiate a signer using RSASSA-PSS (SHA512) with the given private key. +signer, err := NewSigner(PS512, privateKey) +if err != nil { + panic(err) +} + +// Sign a sample payload. Calling the signer returns a protected JWS object, +// which can then be serialized for output afterwards. An error would +// indicate a problem in an underlying cryptographic primitive. +var payload = []byte("Lorem ipsum dolor sit amet") +object, err := signer.Sign(payload) +if err != nil { + panic(err) +} + +// Serialize the encrypted object using the full serialization format. +// Alternatively you can also use the compact format here by calling +// object.CompactSerialize() instead. +serialized := object.FullSerialize() + +// Parse the serialized, protected JWS object. An error would indicate that +// the given input did not represent a valid message. +object, err = ParseSigned(serialized) +if err != nil { + panic(err) +} + +// Now we can verify the signature on the payload. An error here would +// indicate the the message failed to verify, e.g. because the signature was +// broken or the message was tampered with. +output, err := object.Verify(&privateKey.PublicKey) +if err != nil { + panic(err) +} + +fmt.Printf(string(output)) +// output: Lorem ipsum dolor sit amet +``` + +More examples can be found in the [Godoc +reference](https://godoc.org/github.com/square/go-jose) for this package. The +[`jose-util`](https://github.com/square/go-jose/tree/master/jose-util) +subdirectory also contains a small command-line utility which might +be useful as an example. diff --git a/vendor/github.com/docker/distribution/vendor/rsc.io/letsencrypt/vendor/gopkg.in/square/go-jose.v1/asymmetric.go b/vendor/github.com/docker/distribution/vendor/rsc.io/letsencrypt/vendor/gopkg.in/square/go-jose.v1/asymmetric.go new file mode 100644 index 000000000..381156cab --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/rsc.io/letsencrypt/vendor/gopkg.in/square/go-jose.v1/asymmetric.go @@ -0,0 +1,498 @@ +/*- + * Copyright 2014 Square Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package jose + +import ( + "crypto" + "crypto/aes" + "crypto/ecdsa" + "crypto/rand" + "crypto/rsa" + "crypto/sha1" + "crypto/sha256" + "errors" + "fmt" + "math/big" + + "gopkg.in/square/go-jose.v1/cipher" +) + +// A generic RSA-based encrypter/verifier +type rsaEncrypterVerifier struct { + publicKey *rsa.PublicKey +} + +// A generic RSA-based decrypter/signer +type rsaDecrypterSigner struct { + privateKey *rsa.PrivateKey +} + +// A generic EC-based encrypter/verifier +type ecEncrypterVerifier struct { + publicKey *ecdsa.PublicKey +} + +// A key generator for ECDH-ES +type ecKeyGenerator struct { + size int + algID string + publicKey *ecdsa.PublicKey +} + +// A generic EC-based decrypter/signer +type ecDecrypterSigner struct { + privateKey *ecdsa.PrivateKey +} + +// newRSARecipient creates recipientKeyInfo based on the given key. +func newRSARecipient(keyAlg KeyAlgorithm, publicKey *rsa.PublicKey) (recipientKeyInfo, error) { + // Verify that key management algorithm is supported by this encrypter + switch keyAlg { + case RSA1_5, RSA_OAEP, RSA_OAEP_256: + default: + return recipientKeyInfo{}, ErrUnsupportedAlgorithm + } + + return recipientKeyInfo{ + keyAlg: keyAlg, + keyEncrypter: &rsaEncrypterVerifier{ + publicKey: publicKey, + }, + }, nil +} + +// newRSASigner creates a recipientSigInfo based on the given key. +func newRSASigner(sigAlg SignatureAlgorithm, privateKey *rsa.PrivateKey) (recipientSigInfo, error) { + // Verify that key management algorithm is supported by this encrypter + switch sigAlg { + case RS256, RS384, RS512, PS256, PS384, PS512: + default: + return recipientSigInfo{}, ErrUnsupportedAlgorithm + } + + return recipientSigInfo{ + sigAlg: sigAlg, + publicKey: &JsonWebKey{ + Key: &privateKey.PublicKey, + }, + signer: &rsaDecrypterSigner{ + privateKey: privateKey, + }, + }, nil +} + +// newECDHRecipient creates recipientKeyInfo based on the given key. +func newECDHRecipient(keyAlg KeyAlgorithm, publicKey *ecdsa.PublicKey) (recipientKeyInfo, error) { + // Verify that key management algorithm is supported by this encrypter + switch keyAlg { + case ECDH_ES, ECDH_ES_A128KW, ECDH_ES_A192KW, ECDH_ES_A256KW: + default: + return recipientKeyInfo{}, ErrUnsupportedAlgorithm + } + + return recipientKeyInfo{ + keyAlg: keyAlg, + keyEncrypter: &ecEncrypterVerifier{ + publicKey: publicKey, + }, + }, nil +} + +// newECDSASigner creates a recipientSigInfo based on the given key. +func newECDSASigner(sigAlg SignatureAlgorithm, privateKey *ecdsa.PrivateKey) (recipientSigInfo, error) { + // Verify that key management algorithm is supported by this encrypter + switch sigAlg { + case ES256, ES384, ES512: + default: + return recipientSigInfo{}, ErrUnsupportedAlgorithm + } + + return recipientSigInfo{ + sigAlg: sigAlg, + publicKey: &JsonWebKey{ + Key: &privateKey.PublicKey, + }, + signer: &ecDecrypterSigner{ + privateKey: privateKey, + }, + }, nil +} + +// Encrypt the given payload and update the object. +func (ctx rsaEncrypterVerifier) encryptKey(cek []byte, alg KeyAlgorithm) (recipientInfo, error) { + encryptedKey, err := ctx.encrypt(cek, alg) + if err != nil { + return recipientInfo{}, err + } + + return recipientInfo{ + encryptedKey: encryptedKey, + header: &rawHeader{}, + }, nil +} + +// Encrypt the given payload. Based on the key encryption algorithm, +// this will either use RSA-PKCS1v1.5 or RSA-OAEP (with SHA-1 or SHA-256). 
+func (ctx rsaEncrypterVerifier) encrypt(cek []byte, alg KeyAlgorithm) ([]byte, error) { + switch alg { + case RSA1_5: + return rsa.EncryptPKCS1v15(randReader, ctx.publicKey, cek) + case RSA_OAEP: + return rsa.EncryptOAEP(sha1.New(), randReader, ctx.publicKey, cek, []byte{}) + case RSA_OAEP_256: + return rsa.EncryptOAEP(sha256.New(), randReader, ctx.publicKey, cek, []byte{}) + } + + return nil, ErrUnsupportedAlgorithm +} + +// Decrypt the given payload and return the content encryption key. +func (ctx rsaDecrypterSigner) decryptKey(headers rawHeader, recipient *recipientInfo, generator keyGenerator) ([]byte, error) { + return ctx.decrypt(recipient.encryptedKey, KeyAlgorithm(headers.Alg), generator) +} + +// Decrypt the given payload. Based on the key encryption algorithm, +// this will either use RSA-PKCS1v1.5 or RSA-OAEP (with SHA-1 or SHA-256). +func (ctx rsaDecrypterSigner) decrypt(jek []byte, alg KeyAlgorithm, generator keyGenerator) ([]byte, error) { + // Note: The random reader on decrypt operations is only used for blinding, + // so stubbing is meanlingless (hence the direct use of rand.Reader). + switch alg { + case RSA1_5: + defer func() { + // DecryptPKCS1v15SessionKey sometimes panics on an invalid payload + // because of an index out of bounds error, which we want to ignore. + // This has been fixed in Go 1.3.1 (released 2014/08/13), the recover() + // only exists for preventing crashes with unpatched versions. + // See: https://groups.google.com/forum/#!topic/golang-dev/7ihX6Y6kx9k + // See: https://code.google.com/p/go/source/detail?r=58ee390ff31602edb66af41ed10901ec95904d33 + _ = recover() + }() + + // Perform some input validation. + keyBytes := ctx.privateKey.PublicKey.N.BitLen() / 8 + if keyBytes != len(jek) { + // Input size is incorrect, the encrypted payload should always match + // the size of the public modulus (e.g. using a 2048 bit key will + // produce 256 bytes of output). Reject this since it's invalid input. + return nil, ErrCryptoFailure + } + + cek, _, err := generator.genKey() + if err != nil { + return nil, ErrCryptoFailure + } + + // When decrypting an RSA-PKCS1v1.5 payload, we must take precautions to + // prevent chosen-ciphertext attacks as described in RFC 3218, "Preventing + // the Million Message Attack on Cryptographic Message Syntax". We are + // therefore deliberatly ignoring errors here. 
+ _ = rsa.DecryptPKCS1v15SessionKey(rand.Reader, ctx.privateKey, jek, cek) + + return cek, nil + case RSA_OAEP: + // Use rand.Reader for RSA blinding + return rsa.DecryptOAEP(sha1.New(), rand.Reader, ctx.privateKey, jek, []byte{}) + case RSA_OAEP_256: + // Use rand.Reader for RSA blinding + return rsa.DecryptOAEP(sha256.New(), rand.Reader, ctx.privateKey, jek, []byte{}) + } + + return nil, ErrUnsupportedAlgorithm +} + +// Sign the given payload +func (ctx rsaDecrypterSigner) signPayload(payload []byte, alg SignatureAlgorithm) (Signature, error) { + var hash crypto.Hash + + switch alg { + case RS256, PS256: + hash = crypto.SHA256 + case RS384, PS384: + hash = crypto.SHA384 + case RS512, PS512: + hash = crypto.SHA512 + default: + return Signature{}, ErrUnsupportedAlgorithm + } + + hasher := hash.New() + + // According to documentation, Write() on hash never fails + _, _ = hasher.Write(payload) + hashed := hasher.Sum(nil) + + var out []byte + var err error + + switch alg { + case RS256, RS384, RS512: + out, err = rsa.SignPKCS1v15(randReader, ctx.privateKey, hash, hashed) + case PS256, PS384, PS512: + out, err = rsa.SignPSS(randReader, ctx.privateKey, hash, hashed, &rsa.PSSOptions{ + SaltLength: rsa.PSSSaltLengthAuto, + }) + } + + if err != nil { + return Signature{}, err + } + + return Signature{ + Signature: out, + protected: &rawHeader{}, + }, nil +} + +// Verify the given payload +func (ctx rsaEncrypterVerifier) verifyPayload(payload []byte, signature []byte, alg SignatureAlgorithm) error { + var hash crypto.Hash + + switch alg { + case RS256, PS256: + hash = crypto.SHA256 + case RS384, PS384: + hash = crypto.SHA384 + case RS512, PS512: + hash = crypto.SHA512 + default: + return ErrUnsupportedAlgorithm + } + + hasher := hash.New() + + // According to documentation, Write() on hash never fails + _, _ = hasher.Write(payload) + hashed := hasher.Sum(nil) + + switch alg { + case RS256, RS384, RS512: + return rsa.VerifyPKCS1v15(ctx.publicKey, hash, hashed, signature) + case PS256, PS384, PS512: + return rsa.VerifyPSS(ctx.publicKey, hash, hashed, signature, nil) + } + + return ErrUnsupportedAlgorithm +} + +// Encrypt the given payload and update the object. +func (ctx ecEncrypterVerifier) encryptKey(cek []byte, alg KeyAlgorithm) (recipientInfo, error) { + switch alg { + case ECDH_ES: + // ECDH-ES mode doesn't wrap a key, the shared secret is used directly as the key. 
+ return recipientInfo{ + header: &rawHeader{}, + }, nil + case ECDH_ES_A128KW, ECDH_ES_A192KW, ECDH_ES_A256KW: + default: + return recipientInfo{}, ErrUnsupportedAlgorithm + } + + generator := ecKeyGenerator{ + algID: string(alg), + publicKey: ctx.publicKey, + } + + switch alg { + case ECDH_ES_A128KW: + generator.size = 16 + case ECDH_ES_A192KW: + generator.size = 24 + case ECDH_ES_A256KW: + generator.size = 32 + } + + kek, header, err := generator.genKey() + if err != nil { + return recipientInfo{}, err + } + + block, err := aes.NewCipher(kek) + if err != nil { + return recipientInfo{}, err + } + + jek, err := josecipher.KeyWrap(block, cek) + if err != nil { + return recipientInfo{}, err + } + + return recipientInfo{ + encryptedKey: jek, + header: &header, + }, nil +} + +// Get key size for EC key generator +func (ctx ecKeyGenerator) keySize() int { + return ctx.size +} + +// Get a content encryption key for ECDH-ES +func (ctx ecKeyGenerator) genKey() ([]byte, rawHeader, error) { + priv, err := ecdsa.GenerateKey(ctx.publicKey.Curve, randReader) + if err != nil { + return nil, rawHeader{}, err + } + + out := josecipher.DeriveECDHES(ctx.algID, []byte{}, []byte{}, priv, ctx.publicKey, ctx.size) + + headers := rawHeader{ + Epk: &JsonWebKey{ + Key: &priv.PublicKey, + }, + } + + return out, headers, nil +} + +// Decrypt the given payload and return the content encryption key. +func (ctx ecDecrypterSigner) decryptKey(headers rawHeader, recipient *recipientInfo, generator keyGenerator) ([]byte, error) { + if headers.Epk == nil { + return nil, errors.New("square/go-jose: missing epk header") + } + + publicKey, ok := headers.Epk.Key.(*ecdsa.PublicKey) + if publicKey == nil || !ok { + return nil, errors.New("square/go-jose: invalid epk header") + } + + apuData := headers.Apu.bytes() + apvData := headers.Apv.bytes() + + deriveKey := func(algID string, size int) []byte { + return josecipher.DeriveECDHES(algID, apuData, apvData, ctx.privateKey, publicKey, size) + } + + var keySize int + + switch KeyAlgorithm(headers.Alg) { + case ECDH_ES: + // ECDH-ES uses direct key agreement, no key unwrapping necessary. 
+ return deriveKey(string(headers.Enc), generator.keySize()), nil + case ECDH_ES_A128KW: + keySize = 16 + case ECDH_ES_A192KW: + keySize = 24 + case ECDH_ES_A256KW: + keySize = 32 + default: + return nil, ErrUnsupportedAlgorithm + } + + key := deriveKey(headers.Alg, keySize) + block, err := aes.NewCipher(key) + if err != nil { + return nil, err + } + + return josecipher.KeyUnwrap(block, recipient.encryptedKey) +} + +// Sign the given payload +func (ctx ecDecrypterSigner) signPayload(payload []byte, alg SignatureAlgorithm) (Signature, error) { + var expectedBitSize int + var hash crypto.Hash + + switch alg { + case ES256: + expectedBitSize = 256 + hash = crypto.SHA256 + case ES384: + expectedBitSize = 384 + hash = crypto.SHA384 + case ES512: + expectedBitSize = 521 + hash = crypto.SHA512 + } + + curveBits := ctx.privateKey.Curve.Params().BitSize + if expectedBitSize != curveBits { + return Signature{}, fmt.Errorf("square/go-jose: expected %d bit key, got %d bits instead", expectedBitSize, curveBits) + } + + hasher := hash.New() + + // According to documentation, Write() on hash never fails + _, _ = hasher.Write(payload) + hashed := hasher.Sum(nil) + + r, s, err := ecdsa.Sign(randReader, ctx.privateKey, hashed) + if err != nil { + return Signature{}, err + } + + keyBytes := curveBits / 8 + if curveBits%8 > 0 { + keyBytes += 1 + } + + // We serialize the outpus (r and s) into big-endian byte arrays and pad + // them with zeros on the left to make sure the sizes work out. Both arrays + // must be keyBytes long, and the output must be 2*keyBytes long. + rBytes := r.Bytes() + rBytesPadded := make([]byte, keyBytes) + copy(rBytesPadded[keyBytes-len(rBytes):], rBytes) + + sBytes := s.Bytes() + sBytesPadded := make([]byte, keyBytes) + copy(sBytesPadded[keyBytes-len(sBytes):], sBytes) + + out := append(rBytesPadded, sBytesPadded...) + + return Signature{ + Signature: out, + protected: &rawHeader{}, + }, nil +} + +// Verify the given payload +func (ctx ecEncrypterVerifier) verifyPayload(payload []byte, signature []byte, alg SignatureAlgorithm) error { + var keySize int + var hash crypto.Hash + + switch alg { + case ES256: + keySize = 32 + hash = crypto.SHA256 + case ES384: + keySize = 48 + hash = crypto.SHA384 + case ES512: + keySize = 66 + hash = crypto.SHA512 + } + + if len(signature) != 2*keySize { + return fmt.Errorf("square/go-jose: invalid signature size, have %d bytes, wanted %d", len(signature), 2*keySize) + } + + hasher := hash.New() + + // According to documentation, Write() on hash never fails + _, _ = hasher.Write(payload) + hashed := hasher.Sum(nil) + + r := big.NewInt(0).SetBytes(signature[:keySize]) + s := big.NewInt(0).SetBytes(signature[keySize:]) + + match := ecdsa.Verify(ctx.publicKey, hashed, r, s) + if !match { + return errors.New("square/go-jose: ecdsa signature failed to verify") + } + + return nil +} diff --git a/vendor/github.com/docker/distribution/vendor/rsc.io/letsencrypt/vendor/gopkg.in/square/go-jose.v1/asymmetric_test.go b/vendor/github.com/docker/distribution/vendor/rsc.io/letsencrypt/vendor/gopkg.in/square/go-jose.v1/asymmetric_test.go new file mode 100644 index 000000000..1c8c8b347 --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/rsc.io/letsencrypt/vendor/gopkg.in/square/go-jose.v1/asymmetric_test.go @@ -0,0 +1,431 @@ +/*- + * Copyright 2014 Square Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package jose + +import ( + "bytes" + "crypto/rand" + "crypto/rsa" + "errors" + "io" + "math/big" + "testing" +) + +func TestVectorsRSA(t *testing.T) { + // Sources: + // http://www.emc.com/emc-plus/rsa-labs/standards-initiatives/pkcs-rsa-cryptography-standard.htm + // ftp://ftp.rsa.com/pub/rsalabs/tmp/pkcs1v15crypt-vectors.txt + priv := &rsa.PrivateKey{ + PublicKey: rsa.PublicKey{ + N: fromHexInt(` + a8b3b284af8eb50b387034a860f146c4919f318763cd6c5598c8 + ae4811a1e0abc4c7e0b082d693a5e7fced675cf4668512772c0c + bc64a742c6c630f533c8cc72f62ae833c40bf25842e984bb78bd + bf97c0107d55bdb662f5c4e0fab9845cb5148ef7392dd3aaff93 + ae1e6b667bb3d4247616d4f5ba10d4cfd226de88d39f16fb`), + E: 65537, + }, + D: fromHexInt(` + 53339cfdb79fc8466a655c7316aca85c55fd8f6dd898fdaf1195 + 17ef4f52e8fd8e258df93fee180fa0e4ab29693cd83b152a553d + 4ac4d1812b8b9fa5af0e7f55fe7304df41570926f3311f15c4d6 + 5a732c483116ee3d3d2d0af3549ad9bf7cbfb78ad884f84d5beb + 04724dc7369b31def37d0cf539e9cfcdd3de653729ead5d1`), + Primes: []*big.Int{ + fromHexInt(` + d32737e7267ffe1341b2d5c0d150a81b586fb3132bed2f8d5262 + 864a9cb9f30af38be448598d413a172efb802c21acf1c11c520c + 2f26a471dcad212eac7ca39d`), + fromHexInt(` + cc8853d1d54da630fac004f471f281c7b8982d8224a490edbeb3 + 3d3e3d5cc93c4765703d1dd791642f1f116a0dd852be2419b2af + 72bfe9a030e860b0288b5d77`), + }, + } + + input := fromHexBytes( + "6628194e12073db03ba94cda9ef9532397d50dba79b987004afefe34") + + expectedPKCS := fromHexBytes(` + 50b4c14136bd198c2f3c3ed243fce036e168d56517984a263cd66492b808 + 04f169d210f2b9bdfb48b12f9ea05009c77da257cc600ccefe3a6283789d + 8ea0e607ac58e2690ec4ebc10146e8cbaa5ed4d5cce6fe7b0ff9efc1eabb + 564dbf498285f449ee61dd7b42ee5b5892cb90601f30cda07bf26489310b + cd23b528ceab3c31`) + + expectedOAEP := fromHexBytes(` + 354fe67b4a126d5d35fe36c777791a3f7ba13def484e2d3908aff722fad4 + 68fb21696de95d0be911c2d3174f8afcc201035f7b6d8e69402de5451618 + c21a535fa9d7bfc5b8dd9fc243f8cf927db31322d6e881eaa91a996170e6 + 57a05a266426d98c88003f8477c1227094a0d9fa1e8c4024309ce1ecccb5 + 210035d47ac72e8a`) + + // Mock random reader + randReader = bytes.NewReader(fromHexBytes(` + 017341ae3875d5f87101f8cc4fa9b9bc156bb04628fccdb2f4f11e905bd3 + a155d376f593bd7304210874eba08a5e22bcccb4c9d3882a93a54db022f5 + 03d16338b6b7ce16dc7f4bbf9a96b59772d6606e9747c7649bf9e083db98 + 1884a954ab3c6f18b776ea21069d69776a33e96bad48e1dda0a5ef`)) + defer resetRandReader() + + // RSA-PKCS1v1.5 encrypt + enc := new(rsaEncrypterVerifier) + enc.publicKey = &priv.PublicKey + encryptedPKCS, err := enc.encrypt(input, RSA1_5) + if err != nil { + t.Error("Encryption failed:", err) + return + } + + if bytes.Compare(encryptedPKCS, expectedPKCS) != 0 { + t.Error("Output does not match expected value (PKCS1v1.5)") + } + + // RSA-OAEP encrypt + encryptedOAEP, err := enc.encrypt(input, RSA_OAEP) + if err != nil { + t.Error("Encryption failed:", err) + return + } + + if bytes.Compare(encryptedOAEP, expectedOAEP) != 0 { + t.Error("Output does not match expected value (OAEP)") + } + + // Need fake cipher for PKCS1v1.5 decrypt + resetRandReader() + aes := newAESGCM(len(input)) + + keygen := randomKeyGenerator{ + size: 
aes.keySize(), + } + + // RSA-PKCS1v1.5 decrypt + dec := new(rsaDecrypterSigner) + dec.privateKey = priv + decryptedPKCS, err := dec.decrypt(encryptedPKCS, RSA1_5, keygen) + if err != nil { + t.Error("Decryption failed:", err) + return + } + + if bytes.Compare(input, decryptedPKCS) != 0 { + t.Error("Output does not match expected value (PKCS1v1.5)") + } + + // RSA-OAEP decrypt + decryptedOAEP, err := dec.decrypt(encryptedOAEP, RSA_OAEP, keygen) + if err != nil { + t.Error("decryption failed:", err) + return + } + + if bytes.Compare(input, decryptedOAEP) != 0 { + t.Error("output does not match expected value (OAEP)") + } +} + +func TestInvalidAlgorithmsRSA(t *testing.T) { + _, err := newRSARecipient("XYZ", nil) + if err != ErrUnsupportedAlgorithm { + t.Error("should return error on invalid algorithm") + } + + _, err = newRSASigner("XYZ", nil) + if err != ErrUnsupportedAlgorithm { + t.Error("should return error on invalid algorithm") + } + + enc := new(rsaEncrypterVerifier) + enc.publicKey = &rsaTestKey.PublicKey + _, err = enc.encryptKey([]byte{}, "XYZ") + if err != ErrUnsupportedAlgorithm { + t.Error("should return error on invalid algorithm") + } + + err = enc.verifyPayload([]byte{}, []byte{}, "XYZ") + if err != ErrUnsupportedAlgorithm { + t.Error("should return error on invalid algorithm") + } + + dec := new(rsaDecrypterSigner) + dec.privateKey = rsaTestKey + _, err = dec.decrypt(make([]byte, 256), "XYZ", randomKeyGenerator{size: 16}) + if err != ErrUnsupportedAlgorithm { + t.Error("should return error on invalid algorithm") + } + + _, err = dec.signPayload([]byte{}, "XYZ") + if err != ErrUnsupportedAlgorithm { + t.Error("should return error on invalid algorithm") + } +} + +type failingKeyGenerator struct{} + +func (ctx failingKeyGenerator) keySize() int { + return 0 +} + +func (ctx failingKeyGenerator) genKey() ([]byte, rawHeader, error) { + return nil, rawHeader{}, errors.New("failed to generate key") +} + +func TestPKCSKeyGeneratorFailure(t *testing.T) { + dec := new(rsaDecrypterSigner) + dec.privateKey = rsaTestKey + generator := failingKeyGenerator{} + _, err := dec.decrypt(make([]byte, 256), RSA1_5, generator) + if err != ErrCryptoFailure { + t.Error("should return error on invalid algorithm") + } +} + +func TestInvalidAlgorithmsEC(t *testing.T) { + _, err := newECDHRecipient("XYZ", nil) + if err != ErrUnsupportedAlgorithm { + t.Error("should return error on invalid algorithm") + } + + _, err = newECDSASigner("XYZ", nil) + if err != ErrUnsupportedAlgorithm { + t.Error("should return error on invalid algorithm") + } + + enc := new(ecEncrypterVerifier) + enc.publicKey = &ecTestKey256.PublicKey + _, err = enc.encryptKey([]byte{}, "XYZ") + if err != ErrUnsupportedAlgorithm { + t.Error("should return error on invalid algorithm") + } +} + +func TestInvalidECKeyGen(t *testing.T) { + gen := ecKeyGenerator{ + size: 16, + algID: "A128GCM", + publicKey: &ecTestKey256.PublicKey, + } + + if gen.keySize() != 16 { + t.Error("ec key generator reported incorrect key size") + } + + _, _, err := gen.genKey() + if err != nil { + t.Error("ec key generator failed to generate key", err) + } +} + +func TestInvalidECDecrypt(t *testing.T) { + dec := ecDecrypterSigner{ + privateKey: ecTestKey256, + } + + generator := randomKeyGenerator{size: 16} + + // Missing epk header + headers := rawHeader{ + Alg: string(ECDH_ES), + } + + _, err := dec.decryptKey(headers, nil, generator) + if err == nil { + t.Error("ec decrypter accepted object with missing epk header") + } + + // Invalid epk header + headers.Epk = 
&JsonWebKey{} + + _, err = dec.decryptKey(headers, nil, generator) + if err == nil { + t.Error("ec decrypter accepted object with invalid epk header") + } +} + +func TestDecryptWithIncorrectSize(t *testing.T) { + priv, err := rsa.GenerateKey(rand.Reader, 2048) + if err != nil { + t.Error(err) + return + } + + dec := new(rsaDecrypterSigner) + dec.privateKey = priv + aes := newAESGCM(16) + + keygen := randomKeyGenerator{ + size: aes.keySize(), + } + + payload := make([]byte, 254) + _, err = dec.decrypt(payload, RSA1_5, keygen) + if err == nil { + t.Error("Invalid payload size should return error") + } + + payload = make([]byte, 257) + _, err = dec.decrypt(payload, RSA1_5, keygen) + if err == nil { + t.Error("Invalid payload size should return error") + } +} + +func TestPKCSDecryptNeverFails(t *testing.T) { + // We don't want RSA-PKCS1 v1.5 decryption to ever fail, in order to prevent + // side-channel timing attacks (Bleichenbacher attack in particular). + priv, err := rsa.GenerateKey(rand.Reader, 2048) + if err != nil { + t.Error(err) + return + } + + dec := new(rsaDecrypterSigner) + dec.privateKey = priv + aes := newAESGCM(16) + + keygen := randomKeyGenerator{ + size: aes.keySize(), + } + + for i := 1; i < 50; i++ { + payload := make([]byte, 256) + _, err := io.ReadFull(rand.Reader, payload) + if err != nil { + t.Error("Unable to get random data:", err) + return + } + _, err = dec.decrypt(payload, RSA1_5, keygen) + if err != nil { + t.Error("PKCS1v1.5 decrypt should never fail:", err) + return + } + } +} + +func BenchmarkPKCSDecryptWithValidPayloads(b *testing.B) { + priv, err := rsa.GenerateKey(rand.Reader, 2048) + if err != nil { + panic(err) + } + + enc := new(rsaEncrypterVerifier) + enc.publicKey = &priv.PublicKey + dec := new(rsaDecrypterSigner) + dec.privateKey = priv + aes := newAESGCM(32) + + b.StopTimer() + b.ResetTimer() + for i := 0; i < b.N; i++ { + plaintext := make([]byte, 32) + _, err = io.ReadFull(rand.Reader, plaintext) + if err != nil { + panic(err) + } + + ciphertext, err := enc.encrypt(plaintext, RSA1_5) + if err != nil { + panic(err) + } + + keygen := randomKeyGenerator{ + size: aes.keySize(), + } + + b.StartTimer() + _, err = dec.decrypt(ciphertext, RSA1_5, keygen) + b.StopTimer() + if err != nil { + panic(err) + } + } +} + +func BenchmarkPKCSDecryptWithInvalidPayloads(b *testing.B) { + priv, err := rsa.GenerateKey(rand.Reader, 2048) + if err != nil { + panic(err) + } + + enc := new(rsaEncrypterVerifier) + enc.publicKey = &priv.PublicKey + dec := new(rsaDecrypterSigner) + dec.privateKey = priv + aes := newAESGCM(16) + + keygen := randomKeyGenerator{ + size: aes.keySize(), + } + + b.StopTimer() + b.ResetTimer() + for i := 0; i < b.N; i++ { + plaintext := make([]byte, 16) + _, err = io.ReadFull(rand.Reader, plaintext) + if err != nil { + panic(err) + } + + ciphertext, err := enc.encrypt(plaintext, RSA1_5) + if err != nil { + panic(err) + } + + // Do some simple scrambling + ciphertext[128] ^= 0xFF + + b.StartTimer() + _, err = dec.decrypt(ciphertext, RSA1_5, keygen) + b.StopTimer() + if err != nil { + panic(err) + } + } +} + +func TestInvalidEllipticCurve(t *testing.T) { + signer256 := ecDecrypterSigner{privateKey: ecTestKey256} + signer384 := ecDecrypterSigner{privateKey: ecTestKey384} + signer521 := ecDecrypterSigner{privateKey: ecTestKey521} + + _, err := signer256.signPayload([]byte{}, ES384) + if err == nil { + t.Error("should not generate ES384 signature with P-256 key") + } + _, err = signer256.signPayload([]byte{}, ES512) + if err == nil { + t.Error("should not 
generate ES512 signature with P-256 key") + } + _, err = signer384.signPayload([]byte{}, ES256) + if err == nil { + t.Error("should not generate ES256 signature with P-384 key") + } + _, err = signer384.signPayload([]byte{}, ES512) + if err == nil { + t.Error("should not generate ES512 signature with P-384 key") + } + _, err = signer521.signPayload([]byte{}, ES256) + if err == nil { + t.Error("should not generate ES256 signature with P-521 key") + } + _, err = signer521.signPayload([]byte{}, ES384) + if err == nil { + t.Error("should not generate ES384 signature with P-521 key") + } +} diff --git a/vendor/github.com/docker/distribution/vendor/rsc.io/letsencrypt/vendor/gopkg.in/square/go-jose.v1/cipher/cbc_hmac.go b/vendor/github.com/docker/distribution/vendor/rsc.io/letsencrypt/vendor/gopkg.in/square/go-jose.v1/cipher/cbc_hmac.go new file mode 100644 index 000000000..a5c358340 --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/rsc.io/letsencrypt/vendor/gopkg.in/square/go-jose.v1/cipher/cbc_hmac.go @@ -0,0 +1,196 @@ +/*- + * Copyright 2014 Square Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package josecipher + +import ( + "bytes" + "crypto/cipher" + "crypto/hmac" + "crypto/sha256" + "crypto/sha512" + "crypto/subtle" + "encoding/binary" + "errors" + "hash" +) + +const ( + nonceBytes = 16 +) + +// NewCBCHMAC instantiates a new AEAD based on CBC+HMAC. +func NewCBCHMAC(key []byte, newBlockCipher func([]byte) (cipher.Block, error)) (cipher.AEAD, error) { + keySize := len(key) / 2 + integrityKey := key[:keySize] + encryptionKey := key[keySize:] + + blockCipher, err := newBlockCipher(encryptionKey) + if err != nil { + return nil, err + } + + var hash func() hash.Hash + switch keySize { + case 16: + hash = sha256.New + case 24: + hash = sha512.New384 + case 32: + hash = sha512.New + } + + return &cbcAEAD{ + hash: hash, + blockCipher: blockCipher, + authtagBytes: keySize, + integrityKey: integrityKey, + }, nil +} + +// An AEAD based on CBC+HMAC +type cbcAEAD struct { + hash func() hash.Hash + authtagBytes int + integrityKey []byte + blockCipher cipher.Block +} + +func (ctx *cbcAEAD) NonceSize() int { + return nonceBytes +} + +func (ctx *cbcAEAD) Overhead() int { + // Maximum overhead is block size (for padding) plus auth tag length, where + // the length of the auth tag is equivalent to the key size. + return ctx.blockCipher.BlockSize() + ctx.authtagBytes +} + +// Seal encrypts and authenticates the plaintext. +func (ctx *cbcAEAD) Seal(dst, nonce, plaintext, data []byte) []byte { + // Output buffer -- must take care not to mangle plaintext input. 
+	ciphertext := make([]byte, len(plaintext)+ctx.Overhead())[:len(plaintext)]
+	copy(ciphertext, plaintext)
+	ciphertext = padBuffer(ciphertext, ctx.blockCipher.BlockSize())
+
+	cbc := cipher.NewCBCEncrypter(ctx.blockCipher, nonce)
+
+	cbc.CryptBlocks(ciphertext, ciphertext)
+	authtag := ctx.computeAuthTag(data, nonce, ciphertext)
+
+	ret, out := resize(dst, len(dst)+len(ciphertext)+len(authtag))
+	copy(out, ciphertext)
+	copy(out[len(ciphertext):], authtag)
+
+	return ret
+}
+
+// Open decrypts and authenticates the ciphertext.
+func (ctx *cbcAEAD) Open(dst, nonce, ciphertext, data []byte) ([]byte, error) {
+	if len(ciphertext) < ctx.authtagBytes {
+		return nil, errors.New("square/go-jose: invalid ciphertext (too short)")
+	}
+
+	offset := len(ciphertext) - ctx.authtagBytes
+	expectedTag := ctx.computeAuthTag(data, nonce, ciphertext[:offset])
+	match := subtle.ConstantTimeCompare(expectedTag, ciphertext[offset:])
+	if match != 1 {
+		return nil, errors.New("square/go-jose: invalid ciphertext (auth tag mismatch)")
+	}
+
+	cbc := cipher.NewCBCDecrypter(ctx.blockCipher, nonce)
+
+	// Make copy of ciphertext buffer, don't want to modify in place
+	buffer := append([]byte{}, []byte(ciphertext[:offset])...)
+
+	if len(buffer)%ctx.blockCipher.BlockSize() > 0 {
+		return nil, errors.New("square/go-jose: invalid ciphertext (invalid length)")
+	}
+
+	cbc.CryptBlocks(buffer, buffer)
+
+	// Remove padding
+	plaintext, err := unpadBuffer(buffer, ctx.blockCipher.BlockSize())
+	if err != nil {
+		return nil, err
+	}
+
+	ret, out := resize(dst, len(dst)+len(plaintext))
+	copy(out, plaintext)
+
+	return ret, nil
+}
+
+// Compute an authentication tag
+func (ctx *cbcAEAD) computeAuthTag(aad, nonce, ciphertext []byte) []byte {
+	buffer := make([]byte, len(aad)+len(nonce)+len(ciphertext)+8)
+	n := 0
+	n += copy(buffer, aad)
+	n += copy(buffer[n:], nonce)
+	n += copy(buffer[n:], ciphertext)
+	binary.BigEndian.PutUint64(buffer[n:], uint64(len(aad)*8))
+
+	// According to documentation, Write() on hash.Hash never fails.
+	hmac := hmac.New(ctx.hash, ctx.integrityKey)
+	_, _ = hmac.Write(buffer)
+
+	return hmac.Sum(nil)[:ctx.authtagBytes]
+}
+
+// resize ensures that the given slice has a capacity of at least n bytes.
+// If the capacity of the slice is less than n, a new slice is allocated
+// and the existing data will be copied.
+func resize(in []byte, n int) (head, tail []byte) { + if cap(in) >= n { + head = in[:n] + } else { + head = make([]byte, n) + copy(head, in) + } + + tail = head[len(in):] + return +} + +// Apply padding +func padBuffer(buffer []byte, blockSize int) []byte { + missing := blockSize - (len(buffer) % blockSize) + ret, out := resize(buffer, len(buffer)+missing) + padding := bytes.Repeat([]byte{byte(missing)}, missing) + copy(out, padding) + return ret +} + +// Remove padding +func unpadBuffer(buffer []byte, blockSize int) ([]byte, error) { + if len(buffer)%blockSize != 0 { + return nil, errors.New("square/go-jose: invalid padding") + } + + last := buffer[len(buffer)-1] + count := int(last) + + if count == 0 || count > blockSize || count > len(buffer) { + return nil, errors.New("square/go-jose: invalid padding") + } + + padding := bytes.Repeat([]byte{last}, count) + if !bytes.HasSuffix(buffer, padding) { + return nil, errors.New("square/go-jose: invalid padding") + } + + return buffer[:len(buffer)-count], nil +} diff --git a/vendor/github.com/docker/distribution/vendor/rsc.io/letsencrypt/vendor/gopkg.in/square/go-jose.v1/cipher/cbc_hmac_test.go b/vendor/github.com/docker/distribution/vendor/rsc.io/letsencrypt/vendor/gopkg.in/square/go-jose.v1/cipher/cbc_hmac_test.go new file mode 100644 index 000000000..c230271b5 --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/rsc.io/letsencrypt/vendor/gopkg.in/square/go-jose.v1/cipher/cbc_hmac_test.go @@ -0,0 +1,498 @@ +/*- + * Copyright 2014 Square Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package josecipher + +import ( + "bytes" + "crypto/aes" + "crypto/cipher" + "crypto/rand" + "io" + "strings" + "testing" +) + +func TestInvalidInputs(t *testing.T) { + key := []byte{ + 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, + 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, + } + + nonce := []byte{ + 92, 80, 104, 49, 133, 25, 161, 215, 173, 101, 219, 211, 136, 91, 210, 145} + + aead, _ := NewCBCHMAC(key, aes.NewCipher) + ciphertext := aead.Seal(nil, nonce, []byte("plaintext"), []byte("aad")) + + // Changed AAD, must fail + _, err := aead.Open(nil, nonce, ciphertext, []byte("INVALID")) + if err == nil { + t.Error("must detect invalid aad") + } + + // Empty ciphertext, must fail + _, err = aead.Open(nil, nonce, []byte{}, []byte("aad")) + if err == nil { + t.Error("must detect invalid/empty ciphertext") + } + + // Corrupt ciphertext, must fail + corrupt := make([]byte, len(ciphertext)) + copy(corrupt, ciphertext) + corrupt[0] ^= 0xFF + + _, err = aead.Open(nil, nonce, corrupt, []byte("aad")) + if err == nil { + t.Error("must detect corrupt ciphertext") + } + + // Corrupt authtag, must fail + copy(corrupt, ciphertext) + corrupt[len(ciphertext)-1] ^= 0xFF + + _, err = aead.Open(nil, nonce, corrupt, []byte("aad")) + if err == nil { + t.Error("must detect corrupt authtag") + } + + // Truncated data, must fail + _, err = aead.Open(nil, nonce, ciphertext[:10], []byte("aad")) + if err == nil { + t.Error("must detect corrupt authtag") + } +} + +func TestVectorsAESCBC128(t *testing.T) { + // Source: http://tools.ietf.org/html/draft-ietf-jose-json-web-encryption-29#appendix-A.2 + plaintext := []byte{ + 76, 105, 118, 101, 32, 108, 111, 110, 103, 32, 97, 110, 100, 32, + 112, 114, 111, 115, 112, 101, 114, 46} + + aad := []byte{ + 101, 121, 74, 104, 98, 71, 99, 105, 79, 105, 74, 83, 85, 48, 69, + 120, 88, 122, 85, 105, 76, 67, 74, 108, 98, 109, 77, 105, 79, 105, + 74, 66, 77, 84, 73, 52, 81, 48, 74, 68, 76, 85, 104, 84, 77, 106, 85, + 50, 73, 110, 48} + + expectedCiphertext := []byte{ + 40, 57, 83, 181, 119, 33, 133, 148, 198, 185, 243, 24, 152, 230, 6, + 75, 129, 223, 127, 19, 210, 82, 183, 230, 168, 33, 215, 104, 143, + 112, 56, 102} + + expectedAuthtag := []byte{ + 246, 17, 244, 190, 4, 95, 98, 3, 231, 0, 115, 157, 242, 203, 100, + 191} + + key := []byte{ + 4, 211, 31, 197, 84, 157, 252, 254, 11, 100, 157, 250, 63, 170, 106, 206, + 107, 124, 212, 45, 111, 107, 9, 219, 200, 177, 0, 240, 143, 156, 44, 207} + + nonce := []byte{ + 3, 22, 60, 12, 43, 67, 104, 105, 108, 108, 105, 99, 111, 116, 104, 101} + + enc, err := NewCBCHMAC(key, aes.NewCipher) + out := enc.Seal(nil, nonce, plaintext, aad) + if err != nil { + t.Error("Unable to encrypt:", err) + return + } + + if bytes.Compare(out[:len(out)-16], expectedCiphertext) != 0 { + t.Error("Ciphertext did not match") + } + if bytes.Compare(out[len(out)-16:], expectedAuthtag) != 0 { + t.Error("Auth tag did not match") + } +} + +func TestVectorsAESCBC256(t *testing.T) { + // Source: https://tools.ietf.org/html/draft-mcgrew-aead-aes-cbc-hmac-sha2-05#section-5.4 + plaintext := []byte{ + 0x41, 0x20, 0x63, 0x69, 0x70, 0x68, 0x65, 0x72, 0x20, 0x73, 0x79, 0x73, 0x74, 0x65, 0x6d, 0x20, + 0x6d, 0x75, 0x73, 0x74, 0x20, 0x6e, 0x6f, 0x74, 0x20, 0x62, 0x65, 0x20, 0x72, 0x65, 0x71, 0x75, + 0x69, 0x72, 0x65, 0x64, 0x20, 0x74, 0x6f, 0x20, 0x62, 0x65, 0x20, 0x73, 0x65, 0x63, 0x72, 0x65, + 0x74, 0x2c, 0x20, 0x61, 0x6e, 0x64, 0x20, 0x69, 0x74, 0x20, 0x6d, 0x75, 0x73, 0x74, 0x20, 0x62, + 0x65, 0x20, 0x61, 0x62, 0x6c, 0x65, 0x20, 0x74, 0x6f, 0x20, 0x66, 
0x61, 0x6c, 0x6c, 0x20, 0x69, + 0x6e, 0x74, 0x6f, 0x20, 0x74, 0x68, 0x65, 0x20, 0x68, 0x61, 0x6e, 0x64, 0x73, 0x20, 0x6f, 0x66, + 0x20, 0x74, 0x68, 0x65, 0x20, 0x65, 0x6e, 0x65, 0x6d, 0x79, 0x20, 0x77, 0x69, 0x74, 0x68, 0x6f, + 0x75, 0x74, 0x20, 0x69, 0x6e, 0x63, 0x6f, 0x6e, 0x76, 0x65, 0x6e, 0x69, 0x65, 0x6e, 0x63, 0x65} + + aad := []byte{ + 0x54, 0x68, 0x65, 0x20, 0x73, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x20, 0x70, 0x72, 0x69, 0x6e, 0x63, + 0x69, 0x70, 0x6c, 0x65, 0x20, 0x6f, 0x66, 0x20, 0x41, 0x75, 0x67, 0x75, 0x73, 0x74, 0x65, 0x20, + 0x4b, 0x65, 0x72, 0x63, 0x6b, 0x68, 0x6f, 0x66, 0x66, 0x73} + + expectedCiphertext := []byte{ + 0x4a, 0xff, 0xaa, 0xad, 0xb7, 0x8c, 0x31, 0xc5, 0xda, 0x4b, 0x1b, 0x59, 0x0d, 0x10, 0xff, 0xbd, + 0x3d, 0xd8, 0xd5, 0xd3, 0x02, 0x42, 0x35, 0x26, 0x91, 0x2d, 0xa0, 0x37, 0xec, 0xbc, 0xc7, 0xbd, + 0x82, 0x2c, 0x30, 0x1d, 0xd6, 0x7c, 0x37, 0x3b, 0xcc, 0xb5, 0x84, 0xad, 0x3e, 0x92, 0x79, 0xc2, + 0xe6, 0xd1, 0x2a, 0x13, 0x74, 0xb7, 0x7f, 0x07, 0x75, 0x53, 0xdf, 0x82, 0x94, 0x10, 0x44, 0x6b, + 0x36, 0xeb, 0xd9, 0x70, 0x66, 0x29, 0x6a, 0xe6, 0x42, 0x7e, 0xa7, 0x5c, 0x2e, 0x08, 0x46, 0xa1, + 0x1a, 0x09, 0xcc, 0xf5, 0x37, 0x0d, 0xc8, 0x0b, 0xfe, 0xcb, 0xad, 0x28, 0xc7, 0x3f, 0x09, 0xb3, + 0xa3, 0xb7, 0x5e, 0x66, 0x2a, 0x25, 0x94, 0x41, 0x0a, 0xe4, 0x96, 0xb2, 0xe2, 0xe6, 0x60, 0x9e, + 0x31, 0xe6, 0xe0, 0x2c, 0xc8, 0x37, 0xf0, 0x53, 0xd2, 0x1f, 0x37, 0xff, 0x4f, 0x51, 0x95, 0x0b, + 0xbe, 0x26, 0x38, 0xd0, 0x9d, 0xd7, 0xa4, 0x93, 0x09, 0x30, 0x80, 0x6d, 0x07, 0x03, 0xb1, 0xf6} + + expectedAuthtag := []byte{ + 0x4d, 0xd3, 0xb4, 0xc0, 0x88, 0xa7, 0xf4, 0x5c, 0x21, 0x68, 0x39, 0x64, 0x5b, 0x20, 0x12, 0xbf, + 0x2e, 0x62, 0x69, 0xa8, 0xc5, 0x6a, 0x81, 0x6d, 0xbc, 0x1b, 0x26, 0x77, 0x61, 0x95, 0x5b, 0xc5} + + key := []byte{ + 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, + 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, + 0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, 0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f, + 0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, 0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f} + + nonce := []byte{ + 0x1a, 0xf3, 0x8c, 0x2d, 0xc2, 0xb9, 0x6f, 0xfd, 0xd8, 0x66, 0x94, 0x09, 0x23, 0x41, 0xbc, 0x04} + + enc, err := NewCBCHMAC(key, aes.NewCipher) + out := enc.Seal(nil, nonce, plaintext, aad) + if err != nil { + t.Error("Unable to encrypt:", err) + return + } + + if bytes.Compare(out[:len(out)-32], expectedCiphertext) != 0 { + t.Error("Ciphertext did not match, got", out[:len(out)-32], "wanted", expectedCiphertext) + } + if bytes.Compare(out[len(out)-32:], expectedAuthtag) != 0 { + t.Error("Auth tag did not match, got", out[len(out)-32:], "wanted", expectedAuthtag) + } +} + +func TestAESCBCRoundtrip(t *testing.T) { + key128 := []byte{ + 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, + 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15} + + key192 := []byte{ + 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, + 0, 1, 2, 3, 4, 5, 6, 7, + 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, + 0, 1, 2, 3, 4, 5, 6, 7} + + key256 := []byte{ + 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, + 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, + 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, + 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15} + + nonce := []byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15} + + RunRoundtrip(t, key128, nonce) + RunRoundtrip(t, key192, nonce) + RunRoundtrip(t, key256, nonce) +} + +func 
RunRoundtrip(t *testing.T, key, nonce []byte) { + aead, err := NewCBCHMAC(key, aes.NewCipher) + if err != nil { + panic(err) + } + + if aead.NonceSize() != len(nonce) { + panic("invalid nonce") + } + + // Test pre-existing data in dst buffer + dst := []byte{15, 15, 15, 15} + plaintext := []byte{0, 0, 0, 0} + aad := []byte{4, 3, 2, 1} + + result := aead.Seal(dst, nonce, plaintext, aad) + if bytes.Compare(dst, result[:4]) != 0 { + t.Error("Existing data in dst not preserved") + } + + // Test pre-existing (empty) dst buffer with sufficient capacity + dst = make([]byte, 256)[:0] + result, err = aead.Open(dst, nonce, result[4:], aad) + if err != nil { + panic(err) + } + + if bytes.Compare(result, plaintext) != 0 { + t.Error("Plaintext does not match output") + } +} + +func TestAESCBCOverhead(t *testing.T) { + aead, err := NewCBCHMAC(make([]byte, 32), aes.NewCipher) + if err != nil { + panic(err) + } + + if aead.Overhead() != 32 { + t.Error("CBC-HMAC reports incorrect overhead value") + } +} + +func TestPadding(t *testing.T) { + for i := 0; i < 256; i++ { + slice := make([]byte, i) + padded := padBuffer(slice, 16) + if len(padded)%16 != 0 { + t.Error("failed to pad slice properly", i) + return + } + unpadded, err := unpadBuffer(padded, 16) + if err != nil || len(unpadded) != i { + t.Error("failed to unpad slice properly", i) + return + } + } +} + +func TestInvalidKey(t *testing.T) { + key := make([]byte, 30) + _, err := NewCBCHMAC(key, aes.NewCipher) + if err == nil { + t.Error("should not be able to instantiate CBC-HMAC with invalid key") + } +} + +func TestTruncatedCiphertext(t *testing.T) { + key := make([]byte, 32) + nonce := make([]byte, 16) + data := make([]byte, 32) + + io.ReadFull(rand.Reader, key) + io.ReadFull(rand.Reader, nonce) + + aead, err := NewCBCHMAC(key, aes.NewCipher) + if err != nil { + panic(err) + } + + ctx := aead.(*cbcAEAD) + ct := aead.Seal(nil, nonce, data, nil) + + // Truncated ciphertext, but with correct auth tag + truncated, tail := resize(ct[:len(ct)-ctx.authtagBytes-2], len(ct)-2) + copy(tail, ctx.computeAuthTag(nil, nonce, truncated[:len(truncated)-ctx.authtagBytes])) + + // Open should fail + _, err = aead.Open(nil, nonce, truncated, nil) + if err == nil { + t.Error("open on truncated ciphertext should fail") + } +} + +func TestInvalidPaddingOpen(t *testing.T) { + key := make([]byte, 32) + nonce := make([]byte, 16) + + // Plaintext with invalid padding + plaintext := padBuffer(make([]byte, 28), aes.BlockSize) + plaintext[len(plaintext)-1] = 0xFF + + io.ReadFull(rand.Reader, key) + io.ReadFull(rand.Reader, nonce) + + block, _ := aes.NewCipher(key) + cbc := cipher.NewCBCEncrypter(block, nonce) + buffer := append([]byte{}, plaintext...) 
+ cbc.CryptBlocks(buffer, buffer) + + aead, _ := NewCBCHMAC(key, aes.NewCipher) + ctx := aead.(*cbcAEAD) + + // Mutated ciphertext, but with correct auth tag + size := len(buffer) + ciphertext, tail := resize(buffer, size+(len(key)/2)) + copy(tail, ctx.computeAuthTag(nil, nonce, ciphertext[:size])) + + // Open should fail (b/c of invalid padding, even though tag matches) + _, err := aead.Open(nil, nonce, ciphertext, nil) + if err == nil || !strings.Contains(err.Error(), "invalid padding") { + t.Error("no or unexpected error on open with invalid padding:", err) + } +} + +func TestInvalidPadding(t *testing.T) { + for i := 0; i < 256; i++ { + slice := make([]byte, i) + padded := padBuffer(slice, 16) + if len(padded)%16 != 0 { + t.Error("failed to pad slice properly", i) + return + } + + paddingBytes := 16 - (i % 16) + + // Mutate padding for testing + for j := 1; j <= paddingBytes; j++ { + mutated := make([]byte, len(padded)) + copy(mutated, padded) + mutated[len(mutated)-j] ^= 0xFF + + _, err := unpadBuffer(mutated, 16) + if err == nil { + t.Error("unpad on invalid padding should fail", i) + return + } + } + + // Test truncated padding + _, err := unpadBuffer(padded[:len(padded)-1], 16) + if err == nil { + t.Error("unpad on truncated padding should fail", i) + return + } + } +} + +func TestZeroLengthPadding(t *testing.T) { + data := make([]byte, 16) + data, err := unpadBuffer(data, 16) + if err == nil { + t.Error("padding with 0x00 should never be valid") + } +} + +func benchEncryptCBCHMAC(b *testing.B, keySize, chunkSize int) { + key := make([]byte, keySize*2) + nonce := make([]byte, 16) + + io.ReadFull(rand.Reader, key) + io.ReadFull(rand.Reader, nonce) + + chunk := make([]byte, chunkSize) + + aead, err := NewCBCHMAC(key, aes.NewCipher) + if err != nil { + panic(err) + } + + b.SetBytes(int64(chunkSize)) + b.ResetTimer() + for i := 0; i < b.N; i++ { + aead.Seal(nil, nonce, chunk, nil) + } +} + +func benchDecryptCBCHMAC(b *testing.B, keySize, chunkSize int) { + key := make([]byte, keySize*2) + nonce := make([]byte, 16) + + io.ReadFull(rand.Reader, key) + io.ReadFull(rand.Reader, nonce) + + chunk := make([]byte, chunkSize) + + aead, err := NewCBCHMAC(key, aes.NewCipher) + if err != nil { + panic(err) + } + + out := aead.Seal(nil, nonce, chunk, nil) + + b.SetBytes(int64(chunkSize)) + b.ResetTimer() + for i := 0; i < b.N; i++ { + aead.Open(nil, nonce, out, nil) + } +} + +func BenchmarkEncryptAES128_CBCHMAC_1k(b *testing.B) { + benchEncryptCBCHMAC(b, 16, 1024) +} + +func BenchmarkEncryptAES128_CBCHMAC_64k(b *testing.B) { + benchEncryptCBCHMAC(b, 16, 65536) +} + +func BenchmarkEncryptAES128_CBCHMAC_1MB(b *testing.B) { + benchEncryptCBCHMAC(b, 16, 1048576) +} + +func BenchmarkEncryptAES128_CBCHMAC_64MB(b *testing.B) { + benchEncryptCBCHMAC(b, 16, 67108864) +} + +func BenchmarkDecryptAES128_CBCHMAC_1k(b *testing.B) { + benchDecryptCBCHMAC(b, 16, 1024) +} + +func BenchmarkDecryptAES128_CBCHMAC_64k(b *testing.B) { + benchDecryptCBCHMAC(b, 16, 65536) +} + +func BenchmarkDecryptAES128_CBCHMAC_1MB(b *testing.B) { + benchDecryptCBCHMAC(b, 16, 1048576) +} + +func BenchmarkDecryptAES128_CBCHMAC_64MB(b *testing.B) { + benchDecryptCBCHMAC(b, 16, 67108864) +} + +func BenchmarkEncryptAES192_CBCHMAC_64k(b *testing.B) { + benchEncryptCBCHMAC(b, 24, 65536) +} + +func BenchmarkEncryptAES192_CBCHMAC_1MB(b *testing.B) { + benchEncryptCBCHMAC(b, 24, 1048576) +} + +func BenchmarkEncryptAES192_CBCHMAC_64MB(b *testing.B) { + benchEncryptCBCHMAC(b, 24, 67108864) +} + +func BenchmarkDecryptAES192_CBCHMAC_1k(b *testing.B) 
{ + benchDecryptCBCHMAC(b, 24, 1024) +} + +func BenchmarkDecryptAES192_CBCHMAC_64k(b *testing.B) { + benchDecryptCBCHMAC(b, 24, 65536) +} + +func BenchmarkDecryptAES192_CBCHMAC_1MB(b *testing.B) { + benchDecryptCBCHMAC(b, 24, 1048576) +} + +func BenchmarkDecryptAES192_CBCHMAC_64MB(b *testing.B) { + benchDecryptCBCHMAC(b, 24, 67108864) +} + +func BenchmarkEncryptAES256_CBCHMAC_64k(b *testing.B) { + benchEncryptCBCHMAC(b, 32, 65536) +} + +func BenchmarkEncryptAES256_CBCHMAC_1MB(b *testing.B) { + benchEncryptCBCHMAC(b, 32, 1048576) +} + +func BenchmarkEncryptAES256_CBCHMAC_64MB(b *testing.B) { + benchEncryptCBCHMAC(b, 32, 67108864) +} + +func BenchmarkDecryptAES256_CBCHMAC_1k(b *testing.B) { + benchDecryptCBCHMAC(b, 32, 1032) +} + +func BenchmarkDecryptAES256_CBCHMAC_64k(b *testing.B) { + benchDecryptCBCHMAC(b, 32, 65536) +} + +func BenchmarkDecryptAES256_CBCHMAC_1MB(b *testing.B) { + benchDecryptCBCHMAC(b, 32, 1048576) +} + +func BenchmarkDecryptAES256_CBCHMAC_64MB(b *testing.B) { + benchDecryptCBCHMAC(b, 32, 67108864) +} diff --git a/vendor/github.com/docker/distribution/vendor/rsc.io/letsencrypt/vendor/gopkg.in/square/go-jose.v1/cipher/concat_kdf.go b/vendor/github.com/docker/distribution/vendor/rsc.io/letsencrypt/vendor/gopkg.in/square/go-jose.v1/cipher/concat_kdf.go new file mode 100644 index 000000000..cbb5f7b88 --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/rsc.io/letsencrypt/vendor/gopkg.in/square/go-jose.v1/cipher/concat_kdf.go @@ -0,0 +1,75 @@ +/*- + * Copyright 2014 Square Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package josecipher + +import ( + "crypto" + "encoding/binary" + "hash" + "io" +) + +type concatKDF struct { + z, info []byte + i uint32 + cache []byte + hasher hash.Hash +} + +// NewConcatKDF builds a KDF reader based on the given inputs. 
+func NewConcatKDF(hash crypto.Hash, z, algID, ptyUInfo, ptyVInfo, supPubInfo, supPrivInfo []byte) io.Reader { + buffer := make([]byte, len(algID)+len(ptyUInfo)+len(ptyVInfo)+len(supPubInfo)+len(supPrivInfo)) + n := 0 + n += copy(buffer, algID) + n += copy(buffer[n:], ptyUInfo) + n += copy(buffer[n:], ptyVInfo) + n += copy(buffer[n:], supPubInfo) + copy(buffer[n:], supPrivInfo) + + hasher := hash.New() + + return &concatKDF{ + z: z, + info: buffer, + hasher: hasher, + cache: []byte{}, + i: 1, + } +} + +func (ctx *concatKDF) Read(out []byte) (int, error) { + copied := copy(out, ctx.cache) + ctx.cache = ctx.cache[copied:] + + for copied < len(out) { + ctx.hasher.Reset() + + // Write on a hash.Hash never fails + _ = binary.Write(ctx.hasher, binary.BigEndian, ctx.i) + _, _ = ctx.hasher.Write(ctx.z) + _, _ = ctx.hasher.Write(ctx.info) + + hash := ctx.hasher.Sum(nil) + chunkCopied := copy(out[copied:], hash) + copied += chunkCopied + ctx.cache = hash[chunkCopied:] + + ctx.i++ + } + + return copied, nil +} diff --git a/vendor/github.com/docker/distribution/vendor/rsc.io/letsencrypt/vendor/gopkg.in/square/go-jose.v1/cipher/concat_kdf_test.go b/vendor/github.com/docker/distribution/vendor/rsc.io/letsencrypt/vendor/gopkg.in/square/go-jose.v1/cipher/concat_kdf_test.go new file mode 100644 index 000000000..48219b3e1 --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/rsc.io/letsencrypt/vendor/gopkg.in/square/go-jose.v1/cipher/concat_kdf_test.go @@ -0,0 +1,150 @@ +/*- + * Copyright 2014 Square Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package josecipher + +import ( + "bytes" + "crypto" + "testing" +) + +// Taken from: https://tools.ietf.org/id/draft-ietf-jose-json-web-algorithms-38.txt +func TestVectorConcatKDF(t *testing.T) { + z := []byte{ + 158, 86, 217, 29, 129, 113, 53, 211, 114, 131, 66, 131, 191, 132, + 38, 156, 251, 49, 110, 163, 218, 128, 106, 72, 246, 218, 167, 121, + 140, 254, 144, 196} + + algID := []byte{0, 0, 0, 7, 65, 49, 50, 56, 71, 67, 77} + + ptyUInfo := []byte{0, 0, 0, 5, 65, 108, 105, 99, 101} + ptyVInfo := []byte{0, 0, 0, 3, 66, 111, 98} + + supPubInfo := []byte{0, 0, 0, 128} + supPrivInfo := []byte{} + + expected := []byte{ + 86, 170, 141, 234, 248, 35, 109, 32, 92, 34, 40, 205, 113, 167, 16, 26} + + ckdf := NewConcatKDF(crypto.SHA256, z, algID, ptyUInfo, ptyVInfo, supPubInfo, supPrivInfo) + + out0 := make([]byte, 9) + out1 := make([]byte, 7) + + read0, err := ckdf.Read(out0) + if err != nil { + t.Error("error when reading from concat kdf reader", err) + return + } + + read1, err := ckdf.Read(out1) + if err != nil { + t.Error("error when reading from concat kdf reader", err) + return + } + + if read0+read1 != len(out0)+len(out1) { + t.Error("did not receive enough bytes from concat kdf reader") + return + } + + out := []byte{} + out = append(out, out0...) + out = append(out, out1...) 
+ + if bytes.Compare(out, expected) != 0 { + t.Error("did not receive expected output from concat kdf reader") + return + } +} + +func TestCache(t *testing.T) { + z := []byte{ + 158, 86, 217, 29, 129, 113, 53, 211, 114, 131, 66, 131, 191, 132, + 38, 156, 251, 49, 110, 163, 218, 128, 106, 72, 246, 218, 167, 121, + 140, 254, 144, 196} + + algID := []byte{1, 1, 1, 1, 2, 2, 2, 2, 3, 3, 3, 3, 4, 4, 4, 4} + + ptyUInfo := []byte{1, 2, 3, 4} + ptyVInfo := []byte{4, 3, 2, 1} + + supPubInfo := []byte{} + supPrivInfo := []byte{} + + outputs := [][]byte{} + + // Read the same amount of data in different chunk sizes + chunkSizes := []int{1, 2, 4, 8, 16, 32, 64, 128, 256, 512} + + for _, c := range chunkSizes { + out := make([]byte, 1024) + reader := NewConcatKDF(crypto.SHA256, z, algID, ptyUInfo, ptyVInfo, supPubInfo, supPrivInfo) + + for i := 0; i < 1024; i += c { + _, _ = reader.Read(out[i : i+c]) + } + + outputs = append(outputs, out) + } + + for i := range outputs { + if bytes.Compare(outputs[i], outputs[(i+1)%len(outputs)]) != 0 { + t.Error("not all outputs from KDF matched") + } + } +} + +func benchmarkKDF(b *testing.B, total int) { + z := []byte{ + 158, 86, 217, 29, 129, 113, 53, 211, 114, 131, 66, 131, 191, 132, + 38, 156, 251, 49, 110, 163, 218, 128, 106, 72, 246, 218, 167, 121, + 140, 254, 144, 196} + + algID := []byte{1, 1, 1, 1, 2, 2, 2, 2, 3, 3, 3, 3, 4, 4, 4, 4} + + ptyUInfo := []byte{1, 2, 3, 4} + ptyVInfo := []byte{4, 3, 2, 1} + + supPubInfo := []byte{} + supPrivInfo := []byte{} + + out := make([]byte, total) + reader := NewConcatKDF(crypto.SHA256, z, algID, ptyUInfo, ptyVInfo, supPubInfo, supPrivInfo) + + b.ResetTimer() + b.SetBytes(int64(total)) + for i := 0; i < b.N; i++ { + _, _ = reader.Read(out) + } +} + +func BenchmarkConcatKDF_1k(b *testing.B) { + benchmarkKDF(b, 1024) +} + +func BenchmarkConcatKDF_64k(b *testing.B) { + benchmarkKDF(b, 65536) +} + +func BenchmarkConcatKDF_1MB(b *testing.B) { + benchmarkKDF(b, 1048576) +} + +func BenchmarkConcatKDF_64MB(b *testing.B) { + benchmarkKDF(b, 67108864) +} diff --git a/vendor/github.com/docker/distribution/vendor/rsc.io/letsencrypt/vendor/gopkg.in/square/go-jose.v1/cipher/ecdh_es.go b/vendor/github.com/docker/distribution/vendor/rsc.io/letsencrypt/vendor/gopkg.in/square/go-jose.v1/cipher/ecdh_es.go new file mode 100644 index 000000000..c6a5a8215 --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/rsc.io/letsencrypt/vendor/gopkg.in/square/go-jose.v1/cipher/ecdh_es.go @@ -0,0 +1,51 @@ +/*- + * Copyright 2014 Square Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package josecipher + +import ( + "crypto" + "crypto/ecdsa" + "encoding/binary" +) + +// DeriveECDHES derives a shared encryption key using ECDH/ConcatKDF as described in JWE/JWA. 
+func DeriveECDHES(alg string, apuData, apvData []byte, priv *ecdsa.PrivateKey, pub *ecdsa.PublicKey, size int) []byte { + // algId, partyUInfo, partyVInfo inputs must be prefixed with the length + algID := lengthPrefixed([]byte(alg)) + ptyUInfo := lengthPrefixed(apuData) + ptyVInfo := lengthPrefixed(apvData) + + // suppPubInfo is the encoded length of the output size in bits + supPubInfo := make([]byte, 4) + binary.BigEndian.PutUint32(supPubInfo, uint32(size)*8) + + z, _ := priv.PublicKey.Curve.ScalarMult(pub.X, pub.Y, priv.D.Bytes()) + reader := NewConcatKDF(crypto.SHA256, z.Bytes(), algID, ptyUInfo, ptyVInfo, supPubInfo, []byte{}) + + key := make([]byte, size) + + // Read on the KDF will never fail + _, _ = reader.Read(key) + return key +} + +func lengthPrefixed(data []byte) []byte { + out := make([]byte, len(data)+4) + binary.BigEndian.PutUint32(out, uint32(len(data))) + copy(out[4:], data) + return out +} diff --git a/vendor/github.com/docker/distribution/vendor/rsc.io/letsencrypt/vendor/gopkg.in/square/go-jose.v1/cipher/ecdh_es_test.go b/vendor/github.com/docker/distribution/vendor/rsc.io/letsencrypt/vendor/gopkg.in/square/go-jose.v1/cipher/ecdh_es_test.go new file mode 100644 index 000000000..f92abb175 --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/rsc.io/letsencrypt/vendor/gopkg.in/square/go-jose.v1/cipher/ecdh_es_test.go @@ -0,0 +1,98 @@ +/*- + * Copyright 2014 Square Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package josecipher + +import ( + "bytes" + "crypto/ecdsa" + "crypto/elliptic" + "encoding/base64" + "math/big" + "testing" +) + +// Example keys from JWA, Appendix C +var aliceKey = &ecdsa.PrivateKey{ + PublicKey: ecdsa.PublicKey{ + Curve: elliptic.P256(), + X: fromBase64Int("gI0GAILBdu7T53akrFmMyGcsF3n5dO7MmwNBHKW5SV0="), + Y: fromBase64Int("SLW_xSffzlPWrHEVI30DHM_4egVwt3NQqeUD7nMFpps="), + }, + D: fromBase64Int("0_NxaRPUMQoAJt50Gz8YiTr8gRTwyEaCumd-MToTmIo="), +} + +var bobKey = &ecdsa.PrivateKey{ + PublicKey: ecdsa.PublicKey{ + Curve: elliptic.P256(), + X: fromBase64Int("weNJy2HscCSM6AEDTDg04biOvhFhyyWvOHQfeF_PxMQ="), + Y: fromBase64Int("e8lnCO-AlStT-NJVX-crhB7QRYhiix03illJOVAOyck="), + }, + D: fromBase64Int("VEmDZpDXXK8p8N0Cndsxs924q6nS1RXFASRl6BfUqdw="), +} + +// Build big int from base64-encoded string. Strips whitespace (for testing). 
+func fromBase64Int(data string) *big.Int { + val, err := base64.URLEncoding.DecodeString(data) + if err != nil { + panic("Invalid test data") + } + return new(big.Int).SetBytes(val) +} + +func TestVectorECDHES(t *testing.T) { + apuData := []byte("Alice") + apvData := []byte("Bob") + + expected := []byte{ + 86, 170, 141, 234, 248, 35, 109, 32, 92, 34, 40, 205, 113, 167, 16, 26} + + output := DeriveECDHES("A128GCM", apuData, apvData, bobKey, &aliceKey.PublicKey, 16) + + if bytes.Compare(output, expected) != 0 { + t.Error("output did not match what we expect, got", output, "wanted", expected) + } +} + +func BenchmarkECDHES_128(b *testing.B) { + apuData := []byte("APU") + apvData := []byte("APV") + + b.ResetTimer() + for i := 0; i < b.N; i++ { + DeriveECDHES("ID", apuData, apvData, bobKey, &aliceKey.PublicKey, 16) + } +} + +func BenchmarkECDHES_192(b *testing.B) { + apuData := []byte("APU") + apvData := []byte("APV") + + b.ResetTimer() + for i := 0; i < b.N; i++ { + DeriveECDHES("ID", apuData, apvData, bobKey, &aliceKey.PublicKey, 24) + } +} + +func BenchmarkECDHES_256(b *testing.B) { + apuData := []byte("APU") + apvData := []byte("APV") + + b.ResetTimer() + for i := 0; i < b.N; i++ { + DeriveECDHES("ID", apuData, apvData, bobKey, &aliceKey.PublicKey, 32) + } +} diff --git a/vendor/github.com/docker/distribution/vendor/rsc.io/letsencrypt/vendor/gopkg.in/square/go-jose.v1/cipher/key_wrap.go b/vendor/github.com/docker/distribution/vendor/rsc.io/letsencrypt/vendor/gopkg.in/square/go-jose.v1/cipher/key_wrap.go new file mode 100644 index 000000000..1d36d5015 --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/rsc.io/letsencrypt/vendor/gopkg.in/square/go-jose.v1/cipher/key_wrap.go @@ -0,0 +1,109 @@ +/*- + * Copyright 2014 Square Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package josecipher + +import ( + "crypto/cipher" + "crypto/subtle" + "encoding/binary" + "errors" +) + +var defaultIV = []byte{0xA6, 0xA6, 0xA6, 0xA6, 0xA6, 0xA6, 0xA6, 0xA6} + +// KeyWrap implements NIST key wrapping; it wraps a content encryption key (cek) with the given block cipher. +func KeyWrap(block cipher.Block, cek []byte) ([]byte, error) { + if len(cek)%8 != 0 { + return nil, errors.New("square/go-jose: key wrap input must be 8 byte blocks") + } + + n := len(cek) / 8 + r := make([][]byte, n) + + for i := range r { + r[i] = make([]byte, 8) + copy(r[i], cek[i*8:]) + } + + buffer := make([]byte, 16) + tBytes := make([]byte, 8) + copy(buffer, defaultIV) + + for t := 0; t < 6*n; t++ { + copy(buffer[8:], r[t%n]) + + block.Encrypt(buffer, buffer) + + binary.BigEndian.PutUint64(tBytes, uint64(t+1)) + + for i := 0; i < 8; i++ { + buffer[i] = buffer[i] ^ tBytes[i] + } + copy(r[t%n], buffer[8:]) + } + + out := make([]byte, (n+1)*8) + copy(out, buffer[:8]) + for i := range r { + copy(out[(i+1)*8:], r[i]) + } + + return out, nil +} + +// KeyUnwrap implements NIST key unwrapping; it unwraps a content encryption key (cek) with the given block cipher. 
+func KeyUnwrap(block cipher.Block, ciphertext []byte) ([]byte, error) { + if len(ciphertext)%8 != 0 { + return nil, errors.New("square/go-jose: key wrap input must be 8 byte blocks") + } + + n := (len(ciphertext) / 8) - 1 + r := make([][]byte, n) + + for i := range r { + r[i] = make([]byte, 8) + copy(r[i], ciphertext[(i+1)*8:]) + } + + buffer := make([]byte, 16) + tBytes := make([]byte, 8) + copy(buffer[:8], ciphertext[:8]) + + for t := 6*n - 1; t >= 0; t-- { + binary.BigEndian.PutUint64(tBytes, uint64(t+1)) + + for i := 0; i < 8; i++ { + buffer[i] = buffer[i] ^ tBytes[i] + } + copy(buffer[8:], r[t%n]) + + block.Decrypt(buffer, buffer) + + copy(r[t%n], buffer[8:]) + } + + if subtle.ConstantTimeCompare(buffer[:8], defaultIV) == 0 { + return nil, errors.New("square/go-jose: failed to unwrap key") + } + + out := make([]byte, n*8) + for i := range r { + copy(out[i*8:], r[i]) + } + + return out, nil +} diff --git a/vendor/github.com/docker/distribution/vendor/rsc.io/letsencrypt/vendor/gopkg.in/square/go-jose.v1/cipher/key_wrap_test.go b/vendor/github.com/docker/distribution/vendor/rsc.io/letsencrypt/vendor/gopkg.in/square/go-jose.v1/cipher/key_wrap_test.go new file mode 100644 index 000000000..ceecf812b --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/rsc.io/letsencrypt/vendor/gopkg.in/square/go-jose.v1/cipher/key_wrap_test.go @@ -0,0 +1,133 @@ +/*- + * Copyright 2014 Square Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package josecipher + +import ( + "bytes" + "crypto/aes" + "encoding/hex" + "testing" +) + +func TestAesKeyWrap(t *testing.T) { + // Test vectors from: http://csrc.nist.gov/groups/ST/toolkit/documents/kms/key-wrap.pdf + kek0, _ := hex.DecodeString("000102030405060708090A0B0C0D0E0F") + cek0, _ := hex.DecodeString("00112233445566778899AABBCCDDEEFF") + + expected0, _ := hex.DecodeString("1FA68B0A8112B447AEF34BD8FB5A7B829D3E862371D2CFE5") + + kek1, _ := hex.DecodeString("000102030405060708090A0B0C0D0E0F1011121314151617") + cek1, _ := hex.DecodeString("00112233445566778899AABBCCDDEEFF") + + expected1, _ := hex.DecodeString("96778B25AE6CA435F92B5B97C050AED2468AB8A17AD84E5D") + + kek2, _ := hex.DecodeString("000102030405060708090A0B0C0D0E0F101112131415161718191A1B1C1D1E1F") + cek2, _ := hex.DecodeString("00112233445566778899AABBCCDDEEFF0001020304050607") + + expected2, _ := hex.DecodeString("A8F9BC1612C68B3FF6E6F4FBE30E71E4769C8B80A32CB8958CD5D17D6B254DA1") + + block0, _ := aes.NewCipher(kek0) + block1, _ := aes.NewCipher(kek1) + block2, _ := aes.NewCipher(kek2) + + out0, _ := KeyWrap(block0, cek0) + out1, _ := KeyWrap(block1, cek1) + out2, _ := KeyWrap(block2, cek2) + + if bytes.Compare(out0, expected0) != 0 { + t.Error("output 0 not as expected, got", out0, "wanted", expected0) + } + + if bytes.Compare(out1, expected1) != 0 { + t.Error("output 1 not as expected, got", out1, "wanted", expected1) + } + + if bytes.Compare(out2, expected2) != 0 { + t.Error("output 2 not as expected, got", out2, "wanted", expected2) + } + + unwrap0, _ := KeyUnwrap(block0, out0) + unwrap1, _ := KeyUnwrap(block1, out1) + unwrap2, _ := KeyUnwrap(block2, out2) + + if bytes.Compare(unwrap0, cek0) != 0 { + t.Error("key unwrap did not return original input, got", unwrap0, "wanted", cek0) + } + + if bytes.Compare(unwrap1, cek1) != 0 { + t.Error("key unwrap did not return original input, got", unwrap1, "wanted", cek1) + } + + if bytes.Compare(unwrap2, cek2) != 0 { + t.Error("key unwrap did not return original input, got", unwrap2, "wanted", cek2) + } +} + +func TestAesKeyWrapInvalid(t *testing.T) { + kek, _ := hex.DecodeString("000102030405060708090A0B0C0D0E0F") + + // Invalid unwrap input (bit flipped) + input0, _ := hex.DecodeString("1EA68C1A8112B447AEF34BD8FB5A7B828D3E862371D2CFE5") + + block, _ := aes.NewCipher(kek) + + _, err := KeyUnwrap(block, input0) + if err == nil { + t.Error("key unwrap failed to detect invalid input") + } + + // Invalid unwrap input (truncated) + input1, _ := hex.DecodeString("1EA68C1A8112B447AEF34BD8FB5A7B828D3E862371D2CF") + + _, err = KeyUnwrap(block, input1) + if err == nil { + t.Error("key unwrap failed to detect truncated input") + } + + // Invalid wrap input (not multiple of 8) + input2, _ := hex.DecodeString("0123456789ABCD") + + _, err = KeyWrap(block, input2) + if err == nil { + t.Error("key wrap accepted invalid input") + } + +} + +func BenchmarkAesKeyWrap(b *testing.B) { + kek, _ := hex.DecodeString("000102030405060708090A0B0C0D0E0F") + key, _ := hex.DecodeString("FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF") + + block, _ := aes.NewCipher(kek) + + b.ResetTimer() + for i := 0; i < b.N; i++ { + KeyWrap(block, key) + } +} + +func BenchmarkAesKeyUnwrap(b *testing.B) { + kek, _ := hex.DecodeString("000102030405060708090A0B0C0D0E0F") + input, _ := hex.DecodeString("1FA68B0A8112B447AEF34BD8FB5A7B829D3E862371D2CFE5") + + block, _ := aes.NewCipher(kek) + + b.ResetTimer() + for i := 0; i < b.N; i++ { + KeyUnwrap(block, input) + } +} diff --git 
a/vendor/github.com/docker/distribution/vendor/rsc.io/letsencrypt/vendor/gopkg.in/square/go-jose.v1/crypter.go b/vendor/github.com/docker/distribution/vendor/rsc.io/letsencrypt/vendor/gopkg.in/square/go-jose.v1/crypter.go new file mode 100644 index 000000000..f61af2c0e --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/rsc.io/letsencrypt/vendor/gopkg.in/square/go-jose.v1/crypter.go @@ -0,0 +1,349 @@ +/*- + * Copyright 2014 Square Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package jose + +import ( + "crypto/ecdsa" + "crypto/rsa" + "fmt" + "reflect" +) + +// Encrypter represents an encrypter which produces an encrypted JWE object. +type Encrypter interface { + Encrypt(plaintext []byte) (*JsonWebEncryption, error) + EncryptWithAuthData(plaintext []byte, aad []byte) (*JsonWebEncryption, error) + SetCompression(alg CompressionAlgorithm) +} + +// MultiEncrypter represents an encrypter which supports multiple recipients. +type MultiEncrypter interface { + Encrypt(plaintext []byte) (*JsonWebEncryption, error) + EncryptWithAuthData(plaintext []byte, aad []byte) (*JsonWebEncryption, error) + SetCompression(alg CompressionAlgorithm) + AddRecipient(alg KeyAlgorithm, encryptionKey interface{}) error +} + +// A generic content cipher +type contentCipher interface { + keySize() int + encrypt(cek []byte, aad, plaintext []byte) (*aeadParts, error) + decrypt(cek []byte, aad []byte, parts *aeadParts) ([]byte, error) +} + +// A key generator (for generating/getting a CEK) +type keyGenerator interface { + keySize() int + genKey() ([]byte, rawHeader, error) +} + +// A generic key encrypter +type keyEncrypter interface { + encryptKey(cek []byte, alg KeyAlgorithm) (recipientInfo, error) // Encrypt a key +} + +// A generic key decrypter +type keyDecrypter interface { + decryptKey(headers rawHeader, recipient *recipientInfo, generator keyGenerator) ([]byte, error) // Decrypt a key +} + +// A generic encrypter based on the given key encrypter and content cipher. +type genericEncrypter struct { + contentAlg ContentEncryption + compressionAlg CompressionAlgorithm + cipher contentCipher + recipients []recipientKeyInfo + keyGenerator keyGenerator +} + +type recipientKeyInfo struct { + keyID string + keyAlg KeyAlgorithm + keyEncrypter keyEncrypter +} + +// SetCompression sets a compression algorithm to be applied before encryption. 
+func (ctx *genericEncrypter) SetCompression(compressionAlg CompressionAlgorithm) { + ctx.compressionAlg = compressionAlg +} + +// NewEncrypter creates an appropriate encrypter based on the key type +func NewEncrypter(alg KeyAlgorithm, enc ContentEncryption, encryptionKey interface{}) (Encrypter, error) { + encrypter := &genericEncrypter{ + contentAlg: enc, + compressionAlg: NONE, + recipients: []recipientKeyInfo{}, + cipher: getContentCipher(enc), + } + + if encrypter.cipher == nil { + return nil, ErrUnsupportedAlgorithm + } + + var keyID string + var rawKey interface{} + switch encryptionKey := encryptionKey.(type) { + case *JsonWebKey: + keyID = encryptionKey.KeyID + rawKey = encryptionKey.Key + default: + rawKey = encryptionKey + } + + switch alg { + case DIRECT: + // Direct encryption mode must be treated differently + if reflect.TypeOf(rawKey) != reflect.TypeOf([]byte{}) { + return nil, ErrUnsupportedKeyType + } + encrypter.keyGenerator = staticKeyGenerator{ + key: rawKey.([]byte), + } + recipient, _ := newSymmetricRecipient(alg, rawKey.([]byte)) + if keyID != "" { + recipient.keyID = keyID + } + encrypter.recipients = []recipientKeyInfo{recipient} + return encrypter, nil + case ECDH_ES: + // ECDH-ES (w/o key wrapping) is similar to DIRECT mode + typeOf := reflect.TypeOf(rawKey) + if typeOf != reflect.TypeOf(&ecdsa.PublicKey{}) { + return nil, ErrUnsupportedKeyType + } + encrypter.keyGenerator = ecKeyGenerator{ + size: encrypter.cipher.keySize(), + algID: string(enc), + publicKey: rawKey.(*ecdsa.PublicKey), + } + recipient, _ := newECDHRecipient(alg, rawKey.(*ecdsa.PublicKey)) + if keyID != "" { + recipient.keyID = keyID + } + encrypter.recipients = []recipientKeyInfo{recipient} + return encrypter, nil + default: + // Can just add a standard recipient + encrypter.keyGenerator = randomKeyGenerator{ + size: encrypter.cipher.keySize(), + } + err := encrypter.AddRecipient(alg, encryptionKey) + return encrypter, err + } +} + +// NewMultiEncrypter creates a multi-encrypter based on the given parameters +func NewMultiEncrypter(enc ContentEncryption) (MultiEncrypter, error) { + cipher := getContentCipher(enc) + + if cipher == nil { + return nil, ErrUnsupportedAlgorithm + } + + encrypter := &genericEncrypter{ + contentAlg: enc, + compressionAlg: NONE, + recipients: []recipientKeyInfo{}, + cipher: cipher, + keyGenerator: randomKeyGenerator{ + size: cipher.keySize(), + }, + } + + return encrypter, nil +} + +func (ctx *genericEncrypter) AddRecipient(alg KeyAlgorithm, encryptionKey interface{}) (err error) { + var recipient recipientKeyInfo + + switch alg { + case DIRECT, ECDH_ES: + return fmt.Errorf("square/go-jose: key algorithm '%s' not supported in multi-recipient mode", alg) + } + + recipient, err = makeJWERecipient(alg, encryptionKey) + + if err == nil { + ctx.recipients = append(ctx.recipients, recipient) + } + return err +} + +func makeJWERecipient(alg KeyAlgorithm, encryptionKey interface{}) (recipientKeyInfo, error) { + switch encryptionKey := encryptionKey.(type) { + case *rsa.PublicKey: + return newRSARecipient(alg, encryptionKey) + case *ecdsa.PublicKey: + return newECDHRecipient(alg, encryptionKey) + case []byte: + return newSymmetricRecipient(alg, encryptionKey) + case *JsonWebKey: + recipient, err := makeJWERecipient(alg, encryptionKey.Key) + if err == nil && encryptionKey.KeyID != "" { + recipient.keyID = encryptionKey.KeyID + } + return recipient, err + default: + return recipientKeyInfo{}, ErrUnsupportedKeyType + } +} + +// newDecrypter creates an appropriate decrypter based on 
the key type
+func newDecrypter(decryptionKey interface{}) (keyDecrypter, error) {
+	switch decryptionKey := decryptionKey.(type) {
+	case *rsa.PrivateKey:
+		return &rsaDecrypterSigner{
+			privateKey: decryptionKey,
+		}, nil
+	case *ecdsa.PrivateKey:
+		return &ecDecrypterSigner{
+			privateKey: decryptionKey,
+		}, nil
+	case []byte:
+		return &symmetricKeyCipher{
+			key: decryptionKey,
+		}, nil
+	case *JsonWebKey:
+		return newDecrypter(decryptionKey.Key)
+	default:
+		return nil, ErrUnsupportedKeyType
+	}
+}
+
+// Implementation of encrypt method producing a JWE object.
+func (ctx *genericEncrypter) Encrypt(plaintext []byte) (*JsonWebEncryption, error) {
+	return ctx.EncryptWithAuthData(plaintext, nil)
+}
+
+// Implementation of encrypt method producing a JWE object.
+func (ctx *genericEncrypter) EncryptWithAuthData(plaintext, aad []byte) (*JsonWebEncryption, error) {
+	obj := &JsonWebEncryption{}
+	obj.aad = aad
+
+	obj.protected = &rawHeader{
+		Enc: ctx.contentAlg,
+	}
+	obj.recipients = make([]recipientInfo, len(ctx.recipients))
+
+	if len(ctx.recipients) == 0 {
+		return nil, fmt.Errorf("square/go-jose: no recipients to encrypt to")
+	}
+
+	cek, headers, err := ctx.keyGenerator.genKey()
+	if err != nil {
+		return nil, err
+	}
+
+	obj.protected.merge(&headers)
+
+	for i, info := range ctx.recipients {
+		recipient, err := info.keyEncrypter.encryptKey(cek, info.keyAlg)
+		if err != nil {
+			return nil, err
+		}
+
+		recipient.header.Alg = string(info.keyAlg)
+		if info.keyID != "" {
+			recipient.header.Kid = info.keyID
+		}
+		obj.recipients[i] = recipient
+	}
+
+	if len(ctx.recipients) == 1 {
+		// Move per-recipient headers into main protected header if there's
+		// only a single recipient.
+		obj.protected.merge(obj.recipients[0].header)
+		obj.recipients[0].header = nil
+	}
+
+	if ctx.compressionAlg != NONE {
+		plaintext, err = compress(ctx.compressionAlg, plaintext)
+		if err != nil {
+			return nil, err
+		}
+
+		obj.protected.Zip = ctx.compressionAlg
+	}
+
+	authData := obj.computeAuthData()
+	parts, err := ctx.cipher.encrypt(cek, authData, plaintext)
+	if err != nil {
+		return nil, err
+	}
+
+	obj.iv = parts.iv
+	obj.ciphertext = parts.ciphertext
+	obj.tag = parts.tag
+
+	return obj, nil
+}
+
+// Decrypt and validate the object and return the plaintext.
+func (obj JsonWebEncryption) Decrypt(decryptionKey interface{}) ([]byte, error) {
+	headers := obj.mergedHeaders(nil)
+
+	if len(headers.Crit) > 0 {
+		return nil, fmt.Errorf("square/go-jose: unsupported crit header")
+	}
+
+	decrypter, err := newDecrypter(decryptionKey)
+	if err != nil {
+		return nil, err
+	}
+
+	cipher := getContentCipher(headers.Enc)
+	if cipher == nil {
+		return nil, fmt.Errorf("square/go-jose: unsupported enc value '%s'", string(headers.Enc))
+	}
+
+	generator := randomKeyGenerator{
+		size: cipher.keySize(),
+	}
+
+	parts := &aeadParts{
+		iv:         obj.iv,
+		ciphertext: obj.ciphertext,
+		tag:        obj.tag,
+	}
+
+	authData := obj.computeAuthData()
+
+	var plaintext []byte
+	for _, recipient := range obj.recipients {
+		recipientHeaders := obj.mergedHeaders(&recipient)
+
+		cek, err := decrypter.decryptKey(recipientHeaders, &recipient, generator)
+		if err == nil {
+			// Found a valid CEK -- let's try to decrypt.
+			plaintext, err = cipher.decrypt(cek, authData, parts)
+			if err == nil {
+				break
+			}
+		}
+	}
+
+	if plaintext == nil {
+		return nil, ErrCryptoFailure
+	}
+
+	// The "zip" header parameter may only be present in the protected header.
+ if obj.protected.Zip != "" { + plaintext, err = decompress(obj.protected.Zip, plaintext) + } + + return plaintext, err +} diff --git a/vendor/github.com/docker/distribution/vendor/rsc.io/letsencrypt/vendor/gopkg.in/square/go-jose.v1/crypter_test.go b/vendor/github.com/docker/distribution/vendor/rsc.io/letsencrypt/vendor/gopkg.in/square/go-jose.v1/crypter_test.go new file mode 100644 index 000000000..86b8fc0a9 --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/rsc.io/letsencrypt/vendor/gopkg.in/square/go-jose.v1/crypter_test.go @@ -0,0 +1,784 @@ +/*- + * Copyright 2014 Square Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package jose + +import ( + "bytes" + "crypto/ecdsa" + "crypto/elliptic" + "crypto/rand" + "crypto/rsa" + "fmt" + "io" + "testing" +) + +// We generate only a single RSA and EC key for testing, speeds up tests. +var rsaTestKey, _ = rsa.GenerateKey(rand.Reader, 2048) + +var ecTestKey256, _ = ecdsa.GenerateKey(elliptic.P256(), rand.Reader) +var ecTestKey384, _ = ecdsa.GenerateKey(elliptic.P384(), rand.Reader) +var ecTestKey521, _ = ecdsa.GenerateKey(elliptic.P521(), rand.Reader) + +func RoundtripJWE(keyAlg KeyAlgorithm, encAlg ContentEncryption, compressionAlg CompressionAlgorithm, serializer func(*JsonWebEncryption) (string, error), corrupter func(*JsonWebEncryption) bool, aad []byte, encryptionKey interface{}, decryptionKey interface{}) error { + enc, err := NewEncrypter(keyAlg, encAlg, encryptionKey) + if err != nil { + return fmt.Errorf("error on new encrypter: %s", err) + } + + enc.SetCompression(compressionAlg) + + input := []byte("Lorem ipsum dolor sit amet") + obj, err := enc.EncryptWithAuthData(input, aad) + if err != nil { + return fmt.Errorf("error in encrypt: %s", err) + } + + msg, err := serializer(obj) + if err != nil { + return fmt.Errorf("error in serializer: %s", err) + } + + parsed, err := ParseEncrypted(msg) + if err != nil { + return fmt.Errorf("error in parse: %s, on msg '%s'", err, msg) + } + + // (Maybe) mangle object + skip := corrupter(parsed) + if skip { + return fmt.Errorf("corrupter indicated message should be skipped") + } + + if bytes.Compare(parsed.GetAuthData(), aad) != 0 { + return fmt.Errorf("auth data in parsed object does not match") + } + + output, err := parsed.Decrypt(decryptionKey) + if err != nil { + return fmt.Errorf("error on decrypt: %s", err) + } + + if bytes.Compare(input, output) != 0 { + return fmt.Errorf("Decrypted output does not match input, got '%s' but wanted '%s'", output, input) + } + + return nil +} + +func TestRoundtripsJWE(t *testing.T) { + // Test matrix + keyAlgs := []KeyAlgorithm{ + DIRECT, ECDH_ES, ECDH_ES_A128KW, ECDH_ES_A192KW, ECDH_ES_A256KW, A128KW, A192KW, A256KW, + RSA1_5, RSA_OAEP, RSA_OAEP_256, A128GCMKW, A192GCMKW, A256GCMKW} + encAlgs := []ContentEncryption{A128GCM, A192GCM, A256GCM, A128CBC_HS256, A192CBC_HS384, A256CBC_HS512} + zipAlgs := []CompressionAlgorithm{NONE, DEFLATE} + + serializers := []func(*JsonWebEncryption) (string, error){ + func(obj 
*JsonWebEncryption) (string, error) { return obj.CompactSerialize() }, + func(obj *JsonWebEncryption) (string, error) { return obj.FullSerialize(), nil }, + } + + corrupter := func(obj *JsonWebEncryption) bool { return false } + + // Note: can't use AAD with compact serialization + aads := [][]byte{ + nil, + []byte("Ut enim ad minim veniam"), + } + + // Test all different configurations + for _, alg := range keyAlgs { + for _, enc := range encAlgs { + for _, key := range generateTestKeys(alg, enc) { + for _, zip := range zipAlgs { + for i, serializer := range serializers { + err := RoundtripJWE(alg, enc, zip, serializer, corrupter, aads[i], key.enc, key.dec) + if err != nil { + t.Error(err, alg, enc, zip, i) + } + } + } + } + } + } +} + +func TestRoundtripsJWECorrupted(t *testing.T) { + // Test matrix + keyAlgs := []KeyAlgorithm{DIRECT, ECDH_ES, ECDH_ES_A128KW, A128KW, RSA1_5, RSA_OAEP, RSA_OAEP_256, A128GCMKW} + encAlgs := []ContentEncryption{A128GCM, A192GCM, A256GCM, A128CBC_HS256, A192CBC_HS384, A256CBC_HS512} + zipAlgs := []CompressionAlgorithm{NONE, DEFLATE} + + serializers := []func(*JsonWebEncryption) (string, error){ + func(obj *JsonWebEncryption) (string, error) { return obj.CompactSerialize() }, + func(obj *JsonWebEncryption) (string, error) { return obj.FullSerialize(), nil }, + } + + bitflip := func(slice []byte) bool { + if len(slice) > 0 { + slice[0] ^= 0xFF + return false + } + return true + } + + corrupters := []func(*JsonWebEncryption) bool{ + func(obj *JsonWebEncryption) bool { + // Set invalid ciphertext + return bitflip(obj.ciphertext) + }, + func(obj *JsonWebEncryption) bool { + // Set invalid auth tag + return bitflip(obj.tag) + }, + func(obj *JsonWebEncryption) bool { + // Set invalid AAD + return bitflip(obj.aad) + }, + func(obj *JsonWebEncryption) bool { + // Mess with encrypted key + return bitflip(obj.recipients[0].encryptedKey) + }, + func(obj *JsonWebEncryption) bool { + // Mess with GCM-KW auth tag + return bitflip(obj.protected.Tag.bytes()) + }, + } + + // Note: can't use AAD with compact serialization + aads := [][]byte{ + nil, + []byte("Ut enim ad minim veniam"), + } + + // Test all different configurations + for _, alg := range keyAlgs { + for _, enc := range encAlgs { + for _, key := range generateTestKeys(alg, enc) { + for _, zip := range zipAlgs { + for i, serializer := range serializers { + for j, corrupter := range corrupters { + err := RoundtripJWE(alg, enc, zip, serializer, corrupter, aads[i], key.enc, key.dec) + if err == nil { + t.Error("failed to detect corrupt data", err, alg, enc, zip, i, j) + } + } + } + } + } + } + } +} + +func TestEncrypterWithJWKAndKeyID(t *testing.T) { + enc, err := NewEncrypter(A128KW, A128GCM, &JsonWebKey{ + KeyID: "test-id", + Key: []byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15}, + }) + if err != nil { + t.Error(err) + } + + ciphertext, _ := enc.Encrypt([]byte("Lorem ipsum dolor sit amet")) + + serialized1, _ := ciphertext.CompactSerialize() + serialized2 := ciphertext.FullSerialize() + + parsed1, _ := ParseEncrypted(serialized1) + parsed2, _ := ParseEncrypted(serialized2) + + if parsed1.Header.KeyID != "test-id" { + t.Errorf("expected message to have key id from JWK, but found '%s' instead", parsed1.Header.KeyID) + } + if parsed2.Header.KeyID != "test-id" { + t.Errorf("expected message to have key id from JWK, but found '%s' instead", parsed2.Header.KeyID) + } +} + +func TestEncrypterWithBrokenRand(t *testing.T) { + keyAlgs := []KeyAlgorithm{ECDH_ES_A128KW, A128KW, RSA1_5, RSA_OAEP, RSA_OAEP_256, 
A128GCMKW} + encAlgs := []ContentEncryption{A128GCM, A192GCM, A256GCM, A128CBC_HS256, A192CBC_HS384, A256CBC_HS512} + + serializer := func(obj *JsonWebEncryption) (string, error) { return obj.CompactSerialize() } + corrupter := func(obj *JsonWebEncryption) bool { return false } + + // Break rand reader + readers := []func() io.Reader{ + // Totally broken + func() io.Reader { return bytes.NewReader([]byte{}) }, + // Not enough bytes + func() io.Reader { return io.LimitReader(rand.Reader, 20) }, + } + + defer resetRandReader() + + for _, alg := range keyAlgs { + for _, enc := range encAlgs { + for _, key := range generateTestKeys(alg, enc) { + for i, getReader := range readers { + randReader = getReader() + err := RoundtripJWE(alg, enc, NONE, serializer, corrupter, nil, key.enc, key.dec) + if err == nil { + t.Error("encrypter should fail if rand is broken", i) + } + } + } + } + } +} + +func TestNewEncrypterErrors(t *testing.T) { + _, err := NewEncrypter("XYZ", "XYZ", nil) + if err == nil { + t.Error("was able to instantiate encrypter with invalid cipher") + } + + _, err = NewMultiEncrypter("XYZ") + if err == nil { + t.Error("was able to instantiate multi-encrypter with invalid cipher") + } + + _, err = NewEncrypter(DIRECT, A128GCM, nil) + if err == nil { + t.Error("was able to instantiate encrypter with invalid direct key") + } + + _, err = NewEncrypter(ECDH_ES, A128GCM, nil) + if err == nil { + t.Error("was able to instantiate encrypter with invalid EC key") + } +} + +func TestMultiRecipientJWE(t *testing.T) { + enc, err := NewMultiEncrypter(A128GCM) + if err != nil { + panic(err) + } + + err = enc.AddRecipient(RSA_OAEP, &rsaTestKey.PublicKey) + if err != nil { + t.Error("error when adding RSA recipient", err) + } + + sharedKey := []byte{ + 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, + 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, + } + + err = enc.AddRecipient(A256GCMKW, sharedKey) + if err != nil { + t.Error("error when adding AES recipient: ", err) + return + } + + input := []byte("Lorem ipsum dolor sit amet") + obj, err := enc.Encrypt(input) + if err != nil { + t.Error("error in encrypt: ", err) + return + } + + msg := obj.FullSerialize() + + parsed, err := ParseEncrypted(msg) + if err != nil { + t.Error("error in parse: ", err) + return + } + + output, err := parsed.Decrypt(rsaTestKey) + if err != nil { + t.Error("error on decrypt with RSA: ", err) + return + } + + if bytes.Compare(input, output) != 0 { + t.Error("Decrypted output does not match input: ", output, input) + return + } + + output, err = parsed.Decrypt(sharedKey) + if err != nil { + t.Error("error on decrypt with AES: ", err) + return + } + + if bytes.Compare(input, output) != 0 { + t.Error("Decrypted output does not match input", output, input) + return + } +} + +func TestMultiRecipientErrors(t *testing.T) { + enc, err := NewMultiEncrypter(A128GCM) + if err != nil { + panic(err) + } + + input := []byte("Lorem ipsum dolor sit amet") + _, err = enc.Encrypt(input) + if err == nil { + t.Error("should fail when encrypting to zero recipients") + } + + err = enc.AddRecipient(DIRECT, nil) + if err == nil { + t.Error("should reject DIRECT mode when encrypting to multiple recipients") + } + + err = enc.AddRecipient(ECDH_ES, nil) + if err == nil { + t.Error("should reject ECDH_ES mode when encrypting to multiple recipients") + } + + err = enc.AddRecipient(RSA1_5, nil) + if err == nil { + t.Error("should reject invalid recipient key") + } +} + +type testKey struct { + enc, dec interface{} +} + +func 
symmetricTestKey(size int) []testKey { + key, _, _ := randomKeyGenerator{size: size}.genKey() + + return []testKey{ + testKey{ + enc: key, + dec: key, + }, + testKey{ + enc: &JsonWebKey{KeyID: "test", Key: key}, + dec: &JsonWebKey{KeyID: "test", Key: key}, + }, + } +} + +func generateTestKeys(keyAlg KeyAlgorithm, encAlg ContentEncryption) []testKey { + switch keyAlg { + case DIRECT: + return symmetricTestKey(getContentCipher(encAlg).keySize()) + case ECDH_ES, ECDH_ES_A128KW, ECDH_ES_A192KW, ECDH_ES_A256KW: + return []testKey{ + testKey{ + dec: ecTestKey256, + enc: &ecTestKey256.PublicKey, + }, + testKey{ + dec: ecTestKey384, + enc: &ecTestKey384.PublicKey, + }, + testKey{ + dec: ecTestKey521, + enc: &ecTestKey521.PublicKey, + }, + testKey{ + dec: &JsonWebKey{KeyID: "test", Key: ecTestKey256}, + enc: &JsonWebKey{KeyID: "test", Key: &ecTestKey256.PublicKey}, + }, + } + case A128GCMKW, A128KW: + return symmetricTestKey(16) + case A192GCMKW, A192KW: + return symmetricTestKey(24) + case A256GCMKW, A256KW: + return symmetricTestKey(32) + case RSA1_5, RSA_OAEP, RSA_OAEP_256: + return []testKey{testKey{ + dec: rsaTestKey, + enc: &rsaTestKey.PublicKey, + }} + } + + panic("Must update test case") +} + +func RunRoundtripsJWE(b *testing.B, alg KeyAlgorithm, enc ContentEncryption, zip CompressionAlgorithm, priv, pub interface{}) { + serializer := func(obj *JsonWebEncryption) (string, error) { + return obj.CompactSerialize() + } + + corrupter := func(obj *JsonWebEncryption) bool { return false } + + b.ResetTimer() + for i := 0; i < b.N; i++ { + err := RoundtripJWE(alg, enc, zip, serializer, corrupter, nil, pub, priv) + if err != nil { + b.Error(err) + } + } +} + +var ( + chunks = map[string][]byte{ + "1B": make([]byte, 1), + "64B": make([]byte, 64), + "1KB": make([]byte, 1024), + "64KB": make([]byte, 65536), + "1MB": make([]byte, 1048576), + "64MB": make([]byte, 67108864), + } + + symKey, _, _ = randomKeyGenerator{size: 32}.genKey() + + encrypters = map[string]Encrypter{ + "OAEPAndGCM": mustEncrypter(RSA_OAEP, A128GCM, &rsaTestKey.PublicKey), + "PKCSAndGCM": mustEncrypter(RSA1_5, A128GCM, &rsaTestKey.PublicKey), + "OAEPAndCBC": mustEncrypter(RSA_OAEP, A128CBC_HS256, &rsaTestKey.PublicKey), + "PKCSAndCBC": mustEncrypter(RSA1_5, A128CBC_HS256, &rsaTestKey.PublicKey), + "DirectGCM128": mustEncrypter(DIRECT, A128GCM, symKey), + "DirectCBC128": mustEncrypter(DIRECT, A128CBC_HS256, symKey), + "DirectGCM256": mustEncrypter(DIRECT, A256GCM, symKey), + "DirectCBC256": mustEncrypter(DIRECT, A256CBC_HS512, symKey), + "AESKWAndGCM128": mustEncrypter(A128KW, A128GCM, symKey), + "AESKWAndCBC256": mustEncrypter(A256KW, A256GCM, symKey), + "ECDHOnP256AndGCM128": mustEncrypter(ECDH_ES, A128GCM, &ecTestKey256.PublicKey), + "ECDHOnP384AndGCM128": mustEncrypter(ECDH_ES, A128GCM, &ecTestKey384.PublicKey), + "ECDHOnP521AndGCM128": mustEncrypter(ECDH_ES, A128GCM, &ecTestKey521.PublicKey), + } +) + +func BenchmarkEncrypt1BWithOAEPAndGCM(b *testing.B) { benchEncrypt("1B", "OAEPAndGCM", b) } +func BenchmarkEncrypt64BWithOAEPAndGCM(b *testing.B) { benchEncrypt("64B", "OAEPAndGCM", b) } +func BenchmarkEncrypt1KBWithOAEPAndGCM(b *testing.B) { benchEncrypt("1KB", "OAEPAndGCM", b) } +func BenchmarkEncrypt64KBWithOAEPAndGCM(b *testing.B) { benchEncrypt("64KB", "OAEPAndGCM", b) } +func BenchmarkEncrypt1MBWithOAEPAndGCM(b *testing.B) { benchEncrypt("1MB", "OAEPAndGCM", b) } +func BenchmarkEncrypt64MBWithOAEPAndGCM(b *testing.B) { benchEncrypt("64MB", "OAEPAndGCM", b) } + +func BenchmarkEncrypt1BWithPKCSAndGCM(b *testing.B) { 
benchEncrypt("1B", "PKCSAndGCM", b) } +func BenchmarkEncrypt64BWithPKCSAndGCM(b *testing.B) { benchEncrypt("64B", "PKCSAndGCM", b) } +func BenchmarkEncrypt1KBWithPKCSAndGCM(b *testing.B) { benchEncrypt("1KB", "PKCSAndGCM", b) } +func BenchmarkEncrypt64KBWithPKCSAndGCM(b *testing.B) { benchEncrypt("64KB", "PKCSAndGCM", b) } +func BenchmarkEncrypt1MBWithPKCSAndGCM(b *testing.B) { benchEncrypt("1MB", "PKCSAndGCM", b) } +func BenchmarkEncrypt64MBWithPKCSAndGCM(b *testing.B) { benchEncrypt("64MB", "PKCSAndGCM", b) } + +func BenchmarkEncrypt1BWithOAEPAndCBC(b *testing.B) { benchEncrypt("1B", "OAEPAndCBC", b) } +func BenchmarkEncrypt64BWithOAEPAndCBC(b *testing.B) { benchEncrypt("64B", "OAEPAndCBC", b) } +func BenchmarkEncrypt1KBWithOAEPAndCBC(b *testing.B) { benchEncrypt("1KB", "OAEPAndCBC", b) } +func BenchmarkEncrypt64KBWithOAEPAndCBC(b *testing.B) { benchEncrypt("64KB", "OAEPAndCBC", b) } +func BenchmarkEncrypt1MBWithOAEPAndCBC(b *testing.B) { benchEncrypt("1MB", "OAEPAndCBC", b) } +func BenchmarkEncrypt64MBWithOAEPAndCBC(b *testing.B) { benchEncrypt("64MB", "OAEPAndCBC", b) } + +func BenchmarkEncrypt1BWithPKCSAndCBC(b *testing.B) { benchEncrypt("1B", "PKCSAndCBC", b) } +func BenchmarkEncrypt64BWithPKCSAndCBC(b *testing.B) { benchEncrypt("64B", "PKCSAndCBC", b) } +func BenchmarkEncrypt1KBWithPKCSAndCBC(b *testing.B) { benchEncrypt("1KB", "PKCSAndCBC", b) } +func BenchmarkEncrypt64KBWithPKCSAndCBC(b *testing.B) { benchEncrypt("64KB", "PKCSAndCBC", b) } +func BenchmarkEncrypt1MBWithPKCSAndCBC(b *testing.B) { benchEncrypt("1MB", "PKCSAndCBC", b) } +func BenchmarkEncrypt64MBWithPKCSAndCBC(b *testing.B) { benchEncrypt("64MB", "PKCSAndCBC", b) } + +func BenchmarkEncrypt1BWithDirectGCM128(b *testing.B) { benchEncrypt("1B", "DirectGCM128", b) } +func BenchmarkEncrypt64BWithDirectGCM128(b *testing.B) { benchEncrypt("64B", "DirectGCM128", b) } +func BenchmarkEncrypt1KBWithDirectGCM128(b *testing.B) { benchEncrypt("1KB", "DirectGCM128", b) } +func BenchmarkEncrypt64KBWithDirectGCM128(b *testing.B) { benchEncrypt("64KB", "DirectGCM128", b) } +func BenchmarkEncrypt1MBWithDirectGCM128(b *testing.B) { benchEncrypt("1MB", "DirectGCM128", b) } +func BenchmarkEncrypt64MBWithDirectGCM128(b *testing.B) { benchEncrypt("64MB", "DirectGCM128", b) } + +func BenchmarkEncrypt1BWithDirectCBC128(b *testing.B) { benchEncrypt("1B", "DirectCBC128", b) } +func BenchmarkEncrypt64BWithDirectCBC128(b *testing.B) { benchEncrypt("64B", "DirectCBC128", b) } +func BenchmarkEncrypt1KBWithDirectCBC128(b *testing.B) { benchEncrypt("1KB", "DirectCBC128", b) } +func BenchmarkEncrypt64KBWithDirectCBC128(b *testing.B) { benchEncrypt("64KB", "DirectCBC128", b) } +func BenchmarkEncrypt1MBWithDirectCBC128(b *testing.B) { benchEncrypt("1MB", "DirectCBC128", b) } +func BenchmarkEncrypt64MBWithDirectCBC128(b *testing.B) { benchEncrypt("64MB", "DirectCBC128", b) } + +func BenchmarkEncrypt1BWithDirectGCM256(b *testing.B) { benchEncrypt("1B", "DirectGCM256", b) } +func BenchmarkEncrypt64BWithDirectGCM256(b *testing.B) { benchEncrypt("64B", "DirectGCM256", b) } +func BenchmarkEncrypt1KBWithDirectGCM256(b *testing.B) { benchEncrypt("1KB", "DirectGCM256", b) } +func BenchmarkEncrypt64KBWithDirectGCM256(b *testing.B) { benchEncrypt("64KB", "DirectGCM256", b) } +func BenchmarkEncrypt1MBWithDirectGCM256(b *testing.B) { benchEncrypt("1MB", "DirectGCM256", b) } +func BenchmarkEncrypt64MBWithDirectGCM256(b *testing.B) { benchEncrypt("64MB", "DirectGCM256", b) } + +func BenchmarkEncrypt1BWithDirectCBC256(b *testing.B) { benchEncrypt("1B", "DirectCBC256", 
b) } +func BenchmarkEncrypt64BWithDirectCBC256(b *testing.B) { benchEncrypt("64B", "DirectCBC256", b) } +func BenchmarkEncrypt1KBWithDirectCBC256(b *testing.B) { benchEncrypt("1KB", "DirectCBC256", b) } +func BenchmarkEncrypt64KBWithDirectCBC256(b *testing.B) { benchEncrypt("64KB", "DirectCBC256", b) } +func BenchmarkEncrypt1MBWithDirectCBC256(b *testing.B) { benchEncrypt("1MB", "DirectCBC256", b) } +func BenchmarkEncrypt64MBWithDirectCBC256(b *testing.B) { benchEncrypt("64MB", "DirectCBC256", b) } + +func BenchmarkEncrypt1BWithAESKWAndGCM128(b *testing.B) { benchEncrypt("1B", "AESKWAndGCM128", b) } +func BenchmarkEncrypt64BWithAESKWAndGCM128(b *testing.B) { benchEncrypt("64B", "AESKWAndGCM128", b) } +func BenchmarkEncrypt1KBWithAESKWAndGCM128(b *testing.B) { benchEncrypt("1KB", "AESKWAndGCM128", b) } +func BenchmarkEncrypt64KBWithAESKWAndGCM128(b *testing.B) { benchEncrypt("64KB", "AESKWAndGCM128", b) } +func BenchmarkEncrypt1MBWithAESKWAndGCM128(b *testing.B) { benchEncrypt("1MB", "AESKWAndGCM128", b) } +func BenchmarkEncrypt64MBWithAESKWAndGCM128(b *testing.B) { benchEncrypt("64MB", "AESKWAndGCM128", b) } + +func BenchmarkEncrypt1BWithAESKWAndCBC256(b *testing.B) { benchEncrypt("1B", "AESKWAndCBC256", b) } +func BenchmarkEncrypt64BWithAESKWAndCBC256(b *testing.B) { benchEncrypt("64B", "AESKWAndCBC256", b) } +func BenchmarkEncrypt1KBWithAESKWAndCBC256(b *testing.B) { benchEncrypt("1KB", "AESKWAndCBC256", b) } +func BenchmarkEncrypt64KBWithAESKWAndCBC256(b *testing.B) { benchEncrypt("64KB", "AESKWAndCBC256", b) } +func BenchmarkEncrypt1MBWithAESKWAndCBC256(b *testing.B) { benchEncrypt("1MB", "AESKWAndCBC256", b) } +func BenchmarkEncrypt64MBWithAESKWAndCBC256(b *testing.B) { benchEncrypt("64MB", "AESKWAndCBC256", b) } + +func BenchmarkEncrypt1BWithECDHOnP256AndGCM128(b *testing.B) { + benchEncrypt("1B", "ECDHOnP256AndGCM128", b) +} +func BenchmarkEncrypt64BWithECDHOnP256AndGCM128(b *testing.B) { + benchEncrypt("64B", "ECDHOnP256AndGCM128", b) +} +func BenchmarkEncrypt1KBWithECDHOnP256AndGCM128(b *testing.B) { + benchEncrypt("1KB", "ECDHOnP256AndGCM128", b) +} +func BenchmarkEncrypt64KBWithECDHOnP256AndGCM128(b *testing.B) { + benchEncrypt("64KB", "ECDHOnP256AndGCM128", b) +} +func BenchmarkEncrypt1MBWithECDHOnP256AndGCM128(b *testing.B) { + benchEncrypt("1MB", "ECDHOnP256AndGCM128", b) +} +func BenchmarkEncrypt64MBWithECDHOnP256AndGCM128(b *testing.B) { + benchEncrypt("64MB", "ECDHOnP256AndGCM128", b) +} + +func BenchmarkEncrypt1BWithECDHOnP384AndGCM128(b *testing.B) { + benchEncrypt("1B", "ECDHOnP384AndGCM128", b) +} +func BenchmarkEncrypt64BWithECDHOnP384AndGCM128(b *testing.B) { + benchEncrypt("64B", "ECDHOnP384AndGCM128", b) +} +func BenchmarkEncrypt1KBWithECDHOnP384AndGCM128(b *testing.B) { + benchEncrypt("1KB", "ECDHOnP384AndGCM128", b) +} +func BenchmarkEncrypt64KBWithECDHOnP384AndGCM128(b *testing.B) { + benchEncrypt("64KB", "ECDHOnP384AndGCM128", b) +} +func BenchmarkEncrypt1MBWithECDHOnP384AndGCM128(b *testing.B) { + benchEncrypt("1MB", "ECDHOnP384AndGCM128", b) +} +func BenchmarkEncrypt64MBWithECDHOnP384AndGCM128(b *testing.B) { + benchEncrypt("64MB", "ECDHOnP384AndGCM128", b) +} + +func BenchmarkEncrypt1BWithECDHOnP521AndGCM128(b *testing.B) { + benchEncrypt("1B", "ECDHOnP521AndGCM128", b) +} +func BenchmarkEncrypt64BWithECDHOnP521AndGCM128(b *testing.B) { + benchEncrypt("64B", "ECDHOnP521AndGCM128", b) +} +func BenchmarkEncrypt1KBWithECDHOnP521AndGCM128(b *testing.B) { + benchEncrypt("1KB", "ECDHOnP521AndGCM128", b) +} +func BenchmarkEncrypt64KBWithECDHOnP521AndGCM128(b 
*testing.B) { + benchEncrypt("64KB", "ECDHOnP521AndGCM128", b) +} +func BenchmarkEncrypt1MBWithECDHOnP521AndGCM128(b *testing.B) { + benchEncrypt("1MB", "ECDHOnP521AndGCM128", b) +} +func BenchmarkEncrypt64MBWithECDHOnP521AndGCM128(b *testing.B) { + benchEncrypt("64MB", "ECDHOnP521AndGCM128", b) +} + +func benchEncrypt(chunkKey, primKey string, b *testing.B) { + data, ok := chunks[chunkKey] + if !ok { + b.Fatalf("unknown chunk size %s", chunkKey) + } + + enc, ok := encrypters[primKey] + if !ok { + b.Fatalf("unknown encrypter %s", primKey) + } + + b.SetBytes(int64(len(data))) + for i := 0; i < b.N; i++ { + enc.Encrypt(data) + } +} + +var ( + decryptionKeys = map[string]interface{}{ + "OAEPAndGCM": rsaTestKey, + "PKCSAndGCM": rsaTestKey, + "OAEPAndCBC": rsaTestKey, + "PKCSAndCBC": rsaTestKey, + + "DirectGCM128": symKey, + "DirectCBC128": symKey, + "DirectGCM256": symKey, + "DirectCBC256": symKey, + + "AESKWAndGCM128": symKey, + "AESKWAndCBC256": symKey, + + "ECDHOnP256AndGCM128": ecTestKey256, + "ECDHOnP384AndGCM128": ecTestKey384, + "ECDHOnP521AndGCM128": ecTestKey521, + } +) + +func BenchmarkDecrypt1BWithOAEPAndGCM(b *testing.B) { benchDecrypt("1B", "OAEPAndGCM", b) } +func BenchmarkDecrypt64BWithOAEPAndGCM(b *testing.B) { benchDecrypt("64B", "OAEPAndGCM", b) } +func BenchmarkDecrypt1KBWithOAEPAndGCM(b *testing.B) { benchDecrypt("1KB", "OAEPAndGCM", b) } +func BenchmarkDecrypt64KBWithOAEPAndGCM(b *testing.B) { benchDecrypt("64KB", "OAEPAndGCM", b) } +func BenchmarkDecrypt1MBWithOAEPAndGCM(b *testing.B) { benchDecrypt("1MB", "OAEPAndGCM", b) } +func BenchmarkDecrypt64MBWithOAEPAndGCM(b *testing.B) { benchDecrypt("64MB", "OAEPAndGCM", b) } + +func BenchmarkDecrypt1BWithPKCSAndGCM(b *testing.B) { benchDecrypt("1B", "PKCSAndGCM", b) } +func BenchmarkDecrypt64BWithPKCSAndGCM(b *testing.B) { benchDecrypt("64B", "PKCSAndGCM", b) } +func BenchmarkDecrypt1KBWithPKCSAndGCM(b *testing.B) { benchDecrypt("1KB", "PKCSAndGCM", b) } +func BenchmarkDecrypt64KBWithPKCSAndGCM(b *testing.B) { benchDecrypt("64KB", "PKCSAndGCM", b) } +func BenchmarkDecrypt1MBWithPKCSAndGCM(b *testing.B) { benchDecrypt("1MB", "PKCSAndGCM", b) } +func BenchmarkDecrypt64MBWithPKCSAndGCM(b *testing.B) { benchDecrypt("64MB", "PKCSAndGCM", b) } + +func BenchmarkDecrypt1BWithOAEPAndCBC(b *testing.B) { benchDecrypt("1B", "OAEPAndCBC", b) } +func BenchmarkDecrypt64BWithOAEPAndCBC(b *testing.B) { benchDecrypt("64B", "OAEPAndCBC", b) } +func BenchmarkDecrypt1KBWithOAEPAndCBC(b *testing.B) { benchDecrypt("1KB", "OAEPAndCBC", b) } +func BenchmarkDecrypt64KBWithOAEPAndCBC(b *testing.B) { benchDecrypt("64KB", "OAEPAndCBC", b) } +func BenchmarkDecrypt1MBWithOAEPAndCBC(b *testing.B) { benchDecrypt("1MB", "OAEPAndCBC", b) } +func BenchmarkDecrypt64MBWithOAEPAndCBC(b *testing.B) { benchDecrypt("64MB", "OAEPAndCBC", b) } + +func BenchmarkDecrypt1BWithPKCSAndCBC(b *testing.B) { benchDecrypt("1B", "PKCSAndCBC", b) } +func BenchmarkDecrypt64BWithPKCSAndCBC(b *testing.B) { benchDecrypt("64B", "PKCSAndCBC", b) } +func BenchmarkDecrypt1KBWithPKCSAndCBC(b *testing.B) { benchDecrypt("1KB", "PKCSAndCBC", b) } +func BenchmarkDecrypt64KBWithPKCSAndCBC(b *testing.B) { benchDecrypt("64KB", "PKCSAndCBC", b) } +func BenchmarkDecrypt1MBWithPKCSAndCBC(b *testing.B) { benchDecrypt("1MB", "PKCSAndCBC", b) } +func BenchmarkDecrypt64MBWithPKCSAndCBC(b *testing.B) { benchDecrypt("64MB", "PKCSAndCBC", b) } + +func BenchmarkDecrypt1BWithDirectGCM128(b *testing.B) { benchDecrypt("1B", "DirectGCM128", b) } +func BenchmarkDecrypt64BWithDirectGCM128(b *testing.B) { 
benchDecrypt("64B", "DirectGCM128", b) } +func BenchmarkDecrypt1KBWithDirectGCM128(b *testing.B) { benchDecrypt("1KB", "DirectGCM128", b) } +func BenchmarkDecrypt64KBWithDirectGCM128(b *testing.B) { benchDecrypt("64KB", "DirectGCM128", b) } +func BenchmarkDecrypt1MBWithDirectGCM128(b *testing.B) { benchDecrypt("1MB", "DirectGCM128", b) } +func BenchmarkDecrypt64MBWithDirectGCM128(b *testing.B) { benchDecrypt("64MB", "DirectGCM128", b) } + +func BenchmarkDecrypt1BWithDirectCBC128(b *testing.B) { benchDecrypt("1B", "DirectCBC128", b) } +func BenchmarkDecrypt64BWithDirectCBC128(b *testing.B) { benchDecrypt("64B", "DirectCBC128", b) } +func BenchmarkDecrypt1KBWithDirectCBC128(b *testing.B) { benchDecrypt("1KB", "DirectCBC128", b) } +func BenchmarkDecrypt64KBWithDirectCBC128(b *testing.B) { benchDecrypt("64KB", "DirectCBC128", b) } +func BenchmarkDecrypt1MBWithDirectCBC128(b *testing.B) { benchDecrypt("1MB", "DirectCBC128", b) } +func BenchmarkDecrypt64MBWithDirectCBC128(b *testing.B) { benchDecrypt("64MB", "DirectCBC128", b) } + +func BenchmarkDecrypt1BWithDirectGCM256(b *testing.B) { benchDecrypt("1B", "DirectGCM256", b) } +func BenchmarkDecrypt64BWithDirectGCM256(b *testing.B) { benchDecrypt("64B", "DirectGCM256", b) } +func BenchmarkDecrypt1KBWithDirectGCM256(b *testing.B) { benchDecrypt("1KB", "DirectGCM256", b) } +func BenchmarkDecrypt64KBWithDirectGCM256(b *testing.B) { benchDecrypt("64KB", "DirectGCM256", b) } +func BenchmarkDecrypt1MBWithDirectGCM256(b *testing.B) { benchDecrypt("1MB", "DirectGCM256", b) } +func BenchmarkDecrypt64MBWithDirectGCM256(b *testing.B) { benchDecrypt("64MB", "DirectGCM256", b) } + +func BenchmarkDecrypt1BWithDirectCBC256(b *testing.B) { benchDecrypt("1B", "DirectCBC256", b) } +func BenchmarkDecrypt64BWithDirectCBC256(b *testing.B) { benchDecrypt("64B", "DirectCBC256", b) } +func BenchmarkDecrypt1KBWithDirectCBC256(b *testing.B) { benchDecrypt("1KB", "DirectCBC256", b) } +func BenchmarkDecrypt64KBWithDirectCBC256(b *testing.B) { benchDecrypt("64KB", "DirectCBC256", b) } +func BenchmarkDecrypt1MBWithDirectCBC256(b *testing.B) { benchDecrypt("1MB", "DirectCBC256", b) } +func BenchmarkDecrypt64MBWithDirectCBC256(b *testing.B) { benchDecrypt("64MB", "DirectCBC256", b) } + +func BenchmarkDecrypt1BWithAESKWAndGCM128(b *testing.B) { benchDecrypt("1B", "AESKWAndGCM128", b) } +func BenchmarkDecrypt64BWithAESKWAndGCM128(b *testing.B) { benchDecrypt("64B", "AESKWAndGCM128", b) } +func BenchmarkDecrypt1KBWithAESKWAndGCM128(b *testing.B) { benchDecrypt("1KB", "AESKWAndGCM128", b) } +func BenchmarkDecrypt64KBWithAESKWAndGCM128(b *testing.B) { benchDecrypt("64KB", "AESKWAndGCM128", b) } +func BenchmarkDecrypt1MBWithAESKWAndGCM128(b *testing.B) { benchDecrypt("1MB", "AESKWAndGCM128", b) } +func BenchmarkDecrypt64MBWithAESKWAndGCM128(b *testing.B) { benchDecrypt("64MB", "AESKWAndGCM128", b) } + +func BenchmarkDecrypt1BWithAESKWAndCBC256(b *testing.B) { benchDecrypt("1B", "AESKWAndCBC256", b) } +func BenchmarkDecrypt64BWithAESKWAndCBC256(b *testing.B) { benchDecrypt("64B", "AESKWAndCBC256", b) } +func BenchmarkDecrypt1KBWithAESKWAndCBC256(b *testing.B) { benchDecrypt("1KB", "AESKWAndCBC256", b) } +func BenchmarkDecrypt64KBWithAESKWAndCBC256(b *testing.B) { benchDecrypt("64KB", "AESKWAndCBC256", b) } +func BenchmarkDecrypt1MBWithAESKWAndCBC256(b *testing.B) { benchDecrypt("1MB", "AESKWAndCBC256", b) } +func BenchmarkDecrypt64MBWithAESKWAndCBC256(b *testing.B) { benchDecrypt("64MB", "AESKWAndCBC256", b) } + +func BenchmarkDecrypt1BWithECDHOnP256AndGCM128(b *testing.B) { + 
benchDecrypt("1B", "ECDHOnP256AndGCM128", b) +} +func BenchmarkDecrypt64BWithECDHOnP256AndGCM128(b *testing.B) { + benchDecrypt("64B", "ECDHOnP256AndGCM128", b) +} +func BenchmarkDecrypt1KBWithECDHOnP256AndGCM128(b *testing.B) { + benchDecrypt("1KB", "ECDHOnP256AndGCM128", b) +} +func BenchmarkDecrypt64KBWithECDHOnP256AndGCM128(b *testing.B) { + benchDecrypt("64KB", "ECDHOnP256AndGCM128", b) +} +func BenchmarkDecrypt1MBWithECDHOnP256AndGCM128(b *testing.B) { + benchDecrypt("1MB", "ECDHOnP256AndGCM128", b) +} +func BenchmarkDecrypt64MBWithECDHOnP256AndGCM128(b *testing.B) { + benchDecrypt("64MB", "ECDHOnP256AndGCM128", b) +} + +func BenchmarkDecrypt1BWithECDHOnP384AndGCM128(b *testing.B) { + benchDecrypt("1B", "ECDHOnP384AndGCM128", b) +} +func BenchmarkDecrypt64BWithECDHOnP384AndGCM128(b *testing.B) { + benchDecrypt("64B", "ECDHOnP384AndGCM128", b) +} +func BenchmarkDecrypt1KBWithECDHOnP384AndGCM128(b *testing.B) { + benchDecrypt("1KB", "ECDHOnP384AndGCM128", b) +} +func BenchmarkDecrypt64KBWithECDHOnP384AndGCM128(b *testing.B) { + benchDecrypt("64KB", "ECDHOnP384AndGCM128", b) +} +func BenchmarkDecrypt1MBWithECDHOnP384AndGCM128(b *testing.B) { + benchDecrypt("1MB", "ECDHOnP384AndGCM128", b) +} +func BenchmarkDecrypt64MBWithECDHOnP384AndGCM128(b *testing.B) { + benchDecrypt("64MB", "ECDHOnP384AndGCM128", b) +} + +func BenchmarkDecrypt1BWithECDHOnP521AndGCM128(b *testing.B) { + benchDecrypt("1B", "ECDHOnP521AndGCM128", b) +} +func BenchmarkDecrypt64BWithECDHOnP521AndGCM128(b *testing.B) { + benchDecrypt("64B", "ECDHOnP521AndGCM128", b) +} +func BenchmarkDecrypt1KBWithECDHOnP521AndGCM128(b *testing.B) { + benchDecrypt("1KB", "ECDHOnP521AndGCM128", b) +} +func BenchmarkDecrypt64KBWithECDHOnP521AndGCM128(b *testing.B) { + benchDecrypt("64KB", "ECDHOnP521AndGCM128", b) +} +func BenchmarkDecrypt1MBWithECDHOnP521AndGCM128(b *testing.B) { + benchDecrypt("1MB", "ECDHOnP521AndGCM128", b) +} +func BenchmarkDecrypt64MBWithECDHOnP521AndGCM128(b *testing.B) { + benchDecrypt("64MB", "ECDHOnP521AndGCM128", b) +} + +func benchDecrypt(chunkKey, primKey string, b *testing.B) { + chunk, ok := chunks[chunkKey] + if !ok { + b.Fatalf("unknown chunk size %s", chunkKey) + } + + enc, ok := encrypters[primKey] + if !ok { + b.Fatalf("unknown encrypter %s", primKey) + } + + dec, ok := decryptionKeys[primKey] + if !ok { + b.Fatalf("unknown decryption key %s", primKey) + } + + data, err := enc.Encrypt(chunk) + if err != nil { + b.Fatal(err) + } + + b.SetBytes(int64(len(chunk))) + b.ResetTimer() + for i := 0; i < b.N; i++ { + data.Decrypt(dec) + } +} + +func mustEncrypter(keyAlg KeyAlgorithm, encAlg ContentEncryption, encryptionKey interface{}) Encrypter { + enc, err := NewEncrypter(keyAlg, encAlg, encryptionKey) + if err != nil { + panic(err) + } + return enc +} diff --git a/vendor/github.com/docker/distribution/vendor/rsc.io/letsencrypt/vendor/gopkg.in/square/go-jose.v1/doc.go b/vendor/github.com/docker/distribution/vendor/rsc.io/letsencrypt/vendor/gopkg.in/square/go-jose.v1/doc.go new file mode 100644 index 000000000..b4cd1e989 --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/rsc.io/letsencrypt/vendor/gopkg.in/square/go-jose.v1/doc.go @@ -0,0 +1,26 @@ +/*- + * Copyright 2014 Square Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/* + +Package jose aims to provide an implementation of the Javascript Object Signing +and Encryption set of standards. For the moment, it mainly focuses on +encryption and signing based on the JSON Web Encryption and JSON Web Signature +standards. The library supports both the compact and full serialization +formats, and has optional support for multiple recipients. + +*/ +package jose // import "gopkg.in/square/go-jose.v1" diff --git a/vendor/github.com/docker/distribution/vendor/rsc.io/letsencrypt/vendor/gopkg.in/square/go-jose.v1/doc_test.go b/vendor/github.com/docker/distribution/vendor/rsc.io/letsencrypt/vendor/gopkg.in/square/go-jose.v1/doc_test.go new file mode 100644 index 000000000..50468295d --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/rsc.io/letsencrypt/vendor/gopkg.in/square/go-jose.v1/doc_test.go @@ -0,0 +1,226 @@ +/*- + * Copyright 2014 Square Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package jose + +import ( + "crypto/ecdsa" + "crypto/rand" + "crypto/rsa" + "fmt" +) + +// Dummy encrypter for use in examples +var encrypter, _ = NewEncrypter(DIRECT, A128GCM, []byte{}) + +func Example_jWE() { + // Generate a public/private key pair to use for this example. The library + // also provides two utility functions (LoadPublicKey and LoadPrivateKey) + // that can be used to load keys from PEM/DER-encoded data. + privateKey, err := rsa.GenerateKey(rand.Reader, 2048) + if err != nil { + panic(err) + } + + // Instantiate an encrypter using RSA-OAEP with AES128-GCM. An error would + // indicate that the selected algorithm(s) are not currently supported. + publicKey := &privateKey.PublicKey + encrypter, err := NewEncrypter(RSA_OAEP, A128GCM, publicKey) + if err != nil { + panic(err) + } + + // Encrypt a sample plaintext. Calling the encrypter returns an encrypted + // JWE object, which can then be serialized for output afterwards. An error + // would indicate a problem in an underlying cryptographic primitive. + var plaintext = []byte("Lorem ipsum dolor sit amet") + object, err := encrypter.Encrypt(plaintext) + if err != nil { + panic(err) + } + + // Serialize the encrypted object using the full serialization format. + // Alternatively you can also use the compact format here by calling + // object.CompactSerialize() instead. + serialized := object.FullSerialize() + + // Parse the serialized, encrypted JWE object. An error would indicate that + // the given input did not represent a valid message. 
+ object, err = ParseEncrypted(serialized) + if err != nil { + panic(err) + } + + // Now we can decrypt and get back our original plaintext. An error here + // would indicate the the message failed to decrypt, e.g. because the auth + // tag was broken or the message was tampered with. + decrypted, err := object.Decrypt(privateKey) + if err != nil { + panic(err) + } + + fmt.Printf(string(decrypted)) + // output: Lorem ipsum dolor sit amet +} + +func Example_jWS() { + // Generate a public/private key pair to use for this example. The library + // also provides two utility functions (LoadPublicKey and LoadPrivateKey) + // that can be used to load keys from PEM/DER-encoded data. + privateKey, err := rsa.GenerateKey(rand.Reader, 2048) + if err != nil { + panic(err) + } + + // Instantiate a signer using RSASSA-PSS (SHA512) with the given private key. + signer, err := NewSigner(PS512, privateKey) + if err != nil { + panic(err) + } + + // Sign a sample payload. Calling the signer returns a protected JWS object, + // which can then be serialized for output afterwards. An error would + // indicate a problem in an underlying cryptographic primitive. + var payload = []byte("Lorem ipsum dolor sit amet") + object, err := signer.Sign(payload) + if err != nil { + panic(err) + } + + // Serialize the encrypted object using the full serialization format. + // Alternatively you can also use the compact format here by calling + // object.CompactSerialize() instead. + serialized := object.FullSerialize() + + // Parse the serialized, protected JWS object. An error would indicate that + // the given input did not represent a valid message. + object, err = ParseSigned(serialized) + if err != nil { + panic(err) + } + + // Now we can verify the signature on the payload. An error here would + // indicate the the message failed to verify, e.g. because the signature was + // broken or the message was tampered with. + output, err := object.Verify(&privateKey.PublicKey) + if err != nil { + panic(err) + } + + fmt.Printf(string(output)) + // output: Lorem ipsum dolor sit amet +} + +func ExampleNewEncrypter_publicKey() { + var publicKey *rsa.PublicKey + + // Instantiate an encrypter using RSA-OAEP with AES128-GCM. + NewEncrypter(RSA_OAEP, A128GCM, publicKey) + + // Instantiate an encrypter using RSA-PKCS1v1.5 with AES128-CBC+HMAC. + NewEncrypter(RSA1_5, A128CBC_HS256, publicKey) +} + +func ExampleNewEncrypter_symmetric() { + var sharedKey []byte + + // Instantiate an encrypter using AES128-GCM with AES-GCM key wrap. + NewEncrypter(A128GCMKW, A128GCM, sharedKey) + + // Instantiate an encrypter using AES256-GCM directly, w/o key wrapping. + NewEncrypter(DIRECT, A256GCM, sharedKey) +} + +func ExampleNewSigner_publicKey() { + var rsaPrivateKey *rsa.PrivateKey + var ecdsaPrivateKey *ecdsa.PrivateKey + + // Instantiate a signer using RSA-PKCS#1v1.5 with SHA-256. + NewSigner(RS256, rsaPrivateKey) + + // Instantiate a signer using ECDSA with SHA-384. + NewSigner(ES384, ecdsaPrivateKey) +} + +func ExampleNewSigner_symmetric() { + var sharedKey []byte + + // Instantiate an signer using HMAC-SHA256. + NewSigner(HS256, sharedKey) + + // Instantiate an signer using HMAC-SHA512. + NewSigner(HS512, sharedKey) +} + +func ExampleNewMultiEncrypter() { + var publicKey *rsa.PublicKey + var sharedKey []byte + + // Instantiate an encrypter using AES-GCM. 
+ encrypter, err := NewMultiEncrypter(A128GCM) + if err != nil { + panic(err) + } + + // Add a recipient using a shared key with AES-GCM key wap + err = encrypter.AddRecipient(A128GCMKW, sharedKey) + if err != nil { + panic(err) + } + + // Add a recipient using an RSA public key with RSA-OAEP + err = encrypter.AddRecipient(RSA_OAEP, publicKey) + if err != nil { + panic(err) + } +} + +func ExampleNewMultiSigner() { + var privateKey *rsa.PrivateKey + var sharedKey []byte + + // Instantiate a signer for multiple recipients. + signer := NewMultiSigner() + + // Add a recipient using a shared key with HMAC-SHA256 + err := signer.AddRecipient(HS256, sharedKey) + if err != nil { + panic(err) + } + + // Add a recipient using an RSA private key with RSASSA-PSS with SHA384 + err = signer.AddRecipient(PS384, privateKey) + if err != nil { + panic(err) + } +} + +func ExampleEncrypter_encrypt() { + // Encrypt a plaintext in order to get an encrypted JWE object. + var plaintext = []byte("This is a secret message") + + encrypter.Encrypt(plaintext) +} + +func ExampleEncrypter_encryptWithAuthData() { + // Encrypt a plaintext in order to get an encrypted JWE object. Also attach + // some additional authenticated data (AAD) to the object. Note that objects + // with attached AAD can only be represented using full serialization. + var plaintext = []byte("This is a secret message") + var aad = []byte("This is authenticated, but public data") + + encrypter.EncryptWithAuthData(plaintext, aad) +} diff --git a/vendor/github.com/docker/distribution/vendor/rsc.io/letsencrypt/vendor/gopkg.in/square/go-jose.v1/encoding.go b/vendor/github.com/docker/distribution/vendor/rsc.io/letsencrypt/vendor/gopkg.in/square/go-jose.v1/encoding.go new file mode 100644 index 000000000..3e2ac0ae9 --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/rsc.io/letsencrypt/vendor/gopkg.in/square/go-jose.v1/encoding.go @@ -0,0 +1,191 @@ +/*- + * Copyright 2014 Square Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package jose + +import ( + "bytes" + "compress/flate" + "encoding/base64" + "encoding/binary" + "io" + "math/big" + "regexp" + "strings" +) + +var stripWhitespaceRegex = regexp.MustCompile("\\s") + +// Url-safe base64 encode that strips padding +func base64URLEncode(data []byte) string { + var result = base64.URLEncoding.EncodeToString(data) + return strings.TrimRight(result, "=") +} + +// Url-safe base64 decoder that adds padding +func base64URLDecode(data string) ([]byte, error) { + var missing = (4 - len(data)%4) % 4 + data += strings.Repeat("=", missing) + return base64.URLEncoding.DecodeString(data) +} + +// Helper function to serialize known-good objects. +// Precondition: value is not a nil pointer. +func mustSerializeJSON(value interface{}) []byte { + out, err := MarshalJSON(value) + if err != nil { + panic(err) + } + // We never want to serialize the top-level value "null," since it's not a + // valid JOSE message. 
But if a caller passes in a nil pointer to this method, + // MarshalJSON will happily serialize it as the top-level value "null". If + // that value is then embedded in another operation, for instance by being + // base64-encoded and fed as input to a signing algorithm + // (https://github.com/square/go-jose/issues/22), the result will be + // incorrect. Because this method is intended for known-good objects, and a nil + // pointer is not a known-good object, we are free to panic in this case. + // Note: It's not possible to directly check whether the data pointed at by an + // interface is a nil pointer, so we do this hacky workaround. + // https://groups.google.com/forum/#!topic/golang-nuts/wnH302gBa4I + if string(out) == "null" { + panic("Tried to serialize a nil pointer.") + } + return out +} + +// Strip all newlines and whitespace +func stripWhitespace(data string) string { + return stripWhitespaceRegex.ReplaceAllString(data, "") +} + +// Perform compression based on algorithm +func compress(algorithm CompressionAlgorithm, input []byte) ([]byte, error) { + switch algorithm { + case DEFLATE: + return deflate(input) + default: + return nil, ErrUnsupportedAlgorithm + } +} + +// Perform decompression based on algorithm +func decompress(algorithm CompressionAlgorithm, input []byte) ([]byte, error) { + switch algorithm { + case DEFLATE: + return inflate(input) + default: + return nil, ErrUnsupportedAlgorithm + } +} + +// Compress with DEFLATE +func deflate(input []byte) ([]byte, error) { + output := new(bytes.Buffer) + + // Writing to byte buffer, err is always nil + writer, _ := flate.NewWriter(output, 1) + _, _ = io.Copy(writer, bytes.NewBuffer(input)) + + err := writer.Close() + return output.Bytes(), err +} + +// Decompress with DEFLATE +func inflate(input []byte) ([]byte, error) { + output := new(bytes.Buffer) + reader := flate.NewReader(bytes.NewBuffer(input)) + + _, err := io.Copy(output, reader) + if err != nil { + return nil, err + } + + err = reader.Close() + return output.Bytes(), err +} + +// byteBuffer represents a slice of bytes that can be serialized to url-safe base64. +type byteBuffer struct { + data []byte +} + +func newBuffer(data []byte) *byteBuffer { + if data == nil { + return nil + } + return &byteBuffer{ + data: data, + } +} + +func newFixedSizeBuffer(data []byte, length int) *byteBuffer { + if len(data) > length { + panic("square/go-jose: invalid call to newFixedSizeBuffer (len(data) > length)") + } + pad := make([]byte, length-len(data)) + return newBuffer(append(pad, data...)) +} + +func newBufferFromInt(num uint64) *byteBuffer { + data := make([]byte, 8) + binary.BigEndian.PutUint64(data, num) + return newBuffer(bytes.TrimLeft(data, "\x00")) +} + +func (b *byteBuffer) MarshalJSON() ([]byte, error) { + return MarshalJSON(b.base64()) +} + +func (b *byteBuffer) UnmarshalJSON(data []byte) error { + var encoded string + err := UnmarshalJSON(data, &encoded) + if err != nil { + return err + } + + if encoded == "" { + return nil + } + + decoded, err := base64URLDecode(encoded) + if err != nil { + return err + } + + *b = *newBuffer(decoded) + + return nil +} + +func (b *byteBuffer) base64() string { + return base64URLEncode(b.data) +} + +func (b *byteBuffer) bytes() []byte { + // Handling nil here allows us to transparently handle nil slices when serializing. 
+ if b == nil { + return nil + } + return b.data +} + +func (b byteBuffer) bigInt() *big.Int { + return new(big.Int).SetBytes(b.data) +} + +func (b byteBuffer) toInt() int { + return int(b.bigInt().Int64()) +} diff --git a/vendor/github.com/docker/distribution/vendor/rsc.io/letsencrypt/vendor/gopkg.in/square/go-jose.v1/encoding_test.go b/vendor/github.com/docker/distribution/vendor/rsc.io/letsencrypt/vendor/gopkg.in/square/go-jose.v1/encoding_test.go new file mode 100644 index 000000000..e2f8d979c --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/rsc.io/letsencrypt/vendor/gopkg.in/square/go-jose.v1/encoding_test.go @@ -0,0 +1,173 @@ +/*- + * Copyright 2014 Square Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package jose + +import ( + "bytes" + "strings" + "testing" +) + +func TestBase64URLEncode(t *testing.T) { + // Test arrays with various sizes + if base64URLEncode([]byte{}) != "" { + t.Error("failed to encode empty array") + } + + if base64URLEncode([]byte{0}) != "AA" { + t.Error("failed to encode [0x00]") + } + + if base64URLEncode([]byte{0, 1}) != "AAE" { + t.Error("failed to encode [0x00, 0x01]") + } + + if base64URLEncode([]byte{0, 1, 2}) != "AAEC" { + t.Error("failed to encode [0x00, 0x01, 0x02]") + } + + if base64URLEncode([]byte{0, 1, 2, 3}) != "AAECAw" { + t.Error("failed to encode [0x00, 0x01, 0x02, 0x03]") + } +} + +func TestBase64URLDecode(t *testing.T) { + // Test arrays with various sizes + val, err := base64URLDecode("") + if err != nil || !bytes.Equal(val, []byte{}) { + t.Error("failed to decode empty array") + } + + val, err = base64URLDecode("AA") + if err != nil || !bytes.Equal(val, []byte{0}) { + t.Error("failed to decode [0x00]") + } + + val, err = base64URLDecode("AAE") + if err != nil || !bytes.Equal(val, []byte{0, 1}) { + t.Error("failed to decode [0x00, 0x01]") + } + + val, err = base64URLDecode("AAEC") + if err != nil || !bytes.Equal(val, []byte{0, 1, 2}) { + t.Error("failed to decode [0x00, 0x01, 0x02]") + } + + val, err = base64URLDecode("AAECAw") + if err != nil || !bytes.Equal(val, []byte{0, 1, 2, 3}) { + t.Error("failed to decode [0x00, 0x01, 0x02, 0x03]") + } +} + +func TestDeflateRoundtrip(t *testing.T) { + original := []byte("Lorem ipsum dolor sit amet") + + compressed, err := deflate(original) + if err != nil { + panic(err) + } + + output, err := inflate(compressed) + if err != nil { + panic(err) + } + + if bytes.Compare(output, original) != 0 { + t.Error("Input and output do not match") + } +} + +func TestInvalidCompression(t *testing.T) { + _, err := compress("XYZ", []byte{}) + if err == nil { + t.Error("should not accept invalid algorithm") + } + + _, err = decompress("XYZ", []byte{}) + if err == nil { + t.Error("should not accept invalid algorithm") + } + + _, err = decompress(DEFLATE, []byte{1, 2, 3, 4}) + if err == nil { + t.Error("should not accept invalid data") + } +} + +func TestByteBufferTrim(t *testing.T) { + buf := newBufferFromInt(1) + if !bytes.Equal(buf.data, []byte{1}) { + t.Error("Byte buffer for 
integer '1' should contain [0x01]") + } + + buf = newBufferFromInt(65537) + if !bytes.Equal(buf.data, []byte{1, 0, 1}) { + t.Error("Byte buffer for integer '65537' should contain [0x01, 0x00, 0x01]") + } +} + +func TestFixedSizeBuffer(t *testing.T) { + data0 := []byte{} + data1 := []byte{1} + data2 := []byte{1, 2} + data3 := []byte{1, 2, 3} + data4 := []byte{1, 2, 3, 4} + + buf0 := newFixedSizeBuffer(data0, 4) + buf1 := newFixedSizeBuffer(data1, 4) + buf2 := newFixedSizeBuffer(data2, 4) + buf3 := newFixedSizeBuffer(data3, 4) + buf4 := newFixedSizeBuffer(data4, 4) + + if !bytes.Equal(buf0.data, []byte{0, 0, 0, 0}) { + t.Error("Invalid padded buffer for buf0") + } + if !bytes.Equal(buf1.data, []byte{0, 0, 0, 1}) { + t.Error("Invalid padded buffer for buf1") + } + if !bytes.Equal(buf2.data, []byte{0, 0, 1, 2}) { + t.Error("Invalid padded buffer for buf2") + } + if !bytes.Equal(buf3.data, []byte{0, 1, 2, 3}) { + t.Error("Invalid padded buffer for buf3") + } + if !bytes.Equal(buf4.data, []byte{1, 2, 3, 4}) { + t.Error("Invalid padded buffer for buf4") + } +} + +func TestSerializeJSONRejectsNil(t *testing.T) { + defer func() { + r := recover() + if r == nil || !strings.Contains(r.(string), "nil pointer") { + t.Error("serialize function should not accept nil pointer") + } + }() + + mustSerializeJSON(nil) +} + +func TestFixedSizeBufferTooLarge(t *testing.T) { + defer func() { + r := recover() + if r == nil { + t.Error("should not be able to create fixed size buffer with oversized data") + } + }() + + newFixedSizeBuffer(make([]byte, 2), 1) +} diff --git a/vendor/github.com/docker/distribution/vendor/rsc.io/letsencrypt/vendor/gopkg.in/square/go-jose.v1/json/LICENSE b/vendor/github.com/docker/distribution/vendor/rsc.io/letsencrypt/vendor/gopkg.in/square/go-jose.v1/json/LICENSE new file mode 100644 index 000000000..744875676 --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/rsc.io/letsencrypt/vendor/gopkg.in/square/go-jose.v1/json/LICENSE @@ -0,0 +1,27 @@ +Copyright (c) 2012 The Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
diff --git a/vendor/github.com/docker/distribution/vendor/rsc.io/letsencrypt/vendor/gopkg.in/square/go-jose.v1/json/README.md b/vendor/github.com/docker/distribution/vendor/rsc.io/letsencrypt/vendor/gopkg.in/square/go-jose.v1/json/README.md new file mode 100644 index 000000000..86de5e558 --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/rsc.io/letsencrypt/vendor/gopkg.in/square/go-jose.v1/json/README.md @@ -0,0 +1,13 @@ +# Safe JSON + +This repository contains a fork of the `encoding/json` package from Go 1.6. + +The following changes were made: + +* Object deserialization uses case-sensitive member name matching instead of + [case-insensitive matching](https://www.ietf.org/mail-archive/web/json/current/msg03763.html). + This is to avoid differences in the interpretation of JOSE messages between + go-jose and libraries written in other languages. +* When deserializing a JSON object, we check for duplicate keys and reject the + input whenever we detect a duplicate. Rather than trying to work with malformed + data, we prefer to reject it right away. diff --git a/vendor/github.com/docker/distribution/vendor/rsc.io/letsencrypt/vendor/gopkg.in/square/go-jose.v1/json/bench_test.go b/vendor/github.com/docker/distribution/vendor/rsc.io/letsencrypt/vendor/gopkg.in/square/go-jose.v1/json/bench_test.go new file mode 100644 index 000000000..ed89d1156 --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/rsc.io/letsencrypt/vendor/gopkg.in/square/go-jose.v1/json/bench_test.go @@ -0,0 +1,223 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Large data benchmark. +// The JSON data is a summary of agl's changes in the +// go, webkit, and chromium open source projects. +// We benchmark converting between the JSON form +// and in-memory data structures. 
+ +package json + +import ( + "bytes" + "compress/gzip" + "io/ioutil" + "os" + "strings" + "testing" +) + +type codeResponse struct { + Tree *codeNode `json:"tree"` + Username string `json:"username"` +} + +type codeNode struct { + Name string `json:"name"` + Kids []*codeNode `json:"kids"` + CLWeight float64 `json:"cl_weight"` + Touches int `json:"touches"` + MinT int64 `json:"min_t"` + MaxT int64 `json:"max_t"` + MeanT int64 `json:"mean_t"` +} + +var codeJSON []byte +var codeStruct codeResponse + +func codeInit() { + f, err := os.Open("testdata/code.json.gz") + if err != nil { + panic(err) + } + defer f.Close() + gz, err := gzip.NewReader(f) + if err != nil { + panic(err) + } + data, err := ioutil.ReadAll(gz) + if err != nil { + panic(err) + } + + codeJSON = data + + if err := Unmarshal(codeJSON, &codeStruct); err != nil { + panic("unmarshal code.json: " + err.Error()) + } + + if data, err = Marshal(&codeStruct); err != nil { + panic("marshal code.json: " + err.Error()) + } + + if !bytes.Equal(data, codeJSON) { + println("different lengths", len(data), len(codeJSON)) + for i := 0; i < len(data) && i < len(codeJSON); i++ { + if data[i] != codeJSON[i] { + println("re-marshal: changed at byte", i) + println("orig: ", string(codeJSON[i-10:i+10])) + println("new: ", string(data[i-10:i+10])) + break + } + } + panic("re-marshal code.json: different result") + } +} + +func BenchmarkCodeEncoder(b *testing.B) { + if codeJSON == nil { + b.StopTimer() + codeInit() + b.StartTimer() + } + enc := NewEncoder(ioutil.Discard) + for i := 0; i < b.N; i++ { + if err := enc.Encode(&codeStruct); err != nil { + b.Fatal("Encode:", err) + } + } + b.SetBytes(int64(len(codeJSON))) +} + +func BenchmarkCodeMarshal(b *testing.B) { + if codeJSON == nil { + b.StopTimer() + codeInit() + b.StartTimer() + } + for i := 0; i < b.N; i++ { + if _, err := Marshal(&codeStruct); err != nil { + b.Fatal("Marshal:", err) + } + } + b.SetBytes(int64(len(codeJSON))) +} + +func BenchmarkCodeDecoder(b *testing.B) { + if codeJSON == nil { + b.StopTimer() + codeInit() + b.StartTimer() + } + var buf bytes.Buffer + dec := NewDecoder(&buf) + var r codeResponse + for i := 0; i < b.N; i++ { + buf.Write(codeJSON) + // hide EOF + buf.WriteByte('\n') + buf.WriteByte('\n') + buf.WriteByte('\n') + if err := dec.Decode(&r); err != nil { + b.Fatal("Decode:", err) + } + } + b.SetBytes(int64(len(codeJSON))) +} + +func BenchmarkDecoderStream(b *testing.B) { + b.StopTimer() + var buf bytes.Buffer + dec := NewDecoder(&buf) + buf.WriteString(`"` + strings.Repeat("x", 1000000) + `"` + "\n\n\n") + var x interface{} + if err := dec.Decode(&x); err != nil { + b.Fatal("Decode:", err) + } + ones := strings.Repeat(" 1\n", 300000) + "\n\n\n" + b.StartTimer() + for i := 0; i < b.N; i++ { + if i%300000 == 0 { + buf.WriteString(ones) + } + x = nil + if err := dec.Decode(&x); err != nil || x != 1.0 { + b.Fatalf("Decode: %v after %d", err, i) + } + } +} + +func BenchmarkCodeUnmarshal(b *testing.B) { + if codeJSON == nil { + b.StopTimer() + codeInit() + b.StartTimer() + } + for i := 0; i < b.N; i++ { + var r codeResponse + if err := Unmarshal(codeJSON, &r); err != nil { + b.Fatal("Unmmarshal:", err) + } + } + b.SetBytes(int64(len(codeJSON))) +} + +func BenchmarkCodeUnmarshalReuse(b *testing.B) { + if codeJSON == nil { + b.StopTimer() + codeInit() + b.StartTimer() + } + var r codeResponse + for i := 0; i < b.N; i++ { + if err := Unmarshal(codeJSON, &r); err != nil { + b.Fatal("Unmmarshal:", err) + } + } +} + +func BenchmarkUnmarshalString(b *testing.B) { + data := 
[]byte(`"hello, world"`) + var s string + + for i := 0; i < b.N; i++ { + if err := Unmarshal(data, &s); err != nil { + b.Fatal("Unmarshal:", err) + } + } +} + +func BenchmarkUnmarshalFloat64(b *testing.B) { + var f float64 + data := []byte(`3.14`) + + for i := 0; i < b.N; i++ { + if err := Unmarshal(data, &f); err != nil { + b.Fatal("Unmarshal:", err) + } + } +} + +func BenchmarkUnmarshalInt64(b *testing.B) { + var x int64 + data := []byte(`3`) + + for i := 0; i < b.N; i++ { + if err := Unmarshal(data, &x); err != nil { + b.Fatal("Unmarshal:", err) + } + } +} + +func BenchmarkIssue10335(b *testing.B) { + b.ReportAllocs() + var s struct{} + j := []byte(`{"a":{ }}`) + for n := 0; n < b.N; n++ { + if err := Unmarshal(j, &s); err != nil { + b.Fatal(err) + } + } +} diff --git a/vendor/github.com/docker/distribution/vendor/rsc.io/letsencrypt/vendor/gopkg.in/square/go-jose.v1/json/decode.go b/vendor/github.com/docker/distribution/vendor/rsc.io/letsencrypt/vendor/gopkg.in/square/go-jose.v1/json/decode.go new file mode 100644 index 000000000..37457e5a8 --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/rsc.io/letsencrypt/vendor/gopkg.in/square/go-jose.v1/json/decode.go @@ -0,0 +1,1183 @@ +// Copyright 2010 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Represents JSON data structure using native Go types: booleans, floats, +// strings, arrays, and maps. + +package json + +import ( + "bytes" + "encoding" + "encoding/base64" + "errors" + "fmt" + "reflect" + "runtime" + "strconv" + "unicode" + "unicode/utf16" + "unicode/utf8" +) + +// Unmarshal parses the JSON-encoded data and stores the result +// in the value pointed to by v. +// +// Unmarshal uses the inverse of the encodings that +// Marshal uses, allocating maps, slices, and pointers as necessary, +// with the following additional rules: +// +// To unmarshal JSON into a pointer, Unmarshal first handles the case of +// the JSON being the JSON literal null. In that case, Unmarshal sets +// the pointer to nil. Otherwise, Unmarshal unmarshals the JSON into +// the value pointed at by the pointer. If the pointer is nil, Unmarshal +// allocates a new value for it to point to. +// +// To unmarshal JSON into a struct, Unmarshal matches incoming object +// keys to the keys used by Marshal (either the struct field name or its tag), +// preferring an exact match but also accepting a case-insensitive match. +// Unmarshal will only set exported fields of the struct. +// +// To unmarshal JSON into an interface value, +// Unmarshal stores one of these in the interface value: +// +// bool, for JSON booleans +// float64, for JSON numbers +// string, for JSON strings +// []interface{}, for JSON arrays +// map[string]interface{}, for JSON objects +// nil for JSON null +// +// To unmarshal a JSON array into a slice, Unmarshal resets the slice length +// to zero and then appends each element to the slice. +// As a special case, to unmarshal an empty JSON array into a slice, +// Unmarshal replaces the slice with a new empty slice. +// +// To unmarshal a JSON array into a Go array, Unmarshal decodes +// JSON array elements into corresponding Go array elements. +// If the Go array is smaller than the JSON array, +// the additional JSON array elements are discarded. +// If the JSON array is smaller than the Go array, +// the additional Go array elements are set to zero values. 
+// +// To unmarshal a JSON object into a string-keyed map, Unmarshal first +// establishes a map to use, If the map is nil, Unmarshal allocates a new map. +// Otherwise Unmarshal reuses the existing map, keeping existing entries. +// Unmarshal then stores key-value pairs from the JSON object into the map. +// +// If a JSON value is not appropriate for a given target type, +// or if a JSON number overflows the target type, Unmarshal +// skips that field and completes the unmarshaling as best it can. +// If no more serious errors are encountered, Unmarshal returns +// an UnmarshalTypeError describing the earliest such error. +// +// The JSON null value unmarshals into an interface, map, pointer, or slice +// by setting that Go value to nil. Because null is often used in JSON to mean +// ``not present,'' unmarshaling a JSON null into any other Go type has no effect +// on the value and produces no error. +// +// When unmarshaling quoted strings, invalid UTF-8 or +// invalid UTF-16 surrogate pairs are not treated as an error. +// Instead, they are replaced by the Unicode replacement +// character U+FFFD. +// +func Unmarshal(data []byte, v interface{}) error { + // Check for well-formedness. + // Avoids filling out half a data structure + // before discovering a JSON syntax error. + var d decodeState + err := checkValid(data, &d.scan) + if err != nil { + return err + } + + d.init(data) + return d.unmarshal(v) +} + +// Unmarshaler is the interface implemented by objects +// that can unmarshal a JSON description of themselves. +// The input can be assumed to be a valid encoding of +// a JSON value. UnmarshalJSON must copy the JSON data +// if it wishes to retain the data after returning. +type Unmarshaler interface { + UnmarshalJSON([]byte) error +} + +// An UnmarshalTypeError describes a JSON value that was +// not appropriate for a value of a specific Go type. +type UnmarshalTypeError struct { + Value string // description of JSON value - "bool", "array", "number -5" + Type reflect.Type // type of Go value it could not be assigned to + Offset int64 // error occurred after reading Offset bytes +} + +func (e *UnmarshalTypeError) Error() string { + return "json: cannot unmarshal " + e.Value + " into Go value of type " + e.Type.String() +} + +// An UnmarshalFieldError describes a JSON object key that +// led to an unexported (and therefore unwritable) struct field. +// (No longer used; kept for compatibility.) +type UnmarshalFieldError struct { + Key string + Type reflect.Type + Field reflect.StructField +} + +func (e *UnmarshalFieldError) Error() string { + return "json: cannot unmarshal object key " + strconv.Quote(e.Key) + " into unexported field " + e.Field.Name + " of type " + e.Type.String() +} + +// An InvalidUnmarshalError describes an invalid argument passed to Unmarshal. +// (The argument to Unmarshal must be a non-nil pointer.) 
+type InvalidUnmarshalError struct { + Type reflect.Type +} + +func (e *InvalidUnmarshalError) Error() string { + if e.Type == nil { + return "json: Unmarshal(nil)" + } + + if e.Type.Kind() != reflect.Ptr { + return "json: Unmarshal(non-pointer " + e.Type.String() + ")" + } + return "json: Unmarshal(nil " + e.Type.String() + ")" +} + +func (d *decodeState) unmarshal(v interface{}) (err error) { + defer func() { + if r := recover(); r != nil { + if _, ok := r.(runtime.Error); ok { + panic(r) + } + err = r.(error) + } + }() + + rv := reflect.ValueOf(v) + if rv.Kind() != reflect.Ptr || rv.IsNil() { + return &InvalidUnmarshalError{reflect.TypeOf(v)} + } + + d.scan.reset() + // We decode rv not rv.Elem because the Unmarshaler interface + // test must be applied at the top level of the value. + d.value(rv) + return d.savedError +} + +// A Number represents a JSON number literal. +type Number string + +// String returns the literal text of the number. +func (n Number) String() string { return string(n) } + +// Float64 returns the number as a float64. +func (n Number) Float64() (float64, error) { + return strconv.ParseFloat(string(n), 64) +} + +// Int64 returns the number as an int64. +func (n Number) Int64() (int64, error) { + return strconv.ParseInt(string(n), 10, 64) +} + +// isValidNumber reports whether s is a valid JSON number literal. +func isValidNumber(s string) bool { + // This function implements the JSON numbers grammar. + // See https://tools.ietf.org/html/rfc7159#section-6 + // and http://json.org/number.gif + + if s == "" { + return false + } + + // Optional - + if s[0] == '-' { + s = s[1:] + if s == "" { + return false + } + } + + // Digits + switch { + default: + return false + + case s[0] == '0': + s = s[1:] + + case '1' <= s[0] && s[0] <= '9': + s = s[1:] + for len(s) > 0 && '0' <= s[0] && s[0] <= '9' { + s = s[1:] + } + } + + // . followed by 1 or more digits. + if len(s) >= 2 && s[0] == '.' && '0' <= s[1] && s[1] <= '9' { + s = s[2:] + for len(s) > 0 && '0' <= s[0] && s[0] <= '9' { + s = s[1:] + } + } + + // e or E followed by an optional - or + and + // 1 or more digits. + if len(s) >= 2 && (s[0] == 'e' || s[0] == 'E') { + s = s[1:] + if s[0] == '+' || s[0] == '-' { + s = s[1:] + if s == "" { + return false + } + } + for len(s) > 0 && '0' <= s[0] && s[0] <= '9' { + s = s[1:] + } + } + + // Make sure we are at the end. + return s == "" +} + +// decodeState represents the state while decoding a JSON value. +type decodeState struct { + data []byte + off int // read offset in data + scan scanner + nextscan scanner // for calls to nextValue + savedError error + useNumber bool +} + +// errPhase is used for errors that should not happen unless +// there is a bug in the JSON decoder or something is editing +// the data slice while the decoder executes. +var errPhase = errors.New("JSON decoder out of sync - data changing underfoot?") + +func (d *decodeState) init(data []byte) *decodeState { + d.data = data + d.off = 0 + d.savedError = nil + return d +} + +// error aborts the decoding by panicking with err. +func (d *decodeState) error(err error) { + panic(err) +} + +// saveError saves the first err it is called with, +// for reporting at the end of the unmarshal. +func (d *decodeState) saveError(err error) { + if d.savedError == nil { + d.savedError = err + } +} + +// next cuts off and returns the next full JSON value in d.data[d.off:]. +// The next value is known to be an object or array, not a literal. 
+func (d *decodeState) next() []byte { + c := d.data[d.off] + item, rest, err := nextValue(d.data[d.off:], &d.nextscan) + if err != nil { + d.error(err) + } + d.off = len(d.data) - len(rest) + + // Our scanner has seen the opening brace/bracket + // and thinks we're still in the middle of the object. + // invent a closing brace/bracket to get it out. + if c == '{' { + d.scan.step(&d.scan, '}') + } else { + d.scan.step(&d.scan, ']') + } + + return item +} + +// scanWhile processes bytes in d.data[d.off:] until it +// receives a scan code not equal to op. +// It updates d.off and returns the new scan code. +func (d *decodeState) scanWhile(op int) int { + var newOp int + for { + if d.off >= len(d.data) { + newOp = d.scan.eof() + d.off = len(d.data) + 1 // mark processed EOF with len+1 + } else { + c := d.data[d.off] + d.off++ + newOp = d.scan.step(&d.scan, c) + } + if newOp != op { + break + } + } + return newOp +} + +// value decodes a JSON value from d.data[d.off:] into the value. +// it updates d.off to point past the decoded value. +func (d *decodeState) value(v reflect.Value) { + if !v.IsValid() { + _, rest, err := nextValue(d.data[d.off:], &d.nextscan) + if err != nil { + d.error(err) + } + d.off = len(d.data) - len(rest) + + // d.scan thinks we're still at the beginning of the item. + // Feed in an empty string - the shortest, simplest value - + // so that it knows we got to the end of the value. + if d.scan.redo { + // rewind. + d.scan.redo = false + d.scan.step = stateBeginValue + } + d.scan.step(&d.scan, '"') + d.scan.step(&d.scan, '"') + + n := len(d.scan.parseState) + if n > 0 && d.scan.parseState[n-1] == parseObjectKey { + // d.scan thinks we just read an object key; finish the object + d.scan.step(&d.scan, ':') + d.scan.step(&d.scan, '"') + d.scan.step(&d.scan, '"') + d.scan.step(&d.scan, '}') + } + + return + } + + switch op := d.scanWhile(scanSkipSpace); op { + default: + d.error(errPhase) + + case scanBeginArray: + d.array(v) + + case scanBeginObject: + d.object(v) + + case scanBeginLiteral: + d.literal(v) + } +} + +type unquotedValue struct{} + +// valueQuoted is like value but decodes a +// quoted string literal or literal null into an interface value. +// If it finds anything other than a quoted string literal or null, +// valueQuoted returns unquotedValue{}. +func (d *decodeState) valueQuoted() interface{} { + switch op := d.scanWhile(scanSkipSpace); op { + default: + d.error(errPhase) + + case scanBeginArray: + d.array(reflect.Value{}) + + case scanBeginObject: + d.object(reflect.Value{}) + + case scanBeginLiteral: + switch v := d.literalInterface().(type) { + case nil, string: + return v + } + } + return unquotedValue{} +} + +// indirect walks down v allocating pointers as needed, +// until it gets to a non-pointer. +// if it encounters an Unmarshaler, indirect stops and returns that. +// if decodingNull is true, indirect stops at the last pointer so it can be set to nil. +func (d *decodeState) indirect(v reflect.Value, decodingNull bool) (Unmarshaler, encoding.TextUnmarshaler, reflect.Value) { + // If v is a named type and is addressable, + // start with its address, so that if the type has pointer methods, + // we find them. + if v.Kind() != reflect.Ptr && v.Type().Name() != "" && v.CanAddr() { + v = v.Addr() + } + for { + // Load value from interface, but only if the result will be + // usefully addressable. 
+ if v.Kind() == reflect.Interface && !v.IsNil() { + e := v.Elem() + if e.Kind() == reflect.Ptr && !e.IsNil() && (!decodingNull || e.Elem().Kind() == reflect.Ptr) { + v = e + continue + } + } + + if v.Kind() != reflect.Ptr { + break + } + + if v.Elem().Kind() != reflect.Ptr && decodingNull && v.CanSet() { + break + } + if v.IsNil() { + v.Set(reflect.New(v.Type().Elem())) + } + if v.Type().NumMethod() > 0 { + if u, ok := v.Interface().(Unmarshaler); ok { + return u, nil, reflect.Value{} + } + if u, ok := v.Interface().(encoding.TextUnmarshaler); ok { + return nil, u, reflect.Value{} + } + } + v = v.Elem() + } + return nil, nil, v +} + +// array consumes an array from d.data[d.off-1:], decoding into the value v. +// the first byte of the array ('[') has been read already. +func (d *decodeState) array(v reflect.Value) { + // Check for unmarshaler. + u, ut, pv := d.indirect(v, false) + if u != nil { + d.off-- + err := u.UnmarshalJSON(d.next()) + if err != nil { + d.error(err) + } + return + } + if ut != nil { + d.saveError(&UnmarshalTypeError{"array", v.Type(), int64(d.off)}) + d.off-- + d.next() + return + } + + v = pv + + // Check type of target. + switch v.Kind() { + case reflect.Interface: + if v.NumMethod() == 0 { + // Decoding into nil interface? Switch to non-reflect code. + v.Set(reflect.ValueOf(d.arrayInterface())) + return + } + // Otherwise it's invalid. + fallthrough + default: + d.saveError(&UnmarshalTypeError{"array", v.Type(), int64(d.off)}) + d.off-- + d.next() + return + case reflect.Array: + case reflect.Slice: + break + } + + i := 0 + for { + // Look ahead for ] - can only happen on first iteration. + op := d.scanWhile(scanSkipSpace) + if op == scanEndArray { + break + } + + // Back up so d.value can have the byte we just read. + d.off-- + d.scan.undo(op) + + // Get element of array, growing if necessary. + if v.Kind() == reflect.Slice { + // Grow slice if necessary + if i >= v.Cap() { + newcap := v.Cap() + v.Cap()/2 + if newcap < 4 { + newcap = 4 + } + newv := reflect.MakeSlice(v.Type(), v.Len(), newcap) + reflect.Copy(newv, v) + v.Set(newv) + } + if i >= v.Len() { + v.SetLen(i + 1) + } + } + + if i < v.Len() { + // Decode into element. + d.value(v.Index(i)) + } else { + // Ran out of fixed array: skip. + d.value(reflect.Value{}) + } + i++ + + // Next token must be , or ]. + op = d.scanWhile(scanSkipSpace) + if op == scanEndArray { + break + } + if op != scanArrayValue { + d.error(errPhase) + } + } + + if i < v.Len() { + if v.Kind() == reflect.Array { + // Array. Zero the rest. + z := reflect.Zero(v.Type().Elem()) + for ; i < v.Len(); i++ { + v.Index(i).Set(z) + } + } else { + v.SetLen(i) + } + } + if i == 0 && v.Kind() == reflect.Slice { + v.Set(reflect.MakeSlice(v.Type(), 0, 0)) + } +} + +var nullLiteral = []byte("null") + +// object consumes an object from d.data[d.off-1:], decoding into the value v. +// the first byte ('{') of the object has been read already. +func (d *decodeState) object(v reflect.Value) { + // Check for unmarshaler. + u, ut, pv := d.indirect(v, false) + if u != nil { + d.off-- + err := u.UnmarshalJSON(d.next()) + if err != nil { + d.error(err) + } + return + } + if ut != nil { + d.saveError(&UnmarshalTypeError{"object", v.Type(), int64(d.off)}) + d.off-- + d.next() // skip over { } in input + return + } + v = pv + + // Decoding into nil interface? Switch to non-reflect code. 
+ if v.Kind() == reflect.Interface && v.NumMethod() == 0 { + v.Set(reflect.ValueOf(d.objectInterface())) + return + } + + // Check type of target: struct or map[string]T + switch v.Kind() { + case reflect.Map: + // map must have string kind + t := v.Type() + if t.Key().Kind() != reflect.String { + d.saveError(&UnmarshalTypeError{"object", v.Type(), int64(d.off)}) + d.off-- + d.next() // skip over { } in input + return + } + if v.IsNil() { + v.Set(reflect.MakeMap(t)) + } + case reflect.Struct: + + default: + d.saveError(&UnmarshalTypeError{"object", v.Type(), int64(d.off)}) + d.off-- + d.next() // skip over { } in input + return + } + + var mapElem reflect.Value + keys := map[string]bool{} + + for { + // Read opening " of string key or closing }. + op := d.scanWhile(scanSkipSpace) + if op == scanEndObject { + // closing } - can only happen on first iteration. + break + } + if op != scanBeginLiteral { + d.error(errPhase) + } + + // Read key. + start := d.off - 1 + op = d.scanWhile(scanContinue) + item := d.data[start : d.off-1] + key, ok := unquote(item) + if !ok { + d.error(errPhase) + } + + // Check for duplicate keys. + _, ok = keys[key] + if !ok { + keys[key] = true + } else { + d.error(fmt.Errorf("json: duplicate key '%s' in object", key)) + } + + // Figure out field corresponding to key. + var subv reflect.Value + destring := false // whether the value is wrapped in a string to be decoded first + + if v.Kind() == reflect.Map { + elemType := v.Type().Elem() + if !mapElem.IsValid() { + mapElem = reflect.New(elemType).Elem() + } else { + mapElem.Set(reflect.Zero(elemType)) + } + subv = mapElem + } else { + var f *field + fields := cachedTypeFields(v.Type()) + for i := range fields { + ff := &fields[i] + if bytes.Equal(ff.nameBytes, []byte(key)) { + f = ff + break + } + } + if f != nil { + subv = v + destring = f.quoted + for _, i := range f.index { + if subv.Kind() == reflect.Ptr { + if subv.IsNil() { + subv.Set(reflect.New(subv.Type().Elem())) + } + subv = subv.Elem() + } + subv = subv.Field(i) + } + } + } + + // Read : before value. + if op == scanSkipSpace { + op = d.scanWhile(scanSkipSpace) + } + if op != scanObjectKey { + d.error(errPhase) + } + + // Read value. + if destring { + switch qv := d.valueQuoted().(type) { + case nil: + d.literalStore(nullLiteral, subv, false) + case string: + d.literalStore([]byte(qv), subv, true) + default: + d.saveError(fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal unquoted value into %v", subv.Type())) + } + } else { + d.value(subv) + } + + // Write value back to map; + // if using struct, subv points into struct already. + if v.Kind() == reflect.Map { + kv := reflect.ValueOf(key).Convert(v.Type().Key()) + v.SetMapIndex(kv, subv) + } + + // Next token must be , or }. + op = d.scanWhile(scanSkipSpace) + if op == scanEndObject { + break + } + if op != scanObjectValue { + d.error(errPhase) + } + } +} + +// literal consumes a literal from d.data[d.off-1:], decoding into the value v. +// The first byte of the literal has been read already +// (that's how the caller knows it's a literal). +func (d *decodeState) literal(v reflect.Value) { + // All bytes inside literal return scanContinue op code. + start := d.off - 1 + op := d.scanWhile(scanContinue) + + // Scan read one byte too far; back up. + d.off-- + d.scan.undo(op) + + d.literalStore(d.data[start:d.off], v, false) +} + +// convertNumber converts the number literal s to a float64 or a Number +// depending on the setting of d.useNumber. 
+func (d *decodeState) convertNumber(s string) (interface{}, error) { + if d.useNumber { + return Number(s), nil + } + f, err := strconv.ParseFloat(s, 64) + if err != nil { + return nil, &UnmarshalTypeError{"number " + s, reflect.TypeOf(0.0), int64(d.off)} + } + return f, nil +} + +var numberType = reflect.TypeOf(Number("")) + +// literalStore decodes a literal stored in item into v. +// +// fromQuoted indicates whether this literal came from unwrapping a +// string from the ",string" struct tag option. this is used only to +// produce more helpful error messages. +func (d *decodeState) literalStore(item []byte, v reflect.Value, fromQuoted bool) { + // Check for unmarshaler. + if len(item) == 0 { + //Empty string given + d.saveError(fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal %q into %v", item, v.Type())) + return + } + wantptr := item[0] == 'n' // null + u, ut, pv := d.indirect(v, wantptr) + if u != nil { + err := u.UnmarshalJSON(item) + if err != nil { + d.error(err) + } + return + } + if ut != nil { + if item[0] != '"' { + if fromQuoted { + d.saveError(fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal %q into %v", item, v.Type())) + } else { + d.saveError(&UnmarshalTypeError{"string", v.Type(), int64(d.off)}) + } + return + } + s, ok := unquoteBytes(item) + if !ok { + if fromQuoted { + d.error(fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal %q into %v", item, v.Type())) + } else { + d.error(errPhase) + } + } + err := ut.UnmarshalText(s) + if err != nil { + d.error(err) + } + return + } + + v = pv + + switch c := item[0]; c { + case 'n': // null + switch v.Kind() { + case reflect.Interface, reflect.Ptr, reflect.Map, reflect.Slice: + v.Set(reflect.Zero(v.Type())) + // otherwise, ignore null for primitives/string + } + case 't', 'f': // true, false + value := c == 't' + switch v.Kind() { + default: + if fromQuoted { + d.saveError(fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal %q into %v", item, v.Type())) + } else { + d.saveError(&UnmarshalTypeError{"bool", v.Type(), int64(d.off)}) + } + case reflect.Bool: + v.SetBool(value) + case reflect.Interface: + if v.NumMethod() == 0 { + v.Set(reflect.ValueOf(value)) + } else { + d.saveError(&UnmarshalTypeError{"bool", v.Type(), int64(d.off)}) + } + } + + case '"': // string + s, ok := unquoteBytes(item) + if !ok { + if fromQuoted { + d.error(fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal %q into %v", item, v.Type())) + } else { + d.error(errPhase) + } + } + switch v.Kind() { + default: + d.saveError(&UnmarshalTypeError{"string", v.Type(), int64(d.off)}) + case reflect.Slice: + if v.Type().Elem().Kind() != reflect.Uint8 { + d.saveError(&UnmarshalTypeError{"string", v.Type(), int64(d.off)}) + break + } + b := make([]byte, base64.StdEncoding.DecodedLen(len(s))) + n, err := base64.StdEncoding.Decode(b, s) + if err != nil { + d.saveError(err) + break + } + v.SetBytes(b[:n]) + case reflect.String: + v.SetString(string(s)) + case reflect.Interface: + if v.NumMethod() == 0 { + v.Set(reflect.ValueOf(string(s))) + } else { + d.saveError(&UnmarshalTypeError{"string", v.Type(), int64(d.off)}) + } + } + + default: // number + if c != '-' && (c < '0' || c > '9') { + if fromQuoted { + d.error(fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal %q into %v", item, v.Type())) + } else { + d.error(errPhase) + } + } + s := string(item) + switch v.Kind() { + default: + if v.Kind() == reflect.String && v.Type() == 
numberType { + v.SetString(s) + if !isValidNumber(s) { + d.error(fmt.Errorf("json: invalid number literal, trying to unmarshal %q into Number", item)) + } + break + } + if fromQuoted { + d.error(fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal %q into %v", item, v.Type())) + } else { + d.error(&UnmarshalTypeError{"number", v.Type(), int64(d.off)}) + } + case reflect.Interface: + n, err := d.convertNumber(s) + if err != nil { + d.saveError(err) + break + } + if v.NumMethod() != 0 { + d.saveError(&UnmarshalTypeError{"number", v.Type(), int64(d.off)}) + break + } + v.Set(reflect.ValueOf(n)) + + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + n, err := strconv.ParseInt(s, 10, 64) + if err != nil || v.OverflowInt(n) { + d.saveError(&UnmarshalTypeError{"number " + s, v.Type(), int64(d.off)}) + break + } + v.SetInt(n) + + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + n, err := strconv.ParseUint(s, 10, 64) + if err != nil || v.OverflowUint(n) { + d.saveError(&UnmarshalTypeError{"number " + s, v.Type(), int64(d.off)}) + break + } + v.SetUint(n) + + case reflect.Float32, reflect.Float64: + n, err := strconv.ParseFloat(s, v.Type().Bits()) + if err != nil || v.OverflowFloat(n) { + d.saveError(&UnmarshalTypeError{"number " + s, v.Type(), int64(d.off)}) + break + } + v.SetFloat(n) + } + } +} + +// The xxxInterface routines build up a value to be stored +// in an empty interface. They are not strictly necessary, +// but they avoid the weight of reflection in this common case. + +// valueInterface is like value but returns interface{} +func (d *decodeState) valueInterface() interface{} { + switch d.scanWhile(scanSkipSpace) { + default: + d.error(errPhase) + panic("unreachable") + case scanBeginArray: + return d.arrayInterface() + case scanBeginObject: + return d.objectInterface() + case scanBeginLiteral: + return d.literalInterface() + } +} + +// arrayInterface is like array but returns []interface{}. +func (d *decodeState) arrayInterface() []interface{} { + var v = make([]interface{}, 0) + for { + // Look ahead for ] - can only happen on first iteration. + op := d.scanWhile(scanSkipSpace) + if op == scanEndArray { + break + } + + // Back up so d.value can have the byte we just read. + d.off-- + d.scan.undo(op) + + v = append(v, d.valueInterface()) + + // Next token must be , or ]. + op = d.scanWhile(scanSkipSpace) + if op == scanEndArray { + break + } + if op != scanArrayValue { + d.error(errPhase) + } + } + return v +} + +// objectInterface is like object but returns map[string]interface{}. +func (d *decodeState) objectInterface() map[string]interface{} { + m := make(map[string]interface{}) + keys := map[string]bool{} + + for { + // Read opening " of string key or closing }. + op := d.scanWhile(scanSkipSpace) + if op == scanEndObject { + // closing } - can only happen on first iteration. + break + } + if op != scanBeginLiteral { + d.error(errPhase) + } + + // Read string key. + start := d.off - 1 + op = d.scanWhile(scanContinue) + item := d.data[start : d.off-1] + key, ok := unquote(item) + if !ok { + d.error(errPhase) + } + + // Check for duplicate keys. + _, ok = keys[key] + if !ok { + keys[key] = true + } else { + d.error(fmt.Errorf("json: duplicate key '%s' in object", key)) + } + + // Read : before value. + if op == scanSkipSpace { + op = d.scanWhile(scanSkipSpace) + } + if op != scanObjectKey { + d.error(errPhase) + } + + // Read value. 
+ m[key] = d.valueInterface() + + // Next token must be , or }. + op = d.scanWhile(scanSkipSpace) + if op == scanEndObject { + break + } + if op != scanObjectValue { + d.error(errPhase) + } + } + return m +} + +// literalInterface is like literal but returns an interface value. +func (d *decodeState) literalInterface() interface{} { + // All bytes inside literal return scanContinue op code. + start := d.off - 1 + op := d.scanWhile(scanContinue) + + // Scan read one byte too far; back up. + d.off-- + d.scan.undo(op) + item := d.data[start:d.off] + + switch c := item[0]; c { + case 'n': // null + return nil + + case 't', 'f': // true, false + return c == 't' + + case '"': // string + s, ok := unquote(item) + if !ok { + d.error(errPhase) + } + return s + + default: // number + if c != '-' && (c < '0' || c > '9') { + d.error(errPhase) + } + n, err := d.convertNumber(string(item)) + if err != nil { + d.saveError(err) + } + return n + } +} + +// getu4 decodes \uXXXX from the beginning of s, returning the hex value, +// or it returns -1. +func getu4(s []byte) rune { + if len(s) < 6 || s[0] != '\\' || s[1] != 'u' { + return -1 + } + r, err := strconv.ParseUint(string(s[2:6]), 16, 64) + if err != nil { + return -1 + } + return rune(r) +} + +// unquote converts a quoted JSON string literal s into an actual string t. +// The rules are different than for Go, so cannot use strconv.Unquote. +func unquote(s []byte) (t string, ok bool) { + s, ok = unquoteBytes(s) + t = string(s) + return +} + +func unquoteBytes(s []byte) (t []byte, ok bool) { + if len(s) < 2 || s[0] != '"' || s[len(s)-1] != '"' { + return + } + s = s[1 : len(s)-1] + + // Check for unusual characters. If there are none, + // then no unquoting is needed, so return a slice of the + // original bytes. + r := 0 + for r < len(s) { + c := s[r] + if c == '\\' || c == '"' || c < ' ' { + break + } + if c < utf8.RuneSelf { + r++ + continue + } + rr, size := utf8.DecodeRune(s[r:]) + if rr == utf8.RuneError && size == 1 { + break + } + r += size + } + if r == len(s) { + return s, true + } + + b := make([]byte, len(s)+2*utf8.UTFMax) + w := copy(b, s[0:r]) + for r < len(s) { + // Out of room? Can only happen if s is full of + // malformed UTF-8 and we're replacing each + // byte with RuneError. + if w >= len(b)-2*utf8.UTFMax { + nb := make([]byte, (len(b)+utf8.UTFMax)*2) + copy(nb, b[0:w]) + b = nb + } + switch c := s[r]; { + case c == '\\': + r++ + if r >= len(s) { + return + } + switch s[r] { + default: + return + case '"', '\\', '/', '\'': + b[w] = s[r] + r++ + w++ + case 'b': + b[w] = '\b' + r++ + w++ + case 'f': + b[w] = '\f' + r++ + w++ + case 'n': + b[w] = '\n' + r++ + w++ + case 'r': + b[w] = '\r' + r++ + w++ + case 't': + b[w] = '\t' + r++ + w++ + case 'u': + r-- + rr := getu4(s[r:]) + if rr < 0 { + return + } + r += 6 + if utf16.IsSurrogate(rr) { + rr1 := getu4(s[r:]) + if dec := utf16.DecodeRune(rr, rr1); dec != unicode.ReplacementChar { + // A valid pair; consume. + r += 6 + w += utf8.EncodeRune(b[w:], dec) + break + } + // Invalid surrogate; fall back to replacement rune. + rr = unicode.ReplacementChar + } + w += utf8.EncodeRune(b[w:], rr) + } + + // Quote, control characters are invalid. + case c == '"', c < ' ': + return + + // ASCII + case c < utf8.RuneSelf: + b[w] = c + r++ + w++ + + // Coerce to well-formed UTF-8. 
+ default: + rr, size := utf8.DecodeRune(s[r:]) + r += size + w += utf8.EncodeRune(b[w:], rr) + } + } + return b[0:w], true +} diff --git a/vendor/github.com/docker/distribution/vendor/rsc.io/letsencrypt/vendor/gopkg.in/square/go-jose.v1/json/decode_test.go b/vendor/github.com/docker/distribution/vendor/rsc.io/letsencrypt/vendor/gopkg.in/square/go-jose.v1/json/decode_test.go new file mode 100644 index 000000000..7577b21ac --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/rsc.io/letsencrypt/vendor/gopkg.in/square/go-jose.v1/json/decode_test.go @@ -0,0 +1,1474 @@ +// Copyright 2010 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package json + +import ( + "bytes" + "encoding" + "fmt" + "image" + "net" + "reflect" + "strings" + "testing" + "time" +) + +type T struct { + X string + Y int + Z int `json:"-"` +} + +type U struct { + Alphabet string `json:"alpha"` +} + +type V struct { + F1 interface{} + F2 int32 + F3 Number +} + +// ifaceNumAsFloat64/ifaceNumAsNumber are used to test unmarshaling with and +// without UseNumber +var ifaceNumAsFloat64 = map[string]interface{}{ + "k1": float64(1), + "k2": "s", + "k3": []interface{}{float64(1), float64(2.0), float64(3e-3)}, + "k4": map[string]interface{}{"kk1": "s", "kk2": float64(2)}, +} + +var ifaceNumAsNumber = map[string]interface{}{ + "k1": Number("1"), + "k2": "s", + "k3": []interface{}{Number("1"), Number("2.0"), Number("3e-3")}, + "k4": map[string]interface{}{"kk1": "s", "kk2": Number("2")}, +} + +type tx struct { + x int +} + +// A type that can unmarshal itself. + +type unmarshaler struct { + T bool +} + +func (u *unmarshaler) UnmarshalJSON(b []byte) error { + *u = unmarshaler{true} // All we need to see that UnmarshalJSON is called. + return nil +} + +type ustruct struct { + M unmarshaler +} + +type unmarshalerText struct { + T bool +} + +// needed for re-marshaling tests +func (u *unmarshalerText) MarshalText() ([]byte, error) { + return []byte(""), nil +} + +func (u *unmarshalerText) UnmarshalText(b []byte) error { + *u = unmarshalerText{true} // All we need to see that UnmarshalText is called. + return nil +} + +var _ encoding.TextUnmarshaler = (*unmarshalerText)(nil) + +type ustructText struct { + M unmarshalerText +} + +var ( + um0, um1 unmarshaler // target2 of unmarshaling + ump = &um1 + umtrue = unmarshaler{true} + umslice = []unmarshaler{{true}} + umslicep = new([]unmarshaler) + umstruct = ustruct{unmarshaler{true}} + + um0T, um1T unmarshalerText // target2 of unmarshaling + umpT = &um1T + umtrueT = unmarshalerText{true} + umsliceT = []unmarshalerText{{true}} + umslicepT = new([]unmarshalerText) + umstructT = ustructText{unmarshalerText{true}} +) + +// Test data structures for anonymous fields. 
+ +type Point struct { + Z int +} + +type Top struct { + Level0 int + Embed0 + *Embed0a + *Embed0b `json:"e,omitempty"` // treated as named + Embed0c `json:"-"` // ignored + Loop + Embed0p // has Point with X, Y, used + Embed0q // has Point with Z, used + embed // contains exported field +} + +type Embed0 struct { + Level1a int // overridden by Embed0a's Level1a with json tag + Level1b int // used because Embed0a's Level1b is renamed + Level1c int // used because Embed0a's Level1c is ignored + Level1d int // annihilated by Embed0a's Level1d + Level1e int `json:"x"` // annihilated by Embed0a.Level1e +} + +type Embed0a struct { + Level1a int `json:"Level1a,omitempty"` + Level1b int `json:"LEVEL1B,omitempty"` + Level1c int `json:"-"` + Level1d int // annihilated by Embed0's Level1d + Level1f int `json:"x"` // annihilated by Embed0's Level1e +} + +type Embed0b Embed0 + +type Embed0c Embed0 + +type Embed0p struct { + image.Point +} + +type Embed0q struct { + Point +} + +type embed struct { + Q int +} + +type Loop struct { + Loop1 int `json:",omitempty"` + Loop2 int `json:",omitempty"` + *Loop +} + +// From reflect test: +// The X in S6 and S7 annihilate, but they also block the X in S8.S9. +type S5 struct { + S6 + S7 + S8 +} + +type S6 struct { + X int +} + +type S7 S6 + +type S8 struct { + S9 +} + +type S9 struct { + X int + Y int +} + +// From reflect test: +// The X in S11.S6 and S12.S6 annihilate, but they also block the X in S13.S8.S9. +type S10 struct { + S11 + S12 + S13 +} + +type S11 struct { + S6 +} + +type S12 struct { + S6 +} + +type S13 struct { + S8 +} + +type unmarshalTest struct { + in string + ptr interface{} + out interface{} + err error + useNumber bool +} + +type XYZ struct { + X interface{} + Y interface{} + Z interface{} +} + +func sliceAddr(x []int) *[]int { return &x } +func mapAddr(x map[string]int) *map[string]int { return &x } + +var unmarshalTests = []unmarshalTest{ + // basic types + {in: `true`, ptr: new(bool), out: true}, + {in: `1`, ptr: new(int), out: 1}, + {in: `1.2`, ptr: new(float64), out: 1.2}, + {in: `-5`, ptr: new(int16), out: int16(-5)}, + {in: `2`, ptr: new(Number), out: Number("2"), useNumber: true}, + {in: `2`, ptr: new(Number), out: Number("2")}, + {in: `2`, ptr: new(interface{}), out: float64(2.0)}, + {in: `2`, ptr: new(interface{}), out: Number("2"), useNumber: true}, + {in: `"a\u1234"`, ptr: new(string), out: "a\u1234"}, + {in: `"http:\/\/"`, ptr: new(string), out: "http://"}, + {in: `"g-clef: \uD834\uDD1E"`, ptr: new(string), out: "g-clef: \U0001D11E"}, + {in: `"invalid: \uD834x\uDD1E"`, ptr: new(string), out: "invalid: \uFFFDx\uFFFD"}, + {in: "null", ptr: new(interface{}), out: nil}, + {in: `{"X": [1,2,3], "Y": 4}`, ptr: new(T), out: T{Y: 4}, err: &UnmarshalTypeError{"array", reflect.TypeOf(""), 7}}, + {in: `{"x": 1}`, ptr: new(tx), out: tx{}}, + {in: `{"F1":1,"F2":2,"F3":3}`, ptr: new(V), out: V{F1: float64(1), F2: int32(2), F3: Number("3")}}, + {in: `{"F1":1,"F2":2,"F3":3}`, ptr: new(V), out: V{F1: Number("1"), F2: int32(2), F3: Number("3")}, useNumber: true}, + {in: `{"k1":1,"k2":"s","k3":[1,2.0,3e-3],"k4":{"kk1":"s","kk2":2}}`, ptr: new(interface{}), out: ifaceNumAsFloat64}, + {in: `{"k1":1,"k2":"s","k3":[1,2.0,3e-3],"k4":{"kk1":"s","kk2":2}}`, ptr: new(interface{}), out: ifaceNumAsNumber, useNumber: true}, + + // raw values with whitespace + {in: "\n true ", ptr: new(bool), out: true}, + {in: "\t 1 ", ptr: new(int), out: 1}, + {in: "\r 1.2 ", ptr: new(float64), out: 1.2}, + {in: "\t -5 \n", ptr: new(int16), out: int16(-5)}, + {in: "\t 
\"a\\u1234\" \n", ptr: new(string), out: "a\u1234"}, + + // Z has a "-" tag. + {in: `{"Y": 1, "Z": 2}`, ptr: new(T), out: T{Y: 1}}, + + {in: `{"alpha": "abc", "alphabet": "xyz"}`, ptr: new(U), out: U{Alphabet: "abc"}}, + {in: `{"alpha": "abc"}`, ptr: new(U), out: U{Alphabet: "abc"}}, + {in: `{"alphabet": "xyz"}`, ptr: new(U), out: U{}}, + + // syntax errors + {in: `{"X": "foo", "Y"}`, err: &SyntaxError{"invalid character '}' after object key", 17}}, + {in: `[1, 2, 3+]`, err: &SyntaxError{"invalid character '+' after array element", 9}}, + {in: `{"X":12x}`, err: &SyntaxError{"invalid character 'x' after object key:value pair", 8}, useNumber: true}, + + // raw value errors + {in: "\x01 42", err: &SyntaxError{"invalid character '\\x01' looking for beginning of value", 1}}, + {in: " 42 \x01", err: &SyntaxError{"invalid character '\\x01' after top-level value", 5}}, + {in: "\x01 true", err: &SyntaxError{"invalid character '\\x01' looking for beginning of value", 1}}, + {in: " false \x01", err: &SyntaxError{"invalid character '\\x01' after top-level value", 8}}, + {in: "\x01 1.2", err: &SyntaxError{"invalid character '\\x01' looking for beginning of value", 1}}, + {in: " 3.4 \x01", err: &SyntaxError{"invalid character '\\x01' after top-level value", 6}}, + {in: "\x01 \"string\"", err: &SyntaxError{"invalid character '\\x01' looking for beginning of value", 1}}, + {in: " \"string\" \x01", err: &SyntaxError{"invalid character '\\x01' after top-level value", 11}}, + + // array tests + {in: `[1, 2, 3]`, ptr: new([3]int), out: [3]int{1, 2, 3}}, + {in: `[1, 2, 3]`, ptr: new([1]int), out: [1]int{1}}, + {in: `[1, 2, 3]`, ptr: new([5]int), out: [5]int{1, 2, 3, 0, 0}}, + + // empty array to interface test + {in: `[]`, ptr: new([]interface{}), out: []interface{}{}}, + {in: `null`, ptr: new([]interface{}), out: []interface{}(nil)}, + {in: `{"T":[]}`, ptr: new(map[string]interface{}), out: map[string]interface{}{"T": []interface{}{}}}, + {in: `{"T":null}`, ptr: new(map[string]interface{}), out: map[string]interface{}{"T": interface{}(nil)}}, + + // composite tests + {in: allValueIndent, ptr: new(All), out: allValue}, + {in: allValueCompact, ptr: new(All), out: allValue}, + {in: allValueIndent, ptr: new(*All), out: &allValue}, + {in: allValueCompact, ptr: new(*All), out: &allValue}, + {in: pallValueIndent, ptr: new(All), out: pallValue}, + {in: pallValueCompact, ptr: new(All), out: pallValue}, + {in: pallValueIndent, ptr: new(*All), out: &pallValue}, + {in: pallValueCompact, ptr: new(*All), out: &pallValue}, + + // unmarshal interface test + {in: `{"T":false}`, ptr: &um0, out: umtrue}, // use "false" so test will fail if custom unmarshaler is not called + {in: `{"T":false}`, ptr: &ump, out: &umtrue}, + {in: `[{"T":false}]`, ptr: &umslice, out: umslice}, + {in: `[{"T":false}]`, ptr: &umslicep, out: &umslice}, + {in: `{"M":{"T":false}}`, ptr: &umstruct, out: umstruct}, + + // UnmarshalText interface test + {in: `"X"`, ptr: &um0T, out: umtrueT}, // use "false" so test will fail if custom unmarshaler is not called + {in: `"X"`, ptr: &umpT, out: &umtrueT}, + {in: `["X"]`, ptr: &umsliceT, out: umsliceT}, + {in: `["X"]`, ptr: &umslicepT, out: &umsliceT}, + {in: `{"M":"X"}`, ptr: &umstructT, out: umstructT}, + + // Overwriting of data. + // This is different from package xml, but it's what we've always done. + // Now documented and tested. 
+ {in: `[2]`, ptr: sliceAddr([]int{1}), out: []int{2}}, + {in: `{"key": 2}`, ptr: mapAddr(map[string]int{"old": 0, "key": 1}), out: map[string]int{"key": 2}}, + + { + in: `{ + "Level0": 1, + "Level1b": 2, + "Level1c": 3, + "x": 4, + "Level1a": 5, + "LEVEL1B": 6, + "e": { + "Level1a": 8, + "Level1b": 9, + "Level1c": 10, + "Level1d": 11, + "x": 12 + }, + "Loop1": 13, + "Loop2": 14, + "X": 15, + "Y": 16, + "Z": 17, + "Q": 18 + }`, + ptr: new(Top), + out: Top{ + Level0: 1, + Embed0: Embed0{ + Level1b: 2, + Level1c: 3, + }, + Embed0a: &Embed0a{ + Level1a: 5, + Level1b: 6, + }, + Embed0b: &Embed0b{ + Level1a: 8, + Level1b: 9, + Level1c: 10, + Level1d: 11, + Level1e: 12, + }, + Loop: Loop{ + Loop1: 13, + Loop2: 14, + }, + Embed0p: Embed0p{ + Point: image.Point{X: 15, Y: 16}, + }, + Embed0q: Embed0q{ + Point: Point{Z: 17}, + }, + embed: embed{ + Q: 18, + }, + }, + }, + { + in: `{"X": 1,"Y":2}`, + ptr: new(S5), + out: S5{S8: S8{S9: S9{Y: 2}}}, + }, + { + in: `{"X": 1,"Y":2}`, + ptr: new(S10), + out: S10{S13: S13{S8: S8{S9: S9{Y: 2}}}}, + }, + + // invalid UTF-8 is coerced to valid UTF-8. + { + in: "\"hello\xffworld\"", + ptr: new(string), + out: "hello\ufffdworld", + }, + { + in: "\"hello\xc2\xc2world\"", + ptr: new(string), + out: "hello\ufffd\ufffdworld", + }, + { + in: "\"hello\xc2\xffworld\"", + ptr: new(string), + out: "hello\ufffd\ufffdworld", + }, + { + in: "\"hello\\ud800world\"", + ptr: new(string), + out: "hello\ufffdworld", + }, + { + in: "\"hello\\ud800\\ud800world\"", + ptr: new(string), + out: "hello\ufffd\ufffdworld", + }, + { + in: "\"hello\\ud800\\ud800world\"", + ptr: new(string), + out: "hello\ufffd\ufffdworld", + }, + { + in: "\"hello\xed\xa0\x80\xed\xb0\x80world\"", + ptr: new(string), + out: "hello\ufffd\ufffd\ufffd\ufffd\ufffd\ufffdworld", + }, + + // issue 8305 + { + in: `{"2009-11-10T23:00:00Z": "hello world"}`, + ptr: &map[time.Time]string{}, + err: &UnmarshalTypeError{"object", reflect.TypeOf(map[time.Time]string{}), 1}, + }, +} + +func TestMarshal(t *testing.T) { + b, err := Marshal(allValue) + if err != nil { + t.Fatalf("Marshal allValue: %v", err) + } + if string(b) != allValueCompact { + t.Errorf("Marshal allValueCompact") + diff(t, b, []byte(allValueCompact)) + return + } + + b, err = Marshal(pallValue) + if err != nil { + t.Fatalf("Marshal pallValue: %v", err) + } + if string(b) != pallValueCompact { + t.Errorf("Marshal pallValueCompact") + diff(t, b, []byte(pallValueCompact)) + return + } +} + +var badUTF8 = []struct { + in, out string +}{ + {"hello\xffworld", `"hello\ufffdworld"`}, + {"", `""`}, + {"\xff", `"\ufffd"`}, + {"\xff\xff", `"\ufffd\ufffd"`}, + {"a\xffb", `"a\ufffdb"`}, + {"\xe6\x97\xa5\xe6\x9c\xac\xff\xaa\x9e", `"日本\ufffd\ufffd\ufffd"`}, +} + +func TestMarshalBadUTF8(t *testing.T) { + for _, tt := range badUTF8 { + b, err := Marshal(tt.in) + if string(b) != tt.out || err != nil { + t.Errorf("Marshal(%q) = %#q, %v, want %#q, nil", tt.in, b, err, tt.out) + } + } +} + +func TestMarshalNumberZeroVal(t *testing.T) { + var n Number + out, err := Marshal(n) + if err != nil { + t.Fatal(err) + } + outStr := string(out) + if outStr != "0" { + t.Fatalf("Invalid zero val for Number: %q", outStr) + } +} + +func TestMarshalEmbeds(t *testing.T) { + top := &Top{ + Level0: 1, + Embed0: Embed0{ + Level1b: 2, + Level1c: 3, + }, + Embed0a: &Embed0a{ + Level1a: 5, + Level1b: 6, + }, + Embed0b: &Embed0b{ + Level1a: 8, + Level1b: 9, + Level1c: 10, + Level1d: 11, + Level1e: 12, + }, + Loop: Loop{ + Loop1: 13, + Loop2: 14, + }, + Embed0p: Embed0p{ + Point: image.Point{X: 15, Y: 
16}, + }, + Embed0q: Embed0q{ + Point: Point{Z: 17}, + }, + embed: embed{ + Q: 18, + }, + } + b, err := Marshal(top) + if err != nil { + t.Fatal(err) + } + want := "{\"Level0\":1,\"Level1b\":2,\"Level1c\":3,\"Level1a\":5,\"LEVEL1B\":6,\"e\":{\"Level1a\":8,\"Level1b\":9,\"Level1c\":10,\"Level1d\":11,\"x\":12},\"Loop1\":13,\"Loop2\":14,\"X\":15,\"Y\":16,\"Z\":17,\"Q\":18}" + if string(b) != want { + t.Errorf("Wrong marshal result.\n got: %q\nwant: %q", b, want) + } +} + +func TestUnmarshal(t *testing.T) { + for i, tt := range unmarshalTests { + var scan scanner + in := []byte(tt.in) + if err := checkValid(in, &scan); err != nil { + if !reflect.DeepEqual(err, tt.err) { + t.Errorf("#%d: checkValid: %#v", i, err) + continue + } + } + if tt.ptr == nil { + continue + } + + // v = new(right-type) + v := reflect.New(reflect.TypeOf(tt.ptr).Elem()) + dec := NewDecoder(bytes.NewReader(in)) + if tt.useNumber { + dec.UseNumber() + } + if err := dec.Decode(v.Interface()); !reflect.DeepEqual(err, tt.err) { + t.Errorf("#%d: %v, want %v", i, err, tt.err) + continue + } else if err != nil { + continue + } + if !reflect.DeepEqual(v.Elem().Interface(), tt.out) { + t.Errorf("#%d: mismatch\nhave: %#+v\nwant: %#+v", i, v.Elem().Interface(), tt.out) + data, _ := Marshal(v.Elem().Interface()) + println(string(data)) + data, _ = Marshal(tt.out) + println(string(data)) + continue + } + + // Check round trip. + if tt.err == nil { + enc, err := Marshal(v.Interface()) + if err != nil { + t.Errorf("#%d: error re-marshaling: %v", i, err) + continue + } + vv := reflect.New(reflect.TypeOf(tt.ptr).Elem()) + dec = NewDecoder(bytes.NewReader(enc)) + if tt.useNumber { + dec.UseNumber() + } + if err := dec.Decode(vv.Interface()); err != nil { + t.Errorf("#%d: error re-unmarshaling %#q: %v", i, enc, err) + continue + } + if !reflect.DeepEqual(v.Elem().Interface(), vv.Elem().Interface()) { + t.Errorf("#%d: mismatch\nhave: %#+v\nwant: %#+v", i, v.Elem().Interface(), vv.Elem().Interface()) + t.Errorf(" In: %q", strings.Map(noSpace, string(in))) + t.Errorf("Marshal: %q", strings.Map(noSpace, string(enc))) + continue + } + } + } +} + +func TestUnmarshalMarshal(t *testing.T) { + initBig() + var v interface{} + if err := Unmarshal(jsonBig, &v); err != nil { + t.Fatalf("Unmarshal: %v", err) + } + b, err := Marshal(v) + if err != nil { + t.Fatalf("Marshal: %v", err) + } + if !bytes.Equal(jsonBig, b) { + t.Errorf("Marshal jsonBig") + diff(t, b, jsonBig) + return + } +} + +var numberTests = []struct { + in string + i int64 + intErr string + f float64 + floatErr string +}{ + {in: "-1.23e1", intErr: "strconv.ParseInt: parsing \"-1.23e1\": invalid syntax", f: -1.23e1}, + {in: "-12", i: -12, f: -12.0}, + {in: "1e1000", intErr: "strconv.ParseInt: parsing \"1e1000\": invalid syntax", floatErr: "strconv.ParseFloat: parsing \"1e1000\": value out of range"}, +} + +// Independent of Decode, basic coverage of the accessors in Number +func TestNumberAccessors(t *testing.T) { + for _, tt := range numberTests { + n := Number(tt.in) + if s := n.String(); s != tt.in { + t.Errorf("Number(%q).String() is %q", tt.in, s) + } + if i, err := n.Int64(); err == nil && tt.intErr == "" && i != tt.i { + t.Errorf("Number(%q).Int64() is %d", tt.in, i) + } else if (err == nil && tt.intErr != "") || (err != nil && err.Error() != tt.intErr) { + t.Errorf("Number(%q).Int64() wanted error %q but got: %v", tt.in, tt.intErr, err) + } + if f, err := n.Float64(); err == nil && tt.floatErr == "" && f != tt.f { + t.Errorf("Number(%q).Float64() is %g", tt.in, f) + } else if (err == 
nil && tt.floatErr != "") || (err != nil && err.Error() != tt.floatErr) { + t.Errorf("Number(%q).Float64() wanted error %q but got: %v", tt.in, tt.floatErr, err) + } + } +} + +func TestLargeByteSlice(t *testing.T) { + s0 := make([]byte, 2000) + for i := range s0 { + s0[i] = byte(i) + } + b, err := Marshal(s0) + if err != nil { + t.Fatalf("Marshal: %v", err) + } + var s1 []byte + if err := Unmarshal(b, &s1); err != nil { + t.Fatalf("Unmarshal: %v", err) + } + if !bytes.Equal(s0, s1) { + t.Errorf("Marshal large byte slice") + diff(t, s0, s1) + } +} + +type Xint struct { + X int +} + +func TestUnmarshalInterface(t *testing.T) { + var xint Xint + var i interface{} = &xint + if err := Unmarshal([]byte(`{"X":1}`), &i); err != nil { + t.Fatalf("Unmarshal: %v", err) + } + if xint.X != 1 { + t.Fatalf("Did not write to xint") + } +} + +func TestUnmarshalPtrPtr(t *testing.T) { + var xint Xint + pxint := &xint + if err := Unmarshal([]byte(`{"X":1}`), &pxint); err != nil { + t.Fatalf("Unmarshal: %v", err) + } + if xint.X != 1 { + t.Fatalf("Did not write to xint") + } +} + +func TestEscape(t *testing.T) { + const input = `"foobar"` + " [\u2028 \u2029]" + const expected = `"\"foobar\"\u003chtml\u003e [\u2028 \u2029]"` + b, err := Marshal(input) + if err != nil { + t.Fatalf("Marshal error: %v", err) + } + if s := string(b); s != expected { + t.Errorf("Encoding of [%s]:\n got [%s]\nwant [%s]", input, s, expected) + } +} + +// WrongString is a struct that's misusing the ,string modifier. +type WrongString struct { + Message string `json:"result,string"` +} + +type wrongStringTest struct { + in, err string +} + +var wrongStringTests = []wrongStringTest{ + {`{"result":"x"}`, `json: invalid use of ,string struct tag, trying to unmarshal "x" into string`}, + {`{"result":"foo"}`, `json: invalid use of ,string struct tag, trying to unmarshal "foo" into string`}, + {`{"result":"123"}`, `json: invalid use of ,string struct tag, trying to unmarshal "123" into string`}, + {`{"result":123}`, `json: invalid use of ,string struct tag, trying to unmarshal unquoted value into string`}, +} + +// If people misuse the ,string modifier, the error message should be +// helpful, telling the user that they're doing it wrong. +func TestErrorMessageFromMisusedString(t *testing.T) { + for n, tt := range wrongStringTests { + r := strings.NewReader(tt.in) + var s WrongString + err := NewDecoder(r).Decode(&s) + got := fmt.Sprintf("%v", err) + if got != tt.err { + t.Errorf("%d. 
got err = %q, want %q", n, got, tt.err) + } + } +} + +func noSpace(c rune) rune { + if isSpace(byte(c)) { //only used for ascii + return -1 + } + return c +} + +type All struct { + Bool bool + Int int + Int8 int8 + Int16 int16 + Int32 int32 + Int64 int64 + Uint uint + Uint8 uint8 + Uint16 uint16 + Uint32 uint32 + Uint64 uint64 + Uintptr uintptr + Float32 float32 + Float64 float64 + + Foo string `json:"bar"` + Foo2 string `json:"bar2,dummyopt"` + + IntStr int64 `json:",string"` + + PBool *bool + PInt *int + PInt8 *int8 + PInt16 *int16 + PInt32 *int32 + PInt64 *int64 + PUint *uint + PUint8 *uint8 + PUint16 *uint16 + PUint32 *uint32 + PUint64 *uint64 + PUintptr *uintptr + PFloat32 *float32 + PFloat64 *float64 + + String string + PString *string + + Map map[string]Small + MapP map[string]*Small + PMap *map[string]Small + PMapP *map[string]*Small + + EmptyMap map[string]Small + NilMap map[string]Small + + Slice []Small + SliceP []*Small + PSlice *[]Small + PSliceP *[]*Small + + EmptySlice []Small + NilSlice []Small + + StringSlice []string + ByteSlice []byte + + Small Small + PSmall *Small + PPSmall **Small + + Interface interface{} + PInterface *interface{} + + unexported int +} + +type Small struct { + Tag string +} + +var allValue = All{ + Bool: true, + Int: 2, + Int8: 3, + Int16: 4, + Int32: 5, + Int64: 6, + Uint: 7, + Uint8: 8, + Uint16: 9, + Uint32: 10, + Uint64: 11, + Uintptr: 12, + Float32: 14.1, + Float64: 15.1, + Foo: "foo", + Foo2: "foo2", + IntStr: 42, + String: "16", + Map: map[string]Small{ + "17": {Tag: "tag17"}, + "18": {Tag: "tag18"}, + }, + MapP: map[string]*Small{ + "19": {Tag: "tag19"}, + "20": nil, + }, + EmptyMap: map[string]Small{}, + Slice: []Small{{Tag: "tag20"}, {Tag: "tag21"}}, + SliceP: []*Small{{Tag: "tag22"}, nil, {Tag: "tag23"}}, + EmptySlice: []Small{}, + StringSlice: []string{"str24", "str25", "str26"}, + ByteSlice: []byte{27, 28, 29}, + Small: Small{Tag: "tag30"}, + PSmall: &Small{Tag: "tag31"}, + Interface: 5.2, +} + +var pallValue = All{ + PBool: &allValue.Bool, + PInt: &allValue.Int, + PInt8: &allValue.Int8, + PInt16: &allValue.Int16, + PInt32: &allValue.Int32, + PInt64: &allValue.Int64, + PUint: &allValue.Uint, + PUint8: &allValue.Uint8, + PUint16: &allValue.Uint16, + PUint32: &allValue.Uint32, + PUint64: &allValue.Uint64, + PUintptr: &allValue.Uintptr, + PFloat32: &allValue.Float32, + PFloat64: &allValue.Float64, + PString: &allValue.String, + PMap: &allValue.Map, + PMapP: &allValue.MapP, + PSlice: &allValue.Slice, + PSliceP: &allValue.SliceP, + PPSmall: &allValue.PSmall, + PInterface: &allValue.Interface, +} + +var allValueIndent = `{ + "Bool": true, + "Int": 2, + "Int8": 3, + "Int16": 4, + "Int32": 5, + "Int64": 6, + "Uint": 7, + "Uint8": 8, + "Uint16": 9, + "Uint32": 10, + "Uint64": 11, + "Uintptr": 12, + "Float32": 14.1, + "Float64": 15.1, + "bar": "foo", + "bar2": "foo2", + "IntStr": "42", + "PBool": null, + "PInt": null, + "PInt8": null, + "PInt16": null, + "PInt32": null, + "PInt64": null, + "PUint": null, + "PUint8": null, + "PUint16": null, + "PUint32": null, + "PUint64": null, + "PUintptr": null, + "PFloat32": null, + "PFloat64": null, + "String": "16", + "PString": null, + "Map": { + "17": { + "Tag": "tag17" + }, + "18": { + "Tag": "tag18" + } + }, + "MapP": { + "19": { + "Tag": "tag19" + }, + "20": null + }, + "PMap": null, + "PMapP": null, + "EmptyMap": {}, + "NilMap": null, + "Slice": [ + { + "Tag": "tag20" + }, + { + "Tag": "tag21" + } + ], + "SliceP": [ + { + "Tag": "tag22" + }, + null, + { + "Tag": "tag23" + } + ], + "PSlice": null, + 
"PSliceP": null, + "EmptySlice": [], + "NilSlice": null, + "StringSlice": [ + "str24", + "str25", + "str26" + ], + "ByteSlice": "Gxwd", + "Small": { + "Tag": "tag30" + }, + "PSmall": { + "Tag": "tag31" + }, + "PPSmall": null, + "Interface": 5.2, + "PInterface": null +}` + +var allValueCompact = strings.Map(noSpace, allValueIndent) + +var pallValueIndent = `{ + "Bool": false, + "Int": 0, + "Int8": 0, + "Int16": 0, + "Int32": 0, + "Int64": 0, + "Uint": 0, + "Uint8": 0, + "Uint16": 0, + "Uint32": 0, + "Uint64": 0, + "Uintptr": 0, + "Float32": 0, + "Float64": 0, + "bar": "", + "bar2": "", + "IntStr": "0", + "PBool": true, + "PInt": 2, + "PInt8": 3, + "PInt16": 4, + "PInt32": 5, + "PInt64": 6, + "PUint": 7, + "PUint8": 8, + "PUint16": 9, + "PUint32": 10, + "PUint64": 11, + "PUintptr": 12, + "PFloat32": 14.1, + "PFloat64": 15.1, + "String": "", + "PString": "16", + "Map": null, + "MapP": null, + "PMap": { + "17": { + "Tag": "tag17" + }, + "18": { + "Tag": "tag18" + } + }, + "PMapP": { + "19": { + "Tag": "tag19" + }, + "20": null + }, + "EmptyMap": null, + "NilMap": null, + "Slice": null, + "SliceP": null, + "PSlice": [ + { + "Tag": "tag20" + }, + { + "Tag": "tag21" + } + ], + "PSliceP": [ + { + "Tag": "tag22" + }, + null, + { + "Tag": "tag23" + } + ], + "EmptySlice": null, + "NilSlice": null, + "StringSlice": null, + "ByteSlice": null, + "Small": { + "Tag": "" + }, + "PSmall": null, + "PPSmall": { + "Tag": "tag31" + }, + "Interface": null, + "PInterface": 5.2 +}` + +var pallValueCompact = strings.Map(noSpace, pallValueIndent) + +func TestRefUnmarshal(t *testing.T) { + type S struct { + // Ref is defined in encode_test.go. + R0 Ref + R1 *Ref + R2 RefText + R3 *RefText + } + want := S{ + R0: 12, + R1: new(Ref), + R2: 13, + R3: new(RefText), + } + *want.R1 = 12 + *want.R3 = 13 + + var got S + if err := Unmarshal([]byte(`{"R0":"ref","R1":"ref","R2":"ref","R3":"ref"}`), &got); err != nil { + t.Fatalf("Unmarshal: %v", err) + } + if !reflect.DeepEqual(got, want) { + t.Errorf("got %+v, want %+v", got, want) + } +} + +// Test that the empty string doesn't panic decoding when ,string is specified +// Issue 3450 +func TestEmptyString(t *testing.T) { + type T2 struct { + Number1 int `json:",string"` + Number2 int `json:",string"` + } + data := `{"Number1":"1", "Number2":""}` + dec := NewDecoder(strings.NewReader(data)) + var t2 T2 + err := dec.Decode(&t2) + if err == nil { + t.Fatal("Decode: did not return error") + } + if t2.Number1 != 1 { + t.Fatal("Decode: did not set Number1") + } +} + +// Test that a null for ,string is not replaced with the previous quoted string (issue 7046). +// It should also not be an error (issue 2540, issue 8587). 
+func TestNullString(t *testing.T) { + type T struct { + A int `json:",string"` + B int `json:",string"` + C *int `json:",string"` + } + data := []byte(`{"A": "1", "B": null, "C": null}`) + var s T + s.B = 1 + s.C = new(int) + *s.C = 2 + err := Unmarshal(data, &s) + if err != nil { + t.Fatalf("Unmarshal: %v", err) + } + if s.B != 1 || s.C != nil { + t.Fatalf("after Unmarshal, s.B=%d, s.C=%p, want 1, nil", s.B, s.C) + } +} + +func intp(x int) *int { + p := new(int) + *p = x + return p +} + +func intpp(x *int) **int { + pp := new(*int) + *pp = x + return pp +} + +var interfaceSetTests = []struct { + pre interface{} + json string + post interface{} +}{ + {"foo", `"bar"`, "bar"}, + {"foo", `2`, 2.0}, + {"foo", `true`, true}, + {"foo", `null`, nil}, + + {nil, `null`, nil}, + {new(int), `null`, nil}, + {(*int)(nil), `null`, nil}, + {new(*int), `null`, new(*int)}, + {(**int)(nil), `null`, nil}, + {intp(1), `null`, nil}, + {intpp(nil), `null`, intpp(nil)}, + {intpp(intp(1)), `null`, intpp(nil)}, +} + +func TestInterfaceSet(t *testing.T) { + for _, tt := range interfaceSetTests { + b := struct{ X interface{} }{tt.pre} + blob := `{"X":` + tt.json + `}` + if err := Unmarshal([]byte(blob), &b); err != nil { + t.Errorf("Unmarshal %#q: %v", blob, err) + continue + } + if !reflect.DeepEqual(b.X, tt.post) { + t.Errorf("Unmarshal %#q into %#v: X=%#v, want %#v", blob, tt.pre, b.X, tt.post) + } + } +} + +// JSON null values should be ignored for primitives and string values instead of resulting in an error. +// Issue 2540 +func TestUnmarshalNulls(t *testing.T) { + jsonData := []byte(`{ + "Bool" : null, + "Int" : null, + "Int8" : null, + "Int16" : null, + "Int32" : null, + "Int64" : null, + "Uint" : null, + "Uint8" : null, + "Uint16" : null, + "Uint32" : null, + "Uint64" : null, + "Float32" : null, + "Float64" : null, + "String" : null}`) + + nulls := All{ + Bool: true, + Int: 2, + Int8: 3, + Int16: 4, + Int32: 5, + Int64: 6, + Uint: 7, + Uint8: 8, + Uint16: 9, + Uint32: 10, + Uint64: 11, + Float32: 12.1, + Float64: 13.1, + String: "14"} + + err := Unmarshal(jsonData, &nulls) + if err != nil { + t.Errorf("Unmarshal of null values failed: %v", err) + } + if !nulls.Bool || nulls.Int != 2 || nulls.Int8 != 3 || nulls.Int16 != 4 || nulls.Int32 != 5 || nulls.Int64 != 6 || + nulls.Uint != 7 || nulls.Uint8 != 8 || nulls.Uint16 != 9 || nulls.Uint32 != 10 || nulls.Uint64 != 11 || + nulls.Float32 != 12.1 || nulls.Float64 != 13.1 || nulls.String != "14" { + + t.Errorf("Unmarshal of null values affected primitives") + } +} + +func TestStringKind(t *testing.T) { + type stringKind string + + var m1, m2 map[stringKind]int + m1 = map[stringKind]int{ + "foo": 42, + } + + data, err := Marshal(m1) + if err != nil { + t.Errorf("Unexpected error marshaling: %v", err) + } + + err = Unmarshal(data, &m2) + if err != nil { + t.Errorf("Unexpected error unmarshaling: %v", err) + } + + if !reflect.DeepEqual(m1, m2) { + t.Error("Items should be equal after encoding and then decoding") + } +} + +// Custom types with []byte as underlying type could not be marshalled +// and then unmarshalled. +// Issue 8962. +func TestByteKind(t *testing.T) { + type byteKind []byte + + a := byteKind("hello") + + data, err := Marshal(a) + if err != nil { + t.Error(err) + } + var b byteKind + err = Unmarshal(data, &b) + if err != nil { + t.Fatal(err) + } + if !reflect.DeepEqual(a, b) { + t.Errorf("expected %v == %v", a, b) + } +} + +// The fix for issue 8962 introduced a regression. +// Issue 12921. 
+func TestSliceOfCustomByte(t *testing.T) { + type Uint8 uint8 + + a := []Uint8("hello") + + data, err := Marshal(a) + if err != nil { + t.Fatal(err) + } + var b []Uint8 + err = Unmarshal(data, &b) + if err != nil { + t.Fatal(err) + } + if !reflect.DeepEqual(a, b) { + t.Fatal("expected %v == %v", a, b) + } +} + +var decodeTypeErrorTests = []struct { + dest interface{} + src string +}{ + {new(string), `{"user": "name"}`}, // issue 4628. + {new(error), `{}`}, // issue 4222 + {new(error), `[]`}, + {new(error), `""`}, + {new(error), `123`}, + {new(error), `true`}, +} + +func TestUnmarshalTypeError(t *testing.T) { + for _, item := range decodeTypeErrorTests { + err := Unmarshal([]byte(item.src), item.dest) + if _, ok := err.(*UnmarshalTypeError); !ok { + t.Errorf("expected type error for Unmarshal(%q, type %T): got %T", + item.src, item.dest, err) + } + } +} + +var unmarshalSyntaxTests = []string{ + "tru", + "fals", + "nul", + "123e", + `"hello`, + `[1,2,3`, + `{"key":1`, + `{"key":1,`, +} + +func TestUnmarshalSyntax(t *testing.T) { + var x interface{} + for _, src := range unmarshalSyntaxTests { + err := Unmarshal([]byte(src), &x) + if _, ok := err.(*SyntaxError); !ok { + t.Errorf("expected syntax error for Unmarshal(%q): got %T", src, err) + } + } +} + +// Test handling of unexported fields that should be ignored. +// Issue 4660 +type unexportedFields struct { + Name string + m map[string]interface{} `json:"-"` + m2 map[string]interface{} `json:"abcd"` +} + +func TestUnmarshalUnexported(t *testing.T) { + input := `{"Name": "Bob", "m": {"x": 123}, "m2": {"y": 456}, "abcd": {"z": 789}}` + want := &unexportedFields{Name: "Bob"} + + out := &unexportedFields{} + err := Unmarshal([]byte(input), out) + if err != nil { + t.Errorf("got error %v, expected nil", err) + } + if !reflect.DeepEqual(out, want) { + t.Errorf("got %q, want %q", out, want) + } +} + +// Time3339 is a time.Time which encodes to and from JSON +// as an RFC 3339 time in UTC. +type Time3339 time.Time + +func (t *Time3339) UnmarshalJSON(b []byte) error { + if len(b) < 2 || b[0] != '"' || b[len(b)-1] != '"' { + return fmt.Errorf("types: failed to unmarshal non-string value %q as an RFC 3339 time", b) + } + tm, err := time.Parse(time.RFC3339, string(b[1:len(b)-1])) + if err != nil { + return err + } + *t = Time3339(tm) + return nil +} + +func TestUnmarshalJSONLiteralError(t *testing.T) { + var t3 Time3339 + err := Unmarshal([]byte(`"0000-00-00T00:00:00Z"`), &t3) + if err == nil { + t.Fatalf("expected error; got time %v", time.Time(t3)) + } + if !strings.Contains(err.Error(), "range") { + t.Errorf("got err = %v; want out of range error", err) + } +} + +// Test that extra object elements in an array do not result in a +// "data changing underfoot" error. +// Issue 3717 +func TestSkipArrayObjects(t *testing.T) { + json := `[{}]` + var dest [0]interface{} + + err := Unmarshal([]byte(json), &dest) + if err != nil { + t.Errorf("got error %q, want nil", err) + } +} + +// Test semantics of pre-filled struct fields and pre-filled map fields. +// Issue 4900. +func TestPrefilled(t *testing.T) { + ptrToMap := func(m map[string]interface{}) *map[string]interface{} { return &m } + + // Values here change, cannot reuse table across runs. 
+ var prefillTests = []struct { + in string + ptr interface{} + out interface{} + }{ + { + in: `{"X": 1, "Y": 2}`, + ptr: &XYZ{X: float32(3), Y: int16(4), Z: 1.5}, + out: &XYZ{X: float64(1), Y: float64(2), Z: 1.5}, + }, + { + in: `{"X": 1, "Y": 2}`, + ptr: ptrToMap(map[string]interface{}{"X": float32(3), "Y": int16(4), "Z": 1.5}), + out: ptrToMap(map[string]interface{}{"X": float64(1), "Y": float64(2), "Z": 1.5}), + }, + } + + for _, tt := range prefillTests { + ptrstr := fmt.Sprintf("%v", tt.ptr) + err := Unmarshal([]byte(tt.in), tt.ptr) // tt.ptr edited here + if err != nil { + t.Errorf("Unmarshal: %v", err) + } + if !reflect.DeepEqual(tt.ptr, tt.out) { + t.Errorf("Unmarshal(%#q, %s): have %v, want %v", tt.in, ptrstr, tt.ptr, tt.out) + } + } +} + +var invalidUnmarshalTests = []struct { + v interface{} + want string +}{ + {nil, "json: Unmarshal(nil)"}, + {struct{}{}, "json: Unmarshal(non-pointer struct {})"}, + {(*int)(nil), "json: Unmarshal(nil *int)"}, +} + +func TestInvalidUnmarshal(t *testing.T) { + buf := []byte(`{"a":"1"}`) + for _, tt := range invalidUnmarshalTests { + err := Unmarshal(buf, tt.v) + if err == nil { + t.Errorf("Unmarshal expecting error, got nil") + continue + } + if got := err.Error(); got != tt.want { + t.Errorf("Unmarshal = %q; want %q", got, tt.want) + } + } +} + +var invalidUnmarshalTextTests = []struct { + v interface{} + want string +}{ + {nil, "json: Unmarshal(nil)"}, + {struct{}{}, "json: Unmarshal(non-pointer struct {})"}, + {(*int)(nil), "json: Unmarshal(nil *int)"}, + {new(net.IP), "json: cannot unmarshal string into Go value of type *net.IP"}, +} + +func TestInvalidUnmarshalText(t *testing.T) { + buf := []byte(`123`) + for _, tt := range invalidUnmarshalTextTests { + err := Unmarshal(buf, tt.v) + if err == nil { + t.Errorf("Unmarshal expecting error, got nil") + continue + } + if got := err.Error(); got != tt.want { + t.Errorf("Unmarshal = %q; want %q", got, tt.want) + } + } +} + +// Test that string option is ignored for invalid types. +// Issue 9812. +func TestInvalidStringOption(t *testing.T) { + num := 0 + item := struct { + T time.Time `json:",string"` + M map[string]string `json:",string"` + S []string `json:",string"` + A [1]string `json:",string"` + I interface{} `json:",string"` + P *int `json:",string"` + }{M: make(map[string]string), S: make([]string, 0), I: num, P: &num} + + data, err := Marshal(item) + if err != nil { + t.Fatalf("Marshal: %v", err) + } + + err = Unmarshal(data, &item) + if err != nil { + t.Fatalf("Unmarshal: %v", err) + } +} diff --git a/vendor/github.com/docker/distribution/vendor/rsc.io/letsencrypt/vendor/gopkg.in/square/go-jose.v1/json/encode.go b/vendor/github.com/docker/distribution/vendor/rsc.io/letsencrypt/vendor/gopkg.in/square/go-jose.v1/json/encode.go new file mode 100644 index 000000000..1dae8bb7c --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/rsc.io/letsencrypt/vendor/gopkg.in/square/go-jose.v1/json/encode.go @@ -0,0 +1,1197 @@ +// Copyright 2010 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package json implements encoding and decoding of JSON objects as defined in +// RFC 4627. The mapping between JSON objects and Go values is described +// in the documentation for the Marshal and Unmarshal functions. 
+// +// See "JSON and Go" for an introduction to this package: +// https://golang.org/doc/articles/json_and_go.html +package json + +import ( + "bytes" + "encoding" + "encoding/base64" + "fmt" + "math" + "reflect" + "runtime" + "sort" + "strconv" + "strings" + "sync" + "unicode" + "unicode/utf8" +) + +// Marshal returns the JSON encoding of v. +// +// Marshal traverses the value v recursively. +// If an encountered value implements the Marshaler interface +// and is not a nil pointer, Marshal calls its MarshalJSON method +// to produce JSON. If no MarshalJSON method is present but the +// value implements encoding.TextMarshaler instead, Marshal calls +// its MarshalText method. +// The nil pointer exception is not strictly necessary +// but mimics a similar, necessary exception in the behavior of +// UnmarshalJSON. +// +// Otherwise, Marshal uses the following type-dependent default encodings: +// +// Boolean values encode as JSON booleans. +// +// Floating point, integer, and Number values encode as JSON numbers. +// +// String values encode as JSON strings coerced to valid UTF-8, +// replacing invalid bytes with the Unicode replacement rune. +// The angle brackets "<" and ">" are escaped to "\u003c" and "\u003e" +// to keep some browsers from misinterpreting JSON output as HTML. +// Ampersand "&" is also escaped to "\u0026" for the same reason. +// +// Array and slice values encode as JSON arrays, except that +// []byte encodes as a base64-encoded string, and a nil slice +// encodes as the null JSON object. +// +// Struct values encode as JSON objects. Each exported struct field +// becomes a member of the object unless +// - the field's tag is "-", or +// - the field is empty and its tag specifies the "omitempty" option. +// The empty values are false, 0, any +// nil pointer or interface value, and any array, slice, map, or string of +// length zero. The object's default key string is the struct field name +// but can be specified in the struct field's tag value. The "json" key in +// the struct field's tag value is the key name, followed by an optional comma +// and options. Examples: +// +// // Field is ignored by this package. +// Field int `json:"-"` +// +// // Field appears in JSON as key "myName". +// Field int `json:"myName"` +// +// // Field appears in JSON as key "myName" and +// // the field is omitted from the object if its value is empty, +// // as defined above. +// Field int `json:"myName,omitempty"` +// +// // Field appears in JSON as key "Field" (the default), but +// // the field is skipped if empty. +// // Note the leading comma. +// Field int `json:",omitempty"` +// +// The "string" option signals that a field is stored as JSON inside a +// JSON-encoded string. It applies only to fields of string, floating point, +// integer, or boolean types. This extra level of encoding is sometimes used +// when communicating with JavaScript programs: +// +// Int64String int64 `json:",string"` +// +// The key name will be used if it's a non-empty string consisting of +// only Unicode letters, digits, dollar signs, percent signs, hyphens, +// underscores and slashes. +// +// Anonymous struct fields are usually marshaled as if their inner exported fields +// were fields in the outer struct, subject to the usual Go visibility rules amended +// as described in the next paragraph. +// An anonymous struct field with a name given in its JSON tag is treated as +// having that name, rather than being anonymous. 
+// An anonymous struct field of interface type is treated the same as having +// that type as its name, rather than being anonymous. +// +// The Go visibility rules for struct fields are amended for JSON when +// deciding which field to marshal or unmarshal. If there are +// multiple fields at the same level, and that level is the least +// nested (and would therefore be the nesting level selected by the +// usual Go rules), the following extra rules apply: +// +// 1) Of those fields, if any are JSON-tagged, only tagged fields are considered, +// even if there are multiple untagged fields that would otherwise conflict. +// 2) If there is exactly one field (tagged or not according to the first rule), that is selected. +// 3) Otherwise there are multiple fields, and all are ignored; no error occurs. +// +// Handling of anonymous struct fields is new in Go 1.1. +// Prior to Go 1.1, anonymous struct fields were ignored. To force ignoring of +// an anonymous struct field in both current and earlier versions, give the field +// a JSON tag of "-". +// +// Map values encode as JSON objects. +// The map's key type must be string; the map keys are used as JSON object +// keys, subject to the UTF-8 coercion described for string values above. +// +// Pointer values encode as the value pointed to. +// A nil pointer encodes as the null JSON object. +// +// Interface values encode as the value contained in the interface. +// A nil interface value encodes as the null JSON object. +// +// Channel, complex, and function values cannot be encoded in JSON. +// Attempting to encode such a value causes Marshal to return +// an UnsupportedTypeError. +// +// JSON cannot represent cyclic data structures and Marshal does not +// handle them. Passing cyclic structures to Marshal will result in +// an infinite recursion. +// +func Marshal(v interface{}) ([]byte, error) { + e := &encodeState{} + err := e.marshal(v) + if err != nil { + return nil, err + } + return e.Bytes(), nil +} + +// MarshalIndent is like Marshal but applies Indent to format the output. +func MarshalIndent(v interface{}, prefix, indent string) ([]byte, error) { + b, err := Marshal(v) + if err != nil { + return nil, err + } + var buf bytes.Buffer + err = Indent(&buf, b, prefix, indent) + if err != nil { + return nil, err + } + return buf.Bytes(), nil +} + +// HTMLEscape appends to dst the JSON-encoded src with <, >, &, U+2028 and U+2029 +// characters inside string literals changed to \u003c, \u003e, \u0026, \u2028, \u2029 +// so that the JSON will be safe to embed inside HTML
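The struct-tag options spelled out in the Marshal documentation above (`-`, renamed keys, `omitempty`, and `,string`) behave the same way in the upstream encoding/json package that this vendored file is copied from. A minimal, self-contained sketch for reference; the type and field names are illustrative only and are not part of the vendored code:

package main

import (
	"encoding/json"
	"fmt"
)

// Item exercises the tag options described in the Marshal doc comment.
type Item struct {
	ID     int    `json:"id"`             // renamed key
	Secret string `json:"-"`              // always omitted from the output
	Note   string `json:"note,omitempty"` // omitted while empty
	Count  int64  `json:",string"`        // number encoded inside a JSON string
}

func main() {
	out, err := json.Marshal(Item{ID: 7, Secret: "hidden", Count: 42})
	if err != nil {
		fmt.Println("marshal error:", err)
		return
	}
	fmt.Println(string(out)) // {"id":7,"Count":"42"}
}

Running it prints {"id":7,"Count":"42"}: the "-"-tagged and empty omitempty fields are dropped, and the ",string" option wraps the integer value in quotes, matching the rules in the comment block above.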