Update GCS storage library to support GetObject API on gzipped objects (#6506)

Fixes #6280
kannappanr 2018-09-21 11:43:10 -07:00 committed by GitHub
parent 3c8fabd116
commit c1798cc89a
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
132 changed files with 66518 additions and 1280 deletions


@@ -775,7 +775,13 @@ func (l *gcsGateway) GetObject(ctx context.Context, bucket string, key string, s
 		return gcsToObjectError(err, bucket)
 	}
-	object := l.client.Bucket(bucket).Object(key)
+	// GCS storage decompresses a gzipped object by default and returns the data.
+	// Refer to https://cloud.google.com/storage/docs/transcoding#decompressive_transcoding
+	// Need to set `Accept-Encoding` header to `gzip` when issuing a GetObject call, to be able
+	// to download the object in compressed state.
+	// Calling ReadCompressed with true accomplishes that.
+	object := l.client.Bucket(bucket).Object(key).ReadCompressed(true)
 	r, err := object.NewRangeReader(l.ctx, startOffset, length)
 	if err != nil {
 		logger.LogIf(ctx, err)
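
For context, a minimal sketch (not part of this commit) of the same ReadCompressed call made directly against cloud.google.com/go/storage; the bucket and object names are placeholders.

```go
package main

import (
	"context"
	"io"
	"log"
	"os"

	"cloud.google.com/go/storage"
)

func main() {
	ctx := context.Background()
	client, err := storage.NewClient(ctx)
	if err != nil {
		log.Fatal(err)
	}
	defer client.Close()

	// ReadCompressed(true) sets Accept-Encoding: gzip on the read, so a
	// gzip-uploaded object is returned as stored rather than being
	// decompressed by GCS (decompressive transcoding).
	obj := client.Bucket("my-bucket").Object("data.gz").ReadCompressed(true)
	r, err := obj.NewReader(ctx)
	if err != nil {
		log.Fatal(err)
	}
	defer r.Close()
	if _, err := io.Copy(os.Stdout, r); err != nil {
		log.Fatal(err)
	}
}
```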

vendor/cloud.google.com/go/LICENSE

@@ -187,7 +187,7 @@
       same "printed page" as the copyright notice for easier
       identification within third-party archives.
 
-   Copyright 2014 Google Inc.
+   Copyright [yyyy] [name of copyright owner]
 
    Licensed under the Apache License, Version 2.0 (the "License");
    you may not use this file except in compliance with the License.


@ -1,4 +1,4 @@
// Copyright 2014 Google Inc. All Rights Reserved. // Copyright 2014 Google LLC
// //
// Licensed under the Apache License, Version 2.0 (the "License"); // Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License. // you may not use this file except in compliance with the License.
@@ -64,7 +64,7 @@ var (
 )
 
 var (
-	metaClient = &http.Client{
+	defaultClient = &Client{hc: &http.Client{
 		Transport: &http.Transport{
 			Dial: (&net.Dialer{
 				Timeout:   2 * time.Second,
@@ -72,15 +72,15 @@ var (
 			}).Dial,
 			ResponseHeaderTimeout: 2 * time.Second,
 		},
-	}
-	subscribeClient = &http.Client{
+	}}
+	subscribeClient = &Client{hc: &http.Client{
 		Transport: &http.Transport{
 			Dial: (&net.Dialer{
 				Timeout:   2 * time.Second,
 				KeepAlive: 30 * time.Second,
 			}).Dial,
 		},
-	}
+	}}
 )
 
 // NotDefinedError is returned when requested metadata is not defined.
@@ -95,74 +95,16 @@ func (suffix NotDefinedError) Error() string {
 	return fmt.Sprintf("metadata: GCE metadata %q not defined", string(suffix))
 }
 
-// Get returns a value from the metadata service.
-// The suffix is appended to "http://${GCE_METADATA_HOST}/computeMetadata/v1/".
-//
-// If the GCE_METADATA_HOST environment variable is not defined, a default of
-// 169.254.169.254 will be used instead.
-//
-// If the requested metadata is not defined, the returned error will
-// be of type NotDefinedError.
-func Get(suffix string) (string, error) {
-	val, _, err := getETag(metaClient, suffix)
-	return val, err
-}
-
-// getETag returns a value from the metadata service as well as the associated
-// ETag using the provided client. This func is otherwise equivalent to Get.
-func getETag(client *http.Client, suffix string) (value, etag string, err error) {
-	// Using a fixed IP makes it very difficult to spoof the metadata service in
-	// a container, which is an important use-case for local testing of cloud
-	// deployments. To enable spoofing of the metadata service, the environment
-	// variable GCE_METADATA_HOST is first inspected to decide where metadata
-	// requests shall go.
-	host := os.Getenv(metadataHostEnv)
-	if host == "" {
-		// Using 169.254.169.254 instead of "metadata" here because Go
-		// binaries built with the "netgo" tag and without cgo won't
-		// know the search suffix for "metadata" is
-		// ".google.internal", and this IP address is documented as
-		// being stable anyway.
-		host = metadataIP
-	}
-	url := "http://" + host + "/computeMetadata/v1/" + suffix
-	req, _ := http.NewRequest("GET", url, nil)
-	req.Header.Set("Metadata-Flavor", "Google")
-	req.Header.Set("User-Agent", userAgent)
-	res, err := client.Do(req)
-	if err != nil {
-		return "", "", err
-	}
-	defer res.Body.Close()
-	if res.StatusCode == http.StatusNotFound {
-		return "", "", NotDefinedError(suffix)
-	}
-	if res.StatusCode != 200 {
-		return "", "", fmt.Errorf("status code %d trying to fetch %s", res.StatusCode, url)
-	}
-	all, err := ioutil.ReadAll(res.Body)
-	if err != nil {
-		return "", "", err
-	}
-	return string(all), res.Header.Get("Etag"), nil
-}
-
-func getTrimmed(suffix string) (s string, err error) {
-	s, err = Get(suffix)
-	s = strings.TrimSpace(s)
-	return
-}
-
-func (c *cachedValue) get() (v string, err error) {
+func (c *cachedValue) get(cl *Client) (v string, err error) {
 	defer c.mu.Unlock()
 	c.mu.Lock()
 	if c.v != "" {
 		return c.v, nil
 	}
 	if c.trim {
-		v, err = getTrimmed(c.k)
+		v, err = cl.getTrimmed(c.k)
 	} else {
-		v, err = Get(c.k)
+		v, err = cl.Get(c.k)
 	}
 	if err == nil {
 		c.v = v
@ -201,7 +143,7 @@ func testOnGCE() bool {
go func() { go func() {
req, _ := http.NewRequest("GET", "http://"+metadataIP, nil) req, _ := http.NewRequest("GET", "http://"+metadataIP, nil)
req.Header.Set("User-Agent", userAgent) req.Header.Set("User-Agent", userAgent)
res, err := ctxhttp.Do(ctx, metaClient, req) res, err := ctxhttp.Do(ctx, defaultClient.hc, req)
if err != nil { if err != nil {
resc <- false resc <- false
return return
@ -266,6 +208,255 @@ func systemInfoSuggestsGCE() bool {
return name == "Google" || name == "Google Compute Engine" return name == "Google" || name == "Google Compute Engine"
} }
// Subscribe calls Client.Subscribe on a client designed for subscribing (one with no
// ResponseHeaderTimeout).
func Subscribe(suffix string, fn func(v string, ok bool) error) error {
return subscribeClient.Subscribe(suffix, fn)
}
// Get calls Client.Get on the default client.
func Get(suffix string) (string, error) { return defaultClient.Get(suffix) }
// ProjectID returns the current instance's project ID string.
func ProjectID() (string, error) { return defaultClient.ProjectID() }
// NumericProjectID returns the current instance's numeric project ID.
func NumericProjectID() (string, error) { return defaultClient.NumericProjectID() }
// InternalIP returns the instance's primary internal IP address.
func InternalIP() (string, error) { return defaultClient.InternalIP() }
// ExternalIP returns the instance's primary external (public) IP address.
func ExternalIP() (string, error) { return defaultClient.ExternalIP() }
// Hostname returns the instance's hostname. This will be of the form
// "<instanceID>.c.<projID>.internal".
func Hostname() (string, error) { return defaultClient.Hostname() }
// InstanceTags returns the list of user-defined instance tags,
// assigned when initially creating a GCE instance.
func InstanceTags() ([]string, error) { return defaultClient.InstanceTags() }
// InstanceID returns the current VM's numeric instance ID.
func InstanceID() (string, error) { return defaultClient.InstanceID() }
// InstanceName returns the current VM's instance ID string.
func InstanceName() (string, error) { return defaultClient.InstanceName() }
// Zone returns the current VM's zone, such as "us-central1-b".
func Zone() (string, error) { return defaultClient.Zone() }
// InstanceAttributes calls Client.InstanceAttributes on the default client.
func InstanceAttributes() ([]string, error) { return defaultClient.InstanceAttributes() }
// ProjectAttributes calls Client.ProjectAttributes on the default client.
func ProjectAttributes() ([]string, error) { return defaultClient.ProjectAttributes() }
// InstanceAttributeValue calls Client.InstanceAttributeValue on the default client.
func InstanceAttributeValue(attr string) (string, error) {
return defaultClient.InstanceAttributeValue(attr)
}
// ProjectAttributeValue calls Client.ProjectAttributeValue on the default client.
func ProjectAttributeValue(attr string) (string, error) {
return defaultClient.ProjectAttributeValue(attr)
}
// Scopes calls Client.Scopes on the default client.
func Scopes(serviceAccount string) ([]string, error) { return defaultClient.Scopes(serviceAccount) }
func strsContains(ss []string, s string) bool {
for _, v := range ss {
if v == s {
return true
}
}
return false
}
// A Client provides metadata.
type Client struct {
hc *http.Client
}
// NewClient returns a Client that can be used to fetch metadata. All HTTP requests
// will use the given http.Client instead of the default client.
func NewClient(c *http.Client) *Client {
return &Client{hc: c}
}
// getETag returns a value from the metadata service as well as the associated ETag.
// This func is otherwise equivalent to Get.
func (c *Client) getETag(suffix string) (value, etag string, err error) {
// Using a fixed IP makes it very difficult to spoof the metadata service in
// a container, which is an important use-case for local testing of cloud
// deployments. To enable spoofing of the metadata service, the environment
// variable GCE_METADATA_HOST is first inspected to decide where metadata
// requests shall go.
host := os.Getenv(metadataHostEnv)
if host == "" {
// Using 169.254.169.254 instead of "metadata" here because Go
// binaries built with the "netgo" tag and without cgo won't
// know the search suffix for "metadata" is
// ".google.internal", and this IP address is documented as
// being stable anyway.
host = metadataIP
}
url := "http://" + host + "/computeMetadata/v1/" + suffix
req, _ := http.NewRequest("GET", url, nil)
req.Header.Set("Metadata-Flavor", "Google")
req.Header.Set("User-Agent", userAgent)
res, err := c.hc.Do(req)
if err != nil {
return "", "", err
}
defer res.Body.Close()
if res.StatusCode == http.StatusNotFound {
return "", "", NotDefinedError(suffix)
}
if res.StatusCode != 200 {
return "", "", fmt.Errorf("status code %d trying to fetch %s", res.StatusCode, url)
}
all, err := ioutil.ReadAll(res.Body)
if err != nil {
return "", "", err
}
return string(all), res.Header.Get("Etag"), nil
}
// Get returns a value from the metadata service.
// The suffix is appended to "http://${GCE_METADATA_HOST}/computeMetadata/v1/".
//
// If the GCE_METADATA_HOST environment variable is not defined, a default of
// 169.254.169.254 will be used instead.
//
// If the requested metadata is not defined, the returned error will
// be of type NotDefinedError.
func (c *Client) Get(suffix string) (string, error) {
val, _, err := c.getETag(suffix)
return val, err
}
func (c *Client) getTrimmed(suffix string) (s string, err error) {
s, err = c.Get(suffix)
s = strings.TrimSpace(s)
return
}
func (c *Client) lines(suffix string) ([]string, error) {
j, err := c.Get(suffix)
if err != nil {
return nil, err
}
s := strings.Split(strings.TrimSpace(j), "\n")
for i := range s {
s[i] = strings.TrimSpace(s[i])
}
return s, nil
}
// ProjectID returns the current instance's project ID string.
func (c *Client) ProjectID() (string, error) { return projID.get(c) }
// NumericProjectID returns the current instance's numeric project ID.
func (c *Client) NumericProjectID() (string, error) { return projNum.get(c) }
// InstanceID returns the current VM's numeric instance ID.
func (c *Client) InstanceID() (string, error) { return instID.get(c) }
// InternalIP returns the instance's primary internal IP address.
func (c *Client) InternalIP() (string, error) {
return c.getTrimmed("instance/network-interfaces/0/ip")
}
// ExternalIP returns the instance's primary external (public) IP address.
func (c *Client) ExternalIP() (string, error) {
return c.getTrimmed("instance/network-interfaces/0/access-configs/0/external-ip")
}
// Hostname returns the instance's hostname. This will be of the form
// "<instanceID>.c.<projID>.internal".
func (c *Client) Hostname() (string, error) {
return c.getTrimmed("instance/hostname")
}
// InstanceTags returns the list of user-defined instance tags,
// assigned when initially creating a GCE instance.
func (c *Client) InstanceTags() ([]string, error) {
var s []string
j, err := c.Get("instance/tags")
if err != nil {
return nil, err
}
if err := json.NewDecoder(strings.NewReader(j)).Decode(&s); err != nil {
return nil, err
}
return s, nil
}
// InstanceName returns the current VM's instance ID string.
func (c *Client) InstanceName() (string, error) {
host, err := c.Hostname()
if err != nil {
return "", err
}
return strings.Split(host, ".")[0], nil
}
// Zone returns the current VM's zone, such as "us-central1-b".
func (c *Client) Zone() (string, error) {
zone, err := c.getTrimmed("instance/zone")
// zone is of the form "projects/<projNum>/zones/<zoneName>".
if err != nil {
return "", err
}
return zone[strings.LastIndex(zone, "/")+1:], nil
}
// InstanceAttributes returns the list of user-defined attributes,
// assigned when initially creating a GCE VM instance. The value of an
// attribute can be obtained with InstanceAttributeValue.
func (c *Client) InstanceAttributes() ([]string, error) { return c.lines("instance/attributes/") }
// ProjectAttributes returns the list of user-defined attributes
// applying to the project as a whole, not just this VM. The value of
// an attribute can be obtained with ProjectAttributeValue.
func (c *Client) ProjectAttributes() ([]string, error) { return c.lines("project/attributes/") }
// InstanceAttributeValue returns the value of the provided VM
// instance attribute.
//
// If the requested attribute is not defined, the returned error will
// be of type NotDefinedError.
//
// InstanceAttributeValue may return ("", nil) if the attribute was
// defined to be the empty string.
func (c *Client) InstanceAttributeValue(attr string) (string, error) {
return c.Get("instance/attributes/" + attr)
}
// ProjectAttributeValue returns the value of the provided
// project attribute.
//
// If the requested attribute is not defined, the returned error will
// be of type NotDefinedError.
//
// ProjectAttributeValue may return ("", nil) if the attribute was
// defined to be the empty string.
func (c *Client) ProjectAttributeValue(attr string) (string, error) {
return c.Get("project/attributes/" + attr)
}
// Scopes returns the service account scopes for the given account.
// The account may be empty or the string "default" to use the instance's
// main account.
func (c *Client) Scopes(serviceAccount string) ([]string, error) {
if serviceAccount == "" {
serviceAccount = "default"
}
return c.lines("instance/service-accounts/" + serviceAccount + "/scopes")
}
 // Subscribe subscribes to a value from the metadata service.
 // The suffix is appended to "http://${GCE_METADATA_HOST}/computeMetadata/v1/".
 // The suffix may contain query parameters.
@@ -275,11 +466,11 @@ func systemInfoSuggestsGCE() bool {
 // and ok false. Subscribe blocks until fn returns a non-nil error or the value
 // is deleted. Subscribe returns the error value returned from the last call to
 // fn, which may be nil when ok == false.
-func Subscribe(suffix string, fn func(v string, ok bool) error) error {
+func (c *Client) Subscribe(suffix string, fn func(v string, ok bool) error) error {
 	const failedSubscribeSleep = time.Second * 5
 
 	// First check to see if the metadata value exists at all.
-	val, lastETag, err := getETag(subscribeClient, suffix)
+	val, lastETag, err := c.getETag(suffix)
 	if err != nil {
 		return err
 	}
@@ -295,7 +486,7 @@ func Subscribe(suffix string, fn func(v string, ok bool) error) error {
 		suffix += "?wait_for_change=true&last_etag="
 	}
 	for {
-		val, etag, err := getETag(subscribeClient, suffix+url.QueryEscape(lastETag))
+		val, etag, err := c.getETag(suffix + url.QueryEscape(lastETag))
 		if err != nil {
 			if _, deleted := err.(NotDefinedError); !deleted {
 				time.Sleep(failedSubscribeSleep)
@@ -310,128 +501,3 @@ func Subscribe(suffix string, fn func(v string, ok bool) error) error {
 		}
 	}
 }
// ProjectID returns the current instance's project ID string.
func ProjectID() (string, error) { return projID.get() }
// NumericProjectID returns the current instance's numeric project ID.
func NumericProjectID() (string, error) { return projNum.get() }
// InternalIP returns the instance's primary internal IP address.
func InternalIP() (string, error) {
return getTrimmed("instance/network-interfaces/0/ip")
}
// ExternalIP returns the instance's primary external (public) IP address.
func ExternalIP() (string, error) {
return getTrimmed("instance/network-interfaces/0/access-configs/0/external-ip")
}
// Hostname returns the instance's hostname. This will be of the form
// "<instanceID>.c.<projID>.internal".
func Hostname() (string, error) {
return getTrimmed("instance/hostname")
}
// InstanceTags returns the list of user-defined instance tags,
// assigned when initially creating a GCE instance.
func InstanceTags() ([]string, error) {
var s []string
j, err := Get("instance/tags")
if err != nil {
return nil, err
}
if err := json.NewDecoder(strings.NewReader(j)).Decode(&s); err != nil {
return nil, err
}
return s, nil
}
// InstanceID returns the current VM's numeric instance ID.
func InstanceID() (string, error) {
return instID.get()
}
// InstanceName returns the current VM's instance ID string.
func InstanceName() (string, error) {
host, err := Hostname()
if err != nil {
return "", err
}
return strings.Split(host, ".")[0], nil
}
// Zone returns the current VM's zone, such as "us-central1-b".
func Zone() (string, error) {
zone, err := getTrimmed("instance/zone")
// zone is of the form "projects/<projNum>/zones/<zoneName>".
if err != nil {
return "", err
}
return zone[strings.LastIndex(zone, "/")+1:], nil
}
// InstanceAttributes returns the list of user-defined attributes,
// assigned when initially creating a GCE VM instance. The value of an
// attribute can be obtained with InstanceAttributeValue.
func InstanceAttributes() ([]string, error) { return lines("instance/attributes/") }
// ProjectAttributes returns the list of user-defined attributes
// applying to the project as a whole, not just this VM. The value of
// an attribute can be obtained with ProjectAttributeValue.
func ProjectAttributes() ([]string, error) { return lines("project/attributes/") }
func lines(suffix string) ([]string, error) {
j, err := Get(suffix)
if err != nil {
return nil, err
}
s := strings.Split(strings.TrimSpace(j), "\n")
for i := range s {
s[i] = strings.TrimSpace(s[i])
}
return s, nil
}
// InstanceAttributeValue returns the value of the provided VM
// instance attribute.
//
// If the requested attribute is not defined, the returned error will
// be of type NotDefinedError.
//
// InstanceAttributeValue may return ("", nil) if the attribute was
// defined to be the empty string.
func InstanceAttributeValue(attr string) (string, error) {
return Get("instance/attributes/" + attr)
}
// ProjectAttributeValue returns the value of the provided
// project attribute.
//
// If the requested attribute is not defined, the returned error will
// be of type NotDefinedError.
//
// ProjectAttributeValue may return ("", nil) if the attribute was
// defined to be the empty string.
func ProjectAttributeValue(attr string) (string, error) {
return Get("project/attributes/" + attr)
}
// Scopes returns the service account scopes for the given account.
// The account may be empty or the string "default" to use the instance's
// main account.
func Scopes(serviceAccount string) ([]string, error) {
if serviceAccount == "" {
serviceAccount = "default"
}
return lines("instance/service-accounts/" + serviceAccount + "/scopes")
}
func strsContains(ss []string, s string) bool {
for _, v := range ss {
if v == s {
return true
}
}
return false
}
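
As an aside, a small sketch of how the new metadata Client introduced above is meant to be consumed, with a caller-supplied http.Client instead of the package-level helpers; this is not from the vendored code and the timeout value is arbitrary.

```go
package main

import (
	"fmt"
	"log"
	"net/http"
	"time"

	"cloud.google.com/go/compute/metadata"
)

func main() {
	if !metadata.OnGCE() {
		log.Println("not running on GCE; metadata service unavailable")
		return
	}
	// NewClient lets the caller control transport behaviour (timeouts,
	// proxies, instrumentation) instead of relying on defaultClient.
	c := metadata.NewClient(&http.Client{Timeout: 3 * time.Second})
	project, err := c.ProjectID()
	if err != nil {
		log.Fatal(err)
	}
	zone, err := c.Zone()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("project %s, zone %s\n", project, zone)
}
```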


@@ -1,4 +1,4 @@
-// Copyright 2016 Google Inc. All Rights Reserved.
+// Copyright 2016 Google LLC
 //
 // Licensed under the Apache License, Version 2.0 (the "License");
 // you may not use this file except in compliance with the License.
@@ -22,9 +22,13 @@
 package iam
 
 import (
+	"time"
+
+	gax "github.com/googleapis/gax-go"
 	"golang.org/x/net/context"
 	pb "google.golang.org/genproto/googleapis/iam/v1"
 	"google.golang.org/grpc"
+	"google.golang.org/grpc/codes"
 )
 
 // client abstracts the IAMPolicy API to allow multiple implementations.
@@ -39,26 +43,50 @@ type grpcClient struct {
 	c pb.IAMPolicyClient
 }
var withRetry = gax.WithRetry(func() gax.Retryer {
return gax.OnCodes([]codes.Code{
codes.DeadlineExceeded,
codes.Unavailable,
}, gax.Backoff{
Initial: 100 * time.Millisecond,
Max: 60 * time.Second,
Multiplier: 1.3,
})
})
 func (g *grpcClient) Get(ctx context.Context, resource string) (*pb.Policy, error) {
-	proto, err := g.c.GetIamPolicy(ctx, &pb.GetIamPolicyRequest{Resource: resource})
+	var proto *pb.Policy
+	err := gax.Invoke(ctx, func(ctx context.Context, _ gax.CallSettings) error {
+		var err error
+		proto, err = g.c.GetIamPolicy(ctx, &pb.GetIamPolicyRequest{Resource: resource})
+		return err
+	}, withRetry)
 	if err != nil {
 		return nil, err
 	}
 	return proto, nil
 }
 
 func (g *grpcClient) Set(ctx context.Context, resource string, p *pb.Policy) error {
+	return gax.Invoke(ctx, func(ctx context.Context, _ gax.CallSettings) error {
 		_, err := g.c.SetIamPolicy(ctx, &pb.SetIamPolicyRequest{
 			Resource: resource,
 			Policy:   p,
 		})
 		return err
+	}, withRetry)
 }
 
 func (g *grpcClient) Test(ctx context.Context, resource string, perms []string) ([]string, error) {
-	res, err := g.c.TestIamPermissions(ctx, &pb.TestIamPermissionsRequest{
+	var res *pb.TestIamPermissionsResponse
+	err := gax.Invoke(ctx, func(ctx context.Context, _ gax.CallSettings) error {
+		var err error
+		res, err = g.c.TestIamPermissions(ctx, &pb.TestIamPermissionsRequest{
 			Resource:    resource,
 			Permissions: perms,
 		})
+		return err
+	}, withRetry)
 	if err != nil {
 		return nil, err
 	}
@@ -76,7 +104,15 @@ type Handle struct {
 // InternalNewHandle returns a Handle for resource.
 // The conn parameter refers to a server that must support the IAMPolicy service.
 func InternalNewHandle(conn *grpc.ClientConn, resource string) *Handle {
-	return InternalNewHandleClient(&grpcClient{c: pb.NewIAMPolicyClient(conn)}, resource)
+	return InternalNewHandleGRPCClient(pb.NewIAMPolicyClient(conn), resource)
+}
+
+// InternalNewHandleGRPCClient is for use by the Google Cloud Libraries only.
+//
+// InternalNewHandleClient returns a Handle for resource using the given
+// grpc service that implements IAM as a mixin
+func InternalNewHandleGRPCClient(c pb.IAMPolicyClient, resource string) *Handle {
+	return InternalNewHandleClient(&grpcClient{c: c}, resource)
 }
 
 // InternalNewHandleClient is for use by the Google Cloud Libraries only.
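
Outside the vendored diff, the retry wiring above reduces to the following pattern; an illustrative sketch only, with a placeholder rpc callback standing in for a real gRPC call.

```go
package main

import (
	"context"
	"time"

	gax "github.com/googleapis/gax-go"
	"google.golang.org/grpc/codes"
)

// callWithRetry retries rpc on DeadlineExceeded/Unavailable using the same
// backoff settings as the vendored IAM client above.
func callWithRetry(ctx context.Context, rpc func(ctx context.Context) error) error {
	withRetry := gax.WithRetry(func() gax.Retryer {
		return gax.OnCodes([]codes.Code{
			codes.DeadlineExceeded,
			codes.Unavailable,
		}, gax.Backoff{
			Initial:    100 * time.Millisecond,
			Max:        60 * time.Second,
			Multiplier: 1.3,
		})
	})
	return gax.Invoke(ctx, func(ctx context.Context, _ gax.CallSettings) error {
		return rpc(ctx)
	}, withRetry)
}

func main() {
	_ = callWithRetry(context.Background(), func(ctx context.Context) error {
		// A real gRPC call would go here.
		return nil
	})
}
```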

vendor/cloud.google.com/go/internal/annotate.go (new file)

@ -0,0 +1,54 @@
// Copyright 2017 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package internal
import (
"fmt"
"google.golang.org/api/googleapi"
"google.golang.org/grpc/status"
)
// Annotate prepends msg to the error message in err, attempting
// to preserve other information in err, like an error code.
//
// Annotate panics if err is nil.
//
// Annotate knows about these error types:
// - "google.golang.org/grpc/status".Status
// - "google.golang.org/api/googleapi".Error
// If the error is not one of these types, Annotate behaves
// like
// fmt.Errorf("%s: %v", msg, err)
func Annotate(err error, msg string) error {
if err == nil {
panic("Annotate called with nil")
}
if s, ok := status.FromError(err); ok {
p := s.Proto()
p.Message = msg + ": " + p.Message
return status.ErrorProto(p)
}
if g, ok := err.(*googleapi.Error); ok {
g.Message = msg + ": " + g.Message
return g
}
return fmt.Errorf("%s: %v", msg, err)
}
// Annotatef uses format and args to format a string, then calls Annotate.
func Annotatef(err error, format string, args ...interface{}) error {
return Annotate(err, fmt.Sprintf(format, args...))
}
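
A short illustration of the behaviour Annotate implements; the internal package cannot be imported from outside cloud.google.com/go, so this sketch re-creates only the googleapi.Error branch with a hypothetical annotate helper.

```go
package main

import (
	"fmt"

	"google.golang.org/api/googleapi"
)

// annotate mirrors the googleapi.Error branch of Annotate: the HTTP code is
// preserved and only the message gains a prefix.
func annotate(err error, msg string) error {
	if g, ok := err.(*googleapi.Error); ok {
		g.Message = msg + ": " + g.Message
		return g
	}
	return fmt.Errorf("%s: %v", msg, err)
}

func main() {
	err := &googleapi.Error{Code: 404, Message: "object not found"}
	wrapped := annotate(err, "stat gs://bucket/key")
	if g, ok := wrapped.(*googleapi.Error); ok {
		fmt.Println(g.Code, g.Message) // 404 stat gs://bucket/key: object not found
	}
}
```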


@@ -1,4 +1,4 @@
-// Copyright 2016 Google Inc. All Rights Reserved.
+// Copyright 2016 Google LLC
 //
 // Licensed under the Apache License, Version 2.0 (the "License");
 // you may not use this file except in compliance with the License.
@@ -20,6 +20,7 @@ package optional
 import (
 	"fmt"
 	"strings"
+	"time"
 )
 
 type (
@@ -37,6 +38,9 @@ type (
 	// Float64 is either a float64 or nil.
 	Float64 interface{}
 
+	// Duration is either a time.Duration or nil.
+	Duration interface{}
 )
 
 // ToBool returns its argument as a bool.
@@ -89,6 +93,16 @@ func ToFloat64(v Float64) float64 {
 	return x
 }
 
+// ToDuration returns its argument as a time.Duration.
+// It panics if its argument is nil or not a time.Duration.
+func ToDuration(v Duration) time.Duration {
+	x, ok := v.(time.Duration)
+	if !ok {
+		doPanic("Duration", v)
+	}
+	return x
+}
+
 func doPanic(capType string, v interface{}) {
 	panic(fmt.Sprintf("optional.%s value should be %s, got %T", capType, strings.ToLower(capType), v))
 }
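
For illustration only (the optional package is internal to cloud.google.com/go), the same nil-able pattern applied to time.Duration with local stand-in types.

```go
package main

import (
	"fmt"
	"time"
)

// Duration is either a time.Duration or nil, mirroring optional.Duration.
type Duration interface{}

// ToDuration unwraps v, panicking if it is nil or not a time.Duration.
func ToDuration(v Duration) time.Duration {
	d, ok := v.(time.Duration)
	if !ok {
		panic(fmt.Sprintf("optional.Duration value should be duration, got %T", v))
	}
	return d
}

func main() {
	var retention Duration = 7 * 24 * time.Hour // nil would mean "unset"
	if retention != nil {
		fmt.Println("retention:", ToDuration(retention))
	}
}
```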


@@ -1,4 +1,4 @@
-// Copyright 2016 Google Inc. All Rights Reserved.
+// Copyright 2016 Google LLC
 //
 // Licensed under the Apache License, Version 2.0 (the "License");
 // you may not use this file except in compliance with the License.
@@ -15,7 +15,6 @@
 package internal
 
 import (
-	"fmt"
 	"time"
 
 	gax "github.com/googleapis/gax-go"
@@ -48,7 +47,7 @@ func retry(ctx context.Context, bo gax.Backoff, f func() (stop bool, err error),
 		p := bo.Pause()
 		if cerr := sleep(ctx, p); cerr != nil {
 			if lastErr != nil {
-				return fmt.Errorf("%v; last function err: %v", cerr, lastErr)
+				return Annotatef(lastErr, "retry failed with %v; last error", cerr)
 			}
 			return cerr
 		}

vendor/cloud.google.com/go/internal/trace/go18.go (new file)

@ -0,0 +1,83 @@
// Copyright 2018 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// +build go1.8
package trace
import (
"go.opencensus.io/trace"
"golang.org/x/net/context"
"google.golang.org/api/googleapi"
"google.golang.org/genproto/googleapis/rpc/code"
"google.golang.org/grpc/status"
)
func StartSpan(ctx context.Context, name string) context.Context {
ctx, _ = trace.StartSpan(ctx, name)
return ctx
}
func EndSpan(ctx context.Context, err error) {
span := trace.FromContext(ctx)
if err != nil {
span.SetStatus(toStatus(err))
}
span.End()
}
// ToStatus interrogates an error and converts it to an appropriate
// OpenCensus status.
func toStatus(err error) trace.Status {
if err2, ok := err.(*googleapi.Error); ok {
return trace.Status{Code: httpStatusCodeToOCCode(err2.Code), Message: err2.Message}
} else if s, ok := status.FromError(err); ok {
return trace.Status{Code: int32(s.Code()), Message: s.Message()}
} else {
return trace.Status{Code: int32(code.Code_UNKNOWN), Message: err.Error()}
}
}
// TODO (deklerk): switch to using OpenCensus function when it becomes available.
// Reference: https://github.com/googleapis/googleapis/blob/26b634d2724ac5dd30ae0b0cbfb01f07f2e4050e/google/rpc/code.proto
func httpStatusCodeToOCCode(httpStatusCode int) int32 {
switch httpStatusCode {
case 200:
return int32(code.Code_OK)
case 499:
return int32(code.Code_CANCELLED)
case 500:
return int32(code.Code_UNKNOWN) // Could also be Code_INTERNAL, Code_DATA_LOSS
case 400:
return int32(code.Code_INVALID_ARGUMENT) // Could also be Code_OUT_OF_RANGE
case 504:
return int32(code.Code_DEADLINE_EXCEEDED)
case 404:
return int32(code.Code_NOT_FOUND)
case 409:
return int32(code.Code_ALREADY_EXISTS) // Could also be Code_ABORTED
case 403:
return int32(code.Code_PERMISSION_DENIED)
case 401:
return int32(code.Code_UNAUTHENTICATED)
case 429:
return int32(code.Code_RESOURCE_EXHAUSTED)
case 501:
return int32(code.Code_UNIMPLEMENTED)
case 503:
return int32(code.Code_UNAVAILABLE)
default:
return int32(code.Code_UNKNOWN)
}
}
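
Not from the diff: a sketch of the span-wrapping pattern these helpers enable, written directly against go.opencensus.io/trace since the internal package is not importable. The span name and the queryBackend helper are placeholders.

```go
package main

import (
	"context"
	"errors"

	"go.opencensus.io/trace"
	"google.golang.org/genproto/googleapis/rpc/code"
)

func doWork(ctx context.Context) (err error) {
	ctx, span := trace.StartSpan(ctx, "example.com/doWork")
	defer func() {
		// Mirror EndSpan above: record a failure as the span status.
		if err != nil {
			span.SetStatus(trace.Status{Code: int32(code.Code_UNKNOWN), Message: err.Error()})
		}
		span.End()
	}()
	return queryBackend(ctx)
}

func queryBackend(ctx context.Context) error {
	_ = ctx // a real implementation would pass ctx to an RPC or HTTP call
	return errors.New("not implemented")
}

func main() {
	_ = doWork(context.Background())
}
```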

vendor/cloud.google.com/go/internal/trace/not_go18.go (new file)

@ -0,0 +1,30 @@
// Copyright 2018 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// +build !go1.8
package trace
import (
"golang.org/x/net/context"
)
// OpenCensus only supports go 1.8 and higher.
func StartSpan(ctx context.Context, _ string) context.Context {
return ctx
}
func EndSpan(context.Context, error) {
}


@@ -1,4 +1,4 @@
-// Copyright 2016 Google Inc. All Rights Reserved.
+// Copyright 2016 Google LLC
 //
 // Licensed under the Apache License, Version 2.0 (the "License");
 // you may not use this file except in compliance with the License.
@@ -26,7 +26,7 @@ import (
 
 // Repo is the current version of the client libraries in this
 // repo. It should be a date in YYYYMMDD format.
-const Repo = "20170404"
+const Repo = "20180226"
 
 // Go returns the Go runtime version. The returned string
 // has no whitespace.


@@ -1,4 +1,4 @@
-// Copyright 2014 Google Inc. All Rights Reserved.
+// Copyright 2014 Google LLC
 //
 // Licensed under the Apache License, Version 2.0 (the "License");
 // you may not use this file except in compliance with the License.
@@ -15,9 +15,12 @@
 package storage
 
 import (
-	"fmt"
+	"net/http"
+	"reflect"
 
+	"cloud.google.com/go/internal/trace"
 	"golang.org/x/net/context"
+	"google.golang.org/api/googleapi"
 	raw "google.golang.org/api/storage/v1"
 )
 
@@ -45,10 +48,21 @@ const (
 	AllAuthenticatedUsers ACLEntity = "allAuthenticatedUsers"
 )
 
-// ACLRule represents a grant for a role to an entity (user, group or team) for a Google Cloud Storage object or bucket.
+// ACLRule represents a grant for a role to an entity (user, group or team) for a
+// Google Cloud Storage object or bucket.
 type ACLRule struct {
 	Entity ACLEntity
+	EntityID string
 	Role ACLRole
+	Domain string
+	Email string
+	ProjectTeam *ProjectTeam
+}
+
+// ProjectTeam is the project team associated with the entity, if any.
+type ProjectTeam struct {
+	ProjectNumber string
+	Team string
 }
 // ACLHandle provides operations on an access control list for a Google Cloud Storage bucket or object.
@@ -57,10 +71,14 @@ type ACLHandle struct {
 	bucket string
 	object string
 	isDefault bool
+	userProject string // for requester-pays buckets
 }
 
 // Delete permanently deletes the ACL entry for the given entity.
-func (a *ACLHandle) Delete(ctx context.Context, entity ACLEntity) error {
+func (a *ACLHandle) Delete(ctx context.Context, entity ACLEntity) (err error) {
+	ctx = trace.StartSpan(ctx, "cloud.google.com/go/storage.ACL.Delete")
+	defer func() { trace.EndSpan(ctx, err) }()
+
 	if a.object != "" {
 		return a.objectDelete(ctx, entity)
 	}
@@ -70,19 +88,25 @@ func (a *ACLHandle) Delete(ctx context.Context, entity ACLEntity) error {
 	return a.bucketDelete(ctx, entity)
 }
 
-// Set sets the permission level for the given entity.
-func (a *ACLHandle) Set(ctx context.Context, entity ACLEntity, role ACLRole) error {
+// Set sets the role for the given entity.
+func (a *ACLHandle) Set(ctx context.Context, entity ACLEntity, role ACLRole) (err error) {
+	ctx = trace.StartSpan(ctx, "cloud.google.com/go/storage.ACL.Set")
+	defer func() { trace.EndSpan(ctx, err) }()
+
 	if a.object != "" {
-		return a.objectSet(ctx, entity, role)
+		return a.objectSet(ctx, entity, role, false)
 	}
 	if a.isDefault {
-		return a.bucketDefaultSet(ctx, entity, role)
+		return a.objectSet(ctx, entity, role, true)
 	}
 	return a.bucketSet(ctx, entity, role)
 }
 
 // List retrieves ACL entries.
-func (a *ACLHandle) List(ctx context.Context) ([]ACLRule, error) {
+func (a *ACLHandle) List(ctx context.Context) (rules []ACLRule, err error) {
+	ctx = trace.StartSpan(ctx, "cloud.google.com/go/storage.ACL.List")
+	defer func() { trace.EndSpan(ctx, err) }()
+
 	if a.object != "" {
 		return a.objectList(ctx)
 	}
@ -96,65 +120,38 @@ func (a *ACLHandle) bucketDefaultList(ctx context.Context) ([]ACLRule, error) {
var acls *raw.ObjectAccessControls var acls *raw.ObjectAccessControls
var err error var err error
err = runWithRetry(ctx, func() error { err = runWithRetry(ctx, func() error {
req := a.c.raw.DefaultObjectAccessControls.List(a.bucket).Context(ctx) req := a.c.raw.DefaultObjectAccessControls.List(a.bucket)
setClientHeader(req.Header()) a.configureCall(req, ctx)
acls, err = req.Do() acls, err = req.Do()
return err return err
}) })
if err != nil { if err != nil {
return nil, fmt.Errorf("storage: error listing default object ACL for bucket %q: %v", a.bucket, err) return nil, err
} }
return toACLRules(acls.Items), nil return toObjectACLRules(acls.Items), nil
}
func (a *ACLHandle) bucketDefaultSet(ctx context.Context, entity ACLEntity, role ACLRole) error {
acl := &raw.ObjectAccessControl{
Bucket: a.bucket,
Entity: string(entity),
Role: string(role),
}
err := runWithRetry(ctx, func() error {
req := a.c.raw.DefaultObjectAccessControls.Update(a.bucket, string(entity), acl).Context(ctx)
setClientHeader(req.Header())
_, err := req.Do()
return err
})
if err != nil {
return fmt.Errorf("storage: error updating default ACL entry for bucket %q, entity %q: %v", a.bucket, entity, err)
}
return nil
} }
func (a *ACLHandle) bucketDefaultDelete(ctx context.Context, entity ACLEntity) error { func (a *ACLHandle) bucketDefaultDelete(ctx context.Context, entity ACLEntity) error {
err := runWithRetry(ctx, func() error { return runWithRetry(ctx, func() error {
req := a.c.raw.DefaultObjectAccessControls.Delete(a.bucket, string(entity)).Context(ctx) req := a.c.raw.DefaultObjectAccessControls.Delete(a.bucket, string(entity))
setClientHeader(req.Header()) a.configureCall(req, ctx)
return req.Do() return req.Do()
}) })
if err != nil {
return fmt.Errorf("storage: error deleting default ACL entry for bucket %q, entity %q: %v", a.bucket, entity, err)
}
return nil
} }
func (a *ACLHandle) bucketList(ctx context.Context) ([]ACLRule, error) { func (a *ACLHandle) bucketList(ctx context.Context) ([]ACLRule, error) {
var acls *raw.BucketAccessControls var acls *raw.BucketAccessControls
var err error var err error
err = runWithRetry(ctx, func() error { err = runWithRetry(ctx, func() error {
req := a.c.raw.BucketAccessControls.List(a.bucket).Context(ctx) req := a.c.raw.BucketAccessControls.List(a.bucket)
setClientHeader(req.Header()) a.configureCall(req, ctx)
acls, err = req.Do() acls, err = req.Do()
return err return err
}) })
if err != nil { if err != nil {
return nil, fmt.Errorf("storage: error listing bucket ACL for bucket %q: %v", a.bucket, err) return nil, err
} }
r := make([]ACLRule, len(acls.Items)) return toBucketACLRules(acls.Items), nil
for i, v := range acls.Items {
r[i].Entity = ACLEntity(v.Entity)
r[i].Role = ACLRole(v.Role)
}
return r, nil
} }
func (a *ACLHandle) bucketSet(ctx context.Context, entity ACLEntity, role ACLRole) error { func (a *ACLHandle) bucketSet(ctx context.Context, entity ACLEntity, role ACLRole) error {
@ -164,78 +161,175 @@ func (a *ACLHandle) bucketSet(ctx context.Context, entity ACLEntity, role ACLRol
Role: string(role), Role: string(role),
} }
err := runWithRetry(ctx, func() error { err := runWithRetry(ctx, func() error {
req := a.c.raw.BucketAccessControls.Update(a.bucket, string(entity), acl).Context(ctx) req := a.c.raw.BucketAccessControls.Update(a.bucket, string(entity), acl)
setClientHeader(req.Header()) a.configureCall(req, ctx)
_, err := req.Do() _, err := req.Do()
return err return err
}) })
if err != nil { if err != nil {
return fmt.Errorf("storage: error updating bucket ACL entry for bucket %q, entity %q: %v", a.bucket, entity, err) return err
} }
return nil return nil
} }
func (a *ACLHandle) bucketDelete(ctx context.Context, entity ACLEntity) error { func (a *ACLHandle) bucketDelete(ctx context.Context, entity ACLEntity) error {
err := runWithRetry(ctx, func() error { return runWithRetry(ctx, func() error {
req := a.c.raw.BucketAccessControls.Delete(a.bucket, string(entity)).Context(ctx) req := a.c.raw.BucketAccessControls.Delete(a.bucket, string(entity))
setClientHeader(req.Header()) a.configureCall(req, ctx)
return req.Do() return req.Do()
}) })
if err != nil {
return fmt.Errorf("storage: error deleting bucket ACL entry for bucket %q, entity %q: %v", a.bucket, entity, err)
}
return nil
} }
func (a *ACLHandle) objectList(ctx context.Context) ([]ACLRule, error) { func (a *ACLHandle) objectList(ctx context.Context) ([]ACLRule, error) {
var acls *raw.ObjectAccessControls var acls *raw.ObjectAccessControls
var err error var err error
err = runWithRetry(ctx, func() error { err = runWithRetry(ctx, func() error {
req := a.c.raw.ObjectAccessControls.List(a.bucket, a.object).Context(ctx) req := a.c.raw.ObjectAccessControls.List(a.bucket, a.object)
setClientHeader(req.Header()) a.configureCall(req, ctx)
acls, err = req.Do() acls, err = req.Do()
return err return err
}) })
if err != nil { if err != nil {
return nil, fmt.Errorf("storage: error listing object ACL for bucket %q, file %q: %v", a.bucket, a.object, err) return nil, err
} }
return toACLRules(acls.Items), nil return toObjectACLRules(acls.Items), nil
}
func (a *ACLHandle) objectSet(ctx context.Context, entity ACLEntity, role ACLRole, isBucketDefault bool) error {
type setRequest interface {
Do(opts ...googleapi.CallOption) (*raw.ObjectAccessControl, error)
Header() http.Header
} }
func (a *ACLHandle) objectSet(ctx context.Context, entity ACLEntity, role ACLRole) error {
acl := &raw.ObjectAccessControl{ acl := &raw.ObjectAccessControl{
Bucket: a.bucket, Bucket: a.bucket,
Entity: string(entity), Entity: string(entity),
Role: string(role), Role: string(role),
} }
err := runWithRetry(ctx, func() error { var req setRequest
req := a.c.raw.ObjectAccessControls.Update(a.bucket, a.object, string(entity), acl).Context(ctx) if isBucketDefault {
setClientHeader(req.Header()) req = a.c.raw.DefaultObjectAccessControls.Update(a.bucket, string(entity), acl)
} else {
req = a.c.raw.ObjectAccessControls.Update(a.bucket, a.object, string(entity), acl)
}
a.configureCall(req, ctx)
return runWithRetry(ctx, func() error {
_, err := req.Do() _, err := req.Do()
return err return err
}) })
if err != nil {
return fmt.Errorf("storage: error updating object ACL entry for bucket %q, file %q, entity %q: %v", a.bucket, a.object, entity, err)
}
return nil
} }
func (a *ACLHandle) objectDelete(ctx context.Context, entity ACLEntity) error { func (a *ACLHandle) objectDelete(ctx context.Context, entity ACLEntity) error {
err := runWithRetry(ctx, func() error { return runWithRetry(ctx, func() error {
req := a.c.raw.ObjectAccessControls.Delete(a.bucket, a.object, string(entity)).Context(ctx) req := a.c.raw.ObjectAccessControls.Delete(a.bucket, a.object, string(entity))
setClientHeader(req.Header()) a.configureCall(req, ctx)
return req.Do() return req.Do()
}) })
if err != nil {
return fmt.Errorf("storage: error deleting object ACL entry for bucket %q, file %q, entity %q: %v", a.bucket, a.object, entity, err)
}
return nil
} }
func toACLRules(items []*raw.ObjectAccessControl) []ACLRule { func (a *ACLHandle) configureCall(call interface{ Header() http.Header }, ctx context.Context) {
r := make([]ACLRule, 0, len(items)) vc := reflect.ValueOf(call)
vc.MethodByName("Context").Call([]reflect.Value{reflect.ValueOf(ctx)})
if a.userProject != "" {
vc.MethodByName("UserProject").Call([]reflect.Value{reflect.ValueOf(a.userProject)})
}
setClientHeader(call.Header())
}
func toObjectACLRules(items []*raw.ObjectAccessControl) []ACLRule {
var rs []ACLRule
for _, item := range items { for _, item := range items {
r = append(r, ACLRule{Entity: ACLEntity(item.Entity), Role: ACLRole(item.Role)}) rs = append(rs, toObjectACLRule(item))
}
return rs
}
func toBucketACLRules(items []*raw.BucketAccessControl) []ACLRule {
var rs []ACLRule
for _, item := range items {
rs = append(rs, toBucketACLRule(item))
}
return rs
}
func toObjectACLRule(a *raw.ObjectAccessControl) ACLRule {
return ACLRule{
Entity: ACLEntity(a.Entity),
EntityID: a.EntityId,
Role: ACLRole(a.Role),
Domain: a.Domain,
Email: a.Email,
ProjectTeam: toObjectProjectTeam(a.ProjectTeam),
}
}
func toBucketACLRule(a *raw.BucketAccessControl) ACLRule {
return ACLRule{
Entity: ACLEntity(a.Entity),
EntityID: a.EntityId,
Role: ACLRole(a.Role),
Domain: a.Domain,
Email: a.Email,
ProjectTeam: toBucketProjectTeam(a.ProjectTeam),
}
}
func toRawObjectACL(rules []ACLRule) []*raw.ObjectAccessControl {
if len(rules) == 0 {
return nil
}
r := make([]*raw.ObjectAccessControl, 0, len(rules))
for _, rule := range rules {
r = append(r, rule.toRawObjectAccessControl("")) // bucket name unnecessary
} }
return r return r
} }
func toRawBucketACL(rules []ACLRule) []*raw.BucketAccessControl {
if len(rules) == 0 {
return nil
}
r := make([]*raw.BucketAccessControl, 0, len(rules))
for _, rule := range rules {
r = append(r, rule.toRawBucketAccessControl("")) // bucket name unnecessary
}
return r
}
func (r ACLRule) toRawBucketAccessControl(bucket string) *raw.BucketAccessControl {
return &raw.BucketAccessControl{
Bucket: bucket,
Entity: string(r.Entity),
Role: string(r.Role),
// The other fields are not settable.
}
}
func (r ACLRule) toRawObjectAccessControl(bucket string) *raw.ObjectAccessControl {
return &raw.ObjectAccessControl{
Bucket: bucket,
Entity: string(r.Entity),
Role: string(r.Role),
// The other fields are not settable.
}
}
func toBucketProjectTeam(p *raw.BucketAccessControlProjectTeam) *ProjectTeam {
if p == nil {
return nil
}
return &ProjectTeam{
ProjectNumber: p.ProjectNumber,
Team: p.Team,
}
}
func toObjectProjectTeam(p *raw.ObjectAccessControlProjectTeam) *ProjectTeam {
if p == nil {
return nil
}
return &ProjectTeam{
ProjectNumber: p.ProjectNumber,
Team: p.Team,
}
}
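
A hedged sketch (not part of the commit) of what the richer ACLRule now carries when listing a bucket ACL; the bucket name is a placeholder.

```go
package main

import (
	"context"
	"fmt"
	"log"

	"cloud.google.com/go/storage"
)

func main() {
	ctx := context.Background()
	client, err := storage.NewClient(ctx)
	if err != nil {
		log.Fatal(err)
	}
	defer client.Close()

	rules, err := client.Bucket("my-bucket").ACL().List(ctx)
	if err != nil {
		log.Fatal(err)
	}
	for _, r := range rules {
		// Entity and Role existed before; Email, Domain and ProjectTeam are
		// among the fields added by this vendored version.
		fmt.Printf("%-40s %-10s email=%q domain=%q\n", r.Entity, r.Role, r.Email, r.Domain)
		if r.ProjectTeam != nil {
			fmt.Printf("  project %s team %s\n", r.ProjectTeam.ProjectNumber, r.ProjectTeam.Team)
		}
	}
}
```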

File diff suppressed because it is too large.


@@ -1,4 +1,4 @@
-// Copyright 2016 Google Inc. All Rights Reserved.
+// Copyright 2016 Google LLC
 //
 // Licensed under the Apache License, Version 2.0 (the "License");
 // you may not use this file except in compliance with the License.
@@ -18,6 +18,7 @@ import (
 	"errors"
 	"fmt"
 
+	"cloud.google.com/go/internal/trace"
 	"golang.org/x/net/context"
 	raw "google.golang.org/api/storage/v1"
 )
@@ -25,6 +26,9 @@ import (
 // CopierFrom creates a Copier that can copy src to dst.
 // You can immediately call Run on the returned Copier, or
 // you can configure it first.
+//
+// For Requester Pays buckets, the user project of dst is billed, unless it is empty,
+// in which case the user project of src is billed.
 func (dst *ObjectHandle) CopierFrom(src *ObjectHandle) *Copier {
 	return &Copier{dst: dst, src: src}
 }
@@ -42,7 +46,7 @@ type Copier struct {
 	RewriteToken string
 
 	// ProgressFunc can be used to monitor the progress of a multi-RPC copy
-	// operation. If ProgressFunc is not nil and CopyFrom requires multiple
+	// operation. If ProgressFunc is not nil and copying requires multiple
 	// calls to the underlying service (see
 	// https://cloud.google.com/storage/docs/json_api/v1/objects/rewrite), then
 	// ProgressFunc will be invoked after each call with the number of bytes of
@@ -56,47 +60,72 @@ type Copier struct {
 	// ProgressFunc should return quickly without blocking.
 	ProgressFunc func(copiedBytes, totalBytes uint64)
 
+	// The Cloud KMS key, in the form projects/P/locations/L/keyRings/R/cryptoKeys/K,
+	// that will be used to encrypt the object. Overrides the object's KMSKeyName, if
+	// any.
+	//
+	// Providing both a DestinationKMSKeyName and a customer-supplied encryption key
+	// (via ObjectHandle.Key) on the destination object will result in an error when
+	// Run is called.
+	DestinationKMSKeyName string
+
 	dst, src *ObjectHandle
 }
 
 // Run performs the copy.
-func (c *Copier) Run(ctx context.Context) (*ObjectAttrs, error) {
+func (c *Copier) Run(ctx context.Context) (attrs *ObjectAttrs, err error) {
+	ctx = trace.StartSpan(ctx, "cloud.google.com/go/storage.Copier.Run")
+	defer func() { trace.EndSpan(ctx, err) }()
+
 	if err := c.src.validate(); err != nil {
 		return nil, err
 	}
 	if err := c.dst.validate(); err != nil {
 		return nil, err
 	}
+	if c.DestinationKMSKeyName != "" && c.dst.encryptionKey != nil {
+		return nil, errors.New("storage: cannot use DestinationKMSKeyName with a customer-supplied encryption key")
+	}
 	// Convert destination attributes to raw form, omitting the bucket.
 	// If the bucket is included but name or content-type aren't, the service
 	// returns a 400 with "Required" as the only message. Omitting the bucket
 	// does not cause any problems.
 	rawObject := c.ObjectAttrs.toRawObject("")
 	for {
-		res, err := c.callRewrite(ctx, c.src, rawObject)
+		res, err := c.callRewrite(ctx, rawObject)
 		if err != nil {
 			return nil, err
 		}
 		if c.ProgressFunc != nil {
-			c.ProgressFunc(res.TotalBytesRewritten, res.ObjectSize)
+			c.ProgressFunc(uint64(res.TotalBytesRewritten), uint64(res.ObjectSize))
 		}
 		if res.Done { // Finished successfully.
 			return newObject(res.Resource), nil
 		}
 	}
-	return nil, nil
 }
 
-func (c *Copier) callRewrite(ctx context.Context, src *ObjectHandle, rawObj *raw.Object) (*raw.RewriteResponse, error) {
-	call := c.dst.c.raw.Objects.Rewrite(src.bucket, src.object, c.dst.bucket, c.dst.object, rawObj)
+func (c *Copier) callRewrite(ctx context.Context, rawObj *raw.Object) (*raw.RewriteResponse, error) {
+	call := c.dst.c.raw.Objects.Rewrite(c.src.bucket, c.src.object, c.dst.bucket, c.dst.object, rawObj)
 
 	call.Context(ctx).Projection("full")
 	if c.RewriteToken != "" {
 		call.RewriteToken(c.RewriteToken)
 	}
+	if c.DestinationKMSKeyName != "" {
+		call.DestinationKmsKeyName(c.DestinationKMSKeyName)
+	}
+	if c.PredefinedACL != "" {
+		call.DestinationPredefinedAcl(c.PredefinedACL)
+	}
 	if err := applyConds("Copy destination", c.dst.gen, c.dst.conds, call); err != nil {
 		return nil, err
 	}
+	if c.dst.userProject != "" {
+		call.UserProject(c.dst.userProject)
+	} else if c.src.userProject != "" {
+		call.UserProject(c.src.userProject)
+	}
 	if err := applySourceConds(c.src.gen, c.src.conds, call); err != nil {
 		return nil, err
 	}
@@ -129,6 +158,8 @@ func (dst *ObjectHandle) ComposerFrom(srcs ...*ObjectHandle) *Composer {
 }
 
 // A Composer composes source objects into a destination object.
+//
+// For Requester Pays buckets, the user project of dst is billed.
 type Composer struct {
 	// ObjectAttrs are optional attributes to set on the destination object.
 	// Any attributes must be initialized before any calls on the Composer. Nil
@@ -140,7 +171,10 @@ type Composer struct {
 }
 
 // Run performs the compose operation.
-func (c *Composer) Run(ctx context.Context) (*ObjectAttrs, error) {
+func (c *Composer) Run(ctx context.Context) (attrs *ObjectAttrs, err error) {
+	ctx = trace.StartSpan(ctx, "cloud.google.com/go/storage.Composer.Run")
+	defer func() { trace.EndSpan(ctx, err) }()
+
 	if err := c.dst.validate(); err != nil {
 		return nil, err
 	}
@@ -175,11 +209,16 @@ func (c *Composer) Run(ctx context.Context) (*ObjectAttrs, error) {
 	if err := applyConds("ComposeFrom destination", c.dst.gen, c.dst.conds, call); err != nil {
 		return nil, err
 	}
+	if c.dst.userProject != "" {
+		call.UserProject(c.dst.userProject)
+	}
+	if c.PredefinedACL != "" {
+		call.DestinationPredefinedAcl(c.PredefinedACL)
+	}
 	if err := setEncryptionHeaders(call.Header(), c.dst.encryptionKey, false); err != nil {
 		return nil, err
 	}
 	var obj *raw.Object
-	var err error
 	setClientHeader(call.Header())
 	err = runWithRetry(ctx, func() error { obj, err = call.Do(); return err })
 	if err != nil {
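
For reference, a sketch (not from the commit) of a server-side copy using the Copier fields touched above, namely ProgressFunc and the destination user project; all names are placeholders.

```go
package main

import (
	"context"
	"fmt"
	"log"

	"cloud.google.com/go/storage"
)

func main() {
	ctx := context.Background()
	client, err := storage.NewClient(ctx)
	if err != nil {
		log.Fatal(err)
	}
	defer client.Close()

	src := client.Bucket("src-bucket").Object("data")
	// UserProject on the destination bucket is what gets billed for
	// Requester Pays buckets, per the CopierFrom doc comment above.
	dst := client.Bucket("dst-bucket").UserProject("my-billing-project").Object("data-copy")

	copier := dst.CopierFrom(src)
	copier.ProgressFunc = func(copied, total uint64) {
		fmt.Printf("copied %d of %d bytes\n", copied, total)
	}
	attrs, err := copier.Run(ctx)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("created", attrs.Name, "generation", attrs.Generation)
}
```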


@ -1,4 +1,4 @@
// Copyright 2016 Google Inc. All Rights Reserved. // Copyright 2016 Google LLC
// //
// Licensed under the Apache License, Version 2.0 (the "License"); // Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License. // you may not use this file except in compliance with the License.
@ -19,11 +19,14 @@ Google Cloud Storage stores data in named objects, which are grouped into bucket
More information about Google Cloud Storage is available at More information about Google Cloud Storage is available at
https://cloud.google.com/storage/docs. https://cloud.google.com/storage/docs.
All of the methods of this package use exponential backoff to retry calls See https://godoc.org/cloud.google.com/go for authentication, timeouts,
that fail with certain errors, as described in connection pooling and similar aspects of this package.
https://cloud.google.com/storage/docs/exponential-backoff.
Note: This package is in beta. Some backwards-incompatible changes may occur. All of the methods of this package use exponential backoff to retry calls that fail
with certain errors, as described in
https://cloud.google.com/storage/docs/exponential-backoff. Retrying continues
indefinitely unless the controlling context is canceled or the client is closed. See
context.WithTimeout and context.WithCancel.
Creating a Client Creating a Client
@ -36,6 +39,13 @@ To start working with this package, create a client:
// TODO: Handle error. // TODO: Handle error.
} }
The client will use your default application credentials.
If you only wish to access public data, you can create
an unauthenticated client with
client, err := storage.NewClient(ctx, option.WithoutAuthentication())
Buckets Buckets
A Google Cloud Storage bucket is a collection of objects. To work with a A Google Cloud Storage bucket is a collection of objects. To work with a
@ -56,7 +66,7 @@ global across all projects.
Each bucket has associated metadata, represented in this package by Each bucket has associated metadata, represented in this package by
BucketAttrs. The third argument to BucketHandle.Create allows you to set BucketAttrs. The third argument to BucketHandle.Create allows you to set
the intial BucketAttrs of a bucket. To retrieve a bucket's attributes, use the initial BucketAttrs of a bucket. To retrieve a bucket's attributes, use
Attrs: Attrs:
attrs, err := bkt.Attrs(ctx) attrs, err := bkt.Attrs(ctx)
@ -69,15 +79,16 @@ Attrs:
Objects Objects
An object holds arbitrary data as a sequence of bytes, like a file. You An object holds arbitrary data as a sequence of bytes, like a file. You
refer to objects using a handle, just as with buckets. You can use the refer to objects using a handle, just as with buckets, but unlike buckets
standard Go io.Reader and io.Writer interfaces to read and write you don't explicitly create an object. Instead, the first time you write
object data: to an object it will be created. You can use the standard Go io.Reader
and io.Writer interfaces to read and write object data:
obj := bkt.Object("data") obj := bkt.Object("data")
// Write something to obj. // Write something to obj.
// w implements io.Writer. // w implements io.Writer.
w := obj.NewWriter(ctx) w := obj.NewWriter(ctx)
// Write some text to obj. This will overwrite whatever is there. // Write some text to obj. This will either create the object or overwrite whatever is there already.
if _, err := fmt.Fprintf(w, "This object contains text.\n"); err != nil { if _, err := fmt.Fprintf(w, "This object contains text.\n"); err != nil {
// TODO: Handle error. // TODO: Handle error.
} }
@ -153,9 +164,13 @@ SignedURL for details.
} }
fmt.Println(url) fmt.Println(url)
Authentication Errors
See examples of authorization and authentication at Errors returned by this client are often of the type [`googleapi.Error`](https://godoc.org/google.golang.org/api/googleapi#Error).
https://godoc.org/cloud.google.com/go#pkg-examples. These errors can be introspected for more information by type asserting to the richer `googleapi.Error` type. For example:
if e, ok := err.(*googleapi.Error); ok {
if e.Code == 409 { ... }
}
*/ */
package storage // import "cloud.google.com/go/storage" package storage // import "cloud.google.com/go/storage"
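A minimal sketch of the retry, timeout and error-introspection guidance above, assuming a *storage.Client has already been constructed; the bucket and project names are placeholders and not part of the diff:
    // Assumed imports: "context", "time",
    // "cloud.google.com/go/storage", "google.golang.org/api/googleapi".
    func createBucket(client *storage.Client) error {
        // Retrying stops when the controlling context is canceled or expires.
        ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
        defer cancel()
        err := client.Bucket("my-bucket").Create(ctx, "my-project", nil)
        if e, ok := err.(*googleapi.Error); ok && e.Code == 409 {
            // 409 Conflict here typically means the bucket already exists;
            // e.Body and e.Header carry the service's details.
            return nil
        }
        return err
    }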

32
vendor/cloud.google.com/go/storage/go110.go generated vendored Normal file
View File

@ -0,0 +1,32 @@
// Copyright 2017 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// +build go1.10
package storage
import "google.golang.org/api/googleapi"
func shouldRetry(err error) bool {
switch e := err.(type) {
case *googleapi.Error:
// Retry on 429 and 5xx, according to
// https://cloud.google.com/storage/docs/exponential-backoff.
return e.Code == 429 || (e.Code >= 500 && e.Code < 600)
case interface{ Temporary() bool }:
return e.Temporary()
default:
return false
}
}
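A quick illustration of the classification shouldRetry implements; since the helper is unexported, this would only compile inside the storage package (for example in its tests), and the error values are synthetic:
    // Assumed import: "google.golang.org/api/googleapi".
    func retryClassification() {
        _ = shouldRetry(&googleapi.Error{Code: 429}) // true: rate limited
        _ = shouldRetry(&googleapi.Error{Code: 503}) // true: 5xx server error
        _ = shouldRetry(&googleapi.Error{Code: 404}) // false: not retryable
    }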

View File

@ -1,4 +1,4 @@
// Copyright 2017 Google Inc. All Rights Reserved. // Copyright 2017 Google LLC
// //
// Licensed under the Apache License, Version 2.0 (the "License"); // Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License. // you may not use this file except in compliance with the License.
@ -24,3 +24,7 @@ import (
func withContext(r *http.Request, ctx context.Context) *http.Request { func withContext(r *http.Request, ctx context.Context) *http.Request {
return r.WithContext(ctx) return r.WithContext(ctx)
} }
func goHTTPUncompressed(res *http.Response) bool {
return res.Uncompressed
}

View File

@ -1,4 +1,4 @@
// Copyright 2017 Google Inc. All Rights Reserved. // Copyright 2017 Google LLC
// //
// Licensed under the Apache License, Version 2.0 (the "License"); // Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License. // you may not use this file except in compliance with the License.
@ -16,6 +16,7 @@ package storage
import ( import (
"cloud.google.com/go/iam" "cloud.google.com/go/iam"
"cloud.google.com/go/internal/trace"
"golang.org/x/net/context" "golang.org/x/net/context"
raw "google.golang.org/api/storage/v1" raw "google.golang.org/api/storage/v1"
iampb "google.golang.org/genproto/googleapis/iam/v1" iampb "google.golang.org/genproto/googleapis/iam/v1"
@ -23,21 +24,30 @@ import (
// IAM provides access to IAM access control for the bucket. // IAM provides access to IAM access control for the bucket.
func (b *BucketHandle) IAM() *iam.Handle { func (b *BucketHandle) IAM() *iam.Handle {
return iam.InternalNewHandleClient(&iamClient{raw: b.c.raw}, b.name) return iam.InternalNewHandleClient(&iamClient{
raw: b.c.raw,
userProject: b.userProject,
}, b.name)
} }
// iamClient implements the iam.client interface. // iamClient implements the iam.client interface.
type iamClient struct { type iamClient struct {
raw *raw.Service raw *raw.Service
userProject string
} }
func (c *iamClient) Get(ctx context.Context, resource string) (*iampb.Policy, error) { func (c *iamClient) Get(ctx context.Context, resource string) (p *iampb.Policy, err error) {
req := c.raw.Buckets.GetIamPolicy(resource) ctx = trace.StartSpan(ctx, "cloud.google.com/go/storage.IAM.Get")
setClientHeader(req.Header()) defer func() { trace.EndSpan(ctx, err) }()
call := c.raw.Buckets.GetIamPolicy(resource)
setClientHeader(call.Header())
if c.userProject != "" {
call.UserProject(c.userProject)
}
var rp *raw.Policy var rp *raw.Policy
var err error
err = runWithRetry(ctx, func() error { err = runWithRetry(ctx, func() error {
rp, err = req.Context(ctx).Do() rp, err = call.Context(ctx).Do()
return err return err
}) })
if err != nil { if err != nil {
@ -46,23 +56,34 @@ func (c *iamClient) Get(ctx context.Context, resource string) (*iampb.Policy, er
return iamFromStoragePolicy(rp), nil return iamFromStoragePolicy(rp), nil
} }
func (c *iamClient) Set(ctx context.Context, resource string, p *iampb.Policy) error { func (c *iamClient) Set(ctx context.Context, resource string, p *iampb.Policy) (err error) {
ctx = trace.StartSpan(ctx, "cloud.google.com/go/storage.IAM.Set")
defer func() { trace.EndSpan(ctx, err) }()
rp := iamToStoragePolicy(p) rp := iamToStoragePolicy(p)
req := c.raw.Buckets.SetIamPolicy(resource, rp) call := c.raw.Buckets.SetIamPolicy(resource, rp)
setClientHeader(req.Header()) setClientHeader(call.Header())
if c.userProject != "" {
call.UserProject(c.userProject)
}
return runWithRetry(ctx, func() error { return runWithRetry(ctx, func() error {
_, err := req.Context(ctx).Do() _, err := call.Context(ctx).Do()
return err return err
}) })
} }
func (c *iamClient) Test(ctx context.Context, resource string, perms []string) ([]string, error) { func (c *iamClient) Test(ctx context.Context, resource string, perms []string) (permissions []string, err error) {
req := c.raw.Buckets.TestIamPermissions(resource, perms) ctx = trace.StartSpan(ctx, "cloud.google.com/go/storage.IAM.Test")
setClientHeader(req.Header()) defer func() { trace.EndSpan(ctx, err) }()
call := c.raw.Buckets.TestIamPermissions(resource, perms)
setClientHeader(call.Header())
if c.userProject != "" {
call.UserProject(c.userProject)
}
var res *raw.TestIamPermissionsResponse var res *raw.TestIamPermissionsResponse
var err error
err = runWithRetry(ctx, func() error { err = runWithRetry(ctx, func() error {
res, err = req.Context(ctx).Do() res, err = call.Context(ctx).Do()
return err return err
}) })
if err != nil { if err != nil {
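A sketch of how the bucket IAM handle above might be used, assuming the Policy/Add/SetPolicy surface of the companion cloud.google.com/go/iam package; the bucket name and role are placeholders. With the change above, any user project set on the BucketHandle is now forwarded on these calls.
    // Assumed imports: "context", "cloud.google.com/go/iam", "cloud.google.com/go/storage".
    func makePublic(ctx context.Context, client *storage.Client) error {
        handle := client.Bucket("my-bucket").IAM()
        policy, err := handle.Policy(ctx)
        if err != nil {
            return err
        }
        policy.Add(iam.AllUsers, iam.Viewer) // grant roles/viewer to allUsers
        return handle.SetPolicy(ctx, policy)
    }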

View File

@ -1,4 +1,4 @@
// Copyright 2014 Google Inc. All Rights Reserved. // Copyright 2014 Google LLC
// //
// Licensed under the Apache License, Version 2.0 (the "License"); // Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License. // you may not use this file except in compliance with the License.
@ -18,7 +18,6 @@ import (
"cloud.google.com/go/internal" "cloud.google.com/go/internal"
gax "github.com/googleapis/gax-go" gax "github.com/googleapis/gax-go"
"golang.org/x/net/context" "golang.org/x/net/context"
"google.golang.org/api/googleapi"
) )
// runWithRetry calls the function until it returns nil or a non-retryable error, or // runWithRetry calls the function until it returns nil or a non-retryable error, or
@ -29,13 +28,7 @@ func runWithRetry(ctx context.Context, call func() error) error {
if err == nil { if err == nil {
return true, nil return true, nil
} }
e, ok := err.(*googleapi.Error) if shouldRetry(err) {
if !ok {
return true, err
}
// Retry on 429 and 5xx, according to
// https://cloud.google.com/storage/docs/exponential-backoff.
if e.Code == 429 || (e.Code >= 500 && e.Code < 600) {
return false, nil return false, nil
} }
return true, err return true, err

42
vendor/cloud.google.com/go/storage/not_go110.go generated vendored Normal file
View File

@ -0,0 +1,42 @@
// Copyright 2017 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// +build !go1.10
package storage
import (
"net/url"
"strings"
"google.golang.org/api/googleapi"
)
func shouldRetry(err error) bool {
switch e := err.(type) {
case *googleapi.Error:
// Retry on 429 and 5xx, according to
// https://cloud.google.com/storage/docs/exponential-backoff.
return e.Code == 429 || (e.Code >= 500 && e.Code < 600)
case *url.Error:
// Retry on REFUSED_STREAM.
// Unfortunately the error type is unexported, so we resort to string
// matching.
return strings.Contains(e.Error(), "REFUSED_STREAM")
case interface{ Temporary() bool }:
return e.Temporary()
default:
return false
}
}

View File

@ -1,4 +1,4 @@
// Copyright 2017 Google Inc. All Rights Reserved. // Copyright 2017 Google LLC
// //
// Licensed under the Apache License, Version 2.0 (the "License"); // Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License. // you may not use this file except in compliance with the License.
@ -24,3 +24,10 @@ func withContext(r *http.Request, _ interface{}) *http.Request {
// In Go 1.6 and below, ignore the context. // In Go 1.6 and below, ignore the context.
return r return r
} }
// Go 1.6 doesn't have http.Response.Uncompressed, so we can't know whether the Go
// HTTP stack uncompressed a gzip file. As a good approximation, assume that
// the lack of a Content-Length header means that it did uncompress.
func goHTTPUncompressed(res *http.Response) bool {
return res.Header.Get("Content-Length") == ""
}

188
vendor/cloud.google.com/go/storage/notifications.go generated vendored Normal file
View File

@ -0,0 +1,188 @@
// Copyright 2017 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package storage
import (
"errors"
"fmt"
"regexp"
"cloud.google.com/go/internal/trace"
"golang.org/x/net/context"
raw "google.golang.org/api/storage/v1"
)
// A Notification describes how to send Cloud PubSub messages when certain
// events occur in a bucket.
type Notification struct {
// The ID of the notification.
ID string
// The ID of the topic to which this subscription publishes.
TopicID string
// The ID of the project to which the topic belongs.
TopicProjectID string
// Only send notifications about listed event types. If empty, send notifications
// for all event types.
// See https://cloud.google.com/storage/docs/pubsub-notifications#events.
EventTypes []string
// If present, only apply this notification configuration to object names that
// begin with this prefix.
ObjectNamePrefix string
// An optional list of additional attributes to attach to each Cloud PubSub
// message published for this notification subscription.
CustomAttributes map[string]string
// The contents of the message payload.
// See https://cloud.google.com/storage/docs/pubsub-notifications#payload.
PayloadFormat string
}
// Values for Notification.PayloadFormat.
const (
// Send no payload with notification messages.
NoPayload = "NONE"
// Send object metadata as JSON with notification messages.
JSONPayload = "JSON_API_V1"
)
// Values for Notification.EventTypes.
const (
// Event that occurs when an object is successfully created.
ObjectFinalizeEvent = "OBJECT_FINALIZE"
// Event that occurs when the metadata of an existing object changes.
ObjectMetadataUpdateEvent = "OBJECT_METADATA_UPDATE"
// Event that occurs when an object is permanently deleted.
ObjectDeleteEvent = "OBJECT_DELETE"
// Event that occurs when the live version of an object becomes an
// archived version.
ObjectArchiveEvent = "OBJECT_ARCHIVE"
)
func toNotification(rn *raw.Notification) *Notification {
n := &Notification{
ID: rn.Id,
EventTypes: rn.EventTypes,
ObjectNamePrefix: rn.ObjectNamePrefix,
CustomAttributes: rn.CustomAttributes,
PayloadFormat: rn.PayloadFormat,
}
n.TopicProjectID, n.TopicID = parseNotificationTopic(rn.Topic)
return n
}
var topicRE = regexp.MustCompile("^//pubsub.googleapis.com/projects/([^/]+)/topics/([^/]+)")
// parseNotificationTopic extracts the project and topic IDs from the full
// resource name returned by the service. If the name is malformed, it returns
// "?" for both IDs.
func parseNotificationTopic(nt string) (projectID, topicID string) {
matches := topicRE.FindStringSubmatch(nt)
if matches == nil {
return "?", "?"
}
return matches[1], matches[2]
}
func toRawNotification(n *Notification) *raw.Notification {
return &raw.Notification{
Id: n.ID,
Topic: fmt.Sprintf("//pubsub.googleapis.com/projects/%s/topics/%s",
n.TopicProjectID, n.TopicID),
EventTypes: n.EventTypes,
ObjectNamePrefix: n.ObjectNamePrefix,
CustomAttributes: n.CustomAttributes,
PayloadFormat: string(n.PayloadFormat),
}
}
// AddNotification adds a notification to b. You must set n's TopicProjectID, TopicID
// and PayloadFormat, and must not set its ID. The other fields are all optional. The
// returned Notification's ID can be used to refer to it.
func (b *BucketHandle) AddNotification(ctx context.Context, n *Notification) (ret *Notification, err error) {
ctx = trace.StartSpan(ctx, "cloud.google.com/go/storage.Bucket.AddNotification")
defer func() { trace.EndSpan(ctx, err) }()
if n.ID != "" {
return nil, errors.New("storage: AddNotification: ID must not be set")
}
if n.TopicProjectID == "" {
return nil, errors.New("storage: AddNotification: missing TopicProjectID")
}
if n.TopicID == "" {
return nil, errors.New("storage: AddNotification: missing TopicID")
}
call := b.c.raw.Notifications.Insert(b.name, toRawNotification(n))
setClientHeader(call.Header())
if b.userProject != "" {
call.UserProject(b.userProject)
}
rn, err := call.Context(ctx).Do()
if err != nil {
return nil, err
}
return toNotification(rn), nil
}
// Notifications returns all the Notifications configured for this bucket, as a map
// indexed by notification ID.
func (b *BucketHandle) Notifications(ctx context.Context) (n map[string]*Notification, err error) {
ctx = trace.StartSpan(ctx, "cloud.google.com/go/storage.Bucket.Notifications")
defer func() { trace.EndSpan(ctx, err) }()
call := b.c.raw.Notifications.List(b.name)
setClientHeader(call.Header())
if b.userProject != "" {
call.UserProject(b.userProject)
}
var res *raw.Notifications
err = runWithRetry(ctx, func() error {
res, err = call.Context(ctx).Do()
return err
})
if err != nil {
return nil, err
}
return notificationsToMap(res.Items), nil
}
func notificationsToMap(rns []*raw.Notification) map[string]*Notification {
m := map[string]*Notification{}
for _, rn := range rns {
m[rn.Id] = toNotification(rn)
}
return m
}
// DeleteNotification deletes the notification with the given ID.
func (b *BucketHandle) DeleteNotification(ctx context.Context, id string) (err error) {
ctx = trace.StartSpan(ctx, "cloud.google.com/go/storage.Bucket.DeleteNotification")
defer func() { trace.EndSpan(ctx, err) }()
call := b.c.raw.Notifications.Delete(b.name, id)
setClientHeader(call.Header())
if b.userProject != "" {
call.UserProject(b.userProject)
}
return call.Context(ctx).Do()
}
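A minimal sketch of configuring a notification with the API added above; project, topic and bucket names are placeholders. TopicProjectID, TopicID and PayloadFormat are required, and ID must be left empty.
    // Assumed imports: "context", "cloud.google.com/go/storage".
    func notifyOnUpload(ctx context.Context, bkt *storage.BucketHandle) (*storage.Notification, error) {
        return bkt.AddNotification(ctx, &storage.Notification{
            TopicProjectID: "my-project",
            TopicID:        "my-topic",
            PayloadFormat:  storage.JSONPayload,
            // Only OBJECT_FINALIZE events; leave empty to receive all event types.
            EventTypes: []string{storage.ObjectFinalizeEvent},
        })
    }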

View File

@ -1,4 +1,4 @@
// Copyright 2016 Google Inc. All Rights Reserved. // Copyright 2016 Google LLC
// //
// Licensed under the Apache License, Version 2.0 (the "License"); // Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License. // you may not use this file except in compliance with the License.
@ -15,22 +15,278 @@
package storage package storage
import ( import (
"errors"
"fmt" "fmt"
"hash/crc32" "hash/crc32"
"io" "io"
"io/ioutil"
"net/http"
"net/url"
"reflect"
"strconv"
"strings"
"time"
"cloud.google.com/go/internal/trace"
"golang.org/x/net/context"
"google.golang.org/api/googleapi"
) )
var crc32cTable = crc32.MakeTable(crc32.Castagnoli) var crc32cTable = crc32.MakeTable(crc32.Castagnoli)
// ReaderObjectAttrs are attributes about the object being read. These are populated
// during the New call. This struct only holds a subset of object attributes: to
// get the full set of attributes, use ObjectHandle.Attrs.
//
// Each field is read-only.
type ReaderObjectAttrs struct {
// Size is the length of the object's content.
Size int64
// ContentType is the MIME type of the object's content.
ContentType string
// ContentEncoding is the encoding of the object's content.
ContentEncoding string
// CacheControl specifies whether and for how long browser and Internet
// caches are allowed to cache your objects.
CacheControl string
// LastModified is the time that the object was last modified.
LastModified time.Time
// Generation is the generation number of the object's content.
Generation int64
// Metageneration is the version of the metadata for this object at
// this generation. This field is used for preconditions and for
// detecting changes in metadata. A metageneration number is only
// meaningful in the context of a particular generation of a
// particular object.
Metageneration int64
}
// NewReader creates a new Reader to read the contents of the
// object.
// ErrObjectNotExist will be returned if the object is not found.
//
// The caller must call Close on the returned Reader when done reading.
func (o *ObjectHandle) NewReader(ctx context.Context) (*Reader, error) {
return o.NewRangeReader(ctx, 0, -1)
}
// NewRangeReader reads part of an object, reading at most length bytes
// starting at the given offset. If length is negative, the object is read
// until the end.
func (o *ObjectHandle) NewRangeReader(ctx context.Context, offset, length int64) (r *Reader, err error) {
ctx = trace.StartSpan(ctx, "cloud.google.com/go/storage.Object.NewRangeReader")
defer func() { trace.EndSpan(ctx, err) }()
if err := o.validate(); err != nil {
return nil, err
}
if offset < 0 {
return nil, fmt.Errorf("storage: invalid offset %d < 0", offset)
}
if o.conds != nil {
if err := o.conds.validate("NewRangeReader"); err != nil {
return nil, err
}
}
u := &url.URL{
Scheme: "https",
Host: "storage.googleapis.com",
Path: fmt.Sprintf("/%s/%s", o.bucket, o.object),
}
verb := "GET"
if length == 0 {
verb = "HEAD"
}
req, err := http.NewRequest(verb, u.String(), nil)
if err != nil {
return nil, err
}
req = withContext(req, ctx)
if o.userProject != "" {
req.Header.Set("X-Goog-User-Project", o.userProject)
}
if o.readCompressed {
req.Header.Set("Accept-Encoding", "gzip")
}
if err := setEncryptionHeaders(req.Header, o.encryptionKey, false); err != nil {
return nil, err
}
gen := o.gen
// Define a function that initiates a Read with offset and length, assuming we
// have already read seen bytes.
reopen := func(seen int64) (*http.Response, error) {
start := offset + seen
if length < 0 && start > 0 {
req.Header.Set("Range", fmt.Sprintf("bytes=%d-", start))
} else if length > 0 {
// The end character isn't affected by how many bytes we've seen.
req.Header.Set("Range", fmt.Sprintf("bytes=%d-%d", start, offset+length-1))
}
// We wait to assign conditions here because the generation number can change in between reopen() runs.
req.URL.RawQuery = conditionsQuery(gen, o.conds)
var res *http.Response
err = runWithRetry(ctx, func() error {
res, err = o.c.hc.Do(req)
if err != nil {
return err
}
if res.StatusCode == http.StatusNotFound {
res.Body.Close()
return ErrObjectNotExist
}
if res.StatusCode < 200 || res.StatusCode > 299 {
body, _ := ioutil.ReadAll(res.Body)
res.Body.Close()
return &googleapi.Error{
Code: res.StatusCode,
Header: res.Header,
Body: string(body),
}
}
if start > 0 && length != 0 && res.StatusCode != http.StatusPartialContent {
res.Body.Close()
return errors.New("storage: partial request not satisfied")
}
// If a generation hasn't been specified, and this is the first response we get, let's record the
// generation. In future requests we'll use this generation as a precondition to avoid data races.
if gen < 0 && res.Header.Get("X-Goog-Generation") != "" {
gen64, err := strconv.ParseInt(res.Header.Get("X-Goog-Generation"), 10, 64)
if err != nil {
return err
}
gen = gen64
}
return nil
})
if err != nil {
return nil, err
}
return res, nil
}
res, err := reopen(0)
if err != nil {
return nil, err
}
var (
size int64 // total size of object, even if a range was requested.
checkCRC bool
crc uint32
)
if res.StatusCode == http.StatusPartialContent {
cr := strings.TrimSpace(res.Header.Get("Content-Range"))
if !strings.HasPrefix(cr, "bytes ") || !strings.Contains(cr, "/") {
return nil, fmt.Errorf("storage: invalid Content-Range %q", cr)
}
size, err = strconv.ParseInt(cr[strings.LastIndex(cr, "/")+1:], 10, 64)
if err != nil {
return nil, fmt.Errorf("storage: invalid Content-Range %q", cr)
}
} else {
size = res.ContentLength
// Check the CRC iff all of the following hold:
// - We asked for content (length != 0).
// - We got all the content (status != PartialContent).
// - The server sent a CRC header.
// - The Go http stack did not uncompress the file.
// - We were not served compressed data that was uncompressed on download.
// The problem with the last two cases is that the CRC will not match -- GCS
// computes it on the compressed contents, but we compute it on the
// uncompressed contents.
if length != 0 && !goHTTPUncompressed(res) && !uncompressedByServer(res) {
crc, checkCRC = parseCRC32c(res)
}
}
remain := res.ContentLength
body := res.Body
if length == 0 {
remain = 0
body.Close()
body = emptyBody
}
var metaGen int64
if res.Header.Get("X-Goog-Generation") != "" {
metaGen, err = strconv.ParseInt(res.Header.Get("X-Goog-Metageneration"), 10, 64)
if err != nil {
return nil, err
}
}
var lm time.Time
if res.Header.Get("Last-Modified") != "" {
lm, err = http.ParseTime(res.Header.Get("Last-Modified"))
if err != nil {
return nil, err
}
}
attrs := ReaderObjectAttrs{
Size: size,
ContentType: res.Header.Get("Content-Type"),
ContentEncoding: res.Header.Get("Content-Encoding"),
CacheControl: res.Header.Get("Cache-Control"),
LastModified: lm,
Generation: gen,
Metageneration: metaGen,
}
return &Reader{
Attrs: attrs,
body: body,
size: size,
remain: remain,
wantCRC: crc,
checkCRC: checkCRC,
reopen: reopen,
}, nil
}
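A minimal sketch of a ranged read using the rewritten NewRangeReader above; the reader now records the object's generation on first contact and transparently reopens the request if the stream is interrupted. The object handle and sizes are placeholders.
    // Assumed imports: "context", "io/ioutil", "cloud.google.com/go/storage".
    func readFirstKiB(ctx context.Context, obj *storage.ObjectHandle) ([]byte, error) {
        // Read at most 1024 bytes from offset 0; pass length -1 to read to the end.
        r, err := obj.NewRangeReader(ctx, 0, 1024)
        if err != nil {
            return nil, err
        }
        defer r.Close()
        return ioutil.ReadAll(r)
    }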
func uncompressedByServer(res *http.Response) bool {
// If the data is stored as gzip but is not encoded as gzip, then it
// was uncompressed by the server.
return res.Header.Get("X-Goog-Stored-Content-Encoding") == "gzip" &&
res.Header.Get("Content-Encoding") != "gzip"
}
func parseCRC32c(res *http.Response) (uint32, bool) {
const prefix = "crc32c="
for _, spec := range res.Header["X-Goog-Hash"] {
if strings.HasPrefix(spec, prefix) {
c, err := decodeUint32(spec[len(prefix):])
if err == nil {
return c, true
}
}
}
return 0, false
}
var emptyBody = ioutil.NopCloser(strings.NewReader(""))
// Reader reads a Cloud Storage object. // Reader reads a Cloud Storage object.
// It implements io.Reader. // It implements io.Reader.
//
// Typically, a Reader computes the CRC of the downloaded content and compares it to
// the stored CRC, returning an error from Read if there is a mismatch. This integrity check
// is skipped if transcoding occurs. See https://cloud.google.com/storage/docs/transcoding.
type Reader struct { type Reader struct {
Attrs ReaderObjectAttrs
body io.ReadCloser body io.ReadCloser
remain, size int64 seen, remain, size int64
contentType string
checkCRC bool // should we check the CRC? checkCRC bool // should we check the CRC?
wantCRC uint32 // the CRC32c value the server sent in the header wantCRC uint32 // the CRC32c value the server sent in the header
gotCRC uint32 // running crc gotCRC uint32 // running crc
reopen func(seen int64) (*http.Response, error)
} }
// Close closes the Reader. It must be called when done reading. // Close closes the Reader. It must be called when done reading.
@ -39,7 +295,7 @@ func (r *Reader) Close() error {
} }
func (r *Reader) Read(p []byte) (int, error) { func (r *Reader) Read(p []byte) (int, error) {
n, err := r.body.Read(p) n, err := r.readWithRetry(p)
if r.remain != -1 { if r.remain != -1 {
r.remain -= int64(n) r.remain -= int64(n)
} }
@ -48,19 +304,52 @@ func (r *Reader) Read(p []byte) (int, error) {
// Check CRC here. It would be natural to check it in Close, but // Check CRC here. It would be natural to check it in Close, but
// everybody defers Close on the assumption that it doesn't return // everybody defers Close on the assumption that it doesn't return
// anything worth looking at. // anything worth looking at.
if r.remain == 0 && r.gotCRC != r.wantCRC { if err == io.EOF {
if r.gotCRC != r.wantCRC {
return n, fmt.Errorf("storage: bad CRC on read: got %d, want %d", return n, fmt.Errorf("storage: bad CRC on read: got %d, want %d",
r.gotCRC, r.wantCRC) r.gotCRC, r.wantCRC)
} }
} }
}
return n, err return n, err
} }
func (r *Reader) readWithRetry(p []byte) (int, error) {
n := 0
for len(p[n:]) > 0 {
m, err := r.body.Read(p[n:])
n += m
r.seen += int64(m)
if !shouldRetryRead(err) {
return n, err
}
// Read failed, but we will try again. Send a ranged read request that takes
// into account the number of bytes we've already seen.
res, err := r.reopen(r.seen)
if err != nil {
// reopen already retries
return n, err
}
r.body.Close()
r.body = res.Body
}
return n, nil
}
func shouldRetryRead(err error) bool {
if err == nil {
return false
}
return strings.HasSuffix(err.Error(), "INTERNAL_ERROR") && strings.Contains(reflect.TypeOf(err).String(), "http2")
}
// Size returns the size of the object in bytes. // Size returns the size of the object in bytes.
// The returned value is always the same and is not affected by // The returned value is always the same and is not affected by
// calls to Read or Close. // calls to Read or Close.
//
// Deprecated: use Reader.Attrs.Size.
func (r *Reader) Size() int64 { func (r *Reader) Size() int64 {
return r.size return r.Attrs.Size
} }
// Remain returns the number of bytes left to read, or -1 if unknown. // Remain returns the number of bytes left to read, or -1 if unknown.
@ -69,6 +358,29 @@ func (r *Reader) Remain() int64 {
} }
// ContentType returns the content type of the object. // ContentType returns the content type of the object.
//
// Deprecated: use Reader.Attrs.ContentType.
func (r *Reader) ContentType() string { func (r *Reader) ContentType() string {
return r.contentType return r.Attrs.ContentType
}
// ContentEncoding returns the content encoding of the object.
//
// Deprecated: use Reader.Attrs.ContentEncoding.
func (r *Reader) ContentEncoding() string {
return r.Attrs.ContentEncoding
}
// CacheControl returns the cache control of the object.
//
// Deprecated: use Reader.Attrs.CacheControl.
func (r *Reader) CacheControl() string {
return r.Attrs.CacheControl
}
// LastModified returns the value of the Last-Modified header.
//
// Deprecated: use Reader.Attrs.LastModified.
func (r *Reader) LastModified() (time.Time, error) {
return r.Attrs.LastModified, nil
} }
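Since the accessor methods above are now deprecated, a small sketch of reading the same metadata through Reader.Attrs; the object handle is a placeholder.
    // Assumed imports: "context", "fmt", "cloud.google.com/go/storage".
    func describeContent(ctx context.Context, obj *storage.ObjectHandle) error {
        r, err := obj.NewReader(ctx)
        if err != nil {
            return err
        }
        defer r.Close()
        fmt.Println(r.Attrs.Size, r.Attrs.ContentType, r.Attrs.ContentEncoding, r.Attrs.LastModified)
        return nil
    }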

View File

@ -1,4 +1,4 @@
// Copyright 2014 Google Inc. All Rights Reserved. // Copyright 2014 Google LLC
// //
// Licensed under the Apache License, Version 2.0 (the "License"); // Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License. // you may not use this file except in compliance with the License.
@ -26,17 +26,19 @@ import (
"errors" "errors"
"fmt" "fmt"
"io" "io"
"io/ioutil"
"net/http" "net/http"
"net/url" "net/url"
"reflect" "reflect"
"regexp"
"sort"
"strconv" "strconv"
"strings" "strings"
"time" "time"
"unicode/utf8" "unicode/utf8"
"cloud.google.com/go/internal/trace"
"google.golang.org/api/option" "google.golang.org/api/option"
"google.golang.org/api/transport" htransport "google.golang.org/api/transport/http"
"cloud.google.com/go/internal/optional" "cloud.google.com/go/internal/optional"
"cloud.google.com/go/internal/version" "cloud.google.com/go/internal/version"
@ -89,7 +91,7 @@ func NewClient(ctx context.Context, opts ...option.ClientOption) (*Client, error
option.WithUserAgent(userAgent), option.WithUserAgent(userAgent),
} }
opts = append(o, opts...) opts = append(o, opts...)
hc, ep, err := transport.NewHTTPClient(ctx, opts...) hc, ep, err := htransport.NewClient(ctx, opts...)
if err != nil { if err != nil {
return nil, fmt.Errorf("dialing: %v", err) return nil, fmt.Errorf("dialing: %v", err)
} }
@ -110,43 +112,12 @@ func NewClient(ctx context.Context, opts ...option.ClientOption) (*Client, error
// //
// Close need not be called at program exit. // Close need not be called at program exit.
func (c *Client) Close() error { func (c *Client) Close() error {
// Set fields to nil so that subsequent uses will panic.
c.hc = nil c.hc = nil
c.raw = nil
return nil return nil
} }
// BucketHandle provides operations on a Google Cloud Storage bucket.
// Use Client.Bucket to get a handle.
type BucketHandle struct {
acl ACLHandle
defaultObjectACL ACLHandle
c *Client
name string
}
// Bucket returns a BucketHandle, which provides operations on the named bucket.
// This call does not perform any network operations.
//
// The supplied name must contain only lowercase letters, numbers, dashes,
// underscores, and dots. The full specification for valid bucket names can be
// found at:
// https://cloud.google.com/storage/docs/bucket-naming
func (c *Client) Bucket(name string) *BucketHandle {
return &BucketHandle{
c: c,
name: name,
acl: ACLHandle{
c: c,
bucket: name,
},
defaultObjectACL: ACLHandle{
c: c,
bucket: name,
isDefault: true,
},
}
}
// SignedURLOptions allows you to restrict the access to the signed URL. // SignedURLOptions allows you to restrict the access to the signed URL.
type SignedURLOptions struct { type SignedURLOptions struct {
// GoogleAccessID represents the authorizer of the signed URL generation. // GoogleAccessID represents the authorizer of the signed URL generation.
@ -200,7 +171,7 @@ type SignedURLOptions struct {
// Optional. // Optional.
ContentType string ContentType string
// Headers is a list of extention headers the client must provide // Headers is a list of extension headers the client must provide
// in order to use the generated signed URL. // in order to use the generated signed URL.
// Optional. // Optional.
Headers []string Headers []string
@ -212,6 +183,60 @@ type SignedURLOptions struct {
MD5 string MD5 string
} }
var (
canonicalHeaderRegexp = regexp.MustCompile(`(?i)^(x-goog-[^:]+):(.*)?$`)
excludedCanonicalHeaders = map[string]bool{
"x-goog-encryption-key": true,
"x-goog-encryption-key-sha256": true,
}
)
// sanitizeHeaders applies the specifications for canonical extension headers at
// https://cloud.google.com/storage/docs/access-control/signed-urls#about-canonical-extension-headers.
func sanitizeHeaders(hdrs []string) []string {
headerMap := map[string][]string{}
for _, hdr := range hdrs {
// No leading or trailing whitespaces.
sanitizedHeader := strings.TrimSpace(hdr)
// Only keep canonical headers, discard any others.
headerMatches := canonicalHeaderRegexp.FindStringSubmatch(sanitizedHeader)
if len(headerMatches) == 0 {
continue
}
header := strings.ToLower(strings.TrimSpace(headerMatches[1]))
if excludedCanonicalHeaders[headerMatches[1]] {
// Do not keep any deliberately excluded canonical headers when signing.
continue
}
value := strings.TrimSpace(headerMatches[2])
if len(value) > 0 {
// Remove duplicate headers by appending the values of duplicates
// in their order of appearance.
headerMap[header] = append(headerMap[header], value)
}
}
var sanitizedHeaders []string
for header, values := range headerMap {
// There should be no spaces around the colon separating the
// header name from the header value or around the values
// themselves. The values should be separated by commas.
// NOTE: The semantics for headers without a value are not clear.
// However, from the specifications these should be edge cases
// anyway and we should assume that there will be no
// canonical headers using empty values. Any such headers
// are discarded at the regexp stage above.
sanitizedHeaders = append(
sanitizedHeaders,
fmt.Sprintf("%s:%s", header, strings.Join(values, ",")),
)
}
sort.Strings(sanitizedHeaders)
return sanitizedHeaders
}
// SignedURL returns a URL for the specified object. Signed URLs allow // SignedURL returns a URL for the specified object. Signed URLs allow
// the users access to a restricted resource for a limited time without having a // the users access to a restricted resource for a limited time without having a
// Google account or signing in. For more information about the signed // Google account or signing in. For more information about the signed
@ -238,6 +263,7 @@ func SignedURL(bucket, name string, opts *SignedURLOptions) (string, error) {
return "", errors.New("storage: invalid MD5 checksum") return "", errors.New("storage: invalid MD5 checksum")
} }
} }
opts.Headers = sanitizeHeaders(opts.Headers)
signBytes := opts.SignBytes signBytes := opts.SignBytes
if opts.PrivateKey != nil { if opts.PrivateKey != nil {
@ -295,6 +321,8 @@ type ObjectHandle struct {
gen int64 // a negative value indicates latest gen int64 // a negative value indicates latest
conds *Conditions conds *Conditions
encryptionKey []byte // AES-256 key encryptionKey []byte // AES-256 key
userProject string // for requester-pays buckets
readCompressed bool // Accept-Encoding: gzip
} }
// ACL provides access to the object's access control list. // ACL provides access to the object's access control list.
@ -317,7 +345,7 @@ func (o *ObjectHandle) Generation(gen int64) *ObjectHandle {
// If returns a new ObjectHandle that applies a set of preconditions. // If returns a new ObjectHandle that applies a set of preconditions.
// Preconditions already set on the ObjectHandle are ignored. // Preconditions already set on the ObjectHandle are ignored.
// Operations on the new handle will only occur if the preconditions are // Operations on the new handle will return an error if the preconditions are not
// satisfied. See https://cloud.google.com/storage/docs/generations-preconditions // satisfied. See https://cloud.google.com/storage/docs/generations-preconditions
// for more details. // for more details.
func (o *ObjectHandle) If(conds Conditions) *ObjectHandle { func (o *ObjectHandle) If(conds Conditions) *ObjectHandle {
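A sketch of a precondition-guarded call using the If semantics documented above; under the corrected wording, the operation returns an error rather than silently not occurring when the precondition fails. The generation value is a placeholder.
    // Assumed imports: "context", "cloud.google.com/go/storage".
    func deleteIfUnchanged(ctx context.Context, obj *storage.ObjectHandle, gen int64) error {
        // Fails with a precondition error if the object's live generation is no longer gen.
        return obj.If(storage.Conditions{GenerationMatch: gen}).Delete(ctx)
    }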
@ -339,7 +367,10 @@ func (o *ObjectHandle) Key(encryptionKey []byte) *ObjectHandle {
// Attrs returns meta information about the object. // Attrs returns meta information about the object.
// ErrObjectNotExist will be returned if the object is not found. // ErrObjectNotExist will be returned if the object is not found.
func (o *ObjectHandle) Attrs(ctx context.Context) (*ObjectAttrs, error) { func (o *ObjectHandle) Attrs(ctx context.Context) (attrs *ObjectAttrs, err error) {
ctx = trace.StartSpan(ctx, "cloud.google.com/go/storage.Object.Attrs")
defer func() { trace.EndSpan(ctx, err) }()
if err := o.validate(); err != nil { if err := o.validate(); err != nil {
return nil, err return nil, err
} }
@ -347,11 +378,13 @@ func (o *ObjectHandle) Attrs(ctx context.Context) (*ObjectAttrs, error) {
if err := applyConds("Attrs", o.gen, o.conds, call); err != nil { if err := applyConds("Attrs", o.gen, o.conds, call); err != nil {
return nil, err return nil, err
} }
if o.userProject != "" {
call.UserProject(o.userProject)
}
if err := setEncryptionHeaders(call.Header(), o.encryptionKey, false); err != nil { if err := setEncryptionHeaders(call.Header(), o.encryptionKey, false); err != nil {
return nil, err return nil, err
} }
var obj *raw.Object var obj *raw.Object
var err error
setClientHeader(call.Header()) setClientHeader(call.Header())
err = runWithRetry(ctx, func() error { obj, err = call.Do(); return err }) err = runWithRetry(ctx, func() error { obj, err = call.Do(); return err })
if e, ok := err.(*googleapi.Error); ok && e.Code == http.StatusNotFound { if e, ok := err.(*googleapi.Error); ok && e.Code == http.StatusNotFound {
@ -366,7 +399,10 @@ func (o *ObjectHandle) Attrs(ctx context.Context) (*ObjectAttrs, error) {
// Update updates an object with the provided attributes. // Update updates an object with the provided attributes.
// All zero-value attributes are ignored. // All zero-value attributes are ignored.
// ErrObjectNotExist will be returned if the object is not found. // ErrObjectNotExist will be returned if the object is not found.
func (o *ObjectHandle) Update(ctx context.Context, uattrs ObjectAttrsToUpdate) (*ObjectAttrs, error) { func (o *ObjectHandle) Update(ctx context.Context, uattrs ObjectAttrsToUpdate) (oa *ObjectAttrs, err error) {
ctx = trace.StartSpan(ctx, "cloud.google.com/go/storage.Object.Update")
defer func() { trace.EndSpan(ctx, err) }()
if err := o.validate(); err != nil { if err := o.validate(); err != nil {
return nil, err return nil, err
} }
@ -375,11 +411,17 @@ func (o *ObjectHandle) Update(ctx context.Context, uattrs ObjectAttrsToUpdate) (
var forceSendFields, nullFields []string var forceSendFields, nullFields []string
if uattrs.ContentType != nil { if uattrs.ContentType != nil {
attrs.ContentType = optional.ToString(uattrs.ContentType) attrs.ContentType = optional.ToString(uattrs.ContentType)
// For ContentType, sending the empty string is a no-op.
// Instead we send a null.
if attrs.ContentType == "" {
nullFields = append(nullFields, "ContentType")
} else {
forceSendFields = append(forceSendFields, "ContentType") forceSendFields = append(forceSendFields, "ContentType")
} }
}
if uattrs.ContentLanguage != nil { if uattrs.ContentLanguage != nil {
attrs.ContentLanguage = optional.ToString(uattrs.ContentLanguage) attrs.ContentLanguage = optional.ToString(uattrs.ContentLanguage)
// For ContentLanguage It's an error to send the empty string. // For ContentLanguage it's an error to send the empty string.
// Instead we send a null. // Instead we send a null.
if attrs.ContentLanguage == "" { if attrs.ContentLanguage == "" {
nullFields = append(nullFields, "ContentLanguage") nullFields = append(nullFields, "ContentLanguage")
@ -389,7 +431,7 @@ func (o *ObjectHandle) Update(ctx context.Context, uattrs ObjectAttrsToUpdate) (
} }
if uattrs.ContentEncoding != nil { if uattrs.ContentEncoding != nil {
attrs.ContentEncoding = optional.ToString(uattrs.ContentEncoding) attrs.ContentEncoding = optional.ToString(uattrs.ContentEncoding)
forceSendFields = append(forceSendFields, "ContentType") forceSendFields = append(forceSendFields, "ContentEncoding")
} }
if uattrs.ContentDisposition != nil { if uattrs.ContentDisposition != nil {
attrs.ContentDisposition = optional.ToString(uattrs.ContentDisposition) attrs.ContentDisposition = optional.ToString(uattrs.ContentDisposition)
@ -399,6 +441,14 @@ func (o *ObjectHandle) Update(ctx context.Context, uattrs ObjectAttrsToUpdate) (
attrs.CacheControl = optional.ToString(uattrs.CacheControl) attrs.CacheControl = optional.ToString(uattrs.CacheControl)
forceSendFields = append(forceSendFields, "CacheControl") forceSendFields = append(forceSendFields, "CacheControl")
} }
if uattrs.EventBasedHold != nil {
attrs.EventBasedHold = optional.ToBool(uattrs.EventBasedHold)
forceSendFields = append(forceSendFields, "EventBasedHold")
}
if uattrs.TemporaryHold != nil {
attrs.TemporaryHold = optional.ToBool(uattrs.TemporaryHold)
forceSendFields = append(forceSendFields, "TemporaryHold")
}
if uattrs.Metadata != nil { if uattrs.Metadata != nil {
attrs.Metadata = uattrs.Metadata attrs.Metadata = uattrs.Metadata
if len(attrs.Metadata) == 0 { if len(attrs.Metadata) == 0 {
@ -421,11 +471,16 @@ func (o *ObjectHandle) Update(ctx context.Context, uattrs ObjectAttrsToUpdate) (
if err := applyConds("Update", o.gen, o.conds, call); err != nil { if err := applyConds("Update", o.gen, o.conds, call); err != nil {
return nil, err return nil, err
} }
if o.userProject != "" {
call.UserProject(o.userProject)
}
if uattrs.PredefinedACL != "" {
call.PredefinedAcl(uattrs.PredefinedACL)
}
if err := setEncryptionHeaders(call.Header(), o.encryptionKey, false); err != nil { if err := setEncryptionHeaders(call.Header(), o.encryptionKey, false); err != nil {
return nil, err return nil, err
} }
var obj *raw.Object var obj *raw.Object
var err error
setClientHeader(call.Header()) setClientHeader(call.Header())
err = runWithRetry(ctx, func() error { obj, err = call.Do(); return err }) err = runWithRetry(ctx, func() error { obj, err = call.Do(); return err })
if e, ok := err.(*googleapi.Error); ok && e.Code == http.StatusNotFound { if e, ok := err.(*googleapi.Error); ok && e.Code == http.StatusNotFound {
@ -437,6 +492,16 @@ func (o *ObjectHandle) Update(ctx context.Context, uattrs ObjectAttrsToUpdate) (
return newObject(obj), nil return newObject(obj), nil
} }
// BucketName returns the name of the bucket.
func (o *ObjectHandle) BucketName() string {
return o.bucket
}
// ObjectName returns the name of the object.
func (o *ObjectHandle) ObjectName() string {
return o.object
}
// ObjectAttrsToUpdate is used to update the attributes of an object. // ObjectAttrsToUpdate is used to update the attributes of an object.
// Only fields set to non-nil values will be updated. // Only fields set to non-nil values will be updated.
// Set a field to its zero value to delete it. // Set a field to its zero value to delete it.
@ -449,6 +514,8 @@ func (o *ObjectHandle) Update(ctx context.Context, uattrs ObjectAttrsToUpdate) (
// Metadata: map[string]string{}, // Metadata: map[string]string{},
// } // }
type ObjectAttrsToUpdate struct { type ObjectAttrsToUpdate struct {
EventBasedHold optional.Bool
TemporaryHold optional.Bool
ContentType optional.String ContentType optional.String
ContentLanguage optional.String ContentLanguage optional.String
ContentEncoding optional.String ContentEncoding optional.String
@ -456,6 +523,10 @@ type ObjectAttrsToUpdate struct {
CacheControl optional.String CacheControl optional.String
Metadata map[string]string // set to map[string]string{} to delete Metadata map[string]string // set to map[string]string{} to delete
ACL []ACLRule ACL []ACLRule
// If not empty, applies a predefined set of access controls. ACL must be nil.
// See https://cloud.google.com/storage/docs/json_api/v1/objects/patch.
PredefinedACL string
} }
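A sketch of the new hold fields in ObjectAttrsToUpdate; only non-nil fields are sent, so this request changes just the temporary hold and cache control. The object handle is a placeholder.
    // Assumed imports: "context", "cloud.google.com/go/storage".
    func protectObject(ctx context.Context, obj *storage.ObjectHandle) (*storage.ObjectAttrs, error) {
        return obj.Update(ctx, storage.ObjectAttrsToUpdate{
            TemporaryHold: true, // object cannot be deleted or overwritten while held
            CacheControl:  "no-cache",
        })
    }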
// Delete deletes the single specified object. // Delete deletes the single specified object.
@ -467,6 +538,10 @@ func (o *ObjectHandle) Delete(ctx context.Context) error {
if err := applyConds("Delete", o.gen, o.conds, call); err != nil { if err := applyConds("Delete", o.gen, o.conds, call); err != nil {
return err return err
} }
if o.userProject != "" {
call.UserProject(o.userProject)
}
// Encryption doesn't apply to Delete.
setClientHeader(call.Header()) setClientHeader(call.Header())
err := runWithRetry(ctx, func() error { return call.Do() }) err := runWithRetry(ctx, func() error { return call.Do() })
switch e := err.(type) { switch e := err.(type) {
@ -480,136 +555,13 @@ func (o *ObjectHandle) Delete(ctx context.Context) error {
return err return err
} }
// NewReader creates a new Reader to read the contents of the // ReadCompressed when true causes the read to happen without decompressing.
// object. func (o *ObjectHandle) ReadCompressed(compressed bool) *ObjectHandle {
// ErrObjectNotExist will be returned if the object is not found. o2 := *o
// o2.readCompressed = compressed
// The caller must call Close on the returned Reader when done reading. return &o2
func (o *ObjectHandle) NewReader(ctx context.Context) (*Reader, error) {
return o.NewRangeReader(ctx, 0, -1)
} }
// NewRangeReader reads part of an object, reading at most length bytes
// starting at the given offset. If length is negative, the object is read
// until the end.
func (o *ObjectHandle) NewRangeReader(ctx context.Context, offset, length int64) (*Reader, error) {
if err := o.validate(); err != nil {
return nil, err
}
if offset < 0 {
return nil, fmt.Errorf("storage: invalid offset %d < 0", offset)
}
if o.conds != nil {
if err := o.conds.validate("NewRangeReader"); err != nil {
return nil, err
}
}
u := &url.URL{
Scheme: "https",
Host: "storage.googleapis.com",
Path: fmt.Sprintf("/%s/%s", o.bucket, o.object),
RawQuery: conditionsQuery(o.gen, o.conds),
}
verb := "GET"
if length == 0 {
verb = "HEAD"
}
req, err := http.NewRequest(verb, u.String(), nil)
if err != nil {
return nil, err
}
req = withContext(req, ctx)
if length < 0 && offset > 0 {
req.Header.Set("Range", fmt.Sprintf("bytes=%d-", offset))
} else if length > 0 {
req.Header.Set("Range", fmt.Sprintf("bytes=%d-%d", offset, offset+length-1))
}
if err := setEncryptionHeaders(req.Header, o.encryptionKey, false); err != nil {
return nil, err
}
var res *http.Response
err = runWithRetry(ctx, func() error {
res, err = o.c.hc.Do(req)
if err != nil {
return err
}
if res.StatusCode == http.StatusNotFound {
res.Body.Close()
return ErrObjectNotExist
}
if res.StatusCode < 200 || res.StatusCode > 299 {
body, _ := ioutil.ReadAll(res.Body)
res.Body.Close()
return &googleapi.Error{
Code: res.StatusCode,
Header: res.Header,
Body: string(body),
}
}
if offset > 0 && length != 0 && res.StatusCode != http.StatusPartialContent {
res.Body.Close()
return errors.New("storage: partial request not satisfied")
}
return nil
})
if err != nil {
return nil, err
}
var size int64 // total size of object, even if a range was requested.
if res.StatusCode == http.StatusPartialContent {
cr := strings.TrimSpace(res.Header.Get("Content-Range"))
if !strings.HasPrefix(cr, "bytes ") || !strings.Contains(cr, "/") {
return nil, fmt.Errorf("storage: invalid Content-Range %q", cr)
}
size, err = strconv.ParseInt(cr[strings.LastIndex(cr, "/")+1:], 10, 64)
if err != nil {
return nil, fmt.Errorf("storage: invalid Content-Range %q", cr)
}
} else {
size = res.ContentLength
}
remain := res.ContentLength
body := res.Body
if length == 0 {
remain = 0
body.Close()
body = emptyBody
}
var (
checkCRC bool
crc uint32
)
// Even if there is a CRC header, we can't compute the hash on partial data.
if remain == size {
crc, checkCRC = parseCRC32c(res)
}
return &Reader{
body: body,
size: size,
remain: remain,
contentType: res.Header.Get("Content-Type"),
wantCRC: crc,
checkCRC: checkCRC,
}, nil
}
func parseCRC32c(res *http.Response) (uint32, bool) {
const prefix = "crc32c="
for _, spec := range res.Header["X-Goog-Hash"] {
if strings.HasPrefix(spec, prefix) {
c, err := decodeUint32(spec[len(prefix):])
if err == nil {
return c, true
}
}
}
return 0, false
}
var emptyBody = ioutil.NopCloser(strings.NewReader(""))
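The ReadCompressed method added above is what lets callers download a gzip-stored object without decompressive transcoding; a minimal sketch, with bucket and object names as placeholders:
    // Assumed imports: "context", "io", "cloud.google.com/go/storage".
    func downloadAsStored(ctx context.Context, client *storage.Client, dst io.Writer) error {
        // ReadCompressed(true) makes the reader send Accept-Encoding: gzip, so an
        // object stored with Content-Encoding: gzip is returned in compressed form.
        r, err := client.Bucket("my-bucket").Object("logs.gz").ReadCompressed(true).NewReader(ctx)
        if err != nil {
            return err
        }
        defer r.Close()
        _, err = io.Copy(dst, r)
        return err
    }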
// NewWriter returns a storage Writer that writes to the GCS object // NewWriter returns a storage Writer that writes to the GCS object
// associated with this ObjectHandle. // associated with this ObjectHandle.
// //
@ -623,7 +575,8 @@ var emptyBody = ioutil.NopCloser(strings.NewReader(""))
// attribute is specified, the content type will be automatically sniffed // attribute is specified, the content type will be automatically sniffed
// using net/http.DetectContentType. // using net/http.DetectContentType.
// //
// It is the caller's responsibility to call Close when writing is done. // It is the caller's responsibility to call Close when writing is done. To
// stop writing without saving the data, cancel the context.
func (o *ObjectHandle) NewWriter(ctx context.Context) *Writer { func (o *ObjectHandle) NewWriter(ctx context.Context) *Writer {
return &Writer{ return &Writer{
ctx: ctx, ctx: ctx,
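A minimal sketch of the writer contract described above: Close commits the upload, and canceling the context abandons it without saving. The object handle and payload are placeholders.
    // Assumed imports: "context", "cloud.google.com/go/storage".
    func writeObject(ctx context.Context, obj *storage.ObjectHandle, data []byte) error {
        ctx, cancel := context.WithCancel(ctx)
        defer cancel() // canceling before Close abandons the write
        w := obj.NewWriter(ctx)
        if _, err := w.Write(data); err != nil {
            return err
        }
        return w.Close() // the object is only committed once Close returns nil
    }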
@ -647,11 +600,10 @@ func (o *ObjectHandle) validate() error {
return nil return nil
} }
// parseKey converts the binary contents of a private key file // parseKey converts the binary contents of a private key file to an
// to an *rsa.PrivateKey. It detects whether the private key is in a // *rsa.PrivateKey. It detects whether the private key is in a PEM container or
// PEM container or not. If so, it extracts the the private key // not. If so, it extracts the private key from PEM container before
// from PEM container before conversion. It only supports PEM // conversion. It only supports PEM containers with no passphrase.
// containers with no passphrase.
func parseKey(key []byte) (*rsa.PrivateKey, error) { func parseKey(key []byte) (*rsa.PrivateKey, error) {
if block, _ := pem.Decode(key); block != nil { if block, _ := pem.Decode(key); block != nil {
key = block.Bytes key = block.Bytes
@ -670,33 +622,25 @@ func parseKey(key []byte) (*rsa.PrivateKey, error) {
return parsed, nil return parsed, nil
} }
func toRawObjectACL(oldACL []ACLRule) []*raw.ObjectAccessControl {
var acl []*raw.ObjectAccessControl
if len(oldACL) > 0 {
acl = make([]*raw.ObjectAccessControl, len(oldACL))
for i, rule := range oldACL {
acl[i] = &raw.ObjectAccessControl{
Entity: string(rule.Entity),
Role: string(rule.Role),
}
}
}
return acl
}
// toRawObject copies the editable attributes from o to the raw library's Object type. // toRawObject copies the editable attributes from o to the raw library's Object type.
func (o *ObjectAttrs) toRawObject(bucket string) *raw.Object { func (o *ObjectAttrs) toRawObject(bucket string) *raw.Object {
acl := toRawObjectACL(o.ACL) var ret string
if !o.RetentionExpirationTime.IsZero() {
ret = o.RetentionExpirationTime.Format(time.RFC3339)
}
return &raw.Object{ return &raw.Object{
Bucket: bucket, Bucket: bucket,
Name: o.Name, Name: o.Name,
EventBasedHold: o.EventBasedHold,
TemporaryHold: o.TemporaryHold,
RetentionExpirationTime: ret,
ContentType: o.ContentType, ContentType: o.ContentType,
ContentEncoding: o.ContentEncoding, ContentEncoding: o.ContentEncoding,
ContentLanguage: o.ContentLanguage, ContentLanguage: o.ContentLanguage,
CacheControl: o.CacheControl, CacheControl: o.CacheControl,
ContentDisposition: o.ContentDisposition, ContentDisposition: o.ContentDisposition,
StorageClass: o.StorageClass, StorageClass: o.StorageClass,
Acl: acl, Acl: toRawObjectACL(o.ACL),
Metadata: o.Metadata, Metadata: o.Metadata,
} }
} }
@ -721,9 +665,32 @@ type ObjectAttrs struct {
// headers when serving the object data. // headers when serving the object data.
CacheControl string CacheControl string
// EventBasedHold specifies whether an object is under event-based hold. New
// objects created in a bucket whose DefaultEventBasedHold is set will
// default to that value.
EventBasedHold bool
// TemporaryHold specifies whether an object is under temporary hold. While
// this flag is set to true, the object is protected against deletion and
// overwrites.
TemporaryHold bool
// RetentionExpirationTime is a server-determined value that specifies the
// earliest time that the object's retention period expires.
// This is a read-only field.
RetentionExpirationTime time.Time
// ACL is the list of access control rules for the object. // ACL is the list of access control rules for the object.
ACL []ACLRule ACL []ACLRule
// If not empty, applies a predefined set of access controls. It should be set
// only when writing, copying or composing an object. When copying or composing,
// it acts as the destinationPredefinedAcl parameter.
// PredefinedACL is always empty for ObjectAttrs returned from the service.
// See https://cloud.google.com/storage/docs/json_api/v1/objects/insert
// for valid values.
PredefinedACL string
// Owner is the owner of the object. This field is read-only. // Owner is the owner of the object. This field is read-only.
// //
// If non-zero, it is in the form of "user-<userId>". // If non-zero, it is in the form of "user-<userId>".
@ -739,11 +706,16 @@ type ObjectAttrs struct {
// sent in the response headers. // sent in the response headers.
ContentDisposition string ContentDisposition string
// MD5 is the MD5 hash of the object's content. This field is read-only. // MD5 is the MD5 hash of the object's content. This field is read-only,
// except when used from a Writer. If set on a Writer, the uploaded
// data is rejected if its MD5 hash does not match this field.
MD5 []byte MD5 []byte
// CRC32C is the CRC32 checksum of the object's content using // CRC32C is the CRC32 checksum of the object's content using
// the Castagnoli93 polynomial. This field is read-only. // the Castagnoli93 polynomial. This field is read-only, except when
// used from a Writer. If set on a Writer and Writer.SendCRC32C
// is true, the uploaded data is rejected if its CRC32c hash does not
// match this field.
CRC32C uint32 CRC32C uint32
// MediaLink is an URL to the object's content. This field is read-only. // MediaLink is an URL to the object's content. This field is read-only.
@ -792,6 +764,14 @@ type ObjectAttrs struct {
// encryption in Google Cloud Storage. // encryption in Google Cloud Storage.
CustomerKeySHA256 string CustomerKeySHA256 string
// Cloud KMS key name, in the form
// projects/P/locations/L/keyRings/R/cryptoKeys/K, used to encrypt this object,
// if the object is encrypted by such a key.
//
// Providing both a KMSKeyName and a customer-supplied encryption key (via
// ObjectHandle.Key) will result in an error when writing an object.
KMSKeyName string
// Prefix is set only for ObjectAttrs which represent synthetic "directory // Prefix is set only for ObjectAttrs which represent synthetic "directory
// entries" when iterating over buckets using Query.Delimiter. See // entries" when iterating over buckets using Query.Delimiter. See
// ObjectIterator.Next. When set, no other fields in ObjectAttrs will be // ObjectIterator.Next. When set, no other fields in ObjectAttrs will be
@ -813,13 +793,6 @@ func newObject(o *raw.Object) *ObjectAttrs {
if o == nil { if o == nil {
return nil return nil
} }
acl := make([]ACLRule, len(o.Acl))
for i, rule := range o.Acl {
acl[i] = ACLRule{
Entity: ACLEntity(rule.Entity),
Role: ACLRole(rule.Role),
}
}
owner := "" owner := ""
if o.Owner != nil { if o.Owner != nil {
owner = o.Owner.Entity owner = o.Owner.Entity
@ -836,9 +809,13 @@ func newObject(o *raw.Object) *ObjectAttrs {
ContentType: o.ContentType, ContentType: o.ContentType,
ContentLanguage: o.ContentLanguage, ContentLanguage: o.ContentLanguage,
CacheControl: o.CacheControl, CacheControl: o.CacheControl,
ACL: acl, EventBasedHold: o.EventBasedHold,
TemporaryHold: o.TemporaryHold,
RetentionExpirationTime: convertTime(o.RetentionExpirationTime),
ACL: toObjectACLRules(o.Acl),
Owner: owner, Owner: owner,
ContentEncoding: o.ContentEncoding, ContentEncoding: o.ContentEncoding,
ContentDisposition: o.ContentDisposition,
Size: int64(o.Size), Size: int64(o.Size),
MD5: md5, MD5: md5,
CRC32C: crc32c, CRC32C: crc32c,
@ -848,6 +825,7 @@ func newObject(o *raw.Object) *ObjectAttrs {
Metageneration: o.Metageneration, Metageneration: o.Metageneration,
StorageClass: o.StorageClass, StorageClass: o.StorageClass,
CustomerKeySHA256: sha256, CustomerKeySHA256: sha256,
KMSKeyName: o.KmsKeyName,
Created: convertTime(o.TimeCreated), Created: convertTime(o.TimeCreated),
Deleted: convertTime(o.TimeDeleted), Deleted: convertTime(o.TimeDeleted),
Updated: convertTime(o.Updated), Updated: convertTime(o.Updated),
@ -905,7 +883,7 @@ func (c *contentTyper) ContentType() string {
} }
// Conditions constrain methods to act on specific generations of // Conditions constrain methods to act on specific generations of
// resources. // objects.
// //
// The zero value is an empty set of constraints. Not all conditions or // The zero value is an empty set of constraints. Not all conditions or
// combinations of conditions are applicable to all methods. // combinations of conditions are applicable to all methods.
@ -1133,4 +1111,12 @@ func setEncryptionHeaders(headers http.Header, key []byte, copySource bool) erro
return nil return nil
} }
// TODO(jbd): Add storage.objects.watch. // ServiceAccount fetches the email address of the given project's Google Cloud Storage service account.
func (c *Client) ServiceAccount(ctx context.Context, projectID string) (string, error) {
r := c.raw.Projects.ServiceAccount.Get(projectID)
res, err := r.Context(ctx).Do()
if err != nil {
return "", err
}
return res.EmailAddress, nil
}
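
The storage.go hunks above add a `KMSKeyName` field to `ObjectAttrs` and a `Client.ServiceAccount` method. A minimal caller-side sketch of both, assuming placeholder bucket, object, project, and KMS key names (none of these identifiers come from the diff itself):

```go
package main

import (
	"context"
	"fmt"
	"log"

	"cloud.google.com/go/storage"
)

func main() {
	ctx := context.Background()
	client, err := storage.NewClient(ctx)
	if err != nil {
		log.Fatal(err)
	}
	defer client.Close()

	// Writer embeds ObjectAttrs, so the new KMSKeyName field can be set
	// directly; the uploaded object is then encrypted with that Cloud KMS key.
	w := client.Bucket("my-bucket").Object("kms-object").NewWriter(ctx)
	w.KMSKeyName = "projects/P/locations/L/keyRings/R/cryptoKeys/K"
	if _, err := w.Write([]byte("hello")); err != nil {
		log.Fatal(err)
	}
	if err := w.Close(); err != nil {
		log.Fatal(err)
	}

	// ServiceAccount returns the email of the project's GCS service account.
	email, err := client.ServiceAccount(ctx, "my-project")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("GCS service account:", email)
}
```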

53792
vendor/cloud.google.com/go/storage/storage.replay generated vendored Normal file

File diff suppressed because one or more lines are too long


@ -1,4 +1,4 @@
// Copyright 2014 Google Inc. All Rights Reserved. // Copyright 2014 Google LLC
// //
// Licensed under the Apache License, Version 2.0 (the "License"); // Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License. // you may not use this file except in compliance with the License.
@ -19,6 +19,7 @@ import (
"errors" "errors"
"fmt" "fmt"
"io" "io"
"sync"
"unicode/utf8" "unicode/utf8"
"golang.org/x/net/context" "golang.org/x/net/context"
@ -36,6 +37,8 @@ type Writer struct {
// SendCRC specifies whether to transmit a CRC32C field. It should be set // SendCRC specifies whether to transmit a CRC32C field. It should be set
// to true in addition to setting the Writer's CRC32C field, because zero // to true in addition to setting the Writer's CRC32C field, because zero
// is a valid CRC and normally a zero would not be transmitted. // is a valid CRC and normally a zero would not be transmitted.
// If a CRC32C is sent, and the data written does not match the checksum,
// the write will be rejected.
SendCRC32C bool SendCRC32C bool
// ChunkSize controls the maximum number of bytes of the object that the // ChunkSize controls the maximum number of bytes of the object that the
@ -45,10 +48,23 @@ type Writer struct {
// to the nearest multiple of 256K. If zero, chunking will be disabled and // to the nearest multiple of 256K. If zero, chunking will be disabled and
// the object will be uploaded in a single request. // the object will be uploaded in a single request.
// //
// ChunkSize will default to a reasonable value. Any custom configuration // ChunkSize will default to a reasonable value. If you perform many concurrent
must be done before the first Write call. // writes of small objects, you may wish to set ChunkSize to a value that matches
// your objects' sizes to avoid consuming large amounts of memory.
//
// ChunkSize must be set before the first Write call.
ChunkSize int ChunkSize int
// ProgressFunc can be used to monitor the progress of a large write
// operation. If ProgressFunc is not nil and writing requires multiple
// calls to the underlying service (see
// https://cloud.google.com/storage/docs/json_api/v1/how-tos/resumable-upload),
// then ProgressFunc will be invoked after each call with the number of bytes of
// content copied so far.
//
// ProgressFunc should return quickly without blocking.
ProgressFunc func(int64)
ctx context.Context ctx context.Context
o *ObjectHandle o *ObjectHandle
@ -56,8 +72,10 @@ type Writer struct {
pw *io.PipeWriter pw *io.PipeWriter
donec chan struct{} // closed after err and obj are set. donec chan struct{} // closed after err and obj are set.
err error
obj *ObjectAttrs obj *ObjectAttrs
mu sync.Mutex
err error
} }
func (w *Writer) open() error { func (w *Writer) open() error {
@ -70,12 +88,15 @@ func (w *Writer) open() error {
if !utf8.ValidString(attrs.Name) { if !utf8.ValidString(attrs.Name) {
return fmt.Errorf("storage: object name %q is not valid UTF-8", attrs.Name) return fmt.Errorf("storage: object name %q is not valid UTF-8", attrs.Name)
} }
if attrs.KMSKeyName != "" && w.o.encryptionKey != nil {
return errors.New("storage: cannot use KMSKeyName with a customer-supplied encryption key")
}
pr, pw := io.Pipe() pr, pw := io.Pipe()
w.pw = pw w.pw = pw
w.opened = true w.opened = true
if w.ChunkSize < 0 { if w.ChunkSize < 0 {
return errors.New("storage: Writer.ChunkSize must non-negative") return errors.New("storage: Writer.ChunkSize must be non-negative")
} }
mediaOpts := []googleapi.MediaOption{ mediaOpts := []googleapi.MediaOption{
googleapi.ChunkSize(w.ChunkSize), googleapi.ChunkSize(w.ChunkSize),
@ -98,20 +119,50 @@ func (w *Writer) open() error {
Media(pr, mediaOpts...). Media(pr, mediaOpts...).
Projection("full"). Projection("full").
Context(w.ctx) Context(w.ctx)
if w.ProgressFunc != nil {
call.ProgressUpdater(func(n, _ int64) { w.ProgressFunc(n) })
}
if attrs.KMSKeyName != "" {
call.KmsKeyName(attrs.KMSKeyName)
}
if attrs.PredefinedACL != "" {
call.PredefinedAcl(attrs.PredefinedACL)
}
if err := setEncryptionHeaders(call.Header(), w.o.encryptionKey, false); err != nil { if err := setEncryptionHeaders(call.Header(), w.o.encryptionKey, false); err != nil {
w.mu.Lock()
w.err = err w.err = err
pr.CloseWithError(w.err) w.mu.Unlock()
pr.CloseWithError(err)
return return
} }
var resp *raw.Object var resp *raw.Object
err := applyConds("NewWriter", w.o.gen, w.o.conds, call) err := applyConds("NewWriter", w.o.gen, w.o.conds, call)
if err == nil { if err == nil {
if w.o.userProject != "" {
call.UserProject(w.o.userProject)
}
setClientHeader(call.Header()) setClientHeader(call.Header())
// If the chunk size is zero, then no chunking is done on the Reader,
// which means we cannot retry: the first call will read the data, and if
// it fails, there is no way to re-read.
if w.ChunkSize == 0 {
resp, err = call.Do() resp, err = call.Do()
} else {
// We will only retry here if the initial POST, which obtains a URI for
// the resumable upload, fails with a retryable error. The upload itself
// has its own retry logic.
err = runWithRetry(w.ctx, func() error {
var err2 error
resp, err2 = call.Do()
return err2
})
}
} }
if err != nil { if err != nil {
w.mu.Lock()
w.err = err w.err = err
pr.CloseWithError(w.err) w.mu.Unlock()
pr.CloseWithError(err)
return return
} }
w.obj = newObject(resp) w.obj = newObject(resp)
@ -120,9 +171,17 @@ func (w *Writer) open() error {
} }
// Write appends to w. It implements the io.Writer interface. // Write appends to w. It implements the io.Writer interface.
//
// Since writes happen asynchronously, Write may return a nil
// error even though the write failed (or will fail). Always
// use the error returned from Writer.Close to determine if
// the upload was successful.
func (w *Writer) Write(p []byte) (n int, err error) { func (w *Writer) Write(p []byte) (n int, err error) {
if w.err != nil { w.mu.Lock()
return 0, w.err werr := w.err
w.mu.Unlock()
if werr != nil {
return 0, werr
} }
if !w.opened { if !w.opened {
if err := w.open(); err != nil { if err := w.open(); err != nil {
@ -145,11 +204,15 @@ func (w *Writer) Close() error {
return err return err
} }
<-w.donec <-w.donec
w.mu.Lock()
defer w.mu.Unlock()
return w.err return w.err
} }
// CloseWithError aborts the write operation with the provided error. // CloseWithError aborts the write operation with the provided error.
// CloseWithError always returns nil. // CloseWithError always returns nil.
//
// Deprecated: cancel the context passed to NewWriter instead.
func (w *Writer) CloseWithError(err error) error { func (w *Writer) CloseWithError(err error) error {
if !w.opened { if !w.opened {
return nil return nil
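
The Writer fields documented in this hunk (`SendCRC32C`, `ChunkSize`, `ProgressFunc`) combine as in the sketch below. Bucket and object names are placeholders; the CRC32C is computed with the Castagnoli table, as the field's doc comment requires:

```go
package main

import (
	"context"
	"hash/crc32"
	"log"

	"cloud.google.com/go/storage"
)

func upload(ctx context.Context, client *storage.Client, data []byte) error {
	w := client.Bucket("my-bucket").Object("my-object").NewWriter(ctx)

	// Zero is a valid CRC32C, so the checksum is only transmitted when
	// SendCRC32C is explicitly set alongside the CRC32C field.
	w.CRC32C = crc32.Checksum(data, crc32.MakeTable(crc32.Castagnoli))
	w.SendCRC32C = true

	// ChunkSize must be configured before the first Write; a non-zero value
	// enables resumable (chunked) uploads.
	w.ChunkSize = 256 * 1024

	// ProgressFunc is invoked after each resumable-upload request with the
	// total number of bytes copied so far.
	w.ProgressFunc = func(n int64) { log.Printf("uploaded %d bytes", n) }

	if _, err := w.Write(data); err != nil {
		return err
	}
	// Only Close reports whether the upload actually succeeded; Write may
	// return nil even if the upload later fails.
	return w.Close()
}

func main() {
	ctx := context.Background()
	client, err := storage.NewClient(ctx)
	if err != nil {
		log.Fatal(err)
	}
	defer client.Close()
	if err := upload(ctx, client, []byte("payload")); err != nil {
		log.Fatal(err)
	}
}
```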


@ -0,0 +1,202 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "[]"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright [yyyy] [name of copyright owner]
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.


@ -0,0 +1,94 @@
// Copyright 2018, OpenCensus Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Package propagation implements X-Cloud-Trace-Context header propagation used
// by Google Cloud products.
package propagation // import "contrib.go.opencensus.io/exporter/stackdriver/propagation"
import (
"encoding/binary"
"encoding/hex"
"fmt"
"net/http"
"strconv"
"strings"
"go.opencensus.io/trace"
"go.opencensus.io/trace/propagation"
)
const (
httpHeaderMaxSize = 200
httpHeader = `X-Cloud-Trace-Context`
)
var _ propagation.HTTPFormat = (*HTTPFormat)(nil)
// HTTPFormat implements propagation.HTTPFormat to propagate
// traces in HTTP headers for Google Cloud Platform and Stackdriver Trace.
type HTTPFormat struct{}
// SpanContextFromRequest extracts a Stackdriver Trace span context from incoming requests.
func (f *HTTPFormat) SpanContextFromRequest(req *http.Request) (sc trace.SpanContext, ok bool) {
h := req.Header.Get(httpHeader)
// See https://cloud.google.com/trace/docs/faq for the header format.
// Return if the header is empty or missing, or if the header is unreasonably
// large, to avoid making unnecessary copies of a large string.
if h == "" || len(h) > httpHeaderMaxSize {
return trace.SpanContext{}, false
}
// Parse the trace id field.
slash := strings.Index(h, `/`)
if slash == -1 {
return trace.SpanContext{}, false
}
tid, h := h[:slash], h[slash+1:]
buf, err := hex.DecodeString(tid)
if err != nil {
return trace.SpanContext{}, false
}
copy(sc.TraceID[:], buf)
// Parse the span id field.
spanstr := h
semicolon := strings.Index(h, `;`)
if semicolon != -1 {
spanstr, h = h[:semicolon], h[semicolon+1:]
}
sid, err := strconv.ParseUint(spanstr, 10, 64)
if err != nil {
return trace.SpanContext{}, false
}
binary.BigEndian.PutUint64(sc.SpanID[:], sid)
// Parse the options field, options field is optional.
if !strings.HasPrefix(h, "o=") {
return sc, true
}
o, err := strconv.ParseUint(h[2:], 10, 64)
if err != nil {
return trace.SpanContext{}, false
}
sc.TraceOptions = trace.TraceOptions(o)
return sc, true
}
// SpanContextToRequest modifies the given request to include a Stackdriver Trace header.
func (f *HTTPFormat) SpanContextToRequest(sc trace.SpanContext, req *http.Request) {
sid := binary.BigEndian.Uint64(sc.SpanID[:])
header := fmt.Sprintf("%s/%d;o=%d", hex.EncodeToString(sc.TraceID[:]), sid, int64(sc.TraceOptions))
req.Header.Set(httpHeader, header)
}
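
The header handled above has the form `TRACE_ID/SPAN_ID;o=OPTIONS`. A small round-trip sketch of the two methods, with placeholder trace and span IDs:

```go
package main

import (
	"fmt"
	"net/http"

	sdpropagation "contrib.go.opencensus.io/exporter/stackdriver/propagation"
	"go.opencensus.io/trace"
)

func main() {
	f := &sdpropagation.HTTPFormat{}

	req, _ := http.NewRequest("GET", "https://example.com", nil)
	sc := trace.SpanContext{TraceOptions: 1}
	copy(sc.TraceID[:], []byte("0123456789abcdef")) // 16-byte placeholder trace ID
	sc.SpanID = trace.SpanID{0, 0, 0, 0, 0, 0, 0, 42}

	// Write the header onto the request, then parse it back out.
	f.SpanContextToRequest(sc, req)
	fmt.Println(req.Header.Get("X-Cloud-Trace-Context"))

	if got, ok := f.SpanContextFromRequest(req); ok {
		fmt.Printf("trace=%x span=%x options=%d\n", got.TraceID, got.SpanID, got.TraceOptions)
	}
}
```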

1
vendor/go.opencensus.io/AUTHORS generated vendored Normal file

@ -0,0 +1 @@
Google Inc.

56
vendor/go.opencensus.io/CONTRIBUTING.md generated vendored Normal file

@ -0,0 +1,56 @@
# How to contribute
We'd love to accept your patches and contributions to this project. There are
just a few small guidelines you need to follow.
## Contributor License Agreement
Contributions to this project must be accompanied by a Contributor License
Agreement. You (or your employer) retain the copyright to your contribution,
this simply gives us permission to use and redistribute your contributions as
part of the project. Head over to <https://cla.developers.google.com/> to see
your current agreements on file or to sign a new one.
You generally only need to submit a CLA once, so if you've already submitted one
(even if it was for a different project), you probably don't need to do it
again.
## Code reviews
All submissions, including submissions by project members, require review. We
use GitHub pull requests for this purpose. Consult [GitHub Help] for more
information on using pull requests.
[GitHub Help]: https://help.github.com/articles/about-pull-requests/
## Instructions
Fork the repo, checkout the upstream repo to your GOPATH by:
```
$ go get -d go.opencensus.io
```
Add your fork as an origin:
```
cd $(go env GOPATH)/src/go.opencensus.io
git remote add fork git@github.com:YOUR_GITHUB_USERNAME/opencensus-go.git
```
Run tests:
```
$ go test ./...
```
Checkout a new branch, make modifications and push the branch to your fork:
```
$ git checkout -b feature
# edit files
$ git commit
$ git push fork feature
```
Open a pull request against the main opencensus-go repo.

231
vendor/go.opencensus.io/Gopkg.lock generated vendored Normal file

@ -0,0 +1,231 @@
# This file is autogenerated, do not edit; changes may be undone by the next 'dep ensure'.
[[projects]]
branch = "master"
digest = "1:eee9386329f4fcdf8d6c0def0c9771b634bdd5ba460d888aa98c17d59b37a76c"
name = "git.apache.org/thrift.git"
packages = ["lib/go/thrift"]
pruneopts = "UT"
revision = "6e67faa92827ece022380b211c2caaadd6145bf5"
source = "github.com/apache/thrift"
[[projects]]
branch = "master"
digest = "1:d6afaeed1502aa28e80a4ed0981d570ad91b2579193404256ce672ed0a609e0d"
name = "github.com/beorn7/perks"
packages = ["quantile"]
pruneopts = "UT"
revision = "3a771d992973f24aa725d07868b467d1ddfceafb"
[[projects]]
digest = "1:4c0989ca0bcd10799064318923b9bc2db6b4d6338dd75f3f2d86c3511aaaf5cf"
name = "github.com/golang/protobuf"
packages = [
"proto",
"ptypes",
"ptypes/any",
"ptypes/duration",
"ptypes/timestamp",
]
pruneopts = "UT"
revision = "aa810b61a9c79d51363740d207bb46cf8e620ed5"
version = "v1.2.0"
[[projects]]
digest = "1:ff5ebae34cfbf047d505ee150de27e60570e8c394b3b8fdbb720ff6ac71985fc"
name = "github.com/matttproud/golang_protobuf_extensions"
packages = ["pbutil"]
pruneopts = "UT"
revision = "c12348ce28de40eed0136aa2b644d0ee0650e56c"
version = "v1.0.1"
[[projects]]
digest = "1:824c8f3aa4c5f23928fa84ebbd5ed2e9443b3f0cb958a40c1f2fbed5cf5e64b1"
name = "github.com/openzipkin/zipkin-go"
packages = [
".",
"idgenerator",
"model",
"propagation",
"reporter",
"reporter/http",
]
pruneopts = "UT"
revision = "d455a5674050831c1e187644faa4046d653433c2"
version = "v0.1.1"
[[projects]]
digest = "1:d14a5f4bfecf017cb780bdde1b6483e5deb87e12c332544d2c430eda58734bcb"
name = "github.com/prometheus/client_golang"
packages = [
"prometheus",
"prometheus/promhttp",
]
pruneopts = "UT"
revision = "c5b7fccd204277076155f10851dad72b76a49317"
version = "v0.8.0"
[[projects]]
branch = "master"
digest = "1:2d5cd61daa5565187e1d96bae64dbbc6080dacf741448e9629c64fd93203b0d4"
name = "github.com/prometheus/client_model"
packages = ["go"]
pruneopts = "UT"
revision = "5c3871d89910bfb32f5fcab2aa4b9ec68e65a99f"
[[projects]]
branch = "master"
digest = "1:63b68062b8968092eb86bedc4e68894bd096ea6b24920faca8b9dcf451f54bb5"
name = "github.com/prometheus/common"
packages = [
"expfmt",
"internal/bitbucket.org/ww/goautoneg",
"model",
]
pruneopts = "UT"
revision = "c7de2306084e37d54b8be01f3541a8464345e9a5"
[[projects]]
branch = "master"
digest = "1:8c49953a1414305f2ff5465147ee576dd705487c35b15918fcd4efdc0cb7a290"
name = "github.com/prometheus/procfs"
packages = [
".",
"internal/util",
"nfs",
"xfs",
]
pruneopts = "UT"
revision = "05ee40e3a273f7245e8777337fc7b46e533a9a92"
[[projects]]
branch = "master"
digest = "1:deafe4ab271911fec7de5b693d7faae3f38796d9eb8622e2b9e7df42bb3dfea9"
name = "golang.org/x/net"
packages = [
"context",
"http/httpguts",
"http2",
"http2/hpack",
"idna",
"internal/timeseries",
"trace",
]
pruneopts = "UT"
revision = "922f4815f713f213882e8ef45e0d315b164d705c"
[[projects]]
branch = "master"
digest = "1:e0140c0c868c6e0f01c0380865194592c011fe521d6e12d78bfd33e756fe018a"
name = "golang.org/x/sync"
packages = ["semaphore"]
pruneopts = "UT"
revision = "1d60e4601c6fd243af51cc01ddf169918a5407ca"
[[projects]]
branch = "master"
digest = "1:a3f00ac457c955fe86a41e1495e8f4c54cb5399d609374c5cc26aa7d72e542c8"
name = "golang.org/x/sys"
packages = ["unix"]
pruneopts = "UT"
revision = "3b58ed4ad3395d483fc92d5d14123ce2c3581fec"
[[projects]]
digest = "1:a2ab62866c75542dd18d2b069fec854577a20211d7c0ea6ae746072a1dccdd18"
name = "golang.org/x/text"
packages = [
"collate",
"collate/build",
"internal/colltab",
"internal/gen",
"internal/tag",
"internal/triegen",
"internal/ucd",
"language",
"secure/bidirule",
"transform",
"unicode/bidi",
"unicode/cldr",
"unicode/norm",
"unicode/rangetable",
]
pruneopts = "UT"
revision = "f21a4dfb5e38f5895301dc265a8def02365cc3d0"
version = "v0.3.0"
[[projects]]
branch = "master"
digest = "1:c0c17c94fe8bc1ab34e7f586a4a8b788c5e1f4f9f750ff23395b8b2f5a523530"
name = "google.golang.org/api"
packages = ["support/bundler"]
pruneopts = "UT"
revision = "e21acd801f91da814261b938941d193bb036441a"
[[projects]]
branch = "master"
digest = "1:077c1c599507b3b3e9156d17d36e1e61928ee9b53a5b420f10f28ebd4a0b275c"
name = "google.golang.org/genproto"
packages = ["googleapis/rpc/status"]
pruneopts = "UT"
revision = "c66870c02cf823ceb633bcd05be3c7cda29976f4"
[[projects]]
digest = "1:3dd7996ce6bf52dec6a2f69fa43e7c4cefea1d4dfa3c8ab7a5f8a9f7434e239d"
name = "google.golang.org/grpc"
packages = [
".",
"balancer",
"balancer/base",
"balancer/roundrobin",
"codes",
"connectivity",
"credentials",
"encoding",
"encoding/proto",
"grpclog",
"internal",
"internal/backoff",
"internal/channelz",
"internal/envconfig",
"internal/grpcrand",
"internal/transport",
"keepalive",
"metadata",
"naming",
"peer",
"resolver",
"resolver/dns",
"resolver/passthrough",
"stats",
"status",
"tap",
]
pruneopts = "UT"
revision = "32fb0ac620c32ba40a4626ddf94d90d12cce3455"
version = "v1.14.0"
[solve-meta]
analyzer-name = "dep"
analyzer-version = 1
input-imports = [
"git.apache.org/thrift.git/lib/go/thrift",
"github.com/golang/protobuf/proto",
"github.com/openzipkin/zipkin-go",
"github.com/openzipkin/zipkin-go/model",
"github.com/openzipkin/zipkin-go/reporter",
"github.com/openzipkin/zipkin-go/reporter/http",
"github.com/prometheus/client_golang/prometheus",
"github.com/prometheus/client_golang/prometheus/promhttp",
"golang.org/x/net/context",
"golang.org/x/net/http2",
"google.golang.org/api/support/bundler",
"google.golang.org/grpc",
"google.golang.org/grpc/codes",
"google.golang.org/grpc/grpclog",
"google.golang.org/grpc/metadata",
"google.golang.org/grpc/stats",
"google.golang.org/grpc/status",
]
solver-name = "gps-cdcl"
solver-version = 1

36
vendor/go.opencensus.io/Gopkg.toml generated vendored Normal file

@ -0,0 +1,36 @@
# For v0.x.y dependencies, prefer adding a constraint of the form: version=">= 0.x.y"
# to avoid locking to a particular minor version which can cause dep to not be
# able to find a satisfying dependency graph.
[[constraint]]
branch = "master"
name = "git.apache.org/thrift.git"
source = "github.com/apache/thrift"
[[constraint]]
name = "github.com/golang/protobuf"
version = "1.0.0"
[[constraint]]
name = "github.com/openzipkin/zipkin-go"
version = ">=0.1.0"
[[constraint]]
name = "github.com/prometheus/client_golang"
version = ">=0.8.0"
[[constraint]]
branch = "master"
name = "golang.org/x/net"
[[constraint]]
branch = "master"
name = "google.golang.org/api"
[[constraint]]
name = "google.golang.org/grpc"
version = "1.11.3"
[prune]
go-tests = true
unused-packages = true

202
vendor/go.opencensus.io/LICENSE generated vendored Normal file

@ -0,0 +1,202 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "[]"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright [yyyy] [name of copyright owner]
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

262
vendor/go.opencensus.io/README.md generated vendored Normal file

@ -0,0 +1,262 @@
# OpenCensus Libraries for Go
[![Build Status][travis-image]][travis-url]
[![Windows Build Status][appveyor-image]][appveyor-url]
[![GoDoc][godoc-image]][godoc-url]
[![Gitter chat][gitter-image]][gitter-url]
OpenCensus Go is a Go implementation of OpenCensus, a toolkit for
collecting application performance and behavior monitoring data.
Currently it consists of three major components: tags, stats, and tracing.
## Installation
```
$ go get -u go.opencensus.io
```
The API of this project is still evolving; see the [Deprecation Policy](#deprecation-policy).
The use of vendoring or a dependency management tool is recommended.
## Prerequisites
OpenCensus Go libraries require Go 1.8 or later.
## Getting Started
The easiest way to get started using OpenCensus in your application is to use an existing
integration with your RPC framework:
* [net/http](https://godoc.org/go.opencensus.io/plugin/ochttp)
* [gRPC](https://godoc.org/go.opencensus.io/plugin/ocgrpc)
* [database/sql](https://godoc.org/github.com/basvanbeek/ocsql)
* [Go kit](https://godoc.org/github.com/go-kit/kit/tracing/opencensus)
* [Groupcache](https://godoc.org/github.com/orijtech/groupcache)
* [Caddy webserver](https://godoc.org/github.com/orijtech/caddy)
* [MongoDB](https://godoc.org/github.com/orijtech/mongo-go-driver)
* [Redis gomodule/redigo](https://godoc.org/github.com/orijtech/redigo)
* [Redis goredis/redis](https://godoc.org/github.com/orijtech/redis)
* [Memcache](https://godoc.org/github.com/orijtech/gomemcache)
If you're using a framework not listed here, you could either implement your own middleware for your
framework or use [custom stats](#stats) and [spans](#spans) directly in your application.
## Exporters
OpenCensus can export instrumentation data to various backends.
OpenCensus has exporter implementations for the following backends (a minimal
registration sketch is shown after this list); users can also implement their own
exporters by implementing the exporter interfaces
([stats](https://godoc.org/go.opencensus.io/stats/view#Exporter),
[trace](https://godoc.org/go.opencensus.io/trace#Exporter)):
* [Prometheus][exporter-prom] for stats
* [OpenZipkin][exporter-zipkin] for traces
* [Stackdriver][exporter-stackdriver] Monitoring for stats and Trace for traces
* [Jaeger][exporter-jaeger] for traces
* [AWS X-Ray][exporter-xray] for traces
* [Datadog][exporter-datadog] for stats and traces
* [Graphite][exporter-graphite] for stats
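
A minimal registration sketch for one of these backends, assuming the Stackdriver exporter vendored alongside this commit; the project ID is a placeholder and error handling is reduced to `log.Fatal`:

```go
package main

import (
	"log"

	"contrib.go.opencensus.io/exporter/stackdriver"
	"go.opencensus.io/stats/view"
	"go.opencensus.io/trace"
)

func main() {
	// Create one exporter and register it for both stats (views) and traces.
	sd, err := stackdriver.NewExporter(stackdriver.Options{ProjectID: "my-project"})
	if err != nil {
		log.Fatal(err)
	}
	defer sd.Flush()

	view.RegisterExporter(sd)
	trace.RegisterExporter(sd)

	// Sample every request; the default sampler is probabilistic.
	trace.ApplyConfig(trace.Config{DefaultSampler: trace.AlwaysSample()})
}
```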
## Overview
![OpenCensus Overview](https://i.imgur.com/cf4ElHE.jpg)
In a microservices environment, a user request may go through
multiple services until there is a response. OpenCensus allows
you to instrument your services and collect diagnostics data all
through your services end-to-end.
## Tags
Tags represent propagated key-value pairs. They are propagated using `context.Context`
in the same process or can be encoded to be transmitted on the wire. Usually, this will
be handled by an integration plugin, e.g. `ocgrpc.ServerHandler` and `ocgrpc.ClientHandler`
for gRPC.
Package tag allows adding or modifying tags in the current context.
[embedmd]:# (internal/readme/tags.go new)
```go
ctx, err = tag.New(ctx,
tag.Insert(osKey, "macOS-10.12.5"),
tag.Upsert(userIDKey, "cde36753ed"),
)
if err != nil {
log.Fatal(err)
}
```
## Stats
OpenCensus is a low-overhead framework even if instrumentation is always enabled.
In order to be so, it is optimized to make recording of data points fast
and separate from the data aggregation.
OpenCensus stats collection happens in two stages:
* Definition of measures and recording of data points
* Definition of views and aggregation of the recorded data
### Recording
Measurements are data points associated with a measure.
Recording implicitly tags the set of Measurements with the tags from the
provided context:
[embedmd]:# (internal/readme/stats.go record)
```go
stats.Record(ctx, videoSize.M(102478))
```
### Views
Views are how Measures are aggregated. You can think of them as queries over the
set of recorded data points (measurements).
Views have two parts: the tags to group by and the aggregation type used.
Currently three types of aggregations are supported:
* CountAggregation is used to count the number of times a sample was recorded.
* DistributionAggregation is used to provide a histogram of the values of the samples.
* SumAggregation is used to sum up all sample values.
[embedmd]:# (internal/readme/stats.go aggs)
```go
distAgg := view.Distribution(0, 1<<32, 2<<32, 3<<32)
countAgg := view.Count()
sumAgg := view.Sum()
```
Here we create a view with the DistributionAggregation over our measure.
[embedmd]:# (internal/readme/stats.go view)
```go
if err := view.Register(&view.View{
Name: "example.com/video_size_distribution",
Description: "distribution of processed video size over time",
Measure: videoSize,
Aggregation: view.Distribution(0, 1<<32, 2<<32, 3<<32),
}); err != nil {
log.Fatalf("Failed to register view: %v", err)
}
```
Register begins collecting data for the view. Registered views' data will be
exported via the registered exporters.
## Traces
A distributed trace tracks the progression of a single user request as
it is handled by the services and processes that make up an application.
Each step is called a span in the trace. Spans include metadata about the step,
including especially the time spent in the step, called the span's latency.
Below you see a trace and several spans underneath it.
![Traces and spans](https://i.imgur.com/7hZwRVj.png)
### Spans
Span is the unit step in a trace. Each span has a name, latency, status and
additional metadata.
Below we are starting a span for a cache read and ending it
when we are done:
[embedmd]:# (internal/readme/trace.go startend)
```go
ctx, span := trace.StartSpan(ctx, "cache.Get")
defer span.End()
// Do work to get from cache.
```
### Propagation
Spans can have parents or can be root spans if they don't have any parents.
The current span is propagated in-process and across the network to allow associating
new child spans with the parent.
In the same process, context.Context is used to propagate spans.
trace.StartSpan creates a new span as a root if the current context
doesn't contain a span. Otherwise, it creates a child of the span that is
already in the current context. The returned context can be used to keep
propagating the newly created span in the current context.
[embedmd]:# (internal/readme/trace.go startend)
```go
ctx, span := trace.StartSpan(ctx, "cache.Get")
defer span.End()
// Do work to get from cache.
```
Across the network, OpenCensus provides different propagation
methods for different protocols.
* gRPC integrations use OpenCensus' [binary propagation format](https://godoc.org/go.opencensus.io/trace/propagation).
* HTTP integrations use Zipkin's [B3](https://github.com/openzipkin/b3-propagation)
by default but can be configured to use a custom propagation method by setting another
[propagation.HTTPFormat](https://godoc.org/go.opencensus.io/trace/propagation#HTTPFormat).
## Execution Tracer
With Go 1.11, OpenCensus Go will support integration with the Go execution tracer.
See [Debugging Latency in Go](https://medium.com/observability/debugging-latency-in-go-1-11-9f97a7910d68)
for an example of their mutual use.
## Profiles
OpenCensus tags can be applied as profiler labels
for users who are on Go 1.9 and above.
[embedmd]:# (internal/readme/tags.go profiler)
```go
ctx, err = tag.New(ctx,
tag.Insert(osKey, "macOS-10.12.5"),
tag.Insert(userIDKey, "fff0989878"),
)
if err != nil {
log.Fatal(err)
}
tag.Do(ctx, func(ctx context.Context) {
// Do work.
// When profiling is on, samples will be
// recorded with the key/values from the tag map.
})
```
A screenshot of the CPU profile from the program above:
![CPU profile](https://i.imgur.com/jBKjlkw.png)
## Deprecation Policy
Before version 1.0.0, the following deprecation policy will be observed:
No backwards-incompatible changes will be made except for the removal of symbols that have
been marked as *Deprecated* for at least one minor release (e.g. 0.9.0 to 0.10.0). A release
removing the *Deprecated* functionality will be made no sooner than 28 days after the first
release in which the functionality was marked *Deprecated*.
[travis-image]: https://travis-ci.org/census-instrumentation/opencensus-go.svg?branch=master
[travis-url]: https://travis-ci.org/census-instrumentation/opencensus-go
[appveyor-image]: https://ci.appveyor.com/api/projects/status/vgtt29ps1783ig38?svg=true
[appveyor-url]: https://ci.appveyor.com/project/opencensusgoteam/opencensus-go/branch/master
[godoc-image]: https://godoc.org/go.opencensus.io?status.svg
[godoc-url]: https://godoc.org/go.opencensus.io
[gitter-image]: https://badges.gitter.im/census-instrumentation/lobby.svg
[gitter-url]: https://gitter.im/census-instrumentation/lobby?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge
[new-ex]: https://godoc.org/go.opencensus.io/tag#example-NewMap
[new-replace-ex]: https://godoc.org/go.opencensus.io/tag#example-NewMap--Replace
[exporter-prom]: https://godoc.org/go.opencensus.io/exporter/prometheus
[exporter-stackdriver]: https://godoc.org/contrib.go.opencensus.io/exporter/stackdriver
[exporter-zipkin]: https://godoc.org/go.opencensus.io/exporter/zipkin
[exporter-jaeger]: https://godoc.org/go.opencensus.io/exporter/jaeger
[exporter-xray]: https://github.com/census-ecosystem/opencensus-go-exporter-aws
[exporter-datadog]: https://github.com/DataDog/opencensus-go-exporter-datadog
[exporter-graphite]: https://github.com/census-ecosystem/opencensus-go-exporter-graphite

24
vendor/go.opencensus.io/appveyor.yml generated vendored Normal file

@ -0,0 +1,24 @@
version: "{build}"
platform: x64
clone_folder: c:\gopath\src\go.opencensus.io
environment:
GOPATH: 'c:\gopath'
GOVERSION: '1.11'
GO111MODULE: 'on'
CGO_ENABLED: '0' # See: https://github.com/appveyor/ci/issues/2613
install:
- set PATH=%GOPATH%\bin;c:\go\bin;%PATH%
- go version
- go env
build: false
deploy: false
test_script:
- cd %APPVEYOR_BUILD_FOLDER%
- go build -v .\...
- go test -v .\... # No -race because cgo is disabled

21
vendor/go.opencensus.io/go.mod generated vendored Normal file

@ -0,0 +1,21 @@
module go.opencensus.io
require (
git.apache.org/thrift.git v0.0.0-20180902110319-2566ecd5d999
github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973
github.com/golang/protobuf v1.2.0
github.com/google/go-cmp v0.2.0
github.com/matttproud/golang_protobuf_extensions v1.0.1
github.com/openzipkin/zipkin-go v0.1.1
github.com/prometheus/client_golang v0.8.0
github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910
github.com/prometheus/common v0.0.0-20180801064454-c7de2306084e
github.com/prometheus/procfs v0.0.0-20180725123919-05ee40e3a273
golang.org/x/net v0.0.0-20180906233101-161cd47e91fd
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f
golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e
golang.org/x/text v0.3.0
google.golang.org/api v0.0.0-20180910000450-7ca32eb868bf
google.golang.org/genproto v0.0.0-20180831171423-11092d34479b
google.golang.org/grpc v1.14.0
)

38
vendor/go.opencensus.io/go.sum generated vendored Normal file

@ -0,0 +1,38 @@
git.apache.org/thrift.git v0.0.0-20180807212849-6e67faa92827/go.mod h1:fPE2ZNJGynbRyZ4dJvy6G277gSllfV2HJqblrnkyeyg=
git.apache.org/thrift.git v0.0.0-20180902110319-2566ecd5d999 h1:sihTnRgTOUSCQz0iS0pjZuFQy/z7GXCJgSBg3+rZKHw=
git.apache.org/thrift.git v0.0.0-20180902110319-2566ecd5d999/go.mod h1:fPE2ZNJGynbRyZ4dJvy6G277gSllfV2HJqblrnkyeyg=
github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973 h1:xJ4a3vCFaGF/jqvzLMYoU8P317H5OQ+Via4RmuPwCS0=
github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
github.com/golang/protobuf v1.2.0 h1:P3YflyNX/ehuJFLhxviNdFxQPkGK5cDcApsge1SqnvM=
github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/google/go-cmp v0.2.0 h1:+dTQ8DZQJz0Mb/HjFlkptS1FeQ4cWSnN941F8aEG4SQ=
github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
github.com/matttproud/golang_protobuf_extensions v1.0.1 h1:4hp9jkHxhMHkqkrB3Ix0jegS5sx/RkqARlsWZ6pIwiU=
github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
github.com/openzipkin/zipkin-go v0.1.1 h1:A/ADD6HaPnAKj3yS7HjGHRK77qi41Hi0DirOOIQAeIw=
github.com/openzipkin/zipkin-go v0.1.1/go.mod h1:NtoC/o8u3JlF1lSlyPNswIbeQH9bJTmOf0Erfk+hxe8=
github.com/prometheus/client_golang v0.8.0 h1:1921Yw9Gc3iSc4VQh3PIoOqgPCZS7G/4xQNVUp8Mda8=
github.com/prometheus/client_golang v0.8.0/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw=
github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910 h1:idejC8f05m9MGOsuEi1ATq9shN03HrxNkD/luQvxCv8=
github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
github.com/prometheus/common v0.0.0-20180801064454-c7de2306084e h1:n/3MEhJQjQxrOUCzh1Y3Re6aJUUWRp2M9+Oc3eVn/54=
github.com/prometheus/common v0.0.0-20180801064454-c7de2306084e/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro=
github.com/prometheus/procfs v0.0.0-20180725123919-05ee40e3a273 h1:agujYaXJSxSo18YNX3jzl+4G6Bstwt+kqv47GS12uL0=
github.com/prometheus/procfs v0.0.0-20180725123919-05ee40e3a273/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
golang.org/x/net v0.0.0-20180821023952-922f4815f713/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180906233101-161cd47e91fd h1:nTDtHvHSdCn1m6ITfMRqtOd/9+7a3s8RBNOZ3eYZzJA=
golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f h1:wMNYb4v58l5UBM7MYRLPG6ZhfOqbKu7X5eyFl8ZhKvA=
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sys v0.0.0-20180821140842-3b58ed4ad339/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/text v0.3.0 h1:g61tztE5qeGQ89tm6NTjjM9VPIm088od1l6aSorWRWg=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
google.golang.org/api v0.0.0-20180818000503-e21acd801f91/go.mod h1:4mhQ8q/RsB7i+udVvVy5NUi08OU8ZlA0gRVgrF7VFY0=
google.golang.org/api v0.0.0-20180910000450-7ca32eb868bf h1:rjxqQmxjyqerRKEj+tZW+MCm4LgpFXu18bsEoCMgDsk=
google.golang.org/api v0.0.0-20180910000450-7ca32eb868bf/go.mod h1:4mhQ8q/RsB7i+udVvVy5NUi08OU8ZlA0gRVgrF7VFY0=
google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
google.golang.org/genproto v0.0.0-20180831171423-11092d34479b h1:lohp5blsw53GBXtLyLNaTXPXS9pJ1tiTw61ZHUoE9Qw=
google.golang.org/genproto v0.0.0-20180831171423-11092d34479b/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
google.golang.org/grpc v1.14.0 h1:ArxJuB1NWfPY6r9Gp9gqwplT0Ge7nqv9msgu03lHLmo=
google.golang.org/grpc v1.14.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw=

37
vendor/go.opencensus.io/internal/internal.go generated vendored Normal file

@ -0,0 +1,37 @@
// Copyright 2017, OpenCensus Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package internal // import "go.opencensus.io/internal"
import (
"fmt"
"time"
"go.opencensus.io"
)
// UserAgent is the user agent to be added to the outgoing
// requests from the exporters.
var UserAgent = fmt.Sprintf("opencensus-go [%s]", opencensus.Version())
// MonotonicEndTime returns the end time at present
// but offset from start, monotonically.
//
// The monotonic clock is used in subtractions hence
// the duration since start added back to start gives
// end as a monotonic time.
// See https://golang.org/pkg/time/#hdr-Monotonic_Clocks
func MonotonicEndTime(start time.Time) time.Time {
return start.Add(time.Now().Sub(start))
}
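
A tiny in-package test sketch of the guarantee described above (the 10ms sleep is arbitrary):

```go
package internal

import (
	"testing"
	"time"
)

// The end time returned by MonotonicEndTime carries start's monotonic
// reading, so the measured latency is unaffected by wall-clock changes.
func TestMonotonicEndTime(t *testing.T) {
	start := time.Now()
	time.Sleep(10 * time.Millisecond)
	end := MonotonicEndTime(start)
	if got := end.Sub(start); got < 10*time.Millisecond {
		t.Errorf("end - start = %v, want >= 10ms", got)
	}
}
```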

50
vendor/go.opencensus.io/internal/sanitize.go generated vendored Normal file

@ -0,0 +1,50 @@
// Copyright 2017, OpenCensus Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package internal
import (
"strings"
"unicode"
)
const labelKeySizeLimit = 100
// Sanitize returns a string that is truncated to 100 characters if it's too
// long, and replaces non-alphanumeric characters with underscores.
func Sanitize(s string) string {
if len(s) == 0 {
return s
}
if len(s) > labelKeySizeLimit {
s = s[:labelKeySizeLimit]
}
s = strings.Map(sanitizeRune, s)
if unicode.IsDigit(rune(s[0])) {
s = "key_" + s
}
if s[0] == '_' {
s = "key" + s
}
return s
}
// converts anything that is not a letter or digit to an underscore
func sanitizeRune(r rune) rune {
if unicode.IsLetter(r) || unicode.IsDigit(r) {
return r
}
// Everything else turns into an underscore
return '_'
}


@ -0,0 +1,72 @@
// Copyright 2017, OpenCensus Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
// Package tagencoding contains the tag encoding
// used internally by the stats collector.
package tagencoding // import "go.opencensus.io/internal/tagencoding"
// Values represent the encoded buffer for the values.
type Values struct {
Buffer []byte
WriteIndex int
ReadIndex int
}
func (vb *Values) growIfRequired(expected int) {
if len(vb.Buffer)-vb.WriteIndex < expected {
tmp := make([]byte, 2*(len(vb.Buffer)+1)+expected)
copy(tmp, vb.Buffer)
vb.Buffer = tmp
}
}
func (vb *Values) WriteValue(v []byte) {
length := len(v) & 0xff
vb.growIfRequired(1 + length)
// writing length of v
vb.Buffer[vb.WriteIndex] = byte(length)
vb.WriteIndex++
if length == 0 {
// No value was encoded for this key
return
}
// writing v
copy(vb.Buffer[vb.WriteIndex:], v[:length])
vb.WriteIndex += length
}
// ReadValue is the helper method to read the values when decoding valuesBytes to a map[Key][]byte.
func (vb *Values) ReadValue() []byte {
// read length of v
length := int(vb.Buffer[vb.ReadIndex])
vb.ReadIndex++
if length == 0 {
// No value was encoded for this key
return nil
}
// read value of v
v := make([]byte, length)
endIdx := vb.ReadIndex + length
copy(v, vb.Buffer[vb.ReadIndex:endIdx])
vb.ReadIndex = endIdx
return v
}
func (vb *Values) Bytes() []byte {
return vb.Buffer[:vb.WriteIndex]
}

vendor/go.opencensus.io/internal/traceinternals.go generated vendored Normal file

@ -0,0 +1,52 @@
// Copyright 2017, OpenCensus Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package internal
import (
"time"
)
// Trace allows internal access to some trace functionality.
// TODO(#412): remove this
var Trace interface{}
var LocalSpanStoreEnabled bool
// BucketConfiguration stores the number of samples to store for span buckets
// for successful and failed spans for a particular span name.
type BucketConfiguration struct {
Name string
MaxRequestsSucceeded int
MaxRequestsErrors int
}
// PerMethodSummary is a summary of the spans stored for a single span name.
type PerMethodSummary struct {
Active int
LatencyBuckets []LatencyBucketSummary
ErrorBuckets []ErrorBucketSummary
}
// LatencyBucketSummary is a summary of a latency bucket.
type LatencyBucketSummary struct {
MinLatency, MaxLatency time.Duration
Size int
}
// ErrorBucketSummary is a summary of an error bucket.
type ErrorBucketSummary struct {
ErrorCode int32
Size int
}

vendor/go.opencensus.io/opencensus.go generated vendored Normal file

@ -0,0 +1,21 @@
// Copyright 2017, OpenCensus Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Package opencensus contains Go support for OpenCensus.
package opencensus // import "go.opencensus.io"
// Version is the current release version of OpenCensus in use.
func Version() string {
return "0.18.0"
}

vendor/go.opencensus.io/plugin/ocgrpc/client.go generated vendored Normal file

@ -0,0 +1,56 @@
// Copyright 2018, OpenCensus Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package ocgrpc
import (
"go.opencensus.io/trace"
"golang.org/x/net/context"
"google.golang.org/grpc/stats"
)
// ClientHandler implements a gRPC stats.Handler for recording OpenCensus stats and
// traces. Use with gRPC clients only.
type ClientHandler struct {
// StartOptions allows configuring the StartOptions used to create new spans.
//
// StartOptions.SpanKind will always be set to trace.SpanKindClient
// for spans started by this handler.
StartOptions trace.StartOptions
}
// HandleConn exists to satisfy gRPC stats.Handler.
func (c *ClientHandler) HandleConn(ctx context.Context, cs stats.ConnStats) {
// no-op
}
// TagConn exists to satisfy gRPC stats.Handler.
func (c *ClientHandler) TagConn(ctx context.Context, cti *stats.ConnTagInfo) context.Context {
// no-op
return ctx
}
// HandleRPC implements per-RPC tracing and stats instrumentation.
func (c *ClientHandler) HandleRPC(ctx context.Context, rs stats.RPCStats) {
traceHandleRPC(ctx, rs)
statsHandleRPC(ctx, rs)
}
// TagRPC implements per-RPC context management.
func (c *ClientHandler) TagRPC(ctx context.Context, rti *stats.RPCTagInfo) context.Context {
ctx = c.traceTagRPC(ctx, rti)
ctx = c.statsTagRPC(ctx, rti)
return ctx
}
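Not part of the vendored file above: a minimal client-side wiring sketch. The target address is hypothetical; the handler is installed with grpc.WithStatsHandler so every RPC made on the connection passes through the TagRPC/HandleRPC methods defined here.
package main

import (
	"log"

	"go.opencensus.io/plugin/ocgrpc"
	"google.golang.org/grpc"
)

func main() {
	// Install the ClientHandler on the connection; each outgoing RPC is
	// then traced and its client measures are recorded.
	conn, err := grpc.Dial("localhost:9000", // hypothetical target
		grpc.WithInsecure(),
		grpc.WithStatsHandler(&ocgrpc.ClientHandler{}))
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()
	// ... build service stubs against conn as usual.
}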

vendor/go.opencensus.io/plugin/ocgrpc/client_metrics.go generated vendored Normal file

@ -0,0 +1,107 @@
// Copyright 2017, OpenCensus Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
package ocgrpc
import (
"go.opencensus.io/stats"
"go.opencensus.io/stats/view"
"go.opencensus.io/tag"
)
// The following variables are measures recorded by ClientHandler:
var (
ClientSentMessagesPerRPC = stats.Int64("grpc.io/client/sent_messages_per_rpc", "Number of messages sent in the RPC (always 1 for non-streaming RPCs).", stats.UnitDimensionless)
ClientSentBytesPerRPC = stats.Int64("grpc.io/client/sent_bytes_per_rpc", "Total bytes sent across all request messages per RPC.", stats.UnitBytes)
ClientReceivedMessagesPerRPC = stats.Int64("grpc.io/client/received_messages_per_rpc", "Number of response messages received per RPC (always 1 for non-streaming RPCs).", stats.UnitDimensionless)
ClientReceivedBytesPerRPC = stats.Int64("grpc.io/client/received_bytes_per_rpc", "Total bytes received across all response messages per RPC.", stats.UnitBytes)
ClientRoundtripLatency = stats.Float64("grpc.io/client/roundtrip_latency", "Time between first byte of request sent to last byte of response received, or terminal error.", stats.UnitMilliseconds)
ClientServerLatency = stats.Float64("grpc.io/client/server_latency", `Propagated from the server and should have the same value as "grpc.io/server/latency".`, stats.UnitMilliseconds)
)
// Predefined views may be registered to collect data for the above measures.
// As always, you may also define your own custom views over measures collected by this
// package. These are declared as a convenience only; none are registered by
// default.
var (
ClientSentBytesPerRPCView = &view.View{
Measure: ClientSentBytesPerRPC,
Name: "grpc.io/client/sent_bytes_per_rpc",
Description: "Distribution of bytes sent per RPC, by method.",
TagKeys: []tag.Key{KeyClientMethod},
Aggregation: DefaultBytesDistribution,
}
ClientReceivedBytesPerRPCView = &view.View{
Measure: ClientReceivedBytesPerRPC,
Name: "grpc.io/client/received_bytes_per_rpc",
Description: "Distribution of bytes received per RPC, by method.",
TagKeys: []tag.Key{KeyClientMethod},
Aggregation: DefaultBytesDistribution,
}
ClientRoundtripLatencyView = &view.View{
Measure: ClientRoundtripLatency,
Name: "grpc.io/client/roundtrip_latency",
Description: "Distribution of round-trip latency, by method.",
TagKeys: []tag.Key{KeyClientMethod},
Aggregation: DefaultMillisecondsDistribution,
}
ClientCompletedRPCsView = &view.View{
Measure: ClientRoundtripLatency,
Name: "grpc.io/client/completed_rpcs",
Description: "Count of RPCs by method and status.",
TagKeys: []tag.Key{KeyClientMethod, KeyClientStatus},
Aggregation: view.Count(),
}
ClientSentMessagesPerRPCView = &view.View{
Measure: ClientSentMessagesPerRPC,
Name: "grpc.io/client/sent_messages_per_rpc",
Description: "Distribution of sent messages count per RPC, by method.",
TagKeys: []tag.Key{KeyClientMethod},
Aggregation: DefaultMessageCountDistribution,
}
ClientReceivedMessagesPerRPCView = &view.View{
Measure: ClientReceivedMessagesPerRPC,
Name: "grpc.io/client/received_messages_per_rpc",
Description: "Distribution of received messages count per RPC, by method.",
TagKeys: []tag.Key{KeyClientMethod},
Aggregation: DefaultMessageCountDistribution,
}
ClientServerLatencyView = &view.View{
Measure: ClientServerLatency,
Name: "grpc.io/client/server_latency",
Description: "Distribution of server latency as viewed by client, by method.",
TagKeys: []tag.Key{KeyClientMethod},
Aggregation: DefaultMillisecondsDistribution,
}
)
// DefaultClientViews are the default client views provided by this package.
var DefaultClientViews = []*view.View{
ClientSentBytesPerRPCView,
ClientReceivedBytesPerRPCView,
ClientRoundtripLatencyView,
ClientCompletedRPCsView,
}
// TODO(jbd): Add roundtrip_latency, uncompressed_request_bytes, uncompressed_response_bytes, request_count, response_count.
// TODO(acetechnologist): This is temporary and will need to be replaced by a
// mechanism to load these defaults from a common repository/config shared by
// all supported languages. Likely a serialized protobuf of these defaults.
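These views produce no data until they are registered. A hedged sketch (not part of this vendored file; the package name is hypothetical) of opting in to the defaults:
package telemetry // hypothetical package name

import (
	"go.opencensus.io/plugin/ocgrpc"
	"go.opencensus.io/stats/view"
)

// RegisterDefaultClientViews opts in to the predefined client views above;
// none of them are registered by default.
func RegisterDefaultClientViews() error {
	return view.Register(ocgrpc.DefaultClientViews...)
}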


@ -0,0 +1,49 @@
// Copyright 2017, OpenCensus Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
package ocgrpc
import (
"time"
"go.opencensus.io/tag"
"golang.org/x/net/context"
"google.golang.org/grpc/grpclog"
"google.golang.org/grpc/stats"
)
// statsTagRPC gets the tag.Map populated by the application code, serializes
// its tags into the GRPC metadata in order to be sent to the server.
func (h *ClientHandler) statsTagRPC(ctx context.Context, info *stats.RPCTagInfo) context.Context {
startTime := time.Now()
if info == nil {
if grpclog.V(2) {
grpclog.Infof("clientHandler.TagRPC called with nil info.", info.FullMethodName)
}
return ctx
}
d := &rpcData{
startTime: startTime,
method: info.FullMethodName,
}
ts := tag.FromContext(ctx)
if ts != nil {
encoded := tag.Encode(ts)
ctx = stats.SetTags(ctx, encoded)
}
return context.WithValue(ctx, rpcDataKey, d)
}

vendor/go.opencensus.io/plugin/ocgrpc/doc.go generated vendored Normal file

@ -0,0 +1,19 @@
// Copyright 2017, OpenCensus Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Package ocgrpc contains OpenCensus stats and trace
// integrations for gRPC.
//
// Use ServerHandler for servers and ClientHandler for clients.
package ocgrpc // import "go.opencensus.io/plugin/ocgrpc"

vendor/go.opencensus.io/plugin/ocgrpc/server.go generated vendored Normal file

@ -0,0 +1,80 @@
// Copyright 2018, OpenCensus Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package ocgrpc
import (
"go.opencensus.io/trace"
"golang.org/x/net/context"
"google.golang.org/grpc/stats"
)
// ServerHandler implements gRPC stats.Handler recording OpenCensus stats and
// traces. Use with gRPC servers.
//
// When installed (see Example), tracing metadata is read from inbound RPCs
// by default. If no tracing metadata is present, or if the tracing metadata is
// present but the SpanContext isn't sampled, then a new trace may be started
// (as determined by Sampler).
type ServerHandler struct {
// IsPublicEndpoint may be set to true to always start a new trace around
// each RPC. Any SpanContext in the RPC metadata will be added as a linked
// span instead of making it the parent of the span created around the
// server RPC.
//
// Be aware that if you leave this false (the default) on a public-facing
// server, callers will be able to send tracing metadata in gRPC headers
// and trigger traces in your backend.
IsPublicEndpoint bool
// StartOptions to use for spans started around RPCs handled by this server.
//
// These will apply even if there is tracing metadata already
// present on the inbound RPC but the SpanContext is not sampled. This
// ensures that each service has some opportunity to be traced. If you would
// like to not add any additional traces for this gRPC service, set:
//
// StartOptions.Sampler = trace.ProbabilitySampler(0.0)
//
// StartOptions.SpanKind will always be set to trace.SpanKindServer
// for spans started by this handler.
StartOptions trace.StartOptions
}
var _ stats.Handler = (*ServerHandler)(nil)
// HandleConn exists to satisfy gRPC stats.Handler.
func (s *ServerHandler) HandleConn(ctx context.Context, cs stats.ConnStats) {
// no-op
}
// TagConn exists to satisfy gRPC stats.Handler.
func (s *ServerHandler) TagConn(ctx context.Context, cti *stats.ConnTagInfo) context.Context {
// no-op
return ctx
}
// HandleRPC implements per-RPC tracing and stats instrumentation.
func (s *ServerHandler) HandleRPC(ctx context.Context, rs stats.RPCStats) {
traceHandleRPC(ctx, rs)
statsHandleRPC(ctx, rs)
}
// TagRPC implements per-RPC context management.
func (s *ServerHandler) TagRPC(ctx context.Context, rti *stats.RPCTagInfo) context.Context {
ctx = s.traceTagRPC(ctx, rti)
ctx = s.statsTagRPC(ctx, rti)
return ctx
}
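A minimal server-side wiring sketch, not part of this vendored file; the listen address is hypothetical. The handler is attached with grpc.StatsHandler so every inbound RPC is traced and measured.
package main

import (
	"log"
	"net"

	"go.opencensus.io/plugin/ocgrpc"
	"google.golang.org/grpc"
)

func main() {
	lis, err := net.Listen("tcp", ":9000") // hypothetical listen address
	if err != nil {
		log.Fatal(err)
	}
	// Every inbound RPC passes through ServerHandler.TagRPC and HandleRPC.
	srv := grpc.NewServer(grpc.StatsHandler(&ocgrpc.ServerHandler{}))
	// ... register service implementations on srv here.
	log.Fatal(srv.Serve(lis))
}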


@ -0,0 +1,97 @@
// Copyright 2017, OpenCensus Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
package ocgrpc
import (
"go.opencensus.io/stats"
"go.opencensus.io/stats/view"
"go.opencensus.io/tag"
)
// The following variables are measures recorded by ServerHandler:
var (
ServerReceivedMessagesPerRPC = stats.Int64("grpc.io/server/received_messages_per_rpc", "Number of messages received in each RPC. Has value 1 for non-streaming RPCs.", stats.UnitDimensionless)
ServerReceivedBytesPerRPC = stats.Int64("grpc.io/server/received_bytes_per_rpc", "Total bytes received across all messages per RPC.", stats.UnitBytes)
ServerSentMessagesPerRPC = stats.Int64("grpc.io/server/sent_messages_per_rpc", "Number of messages sent in each RPC. Has value 1 for non-streaming RPCs.", stats.UnitDimensionless)
ServerSentBytesPerRPC = stats.Int64("grpc.io/server/sent_bytes_per_rpc", "Total bytes sent across all response messages per RPC.", stats.UnitBytes)
ServerLatency = stats.Float64("grpc.io/server/server_latency", "Time between first byte of request received to last byte of response sent, or terminal error.", stats.UnitMilliseconds)
)
// TODO(acetechnologist): This is temporary and will need to be replaced by a
// mechanism to load these defaults from a common repository/config shared by
// all supported languages. Likely a serialized protobuf of these defaults.
// Predefined views may be registered to collect data for the above measures.
// As always, you may also define your own custom views over measures collected by this
// package. These are declared as a convenience only; none are registered by
// default.
var (
ServerReceivedBytesPerRPCView = &view.View{
Name: "grpc.io/server/received_bytes_per_rpc",
Description: "Distribution of received bytes per RPC, by method.",
Measure: ServerReceivedBytesPerRPC,
TagKeys: []tag.Key{KeyServerMethod},
Aggregation: DefaultBytesDistribution,
}
ServerSentBytesPerRPCView = &view.View{
Name: "grpc.io/server/sent_bytes_per_rpc",
Description: "Distribution of total sent bytes per RPC, by method.",
Measure: ServerSentBytesPerRPC,
TagKeys: []tag.Key{KeyServerMethod},
Aggregation: DefaultBytesDistribution,
}
ServerLatencyView = &view.View{
Name: "grpc.io/server/server_latency",
Description: "Distribution of server latency in milliseconds, by method.",
TagKeys: []tag.Key{KeyServerMethod},
Measure: ServerLatency,
Aggregation: DefaultMillisecondsDistribution,
}
ServerCompletedRPCsView = &view.View{
Name: "grpc.io/server/completed_rpcs",
Description: "Count of RPCs by method and status.",
TagKeys: []tag.Key{KeyServerMethod, KeyServerStatus},
Measure: ServerLatency,
Aggregation: view.Count(),
}
ServerReceivedMessagesPerRPCView = &view.View{
Name: "grpc.io/server/received_messages_per_rpc",
Description: "Distribution of messages received count per RPC, by method.",
TagKeys: []tag.Key{KeyServerMethod},
Measure: ServerReceivedMessagesPerRPC,
Aggregation: DefaultMessageCountDistribution,
}
ServerSentMessagesPerRPCView = &view.View{
Name: "grpc.io/server/sent_messages_per_rpc",
Description: "Distribution of messages sent count per RPC, by method.",
TagKeys: []tag.Key{KeyServerMethod},
Measure: ServerSentMessagesPerRPC,
Aggregation: DefaultMessageCountDistribution,
}
)
// DefaultServerViews are the default server views provided by this package.
var DefaultServerViews = []*view.View{
ServerReceivedBytesPerRPCView,
ServerSentBytesPerRPCView,
ServerLatencyView,
ServerCompletedRPCsView,
}
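As on the client side, these views are inert until registered. A hedged sketch (not part of this vendored file; the package name is hypothetical):
package telemetry // hypothetical package name

import (
	"go.opencensus.io/plugin/ocgrpc"
	"go.opencensus.io/stats/view"
)

// RegisterDefaultServerViews opts in to the predefined server views above;
// none of them are registered by default.
func RegisterDefaultServerViews() error {
	return view.Register(ocgrpc.DefaultServerViews...)
}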


@ -0,0 +1,63 @@
// Copyright 2017, OpenCensus Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
package ocgrpc
import (
"time"
"golang.org/x/net/context"
"go.opencensus.io/tag"
"google.golang.org/grpc/grpclog"
"google.golang.org/grpc/stats"
)
// statsTagRPC gets the metadata from gRPC context, extracts the encoded tags from
// it and creates a new tag.Map and puts them into the returned context.
func (h *ServerHandler) statsTagRPC(ctx context.Context, info *stats.RPCTagInfo) context.Context {
startTime := time.Now()
if info == nil {
if grpclog.V(2) {
grpclog.Infof("opencensus: TagRPC called with nil info.")
}
return ctx
}
d := &rpcData{
startTime: startTime,
method: info.FullMethodName,
}
propagated := h.extractPropagatedTags(ctx)
ctx = tag.NewContext(ctx, propagated)
ctx, _ = tag.New(ctx, tag.Upsert(KeyServerMethod, methodName(info.FullMethodName)))
return context.WithValue(ctx, rpcDataKey, d)
}
// extractPropagatedTags creates a new tag map containing the tags extracted from the
// gRPC metadata.
func (h *ServerHandler) extractPropagatedTags(ctx context.Context) *tag.Map {
buf := stats.Tags(ctx)
if buf == nil {
return nil
}
propagated, err := tag.Decode(buf)
if err != nil {
if grpclog.V(2) {
grpclog.Warningf("opencensus: Failed to decode tags from gRPC metadata failed to decode: %v", err)
}
return nil
}
return propagated
}

vendor/go.opencensus.io/plugin/ocgrpc/stats_common.go generated vendored Normal file

@ -0,0 +1,205 @@
// Copyright 2017, OpenCensus Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
package ocgrpc
import (
"context"
"strconv"
"strings"
"sync/atomic"
"time"
ocstats "go.opencensus.io/stats"
"go.opencensus.io/stats/view"
"go.opencensus.io/tag"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/grpclog"
"google.golang.org/grpc/stats"
"google.golang.org/grpc/status"
)
type grpcInstrumentationKey string
// rpcData holds the instrumentation RPC data that is needed between the start
// and end of a call. It holds the info that this package needs to keep track
// of between the various GRPC events.
type rpcData struct {
// sentCount, sentBytes, recvCount and recvBytes have to be the first words
// in order to be 64-bit aligned on 32-bit architectures.
sentCount, sentBytes, recvCount, recvBytes int64 // access atomically
// startTime represents the time at which TagRPC was invoked at the
// beginning of an RPC. It is an approximation of the time when the
// application code invoked GRPC code.
startTime time.Time
method string
}
// The following variables define the default hard-coded auxiliary data used by
// both the default GRPC client and GRPC server metrics.
var (
DefaultBytesDistribution = view.Distribution(0, 1024, 2048, 4096, 16384, 65536, 262144, 1048576, 4194304, 16777216, 67108864, 268435456, 1073741824, 4294967296)
DefaultMillisecondsDistribution = view.Distribution(0, 0.01, 0.05, 0.1, 0.3, 0.6, 0.8, 1, 2, 3, 4, 5, 6, 8, 10, 13, 16, 20, 25, 30, 40, 50, 65, 80, 100, 130, 160, 200, 250, 300, 400, 500, 650, 800, 1000, 2000, 5000, 10000, 20000, 50000, 100000)
DefaultMessageCountDistribution = view.Distribution(0, 1, 2, 4, 8, 16, 32, 64, 128, 256, 512, 1024, 2048, 4096, 8192, 16384, 32768, 65536)
)
// Server tags are applied to the context used to process each RPC, as well as
// the measures at the end of each RPC.
var (
KeyServerMethod, _ = tag.NewKey("grpc_server_method")
KeyServerStatus, _ = tag.NewKey("grpc_server_status")
)
// Client tags are applied to measures at the end of each RPC.
var (
KeyClientMethod, _ = tag.NewKey("grpc_client_method")
KeyClientStatus, _ = tag.NewKey("grpc_client_status")
)
var (
rpcDataKey = grpcInstrumentationKey("opencensus-rpcData")
)
func methodName(fullname string) string {
return strings.TrimLeft(fullname, "/")
}
// statsHandleRPC processes the RPC events.
func statsHandleRPC(ctx context.Context, s stats.RPCStats) {
switch st := s.(type) {
case *stats.Begin, *stats.OutHeader, *stats.InHeader, *stats.InTrailer, *stats.OutTrailer:
// nothing to record for these events
case *stats.OutPayload:
handleRPCOutPayload(ctx, st)
case *stats.InPayload:
handleRPCInPayload(ctx, st)
case *stats.End:
handleRPCEnd(ctx, st)
default:
grpclog.Infof("unexpected stats: %T", st)
}
}
func handleRPCOutPayload(ctx context.Context, s *stats.OutPayload) {
d, ok := ctx.Value(rpcDataKey).(*rpcData)
if !ok {
if grpclog.V(2) {
grpclog.Infoln("Failed to retrieve *rpcData from context.")
}
return
}
atomic.AddInt64(&d.sentBytes, int64(s.Length))
atomic.AddInt64(&d.sentCount, 1)
}
func handleRPCInPayload(ctx context.Context, s *stats.InPayload) {
d, ok := ctx.Value(rpcDataKey).(*rpcData)
if !ok {
if grpclog.V(2) {
grpclog.Infoln("Failed to retrieve *rpcData from context.")
}
return
}
atomic.AddInt64(&d.recvBytes, int64(s.Length))
atomic.AddInt64(&d.recvCount, 1)
}
func handleRPCEnd(ctx context.Context, s *stats.End) {
d, ok := ctx.Value(rpcDataKey).(*rpcData)
if !ok {
if grpclog.V(2) {
grpclog.Infoln("Failed to retrieve *rpcData from context.")
}
return
}
elapsedTime := time.Since(d.startTime)
var st string
if s.Error != nil {
s, ok := status.FromError(s.Error)
if ok {
st = statusCodeToString(s)
}
} else {
st = "OK"
}
latencyMillis := float64(elapsedTime) / float64(time.Millisecond)
if s.Client {
ctx, _ = tag.New(ctx,
tag.Upsert(KeyClientMethod, methodName(d.method)),
tag.Upsert(KeyClientStatus, st))
ocstats.Record(ctx,
ClientSentBytesPerRPC.M(atomic.LoadInt64(&d.sentBytes)),
ClientSentMessagesPerRPC.M(atomic.LoadInt64(&d.sentCount)),
ClientReceivedMessagesPerRPC.M(atomic.LoadInt64(&d.recvCount)),
ClientReceivedBytesPerRPC.M(atomic.LoadInt64(&d.recvBytes)),
ClientRoundtripLatency.M(latencyMillis))
} else {
ctx, _ = tag.New(ctx, tag.Upsert(KeyServerStatus, st))
ocstats.Record(ctx,
ServerSentBytesPerRPC.M(atomic.LoadInt64(&d.sentBytes)),
ServerSentMessagesPerRPC.M(atomic.LoadInt64(&d.sentCount)),
ServerReceivedMessagesPerRPC.M(atomic.LoadInt64(&d.recvCount)),
ServerReceivedBytesPerRPC.M(atomic.LoadInt64(&d.recvBytes)),
ServerLatency.M(latencyMillis))
}
}
func statusCodeToString(s *status.Status) string {
// see https://github.com/grpc/grpc/blob/master/doc/statuscodes.md
switch c := s.Code(); c {
case codes.OK:
return "OK"
case codes.Canceled:
return "CANCELLED"
case codes.Unknown:
return "UNKNOWN"
case codes.InvalidArgument:
return "INVALID_ARGUMENT"
case codes.DeadlineExceeded:
return "DEADLINE_EXCEEDED"
case codes.NotFound:
return "NOT_FOUND"
case codes.AlreadyExists:
return "ALREADY_EXISTS"
case codes.PermissionDenied:
return "PERMISSION_DENIED"
case codes.ResourceExhausted:
return "RESOURCE_EXHAUSTED"
case codes.FailedPrecondition:
return "FAILED_PRECONDITION"
case codes.Aborted:
return "ABORTED"
case codes.OutOfRange:
return "OUT_OF_RANGE"
case codes.Unimplemented:
return "UNIMPLEMENTED"
case codes.Internal:
return "INTERNAL"
case codes.Unavailable:
return "UNAVAILABLE"
case codes.DataLoss:
return "DATA_LOSS"
case codes.Unauthenticated:
return "UNAUTHENTICATED"
default:
return "CODE_" + strconv.FormatInt(int64(c), 10)
}
}
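The exported measures, tag keys and default distributions above can also back custom views. A sketch, not part of this vendored file; the view name and package name are hypothetical:
package telemetry // hypothetical package name

import (
	"go.opencensus.io/plugin/ocgrpc"
	"go.opencensus.io/stats/view"
	"go.opencensus.io/tag"
)

// A custom latency view keyed by method, reusing the default
// milliseconds distribution defined above.
var latencyByMethodView = &view.View{
	Name:        "example.com/client/roundtrip_latency_by_method", // hypothetical
	Description: "Distribution of client round-trip latency, by method.",
	Measure:     ocgrpc.ClientRoundtripLatency,
	TagKeys:     []tag.Key{ocgrpc.KeyClientMethod},
	Aggregation: ocgrpc.DefaultMillisecondsDistribution,
}

func registerCustomViews() error {
	return view.Register(latencyByMethodView)
}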

vendor/go.opencensus.io/plugin/ocgrpc/trace_common.go generated vendored Normal file

@ -0,0 +1,107 @@
// Copyright 2017, OpenCensus Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package ocgrpc
import (
"strings"
"google.golang.org/grpc/codes"
"go.opencensus.io/trace"
"go.opencensus.io/trace/propagation"
"golang.org/x/net/context"
"google.golang.org/grpc/metadata"
"google.golang.org/grpc/stats"
"google.golang.org/grpc/status"
)
const traceContextKey = "grpc-trace-bin"
// TagRPC creates a new trace span for the client side of the RPC.
//
// It returns ctx with the new trace span added and a serialization of the
// SpanContext added to the outgoing gRPC metadata.
func (c *ClientHandler) traceTagRPC(ctx context.Context, rti *stats.RPCTagInfo) context.Context {
name := strings.TrimPrefix(rti.FullMethodName, "/")
name = strings.Replace(name, "/", ".", -1)
ctx, span := trace.StartSpan(ctx, name,
trace.WithSampler(c.StartOptions.Sampler),
trace.WithSpanKind(trace.SpanKindClient)) // span is ended by traceHandleRPC
traceContextBinary := propagation.Binary(span.SpanContext())
return metadata.AppendToOutgoingContext(ctx, traceContextKey, string(traceContextBinary))
}
// TagRPC creates a new trace span for the server side of the RPC.
//
// It checks the incoming gRPC metadata in ctx for a SpanContext, and if
// it finds one, uses that SpanContext as the parent context of the new span.
//
// It returns ctx, with the new trace span added.
func (s *ServerHandler) traceTagRPC(ctx context.Context, rti *stats.RPCTagInfo) context.Context {
md, _ := metadata.FromIncomingContext(ctx)
name := strings.TrimPrefix(rti.FullMethodName, "/")
name = strings.Replace(name, "/", ".", -1)
traceContext := md[traceContextKey]
var (
parent trace.SpanContext
haveParent bool
)
if len(traceContext) > 0 {
// Metadata with keys ending in -bin are actually binary. They are base64
// encoded before being put on the wire, see:
// https://github.com/grpc/grpc-go/blob/08d6261/Documentation/grpc-metadata.md#storing-binary-data-in-metadata
traceContextBinary := []byte(traceContext[0])
parent, haveParent = propagation.FromBinary(traceContextBinary)
if haveParent && !s.IsPublicEndpoint {
ctx, _ := trace.StartSpanWithRemoteParent(ctx, name, parent,
trace.WithSpanKind(trace.SpanKindServer),
trace.WithSampler(s.StartOptions.Sampler),
)
return ctx
}
}
ctx, span := trace.StartSpan(ctx, name,
trace.WithSpanKind(trace.SpanKindServer),
trace.WithSampler(s.StartOptions.Sampler))
if haveParent {
span.AddLink(trace.Link{TraceID: parent.TraceID, SpanID: parent.SpanID, Type: trace.LinkTypeChild})
}
return ctx
}
func traceHandleRPC(ctx context.Context, rs stats.RPCStats) {
span := trace.FromContext(ctx)
// TODO: compressed and uncompressed sizes are not populated in every message.
switch rs := rs.(type) {
case *stats.Begin:
span.AddAttributes(
trace.BoolAttribute("Client", rs.Client),
trace.BoolAttribute("FailFast", rs.FailFast))
case *stats.InPayload:
span.AddMessageReceiveEvent(0 /* TODO: messageID */, int64(rs.Length), int64(rs.WireLength))
case *stats.OutPayload:
span.AddMessageSendEvent(0, int64(rs.Length), int64(rs.WireLength))
case *stats.End:
if rs.Error != nil {
s, ok := status.FromError(rs.Error)
if ok {
span.SetStatus(trace.Status{Code: int32(s.Code()), Message: s.Message()})
} else {
span.SetStatus(trace.Status{Code: int32(codes.Internal), Message: rs.Error.Error()})
}
}
span.End()
}
}

vendor/go.opencensus.io/plugin/ochttp/client.go generated vendored Normal file

@ -0,0 +1,107 @@
// Copyright 2018, OpenCensus Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package ochttp
import (
"net/http"
"net/http/httptrace"
"go.opencensus.io/trace"
"go.opencensus.io/trace/propagation"
)
// Transport is an http.RoundTripper that instruments all outgoing requests with
// OpenCensus stats and tracing.
//
// The zero value is intended to be a useful default, but for
// now it's recommended that you explicitly set Propagation, since the default
// for this may change.
type Transport struct {
// Base may be set to wrap another http.RoundTripper that does the actual
// requests. By default http.DefaultTransport is used.
//
// If base HTTP roundtripper implements CancelRequest,
// the returned round tripper will be cancelable.
Base http.RoundTripper
// Propagation defines how traces are propagated. If unspecified, a default
// (currently B3 format) will be used.
Propagation propagation.HTTPFormat
// StartOptions are applied to the span started by this Transport around each
// request.
//
// StartOptions.SpanKind will always be set to trace.SpanKindClient
// for spans started by this transport.
StartOptions trace.StartOptions
// FormatSpanName holds the function to use for generating the span name
// from the information found in the outgoing HTTP Request. By default the
// name equals the URL Path.
FormatSpanName func(*http.Request) string
// NewClientTrace may be set to a function allowing the current *trace.Span
// to be annotated with HTTP request event information emitted by the
// httptrace package.
NewClientTrace func(*http.Request, *trace.Span) *httptrace.ClientTrace
// TODO: Implement tag propagation for HTTP.
}
// RoundTrip implements http.RoundTripper, delegating to Base and recording stats and traces for the request.
func (t *Transport) RoundTrip(req *http.Request) (*http.Response, error) {
rt := t.base()
if isHealthEndpoint(req.URL.Path) {
return rt.RoundTrip(req)
}
// TODO: remove excessive nesting of http.RoundTrippers here.
format := t.Propagation
if format == nil {
format = defaultFormat
}
spanNameFormatter := t.FormatSpanName
if spanNameFormatter == nil {
spanNameFormatter = spanNameFromURL
}
rt = &traceTransport{
base: rt,
format: format,
startOptions: trace.StartOptions{
Sampler: t.StartOptions.Sampler,
SpanKind: trace.SpanKindClient,
},
formatSpanName: spanNameFormatter,
newClientTrace: t.NewClientTrace,
}
rt = statsTransport{base: rt}
return rt.RoundTrip(req)
}
func (t *Transport) base() http.RoundTripper {
if t.Base != nil {
return t.Base
}
return http.DefaultTransport
}
// CancelRequest cancels an in-flight request by closing its connection.
func (t *Transport) CancelRequest(req *http.Request) {
type canceler interface {
CancelRequest(*http.Request)
}
if cr, ok := t.base().(canceler); ok {
cr.CancelRequest(req)
}
}
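A minimal client usage sketch, not part of this vendored file; the URL is hypothetical. Wrapping the default transport is enough to get a client span and stats per outgoing request.
package main

import (
	"log"
	"net/http"

	"go.opencensus.io/plugin/ochttp"
)

func main() {
	// The zero-value Transport wraps http.DefaultTransport and uses the
	// default (currently B3) propagation format.
	client := &http.Client{Transport: &ochttp.Transport{}}
	resp, err := client.Get("http://localhost:8080/") // hypothetical URL
	if err != nil {
		log.Fatal(err)
	}
	resp.Body.Close()
}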

vendor/go.opencensus.io/plugin/ochttp/client_stats.go generated vendored Normal file

@ -0,0 +1,125 @@
// Copyright 2018, OpenCensus Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package ochttp
import (
"context"
"io"
"net/http"
"strconv"
"sync"
"time"
"go.opencensus.io/stats"
"go.opencensus.io/tag"
)
// statsTransport is an http.RoundTripper that collects stats for the outgoing requests.
type statsTransport struct {
base http.RoundTripper
}
// RoundTrip implements http.RoundTripper, delegating to Base and recording stats for the request.
func (t statsTransport) RoundTrip(req *http.Request) (*http.Response, error) {
ctx, _ := tag.New(req.Context(),
tag.Upsert(Host, req.URL.Host),
tag.Upsert(Path, req.URL.Path),
tag.Upsert(Method, req.Method))
req = req.WithContext(ctx)
track := &tracker{
start: time.Now(),
ctx: ctx,
}
if req.Body == nil {
// TODO: Handle cases where ContentLength is not set.
track.reqSize = -1
} else if req.ContentLength > 0 {
track.reqSize = req.ContentLength
}
stats.Record(ctx, ClientRequestCount.M(1))
// Perform request.
resp, err := t.base.RoundTrip(req)
if err != nil {
track.statusCode = http.StatusInternalServerError
track.end()
} else {
track.statusCode = resp.StatusCode
if resp.Body == nil {
track.end()
} else {
track.body = resp.Body
resp.Body = track
}
}
return resp, err
}
// CancelRequest cancels an in-flight request by closing its connection.
func (t statsTransport) CancelRequest(req *http.Request) {
type canceler interface {
CancelRequest(*http.Request)
}
if cr, ok := t.base.(canceler); ok {
cr.CancelRequest(req)
}
}
type tracker struct {
ctx context.Context
respSize int64
reqSize int64
start time.Time
body io.ReadCloser
statusCode int
endOnce sync.Once
}
var _ io.ReadCloser = (*tracker)(nil)
func (t *tracker) end() {
t.endOnce.Do(func() {
m := []stats.Measurement{
ClientLatency.M(float64(time.Since(t.start)) / float64(time.Millisecond)),
ClientResponseBytes.M(t.respSize),
}
if t.reqSize >= 0 {
m = append(m, ClientRequestBytes.M(t.reqSize))
}
ctx, _ := tag.New(t.ctx, tag.Upsert(StatusCode, strconv.Itoa(t.statusCode)))
stats.Record(ctx, m...)
})
}
func (t *tracker) Read(b []byte) (int, error) {
n, err := t.body.Read(b)
switch err {
case nil:
t.respSize += int64(n)
return n, nil
case io.EOF:
t.end()
}
return n, err
}
func (t *tracker) Close() error {
// Invoking end on Close catches the cases in which a read returned a
// non-nil error other than io.EOF, so the request stats are still
// recorded exactly once.
t.end()
return t.body.Close()
}

vendor/go.opencensus.io/plugin/ochttp/doc.go generated vendored Normal file

@ -0,0 +1,19 @@
// Copyright 2018, OpenCensus Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Package ochttp provides OpenCensus instrumentation for the net/http package.
//
// For server instrumentation, see Handler. For client-side instrumentation,
// see Transport.
package ochttp // import "go.opencensus.io/plugin/ochttp"


@ -0,0 +1,123 @@
// Copyright 2018, OpenCensus Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Package b3 contains a propagation.HTTPFormat implementation
// for B3 propagation. See https://github.com/openzipkin/b3-propagation
// for more details.
package b3 // import "go.opencensus.io/plugin/ochttp/propagation/b3"
import (
"encoding/hex"
"net/http"
"go.opencensus.io/trace"
"go.opencensus.io/trace/propagation"
)
// B3 headers that OpenCensus understands.
const (
TraceIDHeader = "X-B3-TraceId"
SpanIDHeader = "X-B3-SpanId"
SampledHeader = "X-B3-Sampled"
)
// HTTPFormat implements propagation.HTTPFormat to propagate
// traces in HTTP headers in B3 propagation format.
// HTTPFormat skips the X-B3-ParentId and X-B3-Flags headers
// because there are additional fields not represented in the
// OpenCensus span context. Spans created from the incoming
// header will be the direct children of the client-side span.
// Similarly, the receiver of the outgoing spans should use the client-side
// span created by OpenCensus as the parent.
type HTTPFormat struct{}
var _ propagation.HTTPFormat = (*HTTPFormat)(nil)
// SpanContextFromRequest extracts a B3 span context from incoming requests.
func (f *HTTPFormat) SpanContextFromRequest(req *http.Request) (sc trace.SpanContext, ok bool) {
tid, ok := ParseTraceID(req.Header.Get(TraceIDHeader))
if !ok {
return trace.SpanContext{}, false
}
sid, ok := ParseSpanID(req.Header.Get(SpanIDHeader))
if !ok {
return trace.SpanContext{}, false
}
sampled, _ := ParseSampled(req.Header.Get(SampledHeader))
return trace.SpanContext{
TraceID: tid,
SpanID: sid,
TraceOptions: sampled,
}, true
}
// ParseTraceID parses the value of the X-B3-TraceId header.
func ParseTraceID(tid string) (trace.TraceID, bool) {
if tid == "" {
return trace.TraceID{}, false
}
b, err := hex.DecodeString(tid)
if err != nil {
return trace.TraceID{}, false
}
var traceID trace.TraceID
if len(b) <= 8 {
// The lower 64-bits.
start := 8 + (8 - len(b))
copy(traceID[start:], b)
} else {
start := 16 - len(b)
copy(traceID[start:], b)
}
return traceID, true
}
// ParseSpanID parses the value of the X-B3-SpanId or X-B3-ParentSpanId headers.
func ParseSpanID(sid string) (spanID trace.SpanID, ok bool) {
if sid == "" {
return trace.SpanID{}, false
}
b, err := hex.DecodeString(sid)
if err != nil {
return trace.SpanID{}, false
}
start := 8 - len(b)
copy(spanID[start:], b)
return spanID, true
}
// ParseSampled parses the value of the X-B3-Sampled header.
func ParseSampled(sampled string) (trace.TraceOptions, bool) {
switch sampled {
case "true", "1":
return trace.TraceOptions(1), true
default:
return trace.TraceOptions(0), false
}
}
// SpanContextToRequest modifies the given request to include B3 headers.
func (f *HTTPFormat) SpanContextToRequest(sc trace.SpanContext, req *http.Request) {
req.Header.Set(TraceIDHeader, hex.EncodeToString(sc.TraceID[:]))
req.Header.Set(SpanIDHeader, hex.EncodeToString(sc.SpanID[:]))
var sampled string
if sc.IsSampled() {
sampled = "1"
} else {
sampled = "0"
}
req.Header.Set(SampledHeader, sampled)
}
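A configuration sketch, not part of this vendored file; the package name is hypothetical. B3 is the current default, but selecting it explicitly on both the client transport and the server handler guards against the default changing.
package telemetry // hypothetical package name

import (
	"net/http"

	"go.opencensus.io/plugin/ochttp"
	"go.opencensus.io/plugin/ochttp/propagation/b3"
)

// Client writes B3 headers on outgoing requests.
var Client = &http.Client{
	Transport: &ochttp.Transport{Propagation: &b3.HTTPFormat{}},
}

// WrapHandler reads B3 headers from inbound requests.
func WrapHandler(h http.Handler) http.Handler {
	return &ochttp.Handler{Handler: h, Propagation: &b3.HTTPFormat{}}
}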

vendor/go.opencensus.io/plugin/ochttp/route.go generated vendored Normal file

@ -0,0 +1,51 @@
// Copyright 2018, OpenCensus Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package ochttp
import (
"net/http"
"go.opencensus.io/tag"
)
// WithRouteTag returns an http.Handler that records stats with the
// http_server_route tag set to the given value.
func WithRouteTag(handler http.Handler, route string) http.Handler {
return taggedHandlerFunc(func(w http.ResponseWriter, r *http.Request) []tag.Mutator {
addRoute := []tag.Mutator{tag.Upsert(KeyServerRoute, route)}
ctx, _ := tag.New(r.Context(), addRoute...)
r = r.WithContext(ctx)
handler.ServeHTTP(w, r)
return addRoute
})
}
// taggedHandlerFunc is a http.Handler that returns tags describing the
// processing of the request. These tags will be recorded along with the
// measures in this package at the end of the request.
type taggedHandlerFunc func(w http.ResponseWriter, r *http.Request) []tag.Mutator
func (h taggedHandlerFunc) ServeHTTP(w http.ResponseWriter, r *http.Request) {
tags := h(w, r)
if a, ok := r.Context().Value(addedTagsKey{}).(*addedTags); ok {
a.t = append(a.t, tags...)
}
}
type addedTagsKey struct{}
type addedTags struct {
t []tag.Mutator
}
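A usage sketch for WithRouteTag, not part of this vendored file; the route, port and handler body are hypothetical.
package main

import (
	"log"
	"net/http"

	"go.opencensus.io/plugin/ochttp"
)

func usersHandler(w http.ResponseWriter, r *http.Request) {
	w.Write([]byte("ok")) // hypothetical handler body
}

func main() {
	mux := http.NewServeMux()
	// Server stats for this handler carry http_server_route="/users".
	mux.Handle("/users", ochttp.WithRouteTag(http.HandlerFunc(usersHandler), "/users"))
	log.Fatal(http.ListenAndServe(":8080", &ochttp.Handler{Handler: mux}))
}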

vendor/go.opencensus.io/plugin/ochttp/server.go generated vendored Normal file

@ -0,0 +1,430 @@
// Copyright 2018, OpenCensus Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package ochttp
import (
"context"
"io"
"net/http"
"strconv"
"sync"
"time"
"go.opencensus.io/stats"
"go.opencensus.io/tag"
"go.opencensus.io/trace"
"go.opencensus.io/trace/propagation"
)
// Handler is an http.Handler wrapper to instrument your HTTP server with
// OpenCensus. It supports both stats and tracing.
//
// Tracing
//
// This handler is aware of the incoming request's span, reading it from request
// headers as configured using the Propagation field.
// The extracted span can be accessed from the incoming request's
// context.
//
// span := trace.FromContext(r.Context())
//
// The server span will be automatically ended at the end of ServeHTTP.
type Handler struct {
// Propagation defines how traces are propagated. If unspecified,
// B3 propagation will be used.
Propagation propagation.HTTPFormat
// Handler is the handler used to handle the incoming request.
Handler http.Handler
// StartOptions are applied to the span started by this Handler around each
// request.
//
// StartOptions.SpanKind will always be set to trace.SpanKindServer
// for spans started by this transport.
StartOptions trace.StartOptions
// IsPublicEndpoint should be set to true for publicly accessible HTTP(S)
// servers. If true, any trace metadata set on the incoming request will
// be added as a linked trace instead of being added as a parent of the
// current trace.
IsPublicEndpoint bool
// FormatSpanName holds the function to use for generating the span name
// from the information found in the incoming HTTP Request. By default the
// name equals the URL Path.
FormatSpanName func(*http.Request) string
}
func (h *Handler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
var tags addedTags
r, traceEnd := h.startTrace(w, r)
defer traceEnd()
w, statsEnd := h.startStats(w, r)
defer statsEnd(&tags)
handler := h.Handler
if handler == nil {
handler = http.DefaultServeMux
}
r = r.WithContext(context.WithValue(r.Context(), addedTagsKey{}, &tags))
handler.ServeHTTP(w, r)
}
func (h *Handler) startTrace(w http.ResponseWriter, r *http.Request) (*http.Request, func()) {
if isHealthEndpoint(r.URL.Path) {
return r, func() {}
}
var name string
if h.FormatSpanName == nil {
name = spanNameFromURL(r)
} else {
name = h.FormatSpanName(r)
}
ctx := r.Context()
var span *trace.Span
sc, ok := h.extractSpanContext(r)
if ok && !h.IsPublicEndpoint {
ctx, span = trace.StartSpanWithRemoteParent(ctx, name, sc,
trace.WithSampler(h.StartOptions.Sampler),
trace.WithSpanKind(trace.SpanKindServer))
} else {
ctx, span = trace.StartSpan(ctx, name,
trace.WithSampler(h.StartOptions.Sampler),
trace.WithSpanKind(trace.SpanKindServer),
)
if ok {
span.AddLink(trace.Link{
TraceID: sc.TraceID,
SpanID: sc.SpanID,
Type: trace.LinkTypeChild,
Attributes: nil,
})
}
}
span.AddAttributes(requestAttrs(r)...)
return r.WithContext(ctx), span.End
}
func (h *Handler) extractSpanContext(r *http.Request) (trace.SpanContext, bool) {
if h.Propagation == nil {
return defaultFormat.SpanContextFromRequest(r)
}
return h.Propagation.SpanContextFromRequest(r)
}
func (h *Handler) startStats(w http.ResponseWriter, r *http.Request) (http.ResponseWriter, func(tags *addedTags)) {
ctx, _ := tag.New(r.Context(),
tag.Upsert(Host, r.URL.Host),
tag.Upsert(Path, r.URL.Path),
tag.Upsert(Method, r.Method))
track := &trackingResponseWriter{
start: time.Now(),
ctx: ctx,
writer: w,
}
if r.Body == nil {
// TODO: Handle cases where ContentLength is not set.
track.reqSize = -1
} else if r.ContentLength > 0 {
track.reqSize = r.ContentLength
}
stats.Record(ctx, ServerRequestCount.M(1))
return track.wrappedResponseWriter(), track.end
}
type trackingResponseWriter struct {
ctx context.Context
reqSize int64
respSize int64
start time.Time
statusCode int
statusLine string
endOnce sync.Once
writer http.ResponseWriter
}
// Compile time assertion for ResponseWriter interface
var _ http.ResponseWriter = (*trackingResponseWriter)(nil)
var logTagsErrorOnce sync.Once
func (t *trackingResponseWriter) end(tags *addedTags) {
t.endOnce.Do(func() {
if t.statusCode == 0 {
t.statusCode = 200
}
span := trace.FromContext(t.ctx)
span.SetStatus(TraceStatus(t.statusCode, t.statusLine))
m := []stats.Measurement{
ServerLatency.M(float64(time.Since(t.start)) / float64(time.Millisecond)),
ServerResponseBytes.M(t.respSize),
}
if t.reqSize >= 0 {
m = append(m, ServerRequestBytes.M(t.reqSize))
}
allTags := make([]tag.Mutator, len(tags.t)+1)
allTags[0] = tag.Upsert(StatusCode, strconv.Itoa(t.statusCode))
copy(allTags[1:], tags.t)
ctx, _ := tag.New(t.ctx, allTags...)
stats.Record(ctx, m...)
})
}
func (t *trackingResponseWriter) Header() http.Header {
return t.writer.Header()
}
func (t *trackingResponseWriter) Write(data []byte) (int, error) {
n, err := t.writer.Write(data)
t.respSize += int64(n)
return n, err
}
func (t *trackingResponseWriter) WriteHeader(statusCode int) {
t.writer.WriteHeader(statusCode)
t.statusCode = statusCode
t.statusLine = http.StatusText(t.statusCode)
}
// wrappedResponseWriter returns a wrapped version of the original
// ResponseWriter and only implements the same combination of additional
// interfaces as the original.
// This implementation is based on https://github.com/felixge/httpsnoop.
func (t *trackingResponseWriter) wrappedResponseWriter() http.ResponseWriter {
var (
hj, i0 = t.writer.(http.Hijacker)
cn, i1 = t.writer.(http.CloseNotifier)
pu, i2 = t.writer.(http.Pusher)
fl, i3 = t.writer.(http.Flusher)
rf, i4 = t.writer.(io.ReaderFrom)
)
switch {
case !i0 && !i1 && !i2 && !i3 && !i4:
return struct {
http.ResponseWriter
}{t}
case !i0 && !i1 && !i2 && !i3 && i4:
return struct {
http.ResponseWriter
io.ReaderFrom
}{t, rf}
case !i0 && !i1 && !i2 && i3 && !i4:
return struct {
http.ResponseWriter
http.Flusher
}{t, fl}
case !i0 && !i1 && !i2 && i3 && i4:
return struct {
http.ResponseWriter
http.Flusher
io.ReaderFrom
}{t, fl, rf}
case !i0 && !i1 && i2 && !i3 && !i4:
return struct {
http.ResponseWriter
http.Pusher
}{t, pu}
case !i0 && !i1 && i2 && !i3 && i4:
return struct {
http.ResponseWriter
http.Pusher
io.ReaderFrom
}{t, pu, rf}
case !i0 && !i1 && i2 && i3 && !i4:
return struct {
http.ResponseWriter
http.Pusher
http.Flusher
}{t, pu, fl}
case !i0 && !i1 && i2 && i3 && i4:
return struct {
http.ResponseWriter
http.Pusher
http.Flusher
io.ReaderFrom
}{t, pu, fl, rf}
case !i0 && i1 && !i2 && !i3 && !i4:
return struct {
http.ResponseWriter
http.CloseNotifier
}{t, cn}
case !i0 && i1 && !i2 && !i3 && i4:
return struct {
http.ResponseWriter
http.CloseNotifier
io.ReaderFrom
}{t, cn, rf}
case !i0 && i1 && !i2 && i3 && !i4:
return struct {
http.ResponseWriter
http.CloseNotifier
http.Flusher
}{t, cn, fl}
case !i0 && i1 && !i2 && i3 && i4:
return struct {
http.ResponseWriter
http.CloseNotifier
http.Flusher
io.ReaderFrom
}{t, cn, fl, rf}
case !i0 && i1 && i2 && !i3 && !i4:
return struct {
http.ResponseWriter
http.CloseNotifier
http.Pusher
}{t, cn, pu}
case !i0 && i1 && i2 && !i3 && i4:
return struct {
http.ResponseWriter
http.CloseNotifier
http.Pusher
io.ReaderFrom
}{t, cn, pu, rf}
case !i0 && i1 && i2 && i3 && !i4:
return struct {
http.ResponseWriter
http.CloseNotifier
http.Pusher
http.Flusher
}{t, cn, pu, fl}
case !i0 && i1 && i2 && i3 && i4:
return struct {
http.ResponseWriter
http.CloseNotifier
http.Pusher
http.Flusher
io.ReaderFrom
}{t, cn, pu, fl, rf}
case i0 && !i1 && !i2 && !i3 && !i4:
return struct {
http.ResponseWriter
http.Hijacker
}{t, hj}
case i0 && !i1 && !i2 && !i3 && i4:
return struct {
http.ResponseWriter
http.Hijacker
io.ReaderFrom
}{t, hj, rf}
case i0 && !i1 && !i2 && i3 && !i4:
return struct {
http.ResponseWriter
http.Hijacker
http.Flusher
}{t, hj, fl}
case i0 && !i1 && !i2 && i3 && i4:
return struct {
http.ResponseWriter
http.Hijacker
http.Flusher
io.ReaderFrom
}{t, hj, fl, rf}
case i0 && !i1 && i2 && !i3 && !i4:
return struct {
http.ResponseWriter
http.Hijacker
http.Pusher
}{t, hj, pu}
case i0 && !i1 && i2 && !i3 && i4:
return struct {
http.ResponseWriter
http.Hijacker
http.Pusher
io.ReaderFrom
}{t, hj, pu, rf}
case i0 && !i1 && i2 && i3 && !i4:
return struct {
http.ResponseWriter
http.Hijacker
http.Pusher
http.Flusher
}{t, hj, pu, fl}
case i0 && !i1 && i2 && i3 && i4:
return struct {
http.ResponseWriter
http.Hijacker
http.Pusher
http.Flusher
io.ReaderFrom
}{t, hj, pu, fl, rf}
case i0 && i1 && !i2 && !i3 && !i4:
return struct {
http.ResponseWriter
http.Hijacker
http.CloseNotifier
}{t, hj, cn}
case i0 && i1 && !i2 && !i3 && i4:
return struct {
http.ResponseWriter
http.Hijacker
http.CloseNotifier
io.ReaderFrom
}{t, hj, cn, rf}
case i0 && i1 && !i2 && i3 && !i4:
return struct {
http.ResponseWriter
http.Hijacker
http.CloseNotifier
http.Flusher
}{t, hj, cn, fl}
case i0 && i1 && !i2 && i3 && i4:
return struct {
http.ResponseWriter
http.Hijacker
http.CloseNotifier
http.Flusher
io.ReaderFrom
}{t, hj, cn, fl, rf}
case i0 && i1 && i2 && !i3 && !i4:
return struct {
http.ResponseWriter
http.Hijacker
http.CloseNotifier
http.Pusher
}{t, hj, cn, pu}
case i0 && i1 && i2 && !i3 && i4:
return struct {
http.ResponseWriter
http.Hijacker
http.CloseNotifier
http.Pusher
io.ReaderFrom
}{t, hj, cn, pu, rf}
case i0 && i1 && i2 && i3 && !i4:
return struct {
http.ResponseWriter
http.Hijacker
http.CloseNotifier
http.Pusher
http.Flusher
}{t, hj, cn, pu, fl}
case i0 && i1 && i2 && i3 && i4:
return struct {
http.ResponseWriter
http.Hijacker
http.CloseNotifier
http.Pusher
http.Flusher
io.ReaderFrom
}{t, hj, cn, pu, fl, rf}
default:
return struct {
http.ResponseWriter
}{t}
}
}
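A minimal server usage sketch, not part of this vendored file; the port and annotation are hypothetical. It also shows retrieving the server span that Handler places on the request context.
package main

import (
	"log"
	"net/http"

	"go.opencensus.io/plugin/ochttp"
	"go.opencensus.io/trace"
)

func main() {
	mux := http.NewServeMux()
	mux.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {
		// The server span started by Handler.ServeHTTP is on the context.
		span := trace.FromContext(r.Context())
		span.Annotate(nil, "handling request") // hypothetical annotation
		w.Write([]byte("hello"))
	})
	// Wrap the mux so every request is traced and measured.
	log.Fatal(http.ListenAndServe(":8080", &ochttp.Handler{Handler: mux}))
}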


@ -0,0 +1,167 @@
// Copyright 2018, OpenCensus Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package ochttp
import (
"crypto/tls"
"net/http"
"net/http/httptrace"
"strings"
"go.opencensus.io/trace"
)
type spanAnnotator struct {
sp *trace.Span
}
// TODO: Remove NewSpanAnnotator at the next release.
// Deprecated: Use NewSpanAnnotatingClientTrace instead
func NewSpanAnnotator(r *http.Request, s *trace.Span) *httptrace.ClientTrace {
return NewSpanAnnotatingClientTrace(r, s)
}
// NewSpanAnnotatingClientTrace returns a httptrace.ClientTrace which annotates
// all emitted httptrace events on the provided Span.
func NewSpanAnnotatingClientTrace(_ *http.Request, s *trace.Span) *httptrace.ClientTrace {
sa := spanAnnotator{sp: s}
return &httptrace.ClientTrace{
GetConn: sa.getConn,
GotConn: sa.gotConn,
PutIdleConn: sa.putIdleConn,
GotFirstResponseByte: sa.gotFirstResponseByte,
Got100Continue: sa.got100Continue,
DNSStart: sa.dnsStart,
DNSDone: sa.dnsDone,
ConnectStart: sa.connectStart,
ConnectDone: sa.connectDone,
TLSHandshakeStart: sa.tlsHandshakeStart,
TLSHandshakeDone: sa.tlsHandshakeDone,
WroteHeaders: sa.wroteHeaders,
Wait100Continue: sa.wait100Continue,
WroteRequest: sa.wroteRequest,
}
}
func (s spanAnnotator) getConn(hostPort string) {
attrs := []trace.Attribute{
trace.StringAttribute("httptrace.get_connection.host_port", hostPort),
}
s.sp.Annotate(attrs, "GetConn")
}
func (s spanAnnotator) gotConn(info httptrace.GotConnInfo) {
attrs := []trace.Attribute{
trace.BoolAttribute("httptrace.got_connection.reused", info.Reused),
trace.BoolAttribute("httptrace.got_connection.was_idle", info.WasIdle),
}
if info.WasIdle {
attrs = append(attrs,
trace.StringAttribute("httptrace.got_connection.idle_time", info.IdleTime.String()))
}
s.sp.Annotate(attrs, "GotConn")
}
// PutIdleConn implements a httptrace.ClientTrace hook
func (s spanAnnotator) putIdleConn(err error) {
var attrs []trace.Attribute
if err != nil {
attrs = append(attrs,
trace.StringAttribute("httptrace.put_idle_connection.error", err.Error()))
}
s.sp.Annotate(attrs, "PutIdleConn")
}
func (s spanAnnotator) gotFirstResponseByte() {
s.sp.Annotate(nil, "GotFirstResponseByte")
}
func (s spanAnnotator) got100Continue() {
s.sp.Annotate(nil, "Got100Continue")
}
func (s spanAnnotator) dnsStart(info httptrace.DNSStartInfo) {
attrs := []trace.Attribute{
trace.StringAttribute("httptrace.dns_start.host", info.Host),
}
s.sp.Annotate(attrs, "DNSStart")
}
func (s spanAnnotator) dnsDone(info httptrace.DNSDoneInfo) {
var addrs []string
for _, addr := range info.Addrs {
addrs = append(addrs, addr.String())
}
attrs := []trace.Attribute{
trace.StringAttribute("httptrace.dns_done.addrs", strings.Join(addrs, " , ")),
}
if info.Err != nil {
attrs = append(attrs,
trace.StringAttribute("httptrace.dns_done.error", info.Err.Error()))
}
s.sp.Annotate(attrs, "DNSDone")
}
func (s spanAnnotator) connectStart(network, addr string) {
attrs := []trace.Attribute{
trace.StringAttribute("httptrace.connect_start.network", network),
trace.StringAttribute("httptrace.connect_start.addr", addr),
}
s.sp.Annotate(attrs, "ConnectStart")
}
func (s spanAnnotator) connectDone(network, addr string, err error) {
attrs := []trace.Attribute{
trace.StringAttribute("httptrace.connect_done.network", network),
trace.StringAttribute("httptrace.connect_done.addr", addr),
}
if err != nil {
attrs = append(attrs,
trace.StringAttribute("httptrace.connect_done.error", err.Error()))
}
s.sp.Annotate(attrs, "ConnectDone")
}
func (s spanAnnotator) tlsHandshakeStart() {
s.sp.Annotate(nil, "TLSHandshakeStart")
}
func (s spanAnnotator) tlsHandshakeDone(_ tls.ConnectionState, err error) {
var attrs []trace.Attribute
if err != nil {
attrs = append(attrs,
trace.StringAttribute("httptrace.tls_handshake_done.error", err.Error()))
}
s.sp.Annotate(attrs, "TLSHandshakeDone")
}
func (s spanAnnotator) wroteHeaders() {
s.sp.Annotate(nil, "WroteHeaders")
}
func (s spanAnnotator) wait100Continue() {
s.sp.Annotate(nil, "Wait100Continue")
}
func (s spanAnnotator) wroteRequest(info httptrace.WroteRequestInfo) {
var attrs []trace.Attribute
if info.Err != nil {
attrs = append(attrs,
trace.StringAttribute("httptrace.wrote_request.error", info.Err.Error()))
}
s.sp.Annotate(attrs, "WroteRequest")
}
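A minimal usage sketch (not part of this diff), assuming an ordinary *http.Client and an illustrative span name: the ClientTrace returned above is attached to an outgoing request via httptrace.WithClientTrace so the low-level events are annotated on the span.

package main

import (
	"context"
	"log"
	"net/http"
	"net/http/httptrace"

	"go.opencensus.io/plugin/ochttp"
	"go.opencensus.io/trace"
)

// doTracedRequest issues a GET whose DNS, connect, TLS and write events are
// annotated on a dedicated span. The span name is an arbitrary example.
func doTracedRequest(client *http.Client, url string) (*http.Response, error) {
	ctx, span := trace.StartSpan(context.Background(), "example.Client.Get")
	defer span.End()

	req, err := http.NewRequest("GET", url, nil)
	if err != nil {
		return nil, err
	}
	// Annotate the span with httptrace events for this request.
	ct := ochttp.NewSpanAnnotatingClientTrace(req, span)
	req = req.WithContext(httptrace.WithClientTrace(ctx, ct))
	return client.Do(req)
}

func main() {
	resp, err := doTracedRequest(http.DefaultClient, "https://example.com/")
	if err != nil {
		log.Fatal(err)
	}
	resp.Body.Close()
}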

186
vendor/go.opencensus.io/plugin/ochttp/stats.go generated vendored Normal file
View File

@ -0,0 +1,186 @@
// Copyright 2018, OpenCensus Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package ochttp
import (
"go.opencensus.io/stats"
"go.opencensus.io/stats/view"
"go.opencensus.io/tag"
)
// The following client HTTP measures are supported for use in custom views.
var (
ClientRequestCount = stats.Int64("opencensus.io/http/client/request_count", "Number of HTTP requests started", stats.UnitDimensionless)
ClientRequestBytes = stats.Int64("opencensus.io/http/client/request_bytes", "HTTP request body size if set as ContentLength (uncompressed)", stats.UnitBytes)
ClientResponseBytes = stats.Int64("opencensus.io/http/client/response_bytes", "HTTP response body size (uncompressed)", stats.UnitBytes)
ClientLatency = stats.Float64("opencensus.io/http/client/latency", "End-to-end latency", stats.UnitMilliseconds)
)
// The following server HTTP measures are supported for use in custom views:
var (
ServerRequestCount = stats.Int64("opencensus.io/http/server/request_count", "Number of HTTP requests started", stats.UnitDimensionless)
ServerRequestBytes = stats.Int64("opencensus.io/http/server/request_bytes", "HTTP request body size if set as ContentLength (uncompressed)", stats.UnitBytes)
ServerResponseBytes = stats.Int64("opencensus.io/http/server/response_bytes", "HTTP response body size (uncompressed)", stats.UnitBytes)
ServerLatency = stats.Float64("opencensus.io/http/server/latency", "End-to-end latency", stats.UnitMilliseconds)
)
// The following tags are applied to stats recorded by this package. Host, Path
// and Method are applied to all measures. StatusCode is not applied to
// ClientRequestCount or ServerRequestCount, since it is recorded before the status is known.
var (
// Host is the value of the HTTP Host header.
//
// The value of this tag can be controlled by the HTTP client, so you need
// to watch out for potentially generating high-cardinality labels in your
// metrics backend if you use this tag in views.
Host, _ = tag.NewKey("http.host")
// StatusCode is the numeric HTTP response status code,
// or "error" if a transport error occurred and no status code was read.
StatusCode, _ = tag.NewKey("http.status")
// Path is the URL path (not including query string) in the request.
//
// The value of this tag can be controlled by the HTTP client, so you need
// to watch out for potentially generating high-cardinality labels in your
// metrics backend if you use this tag in views.
Path, _ = tag.NewKey("http.path")
// Method is the HTTP method of the request, capitalized (GET, POST, etc.).
Method, _ = tag.NewKey("http.method")
// KeyServerRoute is a low cardinality string representing the logical
	// handler of the request. This is usually the pattern registered on the
// ServeMux (or similar string).
KeyServerRoute, _ = tag.NewKey("http_server_route")
)
// Default distributions used by views in this package.
var (
DefaultSizeDistribution = view.Distribution(0, 1024, 2048, 4096, 16384, 65536, 262144, 1048576, 4194304, 16777216, 67108864, 268435456, 1073741824, 4294967296)
DefaultLatencyDistribution = view.Distribution(0, 1, 2, 3, 4, 5, 6, 8, 10, 13, 16, 20, 25, 30, 40, 50, 65, 80, 100, 130, 160, 200, 250, 300, 400, 500, 650, 800, 1000, 2000, 5000, 10000, 20000, 50000, 100000)
)
// Package ochttp provides some convenience views.
// You need to register the views for data to actually be collected.
var (
ClientRequestCountView = &view.View{
Name: "opencensus.io/http/client/request_count",
Description: "Count of HTTP requests started",
Measure: ClientRequestCount,
Aggregation: view.Count(),
}
ClientRequestBytesView = &view.View{
Name: "opencensus.io/http/client/request_bytes",
Description: "Size distribution of HTTP request body",
Measure: ClientRequestBytes,
Aggregation: DefaultSizeDistribution,
}
ClientResponseBytesView = &view.View{
Name: "opencensus.io/http/client/response_bytes",
Description: "Size distribution of HTTP response body",
Measure: ClientResponseBytes,
Aggregation: DefaultSizeDistribution,
}
ClientLatencyView = &view.View{
Name: "opencensus.io/http/client/latency",
Description: "Latency distribution of HTTP requests",
Measure: ClientLatency,
Aggregation: DefaultLatencyDistribution,
}
ClientRequestCountByMethod = &view.View{
Name: "opencensus.io/http/client/request_count_by_method",
Description: "Client request count by HTTP method",
TagKeys: []tag.Key{Method},
Measure: ClientRequestCount,
Aggregation: view.Count(),
}
ClientResponseCountByStatusCode = &view.View{
Name: "opencensus.io/http/client/response_count_by_status_code",
Description: "Client response count by status code",
TagKeys: []tag.Key{StatusCode},
Measure: ClientLatency,
Aggregation: view.Count(),
}
ServerRequestCountView = &view.View{
Name: "opencensus.io/http/server/request_count",
Description: "Count of HTTP requests started",
Measure: ServerRequestCount,
Aggregation: view.Count(),
}
ServerRequestBytesView = &view.View{
Name: "opencensus.io/http/server/request_bytes",
Description: "Size distribution of HTTP request body",
Measure: ServerRequestBytes,
Aggregation: DefaultSizeDistribution,
}
ServerResponseBytesView = &view.View{
Name: "opencensus.io/http/server/response_bytes",
Description: "Size distribution of HTTP response body",
Measure: ServerResponseBytes,
Aggregation: DefaultSizeDistribution,
}
ServerLatencyView = &view.View{
Name: "opencensus.io/http/server/latency",
Description: "Latency distribution of HTTP requests",
Measure: ServerLatency,
Aggregation: DefaultLatencyDistribution,
}
ServerRequestCountByMethod = &view.View{
Name: "opencensus.io/http/server/request_count_by_method",
Description: "Server request count by HTTP method",
TagKeys: []tag.Key{Method},
Measure: ServerRequestCount,
Aggregation: view.Count(),
}
ServerResponseCountByStatusCode = &view.View{
Name: "opencensus.io/http/server/response_count_by_status_code",
Description: "Server response count by status code",
TagKeys: []tag.Key{StatusCode},
Measure: ServerLatency,
Aggregation: view.Count(),
}
)
// DefaultClientViews are the default client views provided by this package.
var DefaultClientViews = []*view.View{
ClientRequestCountView,
ClientRequestBytesView,
ClientResponseBytesView,
ClientLatencyView,
ClientRequestCountByMethod,
ClientResponseCountByStatusCode,
}
// DefaultServerViews are the default server views provided by this package.
var DefaultServerViews = []*view.View{
ServerRequestCountView,
ServerRequestBytesView,
ServerResponseBytesView,
ServerLatencyView,
ServerRequestCountByMethod,
ServerResponseCountByStatusCode,
}
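A minimal sketch (not part of this diff) of the registration step the comment above calls out: until these convenience views are registered, the ochttp measures are recorded and immediately dropped.

package main

import (
	"log"

	"go.opencensus.io/plugin/ochttp"
	"go.opencensus.io/stats/view"
)

func main() {
	// Register both bundles; registering an identical view twice is treated as a success.
	if err := view.Register(ochttp.DefaultClientViews...); err != nil {
		log.Fatalf("register client views: %v", err)
	}
	if err := view.Register(ochttp.DefaultServerViews...); err != nil {
		log.Fatalf("register server views: %v", err)
	}
	// ... instrument clients/servers with the ochttp plugin and serve traffic ...
}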

228
vendor/go.opencensus.io/plugin/ochttp/trace.go generated vendored Normal file
View File

@ -0,0 +1,228 @@
// Copyright 2018, OpenCensus Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package ochttp
import (
"io"
"net/http"
"net/http/httptrace"
"go.opencensus.io/plugin/ochttp/propagation/b3"
"go.opencensus.io/trace"
"go.opencensus.io/trace/propagation"
)
// TODO(jbd): Add godoc examples.
var defaultFormat propagation.HTTPFormat = &b3.HTTPFormat{}
// Attributes recorded on the span for the requests.
// Only trace exporters will need them.
const (
HostAttribute = "http.host"
MethodAttribute = "http.method"
PathAttribute = "http.path"
UserAgentAttribute = "http.user_agent"
StatusCodeAttribute = "http.status_code"
)
type traceTransport struct {
base http.RoundTripper
startOptions trace.StartOptions
format propagation.HTTPFormat
formatSpanName func(*http.Request) string
newClientTrace func(*http.Request, *trace.Span) *httptrace.ClientTrace
}
// TODO(jbd): Add message events for request and response size.
// RoundTrip creates a trace.Span and inserts it into the outgoing request's headers.
// The created span can follow a parent span, if a parent is presented in
// the request's context.
func (t *traceTransport) RoundTrip(req *http.Request) (*http.Response, error) {
name := t.formatSpanName(req)
// TODO(jbd): Discuss whether we want to prefix
// outgoing requests with Sent.
ctx, span := trace.StartSpan(req.Context(), name,
trace.WithSampler(t.startOptions.Sampler),
trace.WithSpanKind(trace.SpanKindClient))
if t.newClientTrace != nil {
req = req.WithContext(httptrace.WithClientTrace(ctx, t.newClientTrace(req, span)))
} else {
req = req.WithContext(ctx)
}
if t.format != nil {
// SpanContextToRequest will modify its Request argument, which is
// contrary to the contract for http.RoundTripper, so we need to
// pass it a copy of the Request.
// However, the Request struct itself was already copied by
// the WithContext calls above and so we just need to copy the header.
header := make(http.Header)
for k, v := range req.Header {
header[k] = v
}
req.Header = header
t.format.SpanContextToRequest(span.SpanContext(), req)
}
span.AddAttributes(requestAttrs(req)...)
resp, err := t.base.RoundTrip(req)
if err != nil {
span.SetStatus(trace.Status{Code: trace.StatusCodeUnknown, Message: err.Error()})
span.End()
return resp, err
}
span.AddAttributes(responseAttrs(resp)...)
span.SetStatus(TraceStatus(resp.StatusCode, resp.Status))
// span.End() will be invoked after
// a read from resp.Body returns io.EOF or when
// resp.Body.Close() is invoked.
resp.Body = &bodyTracker{rc: resp.Body, span: span}
return resp, err
}
// bodyTracker wraps a response.Body and invokes
// trace.EndSpan on encountering io.EOF on reading
// the body of the original response.
type bodyTracker struct {
rc io.ReadCloser
span *trace.Span
}
var _ io.ReadCloser = (*bodyTracker)(nil)
func (bt *bodyTracker) Read(b []byte) (int, error) {
n, err := bt.rc.Read(b)
switch err {
case nil:
return n, nil
case io.EOF:
bt.span.End()
default:
// For all other errors, set the span status
bt.span.SetStatus(trace.Status{
// Code 2 is the error code for Internal server error.
Code: 2,
Message: err.Error(),
})
}
return n, err
}
func (bt *bodyTracker) Close() error {
// Invoking endSpan on Close will help catch the cases
// in which a read returned a non-nil error, we set the
// span status but didn't end the span.
bt.span.End()
return bt.rc.Close()
}
// CancelRequest cancels an in-flight request by closing its connection.
func (t *traceTransport) CancelRequest(req *http.Request) {
type canceler interface {
CancelRequest(*http.Request)
}
if cr, ok := t.base.(canceler); ok {
cr.CancelRequest(req)
}
}
func spanNameFromURL(req *http.Request) string {
return req.URL.Path
}
func requestAttrs(r *http.Request) []trace.Attribute {
return []trace.Attribute{
trace.StringAttribute(PathAttribute, r.URL.Path),
trace.StringAttribute(HostAttribute, r.URL.Host),
trace.StringAttribute(MethodAttribute, r.Method),
trace.StringAttribute(UserAgentAttribute, r.UserAgent()),
}
}
func responseAttrs(resp *http.Response) []trace.Attribute {
return []trace.Attribute{
trace.Int64Attribute(StatusCodeAttribute, int64(resp.StatusCode)),
}
}
// TraceStatus is a utility to convert the HTTP status code to a trace.Status that
// represents the outcome as closely as possible.
func TraceStatus(httpStatusCode int, statusLine string) trace.Status {
var code int32
if httpStatusCode < 200 || httpStatusCode >= 400 {
code = trace.StatusCodeUnknown
}
switch httpStatusCode {
case 499:
code = trace.StatusCodeCancelled
case http.StatusBadRequest:
code = trace.StatusCodeInvalidArgument
case http.StatusGatewayTimeout:
code = trace.StatusCodeDeadlineExceeded
case http.StatusNotFound:
code = trace.StatusCodeNotFound
case http.StatusForbidden:
code = trace.StatusCodePermissionDenied
case http.StatusUnauthorized: // 401 is actually unauthenticated.
code = trace.StatusCodeUnauthenticated
case http.StatusTooManyRequests:
code = trace.StatusCodeResourceExhausted
case http.StatusNotImplemented:
code = trace.StatusCodeUnimplemented
case http.StatusServiceUnavailable:
code = trace.StatusCodeUnavailable
case http.StatusOK:
code = trace.StatusCodeOK
}
return trace.Status{Code: code, Message: codeToStr[code]}
}
var codeToStr = map[int32]string{
trace.StatusCodeOK: `OK`,
trace.StatusCodeCancelled: `CANCELLED`,
trace.StatusCodeUnknown: `UNKNOWN`,
trace.StatusCodeInvalidArgument: `INVALID_ARGUMENT`,
trace.StatusCodeDeadlineExceeded: `DEADLINE_EXCEEDED`,
trace.StatusCodeNotFound: `NOT_FOUND`,
trace.StatusCodeAlreadyExists: `ALREADY_EXISTS`,
trace.StatusCodePermissionDenied: `PERMISSION_DENIED`,
trace.StatusCodeResourceExhausted: `RESOURCE_EXHAUSTED`,
trace.StatusCodeFailedPrecondition: `FAILED_PRECONDITION`,
trace.StatusCodeAborted: `ABORTED`,
trace.StatusCodeOutOfRange: `OUT_OF_RANGE`,
trace.StatusCodeUnimplemented: `UNIMPLEMENTED`,
trace.StatusCodeInternal: `INTERNAL`,
trace.StatusCodeUnavailable: `UNAVAILABLE`,
trace.StatusCodeDataLoss: `DATA_LOSS`,
trace.StatusCodeUnauthenticated: `UNAUTHENTICATED`,
}
func isHealthEndpoint(path string) bool {
// Health checking is pretty frequent and
// traces collected for health endpoints
// can be extremely noisy and expensive.
// Disable canonical health checking endpoints
// like /healthz and /_ah/health for now.
if path == "/healthz" || path == "/_ah/health" {
return true
}
return false
}
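A small sketch (not part of this diff) of the exported TraceStatus helper on its own; the status-line string is only documentation here, and the printed code/message pair follows the mapping above.

package main

import (
	"fmt"
	"net/http"

	"go.opencensus.io/plugin/ochttp"
)

func main() {
	// 404 maps to the NOT_FOUND trace status per the switch above.
	st := ochttp.TraceStatus(http.StatusNotFound, "404 Not Found")
	fmt.Println(st.Code, st.Message) // e.g. 5 NOT_FOUND

	// A 2xx code other than 200 keeps the zero (OK) code, because neither the
	// range check nor the switch above overrides it.
	fmt.Println(ochttp.TraceStatus(http.StatusAccepted, "202 Accepted").Code)
}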

55
vendor/go.opencensus.io/stats/doc.go generated vendored Normal file
View File

@ -0,0 +1,55 @@
// Copyright 2017, OpenCensus Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
/*
Package stats contains support for OpenCensus stats recording.
OpenCensus allows users to create typed measures, record measurements,
aggregate the collected data, and export the aggregated data.
Measures
A measure represents a type of metric to be tracked and recorded.
For example, latency, request Mb/s, and response Mb/s are measures
to collect from a server.
Each measure needs to be registered before being used. Measure
constructors such as Int64 and Float64 automatically
register the measure by the given name. Each registered measure needs
to be unique by name. Measures also have a description and a unit.
Libraries can define and export measures for their end users to
create views and collect instrumentation data.
Recording measurements
Measurement is a data point to be collected for a measure. For example,
for a latency (ms) measure, 100 is a measurement that represents a 100ms
latency event. Users collect data points on the existing measures with
the current context. Tags from the current context are recorded with the
measurements if there are any.
Recorded measurements are dropped immediately if the user is not aggregating
them via views. Users don't necessarily need to conditionally enable/disable
recording to reduce cost. Recording of measurements is cheap.
Libraries can always record measurements, and end-users can later decide
on which measurements they want to collect by registering views. This allows
libraries to turn on the instrumentation by default.
*/
package stats // import "go.opencensus.io/stats"
// TODO(acetechnologist): Add a link to the language independent OpenCensus
// spec when it is available.
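A minimal end-to-end sketch (not part of this diff) of the pattern described above; the measure name and recorded value are illustrative.

package main

import (
	"context"

	"go.opencensus.io/stats"
)

// videoSize is registered once by the stats.Int64 constructor.
var videoSize = stats.Int64("example.com/measures/video_size", "size of processed videos", stats.UnitBytes)

func process(ctx context.Context) {
	// The data point is dropped unless a view over videoSize is registered.
	stats.Record(ctx, videoSize.M(25648))
}

func main() {
	process(context.Background())
}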

25
vendor/go.opencensus.io/stats/internal/record.go generated vendored Normal file
View File

@ -0,0 +1,25 @@
// Copyright 2018, OpenCensus Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package internal
import (
"go.opencensus.io/tag"
)
// DefaultRecorder will be called for each Record call.
var DefaultRecorder func(*tag.Map, interface{})
// SubscriptionReporter reports when a view subscribed with a measure.
var SubscriptionReporter func(measure string)

28
vendor/go.opencensus.io/stats/internal/validation.go generated vendored Normal file
View File

@ -0,0 +1,28 @@
// Copyright 2018, OpenCensus Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package internal // import "go.opencensus.io/stats/internal"
const (
MaxNameLength = 255
)
func IsPrintable(str string) bool {
for _, r := range str {
if !(r >= ' ' && r <= '~') {
return false
}
}
return true
}

123
vendor/go.opencensus.io/stats/measure.go generated vendored Normal file
View File

@ -0,0 +1,123 @@
// Copyright 2017, OpenCensus Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
package stats
import (
"sync"
"sync/atomic"
)
// Measure represents a single numeric value to be tracked and recorded.
// For example, latency, request bytes, and response bytes could be measures
// to collect from a server.
//
// Measures by themselves have no outside effects. In order to be exported,
// the measure needs to be used in a View. If no Views are defined over a
// measure, there is very little cost in recording it.
type Measure interface {
// Name returns the name of this measure.
//
// Measure names are globally unique (among all libraries linked into your program).
// We recommend prefixing the measure name with a domain name relevant to your
// project or application.
//
// Measure names are never sent over the wire or exported to backends.
// They are only used to create Views.
Name() string
// Description returns the human-readable description of this measure.
Description() string
// Unit returns the units for the values this measure takes on.
//
// Units are encoded according to the case-sensitive abbreviations from the
// Unified Code for Units of Measure: http://unitsofmeasure.org/ucum.html
Unit() string
}
// measureDescriptor is the untyped descriptor associated with each measure.
// Int64Measure and Float64Measure wrap measureDescriptor to provide typed
// recording APIs.
// Two Measures with the same name will have the same measureDescriptor.
type measureDescriptor struct {
subs int32 // access atomically
name string
description string
unit string
}
func (m *measureDescriptor) subscribe() {
atomic.StoreInt32(&m.subs, 1)
}
func (m *measureDescriptor) subscribed() bool {
return atomic.LoadInt32(&m.subs) == 1
}
// Name returns the name of the measure.
func (m *measureDescriptor) Name() string {
return m.name
}
// Description returns the description of the measure.
func (m *measureDescriptor) Description() string {
return m.description
}
// Unit returns the unit of the measure.
func (m *measureDescriptor) Unit() string {
return m.unit
}
var (
mu sync.RWMutex
measures = make(map[string]*measureDescriptor)
)
func registerMeasureHandle(name, desc, unit string) *measureDescriptor {
mu.Lock()
defer mu.Unlock()
if stored, ok := measures[name]; ok {
return stored
}
m := &measureDescriptor{
name: name,
description: desc,
unit: unit,
}
measures[name] = m
return m
}
// Measurement is the numeric value measured when recording stats. Each measure
// provides methods to create measurements of their kind. For example, Int64Measure
// provides M to convert an int64 into a measurement.
type Measurement struct {
v float64
m *measureDescriptor
}
// Value returns the value of the Measurement as a float64.
func (m Measurement) Value() float64 {
return m.v
}
// Measure returns the Measure from which this Measurement was created.
func (m Measurement) Measure() Measure {
return m.m
}

36
vendor/go.opencensus.io/stats/measure_float64.go generated vendored Normal file
View File

@ -0,0 +1,36 @@
// Copyright 2017, OpenCensus Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
package stats
// Float64Measure is a measure for float64 values.
type Float64Measure struct {
*measureDescriptor
}
// M creates a new float64 measurement.
// Use Record to record measurements.
func (m *Float64Measure) M(v float64) Measurement {
return Measurement{m: m.measureDescriptor, v: v}
}
// Float64 creates a new measure for float64 values.
//
// See the documentation for interface Measure for more guidance on the
// parameters of this function.
func Float64(name, description, unit string) *Float64Measure {
mi := registerMeasureHandle(name, description, unit)
return &Float64Measure{mi}
}

36
vendor/go.opencensus.io/stats/measure_int64.go generated vendored Normal file
View File

@ -0,0 +1,36 @@
// Copyright 2017, OpenCensus Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
package stats
// Int64Measure is a measure for int64 values.
type Int64Measure struct {
*measureDescriptor
}
// M creates a new int64 measurement.
// Use Record to record measurements.
func (m *Int64Measure) M(v int64) Measurement {
return Measurement{m: m.measureDescriptor, v: float64(v)}
}
// Int64 creates a new measure for int64 values.
//
// See the documentation for interface Measure for more guidance on the
// parameters of this function.
func Int64(name, description, unit string) *Int64Measure {
mi := registerMeasureHandle(name, description, unit)
return &Int64Measure{mi}
}

54
vendor/go.opencensus.io/stats/record.go generated vendored Normal file
View File

@ -0,0 +1,54 @@
// Copyright 2018, OpenCensus Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
package stats
import (
"context"
"go.opencensus.io/stats/internal"
"go.opencensus.io/tag"
)
func init() {
internal.SubscriptionReporter = func(measure string) {
mu.Lock()
measures[measure].subscribe()
mu.Unlock()
}
}
// Record records one or multiple measurements with the same context at once.
// If there are any tags in the context, measurements will be tagged with them.
func Record(ctx context.Context, ms ...Measurement) {
recorder := internal.DefaultRecorder
if recorder == nil {
return
}
if len(ms) == 0 {
return
}
record := false
for _, m := range ms {
if m.m.subscribed() {
record = true
break
}
}
if !record {
return
}
recorder(tag.FromContext(ctx), ms)
}
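A minimal sketch (not part of this diff) of recording with tags, as the Record comment above describes: tag.New and tag.Upsert come from the tag package (not shown in this hunk), and the key and measure names are illustrative. Record picks the tag map up via tag.FromContext.

package main

import (
	"context"
	"log"

	"go.opencensus.io/stats"
	"go.opencensus.io/tag"
)

var (
	keyMethod, _ = tag.NewKey("example.com/keys/method")
	requestBytes = stats.Int64("example.com/measures/request_bytes", "request body size", stats.UnitBytes)
)

func handle(ctx context.Context) {
	// Attach a tag value to the context so it is recorded with the measurement.
	ctx, err := tag.New(ctx, tag.Upsert(keyMethod, "GET"))
	if err != nil {
		log.Println(err)
	}
	stats.Record(ctx, requestBytes.M(512))
}

func main() {
	handle(context.Background())
}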

25
vendor/go.opencensus.io/stats/units.go generated vendored Normal file
View File

@ -0,0 +1,25 @@
// Copyright 2018, OpenCensus Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
package stats
// Units are encoded according to the case-sensitive abbreviations from the
// Unified Code for Units of Measure: http://unitsofmeasure.org/ucum.html
const (
UnitNone = "1" // Deprecated: Use UnitDimensionless.
UnitDimensionless = "1"
UnitBytes = "By"
UnitMilliseconds = "ms"
)

120
vendor/go.opencensus.io/stats/view/aggregation.go generated vendored Normal file
View File

@ -0,0 +1,120 @@
// Copyright 2017, OpenCensus Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
package view
// AggType represents the type of aggregation function used on a View.
type AggType int
// All available aggregation types.
const (
AggTypeNone AggType = iota // no aggregation; reserved for future use.
AggTypeCount // the count aggregation, see Count.
AggTypeSum // the sum aggregation, see Sum.
AggTypeDistribution // the distribution aggregation, see Distribution.
AggTypeLastValue // the last value aggregation, see LastValue.
)
func (t AggType) String() string {
return aggTypeName[t]
}
var aggTypeName = map[AggType]string{
AggTypeNone: "None",
AggTypeCount: "Count",
AggTypeSum: "Sum",
AggTypeDistribution: "Distribution",
AggTypeLastValue: "LastValue",
}
// Aggregation represents a data aggregation method. Use one of the functions:
// Count, Sum, or Distribution to construct an Aggregation.
type Aggregation struct {
Type AggType // Type is the AggType of this Aggregation.
Buckets []float64 // Buckets are the bucket endpoints if this Aggregation represents a distribution, see Distribution.
newData func() AggregationData
}
var (
aggCount = &Aggregation{
Type: AggTypeCount,
newData: func() AggregationData {
return &CountData{}
},
}
aggSum = &Aggregation{
Type: AggTypeSum,
newData: func() AggregationData {
return &SumData{}
},
}
)
// Count indicates that data collected and aggregated
// with this method will be turned into a count value.
// For example, total number of accepted requests can be
// aggregated by using Count.
func Count() *Aggregation {
return aggCount
}
// Sum indicates that data collected and aggregated
// with this method will be summed up.
// For example, accumulated request bytes can be aggregated by using
// Sum.
func Sum() *Aggregation {
return aggSum
}
// Distribution indicates that the desired aggregation is
// a histogram distribution.
//
// A distribution aggregation may contain a histogram of the values in the
// population. The bucket boundaries for that histogram are described
// by the bounds. This defines len(bounds)+1 buckets.
//
// If len(bounds) >= 2 then the boundaries for bucket index i are:
//
// [-infinity, bounds[i]) for i = 0
// [bounds[i-1], bounds[i]) for 0 < i < length
// [bounds[i-1], +infinity) for i = length
//
// If len(bounds) is 0 then there is no histogram associated with the
// distribution. There will be a single bucket with boundaries
// (-infinity, +infinity).
//
// If len(bounds) is 1 then there are no finite buckets, and that single
// element is the common boundary of the overflow and underflow buckets.
func Distribution(bounds ...float64) *Aggregation {
return &Aggregation{
Type: AggTypeDistribution,
Buckets: bounds,
newData: func() AggregationData {
return newDistributionData(bounds)
},
}
}
// LastValue only reports the last value recorded using this
// aggregation. All other measurements will be dropped.
func LastValue() *Aggregation {
return &Aggregation{
Type: AggTypeLastValue,
newData: func() AggregationData {
return &LastValueData{}
},
}
}
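A tiny sketch (not part of this diff) of the bucket layout documented above, for an assumed set of bounds.

package main

import (
	"fmt"

	"go.opencensus.io/stats/view"
)

// Bounds 0, 10, 100 define len(bounds)+1 = 4 buckets:
//
//	[-inf, 0), [0, 10), [10, 100), [100, +inf)
var latencyBuckets = view.Distribution(0, 10, 100)

func main() {
	fmt.Println(len(latencyBuckets.Buckets) + 1) // 4 buckets
}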

207
vendor/go.opencensus.io/stats/view/aggregation_data.go generated vendored Normal file
View File

@ -0,0 +1,207 @@
// Copyright 2017, OpenCensus Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
package view
import (
"math"
)
// AggregationData represents an aggregated value from a collection.
// They are reported on the view data during exporting.
// Most users won't directly access aggregation data.
type AggregationData interface {
isAggregationData() bool
addSample(v float64)
clone() AggregationData
equal(other AggregationData) bool
}
const epsilon = 1e-9
// CountData is the aggregated data for the Count aggregation.
// A count aggregation processes data and counts the recordings.
//
// Most users won't directly access count data.
type CountData struct {
Value int64
}
func (a *CountData) isAggregationData() bool { return true }
func (a *CountData) addSample(v float64) {
a.Value = a.Value + 1
}
func (a *CountData) clone() AggregationData {
return &CountData{Value: a.Value}
}
func (a *CountData) equal(other AggregationData) bool {
a2, ok := other.(*CountData)
if !ok {
return false
}
return a.Value == a2.Value
}
// SumData is the aggregated data for the Sum aggregation.
// A sum aggregation processes data and sums up the recordings.
//
// Most users won't directly access sum data.
type SumData struct {
Value float64
}
func (a *SumData) isAggregationData() bool { return true }
func (a *SumData) addSample(f float64) {
a.Value += f
}
func (a *SumData) clone() AggregationData {
return &SumData{Value: a.Value}
}
func (a *SumData) equal(other AggregationData) bool {
a2, ok := other.(*SumData)
if !ok {
return false
}
return math.Pow(a.Value-a2.Value, 2) < epsilon
}
// DistributionData is the aggregated data for the
// Distribution aggregation.
//
// Most users won't directly access distribution data.
type DistributionData struct {
Count int64 // number of data points aggregated
Min float64 // minimum value in the distribution
Max float64 // max value in the distribution
Mean float64 // mean of the distribution
SumOfSquaredDev float64 // sum of the squared deviation from the mean
CountPerBucket []int64 // number of occurrences per bucket
bounds []float64 // histogram distribution of the values
}
func newDistributionData(bounds []float64) *DistributionData {
return &DistributionData{
CountPerBucket: make([]int64, len(bounds)+1),
bounds: bounds,
Min: math.MaxFloat64,
Max: math.SmallestNonzeroFloat64,
}
}
// Sum returns the sum of all samples collected.
func (a *DistributionData) Sum() float64 { return a.Mean * float64(a.Count) }
func (a *DistributionData) variance() float64 {
if a.Count <= 1 {
return 0
}
return a.SumOfSquaredDev / float64(a.Count-1)
}
func (a *DistributionData) isAggregationData() bool { return true }
func (a *DistributionData) addSample(f float64) {
if f < a.Min {
a.Min = f
}
if f > a.Max {
a.Max = f
}
a.Count++
a.incrementBucketCount(f)
if a.Count == 1 {
a.Mean = f
return
}
oldMean := a.Mean
a.Mean = a.Mean + (f-a.Mean)/float64(a.Count)
a.SumOfSquaredDev = a.SumOfSquaredDev + (f-oldMean)*(f-a.Mean)
}
func (a *DistributionData) incrementBucketCount(f float64) {
if len(a.bounds) == 0 {
a.CountPerBucket[0]++
return
}
for i, b := range a.bounds {
if f < b {
a.CountPerBucket[i]++
return
}
}
a.CountPerBucket[len(a.bounds)]++
}
func (a *DistributionData) clone() AggregationData {
counts := make([]int64, len(a.CountPerBucket))
copy(counts, a.CountPerBucket)
c := *a
c.CountPerBucket = counts
return &c
}
func (a *DistributionData) equal(other AggregationData) bool {
a2, ok := other.(*DistributionData)
if !ok {
return false
}
if a2 == nil {
return false
}
if len(a.CountPerBucket) != len(a2.CountPerBucket) {
return false
}
for i := range a.CountPerBucket {
if a.CountPerBucket[i] != a2.CountPerBucket[i] {
return false
}
}
return a.Count == a2.Count && a.Min == a2.Min && a.Max == a2.Max && math.Pow(a.Mean-a2.Mean, 2) < epsilon && math.Pow(a.variance()-a2.variance(), 2) < epsilon
}
// LastValueData contains the last value recorded for the LastValue aggregation.
type LastValueData struct {
Value float64
}
func (l *LastValueData) isAggregationData() bool {
return true
}
func (l *LastValueData) addSample(v float64) {
l.Value = v
}
func (l *LastValueData) clone() AggregationData {
return &LastValueData{l.Value}
}
func (l *LastValueData) equal(other AggregationData) bool {
a2, ok := other.(*LastValueData)
if !ok {
return false
}
return l.Value == a2.Value
}
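A standalone sketch (not part of this diff) of the same running-mean update that DistributionData.addSample performs above; it lives outside the package because addSample is unexported, and the sample values are arbitrary.

package main

import "fmt"

func main() {
	var count int64
	var mean, sumSqDev float64
	for _, f := range []float64{2, 4, 9} {
		count++
		if count == 1 {
			mean = f
			continue
		}
		oldMean := mean
		mean += (f - mean) / float64(count)
		sumSqDev += (f - oldMean) * (f - mean)
	}
	// mean == 5, sumSqDev == 26, so the sample variance is 26/(3-1) = 13.
	fmt.Println(count, mean, sumSqDev)
}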

85
vendor/go.opencensus.io/stats/view/collector.go generated vendored Normal file
View File

@ -0,0 +1,85 @@
// Copyright 2017, OpenCensus Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
package view
import (
"sort"
"go.opencensus.io/internal/tagencoding"
"go.opencensus.io/tag"
)
type collector struct {
// signatures holds the aggregations values for each unique tag signature
// (values for all keys) to its aggregator.
signatures map[string]AggregationData
// Aggregation is the description of the aggregation to perform for this
// view.
a *Aggregation
}
func (c *collector) addSample(s string, v float64) {
aggregator, ok := c.signatures[s]
if !ok {
aggregator = c.a.newData()
c.signatures[s] = aggregator
}
aggregator.addSample(v)
}
// collectRows returns a snapshot of the collected Row values.
func (c *collector) collectedRows(keys []tag.Key) []*Row {
rows := make([]*Row, 0, len(c.signatures))
for sig, aggregator := range c.signatures {
tags := decodeTags([]byte(sig), keys)
row := &Row{Tags: tags, Data: aggregator.clone()}
rows = append(rows, row)
}
return rows
}
func (c *collector) clearRows() {
c.signatures = make(map[string]AggregationData)
}
// encodeWithKeys encodes the map by using values
// only associated with the keys provided.
func encodeWithKeys(m *tag.Map, keys []tag.Key) []byte {
vb := &tagencoding.Values{
Buffer: make([]byte, len(keys)),
}
for _, k := range keys {
v, _ := m.Value(k)
vb.WriteValue([]byte(v))
}
return vb.Bytes()
}
// decodeTags decodes tags from the buffer and
// orders them by the keys.
func decodeTags(buf []byte, keys []tag.Key) []tag.Tag {
vb := &tagencoding.Values{Buffer: buf}
var tags []tag.Tag
for _, k := range keys {
v := vb.ReadValue()
if v != nil {
tags = append(tags, tag.Tag{Key: k, Value: string(v)})
}
}
vb.ReadIndex = 0
sort.Slice(tags, func(i, j int) bool { return tags[i].Key.Name() < tags[j].Key.Name() })
return tags
}

47
vendor/go.opencensus.io/stats/view/doc.go generated vendored Normal file
View File

@ -0,0 +1,47 @@
// Copyright 2017, OpenCensus Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
// Package view contains support for collecting and exposing aggregates over stats.
//
// In order to collect measurements, views need to be defined and registered.
// A view allows recorded measurements to be filtered and aggregated.
//
// All recorded measurements can be grouped by a list of tags.
//
// OpenCensus provides several aggregation methods: Count, Distribution and Sum.
//
// Count only counts the number of measurement points recorded.
// Distribution provides statistical summary of the aggregated data by counting
// how many recorded measurements fall into each bucket.
// Sum adds up the measurement values.
// LastValue just keeps track of the most recently recorded measurement value.
// All aggregations are cumulative.
//
// Views can be registered and unregistered at any time during program execution.
//
// Libraries can define views but it is recommended that in most cases registering
// views be left up to applications.
//
// Exporting
//
// Collected and aggregated data can be exported to a metric collection
// backend by registering its exporter.
//
// Multiple exporters can be registered to upload the data to various
// different back ends.
package view // import "go.opencensus.io/stats/view"
// TODO(acetechnologist): Add a link to the language independent OpenCensus
// spec when it is available.
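A minimal sketch (not part of this diff) of the define-and-register flow this package comment describes; the measure, key, and view names are illustrative.

package main

import (
	"log"

	"go.opencensus.io/stats"
	"go.opencensus.io/stats/view"
	"go.opencensus.io/tag"
)

var (
	keyRoute, _ = tag.NewKey("example.com/keys/route")
	latencyMs   = stats.Float64("example.com/measures/latency", "request latency", stats.UnitMilliseconds)

	latencyView = &view.View{
		Name:        "example.com/views/latency",
		Description: "latency distribution grouped by route",
		Measure:     latencyMs,
		TagKeys:     []tag.Key{keyRoute},
		Aggregation: view.Distribution(0, 25, 100, 400, 1600),
	}
)

func main() {
	if err := view.Register(latencyView); err != nil {
		log.Fatalf("register view: %v", err)
	}
	// ... record latencyMs measurements with stats.Record as traffic arrives ...
}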

58
vendor/go.opencensus.io/stats/view/export.go generated vendored Normal file
View File

@ -0,0 +1,58 @@
// Copyright 2017, OpenCensus Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package view
import "sync"
var (
exportersMu sync.RWMutex // guards exporters
exporters = make(map[Exporter]struct{})
)
// Exporter exports the collected records as view data.
//
// The ExportView method should return quickly; if an
// Exporter takes a significant amount of time to
// process a Data, that work should be done on another goroutine.
//
// It is safe to assume that ExportView will not be called concurrently from
// multiple goroutines.
//
// The Data should not be modified.
type Exporter interface {
ExportView(viewData *Data)
}
// RegisterExporter registers an exporter.
// Collected data will be reported via all the
// registered exporters. Once you no longer
// want data to be exported, invoke UnregisterExporter
// with the previously registered exporter.
//
// Binaries can register exporters; libraries shouldn't register exporters.
func RegisterExporter(e Exporter) {
exportersMu.Lock()
defer exportersMu.Unlock()
exporters[e] = struct{}{}
}
// UnregisterExporter unregisters an exporter.
func UnregisterExporter(e Exporter) {
exportersMu.Lock()
defer exportersMu.Unlock()
delete(exporters, e)
}
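A toy exporter sketch (not part of this diff) that just logs each exported Data; real exporters forward the rows to a metrics backend.

package main

import (
	"log"

	"go.opencensus.io/stats/view"
)

type logExporter struct{}

// ExportView is invoked by the worker with a snapshot of the rows collected
// for one view over the reporting window.
func (logExporter) ExportView(vd *view.Data) {
	for _, row := range vd.Rows {
		log.Printf("view=%q start=%s end=%s %v", vd.View.Name, vd.Start, vd.End, row)
	}
}

func main() {
	view.RegisterExporter(logExporter{})
	// ... register views and record measurements; rows arrive periodically ...
}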

183
vendor/go.opencensus.io/stats/view/view.go generated vendored Normal file
View File

@ -0,0 +1,183 @@
// Copyright 2017, OpenCensus Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
package view
import (
"bytes"
"fmt"
"reflect"
"sort"
"sync/atomic"
"time"
"go.opencensus.io/stats"
"go.opencensus.io/stats/internal"
"go.opencensus.io/tag"
)
// View allows users to aggregate the recorded stats.Measurements.
// Views need to be passed to the Register function before data will be
// collected and sent to Exporters.
type View struct {
Name string // Name of View. Must be unique. If unset, will default to the name of the Measure.
Description string // Description is a human-readable description for this view.
// TagKeys are the tag keys describing the grouping of this view.
// A single Row will be produced for each combination of associated tag values.
TagKeys []tag.Key
// Measure is a stats.Measure to aggregate in this view.
Measure stats.Measure
// Aggregation is the aggregation function to apply to the set of Measurements.
Aggregation *Aggregation
}
// WithName returns a copy of the View with a new name. This is useful for
// renaming views to cope with limitations placed on metric names by various
// backends.
func (v *View) WithName(name string) *View {
vNew := *v
vNew.Name = name
return &vNew
}
// same compares two views and returns true if they represent the same aggregation.
func (v *View) same(other *View) bool {
if v == other {
return true
}
if v == nil {
return false
}
return reflect.DeepEqual(v.Aggregation, other.Aggregation) &&
v.Measure.Name() == other.Measure.Name()
}
// canonicalize canonicalizes v by setting explicit
// defaults for Name and Description and sorting the TagKeys
func (v *View) canonicalize() error {
if v.Measure == nil {
return fmt.Errorf("cannot register view %q: measure not set", v.Name)
}
if v.Aggregation == nil {
return fmt.Errorf("cannot register view %q: aggregation not set", v.Name)
}
if v.Name == "" {
v.Name = v.Measure.Name()
}
if v.Description == "" {
v.Description = v.Measure.Description()
}
if err := checkViewName(v.Name); err != nil {
return err
}
sort.Slice(v.TagKeys, func(i, j int) bool {
return v.TagKeys[i].Name() < v.TagKeys[j].Name()
})
return nil
}
// viewInternal is the internal representation of a View.
type viewInternal struct {
view *View // view is the canonicalized View definition associated with this view.
subscribed uint32 // 1 if someone is subscribed and data needs to be exported, use atomic to access
collector *collector
}
func newViewInternal(v *View) (*viewInternal, error) {
return &viewInternal{
view: v,
collector: &collector{make(map[string]AggregationData), v.Aggregation},
}, nil
}
func (v *viewInternal) subscribe() {
atomic.StoreUint32(&v.subscribed, 1)
}
func (v *viewInternal) unsubscribe() {
atomic.StoreUint32(&v.subscribed, 0)
}
// isSubscribed returns true if the view is exporting
// data by subscription.
func (v *viewInternal) isSubscribed() bool {
return atomic.LoadUint32(&v.subscribed) == 1
}
func (v *viewInternal) clearRows() {
v.collector.clearRows()
}
func (v *viewInternal) collectedRows() []*Row {
return v.collector.collectedRows(v.view.TagKeys)
}
func (v *viewInternal) addSample(m *tag.Map, val float64) {
if !v.isSubscribed() {
return
}
sig := string(encodeWithKeys(m, v.view.TagKeys))
v.collector.addSample(sig, val)
}
// A Data is a set of rows about usage of the single measure associated
// with the given view. Each row is specific to a unique set of tags.
type Data struct {
View *View
Start, End time.Time
Rows []*Row
}
// Row is the collected value for a specific set of key-value pairs, a.k.a. tags.
type Row struct {
Tags []tag.Tag
Data AggregationData
}
func (r *Row) String() string {
var buffer bytes.Buffer
buffer.WriteString("{ ")
buffer.WriteString("{ ")
for _, t := range r.Tags {
buffer.WriteString(fmt.Sprintf("{%v %v}", t.Key.Name(), t.Value))
}
buffer.WriteString(" }")
buffer.WriteString(fmt.Sprintf("%v", r.Data))
buffer.WriteString(" }")
return buffer.String()
}
// Equal returns true if both rows are equal. Tags are expected to be ordered
// by the key name. Even if both rows have the same tags, Equal returns false
// when the tags appear in different orders.
func (r *Row) Equal(other *Row) bool {
if r == other {
return true
}
return reflect.DeepEqual(r.Tags, other.Tags) && r.Data.equal(other.Data)
}
func checkViewName(name string) error {
if len(name) > internal.MaxNameLength {
return fmt.Errorf("view name cannot be larger than %v", internal.MaxNameLength)
}
if !internal.IsPrintable(name) {
return fmt.Errorf("view name needs to be an ASCII string")
}
return nil
}
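A short sketch (not part of this diff) of WithName, reusing one of the ochttp convenience views as an assumed example of a canonical name that some backends reject.

package main

import (
	"log"

	"go.opencensus.io/plugin/ochttp"
	"go.opencensus.io/stats/view"
)

func main() {
	// Register a copy of the stock client latency view under a
	// backend-friendly name; the original view definition is untouched.
	renamed := ochttp.ClientLatencyView.WithName("http_client_latency")
	if err := view.Register(renamed); err != nil {
		log.Fatalf("register renamed view: %v", err)
	}
}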

227
vendor/go.opencensus.io/stats/view/worker.go generated vendored Normal file
View File

@ -0,0 +1,227 @@
// Copyright 2017, OpenCensus Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
package view
import (
"fmt"
"time"
"go.opencensus.io/stats"
"go.opencensus.io/stats/internal"
"go.opencensus.io/tag"
)
func init() {
defaultWorker = newWorker()
go defaultWorker.start()
internal.DefaultRecorder = record
}
type measureRef struct {
measure string
views map[*viewInternal]struct{}
}
type worker struct {
measures map[string]*measureRef
views map[string]*viewInternal
startTimes map[*viewInternal]time.Time
timer *time.Ticker
c chan command
quit, done chan bool
}
var defaultWorker *worker
var defaultReportingDuration = 10 * time.Second
// Find returns a registered view associated with this name.
// If no registered view is found, nil is returned.
func Find(name string) (v *View) {
req := &getViewByNameReq{
name: name,
c: make(chan *getViewByNameResp),
}
defaultWorker.c <- req
resp := <-req.c
return resp.v
}
// Register begins collecting data for the given views.
// Once a view is registered, it reports data to the registered exporters.
func Register(views ...*View) error {
for _, v := range views {
if err := v.canonicalize(); err != nil {
return err
}
}
req := &registerViewReq{
views: views,
err: make(chan error),
}
defaultWorker.c <- req
return <-req.err
}
// Unregister the given views. Data will no longer be exported for these views
// after Unregister returns.
// It is not necessary to unregister from views you expect to collect for the
// duration of your program execution.
func Unregister(views ...*View) {
names := make([]string, len(views))
for i := range views {
names[i] = views[i].Name
}
req := &unregisterFromViewReq{
views: names,
done: make(chan struct{}),
}
defaultWorker.c <- req
<-req.done
}
// RetrieveData gets a snapshot of the data collected for the view registered
// with the given name. It is intended for testing only.
func RetrieveData(viewName string) ([]*Row, error) {
req := &retrieveDataReq{
now: time.Now(),
v: viewName,
c: make(chan *retrieveDataResp),
}
defaultWorker.c <- req
resp := <-req.c
return resp.rows, resp.err
}
func record(tags *tag.Map, ms interface{}) {
req := &recordReq{
tm: tags,
ms: ms.([]stats.Measurement),
}
defaultWorker.c <- req
}
// SetReportingPeriod sets the interval between reporting aggregated views in
// the program. If duration is less than or equal to zero, it enables the
// default behavior.
//
// Note: each exporter makes different promises about what the lowest supported
// duration is. For example, the Stackdriver exporter recommends a value no
// lower than 1 minute. Consult each exporter per your needs.
func SetReportingPeriod(d time.Duration) {
// TODO(acetechnologist): ensure that the duration d is more than a certain
// value. e.g. 1s
req := &setReportingPeriodReq{
d: d,
c: make(chan bool),
}
defaultWorker.c <- req
<-req.c // don't return until the timer is set to the new duration.
}
func newWorker() *worker {
return &worker{
measures: make(map[string]*measureRef),
views: make(map[string]*viewInternal),
startTimes: make(map[*viewInternal]time.Time),
timer: time.NewTicker(defaultReportingDuration),
c: make(chan command, 1024),
quit: make(chan bool),
done: make(chan bool),
}
}
func (w *worker) start() {
for {
select {
case cmd := <-w.c:
cmd.handleCommand(w)
case <-w.timer.C:
w.reportUsage(time.Now())
case <-w.quit:
w.timer.Stop()
close(w.c)
w.done <- true
return
}
}
}
func (w *worker) stop() {
w.quit <- true
<-w.done
}
func (w *worker) getMeasureRef(name string) *measureRef {
if mr, ok := w.measures[name]; ok {
return mr
}
mr := &measureRef{
measure: name,
views: make(map[*viewInternal]struct{}),
}
w.measures[name] = mr
return mr
}
func (w *worker) tryRegisterView(v *View) (*viewInternal, error) {
vi, err := newViewInternal(v)
if err != nil {
return nil, err
}
if x, ok := w.views[vi.view.Name]; ok {
if !x.view.same(vi.view) {
return nil, fmt.Errorf("cannot register view %q; a different view with the same name is already registered", v.Name)
}
// the view is already registered so there is nothing to do and the
// command is considered successful.
return x, nil
}
w.views[vi.view.Name] = vi
ref := w.getMeasureRef(vi.view.Measure.Name())
ref.views[vi] = struct{}{}
return vi, nil
}
func (w *worker) reportView(v *viewInternal, now time.Time) {
if !v.isSubscribed() {
return
}
rows := v.collectedRows()
_, ok := w.startTimes[v]
if !ok {
w.startTimes[v] = now
}
viewData := &Data{
View: v.view,
Start: w.startTimes[v],
End: time.Now(),
Rows: rows,
}
exportersMu.Lock()
for e := range exporters {
e.ExportView(viewData)
}
exportersMu.Unlock()
}
func (w *worker) reportUsage(now time.Time) {
for _, v := range w.views {
w.reportView(v, now)
}
}
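A minimal sketch (not part of this diff) of the two exported knobs above; the view name passed to RetrieveData is an illustrative assumption and must match a registered view.

package main

import (
	"log"
	"time"

	"go.opencensus.io/stats/view"
)

func main() {
	// Report aggregated data once per minute instead of the 10s default.
	view.SetReportingPeriod(time.Minute)

	// Pull a snapshot of the collected rows by view name (intended for testing).
	rows, err := view.RetrieveData("example.com/views/latency")
	if err != nil {
		log.Fatal(err)
	}
	for _, r := range rows {
		log.Println(r)
	}
}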

174
vendor/go.opencensus.io/stats/view/worker_commands.go generated vendored Normal file
View File

@ -0,0 +1,174 @@
// Copyright 2017, OpenCensus Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
package view
import (
"errors"
"fmt"
"strings"
"time"
"go.opencensus.io/stats"
"go.opencensus.io/stats/internal"
"go.opencensus.io/tag"
)
type command interface {
handleCommand(w *worker)
}
// getViewByNameReq is the command to get a view given its name.
type getViewByNameReq struct {
name string
c chan *getViewByNameResp
}
type getViewByNameResp struct {
v *View
}
func (cmd *getViewByNameReq) handleCommand(w *worker) {
v := w.views[cmd.name]
if v == nil {
cmd.c <- &getViewByNameResp{nil}
return
}
cmd.c <- &getViewByNameResp{v.view}
}
// registerViewReq is the command to register a view.
type registerViewReq struct {
views []*View
err chan error
}
func (cmd *registerViewReq) handleCommand(w *worker) {
var errstr []string
for _, view := range cmd.views {
vi, err := w.tryRegisterView(view)
if err != nil {
errstr = append(errstr, fmt.Sprintf("%s: %v", view.Name, err))
continue
}
internal.SubscriptionReporter(view.Measure.Name())
vi.subscribe()
}
if len(errstr) > 0 {
cmd.err <- errors.New(strings.Join(errstr, "\n"))
} else {
cmd.err <- nil
}
}
// unregisterFromViewReq is the command to unregister from a view. It has no
// impact on data collection for clients that are pulling data from the
// library.
type unregisterFromViewReq struct {
views []string
done chan struct{}
}
func (cmd *unregisterFromViewReq) handleCommand(w *worker) {
for _, name := range cmd.views {
vi, ok := w.views[name]
if !ok {
continue
}
// Report pending data for this view before removing it.
w.reportView(vi, time.Now())
vi.unsubscribe()
if !vi.isSubscribed() {
// this was the last subscription and view is not collecting anymore.
// The collected data can be cleared.
vi.clearRows()
}
delete(w.views, name)
}
cmd.done <- struct{}{}
}
// retrieveDataReq is the command to retrieve data for a view.
type retrieveDataReq struct {
now time.Time
v string
c chan *retrieveDataResp
}
type retrieveDataResp struct {
rows []*Row
err error
}
func (cmd *retrieveDataReq) handleCommand(w *worker) {
vi, ok := w.views[cmd.v]
if !ok {
cmd.c <- &retrieveDataResp{
nil,
fmt.Errorf("cannot retrieve data; view %q is not registered", cmd.v),
}
return
}
if !vi.isSubscribed() {
cmd.c <- &retrieveDataResp{
nil,
fmt.Errorf("cannot retrieve data; view %q has no subscriptions or collection is not forcibly started", cmd.v),
}
return
}
cmd.c <- &retrieveDataResp{
vi.collectedRows(),
nil,
}
}
// recordReq is the command to record data related to multiple measures
// at once.
type recordReq struct {
tm *tag.Map
ms []stats.Measurement
}
func (cmd *recordReq) handleCommand(w *worker) {
for _, m := range cmd.ms {
if (m == stats.Measurement{}) { // not registered
continue
}
ref := w.getMeasureRef(m.Measure().Name())
for v := range ref.views {
v.addSample(cmd.tm, m.Value())
}
}
}
// setReportingPeriodReq is the command to modify the duration between
// reporting the collected data to the registered clients.
type setReportingPeriodReq struct {
d time.Duration
c chan bool
}
func (cmd *setReportingPeriodReq) handleCommand(w *worker) {
w.timer.Stop()
if cmd.d <= 0 {
w.timer = time.NewTicker(defaultReportingDuration)
} else {
w.timer = time.NewTicker(cmd.d)
}
cmd.c <- true
}
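
A hedged companion sketch (not in this commit) for the pull-style commands above, assuming the package's public RetrieveData and Unregister helpers: RetrieveData issues a retrieveDataReq, and Unregister an unregisterFromViewReq, which reports pending data before dropping the view.

package main

import (
    "context"
    "fmt"
    "log"

    "go.opencensus.io/stats"
    "go.opencensus.io/stats/view"
)

func main() {
    m := stats.Int64("example.com/measure/hits", "number of hits", "1")
    v := &view.View{Name: "example.com/views/hits.count", Measure: m, Aggregation: view.Count()}
    if err := view.Register(v); err != nil { // registerViewReq
        log.Fatal(err)
    }
    stats.Record(context.Background(), m.M(1)) // recordReq, fanned out via measureRef.views
    rows, err := view.RetrieveData(v.Name)     // retrieveDataReq
    if err != nil {
        log.Fatal(err)
    }
    for _, row := range rows {
        fmt.Println(row.Tags, row.Data)
    }
    view.Unregister(v) // unregisterFromViewReq
}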

41
vendor/go.opencensus.io/tag/context.go generated vendored Normal file
View File

@ -0,0 +1,41 @@
// Copyright 2017, OpenCensus Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
package tag
import "context"
// FromContext returns the tag map stored in the context.
func FromContext(ctx context.Context) *Map {
// The returned tag map shouldn't be mutated.
ts := ctx.Value(mapCtxKey)
if ts == nil {
return nil
}
return ts.(*Map)
}
// NewContext creates a new context with the given tag map.
// To propagate a tag map to downstream methods and downstream RPCs, add a tag map
// to the current context. NewContext will return a copy of the current context,
// and put the tag map into the returned one.
// If there is already a tag map in the current context, it will be replaced with m.
func NewContext(ctx context.Context, m *Map) context.Context {
return context.WithValue(ctx, mapCtxKey, m)
}
type ctxKey struct{}
var mapCtxKey = ctxKey{}
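
Illustrative usage of the two functions above (not part of the vendored file); tag.New, defined later in this package, builds the Map and calls NewContext internally.

package main

import (
    "context"
    "fmt"
    "log"

    "go.opencensus.io/tag"
)

func main() {
    k, err := tag.NewKey("frontend")
    if err != nil {
        log.Fatal(err)
    }
    ctx, err := tag.New(context.Background(), tag.Insert(k, "mobile-ios"))
    if err != nil {
        log.Fatal(err)
    }
    m := tag.FromContext(ctx) // the *Map stored by NewContext; nil on a bare context
    fmt.Println(m)            // { {frontend mobile-ios} }
}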

26
vendor/go.opencensus.io/tag/doc.go generated vendored Normal file
View File

@ -0,0 +1,26 @@
// Copyright 2017, OpenCensus Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
/*
Package tag contains OpenCensus tags.
Tags are key-value pairs. Tags provide additional cardinality to
the OpenCensus instrumentation data.
Tags can be propagated on the wire and in the same
process via context.Context. Encode and Decode should be
used to convert tags to and from their binary propagation form.
*/
package tag // import "go.opencensus.io/tag"

35
vendor/go.opencensus.io/tag/key.go generated vendored Normal file
View File

@ -0,0 +1,35 @@
// Copyright 2017, OpenCensus Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
package tag
// Key represents a tag key.
type Key struct {
name string
}
// NewKey creates or retrieves a string key identified by name.
// Calling NewKey repeatedly with the same name returns the same key.
func NewKey(name string) (Key, error) {
if !checkKeyName(name) {
return Key{}, errInvalidKeyName
}
return Key{name: name}, nil
}
// Name returns the name of the key.
func (k Key) Name() string {
return k.name
}
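
A short sketch (illustrative, not vendored) of the validation behavior NewKey relies on; checkKeyName is defined in validate.go later in this package.

package main

import (
    "fmt"

    "go.opencensus.io/tag"
)

func main() {
    k, err := tag.NewKey("http_method") // valid: non-empty printable ASCII, at most 255 chars
    fmt.Println(k.Name(), err)          // http_method <nil>

    _, err = tag.NewKey("") // rejected by checkKeyName
    fmt.Println(err)        // invalid key name: ...
}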

197
vendor/go.opencensus.io/tag/map.go generated vendored Normal file
View File

@ -0,0 +1,197 @@
// Copyright 2017, OpenCensus Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
package tag
import (
"bytes"
"context"
"fmt"
"sort"
)
// Tag is a key-value pair that can be propagated on the wire.
type Tag struct {
Key Key
Value string
}
// Map is a map of tags. Use New to create a context containing
// a new Map.
type Map struct {
m map[Key]string
}
// Value returns the value for the key if a value for the key exists.
func (m *Map) Value(k Key) (string, bool) {
if m == nil {
return "", false
}
v, ok := m.m[k]
return v, ok
}
func (m *Map) String() string {
if m == nil {
return "nil"
}
keys := make([]Key, 0, len(m.m))
for k := range m.m {
keys = append(keys, k)
}
sort.Slice(keys, func(i, j int) bool { return keys[i].Name() < keys[j].Name() })
var buffer bytes.Buffer
buffer.WriteString("{ ")
for _, k := range keys {
buffer.WriteString(fmt.Sprintf("{%v %v}", k.name, m.m[k]))
}
buffer.WriteString(" }")
return buffer.String()
}
func (m *Map) insert(k Key, v string) {
if _, ok := m.m[k]; ok {
return
}
m.m[k] = v
}
func (m *Map) update(k Key, v string) {
if _, ok := m.m[k]; ok {
m.m[k] = v
}
}
func (m *Map) upsert(k Key, v string) {
m.m[k] = v
}
func (m *Map) delete(k Key) {
delete(m.m, k)
}
func newMap() *Map {
return &Map{m: make(map[Key]string)}
}
// Mutator modifies a tag map.
type Mutator interface {
Mutate(t *Map) (*Map, error)
}
// Insert returns a mutator that inserts a
// value associated with k. If k already exists in the tag map,
// the mutator doesn't update the value.
func Insert(k Key, v string) Mutator {
return &mutator{
fn: func(m *Map) (*Map, error) {
if !checkValue(v) {
return nil, errInvalidValue
}
m.insert(k, v)
return m, nil
},
}
}
// Update returns a mutator that updates the
// value of the tag associated with k to v. If k doesn't
// exist in the tag map, the mutator doesn't insert the value.
func Update(k Key, v string) Mutator {
return &mutator{
fn: func(m *Map) (*Map, error) {
if !checkValue(v) {
return nil, errInvalidValue
}
m.update(k, v)
return m, nil
},
}
}
// Upsert returns a mutator that upserts the
// value of the tag associated with k to v. It inserts the
// value if k doesn't exist already. It updates the value
// if k already exists.
func Upsert(k Key, v string) Mutator {
return &mutator{
fn: func(m *Map) (*Map, error) {
if !checkValue(v) {
return nil, errInvalidValue
}
m.upsert(k, v)
return m, nil
},
}
}
// Delete returns a mutator that deletes
// the value associated with k.
func Delete(k Key) Mutator {
return &mutator{
fn: func(m *Map) (*Map, error) {
m.delete(k)
return m, nil
},
}
}
// New returns a new context that contains a tag map
// originated from the incoming context and modified
// with the provided mutators.
func New(ctx context.Context, mutator ...Mutator) (context.Context, error) {
m := newMap()
orig := FromContext(ctx)
if orig != nil {
for k, v := range orig.m {
if !checkKeyName(k.Name()) {
return ctx, fmt.Errorf("key:%q: %v", k, errInvalidKeyName)
}
if !checkValue(v) {
return ctx, fmt.Errorf("key:%q value:%q: %v", k.Name(), v, errInvalidValue)
}
m.insert(k, v)
}
}
var err error
for _, mod := range mutator {
m, err = mod.Mutate(m)
if err != nil {
return ctx, err
}
}
return NewContext(ctx, m), nil
}
// Do is similar to pprof.Do: a convenience for installing the tags
// from the context as Go profiler labels. This allows you to
// correlate runtime profiling with stats.
//
// It converts the key/values from the given map to Go profiler labels
// and calls pprof.Do.
//
// Do is a no-op if your Go version is below 1.9.
func Do(ctx context.Context, f func(ctx context.Context)) {
do(ctx, f)
}
type mutator struct {
fn func(t *Map) (*Map, error)
}
func (m *mutator) Mutate(t *Map) (*Map, error) {
return m.fn(t)
}
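
A minimal sketch (not part of this file) of the mutator semantics documented above, applied through New; the key and values are illustrative.

package main

import (
    "context"
    "fmt"
    "log"

    "go.opencensus.io/tag"
)

func main() {
    k, err := tag.NewKey("status")
    if err != nil {
        log.Fatal(err)
    }
    ctx, err := tag.New(context.Background(),
        tag.Insert(k, "queued"),  // inserts: key is absent
        tag.Insert(k, "ignored"), // no-op: Insert never overwrites
        tag.Update(k, "running"), // updates: key is now present
        tag.Upsert(k, "done"),    // always wins, present or not
    )
    if err != nil {
        log.Fatal(err)
    }
    if v, ok := tag.FromContext(ctx).Value(k); ok {
        fmt.Println(v) // done
    }
}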

234
vendor/go.opencensus.io/tag/map_codec.go generated vendored Normal file
View File

@ -0,0 +1,234 @@
// Copyright 2017, OpenCensus Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
package tag
import (
"encoding/binary"
"fmt"
)
// keyType defines the types of keys allowed. Currently only keyTypeString is
// supported.
type keyType byte
const (
keyTypeString keyType = iota
keyTypeInt64
keyTypeTrue
keyTypeFalse
tagsVersionID = byte(0)
)
type encoderGRPC struct {
buf []byte
writeIdx, readIdx int
}
// writeTagString writes the keyTypeString byte (0) followed by the
// length-prefixed key and value strings.
func (eg *encoderGRPC) writeTagString(k, v string) {
eg.writeByte(byte(keyTypeString))
eg.writeStringWithVarintLen(k)
eg.writeStringWithVarintLen(v)
}
func (eg *encoderGRPC) writeTagUint64(k string, i uint64) {
eg.writeByte(byte(keyTypeInt64))
eg.writeStringWithVarintLen(k)
eg.writeUint64(i)
}
func (eg *encoderGRPC) writeTagTrue(k string) {
eg.writeByte(byte(keyTypeTrue))
eg.writeStringWithVarintLen(k)
}
func (eg *encoderGRPC) writeTagFalse(k string) {
eg.writeByte(byte(keyTypeFalse))
eg.writeStringWithVarintLen(k)
}
func (eg *encoderGRPC) writeBytesWithVarintLen(bytes []byte) {
length := len(bytes)
eg.growIfRequired(binary.MaxVarintLen64 + length)
eg.writeIdx += binary.PutUvarint(eg.buf[eg.writeIdx:], uint64(length))
copy(eg.buf[eg.writeIdx:], bytes)
eg.writeIdx += length
}
func (eg *encoderGRPC) writeStringWithVarintLen(s string) {
length := len(s)
eg.growIfRequired(binary.MaxVarintLen64 + length)
eg.writeIdx += binary.PutUvarint(eg.buf[eg.writeIdx:], uint64(length))
copy(eg.buf[eg.writeIdx:], s)
eg.writeIdx += length
}
func (eg *encoderGRPC) writeByte(v byte) {
eg.growIfRequired(1)
eg.buf[eg.writeIdx] = v
eg.writeIdx++
}
func (eg *encoderGRPC) writeUint32(i uint32) {
eg.growIfRequired(4)
binary.LittleEndian.PutUint32(eg.buf[eg.writeIdx:], i)
eg.writeIdx += 4
}
func (eg *encoderGRPC) writeUint64(i uint64) {
eg.growIfRequired(8)
binary.LittleEndian.PutUint64(eg.buf[eg.writeIdx:], i)
eg.writeIdx += 8
}
func (eg *encoderGRPC) readByte() byte {
b := eg.buf[eg.readIdx]
eg.readIdx++
return b
}
func (eg *encoderGRPC) readUint32() uint32 {
i := binary.LittleEndian.Uint32(eg.buf[eg.readIdx:])
eg.readIdx += 4
return i
}
func (eg *encoderGRPC) readUint64() uint64 {
i := binary.LittleEndian.Uint64(eg.buf[eg.readIdx:])
eg.readIdx += 8
return i
}
func (eg *encoderGRPC) readBytesWithVarintLen() ([]byte, error) {
if eg.readEnded() {
return nil, fmt.Errorf("unexpected end while readBytesWithVarintLen '%x' starting at idx '%v'", eg.buf, eg.readIdx)
}
length, valueStart := binary.Uvarint(eg.buf[eg.readIdx:])
if valueStart <= 0 {
return nil, fmt.Errorf("unexpected end while readBytesWithVarintLen '%x' starting at idx '%v'", eg.buf, eg.readIdx)
}
valueStart += eg.readIdx
valueEnd := valueStart + int(length)
if valueEnd > len(eg.buf) {
return nil, fmt.Errorf("malformed encoding: length:%v, upper:%v, maxLength:%v", length, valueEnd, len(eg.buf))
}
eg.readIdx = valueEnd
return eg.buf[valueStart:valueEnd], nil
}
func (eg *encoderGRPC) readStringWithVarintLen() (string, error) {
bytes, err := eg.readBytesWithVarintLen()
if err != nil {
return "", err
}
return string(bytes), nil
}
func (eg *encoderGRPC) growIfRequired(expected int) {
if len(eg.buf)-eg.writeIdx < expected {
tmp := make([]byte, 2*(len(eg.buf)+1)+expected)
copy(tmp, eg.buf)
eg.buf = tmp
}
}
func (eg *encoderGRPC) readEnded() bool {
return eg.readIdx >= len(eg.buf)
}
func (eg *encoderGRPC) bytes() []byte {
return eg.buf[:eg.writeIdx]
}
// Encode encodes the tag map into a []byte. It is useful to propagate
// the tag maps on the wire in binary format.
func Encode(m *Map) []byte {
eg := &encoderGRPC{
buf: make([]byte, len(m.m)),
}
eg.writeByte(byte(tagsVersionID))
for k, v := range m.m {
eg.writeByte(byte(keyTypeString))
eg.writeStringWithVarintLen(k.name)
eg.writeBytesWithVarintLen([]byte(v))
}
return eg.bytes()
}
// Decode decodes the given []byte into a tag map.
func Decode(bytes []byte) (*Map, error) {
ts := newMap()
err := DecodeEach(bytes, ts.upsert)
if err != nil {
// no partial failures
return nil, err
}
return ts, nil
}
// DecodeEach decodes the given serialized tag map, calling handler for each
// tag key and value decoded.
func DecodeEach(bytes []byte, fn func(key Key, val string)) error {
eg := &encoderGRPC{
buf: bytes,
}
if len(eg.buf) == 0 {
return nil
}
version := eg.readByte()
if version > tagsVersionID {
return fmt.Errorf("cannot decode: unsupported version: %q; supports only up to: %q", version, tagsVersionID)
}
for !eg.readEnded() {
typ := keyType(eg.readByte())
if typ != keyTypeString {
return fmt.Errorf("cannot decode: invalid key type: %q", typ)
}
k, err := eg.readBytesWithVarintLen()
if err != nil {
return err
}
v, err := eg.readBytesWithVarintLen()
if err != nil {
return err
}
key, err := NewKey(string(k))
if err != nil {
return err
}
val := string(v)
if !checkValue(val) {
return errInvalidValue
}
fn(key, val)
}
return nil
}
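
An illustrative round trip (not vendored) through Encode and Decode, which is how a tag map crosses process boundaries in binary form.

package main

import (
    "context"
    "fmt"
    "log"

    "go.opencensus.io/tag"
)

func main() {
    k, err := tag.NewKey("user")
    if err != nil {
        log.Fatal(err)
    }
    ctx, err := tag.New(context.Background(), tag.Upsert(k, "alice"))
    if err != nil {
        log.Fatal(err)
    }
    wire := tag.Encode(tag.FromContext(ctx)) // tagsVersionID byte + length-prefixed pairs
    decoded, err := tag.Decode(wire)
    if err != nil {
        log.Fatal(err)
    }
    v, _ := decoded.Value(k)
    fmt.Println(v) // alice
}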

31
vendor/go.opencensus.io/tag/profile_19.go generated vendored Normal file
View File

@ -0,0 +1,31 @@
// Copyright 2018, OpenCensus Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// +build go1.9
package tag
import (
"context"
"runtime/pprof"
)
func do(ctx context.Context, f func(ctx context.Context)) {
m := FromContext(ctx)
keyvals := make([]string, 0, 2*len(m.m))
for k, v := range m.m {
keyvals = append(keyvals, k.Name(), v)
}
pprof.Do(ctx, pprof.Labels(keyvals...), f)
}

23
vendor/go.opencensus.io/tag/profile_not19.go generated vendored Normal file
View File

@ -0,0 +1,23 @@
// Copyright 2018, OpenCensus Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// +build !go1.9
package tag
import "context"
func do(ctx context.Context, f func(ctx context.Context)) {
f(ctx)
}

56
vendor/go.opencensus.io/tag/validate.go generated vendored Normal file
View File

@ -0,0 +1,56 @@
// Copyright 2017, OpenCensus Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package tag
import "errors"
const (
maxKeyLength = 255
// valid characters are restricted to the US-ASCII subset (range 0x20 (' ') to 0x7e ('~')).
validKeyValueMin = 32
validKeyValueMax = 126
)
var (
errInvalidKeyName = errors.New("invalid key name: only ASCII characters accepted; max length must be 255 characters")
errInvalidValue = errors.New("invalid value: only ASCII characters accepted; max length must be 255 characters")
)
func checkKeyName(name string) bool {
if len(name) == 0 {
return false
}
if len(name) > maxKeyLength {
return false
}
return isASCII(name)
}
func isASCII(s string) bool {
for _, c := range s {
if (c < validKeyValueMin) || (c > validKeyValueMax) {
return false
}
}
return true
}
func checkValue(v string) bool {
if len(v) > maxKeyLength {
return false
}
return isASCII(v)
}

114
vendor/go.opencensus.io/trace/basetypes.go generated vendored Normal file
View File

@ -0,0 +1,114 @@
// Copyright 2017, OpenCensus Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package trace
import (
"fmt"
"time"
)
type (
// TraceID is a 16-byte identifier for a set of spans.
TraceID [16]byte
// SpanID is an 8-byte identifier for a single span.
SpanID [8]byte
)
func (t TraceID) String() string {
return fmt.Sprintf("%02x", t[:])
}
func (s SpanID) String() string {
return fmt.Sprintf("%02x", s[:])
}
// Annotation represents a text annotation with a set of attributes and a timestamp.
type Annotation struct {
Time time.Time
Message string
Attributes map[string]interface{}
}
// Attribute represents a key-value pair on a span, link or annotation.
// Construct with one of: BoolAttribute, Int64Attribute, or StringAttribute.
type Attribute struct {
key string
value interface{}
}
// BoolAttribute returns a bool-valued attribute.
func BoolAttribute(key string, value bool) Attribute {
return Attribute{key: key, value: value}
}
// Int64Attribute returns an int64-valued attribute.
func Int64Attribute(key string, value int64) Attribute {
return Attribute{key: key, value: value}
}
// StringAttribute returns a string-valued attribute.
func StringAttribute(key string, value string) Attribute {
return Attribute{key: key, value: value}
}
// LinkType specifies the relationship between the span that had the link
// added, and the linked span.
type LinkType int32
// LinkType values.
const (
LinkTypeUnspecified LinkType = iota // The relationship of the two spans is unknown.
LinkTypeChild // The current span is a child of the linked span.
LinkTypeParent // The current span is the parent of the linked span.
)
// Link represents a reference from one span to another span.
type Link struct {
TraceID TraceID
SpanID SpanID
Type LinkType
// Attributes is a set of attributes on the link.
Attributes map[string]interface{}
}
// MessageEventType specifies the type of message event.
type MessageEventType int32
// MessageEventType values.
const (
MessageEventTypeUnspecified MessageEventType = iota // Unknown event type.
MessageEventTypeSent // Indicates a sent RPC message.
MessageEventTypeRecv // Indicates a received RPC message.
)
// MessageEvent represents an event describing a message sent or received on the network.
type MessageEvent struct {
Time time.Time
EventType MessageEventType
MessageID int64
UncompressedByteSize int64
CompressedByteSize int64
}
// Status is the status of a Span.
type Status struct {
// Code is a status code. Zero indicates success.
//
// If Code will be propagated to Google APIs, it ideally should be a value from
// https://github.com/googleapis/googleapis/blob/master/google/rpc/code.proto .
Code int32
Message string
}
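
For orientation (not part of this file): the attribute constructors and Annotation above are normally used through the Span methods defined in trace.go later in this diff; the span and attribute names are illustrative.

package main

import (
    "context"

    "go.opencensus.io/trace"
)

func main() {
    _, span := trace.StartSpan(context.Background(), "example.com/CacheLookup")
    defer span.End()

    span.AddAttributes(
        trace.StringAttribute("peer", "cache-1"),
        trace.Int64Attribute("bytes", 1024),
        trace.BoolAttribute("hit", false),
    )
    // Recorded as an Annotation with the given attributes and a timestamp.
    span.Annotate([]trace.Attribute{trace.BoolAttribute("fallback", true)}, "miss, querying origin")
}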

48
vendor/go.opencensus.io/trace/config.go generated vendored Normal file
View File

@ -0,0 +1,48 @@
// Copyright 2018, OpenCensus Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package trace
import (
"sync"
"go.opencensus.io/trace/internal"
)
// Config represents the global tracing configuration.
type Config struct {
// DefaultSampler is the default sampler used when creating new spans.
DefaultSampler Sampler
// IDGenerator is for internal use only.
IDGenerator internal.IDGenerator
}
var configWriteMu sync.Mutex
// ApplyConfig applies changes to the global tracing configuration.
//
// Fields not provided in the given config are going to be preserved.
func ApplyConfig(cfg Config) {
configWriteMu.Lock()
defer configWriteMu.Unlock()
c := *config.Load().(*Config)
if cfg.DefaultSampler != nil {
c.DefaultSampler = cfg.DefaultSampler
}
if cfg.IDGenerator != nil {
c.IDGenerator = cfg.IDGenerator
}
config.Store(&c)
}
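
A minimal sketch of ApplyConfig (illustrative): only the fields that are set get replaced, so passing just a sampler preserves the existing IDGenerator.

package main

import "go.opencensus.io/trace"

func main() {
    // Raise sampling from the package default (see defaultSamplingProbability
    // in sampling.go) to 1% of traces; IDGenerator is nil and thus preserved.
    trace.ApplyConfig(trace.Config{DefaultSampler: trace.ProbabilitySampler(0.01)})
}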

53
vendor/go.opencensus.io/trace/doc.go generated vendored Normal file
View File

@ -0,0 +1,53 @@
// Copyright 2017, OpenCensus Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
/*
Package trace contains support for OpenCensus distributed tracing.
The following assumes a basic familiarity with OpenCensus concepts.
See http://opencensus.io
Exporting Traces
To export collected tracing data, register at least one exporter. You can use
one of the provided exporters or write your own.
trace.RegisterExporter(exporter)
By default, traces will be sampled relatively rarely. To change the sampling
frequency for your entire program, call ApplyConfig. Use a ProbabilitySampler
to sample a subset of traces, or use AlwaysSample to collect a trace on every run:
trace.ApplyConfig(trace.Config{DefaultSampler: trace.AlwaysSample()})
Be careful about using trace.AlwaysSample in a production application with
significant traffic: a new trace will be started and exported for every request.
Adding Spans to a Trace
A trace consists of a tree of spans. In Go, the current span is carried in a
context.Context.
It is common to want to capture all the activity of a function call in a span. For
this to work, the function must take a context.Context as a parameter. Add these two
lines to the top of the function:
ctx, span := trace.StartSpan(ctx, "example.com/Run")
defer span.End()
StartSpan will create a new top-level span if the context
doesn't contain another span, otherwise it will create a child span.
*/
package trace // import "go.opencensus.io/trace"

90
vendor/go.opencensus.io/trace/export.go generated vendored Normal file
View File

@ -0,0 +1,90 @@
// Copyright 2017, OpenCensus Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package trace
import (
"sync"
"sync/atomic"
"time"
)
// Exporter is a type for functions that receive sampled trace spans.
//
// The ExportSpan method should be safe for concurrent use and should return
// quickly; if an Exporter takes a significant amount of time to process a
// SpanData, that work should be done on another goroutine.
//
// The SpanData should not be modified, but a pointer to it can be kept.
type Exporter interface {
ExportSpan(s *SpanData)
}
type exportersMap map[Exporter]struct{}
var (
exporterMu sync.Mutex
exporters atomic.Value
)
// RegisterExporter adds to the list of Exporters that will receive sampled
// trace spans.
//
// Binaries can register exporters, libraries shouldn't register exporters.
func RegisterExporter(e Exporter) {
exporterMu.Lock()
new := make(exportersMap)
if old, ok := exporters.Load().(exportersMap); ok {
for k, v := range old {
new[k] = v
}
}
new[e] = struct{}{}
exporters.Store(new)
exporterMu.Unlock()
}
// UnregisterExporter removes the given Exporter from the list of Exporters
// that will receive sampled trace spans.
func UnregisterExporter(e Exporter) {
exporterMu.Lock()
new := make(exportersMap)
if old, ok := exporters.Load().(exportersMap); ok {
for k, v := range old {
new[k] = v
}
}
delete(new, e)
exporters.Store(new)
exporterMu.Unlock()
}
// SpanData contains all the information collected by a Span.
type SpanData struct {
SpanContext
ParentSpanID SpanID
SpanKind int
Name string
StartTime time.Time
// The wall clock time of EndTime will be adjusted to always be offset
// from StartTime by the duration of the span.
EndTime time.Time
// The values of Attributes each have type string, bool, or int64.
Attributes map[string]interface{}
Annotations []Annotation
MessageEvents []MessageEvent
Status
Links []Link
HasRemoteParent bool
}
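
Illustrative only: a tiny Exporter implementation wired up with RegisterExporter. Span.End (in trace.go below) calls ExportSpan synchronously for every sampled span, so the method should return quickly.

package main

import (
    "context"
    "fmt"

    "go.opencensus.io/trace"
)

// logExporter satisfies the Exporter interface above; it must be safe for
// concurrent use and must not modify the SpanData it receives.
type logExporter struct{}

func (logExporter) ExportSpan(sd *trace.SpanData) {
    fmt.Println(sd.Name, sd.EndTime.Sub(sd.StartTime))
}

func main() {
    trace.RegisterExporter(logExporter{})
    trace.ApplyConfig(trace.Config{DefaultSampler: trace.AlwaysSample()})

    _, span := trace.StartSpan(context.Background(), "example.com/Run")
    span.End() // delivered to logExporter because the span is sampled
}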

21
vendor/go.opencensus.io/trace/internal/internal.go generated vendored Normal file
View File

@ -0,0 +1,21 @@
// Copyright 2018, OpenCensus Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Package internal provides trace internals.
package internal
type IDGenerator interface {
NewTraceID() [16]byte
NewSpanID() [8]byte
}

108
vendor/go.opencensus.io/trace/propagation/propagation.go generated vendored Normal file
View File

@ -0,0 +1,108 @@
// Copyright 2017, OpenCensus Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Package propagation implements the binary trace context format.
package propagation // import "go.opencensus.io/trace/propagation"
// TODO: link to external spec document.
// BinaryFormat format:
//
// Binary value: <version_id><version_format>
// version_id: 1 byte representing the version id.
//
// For version_id = 0:
//
// version_format: <field><field>
// field_format: <field_id><field_format>
//
// Fields:
//
// TraceId: (field_id = 0, len = 16, default = "0000000000000000") - 16-byte array representing the trace_id.
// SpanId: (field_id = 1, len = 8, default = "00000000") - 8-byte array representing the span_id.
// TraceOptions: (field_id = 2, len = 1, default = "0") - 1-byte array representing the trace_options.
//
// Fields MUST be encoded using the field id order (smaller to higher).
//
// Valid value example:
//
// {0, 0, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 1, 97,
// 98, 99, 100, 101, 102, 103, 104, 2, 1}
//
// version_id = 0;
// trace_id = {64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79}
// span_id = {97, 98, 99, 100, 101, 102, 103, 104};
// trace_options = {1};
import (
"net/http"
"go.opencensus.io/trace"
)
// Binary returns the binary format representation of a SpanContext.
//
// If sc is the zero value, Binary returns nil.
func Binary(sc trace.SpanContext) []byte {
if sc == (trace.SpanContext{}) {
return nil
}
var b [29]byte
copy(b[2:18], sc.TraceID[:])
b[18] = 1
copy(b[19:27], sc.SpanID[:])
b[27] = 2
b[28] = uint8(sc.TraceOptions)
return b[:]
}
// FromBinary returns the SpanContext represented by b.
//
// If b has an unsupported version ID or contains no TraceID, FromBinary
// returns with ok==false.
func FromBinary(b []byte) (sc trace.SpanContext, ok bool) {
if len(b) == 0 || b[0] != 0 {
return trace.SpanContext{}, false
}
b = b[1:]
if len(b) >= 17 && b[0] == 0 {
copy(sc.TraceID[:], b[1:17])
b = b[17:]
} else {
return trace.SpanContext{}, false
}
if len(b) >= 9 && b[0] == 1 {
copy(sc.SpanID[:], b[1:9])
b = b[9:]
}
if len(b) >= 2 && b[0] == 2 {
sc.TraceOptions = trace.TraceOptions(b[1])
}
return sc, true
}
// HTTPFormat implementations propagate span contexts
// in HTTP requests.
//
// SpanContextFromRequest extracts a span context from incoming
// requests.
//
// SpanContextToRequest modifies the given request to include the given
// span context.
type HTTPFormat interface {
SpanContextFromRequest(req *http.Request) (sc trace.SpanContext, ok bool)
SpanContextToRequest(sc trace.SpanContext, req *http.Request)
}
// TODO(jbd): Find a more representative but short name for HTTPFormat.
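
An illustrative round trip through the binary format above (not vendored); a real HTTPFormat implementation would carry these bytes in a request header rather than a local variable.

package main

import (
    "context"
    "fmt"

    "go.opencensus.io/trace"
    "go.opencensus.io/trace/propagation"
)

func main() {
    trace.ApplyConfig(trace.Config{DefaultSampler: trace.AlwaysSample()})
    _, span := trace.StartSpan(context.Background(), "example.com/Call")
    defer span.End()

    wire := propagation.Binary(span.SpanContext()) // 29 bytes: version, trace ID, span ID, options
    sc, ok := propagation.FromBinary(wire)
    fmt.Println(ok, sc.TraceID, sc.SpanID, sc.TraceOptions) // true <trace id> <span id> 1
}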

75
vendor/go.opencensus.io/trace/sampling.go generated vendored Normal file
View File

@ -0,0 +1,75 @@
// Copyright 2017, OpenCensus Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package trace
import (
"encoding/binary"
)
const defaultSamplingProbability = 1e-4
// Sampler decides whether a trace should be sampled and exported.
type Sampler func(SamplingParameters) SamplingDecision
// SamplingParameters contains the values passed to a Sampler.
type SamplingParameters struct {
ParentContext SpanContext
TraceID TraceID
SpanID SpanID
Name string
HasRemoteParent bool
}
// SamplingDecision is the value returned by a Sampler.
type SamplingDecision struct {
Sample bool
}
// ProbabilitySampler returns a Sampler that samples a given fraction of traces.
//
// It also samples spans whose parents are sampled.
func ProbabilitySampler(fraction float64) Sampler {
if !(fraction >= 0) {
fraction = 0
} else if fraction >= 1 {
return AlwaysSample()
}
traceIDUpperBound := uint64(fraction * (1 << 63))
return Sampler(func(p SamplingParameters) SamplingDecision {
if p.ParentContext.IsSampled() {
return SamplingDecision{Sample: true}
}
x := binary.BigEndian.Uint64(p.TraceID[0:8]) >> 1
return SamplingDecision{Sample: x < traceIDUpperBound}
})
}
// AlwaysSample returns a Sampler that samples every trace.
// Be careful about using this sampler in a production application with
// significant traffic: a new trace will be started and exported for every
// request.
func AlwaysSample() Sampler {
return func(p SamplingParameters) SamplingDecision {
return SamplingDecision{Sample: true}
}
}
// NeverSample returns a Sampler that samples no traces.
func NeverSample() Sampler {
return func(p SamplingParameters) SamplingDecision {
return SamplingDecision{Sample: false}
}
}
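
A hedged sketch of per-span sampler overrides via WithSampler (defined in trace.go below); note that ProbabilitySampler always samples a span whose parent is already sampled.

package main

import (
    "context"

    "go.opencensus.io/trace"
)

func main() {
    // Sample roughly a quarter of new traces rooted here.
    ctx, parent := trace.StartSpan(context.Background(), "example.com/Parent",
        trace.WithSampler(trace.ProbabilitySampler(0.25)))
    defer parent.End()

    // Force-drop this subtree regardless of the parent's decision.
    _, child := trace.StartSpan(ctx, "example.com/Parent/child",
        trace.WithSampler(trace.NeverSample()))
    child.End()
}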

130
vendor/go.opencensus.io/trace/spanbucket.go generated vendored Normal file
View File

@ -0,0 +1,130 @@
// Copyright 2017, OpenCensus Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package trace
import (
"time"
)
// samplePeriod is the minimum time between accepting spans in a single bucket.
const samplePeriod = time.Second
// defaultLatencies contains the default latency bucket bounds.
// TODO: consider defaults, make configurable
var defaultLatencies = [...]time.Duration{
10 * time.Microsecond,
100 * time.Microsecond,
time.Millisecond,
10 * time.Millisecond,
100 * time.Millisecond,
time.Second,
10 * time.Second,
time.Minute,
}
// bucket is a container for a set of spans for a particular error code or latency range.
type bucket struct {
nextTime time.Time // next time we can accept a span
buffer []*SpanData // circular buffer of spans
nextIndex int // location next SpanData should be placed in buffer
overflow bool // whether the circular buffer has wrapped around
}
func makeBucket(bufferSize int) bucket {
return bucket{
buffer: make([]*SpanData, bufferSize),
}
}
// add adds a span to the bucket, if nextTime has been reached.
func (b *bucket) add(s *SpanData) {
if s.EndTime.Before(b.nextTime) {
return
}
if len(b.buffer) == 0 {
return
}
b.nextTime = s.EndTime.Add(samplePeriod)
b.buffer[b.nextIndex] = s
b.nextIndex++
if b.nextIndex == len(b.buffer) {
b.nextIndex = 0
b.overflow = true
}
}
// size returns the number of spans in the bucket.
func (b *bucket) size() int {
if b.overflow {
return len(b.buffer)
}
return b.nextIndex
}
// span returns the ith span in the bucket.
func (b *bucket) span(i int) *SpanData {
if !b.overflow {
return b.buffer[i]
}
if i < len(b.buffer)-b.nextIndex {
return b.buffer[b.nextIndex+i]
}
return b.buffer[b.nextIndex+i-len(b.buffer)]
}
// resize changes the size of the bucket to n, keeping up to n existing spans.
func (b *bucket) resize(n int) {
cur := b.size()
newBuffer := make([]*SpanData, n)
if cur < n {
for i := 0; i < cur; i++ {
newBuffer[i] = b.span(i)
}
b.buffer = newBuffer
b.nextIndex = cur
b.overflow = false
return
}
for i := 0; i < n; i++ {
newBuffer[i] = b.span(i + cur - n)
}
b.buffer = newBuffer
b.nextIndex = 0
b.overflow = true
}
// latencyBucket returns the appropriate bucket number for a given latency.
func latencyBucket(latency time.Duration) int {
i := 0
for i < len(defaultLatencies) && latency >= defaultLatencies[i] {
i++
}
return i
}
// latencyBucketBounds returns the lower and upper bounds for a latency bucket
// number.
//
// The lower bound is inclusive, the upper bound is exclusive (except for the
// last bucket).
func latencyBucketBounds(index int) (lower time.Duration, upper time.Duration) {
if index == 0 {
return 0, defaultLatencies[index]
}
if index == len(defaultLatencies) {
return defaultLatencies[index-1], 1<<63 - 1
}
return defaultLatencies[index-1], defaultLatencies[index]
}

306
vendor/go.opencensus.io/trace/spanstore.go generated vendored Normal file
View File

@ -0,0 +1,306 @@
// Copyright 2017, OpenCensus Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package trace
import (
"sync"
"time"
"go.opencensus.io/internal"
)
const (
maxBucketSize = 100000
defaultBucketSize = 10
)
var (
ssmu sync.RWMutex // protects spanStores
spanStores = make(map[string]*spanStore)
)
// This exists purely to avoid exposing internal methods used by z-Pages externally.
type internalOnly struct{}
func init() {
//TODO(#412): remove
internal.Trace = &internalOnly{}
}
// ReportActiveSpans returns the active spans for the given name.
func (i internalOnly) ReportActiveSpans(name string) []*SpanData {
s := spanStoreForName(name)
if s == nil {
return nil
}
var out []*SpanData
s.mu.Lock()
defer s.mu.Unlock()
for span := range s.active {
out = append(out, span.makeSpanData())
}
return out
}
// ReportSpansByError returns a sample of error spans.
//
// If code is nonzero, only spans with that status code are returned.
func (i internalOnly) ReportSpansByError(name string, code int32) []*SpanData {
s := spanStoreForName(name)
if s == nil {
return nil
}
var out []*SpanData
s.mu.Lock()
defer s.mu.Unlock()
if code != 0 {
if b, ok := s.errors[code]; ok {
for _, sd := range b.buffer {
if sd == nil {
break
}
out = append(out, sd)
}
}
} else {
for _, b := range s.errors {
for _, sd := range b.buffer {
if sd == nil {
break
}
out = append(out, sd)
}
}
}
return out
}
// ConfigureBucketSizes sets the number of spans to keep per latency and error
// bucket for different span names.
func (i internalOnly) ConfigureBucketSizes(bcs []internal.BucketConfiguration) {
for _, bc := range bcs {
latencyBucketSize := bc.MaxRequestsSucceeded
if latencyBucketSize < 0 {
latencyBucketSize = 0
}
if latencyBucketSize > maxBucketSize {
latencyBucketSize = maxBucketSize
}
errorBucketSize := bc.MaxRequestsErrors
if errorBucketSize < 0 {
errorBucketSize = 0
}
if errorBucketSize > maxBucketSize {
errorBucketSize = maxBucketSize
}
spanStoreSetSize(bc.Name, latencyBucketSize, errorBucketSize)
}
}
// ReportSpansPerMethod returns a summary of what spans are being stored for each span name.
func (i internalOnly) ReportSpansPerMethod() map[string]internal.PerMethodSummary {
out := make(map[string]internal.PerMethodSummary)
ssmu.RLock()
defer ssmu.RUnlock()
for name, s := range spanStores {
s.mu.Lock()
p := internal.PerMethodSummary{
Active: len(s.active),
}
for code, b := range s.errors {
p.ErrorBuckets = append(p.ErrorBuckets, internal.ErrorBucketSummary{
ErrorCode: code,
Size: b.size(),
})
}
for i, b := range s.latency {
min, max := latencyBucketBounds(i)
p.LatencyBuckets = append(p.LatencyBuckets, internal.LatencyBucketSummary{
MinLatency: min,
MaxLatency: max,
Size: b.size(),
})
}
s.mu.Unlock()
out[name] = p
}
return out
}
// ReportSpansByLatency returns a sample of successful spans.
//
// minLatency is the minimum latency of spans to be returned.
// maxLatency, if nonzero, is the maximum latency of spans to be returned.
func (i internalOnly) ReportSpansByLatency(name string, minLatency, maxLatency time.Duration) []*SpanData {
s := spanStoreForName(name)
if s == nil {
return nil
}
var out []*SpanData
s.mu.Lock()
defer s.mu.Unlock()
for i, b := range s.latency {
min, max := latencyBucketBounds(i)
if i+1 != len(s.latency) && max <= minLatency {
continue
}
if maxLatency != 0 && maxLatency < min {
continue
}
for _, sd := range b.buffer {
if sd == nil {
break
}
if minLatency != 0 || maxLatency != 0 {
d := sd.EndTime.Sub(sd.StartTime)
if d < minLatency {
continue
}
if maxLatency != 0 && d > maxLatency {
continue
}
}
out = append(out, sd)
}
}
return out
}
// spanStore keeps track of spans stored for a particular span name.
//
// It contains all active spans; a sample of spans for failed requests,
// categorized by error code; and a sample of spans for successful requests,
// bucketed by latency.
type spanStore struct {
mu sync.Mutex // protects everything below.
active map[*Span]struct{}
errors map[int32]*bucket
latency []bucket
maxSpansPerErrorBucket int
}
// newSpanStore creates a span store.
func newSpanStore(name string, latencyBucketSize int, errorBucketSize int) *spanStore {
s := &spanStore{
active: make(map[*Span]struct{}),
latency: make([]bucket, len(defaultLatencies)+1),
maxSpansPerErrorBucket: errorBucketSize,
}
for i := range s.latency {
s.latency[i] = makeBucket(latencyBucketSize)
}
return s
}
// spanStoreForName returns the spanStore for the given name.
//
// It returns nil if it doesn't exist.
func spanStoreForName(name string) *spanStore {
var s *spanStore
ssmu.RLock()
s, _ = spanStores[name]
ssmu.RUnlock()
return s
}
// spanStoreForNameCreateIfNew returns the spanStore for the given name.
//
// It creates it if it didn't exist.
func spanStoreForNameCreateIfNew(name string) *spanStore {
ssmu.RLock()
s, ok := spanStores[name]
ssmu.RUnlock()
if ok {
return s
}
ssmu.Lock()
defer ssmu.Unlock()
s, ok = spanStores[name]
if ok {
return s
}
s = newSpanStore(name, defaultBucketSize, defaultBucketSize)
spanStores[name] = s
return s
}
// spanStoreSetSize resizes the spanStore for the given name.
//
// It creates it if it didn't exist.
func spanStoreSetSize(name string, latencyBucketSize int, errorBucketSize int) {
ssmu.RLock()
s, ok := spanStores[name]
ssmu.RUnlock()
if ok {
s.resize(latencyBucketSize, errorBucketSize)
return
}
ssmu.Lock()
defer ssmu.Unlock()
s, ok = spanStores[name]
if ok {
s.resize(latencyBucketSize, errorBucketSize)
return
}
s = newSpanStore(name, latencyBucketSize, errorBucketSize)
spanStores[name] = s
}
func (s *spanStore) resize(latencyBucketSize int, errorBucketSize int) {
s.mu.Lock()
for i := range s.latency {
s.latency[i].resize(latencyBucketSize)
}
for _, b := range s.errors {
b.resize(errorBucketSize)
}
s.maxSpansPerErrorBucket = errorBucketSize
s.mu.Unlock()
}
// add adds a span to the active bucket of the spanStore.
func (s *spanStore) add(span *Span) {
s.mu.Lock()
s.active[span] = struct{}{}
s.mu.Unlock()
}
// finished removes a span from the active set, and adds a corresponding
// SpanData to a latency or error bucket.
func (s *spanStore) finished(span *Span, sd *SpanData) {
latency := sd.EndTime.Sub(sd.StartTime)
if latency < 0 {
latency = 0
}
code := sd.Status.Code
s.mu.Lock()
delete(s.active, span)
if code == 0 {
s.latency[latencyBucket(latency)].add(sd)
} else {
if s.errors == nil {
s.errors = make(map[int32]*bucket)
}
if b := s.errors[code]; b != nil {
b.add(sd)
} else {
b := makeBucket(s.maxSpansPerErrorBucket)
s.errors[code] = &b
b.add(sd)
}
}
s.mu.Unlock()
}

37
vendor/go.opencensus.io/trace/status_codes.go generated vendored Normal file
View File

@ -0,0 +1,37 @@
// Copyright 2018, OpenCensus Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package trace
// Status codes for use with Span.SetStatus. These correspond to the status
// codes used by gRPC defined here: https://github.com/googleapis/googleapis/blob/master/google/rpc/code.proto
const (
StatusCodeOK = 0
StatusCodeCancelled = 1
StatusCodeUnknown = 2
StatusCodeInvalidArgument = 3
StatusCodeDeadlineExceeded = 4
StatusCodeNotFound = 5
StatusCodeAlreadyExists = 6
StatusCodePermissionDenied = 7
StatusCodeResourceExhausted = 8
StatusCodeFailedPrecondition = 9
StatusCodeAborted = 10
StatusCodeOutOfRange = 11
StatusCodeUnimplemented = 12
StatusCodeInternal = 13
StatusCodeUnavailable = 14
StatusCodeDataLoss = 15
StatusCodeUnauthenticated = 16
)
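
An illustrative use of these codes with Span.SetStatus (defined in trace.go below); a non-zero code marks the span as errored, and spanstore.go above files such spans into per-code error buckets when the local span store is enabled.

package main

import (
    "context"
    "errors"

    "go.opencensus.io/trace"
)

func lookup(ctx context.Context, key string) error {
    _, span := trace.StartSpan(ctx, "example.com/lookup")
    defer span.End()

    err := errors.New("key " + key + " missing") // stand-in for a real lookup failure
    if err != nil {
        span.SetStatus(trace.Status{Code: trace.StatusCodeNotFound, Message: err.Error()})
    }
    return err
}

func main() { _ = lookup(context.Background(), "user:42") }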

513
vendor/go.opencensus.io/trace/trace.go generated vendored Normal file
View File

@ -0,0 +1,513 @@
// Copyright 2017, OpenCensus Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package trace
import (
"context"
crand "crypto/rand"
"encoding/binary"
"fmt"
"math/rand"
"sync"
"sync/atomic"
"time"
"go.opencensus.io/internal"
"go.opencensus.io/trace/tracestate"
)
// Span represents a span of a trace. It has an associated SpanContext, and
// stores data accumulated while the span is active.
//
// Ideally users should interact with Spans by calling the functions in this
// package that take a Context parameter.
type Span struct {
// data contains information recorded about the span.
//
// It will be non-nil if we are exporting the span or recording events for it.
// Otherwise, data is nil, and the Span is simply a carrier for the
// SpanContext, so that the trace ID is propagated.
data *SpanData
mu sync.Mutex // protects the contents of *data (but not the pointer value.)
spanContext SpanContext
// spanStore is the spanStore this span belongs to, if any, otherwise it is nil.
*spanStore
endOnce sync.Once
executionTracerTaskEnd func() // ends the execution tracer span
}
// IsRecordingEvents returns true if events are being recorded for this span.
// Use this check to avoid computing expensive annotations when they will never
// be used.
func (s *Span) IsRecordingEvents() bool {
if s == nil {
return false
}
return s.data != nil
}
// TraceOptions contains options associated with a trace span.
type TraceOptions uint32
// IsSampled returns true if the span will be exported.
func (sc SpanContext) IsSampled() bool {
return sc.TraceOptions.IsSampled()
}
// setIsSampled sets the TraceOptions bit that determines whether the span will be exported.
func (sc *SpanContext) setIsSampled(sampled bool) {
if sampled {
sc.TraceOptions |= 1
} else {
sc.TraceOptions &= ^TraceOptions(1)
}
}
// IsSampled returns true if the span will be exported.
func (t TraceOptions) IsSampled() bool {
return t&1 == 1
}
// SpanContext contains the state that must propagate across process boundaries.
//
// SpanContext is not an implementation of context.Context.
// TODO: add reference to external Census docs for SpanContext.
type SpanContext struct {
TraceID TraceID
SpanID SpanID
TraceOptions TraceOptions
Tracestate *tracestate.Tracestate
}
type contextKey struct{}
// FromContext returns the Span stored in a context, or nil if there isn't one.
func FromContext(ctx context.Context) *Span {
s, _ := ctx.Value(contextKey{}).(*Span)
return s
}
// NewContext returns a new context with the given Span attached.
func NewContext(parent context.Context, s *Span) context.Context {
return context.WithValue(parent, contextKey{}, s)
}
// All available span kinds. Span kind must be either one of these values.
const (
SpanKindUnspecified = iota
SpanKindServer
SpanKindClient
)
// StartOptions contains options concerning how a span is started.
type StartOptions struct {
// Sampler to consult for this Span. If provided, it is always consulted.
//
// If not provided, then the behavior differs based on whether
// the parent of this Span is remote, local, or there is no parent.
// In the case of a remote parent or no parent, the
// default sampler (see Config) will be consulted. Otherwise,
// when there is a non-remote parent, no new sampling decision will be made:
// we will preserve the sampling of the parent.
Sampler Sampler
// SpanKind represents the kind of a span. If none is set,
// SpanKindUnspecified is used.
SpanKind int
}
// StartOption apply changes to StartOptions.
type StartOption func(*StartOptions)
// WithSpanKind makes new spans to be created with the given kind.
func WithSpanKind(spanKind int) StartOption {
return func(o *StartOptions) {
o.SpanKind = spanKind
}
}
// WithSampler makes new spans to be created with a custom sampler.
// Otherwise, the global sampler is used.
func WithSampler(sampler Sampler) StartOption {
return func(o *StartOptions) {
o.Sampler = sampler
}
}
// StartSpan starts a new child span of the current span in the context. If
// there is no span in the context, creates a new trace and span.
//
// Returned context contains the newly created span. You can use it to
// propagate the returned span in process.
func StartSpan(ctx context.Context, name string, o ...StartOption) (context.Context, *Span) {
var opts StartOptions
var parent SpanContext
if p := FromContext(ctx); p != nil {
parent = p.spanContext
}
for _, op := range o {
op(&opts)
}
span := startSpanInternal(name, parent != SpanContext{}, parent, false, opts)
ctx, end := startExecutionTracerTask(ctx, name)
span.executionTracerTaskEnd = end
return NewContext(ctx, span), span
}
// StartSpanWithRemoteParent starts a new child span of the span from the given parent.
//
// If the incoming context contains a parent, it is ignored. StartSpanWithRemoteParent is
// preferred for cases where the parent is propagated via an incoming request.
//
// Returned context contains the newly created span. You can use it to
// propagate the returned span in process.
func StartSpanWithRemoteParent(ctx context.Context, name string, parent SpanContext, o ...StartOption) (context.Context, *Span) {
var opts StartOptions
for _, op := range o {
op(&opts)
}
span := startSpanInternal(name, parent != SpanContext{}, parent, true, opts)
ctx, end := startExecutionTracerTask(ctx, name)
span.executionTracerTaskEnd = end
return NewContext(ctx, span), span
}
func startSpanInternal(name string, hasParent bool, parent SpanContext, remoteParent bool, o StartOptions) *Span {
span := &Span{}
span.spanContext = parent
cfg := config.Load().(*Config)
if !hasParent {
span.spanContext.TraceID = cfg.IDGenerator.NewTraceID()
}
span.spanContext.SpanID = cfg.IDGenerator.NewSpanID()
sampler := cfg.DefaultSampler
if !hasParent || remoteParent || o.Sampler != nil {
// If this span is the child of a local span and no Sampler is set in the
// options, keep the parent's TraceOptions.
//
// Otherwise, consult the Sampler in the options if it is non-nil, otherwise
// the default sampler.
if o.Sampler != nil {
sampler = o.Sampler
}
span.spanContext.setIsSampled(sampler(SamplingParameters{
ParentContext: parent,
TraceID: span.spanContext.TraceID,
SpanID: span.spanContext.SpanID,
Name: name,
HasRemoteParent: remoteParent}).Sample)
}
if !internal.LocalSpanStoreEnabled && !span.spanContext.IsSampled() {
return span
}
span.data = &SpanData{
SpanContext: span.spanContext,
StartTime: time.Now(),
SpanKind: o.SpanKind,
Name: name,
HasRemoteParent: remoteParent,
}
if hasParent {
span.data.ParentSpanID = parent.SpanID
}
if internal.LocalSpanStoreEnabled {
var ss *spanStore
ss = spanStoreForNameCreateIfNew(name)
if ss != nil {
span.spanStore = ss
ss.add(span)
}
}
return span
}
// End ends the span.
func (s *Span) End() {
if !s.IsRecordingEvents() {
return
}
s.endOnce.Do(func() {
if s.executionTracerTaskEnd != nil {
s.executionTracerTaskEnd()
}
exp, _ := exporters.Load().(exportersMap)
mustExport := s.spanContext.IsSampled() && len(exp) > 0
if s.spanStore != nil || mustExport {
sd := s.makeSpanData()
sd.EndTime = internal.MonotonicEndTime(sd.StartTime)
if s.spanStore != nil {
s.spanStore.finished(s, sd)
}
if mustExport {
for e := range exp {
e.ExportSpan(sd)
}
}
}
})
}
// makeSpanData produces a SpanData representing the current state of the Span.
// It requires that s.data is non-nil.
func (s *Span) makeSpanData() *SpanData {
var sd SpanData
s.mu.Lock()
sd = *s.data
if s.data.Attributes != nil {
sd.Attributes = make(map[string]interface{})
for k, v := range s.data.Attributes {
sd.Attributes[k] = v
}
}
s.mu.Unlock()
return &sd
}
// SpanContext returns the SpanContext of the span.
func (s *Span) SpanContext() SpanContext {
if s == nil {
return SpanContext{}
}
return s.spanContext
}
// SetName sets the name of the span, if it is recording events.
func (s *Span) SetName(name string) {
if !s.IsRecordingEvents() {
return
}
s.mu.Lock()
s.data.Name = name
s.mu.Unlock()
}
// SetStatus sets the status of the span, if it is recording events.
func (s *Span) SetStatus(status Status) {
if !s.IsRecordingEvents() {
return
}
s.mu.Lock()
s.data.Status = status
s.mu.Unlock()
}
// AddAttributes sets attributes in the span.
//
// Existing attributes whose keys appear in the attributes parameter are overwritten.
func (s *Span) AddAttributes(attributes ...Attribute) {
if !s.IsRecordingEvents() {
return
}
s.mu.Lock()
if s.data.Attributes == nil {
s.data.Attributes = make(map[string]interface{})
}
copyAttributes(s.data.Attributes, attributes)
s.mu.Unlock()
}
// copyAttributes copies a slice of Attributes into a map.
func copyAttributes(m map[string]interface{}, attributes []Attribute) {
for _, a := range attributes {
m[a.key] = a.value
}
}
func (s *Span) lazyPrintfInternal(attributes []Attribute, format string, a ...interface{}) {
now := time.Now()
msg := fmt.Sprintf(format, a...)
var m map[string]interface{}
s.mu.Lock()
if len(attributes) != 0 {
m = make(map[string]interface{})
copyAttributes(m, attributes)
}
s.data.Annotations = append(s.data.Annotations, Annotation{
Time: now,
Message: msg,
Attributes: m,
})
s.mu.Unlock()
}
func (s *Span) printStringInternal(attributes []Attribute, str string) {
now := time.Now()
var a map[string]interface{}
s.mu.Lock()
if len(attributes) != 0 {
a = make(map[string]interface{})
copyAttributes(a, attributes)
}
s.data.Annotations = append(s.data.Annotations, Annotation{
Time: now,
Message: str,
Attributes: a,
})
s.mu.Unlock()
}
// Annotate adds an annotation with attributes.
// Attributes can be nil.
func (s *Span) Annotate(attributes []Attribute, str string) {
if !s.IsRecordingEvents() {
return
}
s.printStringInternal(attributes, str)
}
// Annotatef adds an annotation with attributes.
func (s *Span) Annotatef(attributes []Attribute, format string, a ...interface{}) {
if !s.IsRecordingEvents() {
return
}
s.lazyPrintfInternal(attributes, format, a...)
}
// AddMessageSendEvent adds a message send event to the span.
//
// messageID is an identifier for the message. It is recommended to be unique
// within this span and identical between the send event and the corresponding
// receive event, so that the message can be matched between sender and receiver.
// For example, this could be a sequence id.
func (s *Span) AddMessageSendEvent(messageID, uncompressedByteSize, compressedByteSize int64) {
if !s.IsRecordingEvents() {
return
}
now := time.Now()
s.mu.Lock()
s.data.MessageEvents = append(s.data.MessageEvents, MessageEvent{
Time: now,
EventType: MessageEventTypeSent,
MessageID: messageID,
UncompressedByteSize: uncompressedByteSize,
CompressedByteSize: compressedByteSize,
})
s.mu.Unlock()
}
// AddMessageReceiveEvent adds a message receive event to the span.
//
// messageID is an identifier for the message. It is recommended to be unique
// within this span and identical between the send event and the corresponding
// receive event, so that the message can be matched between sender and receiver.
// For example, this could be a sequence id.
func (s *Span) AddMessageReceiveEvent(messageID, uncompressedByteSize, compressedByteSize int64) {
if !s.IsRecordingEvents() {
return
}
now := time.Now()
s.mu.Lock()
s.data.MessageEvents = append(s.data.MessageEvents, MessageEvent{
Time: now,
EventType: MessageEventTypeRecv,
MessageID: messageID,
UncompressedByteSize: uncompressedByteSize,
CompressedByteSize: compressedByteSize,
})
s.mu.Unlock()
}
// AddLink adds a link to the span.
func (s *Span) AddLink(l Link) {
if !s.IsRecordingEvents() {
return
}
s.mu.Lock()
s.data.Links = append(s.data.Links, l)
s.mu.Unlock()
}
func (s *Span) String() string {
if s == nil {
return "<nil>"
}
if s.data == nil {
return fmt.Sprintf("span %s", s.spanContext.SpanID)
}
s.mu.Lock()
str := fmt.Sprintf("span %s %q", s.spanContext.SpanID, s.data.Name)
s.mu.Unlock()
return str
}
var config atomic.Value // access atomically
func init() {
gen := &defaultIDGenerator{}
// initialize traceID and spanID generators.
var rngSeed int64
for _, p := range []interface{}{
&rngSeed, &gen.traceIDAdd, &gen.nextSpanID, &gen.spanIDInc,
} {
binary.Read(crand.Reader, binary.LittleEndian, p)
}
gen.traceIDRand = rand.New(rand.NewSource(rngSeed))
gen.spanIDInc |= 1
config.Store(&Config{
DefaultSampler: ProbabilitySampler(defaultSamplingProbability),
IDGenerator: gen,
})
}
type defaultIDGenerator struct {
sync.Mutex
// Please keep these as the first fields
// so that these 8 byte fields will be aligned on addresses
// divisible by 8, on both 32-bit and 64-bit machines when
// performing atomic increments and accesses.
// See:
// * https://github.com/census-instrumentation/opencensus-go/issues/587
// * https://github.com/census-instrumentation/opencensus-go/issues/865
// * https://golang.org/pkg/sync/atomic/#pkg-note-BUG
nextSpanID uint64
spanIDInc uint64
traceIDAdd [2]uint64
traceIDRand *rand.Rand
}
// NewSpanID returns a non-zero span ID from a randomly-chosen sequence.
func (gen *defaultIDGenerator) NewSpanID() [8]byte {
var id uint64
for id == 0 {
id = atomic.AddUint64(&gen.nextSpanID, gen.spanIDInc)
}
var sid [8]byte
binary.LittleEndian.PutUint64(sid[:], id)
return sid
}
// NewTraceID returns a non-zero trace ID from a randomly-chosen sequence.
// The generator's mutex is acquired internally, so callers do not need to hold it.
func (gen *defaultIDGenerator) NewTraceID() [16]byte {
var tid [16]byte
// Construct the trace ID from two outputs of traceIDRand, with a constant
// added to each half for additional entropy.
gen.Lock()
binary.LittleEndian.PutUint64(tid[0:8], gen.traceIDRand.Uint64()+gen.traceIDAdd[0])
binary.LittleEndian.PutUint64(tid[8:16], gen.traceIDRand.Uint64()+gen.traceIDAdd[1])
gen.Unlock()
return tid
}
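A minimal usage sketch of the Span API vendored above, assuming the go.opencensus.io/trace package as imported here (including ApplyConfig and AlwaysSample); the span names, bucket attribute, and message sizes are illustrative:

package main

import (
	"context"
	"log"

	"go.opencensus.io/trace"
)

func main() {
	// Sample every span so End() delivers SpanData to any registered
	// exporters (the package default, per init above, is a probability sampler).
	trace.ApplyConfig(trace.Config{DefaultSampler: trace.AlwaysSample()})

	ctx, span := trace.StartSpan(context.Background(), "gcs.GetObject")
	defer span.End()

	span.AddAttributes(trace.StringAttribute("bucket", "my-bucket"))
	span.Annotate(nil, "issuing ranged read")
	// Pair this with AddMessageReceiveEvent(1, ...) on the receiving side
	// so the two events can be matched by messageID.
	span.AddMessageSendEvent(1, 1024, 512)

	// Child spans started from ctx are linked to this span via ParentSpanID.
	_, child := trace.StartSpan(ctx, "gcs.rangeRead")
	child.End()

	log.Println(span.String())
}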

32
vendor/go.opencensus.io/trace/trace_go11.go generated vendored Normal file
View File

@ -0,0 +1,32 @@
// Copyright 2018, OpenCensus Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// +build go1.11
package trace
import (
"context"
t "runtime/trace"
)
func startExecutionTracerTask(ctx context.Context, name string) (context.Context, func()) {
if !t.IsEnabled() {
// Avoid additional overhead if
// runtime/trace is not enabled.
return ctx, func() {}
}
nctx, task := t.NewTask(ctx, name)
return nctx, task.End
}
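For context, a small sketch of the runtime/trace task API that this go1.11 shim wraps; the output file name, task name, and region name are illustrative, and the task is only recorded while an execution trace is being collected:

package main

import (
	"context"
	"os"
	"runtime/trace"
)

func main() {
	// Record an execution trace; inspect it later with `go tool trace trace.out`.
	f, err := os.Create("trace.out")
	if err != nil {
		panic(err)
	}
	defer f.Close()
	if err := trace.Start(f); err != nil {
		panic(err)
	}
	defer trace.Stop()

	// startExecutionTracerTask above boils down to this on go1.11+: a task
	// groups the goroutines and regions that belong to one span.
	ctx, task := trace.NewTask(context.Background(), "gcs.GetObject")
	defer task.End()

	trace.WithRegion(ctx, "rangeRead", func() {
		// ... perform the ranged read ...
	})
}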

25
vendor/go.opencensus.io/trace/trace_nongo11.go generated vendored Normal file
View File

@ -0,0 +1,25 @@
// Copyright 2018, OpenCensus Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// +build !go1.11
package trace
import (
"context"
)
func startExecutionTracerTask(ctx context.Context, name string) (context.Context, func()) {
return ctx, func() {}
}

145
vendor/go.opencensus.io/trace/tracestate/tracestate.go generated vendored Normal file
View File

@ -0,0 +1,145 @@
// Copyright 2018, OpenCensus Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package tracestate
import (
"fmt"
"regexp"
)
const (
keyMaxSize = 256
valueMaxSize = 256
maxKeyValuePairs = 32
)
const (
keyWithoutVendorFormat = `[a-z][_0-9a-z\-\*\/]{0,255}`
keyWithVendorFormat = `[a-z][_0-9a-z\-\*\/]{0,240}@[a-z][_0-9a-z\-\*\/]{0,13}`
keyFormat = `(` + keyWithoutVendorFormat + `)|(` + keyWithVendorFormat + `)`
valueFormat = `[\x20-\x2b\x2d-\x3c\x3e-\x7e]{0,255}[\x21-\x2b\x2d-\x3c\x3e-\x7e]`
)
var keyValidationRegExp = regexp.MustCompile(`^(` + keyFormat + `)$`)
var valueValidationRegExp = regexp.MustCompile(`^(` + valueFormat + `)$`)
// Tracestate represents tracing-system-specific context as a list of key-value pairs. Tracestate allows different
// vendors to propagate additional information and to interoperate with their legacy id formats.
type Tracestate struct {
entries []Entry
}
// Entry represents one key-value pair in a list of key-value pair of Tracestate.
type Entry struct {
// Key is an opaque string of up to 256 printable characters. It MUST begin with a lowercase letter,
// and can only contain lowercase letters a-z, digits 0-9, underscores _, dashes -, asterisks *, and
// forward slashes /.
Key string
// Value is an opaque string of up to 256 printable ASCII (RFC0020) characters (i.e., in the
// range 0x20 to 0x7E), excluding comma (,) and equals (=).
Value string
}
// Entries returns a slice of Entry.
func (ts *Tracestate) Entries() []Entry {
if ts == nil {
return nil
}
return ts.entries
}
func (ts *Tracestate) remove(key string) *Entry {
for index, entry := range ts.entries {
if entry.Key == key {
ts.entries = append(ts.entries[:index], ts.entries[index+1:]...)
return &entry
}
}
return nil
}
func (ts *Tracestate) add(entries []Entry) error {
for _, entry := range entries {
ts.remove(entry.Key)
}
if len(ts.entries)+len(entries) > maxKeyValuePairs {
return fmt.Errorf("adding %d key-value pairs to current %d pairs exceeds the limit of %d",
len(entries), len(ts.entries), maxKeyValuePairs)
}
ts.entries = append(entries, ts.entries...)
return nil
}
func isValid(entry Entry) bool {
return keyValidationRegExp.MatchString(entry.Key) &&
valueValidationRegExp.MatchString(entry.Value)
}
func containsDuplicateKey(entries ...Entry) (string, bool) {
keyMap := make(map[string]int)
for _, entry := range entries {
if _, ok := keyMap[entry.Key]; ok {
return entry.Key, true
}
keyMap[entry.Key] = 1
}
return "", false
}
func areEntriesValid(entries ...Entry) (*Entry, bool) {
for _, entry := range entries {
if !isValid(entry) {
return &entry, false
}
}
return nil, true
}
// New creates a Tracestate object from a parent and/or entries (key-value pairs).
// Entries from the parent are copied if present. The entries passed to this function
// are inserted in front of those copied from the parent. If an entry copied from the
// parent has the same key as one of the passed-in entries, the entry copied from the
// parent is removed. See the add func.
//
// An error is returned with a nil Tracestate if
// 1. one or more of the input entries are invalid,
// 2. two or more of the input entries have the same key, or
// 3. the number of entries combined from the parent and the input exceeds maxKeyValuePairs
// (a duplicate entry is counted only once).
func New(parent *Tracestate, entries ...Entry) (*Tracestate, error) {
if parent == nil && len(entries) == 0 {
return nil, nil
}
if entry, ok := areEntriesValid(entries...); !ok {
return nil, fmt.Errorf("key-value pair {%s, %s} is invalid", entry.Key, entry.Value)
}
if key, duplicate := containsDuplicateKey(entries...); duplicate {
return nil, fmt.Errorf("contains duplicate keys (%s)", key)
}
tracestate := Tracestate{}
if parent != nil && len(parent.entries) > 0 {
tracestate.entries = append([]Entry{}, parent.entries...)
}
err := tracestate.add(entries)
if err != nil {
return nil, err
}
return &tracestate, nil
}
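A short sketch of how the tracestate package above is used; the keys and values are illustrative (borrowed from the W3C tracestate examples) and assume the vendored import path:

package main

import (
	"fmt"

	"go.opencensus.io/trace/tracestate"
)

func main() {
	parent, err := tracestate.New(nil, tracestate.Entry{Key: "congo", Value: "t61rcWkgMzE"})
	if err != nil {
		panic(err)
	}

	// The new entry is inserted in front of the entries copied from the
	// parent; a parent entry with the same key would be dropped first.
	child, err := tracestate.New(parent, tracestate.Entry{Key: "rojo", Value: "00f067aa0ba902b7"})
	if err != nil {
		panic(err)
	}

	for _, e := range child.Entries() {
		fmt.Printf("%s=%s\n", e.Key, e.Value)
	}
	// Prints:
	//   rojo=00f067aa0ba902b7
	//   congo=t61rcWkgMzE
}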

2
vendor/golang.org/x/oauth2/LICENSE generated vendored
View File

@ -1,4 +1,4 @@
- Copyright (c) 2009 The oauth2 Authors. All rights reserved.
+ Copyright (c) 2009 The Go Authors. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are

vendor/golang.org/x/oauth2/google/default.go generated vendored
View File

@ -18,14 +18,6 @@ import (
"golang.org/x/oauth2"
)
- // DefaultCredentials holds "Application Default Credentials".
- // For more details, see:
- // https://developers.google.com/accounts/docs/application-default-credentials
- type DefaultCredentials struct {
- ProjectID string // may be empty
- TokenSource oauth2.TokenSource
- }
// DefaultClient returns an HTTP Client that uses the
// DefaultTokenSource to obtain authentication credentials.
func DefaultClient(ctx context.Context, scope ...string) (*http.Client, error) {
@ -47,25 +39,12 @@ func DefaultTokenSource(ctx context.Context, scope ...string) (oauth2.TokenSourc
return creds.TokenSource, nil
}
- // FindDefaultCredentials searches for "Application Default Credentials".
- //
- // It looks for credentials in the following places,
- // preferring the first location found:
- //
- // 1. A JSON file whose path is specified by the
- // GOOGLE_APPLICATION_CREDENTIALS environment variable.
- // 2. A JSON file in a location known to the gcloud command-line tool.
- // On Windows, this is %APPDATA%/gcloud/application_default_credentials.json.
- // On other systems, $HOME/.config/gcloud/application_default_credentials.json.
- // 3. On Google App Engine it uses the appengine.AccessToken function.
- // 4. On Google Compute Engine and Google App Engine Managed VMs, it fetches
- // credentials from the metadata server.
- // (In this final case any provided scopes are ignored.)
- func FindDefaultCredentials(ctx context.Context, scope ...string) (*DefaultCredentials, error) {
+ // Common implementation for FindDefaultCredentials.
+ func findDefaultCredentials(ctx context.Context, scopes []string) (*DefaultCredentials, error) {
// First, try the environment variable.
const envVar = "GOOGLE_APPLICATION_CREDENTIALS"
if filename := os.Getenv(envVar); filename != "" {
- creds, err := readCredentialsFile(ctx, filename, scope)
+ creds, err := readCredentialsFile(ctx, filename, scopes)
if err != nil {
return nil, fmt.Errorf("google: error getting credentials using %v environment variable: %v", envVar, err)
}
@ -74,7 +53,7 @@ func FindDefaultCredentials(ctx context.Context, scope ...string) (*DefaultCrede
// Second, try a well-known file.
filename := wellKnownFile()
- if creds, err := readCredentialsFile(ctx, filename, scope); err == nil {
+ if creds, err := readCredentialsFile(ctx, filename, scopes); err == nil {
return creds, nil
} else if !os.IsNotExist(err) {
return nil, fmt.Errorf("google: error getting credentials using well-known file (%v): %v", filename, err)
@ -84,7 +63,7 @@ func FindDefaultCredentials(ctx context.Context, scope ...string) (*DefaultCrede
if appengineTokenFunc != nil && !appengineFlex {
return &DefaultCredentials{
ProjectID: appengineAppIDFunc(ctx),
- TokenSource: AppEngineTokenSource(ctx, scope...),
+ TokenSource: AppEngineTokenSource(ctx, scopes...),
}, nil
}
@ -102,6 +81,23 @@ func FindDefaultCredentials(ctx context.Context, scope ...string) (*DefaultCrede
return nil, fmt.Errorf("google: could not find default credentials. See %v for more information.", url)
}
+ // Common implementation for CredentialsFromJSON.
+ func credentialsFromJSON(ctx context.Context, jsonData []byte, scopes []string) (*DefaultCredentials, error) {
+ var f credentialsFile
+ if err := json.Unmarshal(jsonData, &f); err != nil {
+ return nil, err
+ }
+ ts, err := f.tokenSource(ctx, append([]string(nil), scopes...))
+ if err != nil {
+ return nil, err
+ }
+ return &DefaultCredentials{
+ ProjectID: f.ProjectID,
+ TokenSource: ts,
+ JSON: jsonData,
+ }, nil
+ }
func wellKnownFile() string {
const f = "application_default_credentials.json"
if runtime.GOOS == "windows" {
@ -115,16 +111,5 @@ func readCredentialsFile(ctx context.Context, filename string, scopes []string)
if err != nil {
return nil, err
}
- var f credentialsFile
- if err := json.Unmarshal(b, &f); err != nil {
- return nil, err
- }
- ts, err := f.tokenSource(ctx, append([]string(nil), scopes...))
- if err != nil {
- return nil, err
- }
- return &DefaultCredentials{
- ProjectID: f.ProjectID,
- TokenSource: ts,
- }, nil
+ return CredentialsFromJSON(ctx, b, scopes...)
}
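A hedged sketch of the CredentialsFromJSON path that the refactor above supports, showing the raw JSON now carried on the returned credentials; the file path and scope are illustrative, and it assumes the go1.9+ Credentials type with the JSON field added in this update:

package main

import (
	"context"
	"io/ioutil"
	"log"

	"golang.org/x/oauth2/google"
)

func main() {
	ctx := context.Background()

	// Illustrative path; any service-account or authorized-user JSON works.
	data, err := ioutil.ReadFile("service-account.json")
	if err != nil {
		log.Fatal(err)
	}

	creds, err := google.CredentialsFromJSON(ctx, data,
		"https://www.googleapis.com/auth/devstorage.read_only")
	if err != nil {
		log.Fatal(err)
	}

	// credentialsFromJSON above threads the raw JSON through, so it is
	// available again on the returned credentials.
	log.Printf("project=%s jsonBytes=%d", creds.ProjectID, len(creds.JSON))

	tok, err := creds.TokenSource.Token()
	if err != nil {
		log.Fatal(err)
	}
	log.Println("token expires:", tok.Expiry)
}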

42
vendor/golang.org/x/oauth2/google/doc_go19.go generated vendored Normal file
View File

@ -0,0 +1,42 @@
// Copyright 2018 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build go1.9
// Package google provides support for making OAuth2 authorized and authenticated
// HTTP requests to Google APIs. It supports the Web server flow, client-side
// credentials, service accounts, Google Compute Engine service accounts, and Google
// App Engine service accounts.
//
// A brief overview of the package follows. For more information, please read
// https://developers.google.com/accounts/docs/OAuth2
// and
// https://developers.google.com/accounts/docs/application-default-credentials.
//
// OAuth2 Configs
//
// Two functions in this package return golang.org/x/oauth2.Config values from Google credential
// data. Google supports two JSON formats for OAuth2 credentials: one is handled by ConfigFromJSON,
// the other by JWTConfigFromJSON. The returned Config can be used to obtain a TokenSource or
// create an http.Client.
//
//
// Credentials
//
// The Credentials type represents Google credentials, including Application Default
// Credentials.
//
// Use FindDefaultCredentials to obtain Application Default Credentials.
// FindDefaultCredentials looks in some well-known places for a credentials file, and
// will call AppEngineTokenSource or ComputeTokenSource as needed.
//
// DefaultClient and DefaultTokenSource are convenience methods. They first call FindDefaultCredentials,
// then use the credentials to construct an http.Client or an oauth2.TokenSource.
//
// Use CredentialsFromJSON to obtain credentials from either of the two JSON formats
// described in OAuth2 Configs, above. The TokenSource in the returned value is the
// same as the one obtained from the oauth2.Config returned from ConfigFromJSON or
// JWTConfigFromJSON, but the Credentials may contain additional information
// that is useful in some circumstances.
package google // import "golang.org/x/oauth2/google"
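A brief sketch of the Application Default Credentials flow this package doc describes; the scope, project name, and request URL are illustrative:

package main

import (
	"context"
	"log"

	"golang.org/x/oauth2/google"
)

const scope = "https://www.googleapis.com/auth/devstorage.read_only"

func main() {
	ctx := context.Background()

	// Checks, in order: GOOGLE_APPLICATION_CREDENTIALS, the gcloud well-known
	// file, App Engine, then the GCE metadata server.
	creds, err := google.FindDefaultCredentials(ctx, scope)
	if err != nil {
		log.Fatal(err)
	}
	log.Println("project:", creds.ProjectID)

	// DefaultClient is the shortcut: FindDefaultCredentials plus an
	// oauth2-authenticated http.Client.
	client, err := google.DefaultClient(ctx, scope)
	if err != nil {
		log.Fatal(err)
	}
	resp, err := client.Get("https://www.googleapis.com/storage/v1/b?project=my-project")
	if err != nil {
		log.Fatal(err)
	}
	defer resp.Body.Close()
	log.Println("status:", resp.Status)
}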

Some files were not shown because too many files have changed in this diff.