From 94670a387ef62d239972fd88a56cdcf776a17a9d Mon Sep 17 00:00:00 2001
From: Aditya Manthramurthy
Date: Fri, 29 Sep 2017 03:53:46 +0530
Subject: [PATCH] Update Azure SDK (#4985)

---
 cmd/gateway-azure-anonymous.go | 67 +-
 cmd/gateway-azure.go | 275 +--
 cmd/gateway-azure_test.go | 44 +-
 .../github.com/Azure/azure-sdk-for-go/NOTICE | 5 +
 .../Azure/azure-sdk-for-go/storage/README.md | 5 -
 .../azure-sdk-for-go/storage/appendblob.go | 77 +
 .../azure-sdk-for-go/storage/authorization.go | 43 +-
 .../Azure/azure-sdk-for-go/storage/blob.go | 1642 ++++-------------
 .../azure-sdk-for-go/storage/blobsasuri.go | 108 ++
 .../storage/blobserviceclient.go | 95 +
 .../azure-sdk-for-go/storage/blockblob.go | 256 +++
 .../Azure/azure-sdk-for-go/storage/client.go | 342 +++-
 .../azure-sdk-for-go/storage/container.go | 453 +++++
 .../azure-sdk-for-go/storage/copyblob.go | 223 +++
 .../azure-sdk-for-go/storage/directory.go | 53 +-
 .../Azure/azure-sdk-for-go/storage/entity.go | 439 +++++
 .../Azure/azure-sdk-for-go/storage/file.go | 226 ++-
 .../storage/fileserviceclient.go | 136 +-
 .../azure-sdk-for-go/storage/leaseblob.go | 187 ++
 .../Azure/azure-sdk-for-go/storage/message.go | 153 ++
 .../Azure/azure-sdk-for-go/storage/odata.go | 33 +
 .../azure-sdk-for-go/storage/pageblob.go | 190 ++
 .../Azure/azure-sdk-for-go/storage/queue.go | 595 +++---
 .../storage/queueserviceclient.go | 28 +
 .../Azure/azure-sdk-for-go/storage/share.go | 70 +-
 .../storage/storageservice.go | 117 ++
 .../Azure/azure-sdk-for-go/storage/table.go | 373 ++--
 .../azure-sdk-for-go/storage/table_batch.go | 302 +++
 .../storage/table_entities.go | 345 ----
 .../storage/tableserviceclient.go | 190 ++
 .../Azure/azure-sdk-for-go/storage/util.go | 120 +-
 .../azure-sdk-for-go/storage/util_1.7.go | 12 +
 .../azure-sdk-for-go/storage/util_1.8.go | 18 +
 .../Azure/azure-sdk-for-go/storage/version.go | 2 +-
 vendor/github.com/Azure/go-autorest/LICENSE | 191 ++
 .../Azure/go-autorest/autorest/adal/README.md | 253 +++
 .../Azure/go-autorest/autorest/adal/config.go | 51 +
 .../go-autorest/autorest/adal/devicetoken.go | 228 +++
 .../Azure/go-autorest/autorest/adal/msi.go | 6 +
 .../go-autorest/autorest/adal/msi_windows.go | 11 +
 .../go-autorest/autorest/adal/persist.go | 59 +
 .../Azure/go-autorest/autorest/adal/sender.go | 46 +
 .../Azure/go-autorest/autorest/adal/token.go | 413 +++++
 .../go-autorest/autorest/authorization.go | 167 ++
 .../Azure/go-autorest/autorest/autorest.go | 115 ++
 .../Azure/go-autorest/autorest/azure/async.go | 302 +++
 .../Azure/go-autorest/autorest/azure/azure.go | 180 ++
 .../autorest/azure/environments.go | 130 ++
 .../Azure/go-autorest/autorest/client.go | 236 +++
 .../Azure/go-autorest/autorest/date/date.go | 82 +
 .../Azure/go-autorest/autorest/date/time.go | 89 +
 .../go-autorest/autorest/date/timerfc1123.go | 86 +
 .../go-autorest/autorest/date/unixtime.go | 109 ++
 .../go-autorest/autorest/date/utility.go | 11 +
 .../Azure/go-autorest/autorest/error.go | 84 +
 .../Azure/go-autorest/autorest/preparer.go | 428 +++++
 .../Azure/go-autorest/autorest/responder.go | 236 +++
 .../go-autorest/autorest/retriablerequest.go | 38 +
 .../autorest/retriablerequest_1.7.go | 44 +
 .../autorest/retriablerequest_1.8.go | 56 +
 .../Azure/go-autorest/autorest/sender.go | 293 +++
 .../Azure/go-autorest/autorest/utility.go | 178 ++
 .../Azure/go-autorest/autorest/version.go | 35 +
 vendor/github.com/satori/uuid/LICENSE | 20 +
 vendor/github.com/satori/uuid/README.md | 65 +
 vendor/github.com/satori/uuid/uuid.go | 481 +++++
 vendor/vendor.json | 36 +-
 67 files changed, 9548 insertions(+), 2435 deletions(-)
 create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/NOTICE
 delete mode 100644 vendor/github.com/Azure/azure-sdk-for-go/storage/README.md
 create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/storage/appendblob.go
 create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/storage/blobsasuri.go
 create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/storage/blobserviceclient.go
 create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/storage/blockblob.go
 create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/storage/container.go
 create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/storage/copyblob.go
 create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/storage/entity.go
 create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/storage/leaseblob.go
 create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/storage/message.go
 create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/storage/odata.go
 create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/storage/pageblob.go
 create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/storage/queueserviceclient.go
 create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/storage/storageservice.go
 create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/storage/table_batch.go
 delete mode 100644 vendor/github.com/Azure/azure-sdk-for-go/storage/table_entities.go
 create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/storage/tableserviceclient.go
 create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/storage/util_1.7.go
 create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/storage/util_1.8.go
 create mode 100644 vendor/github.com/Azure/go-autorest/LICENSE
 create mode 100644 vendor/github.com/Azure/go-autorest/autorest/adal/README.md
 create mode 100644 vendor/github.com/Azure/go-autorest/autorest/adal/config.go
 create mode 100644 vendor/github.com/Azure/go-autorest/autorest/adal/devicetoken.go
 create mode 100644 vendor/github.com/Azure/go-autorest/autorest/adal/msi.go
 create mode 100644 vendor/github.com/Azure/go-autorest/autorest/adal/msi_windows.go
 create mode 100644 vendor/github.com/Azure/go-autorest/autorest/adal/persist.go
 create mode 100644 vendor/github.com/Azure/go-autorest/autorest/adal/sender.go
 create mode 100644 vendor/github.com/Azure/go-autorest/autorest/adal/token.go
 create mode 100644 vendor/github.com/Azure/go-autorest/autorest/authorization.go
 create mode 100644 vendor/github.com/Azure/go-autorest/autorest/autorest.go
 create mode 100644 vendor/github.com/Azure/go-autorest/autorest/azure/async.go
 create mode 100644 vendor/github.com/Azure/go-autorest/autorest/azure/azure.go
 create mode 100644 vendor/github.com/Azure/go-autorest/autorest/azure/environments.go
 create mode 100644 vendor/github.com/Azure/go-autorest/autorest/client.go
 create mode 100644 vendor/github.com/Azure/go-autorest/autorest/date/date.go
 create mode 100644 vendor/github.com/Azure/go-autorest/autorest/date/time.go
 create mode 100644 vendor/github.com/Azure/go-autorest/autorest/date/timerfc1123.go
 create mode 100644 vendor/github.com/Azure/go-autorest/autorest/date/unixtime.go
 create mode 100644 vendor/github.com/Azure/go-autorest/autorest/date/utility.go
 create mode 100644 vendor/github.com/Azure/go-autorest/autorest/error.go
 create mode 100644 vendor/github.com/Azure/go-autorest/autorest/preparer.go
 create mode 100644 vendor/github.com/Azure/go-autorest/autorest/responder.go
 create mode 100644 vendor/github.com/Azure/go-autorest/autorest/retriablerequest.go
 create mode 100644
vendor/github.com/Azure/go-autorest/autorest/retriablerequest_1.7.go create mode 100644 vendor/github.com/Azure/go-autorest/autorest/retriablerequest_1.8.go create mode 100644 vendor/github.com/Azure/go-autorest/autorest/sender.go create mode 100644 vendor/github.com/Azure/go-autorest/autorest/utility.go create mode 100644 vendor/github.com/Azure/go-autorest/autorest/version.go create mode 100644 vendor/github.com/satori/uuid/LICENSE create mode 100644 vendor/github.com/satori/uuid/README.md create mode 100644 vendor/github.com/satori/uuid/uuid.go diff --git a/cmd/gateway-azure-anonymous.go b/cmd/gateway-azure-anonymous.go index 47e39224b..69ea9a80c 100644 --- a/cmd/gateway-azure-anonymous.go +++ b/cmd/gateway-azure-anonymous.go @@ -24,11 +24,51 @@ import ( "net/http" "net/url" "strconv" + "strings" "time" "github.com/Azure/azure-sdk-for-go/storage" ) +// Copied from github.com/Azure/azure-sdk-for-go/storage/container.go +func azureListBlobsGetParameters(p storage.ListBlobsParameters) url.Values { + out := url.Values{} + + if p.Prefix != "" { + out.Set("prefix", p.Prefix) + } + if p.Delimiter != "" { + out.Set("delimiter", p.Delimiter) + } + if p.Marker != "" { + out.Set("marker", p.Marker) + } + if p.Include != nil { + addString := func(datasets []string, include bool, text string) []string { + if include { + datasets = append(datasets, text) + } + return datasets + } + + include := []string{} + include = addString(include, p.Include.Snapshots, "snapshots") + include = addString(include, p.Include.Metadata, "metadata") + include = addString(include, p.Include.UncommittedBlobs, "uncommittedblobs") + include = addString(include, p.Include.Copy, "copy") + fullInclude := strings.Join(include, ",") + out.Set("include", fullInclude) + } + if p.MaxResults != 0 { + out.Set("maxresults", fmt.Sprintf("%v", p.MaxResults)) + } + if p.Timeout != 0 { + out.Set("timeout", fmt.Sprintf("%v", p.Timeout)) + } + + return out +} + // Make anonymous HTTP request to azure endpoint. func azureAnonRequest(verb, urlStr string, header http.Header) (*http.Response, error) { req, err := http.NewRequest(verb, urlStr, nil) @@ -73,7 +113,8 @@ func azureAnonRequest(verb, urlStr string, header http.Header) (*http.Response, // AnonGetBucketInfo - Get bucket metadata from azure anonymously. func (a *azureObjects) AnonGetBucketInfo(bucket string) (bucketInfo BucketInfo, err error) { - url, err := url.Parse(a.client.GetBlobURL(bucket, "")) + blobURL := a.client.GetContainerReference(bucket).GetBlobReference("").GetURL() + url, err := url.Parse(blobURL) if err != nil { return bucketInfo, azureToObjectError(traceError(err)) } @@ -116,7 +157,8 @@ func (a *azureObjects) AnonGetObject(bucket, object string, startOffset int64, l h.Add("Range", fmt.Sprintf("bytes=%d-", startOffset)) } - resp, err := azureAnonRequest(httpGET, a.client.GetBlobURL(bucket, object), h) + blobURL := a.client.GetContainerReference(bucket).GetBlobReference(object).GetURL() + resp, err := azureAnonRequest(httpGET, blobURL, h) if err != nil { return azureToObjectError(traceError(err), bucket, object) } @@ -133,7 +175,8 @@ func (a *azureObjects) AnonGetObject(bucket, object string, startOffset int64, l // AnonGetObjectInfo - Send HEAD request without authentication and convert the // result to ObjectInfo. 
func (a *azureObjects) AnonGetObjectInfo(bucket, object string) (objInfo ObjectInfo, err error) { - resp, err := azureAnonRequest(httpHEAD, a.client.GetBlobURL(bucket, object), nil) + blobURL := a.client.GetContainerReference(bucket).GetBlobReference(object).GetURL() + resp, err := azureAnonRequest(httpHEAD, blobURL, nil) if err != nil { return objInfo, azureToObjectError(traceError(err), bucket, object) } @@ -184,7 +227,8 @@ func (a *azureObjects) AnonListObjects(bucket, prefix, marker, delimiter string, q.Set("restype", "container") q.Set("comp", "list") - url, err := url.Parse(a.client.GetBlobURL(bucket, "")) + blobURL := a.client.GetContainerReference(bucket).GetBlobReference("").GetURL() + url, err := url.Parse(blobURL) if err != nil { return result, azureToObjectError(traceError(err)) } @@ -210,14 +254,10 @@ func (a *azureObjects) AnonListObjects(bucket, prefix, marker, delimiter string, result.IsTruncated = listResp.NextMarker != "" result.NextMarker = listResp.NextMarker for _, object := range listResp.Blobs { - t, e := time.Parse(time.RFC1123, object.Properties.LastModified) - if e != nil { - continue - } result.Objects = append(result.Objects, ObjectInfo{ Bucket: bucket, Name: object.Name, - ModTime: t, + ModTime: time.Time(object.Properties.LastModified), Size: object.Properties.ContentLength, ETag: object.Properties.Etag, ContentType: object.Properties.ContentType, @@ -241,7 +281,8 @@ func (a *azureObjects) AnonListObjectsV2(bucket, prefix, continuationToken strin q.Set("restype", "container") q.Set("comp", "list") - url, err := url.Parse(a.client.GetBlobURL(bucket, "")) + blobURL := a.client.GetContainerReference(bucket).GetBlobReference("").GetURL() + url, err := url.Parse(blobURL) if err != nil { return result, azureToObjectError(traceError(err)) } @@ -270,14 +311,10 @@ func (a *azureObjects) AnonListObjectsV2(bucket, prefix, continuationToken strin result.NextContinuationToken = listResp.NextMarker } for _, object := range listResp.Blobs { - t, e := time.Parse(time.RFC1123, object.Properties.LastModified) - if e != nil { - continue - } result.Objects = append(result.Objects, ObjectInfo{ Bucket: bucket, Name: object.Name, - ModTime: t, + ModTime: time.Time(object.Properties.LastModified), Size: object.Properties.ContentLength, ETag: canonicalizeETag(object.Properties.Etag), ContentType: object.Properties.ContentType, diff --git a/cmd/gateway-azure.go b/cmd/gateway-azure.go index 8bc7c1cc0..1be8540a9 100644 --- a/cmd/gateway-azure.go +++ b/cmd/gateway-azure.go @@ -27,7 +27,6 @@ import ( "fmt" "io" "net/http" - "net/url" "strconv" "strings" "time" @@ -41,52 +40,115 @@ const globalAzureAPIVersion = "2016-05-31" const azureBlockSize = 100 * humanize.MiByte const metadataObjectNameTemplate = globalMinioSysTmp + "multipart/v1/%s.%x/azure.json" -// Canonicalize the metadata headers, without this azure-sdk calculates -// incorrect signature. This attempt to canonicalize is to convert -// any HTTP header which is of form say `accept-encoding` should be -// converted to `Accept-Encoding` in its canonical form. -// Also replaces X-Amz-Meta prefix with X-Ms-Meta as Azure expects user -// defined metadata to have X-Ms-Meta prefix. -func s3ToAzureHeaders(headers map[string]string) (newHeaders map[string]string) { +// s3MetaToAzureProperties converts metadata meant for S3 PUT/COPY +// object into Azure data structures - BlobMetadata and +// BlobProperties. 
+// +// BlobMetadata contains user defined key-value pairs and each key is +// automatically prefixed with `X-Ms-Meta-` by the Azure SDK. S3 +// user-metadata is translated to Azure metadata by removing the +// `X-Amz-Meta-` prefix. +// +// BlobProperties contains commonly set metadata for objects such as +// Content-Encoding, etc. Such metadata that is accepted by S3 is +// copied into BlobProperties. +// +// Header names are canonicalized as in http.Header. +func s3MetaToAzureProperties(s3Metadata map[string]string) (storage.BlobMetadata, + storage.BlobProperties) { + + // Azure does not permit user-defined metadata key names to + // contain hyphens. So we map hyphens to underscores for + // encryption headers. More such headers may be added in the + // future. gatewayHeaders := map[string]string{ "X-Amz-Meta-X-Amz-Key": "X-Amz-Meta-x_minio_key", "X-Amz-Meta-X-Amz-Matdesc": "X-Amz-Meta-x_minio_matdesc", "X-Amz-Meta-X-Amz-Iv": "X-Amz-Meta-x_minio_iv", } - newHeaders = make(map[string]string) - for k, v := range headers { + var blobMeta storage.BlobMetadata = make(map[string]string) + var props storage.BlobProperties + for k, v := range s3Metadata { k = http.CanonicalHeaderKey(k) if nk, ok := gatewayHeaders[k]; ok { k = nk } - if strings.HasPrefix(k, "X-Amz-Meta") { - k = strings.Replace(k, "X-Amz-Meta", "X-Ms-Meta", -1) + switch { + case strings.HasPrefix(k, "X-Amz-Meta-"): + // Strip header prefix, to let Azure SDK + // handle it for storage. + k = strings.Replace(k, "X-Amz-Meta-", "", 1) + blobMeta[k] = v + + // All cases below, extract common metadata that is + // accepted by S3 into BlobProperties for setting on + // Azure - see + // https://docs.aws.amazon.com/AmazonS3/latest/API/RESTObjectPUT.html + case k == "Cache-Control": + props.CacheControl = v + case k == "Content-Disposition": + props.ContentDisposition = v + case k == "Content-Encoding": + props.ContentEncoding = v + case k == "Content-Length": + // assume this doesn't fail + props.ContentLength, _ = strconv.ParseInt(v, 10, 64) + case k == "Content-MD5": + props.ContentMD5 = v + case k == "Content-Type": + props.ContentType = v } - newHeaders[k] = v } - return newHeaders + return blobMeta, props } -// Prefix user metadata with "X-Amz-Meta-". -// client.GetBlobMetadata() already strips "X-Ms-Meta-" -func azureToS3Metadata(meta map[string]string) (newMeta map[string]string) { +// azurePropertiesToS3Meta converts Azure metadata/properties to S3 +// metadata. It is the reverse of s3MetaToAzureProperties. Azure's +// `.GetMetadata()` lower-cases all header keys, so this is taken into +// account by this function. +func azurePropertiesToS3Meta(meta storage.BlobMetadata, props storage.BlobProperties) map[string]string { + // Remap underscores to hyphens to restore encryption + // headers. See s3MetaToAzureProperties for details. gatewayHeaders := map[string]string{ "X-Amz-Meta-x_minio_key": "X-Amz-Meta-X-Amz-Key", "X-Amz-Meta-x_minio_matdesc": "X-Amz-Meta-X-Amz-Matdesc", "X-Amz-Meta-x_minio_iv": "X-Amz-Meta-X-Amz-Iv", } - newMeta = make(map[string]string) - + s3Metadata := make(map[string]string) for k, v := range meta { + // k's `x-ms-meta-` prefix is already stripped by + // Azure SDK, so we add the AMZ prefix. k = "X-Amz-Meta-" + k if nk, ok := gatewayHeaders[k]; ok { k = nk } - newMeta[k] = v + k = http.CanonicalHeaderKey(k) + s3Metadata[k] = v } - return newMeta + + // Add each property from BlobProperties that is supported by + // S3 PUT/COPY common metadata. 
+	if props.CacheControl != "" {
+		s3Metadata["Cache-Control"] = props.CacheControl
+	}
+	if props.ContentDisposition != "" {
+		s3Metadata["Content-Disposition"] = props.ContentDisposition
+	}
+	if props.ContentEncoding != "" {
+		s3Metadata["Content-Encoding"] = props.ContentEncoding
+	}
+	if props.ContentLength != 0 {
+		s3Metadata["Content-Length"] = fmt.Sprintf("%d", props.ContentLength)
+	}
+	if props.ContentMD5 != "" {
+		s3Metadata["Content-MD5"] = props.ContentMD5
+	}
+	if props.ContentType != "" {
+		s3Metadata["Content-Type"] = props.ContentType
+	}
+	return s3Metadata
 }
 
 // Append "-1" to etag so that clients do not interpret it as MD5.
@@ -256,13 +318,17 @@ func (a *azureObjects) StorageInfo() (si StorageInfo) {
 
 // MakeBucketWithLocation - Create a new container on azure backend.
 func (a *azureObjects) MakeBucketWithLocation(bucket, location string) error {
-	err := a.client.CreateContainer(bucket, storage.ContainerAccessTypePrivate)
+	container := a.client.GetContainerReference(bucket)
+	err := container.Create(&storage.CreateContainerOptions{
+		Access: storage.ContainerAccessTypePrivate,
+	})
 	return azureToObjectError(traceError(err), bucket)
 }
 
 // GetBucketInfo - Get bucket metadata.
 func (a *azureObjects) GetBucketInfo(bucket string) (bi BucketInfo, e error) {
-	// Azure does not have an equivalent call, hence use ListContainers.
+	// Azure does not have an equivalent call, hence use
+	// ListContainers with prefix.
 	resp, err := a.client.ListContainers(storage.ListContainersParameters{
 		Prefix: bucket,
 	})
@@ -304,13 +370,15 @@ func (a *azureObjects) ListBuckets() (buckets []BucketInfo, err error) {
 
 // DeleteBucket - delete a container on azure, uses Azure equivalent DeleteContainer.
 func (a *azureObjects) DeleteBucket(bucket string) error {
-	return azureToObjectError(traceError(a.client.DeleteContainer(bucket)), bucket)
+	container := a.client.GetContainerReference(bucket)
+	return azureToObjectError(traceError(container.Delete(nil)), bucket)
 }
 
 // ListObjects - lists all blobs on azure within a container filtered by prefix
 // and marker, uses Azure equivalent ListBlobs.
 func (a *azureObjects) ListObjects(bucket, prefix, marker, delimiter string, maxKeys int) (result ListObjectsInfo, err error) {
-	resp, err := a.client.ListBlobs(bucket, storage.ListBlobsParameters{
+	container := a.client.GetContainerReference(bucket)
+	resp, err := container.ListBlobs(storage.ListBlobsParameters{
 		Prefix:    prefix,
 		Marker:    marker,
 		Delimiter: delimiter,
@@ -325,14 +393,10 @@ func (a *azureObjects) ListObjects(bucket, prefix, marker, delimiter string, max
 		if strings.HasPrefix(object.Name, globalMinioSysTmp) {
 			continue
 		}
-		t, e := time.Parse(time.RFC1123, object.Properties.LastModified)
-		if e != nil {
-			continue
-		}
 		result.Objects = append(result.Objects, ObjectInfo{
 			Bucket:      bucket,
 			Name:        object.Name,
-			ModTime:     t,
+			ModTime:     time.Time(object.Properties.LastModified),
 			Size:        object.Properties.ContentLength,
 			ETag:        azureToS3ETag(object.Properties.Etag),
 			ContentType: object.Properties.ContentType,
@@ -353,7 +417,8 @@ func (a *azureObjects) ListObjects(bucket, prefix, marker, delimiter string, max
 
 // ListObjectsV2 - list all blobs in Azure bucket filtered by prefix
 func (a *azureObjects) ListObjectsV2(bucket, prefix, continuationToken string, fetchOwner bool, delimiter string, maxKeys int) (result ListObjectsV2Info, err error) {
-	resp, err := a.client.ListBlobs(bucket, storage.ListBlobsParameters{
+	container := a.client.GetContainerReference(bucket)
+	resp, err := container.ListBlobs(storage.ListBlobsParameters{
 		Prefix:    prefix,
 		Marker:    continuationToken,
 		Delimiter: delimiter,
@@ -371,14 +436,10 @@ func (a *azureObjects) ListObjectsV2(bucket, prefix, continuationToken string, f
 		if strings.HasPrefix(object.Name, globalMinioSysTmp) {
 			continue
 		}
-		t, e := time.Parse(time.RFC1123, object.Properties.LastModified)
-		if e != nil {
-			continue
-		}
 		result.Objects = append(result.Objects, ObjectInfo{
 			Bucket:      bucket,
 			Name:        object.Name,
-			ModTime:     t,
+			ModTime:     time.Time(object.Properties.LastModified),
 			Size:        object.Properties.ContentLength,
 			ETag:        azureToS3ETag(object.Properties.Etag),
 			ContentType: object.Properties.ContentType,
@@ -404,17 +465,20 @@ func (a *azureObjects) ListObjectsV2(bucket, prefix, continuationToken string, f
 
 // startOffset indicates the starting read location of the object.
 // length indicates the total length of the object.
 func (a *azureObjects) GetObject(bucket, object string, startOffset int64, length int64, writer io.Writer) error {
-	byteRange := fmt.Sprintf("%d-", startOffset)
+	blobRange := &storage.BlobRange{Start: uint64(startOffset)}
 	if length > 0 && startOffset > 0 {
-		byteRange = fmt.Sprintf("%d-%d", startOffset, startOffset+length-1)
+		blobRange.End = uint64(startOffset + length - 1)
 	}
+	blob := a.client.GetContainerReference(bucket).GetBlobReference(object)
 	var rc io.ReadCloser
 	var err error
 	if startOffset == 0 && length == 0 {
-		rc, err = a.client.GetBlob(bucket, object)
+		rc, err = blob.Get(nil)
 	} else {
-		rc, err = a.client.GetBlobRange(bucket, object, byteRange, nil)
+		rc, err = blob.GetRange(&storage.GetBlobRangeOptions{
+			Range: blobRange,
+		})
 	}
 	if err != nil {
 		return azureToObjectError(traceError(err), bucket, object)
@@ -427,36 +491,23 @@ func (a *azureObjects) GetObject(bucket, object string, startOffset int64, lengt
 
 // GetObjectInfo - reads blob metadata properties and replies back ObjectInfo,
 // uses Azure equivalent GetBlobProperties.
func (a *azureObjects) GetObjectInfo(bucket, object string) (objInfo ObjectInfo, err error) { - blobMeta, err := a.client.GetBlobMetadata(bucket, object) + blob := a.client.GetContainerReference(bucket).GetBlobReference(object) + err = blob.GetProperties(nil) if err != nil { return objInfo, azureToObjectError(traceError(err), bucket, object) } - meta := azureToS3Metadata(blobMeta) - - prop, err := a.client.GetBlobProperties(bucket, object) - if err != nil { - return objInfo, azureToObjectError(traceError(err), bucket, object) - } - t, err := time.Parse(time.RFC1123, prop.LastModified) - if err != nil { - return objInfo, traceError(err) - } - - if prop.ContentEncoding != "" { - meta["Content-Encoding"] = prop.ContentEncoding - } - meta["Content-Type"] = prop.ContentType - + meta := azurePropertiesToS3Meta(blob.Metadata, blob.Properties) objInfo = ObjectInfo{ - Bucket: bucket, - UserDefined: meta, - ETag: azureToS3ETag(prop.Etag), - ModTime: t, - Name: object, - Size: prop.ContentLength, + Bucket: bucket, + UserDefined: meta, + ETag: azureToS3ETag(blob.Properties.Etag), + ModTime: time.Time(blob.Properties.LastModified), + Name: object, + Size: blob.Properties.ContentLength, + ContentType: blob.Properties.ContentType, + ContentEncoding: blob.Properties.ContentEncoding, } - return objInfo, nil } @@ -464,12 +515,16 @@ func (a *azureObjects) GetObjectInfo(bucket, object string) (objInfo ObjectInfo, // uses Azure equivalent CreateBlockBlobFromReader. func (a *azureObjects) PutObject(bucket, object string, data *HashReader, metadata map[string]string) (objInfo ObjectInfo, err error) { delete(metadata, "etag") - err = a.client.CreateBlockBlobFromReader(bucket, object, uint64(data.Size()), data, s3ToAzureHeaders(metadata)) + blob := a.client.GetContainerReference(bucket).GetBlobReference(object) + blob.Metadata, blob.Properties = s3MetaToAzureProperties(metadata) + err = blob.CreateBlockBlobFromReader(data, nil) if err != nil { return objInfo, azureToObjectError(traceError(err), bucket, object) } if err = data.Verify(); err != nil { - a.client.DeleteBlob(bucket, object, nil) + errorIf(err, "Verification of uploaded object data failed against client provided checksums.") + derr := blob.Delete(nil) + errorIf(derr, "Failed to delete blob when cleaning up a bad blob upload.") return ObjectInfo{}, azureToObjectError(traceError(err)) } return a.GetObjectInfo(bucket, object) @@ -478,7 +533,9 @@ func (a *azureObjects) PutObject(bucket, object string, data *HashReader, metada // CopyObject - Copies a blob from source container to destination container. // Uses Azure equivalent CopyBlob API. func (a *azureObjects) CopyObject(srcBucket, srcObject, destBucket, destObject string, metadata map[string]string) (objInfo ObjectInfo, err error) { - err = a.client.CopyBlob(destBucket, destObject, a.client.GetBlobURL(srcBucket, srcObject)) + srcBlobURL := a.client.GetContainerReference(srcBucket).GetBlobReference(srcObject).GetURL() + destBlob := a.client.GetContainerReference(destBucket).GetBlobReference(destObject) + err = destBlob.Copy(srcBlobURL, nil) if err != nil { return objInfo, azureToObjectError(traceError(err), srcBucket, srcObject) } @@ -488,7 +545,8 @@ func (a *azureObjects) CopyObject(srcBucket, srcObject, destBucket, destObject s // DeleteObject - Deletes a blob on azure container, uses Azure // equivalent DeleteBlob API. 
func (a *azureObjects) DeleteObject(bucket, object string) error { - err := a.client.DeleteBlob(bucket, object, nil) + blob := a.client.GetContainerReference(bucket).GetBlobReference(object) + err := blob.Delete(nil) if err != nil { return azureToObjectError(traceError(err), bucket, object) } @@ -511,7 +569,9 @@ func getAzureMetadataObjectName(objectName, uploadID string) string { } func (a *azureObjects) checkUploadIDExists(bucketName, objectName, uploadID string) (err error) { - _, err = a.client.GetBlobMetadata(bucketName, getAzureMetadataObjectName(objectName, uploadID)) + blob := a.client.GetContainerReference(bucketName).GetBlobReference( + getAzureMetadataObjectName(objectName, uploadID)) + err = blob.GetMetadata(nil) err = azureToObjectError(traceError(err), bucketName, objectName) oerr := ObjectNotFound{bucketName, objectName} if errorCause(err) == oerr { @@ -531,14 +591,14 @@ func (a *azureObjects) NewMultipartUpload(bucket, object string, metadata map[st return "", traceError(errors.New("Upload ID name collision")) } metadataObject := getAzureMetadataObjectName(object, uploadID) - metadata = s3ToAzureHeaders(metadata) var jsonData []byte if jsonData, err = json.Marshal(azureMultipartMetadata{Name: object, Metadata: metadata}); err != nil { return "", traceError(err) } - err = a.client.CreateBlockBlobFromReader(bucket, metadataObject, uint64(len(jsonData)), bytes.NewBuffer(jsonData), nil) + blob := a.client.GetContainerReference(bucket).GetBlobReference(metadataObject) + err = blob.CreateBlockBlobFromReader(bytes.NewBuffer(jsonData), nil) if err != nil { return "", azureToObjectError(traceError(err), bucket, metadataObject) } @@ -579,14 +639,18 @@ func (a *azureObjects) PutObjectPart(bucket, object, uploadID string, partID int } id := azureGetBlockID(partID, subPartNumber, uploadID, etag) - err = a.client.PutBlockWithLength(bucket, object, id, uint64(subPartSize), io.LimitReader(data, subPartSize), nil) + blob := a.client.GetContainerReference(bucket).GetBlobReference(object) + err = blob.PutBlockWithLength(id, uint64(subPartSize), io.LimitReader(data, subPartSize), nil) if err != nil { return info, azureToObjectError(traceError(err), bucket, object) } subPartNumber++ } if err = data.Verify(); err != nil { - a.client.DeleteBlob(bucket, object, nil) + errorIf(err, "Verification of uploaded object data failed against client provided checksums.") + blob := a.client.GetContainerReference(bucket).GetBlobReference(object) + derr := blob.Delete(nil) + errorIf(derr, "Failed to delete blob when cleaning up a bad blob upload.") return info, azureToObjectError(traceError(err), bucket, object) } @@ -615,7 +679,9 @@ func (a *azureObjects) AbortMultipartUpload(bucket, object, uploadID string) (er return err } - return a.client.DeleteBlob(bucket, getAzureMetadataObjectName(object, uploadID), nil) + blob := a.client.GetContainerReference(bucket).GetBlobReference( + getAzureMetadataObjectName(object, uploadID)) + return blob.Delete(nil) } // CompleteMultipartUpload - Use Azure equivalent PutBlockList. 
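The hunks above replace the old flat-client calls (a.client.GetBlob(bucket, object), a.client.DeleteBlob(...), and so on) with the SDK's reference-based API. A minimal sketch of that pattern in isolation, assuming the vendored SDK's NewBasicClient and GetBlobService constructors; the account name, key, container, and blob names are placeholders:

package main

import (
	"fmt"
	"log"
	"time"

	"github.com/Azure/azure-sdk-for-go/storage"
)

func main() {
	// Placeholder credentials (the key must be valid base64).
	client, err := storage.NewBasicClient("myaccount", "bXlhY2NvdW50a2V5")
	if err != nil {
		log.Fatal(err)
	}
	blobService := client.GetBlobService()

	// Operations now hang off container/blob references instead of
	// taking (container, blob) string pairs on the client.
	blob := blobService.GetContainerReference("mybucket").GetBlobReference("myobject")

	// GetProperties populates blob.Properties and blob.Metadata in place;
	// LastModified is a typed TimeRFC1123 rather than a string.
	if err = blob.GetProperties(nil); err != nil {
		log.Fatal(err)
	}
	fmt.Println(blob.Properties.ContentType, time.Time(blob.Properties.LastModified))

	// Deletion goes through the same reference.
	if err = blob.Delete(nil); err != nil {
		log.Fatal(err)
	}
}
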
@@ -630,7 +696,8 @@ func (a *azureObjects) CompleteMultipartUpload(bucket, object, uploadID string, } var metadataReader io.Reader - if metadataReader, err = a.client.GetBlob(bucket, metadataObject); err != nil { + blob := a.client.GetContainerReference(bucket).GetBlobReference(metadataObject) + if metadataReader, err = blob.Get(nil); err != nil { return objInfo, azureToObjectError(traceError(err), bucket, metadataObject) } @@ -639,17 +706,18 @@ func (a *azureObjects) CompleteMultipartUpload(bucket, object, uploadID string, return objInfo, azureToObjectError(traceError(err), bucket, metadataObject) } - meta := metadata.Metadata defer func() { if err != nil { return } - derr := a.client.DeleteBlob(bucket, metadataObject, nil) + blob := a.client.GetContainerReference(bucket).GetBlobReference(metadataObject) + derr := blob.Delete(nil) errorIf(derr, "unable to remove meta data object for upload ID %s", uploadID) }() - resp, err := a.client.GetBlockList(bucket, object, storage.BlockListTypeUncommitted) + objBlob := a.client.GetContainerReference(bucket).GetBlobReference(object) + resp, err := objBlob.GetBlockList(storage.BlockListTypeUncommitted, nil) if err != nil { return objInfo, azureToObjectError(traceError(err), bucket, object) } @@ -705,23 +773,17 @@ func (a *azureObjects) CompleteMultipartUpload(bucket, object, uploadID string, } } - err = a.client.PutBlockList(bucket, object, allBlocks) + err = objBlob.PutBlockList(allBlocks, nil) if err != nil { return objInfo, azureToObjectError(traceError(err), bucket, object) } - if len(meta) > 0 { - prop := storage.BlobHeaders{ - ContentMD5: meta["Content-Md5"], - ContentLanguage: meta["Content-Language"], - ContentEncoding: meta["Content-Encoding"], - ContentType: meta["Content-Type"], - CacheControl: meta["Cache-Control"], - } - err = a.client.SetBlobProperties(bucket, object, prop) + if len(metadata.Metadata) > 0 { + objBlob.Metadata, objBlob.Properties = s3MetaToAzureProperties(metadata.Metadata) + err = objBlob.SetProperties(nil) if err != nil { return objInfo, azureToObjectError(traceError(err), bucket, object) } - err = a.client.SetBlobMetadata(bucket, object, nil, meta) + err = objBlob.SetMetadata(nil) if err != nil { return objInfo, azureToObjectError(traceError(err), bucket, object) } @@ -729,32 +791,6 @@ func (a *azureObjects) CompleteMultipartUpload(bucket, object, uploadID string, return a.GetObjectInfo(bucket, object) } -// Copied from github.com/Azure/azure-sdk-for-go/storage/blob.go -func azureListBlobsGetParameters(p storage.ListBlobsParameters) url.Values { - out := url.Values{} - - if p.Prefix != "" { - out.Set("prefix", p.Prefix) - } - if p.Delimiter != "" { - out.Set("delimiter", p.Delimiter) - } - if p.Marker != "" { - out.Set("marker", p.Marker) - } - if p.Include != "" { - out.Set("include", p.Include) - } - if p.MaxResults != 0 { - out.Set("maxresults", fmt.Sprintf("%v", p.MaxResults)) - } - if p.Timeout != 0 { - out.Set("timeout", fmt.Sprintf("%v", p.Timeout)) - } - - return out -} - // SetBucketPolicies - Azure supports three types of container policies: // storage.ContainerAccessTypeContainer - readonly in minio terminology // storage.ContainerAccessTypeBlob - readonly without listing in minio terminology @@ -784,14 +820,16 @@ func (a *azureObjects) SetBucketPolicies(bucket string, policyInfo policy.Bucket AccessType: storage.ContainerAccessTypeContainer, AccessPolicies: nil, } - err := a.client.SetContainerPermissions(bucket, perm, 0, "") + container := a.client.GetContainerReference(bucket) + err := 
container.SetPermissions(perm, nil) return azureToObjectError(traceError(err), bucket) } // GetBucketPolicies - Get the container ACL and convert it to canonical []bucketAccessPolicy func (a *azureObjects) GetBucketPolicies(bucket string) (policy.BucketAccessPolicy, error) { policyInfo := policy.BucketAccessPolicy{Version: "2012-10-17"} - perm, err := a.client.GetContainerPermissions(bucket, 0, "") + container := a.client.GetContainerReference(bucket) + perm, err := container.GetPermissions(nil) if err != nil { return policy.BucketAccessPolicy{}, azureToObjectError(traceError(err), bucket) } @@ -812,6 +850,7 @@ func (a *azureObjects) DeleteBucketPolicies(bucket string) error { AccessType: storage.ContainerAccessTypePrivate, AccessPolicies: nil, } - err := a.client.SetContainerPermissions(bucket, perm, 0, "") + container := a.client.GetContainerReference(bucket) + err := container.SetPermissions(perm, nil) return azureToObjectError(traceError(err)) } diff --git a/cmd/gateway-azure_test.go b/cmd/gateway-azure_test.go index c39093334..bd6cb6b56 100644 --- a/cmd/gateway-azure_test.go +++ b/cmd/gateway-azure_test.go @@ -43,7 +43,7 @@ func TestAzureToS3ETag(t *testing.T) { } // Test canonical metadata. -func TestS3ToAzureHeaders(t *testing.T) { +func TestS3MetaToAzureProperties(t *testing.T) { headers := map[string]string{ "accept-encoding": "gzip", "content-encoding": "gzip", @@ -52,21 +52,21 @@ func TestS3ToAzureHeaders(t *testing.T) { "X-Amz-Meta-X-Amz-Matdesc": "{}", "X-Amz-Meta-X-Amz-Iv": "eWmyryl8kq+EVnnsE7jpOg==", } + // Only X-Amz-Meta- prefixed entries will be returned in + // Metadata (without the prefix!) expectedHeaders := map[string]string{ - "Accept-Encoding": "gzip", - "Content-Encoding": "gzip", - "X-Ms-Meta-Hdr": "value", - "X-Ms-Meta-x_minio_key": "hu3ZSqtqwn+aL4V2VhAeov4i+bG3KyCtRMSXQFRHXOk=", - "X-Ms-Meta-x_minio_matdesc": "{}", - "X-Ms-Meta-x_minio_iv": "eWmyryl8kq+EVnnsE7jpOg==", + "Hdr": "value", + "x_minio_key": "hu3ZSqtqwn+aL4V2VhAeov4i+bG3KyCtRMSXQFRHXOk=", + "x_minio_matdesc": "{}", + "x_minio_iv": "eWmyryl8kq+EVnnsE7jpOg==", } - actualHeaders := s3ToAzureHeaders(headers) - if !reflect.DeepEqual(actualHeaders, expectedHeaders) { - t.Fatalf("Test failed, expected %#v, got %#v", expectedHeaders, actualHeaders) + meta, _ := s3MetaToAzureProperties(headers) + if !reflect.DeepEqual(map[string]string(meta), expectedHeaders) { + t.Fatalf("Test failed, expected %#v, got %#v", expectedHeaders, meta) } } -func TestAzureToS3Metadata(t *testing.T) { +func TestAzurePropertiesToS3Meta(t *testing.T) { // Just one testcase. Adding more test cases does not add value to the testcase // as azureToS3Metadata() just adds a prefix. 
metadata := map[string]string{ @@ -81,7 +81,7 @@ func TestAzureToS3Metadata(t *testing.T) { "X-Amz-Meta-X-Amz-Matdesc": "{}", "X-Amz-Meta-X-Amz-Iv": "eWmyryl8kq+EVnnsE7jpOg==", } - actualMeta := azureToS3Metadata(metadata) + actualMeta := azurePropertiesToS3Meta(metadata, storage.BlobProperties{}) if !reflect.DeepEqual(actualMeta, expectedMeta) { t.Fatalf("Test failed, expected %#v, got %#v", expectedMeta, actualMeta) } @@ -207,16 +207,30 @@ func TestAzureListBlobsGetParameters(t *testing.T) { expectedURLValues.Set("prefix", "test") expectedURLValues.Set("delimiter", "_") expectedURLValues.Set("marker", "marker") - expectedURLValues.Set("include", "hello") + expectedURLValues.Set("include", "metadata") expectedURLValues.Set("maxresults", "20") expectedURLValues.Set("timeout", "10") - setBlobParameters := storage.ListBlobsParameters{"test", "_", "marker", "hello", 20, 10} + setBlobParameters := storage.ListBlobsParameters{ + Prefix: "test", + Delimiter: "_", + Marker: "marker", + Include: &storage.IncludeBlobDataset{Metadata: true}, + MaxResults: 20, + Timeout: 10, + } // Test values set 2 expectedURLValues1 := url.Values{} - setBlobParameters1 := storage.ListBlobsParameters{"", "", "", "", 0, 0} + setBlobParameters1 := storage.ListBlobsParameters{ + Prefix: "", + Delimiter: "", + Marker: "", + Include: nil, + MaxResults: 0, + Timeout: 0, + } testCases := []struct { name string diff --git a/vendor/github.com/Azure/azure-sdk-for-go/NOTICE b/vendor/github.com/Azure/azure-sdk-for-go/NOTICE new file mode 100644 index 000000000..2d1d72608 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/NOTICE @@ -0,0 +1,5 @@ +Microsoft Azure-SDK-for-Go +Copyright 2014-2017 Microsoft + +This product includes software developed at +the Microsoft Corporation (https://www.microsoft.com). diff --git a/vendor/github.com/Azure/azure-sdk-for-go/storage/README.md b/vendor/github.com/Azure/azure-sdk-for-go/storage/README.md deleted file mode 100644 index 0ab099848..000000000 --- a/vendor/github.com/Azure/azure-sdk-for-go/storage/README.md +++ /dev/null @@ -1,5 +0,0 @@ -# Azure Storage SDK for Go - -The `github.com/Azure/azure-sdk-for-go/storage` package is used to perform operations in Azure Storage Service. To manage your storage accounts (Azure Resource Manager / ARM), use the [github.com/Azure/azure-sdk-for-go/arm/storage](../arm/storage) package. For your classic storage accounts (Azure Service Management / ASM), use [github.com/Azure/azure-sdk-for-go/management/storageservice](../management/storageservice) package. - -This package includes support for [Azure Storage Emulator](https://azure.microsoft.com/documentation/articles/storage-use-emulator/) \ No newline at end of file diff --git a/vendor/github.com/Azure/azure-sdk-for-go/storage/appendblob.go b/vendor/github.com/Azure/azure-sdk-for-go/storage/appendblob.go new file mode 100644 index 000000000..039aca887 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/storage/appendblob.go @@ -0,0 +1,77 @@ +package storage + +import ( + "bytes" + "crypto/md5" + "encoding/base64" + "fmt" + "net/http" + "net/url" + "time" +) + +// PutAppendBlob initializes an empty append blob with specified name. An +// append blob must be created using this method before appending blocks. +// +// See CreateBlockBlobFromReader for more info on creating blobs. 
+// +// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Put-Blob +func (b *Blob) PutAppendBlob(options *PutBlobOptions) error { + params := url.Values{} + headers := b.Container.bsc.client.getStandardHeaders() + headers["x-ms-blob-type"] = string(BlobTypeAppend) + headers = mergeHeaders(headers, headersFromStruct(b.Properties)) + headers = b.Container.bsc.client.addMetadataToHeaders(headers, b.Metadata) + + if options != nil { + params = addTimeout(params, options.Timeout) + headers = mergeHeaders(headers, headersFromStruct(*options)) + } + uri := b.Container.bsc.client.getEndpoint(blobServiceName, b.buildPath(), params) + + resp, err := b.Container.bsc.client.exec(http.MethodPut, uri, headers, nil, b.Container.bsc.auth) + if err != nil { + return err + } + return b.respondCreation(resp, BlobTypeAppend) +} + +// AppendBlockOptions includes the options for an append block operation +type AppendBlockOptions struct { + Timeout uint + LeaseID string `header:"x-ms-lease-id"` + MaxSize *uint `header:"x-ms-blob-condition-maxsize"` + AppendPosition *uint `header:"x-ms-blob-condition-appendpos"` + IfModifiedSince *time.Time `header:"If-Modified-Since"` + IfUnmodifiedSince *time.Time `header:"If-Unmodified-Since"` + IfMatch string `header:"If-Match"` + IfNoneMatch string `header:"If-None-Match"` + RequestID string `header:"x-ms-client-request-id"` + ContentMD5 bool +} + +// AppendBlock appends a block to an append blob. +// +// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Append-Block +func (b *Blob) AppendBlock(chunk []byte, options *AppendBlockOptions) error { + params := url.Values{"comp": {"appendblock"}} + headers := b.Container.bsc.client.getStandardHeaders() + headers["x-ms-blob-type"] = string(BlobTypeAppend) + headers["Content-Length"] = fmt.Sprintf("%v", len(chunk)) + + if options != nil { + params = addTimeout(params, options.Timeout) + headers = mergeHeaders(headers, headersFromStruct(*options)) + if options.ContentMD5 { + md5sum := md5.Sum(chunk) + headers[headerContentMD5] = base64.StdEncoding.EncodeToString(md5sum[:]) + } + } + uri := b.Container.bsc.client.getEndpoint(blobServiceName, b.buildPath(), params) + + resp, err := b.Container.bsc.client.exec(http.MethodPut, uri, headers, bytes.NewReader(chunk), b.Container.bsc.auth) + if err != nil { + return err + } + return b.respondCreation(resp, BlobTypeAppend) +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/storage/authorization.go b/vendor/github.com/Azure/azure-sdk-for-go/storage/authorization.go index 89a0d0b3c..d4ad5da27 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/storage/authorization.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/storage/authorization.go @@ -20,20 +20,24 @@ const ( sharedKeyLiteForTable authentication = "sharedKeyLiteTable" // headers - headerAuthorization = "Authorization" - headerContentLength = "Content-Length" - headerDate = "Date" - headerXmsDate = "x-ms-date" - headerXmsVersion = "x-ms-version" - headerContentEncoding = "Content-Encoding" - headerContentLanguage = "Content-Language" - headerContentType = "Content-Type" - headerContentMD5 = "Content-MD5" - headerIfModifiedSince = "If-Modified-Since" - headerIfMatch = "If-Match" - headerIfNoneMatch = "If-None-Match" - headerIfUnmodifiedSince = "If-Unmodified-Since" - headerRange = "Range" + headerAcceptCharset = "Accept-Charset" + headerAuthorization = "Authorization" + headerContentLength = "Content-Length" + headerDate = "Date" + headerXmsDate = "x-ms-date" + headerXmsVersion = 
"x-ms-version" + headerContentEncoding = "Content-Encoding" + headerContentLanguage = "Content-Language" + headerContentType = "Content-Type" + headerContentMD5 = "Content-MD5" + headerIfModifiedSince = "If-Modified-Since" + headerIfMatch = "If-Match" + headerIfNoneMatch = "If-None-Match" + headerIfUnmodifiedSince = "If-Unmodified-Since" + headerRange = "Range" + headerDataServiceVersion = "DataServiceVersion" + headerMaxDataServiceVersion = "MaxDataServiceVersion" + headerContentTransferEncoding = "Content-Transfer-Encoding" ) func (c *Client) addAuthorizationHeader(verb, url string, headers map[string]string, auth authentication) (map[string]string, error) { @@ -46,7 +50,7 @@ func (c *Client) addAuthorizationHeader(verb, url string, headers map[string]str } func (c *Client) getSharedKey(verb, url string, headers map[string]string, auth authentication) (string, error) { - canRes, err := c.buildCanonicalizedResource(url, auth) + canRes, err := c.buildCanonicalizedResource(url, auth, false) if err != nil { return "", err } @@ -58,15 +62,18 @@ func (c *Client) getSharedKey(verb, url string, headers map[string]string, auth return c.createAuthorizationHeader(canString, auth), nil } -func (c *Client) buildCanonicalizedResource(uri string, auth authentication) (string, error) { +func (c *Client) buildCanonicalizedResource(uri string, auth authentication, sas bool) (string, error) { errMsg := "buildCanonicalizedResource error: %s" u, err := url.Parse(uri) if err != nil { return "", fmt.Errorf(errMsg, err.Error()) } - cr := bytes.NewBufferString("/") - cr.WriteString(c.getCanonicalizedAccountName()) + cr := bytes.NewBufferString("") + if c.accountName != StorageEmulatorAccountName || !sas { + cr.WriteString("/") + cr.WriteString(c.getCanonicalizedAccountName()) + } if len(u.Path) > 0 { // Any portion of the CanonicalizedResource string that is derived from diff --git a/vendor/github.com/Azure/azure-sdk-for-go/storage/blob.go b/vendor/github.com/Azure/azure-sdk-for-go/storage/blob.go index e33de1031..731cdee47 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/storage/blob.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/storage/blob.go @@ -1,7 +1,6 @@ package storage import ( - "bytes" "encoding/xml" "errors" "fmt" @@ -13,52 +12,28 @@ import ( "time" ) -// BlobStorageClient contains operations for Microsoft Azure Blob Storage -// Service. -type BlobStorageClient struct { - client Client - auth authentication -} - -// A Container is an entry in ContainerListResponse. -type Container struct { - Name string `xml:"Name"` - Properties ContainerProperties `xml:"Properties"` - // TODO (ahmetalpbalkan) Metadata -} - -// ContainerProperties contains various properties of a container returned from -// various endpoints like ListContainers. -type ContainerProperties struct { - LastModified string `xml:"Last-Modified"` - Etag string `xml:"Etag"` - LeaseStatus string `xml:"LeaseStatus"` - LeaseState string `xml:"LeaseState"` - LeaseDuration string `xml:"LeaseDuration"` - // TODO (ahmetalpbalkan) remaining fields -} - -// ContainerListResponse contains the response fields from -// ListContainers call. 
-// -// See https://msdn.microsoft.com/en-us/library/azure/dd179352.aspx -type ContainerListResponse struct { - XMLName xml.Name `xml:"EnumerationResults"` - Xmlns string `xml:"xmlns,attr"` - Prefix string `xml:"Prefix"` - Marker string `xml:"Marker"` - NextMarker string `xml:"NextMarker"` - MaxResults int64 `xml:"MaxResults"` - Containers []Container `xml:"Containers>Container"` -} - // A Blob is an entry in BlobListResponse. type Blob struct { + Container *Container Name string `xml:"Name"` + Snapshot time.Time `xml:"Snapshot"` Properties BlobProperties `xml:"Properties"` Metadata BlobMetadata `xml:"Metadata"` } +// PutBlobOptions includes the options any put blob operation +// (page, block, append) +type PutBlobOptions struct { + Timeout uint + LeaseID string `header:"x-ms-lease-id"` + Origin string `header:"Origin"` + IfModifiedSince *time.Time `header:"If-Modified-Since"` + IfUnmodifiedSince *time.Time `header:"If-Unmodified-Since"` + IfMatch string `header:"If-Match"` + IfNoneMatch string `header:"If-None-Match"` + RequestID string `header:"x-ms-client-request-id"` +} + // BlobMetadata is a set of custom name/value pairs. // // See https://msdn.microsoft.com/en-us/library/azure/dd179404.aspx @@ -106,129 +81,28 @@ func (bm BlobMetadata) MarshalXML(enc *xml.Encoder, start xml.StartElement) erro // BlobProperties contains various properties of a blob // returned in various endpoints like ListBlobs or GetBlobProperties. type BlobProperties struct { - LastModified string `xml:"Last-Modified"` - Etag string `xml:"Etag"` - ContentMD5 string `xml:"Content-MD5"` - ContentLength int64 `xml:"Content-Length"` - ContentType string `xml:"Content-Type"` - ContentEncoding string `xml:"Content-Encoding"` - CacheControl string `xml:"Cache-Control"` - ContentLanguage string `xml:"Cache-Language"` - BlobType BlobType `xml:"x-ms-blob-blob-type"` - SequenceNumber int64 `xml:"x-ms-blob-sequence-number"` - CopyID string `xml:"CopyId"` - CopyStatus string `xml:"CopyStatus"` - CopySource string `xml:"CopySource"` - CopyProgress string `xml:"CopyProgress"` - CopyCompletionTime string `xml:"CopyCompletionTime"` - CopyStatusDescription string `xml:"CopyStatusDescription"` - LeaseStatus string `xml:"LeaseStatus"` - LeaseState string `xml:"LeaseState"` -} - -// BlobHeaders contains various properties of a blob and is an entry -// in SetBlobProperties -type BlobHeaders struct { - ContentMD5 string `header:"x-ms-blob-content-md5"` - ContentLanguage string `header:"x-ms-blob-content-language"` - ContentEncoding string `header:"x-ms-blob-content-encoding"` - ContentType string `header:"x-ms-blob-content-type"` - CacheControl string `header:"x-ms-blob-cache-control"` -} - -// BlobListResponse contains the response fields from ListBlobs call. -// -// See https://msdn.microsoft.com/en-us/library/azure/dd135734.aspx -type BlobListResponse struct { - XMLName xml.Name `xml:"EnumerationResults"` - Xmlns string `xml:"xmlns,attr"` - Prefix string `xml:"Prefix"` - Marker string `xml:"Marker"` - NextMarker string `xml:"NextMarker"` - MaxResults int64 `xml:"MaxResults"` - Blobs []Blob `xml:"Blobs>Blob"` - - // BlobPrefix is used to traverse blobs as if it were a file system. - // It is returned if ListBlobsParameters.Delimiter is specified. - // The list here can be thought of as "folders" that may contain - // other folders or blobs. - BlobPrefixes []string `xml:"Blobs>BlobPrefix>Name"` - - // Delimiter is used to traverse blobs as if it were a file system. - // It is returned if ListBlobsParameters.Delimiter is specified. 
- Delimiter string `xml:"Delimiter"` -} - -// ListContainersParameters defines the set of customizable parameters to make a -// List Containers call. -// -// See https://msdn.microsoft.com/en-us/library/azure/dd179352.aspx -type ListContainersParameters struct { - Prefix string - Marker string - Include string - MaxResults uint - Timeout uint -} - -func (p ListContainersParameters) getParameters() url.Values { - out := url.Values{} - - if p.Prefix != "" { - out.Set("prefix", p.Prefix) - } - if p.Marker != "" { - out.Set("marker", p.Marker) - } - if p.Include != "" { - out.Set("include", p.Include) - } - if p.MaxResults != 0 { - out.Set("maxresults", fmt.Sprintf("%v", p.MaxResults)) - } - if p.Timeout != 0 { - out.Set("timeout", fmt.Sprintf("%v", p.Timeout)) - } - - return out -} - -// ListBlobsParameters defines the set of customizable -// parameters to make a List Blobs call. -// -// See https://msdn.microsoft.com/en-us/library/azure/dd135734.aspx -type ListBlobsParameters struct { - Prefix string - Delimiter string - Marker string - Include string - MaxResults uint - Timeout uint -} - -func (p ListBlobsParameters) getParameters() url.Values { - out := url.Values{} - - if p.Prefix != "" { - out.Set("prefix", p.Prefix) - } - if p.Delimiter != "" { - out.Set("delimiter", p.Delimiter) - } - if p.Marker != "" { - out.Set("marker", p.Marker) - } - if p.Include != "" { - out.Set("include", p.Include) - } - if p.MaxResults != 0 { - out.Set("maxresults", fmt.Sprintf("%v", p.MaxResults)) - } - if p.Timeout != 0 { - out.Set("timeout", fmt.Sprintf("%v", p.Timeout)) - } - - return out + LastModified TimeRFC1123 `xml:"Last-Modified"` + Etag string `xml:"Etag"` + ContentMD5 string `xml:"Content-MD5" header:"x-ms-blob-content-md5"` + ContentLength int64 `xml:"Content-Length"` + ContentType string `xml:"Content-Type" header:"x-ms-blob-content-type"` + ContentEncoding string `xml:"Content-Encoding" header:"x-ms-blob-content-encoding"` + CacheControl string `xml:"Cache-Control" header:"x-ms-blob-cache-control"` + ContentLanguage string `xml:"Cache-Language" header:"x-ms-blob-content-language"` + ContentDisposition string `xml:"Content-Disposition" header:"x-ms-blob-content-disposition"` + BlobType BlobType `xml:"x-ms-blob-blob-type"` + SequenceNumber int64 `xml:"x-ms-blob-sequence-number"` + CopyID string `xml:"CopyId"` + CopyStatus string `xml:"CopyStatus"` + CopySource string `xml:"CopySource"` + CopyProgress string `xml:"CopyProgress"` + CopyCompletionTime TimeRFC1123 `xml:"CopyCompletionTime"` + CopyStatusDescription string `xml:"CopyStatusDescription"` + LeaseStatus string `xml:"LeaseStatus"` + LeaseState string `xml:"LeaseState"` + LeaseDuration string `xml:"LeaseDuration"` + ServerEncrypted bool `xml:"ServerEncrypted"` + IncrementalCopy bool `xml:"IncrementalCopy"` } // BlobType defines the type of the Azure Blob. @@ -241,387 +115,18 @@ const ( BlobTypeAppend BlobType = "AppendBlob" ) -// PageWriteType defines the type updates that are going to be -// done on the page blob. -type PageWriteType string - -// Types of operations on page blobs -const ( - PageWriteTypeUpdate PageWriteType = "update" - PageWriteTypeClear PageWriteType = "clear" -) - -const ( - blobCopyStatusPending = "pending" - blobCopyStatusSuccess = "success" - blobCopyStatusAborted = "aborted" - blobCopyStatusFailed = "failed" -) - -// lease constants. 
-const ( - leaseHeaderPrefix = "x-ms-lease-" - headerLeaseID = "x-ms-lease-id" - leaseAction = "x-ms-lease-action" - leaseBreakPeriod = "x-ms-lease-break-period" - leaseDuration = "x-ms-lease-duration" - leaseProposedID = "x-ms-proposed-lease-id" - leaseTime = "x-ms-lease-time" - - acquireLease = "acquire" - renewLease = "renew" - changeLease = "change" - releaseLease = "release" - breakLease = "break" -) - -// BlockListType is used to filter out types of blocks in a Get Blocks List call -// for a block blob. -// -// See https://msdn.microsoft.com/en-us/library/azure/dd179400.aspx for all -// block types. -type BlockListType string - -// Filters for listing blocks in block blobs -const ( - BlockListTypeAll BlockListType = "all" - BlockListTypeCommitted BlockListType = "committed" - BlockListTypeUncommitted BlockListType = "uncommitted" -) - -// ContainerAccessType defines the access level to the container from a public -// request. -// -// See https://msdn.microsoft.com/en-us/library/azure/dd179468.aspx and "x-ms- -// blob-public-access" header. -type ContainerAccessType string - -// Access options for containers -const ( - ContainerAccessTypePrivate ContainerAccessType = "" - ContainerAccessTypeBlob ContainerAccessType = "blob" - ContainerAccessTypeContainer ContainerAccessType = "container" -) - -// ContainerAccessPolicyDetails are used for SETTING container policies -type ContainerAccessPolicyDetails struct { - ID string - StartTime time.Time - ExpiryTime time.Time - CanRead bool - CanWrite bool - CanDelete bool +func (b *Blob) buildPath() string { + return b.Container.buildPath() + "/" + b.Name } -// ContainerPermissions is used when setting permissions and Access Policies for containers. -type ContainerPermissions struct { - AccessType ContainerAccessType - AccessPolicies []ContainerAccessPolicyDetails -} - -// ContainerAccessHeader references header used when setting/getting container ACL -const ( - ContainerAccessHeader string = "x-ms-blob-public-access" -) - -// Maximum sizes (per REST API) for various concepts -const ( - MaxBlobBlockSize = 4 * 1024 * 1024 - MaxBlobPageSize = 4 * 1024 * 1024 -) - -// BlockStatus defines states a block for a block blob can -// be in. -type BlockStatus string - -// List of statuses that can be used to refer to a block in a block list -const ( - BlockStatusUncommitted BlockStatus = "Uncommitted" - BlockStatusCommitted BlockStatus = "Committed" - BlockStatusLatest BlockStatus = "Latest" -) - -// Block is used to create Block entities for Put Block List -// call. -type Block struct { - ID string - Status BlockStatus -} - -// BlockListResponse contains the response fields from Get Block List call. -// -// See https://msdn.microsoft.com/en-us/library/azure/dd179400.aspx -type BlockListResponse struct { - XMLName xml.Name `xml:"BlockList"` - CommittedBlocks []BlockResponse `xml:"CommittedBlocks>Block"` - UncommittedBlocks []BlockResponse `xml:"UncommittedBlocks>Block"` -} - -// BlockResponse contains the block information returned -// in the GetBlockListCall. -type BlockResponse struct { - Name string `xml:"Name"` - Size int64 `xml:"Size"` -} - -// GetPageRangesResponse contains the response fields from -// Get Page Ranges call. -// -// See https://msdn.microsoft.com/en-us/library/azure/ee691973.aspx -type GetPageRangesResponse struct { - XMLName xml.Name `xml:"PageList"` - PageList []PageRange `xml:"PageRange"` -} - -// PageRange contains information about a page of a page blob from -// Get Pages Range call. 
-// -// See https://msdn.microsoft.com/en-us/library/azure/ee691973.aspx -type PageRange struct { - Start int64 `xml:"Start"` - End int64 `xml:"End"` -} - -var ( - errBlobCopyAborted = errors.New("storage: blob copy is aborted") - errBlobCopyIDMismatch = errors.New("storage: blob copy id is a mismatch") -) - -// ListContainers returns the list of containers in a storage account along with -// pagination token and other response details. -// -// See https://msdn.microsoft.com/en-us/library/azure/dd179352.aspx -func (b BlobStorageClient) ListContainers(params ListContainersParameters) (ContainerListResponse, error) { - q := mergeParams(params.getParameters(), url.Values{"comp": {"list"}}) - uri := b.client.getEndpoint(blobServiceName, "", q) - headers := b.client.getStandardHeaders() - - var out ContainerListResponse - resp, err := b.client.exec(http.MethodGet, uri, headers, nil, b.auth) - if err != nil { - return out, err - } - defer resp.body.Close() - - err = xmlUnmarshal(resp.body, &out) - return out, err -} - -// CreateContainer creates a blob container within the storage account -// with given name and access level. Returns error if container already exists. -// -// See https://msdn.microsoft.com/en-us/library/azure/dd179468.aspx -func (b BlobStorageClient) CreateContainer(name string, access ContainerAccessType) error { - resp, err := b.createContainer(name, access) - if err != nil { - return err - } - defer resp.body.Close() - return checkRespCode(resp.statusCode, []int{http.StatusCreated}) -} - -// CreateContainerIfNotExists creates a blob container if it does not exist. Returns -// true if container is newly created or false if container already exists. -func (b BlobStorageClient) CreateContainerIfNotExists(name string, access ContainerAccessType) (bool, error) { - resp, err := b.createContainer(name, access) - if resp != nil { - defer resp.body.Close() - if resp.statusCode == http.StatusCreated || resp.statusCode == http.StatusConflict { - return resp.statusCode == http.StatusCreated, nil - } - } - return false, err -} - -func (b BlobStorageClient) createContainer(name string, access ContainerAccessType) (*storageResponse, error) { - uri := b.client.getEndpoint(blobServiceName, pathForContainer(name), url.Values{"restype": {"container"}}) - - headers := b.client.getStandardHeaders() - if access != "" { - headers[ContainerAccessHeader] = string(access) - } - return b.client.exec(http.MethodPut, uri, headers, nil, b.auth) -} - -// ContainerExists returns true if a container with given name exists -// on the storage account, otherwise returns false. 
-func (b BlobStorageClient) ContainerExists(name string) (bool, error) { - uri := b.client.getEndpoint(blobServiceName, pathForContainer(name), url.Values{"restype": {"container"}}) - headers := b.client.getStandardHeaders() - - resp, err := b.client.exec(http.MethodHead, uri, headers, nil, b.auth) - if resp != nil { - defer resp.body.Close() - if resp.statusCode == http.StatusOK || resp.statusCode == http.StatusNotFound { - return resp.statusCode == http.StatusOK, nil - } - } - return false, err -} - -// SetContainerPermissions sets up container permissions as per https://msdn.microsoft.com/en-us/library/azure/dd179391.aspx -func (b BlobStorageClient) SetContainerPermissions(container string, containerPermissions ContainerPermissions, timeout int, leaseID string) (err error) { - params := url.Values{ - "restype": {"container"}, - "comp": {"acl"}, - } - - if timeout > 0 { - params.Add("timeout", strconv.Itoa(timeout)) - } - - uri := b.client.getEndpoint(blobServiceName, pathForContainer(container), params) - headers := b.client.getStandardHeaders() - if containerPermissions.AccessType != "" { - headers[ContainerAccessHeader] = string(containerPermissions.AccessType) - } - - if leaseID != "" { - headers[headerLeaseID] = leaseID - } - - body, length, err := generateContainerACLpayload(containerPermissions.AccessPolicies) - headers["Content-Length"] = strconv.Itoa(length) - resp, err := b.client.exec(http.MethodPut, uri, headers, body, b.auth) - - if err != nil { - return err - } - - if resp != nil { - defer resp.body.Close() - - if resp.statusCode != http.StatusOK { - return errors.New("Unable to set permissions") - } - } - return nil -} - -// GetContainerPermissions gets the container permissions as per https://msdn.microsoft.com/en-us/library/azure/dd179469.aspx -// If timeout is 0 then it will not be passed to Azure -// leaseID will only be passed to Azure if populated -// Returns permissionResponse which is combined permissions and AccessPolicy -func (b BlobStorageClient) GetContainerPermissions(container string, timeout int, leaseID string) (*ContainerPermissions, error) { - params := url.Values{"restype": {"container"}, - "comp": {"acl"}} - - if timeout > 0 { - params.Add("timeout", strconv.Itoa(timeout)) - } - - uri := b.client.getEndpoint(blobServiceName, pathForContainer(container), params) - headers := b.client.getStandardHeaders() - - if leaseID != "" { - headers[headerLeaseID] = leaseID - } - - resp, err := b.client.exec(http.MethodGet, uri, headers, nil, b.auth) - if err != nil { - return nil, err - } - defer resp.body.Close() - - var out AccessPolicy - err = xmlUnmarshal(resp.body, &out.SignedIdentifiersList) - if err != nil { - return nil, err - } - - permissionResponse := updateContainerAccessPolicy(out, &resp.headers) - return &permissionResponse, nil -} - -func updateContainerAccessPolicy(ap AccessPolicy, headers *http.Header) ContainerPermissions { - // containerAccess. 
Blob, Container, empty - containerAccess := headers.Get(http.CanonicalHeaderKey(ContainerAccessHeader)) - - var cp ContainerPermissions - cp.AccessType = ContainerAccessType(containerAccess) - for _, policy := range ap.SignedIdentifiersList.SignedIdentifiers { - capd := ContainerAccessPolicyDetails{ - ID: policy.ID, - StartTime: policy.AccessPolicy.StartTime, - ExpiryTime: policy.AccessPolicy.ExpiryTime, - } - capd.CanRead = updatePermissions(policy.AccessPolicy.Permission, "r") - capd.CanWrite = updatePermissions(policy.AccessPolicy.Permission, "w") - capd.CanDelete = updatePermissions(policy.AccessPolicy.Permission, "d") - - cp.AccessPolicies = append(cp.AccessPolicies, capd) - } - - return cp -} - -// DeleteContainer deletes the container with given name on the storage -// account. If the container does not exist returns error. -// -// See https://msdn.microsoft.com/en-us/library/azure/dd179408.aspx -func (b BlobStorageClient) DeleteContainer(name string) error { - resp, err := b.deleteContainer(name) - if err != nil { - return err - } - defer resp.body.Close() - return checkRespCode(resp.statusCode, []int{http.StatusAccepted}) -} - -// DeleteContainerIfExists deletes the container with given name on the storage -// account if it exists. Returns true if container is deleted with this call, or -// false if the container did not exist at the time of the Delete Container -// operation. -// -// See https://msdn.microsoft.com/en-us/library/azure/dd179408.aspx -func (b BlobStorageClient) DeleteContainerIfExists(name string) (bool, error) { - resp, err := b.deleteContainer(name) - if resp != nil { - defer resp.body.Close() - if resp.statusCode == http.StatusAccepted || resp.statusCode == http.StatusNotFound { - return resp.statusCode == http.StatusAccepted, nil - } - } - return false, err -} - -func (b BlobStorageClient) deleteContainer(name string) (*storageResponse, error) { - uri := b.client.getEndpoint(blobServiceName, pathForContainer(name), url.Values{"restype": {"container"}}) - - headers := b.client.getStandardHeaders() - return b.client.exec(http.MethodDelete, uri, headers, nil, b.auth) -} - -// ListBlobs returns an object that contains list of blobs in the container, -// pagination token and other information in the response of List Blobs call. -// -// See https://msdn.microsoft.com/en-us/library/azure/dd135734.aspx -func (b BlobStorageClient) ListBlobs(container string, params ListBlobsParameters) (BlobListResponse, error) { - q := mergeParams(params.getParameters(), url.Values{ - "restype": {"container"}, - "comp": {"list"}}) - uri := b.client.getEndpoint(blobServiceName, pathForContainer(container), q) - headers := b.client.getStandardHeaders() - - var out BlobListResponse - resp, err := b.client.exec(http.MethodGet, uri, headers, nil, b.auth) - if err != nil { - return out, err - } - defer resp.body.Close() - - err = xmlUnmarshal(resp.body, &out) - return out, err -} - -// BlobExists returns true if a blob with given name exists on the specified +// Exists returns true if a blob with given name exists on the specified // container of the storage account. 
-func (b BlobStorageClient) BlobExists(container, name string) (bool, error) {
-	uri := b.client.getEndpoint(blobServiceName, pathForBlob(container, name), url.Values{})
-	headers := b.client.getStandardHeaders()
-	resp, err := b.client.exec(http.MethodHead, uri, headers, nil, b.auth)
+func (b *Blob) Exists() (bool, error) {
+	uri := b.Container.bsc.client.getEndpoint(blobServiceName, b.buildPath(), nil)
+	headers := b.Container.bsc.client.getStandardHeaders()
+	resp, err := b.Container.bsc.client.exec(http.MethodHead, uri, headers, nil, b.Container.bsc.auth)
 	if resp != nil {
-		defer resp.body.Close()
+		defer readAndCloseBody(resp.body)
 		if resp.statusCode == http.StatusOK || resp.statusCode == http.StatusNotFound {
 			return resp.statusCode == http.StatusOK, nil
 		}
@@ -629,23 +134,63 @@ func (b BlobStorageClient) BlobExists(container, name string) (bool, error) {
 	return false, err
 }

-// GetBlobURL gets the canonical URL to the blob with the specified name in the
-// specified container. This method does not create a publicly accessible URL if
-// the blob or container is private and this method does not check if the blob
-// exists.
-func (b BlobStorageClient) GetBlobURL(container, name string) string {
+// GetURL gets the canonical URL to the blob with the specified name in the
+// specified container. If name is not specified, the canonical URL for the entire
+// container is obtained.
+// This method does not create a publicly accessible URL if the blob or container
+// is private and this method does not check if the blob exists.
+func (b *Blob) GetURL() string {
+	container := b.Container.Name
 	if container == "" {
 		container = "$root"
 	}
-	return b.client.getEndpoint(blobServiceName, pathForBlob(container, name), url.Values{})
+	return b.Container.bsc.client.getEndpoint(blobServiceName, pathForResource(container, b.Name), nil)
 }

-// GetBlob returns a stream to read the blob. Caller must call Close() the
-// reader to close on the underlying connection.
+// GetBlobRangeOptions includes the options for a get blob range operation
+type GetBlobRangeOptions struct {
+	Range              *BlobRange
+	GetRangeContentMD5 bool
+	*GetBlobOptions
+}
+
+// GetBlobOptions includes the options for a get blob operation
+type GetBlobOptions struct {
+	Timeout           uint
+	Snapshot          *time.Time
+	LeaseID           string     `header:"x-ms-lease-id"`
+	Origin            string     `header:"Origin"`
+	IfModifiedSince   *time.Time `header:"If-Modified-Since"`
+	IfUnmodifiedSince *time.Time `header:"If-Unmodified-Since"`
+	IfMatch           string     `header:"If-Match"`
+	IfNoneMatch       string     `header:"If-None-Match"`
+	RequestID         string     `header:"x-ms-client-request-id"`
+}
+
+// BlobRange represents the byte range to get
+type BlobRange struct {
+	Start uint64
+	End   uint64
+}
+
+func (br BlobRange) String() string {
+	if br.End == 0 {
+		return fmt.Sprintf("bytes=%d-", br.Start)
+	}
+	return fmt.Sprintf("bytes=%d-%d", br.Start, br.End)
+}
+
+// Get returns a stream to read the blob. Caller must call both Read and Close()
+// to correctly close the underlying connection.
 //
-// See https://msdn.microsoft.com/en-us/library/azure/dd179440.aspx
-func (b BlobStorageClient) GetBlob(container, name string) (io.ReadCloser, error) {
-	resp, err := b.getBlobRange(container, name, "", nil)
+// See the GetRange method for use with a Range header.
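To illustrate how BlobRange renders the Range header before the Get and GetRange implementations that follow (note that End == 0 falls through to the open-ended form, so a closed range ending at byte 0 cannot be expressed), a minimal sketch against the vendored import path:

    package main

    import (
        "fmt"

        "github.com/Azure/azure-sdk-for-go/storage"
    )

    func main() {
        // End == 0 takes the open-ended branch of BlobRange.String.
        fmt.Println(storage.BlobRange{Start: 512}.String())          // bytes=512-
        fmt.Println(storage.BlobRange{Start: 0, End: 1023}.String()) // bytes=0-1023
    }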
+//
+// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Get-Blob
+func (b *Blob) Get(options *GetBlobOptions) (io.ReadCloser, error) {
+	rangeOptions := GetBlobRangeOptions{
+		GetBlobOptions: options,
+	}
+	resp, err := b.getRange(&rangeOptions)
 	if err != nil {
 		return nil, err
 	}
@@ -653,15 +198,19 @@ func (b BlobStorageClient) GetBlob(container, name string) (io.ReadCloser, error
 	if err := checkRespCode(resp.statusCode, []int{http.StatusOK}); err != nil {
 		return nil, err
 	}
+	if err := b.writeProperties(resp.headers, true); err != nil {
+		return resp.body, err
+	}
 	return resp.body, nil
 }

-// GetBlobRange reads the specified range of a blob to a stream. The bytesRange
+// GetRange reads the specified range of a blob to a stream. The bytesRange
 // string must be in a format like "0-", "10-100" as defined in HTTP 1.1 spec.
-//
-// See https://msdn.microsoft.com/en-us/library/azure/dd179440.aspx
-func (b BlobStorageClient) GetBlobRange(container, name, bytesRange string, extraHeaders map[string]string) (io.ReadCloser, error) {
-	resp, err := b.getBlobRange(container, name, bytesRange, extraHeaders)
+// Caller must call both Read and Close() to correctly close the underlying
+// connection.
+// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Get-Blob
+func (b *Blob) GetRange(options *GetBlobRangeOptions) (io.ReadCloser, error) {
+	resp, err := b.getRange(options)
 	if err != nil {
 		return nil, err
 	}
@@ -669,66 +218,69 @@ func (b BlobStorageClient) GetBlobRange(container, name, bytesRange string, extr
 	if err := checkRespCode(resp.statusCode, []int{http.StatusPartialContent}); err != nil {
 		return nil, err
 	}
+	// Content-Length header should not be updated, as the service returns the range length
+	// (which is not always the full blob length)
+	if err := b.writeProperties(resp.headers, false); err != nil {
+		return resp.body, err
+	}
 	return resp.body, nil
 }

-func (b BlobStorageClient) getBlobRange(container, name, bytesRange string, extraHeaders map[string]string) (*storageResponse, error) {
-	uri := b.client.getEndpoint(blobServiceName, pathForBlob(container, name), url.Values{})
+func (b *Blob) getRange(options *GetBlobRangeOptions) (*storageResponse, error) {
+	params := url.Values{}
+	headers := b.Container.bsc.client.getStandardHeaders()

-	extraHeaders = b.client.protectUserAgent(extraHeaders)
-	headers := b.client.getStandardHeaders()
-	if bytesRange != "" {
-		headers["Range"] = fmt.Sprintf("bytes=%s", bytesRange)
+	if options != nil {
+		if options.Range != nil {
+			headers["Range"] = options.Range.String()
+			if options.GetRangeContentMD5 {
+				headers["x-ms-range-get-content-md5"] = "true"
+			}
+		}
+		if options.GetBlobOptions != nil {
+			headers = mergeHeaders(headers, headersFromStruct(*options.GetBlobOptions))
+			params = addTimeout(params, options.Timeout)
+			params = addSnapshot(params, options.Snapshot)
+		}
 	}
+	uri := b.Container.bsc.client.getEndpoint(blobServiceName, b.buildPath(), params)

-	for k, v := range extraHeaders {
-		headers[k] = v
-	}
-
-	resp, err := b.client.exec(http.MethodGet, uri, headers, nil, b.auth)
+	resp, err := b.Container.bsc.client.exec(http.MethodGet, uri, headers, nil, b.Container.bsc.auth)
 	if err != nil {
 		return nil, err
 	}
 	return resp, err
 }

-// leasePut is common PUT code for the various acquire/release/break etc functions.
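A minimal sketch of the new options-struct call pattern for Get and GetRange. It assumes the usual account plumbing from this SDK (NewBasicClient and GetBlobService), GetContainerReference from blobserviceclient.go later in this patch, and GetBlobReference from the new container.go, which is outside this hunk; account names and credentials are placeholders:

    package main

    import (
        "io"
        "log"
        "os"

        "github.com/Azure/azure-sdk-for-go/storage"
    )

    func main() {
        client, err := storage.NewBasicClient("myaccount", "bXlhY2NvdW50a2V5") // placeholder credentials
        if err != nil {
            log.Fatal(err)
        }
        bsc := client.GetBlobService()
        blob := bsc.GetContainerReference("mycontainer").GetBlobReference("big.bin")

        // Fetch only the first KiB; GetRange turns BlobRange into the Range header.
        rc, err := blob.GetRange(&storage.GetBlobRangeOptions{
            Range: &storage.BlobRange{Start: 0, End: 1023},
        })
        if err != nil {
            log.Fatal(err)
        }
        defer rc.Close() // Read and Close, per the doc comments above

        if _, err := io.Copy(os.Stdout, rc); err != nil {
            log.Fatal(err)
        }
    }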
-func (b BlobStorageClient) leaseCommonPut(container string, name string, headers map[string]string, expectedStatus int) (http.Header, error) { - params := url.Values{"comp": {"lease"}} - uri := b.client.getEndpoint(blobServiceName, pathForBlob(container, name), params) - - resp, err := b.client.exec(http.MethodPut, uri, headers, nil, b.auth) - if err != nil { - return nil, err - } - defer resp.body.Close() - - if err := checkRespCode(resp.statusCode, []int{expectedStatus}); err != nil { - return nil, err - } - - return resp.headers, nil +// SnapshotOptions includes the options for a snapshot blob operation +type SnapshotOptions struct { + Timeout uint + LeaseID string `header:"x-ms-lease-id"` + IfModifiedSince *time.Time `header:"If-Modified-Since"` + IfUnmodifiedSince *time.Time `header:"If-Unmodified-Since"` + IfMatch string `header:"If-Match"` + IfNoneMatch string `header:"If-None-Match"` + RequestID string `header:"x-ms-client-request-id"` } -// SnapshotBlob creates a snapshot for a blob as per https://msdn.microsoft.com/en-us/library/azure/ee691971.aspx -func (b BlobStorageClient) SnapshotBlob(container string, name string, timeout int, extraHeaders map[string]string) (snapshotTimestamp *time.Time, err error) { - extraHeaders = b.client.protectUserAgent(extraHeaders) - headers := b.client.getStandardHeaders() +// CreateSnapshot creates a snapshot for a blob +// See https://msdn.microsoft.com/en-us/library/azure/ee691971.aspx +func (b *Blob) CreateSnapshot(options *SnapshotOptions) (snapshotTimestamp *time.Time, err error) { params := url.Values{"comp": {"snapshot"}} + headers := b.Container.bsc.client.getStandardHeaders() + headers = b.Container.bsc.client.addMetadataToHeaders(headers, b.Metadata) - if timeout > 0 { - params.Add("timeout", strconv.Itoa(timeout)) + if options != nil { + params = addTimeout(params, options.Timeout) + headers = mergeHeaders(headers, headersFromStruct(*options)) } + uri := b.Container.bsc.client.getEndpoint(blobServiceName, b.buildPath(), params) - for k, v := range extraHeaders { - headers[k] = v - } - - uri := b.client.getEndpoint(blobServiceName, pathForBlob(container, name), params) - resp, err := b.client.exec(http.MethodPut, uri, headers, nil, b.auth) - if err != nil { + resp, err := b.Container.bsc.client.exec(http.MethodPut, uri, headers, nil, b.Container.bsc.auth) + if err != nil || resp == nil { return nil, err } + defer readAndCloseBody(resp.body) if err := checkRespCode(resp.statusCode, []int{http.StatusCreated}); err != nil { return nil, err @@ -740,212 +292,180 @@ func (b BlobStorageClient) SnapshotBlob(container string, name string, timeout i if err != nil { return nil, err } - return &snapshotTimestamp, nil } return nil, errors.New("Snapshot not created") } -// AcquireLease creates a lease for a blob as per https://msdn.microsoft.com/en-us/library/azure/ee691972.aspx -// returns leaseID acquired -func (b BlobStorageClient) AcquireLease(container string, name string, leaseTimeInSeconds int, proposedLeaseID string) (returnedLeaseID string, err error) { - headers := b.client.getStandardHeaders() - headers[leaseAction] = acquireLease - - if leaseTimeInSeconds > 0 { - headers[leaseDuration] = strconv.Itoa(leaseTimeInSeconds) - } - - if proposedLeaseID != "" { - headers[leaseProposedID] = proposedLeaseID - } - - respHeaders, err := b.leaseCommonPut(container, name, headers, http.StatusCreated) - if err != nil { - return "", err - } - - returnedLeaseID = respHeaders.Get(http.CanonicalHeaderKey(headerLeaseID)) - - if returnedLeaseID != "" { - 
return returnedLeaseID, nil - } - - return "", errors.New("LeaseID not returned") +// GetBlobPropertiesOptions includes the options for a get blob properties operation +type GetBlobPropertiesOptions struct { + Timeout uint + Snapshot *time.Time + LeaseID string `header:"x-ms-lease-id"` + IfModifiedSince *time.Time `header:"If-Modified-Since"` + IfUnmodifiedSince *time.Time `header:"If-Unmodified-Since"` + IfMatch string `header:"If-Match"` + IfNoneMatch string `header:"If-None-Match"` + RequestID string `header:"x-ms-client-request-id"` } -// BreakLease breaks the lease for a blob as per https://msdn.microsoft.com/en-us/library/azure/ee691972.aspx -// Returns the timeout remaining in the lease in seconds -func (b BlobStorageClient) BreakLease(container string, name string) (breakTimeout int, err error) { - headers := b.client.getStandardHeaders() - headers[leaseAction] = breakLease - return b.breakLeaseCommon(container, name, headers) -} +// GetProperties provides various information about the specified blob. +// See https://msdn.microsoft.com/en-us/library/azure/dd179394.aspx +func (b *Blob) GetProperties(options *GetBlobPropertiesOptions) error { + params := url.Values{} + headers := b.Container.bsc.client.getStandardHeaders() -// BreakLeaseWithBreakPeriod breaks the lease for a blob as per https://msdn.microsoft.com/en-us/library/azure/ee691972.aspx -// breakPeriodInSeconds is used to determine how long until new lease can be created. -// Returns the timeout remaining in the lease in seconds -func (b BlobStorageClient) BreakLeaseWithBreakPeriod(container string, name string, breakPeriodInSeconds int) (breakTimeout int, err error) { - headers := b.client.getStandardHeaders() - headers[leaseAction] = breakLease - headers[leaseBreakPeriod] = strconv.Itoa(breakPeriodInSeconds) - return b.breakLeaseCommon(container, name, headers) -} - -// breakLeaseCommon is common code for both version of BreakLease (with and without break period) -func (b BlobStorageClient) breakLeaseCommon(container string, name string, headers map[string]string) (breakTimeout int, err error) { - - respHeaders, err := b.leaseCommonPut(container, name, headers, http.StatusAccepted) - if err != nil { - return 0, err + if options != nil { + params = addTimeout(params, options.Timeout) + params = addSnapshot(params, options.Snapshot) + headers = mergeHeaders(headers, headersFromStruct(*options)) } + uri := b.Container.bsc.client.getEndpoint(blobServiceName, b.buildPath(), params) - breakTimeoutStr := respHeaders.Get(http.CanonicalHeaderKey(leaseTime)) - if breakTimeoutStr != "" { - breakTimeout, err = strconv.Atoi(breakTimeoutStr) - if err != nil { - return 0, err - } - } - - return breakTimeout, nil -} - -// ChangeLease changes a lease ID for a blob as per https://msdn.microsoft.com/en-us/library/azure/ee691972.aspx -// Returns the new LeaseID acquired -func (b BlobStorageClient) ChangeLease(container string, name string, currentLeaseID string, proposedLeaseID string) (newLeaseID string, err error) { - headers := b.client.getStandardHeaders() - headers[leaseAction] = changeLease - headers[headerLeaseID] = currentLeaseID - headers[leaseProposedID] = proposedLeaseID - - respHeaders, err := b.leaseCommonPut(container, name, headers, http.StatusOK) - if err != nil { - return "", err - } - - newLeaseID = respHeaders.Get(http.CanonicalHeaderKey(headerLeaseID)) - if newLeaseID != "" { - return newLeaseID, nil - } - - return "", errors.New("LeaseID not returned") -} - -// ReleaseLease releases the lease for a blob as per 
https://msdn.microsoft.com/en-us/library/azure/ee691972.aspx -func (b BlobStorageClient) ReleaseLease(container string, name string, currentLeaseID string) error { - headers := b.client.getStandardHeaders() - headers[leaseAction] = releaseLease - headers[headerLeaseID] = currentLeaseID - - _, err := b.leaseCommonPut(container, name, headers, http.StatusOK) + resp, err := b.Container.bsc.client.exec(http.MethodHead, uri, headers, nil, b.Container.bsc.auth) if err != nil { return err } - - return nil -} - -// RenewLease renews the lease for a blob as per https://msdn.microsoft.com/en-us/library/azure/ee691972.aspx -func (b BlobStorageClient) RenewLease(container string, name string, currentLeaseID string) error { - headers := b.client.getStandardHeaders() - headers[leaseAction] = renewLease - headers[headerLeaseID] = currentLeaseID - - _, err := b.leaseCommonPut(container, name, headers, http.StatusOK) - if err != nil { - return err - } - - return nil -} - -// GetBlobProperties provides various information about the specified -// blob. See https://msdn.microsoft.com/en-us/library/azure/dd179394.aspx -func (b BlobStorageClient) GetBlobProperties(container, name string) (*BlobProperties, error) { - uri := b.client.getEndpoint(blobServiceName, pathForBlob(container, name), url.Values{}) - - headers := b.client.getStandardHeaders() - resp, err := b.client.exec(http.MethodHead, uri, headers, nil, b.auth) - if err != nil { - return nil, err - } - defer resp.body.Close() + defer readAndCloseBody(resp.body) if err = checkRespCode(resp.statusCode, []int{http.StatusOK}); err != nil { - return nil, err + return err } + return b.writeProperties(resp.headers, true) +} - var contentLength int64 - contentLengthStr := resp.headers.Get("Content-Length") - if contentLengthStr != "" { - contentLength, err = strconv.ParseInt(contentLengthStr, 0, 64) - if err != nil { - return nil, err +func (b *Blob) writeProperties(h http.Header, includeContentLen bool) error { + var err error + + contentLength := b.Properties.ContentLength + if includeContentLen { + contentLengthStr := h.Get("Content-Length") + if contentLengthStr != "" { + contentLength, err = strconv.ParseInt(contentLengthStr, 0, 64) + if err != nil { + return err + } } } var sequenceNum int64 - sequenceNumStr := resp.headers.Get("x-ms-blob-sequence-number") + sequenceNumStr := h.Get("x-ms-blob-sequence-number") if sequenceNumStr != "" { sequenceNum, err = strconv.ParseInt(sequenceNumStr, 0, 64) if err != nil { - return nil, err + return err } } - return &BlobProperties{ - LastModified: resp.headers.Get("Last-Modified"), - Etag: resp.headers.Get("Etag"), - ContentMD5: resp.headers.Get("Content-MD5"), + lastModified, err := getTimeFromHeaders(h, "Last-Modified") + if err != nil { + return err + } + + copyCompletionTime, err := getTimeFromHeaders(h, "x-ms-copy-completion-time") + if err != nil { + return err + } + + b.Properties = BlobProperties{ + LastModified: TimeRFC1123(*lastModified), + Etag: h.Get("Etag"), + ContentMD5: h.Get("Content-MD5"), ContentLength: contentLength, - ContentEncoding: resp.headers.Get("Content-Encoding"), - ContentType: resp.headers.Get("Content-Type"), - CacheControl: resp.headers.Get("Cache-Control"), - ContentLanguage: resp.headers.Get("Content-Language"), + ContentEncoding: h.Get("Content-Encoding"), + ContentType: h.Get("Content-Type"), + ContentDisposition: h.Get("Content-Disposition"), + CacheControl: h.Get("Cache-Control"), + ContentLanguage: h.Get("Content-Language"), SequenceNumber: sequenceNum, - CopyCompletionTime: 
resp.headers.Get("x-ms-copy-completion-time"), - CopyStatusDescription: resp.headers.Get("x-ms-copy-status-description"), - CopyID: resp.headers.Get("x-ms-copy-id"), - CopyProgress: resp.headers.Get("x-ms-copy-progress"), - CopySource: resp.headers.Get("x-ms-copy-source"), - CopyStatus: resp.headers.Get("x-ms-copy-status"), - BlobType: BlobType(resp.headers.Get("x-ms-blob-type")), - LeaseStatus: resp.headers.Get("x-ms-lease-status"), - LeaseState: resp.headers.Get("x-ms-lease-state"), - }, nil + CopyCompletionTime: TimeRFC1123(*copyCompletionTime), + CopyStatusDescription: h.Get("x-ms-copy-status-description"), + CopyID: h.Get("x-ms-copy-id"), + CopyProgress: h.Get("x-ms-copy-progress"), + CopySource: h.Get("x-ms-copy-source"), + CopyStatus: h.Get("x-ms-copy-status"), + BlobType: BlobType(h.Get("x-ms-blob-type")), + LeaseStatus: h.Get("x-ms-lease-status"), + LeaseState: h.Get("x-ms-lease-state"), + } + b.writeMetadata(h) + return nil } -// SetBlobProperties replaces the BlobHeaders for the specified blob. +// SetBlobPropertiesOptions contains various properties of a blob and is an entry +// in SetProperties +type SetBlobPropertiesOptions struct { + Timeout uint + LeaseID string `header:"x-ms-lease-id"` + Origin string `header:"Origin"` + IfModifiedSince *time.Time `header:"If-Modified-Since"` + IfUnmodifiedSince *time.Time `header:"If-Unmodified-Since"` + IfMatch string `header:"If-Match"` + IfNoneMatch string `header:"If-None-Match"` + SequenceNumberAction *SequenceNumberAction + RequestID string `header:"x-ms-client-request-id"` +} + +// SequenceNumberAction defines how the blob's sequence number should be modified +type SequenceNumberAction string + +// Options for sequence number action +const ( + SequenceNumberActionMax SequenceNumberAction = "max" + SequenceNumberActionUpdate SequenceNumberAction = "update" + SequenceNumberActionIncrement SequenceNumberAction = "increment" +) + +// SetProperties replaces the BlobHeaders for the specified blob. // // Some keys may be converted to Camel-Case before sending. All keys // are returned in lower case by GetBlobProperties. HTTP header names // are case-insensitive so case munging should not matter to other // applications either. 
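Since GetProperties now fills in the receiver's Properties field via writeProperties, and SetProperties (continued below) pushes b.Properties back to the service, a read-modify-write cycle looks roughly like this (same placeholder plumbing as the earlier sketch):

    package main

    import (
        "log"

        "github.com/Azure/azure-sdk-for-go/storage"
    )

    func main() {
        client, err := storage.NewBasicClient("myaccount", "bXlhY2NvdW50a2V5") // placeholders
        if err != nil {
            log.Fatal(err)
        }
        blob := client.GetBlobService().GetContainerReference("mycontainer").GetBlobReference("doc.json")

        // Populate blob.Properties from the service (a HEAD request).
        if err := blob.GetProperties(nil); err != nil {
            log.Fatal(err)
        }
        log.Printf("%d bytes, type %q", blob.Properties.ContentLength, blob.Properties.ContentType)

        // Mutate a header-backed property and push it back.
        blob.Properties.ContentType = "application/json"
        if err := blob.SetProperties(nil); err != nil {
            log.Fatal(err)
        }
    }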
// -// See https://msdn.microsoft.com/en-us/library/azure/ee691966.aspx -func (b BlobStorageClient) SetBlobProperties(container, name string, blobHeaders BlobHeaders) error { +// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Set-Blob-Properties +func (b *Blob) SetProperties(options *SetBlobPropertiesOptions) error { params := url.Values{"comp": {"properties"}} - uri := b.client.getEndpoint(blobServiceName, pathForBlob(container, name), params) - headers := b.client.getStandardHeaders() + headers := b.Container.bsc.client.getStandardHeaders() + headers = mergeHeaders(headers, headersFromStruct(b.Properties)) - extraHeaders := headersFromStruct(blobHeaders) + if options != nil { + params = addTimeout(params, options.Timeout) + headers = mergeHeaders(headers, headersFromStruct(*options)) + } + uri := b.Container.bsc.client.getEndpoint(blobServiceName, b.buildPath(), params) - for k, v := range extraHeaders { - headers[k] = v + if b.Properties.BlobType == BlobTypePage { + headers = addToHeaders(headers, "x-ms-blob-content-length", fmt.Sprintf("%v", b.Properties.ContentLength)) + if options != nil && options.SequenceNumberAction != nil { + headers = addToHeaders(headers, "x-ms-sequence-number-action", string(*options.SequenceNumberAction)) + if *options.SequenceNumberAction != SequenceNumberActionIncrement { + headers = addToHeaders(headers, "x-ms-blob-sequence-number", fmt.Sprintf("%v", b.Properties.SequenceNumber)) + } + } } - resp, err := b.client.exec(http.MethodPut, uri, headers, nil, b.auth) + resp, err := b.Container.bsc.client.exec(http.MethodPut, uri, headers, nil, b.Container.bsc.auth) if err != nil { return err } - defer resp.body.Close() - + readAndCloseBody(resp.body) return checkRespCode(resp.statusCode, []int{http.StatusOK}) } -// SetBlobMetadata replaces the metadata for the specified blob. +// SetBlobMetadataOptions includes the options for a set blob metadata operation +type SetBlobMetadataOptions struct { + Timeout uint + LeaseID string `header:"x-ms-lease-id"` + IfModifiedSince *time.Time `header:"If-Modified-Since"` + IfUnmodifiedSince *time.Time `header:"If-Unmodified-Since"` + IfMatch string `header:"If-Match"` + IfNoneMatch string `header:"If-None-Match"` + RequestID string `header:"x-ms-client-request-id"` +} + +// SetMetadata replaces the metadata for the specified blob. // // Some keys may be converted to Camel-Case before sending. All keys // are returned in lower case by GetBlobMetadata. HTTP header names @@ -953,52 +473,71 @@ func (b BlobStorageClient) SetBlobProperties(container, name string, blobHeaders // applications either. 
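SetMetadata follows the same receiver-driven pattern: it sends whatever is currently in b.Metadata, and GetMetadata (below) writes the service's answer back with lowercased keys. A sketch under the same plumbing assumptions:

    package main

    import (
        "fmt"
        "log"

        "github.com/Azure/azure-sdk-for-go/storage"
    )

    func main() {
        client, err := storage.NewBasicClient("myaccount", "bXlhY2NvdW50a2V5") // placeholders
        if err != nil {
            log.Fatal(err)
        }
        blob := client.GetBlobService().GetContainerReference("mycontainer").GetBlobReference("doc.json")

        // Sent as x-ms-meta-* headers; keys come back lowercased, per the notes above.
        blob.Metadata = storage.BlobMetadata{"uploadedby": "minio-gateway"}
        if err := blob.SetMetadata(nil); err != nil {
            log.Fatal(err)
        }

        if err := blob.GetMetadata(nil); err != nil {
            log.Fatal(err)
        }
        fmt.Println(blob.Metadata["uploadedby"])
    }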
// // See https://msdn.microsoft.com/en-us/library/azure/dd179414.aspx -func (b BlobStorageClient) SetBlobMetadata(container, name string, metadata map[string]string, extraHeaders map[string]string) error { +func (b *Blob) SetMetadata(options *SetBlobMetadataOptions) error { params := url.Values{"comp": {"metadata"}} - uri := b.client.getEndpoint(blobServiceName, pathForBlob(container, name), params) - metadata = b.client.protectUserAgent(metadata) - extraHeaders = b.client.protectUserAgent(extraHeaders) - headers := b.client.getStandardHeaders() - for k, v := range metadata { - headers[userDefinedMetadataHeaderPrefix+k] = v - } + headers := b.Container.bsc.client.getStandardHeaders() + headers = b.Container.bsc.client.addMetadataToHeaders(headers, b.Metadata) - for k, v := range extraHeaders { - headers[k] = v + if options != nil { + params = addTimeout(params, options.Timeout) + headers = mergeHeaders(headers, headersFromStruct(*options)) } + uri := b.Container.bsc.client.getEndpoint(blobServiceName, b.buildPath(), params) - resp, err := b.client.exec(http.MethodPut, uri, headers, nil, b.auth) + resp, err := b.Container.bsc.client.exec(http.MethodPut, uri, headers, nil, b.Container.bsc.auth) if err != nil { return err } - defer resp.body.Close() - + readAndCloseBody(resp.body) return checkRespCode(resp.statusCode, []int{http.StatusOK}) } -// GetBlobMetadata returns all user-defined metadata for the specified blob. +// GetBlobMetadataOptions includes the options for a get blob metadata operation +type GetBlobMetadataOptions struct { + Timeout uint + Snapshot *time.Time + LeaseID string `header:"x-ms-lease-id"` + IfModifiedSince *time.Time `header:"If-Modified-Since"` + IfUnmodifiedSince *time.Time `header:"If-Unmodified-Since"` + IfMatch string `header:"If-Match"` + IfNoneMatch string `header:"If-None-Match"` + RequestID string `header:"x-ms-client-request-id"` +} + +// GetMetadata returns all user-defined metadata for the specified blob. // // All metadata keys will be returned in lower case. (HTTP header // names are case-insensitive.) // // See https://msdn.microsoft.com/en-us/library/azure/dd179414.aspx -func (b BlobStorageClient) GetBlobMetadata(container, name string) (map[string]string, error) { +func (b *Blob) GetMetadata(options *GetBlobMetadataOptions) error { params := url.Values{"comp": {"metadata"}} - uri := b.client.getEndpoint(blobServiceName, pathForBlob(container, name), params) - headers := b.client.getStandardHeaders() + headers := b.Container.bsc.client.getStandardHeaders() - resp, err := b.client.exec(http.MethodGet, uri, headers, nil, b.auth) - if err != nil { - return nil, err + if options != nil { + params = addTimeout(params, options.Timeout) + params = addSnapshot(params, options.Snapshot) + headers = mergeHeaders(headers, headersFromStruct(*options)) } - defer resp.body.Close() + uri := b.Container.bsc.client.getEndpoint(blobServiceName, b.buildPath(), params) + + resp, err := b.Container.bsc.client.exec(http.MethodGet, uri, headers, nil, b.Container.bsc.auth) + if err != nil { + return err + } + defer readAndCloseBody(resp.body) if err := checkRespCode(resp.statusCode, []int{http.StatusOK}); err != nil { - return nil, err + return err } + b.writeMetadata(resp.headers) + return nil +} + +func (b *Blob) writeMetadata(h http.Header) { metadata := make(map[string]string) - for k, v := range resp.headers { + for k, v := range h { // Can't trust CanonicalHeaderKey() to munge case // reliably. 
"_" is allowed in identifiers: // https://msdn.microsoft.com/en-us/library/azure/dd179414.aspx @@ -1007,383 +546,53 @@ func (b BlobStorageClient) GetBlobMetadata(container, name string) (map[string]s // ...but "_" is considered invalid by // CanonicalMIMEHeaderKey in // https://golang.org/src/net/textproto/reader.go?s=14615:14659#L542 - // so k can be "X-Ms-Meta-Foo" or "x-ms-meta-foo_bar". + // so k can be "X-Ms-Meta-Lol" or "x-ms-meta-lol_rofl". k = strings.ToLower(k) if len(v) == 0 || !strings.HasPrefix(k, strings.ToLower(userDefinedMetadataHeaderPrefix)) { continue } - // metadata["foo"] = content of the last X-Ms-Meta-Foo header + // metadata["lol"] = content of the last X-Ms-Meta-Lol header k = k[len(userDefinedMetadataHeaderPrefix):] metadata[k] = v[len(v)-1] } - return metadata, nil + + b.Metadata = BlobMetadata(metadata) } -// CreateBlockBlob initializes an empty block blob with no blocks. -// -// See https://msdn.microsoft.com/en-us/library/azure/dd179451.aspx -func (b BlobStorageClient) CreateBlockBlob(container, name string) error { - return b.CreateBlockBlobFromReader(container, name, 0, nil, nil) +// DeleteBlobOptions includes the options for a delete blob operation +type DeleteBlobOptions struct { + Timeout uint + Snapshot *time.Time + LeaseID string `header:"x-ms-lease-id"` + DeleteSnapshots *bool + IfModifiedSince *time.Time `header:"If-Modified-Since"` + IfUnmodifiedSince *time.Time `header:"If-Unmodified-Since"` + IfMatch string `header:"If-Match"` + IfNoneMatch string `header:"If-None-Match"` + RequestID string `header:"x-ms-client-request-id"` } -// CreateBlockBlobFromReader initializes a block blob using data from -// reader. Size must be the number of bytes read from reader. To -// create an empty blob, use size==0 and reader==nil. -// -// The API rejects requests with size > 64 MiB (but this limit is not -// checked by the SDK). To write a larger blob, use CreateBlockBlob, -// PutBlock, and PutBlockList. -// -// See https://msdn.microsoft.com/en-us/library/azure/dd179451.aspx -func (b BlobStorageClient) CreateBlockBlobFromReader(container, name string, size uint64, blob io.Reader, extraHeaders map[string]string) error { - path := fmt.Sprintf("%s/%s", container, name) - uri := b.client.getEndpoint(blobServiceName, path, url.Values{}) - extraHeaders = b.client.protectUserAgent(extraHeaders) - headers := b.client.getStandardHeaders() - headers["x-ms-blob-type"] = string(BlobTypeBlock) - headers["Content-Length"] = fmt.Sprintf("%d", size) - - for k, v := range extraHeaders { - headers[k] = v - } - - resp, err := b.client.exec(http.MethodPut, uri, headers, blob, b.auth) - if err != nil { - return err - } - defer resp.body.Close() - return checkRespCode(resp.statusCode, []int{http.StatusCreated}) -} - -// PutBlock saves the given data chunk to the specified block blob with -// given ID. -// -// The API rejects chunks larger than 4 MiB (but this limit is not -// checked by the SDK). -// -// See https://msdn.microsoft.com/en-us/library/azure/dd135726.aspx -func (b BlobStorageClient) PutBlock(container, name, blockID string, chunk []byte) error { - return b.PutBlockWithLength(container, name, blockID, uint64(len(chunk)), bytes.NewReader(chunk), nil) -} - -// PutBlockWithLength saves the given data stream of exactly specified size to -// the block blob with given ID. It is an alternative to PutBlocks where data -// comes as stream but the length is known in advance. -// -// The API rejects requests with size > 4 MiB (but this limit is not -// checked by the SDK). 
-// -// See https://msdn.microsoft.com/en-us/library/azure/dd135726.aspx -func (b BlobStorageClient) PutBlockWithLength(container, name, blockID string, size uint64, blob io.Reader, extraHeaders map[string]string) error { - uri := b.client.getEndpoint(blobServiceName, pathForBlob(container, name), url.Values{"comp": {"block"}, "blockid": {blockID}}) - extraHeaders = b.client.protectUserAgent(extraHeaders) - headers := b.client.getStandardHeaders() - headers["x-ms-blob-type"] = string(BlobTypeBlock) - headers["Content-Length"] = fmt.Sprintf("%v", size) - - for k, v := range extraHeaders { - headers[k] = v - } - - resp, err := b.client.exec(http.MethodPut, uri, headers, blob, b.auth) - if err != nil { - return err - } - - defer resp.body.Close() - return checkRespCode(resp.statusCode, []int{http.StatusCreated}) -} - -// PutBlockList saves list of blocks to the specified block blob. -// -// See https://msdn.microsoft.com/en-us/library/azure/dd179467.aspx -func (b BlobStorageClient) PutBlockList(container, name string, blocks []Block) error { - blockListXML := prepareBlockListRequest(blocks) - - uri := b.client.getEndpoint(blobServiceName, pathForBlob(container, name), url.Values{"comp": {"blocklist"}}) - headers := b.client.getStandardHeaders() - headers["Content-Length"] = fmt.Sprintf("%v", len(blockListXML)) - - resp, err := b.client.exec(http.MethodPut, uri, headers, strings.NewReader(blockListXML), b.auth) - if err != nil { - return err - } - defer resp.body.Close() - return checkRespCode(resp.statusCode, []int{http.StatusCreated}) -} - -// GetBlockList retrieves list of blocks in the specified block blob. -// -// See https://msdn.microsoft.com/en-us/library/azure/dd179400.aspx -func (b BlobStorageClient) GetBlockList(container, name string, blockType BlockListType) (BlockListResponse, error) { - params := url.Values{"comp": {"blocklist"}, "blocklisttype": {string(blockType)}} - uri := b.client.getEndpoint(blobServiceName, pathForBlob(container, name), params) - headers := b.client.getStandardHeaders() - - var out BlockListResponse - resp, err := b.client.exec(http.MethodGet, uri, headers, nil, b.auth) - if err != nil { - return out, err - } - defer resp.body.Close() - - err = xmlUnmarshal(resp.body, &out) - return out, err -} - -// PutPageBlob initializes an empty page blob with specified name and maximum -// size in bytes (size must be aligned to a 512-byte boundary). A page blob must -// be created using this method before writing pages. -// -// See https://msdn.microsoft.com/en-us/library/azure/dd179451.aspx -func (b BlobStorageClient) PutPageBlob(container, name string, size int64, extraHeaders map[string]string) error { - path := fmt.Sprintf("%s/%s", container, name) - uri := b.client.getEndpoint(blobServiceName, path, url.Values{}) - extraHeaders = b.client.protectUserAgent(extraHeaders) - headers := b.client.getStandardHeaders() - headers["x-ms-blob-type"] = string(BlobTypePage) - headers["x-ms-blob-content-length"] = fmt.Sprintf("%v", size) - - for k, v := range extraHeaders { - headers[k] = v - } - - resp, err := b.client.exec(http.MethodPut, uri, headers, nil, b.auth) - if err != nil { - return err - } - defer resp.body.Close() - - return checkRespCode(resp.statusCode, []int{http.StatusCreated}) -} - -// PutPage writes a range of pages to a page blob or clears the given range. -// In case of 'clear' writes, given chunk is discarded. Ranges must be aligned -// with 512-byte boundaries and chunk must be of size multiplies by 512. 
-// -// See https://msdn.microsoft.com/en-us/library/ee691975.aspx -func (b BlobStorageClient) PutPage(container, name string, startByte, endByte int64, writeType PageWriteType, chunk []byte, extraHeaders map[string]string) error { - path := fmt.Sprintf("%s/%s", container, name) - uri := b.client.getEndpoint(blobServiceName, path, url.Values{"comp": {"page"}}) - extraHeaders = b.client.protectUserAgent(extraHeaders) - headers := b.client.getStandardHeaders() - headers["x-ms-blob-type"] = string(BlobTypePage) - headers["x-ms-page-write"] = string(writeType) - headers["x-ms-range"] = fmt.Sprintf("bytes=%v-%v", startByte, endByte) - for k, v := range extraHeaders { - headers[k] = v - } - var contentLength int64 - var data io.Reader - if writeType == PageWriteTypeClear { - contentLength = 0 - data = bytes.NewReader([]byte{}) - } else { - contentLength = int64(len(chunk)) - data = bytes.NewReader(chunk) - } - headers["Content-Length"] = fmt.Sprintf("%v", contentLength) - - resp, err := b.client.exec(http.MethodPut, uri, headers, data, b.auth) - if err != nil { - return err - } - defer resp.body.Close() - - return checkRespCode(resp.statusCode, []int{http.StatusCreated}) -} - -// GetPageRanges returns the list of valid page ranges for a page blob. -// -// See https://msdn.microsoft.com/en-us/library/azure/ee691973.aspx -func (b BlobStorageClient) GetPageRanges(container, name string) (GetPageRangesResponse, error) { - path := fmt.Sprintf("%s/%s", container, name) - uri := b.client.getEndpoint(blobServiceName, path, url.Values{"comp": {"pagelist"}}) - headers := b.client.getStandardHeaders() - - var out GetPageRangesResponse - resp, err := b.client.exec(http.MethodGet, uri, headers, nil, b.auth) - if err != nil { - return out, err - } - defer resp.body.Close() - - if err = checkRespCode(resp.statusCode, []int{http.StatusOK}); err != nil { - return out, err - } - err = xmlUnmarshal(resp.body, &out) - return out, err -} - -// PutAppendBlob initializes an empty append blob with specified name. An -// append blob must be created using this method before appending blocks. -// -// See https://msdn.microsoft.com/en-us/library/azure/dd179451.aspx -func (b BlobStorageClient) PutAppendBlob(container, name string, extraHeaders map[string]string) error { - path := fmt.Sprintf("%s/%s", container, name) - uri := b.client.getEndpoint(blobServiceName, path, url.Values{}) - extraHeaders = b.client.protectUserAgent(extraHeaders) - headers := b.client.getStandardHeaders() - headers["x-ms-blob-type"] = string(BlobTypeAppend) - - for k, v := range extraHeaders { - headers[k] = v - } - - resp, err := b.client.exec(http.MethodPut, uri, headers, nil, b.auth) - if err != nil { - return err - } - defer resp.body.Close() - - return checkRespCode(resp.statusCode, []int{http.StatusCreated}) -} - -// AppendBlock appends a block to an append blob. 
-// -// See https://msdn.microsoft.com/en-us/library/azure/mt427365.aspx -func (b BlobStorageClient) AppendBlock(container, name string, chunk []byte, extraHeaders map[string]string) error { - path := fmt.Sprintf("%s/%s", container, name) - uri := b.client.getEndpoint(blobServiceName, path, url.Values{"comp": {"appendblock"}}) - extraHeaders = b.client.protectUserAgent(extraHeaders) - headers := b.client.getStandardHeaders() - headers["x-ms-blob-type"] = string(BlobTypeAppend) - headers["Content-Length"] = fmt.Sprintf("%v", len(chunk)) - - for k, v := range extraHeaders { - headers[k] = v - } - - resp, err := b.client.exec(http.MethodPut, uri, headers, bytes.NewReader(chunk), b.auth) - if err != nil { - return err - } - defer resp.body.Close() - - return checkRespCode(resp.statusCode, []int{http.StatusCreated}) -} - -// CopyBlob starts a blob copy operation and waits for the operation to -// complete. sourceBlob parameter must be a canonical URL to the blob (can be -// obtained using GetBlobURL method.) There is no SLA on blob copy and therefore -// this helper method works faster on smaller files. -// -// See https://msdn.microsoft.com/en-us/library/azure/dd894037.aspx -func (b BlobStorageClient) CopyBlob(container, name, sourceBlob string) error { - copyID, err := b.StartBlobCopy(container, name, sourceBlob) - if err != nil { - return err - } - - return b.WaitForBlobCopy(container, name, copyID) -} - -// StartBlobCopy starts a blob copy operation. -// sourceBlob parameter must be a canonical URL to the blob (can be -// obtained using GetBlobURL method.) -// -// See https://msdn.microsoft.com/en-us/library/azure/dd894037.aspx -func (b BlobStorageClient) StartBlobCopy(container, name, sourceBlob string) (string, error) { - uri := b.client.getEndpoint(blobServiceName, pathForBlob(container, name), url.Values{}) - - headers := b.client.getStandardHeaders() - headers["x-ms-copy-source"] = sourceBlob - - resp, err := b.client.exec(http.MethodPut, uri, headers, nil, b.auth) - if err != nil { - return "", err - } - defer resp.body.Close() - - if err := checkRespCode(resp.statusCode, []int{http.StatusAccepted, http.StatusCreated}); err != nil { - return "", err - } - - copyID := resp.headers.Get("x-ms-copy-id") - if copyID == "" { - return "", errors.New("Got empty copy id header") - } - return copyID, nil -} - -// AbortBlobCopy aborts a BlobCopy which has already been triggered by the StartBlobCopy function. -// copyID is generated from StartBlobCopy function. -// currentLeaseID is required IF the destination blob has an active lease on it. 
-// As defined in https://msdn.microsoft.com/en-us/library/azure/jj159098.aspx
-func (b BlobStorageClient) AbortBlobCopy(container, name, copyID, currentLeaseID string, timeout int) error {
-	params := url.Values{"comp": {"copy"}, "copyid": {copyID}}
-	if timeout > 0 {
-		params.Add("timeout", strconv.Itoa(timeout))
-	}
-
-	uri := b.client.getEndpoint(blobServiceName, pathForBlob(container, name), params)
-	headers := b.client.getStandardHeaders()
-	headers["x-ms-copy-action"] = "abort"
-
-	if currentLeaseID != "" {
-		headers[headerLeaseID] = currentLeaseID
-	}
-
-	resp, err := b.client.exec(http.MethodPut, uri, headers, nil, b.auth)
-	if err != nil {
-		return err
-	}
-	defer resp.body.Close()
-
-	if err := checkRespCode(resp.statusCode, []int{http.StatusNoContent}); err != nil {
-		return err
-	}
-
-	return nil
-}
-
-// WaitForBlobCopy loops until a BlobCopy operation is completed (or fails with error)
-func (b BlobStorageClient) WaitForBlobCopy(container, name, copyID string) error {
-	for {
-		props, err := b.GetBlobProperties(container, name)
-		if err != nil {
-			return err
-		}
-
-		if props.CopyID != copyID {
-			return errBlobCopyIDMismatch
-		}
-
-		switch props.CopyStatus {
-		case blobCopyStatusSuccess:
-			return nil
-		case blobCopyStatusPending:
-			continue
-		case blobCopyStatusAborted:
-			return errBlobCopyAborted
-		case blobCopyStatusFailed:
-			return fmt.Errorf("storage: blob copy failed. Id=%s Description=%s", props.CopyID, props.CopyStatusDescription)
-		default:
-			return fmt.Errorf("storage: unhandled blob copy status: '%s'", props.CopyStatus)
-		}
-	}
-}
-
-// DeleteBlob deletes the given blob from the specified container.
+// Delete deletes the given blob from the specified container.
 // If the blob does not exists at the time of the Delete Blob operation, it
-// returns error. See https://msdn.microsoft.com/en-us/library/azure/dd179413.aspx
-func (b BlobStorageClient) DeleteBlob(container, name string, extraHeaders map[string]string) error {
-	resp, err := b.deleteBlob(container, name, extraHeaders)
+// returns error.
+// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Delete-Blob
+func (b *Blob) Delete(options *DeleteBlobOptions) error {
+	resp, err := b.delete(options)
 	if err != nil {
 		return err
 	}
-	defer resp.body.Close()
+	readAndCloseBody(resp.body)
 	return checkRespCode(resp.statusCode, []int{http.StatusAccepted})
 }

-// DeleteBlobIfExists deletes the given blob from the specified container If the
+// DeleteIfExists deletes the given blob from the specified container. If the
 // blob is deleted with this call, returns true. Otherwise returns false.
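DeleteSnapshots above is a tri-state *bool: nil omits the x-ms-delete-snapshots header, true sends "include", and false sends "only", as the delete helper further down shows. A sketch under the same plumbing assumptions as the earlier examples:

    package main

    import (
        "log"

        "github.com/Azure/azure-sdk-for-go/storage"
    )

    func main() {
        client, err := storage.NewBasicClient("myaccount", "bXlhY2NvdW50a2V5") // placeholders
        if err != nil {
            log.Fatal(err)
        }
        blob := client.GetBlobService().GetContainerReference("mycontainer").GetBlobReference("old.bin")

        // Delete the blob together with its snapshots.
        include := true
        deleted, err := blob.DeleteIfExists(&storage.DeleteBlobOptions{
            DeleteSnapshots: &include,
        })
        if err != nil {
            log.Fatal(err)
        }
        log.Println("deleted:", deleted)
    }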
// -// See https://msdn.microsoft.com/en-us/library/azure/dd179413.aspx -func (b BlobStorageClient) DeleteBlobIfExists(container, name string, extraHeaders map[string]string) (bool, error) { - resp, err := b.deleteBlob(container, name, extraHeaders) +// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Delete-Blob +func (b *Blob) DeleteIfExists(options *DeleteBlobOptions) (bool, error) { + resp, err := b.delete(options) if resp != nil { - defer resp.body.Close() + defer readAndCloseBody(resp.body) if resp.statusCode == http.StatusAccepted || resp.statusCode == http.StatusNotFound { return resp.statusCode == http.StatusAccepted, nil } @@ -1391,149 +600,40 @@ func (b BlobStorageClient) DeleteBlobIfExists(container, name string, extraHeade return false, err } -func (b BlobStorageClient) deleteBlob(container, name string, extraHeaders map[string]string) (*storageResponse, error) { - uri := b.client.getEndpoint(blobServiceName, pathForBlob(container, name), url.Values{}) - extraHeaders = b.client.protectUserAgent(extraHeaders) - headers := b.client.getStandardHeaders() - for k, v := range extraHeaders { - headers[k] = v - } +func (b *Blob) delete(options *DeleteBlobOptions) (*storageResponse, error) { + params := url.Values{} + headers := b.Container.bsc.client.getStandardHeaders() - return b.client.exec(http.MethodDelete, uri, headers, nil, b.auth) -} - -// helper method to construct the path to a container given its name -func pathForContainer(name string) string { - return fmt.Sprintf("/%s", name) -} - -// helper method to construct the path to a blob given its container and blob -// name -func pathForBlob(container, name string) string { - return fmt.Sprintf("/%s/%s", container, name) -} - -// GetBlobSASURIWithSignedIPAndProtocol creates an URL to the specified blob which contains the Shared -// Access Signature with specified permissions and expiration time. Also includes signedIPRange and allowed protocols. -// If old API version is used but no signedIP is passed (ie empty string) then this should still work. -// We only populate the signedIP when it non-empty. -// -// See https://msdn.microsoft.com/en-us/library/azure/ee395415.aspx -func (b BlobStorageClient) GetBlobSASURIWithSignedIPAndProtocol(container, name string, expiry time.Time, permissions string, signedIPRange string, HTTPSOnly bool) (string, error) { - var ( - signedPermissions = permissions - blobURL = b.GetBlobURL(container, name) - ) - canonicalizedResource, err := b.client.buildCanonicalizedResource(blobURL, b.auth) - if err != nil { - return "", err - } - - // "The canonicalizedresouce portion of the string is a canonical path to the signed resource. - // It must include the service name (blob, table, queue or file) for version 2015-02-21 or - // later, the storage account name, and the resource name, and must be URL-decoded. - // -- https://msdn.microsoft.com/en-us/library/azure/dn140255.aspx - - // We need to replace + with %2b first to avoid being treated as a space (which is correct for query strings, but not the path component). 
- canonicalizedResource = strings.Replace(canonicalizedResource, "+", "%2b", -1) - canonicalizedResource, err = url.QueryUnescape(canonicalizedResource) - if err != nil { - return "", err - } - - signedExpiry := expiry.UTC().Format(time.RFC3339) - signedResource := "b" - - protocols := "https,http" - if HTTPSOnly { - protocols = "https" - } - stringToSign, err := blobSASStringToSign(b.client.apiVersion, canonicalizedResource, signedExpiry, signedPermissions, signedIPRange, protocols) - if err != nil { - return "", err - } - - sig := b.client.computeHmac256(stringToSign) - sasParams := url.Values{ - "sv": {b.client.apiVersion}, - "se": {signedExpiry}, - "sr": {signedResource}, - "sp": {signedPermissions}, - "sig": {sig}, - } - - if b.client.apiVersion >= "2015-04-05" { - sasParams.Add("spr", protocols) - if signedIPRange != "" { - sasParams.Add("sip", signedIPRange) + if options != nil { + params = addTimeout(params, options.Timeout) + params = addSnapshot(params, options.Snapshot) + headers = mergeHeaders(headers, headersFromStruct(*options)) + if options.DeleteSnapshots != nil { + if *options.DeleteSnapshots { + headers["x-ms-delete-snapshots"] = "include" + } else { + headers["x-ms-delete-snapshots"] = "only" + } } } + uri := b.Container.bsc.client.getEndpoint(blobServiceName, b.buildPath(), params) + return b.Container.bsc.client.exec(http.MethodDelete, uri, headers, nil, b.Container.bsc.auth) +} - sasURL, err := url.Parse(blobURL) +// helper method to construct the path to either a blob or container +func pathForResource(container, name string) string { + if name != "" { + return fmt.Sprintf("/%s/%s", container, name) + } + return fmt.Sprintf("/%s", container) +} + +func (b *Blob) respondCreation(resp *storageResponse, bt BlobType) error { + readAndCloseBody(resp.body) + err := checkRespCode(resp.statusCode, []int{http.StatusCreated}) if err != nil { - return "", err + return err } - sasURL.RawQuery = sasParams.Encode() - return sasURL.String(), nil -} - -// GetBlobSASURI creates an URL to the specified blob which contains the Shared -// Access Signature with specified permissions and expiration time. 
-//
-// See https://msdn.microsoft.com/en-us/library/azure/ee395415.aspx
-func (b BlobStorageClient) GetBlobSASURI(container, name string, expiry time.Time, permissions string) (string, error) {
-	url, err := b.GetBlobSASURIWithSignedIPAndProtocol(container, name, expiry, permissions, "", false)
-	return url, err
-}
-
-func blobSASStringToSign(signedVersion, canonicalizedResource, signedExpiry, signedPermissions string, signedIP string, protocols string) (string, error) {
-	var signedStart, signedIdentifier, rscc, rscd, rsce, rscl, rsct string
-
-	if signedVersion >= "2015-02-21" {
-		canonicalizedResource = "/blob" + canonicalizedResource
-	}
-
-	// https://msdn.microsoft.com/en-us/library/azure/dn140255.aspx#Anchor_12
-	if signedVersion >= "2015-04-05" {
-		return fmt.Sprintf("%s\n%s\n%s\n%s\n%s\n%s\n%s\n%s\n%s\n%s\n%s\n%s\n%s", signedPermissions, signedStart, signedExpiry, canonicalizedResource, signedIdentifier, signedIP, protocols, signedVersion, rscc, rscd, rsce, rscl, rsct), nil
-	}
-
-	// reference: http://msdn.microsoft.com/en-us/library/azure/dn140255.aspx
-	if signedVersion >= "2013-08-15" {
-		return fmt.Sprintf("%s\n%s\n%s\n%s\n%s\n%s\n%s\n%s\n%s\n%s\n%s", signedPermissions, signedStart, signedExpiry, canonicalizedResource, signedIdentifier, signedVersion, rscc, rscd, rsce, rscl, rsct), nil
-	}
-
-	return "", errors.New("storage: not implemented SAS for versions earlier than 2013-08-15")
-}
-
-func generateContainerACLpayload(policies []ContainerAccessPolicyDetails) (io.Reader, int, error) {
-	sil := SignedIdentifiers{
-		SignedIdentifiers: []SignedIdentifier{},
-	}
-	for _, capd := range policies {
-		permission := capd.generateContainerPermissions()
-		signedIdentifier := convertAccessPolicyToXMLStructs(capd.ID, capd.StartTime, capd.ExpiryTime, permission)
-		sil.SignedIdentifiers = append(sil.SignedIdentifiers, signedIdentifier)
-	}
-	return xmlMarshal(sil)
-}
-
-func (capd *ContainerAccessPolicyDetails) generateContainerPermissions() (permissions string) {
-	// generate the permissions string (rwd).
-	// still want the end user API to have bool flags.
-	permissions = ""
-
-	if capd.CanRead {
-		permissions += "r"
-	}
-
-	if capd.CanWrite {
-		permissions += "w"
-	}
-
-	if capd.CanDelete {
-		permissions += "d"
-	}
-
-	return permissions
+	b.Properties.BlobType = bt
+	return nil
 }
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/storage/blobsasuri.go b/vendor/github.com/Azure/azure-sdk-for-go/storage/blobsasuri.go
new file mode 100644
index 000000000..8f0864e34
--- /dev/null
+++ b/vendor/github.com/Azure/azure-sdk-for-go/storage/blobsasuri.go
@@ -0,0 +1,108 @@
+package storage
+
+import (
+	"errors"
+	"fmt"
+	"net/url"
+	"strings"
+	"time"
+)
+
+// GetSASURIWithSignedIPAndProtocol creates a URL to the specified blob which contains the Shared
+// Access Signature with specified permissions and expiration time. Also includes signedIPRange and allowed protocols.
+// If an old API version is used but no signedIP is passed (i.e. empty string) then this should still work.
+// We only populate the signedIP when it is non-empty.
+//
+// See https://msdn.microsoft.com/en-us/library/azure/ee395415.aspx
+func (b *Blob) GetSASURIWithSignedIPAndProtocol(expiry time.Time, permissions string, signedIPRange string, HTTPSOnly bool) (string, error) {
+	var (
+		signedPermissions = permissions
+		blobURL           = b.GetURL()
+	)
+	canonicalizedResource, err := b.Container.bsc.client.buildCanonicalizedResource(blobURL, b.Container.bsc.auth, true)
+	if err != nil {
+		return "", err
+	}
+
+	// "The canonicalizedresource portion of the string is a canonical path to the signed resource.
+	// It must include the service name (blob, table, queue or file) for version 2015-02-21 or
+	// later, the storage account name, and the resource name, and must be URL-decoded.
+	// -- https://msdn.microsoft.com/en-us/library/azure/dn140255.aspx
+
+	// We need to replace + with %2b first to avoid being treated as a space (which is correct for query strings, but not the path component).
+	canonicalizedResource = strings.Replace(canonicalizedResource, "+", "%2b", -1)
+	canonicalizedResource, err = url.QueryUnescape(canonicalizedResource)
+	if err != nil {
+		return "", err
+	}
+
+	signedExpiry := expiry.UTC().Format(time.RFC3339)
+
+	// If blob name is missing, resource is a container
+	signedResource := "c"
+	if len(b.Name) > 0 {
+		signedResource = "b"
+	}
+
+	protocols := ""
+	if HTTPSOnly {
+		protocols = "https"
+	}
+	stringToSign, err := blobSASStringToSign(b.Container.bsc.client.apiVersion, canonicalizedResource, signedExpiry, signedPermissions, signedIPRange, protocols)
+	if err != nil {
+		return "", err
+	}
+
+	sig := b.Container.bsc.client.computeHmac256(stringToSign)
+	sasParams := url.Values{
+		"sv":  {b.Container.bsc.client.apiVersion},
+		"se":  {signedExpiry},
+		"sr":  {signedResource},
+		"sp":  {signedPermissions},
+		"sig": {sig},
+	}
+
+	if b.Container.bsc.client.apiVersion >= "2015-04-05" {
+		if protocols != "" {
+			sasParams.Add("spr", protocols)
+		}
+		if signedIPRange != "" {
+			sasParams.Add("sip", signedIPRange)
+		}
+	}
+
+	sasURL, err := url.Parse(blobURL)
+	if err != nil {
+		return "", err
+	}
+	sasURL.RawQuery = sasParams.Encode()
+	return sasURL.String(), nil
+}
+
+// GetSASURI creates a URL to the specified blob which contains the Shared
+// Access Signature with specified permissions and expiration time.
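Putting the two SAS helpers together, a sketch that mints a read-only link valid for one hour, first unrestricted and then pinned to an IP with HTTPS enforced ("r" is the standard read permission; placeholder plumbing as in the earlier sketches; GetSASURI is defined just below):

    package main

    import (
        "fmt"
        "log"
        "time"

        "github.com/Azure/azure-sdk-for-go/storage"
    )

    func main() {
        client, err := storage.NewBasicClient("myaccount", "bXlhY2NvdW50a2V5") // placeholders
        if err != nil {
            log.Fatal(err)
        }
        blob := client.GetBlobService().GetContainerReference("mycontainer").GetBlobReference("doc.json")

        expiry := time.Now().UTC().Add(time.Hour)

        // Simple form: no IP restriction, both protocols allowed.
        sasURL, err := blob.GetSASURI(expiry, "r")
        if err != nil {
            log.Fatal(err)
        }
        fmt.Println(sasURL)

        // Restricted form: single IP, HTTPS only (adds spr= and sip= parameters).
        restricted, err := blob.GetSASURIWithSignedIPAndProtocol(expiry, "r", "168.63.129.16", true)
        if err != nil {
            log.Fatal(err)
        }
        fmt.Println(restricted)
    }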
+// +// See https://msdn.microsoft.com/en-us/library/azure/ee395415.aspx +func (b *Blob) GetSASURI(expiry time.Time, permissions string) (string, error) { + return b.GetSASURIWithSignedIPAndProtocol(expiry, permissions, "", false) +} + +func blobSASStringToSign(signedVersion, canonicalizedResource, signedExpiry, signedPermissions string, signedIP string, protocols string) (string, error) { + var signedStart, signedIdentifier, rscc, rscd, rsce, rscl, rsct string + + if signedVersion >= "2015-02-21" { + canonicalizedResource = "/blob" + canonicalizedResource + } + + // https://msdn.microsoft.com/en-us/library/azure/dn140255.aspx#Anchor_12 + if signedVersion >= "2015-04-05" { + return fmt.Sprintf("%s\n%s\n%s\n%s\n%s\n%s\n%s\n%s\n%s\n%s\n%s\n%s\n%s", signedPermissions, signedStart, signedExpiry, canonicalizedResource, signedIdentifier, signedIP, protocols, signedVersion, rscc, rscd, rsce, rscl, rsct), nil + } + + // reference: http://msdn.microsoft.com/en-us/library/azure/dn140255.aspx + if signedVersion >= "2013-08-15" { + return fmt.Sprintf("%s\n%s\n%s\n%s\n%s\n%s\n%s\n%s\n%s\n%s\n%s", signedPermissions, signedStart, signedExpiry, canonicalizedResource, signedIdentifier, signedVersion, rscc, rscd, rsce, rscl, rsct), nil + } + + return "", errors.New("storage: not implemented SAS for versions earlier than 2013-08-15") +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/storage/blobserviceclient.go b/vendor/github.com/Azure/azure-sdk-for-go/storage/blobserviceclient.go new file mode 100644 index 000000000..450b20f96 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/storage/blobserviceclient.go @@ -0,0 +1,95 @@ +package storage + +import ( + "net/http" + "net/url" + "strconv" +) + +// BlobStorageClient contains operations for Microsoft Azure Blob Storage +// Service. +type BlobStorageClient struct { + client Client + auth authentication +} + +// GetServiceProperties gets the properties of your storage account's blob service. +// See: https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/get-blob-service-properties +func (b *BlobStorageClient) GetServiceProperties() (*ServiceProperties, error) { + return b.client.getServiceProperties(blobServiceName, b.auth) +} + +// SetServiceProperties sets the properties of your storage account's blob service. +// See: https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/set-blob-service-properties +func (b *BlobStorageClient) SetServiceProperties(props ServiceProperties) error { + return b.client.setServiceProperties(props, blobServiceName, b.auth) +} + +// ListContainersParameters defines the set of customizable parameters to make a +// List Containers call. +// +// See https://msdn.microsoft.com/en-us/library/azure/dd179352.aspx +type ListContainersParameters struct { + Prefix string + Marker string + Include string + MaxResults uint + Timeout uint +} + +// GetContainerReference returns a Container object for the specified container name. +func (b *BlobStorageClient) GetContainerReference(name string) *Container { + return &Container{ + bsc: b, + Name: name, + } +} + +// ListContainers returns the list of containers in a storage account along with +// pagination token and other response details. 
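As a usage sketch (names hypothetical), the NextMarker of one ListContainers response feeds the Marker of the next request to page through every container:

    import (
    	"fmt"

    	"github.com/Azure/azure-sdk-for-go/storage"
    )

    // listAllContainers pages through all containers in the account.
    func listAllContainers(bs *storage.BlobStorageClient) error {
    	params := storage.ListContainersParameters{MaxResults: 100}
    	for {
    		resp, err := bs.ListContainers(params)
    		if err != nil {
    			return err
    		}
    		for _, c := range resp.Containers {
    			fmt.Println(c.Name)
    		}
    		if resp.NextMarker == "" {
    			return nil // no more pages
    		}
    		params.Marker = resp.NextMarker
    	}
    }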
+// +// See https://msdn.microsoft.com/en-us/library/azure/dd179352.aspx +func (b BlobStorageClient) ListContainers(params ListContainersParameters) (*ContainerListResponse, error) { + q := mergeParams(params.getParameters(), url.Values{"comp": {"list"}}) + uri := b.client.getEndpoint(blobServiceName, "", q) + headers := b.client.getStandardHeaders() + + var out ContainerListResponse + resp, err := b.client.exec(http.MethodGet, uri, headers, nil, b.auth) + if err != nil { + return nil, err + } + defer resp.body.Close() + err = xmlUnmarshal(resp.body, &out) + if err != nil { + return nil, err + } + + // assign our client to the newly created Container objects + for i := range out.Containers { + out.Containers[i].bsc = &b + } + return &out, err +} + +func (p ListContainersParameters) getParameters() url.Values { + out := url.Values{} + + if p.Prefix != "" { + out.Set("prefix", p.Prefix) + } + if p.Marker != "" { + out.Set("marker", p.Marker) + } + if p.Include != "" { + out.Set("include", p.Include) + } + if p.MaxResults != 0 { + out.Set("maxresults", strconv.FormatUint(uint64(p.MaxResults), 10)) + } + if p.Timeout != 0 { + out.Set("timeout", strconv.FormatUint(uint64(p.Timeout), 10)) + } + + return out +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/storage/blockblob.go b/vendor/github.com/Azure/azure-sdk-for-go/storage/blockblob.go new file mode 100644 index 000000000..91f8fbe26 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/storage/blockblob.go @@ -0,0 +1,256 @@ +package storage + +import ( + "bytes" + "encoding/xml" + "fmt" + "io" + "net/http" + "net/url" + "strconv" + "strings" + "time" +) + +// BlockListType is used to filter out types of blocks in a Get Blocks List call +// for a block blob. +// +// See https://msdn.microsoft.com/en-us/library/azure/dd179400.aspx for all +// block types. +type BlockListType string + +// Filters for listing blocks in block blobs +const ( + BlockListTypeAll BlockListType = "all" + BlockListTypeCommitted BlockListType = "committed" + BlockListTypeUncommitted BlockListType = "uncommitted" +) + +// Maximum sizes (per REST API) for various concepts +const ( + MaxBlobBlockSize = 100 * 1024 * 1024 + MaxBlobPageSize = 4 * 1024 * 1024 +) + +// BlockStatus defines states a block for a block blob can +// be in. +type BlockStatus string + +// List of statuses that can be used to refer to a block in a block list +const ( + BlockStatusUncommitted BlockStatus = "Uncommitted" + BlockStatusCommitted BlockStatus = "Committed" + BlockStatusLatest BlockStatus = "Latest" +) + +// Block is used to create Block entities for Put Block List +// call. +type Block struct { + ID string + Status BlockStatus +} + +// BlockListResponse contains the response fields from Get Block List call. +// +// See https://msdn.microsoft.com/en-us/library/azure/dd179400.aspx +type BlockListResponse struct { + XMLName xml.Name `xml:"BlockList"` + CommittedBlocks []BlockResponse `xml:"CommittedBlocks>Block"` + UncommittedBlocks []BlockResponse `xml:"UncommittedBlocks>Block"` +} + +// BlockResponse contains the block information returned +// in the GetBlockListCall. +type BlockResponse struct { + Name string `xml:"Name"` + Size int64 `xml:"Size"` +} + +// CreateBlockBlob initializes an empty block blob with no blocks. +// +// See CreateBlockBlobFromReader for more info on creating blobs. 
+// +// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Put-Blob +func (b *Blob) CreateBlockBlob(options *PutBlobOptions) error { + return b.CreateBlockBlobFromReader(nil, options) +} + +// CreateBlockBlobFromReader initializes a block blob using data from +// reader. Size must be the number of bytes read from reader. To +// create an empty blob, use size==0 and reader==nil. +// +// Any headers set in blob.Properties or metadata in blob.Metadata +// will be set on the blob. +// +// The API rejects requests with size > 256 MiB (but this limit is not +// checked by the SDK). To write a larger blob, use CreateBlockBlob, +// PutBlock, and PutBlockList. +// +// To create a blob from scratch, call container.GetBlobReference() to +// get an empty blob, fill in blob.Properties and blob.Metadata as +// appropriate then call this method. +// +// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Put-Blob +func (b *Blob) CreateBlockBlobFromReader(blob io.Reader, options *PutBlobOptions) error { + params := url.Values{} + headers := b.Container.bsc.client.getStandardHeaders() + headers["x-ms-blob-type"] = string(BlobTypeBlock) + + headers["Content-Length"] = "0" + var n int64 + var err error + if blob != nil { + type lener interface { + Len() int + } + // TODO(rjeczalik): handle io.ReadSeeker, in case blob is *os.File etc. + if l, ok := blob.(lener); ok { + n = int64(l.Len()) + } else { + var buf bytes.Buffer + n, err = io.Copy(&buf, blob) + if err != nil { + return err + } + blob = &buf + } + + headers["Content-Length"] = strconv.FormatInt(n, 10) + } + b.Properties.ContentLength = n + + headers = mergeHeaders(headers, headersFromStruct(b.Properties)) + headers = b.Container.bsc.client.addMetadataToHeaders(headers, b.Metadata) + + if options != nil { + params = addTimeout(params, options.Timeout) + headers = mergeHeaders(headers, headersFromStruct(*options)) + } + uri := b.Container.bsc.client.getEndpoint(blobServiceName, b.buildPath(), params) + + resp, err := b.Container.bsc.client.exec(http.MethodPut, uri, headers, blob, b.Container.bsc.auth) + if err != nil { + return err + } + return b.respondCreation(resp, BlobTypeBlock) +} + +// PutBlockOptions includes the options for a put block operation +type PutBlockOptions struct { + Timeout uint + LeaseID string `header:"x-ms-lease-id"` + ContentMD5 string `header:"Content-MD5"` + RequestID string `header:"x-ms-client-request-id"` +} + +// PutBlock saves the given data chunk to the specified block blob with +// given ID. +// +// The API rejects chunks larger than 100 MiB (but this limit is not +// checked by the SDK). +// +// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Put-Block +func (b *Blob) PutBlock(blockID string, chunk []byte, options *PutBlockOptions) error { + return b.PutBlockWithLength(blockID, uint64(len(chunk)), bytes.NewReader(chunk), options) +} + +// PutBlockWithLength saves the given data stream of exactly specified size to +// the block blob with given ID. It is an alternative to PutBlocks where data +// comes as stream but the length is known in advance. +// +// The API rejects requests with size > 100 MiB (but this limit is not +// checked by the SDK). 
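A sketch of the single-call upload path described above, assuming cnt is an existing *Container; setting blob.Properties.ContentType before the call is assumed to map to the x-ms-blob-content-type header via headersFromStruct:

    import (
    	"bytes"

    	"github.com/Azure/azure-sdk-for-go/storage"
    )

    // uploadSmall writes data as a block blob in one Put Blob request.
    // Only suitable below the 256 MiB request limit noted above.
    func uploadSmall(cnt *storage.Container, name string, data []byte) error {
    	b := cnt.GetBlobReference(name)
    	b.Properties.ContentType = "text/plain"
    	return b.CreateBlockBlobFromReader(bytes.NewReader(data), nil)
    }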
+// +// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Put-Block +func (b *Blob) PutBlockWithLength(blockID string, size uint64, blob io.Reader, options *PutBlockOptions) error { + query := url.Values{ + "comp": {"block"}, + "blockid": {blockID}, + } + headers := b.Container.bsc.client.getStandardHeaders() + headers["Content-Length"] = fmt.Sprintf("%v", size) + + if options != nil { + query = addTimeout(query, options.Timeout) + headers = mergeHeaders(headers, headersFromStruct(*options)) + } + uri := b.Container.bsc.client.getEndpoint(blobServiceName, b.buildPath(), query) + + resp, err := b.Container.bsc.client.exec(http.MethodPut, uri, headers, blob, b.Container.bsc.auth) + if err != nil { + return err + } + return b.respondCreation(resp, BlobTypeBlock) +} + +// PutBlockListOptions includes the options for a put block list operation +type PutBlockListOptions struct { + Timeout uint + LeaseID string `header:"x-ms-lease-id"` + IfModifiedSince *time.Time `header:"If-Modified-Since"` + IfUnmodifiedSince *time.Time `header:"If-Unmodified-Since"` + IfMatch string `header:"If-Match"` + IfNoneMatch string `header:"If-None-Match"` + RequestID string `header:"x-ms-client-request-id"` +} + +// PutBlockList saves list of blocks to the specified block blob. +// +// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Put-Block-List +func (b *Blob) PutBlockList(blocks []Block, options *PutBlockListOptions) error { + params := url.Values{"comp": {"blocklist"}} + blockListXML := prepareBlockListRequest(blocks) + headers := b.Container.bsc.client.getStandardHeaders() + headers["Content-Length"] = fmt.Sprintf("%v", len(blockListXML)) + headers = mergeHeaders(headers, headersFromStruct(b.Properties)) + headers = b.Container.bsc.client.addMetadataToHeaders(headers, b.Metadata) + + if options != nil { + params = addTimeout(params, options.Timeout) + headers = mergeHeaders(headers, headersFromStruct(*options)) + } + uri := b.Container.bsc.client.getEndpoint(blobServiceName, b.buildPath(), params) + + resp, err := b.Container.bsc.client.exec(http.MethodPut, uri, headers, strings.NewReader(blockListXML), b.Container.bsc.auth) + if err != nil { + return err + } + readAndCloseBody(resp.body) + return checkRespCode(resp.statusCode, []int{http.StatusCreated}) +} + +// GetBlockListOptions includes the options for a get block list operation +type GetBlockListOptions struct { + Timeout uint + Snapshot *time.Time + LeaseID string `header:"x-ms-lease-id"` + RequestID string `header:"x-ms-client-request-id"` +} + +// GetBlockList retrieves list of blocks in the specified block blob. 
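For blobs above the single-request limit, the PutBlock/PutBlockList pair composes into a chunked upload. A sketch; the 4 MiB chunk size is illustrative, and the service requires block IDs to be base64-encoded and of equal length within a blob:

    import (
    	"encoding/base64"
    	"fmt"
    	"io"

    	"github.com/Azure/azure-sdk-for-go/storage"
    )

    // uploadChunked streams r into b as a sequence of blocks, then commits them.
    func uploadChunked(b *storage.Blob, r io.Reader) error {
    	const chunkSize = 4 * 1024 * 1024 // well under the 100 MiB per-block limit
    	buf := make([]byte, chunkSize)
    	var blocks []storage.Block
    	for i := 0; ; i++ {
    		n, err := io.ReadFull(r, buf)
    		if n > 0 {
    			// Fixed-width IDs keep all encoded block IDs the same length.
    			id := base64.StdEncoding.EncodeToString([]byte(fmt.Sprintf("%08d", i)))
    			if perr := b.PutBlock(id, buf[:n], nil); perr != nil {
    				return perr
    			}
    			blocks = append(blocks, storage.Block{ID: id, Status: storage.BlockStatusLatest})
    		}
    		if err == io.EOF || err == io.ErrUnexpectedEOF {
    			break
    		}
    		if err != nil {
    			return err
    		}
    	}
    	return b.PutBlockList(blocks, nil)
    }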
+// +// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Get-Block-List +func (b *Blob) GetBlockList(blockType BlockListType, options *GetBlockListOptions) (BlockListResponse, error) { + params := url.Values{ + "comp": {"blocklist"}, + "blocklisttype": {string(blockType)}, + } + headers := b.Container.bsc.client.getStandardHeaders() + + if options != nil { + params = addTimeout(params, options.Timeout) + params = addSnapshot(params, options.Snapshot) + headers = mergeHeaders(headers, headersFromStruct(*options)) + } + uri := b.Container.bsc.client.getEndpoint(blobServiceName, b.buildPath(), params) + + var out BlockListResponse + resp, err := b.Container.bsc.client.exec(http.MethodGet, uri, headers, nil, b.Container.bsc.auth) + if err != nil { + return out, err + } + defer resp.body.Close() + + err = xmlUnmarshal(resp.body, &out) + return out, err +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/storage/client.go b/vendor/github.com/Azure/azure-sdk-for-go/storage/client.go index 817f934c5..42fa702f6 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/storage/client.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/storage/client.go @@ -2,6 +2,7 @@ package storage import ( + "bufio" "bytes" "encoding/base64" "encoding/json" @@ -10,21 +11,27 @@ import ( "fmt" "io" "io/ioutil" + "mime" + "mime/multipart" "net/http" "net/url" + "regexp" "runtime" - "strconv" "strings" + "time" + + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/azure" ) const ( - // DefaultBaseURL is the domain name used for storage requests when a - // default client is created. + // DefaultBaseURL is the domain name used for storage requests in the + // public cloud when a default client is created. DefaultBaseURL = "core.windows.net" - // DefaultAPIVersion is the Azure Storage API version string used when a + // DefaultAPIVersion is the Azure Storage API version string used when a // basic client is created. - DefaultAPIVersion = "2015-02-21" + DefaultAPIVersion = "2016-05-31" defaultUseHTTPS = true @@ -44,15 +51,60 @@ const ( storageEmulatorQueue = "127.0.0.1:10001" userAgentHeader = "User-Agent" + + userDefinedMetadataHeaderPrefix = "x-ms-meta-" ) +var ( + validStorageAccount = regexp.MustCompile("^[0-9a-z]{3,24}$") +) + +// Sender sends a request +type Sender interface { + Send(*Client, *http.Request) (*http.Response, error) +} + +// DefaultSender is the default sender for the client. It implements +// an automatic retry strategy. +type DefaultSender struct { + RetryAttempts int + RetryDuration time.Duration + ValidStatusCodes []int + attempts int // used for testing +} + +// Send is the default retry strategy in the client +func (ds *DefaultSender) Send(c *Client, req *http.Request) (resp *http.Response, err error) { + rr := autorest.NewRetriableRequest(req) + for attempts := 0; attempts < ds.RetryAttempts; attempts++ { + err = rr.Prepare() + if err != nil { + return resp, err + } + resp, err = c.HTTPClient.Do(rr.Request()) + if err != nil || !autorest.ResponseHasStatusCode(resp, ds.ValidStatusCodes...) { + return resp, err + } + autorest.DelayForBackoff(ds.RetryDuration, attempts, req.Cancel) + ds.attempts = attempts + } + ds.attempts++ + return resp, err +} + // Client is the object that needs to be constructed to perform // operations on the storage account. type Client struct { // HTTPClient is the http.Client used to initiate API - // requests. If it is nil, http.DefaultClient is used. + // requests. 
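Because Sender is an interface and clients now ship with a DefaultSender, the retry policy is pluggable. A sketch of swapping in different parameters (the values are illustrative, credentials are placeholders):

    import (
    	"net/http"
    	"time"

    	"github.com/Azure/azure-sdk-for-go/storage"
    )

    func newClientWithRetries() (storage.Client, error) {
    	cli, err := storage.NewBasicClient("myaccount", "bXlrZXk=")
    	if err != nil {
    		return cli, err
    	}
    	// Retry only on 500/503, three attempts, 2s base backoff.
    	cli.Sender = &storage.DefaultSender{
    		RetryAttempts: 3,
    		RetryDuration: 2 * time.Second,
    		ValidStatusCodes: []int{
    			http.StatusInternalServerError,
    			http.StatusServiceUnavailable,
    		},
    	}
    	return cli, nil
    }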
http.DefaultClient is used when creating a + // client. HTTPClient *http.Client + // Sender is an interface that sends the request. Clients are + // created with a DefaultSender. The DefaultSender has an + // automatic retry strategy built in. The Sender can be customized. + Sender Sender + accountName string accountKey []byte useHTTPS bool @@ -70,7 +122,7 @@ type storageResponse struct { type odataResponse struct { storageResponse - odata odataErrorMessage + odata odataErrorWrapper } // AzureStorageServiceError contains fields of the error response from @@ -83,22 +135,25 @@ type AzureStorageServiceError struct { QueryParameterName string `xml:"QueryParameterName"` QueryParameterValue string `xml:"QueryParameterValue"` Reason string `xml:"Reason"` + Lang string StatusCode int RequestID string + Date string + APIVersion string } -type odataErrorMessageMessage struct { +type odataErrorMessage struct { Lang string `json:"lang"` Value string `json:"value"` } -type odataErrorMessageInternal struct { - Code string `json:"code"` - Message odataErrorMessageMessage `json:"message"` +type odataError struct { + Code string `json:"code"` + Message odataErrorMessage `json:"message"` } -type odataErrorMessage struct { - Err odataErrorMessageInternal `json:"odata.error"` +type odataErrorWrapper struct { + Err odataError `json:"odata.error"` } // UnexpectedStatusCodeError is returned when a storage service responds with neither an error @@ -131,7 +186,15 @@ func NewBasicClient(accountName, accountKey string) (Client, error) { return NewEmulatorClient() } return NewClient(accountName, accountKey, DefaultBaseURL, DefaultAPIVersion, defaultUseHTTPS) +} +// NewBasicClientOnSovereignCloud constructs a Client with given storage service name and +// key in the referenced cloud. +func NewBasicClientOnSovereignCloud(accountName, accountKey string, env azure.Environment) (Client, error) { + if accountName == StorageEmulatorAccountName { + return NewEmulatorClient() + } + return NewClient(accountName, accountKey, env.StorageEndpointSuffix, DefaultAPIVersion, defaultUseHTTPS) } //NewEmulatorClient contructs a Client intended to only work with Azure @@ -145,8 +208,8 @@ func NewEmulatorClient() (Client, error) { // storage endpoint than Azure Public Cloud. func NewClient(accountName, accountKey, blobServiceBaseURL, apiVersion string, useHTTPS bool) (Client, error) { var c Client - if accountName == "" { - return c, fmt.Errorf("azure: account name required") + if !IsValidStorageAccount(accountName) { + return c, fmt.Errorf("azure: account name is not valid: it must be between 3 and 24 characters, and only may contain numbers and lowercase letters: %v", accountName) } else if accountKey == "" { return c, fmt.Errorf("azure: account key required") } else if blobServiceBaseURL == "" { @@ -159,19 +222,37 @@ func NewClient(accountName, accountKey, blobServiceBaseURL, apiVersion string, u } c = Client{ + HTTPClient: http.DefaultClient, accountName: accountName, accountKey: key, useHTTPS: useHTTPS, baseURL: blobServiceBaseURL, apiVersion: apiVersion, UseSharedKeyLite: false, + Sender: &DefaultSender{ + RetryAttempts: 5, + ValidStatusCodes: []int{ + http.StatusRequestTimeout, // 408 + http.StatusInternalServerError, // 500 + http.StatusBadGateway, // 502 + http.StatusServiceUnavailable, // 503 + http.StatusGatewayTimeout, // 504 + }, + RetryDuration: time.Second * 5, + }, } c.userAgent = c.getDefaultUserAgent() return c, nil } +// IsValidStorageAccount checks if the storage account name is valid. 
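A sketch of sovereign-cloud client construction, which differs from NewBasicClient only in taking the storage endpoint suffix from the environment definition; azure.ChinaCloud is assumed to be one of the environments defined in the vendored go-autorest package:

    import (
    	"github.com/Azure/azure-sdk-for-go/storage"
    	"github.com/Azure/go-autorest/autorest/azure"
    )

    func newChinaCloudClient(account, key string) (storage.Client, error) {
    	// Resolves against core.chinacloudapi.cn instead of core.windows.net.
    	return storage.NewBasicClientOnSovereignCloud(account, key, azure.ChinaCloud)
    }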
+// See https://docs.microsoft.com/en-us/azure/storage/storage-create-storage-account +func IsValidStorageAccount(account string) bool { + return validStorageAccount.MatchString(account) +} + func (c Client) getDefaultUserAgent() string { - return fmt.Sprintf("Go/%s (%s-%s) Azure-SDK-For-Go/%s storage-dataplane/%s", + return fmt.Sprintf("Go/%s (%s-%s) azure-storage-go/%s api-version/%s", runtime.Version(), runtime.GOARCH, runtime.GOOS, @@ -200,7 +281,7 @@ func (c *Client) protectUserAgent(extraheaders map[string]string) map[string]str return extraheaders } -func (c Client) getBaseURL(service string) string { +func (c Client) getBaseURL(service string) *url.URL { scheme := "http" if c.useHTTPS { scheme = "https" @@ -219,18 +300,14 @@ func (c Client) getBaseURL(service string) string { host = fmt.Sprintf("%s.%s.%s", c.accountName, service, c.baseURL) } - u := &url.URL{ + return &url.URL{ Scheme: scheme, - Host: host} - return u.String() + Host: host, + } } func (c Client) getEndpoint(service, path string, params url.Values) string { - u, err := url.Parse(c.getBaseURL(service)) - if err != nil { - // really should not be happening - panic(err) - } + u := c.getBaseURL(service) // API doesn't accept path segments not starting with '/' if !strings.HasPrefix(path, "/") { @@ -321,46 +398,53 @@ func (c Client) exec(verb, url string, headers map[string]string, body io.Reader return nil, errors.New("azure/storage: error creating request: " + err.Error()) } - if clstr, ok := headers["Content-Length"]; ok { - // content length header is being signed, but completely ignored by golang. - // instead we have to use the ContentLength property on the request struct - // (see https://golang.org/src/net/http/request.go?s=18140:18370#L536 and - // https://golang.org/src/net/http/transfer.go?s=1739:2467#L49) - req.ContentLength, err = strconv.ParseInt(clstr, 10, 64) - if err != nil { - return nil, err + // if a body was provided ensure that the content length was set. + // http.NewRequest() will automatically do this for a handful of types + // and for those that it doesn't we will handle here. + if body != nil && req.ContentLength < 1 { + if lr, ok := body.(*io.LimitedReader); ok { + setContentLengthFromLimitedReader(req, lr) } } + for k, v := range headers { - req.Header.Add(k, v) + req.Header[k] = append(req.Header[k], v) // Must bypass case munging present in `Add` by using map functions directly. 
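The direct map write above matters because http.Header.Add canonicalizes key case, which would alter the x-ms-* header names after they were signed. A small standalone demonstration of the difference:

    package main

    import (
    	"fmt"
    	"net/http"
    )

    func main() {
    	viaAdd := http.Header{}
    	viaAdd.Add("x-ms-meta-MixedCase", "v") // Add rewrites the key

    	direct := http.Header{}
    	direct["x-ms-meta-MixedCase"] = []string{"v"} // map write preserves it

    	fmt.Println(viaAdd) // map[X-Ms-Meta-Mixedcase:[v]]
    	fmt.Println(direct) // map[x-ms-meta-MixedCase:[v]]
    }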
See https://github.com/Azure/azure-sdk-for-go/issues/645 } - httpClient := c.HTTPClient - if httpClient == nil { - httpClient = http.DefaultClient - } - resp, err := httpClient.Do(req) + resp, err := c.Sender.Send(&c, req) if err != nil { return nil, err } - statusCode := resp.StatusCode - if statusCode >= 400 && statusCode <= 505 { + if resp.StatusCode >= 400 && resp.StatusCode <= 505 { var respBody []byte - respBody, err = readResponseBody(resp) + respBody, err = readAndCloseBody(resp.Body) if err != nil { return nil, err } - requestID := resp.Header.Get("x-ms-request-id") + requestID, date, version := getDebugHeaders(resp.Header) if len(respBody) == 0 { // no error in response body, might happen in HEAD requests - err = serviceErrFromStatusCode(resp.StatusCode, resp.Status, requestID) + err = serviceErrFromStatusCode(resp.StatusCode, resp.Status, requestID, date, version) } else { + storageErr := AzureStorageServiceError{ + StatusCode: resp.StatusCode, + RequestID: requestID, + Date: date, + APIVersion: version, + } // response contains storage service error object, unmarshal - storageErr, errIn := serviceErrFromXML(respBody, resp.StatusCode, requestID) - if err != nil { // error unmarshaling the error response - err = errIn + if resp.Header.Get("Content-Type") == "application/xml" { + errIn := serviceErrFromXML(respBody, &storageErr) + if err != nil { // error unmarshaling the error response + err = errIn + } + } else { + errIn := serviceErrFromJSON(respBody, &storageErr) + if err != nil { // error unmarshaling the error response + err = errIn + } } err = storageErr } @@ -377,10 +461,10 @@ func (c Client) exec(verb, url string, headers map[string]string, body io.Reader body: resp.Body}, nil } -func (c Client) execInternalJSON(verb, url string, headers map[string]string, body io.Reader, auth authentication) (*odataResponse, error) { +func (c Client) execInternalJSONCommon(verb, url string, headers map[string]string, body io.Reader, auth authentication) (*odataResponse, *http.Request, *http.Response, error) { headers, err := c.addAuthorizationHeader(verb, url, headers, auth) if err != nil { - return nil, err + return nil, nil, nil, err } req, err := http.NewRequest(verb, url, body) @@ -388,14 +472,9 @@ func (c Client) execInternalJSON(verb, url string, headers map[string]string, bo req.Header.Add(k, v) } - httpClient := c.HTTPClient - if httpClient == nil { - httpClient = http.DefaultClient - } - - resp, err := httpClient.Do(req) + resp, err := c.Sender.Send(&c, req) if err != nil { - return nil, err + return nil, nil, nil, err } respToRet := &odataResponse{} @@ -406,55 +485,155 @@ func (c Client) execInternalJSON(verb, url string, headers map[string]string, bo statusCode := resp.StatusCode if statusCode >= 400 && statusCode <= 505 { var respBody []byte - respBody, err = readResponseBody(resp) + respBody, err = readAndCloseBody(resp.Body) if err != nil { - return nil, err + return nil, nil, nil, err } + requestID, date, version := getDebugHeaders(resp.Header) if len(respBody) == 0 { // no error in response body, might happen in HEAD requests - err = serviceErrFromStatusCode(resp.StatusCode, resp.Status, resp.Header.Get("x-ms-request-id")) - return respToRet, err + err = serviceErrFromStatusCode(resp.StatusCode, resp.Status, requestID, date, version) + return respToRet, req, resp, err } // try unmarshal as odata.error json err = json.Unmarshal(respBody, &respToRet.odata) - return respToRet, err + } + + return respToRet, req, resp, err +} + +func (c Client) execInternalJSON(verb, url 
string, headers map[string]string, body io.Reader, auth authentication) (*odataResponse, error) { + respToRet, _, _, err := c.execInternalJSONCommon(verb, url, headers, body, auth) + return respToRet, err +} + +func (c Client) execBatchOperationJSON(verb, url string, headers map[string]string, body io.Reader, auth authentication) (*odataResponse, error) { + // execute common query, get back generated request, response etc... for more processing. + respToRet, req, resp, err := c.execInternalJSONCommon(verb, url, headers, body, auth) + if err != nil { + return nil, err + } + + // return the OData in the case of executing batch commands. + // In this case we need to read the outer batch boundary and contents. + // Then we read the changeset information within the batch + var respBody []byte + respBody, err = readAndCloseBody(resp.Body) + if err != nil { + return nil, err + } + + // outer multipart body + _, batchHeader, err := mime.ParseMediaType(resp.Header["Content-Type"][0]) + if err != nil { + return nil, err + } + + // batch details. + batchBoundary := batchHeader["boundary"] + batchPartBuf, changesetBoundary, err := genBatchReader(batchBoundary, respBody) + if err != nil { + return nil, err + } + + // changeset details. + err = genChangesetReader(req, respToRet, batchPartBuf, changesetBoundary) + if err != nil { + return nil, err } return respToRet, nil } -func readResponseBody(resp *http.Response) ([]byte, error) { - defer resp.Body.Close() - out, err := ioutil.ReadAll(resp.Body) +func genChangesetReader(req *http.Request, respToRet *odataResponse, batchPartBuf io.Reader, changesetBoundary string) error { + changesetMultiReader := multipart.NewReader(batchPartBuf, changesetBoundary) + changesetPart, err := changesetMultiReader.NextPart() + if err != nil { + return err + } + + changesetPartBufioReader := bufio.NewReader(changesetPart) + changesetResp, err := http.ReadResponse(changesetPartBufioReader, req) + if err != nil { + return err + } + + if changesetResp.StatusCode != http.StatusNoContent { + changesetBody, err := readAndCloseBody(changesetResp.Body) + err = json.Unmarshal(changesetBody, &respToRet.odata) + if err != nil { + return err + } + respToRet.statusCode = changesetResp.StatusCode + } + + return nil +} + +func genBatchReader(batchBoundary string, respBody []byte) (io.Reader, string, error) { + respBodyString := string(respBody) + respBodyReader := strings.NewReader(respBodyString) + + // reading batchresponse + batchMultiReader := multipart.NewReader(respBodyReader, batchBoundary) + batchPart, err := batchMultiReader.NextPart() + if err != nil { + return nil, "", err + } + batchPartBufioReader := bufio.NewReader(batchPart) + + _, changesetHeader, err := mime.ParseMediaType(batchPart.Header.Get("Content-Type")) + if err != nil { + return nil, "", err + } + changesetBoundary := changesetHeader["boundary"] + return batchPartBufioReader, changesetBoundary, nil +} + +func readAndCloseBody(body io.ReadCloser) ([]byte, error) { + defer body.Close() + out, err := ioutil.ReadAll(body) if err == io.EOF { err = nil } return out, err } -func serviceErrFromXML(body []byte, statusCode int, requestID string) (AzureStorageServiceError, error) { - var storageErr AzureStorageServiceError - if err := xml.Unmarshal(body, &storageErr); err != nil { - return storageErr, err +func serviceErrFromXML(body []byte, storageErr *AzureStorageServiceError) error { + if err := xml.Unmarshal(body, storageErr); err != nil { + storageErr.Message = fmt.Sprintf("Response body could no be unmarshaled: %v. 
Body: %v.", err, string(body)) + return err } - storageErr.StatusCode = statusCode - storageErr.RequestID = requestID - return storageErr, nil + return nil } -func serviceErrFromStatusCode(code int, status string, requestID string) AzureStorageServiceError { +func serviceErrFromJSON(body []byte, storageErr *AzureStorageServiceError) error { + odataError := odataErrorWrapper{} + if err := json.Unmarshal(body, &odataError); err != nil { + storageErr.Message = fmt.Sprintf("Response body could no be unmarshaled: %v. Body: %v.", err, string(body)) + return err + } + storageErr.Code = odataError.Err.Code + storageErr.Message = odataError.Err.Message.Value + storageErr.Lang = odataError.Err.Message.Lang + return nil +} + +func serviceErrFromStatusCode(code int, status string, requestID, date, version string) AzureStorageServiceError { return AzureStorageServiceError{ StatusCode: code, Code: status, RequestID: requestID, + Date: date, + APIVersion: version, Message: "no response body was available for error status code", } } func (e AzureStorageServiceError) Error() string { - return fmt.Sprintf("storage: service returned error: StatusCode=%d, ErrorCode=%s, ErrorMessage=%s, RequestId=%s, QueryParameterName=%s, QueryParameterValue=%s", - e.StatusCode, e.Code, e.Message, e.RequestID, e.QueryParameterName, e.QueryParameterValue) + return fmt.Sprintf("storage: service returned error: StatusCode=%d, ErrorCode=%s, ErrorMessage=%s, RequestInitiated=%s, RequestId=%s, API Version=%s, QueryParameterName=%s, QueryParameterValue=%s", + e.StatusCode, e.Code, e.Message, e.Date, e.RequestID, e.APIVersion, e.QueryParameterName, e.QueryParameterValue) } // checkRespCode returns UnexpectedStatusError if the given response code is not @@ -467,3 +646,18 @@ func checkRespCode(respCode int, allowed []int) error { } return UnexpectedStatusCodeError{allowed, respCode} } + +func (c Client) addMetadataToHeaders(h map[string]string, metadata map[string]string) map[string]string { + metadata = c.protectUserAgent(metadata) + for k, v := range metadata { + h[userDefinedMetadataHeaderPrefix+k] = v + } + return h +} + +func getDebugHeaders(h http.Header) (requestID, date, version string) { + requestID = h.Get("x-ms-request-id") + version = h.Get("x-ms-version") + date = h.Get("Date") + return +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/storage/container.go b/vendor/github.com/Azure/azure-sdk-for-go/storage/container.go new file mode 100644 index 000000000..c2c9c055b --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/storage/container.go @@ -0,0 +1,453 @@ +package storage + +import ( + "encoding/xml" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "strconv" + "strings" + "time" +) + +// Container represents an Azure container. +type Container struct { + bsc *BlobStorageClient + Name string `xml:"Name"` + Properties ContainerProperties `xml:"Properties"` + Metadata map[string]string +} + +func (c *Container) buildPath() string { + return fmt.Sprintf("/%s", c.Name) +} + +// ContainerProperties contains various properties of a container returned from +// various endpoints like ListContainers. +type ContainerProperties struct { + LastModified string `xml:"Last-Modified"` + Etag string `xml:"Etag"` + LeaseStatus string `xml:"LeaseStatus"` + LeaseState string `xml:"LeaseState"` + LeaseDuration string `xml:"LeaseDuration"` +} + +// ContainerListResponse contains the response fields from +// ListContainers call. 
+// +// See https://msdn.microsoft.com/en-us/library/azure/dd179352.aspx +type ContainerListResponse struct { + XMLName xml.Name `xml:"EnumerationResults"` + Xmlns string `xml:"xmlns,attr"` + Prefix string `xml:"Prefix"` + Marker string `xml:"Marker"` + NextMarker string `xml:"NextMarker"` + MaxResults int64 `xml:"MaxResults"` + Containers []Container `xml:"Containers>Container"` +} + +// BlobListResponse contains the response fields from ListBlobs call. +// +// See https://msdn.microsoft.com/en-us/library/azure/dd135734.aspx +type BlobListResponse struct { + XMLName xml.Name `xml:"EnumerationResults"` + Xmlns string `xml:"xmlns,attr"` + Prefix string `xml:"Prefix"` + Marker string `xml:"Marker"` + NextMarker string `xml:"NextMarker"` + MaxResults int64 `xml:"MaxResults"` + Blobs []Blob `xml:"Blobs>Blob"` + + // BlobPrefix is used to traverse blobs as if it were a file system. + // It is returned if ListBlobsParameters.Delimiter is specified. + // The list here can be thought of as "folders" that may contain + // other folders or blobs. + BlobPrefixes []string `xml:"Blobs>BlobPrefix>Name"` + + // Delimiter is used to traverse blobs as if it were a file system. + // It is returned if ListBlobsParameters.Delimiter is specified. + Delimiter string `xml:"Delimiter"` +} + +// IncludeBlobDataset has options to include in a list blobs operation +type IncludeBlobDataset struct { + Snapshots bool + Metadata bool + UncommittedBlobs bool + Copy bool +} + +// ListBlobsParameters defines the set of customizable +// parameters to make a List Blobs call. +// +// See https://msdn.microsoft.com/en-us/library/azure/dd135734.aspx +type ListBlobsParameters struct { + Prefix string + Delimiter string + Marker string + Include *IncludeBlobDataset + MaxResults uint + Timeout uint + RequestID string +} + +func (p ListBlobsParameters) getParameters() url.Values { + out := url.Values{} + + if p.Prefix != "" { + out.Set("prefix", p.Prefix) + } + if p.Delimiter != "" { + out.Set("delimiter", p.Delimiter) + } + if p.Marker != "" { + out.Set("marker", p.Marker) + } + if p.Include != nil { + include := []string{} + include = addString(include, p.Include.Snapshots, "snapshots") + include = addString(include, p.Include.Metadata, "metadata") + include = addString(include, p.Include.UncommittedBlobs, "uncommittedblobs") + include = addString(include, p.Include.Copy, "copy") + fullInclude := strings.Join(include, ",") + out.Set("include", fullInclude) + } + if p.MaxResults != 0 { + out.Set("maxresults", strconv.FormatUint(uint64(p.MaxResults), 10)) + } + if p.Timeout != 0 { + out.Set("timeout", strconv.FormatUint(uint64(p.Timeout), 10)) + } + + return out +} + +func addString(datasets []string, include bool, text string) []string { + if include { + datasets = append(datasets, text) + } + return datasets +} + +// ContainerAccessType defines the access level to the container from a public +// request. +// +// See https://msdn.microsoft.com/en-us/library/azure/dd179468.aspx and "x-ms- +// blob-public-access" header. +type ContainerAccessType string + +// Access options for containers +const ( + ContainerAccessTypePrivate ContainerAccessType = "" + ContainerAccessTypeBlob ContainerAccessType = "blob" + ContainerAccessTypeContainer ContainerAccessType = "container" +) + +// ContainerAccessPolicy represents each access policy in the container ACL. 
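Delimiter plus BlobPrefixes gives the virtual-folder view described on BlobListResponse. A sketch listing one "directory" level with metadata included (prefix and names hypothetical):

    import (
    	"fmt"

    	"github.com/Azure/azure-sdk-for-go/storage"
    )

    func listLevel(cnt *storage.Container) error {
    	resp, err := cnt.ListBlobs(storage.ListBlobsParameters{
    		Prefix:    "logs/2017/",
    		Delimiter: "/",
    		Include:   &storage.IncludeBlobDataset{Metadata: true},
    	})
    	if err != nil {
    		return err
    	}
    	for _, p := range resp.BlobPrefixes {
    		fmt.Println("folder:", p) // "subdirectories" under the prefix
    	}
    	for _, b := range resp.Blobs {
    		fmt.Println("blob:", b.Name) // blobs at this level only
    	}
    	return nil
    }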
+type ContainerAccessPolicy struct { + ID string + StartTime time.Time + ExpiryTime time.Time + CanRead bool + CanWrite bool + CanDelete bool +} + +// ContainerPermissions represents the container ACLs. +type ContainerPermissions struct { + AccessType ContainerAccessType + AccessPolicies []ContainerAccessPolicy +} + +// ContainerAccessHeader references header used when setting/getting container ACL +const ( + ContainerAccessHeader string = "x-ms-blob-public-access" +) + +// GetBlobReference returns a Blob object for the specified blob name. +func (c *Container) GetBlobReference(name string) *Blob { + return &Blob{ + Container: c, + Name: name, + } +} + +// CreateContainerOptions includes the options for a create container operation +type CreateContainerOptions struct { + Timeout uint + Access ContainerAccessType `header:"x-ms-blob-public-access"` + RequestID string `header:"x-ms-client-request-id"` +} + +// Create creates a blob container within the storage account +// with given name and access level. Returns error if container already exists. +// +// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Create-Container +func (c *Container) Create(options *CreateContainerOptions) error { + resp, err := c.create(options) + if err != nil { + return err + } + readAndCloseBody(resp.body) + return checkRespCode(resp.statusCode, []int{http.StatusCreated}) +} + +// CreateIfNotExists creates a blob container if it does not exist. Returns +// true if container is newly created or false if container already exists. +func (c *Container) CreateIfNotExists(options *CreateContainerOptions) (bool, error) { + resp, err := c.create(options) + if resp != nil { + defer readAndCloseBody(resp.body) + if resp.statusCode == http.StatusCreated || resp.statusCode == http.StatusConflict { + return resp.statusCode == http.StatusCreated, nil + } + } + return false, err +} + +func (c *Container) create(options *CreateContainerOptions) (*storageResponse, error) { + query := url.Values{"restype": {"container"}} + headers := c.bsc.client.getStandardHeaders() + headers = c.bsc.client.addMetadataToHeaders(headers, c.Metadata) + + if options != nil { + query = addTimeout(query, options.Timeout) + headers = mergeHeaders(headers, headersFromStruct(*options)) + } + uri := c.bsc.client.getEndpoint(blobServiceName, c.buildPath(), query) + + return c.bsc.client.exec(http.MethodPut, uri, headers, nil, c.bsc.auth) +} + +// Exists returns true if a container with given name exists +// on the storage account, otherwise returns false. 
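A sketch of idempotent container creation with blob-level public read access, using the options struct and access constants defined here (the container name is hypothetical):

    import "github.com/Azure/azure-sdk-for-go/storage"

    func ensurePublicAssets(bs *storage.BlobStorageClient) (*storage.Container, error) {
    	cnt := bs.GetContainerReference("public-assets")
    	_, err := cnt.CreateIfNotExists(&storage.CreateContainerOptions{
    		Access: storage.ContainerAccessTypeBlob, // blobs readable anonymously
    	})
    	return cnt, err
    }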
+func (c *Container) Exists() (bool, error) { + uri := c.bsc.client.getEndpoint(blobServiceName, c.buildPath(), url.Values{"restype": {"container"}}) + headers := c.bsc.client.getStandardHeaders() + + resp, err := c.bsc.client.exec(http.MethodHead, uri, headers, nil, c.bsc.auth) + if resp != nil { + defer readAndCloseBody(resp.body) + if resp.statusCode == http.StatusOK || resp.statusCode == http.StatusNotFound { + return resp.statusCode == http.StatusOK, nil + } + } + return false, err +} + +// SetContainerPermissionOptions includes options for a set container permissions operation +type SetContainerPermissionOptions struct { + Timeout uint + LeaseID string `header:"x-ms-lease-id"` + IfModifiedSince *time.Time `header:"If-Modified-Since"` + IfUnmodifiedSince *time.Time `header:"If-Unmodified-Since"` + RequestID string `header:"x-ms-client-request-id"` +} + +// SetPermissions sets up container permissions +// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Set-Container-ACL +func (c *Container) SetPermissions(permissions ContainerPermissions, options *SetContainerPermissionOptions) error { + body, length, err := generateContainerACLpayload(permissions.AccessPolicies) + if err != nil { + return err + } + params := url.Values{ + "restype": {"container"}, + "comp": {"acl"}, + } + headers := c.bsc.client.getStandardHeaders() + headers = addToHeaders(headers, ContainerAccessHeader, string(permissions.AccessType)) + headers["Content-Length"] = strconv.Itoa(length) + + if options != nil { + params = addTimeout(params, options.Timeout) + headers = mergeHeaders(headers, headersFromStruct(*options)) + } + uri := c.bsc.client.getEndpoint(blobServiceName, c.buildPath(), params) + + resp, err := c.bsc.client.exec(http.MethodPut, uri, headers, body, c.bsc.auth) + if err != nil { + return err + } + defer readAndCloseBody(resp.body) + + if err := checkRespCode(resp.statusCode, []int{http.StatusOK}); err != nil { + return errors.New("Unable to set permissions") + } + + return nil +} + +// GetContainerPermissionOptions includes options for a get container permissions operation +type GetContainerPermissionOptions struct { + Timeout uint + LeaseID string `header:"x-ms-lease-id"` + RequestID string `header:"x-ms-client-request-id"` +} + +// GetPermissions gets the container permissions as per https://msdn.microsoft.com/en-us/library/azure/dd179469.aspx +// If timeout is 0 then it will not be passed to Azure +// leaseID will only be passed to Azure if populated +func (c *Container) GetPermissions(options *GetContainerPermissionOptions) (*ContainerPermissions, error) { + params := url.Values{ + "restype": {"container"}, + "comp": {"acl"}, + } + headers := c.bsc.client.getStandardHeaders() + + if options != nil { + params = addTimeout(params, options.Timeout) + headers = mergeHeaders(headers, headersFromStruct(*options)) + } + uri := c.bsc.client.getEndpoint(blobServiceName, c.buildPath(), params) + + resp, err := c.bsc.client.exec(http.MethodGet, uri, headers, nil, c.bsc.auth) + if err != nil { + return nil, err + } + defer resp.body.Close() + + var ap AccessPolicy + err = xmlUnmarshal(resp.body, &ap.SignedIdentifiersList) + if err != nil { + return nil, err + } + return buildAccessPolicy(ap, &resp.headers), nil +} + +func buildAccessPolicy(ap AccessPolicy, headers *http.Header) *ContainerPermissions { + // containerAccess. 
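Setting a stored access policy through this API looks roughly like the following sketch; the policy ID and 48-hour lifetime are illustrative:

    import (
    	"time"

    	"github.com/Azure/azure-sdk-for-go/storage"
    )

    func grantReadPolicy(cnt *storage.Container) error {
    	now := time.Now().UTC()
    	perms := storage.ContainerPermissions{
    		AccessType: storage.ContainerAccessTypePrivate,
    		AccessPolicies: []storage.ContainerAccessPolicy{{
    			ID:         "read-only-48h",
    			StartTime:  now,
    			ExpiryTime: now.Add(48 * time.Hour),
    			CanRead:    true, // serialized as "r" in the ACL payload
    		}},
    	}
    	return cnt.SetPermissions(perms, nil)
    }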
Blob, Container, empty + containerAccess := headers.Get(http.CanonicalHeaderKey(ContainerAccessHeader)) + permissions := ContainerPermissions{ + AccessType: ContainerAccessType(containerAccess), + AccessPolicies: []ContainerAccessPolicy{}, + } + + for _, policy := range ap.SignedIdentifiersList.SignedIdentifiers { + capd := ContainerAccessPolicy{ + ID: policy.ID, + StartTime: policy.AccessPolicy.StartTime, + ExpiryTime: policy.AccessPolicy.ExpiryTime, + } + capd.CanRead = updatePermissions(policy.AccessPolicy.Permission, "r") + capd.CanWrite = updatePermissions(policy.AccessPolicy.Permission, "w") + capd.CanDelete = updatePermissions(policy.AccessPolicy.Permission, "d") + + permissions.AccessPolicies = append(permissions.AccessPolicies, capd) + } + return &permissions +} + +// DeleteContainerOptions includes options for a delete container operation +type DeleteContainerOptions struct { + Timeout uint + LeaseID string `header:"x-ms-lease-id"` + IfModifiedSince *time.Time `header:"If-Modified-Since"` + IfUnmodifiedSince *time.Time `header:"If-Unmodified-Since"` + RequestID string `header:"x-ms-client-request-id"` +} + +// Delete deletes the container with given name on the storage +// account. If the container does not exist returns error. +// +// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/delete-container +func (c *Container) Delete(options *DeleteContainerOptions) error { + resp, err := c.delete(options) + if err != nil { + return err + } + readAndCloseBody(resp.body) + return checkRespCode(resp.statusCode, []int{http.StatusAccepted}) +} + +// DeleteIfExists deletes the container with given name on the storage +// account if it exists. Returns true if container is deleted with this call, or +// false if the container did not exist at the time of the Delete Container +// operation. +// +// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/delete-container +func (c *Container) DeleteIfExists(options *DeleteContainerOptions) (bool, error) { + resp, err := c.delete(options) + if resp != nil { + defer readAndCloseBody(resp.body) + if resp.statusCode == http.StatusAccepted || resp.statusCode == http.StatusNotFound { + return resp.statusCode == http.StatusAccepted, nil + } + } + return false, err +} + +func (c *Container) delete(options *DeleteContainerOptions) (*storageResponse, error) { + query := url.Values{"restype": {"container"}} + headers := c.bsc.client.getStandardHeaders() + + if options != nil { + query = addTimeout(query, options.Timeout) + headers = mergeHeaders(headers, headersFromStruct(*options)) + } + uri := c.bsc.client.getEndpoint(blobServiceName, c.buildPath(), query) + + return c.bsc.client.exec(http.MethodDelete, uri, headers, nil, c.bsc.auth) +} + +// ListBlobs returns an object that contains list of blobs in the container, +// pagination token and other information in the response of List Blobs call. 
+// +// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/List-Blobs +func (c *Container) ListBlobs(params ListBlobsParameters) (BlobListResponse, error) { + q := mergeParams(params.getParameters(), url.Values{ + "restype": {"container"}, + "comp": {"list"}}, + ) + uri := c.bsc.client.getEndpoint(blobServiceName, c.buildPath(), q) + + headers := c.bsc.client.getStandardHeaders() + headers = addToHeaders(headers, "x-ms-client-request-id", params.RequestID) + + var out BlobListResponse + resp, err := c.bsc.client.exec(http.MethodGet, uri, headers, nil, c.bsc.auth) + if err != nil { + return out, err + } + defer resp.body.Close() + + err = xmlUnmarshal(resp.body, &out) + for i := range out.Blobs { + out.Blobs[i].Container = c + } + return out, err +} + +func generateContainerACLpayload(policies []ContainerAccessPolicy) (io.Reader, int, error) { + sil := SignedIdentifiers{ + SignedIdentifiers: []SignedIdentifier{}, + } + for _, capd := range policies { + permission := capd.generateContainerPermissions() + signedIdentifier := convertAccessPolicyToXMLStructs(capd.ID, capd.StartTime, capd.ExpiryTime, permission) + sil.SignedIdentifiers = append(sil.SignedIdentifiers, signedIdentifier) + } + return xmlMarshal(sil) +} + +func (capd *ContainerAccessPolicy) generateContainerPermissions() (permissions string) { + // generate the permissions string (rwd). + // still want the end user API to have bool flags. + permissions = "" + + if capd.CanRead { + permissions += "r" + } + + if capd.CanWrite { + permissions += "w" + } + + if capd.CanDelete { + permissions += "d" + } + + return permissions +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/storage/copyblob.go b/vendor/github.com/Azure/azure-sdk-for-go/storage/copyblob.go new file mode 100644 index 000000000..f14342618 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/storage/copyblob.go @@ -0,0 +1,223 @@ +package storage + +import ( + "errors" + "fmt" + "net/http" + "net/url" + "strings" + "time" +) + +const ( + blobCopyStatusPending = "pending" + blobCopyStatusSuccess = "success" + blobCopyStatusAborted = "aborted" + blobCopyStatusFailed = "failed" +) + +// CopyOptions includes the options for a copy blob operation +type CopyOptions struct { + Timeout uint + Source CopyOptionsConditions + Destiny CopyOptionsConditions + RequestID string +} + +// IncrementalCopyOptions includes the options for an incremental copy blob operation +type IncrementalCopyOptions struct { + Timeout uint + Destination IncrementalCopyOptionsConditions + RequestID string +} + +// CopyOptionsConditions includes some conditional options in a copy blob operation +type CopyOptionsConditions struct { + LeaseID string + IfModifiedSince *time.Time + IfUnmodifiedSince *time.Time + IfMatch string + IfNoneMatch string +} + +// IncrementalCopyOptionsConditions includes some conditional options in a copy blob operation +type IncrementalCopyOptionsConditions struct { + IfModifiedSince *time.Time + IfUnmodifiedSince *time.Time + IfMatch string + IfNoneMatch string +} + +// Copy starts a blob copy operation and waits for the operation to +// complete. sourceBlob parameter must be a canonical URL to the blob (can be +// obtained using the GetURL method.) There is no SLA on blob copy and therefore +// this helper method works faster on smaller files. 
+// +// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Copy-Blob +func (b *Blob) Copy(sourceBlob string, options *CopyOptions) error { + copyID, err := b.StartCopy(sourceBlob, options) + if err != nil { + return err + } + + return b.WaitForCopy(copyID) +} + +// StartCopy starts a blob copy operation. +// sourceBlob parameter must be a canonical URL to the blob (can be +// obtained using the GetURL method.) +// +// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Copy-Blob +func (b *Blob) StartCopy(sourceBlob string, options *CopyOptions) (string, error) { + params := url.Values{} + headers := b.Container.bsc.client.getStandardHeaders() + headers["x-ms-copy-source"] = sourceBlob + headers = b.Container.bsc.client.addMetadataToHeaders(headers, b.Metadata) + + if options != nil { + params = addTimeout(params, options.Timeout) + headers = addToHeaders(headers, "x-ms-client-request-id", options.RequestID) + // source + headers = addToHeaders(headers, "x-ms-source-lease-id", options.Source.LeaseID) + headers = addTimeToHeaders(headers, "x-ms-source-if-modified-since", options.Source.IfModifiedSince) + headers = addTimeToHeaders(headers, "x-ms-source-if-unmodified-since", options.Source.IfUnmodifiedSince) + headers = addToHeaders(headers, "x-ms-source-if-match", options.Source.IfMatch) + headers = addToHeaders(headers, "x-ms-source-if-none-match", options.Source.IfNoneMatch) + //destiny + headers = addToHeaders(headers, "x-ms-lease-id", options.Destiny.LeaseID) + headers = addTimeToHeaders(headers, "x-ms-if-modified-since", options.Destiny.IfModifiedSince) + headers = addTimeToHeaders(headers, "x-ms-if-unmodified-since", options.Destiny.IfUnmodifiedSince) + headers = addToHeaders(headers, "x-ms-if-match", options.Destiny.IfMatch) + headers = addToHeaders(headers, "x-ms-if-none-match", options.Destiny.IfNoneMatch) + } + uri := b.Container.bsc.client.getEndpoint(blobServiceName, b.buildPath(), params) + + resp, err := b.Container.bsc.client.exec(http.MethodPut, uri, headers, nil, b.Container.bsc.auth) + if err != nil { + return "", err + } + defer readAndCloseBody(resp.body) + + if err := checkRespCode(resp.statusCode, []int{http.StatusAccepted, http.StatusCreated}); err != nil { + return "", err + } + + copyID := resp.headers.Get("x-ms-copy-id") + if copyID == "" { + return "", errors.New("Got empty copy id header") + } + return copyID, nil +} + +// AbortCopyOptions includes the options for an abort blob operation +type AbortCopyOptions struct { + Timeout uint + LeaseID string `header:"x-ms-lease-id"` + RequestID string `header:"x-ms-client-request-id"` +} + +// AbortCopy aborts a BlobCopy which has already been triggered by the StartBlobCopy function. +// copyID is generated from StartBlobCopy function. +// currentLeaseID is required IF the destination blob has an active lease on it. 
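The synchronous and asynchronous copy styles compose as in the sketch below; the blob names are hypothetical, and GetURL is assumed to supply the canonical source URL the comments above call for:

    import "github.com/Azure/azure-sdk-for-go/storage"

    func backupBlob(cnt *storage.Container) error {
    	src := cnt.GetBlobReference("original.dat")
    	dst := cnt.GetBlobReference("backup.dat")

    	// Start the copy, keep the ID, wait separately. Equivalently,
    	// dst.Copy(src.GetURL(), nil) performs both steps in one call.
    	copyID, err := dst.StartCopy(src.GetURL(), nil)
    	if err != nil {
    		return err
    	}
    	return dst.WaitForCopy(copyID)
    }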
+// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Abort-Copy-Blob +func (b *Blob) AbortCopy(copyID string, options *AbortCopyOptions) error { + params := url.Values{ + "comp": {"copy"}, + "copyid": {copyID}, + } + headers := b.Container.bsc.client.getStandardHeaders() + headers["x-ms-copy-action"] = "abort" + + if options != nil { + params = addTimeout(params, options.Timeout) + headers = mergeHeaders(headers, headersFromStruct(*options)) + } + uri := b.Container.bsc.client.getEndpoint(blobServiceName, b.buildPath(), params) + + resp, err := b.Container.bsc.client.exec(http.MethodPut, uri, headers, nil, b.Container.bsc.auth) + if err != nil { + return err + } + readAndCloseBody(resp.body) + return checkRespCode(resp.statusCode, []int{http.StatusNoContent}) +} + +// WaitForCopy loops until a BlobCopy operation is completed (or fails with error) +func (b *Blob) WaitForCopy(copyID string) error { + for { + err := b.GetProperties(nil) + if err != nil { + return err + } + + if b.Properties.CopyID != copyID { + return errBlobCopyIDMismatch + } + + switch b.Properties.CopyStatus { + case blobCopyStatusSuccess: + return nil + case blobCopyStatusPending: + continue + case blobCopyStatusAborted: + return errBlobCopyAborted + case blobCopyStatusFailed: + return fmt.Errorf("storage: blob copy failed. Id=%s Description=%s", b.Properties.CopyID, b.Properties.CopyStatusDescription) + default: + return fmt.Errorf("storage: unhandled blob copy status: '%s'", b.Properties.CopyStatus) + } + } +} + +// IncrementalCopyBlob copies a snapshot of a source blob and copies to referring blob +// sourceBlob parameter must be a valid snapshot URL of the original blob. +// THe original blob mut be public, or use a Shared Access Signature. +// +// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/incremental-copy-blob . 
+func (b *Blob) IncrementalCopyBlob(sourceBlobURL string, snapshotTime time.Time, options *IncrementalCopyOptions) (string, error) { + params := url.Values{"comp": {"incrementalcopy"}} + + // need formatting to 7 decimal places so it's friendly to Windows and *nix + snapshotTimeFormatted := snapshotTime.Format("2006-01-02T15:04:05.0000000Z") + u, err := url.Parse(sourceBlobURL) + if err != nil { + return "", err + } + query := u.Query() + query.Add("snapshot", snapshotTimeFormatted) + encodedQuery := query.Encode() + encodedQuery = strings.Replace(encodedQuery, "%3A", ":", -1) + u.RawQuery = encodedQuery + snapshotURL := u.String() + + headers := b.Container.bsc.client.getStandardHeaders() + headers["x-ms-copy-source"] = snapshotURL + + if options != nil { + addTimeout(params, options.Timeout) + headers = addToHeaders(headers, "x-ms-client-request-id", options.RequestID) + headers = addTimeToHeaders(headers, "x-ms-if-modified-since", options.Destination.IfModifiedSince) + headers = addTimeToHeaders(headers, "x-ms-if-unmodified-since", options.Destination.IfUnmodifiedSince) + headers = addToHeaders(headers, "x-ms-if-match", options.Destination.IfMatch) + headers = addToHeaders(headers, "x-ms-if-none-match", options.Destination.IfNoneMatch) + } + + // get URI of destination blob + uri := b.Container.bsc.client.getEndpoint(blobServiceName, b.buildPath(), params) + + resp, err := b.Container.bsc.client.exec(http.MethodPut, uri, headers, nil, b.Container.bsc.auth) + if err != nil { + return "", err + } + defer readAndCloseBody(resp.body) + + if err := checkRespCode(resp.statusCode, []int{http.StatusAccepted}); err != nil { + return "", err + } + + copyID := resp.headers.Get("x-ms-copy-id") + if copyID == "" { + return "", errors.New("Got empty copy id header") + } + return copyID, nil +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/storage/directory.go b/vendor/github.com/Azure/azure-sdk-for-go/storage/directory.go index d07e0af1c..57053efd1 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/storage/directory.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/storage/directory.go @@ -4,6 +4,7 @@ import ( "encoding/xml" "net/http" "net/url" + "sync" ) // Directory represents a directory on a share. @@ -25,8 +26,9 @@ type DirectoryProperties struct { // ListDirsAndFilesParameters defines the set of customizable parameters to // make a List Files and Directories call. // -// See https://msdn.microsoft.com/en-us/library/azure/dn166980.aspx +// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/List-Directories-and-Files type ListDirsAndFilesParameters struct { + Prefix string Marker string MaxResults uint Timeout uint @@ -35,7 +37,7 @@ type ListDirsAndFilesParameters struct { // DirsAndFilesListResponse contains the response fields from // a List Files and Directories call. // -// See https://msdn.microsoft.com/en-us/library/azure/dn166980.aspx +// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/List-Directories-and-Files type DirsAndFilesListResponse struct { XMLName xml.Name `xml:"EnumerationResults"` Xmlns string `xml:"xmlns,attr"` @@ -60,14 +62,15 @@ func (d *Directory) buildPath() string { // Create this directory in the associated share. // If a directory with the same name already exists, the operation fails. 
// -// See https://msdn.microsoft.com/en-us/library/azure/dn166993.aspx -func (d *Directory) Create() error { +// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Create-Directory +func (d *Directory) Create(options *FileRequestOptions) error { // if this is the root directory exit early if d.parent == nil { return nil } - headers, err := d.fsc.createResource(d.buildPath(), resourceDirectory, mergeMDIntoExtraHeaders(d.Metadata, nil)) + params := prepareOptions(options) + headers, err := d.fsc.createResource(d.buildPath(), resourceDirectory, params, mergeMDIntoExtraHeaders(d.Metadata, nil), []int{http.StatusCreated}) if err != nil { return err } @@ -80,23 +83,24 @@ func (d *Directory) Create() error { // directory does not exists. Returns true if the directory is newly created or // false if the directory already exists. // -// See https://msdn.microsoft.com/en-us/library/azure/dn166993.aspx -func (d *Directory) CreateIfNotExists() (bool, error) { +// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Create-Directory +func (d *Directory) CreateIfNotExists(options *FileRequestOptions) (bool, error) { // if this is the root directory exit early if d.parent == nil { return false, nil } - resp, err := d.fsc.createResourceNoClose(d.buildPath(), resourceDirectory, nil) + params := prepareOptions(options) + resp, err := d.fsc.createResourceNoClose(d.buildPath(), resourceDirectory, params, nil) if resp != nil { - defer resp.body.Close() + defer readAndCloseBody(resp.body) if resp.statusCode == http.StatusCreated || resp.statusCode == http.StatusConflict { if resp.statusCode == http.StatusCreated { d.updateEtagAndLastModified(resp.headers) return true, nil } - return false, d.FetchAttributes() + return false, d.FetchAttributes(nil) } } @@ -106,18 +110,18 @@ func (d *Directory) CreateIfNotExists() (bool, error) { // Delete removes this directory. It must be empty in order to be deleted. // If the directory does not exist the operation fails. // -// See https://msdn.microsoft.com/en-us/library/azure/dn166969.aspx -func (d *Directory) Delete() error { - return d.fsc.deleteResource(d.buildPath(), resourceDirectory) +// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Delete-Directory +func (d *Directory) Delete(options *FileRequestOptions) error { + return d.fsc.deleteResource(d.buildPath(), resourceDirectory, options) } // DeleteIfExists removes this directory if it exists. // -// See https://msdn.microsoft.com/en-us/library/azure/dn166969.aspx -func (d *Directory) DeleteIfExists() (bool, error) { - resp, err := d.fsc.deleteResourceNoClose(d.buildPath(), resourceDirectory) +// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Delete-Directory +func (d *Directory) DeleteIfExists(options *FileRequestOptions) (bool, error) { + resp, err := d.fsc.deleteResourceNoClose(d.buildPath(), resourceDirectory, options) if resp != nil { - defer resp.body.Close() + defer readAndCloseBody(resp.body) if resp.statusCode == http.StatusAccepted || resp.statusCode == http.StatusNotFound { return resp.statusCode == http.StatusAccepted, nil } @@ -135,8 +139,10 @@ func (d *Directory) Exists() (bool, error) { } // FetchAttributes retrieves metadata for this directory. 
-func (d *Directory) FetchAttributes() error { - headers, err := d.fsc.getResourceHeaders(d.buildPath(), compNone, resourceDirectory, http.MethodHead) +// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/get-directory-properties +func (d *Directory) FetchAttributes(options *FileRequestOptions) error { + params := prepareOptions(options) + headers, err := d.fsc.getResourceHeaders(d.buildPath(), compNone, resourceDirectory, params, http.MethodHead) if err != nil { return err } @@ -164,13 +170,14 @@ func (d *Directory) GetFileReference(name string) *File { Name: name, parent: d, share: d.share, + mutex: &sync.Mutex{}, } } // ListDirsAndFiles returns a list of files and directories under this directory. // It also contains a pagination token and other response details. // -// See https://msdn.microsoft.com/en-us/library/azure/dn166980.aspx +// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/List-Directories-and-Files func (d *Directory) ListDirsAndFiles(params ListDirsAndFilesParameters) (*DirsAndFilesListResponse, error) { q := mergeParams(params.getParameters(), getURLInitValues(compList, resourceDirectory)) @@ -192,9 +199,9 @@ func (d *Directory) ListDirsAndFiles(params ListDirsAndFilesParameters) (*DirsAn // are case-insensitive so case munging should not matter to other // applications either. // -// See https://msdn.microsoft.com/en-us/library/azure/mt427370.aspx -func (d *Directory) SetMetadata() error { - headers, err := d.fsc.setResourceHeaders(d.buildPath(), compMetadata, resourceDirectory, mergeMDIntoExtraHeaders(d.Metadata, nil)) +// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Set-Directory-Metadata +func (d *Directory) SetMetadata(options *FileRequestOptions) error { + headers, err := d.fsc.setResourceHeaders(d.buildPath(), compMetadata, resourceDirectory, mergeMDIntoExtraHeaders(d.Metadata, nil), options) if err != nil { return err } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/storage/entity.go b/vendor/github.com/Azure/azure-sdk-for-go/storage/entity.go new file mode 100644 index 000000000..13e947507 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/storage/entity.go @@ -0,0 +1,439 @@ +package storage + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io/ioutil" + "net/http" + "net/url" + "strconv" + "strings" + "time" + + "github.com/satori/uuid" +) + +// Annotating as secure for gas scanning +/* #nosec */ +const ( + partitionKeyNode = "PartitionKey" + rowKeyNode = "RowKey" + etagErrorTemplate = "Etag didn't match: %v" +) + +var ( + errEmptyPayload = errors.New("Empty payload is not a valid metadata level for this operation") + errNilPreviousResult = errors.New("The previous results page is nil") + errNilNextLink = errors.New("There are no more pages in this query results") +) + +// Entity represents an entity inside an Azure table. +type Entity struct { + Table *Table + PartitionKey string + RowKey string + TimeStamp time.Time + OdataMetadata string + OdataType string + OdataID string + OdataEtag string + OdataEditLink string + Properties map[string]interface{} +} + +// GetEntityReference returns an Entity object with the specified +// partition key and row key. +func (t *Table) GetEntityReference(partitionKey, rowKey string) *Entity { + return &Entity{ + PartitionKey: partitionKey, + RowKey: rowKey, + Table: t, + } +} + +// EntityOptions includes options for entity operations. 
+type EntityOptions struct { + Timeout uint + RequestID string `header:"x-ms-client-request-id"` +} + +// GetEntityOptions includes options for a get entity operation +type GetEntityOptions struct { + Select []string + RequestID string `header:"x-ms-client-request-id"` +} + +// Get gets the referenced entity. Which properties to get can be +// specified using the select option. +// See: +// https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/query-entities +// https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/querying-tables-and-entities +func (e *Entity) Get(timeout uint, ml MetadataLevel, options *GetEntityOptions) error { + if ml == EmptyPayload { + return errEmptyPayload + } + // RowKey and PartitionKey could be lost if not included in the query + // As those are the entity identifiers, it is best if they are not lost + rk := e.RowKey + pk := e.PartitionKey + + query := url.Values{ + "timeout": {strconv.FormatUint(uint64(timeout), 10)}, + } + headers := e.Table.tsc.client.getStandardHeaders() + headers[headerAccept] = string(ml) + + if options != nil { + if len(options.Select) > 0 { + query.Add("$select", strings.Join(options.Select, ",")) + } + headers = mergeHeaders(headers, headersFromStruct(*options)) + } + + uri := e.Table.tsc.client.getEndpoint(tableServiceName, e.buildPath(), query) + resp, err := e.Table.tsc.client.exec(http.MethodGet, uri, headers, nil, e.Table.tsc.auth) + if err != nil { + return err + } + defer readAndCloseBody(resp.body) + + if err = checkRespCode(resp.statusCode, []int{http.StatusOK}); err != nil { + return err + } + + respBody, err := ioutil.ReadAll(resp.body) + if err != nil { + return err + } + err = json.Unmarshal(respBody, e) + if err != nil { + return err + } + e.PartitionKey = pk + e.RowKey = rk + + return nil +} + +// Insert inserts the referenced entity in its table. +// The function fails if there is an entity with the same +// PartitionKey and RowKey in the table. +// ml determines the level of detail of metadata in the operation response, +// or no data at all. +// See: https://docs.microsoft.com/rest/api/storageservices/fileservices/insert-entity +func (e *Entity) Insert(ml MetadataLevel, options *EntityOptions) error { + query, headers := options.getParameters() + headers = mergeHeaders(headers, e.Table.tsc.client.getStandardHeaders()) + + body, err := json.Marshal(e) + if err != nil { + return err + } + headers = addBodyRelatedHeaders(headers, len(body)) + headers = addReturnContentHeaders(headers, ml) + + uri := e.Table.tsc.client.getEndpoint(tableServiceName, e.Table.buildPath(), query) + resp, err := e.Table.tsc.client.exec(http.MethodPost, uri, headers, bytes.NewReader(body), e.Table.tsc.auth) + if err != nil { + return err + } + defer resp.body.Close() + + data, err := ioutil.ReadAll(resp.body) + if err != nil { + return err + } + + if ml != EmptyPayload { + if err = checkRespCode(resp.statusCode, []int{http.StatusCreated}); err != nil { + return err + } + if err = e.UnmarshalJSON(data); err != nil { + return err + } + } else { + if err = checkRespCode(resp.statusCode, []int{http.StatusNoContent}); err != nil { + return err + } + } + + return nil +} + +// Update updates the contents of an entity. The function fails if there is no entity +// with the same PartitionKey and RowKey in the table or if the ETag is different +// than the one in Azure. 
+// See: https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/update-entity2 +func (e *Entity) Update(force bool, options *EntityOptions) error { + return e.updateMerge(force, http.MethodPut, options) +} + +// Merge merges the contents of entity specified with PartitionKey and RowKey +// with the content specified in Properties. +// The function fails if there is no entity with the same PartitionKey and +// RowKey in the table or if the ETag is different than the one in Azure. +// Read more: https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/merge-entity +func (e *Entity) Merge(force bool, options *EntityOptions) error { + return e.updateMerge(force, "MERGE", options) +} + +// Delete deletes the entity. +// The function fails if there is no entity with the same PartitionKey and +// RowKey in the table or if the ETag is different than the one in Azure. +// See: https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/delete-entity1 +func (e *Entity) Delete(force bool, options *EntityOptions) error { + query, headers := options.getParameters() + headers = mergeHeaders(headers, e.Table.tsc.client.getStandardHeaders()) + + headers = addIfMatchHeader(headers, force, e.OdataEtag) + headers = addReturnContentHeaders(headers, EmptyPayload) + + uri := e.Table.tsc.client.getEndpoint(tableServiceName, e.buildPath(), query) + resp, err := e.Table.tsc.client.exec(http.MethodDelete, uri, headers, nil, e.Table.tsc.auth) + if err != nil { + if resp.statusCode == http.StatusPreconditionFailed { + return fmt.Errorf(etagErrorTemplate, err) + } + return err + } + defer readAndCloseBody(resp.body) + + if err = checkRespCode(resp.statusCode, []int{http.StatusNoContent}); err != nil { + return err + } + + return e.updateTimestamp(resp.headers) +} + +// InsertOrReplace inserts an entity or replaces the existing one. +// Read more: https://docs.microsoft.com/rest/api/storageservices/fileservices/insert-or-replace-entity +func (e *Entity) InsertOrReplace(options *EntityOptions) error { + return e.insertOr(http.MethodPut, options) +} + +// InsertOrMerge inserts an entity or merges the existing one. 
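// [Editor's aside: an illustrative entity round trip, not part of this patch.
// GetTableService/GetTableReference are assumed from tableserviceclient.go;
// the table and key names are hypothetical. Insert with EmptyPayload expects
// 204 No Content, while Get rejects EmptyPayload, so a metadata level is
// required there (the MetadataLevel constants are defined in odata.go below).
//
//	tsc := cli.GetTableService()
//	table := tsc.GetTableReference("employees")
//	ent := table.GetEntityReference("engineering", "emp-042")
//	ent.Properties = map[string]interface{}{
//		"Name": "Alice",
//		"Age":  int64(34), // marshalled with an Edm.Int64 @odata.type annotation
//	}
//	if err := ent.Insert(storage.EmptyPayload, nil); err != nil {
//		log.Fatal(err)
//	}
//	// Read it back with full OData metadata and a 30 second server timeout.
//	if err := ent.Get(30, storage.FullMetadata, nil); err != nil {
//		log.Fatal(err)
//	}
// ]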
+// Read more: https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/insert-or-merge-entity +func (e *Entity) InsertOrMerge(options *EntityOptions) error { + return e.insertOr("MERGE", options) +} + +func (e *Entity) buildPath() string { + return fmt.Sprintf("%s(PartitionKey='%s', RowKey='%s')", e.Table.buildPath(), e.PartitionKey, e.RowKey) +} + +// MarshalJSON is a custom marshaller for entity +func (e *Entity) MarshalJSON() ([]byte, error) { + completeMap := map[string]interface{}{} + completeMap[partitionKeyNode] = e.PartitionKey + completeMap[rowKeyNode] = e.RowKey + for k, v := range e.Properties { + typeKey := strings.Join([]string{k, OdataTypeSuffix}, "") + switch t := v.(type) { + case []byte: + completeMap[typeKey] = OdataBinary + completeMap[k] = string(t) + case time.Time: + completeMap[typeKey] = OdataDateTime + completeMap[k] = t.Format(time.RFC3339Nano) + case uuid.UUID: + completeMap[typeKey] = OdataGUID + completeMap[k] = t.String() + case int64: + completeMap[typeKey] = OdataInt64 + completeMap[k] = fmt.Sprintf("%v", v) + default: + completeMap[k] = v + } + if strings.HasSuffix(k, OdataTypeSuffix) { + if !(completeMap[k] == OdataBinary || + completeMap[k] == OdataDateTime || + completeMap[k] == OdataGUID || + completeMap[k] == OdataInt64) { + return nil, fmt.Errorf("Odata.type annotation %v value is not valid", k) + } + valueKey := strings.TrimSuffix(k, OdataTypeSuffix) + if _, ok := completeMap[valueKey]; !ok { + return nil, fmt.Errorf("Odata.type annotation %v defined without value defined", k) + } + } + } + return json.Marshal(completeMap) +} + +// UnmarshalJSON is a custom unmarshaller for entities +func (e *Entity) UnmarshalJSON(data []byte) error { + errorTemplate := "Deserializing error: %v" + + props := map[string]interface{}{} + err := json.Unmarshal(data, &props) + if err != nil { + return err + } + + // deselialize metadata + e.OdataMetadata = stringFromMap(props, "odata.metadata") + e.OdataType = stringFromMap(props, "odata.type") + e.OdataID = stringFromMap(props, "odata.id") + e.OdataEtag = stringFromMap(props, "odata.etag") + e.OdataEditLink = stringFromMap(props, "odata.editLink") + e.PartitionKey = stringFromMap(props, partitionKeyNode) + e.RowKey = stringFromMap(props, rowKeyNode) + + // deserialize timestamp + timeStamp, ok := props["Timestamp"] + if ok { + str, ok := timeStamp.(string) + if !ok { + return fmt.Errorf(errorTemplate, "Timestamp casting error") + } + t, err := time.Parse(time.RFC3339Nano, str) + if err != nil { + return fmt.Errorf(errorTemplate, err) + } + e.TimeStamp = t + } + delete(props, "Timestamp") + delete(props, "Timestamp@odata.type") + + // deserialize entity (user defined fields) + for k, v := range props { + if strings.HasSuffix(k, OdataTypeSuffix) { + valueKey := strings.TrimSuffix(k, OdataTypeSuffix) + str, ok := props[valueKey].(string) + if !ok { + return fmt.Errorf(errorTemplate, fmt.Sprintf("%v casting error", v)) + } + switch v { + case OdataBinary: + props[valueKey] = []byte(str) + case OdataDateTime: + t, err := time.Parse("2006-01-02T15:04:05Z", str) + if err != nil { + return fmt.Errorf(errorTemplate, err) + } + props[valueKey] = t + case OdataGUID: + props[valueKey] = uuid.FromStringOrNil(str) + case OdataInt64: + i, err := strconv.ParseInt(str, 10, 64) + if err != nil { + return fmt.Errorf(errorTemplate, err) + } + props[valueKey] = i + default: + return fmt.Errorf(errorTemplate, fmt.Sprintf("%v is not supported", v)) + } + delete(props, k) + } + } + + e.Properties = props + return nil +} + +func 
getAndDelete(props map[string]interface{}, key string) interface{} { + if value, ok := props[key]; ok { + delete(props, key) + return value + } + return nil +} + +func addIfMatchHeader(h map[string]string, force bool, etag string) map[string]string { + if force { + h[headerIfMatch] = "*" + } else { + h[headerIfMatch] = etag + } + return h +} + +// updates Etag and timestamp +func (e *Entity) updateEtagAndTimestamp(headers http.Header) error { + e.OdataEtag = headers.Get(headerEtag) + return e.updateTimestamp(headers) +} + +func (e *Entity) updateTimestamp(headers http.Header) error { + str := headers.Get(headerDate) + t, err := time.Parse(time.RFC1123, str) + if err != nil { + return fmt.Errorf("Update timestamp error: %v", err) + } + e.TimeStamp = t + return nil +} + +func (e *Entity) insertOr(verb string, options *EntityOptions) error { + query, headers := options.getParameters() + headers = mergeHeaders(headers, e.Table.tsc.client.getStandardHeaders()) + + body, err := json.Marshal(e) + if err != nil { + return err + } + headers = addBodyRelatedHeaders(headers, len(body)) + headers = addReturnContentHeaders(headers, EmptyPayload) + + uri := e.Table.tsc.client.getEndpoint(tableServiceName, e.buildPath(), query) + resp, err := e.Table.tsc.client.exec(verb, uri, headers, bytes.NewReader(body), e.Table.tsc.auth) + if err != nil { + return err + } + defer readAndCloseBody(resp.body) + + if err = checkRespCode(resp.statusCode, []int{http.StatusNoContent}); err != nil { + return err + } + + return e.updateEtagAndTimestamp(resp.headers) +} + +func (e *Entity) updateMerge(force bool, verb string, options *EntityOptions) error { + query, headers := options.getParameters() + headers = mergeHeaders(headers, e.Table.tsc.client.getStandardHeaders()) + + body, err := json.Marshal(e) + if err != nil { + return err + } + headers = addBodyRelatedHeaders(headers, len(body)) + headers = addIfMatchHeader(headers, force, e.OdataEtag) + headers = addReturnContentHeaders(headers, EmptyPayload) + + uri := e.Table.tsc.client.getEndpoint(tableServiceName, e.buildPath(), query) + resp, err := e.Table.tsc.client.exec(verb, uri, headers, bytes.NewReader(body), e.Table.tsc.auth) + if err != nil { + if resp.statusCode == http.StatusPreconditionFailed { + return fmt.Errorf(etagErrorTemplate, err) + } + return err + } + defer readAndCloseBody(resp.body) + + if err = checkRespCode(resp.statusCode, []int{http.StatusNoContent}); err != nil { + return err + } + + return e.updateEtagAndTimestamp(resp.headers) +} + +func stringFromMap(props map[string]interface{}, key string) string { + value := getAndDelete(props, key) + if value != nil { + return value.(string) + } + return "" +} + +func (options *EntityOptions) getParameters() (url.Values, map[string]string) { + query := url.Values{} + headers := map[string]string{} + if options != nil { + query = addTimeout(query, options.Timeout) + headers = headersFromStruct(*options) + } + return query, headers +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/storage/file.go b/vendor/github.com/Azure/azure-sdk-for-go/storage/file.go index 575f3f726..b4022bcb6 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/storage/file.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/storage/file.go @@ -4,9 +4,11 @@ import ( "errors" "fmt" "io" + "io/ioutil" "net/http" "net/url" "strconv" + "sync" ) const fourMB = uint64(4194304) @@ -14,12 +16,14 @@ const oneTB = uint64(1099511627776) // File represents a file on a share. 
type File struct { - fsc *FileServiceClient - Metadata map[string]string - Name string `xml:"Name"` - parent *Directory - Properties FileProperties `xml:"Properties"` - share *Share + fsc *FileServiceClient + Metadata map[string]string + Name string `xml:"Name"` + parent *Directory + Properties FileProperties `xml:"Properties"` + share *Share + FileCopyProperties FileCopyState + mutex *sync.Mutex } // FileProperties contains various properties of a file. @@ -30,7 +34,7 @@ type FileProperties struct { Etag string Language string `header:"x-ms-content-language"` LastModified string - Length uint64 `xml:"Content-Length"` + Length uint64 `xml:"Content-Length" header:"x-ms-content-length"` MD5 string `header:"x-ms-content-md5"` Type string `header:"x-ms-content-type"` } @@ -38,10 +42,10 @@ type FileProperties struct { // FileCopyState contains various properties of a file copy operation. type FileCopyState struct { CompletionTime string - ID string + ID string `header:"x-ms-copy-id"` Progress string Source string - Status string + Status string `header:"x-ms-copy-status"` StatusDesc string } @@ -51,9 +55,23 @@ type FileStream struct { ContentMD5 string } +// FileRequestOptions will be passed to misc file operations. +// Currently just Timeout (in seconds) but could expand. +type FileRequestOptions struct { + Timeout uint // timeout duration in seconds. +} + +func prepareOptions(options *FileRequestOptions) url.Values { + params := url.Values{} + if options != nil { + params = addTimeout(params, options.Timeout) + } + return params +} + // FileRanges contains a list of file range information for a file. // -// See https://msdn.microsoft.com/en-us/library/azure/dn166984.aspx +// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/List-Ranges type FileRanges struct { ContentLength uint64 LastModified string @@ -63,7 +81,7 @@ type FileRanges struct { // FileRange contains range information for a file. // -// See https://msdn.microsoft.com/en-us/library/azure/dn166984.aspx +// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/List-Ranges type FileRange struct { Start uint64 `xml:"Start"` End uint64 `xml:"End"` @@ -80,9 +98,13 @@ func (f *File) buildPath() string { // ClearRange releases the specified range of space in a file. // -// See https://msdn.microsoft.com/en-us/library/azure/dn194276.aspx -func (f *File) ClearRange(fileRange FileRange) error { - headers, err := f.modifyRange(nil, fileRange, nil) +// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Put-Range +func (f *File) ClearRange(fileRange FileRange, options *FileRequestOptions) error { + var timeout *uint + if options != nil { + timeout = &options.Timeout + } + headers, err := f.modifyRange(nil, fileRange, timeout, nil) if err != nil { return err } @@ -93,41 +115,61 @@ func (f *File) ClearRange(fileRange FileRange) error { // Create creates a new file or replaces an existing one. 
 //
-// See https://msdn.microsoft.com/en-us/library/azure/dn194271.aspx
-func (f *File) Create(maxSize uint64) error {
+// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Create-File
+func (f *File) Create(maxSize uint64, options *FileRequestOptions) error {
 	if maxSize > oneTB {
 		return fmt.Errorf("max file size is 1TB")
 	}
+	params := prepareOptions(options)
+	headers := headersFromStruct(f.Properties)
+	headers["x-ms-content-length"] = strconv.FormatUint(maxSize, 10)
+	headers["x-ms-type"] = "file"
 
-	extraHeaders := map[string]string{
-		"x-ms-content-length": strconv.FormatUint(maxSize, 10),
-		"x-ms-type":           "file",
-	}
-
-	headers, err := f.fsc.createResource(f.buildPath(), resourceFile, mergeMDIntoExtraHeaders(f.Metadata, extraHeaders))
+	outputHeaders, err := f.fsc.createResource(f.buildPath(), resourceFile, params, mergeMDIntoExtraHeaders(f.Metadata, headers), []int{http.StatusCreated})
 	if err != nil {
 		return err
 	}
 
 	f.Properties.Length = maxSize
+	f.updateEtagAndLastModified(outputHeaders)
+	return nil
+}
+
+// CopyFile operation copies a file/blob from the sourceURL to the path provided.
+//
+// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/copy-file
+func (f *File) CopyFile(sourceURL string, options *FileRequestOptions) error {
+	extraHeaders := map[string]string{
+		"x-ms-type":        "file",
+		"x-ms-copy-source": sourceURL,
+	}
+	params := prepareOptions(options)
+
+	headers, err := f.fsc.createResource(f.buildPath(), resourceFile, params, mergeMDIntoExtraHeaders(f.Metadata, extraHeaders), []int{http.StatusAccepted})
+	if err != nil {
+		return err
+	}
+	f.updateEtagAndLastModified(headers)
+	f.FileCopyProperties.ID = headers.Get("X-Ms-Copy-Id")
+	f.FileCopyProperties.Status = headers.Get("X-Ms-Copy-Status")
 	return nil
 }
 
 // Delete immediately removes this file from the storage account.
 //
-// See https://msdn.microsoft.com/en-us/library/azure/dn689085.aspx
-func (f *File) Delete() error {
-	return f.fsc.deleteResource(f.buildPath(), resourceFile)
+// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Delete-File2
+func (f *File) Delete(options *FileRequestOptions) error {
+	return f.fsc.deleteResource(f.buildPath(), resourceFile, options)
 }
 
 // DeleteIfExists removes this file if it exists.
 //
-// See https://msdn.microsoft.com/en-us/library/azure/dn689085.aspx
-func (f *File) DeleteIfExists() (bool, error) {
-	resp, err := f.fsc.deleteResourceNoClose(f.buildPath(), resourceFile)
+// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Delete-File2
+func (f *File) DeleteIfExists(options *FileRequestOptions) (bool, error) {
+	resp, err := f.fsc.deleteResourceNoClose(f.buildPath(), resourceFile, options)
 	if resp != nil {
-		defer resp.body.Close()
+		defer readAndCloseBody(resp.body)
 		if resp.statusCode == http.StatusAccepted || resp.statusCode == http.StatusNotFound {
 			return resp.statusCode == http.StatusAccepted, nil
 		}
@@ -135,33 +177,59 @@ func (f *File) DeleteIfExists() (bool, error) {
 	return false, err
 }
 
+// GetFileOptions includes options for a get file operation
+type GetFileOptions struct {
+	Timeout       uint
+	GetContentMD5 bool
+}
+
+// DownloadToStream operation downloads the file.
+// +// See: https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/get-file +func (f *File) DownloadToStream(options *FileRequestOptions) (io.ReadCloser, error) { + params := prepareOptions(options) + resp, err := f.fsc.getResourceNoClose(f.buildPath(), compNone, resourceFile, params, http.MethodGet, nil) + if err != nil { + return nil, err + } + + if err = checkRespCode(resp.statusCode, []int{http.StatusOK}); err != nil { + readAndCloseBody(resp.body) + return nil, err + } + return resp.body, nil +} + // DownloadRangeToStream operation downloads the specified range of this file with optional MD5 hash. // // See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/get-file -func (f *File) DownloadRangeToStream(fileRange FileRange, getContentMD5 bool) (fs FileStream, err error) { - if getContentMD5 && isRangeTooBig(fileRange) { - return fs, fmt.Errorf("must specify a range less than or equal to 4MB when getContentMD5 is true") - } - +func (f *File) DownloadRangeToStream(fileRange FileRange, options *GetFileOptions) (fs FileStream, err error) { extraHeaders := map[string]string{ "Range": fileRange.String(), } - if getContentMD5 == true { - extraHeaders["x-ms-range-get-content-md5"] = "true" + params := url.Values{} + if options != nil { + if options.GetContentMD5 { + if isRangeTooBig(fileRange) { + return fs, fmt.Errorf("must specify a range less than or equal to 4MB when getContentMD5 is true") + } + extraHeaders["x-ms-range-get-content-md5"] = "true" + } + params = addTimeout(params, options.Timeout) } - resp, err := f.fsc.getResourceNoClose(f.buildPath(), compNone, resourceFile, http.MethodGet, extraHeaders) + resp, err := f.fsc.getResourceNoClose(f.buildPath(), compNone, resourceFile, params, http.MethodGet, extraHeaders) if err != nil { return fs, err } if err = checkRespCode(resp.statusCode, []int{http.StatusOK, http.StatusPartialContent}); err != nil { - resp.body.Close() + readAndCloseBody(resp.body) return fs, err } fs.Body = resp.body - if getContentMD5 { + if options != nil && options.GetContentMD5 { fs.ContentMD5 = resp.headers.Get("Content-MD5") } return fs, nil @@ -178,8 +246,10 @@ func (f *File) Exists() (bool, error) { } // FetchAttributes updates metadata and properties for this file. -func (f *File) FetchAttributes() error { - headers, err := f.fsc.getResourceHeaders(f.buildPath(), compNone, resourceFile, http.MethodHead) +// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/get-file-properties +func (f *File) FetchAttributes(options *FileRequestOptions) error { + params := prepareOptions(options) + headers, err := f.fsc.getResourceHeaders(f.buildPath(), compNone, resourceFile, params, http.MethodHead) if err != nil { return err } @@ -199,17 +269,26 @@ func isRangeTooBig(fileRange FileRange) bool { return false } +// ListRangesOptions includes options for a list file ranges operation +type ListRangesOptions struct { + Timeout uint + ListRange *FileRange +} + // ListRanges returns the list of valid ranges for this file. 
// -// See https://msdn.microsoft.com/en-us/library/azure/dn166984.aspx -func (f *File) ListRanges(listRange *FileRange) (*FileRanges, error) { +// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/List-Ranges +func (f *File) ListRanges(options *ListRangesOptions) (*FileRanges, error) { params := url.Values{"comp": {"rangelist"}} // add optional range to list var headers map[string]string - if listRange != nil { - headers = make(map[string]string) - headers["Range"] = listRange.String() + if options != nil { + params = addTimeout(params, options.Timeout) + if options.ListRange != nil { + headers = make(map[string]string) + headers["Range"] = options.ListRange.String() + } } resp, err := f.fsc.listContent(f.buildPath(), params, headers) @@ -221,6 +300,7 @@ func (f *File) ListRanges(listRange *FileRange) (*FileRanges, error) { var cl uint64 cl, err = strconv.ParseUint(resp.headers.Get("x-ms-content-length"), 10, 64) if err != nil { + ioutil.ReadAll(resp.body) return nil, err } @@ -234,7 +314,7 @@ func (f *File) ListRanges(listRange *FileRange) (*FileRanges, error) { } // modifies a range of bytes in this file -func (f *File) modifyRange(bytes io.Reader, fileRange FileRange, contentMD5 *string) (http.Header, error) { +func (f *File) modifyRange(bytes io.Reader, fileRange FileRange, timeout *uint, contentMD5 *string) (http.Header, error) { if err := f.fsc.checkForStorageEmulator(); err != nil { return nil, err } @@ -245,7 +325,12 @@ func (f *File) modifyRange(bytes io.Reader, fileRange FileRange, contentMD5 *str return nil, errors.New("range cannot exceed 4MB in size") } - uri := f.fsc.client.getEndpoint(fileServiceName, f.buildPath(), url.Values{"comp": {"range"}}) + params := url.Values{"comp": {"range"}} + if timeout != nil { + params = addTimeout(params, *timeout) + } + + uri := f.fsc.client.getEndpoint(fileServiceName, f.buildPath(), params) // default to clear write := "clear" @@ -272,7 +357,7 @@ func (f *File) modifyRange(bytes io.Reader, fileRange FileRange, contentMD5 *str if err != nil { return nil, err } - defer resp.body.Close() + defer readAndCloseBody(resp.body) return resp.headers, checkRespCode(resp.statusCode, []int{http.StatusCreated}) } @@ -283,9 +368,9 @@ func (f *File) modifyRange(bytes io.Reader, fileRange FileRange, contentMD5 *str // are case-insensitive so case munging should not matter to other // applications either. // -// See https://msdn.microsoft.com/en-us/library/azure/dn689097.aspx -func (f *File) SetMetadata() error { - headers, err := f.fsc.setResourceHeaders(f.buildPath(), compMetadata, resourceFile, mergeMDIntoExtraHeaders(f.Metadata, nil)) +// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Set-File-Metadata +func (f *File) SetMetadata(options *FileRequestOptions) error { + headers, err := f.fsc.setResourceHeaders(f.buildPath(), compMetadata, resourceFile, mergeMDIntoExtraHeaders(f.Metadata, nil), options) if err != nil { return err } @@ -301,9 +386,9 @@ func (f *File) SetMetadata() error { // are case-insensitive so case munging should not matter to other // applications either. 
// -// See https://msdn.microsoft.com/en-us/library/azure/dn166975.aspx -func (f *File) SetProperties() error { - headers, err := f.fsc.setResourceHeaders(f.buildPath(), compProperties, resourceFile, headersFromStruct(f.Properties)) +// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Set-File-Properties +func (f *File) SetProperties(options *FileRequestOptions) error { + headers, err := f.fsc.setResourceHeaders(f.buildPath(), compProperties, resourceFile, headersFromStruct(f.Properties), options) if err != nil { return err } @@ -338,23 +423,40 @@ func (f *File) updateProperties(header http.Header) { // This method does not create a publicly accessible URL if the file // is private and this method does not check if the file exists. func (f *File) URL() string { - return f.fsc.client.getEndpoint(fileServiceName, f.buildPath(), url.Values{}) + return f.fsc.client.getEndpoint(fileServiceName, f.buildPath(), nil) } -// WriteRange writes a range of bytes to this file with an optional MD5 hash of the content. -// Note that the length of bytes must match (rangeEnd - rangeStart) + 1 with a maximum size of 4MB. +// WriteRangeOptions includes options for a write file range operation +type WriteRangeOptions struct { + Timeout uint + ContentMD5 string +} + +// WriteRange writes a range of bytes to this file with an optional MD5 hash of the content (inside +// options parameter). Note that the length of bytes must match (rangeEnd - rangeStart) + 1 with +// a maximum size of 4MB. // -// See https://msdn.microsoft.com/en-us/library/azure/dn194276.aspx -func (f *File) WriteRange(bytes io.Reader, fileRange FileRange, contentMD5 *string) error { +// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Put-Range +func (f *File) WriteRange(bytes io.Reader, fileRange FileRange, options *WriteRangeOptions) error { if bytes == nil { return errors.New("bytes cannot be nil") } + var timeout *uint + var md5 *string + if options != nil { + timeout = &options.Timeout + md5 = &options.ContentMD5 + } - headers, err := f.modifyRange(bytes, fileRange, contentMD5) + headers, err := f.modifyRange(bytes, fileRange, timeout, md5) if err != nil { return err } - + // it's perfectly legal for multiple go routines to call WriteRange + // on the same *File (e.g. concurrently writing non-overlapping ranges) + // so we must take the file mutex before updating our properties. + f.mutex.Lock() f.updateEtagAndLastModified(headers) + f.mutex.Unlock() return nil } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/storage/fileserviceclient.go b/vendor/github.com/Azure/azure-sdk-for-go/storage/fileserviceclient.go index 81e094f00..81217bdfa 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/storage/fileserviceclient.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/storage/fileserviceclient.go @@ -5,7 +5,7 @@ import ( "fmt" "net/http" "net/url" - "strings" + "strconv" ) // FileServiceClient contains operations for Microsoft Azure File Service. @@ -17,7 +17,7 @@ type FileServiceClient struct { // ListSharesParameters defines the set of customizable parameters to make a // List Shares call. // -// See https://msdn.microsoft.com/en-us/library/azure/dn167009.aspx +// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/List-Shares type ListSharesParameters struct { Prefix string Marker string @@ -29,7 +29,7 @@ type ListSharesParameters struct { // ShareListResponse contains the response fields from // ListShares call. 
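// [Editor's aside: an illustrative file workflow against the API above, not
// part of this patch. GetFileService and GetRootDirectoryReference are
// assumed from elsewhere in the package; share and file names are
// hypothetical. WriteRange requires len(payload) == End-Start+1, at most 4MB
// per call (see modifyRange in file.go).
//
//	fsc := cli.GetFileService()
//	share := fsc.GetShareReference("myshare") // assumes the share already exists
//	file := share.GetRootDirectoryReference().GetFileReference("data.bin")
//	if err := file.Create(1024*1024, nil); err != nil { // reserves 1 MiB, uploads no content
//		log.Fatal(err)
//	}
//	payload := make([]byte, 512)
//	r := storage.FileRange{Start: 0, End: 511}
//	if err := file.WriteRange(bytes.NewReader(payload), r, nil); err != nil {
//		log.Fatal(err)
//	}
//	// Non-overlapping ranges may be written from multiple goroutines; the new
//	// mutex only guards the shared Etag/LastModified bookkeeping.
//	rc, err := file.DownloadToStream(nil)
//	if err != nil {
//		log.Fatal(err)
//	}
//	defer rc.Close()
//	io.Copy(ioutil.Discard, rc)
// ]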
// -// See https://msdn.microsoft.com/en-us/library/azure/dn167009.aspx +// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/List-Shares type ShareListResponse struct { XMLName xml.Name `xml:"EnumerationResults"` Xmlns string `xml:"xmlns,attr"` @@ -79,10 +79,10 @@ func (p ListSharesParameters) getParameters() url.Values { out.Set("include", p.Include) } if p.MaxResults != 0 { - out.Set("maxresults", fmt.Sprintf("%v", p.MaxResults)) + out.Set("maxresults", strconv.FormatUint(uint64(p.MaxResults), 10)) } if p.Timeout != 0 { - out.Set("timeout", fmt.Sprintf("%v", p.Timeout)) + out.Set("timeout", strconv.FormatUint(uint64(p.Timeout), 10)) } return out @@ -91,15 +91,16 @@ func (p ListSharesParameters) getParameters() url.Values { func (p ListDirsAndFilesParameters) getParameters() url.Values { out := url.Values{} + if p.Prefix != "" { + out.Set("prefix", p.Prefix) + } if p.Marker != "" { out.Set("marker", p.Marker) } if p.MaxResults != 0 { - out.Set("maxresults", fmt.Sprintf("%v", p.MaxResults)) - } - if p.Timeout != 0 { - out.Set("timeout", fmt.Sprintf("%v", p.Timeout)) + out.Set("maxresults", strconv.FormatUint(uint64(p.MaxResults), 10)) } + out = addTimeout(out, p.Timeout) return out } @@ -117,9 +118,9 @@ func getURLInitValues(comp compType, res resourceType) url.Values { } // GetShareReference returns a Share object for the specified share name. -func (f FileServiceClient) GetShareReference(name string) Share { - return Share{ - fsc: &f, +func (f *FileServiceClient) GetShareReference(name string) *Share { + return &Share{ + fsc: f, Name: name, Properties: ShareProperties{ Quota: -1, @@ -130,7 +131,7 @@ func (f FileServiceClient) GetShareReference(name string) Share { // ListShares returns the list of shares in a storage account along with // pagination token and other response details. // -// See https://msdn.microsoft.com/en-us/library/azure/dd179352.aspx +// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/list-shares func (f FileServiceClient) ListShares(params ListSharesParameters) (*ShareListResponse, error) { q := mergeParams(params.getParameters(), url.Values{"comp": {"list"}}) @@ -149,6 +150,20 @@ func (f FileServiceClient) ListShares(params ListSharesParameters) (*ShareListRe return &out, err } +// GetServiceProperties gets the properties of your storage account's file service. +// File service does not support logging +// See: https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/get-file-service-properties +func (f *FileServiceClient) GetServiceProperties() (*ServiceProperties, error) { + return f.client.getServiceProperties(fileServiceName, f.auth) +} + +// SetServiceProperties sets the properties of your storage account's file service. 
+// File service does not support logging +// See: https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/set-file-service-properties +func (f *FileServiceClient) SetServiceProperties(props ServiceProperties) error { + return f.client.setServiceProperties(props, fileServiceName, f.auth) +} + // retrieves directory or share content func (f FileServiceClient) listContent(path string, params url.Values, extraHeaders map[string]string) (*storageResponse, error) { if err := f.checkForStorageEmulator(); err != nil { @@ -165,7 +180,7 @@ func (f FileServiceClient) listContent(path string, params url.Values, extraHead } if err = checkRespCode(resp.statusCode, []int{http.StatusOK}); err != nil { - resp.body.Close() + readAndCloseBody(resp.body) return nil, err } @@ -183,7 +198,7 @@ func (f FileServiceClient) resourceExists(path string, res resourceType) (bool, resp, err := f.client.exec(http.MethodHead, uri, headers, nil, f.auth) if resp != nil { - defer resp.body.Close() + defer readAndCloseBody(resp.body) if resp.statusCode == http.StatusOK || resp.statusCode == http.StatusNotFound { return resp.statusCode == http.StatusOK, resp.headers, nil } @@ -192,23 +207,24 @@ func (f FileServiceClient) resourceExists(path string, res resourceType) (bool, } // creates a resource depending on the specified resource type -func (f FileServiceClient) createResource(path string, res resourceType, extraHeaders map[string]string) (http.Header, error) { - resp, err := f.createResourceNoClose(path, res, extraHeaders) +func (f FileServiceClient) createResource(path string, res resourceType, urlParams url.Values, extraHeaders map[string]string, expectedResponseCodes []int) (http.Header, error) { + resp, err := f.createResourceNoClose(path, res, urlParams, extraHeaders) if err != nil { return nil, err } - defer resp.body.Close() - return resp.headers, checkRespCode(resp.statusCode, []int{http.StatusCreated}) + defer readAndCloseBody(resp.body) + return resp.headers, checkRespCode(resp.statusCode, expectedResponseCodes) } // creates a resource depending on the specified resource type, doesn't close the response body -func (f FileServiceClient) createResourceNoClose(path string, res resourceType, extraHeaders map[string]string) (*storageResponse, error) { +func (f FileServiceClient) createResourceNoClose(path string, res resourceType, urlParams url.Values, extraHeaders map[string]string) (*storageResponse, error) { if err := f.checkForStorageEmulator(); err != nil { return nil, err } values := getURLInitValues(compNone, res) - uri := f.client.getEndpoint(fileServiceName, path, values) + combinedParams := mergeParams(values, urlParams) + uri := f.client.getEndpoint(fileServiceName, path, combinedParams) extraHeaders = f.client.protectUserAgent(extraHeaders) headers := mergeHeaders(f.client.getStandardHeaders(), extraHeaders) @@ -216,12 +232,12 @@ func (f FileServiceClient) createResourceNoClose(path string, res resourceType, } // returns HTTP header data for the specified directory or share -func (f FileServiceClient) getResourceHeaders(path string, comp compType, res resourceType, verb string) (http.Header, error) { - resp, err := f.getResourceNoClose(path, comp, res, verb, nil) +func (f FileServiceClient) getResourceHeaders(path string, comp compType, res resourceType, params url.Values, verb string) (http.Header, error) { + resp, err := f.getResourceNoClose(path, comp, res, params, verb, nil) if err != nil { return nil, err } - defer resp.body.Close() + defer readAndCloseBody(resp.body) if err = 
checkRespCode(resp.statusCode, []int{http.StatusOK}); err != nil { return nil, err @@ -231,36 +247,35 @@ func (f FileServiceClient) getResourceHeaders(path string, comp compType, res re } // gets the specified resource, doesn't close the response body -func (f FileServiceClient) getResourceNoClose(path string, comp compType, res resourceType, verb string, extraHeaders map[string]string) (*storageResponse, error) { +func (f FileServiceClient) getResourceNoClose(path string, comp compType, res resourceType, params url.Values, verb string, extraHeaders map[string]string) (*storageResponse, error) { if err := f.checkForStorageEmulator(); err != nil { return nil, err } - params := getURLInitValues(comp, res) + params = mergeParams(params, getURLInitValues(comp, res)) uri := f.client.getEndpoint(fileServiceName, path, params) - extraHeaders = f.client.protectUserAgent(extraHeaders) headers := mergeHeaders(f.client.getStandardHeaders(), extraHeaders) return f.client.exec(verb, uri, headers, nil, f.auth) } // deletes the resource and returns the response -func (f FileServiceClient) deleteResource(path string, res resourceType) error { - resp, err := f.deleteResourceNoClose(path, res) +func (f FileServiceClient) deleteResource(path string, res resourceType, options *FileRequestOptions) error { + resp, err := f.deleteResourceNoClose(path, res, options) if err != nil { return err } - defer resp.body.Close() + defer readAndCloseBody(resp.body) return checkRespCode(resp.statusCode, []int{http.StatusAccepted}) } // deletes the resource and returns the response, doesn't close the response body -func (f FileServiceClient) deleteResourceNoClose(path string, res resourceType) (*storageResponse, error) { +func (f FileServiceClient) deleteResourceNoClose(path string, res resourceType, options *FileRequestOptions) (*storageResponse, error) { if err := f.checkForStorageEmulator(); err != nil { return nil, err } - values := getURLInitValues(compNone, res) + values := mergeParams(getURLInitValues(compNone, res), prepareOptions(options)) uri := f.client.getEndpoint(fileServiceName, path, values) return f.client.exec(http.MethodDelete, uri, f.client.getStandardHeaders(), nil, f.auth) } @@ -279,21 +294,13 @@ func mergeMDIntoExtraHeaders(metadata, extraHeaders map[string]string) map[strin return extraHeaders } -// merges extraHeaders into headers and returns headers -func mergeHeaders(headers, extraHeaders map[string]string) map[string]string { - for k, v := range extraHeaders { - headers[k] = v - } - return headers -} - // sets extra header data for the specified resource -func (f FileServiceClient) setResourceHeaders(path string, comp compType, res resourceType, extraHeaders map[string]string) (http.Header, error) { +func (f FileServiceClient) setResourceHeaders(path string, comp compType, res resourceType, extraHeaders map[string]string, options *FileRequestOptions) (http.Header, error) { if err := f.checkForStorageEmulator(); err != nil { return nil, err } - params := getURLInitValues(comp, res) + params := mergeParams(getURLInitValues(comp, res), prepareOptions(options)) uri := f.client.getEndpoint(fileServiceName, path, params) extraHeaders = f.client.protectUserAgent(extraHeaders) headers := mergeHeaders(f.client.getStandardHeaders(), extraHeaders) @@ -302,54 +309,11 @@ func (f FileServiceClient) setResourceHeaders(path string, comp compType, res re if err != nil { return nil, err } - defer resp.body.Close() + defer readAndCloseBody(resp.body) return resp.headers, checkRespCode(resp.statusCode, 
[]int{http.StatusOK})
 }
 
-// gets metadata for the specified resource
-func (f FileServiceClient) getMetadata(path string, res resourceType) (map[string]string, error) {
-	if err := f.checkForStorageEmulator(); err != nil {
-		return nil, err
-	}
-
-	headers, err := f.getResourceHeaders(path, compMetadata, res, http.MethodGet)
-	if err != nil {
-		return nil, err
-	}
-
-	return getMetadataFromHeaders(headers), nil
-}
-
-// returns a map of custom metadata values from the specified HTTP header
-func getMetadataFromHeaders(header http.Header) map[string]string {
-	metadata := make(map[string]string)
-	for k, v := range header {
-		// Can't trust CanonicalHeaderKey() to munge case
-		// reliably. "_" is allowed in identifiers:
-		// https://msdn.microsoft.com/en-us/library/azure/dd179414.aspx
-		// https://msdn.microsoft.com/library/aa664670(VS.71).aspx
-		// http://tools.ietf.org/html/rfc7230#section-3.2
-		// ...but "_" is considered invalid by
-		// CanonicalMIMEHeaderKey in
-		// https://golang.org/src/net/textproto/reader.go?s=14615:14659#L542
-		// so k can be "X-Ms-Meta-Foo" or "x-ms-meta-foo_bar".
-		k = strings.ToLower(k)
-		if len(v) == 0 || !strings.HasPrefix(k, strings.ToLower(userDefinedMetadataHeaderPrefix)) {
-			continue
-		}
-		// metadata["foo"] = content of the last X-Ms-Meta-Foo header
-		k = k[len(userDefinedMetadataHeaderPrefix):]
-		metadata[k] = v[len(v)-1]
-	}
-
-	if len(metadata) == 0 {
-		return nil
-	}
-
-	return metadata
-}
-
 //checkForStorageEmulator determines if the client is setup for use with
 //Azure Storage Emulator, and returns a relevant error
 func (f FileServiceClient) checkForStorageEmulator() error {
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/storage/leaseblob.go b/vendor/github.com/Azure/azure-sdk-for-go/storage/leaseblob.go
new file mode 100644
index 000000000..415b74018
--- /dev/null
+++ b/vendor/github.com/Azure/azure-sdk-for-go/storage/leaseblob.go
@@ -0,0 +1,187 @@
+package storage
+
+import (
+	"errors"
+	"net/http"
+	"net/url"
+	"strconv"
+	"time"
+)
+
+// lease constants.
+const (
+	leaseHeaderPrefix = "x-ms-lease-"
+	headerLeaseID     = "x-ms-lease-id"
+	leaseAction       = "x-ms-lease-action"
+	leaseBreakPeriod  = "x-ms-lease-break-period"
+	leaseDuration     = "x-ms-lease-duration"
+	leaseProposedID   = "x-ms-proposed-lease-id"
+	leaseTime         = "x-ms-lease-time"
+
+	acquireLease = "acquire"
+	renewLease   = "renew"
+	changeLease  = "change"
+	releaseLease = "release"
+	breakLease   = "break"
+)
+
+// leaseCommonPut is common PUT code for the various acquire/release/break etc. functions.
+func (b *Blob) leaseCommonPut(headers map[string]string, expectedStatus int, options *LeaseOptions) (http.Header, error) { + params := url.Values{"comp": {"lease"}} + + if options != nil { + params = addTimeout(params, options.Timeout) + headers = mergeHeaders(headers, headersFromStruct(*options)) + } + uri := b.Container.bsc.client.getEndpoint(blobServiceName, b.buildPath(), params) + + resp, err := b.Container.bsc.client.exec(http.MethodPut, uri, headers, nil, b.Container.bsc.auth) + if err != nil { + return nil, err + } + defer readAndCloseBody(resp.body) + + if err := checkRespCode(resp.statusCode, []int{expectedStatus}); err != nil { + return nil, err + } + + return resp.headers, nil +} + +// LeaseOptions includes options for all operations regarding leasing blobs +type LeaseOptions struct { + Timeout uint + Origin string `header:"Origin"` + IfMatch string `header:"If-Match"` + IfNoneMatch string `header:"If-None-Match"` + IfModifiedSince *time.Time `header:"If-Modified-Since"` + IfUnmodifiedSince *time.Time `header:"If-Unmodified-Since"` + RequestID string `header:"x-ms-client-request-id"` +} + +// AcquireLease creates a lease for a blob +// returns leaseID acquired +// In API Versions starting on 2012-02-12, the minimum leaseTimeInSeconds is 15, the maximum +// non-infinite leaseTimeInSeconds is 60. To specify an infinite lease, provide the value -1. +// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Lease-Blob +func (b *Blob) AcquireLease(leaseTimeInSeconds int, proposedLeaseID string, options *LeaseOptions) (returnedLeaseID string, err error) { + headers := b.Container.bsc.client.getStandardHeaders() + headers[leaseAction] = acquireLease + + if leaseTimeInSeconds == -1 { + // Do nothing, but don't trigger the following clauses. + } else if leaseTimeInSeconds > 60 || b.Container.bsc.client.apiVersion < "2012-02-12" { + leaseTimeInSeconds = 60 + } else if leaseTimeInSeconds < 15 { + leaseTimeInSeconds = 15 + } + + headers[leaseDuration] = strconv.Itoa(leaseTimeInSeconds) + + if proposedLeaseID != "" { + headers[leaseProposedID] = proposedLeaseID + } + + respHeaders, err := b.leaseCommonPut(headers, http.StatusCreated, options) + if err != nil { + return "", err + } + + returnedLeaseID = respHeaders.Get(http.CanonicalHeaderKey(headerLeaseID)) + + if returnedLeaseID != "" { + return returnedLeaseID, nil + } + + return "", errors.New("LeaseID not returned") +} + +// BreakLease breaks the lease for a blob +// Returns the timeout remaining in the lease in seconds +// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Lease-Blob +func (b *Blob) BreakLease(options *LeaseOptions) (breakTimeout int, err error) { + headers := b.Container.bsc.client.getStandardHeaders() + headers[leaseAction] = breakLease + return b.breakLeaseCommon(headers, options) +} + +// BreakLeaseWithBreakPeriod breaks the lease for a blob +// breakPeriodInSeconds is used to determine how long until new lease can be created. 
+// Returns the timeout remaining in the lease in seconds +// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Lease-Blob +func (b *Blob) BreakLeaseWithBreakPeriod(breakPeriodInSeconds int, options *LeaseOptions) (breakTimeout int, err error) { + headers := b.Container.bsc.client.getStandardHeaders() + headers[leaseAction] = breakLease + headers[leaseBreakPeriod] = strconv.Itoa(breakPeriodInSeconds) + return b.breakLeaseCommon(headers, options) +} + +// breakLeaseCommon is common code for both version of BreakLease (with and without break period) +func (b *Blob) breakLeaseCommon(headers map[string]string, options *LeaseOptions) (breakTimeout int, err error) { + + respHeaders, err := b.leaseCommonPut(headers, http.StatusAccepted, options) + if err != nil { + return 0, err + } + + breakTimeoutStr := respHeaders.Get(http.CanonicalHeaderKey(leaseTime)) + if breakTimeoutStr != "" { + breakTimeout, err = strconv.Atoi(breakTimeoutStr) + if err != nil { + return 0, err + } + } + + return breakTimeout, nil +} + +// ChangeLease changes a lease ID for a blob +// Returns the new LeaseID acquired +// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Lease-Blob +func (b *Blob) ChangeLease(currentLeaseID string, proposedLeaseID string, options *LeaseOptions) (newLeaseID string, err error) { + headers := b.Container.bsc.client.getStandardHeaders() + headers[leaseAction] = changeLease + headers[headerLeaseID] = currentLeaseID + headers[leaseProposedID] = proposedLeaseID + + respHeaders, err := b.leaseCommonPut(headers, http.StatusOK, options) + if err != nil { + return "", err + } + + newLeaseID = respHeaders.Get(http.CanonicalHeaderKey(headerLeaseID)) + if newLeaseID != "" { + return newLeaseID, nil + } + + return "", errors.New("LeaseID not returned") +} + +// ReleaseLease releases the lease for a blob +// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Lease-Blob +func (b *Blob) ReleaseLease(currentLeaseID string, options *LeaseOptions) error { + headers := b.Container.bsc.client.getStandardHeaders() + headers[leaseAction] = releaseLease + headers[headerLeaseID] = currentLeaseID + + _, err := b.leaseCommonPut(headers, http.StatusOK, options) + if err != nil { + return err + } + + return nil +} + +// RenewLease renews the lease for a blob as per https://msdn.microsoft.com/en-us/library/azure/ee691972.aspx +func (b *Blob) RenewLease(currentLeaseID string, options *LeaseOptions) error { + headers := b.Container.bsc.client.getStandardHeaders() + headers[leaseAction] = renewLease + headers[headerLeaseID] = currentLeaseID + + _, err := b.leaseCommonPut(headers, http.StatusOK, options) + if err != nil { + return err + } + + return nil +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/storage/message.go b/vendor/github.com/Azure/azure-sdk-for-go/storage/message.go new file mode 100644 index 000000000..3ededcd42 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/storage/message.go @@ -0,0 +1,153 @@ +package storage + +import ( + "encoding/xml" + "fmt" + "net/http" + "net/url" + "strconv" + "time" +) + +// Message represents an Azure message. 
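// [Editor's aside: an illustrative lease lifecycle for the leaseblob.go API
// above, not part of this patch; container and blob names are hypothetical.
// Per AcquireLease, durations of 15 to 60 seconds are accepted, or -1 for an
// infinite lease.
//
//	bsc := cli.GetBlobService()
//	blob := bsc.GetContainerReference("data").GetBlobReference("ledger")
//	leaseID, err := blob.AcquireLease(30, "", nil) // "" lets the service pick the lease ID
//	if err != nil {
//		log.Fatal(err)
//	}
//	defer blob.ReleaseLease(leaseID, nil)
//	// ... mutate the blob while holding the lease ...
//	if err := blob.RenewLease(leaseID, nil); err != nil {
//		log.Fatal(err)
//	}
// ]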
+type Message struct {
+	Queue        *Queue
+	Text         string      `xml:"MessageText"`
+	ID           string      `xml:"MessageId"`
+	Insertion    TimeRFC1123 `xml:"InsertionTime"`
+	Expiration   TimeRFC1123 `xml:"ExpirationTime"`
+	PopReceipt   string      `xml:"PopReceipt"`
+	NextVisible  TimeRFC1123 `xml:"TimeNextVisible"`
+	DequeueCount int         `xml:"DequeueCount"`
+}
+
+func (m *Message) buildPath() string {
+	return fmt.Sprintf("%s/%s", m.Queue.buildPathMessages(), m.ID)
+}
+
+// PutMessageOptions is the set of options that can be specified for a Put Message
+// operation. A zero struct does not use any preferences for the request.
+type PutMessageOptions struct {
+	Timeout           uint
+	VisibilityTimeout int
+	MessageTTL        int
+	RequestID         string `header:"x-ms-client-request-id"`
+}
+
+// Put operation adds a new message to the back of the message queue.
+//
+// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Put-Message
+func (m *Message) Put(options *PutMessageOptions) error {
+	query := url.Values{}
+	headers := m.Queue.qsc.client.getStandardHeaders()
+
+	req := putMessageRequest{MessageText: m.Text}
+	body, nn, err := xmlMarshal(req)
+	if err != nil {
+		return err
+	}
+	headers["Content-Length"] = strconv.Itoa(nn)
+
+	if options != nil {
+		if options.VisibilityTimeout != 0 {
+			query.Set("visibilitytimeout", strconv.Itoa(options.VisibilityTimeout))
+		}
+		if options.MessageTTL != 0 {
+			query.Set("messagettl", strconv.Itoa(options.MessageTTL))
+		}
+		query = addTimeout(query, options.Timeout)
+		headers = mergeHeaders(headers, headersFromStruct(*options))
+	}
+
+	uri := m.Queue.qsc.client.getEndpoint(queueServiceName, m.Queue.buildPathMessages(), query)
+	resp, err := m.Queue.qsc.client.exec(http.MethodPost, uri, headers, body, m.Queue.qsc.auth)
+	if err != nil {
+		return err
+	}
+	defer readAndCloseBody(resp.body)
+
+	err = xmlUnmarshal(resp.body, m)
+	if err != nil {
+		return err
+	}
+	return checkRespCode(resp.statusCode, []int{http.StatusCreated})
+}
+
+// UpdateMessageOptions is the set of options that can be specified for an Update Message
+// operation. A zero struct does not use any preferences for the request.
+type UpdateMessageOptions struct {
+	Timeout           uint
+	VisibilityTimeout int
+	RequestID         string `header:"x-ms-client-request-id"`
+}
+
+// Update operation updates the specified message.
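// [Editor's aside: an illustrative enqueue using the Put operation above, not
// part of this patch. GetQueueService/GetQueueReference are assumed from
// queueserviceclient.go and queue.go, and the queue name is hypothetical.
//
//	qsc := cli.GetQueueService()
//	q := qsc.GetQueueReference("jobs")
//	msg := storage.Message{Queue: q, Text: "encode video 42"}
//	opts := storage.PutMessageOptions{
//		MessageTTL:        3600, // seconds the message lives in the queue
//		VisibilityTimeout: 10,   // seconds before it first becomes visible
//	}
//	if err := msg.Put(&opts); err != nil {
//		log.Fatal(err)
//	}
//	// Put unmarshals the service response back into msg (see the xmlUnmarshal
//	// call above), so fields such as msg.ID are populated on success.
// ]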
+//
+// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Update-Message
+func (m *Message) Update(options *UpdateMessageOptions) error {
+	query := url.Values{}
+	if m.PopReceipt != "" {
+		query.Set("popreceipt", m.PopReceipt)
+	}
+
+	headers := m.Queue.qsc.client.getStandardHeaders()
+	req := putMessageRequest{MessageText: m.Text}
+	body, nn, err := xmlMarshal(req)
+	if err != nil {
+		return err
+	}
+	headers["Content-Length"] = strconv.Itoa(nn)
+
+	if options != nil {
+		if options.VisibilityTimeout != 0 {
+			query.Set("visibilitytimeout", strconv.Itoa(options.VisibilityTimeout))
+		}
+		query = addTimeout(query, options.Timeout)
+		headers = mergeHeaders(headers, headersFromStruct(*options))
+	}
+	uri := m.Queue.qsc.client.getEndpoint(queueServiceName, m.buildPath(), query)
+
+	resp, err := m.Queue.qsc.client.exec(http.MethodPut, uri, headers, body, m.Queue.qsc.auth)
+	if err != nil {
+		return err
+	}
+	defer readAndCloseBody(resp.body)
+
+	m.PopReceipt = resp.headers.Get("x-ms-popreceipt")
+	nextTimeStr := resp.headers.Get("x-ms-time-next-visible")
+	if nextTimeStr != "" {
+		nextTime, err := time.Parse(time.RFC1123, nextTimeStr)
+		if err != nil {
+			return err
+		}
+		m.NextVisible = TimeRFC1123(nextTime)
+	}
+
+	return checkRespCode(resp.statusCode, []int{http.StatusNoContent})
+}
+
+// Delete operation deletes the specified message.
+//
+// See https://msdn.microsoft.com/en-us/library/azure/dd179347.aspx
+func (m *Message) Delete(options *QueueServiceOptions) error {
+	params := url.Values{"popreceipt": {m.PopReceipt}}
+	headers := m.Queue.qsc.client.getStandardHeaders()
+
+	if options != nil {
+		params = addTimeout(params, options.Timeout)
+		headers = mergeHeaders(headers, headersFromStruct(*options))
+	}
+	uri := m.Queue.qsc.client.getEndpoint(queueServiceName, m.buildPath(), params)
+
+	resp, err := m.Queue.qsc.client.exec(http.MethodDelete, uri, headers, nil, m.Queue.qsc.auth)
+	if err != nil {
+		return err
+	}
+	readAndCloseBody(resp.body)
+	return checkRespCode(resp.statusCode, []int{http.StatusNoContent})
+}
+
+type putMessageRequest struct {
+	XMLName     xml.Name `xml:"QueueMessage"`
+	MessageText string   `xml:"MessageText"`
+}
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/storage/odata.go b/vendor/github.com/Azure/azure-sdk-for-go/storage/odata.go
new file mode 100644
index 000000000..41d832e2b
--- /dev/null
+++ b/vendor/github.com/Azure/azure-sdk-for-go/storage/odata.go
@@ -0,0 +1,33 @@
+package storage
+
+// MetadataLevel determines if operations should return a payload,
+// and its level of detail.
+type MetadataLevel string
+
+// These consts are meant to help with OData supported operations.
+const (
+	OdataTypeSuffix = "@odata.type"
+
+	// Types
+
+	OdataBinary   = "Edm.Binary"
+	OdataDateTime = "Edm.DateTime"
+	OdataGUID     = "Edm.Guid"
+	OdataInt64    = "Edm.Int64"
+
+	// Query options
+
+	OdataFilter  = "$filter"
+	OdataOrderBy = "$orderby"
+	OdataTop     = "$top"
+	OdataSkip    = "$skip"
+	OdataCount   = "$count"
+	OdataExpand  = "$expand"
+	OdataSelect  = "$select"
+	OdataSearch  = "$search"
+
+	EmptyPayload    MetadataLevel = ""
+	NoMetadata      MetadataLevel = "application/json;odata=nometadata"
+	MinimalMetadata MetadataLevel = "application/json;odata=minimalmetadata"
+	FullMetadata    MetadataLevel = "application/json;odata=fullmetadata"
+)
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/storage/pageblob.go b/vendor/github.com/Azure/azure-sdk-for-go/storage/pageblob.go
new file mode 100644
index 000000000..cdaf4a913
--- /dev/null
+++ b/vendor/github.com/Azure/azure-sdk-for-go/storage/pageblob.go
@@ -0,0 +1,190 @@
+package storage
+
+import (
+	"encoding/xml"
+	"errors"
+	"fmt"
+	"io"
+	"net/http"
+	"net/url"
+	"time"
+)
+
+// GetPageRangesResponse contains the response fields from
+// Get Page Ranges call.
+//
+// See https://msdn.microsoft.com/en-us/library/azure/ee691973.aspx
+type GetPageRangesResponse struct {
+	XMLName  xml.Name    `xml:"PageList"`
+	PageList []PageRange `xml:"PageRange"`
+}
+
+// PageRange contains information about a page of a page blob from
+// Get Pages Range call.
+//
+// See https://msdn.microsoft.com/en-us/library/azure/ee691973.aspx
+type PageRange struct {
+	Start int64 `xml:"Start"`
+	End   int64 `xml:"End"`
+}
+
+var (
+	errBlobCopyAborted    = errors.New("storage: blob copy is aborted")
+	errBlobCopyIDMismatch = errors.New("storage: blob copy id is a mismatch")
+)
+
+// PutPageOptions includes the options for a put page operation
+type PutPageOptions struct {
+	Timeout                           uint
+	LeaseID                           string     `header:"x-ms-lease-id"`
+	IfSequenceNumberLessThanOrEqualTo *int       `header:"x-ms-if-sequence-number-le"`
+	IfSequenceNumberLessThan          *int       `header:"x-ms-if-sequence-number-lt"`
+	IfSequenceNumberEqualTo           *int       `header:"x-ms-if-sequence-number-eq"`
+	IfModifiedSince                   *time.Time `header:"If-Modified-Since"`
+	IfUnmodifiedSince                 *time.Time `header:"If-Unmodified-Since"`
+	IfMatch                           string     `header:"If-Match"`
+	IfNoneMatch                       string     `header:"If-None-Match"`
+	RequestID                         string     `header:"x-ms-client-request-id"`
+}
+
+// WriteRange writes a range of pages to a page blob.
+// Ranges must be aligned to 512-byte boundaries and the chunk size must be
+// a multiple of 512.
+//
+// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Put-Page
+func (b *Blob) WriteRange(blobRange BlobRange, bytes io.Reader, options *PutPageOptions) error {
+	if bytes == nil {
+		return errors.New("bytes cannot be nil")
+	}
+	return b.modifyRange(blobRange, bytes, options)
+}
+
+// ClearRange clears the given range in a page blob.
+// Ranges must be aligned to 512-byte boundaries and the chunk size must be
+// a multiple of 512.
+//
+// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Put-Page
+func (b *Blob) ClearRange(blobRange BlobRange, options *PutPageOptions) error {
+	return b.modifyRange(blobRange, nil, options)
+}
+
+func (b *Blob) modifyRange(blobRange BlobRange, bytes io.Reader, options *PutPageOptions) error {
+	if blobRange.End < blobRange.Start {
+		return errors.New("the value for rangeEnd must be greater than or equal to rangeStart")
+	}
+	if blobRange.Start%512 != 0 {
+		return errors.New("the value for rangeStart must be a multiple of 512")
+	}
+	if blobRange.End%512 != 511 {
+		return errors.New("the value for rangeEnd must be a multiple of 512 minus 1, e.g. 511 or 1023")
+	}
+
+	params := url.Values{"comp": {"page"}}
+
+	// default to clear
+	write := "clear"
+	var cl uint64
+
+	// if bytes is not nil then this is an update operation
+	if bytes != nil {
+		write = "update"
+		cl = (blobRange.End - blobRange.Start) + 1
+	}
+
+	headers := b.Container.bsc.client.getStandardHeaders()
+	headers["x-ms-blob-type"] = string(BlobTypePage)
+	headers["x-ms-page-write"] = write
+	headers["x-ms-range"] = blobRange.String()
+	headers["Content-Length"] = fmt.Sprintf("%v", cl)
+
+	if options != nil {
+		params = addTimeout(params, options.Timeout)
+		headers = mergeHeaders(headers, headersFromStruct(*options))
+	}
+	uri := b.Container.bsc.client.getEndpoint(blobServiceName, b.buildPath(), params)
+
+	resp, err := b.Container.bsc.client.exec(http.MethodPut, uri, headers, bytes, b.Container.bsc.auth)
+	if err != nil {
+		return err
+	}
+	readAndCloseBody(resp.body)
+
+	return checkRespCode(resp.statusCode, []int{http.StatusCreated})
+}
+
+// GetPageRangesOptions includes the options for a get page ranges operation
+type GetPageRangesOptions struct {
+	Timeout          uint
+	Snapshot         *time.Time
+	PreviousSnapshot *time.Time
+	Range            *BlobRange
+	LeaseID          string `header:"x-ms-lease-id"`
+	RequestID        string `header:"x-ms-client-request-id"`
+}
+
+// GetPageRanges returns the list of valid page ranges for a page blob.
+//
+// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Get-Page-Ranges
+func (b *Blob) GetPageRanges(options *GetPageRangesOptions) (GetPageRangesResponse, error) {
+	params := url.Values{"comp": {"pagelist"}}
+	headers := b.Container.bsc.client.getStandardHeaders()
+
+	if options != nil {
+		params = addTimeout(params, options.Timeout)
+		params = addSnapshot(params, options.Snapshot)
+		if options.PreviousSnapshot != nil {
+			params.Add("prevsnapshot", timeRfc1123Formatted(*options.PreviousSnapshot))
+		}
+		if options.Range != nil {
+			headers["Range"] = options.Range.String()
+		}
+		headers = mergeHeaders(headers, headersFromStruct(*options))
+	}
+	uri := b.Container.bsc.client.getEndpoint(blobServiceName, b.buildPath(), params)
+
+	var out GetPageRangesResponse
+	resp, err := b.Container.bsc.client.exec(http.MethodGet, uri, headers, nil, b.Container.bsc.auth)
+	if err != nil {
+		return out, err
+	}
+	defer resp.body.Close()
+
+	if err = checkRespCode(resp.statusCode, []int{http.StatusOK}); err != nil {
+		return out, err
+	}
+	err = xmlUnmarshal(resp.body, &out)
+	return out, err
+}
+
+// PutPageBlob initializes an empty page blob with specified name and maximum
+// size in bytes (size must be aligned to a 512-byte boundary). A page blob must
+// be created using this method before writing pages.
+//
+// See CreateBlockBlobFromReader for more info on creating blobs.
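+//
+// A minimal sketch, assuming a *Container value named cnt (names are
+// illustrative, not part of the upstream file):
+//
+//	b := cnt.GetBlobReference("disk.vhd")
+//	b.Properties.ContentLength = 10 * 1024 * 1024 // 512-byte aligned
+//	err := b.PutPageBlob(nil)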
+// +// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Put-Blob +func (b *Blob) PutPageBlob(options *PutBlobOptions) error { + if b.Properties.ContentLength%512 != 0 { + return errors.New("Content length must be aligned to a 512-byte boundary") + } + + params := url.Values{} + headers := b.Container.bsc.client.getStandardHeaders() + headers["x-ms-blob-type"] = string(BlobTypePage) + headers["x-ms-blob-content-length"] = fmt.Sprintf("%v", b.Properties.ContentLength) + headers["x-ms-blob-sequence-number"] = fmt.Sprintf("%v", b.Properties.SequenceNumber) + headers = mergeHeaders(headers, headersFromStruct(b.Properties)) + headers = b.Container.bsc.client.addMetadataToHeaders(headers, b.Metadata) + + if options != nil { + params = addTimeout(params, options.Timeout) + headers = mergeHeaders(headers, headersFromStruct(*options)) + } + uri := b.Container.bsc.client.getEndpoint(blobServiceName, b.buildPath(), params) + + resp, err := b.Container.bsc.client.exec(http.MethodPut, uri, headers, nil, b.Container.bsc.auth) + if err != nil { + return err + } + return b.respondCreation(resp, BlobTypePage) +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/storage/queue.go b/vendor/github.com/Azure/azure-sdk-for-go/storage/queue.go index 4dbb67733..c2c7f742c 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/storage/queue.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/storage/queue.go @@ -2,168 +2,139 @@ package storage import ( "encoding/xml" + "errors" "fmt" + "io" "net/http" "net/url" "strconv" - "strings" + "time" ) const ( // casing is per Golang's http.Header canonicalizing the header names. - approximateMessagesCountHeader = "X-Ms-Approximate-Messages-Count" - userDefinedMetadataHeaderPrefix = "X-Ms-Meta-" + approximateMessagesCountHeader = "X-Ms-Approximate-Messages-Count" ) -// QueueServiceClient contains operations for Microsoft Azure Queue Storage -// Service. -type QueueServiceClient struct { - client Client - auth authentication +// QueueAccessPolicy represents each access policy in the queue ACL. +type QueueAccessPolicy struct { + ID string + StartTime time.Time + ExpiryTime time.Time + CanRead bool + CanAdd bool + CanUpdate bool + CanProcess bool } -func pathForQueue(queue string) string { return fmt.Sprintf("/%s", queue) } -func pathForQueueMessages(queue string) string { return fmt.Sprintf("/%s/messages", queue) } -func pathForMessage(queue, name string) string { return fmt.Sprintf("/%s/messages/%s", queue, name) } - -type putMessageRequest struct { - XMLName xml.Name `xml:"QueueMessage"` - MessageText string `xml:"MessageText"` +// QueuePermissions represents the queue ACLs. +type QueuePermissions struct { + AccessPolicies []QueueAccessPolicy } -// PutMessageParameters is the set of options can be specified for Put Messsage -// operation. A zero struct does not use any preferences for the request. -type PutMessageParameters struct { - VisibilityTimeout int - MessageTTL int +// SetQueuePermissionOptions includes options for a set queue permissions operation +type SetQueuePermissionOptions struct { + Timeout uint + RequestID string `header:"x-ms-client-request-id"` } -func (p PutMessageParameters) getParameters() url.Values { - out := url.Values{} - if p.VisibilityTimeout != 0 { - out.Set("visibilitytimeout", strconv.Itoa(p.VisibilityTimeout)) - } - if p.MessageTTL != 0 { - out.Set("messagettl", strconv.Itoa(p.MessageTTL)) - } - return out +// Queue represents an Azure queue. 
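+//
+// A minimal sketch, assuming a constructed Client value named cli
+// (illustrative only):
+//
+//	qsc := cli.GetQueueService()
+//	queue := qsc.GetQueueReference("jobs")
+//	err := queue.Create(nil)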
+type Queue struct { + qsc *QueueServiceClient + Name string + Metadata map[string]string + AproxMessageCount uint64 } -// GetMessagesParameters is the set of options can be specified for Get -// Messsages operation. A zero struct does not use any preferences for the -// request. -type GetMessagesParameters struct { - NumOfMessages int - VisibilityTimeout int +func (q *Queue) buildPath() string { + return fmt.Sprintf("/%s", q.Name) } -func (p GetMessagesParameters) getParameters() url.Values { - out := url.Values{} - if p.NumOfMessages != 0 { - out.Set("numofmessages", strconv.Itoa(p.NumOfMessages)) - } - if p.VisibilityTimeout != 0 { - out.Set("visibilitytimeout", strconv.Itoa(p.VisibilityTimeout)) - } - return out +func (q *Queue) buildPathMessages() string { + return fmt.Sprintf("%s/messages", q.buildPath()) } -// PeekMessagesParameters is the set of options can be specified for Peek -// Messsage operation. A zero struct does not use any preferences for the -// request. -type PeekMessagesParameters struct { - NumOfMessages int +// QueueServiceOptions includes options for some queue service operations +type QueueServiceOptions struct { + Timeout uint + RequestID string `header:"x-ms-client-request-id"` } -func (p PeekMessagesParameters) getParameters() url.Values { - out := url.Values{"peekonly": {"true"}} // Required for peek operation - if p.NumOfMessages != 0 { - out.Set("numofmessages", strconv.Itoa(p.NumOfMessages)) - } - return out -} - -// UpdateMessageParameters is the set of options can be specified for Update Messsage -// operation. A zero struct does not use any preferences for the request. -type UpdateMessageParameters struct { - PopReceipt string - VisibilityTimeout int -} - -func (p UpdateMessageParameters) getParameters() url.Values { - out := url.Values{} - if p.PopReceipt != "" { - out.Set("popreceipt", p.PopReceipt) - } - if p.VisibilityTimeout != 0 { - out.Set("visibilitytimeout", strconv.Itoa(p.VisibilityTimeout)) - } - return out -} - -// GetMessagesResponse represents a response returned from Get Messages -// operation. -type GetMessagesResponse struct { - XMLName xml.Name `xml:"QueueMessagesList"` - QueueMessagesList []GetMessageResponse `xml:"QueueMessage"` -} - -// GetMessageResponse represents a QueueMessage object returned from Get -// Messages operation response. -type GetMessageResponse struct { - MessageID string `xml:"MessageId"` - InsertionTime string `xml:"InsertionTime"` - ExpirationTime string `xml:"ExpirationTime"` - PopReceipt string `xml:"PopReceipt"` - TimeNextVisible string `xml:"TimeNextVisible"` - DequeueCount int `xml:"DequeueCount"` - MessageText string `xml:"MessageText"` -} - -// PeekMessagesResponse represents a response returned from Get Messages -// operation. -type PeekMessagesResponse struct { - XMLName xml.Name `xml:"QueueMessagesList"` - QueueMessagesList []PeekMessageResponse `xml:"QueueMessage"` -} - -// PeekMessageResponse represents a QueueMessage object returned from Peek -// Messages operation response. -type PeekMessageResponse struct { - MessageID string `xml:"MessageId"` - InsertionTime string `xml:"InsertionTime"` - ExpirationTime string `xml:"ExpirationTime"` - DequeueCount int `xml:"DequeueCount"` - MessageText string `xml:"MessageText"` -} - -// QueueMetadataResponse represents user defined metadata and queue -// properties on a specific queue. +// Create operation creates a queue under the given account. 
// -// See https://msdn.microsoft.com/en-us/library/azure/dd179384.aspx -type QueueMetadataResponse struct { - ApproximateMessageCount int - UserDefinedMetadata map[string]string +// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Create-Queue4 +func (q *Queue) Create(options *QueueServiceOptions) error { + params := url.Values{} + headers := q.qsc.client.getStandardHeaders() + headers = q.qsc.client.addMetadataToHeaders(headers, q.Metadata) + + if options != nil { + params = addTimeout(params, options.Timeout) + headers = mergeHeaders(headers, headersFromStruct(*options)) + } + uri := q.qsc.client.getEndpoint(queueServiceName, q.buildPath(), params) + + resp, err := q.qsc.client.exec(http.MethodPut, uri, headers, nil, q.qsc.auth) + if err != nil { + return err + } + readAndCloseBody(resp.body) + return checkRespCode(resp.statusCode, []int{http.StatusCreated}) +} + +// Delete operation permanently deletes the specified queue. +// +// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Delete-Queue3 +func (q *Queue) Delete(options *QueueServiceOptions) error { + params := url.Values{} + headers := q.qsc.client.getStandardHeaders() + + if options != nil { + params = addTimeout(params, options.Timeout) + headers = mergeHeaders(headers, headersFromStruct(*options)) + } + uri := q.qsc.client.getEndpoint(queueServiceName, q.buildPath(), params) + resp, err := q.qsc.client.exec(http.MethodDelete, uri, headers, nil, q.qsc.auth) + if err != nil { + return err + } + readAndCloseBody(resp.body) + return checkRespCode(resp.statusCode, []int{http.StatusNoContent}) +} + +// Exists returns true if a queue with given name exists. +func (q *Queue) Exists() (bool, error) { + uri := q.qsc.client.getEndpoint(queueServiceName, q.buildPath(), url.Values{"comp": {"metadata"}}) + resp, err := q.qsc.client.exec(http.MethodGet, uri, q.qsc.client.getStandardHeaders(), nil, q.qsc.auth) + if resp != nil { + defer readAndCloseBody(resp.body) + if resp.statusCode == http.StatusOK || resp.statusCode == http.StatusNotFound { + return resp.statusCode == http.StatusOK, nil + } + } + return false, err } // SetMetadata operation sets user-defined metadata on the specified queue. // Metadata is associated with the queue as name-value pairs. 
// -// See https://msdn.microsoft.com/en-us/library/azure/dd179348.aspx -func (c QueueServiceClient) SetMetadata(name string, metadata map[string]string) error { - uri := c.client.getEndpoint(queueServiceName, pathForQueue(name), url.Values{"comp": []string{"metadata"}}) - metadata = c.client.protectUserAgent(metadata) - headers := c.client.getStandardHeaders() - for k, v := range metadata { - headers[userDefinedMetadataHeaderPrefix+k] = v - } +// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Set-Queue-Metadata +func (q *Queue) SetMetadata(options *QueueServiceOptions) error { + params := url.Values{"comp": {"metadata"}} + headers := q.qsc.client.getStandardHeaders() + headers = q.qsc.client.addMetadataToHeaders(headers, q.Metadata) - resp, err := c.client.exec(http.MethodPut, uri, headers, nil, c.auth) + if options != nil { + params = addTimeout(params, options.Timeout) + headers = mergeHeaders(headers, headersFromStruct(*options)) + } + uri := q.qsc.client.getEndpoint(queueServiceName, q.buildPath(), params) + + resp, err := q.qsc.client.exec(http.MethodPut, uri, headers, nil, q.qsc.auth) if err != nil { return err } - defer resp.body.Close() - + readAndCloseBody(resp.body) return checkRespCode(resp.statusCode, []int{http.StatusNoContent}) } @@ -171,176 +142,286 @@ func (c QueueServiceClient) SetMetadata(name string, metadata map[string]string) // properties on the specified queue. Metadata is associated with // the queue as name-values pairs. // -// See https://msdn.microsoft.com/en-us/library/azure/dd179384.aspx +// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Set-Queue-Metadata // // Because the way Golang's http client (and http.Header in particular) // canonicalize header names, the returned metadata names would always // be all lower case. 
-func (c QueueServiceClient) GetMetadata(name string) (QueueMetadataResponse, error) {
-	qm := QueueMetadataResponse{}
-	qm.UserDefinedMetadata = make(map[string]string)
-	uri := c.client.getEndpoint(queueServiceName, pathForQueue(name), url.Values{"comp": []string{"metadata"}})
-	headers := c.client.getStandardHeaders()
-	resp, err := c.client.exec(http.MethodGet, uri, headers, nil, c.auth)
-	if err != nil {
-		return qm, err
-	}
-	defer resp.body.Close()
+func (q *Queue) GetMetadata(options *QueueServiceOptions) error {
+	params := url.Values{"comp": {"metadata"}}
+	headers := q.qsc.client.getStandardHeaders()
 
-	for k, v := range resp.headers {
-		if len(v) != 1 {
-			return qm, fmt.Errorf("Unexpected number of values (%d) in response header '%s'", len(v), k)
-		}
-
-		value := v[0]
-
-		if k == approximateMessagesCountHeader {
-			qm.ApproximateMessageCount, err = strconv.Atoi(value)
-			if err != nil {
-				return qm, fmt.Errorf("Unexpected value in response header '%s': '%s' ", k, value)
-			}
-		} else if strings.HasPrefix(k, userDefinedMetadataHeaderPrefix) {
-			name := strings.TrimPrefix(k, userDefinedMetadataHeaderPrefix)
-			qm.UserDefinedMetadata[strings.ToLower(name)] = value
+	if options != nil {
+		params = addTimeout(params, options.Timeout)
+		headers = mergeHeaders(headers, headersFromStruct(*options))
+	}
+	uri := q.qsc.client.getEndpoint(queueServiceName, q.buildPath(), params)
+
+	resp, err := q.qsc.client.exec(http.MethodGet, uri, headers, nil, q.qsc.auth)
+	if err != nil {
+		return err
+	}
+	defer readAndCloseBody(resp.body)
+
+	if err := checkRespCode(resp.statusCode, []int{http.StatusOK}); err != nil {
+		return err
+	}
+
+	aproxMessagesStr := resp.headers.Get(http.CanonicalHeaderKey(approximateMessagesCountHeader))
+	if aproxMessagesStr != "" {
+		aproxMessages, err := strconv.ParseUint(aproxMessagesStr, 10, 64)
+		if err != nil {
+			return err
+		}
+		q.AproxMessageCount = aproxMessages
+	}
-	return qm, checkRespCode(resp.statusCode, []int{http.StatusOK})
+	q.Metadata = getMetadataFromHeaders(resp.headers)
+	return nil
 }
 
-// CreateQueue operation creates a queue under the given account.
-//
-// See https://msdn.microsoft.com/en-us/library/azure/dd179342.aspx
-func (c QueueServiceClient) CreateQueue(name string) error {
-	uri := c.client.getEndpoint(queueServiceName, pathForQueue(name), url.Values{})
-	headers := c.client.getStandardHeaders()
-	resp, err := c.client.exec(http.MethodPut, uri, headers, nil, c.auth)
-	if err != nil {
-		return err
+// GetMessageReference returns a message object with the specified text.
+func (q *Queue) GetMessageReference(text string) *Message {
+	return &Message{
+		Queue: q,
+		Text:  text,
 	}
-	defer resp.body.Close()
-	return checkRespCode(resp.statusCode, []int{http.StatusCreated})
 }
 
-// DeleteQueue operation permanently deletes the specified queue.
-//
-// See https://msdn.microsoft.com/en-us/library/azure/dd179436.aspx
-func (c QueueServiceClient) DeleteQueue(name string) error {
-	uri := c.client.getEndpoint(queueServiceName, pathForQueue(name), url.Values{})
-	resp, err := c.client.exec(http.MethodDelete, uri, c.client.getStandardHeaders(), nil, c.auth)
-	if err != nil {
-		return err
-	}
-	defer resp.body.Close()
-	return checkRespCode(resp.statusCode, []int{http.StatusNoContent})
+// GetMessagesOptions is the set of options that can be specified for the Get
+// Messages operation. A zero struct does not use any preferences for the
+// request.
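+// For example, GetMessagesOptions{NumOfMessages: 32, VisibilityTimeout: 30}
+// dequeues up to 32 messages and keeps them invisible to other consumers
+// for 30 seconds.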
+type GetMessagesOptions struct { + Timeout uint + NumOfMessages int + VisibilityTimeout int + RequestID string `header:"x-ms-client-request-id"` } -// QueueExists returns true if a queue with given name exists. -func (c QueueServiceClient) QueueExists(name string) (bool, error) { - uri := c.client.getEndpoint(queueServiceName, pathForQueue(name), url.Values{"comp": {"metadata"}}) - resp, err := c.client.exec(http.MethodGet, uri, c.client.getStandardHeaders(), nil, c.auth) - if resp != nil && (resp.statusCode == http.StatusOK || resp.statusCode == http.StatusNotFound) { - return resp.statusCode == http.StatusOK, nil - } - - return false, err -} - -// PutMessage operation adds a new message to the back of the message queue. -// -// See https://msdn.microsoft.com/en-us/library/azure/dd179346.aspx -func (c QueueServiceClient) PutMessage(queue string, message string, params PutMessageParameters) error { - uri := c.client.getEndpoint(queueServiceName, pathForQueueMessages(queue), params.getParameters()) - req := putMessageRequest{MessageText: message} - body, nn, err := xmlMarshal(req) - if err != nil { - return err - } - headers := c.client.getStandardHeaders() - headers["Content-Length"] = strconv.Itoa(nn) - resp, err := c.client.exec(http.MethodPost, uri, headers, body, c.auth) - if err != nil { - return err - } - defer resp.body.Close() - return checkRespCode(resp.statusCode, []int{http.StatusCreated}) -} - -// ClearMessages operation deletes all messages from the specified queue. -// -// See https://msdn.microsoft.com/en-us/library/azure/dd179454.aspx -func (c QueueServiceClient) ClearMessages(queue string) error { - uri := c.client.getEndpoint(queueServiceName, pathForQueueMessages(queue), url.Values{}) - resp, err := c.client.exec(http.MethodDelete, uri, c.client.getStandardHeaders(), nil, c.auth) - if err != nil { - return err - } - defer resp.body.Close() - return checkRespCode(resp.statusCode, []int{http.StatusNoContent}) +type messages struct { + XMLName xml.Name `xml:"QueueMessagesList"` + Messages []Message `xml:"QueueMessage"` } // GetMessages operation retrieves one or more messages from the front of the // queue. 
 //
-// See https://msdn.microsoft.com/en-us/library/azure/dd179474.aspx
-func (c QueueServiceClient) GetMessages(queue string, params GetMessagesParameters) (GetMessagesResponse, error) {
-	var r GetMessagesResponse
-	uri := c.client.getEndpoint(queueServiceName, pathForQueueMessages(queue), params.getParameters())
-	resp, err := c.client.exec(http.MethodGet, uri, c.client.getStandardHeaders(), nil, c.auth)
-	if err != nil {
-		return r, err
+// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Get-Messages
+func (q *Queue) GetMessages(options *GetMessagesOptions) ([]Message, error) {
+	query := url.Values{}
+	headers := q.qsc.client.getStandardHeaders()
+
+	if options != nil {
+		if options.NumOfMessages != 0 {
+			query.Set("numofmessages", strconv.Itoa(options.NumOfMessages))
+		}
+		if options.VisibilityTimeout != 0 {
+			query.Set("visibilitytimeout", strconv.Itoa(options.VisibilityTimeout))
+		}
+		query = addTimeout(query, options.Timeout)
+		headers = mergeHeaders(headers, headersFromStruct(*options))
 	}
-	defer resp.body.Close()
-	err = xmlUnmarshal(resp.body, &r)
-	return r, err
+	uri := q.qsc.client.getEndpoint(queueServiceName, q.buildPathMessages(), query)
+
+	resp, err := q.qsc.client.exec(http.MethodGet, uri, headers, nil, q.qsc.auth)
+	if err != nil {
+		return []Message{}, err
+	}
+	defer readAndCloseBody(resp.body)
+
+	var out messages
+	err = xmlUnmarshal(resp.body, &out)
+	if err != nil {
+		return []Message{}, err
+	}
+	for i := range out.Messages {
+		out.Messages[i].Queue = q
+	}
+	return out.Messages, err
+}
+
+// PeekMessagesOptions is the set of options that can be specified for the
+// Peek Messages operation. A zero struct does not use any preferences for
+// the request.
+type PeekMessagesOptions struct {
+	Timeout       uint
+	NumOfMessages int
+	RequestID     string `header:"x-ms-client-request-id"`
+}
 
 // PeekMessages retrieves one or more messages from the front of the queue, but
 // does not alter the visibility of the message.
 //
-// See https://msdn.microsoft.com/en-us/library/azure/dd179472.aspx
-func (c QueueServiceClient) PeekMessages(queue string, params PeekMessagesParameters) (PeekMessagesResponse, error) {
-	var r PeekMessagesResponse
-	uri := c.client.getEndpoint(queueServiceName, pathForQueueMessages(queue), params.getParameters())
-	resp, err := c.client.exec(http.MethodGet, uri, c.client.getStandardHeaders(), nil, c.auth)
-	if err != nil {
-		return r, err
+// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Peek-Messages
+func (q *Queue) PeekMessages(options *PeekMessagesOptions) ([]Message, error) {
+	query := url.Values{"peekonly": {"true"}} // Required for peek operation
+	headers := q.qsc.client.getStandardHeaders()
+
+	if options != nil {
+		if options.NumOfMessages != 0 {
+			query.Set("numofmessages", strconv.Itoa(options.NumOfMessages))
+		}
+		query = addTimeout(query, options.Timeout)
+		headers = mergeHeaders(headers, headersFromStruct(*options))
 	}
-	defer resp.body.Close()
-	err = xmlUnmarshal(resp.body, &r)
-	return r, err
+	uri := q.qsc.client.getEndpoint(queueServiceName, q.buildPathMessages(), query)
+
+	resp, err := q.qsc.client.exec(http.MethodGet, uri, headers, nil, q.qsc.auth)
+	if err != nil {
+		return []Message{}, err
+	}
+	defer readAndCloseBody(resp.body)
+
+	var out messages
+	err = xmlUnmarshal(resp.body, &out)
+	if err != nil {
+		return []Message{}, err
+	}
+	for i := range out.Messages {
+		out.Messages[i].Queue = q
+	}
+	return out.Messages, err
+}
 
-// DeleteMessage operation deletes the specified message.
+// ClearMessages operation deletes all messages from the specified queue. // -// See https://msdn.microsoft.com/en-us/library/azure/dd179347.aspx -func (c QueueServiceClient) DeleteMessage(queue, messageID, popReceipt string) error { - uri := c.client.getEndpoint(queueServiceName, pathForMessage(queue, messageID), url.Values{ - "popreceipt": {popReceipt}}) - resp, err := c.client.exec(http.MethodDelete, uri, c.client.getStandardHeaders(), nil, c.auth) +// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Clear-Messages +func (q *Queue) ClearMessages(options *QueueServiceOptions) error { + params := url.Values{} + headers := q.qsc.client.getStandardHeaders() + + if options != nil { + params = addTimeout(params, options.Timeout) + headers = mergeHeaders(headers, headersFromStruct(*options)) + } + uri := q.qsc.client.getEndpoint(queueServiceName, q.buildPathMessages(), params) + + resp, err := q.qsc.client.exec(http.MethodDelete, uri, headers, nil, q.qsc.auth) if err != nil { return err } - defer resp.body.Close() + readAndCloseBody(resp.body) return checkRespCode(resp.statusCode, []int{http.StatusNoContent}) } -// UpdateMessage operation deletes the specified message. -// -// See https://msdn.microsoft.com/en-us/library/azure/hh452234.aspx -func (c QueueServiceClient) UpdateMessage(queue string, messageID string, message string, params UpdateMessageParameters) error { - uri := c.client.getEndpoint(queueServiceName, pathForMessage(queue, messageID), params.getParameters()) - req := putMessageRequest{MessageText: message} - body, nn, err := xmlMarshal(req) +// SetPermissions sets up queue permissions +// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/set-queue-acl +func (q *Queue) SetPermissions(permissions QueuePermissions, options *SetQueuePermissionOptions) error { + body, length, err := generateQueueACLpayload(permissions.AccessPolicies) if err != nil { return err } - headers := c.client.getStandardHeaders() - headers["Content-Length"] = fmt.Sprintf("%d", nn) - resp, err := c.client.exec(http.MethodPut, uri, headers, body, c.auth) + + params := url.Values{ + "comp": {"acl"}, + } + headers := q.qsc.client.getStandardHeaders() + headers["Content-Length"] = strconv.Itoa(length) + + if options != nil { + params = addTimeout(params, options.Timeout) + headers = mergeHeaders(headers, headersFromStruct(*options)) + } + uri := q.qsc.client.getEndpoint(queueServiceName, q.buildPath(), params) + resp, err := q.qsc.client.exec(http.MethodPut, uri, headers, body, q.qsc.auth) if err != nil { return err } + defer readAndCloseBody(resp.body) + + if err := checkRespCode(resp.statusCode, []int{http.StatusNoContent}); err != nil { + return errors.New("Unable to set permissions") + } + + return nil +} + +func generateQueueACLpayload(policies []QueueAccessPolicy) (io.Reader, int, error) { + sil := SignedIdentifiers{ + SignedIdentifiers: []SignedIdentifier{}, + } + for _, qapd := range policies { + permission := qapd.generateQueuePermissions() + signedIdentifier := convertAccessPolicyToXMLStructs(qapd.ID, qapd.StartTime, qapd.ExpiryTime, permission) + sil.SignedIdentifiers = append(sil.SignedIdentifiers, signedIdentifier) + } + return xmlMarshal(sil) +} + +func (qapd *QueueAccessPolicy) generateQueuePermissions() (permissions string) { + // generate the permissions string (raup). + // still want the end user API to have bool flags. 
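+	// The flags below are emitted in the fixed r, a, u, p order that the
+	// service expects in the raw permission string.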
+	permissions = ""
+
+	if qapd.CanRead {
+		permissions += "r"
+	}
+
+	if qapd.CanAdd {
+		permissions += "a"
+	}
+
+	if qapd.CanUpdate {
+		permissions += "u"
+	}
+
+	if qapd.CanProcess {
+		permissions += "p"
+	}
+
+	return permissions
+}
+
+// GetQueuePermissionOptions includes options for a get queue permissions operation
+type GetQueuePermissionOptions struct {
+	Timeout   uint
+	RequestID string `header:"x-ms-client-request-id"`
+}
+
+// GetPermissions gets the queue permissions as per https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/get-queue-acl
+// If timeout is 0 then it will not be passed to Azure
+func (q *Queue) GetPermissions(options *GetQueuePermissionOptions) (*QueuePermissions, error) {
+	params := url.Values{
+		"comp": {"acl"},
+	}
+	headers := q.qsc.client.getStandardHeaders()
+
+	if options != nil {
+		params = addTimeout(params, options.Timeout)
+		headers = mergeHeaders(headers, headersFromStruct(*options))
+	}
+	uri := q.qsc.client.getEndpoint(queueServiceName, q.buildPath(), params)
+	resp, err := q.qsc.client.exec(http.MethodGet, uri, headers, nil, q.qsc.auth)
+	if err != nil {
+		return nil, err
+	}
+	defer resp.body.Close()
+
+	var ap AccessPolicy
+	err = xmlUnmarshal(resp.body, &ap.SignedIdentifiersList)
+	if err != nil {
+		return nil, err
+	}
+	return buildQueueAccessPolicy(ap, &resp.headers), nil
+}
+
+func buildQueueAccessPolicy(ap AccessPolicy, headers *http.Header) *QueuePermissions {
+	permissions := QueuePermissions{
+		AccessPolicies: []QueueAccessPolicy{},
+	}
+
+	for _, policy := range ap.SignedIdentifiersList.SignedIdentifiers {
+		qapd := QueueAccessPolicy{
+			ID:         policy.ID,
+			StartTime:  policy.AccessPolicy.StartTime,
+			ExpiryTime: policy.AccessPolicy.ExpiryTime,
+		}
+		qapd.CanRead = updatePermissions(policy.AccessPolicy.Permission, "r")
+		qapd.CanAdd = updatePermissions(policy.AccessPolicy.Permission, "a")
+		qapd.CanUpdate = updatePermissions(policy.AccessPolicy.Permission, "u")
+		qapd.CanProcess = updatePermissions(policy.AccessPolicy.Permission, "p")
+
+		permissions.AccessPolicies = append(permissions.AccessPolicies, qapd)
+	}
+	return &permissions
+}
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/storage/queueserviceclient.go b/vendor/github.com/Azure/azure-sdk-for-go/storage/queueserviceclient.go
new file mode 100644
index 000000000..19b44941c
--- /dev/null
+++ b/vendor/github.com/Azure/azure-sdk-for-go/storage/queueserviceclient.go
@@ -0,0 +1,28 @@
+package storage
+
+// QueueServiceClient contains operations for Microsoft Azure Queue Storage
+// Service.
+type QueueServiceClient struct {
+	client Client
+	auth   authentication
+}
+
+// GetServiceProperties gets the properties of your storage account's queue service.
+// See: https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/get-queue-service-properties
+func (q *QueueServiceClient) GetServiceProperties() (*ServiceProperties, error) {
+	return q.client.getServiceProperties(queueServiceName, q.auth)
+}
+
+// SetServiceProperties sets the properties of your storage account's queue service.
+// See: https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/set-queue-service-properties
+func (q *QueueServiceClient) SetServiceProperties(props ServiceProperties) error {
+	return q.client.setServiceProperties(props, queueServiceName, q.auth)
+}
+
+// GetQueueReference returns a Queue object for the specified queue name.
+func (q *QueueServiceClient) GetQueueReference(name string) *Queue { + return &Queue{ + qsc: q, + Name: name, + } +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/storage/share.go b/vendor/github.com/Azure/azure-sdk-for-go/storage/share.go index 8423aedcc..e6a868081 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/storage/share.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/storage/share.go @@ -30,9 +30,15 @@ func (s *Share) buildPath() string { // Create this share under the associated account. // If a share with the same name already exists, the operation fails. // -// See https://msdn.microsoft.com/en-us/library/azure/dn167008.aspx -func (s *Share) Create() error { - headers, err := s.fsc.createResource(s.buildPath(), resourceShare, mergeMDIntoExtraHeaders(s.Metadata, nil)) +// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Create-Share +func (s *Share) Create(options *FileRequestOptions) error { + extraheaders := map[string]string{} + if s.Properties.Quota > 0 { + extraheaders["x-ms-share-quota"] = strconv.Itoa(s.Properties.Quota) + } + + params := prepareOptions(options) + headers, err := s.fsc.createResource(s.buildPath(), resourceShare, params, mergeMDIntoExtraHeaders(s.Metadata, extraheaders), []int{http.StatusCreated}) if err != nil { return err } @@ -45,17 +51,23 @@ func (s *Share) Create() error { // it does not exist. Returns true if the share is newly created or false if // the share already exists. // -// See https://msdn.microsoft.com/en-us/library/azure/dn167008.aspx -func (s *Share) CreateIfNotExists() (bool, error) { - resp, err := s.fsc.createResourceNoClose(s.buildPath(), resourceShare, nil) +// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Create-Share +func (s *Share) CreateIfNotExists(options *FileRequestOptions) (bool, error) { + extraheaders := map[string]string{} + if s.Properties.Quota > 0 { + extraheaders["x-ms-share-quota"] = strconv.Itoa(s.Properties.Quota) + } + + params := prepareOptions(options) + resp, err := s.fsc.createResourceNoClose(s.buildPath(), resourceShare, params, extraheaders) if resp != nil { - defer resp.body.Close() + defer readAndCloseBody(resp.body) if resp.statusCode == http.StatusCreated || resp.statusCode == http.StatusConflict { if resp.statusCode == http.StatusCreated { s.updateEtagAndLastModified(resp.headers) return true, nil } - return false, s.FetchAttributes() + return false, s.FetchAttributes(nil) } } @@ -66,18 +78,18 @@ func (s *Share) CreateIfNotExists() (bool, error) { // and directories contained within it are later deleted during garbage // collection. If the share does not exist the operation fails // -// See https://msdn.microsoft.com/en-us/library/azure/dn689090.aspx -func (s *Share) Delete() error { - return s.fsc.deleteResource(s.buildPath(), resourceShare) +// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Delete-Share +func (s *Share) Delete(options *FileRequestOptions) error { + return s.fsc.deleteResource(s.buildPath(), resourceShare, options) } // DeleteIfExists operation marks this share for deletion if it exists. 
// -// See https://msdn.microsoft.com/en-us/library/azure/dn689090.aspx -func (s *Share) DeleteIfExists() (bool, error) { - resp, err := s.fsc.deleteResourceNoClose(s.buildPath(), resourceShare) +// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Delete-Share +func (s *Share) DeleteIfExists(options *FileRequestOptions) (bool, error) { + resp, err := s.fsc.deleteResourceNoClose(s.buildPath(), resourceShare, options) if resp != nil { - defer resp.body.Close() + defer readAndCloseBody(resp.body) if resp.statusCode == http.StatusAccepted || resp.statusCode == http.StatusNotFound { return resp.statusCode == http.StatusAccepted, nil } @@ -97,8 +109,10 @@ func (s *Share) Exists() (bool, error) { } // FetchAttributes retrieves metadata and properties for this share. -func (s *Share) FetchAttributes() error { - headers, err := s.fsc.getResourceHeaders(s.buildPath(), compNone, resourceShare, http.MethodHead) +// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/get-share-properties +func (s *Share) FetchAttributes(options *FileRequestOptions) error { + params := prepareOptions(options) + headers, err := s.fsc.getResourceHeaders(s.buildPath(), compNone, resourceShare, params, http.MethodHead) if err != nil { return err } @@ -130,9 +144,9 @@ func (s *Share) ServiceClient() *FileServiceClient { // are case-insensitive so case munging should not matter to other // applications either. // -// See https://msdn.microsoft.com/en-us/library/azure/dd179414.aspx -func (s *Share) SetMetadata() error { - headers, err := s.fsc.setResourceHeaders(s.buildPath(), compMetadata, resourceShare, mergeMDIntoExtraHeaders(s.Metadata, nil)) +// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/set-share-metadata +func (s *Share) SetMetadata(options *FileRequestOptions) error { + headers, err := s.fsc.setResourceHeaders(s.buildPath(), compMetadata, resourceShare, mergeMDIntoExtraHeaders(s.Metadata, nil), options) if err != nil { return err } @@ -148,15 +162,17 @@ func (s *Share) SetMetadata() error { // are case-insensitive so case munging should not matter to other // applications either. 
// -// See https://msdn.microsoft.com/en-us/library/azure/mt427368.aspx -func (s *Share) SetProperties() error { - if s.Properties.Quota < 1 || s.Properties.Quota > 5120 { - return fmt.Errorf("invalid value %v for quota, valid values are [1, 5120]", s.Properties.Quota) +// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Set-Share-Properties +func (s *Share) SetProperties(options *FileRequestOptions) error { + extraheaders := map[string]string{} + if s.Properties.Quota > 0 { + if s.Properties.Quota > 5120 { + return fmt.Errorf("invalid value %v for quota, valid values are [1, 5120]", s.Properties.Quota) + } + extraheaders["x-ms-share-quota"] = strconv.Itoa(s.Properties.Quota) } - headers, err := s.fsc.setResourceHeaders(s.buildPath(), compProperties, resourceShare, map[string]string{ - "x-ms-share-quota": strconv.Itoa(s.Properties.Quota), - }) + headers, err := s.fsc.setResourceHeaders(s.buildPath(), compProperties, resourceShare, extraheaders, options) if err != nil { return err } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/storage/storageservice.go b/vendor/github.com/Azure/azure-sdk-for-go/storage/storageservice.go new file mode 100644 index 000000000..88700fbc9 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/storage/storageservice.go @@ -0,0 +1,117 @@ +package storage + +import ( + "net/http" + "net/url" + "strconv" +) + +// ServiceProperties represents the storage account service properties +type ServiceProperties struct { + Logging *Logging + HourMetrics *Metrics + MinuteMetrics *Metrics + Cors *Cors +} + +// Logging represents the Azure Analytics Logging settings +type Logging struct { + Version string + Delete bool + Read bool + Write bool + RetentionPolicy *RetentionPolicy +} + +// RetentionPolicy indicates if retention is enabled and for how many days +type RetentionPolicy struct { + Enabled bool + Days *int +} + +// Metrics provide request statistics. 
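+//
+// For example, hourly metrics with a 7-day retention could be enabled via
+// SetServiceProperties (values assumed for illustration):
+//
+//	days, include := 7, true
+//	props := ServiceProperties{
+//		HourMetrics: &Metrics{
+//			Version:         "1.0",
+//			Enabled:         true,
+//			IncludeAPIs:     &include,
+//			RetentionPolicy: &RetentionPolicy{Enabled: true, Days: &days},
+//		},
+//	}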
+type Metrics struct { + Version string + Enabled bool + IncludeAPIs *bool + RetentionPolicy *RetentionPolicy +} + +// Cors includes all the CORS rules +type Cors struct { + CorsRule []CorsRule +} + +// CorsRule includes all settings for a Cors rule +type CorsRule struct { + AllowedOrigins string + AllowedMethods string + MaxAgeInSeconds int + ExposedHeaders string + AllowedHeaders string +} + +func (c Client) getServiceProperties(service string, auth authentication) (*ServiceProperties, error) { + query := url.Values{ + "restype": {"service"}, + "comp": {"properties"}, + } + uri := c.getEndpoint(service, "", query) + headers := c.getStandardHeaders() + + resp, err := c.exec(http.MethodGet, uri, headers, nil, auth) + if err != nil { + return nil, err + } + defer resp.body.Close() + + if err := checkRespCode(resp.statusCode, []int{http.StatusOK}); err != nil { + return nil, err + } + + var out ServiceProperties + err = xmlUnmarshal(resp.body, &out) + if err != nil { + return nil, err + } + + return &out, nil +} + +func (c Client) setServiceProperties(props ServiceProperties, service string, auth authentication) error { + query := url.Values{ + "restype": {"service"}, + "comp": {"properties"}, + } + uri := c.getEndpoint(service, "", query) + + // Ideally, StorageServiceProperties would be the output struct + // This is to avoid golint stuttering, while generating the correct XML + type StorageServiceProperties struct { + Logging *Logging + HourMetrics *Metrics + MinuteMetrics *Metrics + Cors *Cors + } + input := StorageServiceProperties{ + Logging: props.Logging, + HourMetrics: props.HourMetrics, + MinuteMetrics: props.MinuteMetrics, + Cors: props.Cors, + } + + body, length, err := xmlMarshal(input) + if err != nil { + return err + } + + headers := c.getStandardHeaders() + headers["Content-Length"] = strconv.Itoa(length) + + resp, err := c.exec(http.MethodPut, uri, headers, body, auth) + if err != nil { + return err + } + readAndCloseBody(resp.body) + return checkRespCode(resp.statusCode, []int{http.StatusAccepted}) +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/storage/table.go b/vendor/github.com/Azure/azure-sdk-for-go/storage/table.go index 5cbc13ff1..b92a590a9 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/storage/table.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/storage/table.go @@ -5,30 +5,23 @@ import ( "encoding/json" "fmt" "io" + "io/ioutil" "net/http" "net/url" "strconv" + "strings" "time" ) -// TableServiceClient contains operations for Microsoft Azure Table Storage -// Service. 
-type TableServiceClient struct { - client Client - auth authentication -} - -// AzureTable is the typedef of the Azure Table name -type AzureTable string - const ( - tablesURIPath = "/Tables" + tablesURIPath = "/Tables" + nextTableQueryParameter = "NextTableName" + headerNextPartitionKey = "x-ms-continuation-NextPartitionKey" + headerNextRowKey = "x-ms-continuation-NextRowKey" + nextPartitionKeyQueryParameter = "NextPartitionKey" + nextRowKeyQueryParameter = "NextRowKey" ) -type createTableRequest struct { - TableName string `json:"TableName"` -} - // TableAccessPolicy are used for SETTING table policies type TableAccessPolicy struct { ID string @@ -40,139 +33,233 @@ type TableAccessPolicy struct { CanDelete bool } -func pathForTable(table AzureTable) string { return fmt.Sprintf("%s", table) } - -func (c *TableServiceClient) getStandardHeaders() map[string]string { - return map[string]string{ - "x-ms-version": "2015-02-21", - "x-ms-date": currentTimeRfc1123Formatted(), - "Accept": "application/json;odata=nometadata", - "Accept-Charset": "UTF-8", - "Content-Type": "application/json", - userAgentHeader: c.client.userAgent, - } +// Table represents an Azure table. +type Table struct { + tsc *TableServiceClient + Name string `json:"TableName"` + OdataEditLink string `json:"odata.editLink"` + OdataID string `json:"odata.id"` + OdataMetadata string `json:"odata.metadata"` + OdataType string `json:"odata.type"` } -// QueryTables returns the tables created in the -// *TableServiceClient storage account. -func (c *TableServiceClient) QueryTables() ([]AzureTable, error) { - uri := c.client.getEndpoint(tableServiceName, tablesURIPath, url.Values{}) +// EntityQueryResult contains the response from +// ExecuteQuery and ExecuteQueryNextResults functions. +type EntityQueryResult struct { + OdataMetadata string `json:"odata.metadata"` + Entities []*Entity `json:"value"` + QueryNextLink + table *Table +} - headers := c.getStandardHeaders() - headers["Content-Length"] = "0" +type continuationToken struct { + NextPartitionKey string + NextRowKey string +} - resp, err := c.client.execInternalJSON(http.MethodGet, uri, headers, nil, c.auth) +func (t *Table) buildPath() string { + return fmt.Sprintf("/%s", t.Name) +} + +func (t *Table) buildSpecificPath() string { + return fmt.Sprintf("%s('%s')", tablesURIPath, t.Name) +} + +// Get gets the referenced table. 
+// See: https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/querying-tables-and-entities +func (t *Table) Get(timeout uint, ml MetadataLevel) error { + if ml == EmptyPayload { + return errEmptyPayload + } + + query := url.Values{ + "timeout": {strconv.FormatUint(uint64(timeout), 10)}, + } + headers := t.tsc.client.getStandardHeaders() + headers[headerAccept] = string(ml) + + uri := t.tsc.client.getEndpoint(tableServiceName, t.buildSpecificPath(), query) + resp, err := t.tsc.client.exec(http.MethodGet, uri, headers, nil, t.tsc.auth) if err != nil { - return nil, err + return err } - defer resp.body.Close() + defer readAndCloseBody(resp.body) - if err := checkRespCode(resp.statusCode, []int{http.StatusOK}); err != nil { - return nil, err + if err = checkRespCode(resp.statusCode, []int{http.StatusOK}); err != nil { + return err } - buf := new(bytes.Buffer) - if _, err := buf.ReadFrom(resp.body); err != nil { - return nil, err + respBody, err := ioutil.ReadAll(resp.body) + if err != nil { + return err } - - var respArray queryTablesResponse - if err := json.Unmarshal(buf.Bytes(), &respArray); err != nil { - return nil, err + err = json.Unmarshal(respBody, t) + if err != nil { + return err } - - s := make([]AzureTable, len(respArray.TableName)) - for i, elem := range respArray.TableName { - s[i] = AzureTable(elem.TableName) - } - - return s, nil + return nil } -// CreateTable creates the table given the specific -// name. This function fails if the name is not compliant +// Create creates the referenced table. +// This function fails if the name is not compliant // with the specification or the tables already exists. -func (c *TableServiceClient) CreateTable(table AzureTable) error { - uri := c.client.getEndpoint(tableServiceName, tablesURIPath, url.Values{}) +// ml determines the level of detail of metadata in the operation response, +// or no data at all. 
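+//
+// A minimal sketch, assuming a constructed Client value named cli
+// (illustrative only):
+//
+//	tsc := cli.GetTableService()
+//	tbl := tsc.GetTableReference("people")
+//	err := tbl.Create(30, EmptyPayload, nil)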
+// See https://docs.microsoft.com/rest/api/storageservices/fileservices/create-table +func (t *Table) Create(timeout uint, ml MetadataLevel, options *TableOptions) error { + uri := t.tsc.client.getEndpoint(tableServiceName, tablesURIPath, url.Values{ + "timeout": {strconv.FormatUint(uint64(timeout), 10)}, + }) - headers := c.getStandardHeaders() - - req := createTableRequest{TableName: string(table)} + type createTableRequest struct { + TableName string `json:"TableName"` + } + req := createTableRequest{TableName: t.Name} buf := new(bytes.Buffer) - if err := json.NewEncoder(buf).Encode(req); err != nil { return err } - headers["Content-Length"] = fmt.Sprintf("%d", buf.Len()) - - resp, err := c.client.execInternalJSON(http.MethodPost, uri, headers, buf, c.auth) + headers := t.tsc.client.getStandardHeaders() + headers = addReturnContentHeaders(headers, ml) + headers = addBodyRelatedHeaders(headers, buf.Len()) + headers = options.addToHeaders(headers) + resp, err := t.tsc.client.exec(http.MethodPost, uri, headers, buf, t.tsc.auth) if err != nil { return err } - defer resp.body.Close() + defer readAndCloseBody(resp.body) - if err := checkRespCode(resp.statusCode, []int{http.StatusCreated}); err != nil { - return err + if ml == EmptyPayload { + if err := checkRespCode(resp.statusCode, []int{http.StatusNoContent}); err != nil { + return err + } + } else { + if err := checkRespCode(resp.statusCode, []int{http.StatusCreated}); err != nil { + return err + } + } + + if ml != EmptyPayload { + data, err := ioutil.ReadAll(resp.body) + if err != nil { + return err + } + err = json.Unmarshal(data, t) + if err != nil { + return err + } } return nil } -// DeleteTable deletes the table given the specific -// name. This function fails if the table is not present. -// Be advised: DeleteTable deletes all the entries -// that may be present. -func (c *TableServiceClient) DeleteTable(table AzureTable) error { - uri := c.client.getEndpoint(tableServiceName, tablesURIPath, url.Values{}) - uri += fmt.Sprintf("('%s')", string(table)) +// Delete deletes the referenced table. +// This function fails if the table is not present. +// Be advised: Delete deletes all the entries that may be present. 
+// See https://docs.microsoft.com/rest/api/storageservices/fileservices/delete-table +func (t *Table) Delete(timeout uint, options *TableOptions) error { + uri := t.tsc.client.getEndpoint(tableServiceName, t.buildSpecificPath(), url.Values{ + "timeout": {strconv.Itoa(int(timeout))}, + }) - headers := c.getStandardHeaders() - - headers["Content-Length"] = "0" - - resp, err := c.client.execInternalJSON(http.MethodDelete, uri, headers, nil, c.auth) + headers := t.tsc.client.getStandardHeaders() + headers = addReturnContentHeaders(headers, EmptyPayload) + headers = options.addToHeaders(headers) + resp, err := t.tsc.client.exec(http.MethodDelete, uri, headers, nil, t.tsc.auth) if err != nil { return err } - defer resp.body.Close() + defer readAndCloseBody(resp.body) - if err := checkRespCode(resp.statusCode, []int{http.StatusNoContent}); err != nil { - return err - - } - return nil + err = checkRespCode(resp.statusCode, []int{http.StatusNoContent}) + return err } -// SetTablePermissions sets up table ACL permissions as per REST details https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Set-Table-ACL -func (c *TableServiceClient) SetTablePermissions(table AzureTable, policies []TableAccessPolicy, timeout uint) (err error) { - params := url.Values{"comp": {"acl"}} +// QueryOptions includes options for a query entities operation. +// Top, filter and select are OData query options. +type QueryOptions struct { + Top uint + Filter string + Select []string + RequestID string +} - if timeout > 0 { - params.Add("timeout", fmt.Sprint(timeout)) +func (options *QueryOptions) getParameters() (url.Values, map[string]string) { + query := url.Values{} + headers := map[string]string{} + if options != nil { + if options.Top > 0 { + query.Add(OdataTop, strconv.FormatUint(uint64(options.Top), 10)) + } + if options.Filter != "" { + query.Add(OdataFilter, options.Filter) + } + if len(options.Select) > 0 { + query.Add(OdataSelect, strings.Join(options.Select, ",")) + } + headers = addToHeaders(headers, "x-ms-client-request-id", options.RequestID) + } + return query, headers +} + +// QueryEntities returns the entities in the table. +// You can use query options defined by the OData Protocol specification. +// +// See: https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/query-entities +func (t *Table) QueryEntities(timeout uint, ml MetadataLevel, options *QueryOptions) (*EntityQueryResult, error) { + if ml == EmptyPayload { + return nil, errEmptyPayload + } + query, headers := options.getParameters() + query = addTimeout(query, timeout) + uri := t.tsc.client.getEndpoint(tableServiceName, t.buildPath(), query) + return t.queryEntities(uri, headers, ml) +} + +// NextResults returns the next page of results +// from a QueryEntities or NextResults operation. 
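+//
+// A sketch of draining all pages (names assumed, for illustration):
+//
+//	res, err := tbl.QueryEntities(30, MinimalMetadata, nil)
+//	for err == nil && res.NextLink != nil {
+//		res, err = res.NextResults(nil)
+//	}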
+// +// See: https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/query-entities +// See https://docs.microsoft.com/rest/api/storageservices/fileservices/query-timeout-and-pagination +func (eqr *EntityQueryResult) NextResults(options *TableOptions) (*EntityQueryResult, error) { + if eqr == nil { + return nil, errNilPreviousResult + } + if eqr.NextLink == nil { + return nil, errNilNextLink + } + headers := options.addToHeaders(map[string]string{}) + return eqr.table.queryEntities(*eqr.NextLink, headers, eqr.ml) +} + +// SetPermissions sets up table ACL permissions +// See https://docs.microsoft.com/rest/api/storageservices/fileservices/Set-Table-ACL +func (t *Table) SetPermissions(tap []TableAccessPolicy, timeout uint, options *TableOptions) error { + params := url.Values{"comp": {"acl"}, + "timeout": {strconv.Itoa(int(timeout))}, } - uri := c.client.getEndpoint(tableServiceName, string(table), params) - headers := c.client.getStandardHeaders() + uri := t.tsc.client.getEndpoint(tableServiceName, t.Name, params) + headers := t.tsc.client.getStandardHeaders() + headers = options.addToHeaders(headers) - body, length, err := generateTableACLPayload(policies) + body, length, err := generateTableACLPayload(tap) if err != nil { return err } - headers["Content-Length"] = fmt.Sprintf("%v", length) + headers["Content-Length"] = strconv.Itoa(length) - resp, err := c.client.execInternalJSON(http.MethodPut, uri, headers, body, c.auth) + resp, err := t.tsc.client.exec(http.MethodPut, uri, headers, body, t.tsc.auth) if err != nil { return err } - defer resp.body.Close() + defer readAndCloseBody(resp.body) - if err := checkRespCode(resp.statusCode, []int{http.StatusNoContent}); err != nil { - return err - } - return nil + err = checkRespCode(resp.statusCode, []int{http.StatusNoContent}) + return err } func generateTableACLPayload(policies []TableAccessPolicy) (io.Reader, int, error) { @@ -187,17 +274,18 @@ func generateTableACLPayload(policies []TableAccessPolicy) (io.Reader, int, erro return xmlMarshal(sil) } -// GetTablePermissions gets the table ACL permissions, as per REST details https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/get-table-acl -func (c *TableServiceClient) GetTablePermissions(table AzureTable, timeout int) (permissionResponse []TableAccessPolicy, err error) { - params := url.Values{"comp": {"acl"}} - - if timeout > 0 { - params.Add("timeout", strconv.Itoa(timeout)) +// GetPermissions gets the table ACL permissions +// See https://docs.microsoft.com/rest/api/storageservices/fileservices/get-table-acl +func (t *Table) GetPermissions(timeout int, options *TableOptions) ([]TableAccessPolicy, error) { + params := url.Values{"comp": {"acl"}, + "timeout": {strconv.Itoa(int(timeout))}, } - uri := c.client.getEndpoint(tableServiceName, string(table), params) - headers := c.client.getStandardHeaders() - resp, err := c.client.execInternalJSON(http.MethodGet, uri, headers, nil, c.auth) + uri := t.tsc.client.getEndpoint(tableServiceName, t.Name, params) + headers := t.tsc.client.getStandardHeaders() + headers = options.addToHeaders(headers) + + resp, err := t.tsc.client.exec(http.MethodGet, uri, headers, nil, t.tsc.auth) if err != nil { return nil, err } @@ -212,12 +300,73 @@ func (c *TableServiceClient) GetTablePermissions(table AzureTable, timeout int) if err != nil { return nil, err } - out := updateTableAccessPolicy(ap) - return out, nil + return updateTableAccessPolicy(ap), nil +} + +func (t *Table) queryEntities(uri string, headers map[string]string, ml 
MetadataLevel) (*EntityQueryResult, error) { + headers = mergeHeaders(headers, t.tsc.client.getStandardHeaders()) + if ml != EmptyPayload { + headers[headerAccept] = string(ml) + } + + resp, err := t.tsc.client.exec(http.MethodGet, uri, headers, nil, t.tsc.auth) + if err != nil { + return nil, err + } + defer resp.body.Close() + + if err = checkRespCode(resp.statusCode, []int{http.StatusOK}); err != nil { + return nil, err + } + + data, err := ioutil.ReadAll(resp.body) + if err != nil { + return nil, err + } + var entities EntityQueryResult + err = json.Unmarshal(data, &entities) + if err != nil { + return nil, err + } + + for i := range entities.Entities { + entities.Entities[i].Table = t + } + entities.table = t + + contToken := extractContinuationTokenFromHeaders(resp.headers) + if contToken == nil { + entities.NextLink = nil + } else { + originalURI, err := url.Parse(uri) + if err != nil { + return nil, err + } + v := originalURI.Query() + v.Set(nextPartitionKeyQueryParameter, contToken.NextPartitionKey) + v.Set(nextRowKeyQueryParameter, contToken.NextRowKey) + newURI := t.tsc.client.getEndpoint(tableServiceName, t.buildPath(), v) + entities.NextLink = &newURI + entities.ml = ml + } + + return &entities, nil +} + +func extractContinuationTokenFromHeaders(h http.Header) *continuationToken { + ct := continuationToken{ + NextPartitionKey: h.Get(headerNextPartitionKey), + NextRowKey: h.Get(headerNextRowKey), + } + + if ct.NextPartitionKey != "" && ct.NextRowKey != "" { + return &ct + } + return nil } func updateTableAccessPolicy(ap AccessPolicy) []TableAccessPolicy { - out := []TableAccessPolicy{} + taps := []TableAccessPolicy{} for _, policy := range ap.SignedIdentifiersList.SignedIdentifiers { tap := TableAccessPolicy{ ID: policy.ID, @@ -229,9 +378,9 @@ func updateTableAccessPolicy(ap AccessPolicy) []TableAccessPolicy { tap.CanUpdate = updatePermissions(policy.AccessPolicy.Permission, "u") tap.CanDelete = updatePermissions(policy.AccessPolicy.Permission, "d") - out = append(out, tap) + taps = append(taps, tap) } - return out + return taps } func generateTablePermissions(tap *TableAccessPolicy) (permissions string) { diff --git a/vendor/github.com/Azure/azure-sdk-for-go/storage/table_batch.go b/vendor/github.com/Azure/azure-sdk-for-go/storage/table_batch.go new file mode 100644 index 000000000..7a0f0915c --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/storage/table_batch.go @@ -0,0 +1,302 @@ +package storage + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "mime/multipart" + "net/http" + "net/textproto" + "sort" + "strings" + + "github.com/satori/uuid" +) + +// Operation type. Insert, Delete, Replace etc. +type Operation int + +// consts for batch operations. +const ( + InsertOp = Operation(1) + DeleteOp = Operation(2) + ReplaceOp = Operation(3) + MergeOp = Operation(4) + InsertOrReplaceOp = Operation(5) + InsertOrMergeOp = Operation(6) +) + +// BatchEntity used for tracking Entities to operate on and +// whether operations (replace/merge etc) should be forced. +// Wrapper for regular Entity with additional data specific for the entity. +type BatchEntity struct { + *Entity + Force bool + Op Operation +} + +// TableBatch stores all the entities that will be operated on during a batch process. +// Entities can be inserted, replaced or deleted. +type TableBatch struct { + BatchEntitySlice []BatchEntity + + // reference to table we're operating on. 
+	Table *Table
+}
+
+// defaultChangesetHeaders for changeSets
+var defaultChangesetHeaders = map[string]string{
+	"Accept":       "application/json;odata=minimalmetadata",
+	"Content-Type": "application/json",
+	"Prefer":       "return-no-content",
+}
+
+// NewBatch returns a new TableBatch for populating.
+func (t *Table) NewBatch() *TableBatch {
+	return &TableBatch{
+		Table: t,
+	}
+}
+
+// InsertEntity adds an entity in preparation for a batch insert.
+func (t *TableBatch) InsertEntity(entity *Entity) {
+	be := BatchEntity{Entity: entity, Force: false, Op: InsertOp}
+	t.BatchEntitySlice = append(t.BatchEntitySlice, be)
+}
+
+// InsertOrReplaceEntity adds an entity in preparation for a batch insert or replace.
+func (t *TableBatch) InsertOrReplaceEntity(entity *Entity, force bool) {
+	be := BatchEntity{Entity: entity, Force: force, Op: InsertOrReplaceOp}
+	t.BatchEntitySlice = append(t.BatchEntitySlice, be)
+}
+
+// InsertOrReplaceEntityByForce adds an entity in preparation for a batch insert or replace. Forces regardless of ETag
+func (t *TableBatch) InsertOrReplaceEntityByForce(entity *Entity) {
+	t.InsertOrReplaceEntity(entity, true)
+}
+
+// InsertOrMergeEntity adds an entity in preparation for a batch insert or merge.
+func (t *TableBatch) InsertOrMergeEntity(entity *Entity, force bool) {
+	be := BatchEntity{Entity: entity, Force: force, Op: InsertOrMergeOp}
+	t.BatchEntitySlice = append(t.BatchEntitySlice, be)
+}
+
+// InsertOrMergeEntityByForce adds an entity in preparation for a batch insert or merge. Forces regardless of ETag
+func (t *TableBatch) InsertOrMergeEntityByForce(entity *Entity) {
+	t.InsertOrMergeEntity(entity, true)
+}
+
+// ReplaceEntity adds an entity in preparation for a batch replace.
+func (t *TableBatch) ReplaceEntity(entity *Entity) {
+	be := BatchEntity{Entity: entity, Force: false, Op: ReplaceOp}
+	t.BatchEntitySlice = append(t.BatchEntitySlice, be)
+}
+
+// DeleteEntity adds an entity in preparation for a batch delete
+func (t *TableBatch) DeleteEntity(entity *Entity, force bool) {
+	be := BatchEntity{Entity: entity, Force: force, Op: DeleteOp}
+	t.BatchEntitySlice = append(t.BatchEntitySlice, be)
+}
+
+// DeleteEntityByForce adds an entity in preparation for a batch delete. Forces regardless of ETag
+func (t *TableBatch) DeleteEntityByForce(entity *Entity, force bool) {
+	t.DeleteEntity(entity, true)
+}
+
+// MergeEntity adds an entity in preparation for a batch merge
+func (t *TableBatch) MergeEntity(entity *Entity) {
+	be := BatchEntity{Entity: entity, Force: false, Op: MergeOp}
+	t.BatchEntitySlice = append(t.BatchEntitySlice, be)
+}
+
+// ExecuteBatch executes many table operations in one request to Azure.
+// The operations can be combinations of Insert, Delete, Replace and Merge.
+// Creates the inner changeset body (various operations, Insert, Delete etc) then creates the outer request packet that encompasses
+// the changesets.
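+//
+// A minimal sketch, assuming two prepared *Entity values e1 and e2
+// (illustrative only):
+//
+//	batch := tbl.NewBatch()
+//	batch.InsertEntity(e1)
+//	batch.MergeEntity(e2)
+//	err := batch.ExecuteBatch()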
+// As per the document at https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/performing-entity-group-transactions
+func (t *TableBatch) ExecuteBatch() error {
+	changesetBoundary := fmt.Sprintf("changeset_%s", uuid.NewV1())
+	uri := t.Table.tsc.client.getEndpoint(tableServiceName, "$batch", nil)
+	changesetBody, err := t.generateChangesetBody(changesetBoundary)
+	if err != nil {
+		return err
+	}
+
+	boundary := fmt.Sprintf("batch_%s", uuid.NewV1())
+	body, err := generateBody(changesetBody, changesetBoundary, boundary)
+	if err != nil {
+		return err
+	}
+
+	headers := t.Table.tsc.client.getStandardHeaders()
+	headers[headerContentType] = fmt.Sprintf("multipart/mixed; boundary=%s", boundary)
+
+	resp, err := t.Table.tsc.client.execBatchOperationJSON(http.MethodPost, uri, headers, bytes.NewReader(body.Bytes()), t.Table.tsc.auth)
+	if err != nil {
+		return err
+	}
+	defer resp.body.Close()
+
+	if err = checkRespCode(resp.statusCode, []int{http.StatusAccepted}); err != nil {
+
+		// determine which operation in the batch failed.
+		operationFailedMessage := t.getFailedOperation(resp.odata.Err.Message.Value)
+		requestID, date, version := getDebugHeaders(resp.headers)
+		return AzureStorageServiceError{
+			StatusCode: resp.statusCode,
+			Code:       resp.odata.Err.Code,
+			RequestID:  requestID,
+			Date:       date,
+			APIVersion: version,
+			Message:    operationFailedMessage,
+		}
+	}
+
+	return nil
+}
+
+// getFailedOperation parses the original Azure error string, determines which operation failed
+// and generates an appropriate message.
+func (t *TableBatch) getFailedOperation(errorMessage string) string {
+	// errorMessage consists of "number:string"; we just need the number.
+	sp := strings.Split(errorMessage, ":")
+	if len(sp) > 1 {
+		msg := fmt.Sprintf("Element %s in the batch returned an unexpected response code.\n%s", sp[0], errorMessage)
+		return msg
+	}
+
+	// can't parse the message; just return the original message to the client.
+	return errorMessage
+}
+
+// generateBody generates the complete body for the batch request.
+func generateBody(changeSetBody *bytes.Buffer, changesetBoundary string, boundary string) (*bytes.Buffer, error) {
+
+	body := new(bytes.Buffer)
+	writer := multipart.NewWriter(body)
+	writer.SetBoundary(boundary)
+	h := make(textproto.MIMEHeader)
+	h.Set(headerContentType, fmt.Sprintf("multipart/mixed; boundary=%s\r\n", changesetBoundary))
+	batchWriter, err := writer.CreatePart(h)
+	if err != nil {
+		return nil, err
+	}
+	batchWriter.Write(changeSetBody.Bytes())
+	writer.Close()
+	return body, nil
+}
+
+// generateChangesetBody generates the individual changesets for the various operations within the batch request.
+// Each batched operation (Insert, Delete, Merge etc.) becomes one part of the changeset.
+func (t *TableBatch) generateChangesetBody(changesetBoundary string) (*bytes.Buffer, error) {
+
+	body := new(bytes.Buffer)
+	writer := multipart.NewWriter(body)
+	writer.SetBoundary(changesetBoundary)
+
+	for _, be := range t.BatchEntitySlice {
+		if err := t.generateEntitySubset(&be, writer); err != nil {
+			return nil, err
+		}
+	}
+
+	writer.Close()
+	return body, nil
+}
+
+// generateVerb generates the HTTP request VERB required for each changeset.
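+// Note that merges use the non-standard MERGE verb required by the Table
+// service; the other operations map onto standard HTTP methods.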
+func generateVerb(op Operation) (string, error) { + switch op { + case InsertOp: + return http.MethodPost, nil + case DeleteOp: + return http.MethodDelete, nil + case ReplaceOp, InsertOrReplaceOp: + return http.MethodPut, nil + case MergeOp, InsertOrMergeOp: + return "MERGE", nil + default: + return "", errors.New("Unable to detect operation") + } +} + +// generateQueryPath generates the query path for within the changesets +// For inserts it will just be a table query path (table name) +// but for other operations (modifying an existing entity) then +// the partition/row keys need to be generated. +func (t *TableBatch) generateQueryPath(op Operation, entity *Entity) string { + if op == InsertOp { + return entity.Table.buildPath() + } + return entity.buildPath() +} + +// generateGenericOperationHeaders generates common headers for a given operation. +func generateGenericOperationHeaders(be *BatchEntity) map[string]string { + retval := map[string]string{} + + for k, v := range defaultChangesetHeaders { + retval[k] = v + } + + if be.Op == DeleteOp || be.Op == ReplaceOp || be.Op == MergeOp { + if be.Force || be.Entity.OdataEtag == "" { + retval["If-Match"] = "*" + } else { + retval["If-Match"] = be.Entity.OdataEtag + } + } + + return retval +} + +// generateEntitySubset generates body payload for particular batch entity +func (t *TableBatch) generateEntitySubset(batchEntity *BatchEntity, writer *multipart.Writer) error { + + h := make(textproto.MIMEHeader) + h.Set(headerContentType, "application/http") + h.Set(headerContentTransferEncoding, "binary") + + verb, err := generateVerb(batchEntity.Op) + if err != nil { + return err + } + + genericOpHeadersMap := generateGenericOperationHeaders(batchEntity) + queryPath := t.generateQueryPath(batchEntity.Op, batchEntity.Entity) + uri := t.Table.tsc.client.getEndpoint(tableServiceName, queryPath, nil) + + operationWriter, err := writer.CreatePart(h) + if err != nil { + return err + } + + urlAndVerb := fmt.Sprintf("%s %s HTTP/1.1\r\n", verb, uri) + operationWriter.Write([]byte(urlAndVerb)) + writeHeaders(genericOpHeadersMap, &operationWriter) + operationWriter.Write([]byte("\r\n")) // additional \r\n is needed per changeset separating the "headers" and the body. + + // delete operation doesn't need a body. 
+ if batchEntity.Op != DeleteOp { + //var e Entity = batchEntity.Entity + body, err := json.Marshal(batchEntity.Entity) + if err != nil { + return err + } + operationWriter.Write(body) + } + + return nil +} + +func writeHeaders(h map[string]string, writer *io.Writer) { + // This way it is guaranteed the headers will be written in a sorted order + var keys []string + for k := range h { + keys = append(keys, k) + } + sort.Strings(keys) + for _, k := range keys { + (*writer).Write([]byte(fmt.Sprintf("%s: %s\r\n", k, h[k]))) + } +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/storage/table_entities.go b/vendor/github.com/Azure/azure-sdk-for-go/storage/table_entities.go deleted file mode 100644 index 1758d9f3e..000000000 --- a/vendor/github.com/Azure/azure-sdk-for-go/storage/table_entities.go +++ /dev/null @@ -1,345 +0,0 @@ -package storage - -import ( - "bytes" - "encoding/json" - "fmt" - "io" - "net/http" - "net/url" - "reflect" -) - -// Annotating as secure for gas scanning -/* #nosec */ -const ( - partitionKeyNode = "PartitionKey" - rowKeyNode = "RowKey" - tag = "table" - tagIgnore = "-" - continuationTokenPartitionKeyHeader = "X-Ms-Continuation-Nextpartitionkey" - continuationTokenRowHeader = "X-Ms-Continuation-Nextrowkey" - maxTopParameter = 1000 -) - -type queryTablesResponse struct { - TableName []struct { - TableName string `json:"TableName"` - } `json:"value"` -} - -const ( - tableOperationTypeInsert = iota - tableOperationTypeUpdate = iota - tableOperationTypeMerge = iota - tableOperationTypeInsertOrReplace = iota - tableOperationTypeInsertOrMerge = iota -) - -type tableOperation int - -// TableEntity interface specifies -// the functions needed to support -// marshaling and unmarshaling into -// Azure Tables. The struct must only contain -// simple types because Azure Tables do not -// support hierarchy. -type TableEntity interface { - PartitionKey() string - RowKey() string - SetPartitionKey(string) error - SetRowKey(string) error -} - -// ContinuationToken is an opaque (ie not useful to inspect) -// struct that Get... methods can return if there are more -// entries to be returned than the ones already -// returned. Just pass it to the same function to continue -// receiving the remaining entries. -type ContinuationToken struct { - NextPartitionKey string - NextRowKey string -} - -type getTableEntriesResponse struct { - Elements []map[string]interface{} `json:"value"` -} - -// QueryTableEntities queries the specified table and returns the unmarshaled -// entities of type retType. -// top parameter limits the returned entries up to top. Maximum top -// allowed by Azure API is 1000. In case there are more than top entries to be -// returned the function will return a non nil *ContinuationToken. You can call the -// same function again passing the received ContinuationToken as previousContToken -// parameter in order to get the following entries. The query parameter -// is the odata query. To retrieve all the entries pass the empty string. -// The function returns a pointer to a TableEntity slice, the *ContinuationToken -// if there are more entries to be returned and an error in case something went -// wrong. 
-// -// Example: -// entities, cToken, err = tSvc.QueryTableEntities("table", cToken, reflect.TypeOf(entity), 20, "") -func (c *TableServiceClient) QueryTableEntities(tableName AzureTable, previousContToken *ContinuationToken, retType reflect.Type, top int, query string) ([]TableEntity, *ContinuationToken, error) { - if top > maxTopParameter { - return nil, nil, fmt.Errorf("top accepts at maximum %d elements. Requested %d instead", maxTopParameter, top) - } - - uri := c.client.getEndpoint(tableServiceName, pathForTable(tableName), url.Values{}) - uri += fmt.Sprintf("?$top=%d", top) - if query != "" { - uri += fmt.Sprintf("&$filter=%s", url.QueryEscape(query)) - } - - if previousContToken != nil { - uri += fmt.Sprintf("&NextPartitionKey=%s&NextRowKey=%s", previousContToken.NextPartitionKey, previousContToken.NextRowKey) - } - - headers := c.getStandardHeaders() - - headers["Content-Length"] = "0" - - resp, err := c.client.execInternalJSON(http.MethodGet, uri, headers, nil, c.auth) - - if err != nil { - return nil, nil, err - } - - contToken := extractContinuationTokenFromHeaders(resp.headers) - - defer resp.body.Close() - - if err = checkRespCode(resp.statusCode, []int{http.StatusOK}); err != nil { - return nil, contToken, err - } - - retEntries, err := deserializeEntity(retType, resp.body) - if err != nil { - return nil, contToken, err - } - - return retEntries, contToken, nil -} - -// InsertEntity inserts an entity in the specified table. -// The function fails if there is an entity with the same -// PartitionKey and RowKey in the table. -func (c *TableServiceClient) InsertEntity(table AzureTable, entity TableEntity) error { - if sc, err := c.execTable(table, entity, false, http.MethodPost); err != nil { - return checkRespCode(sc, []int{http.StatusCreated}) - } - - return nil -} - -func (c *TableServiceClient) execTable(table AzureTable, entity TableEntity, specifyKeysInURL bool, method string) (int, error) { - uri := c.client.getEndpoint(tableServiceName, pathForTable(table), url.Values{}) - if specifyKeysInURL { - uri += fmt.Sprintf("(PartitionKey='%s',RowKey='%s')", url.QueryEscape(entity.PartitionKey()), url.QueryEscape(entity.RowKey())) - } - - headers := c.getStandardHeaders() - - var buf bytes.Buffer - - if err := injectPartitionAndRowKeys(entity, &buf); err != nil { - return 0, err - } - - headers["Content-Length"] = fmt.Sprintf("%d", buf.Len()) - - resp, err := c.client.execInternalJSON(method, uri, headers, &buf, c.auth) - - if err != nil { - return 0, err - } - - defer resp.body.Close() - - return resp.statusCode, nil -} - -// UpdateEntity updates the contents of an entity with the -// one passed as parameter. The function fails if there is no entity -// with the same PartitionKey and RowKey in the table. -func (c *TableServiceClient) UpdateEntity(table AzureTable, entity TableEntity) error { - if sc, err := c.execTable(table, entity, true, http.MethodPut); err != nil { - return checkRespCode(sc, []int{http.StatusNoContent}) - } - return nil -} - -// MergeEntity merges the contents of an entity with the -// one passed as parameter. -// The function fails if there is no entity -// with the same PartitionKey and RowKey in the table. -func (c *TableServiceClient) MergeEntity(table AzureTable, entity TableEntity) error { - if sc, err := c.execTable(table, entity, true, "MERGE"); err != nil { - return checkRespCode(sc, []int{http.StatusNoContent}) - } - return nil -} - -// DeleteEntityWithoutCheck deletes the entity matching by -// PartitionKey and RowKey. 
There is no check on IfMatch -// parameter so the entity is always deleted. -// The function fails if there is no entity -// with the same PartitionKey and RowKey in the table. -func (c *TableServiceClient) DeleteEntityWithoutCheck(table AzureTable, entity TableEntity) error { - return c.DeleteEntity(table, entity, "*") -} - -// DeleteEntity deletes the entity matching by -// PartitionKey, RowKey and ifMatch field. -// The function fails if there is no entity -// with the same PartitionKey and RowKey in the table or -// the ifMatch is different. -func (c *TableServiceClient) DeleteEntity(table AzureTable, entity TableEntity, ifMatch string) error { - uri := c.client.getEndpoint(tableServiceName, pathForTable(table), url.Values{}) - uri += fmt.Sprintf("(PartitionKey='%s',RowKey='%s')", url.QueryEscape(entity.PartitionKey()), url.QueryEscape(entity.RowKey())) - - headers := c.getStandardHeaders() - - headers["Content-Length"] = "0" - headers["If-Match"] = ifMatch - - resp, err := c.client.execInternalJSON(http.MethodDelete, uri, headers, nil, c.auth) - - if err != nil { - return err - } - defer resp.body.Close() - - if err := checkRespCode(resp.statusCode, []int{http.StatusNoContent}); err != nil { - return err - } - - return nil -} - -// InsertOrReplaceEntity inserts an entity in the specified table -// or replaced the existing one. -func (c *TableServiceClient) InsertOrReplaceEntity(table AzureTable, entity TableEntity) error { - if sc, err := c.execTable(table, entity, true, http.MethodPut); err != nil { - return checkRespCode(sc, []int{http.StatusNoContent}) - } - return nil -} - -// InsertOrMergeEntity inserts an entity in the specified table -// or merges the existing one. -func (c *TableServiceClient) InsertOrMergeEntity(table AzureTable, entity TableEntity) error { - if sc, err := c.execTable(table, entity, true, "MERGE"); err != nil { - return checkRespCode(sc, []int{http.StatusNoContent}) - } - return nil -} - -func injectPartitionAndRowKeys(entity TableEntity, buf *bytes.Buffer) error { - if err := json.NewEncoder(buf).Encode(entity); err != nil { - return err - } - - dec := make(map[string]interface{}) - if err := json.NewDecoder(buf).Decode(&dec); err != nil { - return err - } - - // Inject PartitionKey and RowKey - dec[partitionKeyNode] = entity.PartitionKey() - dec[rowKeyNode] = entity.RowKey() - - // Remove tagged fields - // The tag is defined in the const section - // This is useful to avoid storing the PartitionKey and RowKey twice. 
- numFields := reflect.ValueOf(entity).Elem().NumField() - for i := 0; i < numFields; i++ { - f := reflect.ValueOf(entity).Elem().Type().Field(i) - - if f.Tag.Get(tag) == tagIgnore { - // we must look for its JSON name in the dictionary - // as the user can rename it using a tag - jsonName := f.Name - if f.Tag.Get("json") != "" { - jsonName = f.Tag.Get("json") - } - delete(dec, jsonName) - } - } - - buf.Reset() - - if err := json.NewEncoder(buf).Encode(&dec); err != nil { - return err - } - - return nil -} - -func deserializeEntity(retType reflect.Type, reader io.Reader) ([]TableEntity, error) { - buf := new(bytes.Buffer) - - var ret getTableEntriesResponse - if err := json.NewDecoder(reader).Decode(&ret); err != nil { - return nil, err - } - - tEntries := make([]TableEntity, len(ret.Elements)) - - for i, entry := range ret.Elements { - - buf.Reset() - if err := json.NewEncoder(buf).Encode(entry); err != nil { - return nil, err - } - - dec := make(map[string]interface{}) - if err := json.NewDecoder(buf).Decode(&dec); err != nil { - return nil, err - } - - var pKey, rKey string - // strip pk and rk - for key, val := range dec { - switch key { - case partitionKeyNode: - pKey = val.(string) - case rowKeyNode: - rKey = val.(string) - } - } - - delete(dec, partitionKeyNode) - delete(dec, rowKeyNode) - - buf.Reset() - if err := json.NewEncoder(buf).Encode(dec); err != nil { - return nil, err - } - - // Create a empty retType instance - tEntries[i] = reflect.New(retType.Elem()).Interface().(TableEntity) - // Popolate it with the values - if err := json.NewDecoder(buf).Decode(&tEntries[i]); err != nil { - return nil, err - } - - // Reset PartitionKey and RowKey - if err := tEntries[i].SetPartitionKey(pKey); err != nil { - return nil, err - } - if err := tEntries[i].SetRowKey(rKey); err != nil { - return nil, err - } - } - - return tEntries, nil -} - -func extractContinuationTokenFromHeaders(h http.Header) *ContinuationToken { - ct := ContinuationToken{h.Get(continuationTokenPartitionKeyHeader), h.Get(continuationTokenRowHeader)} - - if ct.NextPartitionKey != "" && ct.NextRowKey != "" { - return &ct - } - return nil -} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/storage/tableserviceclient.go b/vendor/github.com/Azure/azure-sdk-for-go/storage/tableserviceclient.go new file mode 100644 index 000000000..895dcfded --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/storage/tableserviceclient.go @@ -0,0 +1,190 @@ +package storage + +import ( + "encoding/json" + "fmt" + "io/ioutil" + "net/http" + "net/url" + "strconv" +) + +const ( + headerAccept = "Accept" + headerEtag = "Etag" + headerPrefer = "Prefer" + headerXmsContinuation = "x-ms-Continuation-NextTableName" +) + +// TableServiceClient contains operations for Microsoft Azure Table Storage +// Service. +type TableServiceClient struct { + client Client + auth authentication +} + +// TableOptions includes options for some table operations +type TableOptions struct { + RequestID string +} + +func (options *TableOptions) addToHeaders(h map[string]string) map[string]string { + if options != nil { + h = addToHeaders(h, "x-ms-client-request-id", options.RequestID) + } + return h +} + +// QueryNextLink includes information for getting the next page of +// results in query operations +type QueryNextLink struct { + NextLink *string + ml MetadataLevel +} + +// GetServiceProperties gets the properties of your storage account's table service. 
+// See: https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/get-table-service-properties +func (t *TableServiceClient) GetServiceProperties() (*ServiceProperties, error) { + return t.client.getServiceProperties(tableServiceName, t.auth) +} + +// SetServiceProperties sets the properties of your storage account's table service. +// See: https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/set-table-service-properties +func (t *TableServiceClient) SetServiceProperties(props ServiceProperties) error { + return t.client.setServiceProperties(props, tableServiceName, t.auth) +} + +// GetTableReference returns a Table object for the specified table name. +func (t *TableServiceClient) GetTableReference(name string) *Table { + return &Table{ + tsc: t, + Name: name, + } +} + +// QueryTablesOptions includes options for some table operations +type QueryTablesOptions struct { + Top uint + Filter string + RequestID string +} + +func (options *QueryTablesOptions) getParameters() (url.Values, map[string]string) { + query := url.Values{} + headers := map[string]string{} + if options != nil { + if options.Top > 0 { + query.Add(OdataTop, strconv.FormatUint(uint64(options.Top), 10)) + } + if options.Filter != "" { + query.Add(OdataFilter, options.Filter) + } + headers = addToHeaders(headers, "x-ms-client-request-id", options.RequestID) + } + return query, headers +} + +// QueryTables returns the tables in the storage account. +// You can use query options defined by the OData Protocol specification. +// +// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/query-tables +func (t *TableServiceClient) QueryTables(ml MetadataLevel, options *QueryTablesOptions) (*TableQueryResult, error) { + query, headers := options.getParameters() + uri := t.client.getEndpoint(tableServiceName, tablesURIPath, query) + return t.queryTables(uri, headers, ml) +} + +// NextResults returns the next page of results +// from a QueryTables or a NextResults operation. +// +// See https://docs.microsoft.com/rest/api/storageservices/fileservices/query-tables +// See https://docs.microsoft.com/rest/api/storageservices/fileservices/query-timeout-and-pagination +func (tqr *TableQueryResult) NextResults(options *TableOptions) (*TableQueryResult, error) { + if tqr == nil { + return nil, errNilPreviousResult + } + if tqr.NextLink == nil { + return nil, errNilNextLink + } + headers := options.addToHeaders(map[string]string{}) + + return tqr.tsc.queryTables(*tqr.NextLink, headers, tqr.ml) +} + +// TableQueryResult contains the response from +// QueryTables and QueryTablesNextResults functions. 
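+//
+// A paging sketch (this assumes the MinimalMetadata level defined elsewhere
+// in this package):
+//
+//	result, err := tsc.QueryTables(MinimalMetadata, nil)
+//	for err == nil && result.NextLink != nil {
+//		result, err = result.NextResults(nil)
+//	}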
+type TableQueryResult struct { + OdataMetadata string `json:"odata.metadata"` + Tables []Table `json:"value"` + QueryNextLink + tsc *TableServiceClient +} + +func (t *TableServiceClient) queryTables(uri string, headers map[string]string, ml MetadataLevel) (*TableQueryResult, error) { + if ml == EmptyPayload { + return nil, errEmptyPayload + } + headers = mergeHeaders(headers, t.client.getStandardHeaders()) + headers[headerAccept] = string(ml) + + resp, err := t.client.exec(http.MethodGet, uri, headers, nil, t.auth) + if err != nil { + return nil, err + } + defer resp.body.Close() + + if err := checkRespCode(resp.statusCode, []int{http.StatusOK}); err != nil { + return nil, err + } + + respBody, err := ioutil.ReadAll(resp.body) + if err != nil { + return nil, err + } + var out TableQueryResult + err = json.Unmarshal(respBody, &out) + if err != nil { + return nil, err + } + + for i := range out.Tables { + out.Tables[i].tsc = t + } + out.tsc = t + + nextLink := resp.headers.Get(http.CanonicalHeaderKey(headerXmsContinuation)) + if nextLink == "" { + out.NextLink = nil + } else { + originalURI, err := url.Parse(uri) + if err != nil { + return nil, err + } + v := originalURI.Query() + v.Set(nextTableQueryParameter, nextLink) + newURI := t.client.getEndpoint(tableServiceName, tablesURIPath, v) + out.NextLink = &newURI + out.ml = ml + } + + return &out, nil +} + +func addBodyRelatedHeaders(h map[string]string, length int) map[string]string { + h[headerContentType] = "application/json" + h[headerContentLength] = fmt.Sprintf("%v", length) + h[headerAcceptCharset] = "UTF-8" + return h +} + +func addReturnContentHeaders(h map[string]string, ml MetadataLevel) map[string]string { + if ml != EmptyPayload { + h[headerPrefer] = "return-content" + h[headerAccept] = string(ml) + } else { + h[headerPrefer] = "return-no-content" + // From API version 2015-12-11 onwards, Accept header is required + h[headerAccept] = string(NoMetadata) + } + return h +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/storage/util.go b/vendor/github.com/Azure/azure-sdk-for-go/storage/util.go index 57ca1b6d9..d3ae9d092 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/storage/util.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/storage/util.go @@ -12,9 +12,15 @@ import ( "net/http" "net/url" "reflect" + "strconv" + "strings" "time" ) +var ( + fixedTime = time.Date(2050, time.December, 20, 21, 55, 0, 0, time.FixedZone("GMT", -6)) +) + func (c Client) computeHmac256(message string) string { h := hmac.New(sha256.New, c.accountKey) h.Write([]byte(message)) @@ -76,10 +82,118 @@ func headersFromStruct(v interface{}) map[string]string { value := reflect.ValueOf(v) for i := 0; i < value.NumField(); i++ { key := value.Type().Field(i).Tag.Get("header") - val := value.Field(i).String() - if key != "" && val != "" { - headers[key] = val + if key != "" { + reflectedValue := reflect.Indirect(value.Field(i)) + var val string + if reflectedValue.IsValid() { + switch reflectedValue.Type() { + case reflect.TypeOf(fixedTime): + val = timeRfc1123Formatted(reflectedValue.Interface().(time.Time)) + case reflect.TypeOf(uint64(0)), reflect.TypeOf(uint(0)): + val = strconv.FormatUint(reflectedValue.Uint(), 10) + case reflect.TypeOf(int(0)): + val = strconv.FormatInt(reflectedValue.Int(), 10) + default: + val = reflectedValue.String() + } + } + if val != "" { + headers[key] = val + } } } return headers } + +// merges extraHeaders into headers and returns headers +func mergeHeaders(headers, extraHeaders map[string]string) map[string]string { + 
for k, v := range extraHeaders { + headers[k] = v + } + return headers +} + +func addToHeaders(h map[string]string, key, value string) map[string]string { + if value != "" { + h[key] = value + } + return h +} + +func addTimeToHeaders(h map[string]string, key string, value *time.Time) map[string]string { + if value != nil { + h = addToHeaders(h, key, timeRfc1123Formatted(*value)) + } + return h +} + +func addTimeout(params url.Values, timeout uint) url.Values { + if timeout > 0 { + params.Add("timeout", fmt.Sprintf("%v", timeout)) + } + return params +} + +func addSnapshot(params url.Values, snapshot *time.Time) url.Values { + if snapshot != nil { + params.Add("snapshot", snapshot.Format("2006-01-02T15:04:05.0000000Z")) + } + return params +} + +func getTimeFromHeaders(h http.Header, key string) (*time.Time, error) { + var out time.Time + var err error + outStr := h.Get(key) + if outStr != "" { + out, err = time.Parse(time.RFC1123, outStr) + if err != nil { + return nil, err + } + } + return &out, nil +} + +// TimeRFC1123 is an alias for time.Time needed for custom Unmarshalling +type TimeRFC1123 time.Time + +// UnmarshalXML is a custom unmarshaller that overrides the default time unmarshal which uses a different time layout. +func (t *TimeRFC1123) UnmarshalXML(d *xml.Decoder, start xml.StartElement) error { + var value string + d.DecodeElement(&value, &start) + parse, err := time.Parse(time.RFC1123, value) + if err != nil { + return err + } + *t = TimeRFC1123(parse) + return nil +} + +// returns a map of custom metadata values from the specified HTTP header +func getMetadataFromHeaders(header http.Header) map[string]string { + metadata := make(map[string]string) + for k, v := range header { + // Can't trust CanonicalHeaderKey() to munge case + // reliably. "_" is allowed in identifiers: + // https://msdn.microsoft.com/en-us/library/azure/dd179414.aspx + // https://msdn.microsoft.com/library/aa664670(VS.71).aspx + // http://tools.ietf.org/html/rfc7230#section-3.2 + // ...but "_" is considered invalid by + // CanonicalMIMEHeaderKey in + // https://golang.org/src/net/textproto/reader.go?s=14615:14659#L542 + // so k can be "X-Ms-Meta-Lol" or "x-ms-meta-lol_rofl". 
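+	// For example, a response header "X-Ms-Meta-Author: jane" ends up as
+	// metadata["author"] = "jane" once the prefix is stripped below.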
+ k = strings.ToLower(k) + if len(v) == 0 || !strings.HasPrefix(k, strings.ToLower(userDefinedMetadataHeaderPrefix)) { + continue + } + // metadata["lol"] = content of the last X-Ms-Meta-Lol header + k = k[len(userDefinedMetadataHeaderPrefix):] + metadata[k] = v[len(v)-1] + } + + if len(metadata) == 0 { + return nil + } + + return metadata +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/storage/util_1.7.go b/vendor/github.com/Azure/azure-sdk-for-go/storage/util_1.7.go new file mode 100644 index 000000000..345bb28f2 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/storage/util_1.7.go @@ -0,0 +1,12 @@ +// +build !go1.8 + +package storage + +import ( + "io" + "net/http" +) + +func setContentLengthFromLimitedReader(req *http.Request, lr *io.LimitedReader) { + req.ContentLength = lr.N +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/storage/util_1.8.go b/vendor/github.com/Azure/azure-sdk-for-go/storage/util_1.8.go new file mode 100644 index 000000000..ed8b77919 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/storage/util_1.8.go @@ -0,0 +1,18 @@ +// +build go1.8 + +package storage + +import ( + "io" + "io/ioutil" + "net/http" +) + +func setContentLengthFromLimitedReader(req *http.Request, lr *io.LimitedReader) { + req.ContentLength = lr.N + snapshot := *lr + req.GetBody = func() (io.ReadCloser, error) { + r := snapshot + return ioutil.NopCloser(&r), nil + } +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/storage/version.go b/vendor/github.com/Azure/azure-sdk-for-go/storage/version.go index d52ebfdb2..a23fff1e2 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/storage/version.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/storage/version.go @@ -1,5 +1,5 @@ package storage var ( - sdkVersion = "8.0.0-beta" + sdkVersion = "10.0.2" ) diff --git a/vendor/github.com/Azure/go-autorest/LICENSE b/vendor/github.com/Azure/go-autorest/LICENSE new file mode 100644 index 000000000..b9d6a27ea --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/LICENSE @@ -0,0 +1,191 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. 
+ + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory,
+      whether in tort (including negligence), contract, or otherwise,
+      unless required by applicable law (such as deliberate and grossly
+      negligent acts) or agreed to in writing, shall any Contributor be
+      liable to You for damages, including any direct, indirect, special,
+      incidental, or consequential damages of any character arising as a
+      result of this License or out of the use or inability to use the
+      Work (including but not limited to damages for loss of goodwill,
+      work stoppage, computer failure or malfunction, or any and all
+      other commercial damages or losses), even if such Contributor
+      has been advised of the possibility of such damages.
+
+   9. Accepting Warranty or Additional Liability. While redistributing
+      the Work or Derivative Works thereof, You may choose to offer,
+      and charge a fee for, acceptance of support, warranty, indemnity,
+      or other liability obligations and/or rights consistent with this
+      License. However, in accepting such obligations, You may act only
+      on Your own behalf and on Your sole responsibility, not on behalf
+      of any other Contributor, and only if You agree to indemnify,
+      defend, and hold each Contributor harmless for any liability
+      incurred by, or claims asserted against, such Contributor by reason
+      of your accepting any such warranty or additional liability.
+
+   END OF TERMS AND CONDITIONS
+
+   Copyright 2015 Microsoft Corporation
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
diff --git a/vendor/github.com/Azure/go-autorest/autorest/adal/README.md b/vendor/github.com/Azure/go-autorest/autorest/adal/README.md
new file mode 100644
index 000000000..a17cf98c6
--- /dev/null
+++ b/vendor/github.com/Azure/go-autorest/autorest/adal/README.md
@@ -0,0 +1,253 @@
+# Azure Active Directory library for Go
+
+This project provides a stand-alone Azure Active Directory library for Go. The code was extracted
+from the [go-autorest](https://github.com/Azure/go-autorest/) project, which is used as a base for
+[azure-sdk-for-go](https://github.com/Azure/azure-sdk-for-go).
+
+
+## Installation
+
+```
+go get -u github.com/Azure/go-autorest/autorest/adal
+```
+
+## Usage
+
+An Active Directory application is required in order to use this library. An application can be registered in the [Azure Portal](https://portal.azure.com/) by following these [guidelines](https://docs.microsoft.com/en-us/azure/active-directory/develop/active-directory-integrating-applications), or by using the [Azure CLI](https://github.com/Azure/azure-cli).
+
+### Register an Azure AD Application with secret
+
+
+1. Register a new application with a `secret` credential
+
+   ```
+   az ad app create \
+      --display-name example-app \
+      --homepage https://example-app/home \
+      --identifier-uris https://example-app/app \
+      --password secret
+   ```
+
+2. Create a service principal using the `Application ID` from the previous step
+
+   ```
+   az ad sp create --id "Application ID"
+   ```
+
+   * Replace `Application ID` with `appId` from step 1.
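+
+If you later need to look up the generated `appId` (it is used throughout the
+sections below), one way is the query below; this is a sketch assuming the
+`example-app` display name from step 1:
+
+```
+az ad app list --display-name example-app --query "[].appId" --output tsv
+```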
+
+### Register an Azure AD Application with certificate
+
+1. Create a private key
+
+   ```
+   openssl genrsa -out "example-app.key" 2048
+   ```
+
+2. Create the certificate
+
+   ```
+   openssl req -new -key "example-app.key" -subj "/CN=example-app" -out "example-app.csr"
+   openssl x509 -req -in "example-app.csr" -signkey "example-app.key" -out "example-app.crt" -days 10000
+   ```
+
+3. Create the PKCS12 version of the certificate, which also contains the private key
+
+   ```
+   openssl pkcs12 -export -out "example-app.pfx" -inkey "example-app.key" -in "example-app.crt" -passout pass:
+
+   ```
+
+4. Register a new application with the certificate content from `example-app.crt`
+
+   ```
+   certificateContents="$(tail -n+2 "example-app.crt" | head -n-1)"
+
+   az ad app create \
+      --display-name example-app \
+      --homepage https://example-app/home \
+      --identifier-uris https://example-app/app \
+      --key-usage Verify --end-date 2018-01-01 \
+      --key-value "${certificateContents}"
+   ```
+
+5. Create a service principal using the `Application ID` from the previous step
+
+   ```
+   az ad sp create --id "APPLICATION_ID"
+   ```
+
+   * Replace `APPLICATION_ID` with `appId` from step 4.
+
+
+### Grant the necessary permissions
+
+Azure relies on a Role-Based Access Control (RBAC) model to manage the access to resources at a fine-grained
+level. There is a set of [pre-defined roles](https://docs.microsoft.com/en-us/azure/active-directory/role-based-access-built-in-roles)
+which can be assigned to a service principal of an Azure AD application depending on your needs.
+
+```
+az role assignment create --assignee "SERVICE_PRINCIPAL_ID" --role "ROLE_NAME"
+```
+
+* Replace the `SERVICE_PRINCIPAL_ID` with the `appId` from the previous step.
+* Replace the `ROLE_NAME` with a role name of your choice.
+
+It is also possible to define custom role definitions.
+
+```
+az role definition create --role-definition role-definition.json
+```
+
+* Check [custom roles](https://docs.microsoft.com/en-us/azure/active-directory/role-based-access-control-custom-roles) for more details regarding the content of the `role-definition.json` file.
+
+
+### Acquire Access Token
+
+The common configuration used by all flows:
+
+```Go
+const activeDirectoryEndpoint = "https://login.microsoftonline.com/"
+tenantID := "TENANT_ID"
+oauthConfig, err := adal.NewOAuthConfig(activeDirectoryEndpoint, tenantID)
+
+applicationID := "APPLICATION_ID"
+
+callback := func(token adal.Token) error {
+	// This is called after the token is acquired
+	return nil
+}
+
+// The resource for which the token is acquired
+resource := "https://management.core.windows.net/"
+```
+
+* Replace the `TENANT_ID` with your tenant ID.
+* Replace the `APPLICATION_ID` with the value from the previous section.
+
+#### Client Credentials
+
+```Go
+applicationSecret := "APPLICATION_SECRET"
+
+spt, err := adal.NewServicePrincipalToken(
+	oauthConfig,
+	applicationID,
+	applicationSecret,
+	resource,
+	callbacks...)
+if err != nil {
+	return nil, err
+}
+
+// Acquire a new access token
+err = spt.Refresh()
+if err == nil {
+	token := spt.Token
+}
+```
+
+* Replace the `APPLICATION_SECRET` with the `password` value from the previous section.
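+
+Once `spt` holds a token, it can be attached to outgoing requests. A minimal
+sketch, assuming the `spt` from the example above and an `*http.Request` named
+`req` (both illustrative):
+
+```Go
+// Refresh only when the token has expired or is about to expire.
+if err := spt.EnsureFresh(); err != nil {
+	return nil, err
+}
+req.Header.Set("Authorization", "Bearer "+spt.Token.AccessToken)
+```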
+
+#### Client Certificate
+
+```Go
+certificatePath := "./example-app.pfx"
+
+certData, err := ioutil.ReadFile(certificatePath)
+if err != nil {
+	return nil, fmt.Errorf("failed to read the certificate file (%s): %v", certificatePath, err)
+}
+
+// Get the certificate and private key from pfx file
+certificate, rsaPrivateKey, err := decodePkcs12(certData, "")
+if err != nil {
+	return nil, fmt.Errorf("failed to decode pkcs12 certificate while creating spt: %v", err)
+}
+
+spt, err := adal.NewServicePrincipalTokenFromCertificate(
+	oauthConfig,
+	applicationID,
+	certificate,
+	rsaPrivateKey,
+	resource,
+	callbacks...)
+
+// Acquire a new access token
+err = spt.Refresh()
+if err == nil {
+	token := spt.Token
+}
+```
+
+* Update the certificate path to point to the example-app.pfx file created in the previous section.
+
+
+#### Device Code
+
+```Go
+oauthClient := &http.Client{}
+
+// Acquire the device code
+deviceCode, err := adal.InitiateDeviceAuth(
+	oauthClient,
+	oauthConfig,
+	applicationID,
+	resource)
+if err != nil {
+	return nil, fmt.Errorf("Failed to start device auth flow: %s", err)
+}
+
+// Display the authentication message
+fmt.Println(*deviceCode.Message)
+
+// Wait here until the user is authenticated
+token, err := adal.WaitForUserCompletion(oauthClient, deviceCode)
+if err != nil {
+	return nil, fmt.Errorf("Failed to finish device auth flow: %s", err)
+}
+
+spt, err := adal.NewServicePrincipalTokenFromManualToken(
+	oauthConfig,
+	applicationID,
+	resource,
+	*token,
+	callbacks...)
+
+if err == nil {
+	token := spt.Token
+}
+```
+
+### Command Line Tool
+
+A command line tool is available in `cmd/adal.go` that can acquire a token for a given resource. It supports all flows mentioned above.
+
+```
+adal -h
+
+Usage of ./adal:
+  -applicationId string
+        application id
+  -certificatePath string
+        path to pk12/PFX application certificate
+  -mode string
+        authentication mode (device, secret, cert, refresh) (default "device")
+  -resource string
+        resource for which the token is requested
+  -secret string
+        application secret
+  -tenantId string
+        tenant id
+  -tokenCachePath string
+        location of OAuth token cache (default "/home/cgc/.adal/accessToken.json")
+```
+
+Example: acquire a token for `https://management.core.windows.net/` using the device code flow:
+
+```
+adal -mode device \
+    -applicationId "APPLICATION_ID" \
+    -tenantId "TENANT_ID" \
+    -resource https://management.core.windows.net/
+
+```
diff --git a/vendor/github.com/Azure/go-autorest/autorest/adal/config.go b/vendor/github.com/Azure/go-autorest/autorest/adal/config.go
new file mode 100644
index 000000000..12375e0e4
--- /dev/null
+++ b/vendor/github.com/Azure/go-autorest/autorest/adal/config.go
@@ -0,0 +1,51 @@
+package adal
+
+import (
+	"fmt"
+	"net/url"
+)
+
+const (
+	activeDirectoryAPIVersion = "1.0"
+)
+
+// OAuthConfig represents the endpoints needed
+// in OAuth operations
+type OAuthConfig struct {
+	AuthorityEndpoint  url.URL
+	AuthorizeEndpoint  url.URL
+	TokenEndpoint      url.URL
+	DeviceCodeEndpoint url.URL
+}
+
+// NewOAuthConfig returns an OAuthConfig with tenant specific urls
+func NewOAuthConfig(activeDirectoryEndpoint, tenantID string) (*OAuthConfig, error) {
+	const activeDirectoryEndpointTemplate = "%s/oauth2/%s?api-version=%s"
+	u, err := url.Parse(activeDirectoryEndpoint)
+	if err != nil {
+		return nil, err
+	}
+	authorityURL, err := u.Parse(tenantID)
+	if err != nil {
+		return nil, err
+	}
+	authorizeURL, err := u.Parse(fmt.Sprintf(activeDirectoryEndpointTemplate, tenantID, "authorize",
activeDirectoryAPIVersion)) + if err != nil { + return nil, err + } + tokenURL, err := u.Parse(fmt.Sprintf(activeDirectoryEndpointTemplate, tenantID, "token", activeDirectoryAPIVersion)) + if err != nil { + return nil, err + } + deviceCodeURL, err := u.Parse(fmt.Sprintf(activeDirectoryEndpointTemplate, tenantID, "devicecode", activeDirectoryAPIVersion)) + if err != nil { + return nil, err + } + + return &OAuthConfig{ + AuthorityEndpoint: *authorityURL, + AuthorizeEndpoint: *authorizeURL, + TokenEndpoint: *tokenURL, + DeviceCodeEndpoint: *deviceCodeURL, + }, nil +} diff --git a/vendor/github.com/Azure/go-autorest/autorest/adal/devicetoken.go b/vendor/github.com/Azure/go-autorest/autorest/adal/devicetoken.go new file mode 100644 index 000000000..6c511f8c8 --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/autorest/adal/devicetoken.go @@ -0,0 +1,228 @@ +package adal + +/* + This file is largely based on rjw57/oauth2device's code, with the follow differences: + * scope -> resource, and only allow a single one + * receive "Message" in the DeviceCode struct and show it to users as the prompt + * azure-xplat-cli has the following behavior that this emulates: + - does not send client_secret during the token exchange + - sends resource again in the token exchange request +*/ + +import ( + "encoding/json" + "fmt" + "io/ioutil" + "net/http" + "net/url" + "strings" + "time" +) + +const ( + logPrefix = "autorest/adal/devicetoken:" +) + +var ( + // ErrDeviceGeneric represents an unknown error from the token endpoint when using device flow + ErrDeviceGeneric = fmt.Errorf("%s Error while retrieving OAuth token: Unknown Error", logPrefix) + + // ErrDeviceAccessDenied represents an access denied error from the token endpoint when using device flow + ErrDeviceAccessDenied = fmt.Errorf("%s Error while retrieving OAuth token: Access Denied", logPrefix) + + // ErrDeviceAuthorizationPending represents the server waiting on the user to complete the device flow + ErrDeviceAuthorizationPending = fmt.Errorf("%s Error while retrieving OAuth token: Authorization Pending", logPrefix) + + // ErrDeviceCodeExpired represents the server timing out and expiring the code during device flow + ErrDeviceCodeExpired = fmt.Errorf("%s Error while retrieving OAuth token: Code Expired", logPrefix) + + // ErrDeviceSlowDown represents the service telling us we're polling too often during device flow + ErrDeviceSlowDown = fmt.Errorf("%s Error while retrieving OAuth token: Slow Down", logPrefix) + + // ErrDeviceCodeEmpty represents an empty device code from the device endpoint while using device flow + ErrDeviceCodeEmpty = fmt.Errorf("%s Error while retrieving device code: Device Code Empty", logPrefix) + + // ErrOAuthTokenEmpty represents an empty OAuth token from the token endpoint when using device flow + ErrOAuthTokenEmpty = fmt.Errorf("%s Error while retrieving OAuth token: Token Empty", logPrefix) + + errCodeSendingFails = "Error occurred while sending request for Device Authorization Code" + errCodeHandlingFails = "Error occurred while handling response from the Device Endpoint" + errTokenSendingFails = "Error occurred while sending request with device code for a token" + errTokenHandlingFails = "Error occurred while handling response from the Token Endpoint (during device flow)" + errStatusNotOK = "Error HTTP status != 200" +) + +// DeviceCode is the object returned by the device auth endpoint +// It contains information to instruct the user to complete the auth flow +type DeviceCode struct { + DeviceCode *string 
`json:"device_code,omitempty"` + UserCode *string `json:"user_code,omitempty"` + VerificationURL *string `json:"verification_url,omitempty"` + ExpiresIn *int64 `json:"expires_in,string,omitempty"` + Interval *int64 `json:"interval,string,omitempty"` + + Message *string `json:"message"` // Azure specific + Resource string // store the following, stored when initiating, used when exchanging + OAuthConfig OAuthConfig + ClientID string +} + +// TokenError is the object returned by the token exchange endpoint +// when something is amiss +type TokenError struct { + Error *string `json:"error,omitempty"` + ErrorCodes []int `json:"error_codes,omitempty"` + ErrorDescription *string `json:"error_description,omitempty"` + Timestamp *string `json:"timestamp,omitempty"` + TraceID *string `json:"trace_id,omitempty"` +} + +// DeviceToken is the object return by the token exchange endpoint +// It can either look like a Token or an ErrorToken, so put both here +// and check for presence of "Error" to know if we are in error state +type deviceToken struct { + Token + TokenError +} + +// InitiateDeviceAuth initiates a device auth flow. It returns a DeviceCode +// that can be used with CheckForUserCompletion or WaitForUserCompletion. +func InitiateDeviceAuth(sender Sender, oauthConfig OAuthConfig, clientID, resource string) (*DeviceCode, error) { + v := url.Values{ + "client_id": []string{clientID}, + "resource": []string{resource}, + } + + s := v.Encode() + body := ioutil.NopCloser(strings.NewReader(s)) + + req, err := http.NewRequest(http.MethodPost, oauthConfig.DeviceCodeEndpoint.String(), body) + if err != nil { + return nil, fmt.Errorf("%s %s: %s", logPrefix, errCodeSendingFails, err.Error()) + } + + req.ContentLength = int64(len(s)) + req.Header.Set(contentType, mimeTypeFormPost) + resp, err := sender.Do(req) + if err != nil { + return nil, fmt.Errorf("%s %s: %s", logPrefix, errCodeSendingFails, err.Error()) + } + defer resp.Body.Close() + + rb, err := ioutil.ReadAll(resp.Body) + if err != nil { + return nil, fmt.Errorf("%s %s: %s", logPrefix, errCodeHandlingFails, err.Error()) + } + + if resp.StatusCode != http.StatusOK { + return nil, fmt.Errorf("%s %s: %s", logPrefix, errCodeHandlingFails, errStatusNotOK) + } + + if len(strings.Trim(string(rb), " ")) == 0 { + return nil, ErrDeviceCodeEmpty + } + + var code DeviceCode + err = json.Unmarshal(rb, &code) + if err != nil { + return nil, fmt.Errorf("%s %s: %s", logPrefix, errCodeHandlingFails, err.Error()) + } + + code.ClientID = clientID + code.Resource = resource + code.OAuthConfig = oauthConfig + + return &code, nil +} + +// CheckForUserCompletion takes a DeviceCode and checks with the Azure AD OAuth endpoint +// to see if the device flow has: been completed, timed out, or otherwise failed +func CheckForUserCompletion(sender Sender, code *DeviceCode) (*Token, error) { + v := url.Values{ + "client_id": []string{code.ClientID}, + "code": []string{*code.DeviceCode}, + "grant_type": []string{OAuthGrantTypeDeviceCode}, + "resource": []string{code.Resource}, + } + + s := v.Encode() + body := ioutil.NopCloser(strings.NewReader(s)) + + req, err := http.NewRequest(http.MethodPost, code.OAuthConfig.TokenEndpoint.String(), body) + if err != nil { + return nil, fmt.Errorf("%s %s: %s", logPrefix, errTokenSendingFails, err.Error()) + } + + req.ContentLength = int64(len(s)) + req.Header.Set(contentType, mimeTypeFormPost) + resp, err := sender.Do(req) + if err != nil { + return nil, fmt.Errorf("%s %s: %s", logPrefix, errTokenSendingFails, err.Error()) + } + defer 
resp.Body.Close() + + rb, err := ioutil.ReadAll(resp.Body) + if err != nil { + return nil, fmt.Errorf("%s %s: %s", logPrefix, errTokenHandlingFails, err.Error()) + } + + if resp.StatusCode != http.StatusOK && len(strings.Trim(string(rb), " ")) == 0 { + return nil, fmt.Errorf("%s %s: %s", logPrefix, errTokenHandlingFails, errStatusNotOK) + } + if len(strings.Trim(string(rb), " ")) == 0 { + return nil, ErrOAuthTokenEmpty + } + + var token deviceToken + err = json.Unmarshal(rb, &token) + if err != nil { + return nil, fmt.Errorf("%s %s: %s", logPrefix, errTokenHandlingFails, err.Error()) + } + + if token.Error == nil { + return &token.Token, nil + } + + switch *token.Error { + case "authorization_pending": + return nil, ErrDeviceAuthorizationPending + case "slow_down": + return nil, ErrDeviceSlowDown + case "access_denied": + return nil, ErrDeviceAccessDenied + case "code_expired": + return nil, ErrDeviceCodeExpired + default: + return nil, ErrDeviceGeneric + } +} + +// WaitForUserCompletion calls CheckForUserCompletion repeatedly until a token is granted or an error state occurs. +// This prevents the user from looping and checking against 'ErrDeviceAuthorizationPending'. +func WaitForUserCompletion(sender Sender, code *DeviceCode) (*Token, error) { + intervalDuration := time.Duration(*code.Interval) * time.Second + waitDuration := intervalDuration + + for { + token, err := CheckForUserCompletion(sender, code) + + if err == nil { + return token, nil + } + + switch err { + case ErrDeviceSlowDown: + waitDuration += waitDuration + case ErrDeviceAuthorizationPending: + // noop + default: // everything else is "fatal" to us + return nil, err + } + + if waitDuration > (intervalDuration * 3) { + return nil, fmt.Errorf("%s Error waiting for user to complete device flow. Server told us to slow_down too much", logPrefix) + } + + time.Sleep(waitDuration) + } +} diff --git a/vendor/github.com/Azure/go-autorest/autorest/adal/msi.go b/vendor/github.com/Azure/go-autorest/autorest/adal/msi.go new file mode 100644 index 000000000..e87911e83 --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/autorest/adal/msi.go @@ -0,0 +1,6 @@ +// +build !windows + +package adal + +// msiPath is the path to the MSI Extension settings file (to discover the endpoint) +var msiPath = "/var/lib/waagent/ManagedIdentity-Settings" diff --git a/vendor/github.com/Azure/go-autorest/autorest/adal/msi_windows.go b/vendor/github.com/Azure/go-autorest/autorest/adal/msi_windows.go new file mode 100644 index 000000000..80f800432 --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/autorest/adal/msi_windows.go @@ -0,0 +1,11 @@ +// +build windows + +package adal + +import ( + "os" + "strings" +) + +// msiPath is the path to the MSI Extension settings file (to discover the endpoint) +var msiPath = strings.Join([]string{os.Getenv("SystemDrive"), "WindowsAzure/Config/ManagedIdentity-Settings"}, "/") diff --git a/vendor/github.com/Azure/go-autorest/autorest/adal/persist.go b/vendor/github.com/Azure/go-autorest/autorest/adal/persist.go new file mode 100644 index 000000000..73711c667 --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/autorest/adal/persist.go @@ -0,0 +1,59 @@ +package adal + +import ( + "encoding/json" + "fmt" + "io/ioutil" + "os" + "path/filepath" +) + +// LoadToken restores a Token object from a file located at 'path'. 
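+// A load/save round trip might look like this (illustrative sketch):
+//
+//	if err := adal.SaveToken(path, 0600, token); err != nil { /* handle */ }
+//	restored, err := adal.LoadToken(path)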
+func LoadToken(path string) (*Token, error) { + file, err := os.Open(path) + if err != nil { + return nil, fmt.Errorf("failed to open file (%s) while loading token: %v", path, err) + } + defer file.Close() + + var token Token + + dec := json.NewDecoder(file) + if err = dec.Decode(&token); err != nil { + return nil, fmt.Errorf("failed to decode contents of file (%s) into Token representation: %v", path, err) + } + return &token, nil +} + +// SaveToken persists an OAuth token at the given location on disk. +// It moves the new file into place so it can safely be used to replace an existing file +// that may be accessed by multiple processes. +func SaveToken(path string, mode os.FileMode, token Token) error { + dir := filepath.Dir(path) + err := os.MkdirAll(dir, os.ModePerm) + if err != nil { + return fmt.Errorf("failed to create directory (%s) to store token in: %v", dir, err) + } + + newFile, err := ioutil.TempFile(dir, "token") + if err != nil { + return fmt.Errorf("failed to create the temp file to write the token: %v", err) + } + tempPath := newFile.Name() + + if err := json.NewEncoder(newFile).Encode(token); err != nil { + return fmt.Errorf("failed to encode token to file (%s) while saving token: %v", tempPath, err) + } + if err := newFile.Close(); err != nil { + return fmt.Errorf("failed to close temp file %s: %v", tempPath, err) + } + + // Atomic replace to avoid multi-writer file corruptions + if err := os.Rename(tempPath, path); err != nil { + return fmt.Errorf("failed to move temporary token to desired output location. src=%s dst=%s: %v", tempPath, path, err) + } + if err := os.Chmod(path, mode); err != nil { + return fmt.Errorf("failed to chmod the token file %s: %v", path, err) + } + return nil +} diff --git a/vendor/github.com/Azure/go-autorest/autorest/adal/sender.go b/vendor/github.com/Azure/go-autorest/autorest/adal/sender.go new file mode 100644 index 000000000..7928c971a --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/autorest/adal/sender.go @@ -0,0 +1,46 @@ +package adal + +import ( + "net/http" +) + +const ( + contentType = "Content-Type" + mimeTypeFormPost = "application/x-www-form-urlencoded" +) + +// Sender is the interface that wraps the Do method to send HTTP requests. +// +// The standard http.Client conforms to this interface. +type Sender interface { + Do(*http.Request) (*http.Response, error) +} + +// SenderFunc is a method that implements the Sender interface. +type SenderFunc func(*http.Request) (*http.Response, error) + +// Do implements the Sender interface on SenderFunc. +func (sf SenderFunc) Do(r *http.Request) (*http.Response, error) { + return sf(r) +} + +// SendDecorator takes and possibly decorates, by wrapping, a Sender. Decorators may affect the +// http.Request and pass it along or, first, pass the http.Request along then react to the +// http.Response result. +type SendDecorator func(Sender) Sender + +// CreateSender creates, decorates, and returns, as a Sender, the default http.Client. +func CreateSender(decorators ...SendDecorator) Sender { + return DecorateSender(&http.Client{}, decorators...) +} + +// DecorateSender accepts a Sender and a, possibly empty, set of SendDecorators, which it applies to +// the Sender. Decorators are applied in the order received, but their effect upon the request +// depends on whether they are a pre-decorator (change the http.Request and then pass it along) or a +// post-decorator (pass the http.Request along and react to the results in http.Response).
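Before the DecorateSender implementation that follows, a brief sketch of the persistence helpers above in use; the path and file mode are arbitrary choices:

// Illustrative only. Round-trips a token through disk using SaveToken/LoadToken.
package main

import (
	"fmt"

	"github.com/Azure/go-autorest/autorest/adal"
)

func main() {
	tok := adal.Token{AccessToken: "opaque", ExpiresOn: "1506600000"}

	// SaveToken writes to a temp file and renames it into place, so a
	// concurrent reader sees either the old token or the new one, never a
	// partial write. 0600 keeps the secret private to the owner.
	if err := adal.SaveToken("/tmp/azure-token.json", 0600, tok); err != nil {
		panic(err)
	}

	restored, err := adal.LoadToken("/tmp/azure-token.json")
	if err != nil {
		panic(err)
	}
	fmt.Println(restored.AccessToken == tok.AccessToken) // true
}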
+func DecorateSender(s Sender, decorators ...SendDecorator) Sender { + for _, decorate := range decorators { + s = decorate(s) + } + return s +} diff --git a/vendor/github.com/Azure/go-autorest/autorest/adal/token.go b/vendor/github.com/Azure/go-autorest/autorest/adal/token.go new file mode 100644 index 000000000..2ac8c3c22 --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/autorest/adal/token.go @@ -0,0 +1,413 @@ +package adal + +import ( + "crypto/rand" + "crypto/rsa" + "crypto/sha1" + "crypto/x509" + "encoding/base64" + "encoding/json" + "fmt" + "io/ioutil" + "net/http" + "net/url" + "strconv" + "strings" + "time" + + "github.com/Azure/go-autorest/autorest/date" + "github.com/dgrijalva/jwt-go" +) + +const ( + defaultRefresh = 5 * time.Minute + + // OAuthGrantTypeDeviceCode is the "grant_type" identifier used in device flow + OAuthGrantTypeDeviceCode = "device_code" + + // OAuthGrantTypeClientCredentials is the "grant_type" identifier used in credential flows + OAuthGrantTypeClientCredentials = "client_credentials" + + // OAuthGrantTypeRefreshToken is the "grant_type" identifier used in refresh token flows + OAuthGrantTypeRefreshToken = "refresh_token" + + // metadataHeader is the header required by MSI extension + metadataHeader = "Metadata" +) + +// OAuthTokenProvider is an interface which should be implemented by an access token retriever +type OAuthTokenProvider interface { + OAuthToken() string +} + +// Refresher is an interface for token refresh functionality +type Refresher interface { + Refresh() error + RefreshExchange(resource string) error + EnsureFresh() error +} + +// TokenRefreshCallback is the type representing callbacks that will be called after +// a successful token refresh +type TokenRefreshCallback func(Token) error + +// Token encapsulates the access token used to authorize Azure requests. +type Token struct { + AccessToken string `json:"access_token"` + RefreshToken string `json:"refresh_token"` + + ExpiresIn string `json:"expires_in"` + ExpiresOn string `json:"expires_on"` + NotBefore string `json:"not_before"` + + Resource string `json:"resource"` + Type string `json:"token_type"` +} + +// Expires returns the time.Time when the Token expires. +func (t Token) Expires() time.Time { + s, err := strconv.Atoi(t.ExpiresOn) + if err != nil { + s = -3600 + } + + expiration := date.NewUnixTimeFromSeconds(float64(s)) + + return time.Time(expiration).UTC() +} + +// IsExpired returns true if the Token is expired, false otherwise. +func (t Token) IsExpired() bool { + return t.WillExpireIn(0) +} + +// WillExpireIn returns true if the Token will expire after the passed time.Duration interval +// from now, false otherwise. +func (t Token) WillExpireIn(d time.Duration) bool { + return !t.Expires().After(time.Now().Add(d)) +} + +//OAuthToken return the current access token +func (t *Token) OAuthToken() string { + return t.AccessToken +} + +// ServicePrincipalNoSecret represents a secret type that contains no secret +// meaning it is not valid for fetching a fresh token. 
This is used by Manual +type ServicePrincipalNoSecret struct { +} + +// SetAuthenticationValues is a method of the interface ServicePrincipalSecret +// It only returns an error for the ServicePrincipalNoSecret type +func (noSecret *ServicePrincipalNoSecret) SetAuthenticationValues(spt *ServicePrincipalToken, v *url.Values) error { + return fmt.Errorf("Manually created ServicePrincipalToken does not contain secret material to retrieve a new access token") +} + +// ServicePrincipalSecret is an interface that allows various secret mechanism to fill the form +// that is submitted when acquiring an oAuth token. +type ServicePrincipalSecret interface { + SetAuthenticationValues(spt *ServicePrincipalToken, values *url.Values) error +} + +// ServicePrincipalTokenSecret implements ServicePrincipalSecret for client_secret type authorization. +type ServicePrincipalTokenSecret struct { + ClientSecret string +} + +// SetAuthenticationValues is a method of the interface ServicePrincipalSecret. +// It will populate the form submitted during oAuth Token Acquisition using the client_secret. +func (tokenSecret *ServicePrincipalTokenSecret) SetAuthenticationValues(spt *ServicePrincipalToken, v *url.Values) error { + v.Set("client_secret", tokenSecret.ClientSecret) + return nil +} + +// ServicePrincipalCertificateSecret implements ServicePrincipalSecret for generic RSA cert auth with signed JWTs. +type ServicePrincipalCertificateSecret struct { + Certificate *x509.Certificate + PrivateKey *rsa.PrivateKey +} + +// ServicePrincipalMSISecret implements ServicePrincipalSecret for machines running the MSI Extension. +type ServicePrincipalMSISecret struct { +} + +// SetAuthenticationValues is a method of the interface ServicePrincipalSecret. +func (msiSecret *ServicePrincipalMSISecret) SetAuthenticationValues(spt *ServicePrincipalToken, v *url.Values) error { + return nil +} + +// SignJwt returns the JWT signed with the certificate's private key. +func (secret *ServicePrincipalCertificateSecret) SignJwt(spt *ServicePrincipalToken) (string, error) { + hasher := sha1.New() + _, err := hasher.Write(secret.Certificate.Raw) + if err != nil { + return "", err + } + + thumbprint := base64.URLEncoding.EncodeToString(hasher.Sum(nil)) + + // The jti (JWT ID) claim provides a unique identifier for the JWT. + jti := make([]byte, 20) + _, err = rand.Read(jti) + if err != nil { + return "", err + } + + token := jwt.New(jwt.SigningMethodRS256) + token.Header["x5t"] = thumbprint + token.Claims = jwt.MapClaims{ + "aud": spt.oauthConfig.TokenEndpoint.String(), + "iss": spt.clientID, + "sub": spt.clientID, + "jti": base64.URLEncoding.EncodeToString(jti), + "nbf": time.Now().Unix(), + "exp": time.Now().Add(time.Hour * 24).Unix(), + } + + signedString, err := token.SignedString(secret.PrivateKey) + return signedString, err +} + +// SetAuthenticationValues is a method of the interface ServicePrincipalSecret. +// It will populate the form submitted during oAuth Token Acquisition using a JWT signed with a certificate. +func (secret *ServicePrincipalCertificateSecret) SetAuthenticationValues(spt *ServicePrincipalToken, v *url.Values) error { + jwt, err := secret.SignJwt(spt) + if err != nil { + return err + } + + v.Set("client_assertion", jwt) + v.Set("client_assertion_type", "urn:ietf:params:oauth:client-assertion-type:jwt-bearer") + return nil +} + +// ServicePrincipalToken encapsulates a Token created for a Service Principal. 
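ServicePrincipalSecret is the extension point here: any type that can fill in the token-request form can drive a refresh. A hypothetical custom secret, sketched before the ServicePrincipalToken definition below, under the assumption that the caller supplies its own lookup function (envSecret and lookup are illustrative names, not part of this patch):

// Illustrative only: a custom type satisfying the ServicePrincipalSecret
// interface defined above.
package adalext

import (
	"net/url"

	"github.com/Azure/go-autorest/autorest/adal"
)

// envSecret fetches the client secret from an external source at refresh time.
type envSecret struct {
	lookup func() (string, error)
}

// SetAuthenticationValues populates the token-request form, exactly as the
// built-in ServicePrincipalTokenSecret does with its static ClientSecret.
func (s *envSecret) SetAuthenticationValues(spt *adal.ServicePrincipalToken, v *url.Values) error {
	secret, err := s.lookup()
	if err != nil {
		return err
	}
	v.Set("client_secret", secret)
	return nil
}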
+type ServicePrincipalToken struct { + Token + + secret ServicePrincipalSecret + oauthConfig OAuthConfig + clientID string + resource string + autoRefresh bool + refreshWithin time.Duration + sender Sender + + refreshCallbacks []TokenRefreshCallback +} + +// NewServicePrincipalTokenWithSecret create a ServicePrincipalToken using the supplied ServicePrincipalSecret implementation. +func NewServicePrincipalTokenWithSecret(oauthConfig OAuthConfig, id string, resource string, secret ServicePrincipalSecret, callbacks ...TokenRefreshCallback) (*ServicePrincipalToken, error) { + spt := &ServicePrincipalToken{ + oauthConfig: oauthConfig, + secret: secret, + clientID: id, + resource: resource, + autoRefresh: true, + refreshWithin: defaultRefresh, + sender: &http.Client{}, + refreshCallbacks: callbacks, + } + return spt, nil +} + +// NewServicePrincipalTokenFromManualToken creates a ServicePrincipalToken using the supplied token +func NewServicePrincipalTokenFromManualToken(oauthConfig OAuthConfig, clientID string, resource string, token Token, callbacks ...TokenRefreshCallback) (*ServicePrincipalToken, error) { + spt, err := NewServicePrincipalTokenWithSecret( + oauthConfig, + clientID, + resource, + &ServicePrincipalNoSecret{}, + callbacks...) + if err != nil { + return nil, err + } + + spt.Token = token + + return spt, nil +} + +// NewServicePrincipalToken creates a ServicePrincipalToken from the supplied Service Principal +// credentials scoped to the named resource. +func NewServicePrincipalToken(oauthConfig OAuthConfig, clientID string, secret string, resource string, callbacks ...TokenRefreshCallback) (*ServicePrincipalToken, error) { + return NewServicePrincipalTokenWithSecret( + oauthConfig, + clientID, + resource, + &ServicePrincipalTokenSecret{ + ClientSecret: secret, + }, + callbacks..., + ) +} + +// NewServicePrincipalTokenFromCertificate create a ServicePrincipalToken from the supplied pkcs12 bytes. +func NewServicePrincipalTokenFromCertificate(oauthConfig OAuthConfig, clientID string, certificate *x509.Certificate, privateKey *rsa.PrivateKey, resource string, callbacks ...TokenRefreshCallback) (*ServicePrincipalToken, error) { + return NewServicePrincipalTokenWithSecret( + oauthConfig, + clientID, + resource, + &ServicePrincipalCertificateSecret{ + PrivateKey: privateKey, + Certificate: certificate, + }, + callbacks..., + ) +} + +// GetMSIVMEndpoint gets the MSI endpoint on Virtual Machines. +func GetMSIVMEndpoint() (string, error) { + return getMSIVMEndpoint(msiPath) +} + +func getMSIVMEndpoint(path string) (string, error) { + // Read MSI settings + bytes, err := ioutil.ReadFile(path) + if err != nil { + return "", err + } + msiSettings := struct { + URL string `json:"url"` + }{} + err = json.Unmarshal(bytes, &msiSettings) + if err != nil { + return "", err + } + + return msiSettings.URL, nil +} + +// NewServicePrincipalTokenFromMSI creates a ServicePrincipalToken via the MSI VM Extension. 
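Before the MSI constructor below, a sketch of the most common path, a client-secret service principal; the IDs and secret are placeholders, and NewOAuthConfig comes from this package's config.go:

// Illustrative only; all credentials are placeholders.
package main

import (
	"fmt"

	"github.com/Azure/go-autorest/autorest/adal"
)

func main() {
	oauthConfig, err := adal.NewOAuthConfig("https://login.microsoftonline.com/", "my-tenant-id")
	if err != nil {
		panic(err)
	}

	spt, err := adal.NewServicePrincipalToken(
		*oauthConfig,
		"my-client-id",
		"my-client-secret",
		"https://management.azure.com/",
	)
	if err != nil {
		panic(err)
	}

	// EnsureFresh performs the initial acquisition too: a zero-valued Token
	// has no parsable ExpiresOn, so Expires() reports it as already expired
	// and the first call triggers a refresh.
	if err := spt.EnsureFresh(); err != nil {
		panic(err)
	}
	fmt.Println("token valid until:", spt.Expires())
}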
+func NewServicePrincipalTokenFromMSI(msiEndpoint, resource string, callbacks ...TokenRefreshCallback) (*ServicePrincipalToken, error) { + // We set the oauth config token endpoint to be MSI's endpoint + msiEndpointURL, err := url.Parse(msiEndpoint) + if err != nil { + return nil, err + } + + oauthConfig, err := NewOAuthConfig(msiEndpointURL.String(), "") + if err != nil { + return nil, err + } + + spt := &ServicePrincipalToken{ + oauthConfig: *oauthConfig, + secret: &ServicePrincipalMSISecret{}, + resource: resource, + autoRefresh: true, + refreshWithin: defaultRefresh, + sender: &http.Client{}, + refreshCallbacks: callbacks, + } + + return spt, nil +} + +// EnsureFresh will refresh the token if it will expire within the refresh window (as set by +// RefreshWithin) and autoRefresh flag is on. +func (spt *ServicePrincipalToken) EnsureFresh() error { + if spt.autoRefresh && spt.WillExpireIn(spt.refreshWithin) { + return spt.Refresh() + } + return nil +} + +// InvokeRefreshCallbacks calls any TokenRefreshCallbacks that were added to the SPT during initialization +func (spt *ServicePrincipalToken) InvokeRefreshCallbacks(token Token) error { + if spt.refreshCallbacks != nil { + for _, callback := range spt.refreshCallbacks { + err := callback(spt.Token) + if err != nil { + return fmt.Errorf("adal: TokenRefreshCallback handler failed. Error = '%v'", err) + } + } + } + return nil +} + +// Refresh obtains a fresh token for the Service Principal. +func (spt *ServicePrincipalToken) Refresh() error { + return spt.refreshInternal(spt.resource) +} + +// RefreshExchange refreshes the token, but for a different resource. +func (spt *ServicePrincipalToken) RefreshExchange(resource string) error { + return spt.refreshInternal(resource) +} + +func (spt *ServicePrincipalToken) refreshInternal(resource string) error { + v := url.Values{} + v.Set("client_id", spt.clientID) + v.Set("resource", resource) + + if spt.RefreshToken != "" { + v.Set("grant_type", OAuthGrantTypeRefreshToken) + v.Set("refresh_token", spt.RefreshToken) + } else { + v.Set("grant_type", OAuthGrantTypeClientCredentials) + err := spt.secret.SetAuthenticationValues(spt, &v) + if err != nil { + return err + } + } + + s := v.Encode() + body := ioutil.NopCloser(strings.NewReader(s)) + req, err := http.NewRequest(http.MethodPost, spt.oauthConfig.TokenEndpoint.String(), body) + if err != nil { + return fmt.Errorf("adal: Failed to build the refresh request. Error = '%v'", err) + } + + req.ContentLength = int64(len(s)) + req.Header.Set(contentType, mimeTypeFormPost) + if _, ok := spt.secret.(*ServicePrincipalMSISecret); ok { + req.Header.Set(metadataHeader, "true") + } + resp, err := spt.sender.Do(req) + if err != nil { + return fmt.Errorf("adal: Failed to execute the refresh request. Error = '%v'", err) + } + + defer resp.Body.Close() + rb, err := ioutil.ReadAll(resp.Body) + + if resp.StatusCode != http.StatusOK { + if err != nil { + return fmt.Errorf("adal: Refresh request failed. Status Code = '%d'. Failed reading response body", resp.StatusCode) + } + return fmt.Errorf("adal: Refresh request failed. Status Code = '%d'. Response body: %s", resp.StatusCode, string(rb)) + } + + if err != nil { + return fmt.Errorf("adal: Failed to read a new service principal token during refresh. 
Error = '%v'", err) + } + if len(strings.Trim(string(rb), " ")) == 0 { + return fmt.Errorf("adal: Empty service principal token received during refresh") + } + var token Token + err = json.Unmarshal(rb, &token) + if err != nil { + return fmt.Errorf("adal: Failed to unmarshal the service principal token during refresh. Error = '%v' JSON = '%s'", err, string(rb)) + } + + spt.Token = token + + return spt.InvokeRefreshCallbacks(token) +} + +// SetAutoRefresh enables or disables automatic refreshing of stale tokens. +func (spt *ServicePrincipalToken) SetAutoRefresh(autoRefresh bool) { + spt.autoRefresh = autoRefresh +} + +// SetRefreshWithin sets the interval within which, if the token will expire, EnsureFresh will +// refresh the token. +func (spt *ServicePrincipalToken) SetRefreshWithin(d time.Duration) { + spt.refreshWithin = d + return +} + +// SetSender sets the http.Client used when obtaining the Service Principal token. An +// undecorated http.Client is used by default. +func (spt *ServicePrincipalToken) SetSender(s Sender) { spt.sender = s } diff --git a/vendor/github.com/Azure/go-autorest/autorest/authorization.go b/vendor/github.com/Azure/go-autorest/autorest/authorization.go new file mode 100644 index 000000000..314ed7876 --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/autorest/authorization.go @@ -0,0 +1,167 @@ +package autorest + +import ( + "fmt" + "net/http" + "net/url" + "strings" + + "github.com/Azure/go-autorest/autorest/adal" +) + +const ( + bearerChallengeHeader = "Www-Authenticate" + bearer = "Bearer" + tenantID = "tenantID" +) + +// Authorizer is the interface that provides a PrepareDecorator used to supply request +// authorization. Most often, the Authorizer decorator runs last so it has access to the full +// state of the formed HTTP request. +type Authorizer interface { + WithAuthorization() PrepareDecorator +} + +// NullAuthorizer implements a default, "do nothing" Authorizer. +type NullAuthorizer struct{} + +// WithAuthorization returns a PrepareDecorator that does nothing. +func (na NullAuthorizer) WithAuthorization() PrepareDecorator { + return WithNothing() +} + +// BearerAuthorizer implements bearer authorization. +type BearerAuthorizer struct { + tokenProvider adal.OAuthTokenProvider +} + +// NewBearerAuthorizer creates a BearerAuthorizer using the given token provider +func NewBearerAuthorizer(tp adal.OAuthTokenProvider) *BearerAuthorizer { + return &BearerAuthorizer{tokenProvider: tp} +} + +func (ba *BearerAuthorizer) withBearerAuthorization() PrepareDecorator { + return WithHeader(headerAuthorization, fmt.Sprintf("Bearer %s", ba.tokenProvider.OAuthToken())) +} + +// WithAuthorization returns a PrepareDecorator that adds an HTTP Authorization header whose +// value is "Bearer " followed by the token. +// +// By default, the token will be automatically refreshed through the Refresher interface. +func (ba *BearerAuthorizer) WithAuthorization() PrepareDecorator { + return func(p Preparer) Preparer { + return PreparerFunc(func(r *http.Request) (*http.Request, error) { + refresher, ok := ba.tokenProvider.(adal.Refresher) + if ok { + err := refresher.EnsureFresh() + if err != nil { + return r, NewErrorWithError(err, "azure.BearerAuthorizer", "WithAuthorization", nil, + "Failed to refresh the Token for request to %s", r.URL) + } + } + return (ba.withBearerAuthorization()(p)).Prepare(r) + }) + } +} + +// BearerAuthorizerCallbackFunc is the authentication callback signature.
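A sketch of the BearerAuthorizer above applied to a request, ahead of the callback-based variant below; autorest.Prepare, WithBaseURL, and AsGet live in this package's preparer.go (added elsewhere in this patch), and the resource URL is a placeholder:

// Illustrative only. Attaches a service principal token to an outgoing request.
package main

import (
	"net/http"

	"github.com/Azure/go-autorest/autorest"
	"github.com/Azure/go-autorest/autorest/adal"
)

func authorize(spt *adal.ServicePrincipalToken) (*http.Request, error) {
	ba := autorest.NewBearerAuthorizer(spt)

	// WithAuthorization refreshes the token if needed (ServicePrincipalToken
	// implements adal.Refresher) and then injects the Authorization header.
	return autorest.Prepare(&http.Request{},
		autorest.WithBaseURL("https://management.azure.com/"),
		ba.WithAuthorization())
}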
+type BearerAuthorizerCallbackFunc func(tenantID, resource string) (*BearerAuthorizer, error) + +// BearerAuthorizerCallback implements bearer authorization via a callback. +type BearerAuthorizerCallback struct { + sender Sender + callback BearerAuthorizerCallbackFunc +} + +// NewBearerAuthorizerCallback creates a bearer authorization callback. The callback +// is invoked when the HTTP request is submitted. +func NewBearerAuthorizerCallback(sender Sender, callback BearerAuthorizerCallbackFunc) *BearerAuthorizerCallback { + if sender == nil { + sender = &http.Client{} + } + return &BearerAuthorizerCallback{sender: sender, callback: callback} +} + +// WithAuthorization returns a PrepareDecorator that adds an HTTP Authorization header whose value +// is "Bearer " followed by the token. The BearerAuthorizer is obtained via a user-supplied callback. +// +// By default, the token will be automatically refreshed through the Refresher interface. +func (bacb *BearerAuthorizerCallback) WithAuthorization() PrepareDecorator { + return func(p Preparer) Preparer { + return PreparerFunc(func(r *http.Request) (*http.Request, error) { + // make a copy of the request and remove the body as it's not + // required and avoids us having to create a copy of it. + rCopy := *r + removeRequestBody(&rCopy) + + resp, err := bacb.sender.Do(&rCopy) + if err == nil && resp.StatusCode == 401 { + defer resp.Body.Close() + if hasBearerChallenge(resp) { + bc, err := newBearerChallenge(resp) + if err != nil { + return r, err + } + if bacb.callback != nil { + ba, err := bacb.callback(bc.values[tenantID], bc.values["resource"]) + if err != nil { + return r, err + } + return ba.WithAuthorization()(p).Prepare(r) + } + } + } + return r, err + }) + } +} + +// returns true if the HTTP response contains a bearer challenge +func hasBearerChallenge(resp *http.Response) bool { + authHeader := resp.Header.Get(bearerChallengeHeader) + if len(authHeader) == 0 || strings.Index(authHeader, bearer) < 0 { + return false + } + return true +} + +type bearerChallenge struct { + values map[string]string +} + +func newBearerChallenge(resp *http.Response) (bc bearerChallenge, err error) { + challenge := strings.TrimSpace(resp.Header.Get(bearerChallengeHeader)) + trimmedChallenge := challenge[len(bearer)+1:] + + // challenge is a set of key=value pairs that are comma delimited + pairs := strings.Split(trimmedChallenge, ",") + if len(pairs) < 1 { + err = fmt.Errorf("challenge '%s' contains no pairs", challenge) + return bc, err + } + + bc.values = make(map[string]string) + for i := range pairs { + trimmedPair := strings.TrimSpace(pairs[i]) + pair := strings.Split(trimmedPair, "=") + if len(pair) == 2 { + // remove the enclosing quotes + key := strings.Trim(pair[0], "\"") + value := strings.Trim(pair[1], "\"") + + switch key { + case "authorization", "authorization_uri": + // strip the tenant ID from the authorization URL + asURL, err := url.Parse(value) + if err != nil { + return bc, err + } + bc.values[tenantID] = asURL.Path[1:] + default: + bc.values[key] = value + } + } + } + + return bc, err +} diff --git a/vendor/github.com/Azure/go-autorest/autorest/autorest.go b/vendor/github.com/Azure/go-autorest/autorest/autorest.go new file mode 100644 index 000000000..51f1c4bbc --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/autorest/autorest.go @@ -0,0 +1,115 @@ +/* +Package autorest implements an HTTP request pipeline suitable for use across multiple go-routines +and provides the shared routines relied on by AutoRest (see 
https://github.com/Azure/autorest/) +generated Go code. + +The package breaks sending and responding to HTTP requests into three phases: Preparing, Sending, +and Responding. A typical pattern is: + + req, err := Prepare(&http.Request{}, + token.WithAuthorization()) + + resp, err := Send(req, + WithLogging(logger), + DoErrorIfStatusCode(http.StatusInternalServerError), + DoCloseIfError(), + DoRetryForAttempts(5, time.Second)) + + err = Respond(resp, + ByDiscardingBody(), + ByClosing()) + +Each phase relies on decorators to modify and / or manage processing. Decorators may first modify +and then pass the data along, pass the data first and then modify the result, or wrap themselves +around passing the data (such as a logger might do). Decorators run in the order provided. For +example, the following: + + req, err := Prepare(&http.Request{}, + WithBaseURL("https://microsoft.com/"), + WithPath("a"), + WithPath("b"), + WithPath("c")) + +will set the URL to: + + https://microsoft.com/a/b/c + +Preparers and Responders may be shared and re-used (assuming the underlying decorators support +sharing and re-use). Performant use is obtained by creating one or more Preparers and Responders +shared among multiple go-routines, and a single Sender shared among multiple sending go-routines, +all bound together by means of input / output channels. + +Decorators hold their passed state within a closure (such as the path components in the example +above). Be careful to share Preparers and Responders only in a context where such held state +applies. For example, it may not make sense to share a Preparer that applies a query string from a +fixed set of values. Similarly, sharing a Responder that reads the response body into a passed +struct (e.g., ByUnmarshallingJson) is likely incorrect. + +Lastly, the Swagger specification (https://swagger.io) that drives AutoRest +(https://github.com/Azure/autorest/) precisely defines two date forms: date and date-time. The +github.com/Azure/go-autorest/autorest/date package provides time.Time derivations to ensure +correct parsing and formatting. + +Errors raised by autorest objects and methods will conform to the autorest.Error interface. + +See the included examples for more detail. For details on the suggested use of this package by +generated clients, see the Client described below. +*/ +package autorest + +import ( + "net/http" + "time" +) + +const ( + // HeaderLocation specifies the HTTP Location header. + HeaderLocation = "Location" + + // HeaderRetryAfter specifies the HTTP Retry-After header. + HeaderRetryAfter = "Retry-After" +) + +// ResponseHasStatusCode returns true if the status code in the HTTP Response is in the passed set +// and false otherwise. +func ResponseHasStatusCode(resp *http.Response, codes ...int) bool { + return containsInt(codes, resp.StatusCode) +} + +// GetLocation retrieves the URL from the Location header of the passed response. +func GetLocation(resp *http.Response) string { + return resp.Header.Get(HeaderLocation) +} + +// GetRetryAfter extracts the retry delay from the Retry-After header of the passed response. If +// the header is absent or is malformed, it will return the supplied default delay time.Duration. 
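Before GetRetryAfter's implementation below, a sketch of how these helpers combine when deciding whether to poll a long-running operation again; the 30-second fallback is an arbitrary choice:

// Illustrative only: deciding whether and when to poll again.
package main

import (
	"net/http"
	"time"

	"github.com/Azure/go-autorest/autorest"
)

func nextPoll(resp *http.Response) (time.Duration, bool) {
	// 202 Accepted and 201 Created are the typical codes for an in-flight
	// long-running operation.
	if !autorest.ResponseHasStatusCode(resp, http.StatusAccepted, http.StatusCreated) {
		return 0, false
	}
	// Honor Retry-After when the service sends it; fall back to 30s otherwise.
	return autorest.GetRetryAfter(resp, 30*time.Second), true
}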
+func GetRetryAfter(resp *http.Response, defaultDelay time.Duration) time.Duration { + retry := resp.Header.Get(HeaderRetryAfter) + if retry == "" { + return defaultDelay + } + + d, err := time.ParseDuration(retry + "s") + if err != nil { + return defaultDelay + } + + return d +} + +// NewPollingRequest allocates and returns a new http.Request to poll for the passed response. +func NewPollingRequest(resp *http.Response, cancel <-chan struct{}) (*http.Request, error) { + location := GetLocation(resp) + if location == "" { + return nil, NewErrorWithResponse("autorest", "NewPollingRequest", resp, "Location header missing from response that requires polling") + } + + req, err := Prepare(&http.Request{Cancel: cancel}, + AsGet(), + WithBaseURL(location)) + if err != nil { + return nil, NewErrorWithError(err, "autorest", "NewPollingRequest", nil, "Failure creating poll request to %s", location) + } + + return req, nil +} diff --git a/vendor/github.com/Azure/go-autorest/autorest/azure/async.go b/vendor/github.com/Azure/go-autorest/autorest/azure/async.go new file mode 100644 index 000000000..332a8909d --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/autorest/azure/async.go @@ -0,0 +1,302 @@ +package azure + +import ( + "bytes" + "fmt" + "io/ioutil" + "net/http" + "strings" + "time" + + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/date" +) + +const ( + headerAsyncOperation = "Azure-AsyncOperation" +) + +const ( + operationInProgress string = "InProgress" + operationCanceled string = "Canceled" + operationFailed string = "Failed" + operationSucceeded string = "Succeeded" +) + +// DoPollForAsynchronous returns a SendDecorator that polls if the http.Response is for an Azure +// long-running operation. It will delay between requests for the duration specified in the +// RetryAfter header or, if the header is absent, the passed delay. Polling may be canceled by +// closing the optional channel on the http.Request. +func DoPollForAsynchronous(delay time.Duration) autorest.SendDecorator { + return func(s autorest.Sender) autorest.Sender { + return autorest.SenderFunc(func(r *http.Request) (resp *http.Response, err error) { + resp, err = s.Do(r) + if err != nil { + return resp, err + } + pollingCodes := []int{http.StatusAccepted, http.StatusCreated, http.StatusOK} + if !autorest.ResponseHasStatusCode(resp, pollingCodes...) { + return resp, nil + } + + ps := pollingState{} + for err == nil { + err = updatePollingState(resp, &ps) + if err != nil { + break + } + if ps.hasTerminated() { + if !ps.hasSucceeded() { + err = ps + } + break + } + + r, err = newPollingRequest(resp, ps) + if err != nil { + return resp, err + } + + delay = autorest.GetRetryAfter(resp, delay) + resp, err = autorest.SendWithSender(s, r, + autorest.AfterDelay(delay)) + } + + return resp, err + }) + } +} + +func getAsyncOperation(resp *http.Response) string { + return resp.Header.Get(http.CanonicalHeaderKey(headerAsyncOperation)) +} + +func hasSucceeded(state string) bool { + return state == operationSucceeded +} + +func hasTerminated(state string) bool { + switch state { + case operationCanceled, operationFailed, operationSucceeded: + return true + default: + return false + } +} + +func hasFailed(state string) bool { + return state == operationFailed +} + +type provisioningTracker interface { + state() string + hasSucceeded() bool + hasTerminated() bool +} + +type operationResource struct { + // Note: + // The specification states services should return the "id" field. 
However, some return it as + // "operationId". + ID string `json:"id"` + OperationID string `json:"operationId"` + Name string `json:"name"` + Status string `json:"status"` + Properties map[string]interface{} `json:"properties"` + OperationError ServiceError `json:"error"` + StartTime date.Time `json:"startTime"` + EndTime date.Time `json:"endTime"` + PercentComplete float64 `json:"percentComplete"` +} + +func (or operationResource) state() string { + return or.Status +} + +func (or operationResource) hasSucceeded() bool { + return hasSucceeded(or.state()) +} + +func (or operationResource) hasTerminated() bool { + return hasTerminated(or.state()) +} + +type provisioningProperties struct { + ProvisioningState string `json:"provisioningState"` +} + +type provisioningStatus struct { + Properties provisioningProperties `json:"properties,omitempty"` + ProvisioningError ServiceError `json:"error,omitempty"` +} + +func (ps provisioningStatus) state() string { + return ps.Properties.ProvisioningState +} + +func (ps provisioningStatus) hasSucceeded() bool { + return hasSucceeded(ps.state()) +} + +func (ps provisioningStatus) hasTerminated() bool { + return hasTerminated(ps.state()) +} + +func (ps provisioningStatus) hasProvisioningError() bool { + return ps.ProvisioningError != ServiceError{} +} + +type pollingResponseFormat string + +const ( + usesOperationResponse pollingResponseFormat = "OperationResponse" + usesProvisioningStatus pollingResponseFormat = "ProvisioningStatus" + formatIsUnknown pollingResponseFormat = "" +) + +type pollingState struct { + responseFormat pollingResponseFormat + uri string + state string + code string + message string +} + +func (ps pollingState) hasSucceeded() bool { + return hasSucceeded(ps.state) +} + +func (ps pollingState) hasTerminated() bool { + return hasTerminated(ps.state) +} + +func (ps pollingState) hasFailed() bool { + return hasFailed(ps.state) +} + +func (ps pollingState) Error() string { + return fmt.Sprintf("Long running operation terminated with status '%s': Code=%q Message=%q", ps.state, ps.code, ps.message) +} + +// updatePollingState maps the operation status -- retrieved from either a provisioningState +// field, the status field of an OperationResource, or inferred from the HTTP status code -- +// into well-known states. Since the process begins from the initial request, the state +// always comes either from the provisioningState returned or is inferred from the HTTP +// status code. Subsequent requests will read an Azure OperationResource object if the +// service initially returned the Azure-AsyncOperation header. The responseFormat field notes +// the expected response format. +func updatePollingState(resp *http.Response, ps *pollingState) error { + // Determine the response shape + // -- The first response will always be a provisioningStatus response; only the polling requests, + // depending on the header returned, may be something else.
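+	// Both shapes satisfy the provisioningTracker interface defined above, so the
+	// remainder of this function can interrogate state(), hasSucceeded(), and
+	// hasTerminated() without caring which wire format the service chose.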
+ var pt provisioningTracker + if ps.responseFormat == usesOperationResponse { + pt = &operationResource{} + } else { + pt = &provisioningStatus{} + } + + // If this is the first request (that is, the polling response shape is unknown), determine how + // to poll and what to expect + if ps.responseFormat == formatIsUnknown { + req := resp.Request + if req == nil { + return autorest.NewError("azure", "updatePollingState", "Azure Polling Error - Original HTTP request is missing") + } + + // Prefer the Azure-AsyncOperation header + ps.uri = getAsyncOperation(resp) + if ps.uri != "" { + ps.responseFormat = usesOperationResponse + } else { + ps.responseFormat = usesProvisioningStatus + } + + // Else, use the Location header + if ps.uri == "" { + ps.uri = autorest.GetLocation(resp) + } + + // Lastly, requests against an existing resource, use the last request URI + if ps.uri == "" { + m := strings.ToUpper(req.Method) + if m == http.MethodPatch || m == http.MethodPut || m == http.MethodGet { + ps.uri = req.URL.String() + } + } + } + + // Read and interpret the response (saving the Body in case no polling is necessary) + b := &bytes.Buffer{} + err := autorest.Respond(resp, + autorest.ByCopying(b), + autorest.ByUnmarshallingJSON(pt), + autorest.ByClosing()) + resp.Body = ioutil.NopCloser(b) + if err != nil { + return err + } + + // Interpret the results + // -- Terminal states apply regardless + // -- Unknown states are per-service inprogress states + // -- Otherwise, infer state from HTTP status code + if pt.hasTerminated() { + ps.state = pt.state() + } else if pt.state() != "" { + ps.state = operationInProgress + } else { + switch resp.StatusCode { + case http.StatusAccepted: + ps.state = operationInProgress + + case http.StatusNoContent, http.StatusCreated, http.StatusOK: + ps.state = operationSucceeded + + default: + ps.state = operationFailed + } + } + + if ps.state == operationInProgress && ps.uri == "" { + return autorest.NewError("azure", "updatePollingState", "Azure Polling Error - Unable to obtain polling URI for %s %s", resp.Request.Method, resp.Request.URL) + } + + // For failed operation, check for error code and message in + // -- Operation resource + // -- Response + // -- Otherwise, Unknown + if ps.hasFailed() { + if ps.responseFormat == usesOperationResponse { + or := pt.(*operationResource) + ps.code = or.OperationError.Code + ps.message = or.OperationError.Message + } else { + p := pt.(*provisioningStatus) + if p.hasProvisioningError() { + ps.code = p.ProvisioningError.Code + ps.message = p.ProvisioningError.Message + } else { + ps.code = "Unknown" + ps.message = "None" + } + } + } + return nil +} + +func newPollingRequest(resp *http.Response, ps pollingState) (*http.Request, error) { + req := resp.Request + if req == nil { + return nil, autorest.NewError("azure", "newPollingRequest", "Azure Polling Error - Original HTTP request is missing") + } + + reqPoll, err := autorest.Prepare(&http.Request{Cancel: req.Cancel}, + autorest.AsGet(), + autorest.WithBaseURL(ps.uri)) + if err != nil { + return nil, autorest.NewErrorWithError(err, "azure", "newPollingRequest", nil, "Failure creating poll request to %s", ps.uri) + } + + return reqPoll, nil +} diff --git a/vendor/github.com/Azure/go-autorest/autorest/azure/azure.go b/vendor/github.com/Azure/go-autorest/autorest/azure/azure.go new file mode 100644 index 000000000..3f4d13421 --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/autorest/azure/azure.go @@ -0,0 +1,180 @@ +/* +Package azure provides Azure-specific implementations used 
with AutoRest. + +See the included examples for more detail. +*/ +package azure + +import ( + "encoding/json" + "fmt" + "io/ioutil" + "net/http" + "strconv" + + "github.com/Azure/go-autorest/autorest" +) + +const ( + // HeaderClientID is the Azure extension header to set a user-specified request ID. + HeaderClientID = "x-ms-client-request-id" + + // HeaderReturnClientID is the Azure extension header to set if the user-specified request ID + // should be included in the response. + HeaderReturnClientID = "x-ms-return-client-request-id" + + // HeaderRequestID is the Azure extension header of the service generated request ID returned + // in the response. + HeaderRequestID = "x-ms-request-id" +) + +// ServiceError encapsulates the error response from an Azure service. +type ServiceError struct { + Code string `json:"code"` + Message string `json:"message"` + Details *[]interface{} `json:"details"` +} + +func (se ServiceError) Error() string { + if se.Details != nil { + d, err := json.Marshal(*(se.Details)) + if err != nil { + return fmt.Sprintf("Code=%q Message=%q Details=%v", se.Code, se.Message, *se.Details) + } + return fmt.Sprintf("Code=%q Message=%q Details=%v", se.Code, se.Message, string(d)) + } + return fmt.Sprintf("Code=%q Message=%q", se.Code, se.Message) +} + +// RequestError describes an error response returned by Azure service. +type RequestError struct { + autorest.DetailedError + + // The error returned by the Azure service. + ServiceError *ServiceError `json:"error"` + + // The request id (from the x-ms-request-id-header) of the request. + RequestID string +} + +// Error returns a human-friendly error message from service error. +func (e RequestError) Error() string { + return fmt.Sprintf("autorest/azure: Service returned an error. Status=%v %v", + e.StatusCode, e.ServiceError) +} + +// IsAzureError returns true if the passed error is an Azure Service error; false otherwise. +func IsAzureError(e error) bool { + _, ok := e.(*RequestError) + return ok +} + +// NewErrorWithError creates a new Error conforming object from the +// passed packageType, method, statusCode of the given resp (UndefinedStatusCode +// if resp is nil), message, and original error. message is treated as a format +// string to which the optional args apply. +func NewErrorWithError(original error, packageType string, method string, resp *http.Response, message string, args ...interface{}) RequestError { + if v, ok := original.(*RequestError); ok { + return *v + } + + statusCode := autorest.UndefinedStatusCode + if resp != nil { + statusCode = resp.StatusCode + } + return RequestError{ + DetailedError: autorest.DetailedError{ + Original: original, + PackageType: packageType, + Method: method, + StatusCode: statusCode, + Message: fmt.Sprintf(message, args...), + }, + } +} + +// WithReturningClientID returns a PrepareDecorator that adds an HTTP extension header of +// x-ms-client-request-id whose value is the passed, undecorated UUID (e.g., +// "0F39878C-5F76-4DB8-A25D-61D2C193C3CA"). It also sets the x-ms-return-client-request-id +// header to true such that UUID accompanies the http.Response. 
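The two error types above nest rather than wrap, so callers typically branch on IsAzureError; a small sketch of surfacing the service payload from a failed call, placed ahead of the client-ID decorators that follow:

// Illustrative only: distinguishing Azure service errors from transport errors.
package main

import (
	"fmt"

	"github.com/Azure/go-autorest/autorest/azure"
)

func describe(err error) string {
	// IsAzureError reports whether err carries an Azure service error body,
	// which makes the type assertion below safe.
	if azure.IsAzureError(err) {
		re := err.(*azure.RequestError)
		return fmt.Sprintf("azure request %s failed: %v", re.RequestID, re.ServiceError)
	}
	return fmt.Sprintf("transport-level failure: %v", err)
}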
+func WithReturningClientID(uuid string) autorest.PrepareDecorator { + preparer := autorest.CreatePreparer( + WithClientID(uuid), + WithReturnClientID(true)) + + return func(p autorest.Preparer) autorest.Preparer { + return autorest.PreparerFunc(func(r *http.Request) (*http.Request, error) { + r, err := p.Prepare(r) + if err != nil { + return r, err + } + return preparer.Prepare(r) + }) + } +} + +// WithClientID returns a PrepareDecorator that adds an HTTP extension header of +// x-ms-client-request-id whose value is passed, undecorated UUID (e.g., +// "0F39878C-5F76-4DB8-A25D-61D2C193C3CA"). +func WithClientID(uuid string) autorest.PrepareDecorator { + return autorest.WithHeader(HeaderClientID, uuid) +} + +// WithReturnClientID returns a PrepareDecorator that adds an HTTP extension header of +// x-ms-return-client-request-id whose boolean value indicates if the value of the +// x-ms-client-request-id header should be included in the http.Response. +func WithReturnClientID(b bool) autorest.PrepareDecorator { + return autorest.WithHeader(HeaderReturnClientID, strconv.FormatBool(b)) +} + +// ExtractClientID extracts the client identifier from the x-ms-client-request-id header set on the +// http.Request sent to the service (and returned in the http.Response) +func ExtractClientID(resp *http.Response) string { + return autorest.ExtractHeaderValue(HeaderClientID, resp) +} + +// ExtractRequestID extracts the Azure server generated request identifier from the +// x-ms-request-id header. +func ExtractRequestID(resp *http.Response) string { + return autorest.ExtractHeaderValue(HeaderRequestID, resp) +} + +// WithErrorUnlessStatusCode returns a RespondDecorator that emits an +// azure.RequestError by reading the response body unless the response HTTP status code +// is among the set passed. +// +// If there is a chance service may return responses other than the Azure error +// format and the response cannot be parsed into an error, a decoding error will +// be returned containing the response body. In any case, the Responder will +// return an error if the status code is not satisfied. +// +// If this Responder returns an error, the response body will be replaced with +// an in-memory reader, which needs no further closing. +func WithErrorUnlessStatusCode(codes ...int) autorest.RespondDecorator { + return func(r autorest.Responder) autorest.Responder { + return autorest.ResponderFunc(func(resp *http.Response) error { + err := r.Respond(resp) + if err == nil && !autorest.ResponseHasStatusCode(resp, codes...) { + var e RequestError + defer resp.Body.Close() + + // Copy and replace the Body in case it does not contain an error object. + // This will leave the Body available to the caller. 
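+			// CopyAndDecode (from this package's utility.go) tees the body into the
+			// returned buffer while decoding, so even when decoding fails the raw
+			// payload survives for the error message below.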
+ b, decodeErr := autorest.CopyAndDecode(autorest.EncodedAsJSON, resp.Body, &e) + resp.Body = ioutil.NopCloser(&b) + if decodeErr != nil { + return fmt.Errorf("autorest/azure: error response cannot be parsed: %q error: %v", b.String(), decodeErr) + } else if e.ServiceError == nil { + e.ServiceError = &ServiceError{Code: "Unknown", Message: "Unknown service error"} + } + + e.RequestID = ExtractRequestID(resp) + if e.StatusCode == nil { + e.StatusCode = resp.StatusCode + } + err = &e + } + return err + }) + } +} diff --git a/vendor/github.com/Azure/go-autorest/autorest/azure/environments.go b/vendor/github.com/Azure/go-autorest/autorest/azure/environments.go new file mode 100644 index 000000000..1cf55651f --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/autorest/azure/environments.go @@ -0,0 +1,130 @@ +package azure + +import ( + "fmt" + "strings" +) + +var environments = map[string]Environment{ + "AZURECHINACLOUD": ChinaCloud, + "AZUREGERMANCLOUD": GermanCloud, + "AZUREPUBLICCLOUD": PublicCloud, + "AZUREUSGOVERNMENTCLOUD": USGovernmentCloud, +} + +// Environment represents a set of endpoints for each of Azure's Clouds. +type Environment struct { + Name string `json:"name"` + ManagementPortalURL string `json:"managementPortalURL"` + PublishSettingsURL string `json:"publishSettingsURL"` + ServiceManagementEndpoint string `json:"serviceManagementEndpoint"` + ResourceManagerEndpoint string `json:"resourceManagerEndpoint"` + ActiveDirectoryEndpoint string `json:"activeDirectoryEndpoint"` + GalleryEndpoint string `json:"galleryEndpoint"` + KeyVaultEndpoint string `json:"keyVaultEndpoint"` + GraphEndpoint string `json:"graphEndpoint"` + StorageEndpointSuffix string `json:"storageEndpointSuffix"` + SQLDatabaseDNSSuffix string `json:"sqlDatabaseDNSSuffix"` + TrafficManagerDNSSuffix string `json:"trafficManagerDNSSuffix"` + KeyVaultDNSSuffix string `json:"keyVaultDNSSuffix"` + ServiceBusEndpointSuffix string `json:"serviceBusEndpointSuffix"` + ServiceManagementVMDNSSuffix string `json:"serviceManagementVMDNSSuffix"` + ResourceManagerVMDNSSuffix string `json:"resourceManagerVMDNSSuffix"` + ContainerRegistryDNSSuffix string `json:"containerRegistryDNSSuffix"` +} + +var ( + // PublicCloud is the default public Azure cloud environment + PublicCloud = Environment{ + Name: "AzurePublicCloud", + ManagementPortalURL: "https://manage.windowsazure.com/", + PublishSettingsURL: "https://manage.windowsazure.com/publishsettings/index", + ServiceManagementEndpoint: "https://management.core.windows.net/", + ResourceManagerEndpoint: "https://management.azure.com/", + ActiveDirectoryEndpoint: "https://login.microsoftonline.com/", + GalleryEndpoint: "https://gallery.azure.com/", + KeyVaultEndpoint: "https://vault.azure.net/", + GraphEndpoint: "https://graph.windows.net/", + StorageEndpointSuffix: "core.windows.net", + SQLDatabaseDNSSuffix: "database.windows.net", + TrafficManagerDNSSuffix: "trafficmanager.net", + KeyVaultDNSSuffix: "vault.azure.net", + ServiceBusEndpointSuffix: "servicebus.azure.com", + ServiceManagementVMDNSSuffix: "cloudapp.net", + ResourceManagerVMDNSSuffix: "cloudapp.azure.com", + ContainerRegistryDNSSuffix: "azurecr.io", + } + + // USGovernmentCloud is the cloud environment for the US Government + USGovernmentCloud = Environment{ + Name: "AzureUSGovernmentCloud", + ManagementPortalURL: "https://manage.windowsazure.us/", + PublishSettingsURL: "https://manage.windowsazure.us/publishsettings/index", + ServiceManagementEndpoint: "https://management.core.usgovcloudapi.net/", + 
ResourceManagerEndpoint: "https://management.usgovcloudapi.net/", + ActiveDirectoryEndpoint: "https://login.microsoftonline.com/", + GalleryEndpoint: "https://gallery.usgovcloudapi.net/", + KeyVaultEndpoint: "https://vault.usgovcloudapi.net/", + GraphEndpoint: "https://graph.usgovcloudapi.net/", + StorageEndpointSuffix: "core.usgovcloudapi.net", + SQLDatabaseDNSSuffix: "database.usgovcloudapi.net", + TrafficManagerDNSSuffix: "usgovtrafficmanager.net", + KeyVaultDNSSuffix: "vault.usgovcloudapi.net", + ServiceBusEndpointSuffix: "servicebus.usgovcloudapi.net", + ServiceManagementVMDNSSuffix: "usgovcloudapp.net", + ResourceManagerVMDNSSuffix: "cloudapp.windowsazure.us", + ContainerRegistryDNSSuffix: "azurecr.io", + } + + // ChinaCloud is the cloud environment operated in China + ChinaCloud = Environment{ + Name: "AzureChinaCloud", + ManagementPortalURL: "https://manage.chinacloudapi.com/", + PublishSettingsURL: "https://manage.chinacloudapi.com/publishsettings/index", + ServiceManagementEndpoint: "https://management.core.chinacloudapi.cn/", + ResourceManagerEndpoint: "https://management.chinacloudapi.cn/", + ActiveDirectoryEndpoint: "https://login.chinacloudapi.cn/", + GalleryEndpoint: "https://gallery.chinacloudapi.cn/", + KeyVaultEndpoint: "https://vault.azure.cn/", + GraphEndpoint: "https://graph.chinacloudapi.cn/", + StorageEndpointSuffix: "core.chinacloudapi.cn", + SQLDatabaseDNSSuffix: "database.chinacloudapi.cn", + TrafficManagerDNSSuffix: "trafficmanager.cn", + KeyVaultDNSSuffix: "vault.azure.cn", + ServiceBusEndpointSuffix: "servicebus.chinacloudapi.net", + ServiceManagementVMDNSSuffix: "chinacloudapp.cn", + ResourceManagerVMDNSSuffix: "cloudapp.azure.cn", + ContainerRegistryDNSSuffix: "azurecr.io", + } + + // GermanCloud is the cloud environment operated in Germany + GermanCloud = Environment{ + Name: "AzureGermanCloud", + ManagementPortalURL: "http://portal.microsoftazure.de/", + PublishSettingsURL: "https://manage.microsoftazure.de/publishsettings/index", + ServiceManagementEndpoint: "https://management.core.cloudapi.de/", + ResourceManagerEndpoint: "https://management.microsoftazure.de/", + ActiveDirectoryEndpoint: "https://login.microsoftonline.de/", + GalleryEndpoint: "https://gallery.cloudapi.de/", + KeyVaultEndpoint: "https://vault.microsoftazure.de/", + GraphEndpoint: "https://graph.cloudapi.de/", + StorageEndpointSuffix: "core.cloudapi.de", + SQLDatabaseDNSSuffix: "database.cloudapi.de", + TrafficManagerDNSSuffix: "azuretrafficmanager.de", + KeyVaultDNSSuffix: "vault.microsoftazure.de", + ServiceBusEndpointSuffix: "servicebus.cloudapi.de", + ServiceManagementVMDNSSuffix: "azurecloudapp.de", + ResourceManagerVMDNSSuffix: "cloudapp.microsoftazure.de", + ContainerRegistryDNSSuffix: "azurecr.io", + } +) + +// EnvironmentFromName returns an Environment based on the common name specified +func EnvironmentFromName(name string) (Environment, error) { + name = strings.ToUpper(name) + env, ok := environments[name] + if !ok { + return env, fmt.Errorf("autorest/azure: There is no cloud environment matching the name %q", name) + } + return env, nil +} diff --git a/vendor/github.com/Azure/go-autorest/autorest/client.go b/vendor/github.com/Azure/go-autorest/autorest/client.go new file mode 100644 index 000000000..5f1e72fbe --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/autorest/client.go @@ -0,0 +1,236 @@ +package autorest + +import ( + "bytes" + "fmt" + "io" + "io/ioutil" + "log" + "net/http" + "net/http/cookiejar" + "runtime" + "time" +) + +const ( + // DefaultPollingDelay is a 
reasonable delay between polling requests. + DefaultPollingDelay = 60 * time.Second + + // DefaultPollingDuration is a reasonable total polling duration. + DefaultPollingDuration = 15 * time.Minute + + // DefaultRetryAttempts is the number of attempts for retry status codes (5xx). + DefaultRetryAttempts = 3 +) + +var ( + // defaultUserAgent builds a string containing the Go version, system architecture and OS, + // and the go-autorest version. + defaultUserAgent = fmt.Sprintf("Go/%s (%s-%s) go-autorest/%s", + runtime.Version(), + runtime.GOARCH, + runtime.GOOS, + Version(), + ) + + statusCodesForRetry = []int{ + http.StatusRequestTimeout, // 408 + http.StatusTooManyRequests, // 429 + http.StatusInternalServerError, // 500 + http.StatusBadGateway, // 502 + http.StatusServiceUnavailable, // 503 + http.StatusGatewayTimeout, // 504 + } +) + +const ( + requestFormat = `HTTP Request Begin =================================================== +%s +===================================================== HTTP Request End +` + responseFormat = `HTTP Response Begin =================================================== +%s +===================================================== HTTP Response End +` +) + +// Response serves as the base for all responses from generated clients. It provides access to the +// last http.Response. +type Response struct { + *http.Response `json:"-"` +} + +// LoggingInspector implements request and response inspectors that log the full request and +// response to a supplied log. +type LoggingInspector struct { + Logger *log.Logger +} + +// WithInspection returns a PrepareDecorator that emits the http.Request to the supplied logger. The +// body is restored after being emitted. +// +// Note: Since it reads the entire Body, this decorator should not be used where body streaming is +// important. It is best used to trace JSON or similar body values. +func (li LoggingInspector) WithInspection() PrepareDecorator { + return func(p Preparer) Preparer { + return PreparerFunc(func(r *http.Request) (*http.Request, error) { + var body, b bytes.Buffer + + defer r.Body.Close() + + r.Body = ioutil.NopCloser(io.TeeReader(r.Body, &body)) + if err := r.Write(&b); err != nil { + return nil, fmt.Errorf("Failed to write request: %v", err) + } + + li.Logger.Printf(requestFormat, b.String()) + + r.Body = ioutil.NopCloser(&body) + return p.Prepare(r) + }) + } +} + +// ByInspecting returns a RespondDecorator that emits the http.Response to the supplied logger. The +// body is restored after being emitted. +// +// Note: Since it reads the entire Body, this decorator should not be used where body streaming is +// important. It is best used to trace JSON or similar body values. +func (li LoggingInspector) ByInspecting() RespondDecorator { + return func(r Responder) Responder { + return ResponderFunc(func(resp *http.Response) error { + var body, b bytes.Buffer + defer resp.Body.Close() + resp.Body = ioutil.NopCloser(io.TeeReader(resp.Body, &body)) + if err := resp.Write(&b); err != nil { + return fmt.Errorf("Failed to write response: %v", err) + } + + li.Logger.Printf(responseFormat, b.String()) + + resp.Body = ioutil.NopCloser(&body) + return r.Respond(resp) + }) + } +} + +// Client is the base for autorest generated clients. It provides default, "do nothing" +// implementations of an Authorizer, RequestInspector, and ResponseInspector. It also returns the +// standard, undecorated http.Client as a default Sender.
+// +// Generated clients should also use Error (see NewError and NewErrorWithError) for errors and +// return responses that compose with Response. +// +// Most customization of generated clients is best achieved by supplying a custom Authorizer, custom +// RequestInspector, and / or custom ResponseInspector. Users may log requests, implement circuit +// breakers (see https://msdn.microsoft.com/en-us/library/dn589784.aspx) or otherwise influence +// sending the request by providing a decorated Sender. +type Client struct { + Authorizer Authorizer + Sender Sender + RequestInspector PrepareDecorator + ResponseInspector RespondDecorator + + // PollingDelay sets the polling frequency used in absence of a Retry-After HTTP header + PollingDelay time.Duration + + // PollingDuration sets the maximum polling time after which an error is returned. + PollingDuration time.Duration + + // RetryAttempts sets the default number of retry attempts for the client. + RetryAttempts int + + // RetryDuration sets the delay duration for retries. + RetryDuration time.Duration + + // UserAgent, if not empty, will be set as the HTTP User-Agent header on all requests sent + // through the Do method. + UserAgent string + + Jar http.CookieJar +} + +// NewClientWithUserAgent returns an instance of a Client with the UserAgent set to the passed +// string. +func NewClientWithUserAgent(ua string) Client { + c := Client{ + PollingDelay: DefaultPollingDelay, + PollingDuration: DefaultPollingDuration, + RetryAttempts: DefaultRetryAttempts, + RetryDuration: 30 * time.Second, + UserAgent: defaultUserAgent, + } + c.AddToUserAgent(ua) + return c +} + +// AddToUserAgent adds an extension to the current user agent +func (c *Client) AddToUserAgent(extension string) error { + if extension != "" { + c.UserAgent = fmt.Sprintf("%s %s", c.UserAgent, extension) + return nil + } + return fmt.Errorf("Extension was empty, User Agent stayed as %s", c.UserAgent) +} + +// Do implements the Sender interface by invoking the active Sender after applying authorization. +// If Sender is not set, it uses a new instance of http.Client. In both cases it will, if UserAgent +// is set, set the User-Agent header. +func (c Client) Do(r *http.Request) (*http.Response, error) { + if r.UserAgent() == "" { + r, _ = Prepare(r, + WithUserAgent(c.UserAgent)) + } + r, err := Prepare(r, + c.WithInspection(), + c.WithAuthorization()) + if err != nil { + return nil, NewErrorWithError(err, "autorest/Client", "Do", nil, "Preparing request failed") + } + resp, err := SendWithSender(c.sender(), r, + DoRetryForStatusCodes(c.RetryAttempts, c.RetryDuration, statusCodesForRetry...)) + Respond(resp, + c.ByInspecting()) + return resp, err +} + +// sender returns the Sender to which to send requests. +func (c Client) sender() Sender { + if c.Sender == nil { + j, _ := cookiejar.New(nil) + return &http.Client{Jar: j} + } + return c.Sender +} + +// WithAuthorization is a convenience method that returns the WithAuthorization PrepareDecorator +// from the current Authorizer. If no Authorizer is set, it uses the NullAuthorizer. +func (c Client) WithAuthorization() PrepareDecorator { + return c.authorizer().WithAuthorization() +} + +// authorizer returns the Authorizer to use. +func (c Client) authorizer() Authorizer { + if c.Authorizer == nil { + return NullAuthorizer{} + } + return c.Authorizer +} + +// WithInspection is a convenience method that passes the request to the supplied RequestInspector, +// if present, or returns the WithNothing PrepareDecorator otherwise.
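Before the inspection conveniences that close out client.go, a minimal sketch of the Client above in use; the URL is a placeholder, and AsGet/WithBaseURL come from preparer.go:

// Illustrative only: wiring the pieces above into a single request.
package main

import (
	"net/http"

	"github.com/Azure/go-autorest/autorest"
)

func main() {
	// Any Authorizer works here; NullAuthorizer is the default when none is set.
	c := autorest.NewClientWithUserAgent("myapp/0.1")

	// Error handling elided for brevity in this sketch.
	req, _ := autorest.Prepare(&http.Request{},
		autorest.AsGet(),
		autorest.WithBaseURL("https://management.azure.com/"))

	// Do applies the UserAgent, inspection, and authorization decorators, then
	// sends with retries on the 5xx/408/429 codes in statusCodesForRetry.
	resp, err := c.Do(req)
	if err == nil {
		resp.Body.Close()
	}
}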
+func (c Client) WithInspection() PrepareDecorator { + if c.RequestInspector == nil { + return WithNothing() + } + return c.RequestInspector +} + +// ByInspecting is a convenience method that passes the response to the supplied ResponseInspector, +// if present, or returns the ByIgnoring RespondDecorator otherwise. +func (c Client) ByInspecting() RespondDecorator { + if c.ResponseInspector == nil { + return ByIgnoring() + } + return c.ResponseInspector +} diff --git a/vendor/github.com/Azure/go-autorest/autorest/date/date.go b/vendor/github.com/Azure/go-autorest/autorest/date/date.go new file mode 100644 index 000000000..80ca60e9b --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/autorest/date/date.go @@ -0,0 +1,82 @@ +/* +Package date provides time.Time derivatives that conform to the Swagger.io (https://swagger.io/) +defined date formats: Date and DateTime. Both types may, in most cases, be used in lieu of +time.Time types. And both convert to time.Time through a ToTime method. +*/ +package date + +import ( + "fmt" + "time" +) + +const ( + fullDate = "2006-01-02" + fullDateJSON = `"2006-01-02"` + dateFormat = "%04d-%02d-%02d" + jsonFormat = `"%04d-%02d-%02d"` +) + +// Date defines a type similar to time.Time but assumes a layout of RFC3339 full-date (i.e., +// 2006-01-02). +type Date struct { + time.Time +} + +// ParseDate create a new Date from the passed string. +func ParseDate(date string) (d Date, err error) { + return parseDate(date, fullDate) +} + +func parseDate(date string, format string) (Date, error) { + d, err := time.Parse(format, date) + return Date{Time: d}, err +} + +// MarshalBinary preserves the Date as a byte array conforming to RFC3339 full-date (i.e., +// 2006-01-02). +func (d Date) MarshalBinary() ([]byte, error) { + return d.MarshalText() +} + +// UnmarshalBinary reconstitutes a Date saved as a byte array conforming to RFC3339 full-date (i.e., +// 2006-01-02). +func (d *Date) UnmarshalBinary(data []byte) error { + return d.UnmarshalText(data) +} + +// MarshalJSON preserves the Date as a JSON string conforming to RFC3339 full-date (i.e., +// 2006-01-02). +func (d Date) MarshalJSON() (json []byte, err error) { + return []byte(fmt.Sprintf(jsonFormat, d.Year(), d.Month(), d.Day())), nil +} + +// UnmarshalJSON reconstitutes the Date from a JSON string conforming to RFC3339 full-date (i.e., +// 2006-01-02). +func (d *Date) UnmarshalJSON(data []byte) (err error) { + d.Time, err = time.Parse(fullDateJSON, string(data)) + return err +} + +// MarshalText preserves the Date as a byte array conforming to RFC3339 full-date (i.e., +// 2006-01-02). +func (d Date) MarshalText() (text []byte, err error) { + return []byte(fmt.Sprintf(dateFormat, d.Year(), d.Month(), d.Day())), nil +} + +// UnmarshalText reconstitutes a Date saved as a byte array conforming to RFC3339 full-date (i.e., +// 2006-01-02). +func (d *Date) UnmarshalText(data []byte) (err error) { + d.Time, err = time.Parse(fullDate, string(data)) + return err +} + +// String returns the Date formatted as an RFC3339 full-date string (i.e., 2006-01-02). 
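Before the String implementation below, a sketch of Date round-tripping through JSON; note that the marshaled form carries no time-of-day component:

// Illustrative only: date.Date serializes as an RFC3339 full-date.
package main

import (
	"encoding/json"
	"fmt"

	"github.com/Azure/go-autorest/autorest/date"
)

func main() {
	d, err := date.ParseDate("2017-09-29")
	if err != nil {
		panic(err)
	}

	b, _ := json.Marshal(d)
	fmt.Println(string(b)) // "2017-09-29"

	var back date.Date
	_ = json.Unmarshal(b, &back)
	fmt.Println(back.ToTime().Equal(d.ToTime())) // true
}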
+func (d Date) String() string { + return fmt.Sprintf(dateFormat, d.Year(), d.Month(), d.Day()) +} + +// ToTime returns a Date as a time.Time +func (d Date) ToTime() time.Time { + return d.Time +} diff --git a/vendor/github.com/Azure/go-autorest/autorest/date/time.go b/vendor/github.com/Azure/go-autorest/autorest/date/time.go new file mode 100644 index 000000000..c1af62963 --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/autorest/date/time.go @@ -0,0 +1,89 @@ +package date + +import ( + "regexp" + "time" +) + +// Azure reports time in UTC but it doesn't include the 'Z' time zone suffix in some cases. +const ( + azureUtcFormatJSON = `"2006-01-02T15:04:05.999999999"` + azureUtcFormat = "2006-01-02T15:04:05.999999999" + rfc3339JSON = `"` + time.RFC3339Nano + `"` + rfc3339 = time.RFC3339Nano + tzOffsetRegex = `(Z|z|\+|-)(\d+:\d+)*"*$` +) + +// Time defines a type similar to time.Time but assumes a layout of RFC3339 date-time (i.e., +// 2006-01-02T15:04:05Z). +type Time struct { + time.Time +} + +// MarshalBinary preserves the Time as a byte array conforming to RFC3339 date-time (i.e., +// 2006-01-02T15:04:05Z). +func (t Time) MarshalBinary() ([]byte, error) { + return t.Time.MarshalText() +} + +// UnmarshalBinary reconstitutes a Time saved as a byte array conforming to RFC3339 date-time +// (i.e., 2006-01-02T15:04:05Z). +func (t *Time) UnmarshalBinary(data []byte) error { + return t.UnmarshalText(data) +} + +// MarshalJSON preserves the Time as a JSON string conforming to RFC3339 date-time (i.e., +// 2006-01-02T15:04:05Z). +func (t Time) MarshalJSON() (json []byte, err error) { + return t.Time.MarshalJSON() +} + +// UnmarshalJSON reconstitutes the Time from a JSON string conforming to RFC3339 date-time +// (i.e., 2006-01-02T15:04:05Z). +func (t *Time) UnmarshalJSON(data []byte) (err error) { + timeFormat := azureUtcFormatJSON + match, err := regexp.Match(tzOffsetRegex, data) + if err != nil { + return err + } else if match { + timeFormat = rfc3339JSON + } + t.Time, err = ParseTime(timeFormat, string(data)) + return err +} + +// MarshalText preserves the Time as a byte array conforming to RFC3339 date-time (i.e., +// 2006-01-02T15:04:05Z). +func (t Time) MarshalText() (text []byte, err error) { + return t.Time.MarshalText() +} + +// UnmarshalText reconstitutes a Time saved as a byte array conforming to RFC3339 date-time +// (i.e., 2006-01-02T15:04:05Z). +func (t *Time) UnmarshalText(data []byte) (err error) { + timeFormat := azureUtcFormat + match, err := regexp.Match(tzOffsetRegex, data) + if err != nil { + return err + } else if match { + timeFormat = rfc3339 + } + t.Time, err = ParseTime(timeFormat, string(data)) + return err +} + +// String returns the Time formatted as an RFC3339 date-time string (i.e., +// 2006-01-02T15:04:05Z). +func (t Time) String() string { + // Note: time.Time.String does not return an RFC3339 compliant string, time.Time.MarshalText does. 
+ b, err := t.MarshalText() + if err != nil { + return "" + } + return string(b) +} + +// ToTime returns a Time as a time.Time +func (t Time) ToTime() time.Time { + return t.Time +} diff --git a/vendor/github.com/Azure/go-autorest/autorest/date/timerfc1123.go b/vendor/github.com/Azure/go-autorest/autorest/date/timerfc1123.go new file mode 100644 index 000000000..11995fb9f --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/autorest/date/timerfc1123.go @@ -0,0 +1,86 @@ +package date + +import ( + "errors" + "time" +) + +const ( + rfc1123JSON = `"` + time.RFC1123 + `"` + rfc1123 = time.RFC1123 +) + +// TimeRFC1123 defines a type similar to time.Time but assumes a layout of RFC1123 date-time (i.e., +// Mon, 02 Jan 2006 15:04:05 MST). +type TimeRFC1123 struct { + time.Time +} + +// UnmarshalJSON reconstitutes the Time from a JSON string conforming to RFC1123 date-time +// (i.e., Mon, 02 Jan 2006 15:04:05 MST). +func (t *TimeRFC1123) UnmarshalJSON(data []byte) (err error) { + t.Time, err = ParseTime(rfc1123JSON, string(data)) + if err != nil { + return err + } + return nil +} + +// MarshalJSON preserves the Time as a JSON string conforming to RFC1123 date-time (i.e., +// Mon, 02 Jan 2006 15:04:05 MST). +func (t TimeRFC1123) MarshalJSON() ([]byte, error) { + if y := t.Year(); y < 0 || y >= 10000 { + return nil, errors.New("Time.MarshalJSON: year outside of range [0,9999]") + } + b := []byte(t.Format(rfc1123JSON)) + return b, nil +} + +// MarshalText preserves the Time as a byte array conforming to RFC1123 date-time (i.e., +// Mon, 02 Jan 2006 15:04:05 MST). +func (t TimeRFC1123) MarshalText() ([]byte, error) { + if y := t.Year(); y < 0 || y >= 10000 { + return nil, errors.New("Time.MarshalText: year outside of range [0,9999]") + } + + b := []byte(t.Format(rfc1123)) + return b, nil +} + +// UnmarshalText reconstitutes a Time saved as a byte array conforming to RFC1123 date-time +// (i.e., Mon, 02 Jan 2006 15:04:05 MST). +func (t *TimeRFC1123) UnmarshalText(data []byte) (err error) { + t.Time, err = ParseTime(rfc1123, string(data)) + if err != nil { + return err + } + return nil +} + +// MarshalBinary preserves the Time as a byte array conforming to RFC1123 date-time (i.e., +// Mon, 02 Jan 2006 15:04:05 MST). +func (t TimeRFC1123) MarshalBinary() ([]byte, error) { + return t.MarshalText() +} + +// UnmarshalBinary reconstitutes a Time saved as a byte array conforming to RFC1123 date-time +// (i.e., Mon, 02 Jan 2006 15:04:05 MST). +func (t *TimeRFC1123) UnmarshalBinary(data []byte) error { + return t.UnmarshalText(data) +} + +// ToTime returns a Time as a time.Time +func (t TimeRFC1123) ToTime() time.Time { + return t.Time +} + +// String returns the Time formatted as an RFC1123 date-time string (i.e., +// Mon, 02 Jan 2006 15:04:05 MST). +func (t TimeRFC1123) String() string { + // Note: time.Time.String does not return an RFC1123 compliant string, time.Time.MarshalText does. + b, err := t.MarshalText() + if err != nil { + return "" + } + return string(b) +} diff --git a/vendor/github.com/Azure/go-autorest/autorest/date/unixtime.go b/vendor/github.com/Azure/go-autorest/autorest/date/unixtime.go new file mode 100644 index 000000000..e085c77ee --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/autorest/date/unixtime.go @@ -0,0 +1,109 @@ +package date + +import ( + "bytes" + "encoding/binary" + "encoding/json" + "time" +) + +// unixEpoch is the moment in time that should be treated as timestamp 0. 
+var unixEpoch = time.Date(1970, time.January, 1, 0, 0, 0, 0, time.UTC)
+
+// UnixTime marshals and unmarshals a time that is represented as the number
+// of seconds (ignoring leap seconds) since the Unix Epoch.
+type UnixTime time.Time
+
+// Duration returns the time as a Duration since the UnixEpoch.
+func (t UnixTime) Duration() time.Duration {
+	return time.Time(t).Sub(unixEpoch)
+}
+
+// NewUnixTimeFromSeconds creates a UnixTime as a number of seconds from the UnixEpoch.
+func NewUnixTimeFromSeconds(seconds float64) UnixTime {
+	return NewUnixTimeFromDuration(time.Duration(seconds * float64(time.Second)))
+}
+
+// NewUnixTimeFromNanoseconds creates a UnixTime as a number of nanoseconds from the UnixEpoch.
+func NewUnixTimeFromNanoseconds(nanoseconds int64) UnixTime {
+	return NewUnixTimeFromDuration(time.Duration(nanoseconds))
+}
+
+// NewUnixTimeFromDuration creates a UnixTime as a duration of time since the UnixEpoch.
+func NewUnixTimeFromDuration(dur time.Duration) UnixTime {
+	return UnixTime(unixEpoch.Add(dur))
+}
+
+// UnixEpoch retrieves the moment considered the Unix Epoch, i.e., the time represented by '0'.
+func UnixEpoch() time.Time {
+	return unixEpoch
+}
+
+// MarshalJSON preserves the UnixTime as a JSON number conforming to Unix Timestamp requirements.
+// (i.e. the number of seconds since midnight January 1st, 1970 not considering leap seconds.)
+func (t UnixTime) MarshalJSON() ([]byte, error) {
+	buffer := &bytes.Buffer{}
+	enc := json.NewEncoder(buffer)
+	err := enc.Encode(float64(time.Time(t).UnixNano()) / 1e9)
+	if err != nil {
+		return nil, err
+	}
+	return buffer.Bytes(), nil
+}
+
+// UnmarshalJSON reconstitutes a UnixTime saved as a JSON number of the number of seconds since
+// midnight January 1st, 1970.
+func (t *UnixTime) UnmarshalJSON(text []byte) error {
+	dec := json.NewDecoder(bytes.NewReader(text))
+
+	var secondsSinceEpoch float64
+	if err := dec.Decode(&secondsSinceEpoch); err != nil {
+		return err
+	}
+
+	*t = NewUnixTimeFromSeconds(secondsSinceEpoch)
+
+	return nil
+}
+
+// MarshalText stores the UnixTime as text in the encoding produced by time.Time.MarshalText
+// (RFC3339 format with sub-second precision).
+func (t UnixTime) MarshalText() ([]byte, error) {
+	cast := time.Time(t)
+	return cast.MarshalText()
+}
+
+// UnmarshalText populates a UnixTime from text in the encoding produced by MarshalText.
+func (t *UnixTime) UnmarshalText(raw []byte) error {
+	var unmarshaled time.Time
+
+	if err := unmarshaled.UnmarshalText(raw); err != nil {
+		return err
+	}
+
+	*t = UnixTime(unmarshaled)
+	return nil
+}
+
+// MarshalBinary converts a UnixTime into a binary.LittleEndian int64 of nanoseconds since the epoch.
+func (t UnixTime) MarshalBinary() ([]byte, error) {
+	buf := &bytes.Buffer{}
+
+	payload := int64(t.Duration())
+
+	if err := binary.Write(buf, binary.LittleEndian, &payload); err != nil {
+		return nil, err
+	}
+
+	return buf.Bytes(), nil
+}
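A short illustrative round-trip (a standalone sketch, not part of the vendored file; the timestamp value is arbitrary) showing the JSON form these methods produce:

```go
package main

import (
	"encoding/json"
	"fmt"

	"github.com/Azure/go-autorest/autorest/date"
)

func main() {
	// Marshal a UnixTime to its JSON form: a number of seconds since the epoch.
	ut := date.NewUnixTimeFromSeconds(1506633000)
	b, err := json.Marshal(ut)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(b)) // e.g. 1506633000

	// Unmarshal it back and recover the duration since the Unix Epoch.
	var back date.UnixTime
	if err := json.Unmarshal(b, &back); err != nil {
		panic(err)
	}
	fmt.Println(back.Duration())
}
```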
+func (t *UnixTime) UnmarshalBinary(raw []byte) error { + var nanosecondsSinceEpoch int64 + + if err := binary.Read(bytes.NewReader(raw), binary.LittleEndian, &nanosecondsSinceEpoch); err != nil { + return err + } + *t = NewUnixTimeFromNanoseconds(nanosecondsSinceEpoch) + return nil +} diff --git a/vendor/github.com/Azure/go-autorest/autorest/date/utility.go b/vendor/github.com/Azure/go-autorest/autorest/date/utility.go new file mode 100644 index 000000000..207b1a240 --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/autorest/date/utility.go @@ -0,0 +1,11 @@ +package date + +import ( + "strings" + "time" +) + +// ParseTime to parse Time string to specified format. +func ParseTime(format string, t string) (d time.Time, err error) { + return time.Parse(format, strings.ToUpper(t)) +} diff --git a/vendor/github.com/Azure/go-autorest/autorest/error.go b/vendor/github.com/Azure/go-autorest/autorest/error.go new file mode 100644 index 000000000..aaef2ac8e --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/autorest/error.go @@ -0,0 +1,84 @@ +package autorest + +import ( + "fmt" + "net/http" +) + +const ( + // UndefinedStatusCode is used when HTTP status code is not available for an error. + UndefinedStatusCode = 0 +) + +// DetailedError encloses a error with details of the package, method, and associated HTTP +// status code (if any). +type DetailedError struct { + Original error + + // PackageType is the package type of the object emitting the error. For types, the value + // matches that produced the the '%T' format specifier of the fmt package. For other elements, + // such as functions, it is just the package name (e.g., "autorest"). + PackageType string + + // Method is the name of the method raising the error. + Method string + + // StatusCode is the HTTP Response StatusCode (if non-zero) that led to the error. + StatusCode interface{} + + // Message is the error message. + Message string + + // Service Error is the response body of failed API in bytes + ServiceError []byte + + // Response is the response object that was returned during failure if applicable. + Response *http.Response +} + +// NewError creates a new Error conforming object from the passed packageType, method, and +// message. message is treated as a format string to which the optional args apply. +func NewError(packageType string, method string, message string, args ...interface{}) DetailedError { + return NewErrorWithError(nil, packageType, method, nil, message, args...) +} + +// NewErrorWithResponse creates a new Error conforming object from the passed +// packageType, method, statusCode of the given resp (UndefinedStatusCode if +// resp is nil), and message. message is treated as a format string to which the +// optional args apply. +func NewErrorWithResponse(packageType string, method string, resp *http.Response, message string, args ...interface{}) DetailedError { + return NewErrorWithError(nil, packageType, method, resp, message, args...) +} + +// NewErrorWithError creates a new Error conforming object from the +// passed packageType, method, statusCode of the given resp (UndefinedStatusCode +// if resp is nil), message, and original error. message is treated as a format +// string to which the optional args apply. 
+
+// NewErrorWithError creates a new Error conforming object from the
+// passed packageType, method, statusCode of the given resp (UndefinedStatusCode
+// if resp is nil), message, and original error. message is treated as a format
+// string to which the optional args apply.
+func NewErrorWithError(original error, packageType string, method string, resp *http.Response, message string, args ...interface{}) DetailedError {
+	if v, ok := original.(DetailedError); ok {
+		return v
+	}
+
+	statusCode := UndefinedStatusCode
+	if resp != nil {
+		statusCode = resp.StatusCode
+	}
+
+	return DetailedError{
+		Original:    original,
+		PackageType: packageType,
+		Method:      method,
+		StatusCode:  statusCode,
+		Message:     fmt.Sprintf(message, args...),
+		Response:    resp,
+	}
+}
+
+// Error returns a formatted error string containing all available details (i.e., PackageType,
+// Method, StatusCode, Message, and original error (if any)).
+func (e DetailedError) Error() string {
+	if e.Original == nil {
+		return fmt.Sprintf("%s#%s: %s: StatusCode=%d", e.PackageType, e.Method, e.Message, e.StatusCode)
+	}
+	return fmt.Sprintf("%s#%s: %s: StatusCode=%d -- Original Error: %v", e.PackageType, e.Method, e.Message, e.StatusCode, e.Original)
+}
diff --git a/vendor/github.com/Azure/go-autorest/autorest/preparer.go b/vendor/github.com/Azure/go-autorest/autorest/preparer.go
new file mode 100644
index 000000000..afd114821
--- /dev/null
+++ b/vendor/github.com/Azure/go-autorest/autorest/preparer.go
@@ -0,0 +1,428 @@
+package autorest
+
+import (
+	"bytes"
+	"encoding/json"
+	"fmt"
+	"io"
+	"io/ioutil"
+	"mime/multipart"
+	"net/http"
+	"net/url"
+	"strings"
+)
+
+const (
+	mimeTypeJSON     = "application/json"
+	mimeTypeFormPost = "application/x-www-form-urlencoded"
+
+	headerAuthorization = "Authorization"
+	headerContentType   = "Content-Type"
+	headerUserAgent     = "User-Agent"
+)
+
+// Preparer is the interface that wraps the Prepare method.
+//
+// Prepare accepts and possibly modifies an http.Request (e.g., adding Headers). Implementations
+// must ensure not to share or hold per-invocation state since Preparers may be shared and re-used.
+type Preparer interface {
+	Prepare(*http.Request) (*http.Request, error)
+}
+
+// PreparerFunc is a method that implements the Preparer interface.
+type PreparerFunc func(*http.Request) (*http.Request, error)
+
+// Prepare implements the Preparer interface on PreparerFunc.
+func (pf PreparerFunc) Prepare(r *http.Request) (*http.Request, error) {
+	return pf(r)
+}
+
+// PrepareDecorator takes and possibly decorates, by wrapping, a Preparer. Decorators may affect the
+// http.Request and pass it along or, first, pass the http.Request along then affect the result.
+type PrepareDecorator func(Preparer) Preparer
+
+// CreatePreparer creates, decorates, and returns a Preparer.
+// Without decorators, the returned Preparer returns the passed http.Request unmodified.
+// Preparers are safe to share and re-use.
+func CreatePreparer(decorators ...PrepareDecorator) Preparer {
+	return DecoratePreparer(
+		Preparer(PreparerFunc(func(r *http.Request) (*http.Request, error) { return r, nil })),
+		decorators...)
+}
+
+// DecoratePreparer accepts a Preparer and a, possibly empty, set of PrepareDecorators, which it
+// applies to the Preparer. Decorators are applied in the order received, but their effect upon the
+// request depends on whether they are a pre-decorator (change the http.Request and then pass it
+// along) or a post-decorator (pass the http.Request along and alter it on return).
+func DecoratePreparer(p Preparer, decorators ...PrepareDecorator) Preparer {
+	for _, decorate := range decorators {
+		p = decorate(p)
+	}
+	return p
+}
+
+// Prepare accepts an http.Request and a, possibly empty, set of PrepareDecorators.
+// It creates a Preparer from the decorators which it then applies to the passed http.Request. +func Prepare(r *http.Request, decorators ...PrepareDecorator) (*http.Request, error) { + if r == nil { + return nil, NewError("autorest", "Prepare", "Invoked without an http.Request") + } + return CreatePreparer(decorators...).Prepare(r) +} + +// WithNothing returns a "do nothing" PrepareDecorator that makes no changes to the passed +// http.Request. +func WithNothing() PrepareDecorator { + return func(p Preparer) Preparer { + return PreparerFunc(func(r *http.Request) (*http.Request, error) { + return p.Prepare(r) + }) + } +} + +// WithHeader returns a PrepareDecorator that sets the specified HTTP header of the http.Request to +// the passed value. It canonicalizes the passed header name (via http.CanonicalHeaderKey) before +// adding the header. +func WithHeader(header string, value string) PrepareDecorator { + return func(p Preparer) Preparer { + return PreparerFunc(func(r *http.Request) (*http.Request, error) { + r, err := p.Prepare(r) + if err == nil { + if r.Header == nil { + r.Header = make(http.Header) + } + r.Header.Set(http.CanonicalHeaderKey(header), value) + } + return r, err + }) + } +} + +// WithBearerAuthorization returns a PrepareDecorator that adds an HTTP Authorization header whose +// value is "Bearer " followed by the supplied token. +func WithBearerAuthorization(token string) PrepareDecorator { + return WithHeader(headerAuthorization, fmt.Sprintf("Bearer %s", token)) +} + +// AsContentType returns a PrepareDecorator that adds an HTTP Content-Type header whose value +// is the passed contentType. +func AsContentType(contentType string) PrepareDecorator { + return WithHeader(headerContentType, contentType) +} + +// WithUserAgent returns a PrepareDecorator that adds an HTTP User-Agent header whose value is the +// passed string. +func WithUserAgent(ua string) PrepareDecorator { + return WithHeader(headerUserAgent, ua) +} + +// AsFormURLEncoded returns a PrepareDecorator that adds an HTTP Content-Type header whose value is +// "application/x-www-form-urlencoded". +func AsFormURLEncoded() PrepareDecorator { + return AsContentType(mimeTypeFormPost) +} + +// AsJSON returns a PrepareDecorator that adds an HTTP Content-Type header whose value is +// "application/json". +func AsJSON() PrepareDecorator { + return AsContentType(mimeTypeJSON) +} + +// WithMethod returns a PrepareDecorator that sets the HTTP method of the passed request. The +// decorator does not validate that the passed method string is a known HTTP method. +func WithMethod(method string) PrepareDecorator { + return func(p Preparer) Preparer { + return PreparerFunc(func(r *http.Request) (*http.Request, error) { + r.Method = method + return p.Prepare(r) + }) + } +} + +// AsDelete returns a PrepareDecorator that sets the HTTP method to DELETE. +func AsDelete() PrepareDecorator { return WithMethod("DELETE") } + +// AsGet returns a PrepareDecorator that sets the HTTP method to GET. +func AsGet() PrepareDecorator { return WithMethod("GET") } + +// AsHead returns a PrepareDecorator that sets the HTTP method to HEAD. +func AsHead() PrepareDecorator { return WithMethod("HEAD") } + +// AsOptions returns a PrepareDecorator that sets the HTTP method to OPTIONS. +func AsOptions() PrepareDecorator { return WithMethod("OPTIONS") } + +// AsPatch returns a PrepareDecorator that sets the HTTP method to PATCH. 
+func AsPatch() PrepareDecorator { return WithMethod("PATCH") }
+
+// AsPost returns a PrepareDecorator that sets the HTTP method to POST.
+func AsPost() PrepareDecorator { return WithMethod("POST") }
+
+// AsPut returns a PrepareDecorator that sets the HTTP method to PUT.
+func AsPut() PrepareDecorator { return WithMethod("PUT") }
+
+// WithBaseURL returns a PrepareDecorator that populates the http.Request with a url.URL constructed
+// from the supplied baseURL.
+func WithBaseURL(baseURL string) PrepareDecorator {
+	return func(p Preparer) Preparer {
+		return PreparerFunc(func(r *http.Request) (*http.Request, error) {
+			r, err := p.Prepare(r)
+			if err == nil {
+				var u *url.URL
+				if u, err = url.Parse(baseURL); err != nil {
+					return r, err
+				}
+				if u.Scheme == "" {
+					err = fmt.Errorf("autorest: No scheme detected in URL %s", baseURL)
+				}
+				if err == nil {
+					r.URL = u
+				}
+			}
+			return r, err
+		})
+	}
+}
+
+// WithCustomBaseURL returns a PrepareDecorator that replaces brace-enclosed keys within the
+// request base URL (i.e., http.Request.URL) with the corresponding values from the passed map.
+func WithCustomBaseURL(baseURL string, urlParameters map[string]interface{}) PrepareDecorator {
+	parameters := ensureValueStrings(urlParameters)
+	for key, value := range parameters {
+		baseURL = strings.Replace(baseURL, "{"+key+"}", value, -1)
+	}
+	return WithBaseURL(baseURL)
+}
+
+// WithFormData returns a PrepareDecorator that URL-encodes the passed url.Values (e.g.,
+// bar=baz&foo=quux) into the http.Request body.
+func WithFormData(v url.Values) PrepareDecorator {
+	return func(p Preparer) Preparer {
+		return PreparerFunc(func(r *http.Request) (*http.Request, error) {
+			r, err := p.Prepare(r)
+			if err == nil {
+				s := v.Encode()
+				r.ContentLength = int64(len(s))
+				r.Body = ioutil.NopCloser(strings.NewReader(s))
+			}
+			return r, err
+		})
+	}
+}
+
+// WithMultiPartFormData returns a PrepareDecorator that writes the passed form parameters as
+// multipart/form-data into the http.Request body.
+func WithMultiPartFormData(formDataParameters map[string]interface{}) PrepareDecorator {
+	return func(p Preparer) Preparer {
+		return PreparerFunc(func(r *http.Request) (*http.Request, error) {
+			r, err := p.Prepare(r)
+			if err == nil {
+				var body bytes.Buffer
+				writer := multipart.NewWriter(&body)
+				for key, value := range formDataParameters {
+					if rc, ok := value.(io.ReadCloser); ok {
+						var fd io.Writer
+						if fd, err = writer.CreateFormFile(key, key); err != nil {
+							return r, err
+						}
+						if _, err = io.Copy(fd, rc); err != nil {
+							return r, err
+						}
+					} else {
+						if err = writer.WriteField(key, ensureValueString(value)); err != nil {
+							return r, err
+						}
+					}
+				}
+				if err = writer.Close(); err != nil {
+					return r, err
+				}
+				if r.Header == nil {
+					r.Header = make(http.Header)
+				}
+				r.Header.Set(http.CanonicalHeaderKey(headerContentType), writer.FormDataContentType())
+				r.Body = ioutil.NopCloser(bytes.NewReader(body.Bytes()))
+				r.ContentLength = int64(body.Len())
+				return r, err
+			}
+			return r, err
+		})
+	}
+}
+
+// WithFile returns a PrepareDecorator that sends a file in the request body.
+func WithFile(f io.ReadCloser) PrepareDecorator { + return func(p Preparer) Preparer { + return PreparerFunc(func(r *http.Request) (*http.Request, error) { + r, err := p.Prepare(r) + if err == nil { + b, err := ioutil.ReadAll(f) + if err != nil { + return r, err + } + r.Body = ioutil.NopCloser(bytes.NewReader(b)) + r.ContentLength = int64(len(b)) + } + return r, err + }) + } +} + +// WithBool returns a PrepareDecorator that encodes the passed bool into the body of the request +// and sets the Content-Length header. +func WithBool(v bool) PrepareDecorator { + return WithString(fmt.Sprintf("%v", v)) +} + +// WithFloat32 returns a PrepareDecorator that encodes the passed float32 into the body of the +// request and sets the Content-Length header. +func WithFloat32(v float32) PrepareDecorator { + return WithString(fmt.Sprintf("%v", v)) +} + +// WithFloat64 returns a PrepareDecorator that encodes the passed float64 into the body of the +// request and sets the Content-Length header. +func WithFloat64(v float64) PrepareDecorator { + return WithString(fmt.Sprintf("%v", v)) +} + +// WithInt32 returns a PrepareDecorator that encodes the passed int32 into the body of the request +// and sets the Content-Length header. +func WithInt32(v int32) PrepareDecorator { + return WithString(fmt.Sprintf("%v", v)) +} + +// WithInt64 returns a PrepareDecorator that encodes the passed int64 into the body of the request +// and sets the Content-Length header. +func WithInt64(v int64) PrepareDecorator { + return WithString(fmt.Sprintf("%v", v)) +} + +// WithString returns a PrepareDecorator that encodes the passed string into the body of the request +// and sets the Content-Length header. +func WithString(v string) PrepareDecorator { + return func(p Preparer) Preparer { + return PreparerFunc(func(r *http.Request) (*http.Request, error) { + r, err := p.Prepare(r) + if err == nil { + r.ContentLength = int64(len(v)) + r.Body = ioutil.NopCloser(strings.NewReader(v)) + } + return r, err + }) + } +} + +// WithJSON returns a PrepareDecorator that encodes the data passed as JSON into the body of the +// request and sets the Content-Length header. +func WithJSON(v interface{}) PrepareDecorator { + return func(p Preparer) Preparer { + return PreparerFunc(func(r *http.Request) (*http.Request, error) { + r, err := p.Prepare(r) + if err == nil { + b, err := json.Marshal(v) + if err == nil { + r.ContentLength = int64(len(b)) + r.Body = ioutil.NopCloser(bytes.NewReader(b)) + } + } + return r, err + }) + } +} + +// WithPath returns a PrepareDecorator that adds the supplied path to the request URL. If the path +// is absolute (that is, it begins with a "/"), it replaces the existing path. +func WithPath(path string) PrepareDecorator { + return func(p Preparer) Preparer { + return PreparerFunc(func(r *http.Request) (*http.Request, error) { + r, err := p.Prepare(r) + if err == nil { + if r.URL == nil { + return r, NewError("autorest", "WithPath", "Invoked with a nil URL") + } + if r.URL, err = parseURL(r.URL, path); err != nil { + return r, err + } + } + return r, err + }) + } +} + +// WithEscapedPathParameters returns a PrepareDecorator that replaces brace-enclosed keys within the +// request path (i.e., http.Request.URL.Path) with the corresponding values from the passed map. The +// values will be escaped (aka URL encoded) before insertion into the path. 
+func WithEscapedPathParameters(path string, pathParameters map[string]interface{}) PrepareDecorator { + parameters := escapeValueStrings(ensureValueStrings(pathParameters)) + return func(p Preparer) Preparer { + return PreparerFunc(func(r *http.Request) (*http.Request, error) { + r, err := p.Prepare(r) + if err == nil { + if r.URL == nil { + return r, NewError("autorest", "WithEscapedPathParameters", "Invoked with a nil URL") + } + for key, value := range parameters { + path = strings.Replace(path, "{"+key+"}", value, -1) + } + if r.URL, err = parseURL(r.URL, path); err != nil { + return r, err + } + } + return r, err + }) + } +} + +// WithPathParameters returns a PrepareDecorator that replaces brace-enclosed keys within the +// request path (i.e., http.Request.URL.Path) with the corresponding values from the passed map. +func WithPathParameters(path string, pathParameters map[string]interface{}) PrepareDecorator { + parameters := ensureValueStrings(pathParameters) + return func(p Preparer) Preparer { + return PreparerFunc(func(r *http.Request) (*http.Request, error) { + r, err := p.Prepare(r) + if err == nil { + if r.URL == nil { + return r, NewError("autorest", "WithPathParameters", "Invoked with a nil URL") + } + for key, value := range parameters { + path = strings.Replace(path, "{"+key+"}", value, -1) + } + + if r.URL, err = parseURL(r.URL, path); err != nil { + return r, err + } + } + return r, err + }) + } +} + +func parseURL(u *url.URL, path string) (*url.URL, error) { + p := strings.TrimRight(u.String(), "/") + if !strings.HasPrefix(path, "/") { + path = "/" + path + } + return url.Parse(p + path) +} + +// WithQueryParameters returns a PrepareDecorators that encodes and applies the query parameters +// given in the supplied map (i.e., key=value). +func WithQueryParameters(queryParameters map[string]interface{}) PrepareDecorator { + parameters := ensureValueStrings(queryParameters) + return func(p Preparer) Preparer { + return PreparerFunc(func(r *http.Request) (*http.Request, error) { + r, err := p.Prepare(r) + if err == nil { + if r.URL == nil { + return r, NewError("autorest", "WithQueryParameters", "Invoked with a nil URL") + } + v := r.URL.Query() + for key, value := range parameters { + v.Add(key, value) + } + r.URL.RawQuery = createQuery(v) + } + return r, err + }) + } +} diff --git a/vendor/github.com/Azure/go-autorest/autorest/responder.go b/vendor/github.com/Azure/go-autorest/autorest/responder.go new file mode 100644 index 000000000..87f71e585 --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/autorest/responder.go @@ -0,0 +1,236 @@ +package autorest + +import ( + "bytes" + "encoding/json" + "encoding/xml" + "fmt" + "io" + "io/ioutil" + "net/http" + "strings" +) + +// Responder is the interface that wraps the Respond method. +// +// Respond accepts and reacts to an http.Response. Implementations must ensure to not share or hold +// state since Responders may be shared and re-used. +type Responder interface { + Respond(*http.Response) error +} + +// ResponderFunc is a method that implements the Responder interface. +type ResponderFunc func(*http.Response) error + +// Respond implements the Responder interface on ResponderFunc. +func (rf ResponderFunc) Respond(r *http.Response) error { + return rf(r) +} + +// RespondDecorator takes and possibly decorates, by wrapping, a Responder. Decorators may react to +// the http.Response and pass it along or, first, pass the http.Response along then react. 
+type RespondDecorator func(Responder) Responder
+
+// CreateResponder creates, decorates, and returns a Responder. Without decorators, the returned
+// Responder returns the passed http.Response unmodified. Responders may or may not be safe to share
+// and re-use: it depends on the applied decorators. For example, a standard decorator that closes
+// the response body is fine to share whereas a decorator that reads the body into a passed struct
+// is not.
+//
+// To prevent memory leaks, ensure that at least one Responder closes the response body.
+func CreateResponder(decorators ...RespondDecorator) Responder {
+	return DecorateResponder(
+		Responder(ResponderFunc(func(r *http.Response) error { return nil })),
+		decorators...)
+}
+
+// DecorateResponder accepts a Responder and a, possibly empty, set of RespondDecorators, which it
+// applies to the Responder. Decorators are applied in the order received, but their effect upon the
+// response depends on whether they are a pre-decorator (react to the http.Response and then pass it
+// along) or a post-decorator (pass the http.Response along and then react).
+func DecorateResponder(r Responder, decorators ...RespondDecorator) Responder {
+	for _, decorate := range decorators {
+		r = decorate(r)
+	}
+	return r
+}
+
+// Respond accepts an http.Response and a, possibly empty, set of RespondDecorators.
+// It creates a Responder from the decorators, which it then applies to the passed http.Response.
+func Respond(r *http.Response, decorators ...RespondDecorator) error {
+	if r == nil {
+		return nil
+	}
+	return CreateResponder(decorators...).Respond(r)
+}
+
+// ByIgnoring returns a RespondDecorator that ignores the passed http.Response passing it unexamined
+// to the next RespondDecorator.
+func ByIgnoring() RespondDecorator {
+	return func(r Responder) Responder {
+		return ResponderFunc(func(resp *http.Response) error {
+			return r.Respond(resp)
+		})
+	}
+}
+
+// ByCopying copies the contents of the http.Response Body into the passed bytes.Buffer as
+// the Body is read.
+func ByCopying(b *bytes.Buffer) RespondDecorator {
+	return func(r Responder) Responder {
+		return ResponderFunc(func(resp *http.Response) error {
+			err := r.Respond(resp)
+			if err == nil && resp != nil && resp.Body != nil {
+				resp.Body = TeeReadCloser(resp.Body, b)
+			}
+			return err
+		})
+	}
+}
+
+// ByDiscardingBody returns a RespondDecorator that first invokes the passed Responder after which
+// it copies the remaining bytes (if any) in the response body to ioutil.Discard. Since the passed
+// Responder is invoked prior to discarding the response body, the decorator may occur anywhere
+// within the set.
+func ByDiscardingBody() RespondDecorator {
+	return func(r Responder) Responder {
+		return ResponderFunc(func(resp *http.Response) error {
+			err := r.Respond(resp)
+			if err == nil && resp != nil && resp.Body != nil {
+				if _, err := io.Copy(ioutil.Discard, resp.Body); err != nil {
+					return fmt.Errorf("Error discarding the response body: %v", err)
+				}
+			}
+			return err
+		})
+	}
+}
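An illustrative sketch (standalone; the URL and the payload type are made up for this example) of composing RespondDecorators roughly the way a generated client would:

```go
package main

import (
	"fmt"
	"net/http"

	"github.com/Azure/go-autorest/autorest"
)

// item is a hypothetical payload shape used only for this sketch.
type item struct {
	Name string `json:"name"`
}

func main() {
	resp, err := http.Get("https://example.net/item")
	if err != nil {
		fmt.Println(err)
		return
	}

	var v item
	// Fail on non-200, decode the JSON body into v, then close the body.
	err = autorest.Respond(resp,
		autorest.WithErrorUnlessStatusCode(http.StatusOK),
		autorest.ByUnmarshallingJSON(&v),
		autorest.ByClosing())
	fmt.Println(v.Name, err)
}
```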
+
+// ByClosing returns a RespondDecorator that first invokes the passed Responder after which it
+// closes the response body. Since the passed Responder is invoked prior to closing the response
+// body, the decorator may occur anywhere within the set.
+func ByClosing() RespondDecorator {
+	return func(r Responder) Responder {
+		return ResponderFunc(func(resp *http.Response) error {
+			err := r.Respond(resp)
+			if resp != nil && resp.Body != nil {
+				if err := resp.Body.Close(); err != nil {
+					return fmt.Errorf("Error closing the response body: %v", err)
+				}
+			}
+			return err
+		})
+	}
+}
+
+// ByClosingIfError returns a RespondDecorator that first invokes the passed Responder after which
+// it closes the response if the passed Responder returns an error and the response body exists.
+func ByClosingIfError() RespondDecorator {
+	return func(r Responder) Responder {
+		return ResponderFunc(func(resp *http.Response) error {
+			err := r.Respond(resp)
+			if err != nil && resp != nil && resp.Body != nil {
+				if err := resp.Body.Close(); err != nil {
+					return fmt.Errorf("Error closing the response body: %v", err)
+				}
+			}
+			return err
+		})
+	}
+}
+
+// ByUnmarshallingJSON returns a RespondDecorator that decodes a JSON document returned in the
+// response Body into the value pointed to by v.
+func ByUnmarshallingJSON(v interface{}) RespondDecorator {
+	return func(r Responder) Responder {
+		return ResponderFunc(func(resp *http.Response) error {
+			err := r.Respond(resp)
+			if err == nil {
+				b, errInner := ioutil.ReadAll(resp.Body)
+				// Some responses might include a BOM, remove for successful unmarshalling
+				b = bytes.TrimPrefix(b, []byte("\xef\xbb\xbf"))
+				if errInner != nil {
+					err = fmt.Errorf("Error occurred reading http.Response#Body - Error = '%v'", errInner)
+				} else if len(strings.Trim(string(b), " ")) > 0 {
+					errInner = json.Unmarshal(b, v)
+					if errInner != nil {
+						err = fmt.Errorf("Error occurred unmarshalling JSON - Error = '%v' JSON = '%s'", errInner, string(b))
+					}
+				}
+			}
+			return err
+		})
+	}
+}
+
+// ByUnmarshallingXML returns a RespondDecorator that decodes an XML document returned in the
+// response Body into the value pointed to by v.
+func ByUnmarshallingXML(v interface{}) RespondDecorator {
+	return func(r Responder) Responder {
+		return ResponderFunc(func(resp *http.Response) error {
+			err := r.Respond(resp)
+			if err == nil {
+				b, errInner := ioutil.ReadAll(resp.Body)
+				if errInner != nil {
+					err = fmt.Errorf("Error occurred reading http.Response#Body - Error = '%v'", errInner)
+				} else {
+					errInner = xml.Unmarshal(b, v)
+					if errInner != nil {
+						err = fmt.Errorf("Error occurred unmarshalling Xml - Error = '%v' Xml = '%s'", errInner, string(b))
+					}
+				}
+			}
+			return err
+		})
+	}
+}
+
+// WithErrorUnlessStatusCode returns a RespondDecorator that emits an error unless the response
+// StatusCode is among the set passed. On error, the response body is fully read into a buffer;
+// the bytes are attached to the returned error and restored on the response so the body remains
+// readable.
+func WithErrorUnlessStatusCode(codes ...int) RespondDecorator {
+	return func(r Responder) Responder {
+		return ResponderFunc(func(resp *http.Response) error {
+			err := r.Respond(resp)
+			if err == nil && !ResponseHasStatusCode(resp, codes...) {
+				derr := NewErrorWithResponse("autorest", "WithErrorUnlessStatusCode", resp, "%v %v failed with %s",
+					resp.Request.Method,
+					resp.Request.URL,
+					resp.Status)
+				if resp.Body != nil {
+					defer resp.Body.Close()
+					b, _ := ioutil.ReadAll(resp.Body)
+					derr.ServiceError = b
+					resp.Body = ioutil.NopCloser(bytes.NewReader(b))
+				}
+				err = derr
+			}
+			return err
+		})
+	}
+}
+
+// WithErrorUnlessOK returns a RespondDecorator that emits an error if the response StatusCode is
+// anything other than HTTP 200.
+func WithErrorUnlessOK() RespondDecorator {
+	return WithErrorUnlessStatusCode(http.StatusOK)
+}
+
+// ExtractHeader extracts all values of the specified header from the http.Response. It returns an
+// empty string slice if the passed http.Response is nil or the header does not exist.
+func ExtractHeader(header string, resp *http.Response) []string {
+	if resp != nil && resp.Header != nil {
+		return resp.Header[http.CanonicalHeaderKey(header)]
+	}
+	return nil
+}
+
+// ExtractHeaderValue extracts the first value of the specified header from the http.Response. It
+// returns an empty string if the passed http.Response is nil or the header does not exist.
+func ExtractHeaderValue(header string, resp *http.Response) string {
+	h := ExtractHeader(header, resp)
+	if len(h) > 0 {
+		return h[0]
+	}
+	return ""
+}
diff --git a/vendor/github.com/Azure/go-autorest/autorest/retriablerequest.go b/vendor/github.com/Azure/go-autorest/autorest/retriablerequest.go
new file mode 100644
index 000000000..0ab1eb300
--- /dev/null
+++ b/vendor/github.com/Azure/go-autorest/autorest/retriablerequest.go
@@ -0,0 +1,38 @@
+package autorest
+
+import (
+	"bytes"
+	"io"
+	"io/ioutil"
+	"net/http"
+)
+
+// NewRetriableRequest returns a wrapper around an HTTP request that supports retry logic.
+func NewRetriableRequest(req *http.Request) *RetriableRequest {
+	return &RetriableRequest{req: req}
+}
+
+// Request returns the wrapped HTTP request.
+func (rr *RetriableRequest) Request() *http.Request {
+	return rr.req
+}
+
+func (rr *RetriableRequest) prepareFromByteReader() (err error) {
+	// fall back to making a copy (only do this once)
+	b := []byte{}
+	if rr.req.ContentLength > 0 {
+		b = make([]byte, rr.req.ContentLength)
+		_, err = io.ReadFull(rr.req.Body, b)
+		if err != nil {
+			return err
+		}
+	} else {
+		b, err = ioutil.ReadAll(rr.req.Body)
+		if err != nil {
+			return err
+		}
+	}
+	rr.br = bytes.NewReader(b)
+	rr.req.Body = ioutil.NopCloser(rr.br)
+	return err
+}
diff --git a/vendor/github.com/Azure/go-autorest/autorest/retriablerequest_1.7.go b/vendor/github.com/Azure/go-autorest/autorest/retriablerequest_1.7.go
new file mode 100644
index 000000000..e28eb2cbd
--- /dev/null
+++ b/vendor/github.com/Azure/go-autorest/autorest/retriablerequest_1.7.go
@@ -0,0 +1,44 @@
+// +build !go1.8
+
+package autorest
+
+import (
+	"bytes"
+	"net/http"
+)
+
+// RetriableRequest provides facilities for retrying an HTTP request.
+type RetriableRequest struct {
+	req   *http.Request
+	br    *bytes.Reader
+	reset bool
+}
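For orientation, a minimal sketch (standalone; the URL, payload, and retry count are illustrative assumptions) of the Prepare-then-send loop that the retry SendDecorators further below build around this type:

```go
package main

import (
	"fmt"
	"net/http"
	"strings"

	"github.com/Azure/go-autorest/autorest"
)

func main() {
	req, _ := http.NewRequest(http.MethodPost, "https://example.net/", strings.NewReader("payload"))
	rr := autorest.NewRetriableRequest(req)

	var resp *http.Response
	var err error
	for attempt := 0; attempt < 3; attempt++ {
		// Prepare rewinds (or re-creates) the request body before each send,
		// since the transport closes the body on every attempt.
		if err = rr.Prepare(); err != nil {
			break
		}
		if resp, err = http.DefaultClient.Do(rr.Request()); err == nil {
			break
		}
	}
	fmt.Println(resp, err)
}
```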
+
+// Prepare signals that the request is about to be sent.
+func (rr *RetriableRequest) Prepare() (err error) {
+	// preserve the request body; this is to support retry logic as
+	// the underlying transport will always close the request body
+	if rr.req.Body != nil {
+		if rr.reset {
+			if rr.br != nil {
+				_, err = rr.br.Seek(0, 0 /*io.SeekStart*/)
+			}
+			rr.reset = false
+			if err != nil {
+				return err
+			}
+		}
+		if rr.br == nil {
+			// fall back to making a copy (only do this once)
+			err = rr.prepareFromByteReader()
+		}
+		// indicates that the request body needs to be reset
+		rr.reset = true
+	}
+	return err
+}
+
+func removeRequestBody(req *http.Request) {
+	req.Body = nil
+	req.ContentLength = 0
+}
diff --git a/vendor/github.com/Azure/go-autorest/autorest/retriablerequest_1.8.go b/vendor/github.com/Azure/go-autorest/autorest/retriablerequest_1.8.go
new file mode 100644
index 000000000..8c1d1aec8
--- /dev/null
+++ b/vendor/github.com/Azure/go-autorest/autorest/retriablerequest_1.8.go
@@ -0,0 +1,56 @@
+// +build go1.8
+
+package autorest
+
+import (
+	"bytes"
+	"io"
+	"net/http"
+)
+
+// RetriableRequest provides facilities for retrying an HTTP request.
+type RetriableRequest struct {
+	req   *http.Request
+	rc    io.ReadCloser
+	br    *bytes.Reader
+	reset bool
+}
+
+// Prepare signals that the request is about to be sent.
+func (rr *RetriableRequest) Prepare() (err error) {
+	// preserve the request body; this is to support retry logic as
+	// the underlying transport will always close the request body
+	if rr.req.Body != nil {
+		if rr.reset {
+			if rr.rc != nil {
+				rr.req.Body = rr.rc
+			} else if rr.br != nil {
+				_, err = rr.br.Seek(0, io.SeekStart)
+			}
+			rr.reset = false
+			if err != nil {
+				return err
+			}
+		}
+		if rr.req.GetBody != nil {
+			// this will allow us to preserve the body without having to
+			// make a copy. note we need to do this on each iteration
+			rr.rc, err = rr.req.GetBody()
+			if err != nil {
+				return err
+			}
+		} else if rr.br == nil {
+			// fall back to making a copy (only do this once)
+			err = rr.prepareFromByteReader()
+		}
+		// indicates that the request body needs to be reset
+		rr.reset = true
+	}
+	return err
+}
+
+func removeRequestBody(req *http.Request) {
+	req.Body = nil
+	req.GetBody = nil
+	req.ContentLength = 0
+}
diff --git a/vendor/github.com/Azure/go-autorest/autorest/sender.go b/vendor/github.com/Azure/go-autorest/autorest/sender.go
new file mode 100644
index 000000000..94b029847
--- /dev/null
+++ b/vendor/github.com/Azure/go-autorest/autorest/sender.go
@@ -0,0 +1,293 @@
+package autorest
+
+import (
+	"fmt"
+	"log"
+	"math"
+	"net/http"
+	"strconv"
+	"time"
+)
+
+// Sender is the interface that wraps the Do method to send HTTP requests.
+//
+// The standard http.Client conforms to this interface.
+type Sender interface {
+	Do(*http.Request) (*http.Response, error)
+}
+
+// SenderFunc is a method that implements the Sender interface.
+type SenderFunc func(*http.Request) (*http.Response, error)
+
+// Do implements the Sender interface on SenderFunc.
+func (sf SenderFunc) Do(r *http.Request) (*http.Response, error) {
+	return sf(r)
+}
+
+// SendDecorator takes and possibly decorates, by wrapping, a Sender. Decorators may affect the
+// http.Request and pass it along or, first, pass the http.Request along then react to the
+// http.Response result.
+type SendDecorator func(Sender) Sender
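A brief sketch (standalone; the URL, logger, and retry parameters are illustrative) of composing SendDecorators around a Sender, similar to what Client.Do sets up internally:

```go
package main

import (
	"log"
	"net/http"
	"os"
	"time"

	"github.com/Azure/go-autorest/autorest"
)

func main() {
	req, _ := http.NewRequest(http.MethodGet, "https://example.net/", nil)

	// Retry up to 3 times on 429/503 and log each attempt.
	resp, err := autorest.SendWithSender(&http.Client{}, req,
		autorest.WithLogging(log.New(os.Stderr, "autorest: ", log.LstdFlags)),
		autorest.DoRetryForStatusCodes(3, 2*time.Second,
			http.StatusTooManyRequests, http.StatusServiceUnavailable))
	if err != nil {
		log.Fatal(err)
	}
	defer resp.Body.Close()
	log.Println(resp.Status)
}
```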
+
+// CreateSender creates, decorates, and returns, as a Sender, the default http.Client.
+func CreateSender(decorators ...SendDecorator) Sender {
+	return DecorateSender(&http.Client{}, decorators...)
+}
+
+// DecorateSender accepts a Sender and a, possibly empty, set of SendDecorators, which it applies to
+// the Sender. Decorators are applied in the order received, but their effect upon the request
+// depends on whether they are a pre-decorator (change the http.Request and then pass it along) or a
+// post-decorator (pass the http.Request along and react to the results in http.Response).
+func DecorateSender(s Sender, decorators ...SendDecorator) Sender {
+	for _, decorate := range decorators {
+		s = decorate(s)
+	}
+	return s
+}
+
+// Send sends, by means of the default http.Client, the passed http.Request, returning the
+// http.Response and possible error. It also accepts a, possibly empty, set of SendDecorators which
+// it will apply to the http.Client before invoking the Do method.
+//
+// Send is a convenience method and not recommended for production. Advanced users should use
+// SendWithSender, passing and sharing their own Sender (e.g., instance of http.Client).
+//
+// Send will not poll or retry requests.
+func Send(r *http.Request, decorators ...SendDecorator) (*http.Response, error) {
+	return SendWithSender(&http.Client{}, r, decorators...)
+}
+
+// SendWithSender sends the passed http.Request, through the provided Sender, returning the
+// http.Response and possible error. It also accepts a, possibly empty, set of SendDecorators which
+// it will apply to the passed Sender before invoking the Do method.
+//
+// SendWithSender will not poll or retry requests.
+func SendWithSender(s Sender, r *http.Request, decorators ...SendDecorator) (*http.Response, error) {
+	return DecorateSender(s, decorators...).Do(r)
+}
+
+// AfterDelay returns a SendDecorator that delays for the passed time.Duration before
+// invoking the Sender. The delay may be terminated by closing the optional channel on the
+// http.Request. If canceled, no further Senders are invoked.
+func AfterDelay(d time.Duration) SendDecorator {
+	return func(s Sender) Sender {
+		return SenderFunc(func(r *http.Request) (*http.Response, error) {
+			if !DelayForBackoff(d, 0, r.Cancel) {
+				return nil, fmt.Errorf("autorest: AfterDelay canceled before full delay")
+			}
+			return s.Do(r)
+		})
+	}
+}
+
+// AsIs returns a SendDecorator that invokes the passed Sender without modifying the http.Request.
+func AsIs() SendDecorator {
+	return func(s Sender) Sender {
+		return SenderFunc(func(r *http.Request) (*http.Response, error) {
+			return s.Do(r)
+		})
+	}
+}
+
+// DoCloseIfError returns a SendDecorator that first invokes the passed Sender after which
+// it closes the response if the passed Sender returns an error and the response body exists.
+func DoCloseIfError() SendDecorator {
+	return func(s Sender) Sender {
+		return SenderFunc(func(r *http.Request) (*http.Response, error) {
+			resp, err := s.Do(r)
+			if err != nil {
+				Respond(resp, ByDiscardingBody(), ByClosing())
+			}
+			return resp, err
+		})
+	}
+}
+
+// DoErrorIfStatusCode returns a SendDecorator that emits an error if the response StatusCode is
+// among the set passed. Since these are artificial errors, the response body may still require
+// closing.
+func DoErrorIfStatusCode(codes ...int) SendDecorator {
+	return func(s Sender) Sender {
+		return SenderFunc(func(r *http.Request) (*http.Response, error) {
+			resp, err := s.Do(r)
+			if err == nil && ResponseHasStatusCode(resp, codes...)
{ + err = NewErrorWithResponse("autorest", "DoErrorIfStatusCode", resp, "%v %v failed with %s", + resp.Request.Method, + resp.Request.URL, + resp.Status) + } + return resp, err + }) + } +} + +// DoErrorUnlessStatusCode returns a SendDecorator that emits an error unless the response +// StatusCode is among the set passed. Since these are artificial errors, the response body +// may still require closing. +func DoErrorUnlessStatusCode(codes ...int) SendDecorator { + return func(s Sender) Sender { + return SenderFunc(func(r *http.Request) (*http.Response, error) { + resp, err := s.Do(r) + if err == nil && !ResponseHasStatusCode(resp, codes...) { + err = NewErrorWithResponse("autorest", "DoErrorUnlessStatusCode", resp, "%v %v failed with %s", + resp.Request.Method, + resp.Request.URL, + resp.Status) + } + return resp, err + }) + } +} + +// DoPollForStatusCodes returns a SendDecorator that polls if the http.Response contains one of the +// passed status codes. It expects the http.Response to contain a Location header providing the +// URL at which to poll (using GET) and will poll until the time passed is equal to or greater than +// the supplied duration. It will delay between requests for the duration specified in the +// RetryAfter header or, if the header is absent, the passed delay. Polling may be canceled by +// closing the optional channel on the http.Request. +func DoPollForStatusCodes(duration time.Duration, delay time.Duration, codes ...int) SendDecorator { + return func(s Sender) Sender { + return SenderFunc(func(r *http.Request) (resp *http.Response, err error) { + resp, err = s.Do(r) + + if err == nil && ResponseHasStatusCode(resp, codes...) { + r, err = NewPollingRequest(resp, r.Cancel) + + for err == nil && ResponseHasStatusCode(resp, codes...) { + Respond(resp, + ByDiscardingBody(), + ByClosing()) + resp, err = SendWithSender(s, r, + AfterDelay(GetRetryAfter(resp, delay))) + } + } + + return resp, err + }) + } +} + +// DoRetryForAttempts returns a SendDecorator that retries a failed request for up to the specified +// number of attempts, exponentially backing off between requests using the supplied backoff +// time.Duration (which may be zero). Retrying may be canceled by closing the optional channel on +// the http.Request. +func DoRetryForAttempts(attempts int, backoff time.Duration) SendDecorator { + return func(s Sender) Sender { + return SenderFunc(func(r *http.Request) (resp *http.Response, err error) { + rr := NewRetriableRequest(r) + for attempt := 0; attempt < attempts; attempt++ { + err = rr.Prepare() + if err != nil { + return resp, err + } + resp, err = s.Do(rr.Request()) + if err == nil { + return resp, err + } + DelayForBackoff(backoff, attempt, r.Cancel) + } + return resp, err + }) + } +} + +// DoRetryForStatusCodes returns a SendDecorator that retries for specified statusCodes for up to the specified +// number of attempts, exponentially backing off between requests using the supplied backoff +// time.Duration (which may be zero). Retrying may be canceled by closing the optional channel on +// the http.Request. 
+func DoRetryForStatusCodes(attempts int, backoff time.Duration, codes ...int) SendDecorator { + return func(s Sender) Sender { + return SenderFunc(func(r *http.Request) (resp *http.Response, err error) { + rr := NewRetriableRequest(r) + // Increment to add the first call (attempts denotes number of retries) + attempts++ + for attempt := 0; attempt < attempts; attempt++ { + err = rr.Prepare() + if err != nil { + return resp, err + } + resp, err = s.Do(rr.Request()) + if err != nil || !ResponseHasStatusCode(resp, codes...) { + return resp, err + } + delayed := DelayWithRetryAfter(resp, r.Cancel) + if !delayed { + DelayForBackoff(backoff, attempt, r.Cancel) + } + } + return resp, err + }) + } +} + +// DelayWithRetryAfter invokes time.After for the duration specified in the "Retry-After" header in +// responses with status code 429 +func DelayWithRetryAfter(resp *http.Response, cancel <-chan struct{}) bool { + retryAfter, _ := strconv.Atoi(resp.Header.Get("Retry-After")) + if resp.StatusCode == http.StatusTooManyRequests && retryAfter > 0 { + select { + case <-time.After(time.Duration(retryAfter) * time.Second): + return true + case <-cancel: + return false + } + } + return false +} + +// DoRetryForDuration returns a SendDecorator that retries the request until the total time is equal +// to or greater than the specified duration, exponentially backing off between requests using the +// supplied backoff time.Duration (which may be zero). Retrying may be canceled by closing the +// optional channel on the http.Request. +func DoRetryForDuration(d time.Duration, backoff time.Duration) SendDecorator { + return func(s Sender) Sender { + return SenderFunc(func(r *http.Request) (resp *http.Response, err error) { + rr := NewRetriableRequest(r) + end := time.Now().Add(d) + for attempt := 0; time.Now().Before(end); attempt++ { + err = rr.Prepare() + if err != nil { + return resp, err + } + resp, err = s.Do(rr.Request()) + if err == nil { + return resp, err + } + DelayForBackoff(backoff, attempt, r.Cancel) + } + return resp, err + }) + } +} + +// WithLogging returns a SendDecorator that implements simple before and after logging of the +// request. +func WithLogging(logger *log.Logger) SendDecorator { + return func(s Sender) Sender { + return SenderFunc(func(r *http.Request) (*http.Response, error) { + logger.Printf("Sending %s %s", r.Method, r.URL) + resp, err := s.Do(r) + if err != nil { + logger.Printf("%s %s received error '%v'", r.Method, r.URL, err) + } else { + logger.Printf("%s %s received %s", r.Method, r.URL, resp.Status) + } + return resp, err + }) + } +} + +// DelayForBackoff invokes time.After for the supplied backoff duration raised to the power of +// passed attempt (i.e., an exponential backoff delay). Backoff duration is in seconds and can set +// to zero for no delay. The delay may be canceled by closing the passed channel. If terminated early, +// returns false. +// Note: Passing attempt 1 will result in doubling "backoff" duration. Treat this as a zero-based attempt +// count. 
+func DelayForBackoff(backoff time.Duration, attempt int, cancel <-chan struct{}) bool { + select { + case <-time.After(time.Duration(backoff.Seconds()*math.Pow(2, float64(attempt))) * time.Second): + return true + case <-cancel: + return false + } +} diff --git a/vendor/github.com/Azure/go-autorest/autorest/utility.go b/vendor/github.com/Azure/go-autorest/autorest/utility.go new file mode 100644 index 000000000..78067148b --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/autorest/utility.go @@ -0,0 +1,178 @@ +package autorest + +import ( + "bytes" + "encoding/json" + "encoding/xml" + "fmt" + "io" + "net/url" + "reflect" + "sort" + "strings" +) + +// EncodedAs is a series of constants specifying various data encodings +type EncodedAs string + +const ( + // EncodedAsJSON states that data is encoded as JSON + EncodedAsJSON EncodedAs = "JSON" + + // EncodedAsXML states that data is encoded as Xml + EncodedAsXML EncodedAs = "XML" +) + +// Decoder defines the decoding method json.Decoder and xml.Decoder share +type Decoder interface { + Decode(v interface{}) error +} + +// NewDecoder creates a new decoder appropriate to the passed encoding. +// encodedAs specifies the type of encoding and r supplies the io.Reader containing the +// encoded data. +func NewDecoder(encodedAs EncodedAs, r io.Reader) Decoder { + if encodedAs == EncodedAsJSON { + return json.NewDecoder(r) + } else if encodedAs == EncodedAsXML { + return xml.NewDecoder(r) + } + return nil +} + +// CopyAndDecode decodes the data from the passed io.Reader while making a copy. Having a copy +// is especially useful if there is a chance the data will fail to decode. +// encodedAs specifies the expected encoding, r provides the io.Reader to the data, and v +// is the decoding destination. +func CopyAndDecode(encodedAs EncodedAs, r io.Reader, v interface{}) (bytes.Buffer, error) { + b := bytes.Buffer{} + return b, NewDecoder(encodedAs, io.TeeReader(r, &b)).Decode(v) +} + +// TeeReadCloser returns a ReadCloser that writes to w what it reads from rc. +// It utilizes io.TeeReader to copy the data read and has the same behavior when reading. +// Further, when it is closed, it ensures that rc is closed as well. +func TeeReadCloser(rc io.ReadCloser, w io.Writer) io.ReadCloser { + return &teeReadCloser{rc, io.TeeReader(rc, w)} +} + +type teeReadCloser struct { + rc io.ReadCloser + r io.Reader +} + +func (t *teeReadCloser) Read(p []byte) (int, error) { + return t.r.Read(p) +} + +func (t *teeReadCloser) Close() error { + return t.rc.Close() +} + +func containsInt(ints []int, n int) bool { + for _, i := range ints { + if i == n { + return true + } + } + return false +} + +func escapeValueStrings(m map[string]string) map[string]string { + for key, value := range m { + m[key] = url.QueryEscape(value) + } + return m +} + +func ensureValueStrings(mapOfInterface map[string]interface{}) map[string]string { + mapOfStrings := make(map[string]string) + for key, value := range mapOfInterface { + mapOfStrings[key] = ensureValueString(value) + } + return mapOfStrings +} + +func ensureValueString(value interface{}) string { + if value == nil { + return "" + } + switch v := value.(type) { + case string: + return v + case []byte: + return string(v) + default: + return fmt.Sprintf("%v", v) + } +} + +// MapToValues method converts map[string]interface{} to url.Values. 
+func MapToValues(m map[string]interface{}) url.Values { + v := url.Values{} + for key, value := range m { + x := reflect.ValueOf(value) + if x.Kind() == reflect.Array || x.Kind() == reflect.Slice { + for i := 0; i < x.Len(); i++ { + v.Add(key, ensureValueString(x.Index(i))) + } + } else { + v.Add(key, ensureValueString(value)) + } + } + return v +} + +// String method converts interface v to string. If interface is a list, it +// joins list elements using separator. +func String(v interface{}, sep ...string) string { + if len(sep) > 0 { + return ensureValueString(strings.Join(v.([]string), sep[0])) + } + return ensureValueString(v) +} + +// Encode method encodes url path and query parameters. +func Encode(location string, v interface{}, sep ...string) string { + s := String(v, sep...) + switch strings.ToLower(location) { + case "path": + return pathEscape(s) + case "query": + return queryEscape(s) + default: + return s + } +} + +func pathEscape(s string) string { + return strings.Replace(url.QueryEscape(s), "+", "%20", -1) +} + +func queryEscape(s string) string { + return url.QueryEscape(s) +} + +// This method is same as Encode() method of "net/url" go package, +// except it does not encode the query parameters because they +// already come encoded. It formats values map in query format (bar=foo&a=b). +func createQuery(v url.Values) string { + var buf bytes.Buffer + keys := make([]string, 0, len(v)) + for k := range v { + keys = append(keys, k) + } + sort.Strings(keys) + for _, k := range keys { + vs := v[k] + prefix := url.QueryEscape(k) + "=" + for _, v := range vs { + if buf.Len() > 0 { + buf.WriteByte('&') + } + buf.WriteString(prefix) + buf.WriteString(v) + } + } + return buf.String() +} diff --git a/vendor/github.com/Azure/go-autorest/autorest/version.go b/vendor/github.com/Azure/go-autorest/autorest/version.go new file mode 100644 index 000000000..a222e8efa --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/autorest/version.go @@ -0,0 +1,35 @@ +package autorest + +import ( + "bytes" + "fmt" + "strings" + "sync" +) + +const ( + major = 8 + minor = 0 + patch = 0 + tag = "" +) + +var once sync.Once +var version string + +// Version returns the semantic version (see http://semver.org). +func Version() string { + once.Do(func() { + semver := fmt.Sprintf("%d.%d.%d", major, minor, patch) + verBuilder := bytes.NewBufferString(semver) + if tag != "" && tag != "-" { + updated := strings.TrimPrefix(tag, "-") + _, err := verBuilder.WriteString("-" + updated) + if err == nil { + verBuilder = bytes.NewBufferString(semver) + } + } + version = verBuilder.String() + }) + return version +} diff --git a/vendor/github.com/satori/uuid/LICENSE b/vendor/github.com/satori/uuid/LICENSE new file mode 100644 index 000000000..488357b8a --- /dev/null +++ b/vendor/github.com/satori/uuid/LICENSE @@ -0,0 +1,20 @@ +Copyright (C) 2013-2016 by Maxim Bublis + +Permission is hereby granted, free of charge, to any person obtaining +a copy of this software and associated documentation files (the +"Software"), to deal in the Software without restriction, including +without limitation the rights to use, copy, modify, merge, publish, +distribute, sublicense, and/or sell copies of the Software, and to +permit persons to whom the Software is furnished to do so, subject to +the following conditions: + +The above copyright notice and this permission notice shall be +included in all copies or substantial portions of the Software. 
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
+LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
diff --git a/vendor/github.com/satori/uuid/README.md b/vendor/github.com/satori/uuid/README.md
new file mode 100644
index 000000000..b6aad1c81
--- /dev/null
+++ b/vendor/github.com/satori/uuid/README.md
@@ -0,0 +1,65 @@
+# UUID package for Go language
+
+[![Build Status](https://travis-ci.org/satori/go.uuid.png?branch=master)](https://travis-ci.org/satori/go.uuid)
+[![Coverage Status](https://coveralls.io/repos/github/satori/go.uuid/badge.svg?branch=master)](https://coveralls.io/github/satori/go.uuid)
+[![GoDoc](http://godoc.org/github.com/satori/go.uuid?status.png)](http://godoc.org/github.com/satori/go.uuid)
+
+This package provides a pure Go implementation of Universally Unique Identifiers (UUID). Both creation and parsing of UUIDs are supported.
+
+It comes with 100% test coverage and benchmarks out of the box.
+
+Supported versions:
+* Version 1, based on timestamp and MAC address (RFC 4122)
+* Version 2, based on timestamp, MAC address and POSIX UID/GID (DCE 1.1)
+* Version 3, based on MD5 hashing (RFC 4122)
+* Version 4, based on random numbers (RFC 4122)
+* Version 5, based on SHA-1 hashing (RFC 4122)
+
+## Installation
+
+Use the `go` command:
+
+	$ go get github.com/satori/go.uuid
+
+## Requirements
+
+The UUID package requires Go >= 1.2.
+
+## Example
+
+```go
+package main
+
+import (
+	"fmt"
+	"github.com/satori/go.uuid"
+)
+
+func main() {
+	// Creating UUID Version 4
+	u1 := uuid.NewV4()
+	fmt.Printf("UUIDv4: %s\n", u1)
+
+	// Parsing UUID from string input
+	u2, err := uuid.FromString("6ba7b810-9dad-11d1-80b4-00c04fd430c8")
+	if err != nil {
+		fmt.Printf("Something went wrong: %s", err)
+	}
+	fmt.Printf("Successfully parsed: %s", u2)
+}
+```
+
+## Documentation
+
+[Documentation](http://godoc.org/github.com/satori/go.uuid) is hosted at the GoDoc project.
+
+## Links
+* [RFC 4122](http://tools.ietf.org/html/rfc4122)
+* [DCE 1.1: Authentication and Security Services](http://pubs.opengroup.org/onlinepubs/9696989899/chap5.htm#tagcjh_08_02_01_01)
+
+## Copyright
+
+Copyright (C) 2013-2016 by Maxim Bublis <b@codemonkey.ru>.
+
+The UUID package is released under the MIT License.
+See [LICENSE](https://github.com/satori/go.uuid/blob/master/LICENSE) for details.
diff --git a/vendor/github.com/satori/uuid/uuid.go b/vendor/github.com/satori/uuid/uuid.go
new file mode 100644
index 000000000..295f3fc2c
--- /dev/null
+++ b/vendor/github.com/satori/uuid/uuid.go
@@ -0,0 +1,481 @@
+// Copyright (C) 2013-2015 by Maxim Bublis <b@codemonkey.ru>
+//
+// Permission is hereby granted, free of charge, to any person obtaining
+// a copy of this software and associated documentation files (the
+// "Software"), to deal in the Software without restriction, including
+// without limitation the rights to use, copy, modify, merge, publish,
+// distribute, sublicense, and/or sell copies of the Software, and to
+// permit persons to whom the Software is furnished to do so, subject to
+// the following conditions:
+//
+// The above copyright notice and this permission notice shall be
+// included in all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+// EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+// MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+// NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
+// LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+// OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+// WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+
+// Package uuid provides an implementation of Universally Unique Identifier (UUID).
+// Supported versions are 1, 3, 4 and 5 (as specified in RFC 4122) and
+// version 2 (as specified in DCE 1.1).
+package uuid
+
+import (
+	"bytes"
+	"crypto/md5"
+	"crypto/rand"
+	"crypto/sha1"
+	"database/sql/driver"
+	"encoding/binary"
+	"encoding/hex"
+	"fmt"
+	"hash"
+	"net"
+	"os"
+	"sync"
+	"time"
+)
+
+// UUID layout variants.
+const (
+	VariantNCS = iota
+	VariantRFC4122
+	VariantMicrosoft
+	VariantFuture
+)
+
+// UUID DCE domains.
+const (
+	DomainPerson = iota
+	DomainGroup
+	DomainOrg
+)
+
+// Difference in 100-nanosecond intervals between
+// UUID epoch (October 15, 1582) and Unix epoch (January 1, 1970).
+const epochStart = 122192928000000000
+
+// Used in the String method conversion.
+const dash byte = '-'
+
+// UUID v1/v2 storage.
+var (
+	storageMutex  sync.Mutex
+	storageOnce   sync.Once
+	epochFunc     = unixTimeFunc
+	clockSequence uint16
+	lastTime      uint64
+	hardwareAddr  [6]byte
+	posixUID      = uint32(os.Getuid())
+	posixGID      = uint32(os.Getgid())
+)
+
+// String parse helpers.
+var (
+	urnPrefix  = []byte("urn:uuid:")
+	byteGroups = []int{8, 4, 4, 4, 12}
+)
+
+func initClockSequence() {
+	buf := make([]byte, 2)
+	safeRandom(buf)
+	clockSequence = binary.BigEndian.Uint16(buf)
+}
+
+func initHardwareAddr() {
+	interfaces, err := net.Interfaces()
+	if err == nil {
+		for _, iface := range interfaces {
+			if len(iface.HardwareAddr) >= 6 {
+				copy(hardwareAddr[:], iface.HardwareAddr)
+				return
+			}
+		}
+	}
+
+	// Initialize hardwareAddr randomly in case
+	// no real network interfaces are present.
+	safeRandom(hardwareAddr[:])
+
+	// Set the multicast bit as recommended in RFC 4122.
+	hardwareAddr[0] |= 0x01
+}
+
+func initStorage() {
+	initClockSequence()
+	initHardwareAddr()
+}
+
+func safeRandom(dest []byte) {
+	if _, err := rand.Read(dest); err != nil {
+		panic(err)
+	}
+}
+
+// Returns the difference in 100-nanosecond intervals between
+// the UUID epoch (October 15, 1582) and the current time.
+// This is the default epoch calculation function.
+func unixTimeFunc() uint64 {
+	return epochStart + uint64(time.Now().UnixNano()/100)
+}
+
+// UUID representation compliant with the specification
+// described in RFC 4122.
+type UUID [16]byte
+
+// NullUUID can be used with the standard sql package to represent a
+// UUID value that can be NULL in the database.
+type NullUUID struct {
+	UUID  UUID
+	Valid bool
+}
+
+// The nil UUID is a special form of UUID that is specified to have all
+// 128 bits set to zero.
+var Nil = UUID{}
+
+// Predefined namespace UUIDs.
+var (
+	NamespaceDNS, _  = FromString("6ba7b810-9dad-11d1-80b4-00c04fd430c8")
+	NamespaceURL, _  = FromString("6ba7b811-9dad-11d1-80b4-00c04fd430c8")
+	NamespaceOID, _  = FromString("6ba7b812-9dad-11d1-80b4-00c04fd430c8")
+	NamespaceX500, _ = FromString("6ba7b814-9dad-11d1-80b4-00c04fd430c8")
+)
+
+// And returns the result of a binary AND of two UUIDs.
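+//
+// A minimal usage sketch (the mask value here is illustrative, not part of
+// the upstream docs): ANDing with a mask keeps only the first six bytes:
+//
+//	u := NewV4()
+//	mask := FromStringOrNil("ffffffff-ffff-0000-0000-000000000000")
+//	prefix := And(u, mask)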
+func And(u1 UUID, u2 UUID) UUID {
+	u := UUID{}
+	for i := 0; i < 16; i++ {
+		u[i] = u1[i] & u2[i]
+	}
+	return u
+}
+
+// Or returns the result of a binary OR of two UUIDs.
+func Or(u1 UUID, u2 UUID) UUID {
+	u := UUID{}
+	for i := 0; i < 16; i++ {
+		u[i] = u1[i] | u2[i]
+	}
+	return u
+}
+
+// Equal returns true if u1 and u2 are equal, otherwise it returns false.
+func Equal(u1 UUID, u2 UUID) bool {
+	return bytes.Equal(u1[:], u2[:])
+}
+
+// Version returns the algorithm version used to generate the UUID.
+func (u UUID) Version() uint {
+	return uint(u[6] >> 4)
+}
+
+// Variant returns the UUID layout variant.
+func (u UUID) Variant() uint {
+	switch {
+	case (u[8] & 0x80) == 0x00:
+		return VariantNCS
+	case (u[8]&0xc0)|0x80 == 0x80:
+		return VariantRFC4122
+	case (u[8]&0xe0)|0xc0 == 0xc0:
+		return VariantMicrosoft
+	}
+	return VariantFuture
+}
+
+// Bytes returns a byte slice representation of the UUID.
+func (u UUID) Bytes() []byte {
+	return u[:]
+}
+
+// Returns the canonical string representation of the UUID:
+// xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx.
+func (u UUID) String() string {
+	buf := make([]byte, 36)
+
+	hex.Encode(buf[0:8], u[0:4])
+	buf[8] = dash
+	hex.Encode(buf[9:13], u[4:6])
+	buf[13] = dash
+	hex.Encode(buf[14:18], u[6:8])
+	buf[18] = dash
+	hex.Encode(buf[19:23], u[8:10])
+	buf[23] = dash
+	hex.Encode(buf[24:], u[10:])
+
+	return string(buf)
+}
+
+// SetVersion sets the version bits.
+func (u *UUID) SetVersion(v byte) {
+	u[6] = (u[6] & 0x0f) | (v << 4)
+}
+
+// SetVariant sets the variant bits as described in RFC 4122.
+func (u *UUID) SetVariant() {
+	u[8] = (u[8] & 0xbf) | 0x80
+}
+
+// MarshalText implements the encoding.TextMarshaler interface.
+// The encoding is the same as returned by String.
+func (u UUID) MarshalText() (text []byte, err error) {
+	text = []byte(u.String())
+	return
+}
+
+// UnmarshalText implements the encoding.TextUnmarshaler interface.
+// The following formats are supported:
+// "6ba7b810-9dad-11d1-80b4-00c04fd430c8",
+// "{6ba7b810-9dad-11d1-80b4-00c04fd430c8}",
+// "urn:uuid:6ba7b810-9dad-11d1-80b4-00c04fd430c8"
+func (u *UUID) UnmarshalText(text []byte) (err error) {
+	if len(text) < 32 {
+		err = fmt.Errorf("uuid: UUID string too short: %s", text)
+		return
+	}
+
+	t := text[:]
+	braced := false
+
+	if bytes.Equal(t[:9], urnPrefix) {
+		t = t[9:]
+	} else if t[0] == '{' {
+		braced = true
+		t = t[1:]
+	}
+
+	b := u[:]
+
+	for i, byteGroup := range byteGroups {
+		if i > 0 {
+			if t[0] != '-' {
+				err = fmt.Errorf("uuid: invalid string format")
+				return
+			}
+			t = t[1:]
+		}
+
+		if len(t) < byteGroup {
+			err = fmt.Errorf("uuid: UUID string too short: %s", text)
+			return
+		}
+
+		if i == 4 && len(t) > byteGroup &&
+			((braced && t[byteGroup] != '}') || len(t[byteGroup:]) > 1 || !braced) {
+			err = fmt.Errorf("uuid: UUID string too long: %s", text)
+			return
+		}
+
+		_, err = hex.Decode(b[:byteGroup/2], t[:byteGroup])
+		if err != nil {
+			return
+		}
+
+		t = t[byteGroup:]
+		b = b[byteGroup/2:]
+	}
+
+	return
+}
+
+// MarshalBinary implements the encoding.BinaryMarshaler interface.
+func (u UUID) MarshalBinary() (data []byte, err error) {
+	data = u.Bytes()
+	return
+}
+
+// UnmarshalBinary implements the encoding.BinaryUnmarshaler interface.
+// It will return an error if the slice isn't 16 bytes long.
+func (u *UUID) UnmarshalBinary(data []byte) (err error) {
+	if len(data) != 16 {
+		err = fmt.Errorf("uuid: UUID must be exactly 16 bytes long, got %d bytes", len(data))
+		return
+	}
+	copy(u[:], data)
+
+	return
+}
+
+// Value implements the driver.Valuer interface.
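+//
+// A minimal usage sketch (assumes an open *sql.DB named db and a sessions
+// table; both are illustrative): the driver receives the canonical string
+// form produced by Value:
+//
+//	id := NewV4()
+//	_, err := db.Exec("INSERT INTO sessions (id) VALUES (?)", id)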
+func (u UUID) Value() (driver.Value, error) {
+	return u.String(), nil
+}
+
+// Scan implements the sql.Scanner interface.
+// A 16-byte slice is handled by UnmarshalBinary, while
+// a longer byte slice or a string is handled by UnmarshalText.
+func (u *UUID) Scan(src interface{}) error {
+	switch src := src.(type) {
+	case []byte:
+		if len(src) == 16 {
+			return u.UnmarshalBinary(src)
+		}
+		return u.UnmarshalText(src)
+
+	case string:
+		return u.UnmarshalText([]byte(src))
+	}
+
+	return fmt.Errorf("uuid: cannot convert %T to UUID", src)
+}
+
+// Value implements the driver.Valuer interface.
+func (u NullUUID) Value() (driver.Value, error) {
+	if !u.Valid {
+		return nil, nil
+	}
+	// Delegate to the UUID Value function.
+	return u.UUID.Value()
+}
+
+// Scan implements the sql.Scanner interface.
+func (u *NullUUID) Scan(src interface{}) error {
+	if src == nil {
+		u.UUID, u.Valid = Nil, false
+		return nil
+	}
+
+	// Delegate to the UUID Scan function.
+	u.Valid = true
+	return u.UUID.Scan(src)
+}
+
+// FromBytes returns a UUID converted from a raw byte slice input.
+// It will return an error if the slice isn't 16 bytes long.
+func FromBytes(input []byte) (u UUID, err error) {
+	err = u.UnmarshalBinary(input)
+	return
+}
+
+// FromBytesOrNil returns a UUID converted from a raw byte slice input.
+// Same behavior as FromBytes, but returns a Nil UUID on error.
+func FromBytesOrNil(input []byte) UUID {
+	uuid, err := FromBytes(input)
+	if err != nil {
+		return Nil
+	}
+	return uuid
+}
+
+// FromString returns a UUID parsed from string input.
+// Input is expected in a form accepted by UnmarshalText.
+func FromString(input string) (u UUID, err error) {
+	err = u.UnmarshalText([]byte(input))
+	return
+}
+
+// FromStringOrNil returns a UUID parsed from string input.
+// Same behavior as FromString, but returns a Nil UUID on error.
+func FromStringOrNil(input string) UUID {
+	uuid, err := FromString(input)
+	if err != nil {
+		return Nil
+	}
+	return uuid
+}
+
+// Returns the UUID v1/v2 storage state:
+// the epoch timestamp, clock sequence, and hardware address.
+func getStorage() (uint64, uint16, []byte) {
+	storageOnce.Do(initStorage)
+
+	storageMutex.Lock()
+	defer storageMutex.Unlock()
+
+	timeNow := epochFunc()
+	// The clock changed backwards since the last UUID generation;
+	// the clock sequence must be increased.
+	if timeNow <= lastTime {
+		clockSequence++
+	}
+	lastTime = timeNow
+
+	return timeNow, clockSequence, hardwareAddr[:]
+}
+
+// NewV1 returns a UUID based on the current timestamp and MAC address.
+func NewV1() UUID {
+	u := UUID{}
+
+	timeNow, clockSeq, hardwareAddr := getStorage()
+
+	binary.BigEndian.PutUint32(u[0:], uint32(timeNow))
+	binary.BigEndian.PutUint16(u[4:], uint16(timeNow>>32))
+	binary.BigEndian.PutUint16(u[6:], uint16(timeNow>>48))
+	binary.BigEndian.PutUint16(u[8:], clockSeq)
+
+	copy(u[10:], hardwareAddr)
+
+	u.SetVersion(1)
+	u.SetVariant()
+
+	return u
+}
+
+// NewV2 returns a DCE Security UUID based on the POSIX UID/GID.
+func NewV2(domain byte) UUID {
+	u := UUID{}
+
+	timeNow, clockSeq, hardwareAddr := getStorage()
+
+	switch domain {
+	case DomainPerson:
+		binary.BigEndian.PutUint32(u[0:], posixUID)
+	case DomainGroup:
+		binary.BigEndian.PutUint32(u[0:], posixGID)
+	}
+
+	binary.BigEndian.PutUint16(u[4:], uint16(timeNow>>32))
+	binary.BigEndian.PutUint16(u[6:], uint16(timeNow>>48))
+	binary.BigEndian.PutUint16(u[8:], clockSeq)
+	u[9] = domain
+
+	copy(u[10:], hardwareAddr)
+
+	u.SetVersion(2)
+	u.SetVariant()
+
+	return u
+}
+
+// NewV3 returns a UUID based on the MD5 hash of the namespace UUID and name.
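+//
+// A minimal usage sketch: name-based UUIDs are deterministic, so the same
+// namespace and name always yield the same UUID:
+//
+//	a := NewV3(NamespaceDNS, "example.com")
+//	b := NewV3(NamespaceDNS, "example.com")
+//	// Equal(a, b) == true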
+func NewV3(ns UUID, name string) UUID {
+	u := newFromHash(md5.New(), ns, name)
+	u.SetVersion(3)
+	u.SetVariant()
+
+	return u
+}
+
+// NewV4 returns a randomly generated UUID.
+func NewV4() UUID {
+	u := UUID{}
+	safeRandom(u[:])
+	u.SetVersion(4)
+	u.SetVariant()
+
+	return u
+}
+
+// NewV5 returns a UUID based on the SHA-1 hash of the namespace UUID and name.
+func NewV5(ns UUID, name string) UUID {
+	u := newFromHash(sha1.New(), ns, name)
+	u.SetVersion(5)
+	u.SetVariant()
+
+	return u
+}
+
+// Returns a UUID based on hashing of the namespace UUID and name.
+func newFromHash(h hash.Hash, ns UUID, name string) UUID {
+	u := UUID{}
+	h.Write(ns[:])
+	h.Write([]byte(name))
+	copy(u[:], h.Sum(nil))
+
+	return u
+}
diff --git a/vendor/vendor.json b/vendor/vendor.json
index 0939e878c..5b4a8f74d 100644
--- a/vendor/vendor.json
+++ b/vendor/vendor.json
@@ -51,10 +51,34 @@
 			"revisionTime": "2017-05-01T17:57:10Z"
 		},
 		{
-			"checksumSHA1": "rK3ght7KTtHGdm0V4+U7fv9+tUU=",
+			"checksumSHA1": "02D1G/awFY2Jb/w/OLN2tlMuORk=",
 			"path": "github.com/Azure/azure-sdk-for-go/storage",
-			"revision": "8e625d1702a32d01cef05a9252198d231c4af113",
-			"revisionTime": "2017-02-08T01:01:20Z"
+			"revision": "21f5db4a3b31b290b68bd706520298d37c87cd29",
+			"revisionTime": "2017-09-22T22:15:32Z"
+		},
+		{
+			"checksumSHA1": "+4d+Y67AMKKuyR1EO33Zdt+RVx0=",
+			"path": "github.com/Azure/go-autorest/autorest",
+			"revision": "f6be1abbb5abd0517522f850dd785990d373da7e",
+			"revisionTime": "2017-09-13T22:19:17Z"
+		},
+		{
+			"checksumSHA1": "7G4HgRaIT25bgz/hPtXG6Kv8Fho=",
+			"path": "github.com/Azure/go-autorest/autorest/adal",
+			"revision": "f6be1abbb5abd0517522f850dd785990d373da7e",
+			"revisionTime": "2017-09-13T22:19:17Z"
+		},
+		{
+			"checksumSHA1": "2KdBFgT4qY+fMOkBTa5vA9V0AiM=",
+			"path": "github.com/Azure/go-autorest/autorest/azure",
+			"revision": "f6be1abbb5abd0517522f850dd785990d373da7e",
+			"revisionTime": "2017-09-13T22:19:17Z"
+		},
+		{
+			"checksumSHA1": "LSF/pNrjhIxl6jiS6bKooBFCOxI=",
+			"path": "github.com/Azure/go-autorest/autorest/date",
+			"revision": "f6be1abbb5abd0517522f850dd785990d373da7e",
+			"revisionTime": "2017-09-13T22:19:17Z"
 		},
 		{
 			"path": "github.com/Sirupsen/logrus",
@@ -428,6 +452,12 @@
 			"revision": "ed27b6fd65218132ee50cd95f38474a3d8a2cd12",
 			"revisionTime": "2016-06-18T12:32:21-07:00"
 		},
+		{
+			"checksumSHA1": "iqUXcP3VA+G1/gVLRpQpBUt/BuA=",
+			"path": "github.com/satori/uuid",
+			"revision": "5bf94b69c6b68ee1b541973bb8e1144db23a194b",
+			"revisionTime": "2017-03-21T23:07:31Z"
+		},
 		{
 			"checksumSHA1": "u0hXGADM3JDza8YjgiyNJpAJk8g=",
 			"path": "github.com/skyrings/skyring-common/tools/uuid",