mirror of https://github.com/minio/minio.git
Implement S3 Gateway to third party cloud storage providers. (#3756)
Currently supported backend is Azure Blob Storage.

```
export MINIO_ACCESS_KEY=azureaccountname
export MINIO_SECRET_KEY=azureaccountkey
minio gateway azure
```
This commit is contained in:
parent 8426cf9aec
commit cea4cfa3a8
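Once the gateway is running, any S3-compatible client can talk to it with the same credentials. A minimal sketch using the minio-go client of that era, assuming the gateway is listening on its default address localhost:9000 (an assumption, not stated in the commit):

```go
package main

import (
	"log"

	minio "github.com/minio/minio-go"
)

func main() {
	// Connect to the gateway with the keys exported above.
	// "localhost:9000" is an assumption (minio's default listen address).
	client, err := minio.New("localhost:9000", "azureaccountname", "azureaccountkey", false)
	if err != nil {
		log.Fatalln(err)
	}

	// Behind the gateway, buckets map 1:1 to Azure containers.
	buckets, err := client.ListBuckets()
	if err != nil {
		log.Fatalln(err)
	}
	for _, b := range buckets {
		log.Println(b.Name, b.CreationDate)
	}
}
```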
@@ -692,6 +692,8 @@ func toAPIErrorCode(err error) (apiErr APIErrorCode) {
		apiErr = ErrEntityTooLarge
	case ObjectTooSmall:
		apiErr = ErrEntityTooSmall
	case NotImplemented:
		apiErr = ErrNotImplemented
	default:
		apiErr = ErrInternalError
	}
@@ -103,6 +103,10 @@ func TestAPIErrCode(t *testing.T) {
			StorageFull{},
			ErrStorageFull,
		},
		{
			NotImplemented{},
			ErrNotImplemented,
		},
		{
			errSignatureMismatch,
			ErrSignatureDoesNotMatch,
185 cmd/azure-anonymous.go Normal file
@@ -0,0 +1,185 @@
/*
 * Minio Cloud Storage, (C) 2017 Minio, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package cmd

import (
	"encoding/xml"
	"fmt"
	"io"
	"io/ioutil"
	"net/http"
	"net/url"
	"strconv"
	"time"

	"github.com/Azure/azure-sdk-for-go/storage"
)

// AnonGetBucketInfo - Get bucket metadata from azure anonymously.
func (a AzureObjects) AnonGetBucketInfo(bucket string) (bucketInfo BucketInfo, err error) {
	url, err := url.Parse(a.client.GetBlobURL(bucket, ""))
	if err != nil {
		return bucketInfo, azureToObjectError(traceError(err))
	}
	url.RawQuery = "restype=container"
	resp, err := http.Head(url.String())
	if err != nil {
		return bucketInfo, azureToObjectError(traceError(err), bucket)
	}
	defer resp.Body.Close()

	if resp.StatusCode != http.StatusOK {
		return bucketInfo, azureToObjectError(traceError(anonErrToObjectErr(resp.StatusCode, bucket)), bucket)
	}

	t, err := time.Parse(time.RFC1123, resp.Header.Get("Last-Modified"))
	if err != nil {
		return bucketInfo, traceError(err)
	}
	bucketInfo = BucketInfo{
		Name:    bucket,
		Created: t,
	}
	return bucketInfo, nil
}

// AnonGetObject - Send a GET request without authentication.
// This is needed when clients send GET requests on objects that can be downloaded without auth.
func (a AzureObjects) AnonGetObject(bucket, object string, startOffset int64, length int64, writer io.Writer) (err error) {
	url := a.client.GetBlobURL(bucket, object)
	req, err := http.NewRequest("GET", url, nil)
	if err != nil {
		return azureToObjectError(traceError(err), bucket, object)
	}

	if length > 0 && startOffset > 0 {
		req.Header.Add("Range", fmt.Sprintf("bytes=%d-%d", startOffset, startOffset+length-1))
	} else if startOffset > 0 {
		req.Header.Add("Range", fmt.Sprintf("bytes=%d-", startOffset))
	}

	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		return azureToObjectError(traceError(err), bucket, object)
	}
	defer resp.Body.Close()

	if resp.StatusCode != http.StatusPartialContent && resp.StatusCode != http.StatusOK {
		return azureToObjectError(traceError(anonErrToObjectErr(resp.StatusCode, bucket, object)), bucket, object)
	}

	_, err = io.Copy(writer, resp.Body)
	return traceError(err)
}
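The Range arithmetic above is inclusive on both ends, which is easy to get off by one. A small standalone illustration with hypothetical values (not from the commit):

```go
package main

import "fmt"

func main() {
	// Reading 50 bytes from offset 100: the HTTP Range header is
	// inclusive on both ends, so the last byte is 100+50-1 = 149.
	startOffset, length := int64(100), int64(50)
	fmt.Printf("bytes=%d-%d\n", startOffset, startOffset+length-1) // bytes=100-149
}
```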

// AnonGetObjectInfo - Send HEAD request without authentication and convert the
// result to ObjectInfo.
func (a AzureObjects) AnonGetObjectInfo(bucket, object string) (objInfo ObjectInfo, err error) {
	resp, err := http.Head(a.client.GetBlobURL(bucket, object))
	if err != nil {
		return objInfo, azureToObjectError(traceError(err), bucket, object)
	}
	defer resp.Body.Close()

	if resp.StatusCode != http.StatusOK {
		return objInfo, azureToObjectError(traceError(anonErrToObjectErr(resp.StatusCode, bucket, object)), bucket, object)
	}

	var contentLength int64
	contentLengthStr := resp.Header.Get("Content-Length")
	if contentLengthStr != "" {
		contentLength, err = strconv.ParseInt(contentLengthStr, 0, 64)
		if err != nil {
			return objInfo, azureToObjectError(traceError(errUnexpected), bucket, object)
		}
	}

	t, err := time.Parse(time.RFC1123, resp.Header.Get("Last-Modified"))
	if err != nil {
		return objInfo, traceError(err)
	}

	objInfo.Bucket = bucket
	objInfo.UserDefined = make(map[string]string)
	if resp.Header.Get("Content-Encoding") != "" {
		objInfo.UserDefined["Content-Encoding"] = resp.Header.Get("Content-Encoding")
	}
	objInfo.UserDefined["Content-Type"] = resp.Header.Get("Content-Type")
	objInfo.MD5Sum = resp.Header.Get("Etag")
	objInfo.ModTime = t
	objInfo.Name = object
	objInfo.Size = contentLength
	return
}

// AnonListObjects - Use Azure equivalent ListBlobs.
func (a AzureObjects) AnonListObjects(bucket, prefix, marker, delimiter string, maxKeys int) (result ListObjectsInfo, err error) {
	params := storage.ListBlobsParameters{
		Prefix:     prefix,
		Marker:     marker,
		Delimiter:  delimiter,
		MaxResults: uint(maxKeys),
	}

	q := azureListBlobsGetParameters(params)
	q.Set("restype", "container")
	q.Set("comp", "list")

	url, err := url.Parse(a.client.GetBlobURL(bucket, ""))
	if err != nil {
		return result, azureToObjectError(traceError(err))
	}
	url.RawQuery = q.Encode()

	resp, err := http.Get(url.String())
	if err != nil {
		return result, azureToObjectError(traceError(err))
	}
	defer resp.Body.Close()

	var listResp storage.BlobListResponse

	data, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		return result, azureToObjectError(traceError(err))
	}
	err = xml.Unmarshal(data, &listResp)
	if err != nil {
		return result, azureToObjectError(traceError(err))
	}

	result.IsTruncated = listResp.NextMarker != ""
	result.NextMarker = listResp.NextMarker
	for _, object := range listResp.Blobs {
		t, e := time.Parse(time.RFC1123, object.Properties.LastModified)
		if e != nil {
			continue
		}
		result.Objects = append(result.Objects, ObjectInfo{
			Bucket:          bucket,
			Name:            object.Name,
			ModTime:         t,
			Size:            object.Properties.ContentLength,
			MD5Sum:          object.Properties.Etag,
			ContentType:     object.Properties.ContentType,
			ContentEncoding: object.Properties.ContentEncoding,
		})
	}
	result.Prefixes = listResp.BlobPrefixes
	return result, nil
}
43 cmd/azure-unsupported.go Normal file
@@ -0,0 +1,43 @@
/*
 * Minio Cloud Storage, (C) 2017 Minio, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package cmd

// HealBucket - Not relevant.
func (a AzureObjects) HealBucket(bucket string) error {
	return traceError(NotImplemented{})
}

// ListBucketsHeal - Not relevant.
func (a AzureObjects) ListBucketsHeal() (buckets []BucketInfo, err error) {
	return nil, traceError(NotImplemented{})
}

// HealObject - Not relevant.
func (a AzureObjects) HealObject(bucket, object string) error {
	return traceError(NotImplemented{})
}

// ListObjectsHeal - Not relevant.
func (a AzureObjects) ListObjectsHeal(bucket, prefix, marker, delimiter string, maxKeys int) (ListObjectsInfo, error) {
	return ListObjectsInfo{}, traceError(NotImplemented{})
}

// ListUploadsHeal - Not relevant.
func (a AzureObjects) ListUploadsHeal(bucket, prefix, marker, uploadIDMarker,
	delimiter string, maxUploads int) (ListMultipartsInfo, error) {
	return ListMultipartsInfo{}, traceError(NotImplemented{})
}
635 cmd/azure.go Normal file
@@ -0,0 +1,635 @@
/*
 * Minio Cloud Storage, (C) 2017 Minio, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package cmd

import (
	"encoding/base64"
	"encoding/hex"
	"fmt"
	"hash"
	"io"
	"net/http"
	"net/url"
	"strconv"
	"strings"
	"sync"
	"time"

	"github.com/Azure/azure-sdk-for-go/storage"
	"github.com/minio/minio-go/pkg/policy"
	"github.com/minio/sha256-simd"
)

const globalAzureAPIVersion = "2016-05-31"

// To store metadata during NewMultipartUpload which will be used after
// CompleteMultipartUpload to call SetBlobProperties.
type azureMultipartMetaInfo struct {
	meta map[string]map[string]string
	*sync.Mutex
}

// Return metadata map of the multipart object.
func (a *azureMultipartMetaInfo) get(key string) map[string]string {
	a.Lock()
	defer a.Unlock()
	return a.meta[key]
}

// Set metadata map for the multipart object.
func (a *azureMultipartMetaInfo) set(key string, value map[string]string) {
	a.Lock()
	defer a.Unlock()
	a.meta[key] = value
}

// Delete metadata map for the multipart object.
func (a *azureMultipartMetaInfo) del(key string) {
	a.Lock()
	defer a.Unlock()
	delete(a.meta, key)
}
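This mutex-guarded map is the only state the gateway keeps across multipart calls; everything else lives in Azure. A rough sketch of its lifecycle inside package cmd, with hypothetical object names and values (an illustration, not from the commit):

```go
// Hypothetical illustration of azureMultipartMetaInfo across a multipart upload.
info := azureMultipartMetaInfo{
	meta:  make(map[string]map[string]string),
	Mutex: &sync.Mutex{},
}

// NewMultipartUpload: remember the object's metadata under its upload ID.
info.set("photos/cat.png", map[string]string{"Content-Type": "image/png"})

// PutObjectPart / ListObjectParts: a nil result means an unknown upload ID.
if meta := info.get("photos/cat.png"); meta != nil {
	_ = meta["Content-Type"] // "image/png"
}

// CompleteMultipartUpload / AbortMultipartUpload: drop the entry.
info.del("photos/cat.png")
```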

// AzureObjects - Implements Object layer for Azure blob storage.
type AzureObjects struct {
	client   storage.BlobStorageClient // Azure sdk client
	metaInfo azureMultipartMetaInfo
}

// Convert azure errors to minio object layer errors.
func azureToObjectError(err error, params ...string) error {
	if err == nil {
		return nil
	}

	e, ok := err.(*Error)
	if !ok {
		// Code should be fixed if this function is called without doing traceError()
		// Else handling different situations in this function makes this function complicated.
		errorIf(err, "Expected type *Error")
		return err
	}

	err = e.e
	bucket := ""
	object := ""
	if len(params) >= 1 {
		bucket = params[0]
	}
	if len(params) == 2 {
		object = params[1]
	}

	azureErr, ok := err.(storage.AzureStorageServiceError)
	if !ok {
		// We don't interpret non-Azure errors, as Azure errors carry
		// a StatusCode which helps convert them to object errors.
		return e
	}

	switch azureErr.Code {
	case "ContainerAlreadyExists":
		err = BucketExists{Bucket: bucket}
	case "InvalidResourceName":
		err = BucketNameInvalid{Bucket: bucket}
	default:
		switch azureErr.StatusCode {
		case http.StatusNotFound:
			if object != "" {
				err = ObjectNotFound{bucket, object}
			} else {
				err = BucketNotFound{Bucket: bucket}
			}
		case http.StatusBadRequest:
			err = BucketNameInvalid{Bucket: bucket}
		}
	}
	e.e = err
	return e
}

// Inits azure blob storage client and returns AzureObjects.
func newAzureLayer(account, key string) (GatewayLayer, error) {
	useHTTPS := true
	c, err := storage.NewClient(account, key, storage.DefaultBaseURL, globalAzureAPIVersion, useHTTPS)
	if err != nil {
		return AzureObjects{}, err
	}
	return &AzureObjects{
		client: c.GetBlobService(),
		metaInfo: azureMultipartMetaInfo{
			meta:  make(map[string]map[string]string),
			Mutex: &sync.Mutex{},
		},
	}, nil
}

// Shutdown - save any gateway metadata to disk
// if necessary and reload upon next restart.
func (a AzureObjects) Shutdown() error {
	// TODO
	return nil
}

// StorageInfo - Not relevant to Azure backend.
func (a AzureObjects) StorageInfo() StorageInfo {
	return StorageInfo{}
}

// MakeBucket - Create a new container on azure backend.
func (a AzureObjects) MakeBucket(bucket string) error {
	err := a.client.CreateContainer(bucket, storage.ContainerAccessTypePrivate)
	return azureToObjectError(traceError(err), bucket)
}

// GetBucketInfo - Get bucket metadata.
func (a AzureObjects) GetBucketInfo(bucket string) (BucketInfo, error) {
	// Azure does not have an equivalent call, hence use ListContainers.
	resp, err := a.client.ListContainers(storage.ListContainersParameters{
		Prefix: bucket,
	})
	if err != nil {
		return BucketInfo{}, azureToObjectError(traceError(err), bucket)
	}
	for _, container := range resp.Containers {
		if container.Name == bucket {
			t, e := time.Parse(time.RFC1123, container.Properties.LastModified)
			if e == nil {
				return BucketInfo{
					Name:    bucket,
					Created: t,
				}, nil
			} // else continue
		}
	}
	return BucketInfo{}, traceError(BucketNotFound{Bucket: bucket})
}

// ListBuckets - Lists all azure containers, uses Azure equivalent ListContainers.
func (a AzureObjects) ListBuckets() (buckets []BucketInfo, err error) {
	resp, err := a.client.ListContainers(storage.ListContainersParameters{})
	if err != nil {
		return nil, azureToObjectError(traceError(err))
	}
	for _, container := range resp.Containers {
		t, e := time.Parse(time.RFC1123, container.Properties.LastModified)
		if e != nil {
			return nil, traceError(e)
		}
		buckets = append(buckets, BucketInfo{
			Name:    container.Name,
			Created: t,
		})
	}
	return buckets, nil
}

// DeleteBucket - delete a container on azure, uses Azure equivalent DeleteContainer.
func (a AzureObjects) DeleteBucket(bucket string) error {
	return azureToObjectError(traceError(a.client.DeleteContainer(bucket)), bucket)
}

// ListObjects - lists all blobs within a container filtered by prefix
// and marker, uses Azure equivalent ListBlobs.
func (a AzureObjects) ListObjects(bucket, prefix, marker, delimiter string, maxKeys int) (result ListObjectsInfo, err error) {
	resp, err := a.client.ListBlobs(bucket, storage.ListBlobsParameters{
		Prefix:     prefix,
		Marker:     marker,
		Delimiter:  delimiter,
		MaxResults: uint(maxKeys),
	})
	if err != nil {
		return result, azureToObjectError(traceError(err), bucket, prefix)
	}
	result.IsTruncated = resp.NextMarker != ""
	result.NextMarker = resp.NextMarker
	for _, object := range resp.Blobs {
		t, e := time.Parse(time.RFC1123, object.Properties.LastModified)
		if e != nil {
			continue
		}
		result.Objects = append(result.Objects, ObjectInfo{
			Bucket:          bucket,
			Name:            object.Name,
			ModTime:         t,
			Size:            object.Properties.ContentLength,
			MD5Sum:          canonicalizeETag(object.Properties.Etag),
			ContentType:     object.Properties.ContentType,
			ContentEncoding: object.Properties.ContentEncoding,
		})
	}
	result.Prefixes = resp.BlobPrefixes
	return result, nil
}

// GetObject - reads an object from azure. Supports additional
// parameters like offset and length which are synonymous with
// HTTP Range requests.
//
// startOffset indicates the starting read location of the object.
// length indicates the total length of the object.
func (a AzureObjects) GetObject(bucket, object string, startOffset int64, length int64, writer io.Writer) error {
	byteRange := fmt.Sprintf("%d-", startOffset)
	if length > 0 && startOffset > 0 {
		byteRange = fmt.Sprintf("%d-%d", startOffset, startOffset+length-1)
	}

	var rc io.ReadCloser
	var err error
	if startOffset == 0 && length == 0 {
		rc, err = a.client.GetBlob(bucket, object)
	} else {
		rc, err = a.client.GetBlobRange(bucket, object, byteRange, nil)
	}
	if err != nil {
		return azureToObjectError(traceError(err), bucket, object)
	}
	_, err = io.Copy(writer, rc)
	rc.Close()
	return traceError(err)
}

// GetObjectInfo - reads blob metadata properties and replies back ObjectInfo,
// uses Azure equivalent GetBlobProperties.
func (a AzureObjects) GetObjectInfo(bucket, object string) (objInfo ObjectInfo, err error) {
	prop, err := a.client.GetBlobProperties(bucket, object)
	if err != nil {
		return objInfo, azureToObjectError(traceError(err), bucket, object)
	}
	t, err := time.Parse(time.RFC1123, prop.LastModified)
	if err != nil {
		return objInfo, traceError(err)
	}
	objInfo = ObjectInfo{
		Bucket:      bucket,
		UserDefined: make(map[string]string),
		MD5Sum:      canonicalizeETag(prop.Etag),
		ModTime:     t,
		Name:        object,
		Size:        prop.ContentLength,
	}
	if prop.ContentEncoding != "" {
		objInfo.UserDefined["Content-Encoding"] = prop.ContentEncoding
	}
	objInfo.UserDefined["Content-Type"] = prop.ContentType
	return objInfo, nil
}

// Canonicalize the metadata headers; without this the azure-sdk calculates
// an incorrect signature. Canonicalization converts an HTTP header such as
// `accept-encoding` to its canonical form `Accept-Encoding`.
func canonicalMetadata(metadata map[string]string) (canonical map[string]string) {
	canonical = make(map[string]string)
	for k, v := range metadata {
		canonical[http.CanonicalHeaderKey(k)] = v
	}
	return canonical
}

// PutObject - Create a new blob with the incoming data,
// uses Azure equivalent CreateBlockBlobFromReader.
func (a AzureObjects) PutObject(bucket, object string, size int64, data io.Reader, metadata map[string]string, sha256sum string) (objInfo ObjectInfo, err error) {
	var sha256Writer hash.Hash
	teeReader := data
	if sha256sum != "" {
		sha256Writer = sha256.New()
		teeReader = io.TeeReader(data, sha256Writer)
	}

	delete(metadata, "md5Sum")

	err = a.client.CreateBlockBlobFromReader(bucket, object, uint64(size), teeReader, canonicalMetadata(metadata))
	if err != nil {
		return objInfo, azureToObjectError(traceError(err), bucket, object)
	}

	if sha256sum != "" {
		newSHA256sum := hex.EncodeToString(sha256Writer.Sum(nil))
		if newSHA256sum != sha256sum {
			a.client.DeleteBlob(bucket, object, nil)
			return ObjectInfo{}, traceError(SHA256Mismatch{})
		}
	}

	return a.GetObjectInfo(bucket, object)
}

// CopyObject - Copies a blob from source container to destination container.
// Uses Azure equivalent CopyBlob API.
func (a AzureObjects) CopyObject(srcBucket, srcObject, destBucket, destObject string, metadata map[string]string) (objInfo ObjectInfo, err error) {
	err = a.client.CopyBlob(destBucket, destObject, a.client.GetBlobURL(srcBucket, srcObject))
	if err != nil {
		return objInfo, azureToObjectError(traceError(err), srcBucket, srcObject)
	}
	return a.GetObjectInfo(destBucket, destObject)
}

// DeleteObject - Deletes a blob on azure container, uses Azure
// equivalent DeleteBlob API.
func (a AzureObjects) DeleteObject(bucket, object string) error {
	err := a.client.DeleteBlob(bucket, object, nil)
	if err != nil {
		return azureToObjectError(traceError(err), bucket, object)
	}
	return nil
}

// ListMultipartUploads - Incomplete implementation, for now just return the prefix if it is an incomplete upload.
// FIXME: Full ListMultipartUploads is not supported yet. It is supported just enough to help our client libs to
// support re-uploads. a.client.ListBlobs() can be made to return entries which include uncommitted blobs, using
// which we need to filter out the committed blobs to get the list of uncommitted blobs.
func (a AzureObjects) ListMultipartUploads(bucket, prefix, keyMarker, uploadIDMarker, delimiter string, maxUploads int) (result ListMultipartsInfo, err error) {
	result.MaxUploads = maxUploads
	result.Prefix = prefix
	result.Delimiter = delimiter
	meta := a.metaInfo.get(prefix)
	if meta == nil {
		// In case minio was restarted after NewMultipartUpload and before CompleteMultipartUpload we expect
		// the client to do a fresh upload so that any metadata like content-type are sent again in the
		// NewMultipartUpload.
		return result, nil
	}
	result.Uploads = []uploadMetadata{{prefix, prefix, time.Now().UTC(), "", nil}}
	return result, nil
}

// NewMultipartUpload - Use Azure equivalent CreateBlockBlob.
func (a AzureObjects) NewMultipartUpload(bucket, object string, metadata map[string]string) (uploadID string, err error) {
	// Azure doesn't return a unique upload ID and we use the object name in place of it. Azure allows multiple uploads to
	// co-exist as long as the user keeps the blocks uploaded (in block blobs) unique amongst concurrent upload attempts.
	// Each concurrent client keeps its own blockID list which it can commit.
	uploadID = object
	if metadata == nil {
		// Store an empty map as a placeholder else ListObjectParts/PutObjectPart will not work properly.
		metadata = make(map[string]string)
	} else {
		metadata = canonicalMetadata(metadata)
	}
	a.metaInfo.set(uploadID, metadata)
	return uploadID, nil
}

// CopyObjectPart - Not implemented.
func (a AzureObjects) CopyObjectPart(srcBucket, srcObject, destBucket, destObject string, uploadID string, partID int, startOffset int64, length int64) (info PartInfo, err error) {
	return info, traceError(NotImplemented{})
}

// Encode partID+md5Hex to a blockID.
func azureGetBlockID(partID int, md5Hex string) string {
	return base64.StdEncoding.EncodeToString([]byte(fmt.Sprintf("%.5d.%s", partID, md5Hex)))
}

// Decode blockID to partID+md5Hex.
func azureParseBlockID(blockID string) (int, string, error) {
	idByte, err := base64.StdEncoding.DecodeString(blockID)
	if err != nil {
		return 0, "", traceError(err)
	}
	idStr := string(idByte)
	splitRes := strings.Split(idStr, ".")
	if len(splitRes) != 2 {
		return 0, "", traceError(errUnexpected)
	}
	partID, err := strconv.Atoi(splitRes[0])
	if err != nil {
		return 0, "", traceError(err)
	}
	return partID, splitRes[1], nil
}
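Each part is stored as an Azure block whose ID encodes both the part number and the part's MD5, zero-padded so IDs compare consistently. A worked round trip inside package cmd (the values match the tests in cmd/azure_test.go below):

```go
// Part 1 with MD5 d41d8cd98f00b204e9800998ecf8427e:
//   fmt.Sprintf("%.5d.%s", 1, md5) -> "00001.d41d8cd98f00b204e9800998ecf8427e"
//   base64                         -> "MDAwMDEuZDQxZDhjZDk4ZjAwYjIwNGU5ODAwOTk4ZWNmODQyN2U="
id := azureGetBlockID(1, "d41d8cd98f00b204e9800998ecf8427e")
partID, md5Hex, err := azureParseBlockID(id) // round-trips: 1, "d41d...427e", nil
_, _, _ = partID, md5Hex, err
```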

// PutObjectPart - Use Azure equivalent PutBlockWithLength.
func (a AzureObjects) PutObjectPart(bucket, object, uploadID string, partID int, size int64, data io.Reader, md5Hex string, sha256sum string) (info PartInfo, err error) {
	if meta := a.metaInfo.get(uploadID); meta == nil {
		return info, traceError(InvalidUploadID{})
	}
	var sha256Writer hash.Hash
	teeReader := data
	if sha256sum != "" {
		sha256Writer = sha256.New()
		teeReader = io.TeeReader(data, sha256Writer)
	}

	id := azureGetBlockID(partID, md5Hex)
	err = a.client.PutBlockWithLength(bucket, object, id, uint64(size), teeReader, nil)
	if err != nil {
		return info, azureToObjectError(traceError(err), bucket, object)
	}

	if sha256sum != "" {
		newSHA256sum := hex.EncodeToString(sha256Writer.Sum(nil))
		if newSHA256sum != sha256sum {
			return PartInfo{}, traceError(SHA256Mismatch{})
		}
	}

	info.PartNumber = partID
	info.ETag = md5Hex
	info.LastModified = time.Now().UTC()
	info.Size = size
	return info, nil
}

// ListObjectParts - Use Azure equivalent GetBlockList.
func (a AzureObjects) ListObjectParts(bucket, object, uploadID string, partNumberMarker int, maxParts int) (result ListPartsInfo, err error) {
	result.Bucket = bucket
	result.Object = object
	result.UploadID = uploadID
	result.MaxParts = maxParts

	if meta := a.metaInfo.get(uploadID); meta == nil {
		return result, nil
	}
	resp, err := a.client.GetBlockList(bucket, object, storage.BlockListTypeUncommitted)
	if err != nil {
		return result, azureToObjectError(traceError(err), bucket, object)
	}
	tmpMaxParts := 0
	partCount := 0 // Used for figuring out IsTruncated.
	nextPartNumberMarker := 0
	for _, part := range resp.UncommittedBlocks {
		if tmpMaxParts == maxParts {
			// Also takes care of the case if maxParts = 0
			break
		}
		partCount++
		partID, md5Hex, err := azureParseBlockID(part.Name)
		if err != nil {
			return result, err
		}
		if partID <= partNumberMarker {
			continue
		}
		result.Parts = append(result.Parts, PartInfo{
			partID,
			time.Now().UTC(),
			md5Hex,
			part.Size,
		})
		tmpMaxParts++
		nextPartNumberMarker = partID
	}
	if partCount < len(resp.UncommittedBlocks) {
		result.IsTruncated = true
		result.NextPartNumberMarker = nextPartNumberMarker
	}

	return result, nil
}

// AbortMultipartUpload - Not Implemented.
// There is no corresponding API in azure to abort an incomplete upload. The uncommitted blocks
// get deleted after one week.
func (a AzureObjects) AbortMultipartUpload(bucket, object, uploadID string) error {
	a.metaInfo.del(uploadID)
	return nil
}

// CompleteMultipartUpload - Use Azure equivalent PutBlockList.
func (a AzureObjects) CompleteMultipartUpload(bucket, object, uploadID string, uploadedParts []completePart) (objInfo ObjectInfo, err error) {
	meta := a.metaInfo.get(uploadID)
	if meta == nil {
		return objInfo, traceError(InvalidUploadID{uploadID})
	}
	var blocks []storage.Block
	for _, part := range uploadedParts {
		blocks = append(blocks, storage.Block{
			ID:     azureGetBlockID(part.PartNumber, part.ETag),
			Status: storage.BlockStatusUncommitted,
		})
	}
	err = a.client.PutBlockList(bucket, object, blocks)
	if err != nil {
		return objInfo, azureToObjectError(traceError(err), bucket, object)
	}
	if len(meta) > 0 {
		prop := storage.BlobHeaders{
			ContentMD5:      meta["Content-Md5"],
			ContentLanguage: meta["Content-Language"],
			ContentEncoding: meta["Content-Encoding"],
			ContentType:     meta["Content-Type"],
			CacheControl:    meta["Cache-Control"],
		}
		err = a.client.SetBlobProperties(bucket, object, prop)
		if err != nil {
			return objInfo, azureToObjectError(traceError(err), bucket, object)
		}
	}
	a.metaInfo.del(uploadID)
	return a.GetObjectInfo(bucket, object)
}
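Taken together, the multipart calls map onto Azure block-blob primitives. A sketch of a two-part upload inside package cmd, assuming `a` is an AzureObjects value from newAzureLayer and the readers, sizes, and MD5s are hypothetical:

```go
// Hypothetical sequence: how a two-part upload maps to Azure blocks.
uploadID, _ := a.NewMultipartUpload("bucket", "big.bin",
	map[string]string{"Content-Type": "application/octet-stream"})

// Each part becomes an uncommitted block named azureGetBlockID(partID, md5Hex).
p1, _ := a.PutObjectPart("bucket", "big.bin", uploadID, 1, size1, part1Reader, md5Hex1, "")
p2, _ := a.PutObjectPart("bucket", "big.bin", uploadID, 2, size2, part2Reader, md5Hex2, "")

// PutBlockList commits the blocks in the requested order; the cached
// metadata is then applied via SetBlobProperties and dropped from metaInfo.
objInfo, _ := a.CompleteMultipartUpload("bucket", "big.bin", uploadID,
	[]completePart{{PartNumber: 1, ETag: p1.ETag}, {PartNumber: 2, ETag: p2.ETag}})
_ = objInfo
```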

func anonErrToObjectErr(statusCode int, params ...string) error {
	bucket := ""
	object := ""
	if len(params) >= 1 {
		bucket = params[0]
	}
	if len(params) == 2 {
		object = params[1]
	}

	switch statusCode {
	case http.StatusNotFound:
		if object != "" {
			return ObjectNotFound{bucket, object}
		}
		return BucketNotFound{Bucket: bucket}
	case http.StatusBadRequest:
		if object != "" {
			return ObjectNameInvalid{bucket, object}
		}
		return BucketNameInvalid{Bucket: bucket}
	}
	return errUnexpected
}

// Copied from github.com/Azure/azure-sdk-for-go/storage/blob.go
func azureListBlobsGetParameters(p storage.ListBlobsParameters) url.Values {
	out := url.Values{}

	if p.Prefix != "" {
		out.Set("prefix", p.Prefix)
	}
	if p.Delimiter != "" {
		out.Set("delimiter", p.Delimiter)
	}
	if p.Marker != "" {
		out.Set("marker", p.Marker)
	}
	if p.Include != "" {
		out.Set("include", p.Include)
	}
	if p.MaxResults != 0 {
		out.Set("maxresults", fmt.Sprintf("%v", p.MaxResults))
	}
	if p.Timeout != 0 {
		out.Set("timeout", fmt.Sprintf("%v", p.Timeout))
	}

	return out
}

// SetBucketPolicies - Azure supports three types of container policies:
// storage.ContainerAccessTypeContainer - readonly in minio terminology
// storage.ContainerAccessTypeBlob - readonly without listing in minio terminology
// storage.ContainerAccessTypePrivate - none in minio terminology
// As the common denominator for minio and azure is readonly and none, we support
// these two policies at the bucket level.
func (a AzureObjects) SetBucketPolicies(bucket string, policies []BucketAccessPolicy) error {
	prefix := bucket + "/*" // For all objects inside the bucket.
	if len(policies) != 1 {
		return traceError(NotImplemented{})
	}
	if policies[0].Prefix != prefix {
		return traceError(NotImplemented{})
	}
	if policies[0].Policy != policy.BucketPolicyReadOnly {
		return traceError(NotImplemented{})
	}
	perm := storage.ContainerPermissions{
		AccessType:     storage.ContainerAccessTypeContainer,
		AccessPolicies: nil,
	}
	err := a.client.SetContainerPermissions(bucket, perm, 0, "")
	return azureToObjectError(traceError(err), bucket)
}
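In practice this means the only policy the gateway accepts is a bucket-wide read-only policy. A sketch of the single accepted input, for a hypothetical bucket named mybucket:

```go
// Inside package cmd: the one policy shape SetBucketPolicies accepts.
policies := []BucketAccessPolicy{{
	Prefix: "mybucket/*",                // must cover the whole bucket
	Policy: policy.BucketPolicyReadOnly, // the only supported access type
}}
err := a.SetBucketPolicies("mybucket", policies)
// Anything else (multiple prefixes, write access, ...) returns NotImplemented.
_ = err
```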

// GetBucketPolicies - Get the container ACL and convert it to canonical []bucketAccessPolicy
func (a AzureObjects) GetBucketPolicies(bucket string) ([]BucketAccessPolicy, error) {
	perm, err := a.client.GetContainerPermissions(bucket, 0, "")
	if err != nil {
		return nil, azureToObjectError(traceError(err), bucket)
	}
	switch perm.AccessType {
	case storage.ContainerAccessTypePrivate:
		return nil, nil
	case storage.ContainerAccessTypeContainer:
		return []BucketAccessPolicy{{"", policy.BucketPolicyReadOnly}}, nil
	}
	return nil, azureToObjectError(traceError(NotImplemented{}))
}

// DeleteBucketPolicies - Set the container ACL to "private"
func (a AzureObjects) DeleteBucketPolicies(bucket string) error {
	perm := storage.ContainerPermissions{
		AccessType:     storage.ContainerAccessTypePrivate,
		AccessPolicies: nil,
	}
	err := a.client.SetContainerPermissions(bucket, perm, 0, "")
	return azureToObjectError(traceError(err))
}
142 cmd/azure_test.go Normal file
@@ -0,0 +1,142 @@
/*
 * Minio Cloud Storage, (C) 2017 Minio, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package cmd

import (
	"net/http"
	"reflect"
	"testing"

	"github.com/Azure/azure-sdk-for-go/storage"
)

// Test canonical metadata.
func TestCanonicalMetadata(t *testing.T) {
	metadata := map[string]string{
		"accept-encoding":  "gzip",
		"content-encoding": "gzip",
	}
	expectedCanonicalM := map[string]string{
		"Accept-Encoding":  "gzip",
		"Content-Encoding": "gzip",
	}
	actualCanonicalM := canonicalMetadata(metadata)
	if !reflect.DeepEqual(actualCanonicalM, expectedCanonicalM) {
		t.Fatalf("Test failed, expected %#v, got %#v", expectedCanonicalM, actualCanonicalM)
	}
}

// Add tests for azure to object error.
func TestAzureToObjectError(t *testing.T) {
	testCases := []struct {
		actualErr      error
		expectedErr    error
		bucket, object string
	}{
		{
			nil, nil, "", "",
		},
		{
			traceError(errUnexpected), errUnexpected, "", "",
		},
		{
			traceError(errUnexpected), traceError(errUnexpected), "", "",
		},
		{
			traceError(storage.AzureStorageServiceError{
				Code: "ContainerAlreadyExists",
			}), BucketExists{Bucket: "bucket"}, "bucket", "",
		},
		{
			traceError(storage.AzureStorageServiceError{
				Code: "InvalidResourceName",
			}), BucketNameInvalid{Bucket: "bucket."}, "bucket.", "",
		},
		{
			traceError(storage.AzureStorageServiceError{
				StatusCode: http.StatusNotFound,
			}), ObjectNotFound{
				Bucket: "bucket",
				Object: "object",
			}, "bucket", "object",
		},
		{
			traceError(storage.AzureStorageServiceError{
				StatusCode: http.StatusNotFound,
			}), BucketNotFound{Bucket: "bucket"}, "bucket", "",
		},
		{
			traceError(storage.AzureStorageServiceError{
				StatusCode: http.StatusBadRequest,
			}), BucketNameInvalid{Bucket: "bucket."}, "bucket.", "",
		},
	}
	for i, testCase := range testCases {
		err := azureToObjectError(testCase.actualErr, testCase.bucket, testCase.object)
		if err != nil {
			if err.Error() != testCase.expectedErr.Error() {
				t.Errorf("Test %d: Expected error %s, got %s", i+1, testCase.expectedErr, err)
			}
		}
	}
}

// Test azureGetBlockID().
func TestAzureGetBlockID(t *testing.T) {
	testCases := []struct {
		partID  int
		md5     string
		blockID string
	}{
		{1, "d41d8cd98f00b204e9800998ecf8427e", "MDAwMDEuZDQxZDhjZDk4ZjAwYjIwNGU5ODAwOTk4ZWNmODQyN2U="},
		{2, "a7fb6b7b36ee4ed66b5546fac4690273", "MDAwMDIuYTdmYjZiN2IzNmVlNGVkNjZiNTU0NmZhYzQ2OTAyNzM="},
	}
	for _, test := range testCases {
		blockID := azureGetBlockID(test.partID, test.md5)
		if blockID != test.blockID {
			t.Fatalf("%s is not equal to %s", blockID, test.blockID)
		}
	}
}

// Test azureParseBlockID().
func TestAzureParseBlockID(t *testing.T) {
	testCases := []struct {
		partID  int
		md5     string
		blockID string
	}{
		{1, "d41d8cd98f00b204e9800998ecf8427e", "MDAwMDEuZDQxZDhjZDk4ZjAwYjIwNGU5ODAwOTk4ZWNmODQyN2U="},
		{2, "a7fb6b7b36ee4ed66b5546fac4690273", "MDAwMDIuYTdmYjZiN2IzNmVlNGVkNjZiNTU0NmZhYzQ2OTAyNzM="},
	}
	for _, test := range testCases {
		partID, md5, err := azureParseBlockID(test.blockID)
		if err != nil {
			t.Fatal(err)
		}
		if partID != test.partID {
			t.Fatalf("%d not equal to %d", partID, test.partID)
		}
		if md5 != test.md5 {
			t.Fatalf("%s not equal to %s", md5, test.md5)
		}
	}
	_, _, err := azureParseBlockID("junk")
	if err == nil {
		t.Fatal("Expected azureParseBlockID() to return error")
	}
}
@@ -49,6 +49,10 @@ func enforceBucketPolicy(bucket, action, resource, referer string, queryParams u
		return ErrInternalError
	}

	if globalBucketPolicies == nil {
		return ErrAccessDenied
	}

	// Fetch bucket policy, if policy is not set return access denied.
	policy := globalBucketPolicies.GetBucketPolicy(bucket)
	if policy == nil {
@@ -27,19 +27,41 @@ import (
)

const (
	accessKeyMinLen = 5
	accessKeyMaxLen = 20
	secretKeyMinLen = 8
	secretKeyMaxLen = 40

	alphaNumericTable    = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ"
	alphaNumericTableLen = byte(len(alphaNumericTable))
	accessKeyMinLen       = 5
	accessKeyMaxLen       = 20
	secretKeyMinLen       = 8
	secretKeyMaxLenAmazon = 40
	alphaNumericTable     = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ"
	alphaNumericTableLen  = byte(len(alphaNumericTable))
)

var (
	errInvalidAccessKeyLength = errors.New("Invalid access key, access key should be 5 to 20 characters in length")
	errInvalidSecretKeyLength = errors.New("Invalid secret key, secret key should be 8 to 40 characters in length")
)
var secretKeyMaxLen = secretKeyMaxLenAmazon

func mustGetAccessKey() string {
	keyBytes := make([]byte, accessKeyMaxLen)
	if _, err := rand.Read(keyBytes); err != nil {
		console.Fatalf("Unable to generate access key. Err: %s.\n", err)
	}

	for i := 0; i < accessKeyMaxLen; i++ {
		keyBytes[i] = alphaNumericTable[keyBytes[i]%alphaNumericTableLen]
	}

	return string(keyBytes)
}

func mustGetSecretKey() string {
	keyBytes := make([]byte, secretKeyMaxLen)
	if _, err := rand.Read(keyBytes); err != nil {
		console.Fatalf("Unable to generate secret key. Err: %s.\n", err)
	}

	return string([]byte(base64.StdEncoding.EncodeToString(keyBytes))[:secretKeyMaxLen])
}

// isAccessKeyValid - validate access key for right length.
func isAccessKeyValid(accessKey string) bool {
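The secret-key generation in this hunk leans on a small arithmetic fact: base64 expands every 3 input bytes to 4 output characters, so 40 random bytes encode to ceil(40/3)*4 = 56 characters, which can then be safely truncated to the 40-character Amazon-style length. A standalone sketch of the same technique:

```go
package main

import (
	"crypto/rand"
	"encoding/base64"
	"fmt"
)

const secretKeyMaxLen = 40

func main() {
	// 40 random bytes -> 56 base64 characters -> first 40 kept.
	keyBytes := make([]byte, secretKeyMaxLen)
	if _, err := rand.Read(keyBytes); err != nil {
		panic(err)
	}
	secretKey := base64.StdEncoding.EncodeToString(keyBytes)[:secretKeyMaxLen]
	fmt.Println(len(secretKey), secretKey) // 40 <random key>
}
```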
@@ -313,6 +313,9 @@ func eventNotifyForBucketListeners(eventType, objectName, bucketName string,
// eventNotify notifies an event to relevant targets based on their
// bucket configuration (notifications and listeners).
func eventNotify(event eventData) {
	if globalEventNotifier == nil {
		return
	}
	// Notifies a new event.
	// List of events reported through this function are
	// - s3:ObjectCreated:Put
703 cmd/gateway-handlers.go Normal file
@@ -0,0 +1,703 @@
/*
 * Minio Cloud Storage, (C) 2017 Minio, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package cmd

import (
	"bytes"
	"io"
	"io/ioutil"
	"net/http"

	"encoding/json"
	"encoding/xml"

	router "github.com/gorilla/mux"
	"github.com/minio/minio-go/pkg/policy"
)

// GetObjectHandler - GET Object
// ----------
// This implementation of the GET operation retrieves an object. To use GET,
// you must have READ access to the object.
func (api gatewayAPIHandlers) GetObjectHandler(w http.ResponseWriter, r *http.Request) {
	var object, bucket string
	vars := router.Vars(r)
	bucket = vars["bucket"]
	object = vars["object"]

	// Fetch object stat info.
	objectAPI := api.ObjectAPI()
	if objectAPI == nil {
		writeErrorResponse(w, ErrServerNotInitialized, r.URL)
		return
	}

	reqAuthType := getRequestAuthType(r)

	switch reqAuthType {
	case authTypePresignedV2, authTypeSignedV2:
		// Signature V2 validation.
		s3Error := isReqAuthenticatedV2(r)
		if s3Error != ErrNone {
			errorIf(errSignatureMismatch, dumpRequest(r))
			writeErrorResponse(w, s3Error, r.URL)
			return
		}
	case authTypeSigned, authTypePresigned:
		s3Error := isReqAuthenticated(r, serverConfig.GetRegion())
		if s3Error != ErrNone {
			errorIf(errSignatureMismatch, dumpRequest(r))
			writeErrorResponse(w, s3Error, r.URL)
			return
		}
	}

	getObjectInfo := objectAPI.GetObjectInfo
	if reqAuthType == authTypeAnonymous {
		getObjectInfo = objectAPI.AnonGetObjectInfo
	}
	objInfo, err := getObjectInfo(bucket, object)
	if err != nil {
		errorIf(err, "Unable to fetch object info.")
		apiErr := toAPIErrorCode(err)
		if apiErr == ErrNoSuchKey {
			apiErr = errAllowableObjectNotFound(bucket, r)
		}
		writeErrorResponse(w, apiErr, r.URL)
		return
	}

	// Get request range.
	var hrange *httpRange
	rangeHeader := r.Header.Get("Range")
	if rangeHeader != "" {
		if hrange, err = parseRequestRange(rangeHeader, objInfo.Size); err != nil {
			// Handle only errInvalidRange
			// Ignore other parse errors and treat it as a regular Get request like Amazon S3.
			if err == errInvalidRange {
				writeErrorResponse(w, ErrInvalidRange, r.URL)
				return
			}

			// log the error.
			errorIf(err, "Invalid request range")
		}
	}

	// Validate pre-conditions if any.
	if checkPreconditions(w, r, objInfo) {
		return
	}

	// Get the object.
	var startOffset int64
	length := objInfo.Size
	if hrange != nil {
		startOffset = hrange.offsetBegin
		length = hrange.getLength()
	}
	// Indicates if any data was written to the http.ResponseWriter
	dataWritten := false
	// io.Writer type which keeps track if any data was written.
	writer := funcToWriter(func(p []byte) (int, error) {
		if !dataWritten {
			// Set headers on the first write.
			// Set standard object headers.
			setObjectHeaders(w, objInfo, hrange)

			// Set any additional requested response headers.
			setGetRespHeaders(w, r.URL.Query())

			dataWritten = true
		}
		return w.Write(p)
	})

	getObject := objectAPI.GetObject
	if reqAuthType == authTypeAnonymous {
		getObject = objectAPI.AnonGetObject
	}

	// Reads the object at startOffset and writes to writer.
	if err := getObject(bucket, object, startOffset, length, writer); err != nil {
		errorIf(err, "Unable to write to client.")
		if !dataWritten {
			// Error response only if no data has been written to client yet. i.e if
			// partial data has already been written before an error
			// occurred then no point in setting StatusCode and
			// sending error XML.
			writeErrorResponse(w, toAPIErrorCode(err), r.URL)
		}
		return
	}
	if !dataWritten {
		// If ObjectAPI.GetObject did not return error and no data has
		// been written it would mean that it is a 0-byte object.
		// call writer.Write(nil) to set appropriate headers.
		writer.Write(nil)
	}
}
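The handler above defers setting response headers until the backend produces its first byte, so a failure before any write can still be reported as a proper S3 error response. funcToWriter is a helper from the existing codebase; a minimal sketch of the adapter pattern it relies on, inferred from how it is used here (its exact definition is an assumption):

```go
// Function-to-io.Writer adapter: a function type whose Write method
// simply invokes the function itself.
type funcToWriterSketch func([]byte) (int, error)

func (f funcToWriterSketch) Write(p []byte) (int, error) {
	return f(p)
}
```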
// HeadObjectHandler - HEAD Object
// -----------
// The HEAD operation retrieves metadata from an object without returning the object itself.
func (api gatewayAPIHandlers) HeadObjectHandler(w http.ResponseWriter, r *http.Request) {
	var object, bucket string
	vars := router.Vars(r)
	bucket = vars["bucket"]
	object = vars["object"]

	objectAPI := api.ObjectAPI()
	if objectAPI == nil {
		writeErrorResponseHeadersOnly(w, ErrServerNotInitialized)
		return
	}

	reqAuthType := getRequestAuthType(r)

	switch reqAuthType {
	case authTypePresignedV2, authTypeSignedV2:
		// Signature V2 validation.
		s3Error := isReqAuthenticatedV2(r)
		if s3Error != ErrNone {
			errorIf(errSignatureMismatch, dumpRequest(r))
			writeErrorResponse(w, s3Error, r.URL)
			return
		}
	case authTypeSigned, authTypePresigned:
		s3Error := isReqAuthenticated(r, serverConfig.GetRegion())
		if s3Error != ErrNone {
			errorIf(errSignatureMismatch, dumpRequest(r))
			writeErrorResponse(w, s3Error, r.URL)
			return
		}
	}

	getObjectInfo := objectAPI.GetObjectInfo
	if reqAuthType == authTypeAnonymous {
		getObjectInfo = objectAPI.AnonGetObjectInfo
	}
	objInfo, err := getObjectInfo(bucket, object)
	if err != nil {
		errorIf(err, "Unable to fetch object info.")
		apiErr := toAPIErrorCode(err)
		if apiErr == ErrNoSuchKey {
			apiErr = errAllowableObjectNotFound(bucket, r)
		}
		writeErrorResponse(w, apiErr, r.URL)
		return
	}

	// Validate pre-conditions if any.
	if checkPreconditions(w, r, objInfo) {
		return
	}

	// Set standard object headers.
	setObjectHeaders(w, objInfo, nil)

	// Successful response.
	w.WriteHeader(http.StatusOK)
}

// DeleteMultipleObjectsHandler - deletes multiple objects.
func (api gatewayAPIHandlers) DeleteMultipleObjectsHandler(w http.ResponseWriter, r *http.Request) {
	vars := router.Vars(r)
	bucket := vars["bucket"]

	objectAPI := api.ObjectAPI()
	if objectAPI == nil {
		writeErrorResponse(w, ErrServerNotInitialized, r.URL)
		return
	}

	if s3Error := checkRequestAuthType(r, bucket, "s3:DeleteObject", serverConfig.GetRegion()); s3Error != ErrNone {
		writeErrorResponse(w, s3Error, r.URL)
		return
	}

	// Content-Length is required and should be non-zero
	// http://docs.aws.amazon.com/AmazonS3/latest/API/multiobjectdeleteapi.html
	if r.ContentLength <= 0 {
		writeErrorResponse(w, ErrMissingContentLength, r.URL)
		return
	}

	// Content-Md5 is required and should be set
	// http://docs.aws.amazon.com/AmazonS3/latest/API/multiobjectdeleteapi.html
	if _, ok := r.Header["Content-Md5"]; !ok {
		writeErrorResponse(w, ErrMissingContentMD5, r.URL)
		return
	}

	// Allocate incoming content length bytes.
	deleteXMLBytes := make([]byte, r.ContentLength)

	// Read incoming body XML bytes.
	if _, err := io.ReadFull(r.Body, deleteXMLBytes); err != nil {
		errorIf(err, "Unable to read HTTP body.")
		writeErrorResponse(w, ErrInternalError, r.URL)
		return
	}

	// Unmarshal list of keys to be deleted.
	deleteObjects := &DeleteObjectsRequest{}
	if err := xml.Unmarshal(deleteXMLBytes, deleteObjects); err != nil {
		errorIf(err, "Unable to unmarshal delete objects request XML.")
		writeErrorResponse(w, ErrMalformedXML, r.URL)
		return
	}

	var dErrs = make([]error, len(deleteObjects.Objects))

	// Delete all requested objects, one by one.
	for index, object := range deleteObjects.Objects {
		dErr := objectAPI.DeleteObject(bucket, object.ObjectName)
		if dErr != nil {
			dErrs[index] = dErr
		}
	}

	// Collect deleted objects and errors if any.
	var deletedObjects []ObjectIdentifier
	var deleteErrors []DeleteError
	for index, err := range dErrs {
		object := deleteObjects.Objects[index]
		// Successfully deleted objects are collected separately.
		if err == nil {
			deletedObjects = append(deletedObjects, object)
			continue
		}
		if _, ok := errorCause(err).(ObjectNotFound); ok {
			// If the object is not found it should be
			// accounted as deleted as per S3 spec.
			deletedObjects = append(deletedObjects, object)
			continue
		}
		errorIf(err, "Unable to delete object. %s", object.ObjectName)
		// Errors during delete should be collected separately.
		deleteErrors = append(deleteErrors, DeleteError{
			Code:    errorCodeResponse[toAPIErrorCode(err)].Code,
			Message: errorCodeResponse[toAPIErrorCode(err)].Description,
			Key:     object.ObjectName,
		})
	}

	// Generate response
	response := generateMultiDeleteResponse(deleteObjects.Quiet, deletedObjects, deleteErrors)
	encodedSuccessResponse := encodeResponse(response)

	// Write success response.
	writeSuccessResponseXML(w, encodedSuccessResponse)
}
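For reference, the request body this handler unmarshals is the standard S3 Multi-Object Delete XML (key names below are illustrative):

```xml
<Delete>
  <Quiet>false</Quiet>
  <Object><Key>photos/cat.png</Key></Object>
  <Object><Key>photos/dog.png</Key></Object>
</Delete>
```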

// PutBucketPolicyHandler - PUT Bucket policy
// -----------------
// This implementation of the PUT operation uses the policy
// subresource to add to or replace a policy on a bucket
func (api gatewayAPIHandlers) PutBucketPolicyHandler(w http.ResponseWriter, r *http.Request) {
	objAPI := api.ObjectAPI()
	if objAPI == nil {
		writeErrorResponse(w, ErrServerNotInitialized, r.URL)
		return
	}

	if s3Error := checkRequestAuthType(r, "", "", serverConfig.GetRegion()); s3Error != ErrNone {
		writeErrorResponse(w, s3Error, r.URL)
		return
	}

	vars := router.Vars(r)
	bucket := vars["bucket"]

	// Before proceeding validate if bucket exists.
	_, err := objAPI.GetBucketInfo(bucket)
	if err != nil {
		errorIf(err, "Unable to find bucket info.")
		writeErrorResponse(w, toAPIErrorCode(err), r.URL)
		return
	}

	// If Content-Length is unknown or zero, deny the
	// request. PutBucketPolicy always needs a Content-Length.
	if r.ContentLength == -1 || r.ContentLength == 0 {
		writeErrorResponse(w, ErrMissingContentLength, r.URL)
		return
	}
	// If Content-Length is greater than maximum allowed policy size.
	if r.ContentLength > maxAccessPolicySize {
		writeErrorResponse(w, ErrEntityTooLarge, r.URL)
		return
	}

	// Read access policy up to maxAccessPolicySize.
	// http://docs.aws.amazon.com/AmazonS3/latest/dev/access-policy-language-overview.html
	// bucket policies are limited to 20KB in size, using a limit reader.
	policyBytes, err := ioutil.ReadAll(io.LimitReader(r.Body, maxAccessPolicySize))
	if err != nil {
		errorIf(err, "Unable to read from client.")
		writeErrorResponse(w, toAPIErrorCode(err), r.URL)
		return
	}

	{
		// FIXME: consolidate bucketPolicy and policy.BucketAccessPolicy so that
		// the verification below is done on the same type.
		// Parse bucket policy.
		policyInfo := &bucketPolicy{}
		err = parseBucketPolicy(bytes.NewReader(policyBytes), policyInfo)
		if err != nil {
			errorIf(err, "Unable to parse bucket policy.")
			writeErrorResponse(w, ErrInvalidPolicyDocument, r.URL)
			return
		}

		// Check the bucket policy resources.
		if s3Error := checkBucketPolicyResources(bucket, policyInfo); s3Error != ErrNone {
			writeErrorResponse(w, s3Error, r.URL)
			return
		}
	}
	policyInfo := &policy.BucketAccessPolicy{}
	if err = json.Unmarshal(policyBytes, policyInfo); err != nil {
		writeErrorResponse(w, toAPIErrorCode(err), r.URL)
		return
	}
	var policies []BucketAccessPolicy
	for prefix, policy := range policy.GetPolicies(policyInfo.Statements, bucket) {
		policies = append(policies, BucketAccessPolicy{
			Prefix: prefix,
			Policy: policy,
		})
	}
	if err = objAPI.SetBucketPolicies(bucket, policies); err != nil {
		writeErrorResponse(w, toAPIErrorCode(err), r.URL)
		return
	}
	// Success.
	writeSuccessNoContent(w)
}
// DeleteBucketPolicyHandler - DELETE Bucket policy
|
||||
// -----------------
|
||||
// This implementation of the DELETE operation uses the policy
|
||||
// subresource to add to remove a policy on a bucket.
|
||||
func (api gatewayAPIHandlers) DeleteBucketPolicyHandler(w http.ResponseWriter, r *http.Request) {
|
||||
objAPI := api.ObjectAPI()
|
||||
if objAPI == nil {
|
||||
writeErrorResponse(w, ErrServerNotInitialized, r.URL)
|
||||
return
|
||||
}
|
||||
|
||||
if s3Error := checkRequestAuthType(r, "", "", serverConfig.GetRegion()); s3Error != ErrNone {
|
||||
writeErrorResponse(w, s3Error, r.URL)
|
||||
return
|
||||
}
|
||||
|
||||
vars := router.Vars(r)
|
||||
bucket := vars["bucket"]
|
||||
|
||||
// Before proceeding validate if bucket exists.
|
||||
_, err := objAPI.GetBucketInfo(bucket)
|
||||
if err != nil {
|
||||
errorIf(err, "Unable to find bucket info.")
|
||||
writeErrorResponse(w, toAPIErrorCode(err), r.URL)
|
||||
return
|
||||
}
|
||||
|
||||
// Delete bucket access policy, by passing an empty policy
|
||||
// struct.
|
||||
objAPI.DeleteBucketPolicies(bucket)
|
||||
// Success.
|
||||
writeSuccessNoContent(w)
|
||||
}
|
||||
|
||||
// GetBucketPolicyHandler - GET Bucket policy
// -----------------
// This operation uses the policy
// subresource to return the policy of a specified bucket.
func (api gatewayAPIHandlers) GetBucketPolicyHandler(w http.ResponseWriter, r *http.Request) {
objAPI := api.ObjectAPI()
if objAPI == nil {
writeErrorResponse(w, ErrServerNotInitialized, r.URL)
return
}

if s3Error := checkRequestAuthType(r, "", "", serverConfig.GetRegion()); s3Error != ErrNone {
writeErrorResponse(w, s3Error, r.URL)
return
}

vars := router.Vars(r)
bucket := vars["bucket"]

// Before proceeding validate if bucket exists.
_, err := objAPI.GetBucketInfo(bucket)
if err != nil {
errorIf(err, "Unable to find bucket info.")
writeErrorResponse(w, toAPIErrorCode(err), r.URL)
return
}

policies, err := objAPI.GetBucketPolicies(bucket)
if err != nil {
errorIf(err, "Unable to read bucket policy.")
writeErrorResponse(w, toAPIErrorCode(err), r.URL)
return
}
policyInfo := policy.BucketAccessPolicy{Version: "2012-10-17"}
for _, p := range policies {
policyInfo.Statements = policy.SetPolicy(policyInfo.Statements, p.Policy, bucket, p.Prefix)
}
policyBytes, err := json.Marshal(&policyInfo)
if err != nil {
errorIf(err, "Unable to read bucket policy.")
|
||||
writeErrorResponse(w, toAPIErrorCode(err), r.URL)
return
}
// Write to client.
w.Write(policyBytes)
}

// GetBucketNotificationHandler - This implementation of the GET
// operation uses the notification subresource to return the
// notification configuration of a bucket. If notifications are
// not enabled on the bucket, the operation returns an empty
// NotificationConfiguration element.
func (api gatewayAPIHandlers) GetBucketNotificationHandler(w http.ResponseWriter, r *http.Request) {
writeErrorResponse(w, ErrNotImplemented, r.URL)
}

// PutBucketNotificationHandler - Minio notification feature enables
// you to receive notifications when certain events happen in your bucket.
// Using this API, you can replace an existing notification configuration.
// The configuration is an XML file that defines the event types that you
// want Minio to publish and the destination where you want Minio to publish
// an event notification when it detects an event of the specified type.
// By default, your bucket has no event notifications configured. That is,
// the notification configuration will be an empty NotificationConfiguration.
func (api gatewayAPIHandlers) PutBucketNotificationHandler(w http.ResponseWriter, r *http.Request) {
writeErrorResponse(w, ErrNotImplemented, r.URL)
}

// ListenBucketNotificationHandler - list bucket notifications.
func (api gatewayAPIHandlers) ListenBucketNotificationHandler(w http.ResponseWriter, r *http.Request) {
writeErrorResponse(w, ErrNotImplemented, r.URL)
}

// DeleteBucketHandler - Delete bucket
func (api gatewayAPIHandlers) DeleteBucketHandler(w http.ResponseWriter, r *http.Request) {
objectAPI := api.ObjectAPI()
if objectAPI == nil {
writeErrorResponse(w, ErrServerNotInitialized, r.URL)
return
}

// DeleteBucket does not have any bucket action.
if s3Error := checkRequestAuthType(r, "", "", serverConfig.GetRegion()); s3Error != ErrNone {
writeErrorResponse(w, s3Error, r.URL)
return
}

vars := router.Vars(r)
bucket := vars["bucket"]

// Attempt to delete bucket.
if err := objectAPI.DeleteBucket(bucket); err != nil {
errorIf(err, "Unable to delete a bucket.")
writeErrorResponse(w, toAPIErrorCode(err), r.URL)
return
}

// Write success response.
writeSuccessNoContent(w)
}

// ListObjectsV1Handler - GET Bucket (List Objects) Version 1.
// --------------------------
// This implementation of the GET operation returns some or all (up to 1000)
// of the objects in a bucket. You can use the request parameters as selection
// criteria to return a subset of the objects in a bucket.
//
func (api gatewayAPIHandlers) ListObjectsV1Handler(w http.ResponseWriter, r *http.Request) {
vars := router.Vars(r)
bucket := vars["bucket"]

objectAPI := api.ObjectAPI()
if objectAPI == nil {
writeErrorResponse(w, ErrServerNotInitialized, r.URL)
return
}

reqAuthType := getRequestAuthType(r)

switch reqAuthType {
case authTypePresignedV2, authTypeSignedV2:
// Signature V2 validation.
s3Error := isReqAuthenticatedV2(r)
if s3Error != ErrNone {
errorIf(errSignatureMismatch, dumpRequest(r))
writeErrorResponse(w, s3Error, r.URL)
return
}
case authTypeSigned, authTypePresigned:
s3Error := isReqAuthenticated(r, serverConfig.GetRegion())
if s3Error != ErrNone {
errorIf(errSignatureMismatch, dumpRequest(r))
writeErrorResponse(w, s3Error, r.URL)
return
}
}

// Extract all the listObjectsV1 query params to their native values.
prefix, marker, delimiter, maxKeys, _ := getListObjectsV1Args(r.URL.Query())

// Validate all the query params before beginning to serve the request.
if s3Error := validateListObjectsArgs(prefix, marker, delimiter, maxKeys); s3Error != ErrNone {
writeErrorResponse(w, s3Error, r.URL)
return
}

listObjects := objectAPI.ListObjects
if reqAuthType == authTypeAnonymous {
listObjects = objectAPI.AnonListObjects
}
// Initiate a list objects operation based on the input params.
// On success it returns a ListObjectsInfo object to be
// marshalled into an S3 compatible XML response.
listObjectsInfo, err := listObjects(bucket, prefix, marker, delimiter, maxKeys)
if err != nil {
errorIf(err, "Unable to list objects.")
writeErrorResponse(w, toAPIErrorCode(err), r.URL)
return
}
response := generateListObjectsV1Response(bucket, prefix, marker, delimiter, maxKeys, listObjectsInfo)

// Write success response.
writeSuccessResponseXML(w, encodeResponse(response))
}

// HeadBucketHandler - HEAD Bucket
// ----------
// This operation is useful to determine if a bucket exists.
// The operation returns a 200 OK if the bucket exists and you
// have permission to access it. Otherwise, the operation might
// return responses such as 404 Not Found and 403 Forbidden.
func (api gatewayAPIHandlers) HeadBucketHandler(w http.ResponseWriter, r *http.Request) {
vars := router.Vars(r)
bucket := vars["bucket"]

objectAPI := api.ObjectAPI()
if objectAPI == nil {
writeErrorResponseHeadersOnly(w, ErrServerNotInitialized)
return
}

reqAuthType := getRequestAuthType(r)

switch reqAuthType {
case authTypePresignedV2, authTypeSignedV2:
// Signature V2 validation.
s3Error := isReqAuthenticatedV2(r)
if s3Error != ErrNone {
errorIf(errSignatureMismatch, dumpRequest(r))
writeErrorResponse(w, s3Error, r.URL)
return
}
case authTypeSigned, authTypePresigned:
s3Error := isReqAuthenticated(r, serverConfig.GetRegion())
if s3Error != ErrNone {
errorIf(errSignatureMismatch, dumpRequest(r))
writeErrorResponse(w, s3Error, r.URL)
return
}
}

getBucketInfo := objectAPI.GetBucketInfo
if reqAuthType == authTypeAnonymous {
getBucketInfo = objectAPI.AnonGetBucketInfo
}

if _, err := getBucketInfo(bucket); err != nil {
errorIf(err, "Unable to fetch bucket info.")
writeErrorResponseHeadersOnly(w, toAPIErrorCode(err))
return
}

writeSuccessResponseHeadersOnly(w)
}

// GetBucketLocationHandler - GET Bucket location.
// -------------------------
// This operation returns bucket location.
func (api gatewayAPIHandlers) GetBucketLocationHandler(w http.ResponseWriter, r *http.Request) {
vars := router.Vars(r)
bucket := vars["bucket"]

objectAPI := api.ObjectAPI()
if objectAPI == nil {
writeErrorResponse(w, ErrServerNotInitialized, r.URL)
return
}
reqAuthType := getRequestAuthType(r)

switch reqAuthType {
case authTypePresignedV2, authTypeSignedV2:
// Signature V2 validation.
s3Error := isReqAuthenticatedV2(r)
if s3Error != ErrNone {
errorIf(errSignatureMismatch, dumpRequest(r))
writeErrorResponse(w, s3Error, r.URL)
return
}
case authTypeSigned, authTypePresigned:
s3Error := isReqAuthenticated(r, globalMinioDefaultRegion)
if s3Error == ErrInvalidRegion {
// Clients like boto3 send getBucketLocation() call signed with region that is configured.
s3Error = isReqAuthenticated(r, serverConfig.GetRegion())
}
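// For example: a boto3 client configured with region "us-west-2" signs
// getBucketLocation() with that region, so the first check against the
// default region fails with ErrInvalidRegion and the retry against the
// configured region succeeds.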
if s3Error != ErrNone {
errorIf(errSignatureMismatch, dumpRequest(r))
writeErrorResponse(w, s3Error, r.URL)
return
}
}

getBucketInfo := objectAPI.GetBucketInfo
if reqAuthType == authTypeAnonymous {
getBucketInfo = objectAPI.AnonGetBucketInfo
}

if _, err := getBucketInfo(bucket); err != nil {
errorIf(err, "Unable to fetch bucket info.")
writeErrorResponse(w, toAPIErrorCode(err), r.URL)
return
}

// Generate response.
encodedSuccessResponse := encodeResponse(LocationResponse{})
// Get current region.
region := serverConfig.GetRegion()
if region != globalMinioDefaultRegion {
encodedSuccessResponse = encodeResponse(LocationResponse{
Location: region,
})
}

// Write success response.
writeSuccessResponseXML(w, encodedSuccessResponse)
}
219
cmd/gateway-main.go
Normal file
@ -0,0 +1,219 @@
/*
* Minio Cloud Storage, (C) 2017 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/

package cmd

import (
"fmt"
"os"

"github.com/gorilla/mux"
"github.com/minio/cli"
"github.com/minio/mc/pkg/console"
)

var gatewayTemplate = `NAME:
{{.HelpName}} - {{.Usage}}

USAGE:
{{.HelpName}} {{if .VisibleFlags}}[FLAGS]{{end}} BACKEND
{{if .VisibleFlags}}
FLAGS:
{{range .VisibleFlags}}{{.}}
{{end}}{{end}}
ENVIRONMENT VARIABLES:
ACCESS:
MINIO_ACCESS_KEY: Username or access key of your storage backend.
MINIO_SECRET_KEY: Password or secret key of your storage backend.

EXAMPLES:
1. Start minio gateway server for Azure Blob Storage backend.
$ {{.HelpName}} azure

2. Start minio gateway server bound to a specific ADDRESS:PORT.
$ {{.HelpName}} --address 192.168.1.101:9000 azure
`

var gatewayCmd = cli.Command{
Name: "gateway",
Usage: "Start object storage gateway server.",
Action: gatewayMain,
CustomHelpTemplate: gatewayTemplate,
Flags: append(serverFlags, cli.BoolFlag{
Name: "quiet",
Usage: "Disable startup banner.",
}),
HideHelpCommand: true,
}

// Represents the type of the gateway backend.
type gatewayBackend string

const (
azureBackend gatewayBackend = "azure"
// Add more backends here.
)

// Returns access and secret key set from environment variables.
func mustGetGatewayCredsFromEnv() (accessKey, secretKey string) {
// Fetch access keys from environment variables.
accessKey = os.Getenv("MINIO_ACCESS_KEY")
secretKey = os.Getenv("MINIO_SECRET_KEY")
if accessKey == "" || secretKey == "" {
console.Fatalln("Access and secret keys are mandatory to run Minio gateway server.")
}
return accessKey, secretKey
}

// Initialize gateway layer depending on the backend type.
// Supported backend types are
//
// - Azure Blob Storage.
// - Add your favorite backend here.
func newGatewayLayer(backendType, accessKey, secretKey string) (GatewayLayer, error) {
if gatewayBackend(backendType) != azureBackend {
return nil, fmt.Errorf("Unrecognized backend type %s", backendType)
}
return newAzureLayer(accessKey, secretKey)
}
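
// With a second backend, the guard in newGatewayLayer above would naturally
// become a switch. A minimal sketch, assuming a hypothetical gcsBackend
// constant and newGCSLayer constructor that are not part of this commit:
//
//   switch gatewayBackend(backendType) {
//   case azureBackend:
//       return newAzureLayer(accessKey, secretKey)
//   case gcsBackend: // hypothetical
//       return newGCSLayer(accessKey, secretKey) // hypothetical
//   default:
//       return nil, fmt.Errorf("Unrecognized backend type %s", backendType)
//   }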

// Initialize a new gateway config.
//
// DO NOT save this config, this is meant to be
// only used in memory.
func newGatewayConfig(accessKey, secretKey, region string) error {
// Initialize server config.
srvCfg := newServerConfigV14()

// If env is set for a fresh start, save them to config file.
srvCfg.SetCredential(credential{
AccessKey: accessKey,
SecretKey: secretKey,
})

// Set default printing to console.
srvCfg.Logger.SetConsole(consoleLogger{true, "error"})

// Set custom region.
srvCfg.SetRegion(region)

// Create certs path for SSL configuration.
if err := createConfigDir(); err != nil {
return err
}

// hold the mutex lock before a new config is assigned.
// Save the new config globally.
// unlock the mutex.
serverConfigMu.Lock()
serverConfig = srvCfg
serverConfigMu.Unlock()

return nil
}

// Handler for 'minio gateway'.
func gatewayMain(ctx *cli.Context) {
if !ctx.Args().Present() || ctx.Args().First() == "help" {
cli.ShowCommandHelpAndExit(ctx, "gateway", 1)
}

// Fetch access and secret key from env.
accessKey, secretKey := mustGetGatewayCredsFromEnv()

// Initialize new gateway config.
//
// TODO: add support for custom region when we add
// support for S3 backend storage, currently this can
// default to "us-east-1"
err := newGatewayConfig(accessKey, secretKey, "us-east-1")
if err != nil {
console.Fatalf("Unable to initialize gateway config. Error: %s", err)
}

// Enable console logging.
enableConsoleLogger()

// Get quiet flag from command line argument.
quietFlag := ctx.Bool("quiet") || ctx.GlobalBool("quiet")

// First argument is selected backend type.
backendType := ctx.Args().First()

newObject, err := newGatewayLayer(backendType, accessKey, secretKey)
if err != nil {
console.Fatalf("Unable to initialize gateway layer. Error: %s", err)
}

initNSLock(false) // Enable local namespace lock.

router := mux.NewRouter().SkipClean(true)
registerGatewayAPIRouter(router, newObject)

var handlerFns = []HandlerFunc{
// Limits all request sizes to a maximum fixed limit.
setRequestSizeLimitHandler,
// Adds 'crossdomain.xml' policy handler to serve legacy flash clients.
setCrossDomainPolicy,
// Validates all incoming requests to have a valid date header.
setTimeValidityHandler,
// CORS setting for all browser API requests.
setCorsHandler,
// Validates all incoming URL resources, for invalid/unsupported
// resources client receives an HTTP error.
setIgnoreResourcesHandler,
// Auth handler verifies incoming authorization headers and
// routes them accordingly. Client receives an HTTP error for
// invalid/unsupported signatures.
setAuthHandler,
}

apiServer := NewServerMux(ctx.String("address"), registerHandlers(router, handlerFns...))

// Set if we are SSL enabled S3 gateway.
globalIsSSL = isSSL()

// Start server, automatically configures TLS if certs are available.
go func() {
cert, key := "", ""
if globalIsSSL {
cert, key = getPublicCertFile(), getPrivateKeyFile()
}
if aerr := apiServer.ListenAndServe(cert, key); aerr != nil {
console.Fatalf("Failed to start minio server. Error: %s\n", aerr)
}
}()

apiEndPoints, err := finalizeAPIEndpoints(apiServer.Addr)
fatalIf(err, "Unable to finalize API endpoints for %s", apiServer.Addr)

// Once endpoints are finalized, initialize the new object api.
globalObjLayerMutex.Lock()
globalObjectAPI = newObject
globalObjLayerMutex.Unlock()

// Prints the formatted startup message once object layer is initialized.
if !quietFlag {
mode := ""
if gatewayBackend(backendType) == azureBackend {
mode = globalMinioModeGatewayAzure
}
checkUpdate(mode)
printGatewayStartupMessage(apiEndPoints, accessKey, secretKey, backendType)
}

<-globalServiceDoneCh
}
122
cmd/gateway-router.go
Normal file
@ -0,0 +1,122 @@
/*
* Minio Cloud Storage, (C) 2017 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/

package cmd

import (
"io"

router "github.com/gorilla/mux"
)

// GatewayLayer - Interface to implement gateway mode.
type GatewayLayer interface {
ObjectLayer
AnonGetObject(bucket, object string, startOffset int64, length int64, writer io.Writer) (err error)
AnonGetObjectInfo(bucket, object string) (objInfo ObjectInfo, err error)
SetBucketPolicies(string, []BucketAccessPolicy) error
GetBucketPolicies(string) ([]BucketAccessPolicy, error)
DeleteBucketPolicies(string) error
AnonListObjects(bucket, prefix, marker, delimiter string, maxKeys int) (result ListObjectsInfo, err error)
AnonGetBucketInfo(bucket string) (bucketInfo BucketInfo, err error)
}
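
// A handy compile-time check, not part of this diff; it assumes the
// AzureObjects type from cmd/azure-anonymous.go implements every method above:
//
//   var _ GatewayLayer = AzureObjects{} // fails to build if a method is missing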

// Implements and provides http handlers for S3 API.
// Overrides GetObject, HeadObject and Policy related handlers.
type gatewayAPIHandlers struct {
objectAPIHandlers
ObjectAPI func() GatewayLayer
}

// registerGatewayAPIRouter - registers S3 compatible APIs.
func registerGatewayAPIRouter(mux *router.Router, gw GatewayLayer) {
// Initialize API.
api := gatewayAPIHandlers{
ObjectAPI: func() GatewayLayer { return gw },
objectAPIHandlers: objectAPIHandlers{
ObjectAPI: newObjectLayerFn,
},
}

// API Router
apiRouter := mux.NewRoute().PathPrefix("/").Subrouter()

// Bucket router
bucket := apiRouter.PathPrefix("/{bucket}").Subrouter()

/// Object operations

// HeadObject
bucket.Methods("HEAD").Path("/{object:.+}").HandlerFunc(api.HeadObjectHandler)
// CopyObjectPart
bucket.Methods("PUT").Path("/{object:.+}").HeadersRegexp("X-Amz-Copy-Source", ".*?(\\/|%2F).*?").HandlerFunc(api.CopyObjectPartHandler).Queries("partNumber", "{partNumber:[0-9]+}", "uploadId", "{uploadId:.*}")
// PutObjectPart
bucket.Methods("PUT").Path("/{object:.+}").HandlerFunc(api.PutObjectPartHandler).Queries("partNumber", "{partNumber:[0-9]+}", "uploadId", "{uploadId:.*}")
// ListObjectParts
bucket.Methods("GET").Path("/{object:.+}").HandlerFunc(api.ListObjectPartsHandler).Queries("uploadId", "{uploadId:.*}")
|
||||
// CompleteMultipartUpload
|
||||
bucket.Methods("POST").Path("/{object:.+}").HandlerFunc(api.CompleteMultipartUploadHandler).Queries("uploadId", "{uploadId:.*}")
|
||||
// NewMultipartUpload
|
||||
bucket.Methods("POST").Path("/{object:.+}").HandlerFunc(api.NewMultipartUploadHandler).Queries("uploads", "")
|
||||
// AbortMultipartUpload
|
||||
bucket.Methods("DELETE").Path("/{object:.+}").HandlerFunc(api.AbortMultipartUploadHandler).Queries("uploadId", "{uploadId:.*}")
|
||||
// GetObject
|
||||
bucket.Methods("GET").Path("/{object:.+}").HandlerFunc(api.GetObjectHandler)
|
||||
// CopyObject
|
||||
bucket.Methods("PUT").Path("/{object:.+}").HeadersRegexp("X-Amz-Copy-Source", ".*?(\\/|%2F).*?").HandlerFunc(api.CopyObjectHandler)
|
||||
// PutObject
|
||||
bucket.Methods("PUT").Path("/{object:.+}").HandlerFunc(api.PutObjectHandler)
|
||||
// DeleteObject
|
||||
bucket.Methods("DELETE").Path("/{object:.+}").HandlerFunc(api.DeleteObjectHandler)
|
||||
|
||||
/// Bucket operations
|
||||
|
||||
// GetBucketLocation
|
||||
bucket.Methods("GET").HandlerFunc(api.GetBucketLocationHandler).Queries("location", "")
|
||||
// GetBucketPolicy
|
||||
bucket.Methods("GET").HandlerFunc(api.GetBucketPolicyHandler).Queries("policy", "")
|
||||
// GetBucketNotification
|
||||
bucket.Methods("GET").HandlerFunc(api.GetBucketNotificationHandler).Queries("notification", "")
|
||||
// ListenBucketNotification
|
||||
bucket.Methods("GET").HandlerFunc(api.ListenBucketNotificationHandler).Queries("events", "{events:.*}")
|
||||
// ListMultipartUploads
|
||||
bucket.Methods("GET").HandlerFunc(api.ListMultipartUploadsHandler).Queries("uploads", "")
|
||||
// ListObjectsV2
|
||||
bucket.Methods("GET").HandlerFunc(api.ListObjectsV2Handler).Queries("list-type", "2")
|
||||
// ListObjectsV1 (Legacy)
|
||||
bucket.Methods("GET").HandlerFunc(api.ListObjectsV1Handler)
// PutBucketPolicy
bucket.Methods("PUT").HandlerFunc(api.PutBucketPolicyHandler).Queries("policy", "")
// PutBucketNotification
bucket.Methods("PUT").HandlerFunc(api.PutBucketNotificationHandler).Queries("notification", "")
// PutBucket
bucket.Methods("PUT").HandlerFunc(api.PutBucketHandler)
// HeadBucket
bucket.Methods("HEAD").HandlerFunc(api.HeadBucketHandler)
// PostPolicy
bucket.Methods("POST").HeadersRegexp("Content-Type", "multipart/form-data*").HandlerFunc(api.PostPolicyBucketHandler)
// DeleteMultipleObjects
bucket.Methods("POST").HandlerFunc(api.DeleteMultipleObjectsHandler)
// DeleteBucketPolicy
bucket.Methods("DELETE").HandlerFunc(api.DeleteBucketPolicyHandler).Queries("policy", "")
// DeleteBucket
bucket.Methods("DELETE").HandlerFunc(api.DeleteBucketHandler)

/// Root operation

// ListBuckets
apiRouter.Methods("GET").HandlerFunc(api.ListBucketsHandler)
}
67
cmd/gateway-startup-msg.go
Normal file
@ -0,0 +1,67 @@
/*
* Minio Cloud Storage, (C) 2016, 2017 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/

package cmd

import (
"fmt"
"runtime"
"strings"

"github.com/minio/mc/pkg/console"
)

// Prints the formatted startup message.
func printGatewayStartupMessage(apiEndPoints []string, accessKey, secretKey, backendType string) {
// Prints credential.
printGatewayCommonMsg(apiEndPoints, accessKey, secretKey)

// Prints the `mc` CLI configuration message, choosing the
// first endpoint as the default.
endPoint := apiEndPoints[0]

// Configure 'mc'; the following block prints platform-specific information for the minio client.
console.Println(colorBlue("\nCommand-line Access: ") + mcQuickStartGuide)
|
||||
if runtime.GOOS == globalWindowsOSName {
|
||||
mcMessage := fmt.Sprintf("$ mc.exe config host add my%s %s %s %s", backendType, endPoint, accessKey, secretKey)
|
||||
console.Println(fmt.Sprintf(getFormatStr(len(mcMessage), 3), mcMessage))
|
||||
} else {
|
||||
mcMessage := fmt.Sprintf("$ mc config host add my%s %s %s %s", backendType, endPoint, accessKey, secretKey)
|
||||
console.Println(fmt.Sprintf(getFormatStr(len(mcMessage), 3), mcMessage))
|
||||
}
|
||||
|
||||
// Prints documentation message.
|
||||
printObjectAPIMsg()
|
||||
|
||||
// If SSL is configured, read the certificate chain and print
// its authority and expiry.
if globalIsSSL {
certs, err := readCertificateChain()
if err != nil {
console.Fatalf("Unable to read certificate chain. Error: %s", err)
}
printCertificateMsg(certs)
}
}

// Prints common server startup message. Prints credential and endpoint.
func printGatewayCommonMsg(apiEndpoints []string, accessKey, secretKey string) {
apiEndpointStr := strings.Join(apiEndpoints, " ")
// Colorize the message and print.
console.Println(colorBlue("\nEndpoint: ") + colorBold(fmt.Sprintf(getFormatStr(len(apiEndpointStr), 1), apiEndpointStr)))
console.Println(colorBlue("AccessKey: ") + colorBold(fmt.Sprintf("%s ", accessKey)))
console.Println(colorBlue("SecretKey: ") + colorBold(fmt.Sprintf("%s ", secretKey)))
}
31
cmd/gateway-startup-msg_test.go
Normal file
@ -0,0 +1,31 @@
/*
* Minio Cloud Storage, (C) 2016, 2017 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/

package cmd

import "testing"

// Test printing Gateway common message.
func TestPrintGatewayCommonMessage(t *testing.T) {
apiEndpoints := []string{"127.0.0.1:9000"}
printGatewayCommonMsg(apiEndpoints, "abcd1", "abcd123")
}

// Test print gateway startup message.
func TestPrintGatewayStartupMessage(t *testing.T) {
apiEndpoints := []string{"127.0.0.1:9000"}
printGatewayStartupMessage(apiEndpoints, "abcd1", "abcd123", "azure")
}
@ -36,6 +36,10 @@ const (
globalWindowsOSName = "windows"
globalNetBSDOSName = "netbsd"
globalSolarisOSName = "solaris"
globalMinioModeFS = "mode-server-fs"
globalMinioModeXL = "mode-server-xl"
globalMinioModeDistXL = "mode-server-distributed-xl"
globalMinioModeGatewayAzure = "mode-gateway-azure"
// Add new global values here.
)

@ -98,6 +98,7 @@ func newApp() *cli.App {
registerCommand(serverCmd)
registerCommand(versionCmd)
registerCommand(updateCmd)
registerCommand(gatewayCmd)

// Set up app.
cli.HelpFlag = cli.BoolFlag{
@ -48,10 +48,10 @@ var serverCmd = cli.Command{
Flags: append(serverFlags, globalFlags...),
Action: serverMain,
CustomHelpTemplate: `NAME:
{{.HelpName}} - {{.Usage}}
{{.HelpName}} - {{.Usage}}

USAGE:
{{.HelpName}} {{if .VisibleFlags}}[FLAGS] {{end}}PATH [PATH...]
{{.HelpName}} {{if .VisibleFlags}}[FLAGS] {{end}}PATH [PATH...]
{{if .VisibleFlags}}
FLAGS:
{{range .VisibleFlags}}{{.}}
@ -85,9 +85,9 @@ EXAMPLES:
}

// Check for updates and print a notification message
func checkUpdate() {
func checkUpdate(mode string) {
// It's OK to ignore any errors during getUpdateInfo() here.
if older, downloadURL, err := getUpdateInfo(1 * time.Second); err == nil {
if older, downloadURL, err := getUpdateInfo(1*time.Second, mode); err == nil {
if older > time.Duration(0) {
console.Println(colorizeUpdateMessage(downloadURL, older))
}
@ -485,11 +485,6 @@ func serverMain(c *cli.Context) {
// Initializes server config, certs, logging and system settings.
initServerConfig(c)

// Check for new updates from dl.minio.io.
if !quietFlag {
checkUpdate()
}

// Server address.
serverAddr := c.String("address")

@ -538,6 +533,18 @@ func serverMain(c *cli.Context) {
globalIsXL = true
}

if !quietFlag {
// Check for new updates from dl.minio.io.
mode := globalMinioModeFS
if globalIsXL {
mode = globalMinioModeXL
}
if globalIsDistXL {
mode = globalMinioModeDistXL
}
checkUpdate(mode)
}

// Initialize name space lock.
initNSLock(globalIsDistXL)

@ -1,5 +1,5 @@
/*
* Minio Cloud Storage, (C) 2016, 2017, 2017 Minio, Inc.
* Minio Cloud Storage, (C) 2016, 2017 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@ -175,4 +175,5 @@ func getCertificateChainMsg(certs []*x509.Certificate) string {
// Prints the certificate expiry message.
func printCertificateMsg(certs []*x509.Certificate) {
console.Println(getCertificateChainMsg(certs))

}
@ -133,8 +133,11 @@ func IsSourceBuild() bool {
// Minio (<OS>; <ARCH>[; docker][; source]) Minio/<VERSION> Minio/<RELEASE-TAG> Minio/<COMMIT-ID>
//
// Any change here should be discussed by opening an issue at https://github.com/minio/minio/issues.
func getUserAgent() string {
func getUserAgent(mode string) string {
userAgent := "Minio (" + runtime.GOOS + "; " + runtime.GOARCH
if mode != "" {
userAgent += "; " + mode
}
if IsDocker() {
userAgent += "; docker"
}
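// For illustration (version fields elided): a gateway build on linux/amd64
// would now send a User-Agent shaped like
//   Minio (linux; amd64; mode-gateway-azure) Minio/<VERSION> Minio/<RELEASE-TAG> Minio/<COMMIT-ID>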
@ -146,12 +149,12 @@ func getUserAgent() string {
return userAgent
}

func downloadReleaseData(releaseChecksumURL string, timeout time.Duration) (data string, err error) {
func downloadReleaseData(releaseChecksumURL string, timeout time.Duration, mode string) (data string, err error) {
req, err := http.NewRequest("GET", releaseChecksumURL, nil)
if err != nil {
return data, err
}
req.Header.Set("User-Agent", getUserAgent())
req.Header.Set("User-Agent", getUserAgent(mode))

client := &http.Client{
Timeout: timeout,
@ -184,8 +187,8 @@ func downloadReleaseData(releaseChecksumURL string, timeout time.Duration) (data
}

// DownloadReleaseData - downloads release data from minio official server.
func DownloadReleaseData(timeout time.Duration) (data string, err error) {
return downloadReleaseData(minioReleaseURL+"minio.shasum", timeout)
func DownloadReleaseData(timeout time.Duration, mode string) (data string, err error) {
return downloadReleaseData(minioReleaseURL+"minio.shasum", timeout, mode)
}

func parseReleaseData(data string) (releaseTime time.Time, err error) {
@ -214,8 +217,8 @@ func parseReleaseData(data string) (releaseTime time.Time, err error) {
return releaseTime, err
}

func getLatestReleaseTime(timeout time.Duration) (releaseTime time.Time, err error) {
data, err := DownloadReleaseData(timeout)
func getLatestReleaseTime(timeout time.Duration, mode string) (releaseTime time.Time, err error) {
data, err := DownloadReleaseData(timeout, mode)
if err != nil {
return releaseTime, err
}
@ -235,13 +238,13 @@ func getDownloadURL() (downloadURL string) {
return minioReleaseURL + "minio"
}

func getUpdateInfo(timeout time.Duration) (older time.Duration, downloadURL string, err error) {
func getUpdateInfo(timeout time.Duration, mode string) (older time.Duration, downloadURL string, err error) {
currentReleaseTime, err := GetCurrentReleaseTime()
if err != nil {
return older, downloadURL, err
}

latestReleaseTime, err := getLatestReleaseTime(timeout)
latestReleaseTime, err := getLatestReleaseTime(timeout, mode)
if err != nil {
return older, downloadURL, err
}
@ -266,7 +269,8 @@ func mainUpdate(ctx *cli.Context) {
}
}

older, downloadURL, err := getUpdateInfo(10 * time.Second)
minioMode := ""
older, downloadURL, err := getUpdateInfo(10*time.Second, minioMode)
if err != nil {
quietPrintln(err)
os.Exit(-1)

@ -260,7 +260,7 @@ func TestDownloadReleaseData(t *testing.T) {
}

for _, testCase := range testCases {
result, err := downloadReleaseData(testCase.releaseChecksumURL, 1*time.Second)
result, err := downloadReleaseData(testCase.releaseChecksumURL, 1*time.Second, "")
if testCase.expectedErr == nil {
if err != nil {
t.Fatalf("error: expected: %v, got: %v", testCase.expectedErr, err)

@ -708,8 +708,8 @@ type ListAllBucketPoliciesArgs struct {
BucketName string `json:"bucketName"`
}

// Collection of canned bucket policy at a given prefix.
type bucketAccessPolicy struct {
// BucketAccessPolicy - Collection of canned bucket policy at a given prefix.
type BucketAccessPolicy struct {
Prefix string `json:"prefix"`
Policy policy.BucketPolicy `json:"policy"`
}
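// Illustrative JSON shape of one entry (policy.BucketPolicy marshals as a
// string, e.g. "readwrite"):
//   {"prefix": "mybucket/hello*", "policy": "readwrite"}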
@ -717,7 +717,7 @@ type bucketAccessPolicy struct {
// ListAllBucketPoliciesRep - get all bucket policy reply.
type ListAllBucketPoliciesRep struct {
UIVersion string `json:"uiVersion"`
Policies []bucketAccessPolicy `json:"policies"`
Policies []BucketAccessPolicy `json:"policies"`
}

// ListAllBucketPolicies - get all bucket policies.
@ -738,7 +738,7 @@ func (web *webAPIHandlers) ListAllBucketPolicies(r *http.Request, args *ListAllB

reply.UIVersion = browser.UIVersion
for prefix, policy := range policy.GetPolicies(policyInfo.Statements, args.BucketName) {
reply.Policies = append(reply.Policies, bucketAccessPolicy{
reply.Policies = append(reply.Policies, BucketAccessPolicy{
Prefix: prefix,
Policy: policy,
})

@ -1136,13 +1136,13 @@ func testWebListAllBucketPoliciesHandler(obj ObjectLayer, instanceType string, t
t.Fatal("Unexpected error: ", err)
}

testCaseResult1 := []bucketAccessPolicy{{
testCaseResult1 := []BucketAccessPolicy{{
Prefix: bucketName + "/hello*",
Policy: policy.BucketPolicyReadWrite,
}}
testCases := []struct {
bucketName string
expectedResult []bucketAccessPolicy
expectedResult []BucketAccessPolicy
}{
{bucketName, testCaseResult1},
}
48
docs/gateway/README.md
Normal file
@ -0,0 +1,48 @@
# Minio Gateway [![Slack](https://slack.minio.io/slack?type=svg)](https://slack.minio.io)

Minio gateway adds Amazon S3 compatibility to third party cloud storage providers. Supported providers are:

- Azure Blob Storage

## Run Minio Gateway for Azure Blob Storage

### Using Docker

```
docker run -p 9000:9000 --name azure-s3 \
-e "MINIO_ACCESS_KEY=azureaccountname" \
-e "MINIO_SECRET_KEY=azureaccountkey" \
minio/minio gateway azure
```

### Using Binary

```
export MINIO_ACCESS_KEY=azureaccountname
export MINIO_SECRET_KEY=azureaccountkey
minio gateway azure
```

## Test using Minio Client `mc`
`mc` provides a modern alternative to UNIX commands such as ls, cat, cp, mirror, diff, etc. It supports filesystems and Amazon S3 compatible cloud storage services.

### Configure `mc`

```
mc config host add myazure http://gateway-ip:9000 azureaccountname azureaccountkey
```

### List containers on Azure

```
mc ls myazure
[2017-02-22 01:50:43 PST] 0B ferenginar/
[2017-02-26 21:43:51 PST] 0B my-container/
[2017-02-26 22:10:11 PST] 0B test-container1/
```

## Explore Further
- [`mc` command-line interface](https://docs.minio.io/docs/minio-client-quickstart-guide)
- [`aws` command-line interface](https://docs.minio.io/docs/aws-cli-with-minio)
- [`minfs` filesystem interface](http://docs.minio.io/docs/minfs-quickstart-guide)
- [`minio-go` Go SDK](https://docs.minio.io/docs/golang-client-quickstart-guide)
19
docs/gateway/azure-limitations.md
Normal file
@ -0,0 +1,19 @@
## Minio Azure Gateway Limitations

Gateway inherits the following Azure limitations:

- Maximum multipart part size is 100MB.
- Maximum multipart object size is 10000*100 MB = 1TB.
- No support for prefix-based bucket policies. Only a top-level bucket policy is supported.
- A gateway restart implies all ongoing multipart uploads must be restarted,
  i.e. clients must start again with NewMultipartUpload.
  This is because S3 clients send metadata in NewMultipartUpload but Azure expects metadata to
  be set during CompleteMultipartUpload (PutBlockList in Azure terminology). We store the metadata
  sent by the client during NewMultipartUpload in memory so that it can be set on Azure later during
  CompleteMultipartUpload (see the sketch after this list). When the gateway is restarted this information is lost.
- Bucket names containing "." are not supported.
- Non-empty buckets get removed on a DeleteBucket() call.

Other limitations:
- The current implementation of ListMultipartUploads is incomplete. Right now it only reports whether the object named "prefix" has any uploaded parts.
- Bucket notification is not supported.
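
A minimal sketch of the in-memory store described above, under the assumption that it is a mutex-guarded map keyed by upload ID (the names `uploadMetaMu`, `uploadMeta` and the two helpers are hypothetical, not code from this commit):

```
import "sync"

// Hypothetical in-memory multipart metadata store, keyed by upload ID.
var (
	uploadMetaMu sync.Mutex
	uploadMeta   = make(map[string]map[string]string)
)

// saveUploadMeta remembers metadata sent by the S3 client in NewMultipartUpload.
func saveUploadMeta(uploadID string, meta map[string]string) {
	uploadMetaMu.Lock()
	defer uploadMetaMu.Unlock()
	uploadMeta[uploadID] = meta
}

// takeUploadMeta fetches (and forgets) the metadata when
// CompleteMultipartUpload issues PutBlockList to Azure.
func takeUploadMeta(uploadID string) map[string]string {
	uploadMetaMu.Lock()
	defer uploadMetaMu.Unlock()
	meta := uploadMeta[uploadID]
	delete(uploadMeta, uploadID)
	return meta
}
```

Because the map lives only in process memory, a restart drops it and clients must begin again with NewMultipartUpload.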
202
vendor/github.com/Azure/azure-sdk-for-go/LICENSE
generated
vendored
Normal file
@ -0,0 +1,202 @@

Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/

TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION

1. Definitions.

"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.

"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.

"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.

"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.

"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.

"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.

"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).

"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.

"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."

"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.

2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.

3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.

4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:

(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and

(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and

(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and

(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.

You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.

5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.

6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.

7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.

8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.

9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.

END OF TERMS AND CONDITIONS

APPENDIX: How to apply the Apache License to your work.

To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "[]"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.

Copyright 2016 Microsoft Corporation

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
5
vendor/github.com/Azure/azure-sdk-for-go/storage/README.md
generated
vendored
Normal file
@ -0,0 +1,5 @@
# Azure Storage SDK for Go

The `github.com/Azure/azure-sdk-for-go/storage` package is used to perform operations in Azure Storage Service. To manage your storage accounts (Azure Resource Manager / ARM), use the [github.com/Azure/azure-sdk-for-go/arm/storage](../arm/storage) package. For your classic storage accounts (Azure Service Management / ASM), use [github.com/Azure/azure-sdk-for-go/management/storageservice](../management/storageservice) package.

This package includes support for [Azure Storage Emulator](https://azure.microsoft.com/documentation/articles/storage-use-emulator/)
223
vendor/github.com/Azure/azure-sdk-for-go/storage/authorization.go
generated
vendored
Normal file
@ -0,0 +1,223 @@
|
||||
// Package storage provides clients for Microsoft Azure Storage Services.
|
||||
package storage
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"net/url"
|
||||
"sort"
|
||||
"strings"
|
||||
)
|
||||
|
||||
// See: https://docs.microsoft.com/rest/api/storageservices/fileservices/authentication-for-the-azure-storage-services
|
||||
|
||||
type authentication string
|
||||
|
||||
const (
|
||||
sharedKey authentication = "sharedKey"
|
||||
sharedKeyForTable authentication = "sharedKeyTable"
|
||||
sharedKeyLite authentication = "sharedKeyLite"
|
||||
sharedKeyLiteForTable authentication = "sharedKeyLiteTable"
|
||||
|
||||
// headers
|
||||
headerAuthorization = "Authorization"
|
||||
headerContentLength = "Content-Length"
|
||||
headerDate = "Date"
|
||||
headerXmsDate = "x-ms-date"
|
||||
headerXmsVersion = "x-ms-version"
|
||||
headerContentEncoding = "Content-Encoding"
|
||||
headerContentLanguage = "Content-Language"
|
||||
headerContentType = "Content-Type"
|
||||
headerContentMD5 = "Content-MD5"
|
||||
headerIfModifiedSince = "If-Modified-Since"
|
||||
headerIfMatch = "If-Match"
|
||||
headerIfNoneMatch = "If-None-Match"
|
||||
headerIfUnmodifiedSince = "If-Unmodified-Since"
|
||||
headerRange = "Range"
|
||||
)
|
||||
|
||||
func (c *Client) addAuthorizationHeader(verb, url string, headers map[string]string, auth authentication) (map[string]string, error) {
	authHeader, err := c.getSharedKey(verb, url, headers, auth)
	if err != nil {
		return nil, err
	}
	headers[headerAuthorization] = authHeader
	return headers, nil
}

func (c *Client) getSharedKey(verb, url string, headers map[string]string, auth authentication) (string, error) {
	canRes, err := c.buildCanonicalizedResource(url, auth)
	if err != nil {
		return "", err
	}

	canString, err := buildCanonicalizedString(verb, headers, canRes, auth)
	if err != nil {
		return "", err
	}
	return c.createAuthorizationHeader(canString, auth), nil
}

func (c *Client) buildCanonicalizedResource(uri string, auth authentication) (string, error) {
	errMsg := "buildCanonicalizedResource error: %s"
	u, err := url.Parse(uri)
	if err != nil {
		return "", fmt.Errorf(errMsg, err.Error())
	}

	cr := bytes.NewBufferString("/")
	cr.WriteString(c.getCanonicalizedAccountName())

	if len(u.Path) > 0 {
		// Any portion of the CanonicalizedResource string that is derived from
		// the resource's URI should be encoded exactly as it is in the URI.
		// -- https://msdn.microsoft.com/en-gb/library/azure/dd179428.aspx
		cr.WriteString(u.EscapedPath())
	}

	params, err := url.ParseQuery(u.RawQuery)
	if err != nil {
		return "", fmt.Errorf(errMsg, err.Error())
	}

	// See https://github.com/Azure/azure-storage-net/blob/master/Lib/Common/Core/Util/AuthenticationUtility.cs#L277
	if auth == sharedKey {
		if len(params) > 0 {
			cr.WriteString("\n")

			keys := []string{}
			for key := range params {
				keys = append(keys, key)
			}
			sort.Strings(keys)

			completeParams := []string{}
			for _, key := range keys {
				if len(params[key]) > 1 {
					sort.Strings(params[key])
				}

				completeParams = append(completeParams, fmt.Sprintf("%s:%s", key, strings.Join(params[key], ",")))
			}
			cr.WriteString(strings.Join(completeParams, "\n"))
		}
	} else {
		// search for "comp" parameter, if exists then add it to canonicalizedresource
		if v, ok := params["comp"]; ok {
			cr.WriteString("?comp=" + v[0])
		}
	}

	return string(cr.Bytes()), nil
}

func (c *Client) getCanonicalizedAccountName() string {
	// since we may be trying to access a secondary storage account, we need to
	// remove the -secondary part of the storage name
	return strings.TrimSuffix(c.accountName, "-secondary")
}

func buildCanonicalizedString(verb string, headers map[string]string, canonicalizedResource string, auth authentication) (string, error) {
	contentLength := headers[headerContentLength]
	if contentLength == "0" {
		contentLength = ""
	}
	date := headers[headerDate]
	if v, ok := headers[headerXmsDate]; ok {
		if auth == sharedKey || auth == sharedKeyLite {
			date = ""
		} else {
			date = v
		}
	}
	var canString string
	switch auth {
	case sharedKey:
		canString = strings.Join([]string{
			verb,
			headers[headerContentEncoding],
			headers[headerContentLanguage],
			contentLength,
			headers[headerContentMD5],
			headers[headerContentType],
			date,
			headers[headerIfModifiedSince],
			headers[headerIfMatch],
			headers[headerIfNoneMatch],
			headers[headerIfUnmodifiedSince],
			headers[headerRange],
			buildCanonicalizedHeader(headers),
			canonicalizedResource,
		}, "\n")
	case sharedKeyForTable:
		canString = strings.Join([]string{
			verb,
			headers[headerContentMD5],
			headers[headerContentType],
			date,
			canonicalizedResource,
		}, "\n")
	case sharedKeyLite:
		canString = strings.Join([]string{
			verb,
			headers[headerContentMD5],
			headers[headerContentType],
			date,
			buildCanonicalizedHeader(headers),
			canonicalizedResource,
		}, "\n")
	case sharedKeyLiteForTable:
		canString = strings.Join([]string{
			date,
			canonicalizedResource,
		}, "\n")
	default:
		return "", fmt.Errorf("%s authentication is not supported yet", auth)
	}
	return canString, nil
}

func buildCanonicalizedHeader(headers map[string]string) string {
	cm := make(map[string]string)

	for k, v := range headers {
		headerName := strings.TrimSpace(strings.ToLower(k))
		if strings.HasPrefix(headerName, "x-ms-") {
			cm[headerName] = v
		}
	}

	if len(cm) == 0 {
		return ""
	}

	keys := []string{}
	for key := range cm {
		keys = append(keys, key)
	}

	sort.Strings(keys)

	ch := bytes.NewBufferString("")

	for _, key := range keys {
		ch.WriteString(key)
		ch.WriteRune(':')
		ch.WriteString(cm[key])
		ch.WriteRune('\n')
	}

	return strings.TrimSuffix(string(ch.Bytes()), "\n")
}

func (c *Client) createAuthorizationHeader(canonicalizedString string, auth authentication) string {
	signature := c.computeHmac256(canonicalizedString)
	var key string
	switch auth {
	case sharedKey, sharedKeyForTable:
		key = "SharedKey"
	case sharedKeyLite, sharedKeyLiteForTable:
		key = "SharedKeyLite"
	}
	return fmt.Sprintf("%s %s:%s", key, c.getCanonicalizedAccountName(), signature)
}
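For reference, a minimal self-contained sketch of the signing step these helpers feed into: the vendored computeHmac256 performs the same HMAC-SHA256 over the base64-decoded account key shown here with the standard library. signCanonicalized and the sample inputs are illustrative, not part of the SDK:
```
package main

import (
	"crypto/hmac"
	"crypto/sha256"
	"encoding/base64"
	"fmt"
)

// signCanonicalized mirrors createAuthorizationHeader above: HMAC-SHA256 of
// the canonicalized string keyed by the base64-decoded account key, with the
// digest re-encoded as base64 into a "SharedKey account:signature" header.
func signCanonicalized(canString, accountName, accountKeyB64 string) (string, error) {
	key, err := base64.StdEncoding.DecodeString(accountKeyB64)
	if err != nil {
		return "", err
	}
	h := hmac.New(sha256.New, key)
	h.Write([]byte(canString))
	sig := base64.StdEncoding.EncodeToString(h.Sum(nil))
	return fmt.Sprintf("SharedKey %s:%s", accountName, sig), nil
}

func main() {
	// Hypothetical canonicalized string and key, for illustration only.
	keyB64 := base64.StdEncoding.EncodeToString([]byte("secret"))
	hdr, _ := signCanonicalized("GET\n\n\n\n\n\n\n\n\n\n\n\nx-ms-date:...", "myaccount", keyB64)
	fmt.Println(hdr)
}
```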
1539 vendor/github.com/Azure/azure-sdk-for-go/storage/blob.go generated vendored Normal file
File diff suppressed because it is too large
469 vendor/github.com/Azure/azure-sdk-for-go/storage/client.go generated vendored Normal file
@ -0,0 +1,469 @@
// Package storage provides clients for Microsoft Azure Storage Services.
package storage

import (
	"bytes"
	"encoding/base64"
	"encoding/json"
	"encoding/xml"
	"errors"
	"fmt"
	"io"
	"io/ioutil"
	"net/http"
	"net/url"
	"runtime"
	"strconv"
	"strings"
)

const (
	// DefaultBaseURL is the domain name used for storage requests when a
	// default client is created.
	DefaultBaseURL = "core.windows.net"

	// DefaultAPIVersion is the Azure Storage API version string used when a
	// basic client is created.
	DefaultAPIVersion = "2015-02-21"

	defaultUseHTTPS = true

	// StorageEmulatorAccountName is the fixed storage account used by Azure Storage Emulator
	StorageEmulatorAccountName = "devstoreaccount1"

	// StorageEmulatorAccountKey is the fixed storage account key used by Azure Storage Emulator
	StorageEmulatorAccountKey = "Eby8vdM02xNOcqFlqUwJPLlmEtlCDXJ1OUzFT50uSRZ6IFsuFq2UVErCz4I6tq/K1SZFPTOtr/KBHBeksoGMGw=="

	blobServiceName  = "blob"
	tableServiceName = "table"
	queueServiceName = "queue"
	fileServiceName  = "file"

	storageEmulatorBlob  = "127.0.0.1:10000"
	storageEmulatorTable = "127.0.0.1:10002"
	storageEmulatorQueue = "127.0.0.1:10001"

	userAgentHeader = "User-Agent"
)

// Client is the object that needs to be constructed to perform
// operations on the storage account.
type Client struct {
	// HTTPClient is the http.Client used to initiate API
	// requests. If it is nil, http.DefaultClient is used.
	HTTPClient *http.Client

	accountName      string
	accountKey       []byte
	useHTTPS         bool
	UseSharedKeyLite bool
	baseURL          string
	apiVersion       string
	userAgent        string
}

type storageResponse struct {
	statusCode int
	headers    http.Header
	body       io.ReadCloser
}

type odataResponse struct {
	storageResponse
	odata odataErrorMessage
}

// AzureStorageServiceError contains fields of the error response from
// Azure Storage Service REST API. See https://msdn.microsoft.com/en-us/library/azure/dd179382.aspx
// Some fields might be specific to certain calls.
type AzureStorageServiceError struct {
	Code                      string `xml:"Code"`
	Message                   string `xml:"Message"`
	AuthenticationErrorDetail string `xml:"AuthenticationErrorDetail"`
	QueryParameterName        string `xml:"QueryParameterName"`
	QueryParameterValue       string `xml:"QueryParameterValue"`
	Reason                    string `xml:"Reason"`
	StatusCode                int
	RequestID                 string
}

type odataErrorMessageMessage struct {
	Lang  string `json:"lang"`
	Value string `json:"value"`
}

type odataErrorMessageInternal struct {
	Code    string                   `json:"code"`
	Message odataErrorMessageMessage `json:"message"`
}

type odataErrorMessage struct {
	Err odataErrorMessageInternal `json:"odata.error"`
}

// UnexpectedStatusCodeError is returned when a storage service responds with neither an error
// nor with an HTTP status code indicating success.
type UnexpectedStatusCodeError struct {
	allowed []int
	got     int
}

func (e UnexpectedStatusCodeError) Error() string {
	s := func(i int) string { return fmt.Sprintf("%d %s", i, http.StatusText(i)) }

	got := s(e.got)
	expected := []string{}
	for _, v := range e.allowed {
		expected = append(expected, s(v))
	}
	return fmt.Sprintf("storage: status code from service response is %s; was expecting %s", got, strings.Join(expected, " or "))
}

// Got is the actual status code returned by Azure.
func (e UnexpectedStatusCodeError) Got() int {
	return e.got
}

// NewBasicClient constructs a Client with given storage service name and
// key.
func NewBasicClient(accountName, accountKey string) (Client, error) {
	if accountName == StorageEmulatorAccountName {
		return NewEmulatorClient()
	}
	return NewClient(accountName, accountKey, DefaultBaseURL, DefaultAPIVersion, defaultUseHTTPS)
}

// NewEmulatorClient constructs a Client intended to only work with Azure
// Storage Emulator
func NewEmulatorClient() (Client, error) {
	return NewClient(StorageEmulatorAccountName, StorageEmulatorAccountKey, DefaultBaseURL, DefaultAPIVersion, false)
}

// NewClient constructs a Client. This should be used if the caller wants
// to specify whether to use HTTPS, a specific REST API version, or a
// storage endpoint other than the Azure Public Cloud.
func NewClient(accountName, accountKey, blobServiceBaseURL, apiVersion string, useHTTPS bool) (Client, error) {
	var c Client
	if accountName == "" {
		return c, fmt.Errorf("azure: account name required")
	} else if accountKey == "" {
		return c, fmt.Errorf("azure: account key required")
	} else if blobServiceBaseURL == "" {
		return c, fmt.Errorf("azure: base storage service url required")
	}

	key, err := base64.StdEncoding.DecodeString(accountKey)
	if err != nil {
		return c, fmt.Errorf("azure: malformed storage account key: %v", err)
	}

	c = Client{
		accountName:      accountName,
		accountKey:       key,
		useHTTPS:         useHTTPS,
		baseURL:          blobServiceBaseURL,
		apiVersion:       apiVersion,
		UseSharedKeyLite: false,
	}
	c.userAgent = c.getDefaultUserAgent()
	return c, nil
}
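A hedged usage sketch of the constructors above; the account name and base64 key are placeholders:
```
package main

import (
	"fmt"

	"github.com/Azure/azure-sdk-for-go/storage"
)

func main() {
	// NewBasicClient falls back to the emulator automatically when the
	// well-known emulator account name is used.
	cli, err := storage.NewBasicClient("myaccount", "bXlrZXk=") // placeholder credentials
	if err != nil {
		fmt.Println("client error:", err)
		return
	}
	blob := cli.GetBlobService() // signs requests with SharedKey by default
	_ = blob
}
```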
func (c Client) getDefaultUserAgent() string {
	return fmt.Sprintf("Go/%s (%s-%s) Azure-SDK-For-Go/%s storage-dataplane/%s",
		runtime.Version(),
		runtime.GOARCH,
		runtime.GOOS,
		sdkVersion,
		c.apiVersion,
	)
}

// AddToUserAgent adds an extension to the current user agent
func (c *Client) AddToUserAgent(extension string) error {
	if extension != "" {
		c.userAgent = fmt.Sprintf("%s %s", c.userAgent, extension)
		return nil
	}
	return fmt.Errorf("Extension was empty, User Agent stayed as %s", c.userAgent)
}

// protectUserAgent is used in funcs that include extraheaders as a parameter.
// It prevents the User-Agent header from being overwritten; if it happens to
// be present, it gets added to the current User-Agent. Use it before getStandardHeaders.
func (c *Client) protectUserAgent(extraheaders map[string]string) map[string]string {
	if v, ok := extraheaders[userAgentHeader]; ok {
		c.AddToUserAgent(v)
		delete(extraheaders, userAgentHeader)
	}
	return extraheaders
}

func (c Client) getBaseURL(service string) string {
	scheme := "http"
	if c.useHTTPS {
		scheme = "https"
	}
	host := ""
	if c.accountName == StorageEmulatorAccountName {
		switch service {
		case blobServiceName:
			host = storageEmulatorBlob
		case tableServiceName:
			host = storageEmulatorTable
		case queueServiceName:
			host = storageEmulatorQueue
		}
	} else {
		host = fmt.Sprintf("%s.%s.%s", c.accountName, service, c.baseURL)
	}

	u := &url.URL{
		Scheme: scheme,
		Host:   host}
	return u.String()
}

func (c Client) getEndpoint(service, path string, params url.Values) string {
	u, err := url.Parse(c.getBaseURL(service))
	if err != nil {
		// really should not be happening
		panic(err)
	}

	// API doesn't accept path segments not starting with '/'
	if !strings.HasPrefix(path, "/") {
		path = fmt.Sprintf("/%v", path)
	}

	if c.accountName == StorageEmulatorAccountName {
		path = fmt.Sprintf("/%v%v", StorageEmulatorAccountName, path)
	}

	u.Path = path
	u.RawQuery = params.Encode()
	return u.String()
}
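A standalone illustration of the URL that getBaseURL/getEndpoint assemble for a hypothetical account; this just rebuilds the same URL with net/url:
```
package main

import (
	"fmt"
	"net/url"
)

func main() {
	// For account "myaccount", service "blob", path "/container/object"
	// and params {"comp": ["list"]}:
	u := &url.URL{Scheme: "https", Host: "myaccount.blob.core.windows.net"}
	u.Path = "/container/object"
	u.RawQuery = url.Values{"comp": {"list"}}.Encode()
	fmt.Println(u.String())
	// https://myaccount.blob.core.windows.net/container/object?comp=list
	// Against the emulator the host becomes 127.0.0.1:10000 and the
	// account name is prefixed onto the path instead.
}
```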
// GetBlobService returns a BlobStorageClient which can operate on the blob
// service of the storage account.
func (c Client) GetBlobService() BlobStorageClient {
	b := BlobStorageClient{
		client: c,
	}
	b.client.AddToUserAgent(blobServiceName)
	b.auth = sharedKey
	if c.UseSharedKeyLite {
		b.auth = sharedKeyLite
	}
	return b
}

// GetQueueService returns a QueueServiceClient which can operate on the queue
// service of the storage account.
func (c Client) GetQueueService() QueueServiceClient {
	q := QueueServiceClient{
		client: c,
	}
	q.client.AddToUserAgent(queueServiceName)
	q.auth = sharedKey
	if c.UseSharedKeyLite {
		q.auth = sharedKeyLite
	}
	return q
}

// GetTableService returns a TableServiceClient which can operate on the table
// service of the storage account.
func (c Client) GetTableService() TableServiceClient {
	t := TableServiceClient{
		client: c,
	}
	t.client.AddToUserAgent(tableServiceName)
	t.auth = sharedKeyForTable
	if c.UseSharedKeyLite {
		t.auth = sharedKeyLiteForTable
	}
	return t
}

// GetFileService returns a FileServiceClient which can operate on the file
// service of the storage account.
func (c Client) GetFileService() FileServiceClient {
	f := FileServiceClient{
		client: c,
	}
	f.client.AddToUserAgent(fileServiceName)
	f.auth = sharedKey
	if c.UseSharedKeyLite {
		f.auth = sharedKeyLite
	}
	return f
}

func (c Client) getStandardHeaders() map[string]string {
	return map[string]string{
		userAgentHeader: c.userAgent,
		"x-ms-version":  c.apiVersion,
		"x-ms-date":     currentTimeRfc1123Formatted(),
	}
}

func (c Client) exec(verb, url string, headers map[string]string, body io.Reader, auth authentication) (*storageResponse, error) {
	headers, err := c.addAuthorizationHeader(verb, url, headers, auth)
	if err != nil {
		return nil, err
	}

	req, err := http.NewRequest(verb, url, body)
	if err != nil {
		return nil, errors.New("azure/storage: error creating request: " + err.Error())
	}

	if clstr, ok := headers["Content-Length"]; ok {
		// content length header is being signed, but completely ignored by golang.
		// instead we have to use the ContentLength property on the request struct
		// (see https://golang.org/src/net/http/request.go?s=18140:18370#L536 and
		// https://golang.org/src/net/http/transfer.go?s=1739:2467#L49)
		req.ContentLength, err = strconv.ParseInt(clstr, 10, 64)
		if err != nil {
			return nil, err
		}
	}
	for k, v := range headers {
		req.Header.Add(k, v)
	}

	httpClient := c.HTTPClient
	if httpClient == nil {
		httpClient = http.DefaultClient
	}
	resp, err := httpClient.Do(req)
	if err != nil {
		return nil, err
	}

	statusCode := resp.StatusCode
	if statusCode >= 400 && statusCode <= 505 {
		var respBody []byte
		respBody, err = readResponseBody(resp)
		if err != nil {
			return nil, err
		}

		requestID := resp.Header.Get("x-ms-request-id")
		if len(respBody) == 0 {
			// no error in response body, might happen in HEAD requests
			err = serviceErrFromStatusCode(resp.StatusCode, resp.Status, requestID)
		} else {
			// response contains storage service error object, unmarshal
			storageErr, errIn := serviceErrFromXML(respBody, resp.StatusCode, requestID)
			if errIn != nil { // error unmarshaling the error response
				err = errIn
			} else {
				err = storageErr
			}
		}
		return &storageResponse{
			statusCode: resp.StatusCode,
			headers:    resp.Header,
			body:       ioutil.NopCloser(bytes.NewReader(respBody)), /* restore the body */
		}, err
	}

	return &storageResponse{
		statusCode: resp.StatusCode,
		headers:    resp.Header,
		body:       resp.Body}, nil
}
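Since exec falls back to http.DefaultClient only when Client.HTTPClient is nil, a caller can bound every storage request with a timeout. A minimal sketch, with placeholder credentials:
```
package main

import (
	"net/http"
	"time"

	"github.com/Azure/azure-sdk-for-go/storage"
)

func main() {
	cli, err := storage.NewBasicClient("myaccount", "bXlrZXk=") // placeholders
	if err != nil {
		panic(err)
	}
	// Every request issued through exec will now honor this timeout.
	cli.HTTPClient = &http.Client{Timeout: 30 * time.Second}
	_ = cli.GetQueueService()
}
```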
func (c Client) execInternalJSON(verb, url string, headers map[string]string, body io.Reader, auth authentication) (*odataResponse, error) {
	headers, err := c.addAuthorizationHeader(verb, url, headers, auth)
	if err != nil {
		return nil, err
	}

	req, err := http.NewRequest(verb, url, body)
	if err != nil {
		return nil, err
	}
	for k, v := range headers {
		req.Header.Add(k, v)
	}

	httpClient := c.HTTPClient
	if httpClient == nil {
		httpClient = http.DefaultClient
	}

	resp, err := httpClient.Do(req)
	if err != nil {
		return nil, err
	}

	respToRet := &odataResponse{}
	respToRet.body = resp.Body
	respToRet.statusCode = resp.StatusCode
	respToRet.headers = resp.Header

	statusCode := resp.StatusCode
	if statusCode >= 400 && statusCode <= 505 {
		var respBody []byte
		respBody, err = readResponseBody(resp)
		if err != nil {
			return nil, err
		}

		if len(respBody) == 0 {
			// no error in response body, might happen in HEAD requests
			err = serviceErrFromStatusCode(resp.StatusCode, resp.Status, resp.Header.Get("x-ms-request-id"))
			return respToRet, err
		}
		// try unmarshal as odata.error json
		err = json.Unmarshal(respBody, &respToRet.odata)
		return respToRet, err
	}

	return respToRet, nil
}

func readResponseBody(resp *http.Response) ([]byte, error) {
	defer resp.Body.Close()
	out, err := ioutil.ReadAll(resp.Body)
	if err == io.EOF {
		err = nil
	}
	return out, err
}

func serviceErrFromXML(body []byte, statusCode int, requestID string) (AzureStorageServiceError, error) {
	var storageErr AzureStorageServiceError
	if err := xml.Unmarshal(body, &storageErr); err != nil {
		return storageErr, err
	}
	storageErr.StatusCode = statusCode
	storageErr.RequestID = requestID
	return storageErr, nil
}

func serviceErrFromStatusCode(code int, status string, requestID string) AzureStorageServiceError {
	return AzureStorageServiceError{
		StatusCode: code,
		Code:       status,
		RequestID:  requestID,
		Message:    "no response body was available for error status code",
	}
}

func (e AzureStorageServiceError) Error() string {
	return fmt.Sprintf("storage: service returned error: StatusCode=%d, ErrorCode=%s, ErrorMessage=%s, RequestId=%s, QueryParameterName=%s, QueryParameterValue=%s",
		e.StatusCode, e.Code, e.Message, e.RequestID, e.QueryParameterName, e.QueryParameterValue)
}

// checkRespCode returns UnexpectedStatusError if the given response code is not
// one of the allowed status codes; otherwise nil.
func checkRespCode(respCode int, allowed []int) error {
	for _, v := range allowed {
		if respCode == v {
			return nil
		}
	}
	return UnexpectedStatusCodeError{allowed, respCode}
}
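A standalone illustration of the checkRespCode contract above; the helper and error type are re-declared here only because they are unexported in the package:
```
package main

import (
	"fmt"
	"net/http"
)

type UnexpectedStatusCodeError struct {
	allowed []int
	got     int
}

func (e UnexpectedStatusCodeError) Error() string {
	return fmt.Sprintf("got %d, allowed %v", e.got, e.allowed)
}

func checkRespCode(respCode int, allowed []int) error {
	for _, v := range allowed {
		if respCode == v {
			return nil
		}
	}
	return UnexpectedStatusCodeError{allowed, respCode}
}

func main() {
	fmt.Println(checkRespCode(http.StatusConflict, []int{http.StatusCreated})) // got 409, allowed [201]
	fmt.Println(checkRespCode(http.StatusCreated, []int{http.StatusCreated}))  // <nil>
}
```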
217 vendor/github.com/Azure/azure-sdk-for-go/storage/directory.go generated vendored Normal file
@ -0,0 +1,217 @@
package storage

import (
	"encoding/xml"
	"net/http"
	"net/url"
)

// Directory represents a directory on a share.
type Directory struct {
	fsc        *FileServiceClient
	Metadata   map[string]string
	Name       string `xml:"Name"`
	parent     *Directory
	Properties DirectoryProperties
	share      *Share
}

// DirectoryProperties contains various properties of a directory.
type DirectoryProperties struct {
	LastModified string `xml:"Last-Modified"`
	Etag         string `xml:"Etag"`
}

// ListDirsAndFilesParameters defines the set of customizable parameters to
// make a List Files and Directories call.
//
// See https://msdn.microsoft.com/en-us/library/azure/dn166980.aspx
type ListDirsAndFilesParameters struct {
	Marker     string
	MaxResults uint
	Timeout    uint
}

// DirsAndFilesListResponse contains the response fields from
// a List Files and Directories call.
//
// See https://msdn.microsoft.com/en-us/library/azure/dn166980.aspx
type DirsAndFilesListResponse struct {
	XMLName     xml.Name    `xml:"EnumerationResults"`
	Xmlns       string      `xml:"xmlns,attr"`
	Marker      string      `xml:"Marker"`
	MaxResults  int64       `xml:"MaxResults"`
	Directories []Directory `xml:"Entries>Directory"`
	Files       []File      `xml:"Entries>File"`
	NextMarker  string      `xml:"NextMarker"`
}

// builds the complete directory path for this directory object.
func (d *Directory) buildPath() string {
	path := ""
	current := d
	for current.Name != "" {
		path = "/" + current.Name + path
		current = current.parent
	}
	return d.share.buildPath() + path
}

// Create this directory in the associated share.
// If a directory with the same name already exists, the operation fails.
//
// See https://msdn.microsoft.com/en-us/library/azure/dn166993.aspx
func (d *Directory) Create() error {
	// if this is the root directory exit early
	if d.parent == nil {
		return nil
	}

	headers, err := d.fsc.createResource(d.buildPath(), resourceDirectory, mergeMDIntoExtraHeaders(d.Metadata, nil))
	if err != nil {
		return err
	}

	d.updateEtagAndLastModified(headers)
	return nil
}

// CreateIfNotExists creates this directory under the associated share if the
// directory does not exist. Returns true if the directory is newly created or
// false if the directory already exists.
//
// See https://msdn.microsoft.com/en-us/library/azure/dn166993.aspx
func (d *Directory) CreateIfNotExists() (bool, error) {
	// if this is the root directory exit early
	if d.parent == nil {
		return false, nil
	}

	resp, err := d.fsc.createResourceNoClose(d.buildPath(), resourceDirectory, nil)
	if resp != nil {
		defer resp.body.Close()
		if resp.statusCode == http.StatusCreated || resp.statusCode == http.StatusConflict {
			if resp.statusCode == http.StatusCreated {
				d.updateEtagAndLastModified(resp.headers)
				return true, nil
			}

			return false, d.FetchAttributes()
		}
	}

	return false, err
}
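A hedged usage sketch for CreateIfNotExists; credentials are placeholders, and GetRootDirectoryReference is assumed to be provided by the Share type in share.go of this vendor drop:
```
package main

import (
	"fmt"

	"github.com/Azure/azure-sdk-for-go/storage"
)

func main() {
	cli, err := storage.NewBasicClient("myaccount", "bXlrZXk=") // placeholder credentials
	if err != nil {
		panic(err)
	}
	fsc := cli.GetFileService()
	share := fsc.GetShareReference("myshare")
	// Assumed helper from share.go: yields the root *Directory of the share.
	dir := share.GetRootDirectoryReference().GetDirectoryReference("logs")
	created, err := dir.CreateIfNotExists()
	if err != nil {
		panic(err)
	}
	fmt.Println("newly created:", created) // false if "logs" already existed
}
```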
// Delete removes this directory. It must be empty in order to be deleted.
// If the directory does not exist the operation fails.
//
// See https://msdn.microsoft.com/en-us/library/azure/dn166969.aspx
func (d *Directory) Delete() error {
	return d.fsc.deleteResource(d.buildPath(), resourceDirectory)
}

// DeleteIfExists removes this directory if it exists.
//
// See https://msdn.microsoft.com/en-us/library/azure/dn166969.aspx
func (d *Directory) DeleteIfExists() (bool, error) {
	resp, err := d.fsc.deleteResourceNoClose(d.buildPath(), resourceDirectory)
	if resp != nil {
		defer resp.body.Close()
		if resp.statusCode == http.StatusAccepted || resp.statusCode == http.StatusNotFound {
			return resp.statusCode == http.StatusAccepted, nil
		}
	}
	return false, err
}

// Exists returns true if this directory exists.
func (d *Directory) Exists() (bool, error) {
	exists, headers, err := d.fsc.resourceExists(d.buildPath(), resourceDirectory)
	if exists {
		d.updateEtagAndLastModified(headers)
	}
	return exists, err
}

// FetchAttributes retrieves metadata for this directory.
func (d *Directory) FetchAttributes() error {
	headers, err := d.fsc.getResourceHeaders(d.buildPath(), compNone, resourceDirectory, http.MethodHead)
	if err != nil {
		return err
	}

	d.updateEtagAndLastModified(headers)
	d.Metadata = getMetadataFromHeaders(headers)

	return nil
}

// GetDirectoryReference returns a child Directory object for this directory.
func (d *Directory) GetDirectoryReference(name string) *Directory {
	return &Directory{
		fsc:    d.fsc,
		Name:   name,
		parent: d,
		share:  d.share,
	}
}

// GetFileReference returns a child File object for this directory.
func (d *Directory) GetFileReference(name string) *File {
	return &File{
		fsc:    d.fsc,
		Name:   name,
		parent: d,
		share:  d.share,
	}
}

// ListDirsAndFiles returns a list of files and directories under this directory.
// It also contains a pagination token and other response details.
//
// See https://msdn.microsoft.com/en-us/library/azure/dn166980.aspx
func (d *Directory) ListDirsAndFiles(params ListDirsAndFilesParameters) (*DirsAndFilesListResponse, error) {
	q := mergeParams(params.getParameters(), getURLInitValues(compList, resourceDirectory))

	resp, err := d.fsc.listContent(d.buildPath(), q, nil)
	if err != nil {
		return nil, err
	}

	defer resp.body.Close()
	var out DirsAndFilesListResponse
	err = xmlUnmarshal(resp.body, &out)
	return &out, err
}

// SetMetadata replaces the metadata for this directory.
//
// Some keys may be converted to Camel-Case before sending. All keys
// are returned in lower case by GetDirectoryMetadata. HTTP header names
// are case-insensitive, so case munging should not matter to other
// applications either.
//
// See https://msdn.microsoft.com/en-us/library/azure/mt427370.aspx
func (d *Directory) SetMetadata() error {
	headers, err := d.fsc.setResourceHeaders(d.buildPath(), compMetadata, resourceDirectory, mergeMDIntoExtraHeaders(d.Metadata, nil))
	if err != nil {
		return err
	}

	d.updateEtagAndLastModified(headers)
	return nil
}

// updates Etag and last modified date
func (d *Directory) updateEtagAndLastModified(headers http.Header) {
	d.Properties.Etag = headers.Get("Etag")
	d.Properties.LastModified = headers.Get("Last-Modified")
}

// URL gets the canonical URL to this directory.
// This method does not create a publicly accessible URL if the directory
// is private, and this method does not check if the directory exists.
func (d *Directory) URL() string {
	return d.fsc.client.getEndpoint(fileServiceName, d.buildPath(), url.Values{})
}
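A hedged sketch of paging through a listing with the Marker/NextMarker fields defined above; dir is assumed to be a *storage.Directory obtained as in the earlier example:
```
package main

import (
	"fmt"

	"github.com/Azure/azure-sdk-for-go/storage"
)

// listAll walks every page of a directory listing: an empty NextMarker
// signals the last page, otherwise it seeds the Marker of the next call.
func listAll(dir *storage.Directory) error {
	params := storage.ListDirsAndFilesParameters{MaxResults: 100}
	for {
		resp, err := dir.ListDirsAndFiles(params)
		if err != nil {
			return err
		}
		for _, f := range resp.Files {
			fmt.Println("file:", f.Name)
		}
		if resp.NextMarker == "" {
			return nil
		}
		params.Marker = resp.NextMarker
	}
}
```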
360 vendor/github.com/Azure/azure-sdk-for-go/storage/file.go generated vendored Normal file
@ -0,0 +1,360 @@
package storage

import (
	"errors"
	"fmt"
	"io"
	"net/http"
	"net/url"
	"strconv"
)

const fourMB = uint64(4194304)
const oneTB = uint64(1099511627776)

// File represents a file on a share.
type File struct {
	fsc        *FileServiceClient
	Metadata   map[string]string
	Name       string `xml:"Name"`
	parent     *Directory
	Properties FileProperties `xml:"Properties"`
	share      *Share
}

// FileProperties contains various properties of a file.
type FileProperties struct {
	CacheControl string `header:"x-ms-cache-control"`
	Disposition  string `header:"x-ms-content-disposition"`
	Encoding     string `header:"x-ms-content-encoding"`
	Etag         string
	Language     string `header:"x-ms-content-language"`
	LastModified string
	Length       uint64 `xml:"Content-Length"`
	MD5          string `header:"x-ms-content-md5"`
	Type         string `header:"x-ms-content-type"`
}

// FileCopyState contains various properties of a file copy operation.
type FileCopyState struct {
	CompletionTime string
	ID             string
	Progress       string
	Source         string
	Status         string
	StatusDesc     string
}

// FileStream contains file data returned from a call to GetFile.
type FileStream struct {
	Body       io.ReadCloser
	ContentMD5 string
}

// FileRanges contains a list of file range information for a file.
//
// See https://msdn.microsoft.com/en-us/library/azure/dn166984.aspx
type FileRanges struct {
	ContentLength uint64
	LastModified  string
	ETag          string
	FileRanges    []FileRange `xml:"Range"`
}

// FileRange contains range information for a file.
//
// See https://msdn.microsoft.com/en-us/library/azure/dn166984.aspx
type FileRange struct {
	Start uint64 `xml:"Start"`
	End   uint64 `xml:"End"`
}

func (fr FileRange) String() string {
	return fmt.Sprintf("bytes=%d-%d", fr.Start, fr.End)
}

// builds the complete file path for this file object
func (f *File) buildPath() string {
	return f.parent.buildPath() + "/" + f.Name
}

// ClearRange releases the specified range of space in a file.
//
// See https://msdn.microsoft.com/en-us/library/azure/dn194276.aspx
func (f *File) ClearRange(fileRange FileRange) error {
	headers, err := f.modifyRange(nil, fileRange, nil)
	if err != nil {
		return err
	}

	f.updateEtagAndLastModified(headers)
	return nil
}

// Create creates a new file or replaces an existing one.
//
// See https://msdn.microsoft.com/en-us/library/azure/dn194271.aspx
func (f *File) Create(maxSize uint64) error {
	if maxSize > oneTB {
		return fmt.Errorf("max file size is 1TB")
	}

	extraHeaders := map[string]string{
		"x-ms-content-length": strconv.FormatUint(maxSize, 10),
		"x-ms-type":           "file",
	}

	headers, err := f.fsc.createResource(f.buildPath(), resourceFile, mergeMDIntoExtraHeaders(f.Metadata, extraHeaders))
	if err != nil {
		return err
	}

	f.Properties.Length = maxSize
	f.updateEtagAndLastModified(headers)
	return nil
}

// Delete immediately removes this file from the storage account.
//
// See https://msdn.microsoft.com/en-us/library/azure/dn689085.aspx
func (f *File) Delete() error {
	return f.fsc.deleteResource(f.buildPath(), resourceFile)
}

// DeleteIfExists removes this file if it exists.
//
// See https://msdn.microsoft.com/en-us/library/azure/dn689085.aspx
func (f *File) DeleteIfExists() (bool, error) {
	resp, err := f.fsc.deleteResourceNoClose(f.buildPath(), resourceFile)
	if resp != nil {
		defer resp.body.Close()
		if resp.statusCode == http.StatusAccepted || resp.statusCode == http.StatusNotFound {
			return resp.statusCode == http.StatusAccepted, nil
		}
	}
	return false, err
}

// DownloadRangeToStream operation downloads the specified range of this file with optional MD5 hash.
//
// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/get-file
func (f *File) DownloadRangeToStream(fileRange FileRange, getContentMD5 bool) (fs FileStream, err error) {
	if getContentMD5 && isRangeTooBig(fileRange) {
		return fs, fmt.Errorf("must specify a range less than or equal to 4MB when getContentMD5 is true")
	}

	extraHeaders := map[string]string{
		"Range": fileRange.String(),
	}
	if getContentMD5 {
		extraHeaders["x-ms-range-get-content-md5"] = "true"
	}

	resp, err := f.fsc.getResourceNoClose(f.buildPath(), compNone, resourceFile, http.MethodGet, extraHeaders)
	if err != nil {
		return fs, err
	}

	if err = checkRespCode(resp.statusCode, []int{http.StatusOK, http.StatusPartialContent}); err != nil {
		resp.body.Close()
		return fs, err
	}

	fs.Body = resp.body
	if getContentMD5 {
		fs.ContentMD5 = resp.headers.Get("Content-MD5")
	}
	return fs, nil
}

// Exists returns true if this file exists.
func (f *File) Exists() (bool, error) {
	exists, headers, err := f.fsc.resourceExists(f.buildPath(), resourceFile)
	if exists {
		f.updateEtagAndLastModified(headers)
		f.updateProperties(headers)
	}
	return exists, err
}

// FetchAttributes updates metadata and properties for this file.
func (f *File) FetchAttributes() error {
	headers, err := f.fsc.getResourceHeaders(f.buildPath(), compNone, resourceFile, http.MethodHead)
	if err != nil {
		return err
	}

	f.updateEtagAndLastModified(headers)
	f.updateProperties(headers)
	f.Metadata = getMetadataFromHeaders(headers)
	return nil
}

// returns true if the range is larger than 4MB
func isRangeTooBig(fileRange FileRange) bool {
	return fileRange.End-fileRange.Start > fourMB
}

// ListRanges returns the list of valid ranges for this file.
//
// See https://msdn.microsoft.com/en-us/library/azure/dn166984.aspx
func (f *File) ListRanges(listRange *FileRange) (*FileRanges, error) {
	params := url.Values{"comp": {"rangelist"}}

	// add optional range to list
	var headers map[string]string
	if listRange != nil {
		headers = make(map[string]string)
		headers["Range"] = listRange.String()
	}

	resp, err := f.fsc.listContent(f.buildPath(), params, headers)
	if err != nil {
		return nil, err
	}

	defer resp.body.Close()
	var cl uint64
	cl, err = strconv.ParseUint(resp.headers.Get("x-ms-content-length"), 10, 64)
	if err != nil {
		return nil, err
	}

	var out FileRanges
	out.ContentLength = cl
	out.ETag = resp.headers.Get("ETag")
	out.LastModified = resp.headers.Get("Last-Modified")

	err = xmlUnmarshal(resp.body, &out)
	return &out, err
}

// modifies a range of bytes in this file
func (f *File) modifyRange(bytes io.Reader, fileRange FileRange, contentMD5 *string) (http.Header, error) {
	if err := f.fsc.checkForStorageEmulator(); err != nil {
		return nil, err
	}
	if fileRange.End < fileRange.Start {
		return nil, errors.New("the value for rangeEnd must be greater than or equal to rangeStart")
	}
	if bytes != nil && isRangeTooBig(fileRange) {
		return nil, errors.New("range cannot exceed 4MB in size")
	}

	uri := f.fsc.client.getEndpoint(fileServiceName, f.buildPath(), url.Values{"comp": {"range"}})

	// default to clear
	write := "clear"
	cl := uint64(0)

	// if bytes is not nil then this is an update operation
	if bytes != nil {
		write = "update"
		cl = (fileRange.End - fileRange.Start) + 1
	}

	extraHeaders := map[string]string{
		"Content-Length": strconv.FormatUint(cl, 10),
		"Range":          fileRange.String(),
		"x-ms-write":     write,
	}

	if contentMD5 != nil {
		extraHeaders["Content-MD5"] = *contentMD5
	}

	headers := mergeHeaders(f.fsc.client.getStandardHeaders(), extraHeaders)
	resp, err := f.fsc.client.exec(http.MethodPut, uri, headers, bytes, f.fsc.auth)
	if err != nil {
		return nil, err
	}
	defer resp.body.Close()
	return resp.headers, checkRespCode(resp.statusCode, []int{http.StatusCreated})
}

// SetMetadata replaces the metadata for this file.
//
// Some keys may be converted to Camel-Case before sending. All keys
// are returned in lower case by GetFileMetadata. HTTP header names
// are case-insensitive, so case munging should not matter to other
// applications either.
//
// See https://msdn.microsoft.com/en-us/library/azure/dn689097.aspx
func (f *File) SetMetadata() error {
	headers, err := f.fsc.setResourceHeaders(f.buildPath(), compMetadata, resourceFile, mergeMDIntoExtraHeaders(f.Metadata, nil))
	if err != nil {
		return err
	}

	f.updateEtagAndLastModified(headers)
	return nil
}

// SetProperties sets system properties on this file.
//
// Some keys may be converted to Camel-Case before sending. All keys
// are returned in lower case by SetFileProperties. HTTP header names
// are case-insensitive, so case munging should not matter to other
// applications either.
//
// See https://msdn.microsoft.com/en-us/library/azure/dn166975.aspx
func (f *File) SetProperties() error {
	headers, err := f.fsc.setResourceHeaders(f.buildPath(), compProperties, resourceFile, headersFromStruct(f.Properties))
	if err != nil {
		return err
	}

	f.updateEtagAndLastModified(headers)
	return nil
}

// updates Etag and last modified date
func (f *File) updateEtagAndLastModified(headers http.Header) {
	f.Properties.Etag = headers.Get("Etag")
	f.Properties.LastModified = headers.Get("Last-Modified")
}

// updates file properties from the specified HTTP header
func (f *File) updateProperties(header http.Header) {
	size, err := strconv.ParseUint(header.Get("Content-Length"), 10, 64)
	if err == nil {
		f.Properties.Length = size
	}

	f.updateEtagAndLastModified(header)
	f.Properties.CacheControl = header.Get("Cache-Control")
	f.Properties.Disposition = header.Get("Content-Disposition")
	f.Properties.Encoding = header.Get("Content-Encoding")
	f.Properties.Language = header.Get("Content-Language")
	f.Properties.MD5 = header.Get("Content-MD5")
	f.Properties.Type = header.Get("Content-Type")
}

// URL gets the canonical URL to this file.
// This method does not create a publicly accessible URL if the file
// is private, and this method does not check if the file exists.
func (f *File) URL() string {
	return f.fsc.client.getEndpoint(fileServiceName, f.buildPath(), url.Values{})
}

// WriteRange writes a range of bytes to this file with an optional MD5 hash of the content.
// Note that the length of bytes must match (rangeEnd - rangeStart) + 1 with a maximum size of 4MB.
//
// See https://msdn.microsoft.com/en-us/library/azure/dn194276.aspx
func (f *File) WriteRange(bytes io.Reader, fileRange FileRange, contentMD5 *string) error {
	if bytes == nil {
		return errors.New("bytes cannot be nil")
	}

	headers, err := f.modifyRange(bytes, fileRange, contentMD5)
	if err != nil {
		return err
	}

	f.updateEtagAndLastModified(headers)
	return nil
}
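A hedged upload sketch combining Create and WriteRange above: the file is created at its final size, then written in chunks of at most 4MB, the per-range cap enforced by modifyRange. uploadFile is illustrative; f is assumed to come from a directory reference:
```
package main

import (
	"bytes"

	"github.com/Azure/azure-sdk-for-go/storage"
)

func uploadFile(f *storage.File, data []byte) error {
	size := uint64(len(data))
	if err := f.Create(size); err != nil {
		return err
	}
	const chunk = uint64(4 * 1024 * 1024) // 4MB range cap
	for off := uint64(0); off < size; off += chunk {
		end := off + chunk
		if end > size {
			end = size
		}
		// WriteRange requires len == (End-Start)+1, hence End: end-1.
		r := bytes.NewReader(data[off:end])
		if err := f.WriteRange(r, storage.FileRange{Start: off, End: end - 1}, nil); err != nil {
			return err
		}
	}
	return nil
}
```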
360 vendor/github.com/Azure/azure-sdk-for-go/storage/fileserviceclient.go generated vendored Normal file
@ -0,0 +1,360 @@
package storage

import (
	"encoding/xml"
	"fmt"
	"net/http"
	"net/url"
	"strings"
)

// FileServiceClient contains operations for Microsoft Azure File Service.
type FileServiceClient struct {
	client Client
	auth   authentication
}

// ListSharesParameters defines the set of customizable parameters to make a
// List Shares call.
//
// See https://msdn.microsoft.com/en-us/library/azure/dn167009.aspx
type ListSharesParameters struct {
	Prefix     string
	Marker     string
	Include    string
	MaxResults uint
	Timeout    uint
}

// ShareListResponse contains the response fields from
// ListShares call.
//
// See https://msdn.microsoft.com/en-us/library/azure/dn167009.aspx
type ShareListResponse struct {
	XMLName    xml.Name `xml:"EnumerationResults"`
	Xmlns      string   `xml:"xmlns,attr"`
	Prefix     string   `xml:"Prefix"`
	Marker     string   `xml:"Marker"`
	NextMarker string   `xml:"NextMarker"`
	MaxResults int64    `xml:"MaxResults"`
	Shares     []Share  `xml:"Shares>Share"`
}

type compType string

const (
	compNone       compType = ""
	compList       compType = "list"
	compMetadata   compType = "metadata"
	compProperties compType = "properties"
	compRangeList  compType = "rangelist"
)

func (ct compType) String() string {
	return string(ct)
}

type resourceType string

const (
	resourceDirectory resourceType = "directory"
	resourceFile      resourceType = ""
	resourceShare     resourceType = "share"
)

func (rt resourceType) String() string {
	return string(rt)
}

func (p ListSharesParameters) getParameters() url.Values {
	out := url.Values{}

	if p.Prefix != "" {
		out.Set("prefix", p.Prefix)
	}
	if p.Marker != "" {
		out.Set("marker", p.Marker)
	}
	if p.Include != "" {
		out.Set("include", p.Include)
	}
	if p.MaxResults != 0 {
		out.Set("maxresults", fmt.Sprintf("%v", p.MaxResults))
	}
	if p.Timeout != 0 {
		out.Set("timeout", fmt.Sprintf("%v", p.Timeout))
	}

	return out
}

func (p ListDirsAndFilesParameters) getParameters() url.Values {
	out := url.Values{}

	if p.Marker != "" {
		out.Set("marker", p.Marker)
	}
	if p.MaxResults != 0 {
		out.Set("maxresults", fmt.Sprintf("%v", p.MaxResults))
	}
	if p.Timeout != 0 {
		out.Set("timeout", fmt.Sprintf("%v", p.Timeout))
	}

	return out
}

// returns url.Values for the specified types
func getURLInitValues(comp compType, res resourceType) url.Values {
	values := url.Values{}
	if comp != compNone {
		values.Set("comp", comp.String())
	}
	if res != resourceFile {
		values.Set("restype", res.String())
	}
	return values
}

// GetShareReference returns a Share object for the specified share name.
func (f FileServiceClient) GetShareReference(name string) Share {
	return Share{
		fsc:  &f,
		Name: name,
		Properties: ShareProperties{
			Quota: -1,
		},
	}
}

// ListShares returns the list of shares in a storage account along with
// pagination token and other response details.
//
// See https://msdn.microsoft.com/en-us/library/azure/dd179352.aspx
func (f FileServiceClient) ListShares(params ListSharesParameters) (*ShareListResponse, error) {
	q := mergeParams(params.getParameters(), url.Values{"comp": {"list"}})

	var out ShareListResponse
	resp, err := f.listContent("", q, nil)
	if err != nil {
		return nil, err
	}
	defer resp.body.Close()
	err = xmlUnmarshal(resp.body, &out)

	// assign our client to the newly created Share objects
	for i := range out.Shares {
		out.Shares[i].fsc = &f
	}
	return &out, err
}

// retrieves directory or share content
func (f FileServiceClient) listContent(path string, params url.Values, extraHeaders map[string]string) (*storageResponse, error) {
	if err := f.checkForStorageEmulator(); err != nil {
		return nil, err
	}

	uri := f.client.getEndpoint(fileServiceName, path, params)
	extraHeaders = f.client.protectUserAgent(extraHeaders)
	headers := mergeHeaders(f.client.getStandardHeaders(), extraHeaders)

	resp, err := f.client.exec(http.MethodGet, uri, headers, nil, f.auth)
	if err != nil {
		return nil, err
	}

	if err = checkRespCode(resp.statusCode, []int{http.StatusOK}); err != nil {
		resp.body.Close()
		return nil, err
	}

	return resp, nil
}

// returns true if the specified resource exists
func (f FileServiceClient) resourceExists(path string, res resourceType) (bool, http.Header, error) {
	if err := f.checkForStorageEmulator(); err != nil {
		return false, nil, err
	}

	uri := f.client.getEndpoint(fileServiceName, path, getURLInitValues(compNone, res))
	headers := f.client.getStandardHeaders()

	resp, err := f.client.exec(http.MethodHead, uri, headers, nil, f.auth)
	if resp != nil {
		defer resp.body.Close()
		if resp.statusCode == http.StatusOK || resp.statusCode == http.StatusNotFound {
			return resp.statusCode == http.StatusOK, resp.headers, nil
		}
	}
	return false, nil, err
}

// creates a resource depending on the specified resource type
func (f FileServiceClient) createResource(path string, res resourceType, extraHeaders map[string]string) (http.Header, error) {
	resp, err := f.createResourceNoClose(path, res, extraHeaders)
	if err != nil {
		return nil, err
	}
	defer resp.body.Close()
	return resp.headers, checkRespCode(resp.statusCode, []int{http.StatusCreated})
}

// creates a resource depending on the specified resource type, doesn't close the response body
func (f FileServiceClient) createResourceNoClose(path string, res resourceType, extraHeaders map[string]string) (*storageResponse, error) {
	if err := f.checkForStorageEmulator(); err != nil {
		return nil, err
	}

	values := getURLInitValues(compNone, res)
	uri := f.client.getEndpoint(fileServiceName, path, values)
	extraHeaders = f.client.protectUserAgent(extraHeaders)
	headers := mergeHeaders(f.client.getStandardHeaders(), extraHeaders)

	return f.client.exec(http.MethodPut, uri, headers, nil, f.auth)
}

// returns HTTP header data for the specified directory or share
func (f FileServiceClient) getResourceHeaders(path string, comp compType, res resourceType, verb string) (http.Header, error) {
	resp, err := f.getResourceNoClose(path, comp, res, verb, nil)
	if err != nil {
		return nil, err
	}
	defer resp.body.Close()

	if err = checkRespCode(resp.statusCode, []int{http.StatusOK}); err != nil {
		return nil, err
	}

	return resp.headers, nil
}

// gets the specified resource, doesn't close the response body
func (f FileServiceClient) getResourceNoClose(path string, comp compType, res resourceType, verb string, extraHeaders map[string]string) (*storageResponse, error) {
	if err := f.checkForStorageEmulator(); err != nil {
		return nil, err
	}

	params := getURLInitValues(comp, res)
	uri := f.client.getEndpoint(fileServiceName, path, params)
	extraHeaders = f.client.protectUserAgent(extraHeaders)
	headers := mergeHeaders(f.client.getStandardHeaders(), extraHeaders)

	return f.client.exec(verb, uri, headers, nil, f.auth)
}

// deletes the resource and returns the response
func (f FileServiceClient) deleteResource(path string, res resourceType) error {
	resp, err := f.deleteResourceNoClose(path, res)
	if err != nil {
		return err
	}
	defer resp.body.Close()
	return checkRespCode(resp.statusCode, []int{http.StatusAccepted})
}

// deletes the resource and returns the response, doesn't close the response body
func (f FileServiceClient) deleteResourceNoClose(path string, res resourceType) (*storageResponse, error) {
	if err := f.checkForStorageEmulator(); err != nil {
		return nil, err
	}

	values := getURLInitValues(compNone, res)
	uri := f.client.getEndpoint(fileServiceName, path, values)
	return f.client.exec(http.MethodDelete, uri, f.client.getStandardHeaders(), nil, f.auth)
}

// merges metadata into extraHeaders and returns extraHeaders
func mergeMDIntoExtraHeaders(metadata, extraHeaders map[string]string) map[string]string {
	if metadata == nil && extraHeaders == nil {
		return nil
	}
	if extraHeaders == nil {
		extraHeaders = make(map[string]string)
	}
	for k, v := range metadata {
		extraHeaders[userDefinedMetadataHeaderPrefix+k] = v
	}
	return extraHeaders
}

// merges extraHeaders into headers and returns headers
func mergeHeaders(headers, extraHeaders map[string]string) map[string]string {
	for k, v := range extraHeaders {
		headers[k] = v
	}
	return headers
}

// sets extra header data for the specified resource
func (f FileServiceClient) setResourceHeaders(path string, comp compType, res resourceType, extraHeaders map[string]string) (http.Header, error) {
	if err := f.checkForStorageEmulator(); err != nil {
		return nil, err
	}

	params := getURLInitValues(comp, res)
	uri := f.client.getEndpoint(fileServiceName, path, params)
	extraHeaders = f.client.protectUserAgent(extraHeaders)
	headers := mergeHeaders(f.client.getStandardHeaders(), extraHeaders)

	resp, err := f.client.exec(http.MethodPut, uri, headers, nil, f.auth)
	if err != nil {
		return nil, err
	}
	defer resp.body.Close()

	return resp.headers, checkRespCode(resp.statusCode, []int{http.StatusOK})
}

// gets metadata for the specified resource
func (f FileServiceClient) getMetadata(path string, res resourceType) (map[string]string, error) {
	if err := f.checkForStorageEmulator(); err != nil {
		return nil, err
	}

	headers, err := f.getResourceHeaders(path, compMetadata, res, http.MethodGet)
	if err != nil {
		return nil, err
	}

	return getMetadataFromHeaders(headers), nil
}

// returns a map of custom metadata values from the specified HTTP header
func getMetadataFromHeaders(header http.Header) map[string]string {
	metadata := make(map[string]string)
	for k, v := range header {
		// Can't trust CanonicalHeaderKey() to munge case
		// reliably. "_" is allowed in identifiers:
		// https://msdn.microsoft.com/en-us/library/azure/dd179414.aspx
		// https://msdn.microsoft.com/library/aa664670(VS.71).aspx
		// http://tools.ietf.org/html/rfc7230#section-3.2
		// ...but "_" is considered invalid by
		// CanonicalMIMEHeaderKey in
		// https://golang.org/src/net/textproto/reader.go?s=14615:14659#L542
		// so k can be "X-Ms-Meta-Foo" or "x-ms-meta-foo_bar".
		k = strings.ToLower(k)
		if len(v) == 0 || !strings.HasPrefix(k, strings.ToLower(userDefinedMetadataHeaderPrefix)) {
			continue
		}
		// metadata["foo"] = content of the last X-Ms-Meta-Foo header
		k = k[len(userDefinedMetadataHeaderPrefix):]
		metadata[k] = v[len(v)-1]
	}

	if len(metadata) == 0 {
		return nil
	}

	return metadata
}

// checkForStorageEmulator determines if the client is set up for use with
// Azure Storage Emulator, and returns a relevant error
func (f FileServiceClient) checkForStorageEmulator() error {
	if f.client.accountName == StorageEmulatorAccountName {
		return fmt.Errorf("Error: File service is not currently supported by Azure Storage Emulator")
	}
	return nil
}
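A standalone illustration of mergeMDIntoExtraHeaders above: user metadata keys are sent as x-ms-meta-* headers; the prefix constant itself is defined in queue.go below:
```
package main

import "fmt"

func main() {
	const prefix = "X-Ms-Meta-" // userDefinedMetadataHeaderPrefix in queue.go
	metadata := map[string]string{"owner": "minio", "env": "prod"} // sample values
	extra := map[string]string{"x-ms-type": "file"}
	for k, v := range metadata {
		extra[prefix+k] = v
	}
	fmt.Println(extra)
	// e.g. map[X-Ms-Meta-env:prod X-Ms-Meta-owner:minio x-ms-type:file]
}
```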
346 vendor/github.com/Azure/azure-sdk-for-go/storage/queue.go generated vendored Normal file
@ -0,0 +1,346 @@
package storage

import (
	"encoding/xml"
	"fmt"
	"net/http"
	"net/url"
	"strconv"
	"strings"
)

const (
	// casing is per Golang's http.Header canonicalizing the header names.
	approximateMessagesCountHeader  = "X-Ms-Approximate-Messages-Count"
	userDefinedMetadataHeaderPrefix = "X-Ms-Meta-"
)

// QueueServiceClient contains operations for Microsoft Azure Queue Storage
// Service.
type QueueServiceClient struct {
	client Client
	auth   authentication
}

func pathForQueue(queue string) string         { return fmt.Sprintf("/%s", queue) }
func pathForQueueMessages(queue string) string { return fmt.Sprintf("/%s/messages", queue) }
func pathForMessage(queue, name string) string { return fmt.Sprintf("/%s/messages/%s", queue, name) }

type putMessageRequest struct {
	XMLName     xml.Name `xml:"QueueMessage"`
	MessageText string   `xml:"MessageText"`
}

// PutMessageParameters is the set of options that can be specified for the
// Put Message operation. A zero struct does not use any preferences for the
// request.
type PutMessageParameters struct {
	VisibilityTimeout int
	MessageTTL        int
}

func (p PutMessageParameters) getParameters() url.Values {
	out := url.Values{}
	if p.VisibilityTimeout != 0 {
		out.Set("visibilitytimeout", strconv.Itoa(p.VisibilityTimeout))
	}
	if p.MessageTTL != 0 {
		out.Set("messagettl", strconv.Itoa(p.MessageTTL))
	}
	return out
}

// GetMessagesParameters is the set of options that can be specified for the
// Get Messages operation. A zero struct does not use any preferences for the
// request.
type GetMessagesParameters struct {
	NumOfMessages     int
	VisibilityTimeout int
}

func (p GetMessagesParameters) getParameters() url.Values {
	out := url.Values{}
	if p.NumOfMessages != 0 {
		out.Set("numofmessages", strconv.Itoa(p.NumOfMessages))
	}
	if p.VisibilityTimeout != 0 {
		out.Set("visibilitytimeout", strconv.Itoa(p.VisibilityTimeout))
	}
	return out
}

// PeekMessagesParameters is the set of options that can be specified for the
// Peek Messages operation. A zero struct does not use any preferences for the
// request.
type PeekMessagesParameters struct {
	NumOfMessages int
}

func (p PeekMessagesParameters) getParameters() url.Values {
	out := url.Values{"peekonly": {"true"}} // Required for peek operation
	if p.NumOfMessages != 0 {
		out.Set("numofmessages", strconv.Itoa(p.NumOfMessages))
	}
	return out
}

// UpdateMessageParameters is the set of options that can be specified for the
// Update Message operation. A zero struct does not use any preferences for the
// request.
type UpdateMessageParameters struct {
	PopReceipt        string
	VisibilityTimeout int
}

func (p UpdateMessageParameters) getParameters() url.Values {
	out := url.Values{}
	if p.PopReceipt != "" {
		out.Set("popreceipt", p.PopReceipt)
	}
	if p.VisibilityTimeout != 0 {
		out.Set("visibilitytimeout", strconv.Itoa(p.VisibilityTimeout))
	}
	return out
}
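A standalone illustration of what these getParameters helpers produce; zero-valued fields are simply omitted from the query, and the sample values are hypothetical:
```
package main

import (
	"fmt"
	"net/url"
	"strconv"
)

func main() {
	out := url.Values{}
	visibilityTimeout, messageTTL := 30, 0 // sample PutMessageParameters values
	if visibilityTimeout != 0 {
		out.Set("visibilitytimeout", strconv.Itoa(visibilityTimeout))
	}
	if messageTTL != 0 {
		out.Set("messagettl", strconv.Itoa(messageTTL))
	}
	fmt.Println(out.Encode()) // visibilitytimeout=30
}
```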
// GetMessagesResponse represents a response returned from Get Messages
// operation.
type GetMessagesResponse struct {
	XMLName           xml.Name             `xml:"QueueMessagesList"`
	QueueMessagesList []GetMessageResponse `xml:"QueueMessage"`
}

// GetMessageResponse represents a QueueMessage object returned from Get
// Messages operation response.
type GetMessageResponse struct {
	MessageID       string `xml:"MessageId"`
	InsertionTime   string `xml:"InsertionTime"`
	ExpirationTime  string `xml:"ExpirationTime"`
	PopReceipt      string `xml:"PopReceipt"`
	TimeNextVisible string `xml:"TimeNextVisible"`
	DequeueCount    int    `xml:"DequeueCount"`
	MessageText     string `xml:"MessageText"`
}

// PeekMessagesResponse represents a response returned from Peek Messages
// operation.
type PeekMessagesResponse struct {
	XMLName           xml.Name              `xml:"QueueMessagesList"`
	QueueMessagesList []PeekMessageResponse `xml:"QueueMessage"`
}

// PeekMessageResponse represents a QueueMessage object returned from Peek
// Messages operation response.
type PeekMessageResponse struct {
	MessageID      string `xml:"MessageId"`
	InsertionTime  string `xml:"InsertionTime"`
	ExpirationTime string `xml:"ExpirationTime"`
	DequeueCount   int    `xml:"DequeueCount"`
	MessageText    string `xml:"MessageText"`
}

// QueueMetadataResponse represents user-defined metadata and queue
// properties on a specific queue.
//
// See https://msdn.microsoft.com/en-us/library/azure/dd179384.aspx
type QueueMetadataResponse struct {
	ApproximateMessageCount int
	UserDefinedMetadata     map[string]string
}

// SetMetadata operation sets user-defined metadata on the specified queue.
// Metadata is associated with the queue as name-value pairs.
//
// See https://msdn.microsoft.com/en-us/library/azure/dd179348.aspx
func (c QueueServiceClient) SetMetadata(name string, metadata map[string]string) error {
	uri := c.client.getEndpoint(queueServiceName, pathForQueue(name), url.Values{"comp": []string{"metadata"}})
	metadata = c.client.protectUserAgent(metadata)
	headers := c.client.getStandardHeaders()
	for k, v := range metadata {
		headers[userDefinedMetadataHeaderPrefix+k] = v
	}

	resp, err := c.client.exec(http.MethodPut, uri, headers, nil, c.auth)
	if err != nil {
		return err
	}
	defer resp.body.Close()

	return checkRespCode(resp.statusCode, []int{http.StatusNoContent})
}

// GetMetadata operation retrieves user-defined metadata and queue
// properties on the specified queue. Metadata is associated with
// the queue as name-value pairs.
//
// See https://msdn.microsoft.com/en-us/library/azure/dd179384.aspx
//
// Because of the way Golang's http client (and http.Header in particular)
// canonicalizes header names, the returned metadata names are always
// all lower case.
func (c QueueServiceClient) GetMetadata(name string) (QueueMetadataResponse, error) {
	qm := QueueMetadataResponse{}
	qm.UserDefinedMetadata = make(map[string]string)
	uri := c.client.getEndpoint(queueServiceName, pathForQueue(name), url.Values{"comp": []string{"metadata"}})
	headers := c.client.getStandardHeaders()
	resp, err := c.client.exec(http.MethodGet, uri, headers, nil, c.auth)
	if err != nil {
		return qm, err
	}
	defer resp.body.Close()

	for k, v := range resp.headers {
		if len(v) != 1 {
			return qm, fmt.Errorf("Unexpected number of values (%d) in response header '%s'", len(v), k)
		}

		value := v[0]

		if k == approximateMessagesCountHeader {
			qm.ApproximateMessageCount, err = strconv.Atoi(value)
			if err != nil {
				return qm, fmt.Errorf("Unexpected value in response header '%s': '%s' ", k, value)
			}
		} else if strings.HasPrefix(k, userDefinedMetadataHeaderPrefix) {
name := strings.TrimPrefix(k, userDefinedMetadataHeaderPrefix)
|
||||
qm.UserDefinedMetadata[strings.ToLower(name)] = value
|
||||
}
|
||||
}
|
||||
|
||||
return qm, checkRespCode(resp.statusCode, []int{http.StatusOK})
|
||||
}
|
||||
|
||||
// CreateQueue operation creates a queue under the given account.
|
||||
//
|
||||
// See https://msdn.microsoft.com/en-us/library/azure/dd179342.aspx
|
||||
func (c QueueServiceClient) CreateQueue(name string) error {
|
||||
uri := c.client.getEndpoint(queueServiceName, pathForQueue(name), url.Values{})
|
||||
headers := c.client.getStandardHeaders()
|
||||
resp, err := c.client.exec(http.MethodPut, uri, headers, nil, c.auth)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer resp.body.Close()
|
||||
return checkRespCode(resp.statusCode, []int{http.StatusCreated})
|
||||
}
|
||||
|
||||
// DeleteQueue operation permanently deletes the specified queue.
|
||||
//
|
||||
// See https://msdn.microsoft.com/en-us/library/azure/dd179436.aspx
|
||||
func (c QueueServiceClient) DeleteQueue(name string) error {
|
||||
uri := c.client.getEndpoint(queueServiceName, pathForQueue(name), url.Values{})
|
||||
resp, err := c.client.exec(http.MethodDelete, uri, c.client.getStandardHeaders(), nil, c.auth)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer resp.body.Close()
|
||||
return checkRespCode(resp.statusCode, []int{http.StatusNoContent})
|
||||
}
|
||||
|
||||
// QueueExists returns true if a queue with given name exists.
|
||||
func (c QueueServiceClient) QueueExists(name string) (bool, error) {
|
||||
uri := c.client.getEndpoint(queueServiceName, pathForQueue(name), url.Values{"comp": {"metadata"}})
|
||||
resp, err := c.client.exec(http.MethodGet, uri, c.client.getStandardHeaders(), nil, c.auth)
|
||||
if resp != nil && (resp.statusCode == http.StatusOK || resp.statusCode == http.StatusNotFound) {
|
||||
return resp.statusCode == http.StatusOK, nil
|
||||
}
|
||||
|
||||
return false, err
|
||||
}
|
||||
|
||||
// PutMessage operation adds a new message to the back of the message queue.
|
||||
//
|
||||
// See https://msdn.microsoft.com/en-us/library/azure/dd179346.aspx
|
||||
func (c QueueServiceClient) PutMessage(queue string, message string, params PutMessageParameters) error {
|
||||
uri := c.client.getEndpoint(queueServiceName, pathForQueueMessages(queue), params.getParameters())
|
||||
req := putMessageRequest{MessageText: message}
|
||||
body, nn, err := xmlMarshal(req)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
headers := c.client.getStandardHeaders()
|
||||
headers["Content-Length"] = strconv.Itoa(nn)
|
||||
resp, err := c.client.exec(http.MethodPost, uri, headers, body, c.auth)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer resp.body.Close()
|
||||
return checkRespCode(resp.statusCode, []int{http.StatusCreated})
|
||||
}
|
||||
|
||||
// ClearMessages operation deletes all messages from the specified queue.
|
||||
//
|
||||
// See https://msdn.microsoft.com/en-us/library/azure/dd179454.aspx
|
||||
func (c QueueServiceClient) ClearMessages(queue string) error {
|
||||
uri := c.client.getEndpoint(queueServiceName, pathForQueueMessages(queue), url.Values{})
|
||||
resp, err := c.client.exec(http.MethodDelete, uri, c.client.getStandardHeaders(), nil, c.auth)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer resp.body.Close()
|
||||
return checkRespCode(resp.statusCode, []int{http.StatusNoContent})
|
||||
}
|
||||
|
||||
// GetMessages operation retrieves one or more messages from the front of the
|
||||
// queue.
|
||||
//
|
||||
// See https://msdn.microsoft.com/en-us/library/azure/dd179474.aspx
|
||||
func (c QueueServiceClient) GetMessages(queue string, params GetMessagesParameters) (GetMessagesResponse, error) {
|
||||
var r GetMessagesResponse
|
||||
uri := c.client.getEndpoint(queueServiceName, pathForQueueMessages(queue), params.getParameters())
|
||||
resp, err := c.client.exec(http.MethodGet, uri, c.client.getStandardHeaders(), nil, c.auth)
|
||||
if err != nil {
|
||||
return r, err
|
||||
}
|
||||
defer resp.body.Close()
|
||||
err = xmlUnmarshal(resp.body, &r)
|
||||
return r, err
|
||||
}
|
||||
|
||||
// PeekMessages retrieves one or more messages from the front of the queue, but
|
||||
// does not alter the visibility of the message.
|
||||
//
|
||||
// See https://msdn.microsoft.com/en-us/library/azure/dd179472.aspx
|
||||
func (c QueueServiceClient) PeekMessages(queue string, params PeekMessagesParameters) (PeekMessagesResponse, error) {
|
||||
var r PeekMessagesResponse
|
||||
uri := c.client.getEndpoint(queueServiceName, pathForQueueMessages(queue), params.getParameters())
|
||||
resp, err := c.client.exec(http.MethodGet, uri, c.client.getStandardHeaders(), nil, c.auth)
|
||||
if err != nil {
|
||||
return r, err
|
||||
}
|
||||
defer resp.body.Close()
|
||||
err = xmlUnmarshal(resp.body, &r)
|
||||
return r, err
|
||||
}
|
||||
|
||||
// DeleteMessage operation deletes the specified message.
|
||||
//
|
||||
// See https://msdn.microsoft.com/en-us/library/azure/dd179347.aspx
|
||||
func (c QueueServiceClient) DeleteMessage(queue, messageID, popReceipt string) error {
|
||||
uri := c.client.getEndpoint(queueServiceName, pathForMessage(queue, messageID), url.Values{
|
||||
"popreceipt": {popReceipt}})
|
||||
resp, err := c.client.exec(http.MethodDelete, uri, c.client.getStandardHeaders(), nil, c.auth)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer resp.body.Close()
|
||||
return checkRespCode(resp.statusCode, []int{http.StatusNoContent})
|
||||
}
|
||||
|
||||
// UpdateMessage operation deletes the specified message.
|
||||
//
|
||||
// See https://msdn.microsoft.com/en-us/library/azure/hh452234.aspx
|
||||
func (c QueueServiceClient) UpdateMessage(queue string, messageID string, message string, params UpdateMessageParameters) error {
|
||||
uri := c.client.getEndpoint(queueServiceName, pathForMessage(queue, messageID), params.getParameters())
|
||||
req := putMessageRequest{MessageText: message}
|
||||
body, nn, err := xmlMarshal(req)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
headers := c.client.getStandardHeaders()
|
||||
headers["Content-Length"] = fmt.Sprintf("%d", nn)
|
||||
resp, err := c.client.exec(http.MethodPut, uri, headers, body, c.auth)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer resp.body.Close()
|
||||
return checkRespCode(resp.statusCode, []int{http.StatusNoContent})
|
||||
}
|
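For orientation, here is a minimal end-to-end sketch of the queue API above. It is not part of the commit; it assumes storage.NewBasicClient and GetQueueService from this same vendored package, and the account name/key are placeholders.

package main

import (
	"fmt"
	"log"

	"github.com/Azure/azure-sdk-for-go/storage"
)

func main() {
	// NewBasicClient and GetQueueService come from this vendored package.
	client, err := storage.NewBasicClient("accountname", "accountkey")
	if err != nil {
		log.Fatal(err)
	}
	qsc := client.GetQueueService()

	if err = qsc.CreateQueue("tasks"); err != nil {
		log.Fatal(err)
	}
	// Enqueue with a 7-day TTL (in seconds), then dequeue one message
	// that stays invisible to other consumers for 30 seconds.
	if err = qsc.PutMessage("tasks", "hello", storage.PutMessageParameters{MessageTTL: 604800}); err != nil {
		log.Fatal(err)
	}
	r, err := qsc.GetMessages("tasks", storage.GetMessagesParameters{NumOfMessages: 1, VisibilityTimeout: 30})
	if err != nil {
		log.Fatal(err)
	}
	for _, m := range r.QueueMessagesList {
		fmt.Println(m.MessageText)
		// The pop receipt returned by GetMessages authorizes the delete.
		if err = qsc.DeleteMessage("tasks", m.MessageID, m.PopReceipt); err != nil {
			log.Fatal(err)
		}
	}
}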
186
vendor/github.com/Azure/azure-sdk-for-go/storage/share.go
generated
vendored
Normal file
@ -0,0 +1,186 @@
package storage

import (
	"fmt"
	"net/http"
	"net/url"
	"strconv"
)

// Share represents an Azure file share.
type Share struct {
	fsc        *FileServiceClient
	Name       string          `xml:"Name"`
	Properties ShareProperties `xml:"Properties"`
	Metadata   map[string]string
}

// ShareProperties contains various properties of a share.
type ShareProperties struct {
	LastModified string `xml:"Last-Modified"`
	Etag         string `xml:"Etag"`
	Quota        int    `xml:"Quota"`
}

// builds the complete path for this share object.
func (s *Share) buildPath() string {
	return fmt.Sprintf("/%s", s.Name)
}

// Create this share under the associated account.
// If a share with the same name already exists, the operation fails.
//
// See https://msdn.microsoft.com/en-us/library/azure/dn167008.aspx
func (s *Share) Create() error {
	headers, err := s.fsc.createResource(s.buildPath(), resourceShare, mergeMDIntoExtraHeaders(s.Metadata, nil))
	if err != nil {
		return err
	}

	s.updateEtagAndLastModified(headers)
	return nil
}

// CreateIfNotExists creates this share under the associated account if
// it does not exist. Returns true if the share is newly created or false if
// the share already exists.
//
// See https://msdn.microsoft.com/en-us/library/azure/dn167008.aspx
func (s *Share) CreateIfNotExists() (bool, error) {
	resp, err := s.fsc.createResourceNoClose(s.buildPath(), resourceShare, nil)
	if resp != nil {
		defer resp.body.Close()
		if resp.statusCode == http.StatusCreated || resp.statusCode == http.StatusConflict {
			if resp.statusCode == http.StatusCreated {
				s.updateEtagAndLastModified(resp.headers)
				return true, nil
			}
			return false, s.FetchAttributes()
		}
	}

	return false, err
}

// Delete marks this share for deletion. The share along with any files
// and directories contained within it are later deleted during garbage
// collection. If the share does not exist, the operation fails.
//
// See https://msdn.microsoft.com/en-us/library/azure/dn689090.aspx
func (s *Share) Delete() error {
	return s.fsc.deleteResource(s.buildPath(), resourceShare)
}

// DeleteIfExists operation marks this share for deletion if it exists.
//
// See https://msdn.microsoft.com/en-us/library/azure/dn689090.aspx
func (s *Share) DeleteIfExists() (bool, error) {
	resp, err := s.fsc.deleteResourceNoClose(s.buildPath(), resourceShare)
	if resp != nil {
		defer resp.body.Close()
		if resp.statusCode == http.StatusAccepted || resp.statusCode == http.StatusNotFound {
			return resp.statusCode == http.StatusAccepted, nil
		}
	}
	return false, err
}

// Exists returns true if this share already exists
// on the storage account, otherwise returns false.
func (s *Share) Exists() (bool, error) {
	exists, headers, err := s.fsc.resourceExists(s.buildPath(), resourceShare)
	if exists {
		s.updateEtagAndLastModified(headers)
		s.updateQuota(headers)
	}
	return exists, err
}

// FetchAttributes retrieves metadata and properties for this share.
func (s *Share) FetchAttributes() error {
	headers, err := s.fsc.getResourceHeaders(s.buildPath(), compNone, resourceShare, http.MethodHead)
	if err != nil {
		return err
	}

	s.updateEtagAndLastModified(headers)
	s.updateQuota(headers)
	s.Metadata = getMetadataFromHeaders(headers)

	return nil
}

// GetRootDirectoryReference returns a Directory object at the root of this share.
func (s *Share) GetRootDirectoryReference() *Directory {
	return &Directory{
		fsc:   s.fsc,
		share: s,
	}
}

// ServiceClient returns the FileServiceClient associated with this share.
func (s *Share) ServiceClient() *FileServiceClient {
	return s.fsc
}

// SetMetadata replaces the metadata for this share.
//
// Some keys may be converted to Camel-Case before sending. All keys
// are returned in lower case by GetShareMetadata. HTTP header names
// are case-insensitive so case munging should not matter to other
// applications either.
//
// See https://msdn.microsoft.com/en-us/library/azure/dd179414.aspx
func (s *Share) SetMetadata() error {
	headers, err := s.fsc.setResourceHeaders(s.buildPath(), compMetadata, resourceShare, mergeMDIntoExtraHeaders(s.Metadata, nil))
	if err != nil {
		return err
	}

	s.updateEtagAndLastModified(headers)
	return nil
}

// SetProperties sets system properties for this share.
//
// Some keys may be converted to Camel-Case before sending. All keys
// are returned in lower case by SetShareProperties. HTTP header names
// are case-insensitive so case munging should not matter to other
// applications either.
//
// See https://msdn.microsoft.com/en-us/library/azure/mt427368.aspx
func (s *Share) SetProperties() error {
	if s.Properties.Quota < 1 || s.Properties.Quota > 5120 {
		return fmt.Errorf("invalid value %v for quota, valid values are [1, 5120]", s.Properties.Quota)
	}

	headers, err := s.fsc.setResourceHeaders(s.buildPath(), compProperties, resourceShare, map[string]string{
		"x-ms-share-quota": strconv.Itoa(s.Properties.Quota),
	})
	if err != nil {
		return err
	}

	s.updateEtagAndLastModified(headers)
	return nil
}

// updates Etag and last modified date
func (s *Share) updateEtagAndLastModified(headers http.Header) {
	s.Properties.Etag = headers.Get("Etag")
	s.Properties.LastModified = headers.Get("Last-Modified")
}

// updates quota value
func (s *Share) updateQuota(headers http.Header) {
	quota, err := strconv.Atoi(headers.Get("x-ms-share-quota"))
	if err == nil {
		s.Properties.Quota = quota
	}
}

// URL gets the canonical URL to this share. This method does not create a publicly accessible
// URL if the share is private and this method does not check if the share exists.
func (s *Share) URL() string {
	return s.fsc.client.getEndpoint(fileServiceName, s.buildPath(), url.Values{})
}
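A short usage sketch for the Share object above, not part of the commit. It assumes a GetShareReference helper on FileServiceClient, which lives in another file of this vendored package; credentials are placeholders.

package main

import (
	"fmt"
	"log"

	"github.com/Azure/azure-sdk-for-go/storage"
)

func main() {
	client, err := storage.NewBasicClient("accountname", "accountkey")
	if err != nil {
		log.Fatal(err)
	}
	// GetShareReference only builds a local reference; no network call yet.
	share := client.GetFileService().GetShareReference("backups")

	created, err := share.CreateIfNotExists()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("newly created:", created)

	// Quota is validated client-side against the documented [1, 5120] range
	// before the x-ms-share-quota header is sent.
	share.Properties.Quota = 10
	if err = share.SetProperties(); err != nil {
		log.Fatal(err)
	}
	fmt.Println(share.URL())
}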
47
vendor/github.com/Azure/azure-sdk-for-go/storage/storagepolicy.go
generated
vendored
Normal file
@ -0,0 +1,47 @@
package storage

import (
	"strings"
	"time"
)

// AccessPolicyDetailsXML has specifics about an access policy
// annotated with XML details.
type AccessPolicyDetailsXML struct {
	StartTime  time.Time `xml:"Start"`
	ExpiryTime time.Time `xml:"Expiry"`
	Permission string    `xml:"Permission"`
}

// SignedIdentifier is a wrapper for a specific policy
type SignedIdentifier struct {
	ID           string                 `xml:"Id"`
	AccessPolicy AccessPolicyDetailsXML `xml:"AccessPolicy"`
}

// SignedIdentifiers part of the response from GetPermissions call.
type SignedIdentifiers struct {
	SignedIdentifiers []SignedIdentifier `xml:"SignedIdentifier"`
}

// AccessPolicy is the response type from the GetPermissions call.
type AccessPolicy struct {
	SignedIdentifiersList SignedIdentifiers `xml:"SignedIdentifiers"`
}

// convertAccessPolicyToXMLStructs converts an access policy, given as plain
// values that are convenient for API usage, into the SignedIdentifier struct
// which will get converted to XML.
func convertAccessPolicyToXMLStructs(id string, startTime time.Time, expiryTime time.Time, permissions string) SignedIdentifier {
	return SignedIdentifier{
		ID: id,
		AccessPolicy: AccessPolicyDetailsXML{
			StartTime:  startTime.UTC().Round(time.Second),
			ExpiryTime: expiryTime.UTC().Round(time.Second),
			Permission: permissions,
		},
	}
}

func updatePermissions(permissions, permission string) bool {
	return strings.Contains(permissions, permission)
}
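To see what these types put on the wire, the standalone sketch below marshals a one-policy ACL to XML. It is illustrative only; the exported types are copied locally so the snippet runs outside the package.

package main

import (
	"encoding/xml"
	"fmt"
	"log"
	"time"
)

// Local copies of the exported types above, trimmed to what xml.Marshal needs.
type AccessPolicyDetailsXML struct {
	StartTime  time.Time `xml:"Start"`
	ExpiryTime time.Time `xml:"Expiry"`
	Permission string    `xml:"Permission"`
}

type SignedIdentifier struct {
	ID           string                 `xml:"Id"`
	AccessPolicy AccessPolicyDetailsXML `xml:"AccessPolicy"`
}

type SignedIdentifiers struct {
	SignedIdentifiers []SignedIdentifier `xml:"SignedIdentifier"`
}

func main() {
	start := time.Date(2017, 3, 1, 0, 0, 0, 0, time.UTC)
	sil := SignedIdentifiers{[]SignedIdentifier{{
		ID: "policy-1",
		AccessPolicy: AccessPolicyDetailsXML{
			StartTime:  start,
			ExpiryTime: start.AddDate(0, 1, 0),
			Permission: "rau", // read, append, update; a "d" would add delete
		},
	}}}
	out, err := xml.MarshalIndent(sil, "", "  ")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(string(out))
}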
258
vendor/github.com/Azure/azure-sdk-for-go/storage/table.go
generated
vendored
Normal file
@ -0,0 +1,258 @@
package storage

import (
	"bytes"
	"encoding/json"
	"fmt"
	"io"
	"net/http"
	"net/url"
	"strconv"
	"time"
)

// TableServiceClient contains operations for Microsoft Azure Table Storage
// Service.
type TableServiceClient struct {
	client Client
	auth   authentication
}

// AzureTable is the typedef of the Azure Table name
type AzureTable string

const (
	tablesURIPath = "/Tables"
)

type createTableRequest struct {
	TableName string `json:"TableName"`
}

// TableAccessPolicy are used for SETTING table policies
type TableAccessPolicy struct {
	ID         string
	StartTime  time.Time
	ExpiryTime time.Time
	CanRead    bool
	CanAppend  bool
	CanUpdate  bool
	CanDelete  bool
}

func pathForTable(table AzureTable) string { return fmt.Sprintf("%s", table) }

func (c *TableServiceClient) getStandardHeaders() map[string]string {
	return map[string]string{
		"x-ms-version":   "2015-02-21",
		"x-ms-date":      currentTimeRfc1123Formatted(),
		"Accept":         "application/json;odata=nometadata",
		"Accept-Charset": "UTF-8",
		"Content-Type":   "application/json",
		userAgentHeader:  c.client.userAgent,
	}
}

// QueryTables returns the tables created in the
// *TableServiceClient storage account.
func (c *TableServiceClient) QueryTables() ([]AzureTable, error) {
	uri := c.client.getEndpoint(tableServiceName, tablesURIPath, url.Values{})

	headers := c.getStandardHeaders()
	headers["Content-Length"] = "0"

	resp, err := c.client.execInternalJSON(http.MethodGet, uri, headers, nil, c.auth)
	if err != nil {
		return nil, err
	}
	defer resp.body.Close()

	if err := checkRespCode(resp.statusCode, []int{http.StatusOK}); err != nil {
		return nil, err
	}

	buf := new(bytes.Buffer)
	if _, err := buf.ReadFrom(resp.body); err != nil {
		return nil, err
	}

	var respArray queryTablesResponse
	if err := json.Unmarshal(buf.Bytes(), &respArray); err != nil {
		return nil, err
	}

	s := make([]AzureTable, len(respArray.TableName))
	for i, elem := range respArray.TableName {
		s[i] = AzureTable(elem.TableName)
	}

	return s, nil
}

// CreateTable creates the table given the specific
// name. This function fails if the name is not compliant
// with the specification or the table already exists.
func (c *TableServiceClient) CreateTable(table AzureTable) error {
	uri := c.client.getEndpoint(tableServiceName, tablesURIPath, url.Values{})

	headers := c.getStandardHeaders()

	req := createTableRequest{TableName: string(table)}
	buf := new(bytes.Buffer)

	if err := json.NewEncoder(buf).Encode(req); err != nil {
		return err
	}

	headers["Content-Length"] = fmt.Sprintf("%d", buf.Len())

	resp, err := c.client.execInternalJSON(http.MethodPost, uri, headers, buf, c.auth)

	if err != nil {
		return err
	}
	defer resp.body.Close()

	if err := checkRespCode(resp.statusCode, []int{http.StatusCreated}); err != nil {
		return err
	}

	return nil
}

// DeleteTable deletes the table given the specific
// name. This function fails if the table is not present.
// Be advised: DeleteTable deletes all the entries
// that may be present.
func (c *TableServiceClient) DeleteTable(table AzureTable) error {
	uri := c.client.getEndpoint(tableServiceName, tablesURIPath, url.Values{})
	uri += fmt.Sprintf("('%s')", string(table))

	headers := c.getStandardHeaders()

	headers["Content-Length"] = "0"

	resp, err := c.client.execInternalJSON(http.MethodDelete, uri, headers, nil, c.auth)

	if err != nil {
		return err
	}
	defer resp.body.Close()

	if err := checkRespCode(resp.statusCode, []int{http.StatusNoContent}); err != nil {
		return err
	}
	return nil
}

// SetTablePermissions sets up table ACL permissions as per REST details https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Set-Table-ACL
func (c *TableServiceClient) SetTablePermissions(table AzureTable, policies []TableAccessPolicy, timeout uint) (err error) {
	params := url.Values{"comp": {"acl"}}

	if timeout > 0 {
		params.Add("timeout", fmt.Sprint(timeout))
	}

	uri := c.client.getEndpoint(tableServiceName, string(table), params)
	headers := c.client.getStandardHeaders()

	body, length, err := generateTableACLPayload(policies)
	if err != nil {
		return err
	}
	headers["Content-Length"] = fmt.Sprintf("%v", length)

	resp, err := c.client.execInternalJSON(http.MethodPut, uri, headers, body, c.auth)
	if err != nil {
		return err
	}
	defer resp.body.Close()

	if err := checkRespCode(resp.statusCode, []int{http.StatusNoContent}); err != nil {
		return err
	}
	return nil
}

func generateTableACLPayload(policies []TableAccessPolicy) (io.Reader, int, error) {
	sil := SignedIdentifiers{
		SignedIdentifiers: []SignedIdentifier{},
	}
	for _, tap := range policies {
		permission := generateTablePermissions(&tap)
		signedIdentifier := convertAccessPolicyToXMLStructs(tap.ID, tap.StartTime, tap.ExpiryTime, permission)
		sil.SignedIdentifiers = append(sil.SignedIdentifiers, signedIdentifier)
	}
	return xmlMarshal(sil)
}

// GetTablePermissions gets the table ACL permissions, as per REST details https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/get-table-acl
func (c *TableServiceClient) GetTablePermissions(table AzureTable, timeout int) (permissionResponse []TableAccessPolicy, err error) {
	params := url.Values{"comp": {"acl"}}

	if timeout > 0 {
		params.Add("timeout", strconv.Itoa(timeout))
	}

	uri := c.client.getEndpoint(tableServiceName, string(table), params)
	headers := c.client.getStandardHeaders()
	resp, err := c.client.execInternalJSON(http.MethodGet, uri, headers, nil, c.auth)
	if err != nil {
		return nil, err
	}
	defer resp.body.Close()

	if err = checkRespCode(resp.statusCode, []int{http.StatusOK}); err != nil {
		return nil, err
	}

	var ap AccessPolicy
	err = xmlUnmarshal(resp.body, &ap.SignedIdentifiersList)
	if err != nil {
		return nil, err
	}
	out := updateTableAccessPolicy(ap)
	return out, nil
}

func updateTableAccessPolicy(ap AccessPolicy) []TableAccessPolicy {
	out := []TableAccessPolicy{}
	for _, policy := range ap.SignedIdentifiersList.SignedIdentifiers {
		tap := TableAccessPolicy{
			ID:         policy.ID,
			StartTime:  policy.AccessPolicy.StartTime,
			ExpiryTime: policy.AccessPolicy.ExpiryTime,
		}
		tap.CanRead = updatePermissions(policy.AccessPolicy.Permission, "r")
		tap.CanAppend = updatePermissions(policy.AccessPolicy.Permission, "a")
		tap.CanUpdate = updatePermissions(policy.AccessPolicy.Permission, "u")
		tap.CanDelete = updatePermissions(policy.AccessPolicy.Permission, "d")

		out = append(out, tap)
	}
	return out
}

func generateTablePermissions(tap *TableAccessPolicy) (permissions string) {
	// generate the permissions string (raud).
	// still want the end user API to have bool flags.
	permissions = ""

	if tap.CanRead {
		permissions += "r"
	}

	if tap.CanAppend {
		permissions += "a"
	}

	if tap.CanUpdate {
		permissions += "u"
	}

	if tap.CanDelete {
		permissions += "d"
	}
	return permissions
}
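A usage sketch for the table client above, not part of the commit; it assumes GetTableService on Client from this same vendored package and placeholder credentials.

package main

import (
	"fmt"
	"log"
	"time"

	"github.com/Azure/azure-sdk-for-go/storage"
)

func main() {
	client, err := storage.NewBasicClient("accountname", "accountkey")
	if err != nil {
		log.Fatal(err)
	}
	tsc := client.GetTableService()

	table := storage.AzureTable("events")
	if err = tsc.CreateTable(table); err != nil {
		log.Fatal(err)
	}

	// One read-only policy for the next 24 hours; the bool flags are
	// folded into the "raud" permission string by generateTablePermissions.
	policy := storage.TableAccessPolicy{
		ID:         "read-only",
		StartTime:  time.Now().UTC(),
		ExpiryTime: time.Now().UTC().Add(24 * time.Hour),
		CanRead:    true,
	}
	if err = tsc.SetTablePermissions(table, []storage.TableAccessPolicy{policy}, 30); err != nil {
		log.Fatal(err)
	}

	tables, err := tsc.QueryTables()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(tables)
}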
345
vendor/github.com/Azure/azure-sdk-for-go/storage/table_entities.go
generated
vendored
Normal file
@ -0,0 +1,345 @@
package storage

import (
	"bytes"
	"encoding/json"
	"fmt"
	"io"
	"net/http"
	"net/url"
	"reflect"
)

// Annotating as secure for gas scanning
/* #nosec */
const (
	partitionKeyNode                    = "PartitionKey"
	rowKeyNode                          = "RowKey"
	tag                                 = "table"
	tagIgnore                           = "-"
	continuationTokenPartitionKeyHeader = "X-Ms-Continuation-Nextpartitionkey"
	continuationTokenRowHeader          = "X-Ms-Continuation-Nextrowkey"
	maxTopParameter                     = 1000
)

type queryTablesResponse struct {
	TableName []struct {
		TableName string `json:"TableName"`
	} `json:"value"`
}

const (
	tableOperationTypeInsert          = iota
	tableOperationTypeUpdate          = iota
	tableOperationTypeMerge           = iota
	tableOperationTypeInsertOrReplace = iota
	tableOperationTypeInsertOrMerge   = iota
)

type tableOperation int

// TableEntity interface specifies
// the functions needed to support
// marshaling and unmarshaling into
// Azure Tables. The struct must only contain
// simple types because Azure Tables do not
// support hierarchy.
type TableEntity interface {
	PartitionKey() string
	RowKey() string
	SetPartitionKey(string) error
	SetRowKey(string) error
}

// ContinuationToken is an opaque (i.e. not useful to inspect)
// struct that Get... methods can return if there are more
// entries to be returned than the ones already
// returned. Just pass it to the same function to continue
// receiving the remaining entries.
type ContinuationToken struct {
	NextPartitionKey string
	NextRowKey       string
}

type getTableEntriesResponse struct {
	Elements []map[string]interface{} `json:"value"`
}

// QueryTableEntities queries the specified table and returns the unmarshaled
// entities of type retType.
// The top parameter limits the returned entries up to top. The maximum top
// allowed by the Azure API is 1000. In case there are more than top entries to be
// returned the function will return a non nil *ContinuationToken. You can call the
// same function again passing the received ContinuationToken as previousContToken
// parameter in order to get the following entries. The query parameter
// is the odata query. To retrieve all the entries pass the empty string.
// The function returns a pointer to a TableEntity slice, the *ContinuationToken
// if there are more entries to be returned and an error in case something went
// wrong.
//
// Example:
//     entities, cToken, err = tSvc.QueryTableEntities("table", cToken, reflect.TypeOf(entity), 20, "")
func (c *TableServiceClient) QueryTableEntities(tableName AzureTable, previousContToken *ContinuationToken, retType reflect.Type, top int, query string) ([]TableEntity, *ContinuationToken, error) {
	if top > maxTopParameter {
		return nil, nil, fmt.Errorf("top accepts at maximum %d elements. Requested %d instead", maxTopParameter, top)
	}

	uri := c.client.getEndpoint(tableServiceName, pathForTable(tableName), url.Values{})
	uri += fmt.Sprintf("?$top=%d", top)
	if query != "" {
		uri += fmt.Sprintf("&$filter=%s", url.QueryEscape(query))
	}

	if previousContToken != nil {
		uri += fmt.Sprintf("&NextPartitionKey=%s&NextRowKey=%s", previousContToken.NextPartitionKey, previousContToken.NextRowKey)
	}

	headers := c.getStandardHeaders()

	headers["Content-Length"] = "0"

	resp, err := c.client.execInternalJSON(http.MethodGet, uri, headers, nil, c.auth)

	if err != nil {
		return nil, nil, err
	}

	contToken := extractContinuationTokenFromHeaders(resp.headers)

	defer resp.body.Close()

	if err = checkRespCode(resp.statusCode, []int{http.StatusOK}); err != nil {
		return nil, contToken, err
	}

	retEntries, err := deserializeEntity(retType, resp.body)
	if err != nil {
		return nil, contToken, err
	}

	return retEntries, contToken, nil
}

// InsertEntity inserts an entity in the specified table.
// The function fails if there is an entity with the same
// PartitionKey and RowKey in the table.
func (c *TableServiceClient) InsertEntity(table AzureTable, entity TableEntity) error {
	if sc, err := c.execTable(table, entity, false, http.MethodPost); err != nil {
		return checkRespCode(sc, []int{http.StatusCreated})
	}

	return nil
}

func (c *TableServiceClient) execTable(table AzureTable, entity TableEntity, specifyKeysInURL bool, method string) (int, error) {
	uri := c.client.getEndpoint(tableServiceName, pathForTable(table), url.Values{})
	if specifyKeysInURL {
		uri += fmt.Sprintf("(PartitionKey='%s',RowKey='%s')", url.QueryEscape(entity.PartitionKey()), url.QueryEscape(entity.RowKey()))
	}

	headers := c.getStandardHeaders()

	var buf bytes.Buffer

	if err := injectPartitionAndRowKeys(entity, &buf); err != nil {
		return 0, err
	}

	headers["Content-Length"] = fmt.Sprintf("%d", buf.Len())

	resp, err := c.client.execInternalJSON(method, uri, headers, &buf, c.auth)

	if err != nil {
		return 0, err
	}

	defer resp.body.Close()

	return resp.statusCode, nil
}

// UpdateEntity updates the contents of an entity with the
// one passed as parameter. The function fails if there is no entity
// with the same PartitionKey and RowKey in the table.
func (c *TableServiceClient) UpdateEntity(table AzureTable, entity TableEntity) error {
	if sc, err := c.execTable(table, entity, true, http.MethodPut); err != nil {
		return checkRespCode(sc, []int{http.StatusNoContent})
	}
	return nil
}

// MergeEntity merges the contents of an entity with the
// one passed as parameter.
// The function fails if there is no entity
// with the same PartitionKey and RowKey in the table.
func (c *TableServiceClient) MergeEntity(table AzureTable, entity TableEntity) error {
	if sc, err := c.execTable(table, entity, true, "MERGE"); err != nil {
		return checkRespCode(sc, []int{http.StatusNoContent})
	}
	return nil
}

// DeleteEntityWithoutCheck deletes the entity matching by
// PartitionKey and RowKey. There is no check on the IfMatch
// parameter so the entity is always deleted.
// The function fails if there is no entity
// with the same PartitionKey and RowKey in the table.
func (c *TableServiceClient) DeleteEntityWithoutCheck(table AzureTable, entity TableEntity) error {
	return c.DeleteEntity(table, entity, "*")
}

// DeleteEntity deletes the entity matching by
// PartitionKey, RowKey and ifMatch field.
// The function fails if there is no entity
// with the same PartitionKey and RowKey in the table or
// the ifMatch is different.
func (c *TableServiceClient) DeleteEntity(table AzureTable, entity TableEntity, ifMatch string) error {
	uri := c.client.getEndpoint(tableServiceName, pathForTable(table), url.Values{})
	uri += fmt.Sprintf("(PartitionKey='%s',RowKey='%s')", url.QueryEscape(entity.PartitionKey()), url.QueryEscape(entity.RowKey()))

	headers := c.getStandardHeaders()

	headers["Content-Length"] = "0"
	headers["If-Match"] = ifMatch

	resp, err := c.client.execInternalJSON(http.MethodDelete, uri, headers, nil, c.auth)

	if err != nil {
		return err
	}
	defer resp.body.Close()

	if err := checkRespCode(resp.statusCode, []int{http.StatusNoContent}); err != nil {
		return err
	}

	return nil
}

// InsertOrReplaceEntity inserts an entity in the specified table
// or replaces the existing one.
func (c *TableServiceClient) InsertOrReplaceEntity(table AzureTable, entity TableEntity) error {
	if sc, err := c.execTable(table, entity, true, http.MethodPut); err != nil {
		return checkRespCode(sc, []int{http.StatusNoContent})
	}
	return nil
}

// InsertOrMergeEntity inserts an entity in the specified table
// or merges the existing one.
func (c *TableServiceClient) InsertOrMergeEntity(table AzureTable, entity TableEntity) error {
	if sc, err := c.execTable(table, entity, true, "MERGE"); err != nil {
		return checkRespCode(sc, []int{http.StatusNoContent})
	}
	return nil
}

func injectPartitionAndRowKeys(entity TableEntity, buf *bytes.Buffer) error {
	if err := json.NewEncoder(buf).Encode(entity); err != nil {
		return err
	}

	dec := make(map[string]interface{})
	if err := json.NewDecoder(buf).Decode(&dec); err != nil {
		return err
	}

	// Inject PartitionKey and RowKey
	dec[partitionKeyNode] = entity.PartitionKey()
	dec[rowKeyNode] = entity.RowKey()

	// Remove tagged fields
	// The tag is defined in the const section
	// This is useful to avoid storing the PartitionKey and RowKey twice.
	numFields := reflect.ValueOf(entity).Elem().NumField()
	for i := 0; i < numFields; i++ {
		f := reflect.ValueOf(entity).Elem().Type().Field(i)

		if f.Tag.Get(tag) == tagIgnore {
			// we must look for its JSON name in the dictionary
			// as the user can rename it using a tag
			jsonName := f.Name
			if f.Tag.Get("json") != "" {
				jsonName = f.Tag.Get("json")
			}
			delete(dec, jsonName)
		}
	}

	buf.Reset()

	if err := json.NewEncoder(buf).Encode(&dec); err != nil {
		return err
	}

	return nil
}

func deserializeEntity(retType reflect.Type, reader io.Reader) ([]TableEntity, error) {
	buf := new(bytes.Buffer)

	var ret getTableEntriesResponse
	if err := json.NewDecoder(reader).Decode(&ret); err != nil {
		return nil, err
	}

	tEntries := make([]TableEntity, len(ret.Elements))

	for i, entry := range ret.Elements {
		buf.Reset()
		if err := json.NewEncoder(buf).Encode(entry); err != nil {
			return nil, err
		}

		dec := make(map[string]interface{})
		if err := json.NewDecoder(buf).Decode(&dec); err != nil {
			return nil, err
		}

		var pKey, rKey string
		// strip pk and rk
		for key, val := range dec {
			switch key {
			case partitionKeyNode:
				pKey = val.(string)
			case rowKeyNode:
				rKey = val.(string)
			}
		}

		delete(dec, partitionKeyNode)
		delete(dec, rowKeyNode)

		buf.Reset()
		if err := json.NewEncoder(buf).Encode(dec); err != nil {
			return nil, err
		}

		// Create an empty retType instance
		tEntries[i] = reflect.New(retType.Elem()).Interface().(TableEntity)
		// Populate it with the values
		if err := json.NewDecoder(buf).Decode(&tEntries[i]); err != nil {
			return nil, err
		}

		// Reset PartitionKey and RowKey
		if err := tEntries[i].SetPartitionKey(pKey); err != nil {
			return nil, err
		}
		if err := tEntries[i].SetRowKey(rKey); err != nil {
			return nil, err
		}
	}

	return tEntries, nil
}

func extractContinuationTokenFromHeaders(h http.Header) *ContinuationToken {
	ct := ContinuationToken{h.Get(continuationTokenPartitionKeyHeader), h.Get(continuationTokenRowHeader)}

	if ct.NextPartitionKey != "" && ct.NextRowKey != "" {
		return &ct
	}
	return nil
}
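A concrete TableEntity implementation makes the reflection-based round trip above easier to follow. This sketch is illustrative, not part of the commit; the Task type is hypothetical and the credentials are placeholders.

package main

import (
	"fmt"
	"log"
	"reflect"

	"github.com/Azure/azure-sdk-for-go/storage"
)

// Task satisfies storage.TableEntity. The unexported pk/rk fields stay out
// of the JSON body automatically; the keys travel via the PartitionKey and
// RowKey nodes that injectPartitionAndRowKeys adds, so they are stored once.
type Task struct {
	pk, rk string
	Owner  string `json:"Owner"`
	Done   bool   `json:"Done"`
}

func (t *Task) PartitionKey() string           { return t.pk }
func (t *Task) RowKey() string                 { return t.rk }
func (t *Task) SetPartitionKey(v string) error { t.pk = v; return nil }
func (t *Task) SetRowKey(v string) error       { t.rk = v; return nil }

func main() {
	client, err := storage.NewBasicClient("accountname", "accountkey")
	if err != nil {
		log.Fatal(err)
	}
	tsc := client.GetTableService()
	table := storage.AzureTable("tasks")

	if err = tsc.InsertOrReplaceEntity(table, &Task{pk: "p1", rk: "r1", Owner: "alice"}); err != nil {
		log.Fatal(err)
	}

	// retType tells deserializeEntity which concrete type to instantiate.
	entities, _, err := tsc.QueryTableEntities(table, nil, reflect.TypeOf(&Task{}), 10, "")
	if err != nil {
		log.Fatal(err)
	}
	for _, e := range entities {
		fmt.Println(e.PartitionKey(), e.RowKey(), e.(*Task).Owner)
	}
}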
85
vendor/github.com/Azure/azure-sdk-for-go/storage/util.go
generated
vendored
Normal file
@ -0,0 +1,85 @@
package storage

import (
	"bytes"
	"crypto/hmac"
	"crypto/sha256"
	"encoding/base64"
	"encoding/xml"
	"fmt"
	"io"
	"io/ioutil"
	"net/http"
	"net/url"
	"reflect"
	"time"
)

func (c Client) computeHmac256(message string) string {
	h := hmac.New(sha256.New, c.accountKey)
	h.Write([]byte(message))
	return base64.StdEncoding.EncodeToString(h.Sum(nil))
}

func currentTimeRfc1123Formatted() string {
	return timeRfc1123Formatted(time.Now().UTC())
}

func timeRfc1123Formatted(t time.Time) string {
	return t.Format(http.TimeFormat)
}

func mergeParams(v1, v2 url.Values) url.Values {
	out := url.Values{}
	for k, v := range v1 {
		out[k] = v
	}
	for k, v := range v2 {
		vals, ok := out[k]
		if ok {
			vals = append(vals, v...)
			out[k] = vals
		} else {
			out[k] = v
		}
	}
	return out
}

func prepareBlockListRequest(blocks []Block) string {
	s := `<?xml version="1.0" encoding="utf-8"?><BlockList>`
	for _, v := range blocks {
		s += fmt.Sprintf("<%s>%s</%s>", v.Status, v.ID, v.Status)
	}
	s += `</BlockList>`
	return s
}

func xmlUnmarshal(body io.Reader, v interface{}) error {
	data, err := ioutil.ReadAll(body)
	if err != nil {
		return err
	}
	return xml.Unmarshal(data, v)
}

func xmlMarshal(v interface{}) (io.Reader, int, error) {
	b, err := xml.Marshal(v)
	if err != nil {
		return nil, 0, err
	}
	return bytes.NewReader(b), len(b), nil
}

func headersFromStruct(v interface{}) map[string]string {
	headers := make(map[string]string)
	value := reflect.ValueOf(v)
	for i := 0; i < value.NumField(); i++ {
		key := value.Type().Field(i).Tag.Get("header")
		val := value.Field(i).String()
		if key != "" && val != "" {
			headers[key] = val
		}
	}
	return headers
}
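headersFromStruct drives header generation from struct tags. The standalone sketch below reproduces the same technique so it can be run outside the package; the OverrideHeaders type is made up for illustration.

package main

import (
	"fmt"
	"reflect"
)

// OverrideHeaders is a hypothetical example type; any struct whose string
// fields carry a `header` tag works with this tag-driven extraction.
type OverrideHeaders struct {
	ContentType  string `header:"x-ms-blob-content-type"`
	CacheControl string `header:"x-ms-blob-cache-control"`
	Unset        string `header:"x-ms-blob-content-language"` // empty values are skipped
}

// headersFromStruct mirrors the vendored helper: it walks the struct
// fields and keeps tag/value pairs where both are non-empty.
func headersFromStruct(v interface{}) map[string]string {
	headers := make(map[string]string)
	value := reflect.ValueOf(v)
	for i := 0; i < value.NumField(); i++ {
		key := value.Type().Field(i).Tag.Get("header")
		val := value.Field(i).String()
		if key != "" && val != "" {
			headers[key] = val
		}
	}
	return headers
}

func main() {
	h := headersFromStruct(OverrideHeaders{
		ContentType:  "application/json",
		CacheControl: "no-cache",
	})
	fmt.Println(h) // map[x-ms-blob-cache-control:no-cache x-ms-blob-content-type:application/json]
}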
5
vendor/github.com/Azure/azure-sdk-for-go/storage/version.go
generated
vendored
Normal file
@ -0,0 +1,5 @@
package storage

var (
	sdkVersion = "8.0.0-beta"
)
55
vendor/github.com/minio/cli/help.go
generated
vendored
@ -13,69 +13,68 @@ import (
// cli.go uses text/template to render templates. You can
// render custom help text by setting this variable.
var AppHelpTemplate = `NAME:
-{{.Name}}{{if .Usage}} - {{.Usage}}{{end}}
+{{.Name}}{{if .Usage}} - {{.Usage}}{{end}}

USAGE:
-{{if .UsageText}}{{.UsageText}}{{else}}{{.HelpName}} {{if .VisibleFlags}}[global options]{{end}}{{if .Commands}} command [command options]{{end}} {{if .ArgsUsage}}{{.ArgsUsage}}{{else}}[arguments...]{{end}}{{end}}{{if .Version}}{{if not .HideVersion}}
+{{if .UsageText}}{{.UsageText}}{{else}}{{.HelpName}} {{if .VisibleFlags}}[global options]{{end}}{{if .Commands}} command [command options]{{end}} {{if .ArgsUsage}}{{.ArgsUsage}}{{else}}[arguments...]{{end}}{{end}}{{if .Version}}{{if not .HideVersion}}

VERSION:
-{{.Version}}{{end}}{{end}}{{if .Description}}
+{{.Version}}{{end}}{{end}}{{if .Description}}

DESCRIPTION:
-{{.Description}}{{end}}{{if len .Authors}}
+{{.Description}}{{end}}{{if len .Authors}}

AUTHOR{{with $length := len .Authors}}{{if ne 1 $length}}S{{end}}{{end}}:
-{{range $index, $author := .Authors}}{{if $index}}
-{{end}}{{$author}}{{end}}{{end}}{{if .VisibleCommands}}
+{{range $index, $author := .Authors}}{{if $index}}
+{{end}}{{$author}}{{end}}{{end}}{{if .VisibleCommands}}

COMMANDS:{{range .VisibleCategories}}{{if .Name}}
-{{.Name}}:{{end}}{{range .VisibleCommands}}
-{{join .Names ", "}}{{"\t"}}{{.Usage}}{{end}}{{end}}{{end}}{{if .VisibleFlags}}
+{{.Name}}:{{end}}{{range .VisibleCommands}}
+{{join .Names ", "}}{{"\t"}}{{.Usage}}{{end}}{{end}}{{end}}{{if .VisibleFlags}}

-GLOBAL OPTIONS:
-{{range $index, $option := .VisibleFlags}}{{if $index}}
-{{end}}{{$option}}{{end}}{{end}}{{if .Copyright}}
+GLOBAL FLAGS:
+{{range $index, $option := .VisibleFlags}}{{if $index}}
+{{end}}{{$option}}{{end}}{{end}}{{if .Copyright}}

COPYRIGHT:
-{{.Copyright}}{{end}}
+{{.Copyright}}{{end}}
`

// CommandHelpTemplate is the text template for the command help topic.
// cli.go uses text/template to render templates. You can
// render custom help text by setting this variable.
var CommandHelpTemplate = `NAME:
-{{.HelpName}} - {{.Usage}}
+{{.HelpName}} - {{.Usage}}

USAGE:
-{{.HelpName}}{{if .VisibleFlags}} [command options]{{end}} {{if .ArgsUsage}}{{.ArgsUsage}}{{else}}[arguments...]{{end}}{{if .Category}}
+{{.HelpName}}{{if .VisibleFlags}} [command options]{{end}} {{if .ArgsUsage}}{{.ArgsUsage}}{{else}}[arguments...]{{end}}{{if .Category}}

CATEGORY:
-{{.Category}}{{end}}{{if .Description}}
+{{.Category}}{{end}}{{if .Description}}

DESCRIPTION:
-{{.Description}}{{end}}{{if .VisibleFlags}}
+{{.Description}}{{end}}{{if .VisibleFlags}}

-OPTIONS:
-{{range .VisibleFlags}}{{.}}
-{{end}}{{end}}
+FLAGS:
+{{range .VisibleFlags}}{{.}}
+{{end}}{{end}}
`

// SubcommandHelpTemplate is the text template for the subcommand help topic.
// cli.go uses text/template to render templates. You can
// render custom help text by setting this variable.
var SubcommandHelpTemplate = `NAME:
-{{.HelpName}} - {{if .Description}}{{.Description}}{{else}}{{.Usage}}{{end}}
+{{.HelpName}} - {{if .Description}}{{.Description}}{{else}}{{.Usage}}{{end}}

USAGE:
-{{.HelpName}} command{{if .VisibleFlags}} [command options]{{end}} {{if .ArgsUsage}}{{.ArgsUsage}}{{else}}[arguments...]{{end}}
+{{.HelpName}} COMMAND{{if .VisibleFlags}} [COMMAND FLAGS | -h]{{end}} [ARGUMENTS...]

-COMMANDS:{{range .VisibleCategories}}{{if .Name}}
-{{.Name}}:{{end}}{{range .VisibleCommands}}
-{{join .Names ", "}}{{"\t"}}{{.Usage}}{{end}}
-{{end}}{{if .VisibleFlags}}
-OPTIONS:
-{{range .VisibleFlags}}{{.}}
-{{end}}{{end}}
+COMMANDS:
+{{range .VisibleCommands}}{{join .Names ", "}}{{ "\t" }}{{.Usage}}
+{{end}}{{if .VisibleFlags}}
+FLAGS:
+{{range .VisibleFlags}}{{.}}
+{{end}}{{end}}
`

var helpCommand = Command{
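These package-level template variables are read when help is rendered, so a program can override them before running the app. A minimal sketch, assuming the standard cli.NewApp entry point of this package; the template body here is invented for illustration.

package main

import (
	"log"
	"os"

	"github.com/minio/cli"
)

func main() {
	// Override the package-level template before the app runs; the
	// FLAGS naming then shows up in `--help` output.
	cli.AppHelpTemplate = `NAME:
  {{.Name}} - {{.Usage}}

FLAGS:
  {{range .VisibleFlags}}{{.}}
  {{end}}
`
	app := cli.NewApp()
	app.Name = "demo"
	app.Usage = "show customized help output"
	if err := app.Run(os.Args); err != nil {
		log.Fatal(err)
	}
}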
12
vendor/vendor.json
vendored
@ -2,6 +2,12 @@
"comment": "",
|
||||
"ignore": "test",
|
||||
"package": [
|
||||
{
|
||||
"checksumSHA1": "rK3ght7KTtHGdm0V4+U7fv9+tUU=",
|
||||
"path": "github.com/Azure/azure-sdk-for-go/storage",
|
||||
"revision": "8e625d1702a32d01cef05a9252198d231c4af113",
|
||||
"revisionTime": "2017-02-08T01:01:20Z"
|
||||
},
|
||||
{
|
||||
"path": "github.com/Sirupsen/logrus",
|
||||
"revision": "32055c351ea8b00b96d70f28db48d9840feaf0ec",
|
||||
@ -175,10 +181,10 @@
|
||||
"revisionTime": "2016-07-23T06:10:19Z"
|
||||
},
|
||||
{
|
||||
"checksumSHA1": "7PcmjItrQSx/1sZ6Q395LCzT+iw=",
|
||||
"checksumSHA1": "fUWokilZyc1QDKnIgCDJE8n1S9U=",
|
||||
"path": "github.com/minio/cli",
|
||||
"revision": "06bb2061ef1493532baf0444818eb5fb4c83caac",
|
||||
"revisionTime": "2017-02-20T03:57:28Z"
|
||||
"revision": "b8ae5507c0ceceecc22d5dbd386b58fbd4fdce72",
|
||||
"revisionTime": "2017-02-27T07:32:28Z"
|
||||
},
|
||||
{
|
||||
"checksumSHA1": "NBGyq2+iTtJvJ+ElG4FzHLe1WSY=",
|
||||
|