/*
 * Minio Cloud Storage, (C) 2017 Minio, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package cmd

import (
	"crypto/md5"
	"encoding/base64"
	"encoding/hex"
	"fmt"
	"hash"
	"io"
	"net/http"
	"net/url"
	"strconv"
	"strings"
	"sync"
	"time"

	"github.com/Azure/azure-sdk-for-go/storage"
	"github.com/minio/minio-go/pkg/policy"
	"github.com/minio/sha256-simd"
)

const globalAzureAPIVersion = "2016-05-31"

// s3ToAzureHeaders canonicalizes the metadata headers; without this the
// azure-sdk calculates an incorrect signature. Canonicalization converts any
// HTTP header of a form such as `accept-encoding` to its canonical form
// `Accept-Encoding`. It also replaces the X-Amz-Meta prefix with X-Ms-Meta,
// as Azure expects user-defined metadata to carry the X-Ms-Meta prefix.
func s3ToAzureHeaders(headers map[string]string) (newHeaders map[string]string) {
	newHeaders = make(map[string]string)
	for k, v := range headers {
		k = http.CanonicalHeaderKey(k)
		if strings.HasPrefix(k, "X-Amz-Meta") {
			k = strings.Replace(k, "X-Amz-Meta", "X-Ms-Meta", -1)
		}
		newHeaders[k] = v
	}
	return newHeaders
}
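
// Illustrative sketch (hypothetical values, not part of the gateway API):
//
//	in := map[string]string{"x-amz-meta-owner": "alice", "content-type": "text/plain"}
//	out := s3ToAzureHeaders(in)
//	// out: map[string]string{"X-Ms-Meta-Owner": "alice", "Content-Type": "text/plain"}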

// azureToS3Metadata prefixes each user metadata key with "X-Amz-Meta-";
// client.GetBlobMetadata() has already stripped the "X-Ms-Meta-" prefix.
func azureToS3Metadata(meta map[string]string) (newMeta map[string]string) {
	newMeta = make(map[string]string)

	for k, v := range meta {
		k = "X-Amz-Meta-" + k
		newMeta[k] = v
	}
	return newMeta
}
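
// Illustrative sketch (hypothetical values): {"Owner": "alice"} becomes
// {"X-Amz-Meta-Owner": "alice"}, the inverse direction of s3ToAzureHeaders.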

// azureMultipartMetaInfo stores metadata provided during NewMultipartUpload;
// it is consumed after CompleteMultipartUpload to call SetBlobMetadata.
type azureMultipartMetaInfo struct {
	meta map[string]map[string]string
	*sync.Mutex
}

// Return metadata map of the multipart object.
func (a *azureMultipartMetaInfo) get(key string) map[string]string {
	a.Lock()
	defer a.Unlock()
	return a.meta[key]
}

// Set metadata map for the multipart object.
func (a *azureMultipartMetaInfo) set(key string, value map[string]string) {
	a.Lock()
	defer a.Unlock()
	a.meta[key] = value
}

// Delete metadata map for the multipart object.
func (a *azureMultipartMetaInfo) del(key string) {
	a.Lock()
	defer a.Unlock()
	delete(a.meta, key)
}
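
// Lifecycle note: NewMultipartUpload calls set(uploadID, metadata),
// PutObjectPart/ListObjectParts/ListMultipartUploads consult get() to
// validate the upload, and CompleteMultipartUpload/AbortMultipartUpload
// call del(uploadID) once the upload is resolved.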

// azureObjects - Implements Object layer for Azure blob storage.
type azureObjects struct {
	client   storage.BlobStorageClient // Azure sdk client
	metaInfo azureMultipartMetaInfo
}

// Convert azure errors to minio object layer errors.
func azureToObjectError(err error, params ...string) error {
	if err == nil {
		return nil
	}

	e, ok := err.(*Error)
	if !ok {
		// The caller should be fixed if this function is invoked without
		// wrapping err in traceError(); handling that case here would make
		// this function needlessly complicated.
		errorIf(err, "Expected type *Error")
		return err
	}

	err = e.e
	bucket := ""
	object := ""
	if len(params) >= 1 {
		bucket = params[0]
	}
	if len(params) == 2 {
		object = params[1]
	}

	azureErr, ok := err.(storage.AzureStorageServiceError)
	if !ok {
		// We don't interpret non-Azure errors, as Azure errors carry a
		// StatusCode which helps convert them to object errors.
		return e
	}

	switch azureErr.Code {
	case "ContainerAlreadyExists":
		err = BucketExists{Bucket: bucket}
	case "InvalidResourceName":
		err = BucketNameInvalid{Bucket: bucket}
	default:
		switch azureErr.StatusCode {
		case http.StatusNotFound:
			if object != "" {
				err = ObjectNotFound{bucket, object}
			} else {
				err = BucketNotFound{Bucket: bucket}
			}
		case http.StatusBadRequest:
			err = BucketNameInvalid{Bucket: bucket}
		}
	}
	e.e = err
	return e
}

// Inits azure blob storage client and returns azureObjects.
func newAzureLayer(endPoint string, account, key string, secure bool) (GatewayLayer, error) {
	if endPoint == "" {
		endPoint = storage.DefaultBaseURL
	}
	c, err := storage.NewClient(account, key, endPoint, globalAzureAPIVersion, secure)
	if err != nil {
		return &azureObjects{}, err
	}
	return &azureObjects{
		client: c.GetBlobService(),
		metaInfo: azureMultipartMetaInfo{
			meta:  make(map[string]map[string]string),
			Mutex: &sync.Mutex{},
		},
	}, nil
}

// Shutdown - save any gateway metadata to disk
// if necessary and reload upon next restart.
func (a *azureObjects) Shutdown() error {
	// TODO
	return nil
}

// StorageInfo - Not relevant to Azure backend.
func (a *azureObjects) StorageInfo() StorageInfo {
	return StorageInfo{}
}

// MakeBucketWithLocation - Create a new container on azure backend.
func (a *azureObjects) MakeBucketWithLocation(bucket, location string) error {
	err := a.client.CreateContainer(bucket, storage.ContainerAccessTypePrivate)
	return azureToObjectError(traceError(err), bucket)
}

// GetBucketInfo - Get bucket metadata.
func (a *azureObjects) GetBucketInfo(bucket string) (BucketInfo, error) {
	// Azure does not have an equivalent call, hence use ListContainers.
	resp, err := a.client.ListContainers(storage.ListContainersParameters{
		Prefix: bucket,
	})
	if err != nil {
		return BucketInfo{}, azureToObjectError(traceError(err), bucket)
	}
	for _, container := range resp.Containers {
		if container.Name == bucket {
			t, e := time.Parse(time.RFC1123, container.Properties.LastModified)
			if e == nil {
				return BucketInfo{
					Name:    bucket,
					Created: t,
				}, nil
			} // else continue
		}
	}
	return BucketInfo{}, traceError(BucketNotFound{Bucket: bucket})
}

// ListBuckets - Lists all azure containers, uses Azure equivalent ListContainers.
func (a *azureObjects) ListBuckets() (buckets []BucketInfo, err error) {
	resp, err := a.client.ListContainers(storage.ListContainersParameters{})
	if err != nil {
		return nil, azureToObjectError(traceError(err))
	}
	for _, container := range resp.Containers {
		t, e := time.Parse(time.RFC1123, container.Properties.LastModified)
		if e != nil {
			return nil, traceError(e)
		}
		buckets = append(buckets, BucketInfo{
			Name:    container.Name,
			Created: t,
		})
	}
	return buckets, nil
}

// DeleteBucket - delete a container on azure, uses Azure equivalent DeleteContainer.
func (a *azureObjects) DeleteBucket(bucket string) error {
	return azureToObjectError(traceError(a.client.DeleteContainer(bucket)), bucket)
}

// ListObjects - lists all blobs on azure within a container filtered by prefix
// and marker, uses Azure equivalent ListBlobs.
func (a *azureObjects) ListObjects(bucket, prefix, marker, delimiter string, maxKeys int) (result ListObjectsInfo, err error) {
	resp, err := a.client.ListBlobs(bucket, storage.ListBlobsParameters{
		Prefix:     prefix,
		Marker:     marker,
		Delimiter:  delimiter,
		MaxResults: uint(maxKeys),
	})
	if err != nil {
		return result, azureToObjectError(traceError(err), bucket, prefix)
	}
	result.IsTruncated = resp.NextMarker != ""
	result.NextMarker = resp.NextMarker
	for _, object := range resp.Blobs {
		t, e := time.Parse(time.RFC1123, object.Properties.LastModified)
		if e != nil {
			continue
		}
		result.Objects = append(result.Objects, ObjectInfo{
			Bucket:          bucket,
			Name:            object.Name,
			ModTime:         t,
			Size:            object.Properties.ContentLength,
			ETag:            canonicalizeETag(object.Properties.Etag),
			ContentType:     object.Properties.ContentType,
			ContentEncoding: object.Properties.ContentEncoding,
		})
	}
	result.Prefixes = resp.BlobPrefixes
	return result, nil
}

// GetObject - reads an object from azure. Supports additional
// parameters like offset and length which are synonymous with
// HTTP Range requests.
//
// startOffset indicates the starting read location of the object.
// length indicates the total number of bytes to be read.
func (a *azureObjects) GetObject(bucket, object string, startOffset int64, length int64, writer io.Writer) error {
	byteRange := fmt.Sprintf("%d-", startOffset)
	if length > 0 {
		byteRange = fmt.Sprintf("%d-%d", startOffset, startOffset+length-1)
	}

	var rc io.ReadCloser
	var err error
	if startOffset == 0 && length == 0 {
		rc, err = a.client.GetBlob(bucket, object)
	} else {
		rc, err = a.client.GetBlobRange(bucket, object, byteRange, nil)
	}
	if err != nil {
		return azureToObjectError(traceError(err), bucket, object)
	}
	_, err = io.Copy(writer, rc)
	rc.Close()
	return traceError(err)
}
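
// Usage sketch (hypothetical values): stream bytes 100-149 of "blob" in
// container "bucket" into w, an io.Writer, via the range "100-149":
//
//	err := a.GetObject("bucket", "blob", 100, 50, w)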

// GetObjectInfo - reads blob metadata properties and replies back ObjectInfo,
// uses Azure equivalent GetBlobProperties.
func (a *azureObjects) GetObjectInfo(bucket, object string) (objInfo ObjectInfo, err error) {
	blobMeta, err := a.client.GetBlobMetadata(bucket, object)
	if err != nil {
		return objInfo, azureToObjectError(traceError(err), bucket, object)
	}

	meta := azureToS3Metadata(blobMeta)

	prop, err := a.client.GetBlobProperties(bucket, object)
	if err != nil {
		return objInfo, azureToObjectError(traceError(err), bucket, object)
	}
	t, err := time.Parse(time.RFC1123, prop.LastModified)
	if err != nil {
		return objInfo, traceError(err)
	}

	if prop.ContentEncoding != "" {
		meta["Content-Encoding"] = prop.ContentEncoding
	}
	meta["Content-Type"] = prop.ContentType

	objInfo = ObjectInfo{
		Bucket:      bucket,
		UserDefined: meta,
		ETag:        canonicalizeETag(prop.Etag),
		ModTime:     t,
		Name:        object,
		Size:        prop.ContentLength,
	}

	return objInfo, nil
}

// PutObject - Create a new blob with the incoming data,
// uses Azure equivalent CreateBlockBlobFromReader.
func (a *azureObjects) PutObject(bucket, object string, size int64, data io.Reader, metadata map[string]string, sha256sum string) (objInfo ObjectInfo, err error) {
	var sha256Writer hash.Hash
	var md5sumWriter hash.Hash

	var writers []io.Writer

	md5sum := metadata["etag"]
	delete(metadata, "etag")

	teeReader := data

	if sha256sum != "" {
		sha256Writer = sha256.New()
		writers = append(writers, sha256Writer)
	}

	if md5sum != "" {
		md5sumWriter = md5.New()
		writers = append(writers, md5sumWriter)
	}

	if len(writers) > 0 {
		teeReader = io.TeeReader(data, io.MultiWriter(writers...))
	}

	err = a.client.CreateBlockBlobFromReader(bucket, object, uint64(size), teeReader, s3ToAzureHeaders(metadata))
	if err != nil {
		return objInfo, azureToObjectError(traceError(err), bucket, object)
	}

	if md5sum != "" {
		newMD5sum := hex.EncodeToString(md5sumWriter.Sum(nil))
		if newMD5sum != md5sum {
			a.client.DeleteBlob(bucket, object, nil)
			return ObjectInfo{}, azureToObjectError(traceError(BadDigest{md5sum, newMD5sum}))
		}
	}

	if sha256sum != "" {
		newSHA256sum := hex.EncodeToString(sha256Writer.Sum(nil))
		if newSHA256sum != sha256sum {
			a.client.DeleteBlob(bucket, object, nil)
			return ObjectInfo{}, azureToObjectError(traceError(SHA256Mismatch{}))
		}
	}

	return a.GetObjectInfo(bucket, object)
}
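
// Note on the pattern above: the incoming stream is tee'd through the
// requested hashers while uploading, and on a digest mismatch the freshly
// created blob is deleted before the error is returned.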

// CopyObject - Copies a blob from source container to destination container.
// Uses Azure equivalent CopyBlob API.
func (a *azureObjects) CopyObject(srcBucket, srcObject, destBucket, destObject string, metadata map[string]string) (objInfo ObjectInfo, err error) {
	err = a.client.CopyBlob(destBucket, destObject, a.client.GetBlobURL(srcBucket, srcObject))
	if err != nil {
		return objInfo, azureToObjectError(traceError(err), srcBucket, srcObject)
	}
	return a.GetObjectInfo(destBucket, destObject)
}

// DeleteObject - Deletes a blob on azure container, uses Azure
// equivalent DeleteBlob API.
func (a *azureObjects) DeleteObject(bucket, object string) error {
	err := a.client.DeleteBlob(bucket, object, nil)
	if err != nil {
		return azureToObjectError(traceError(err), bucket, object)
	}
	return nil
}

// ListMultipartUploads - Incomplete implementation, for now just return the prefix if it is an incomplete upload.
// FIXME: Full ListMultipartUploads is not supported yet. It is supported just enough to help our client libs to
// support re-uploads. a.client.ListBlobs() can be made to return entries which include uncommitted blobs, from
// which we would need to filter out the committed blobs to get the list of uncommitted blobs.
func (a *azureObjects) ListMultipartUploads(bucket, prefix, keyMarker, uploadIDMarker, delimiter string, maxUploads int) (result ListMultipartsInfo, err error) {
	result.MaxUploads = maxUploads
	result.Prefix = prefix
	result.Delimiter = delimiter
	meta := a.metaInfo.get(prefix)
	if meta == nil {
		// In case minio was restarted after NewMultipartUpload and before CompleteMultipartUpload we expect
		// the client to do a fresh upload so that any metadata like content-type is sent again in the
		// NewMultipartUpload.
		return result, nil
	}
	result.Uploads = []uploadMetadata{{prefix, prefix, UTCNow(), "", nil}}
	return result, nil
}

// NewMultipartUpload - Use Azure equivalent CreateBlockBlob.
func (a *azureObjects) NewMultipartUpload(bucket, object string, metadata map[string]string) (uploadID string, err error) {
	// Azure doesn't return a unique upload ID and we use the object name in its place. Azure allows multiple
	// uploads to co-exist as long as the user keeps the blocks uploaded (in block blobs) unique amongst
	// concurrent upload attempts. Each concurrent client keeps its own blockID list which it can commit.
	uploadID = object
	if metadata == nil {
		// Store an empty map as a placeholder else ListObjectParts/PutObjectPart will not work properly.
		metadata = make(map[string]string)
	} else {
		metadata = s3ToAzureHeaders(metadata)
	}
	a.metaInfo.set(uploadID, metadata)
	return uploadID, nil
}
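
// Because uploadID is simply the object name, two concurrent
// NewMultipartUpload calls for the same object share a single metaInfo
// entry; the later call's metadata overwrites the earlier one's.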

// CopyObjectPart - Not implemented.
func (a *azureObjects) CopyObjectPart(srcBucket, srcObject, destBucket, destObject string, uploadID string, partID int, startOffset int64, length int64) (info PartInfo, err error) {
	return info, traceError(NotImplemented{})
}

// Encode partID+md5Hex to a blockID.
func azureGetBlockID(partID int, md5Hex string) string {
	return base64.StdEncoding.EncodeToString([]byte(fmt.Sprintf("%.5d.%s", partID, md5Hex)))
}

// Decode blockID to partID+md5Hex.
func azureParseBlockID(blockID string) (int, string, error) {
	idByte, err := base64.StdEncoding.DecodeString(blockID)
	if err != nil {
		return 0, "", traceError(err)
	}
	idStr := string(idByte)
	splitRes := strings.Split(idStr, ".")
	if len(splitRes) != 2 {
		return 0, "", traceError(errUnexpected)
	}
	partID, err := strconv.Atoi(splitRes[0])
	if err != nil {
		return 0, "", traceError(err)
	}
	return partID, splitRes[1], nil
}
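
// Round-trip sketch (hypothetical values): a block ID is the base64 encoding
// of "<zero-padded part number>.<md5 hex>":
//
//	id := azureGetBlockID(1, "d41d8cd98f00b204e9800998ecf8427e")
//	// id encodes "00001.d41d8cd98f00b204e9800998ecf8427e"
//	partID, md5Hex, err := azureParseBlockID(id)
//	// partID == 1, md5Hex == "d41d8cd98f00b204e9800998ecf8427e", err == nil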

// PutObjectPart - Use Azure equivalent PutBlockWithLength.
func (a *azureObjects) PutObjectPart(bucket, object, uploadID string, partID int, size int64, data io.Reader, md5Hex string, sha256sum string) (info PartInfo, err error) {
	if meta := a.metaInfo.get(uploadID); meta == nil {
		return info, traceError(InvalidUploadID{})
	}
	var sha256Writer hash.Hash
	var md5sumWriter hash.Hash
	var etag string

	var writers []io.Writer

	if sha256sum != "" {
		sha256Writer = sha256.New()
		writers = append(writers, sha256Writer)
	}

	if md5Hex != "" {
		md5sumWriter = md5.New()
		writers = append(writers, md5sumWriter)
		etag = md5Hex
	} else {
		// Generate random ETag.
		etag = getMD5Hash([]byte(mustGetUUID()))
	}

	teeReader := data

	if len(writers) > 0 {
		teeReader = io.TeeReader(data, io.MultiWriter(writers...))
	}

	id := azureGetBlockID(partID, etag)
	err = a.client.PutBlockWithLength(bucket, object, id, uint64(size), teeReader, nil)
	if err != nil {
		return info, azureToObjectError(traceError(err), bucket, object)
	}

	if md5Hex != "" {
		newMD5sum := hex.EncodeToString(md5sumWriter.Sum(nil))
		if newMD5sum != md5Hex {
			a.client.DeleteBlob(bucket, object, nil)
			return PartInfo{}, azureToObjectError(traceError(BadDigest{md5Hex, newMD5sum}))
		}
	}

	if sha256sum != "" {
		newSHA256sum := hex.EncodeToString(sha256Writer.Sum(nil))
		if newSHA256sum != sha256sum {
			return PartInfo{}, azureToObjectError(traceError(SHA256Mismatch{}))
		}
	}

	info.PartNumber = partID
	info.ETag = etag
	info.LastModified = UTCNow()
	info.Size = size
	return info, nil
}

// ListObjectParts - Use Azure equivalent GetBlockList.
func (a *azureObjects) ListObjectParts(bucket, object, uploadID string, partNumberMarker int, maxParts int) (result ListPartsInfo, err error) {
	result.Bucket = bucket
	result.Object = object
	result.UploadID = uploadID
	result.MaxParts = maxParts

	if meta := a.metaInfo.get(uploadID); meta == nil {
		return result, nil
	}
	resp, err := a.client.GetBlockList(bucket, object, storage.BlockListTypeUncommitted)
	if err != nil {
		return result, azureToObjectError(traceError(err), bucket, object)
	}
	tmpMaxParts := 0
	partCount := 0 // Used for figuring out IsTruncated.
	nextPartNumberMarker := 0
	for _, part := range resp.UncommittedBlocks {
		if tmpMaxParts == maxParts {
			// Also takes care of the case if maxParts = 0
			break
		}
		partCount++
		partID, md5Hex, err := azureParseBlockID(part.Name)
		if err != nil {
			return result, err
		}
		if partID <= partNumberMarker {
			continue
		}
		result.Parts = append(result.Parts, PartInfo{
			partID,
			UTCNow(),
			md5Hex,
			part.Size,
		})
		tmpMaxParts++
		nextPartNumberMarker = partID
	}
	if partCount < len(resp.UncommittedBlocks) {
		result.IsTruncated = true
		result.NextPartNumberMarker = nextPartNumberMarker
	}

	return result, nil
}

// AbortMultipartUpload - Not Implemented.
// There is no corresponding API in azure to abort an incomplete upload. The uncommitted blocks
// get deleted after one week.
func (a *azureObjects) AbortMultipartUpload(bucket, object, uploadID string) error {
	a.metaInfo.del(uploadID)
	return nil
}

// CompleteMultipartUpload - Use Azure equivalent PutBlockList.
func (a *azureObjects) CompleteMultipartUpload(bucket, object, uploadID string, uploadedParts []completePart) (objInfo ObjectInfo, err error) {
	meta := a.metaInfo.get(uploadID)
	if meta == nil {
		return objInfo, traceError(InvalidUploadID{uploadID})
	}
	var blocks []storage.Block
	for _, part := range uploadedParts {
		blocks = append(blocks, storage.Block{
			ID:     azureGetBlockID(part.PartNumber, part.ETag),
			Status: storage.BlockStatusUncommitted,
		})
	}
	err = a.client.PutBlockList(bucket, object, blocks)
	if err != nil {
		return objInfo, azureToObjectError(traceError(err), bucket, object)
	}
	if len(meta) > 0 {
		prop := storage.BlobHeaders{
			ContentMD5:      meta["Content-Md5"],
			ContentLanguage: meta["Content-Language"],
			ContentEncoding: meta["Content-Encoding"],
			ContentType:     meta["Content-Type"],
			CacheControl:    meta["Cache-Control"],
		}
		err = a.client.SetBlobProperties(bucket, object, prop)
		if err != nil {
			return objInfo, azureToObjectError(traceError(err), bucket, object)
		}
		err = a.client.SetBlobMetadata(bucket, object, nil, meta)
		if err != nil {
			return objInfo, azureToObjectError(traceError(err), bucket, object)
		}
	}
	a.metaInfo.del(uploadID)
	return a.GetObjectInfo(bucket, object)
}

// Copied from github.com/Azure/azure-sdk-for-go/storage/blob.go
func azureListBlobsGetParameters(p storage.ListBlobsParameters) url.Values {
	out := url.Values{}

	if p.Prefix != "" {
		out.Set("prefix", p.Prefix)
	}
	if p.Delimiter != "" {
		out.Set("delimiter", p.Delimiter)
	}
	if p.Marker != "" {
		out.Set("marker", p.Marker)
	}
	if p.Include != "" {
		out.Set("include", p.Include)
	}
	if p.MaxResults != 0 {
		out.Set("maxresults", fmt.Sprintf("%v", p.MaxResults))
	}
	if p.Timeout != 0 {
		out.Set("timeout", fmt.Sprintf("%v", p.Timeout))
	}

	return out
}

// SetBucketPolicies - Azure supports three types of container policies:
// storage.ContainerAccessTypeContainer - readonly in minio terminology
// storage.ContainerAccessTypeBlob - readonly without listing in minio terminology
// storage.ContainerAccessTypePrivate - none in minio terminology
// As the common denominator for minio and azure is readonly and none, we support
// these two policies at the bucket level.
func (a *azureObjects) SetBucketPolicies(bucket string, policyInfo policy.BucketAccessPolicy) error {
	var policies []BucketAccessPolicy

	for prefix, policy := range policy.GetPolicies(policyInfo.Statements, bucket) {
		policies = append(policies, BucketAccessPolicy{
			Prefix: prefix,
			Policy: policy,
		})
	}
	prefix := bucket + "/*" // For all objects inside the bucket.
	if len(policies) != 1 {
		return traceError(NotImplemented{})
	}
	if policies[0].Prefix != prefix {
		return traceError(NotImplemented{})
	}
	if policies[0].Policy != policy.BucketPolicyReadOnly {
		return traceError(NotImplemented{})
	}
	perm := storage.ContainerPermissions{
		AccessType:     storage.ContainerAccessTypeContainer,
		AccessPolicies: nil,
	}
	err := a.client.SetContainerPermissions(bucket, perm, 0, "")
	return azureToObjectError(traceError(err), bucket)
}

// GetBucketPolicies - Get the container ACL and convert it to canonical []bucketAccessPolicy
func (a *azureObjects) GetBucketPolicies(bucket string) (policy.BucketAccessPolicy, error) {
	policyInfo := policy.BucketAccessPolicy{Version: "2012-10-17"}
	perm, err := a.client.GetContainerPermissions(bucket, 0, "")
	if err != nil {
		return policy.BucketAccessPolicy{}, azureToObjectError(traceError(err), bucket)
	}
	switch perm.AccessType {
	case storage.ContainerAccessTypePrivate:
		// Do nothing
	case storage.ContainerAccessTypeContainer:
		policyInfo.Statements = policy.SetPolicy(policyInfo.Statements, policy.BucketPolicyReadOnly, bucket, "")
	default:
		return policy.BucketAccessPolicy{}, azureToObjectError(traceError(NotImplemented{}))
	}
	return policyInfo, nil
}

// DeleteBucketPolicies - Set the container ACL to "private"
func (a *azureObjects) DeleteBucketPolicies(bucket string) error {
	perm := storage.ContainerPermissions{
		AccessType:     storage.ContainerAccessTypePrivate,
		AccessPolicies: nil,
	}
	err := a.client.SetContainerPermissions(bucket, perm, 0, "")
	return azureToObjectError(traceError(err))
}