2017-03-16 15:21:58 -04:00
|
|
|
/*
|
|
|
|
* Minio Cloud Storage, (C) 2017 Minio, Inc.
|
|
|
|
*
|
|
|
|
* Licensed under the Apache License, Version 2.0 (the "License");
|
|
|
|
* you may not use this file except in compliance with the License.
|
|
|
|
* You may obtain a copy of the License at
|
|
|
|
*
|
|
|
|
* http://www.apache.org/licenses/LICENSE-2.0
|
|
|
|
*
|
|
|
|
* Unless required by applicable law or agreed to in writing, software
|
|
|
|
* distributed under the License is distributed on an "AS IS" BASIS,
|
|
|
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
|
|
* See the License for the specific language governing permissions and
|
|
|
|
* limitations under the License.
|
|
|
|
*/
|
|
|
|
|
|
|
|
package cmd
|
|
|
|
|
|
|
|
import (
|
2017-09-19 19:08:08 -04:00
|
|
|
"bytes"
|
|
|
|
"crypto/rand"
|
|
|
|
"crypto/sha256"
|
2017-03-16 15:21:58 -04:00
|
|
|
"encoding/base64"
|
2017-09-19 19:08:08 -04:00
|
|
|
"encoding/hex"
|
|
|
|
"encoding/json"
|
|
|
|
"errors"
|
2017-03-16 15:21:58 -04:00
|
|
|
"fmt"
|
|
|
|
"io"
|
|
|
|
"net/http"
|
2017-09-19 19:08:08 -04:00
|
|
|
"strconv"
|
2017-03-16 15:21:58 -04:00
|
|
|
"strings"
|
|
|
|
"time"
|
|
|
|
|
|
|
|
"github.com/Azure/azure-sdk-for-go/storage"
|
2017-09-05 19:56:23 -04:00
|
|
|
humanize "github.com/dustin/go-humanize"
|
2017-03-16 15:21:58 -04:00
|
|
|
"github.com/minio/minio-go/pkg/policy"
|
|
|
|
)
|
|
|
|
|
|
|
|
// Azure storage REST API version used when creating the SDK client.
const globalAzureAPIVersion = "2016-05-31"

// Maximum size of a single Azure block. Parts larger than this are
// split into multiple sequentially numbered blocks (see PutObjectPart).
const azureBlockSize = 100 * humanize.MiByte

// Name template for the hidden object that persists multipart upload
// metadata: <minio-sys-tmp>/multipart/v1/<uploadID>.<sha256(object)>/azure.json
const metadataObjectNameTemplate = globalMinioSysTmp + "multipart/v1/%s.%x/azure.json"
|
2017-03-16 15:21:58 -04:00
|
|
|
|
2017-09-28 18:23:46 -04:00
|
|
|
// s3MetaToAzureProperties converts metadata meant for S3 PUT/COPY
|
|
|
|
// object into Azure data structures - BlobMetadata and
|
|
|
|
// BlobProperties.
|
|
|
|
//
|
|
|
|
// BlobMetadata contains user defined key-value pairs and each key is
|
|
|
|
// automatically prefixed with `X-Ms-Meta-` by the Azure SDK. S3
|
|
|
|
// user-metadata is translated to Azure metadata by removing the
|
|
|
|
// `X-Amz-Meta-` prefix.
|
|
|
|
//
|
|
|
|
// BlobProperties contains commonly set metadata for objects such as
|
|
|
|
// Content-Encoding, etc. Such metadata that is accepted by S3 is
|
|
|
|
// copied into BlobProperties.
|
|
|
|
//
|
|
|
|
// Header names are canonicalized as in http.Header.
|
|
|
|
func s3MetaToAzureProperties(s3Metadata map[string]string) (storage.BlobMetadata,
|
|
|
|
storage.BlobProperties) {
|
|
|
|
|
|
|
|
// Azure does not permit user-defined metadata key names to
|
|
|
|
// contain hyphens. So we map hyphens to underscores for
|
|
|
|
// encryption headers. More such headers may be added in the
|
|
|
|
// future.
|
2017-09-12 19:14:41 -04:00
|
|
|
gatewayHeaders := map[string]string{
|
|
|
|
"X-Amz-Meta-X-Amz-Key": "X-Amz-Meta-x_minio_key",
|
|
|
|
"X-Amz-Meta-X-Amz-Matdesc": "X-Amz-Meta-x_minio_matdesc",
|
|
|
|
"X-Amz-Meta-X-Amz-Iv": "X-Amz-Meta-x_minio_iv",
|
|
|
|
}
|
|
|
|
|
2017-09-28 18:23:46 -04:00
|
|
|
var blobMeta storage.BlobMetadata = make(map[string]string)
|
|
|
|
var props storage.BlobProperties
|
|
|
|
for k, v := range s3Metadata {
|
2017-05-30 23:05:41 -04:00
|
|
|
k = http.CanonicalHeaderKey(k)
|
2017-09-12 19:14:41 -04:00
|
|
|
if nk, ok := gatewayHeaders[k]; ok {
|
|
|
|
k = nk
|
|
|
|
}
|
2017-09-28 18:23:46 -04:00
|
|
|
switch {
|
|
|
|
case strings.HasPrefix(k, "X-Amz-Meta-"):
|
|
|
|
// Strip header prefix, to let Azure SDK
|
|
|
|
// handle it for storage.
|
|
|
|
k = strings.Replace(k, "X-Amz-Meta-", "", 1)
|
|
|
|
blobMeta[k] = v
|
|
|
|
|
|
|
|
// All cases below, extract common metadata that is
|
|
|
|
// accepted by S3 into BlobProperties for setting on
|
|
|
|
// Azure - see
|
|
|
|
// https://docs.aws.amazon.com/AmazonS3/latest/API/RESTObjectPUT.html
|
|
|
|
case k == "Cache-Control":
|
|
|
|
props.CacheControl = v
|
|
|
|
case k == "Content-Disposition":
|
|
|
|
props.ContentDisposition = v
|
|
|
|
case k == "Content-Encoding":
|
|
|
|
props.ContentEncoding = v
|
|
|
|
case k == "Content-Length":
|
|
|
|
// assume this doesn't fail
|
|
|
|
props.ContentLength, _ = strconv.ParseInt(v, 10, 64)
|
|
|
|
case k == "Content-MD5":
|
|
|
|
props.ContentMD5 = v
|
|
|
|
case k == "Content-Type":
|
|
|
|
props.ContentType = v
|
2017-05-30 23:05:41 -04:00
|
|
|
}
|
|
|
|
}
|
2017-09-28 18:23:46 -04:00
|
|
|
return blobMeta, props
|
2017-05-30 23:05:41 -04:00
|
|
|
}
|
|
|
|
|
2017-09-28 18:23:46 -04:00
|
|
|
// azurePropertiesToS3Meta converts Azure metadata/properties to S3
|
|
|
|
// metadata. It is the reverse of s3MetaToAzureProperties. Azure's
|
|
|
|
// `.GetMetadata()` lower-cases all header keys, so this is taken into
|
|
|
|
// account by this function.
|
|
|
|
func azurePropertiesToS3Meta(meta storage.BlobMetadata, props storage.BlobProperties) map[string]string {
|
|
|
|
// Remap underscores to hyphens to restore encryption
|
|
|
|
// headers. See s3MetaToAzureProperties for details.
|
2017-09-12 19:14:41 -04:00
|
|
|
gatewayHeaders := map[string]string{
|
|
|
|
"X-Amz-Meta-x_minio_key": "X-Amz-Meta-X-Amz-Key",
|
|
|
|
"X-Amz-Meta-x_minio_matdesc": "X-Amz-Meta-X-Amz-Matdesc",
|
|
|
|
"X-Amz-Meta-x_minio_iv": "X-Amz-Meta-X-Amz-Iv",
|
|
|
|
}
|
|
|
|
|
2017-09-28 18:23:46 -04:00
|
|
|
s3Metadata := make(map[string]string)
|
2017-05-30 23:05:41 -04:00
|
|
|
for k, v := range meta {
|
2017-09-28 18:23:46 -04:00
|
|
|
// k's `x-ms-meta-` prefix is already stripped by
|
|
|
|
// Azure SDK, so we add the AMZ prefix.
|
2017-05-30 23:05:41 -04:00
|
|
|
k = "X-Amz-Meta-" + k
|
2017-09-12 19:14:41 -04:00
|
|
|
if nk, ok := gatewayHeaders[k]; ok {
|
|
|
|
k = nk
|
|
|
|
}
|
2017-09-28 18:23:46 -04:00
|
|
|
k = http.CanonicalHeaderKey(k)
|
|
|
|
s3Metadata[k] = v
|
|
|
|
}
|
|
|
|
|
|
|
|
// Add each property from BlobProperties that is supported by
|
|
|
|
// S3 PUT/COPY common metadata.
|
|
|
|
if props.CacheControl != "" {
|
|
|
|
s3Metadata["Cache-Control"] = props.CacheControl
|
|
|
|
}
|
|
|
|
if props.ContentDisposition != "" {
|
|
|
|
s3Metadata["Content-Disposition"] = props.ContentDisposition
|
|
|
|
}
|
|
|
|
if props.ContentEncoding != "" {
|
|
|
|
s3Metadata["Content-Encoding"] = props.ContentEncoding
|
|
|
|
}
|
|
|
|
if props.ContentLength != 0 {
|
|
|
|
s3Metadata["Content-Length"] = fmt.Sprintf("%d", props.ContentLength)
|
|
|
|
}
|
|
|
|
if props.ContentMD5 != "" {
|
|
|
|
s3Metadata["Content-MD5"] = props.ContentMD5
|
2017-05-30 23:05:41 -04:00
|
|
|
}
|
2017-09-28 18:23:46 -04:00
|
|
|
if props.ContentType != "" {
|
|
|
|
s3Metadata["Content-Type"] = props.ContentType
|
|
|
|
}
|
|
|
|
return s3Metadata
|
2017-05-30 23:05:41 -04:00
|
|
|
}
|
|
|
|
|
2017-07-10 21:21:12 -04:00
|
|
|
// Append "-1" to etag so that clients do not interpret it as MD5.
|
|
|
|
func azureToS3ETag(etag string) string {
|
|
|
|
return canonicalizeETag(etag) + "-1"
|
|
|
|
}
|
|
|
|
|
2017-05-15 03:52:33 -04:00
|
|
|
// azureObjects - Implements Object layer for Azure blob storage.
type azureObjects struct {
	// gatewayUnsupported is embedded (defined elsewhere in this
	// package); presumably it supplies default implementations for
	// object-layer calls this gateway does not support — verify there.
	gatewayUnsupported
	client storage.BlobStorageClient // Azure sdk client
}
|
|
|
|
|
|
|
|
// Convert azure errors to minio object layer errors.
// Optional params carry context: params[0] is the bucket name and
// params[1] (if present) the object name.
func azureToObjectError(err error, params ...string) error {
	if err == nil {
		return nil
	}

	e, ok := err.(*Error)
	if !ok {
		// Code should be fixed if this function is called without doing traceError()
		// Else handling different situations in this function makes this function complicated.
		errorIf(err, "Expected type *Error")
		return err
	}

	// Unwrap the traced error so the underlying Azure error can be
	// inspected; the translated error is written back into e below.
	err = e.e
	bucket := ""
	object := ""
	if len(params) >= 1 {
		bucket = params[0]
	}
	if len(params) == 2 {
		object = params[1]
	}

	azureErr, ok := err.(storage.AzureStorageServiceError)
	if !ok {
		// We don't interpret non Azure errors. As azure errors will
		// have StatusCode to help to convert to object errors.
		return e
	}

	// First map well-known Azure error codes; otherwise fall back to
	// mapping by HTTP status.
	switch azureErr.Code {
	case "ContainerAlreadyExists":
		err = BucketExists{Bucket: bucket}
	case "InvalidResourceName":
		err = BucketNameInvalid{Bucket: bucket}
	case "RequestBodyTooLarge":
		err = PartTooBig{}
	case "InvalidMetadata":
		err = UnsupportedMetadata{}
	default:
		switch azureErr.StatusCode {
		case http.StatusNotFound:
			// 404 means object-not-found when an object was named,
			// bucket-not-found otherwise.
			if object != "" {
				err = ObjectNotFound{bucket, object}
			} else {
				err = BucketNotFound{Bucket: bucket}
			}
		case http.StatusBadRequest:
			err = BucketNameInvalid{Bucket: bucket}
		}
	}
	// Re-wrap the translated error in the original trace container so
	// stack information is preserved.
	e.e = err
	return e
}
|
|
|
|
|
2017-09-19 19:08:08 -04:00
|
|
|
// mustGetAzureUploadID - returns a new upload ID: the hex encoding of
// 8 cryptographically random bytes. Panics if random data cannot be
// obtained, since upload IDs are essential for multipart correctness.
func mustGetAzureUploadID() string {
	var raw [8]byte

	n, err := io.ReadFull(rand.Reader, raw[:])
	if err != nil {
		panic(fmt.Errorf("unable to generate upload ID for azure. %s", err))
	}
	if n != len(raw) {
		panic(fmt.Errorf("insufficient random data (expected: %d, read: %d)", len(raw), n))
	}

	return hex.EncodeToString(raw[:])
}
|
|
|
|
|
|
|
|
// checkAzureUploadID - returns error if the given string is NOT a
// valid upload ID: 16 lowercase hexadecimal characters, as produced
// by mustGetAzureUploadID.
func checkAzureUploadID(uploadID string) (err error) {
	if len(uploadID) != 16 {
		return traceError(MalformedUploadID{uploadID})
	}

	// Any non-hex character makes the ID malformed.
	if _, err = hex.DecodeString(uploadID); err != nil {
		return traceError(MalformedUploadID{uploadID})
	}

	return nil
}
|
|
|
|
|
|
|
|
// azureGetBlockID encodes partID, subPartNumber, uploadID and md5Hex
// into a single base64 block ID; azureParseBlockID is its inverse.
func azureGetBlockID(partID, subPartNumber int, uploadID, md5Hex string) string {
	raw := fmt.Sprintf("%05d.%02d.%s.%s", partID, subPartNumber, uploadID, md5Hex)
	return base64.StdEncoding.EncodeToString([]byte(raw))
}
|
|
|
|
|
|
|
|
// azureParseBlockID decodes a block ID produced by azureGetBlockID
// back into partID, subPartNumber, uploadID and md5Hex. Both numeric
// components must be positive.
func azureParseBlockID(blockID string) (partID, subPartNumber int, uploadID, md5Hex string, err error) {
	decoded, derr := base64.StdEncoding.DecodeString(blockID)
	if derr != nil {
		err = derr
		return
	}

	fields := strings.Split(string(decoded), ".")
	if len(fields) != 4 {
		err = fmt.Errorf("invalid block id '%s'", string(decoded))
		return
	}

	if partID, err = strconv.Atoi(fields[0]); err != nil || partID <= 0 {
		err = fmt.Errorf("invalid part number in block id '%s'", string(decoded))
		return
	}

	if subPartNumber, err = strconv.Atoi(fields[1]); err != nil || subPartNumber <= 0 {
		err = fmt.Errorf("invalid sub-part number in block id '%s'", string(decoded))
		return
	}

	uploadID, md5Hex = fields[2], fields[3]
	return
}
|
|
|
|
|
2017-05-08 18:42:48 -04:00
|
|
|
// Inits azure blob storage client and returns AzureObjects.
|
2017-06-09 22:50:51 -04:00
|
|
|
func newAzureLayer(host string) (GatewayLayer, error) {
|
2017-05-12 01:38:22 -04:00
|
|
|
var err error
|
2017-06-09 22:50:51 -04:00
|
|
|
var endpoint = storage.DefaultBaseURL
|
|
|
|
var secure = true
|
2017-05-12 01:38:22 -04:00
|
|
|
|
|
|
|
// If user provided some parameters
|
2017-06-09 22:50:51 -04:00
|
|
|
if host != "" {
|
|
|
|
endpoint, secure, err = parseGatewayEndpoint(host)
|
2017-05-12 01:38:22 -04:00
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
2017-04-11 20:44:26 -04:00
|
|
|
}
|
2017-05-08 18:42:48 -04:00
|
|
|
|
2017-06-09 22:50:51 -04:00
|
|
|
creds := serverConfig.GetCredential()
|
|
|
|
c, err := storage.NewClient(creds.AccessKey, creds.SecretKey, endpoint, globalAzureAPIVersion, secure)
|
2017-03-16 15:21:58 -04:00
|
|
|
if err != nil {
|
2017-05-15 03:52:33 -04:00
|
|
|
return &azureObjects{}, err
|
2017-03-16 15:21:58 -04:00
|
|
|
}
|
2017-08-31 20:19:03 -04:00
|
|
|
c.HTTPClient = &http.Client{Transport: newCustomHTTPTransport()}
|
2017-06-09 22:50:51 -04:00
|
|
|
|
2017-05-15 03:52:33 -04:00
|
|
|
return &azureObjects{
|
2017-03-16 15:21:58 -04:00
|
|
|
client: c.GetBlobService(),
|
|
|
|
}, nil
|
|
|
|
}
|
|
|
|
|
|
|
|
// Shutdown - save any gateway metadata to disk
// if necessary and reload upon next restart.
func (a *azureObjects) Shutdown() error {
	// Nothing to persist: all state lives in Azure itself.
	return nil
}
|
|
|
|
|
|
|
|
// StorageInfo - Not relevant to Azure backend.
func (a *azureObjects) StorageInfo() (si StorageInfo) {
	// Return the zero value; capacity accounting is Azure's concern.
	return si
}
|
|
|
|
|
2017-04-27 14:26:00 -04:00
|
|
|
// MakeBucketWithLocation - Create a new container on azure backend.
|
2017-05-15 03:52:33 -04:00
|
|
|
func (a *azureObjects) MakeBucketWithLocation(bucket, location string) error {
|
2017-09-28 18:23:46 -04:00
|
|
|
container := a.client.GetContainerReference(bucket)
|
|
|
|
err := container.Create(&storage.CreateContainerOptions{
|
|
|
|
Access: storage.ContainerAccessTypePrivate,
|
|
|
|
})
|
2017-03-16 15:21:58 -04:00
|
|
|
return azureToObjectError(traceError(err), bucket)
|
|
|
|
}
|
|
|
|
|
|
|
|
// GetBucketInfo - Get bucket metadata..
|
2017-06-21 22:53:09 -04:00
|
|
|
func (a *azureObjects) GetBucketInfo(bucket string) (bi BucketInfo, e error) {
|
2017-09-28 22:37:09 -04:00
|
|
|
// Verify if bucket (container-name) is valid.
|
|
|
|
// IsValidBucketName has same restrictions as container names mentioned
|
|
|
|
// in azure documentation, so we will simply use the same function here.
|
|
|
|
// Ref - https://docs.microsoft.com/en-us/rest/api/storageservices/naming-and-referencing-containers--blobs--and-metadata
|
|
|
|
if !IsValidBucketName(bucket) {
|
|
|
|
return bi, traceError(BucketNameInvalid{Bucket: bucket})
|
|
|
|
}
|
|
|
|
|
2017-09-28 18:23:46 -04:00
|
|
|
// Azure does not have an equivalent call, hence use
|
|
|
|
// ListContainers with prefix
|
2017-03-16 15:21:58 -04:00
|
|
|
resp, err := a.client.ListContainers(storage.ListContainersParameters{
|
|
|
|
Prefix: bucket,
|
|
|
|
})
|
|
|
|
if err != nil {
|
2017-06-21 22:53:09 -04:00
|
|
|
return bi, azureToObjectError(traceError(err), bucket)
|
2017-03-16 15:21:58 -04:00
|
|
|
}
|
|
|
|
for _, container := range resp.Containers {
|
|
|
|
if container.Name == bucket {
|
|
|
|
t, e := time.Parse(time.RFC1123, container.Properties.LastModified)
|
|
|
|
if e == nil {
|
|
|
|
return BucketInfo{
|
|
|
|
Name: bucket,
|
|
|
|
Created: t,
|
|
|
|
}, nil
|
|
|
|
} // else continue
|
|
|
|
}
|
|
|
|
}
|
2017-06-21 22:53:09 -04:00
|
|
|
return bi, traceError(BucketNotFound{Bucket: bucket})
|
2017-03-16 15:21:58 -04:00
|
|
|
}
|
|
|
|
|
|
|
|
// ListBuckets - Lists all azure containers, uses Azure equivalent ListContainers.
|
2017-05-15 03:52:33 -04:00
|
|
|
func (a *azureObjects) ListBuckets() (buckets []BucketInfo, err error) {
|
2017-03-16 15:21:58 -04:00
|
|
|
resp, err := a.client.ListContainers(storage.ListContainersParameters{})
|
|
|
|
if err != nil {
|
|
|
|
return nil, azureToObjectError(traceError(err))
|
|
|
|
}
|
|
|
|
for _, container := range resp.Containers {
|
|
|
|
t, e := time.Parse(time.RFC1123, container.Properties.LastModified)
|
|
|
|
if e != nil {
|
|
|
|
return nil, traceError(e)
|
|
|
|
}
|
|
|
|
buckets = append(buckets, BucketInfo{
|
|
|
|
Name: container.Name,
|
|
|
|
Created: t,
|
|
|
|
})
|
|
|
|
}
|
|
|
|
return buckets, nil
|
|
|
|
}
|
|
|
|
|
|
|
|
// DeleteBucket - delete a container on azure, uses Azure equivalent DeleteContainer.
|
2017-05-15 03:52:33 -04:00
|
|
|
func (a *azureObjects) DeleteBucket(bucket string) error {
|
2017-09-28 18:23:46 -04:00
|
|
|
container := a.client.GetContainerReference(bucket)
|
|
|
|
return azureToObjectError(traceError(container.Delete(nil)), bucket)
|
2017-03-16 15:21:58 -04:00
|
|
|
}
|
|
|
|
|
|
|
|
// ListObjects - lists all blobs on azure with in a container filtered by prefix
// and marker, uses Azure equivalent ListBlobs.
func (a *azureObjects) ListObjects(bucket, prefix, marker, delimiter string, maxKeys int) (result ListObjectsInfo, err error) {
	var objects []ObjectInfo
	var prefixes []string
	container := a.client.GetContainerReference(bucket)
	// Keep paging until at least one visible object or prefix turns
	// up, or the listing is exhausted: a page may contain only
	// gateway-internal minio.sys.tmp entries, which are filtered out
	// below and must not yield an empty-but-truncated result.
	for len(objects) == 0 && len(prefixes) == 0 {
		resp, err := container.ListBlobs(storage.ListBlobsParameters{
			Prefix:     prefix,
			Marker:     marker,
			Delimiter:  delimiter,
			MaxResults: uint(maxKeys),
		})
		if err != nil {
			return result, azureToObjectError(traceError(err), bucket, prefix)
		}

		for _, object := range resp.Blobs {
			// Skip gateway-internal temporary objects.
			if strings.HasPrefix(object.Name, globalMinioSysTmp) {
				continue
			}
			objects = append(objects, ObjectInfo{
				Bucket:          bucket,
				Name:            object.Name,
				ModTime:         time.Time(object.Properties.LastModified),
				Size:            object.Properties.ContentLength,
				ETag:            azureToS3ETag(object.Properties.Etag),
				ContentType:     object.Properties.ContentType,
				ContentEncoding: object.Properties.ContentEncoding,
			})
		}

		// Remove minio.sys.tmp prefix.
		for _, prefix := range resp.BlobPrefixes {
			if prefix != globalMinioSysTmp {
				prefixes = append(prefixes, prefix)
			}
		}

		// Advance to the next page; an empty NextMarker means the
		// listing is complete.
		marker = resp.NextMarker
		if resp.NextMarker == "" {
			break
		}
	}

	result.Objects = objects
	result.Prefixes = prefixes
	result.NextMarker = marker
	// A non-empty marker indicates more results are available.
	result.IsTruncated = (marker != "")
	return result, nil
}
|
|
|
|
|
|
|
|
// ListObjectsV2 - list all blobs in Azure bucket filtered by prefix
|
2017-09-29 15:08:23 -04:00
|
|
|
func (a *azureObjects) ListObjectsV2(bucket, prefix, continuationToken, delimiter string, maxKeys int, fetchOwner bool, startAfter string) (result ListObjectsV2Info, err error) {
|
|
|
|
marker := continuationToken
|
|
|
|
if startAfter != "" {
|
|
|
|
marker = startAfter
|
2017-03-16 15:21:58 -04:00
|
|
|
}
|
2017-09-19 19:08:08 -04:00
|
|
|
|
2017-09-29 15:08:23 -04:00
|
|
|
var resultV1 ListObjectsInfo
|
|
|
|
resultV1, err = a.ListObjects(bucket, prefix, marker, delimiter, maxKeys)
|
|
|
|
if err != nil {
|
|
|
|
return result, err
|
2017-09-19 19:08:08 -04:00
|
|
|
}
|
2017-09-29 15:08:23 -04:00
|
|
|
|
|
|
|
result.Objects = resultV1.Objects
|
|
|
|
result.Prefixes = resultV1.Prefixes
|
|
|
|
result.ContinuationToken = continuationToken
|
|
|
|
result.NextContinuationToken = resultV1.NextMarker
|
|
|
|
result.IsTruncated = (resultV1.NextMarker != "")
|
2017-03-16 15:21:58 -04:00
|
|
|
return result, nil
|
|
|
|
}
|
|
|
|
|
|
|
|
// GetObject - reads an object from azure. Supports additional
|
|
|
|
// parameters like offset and length which are synonymous with
|
|
|
|
// HTTP Range requests.
|
|
|
|
//
|
|
|
|
// startOffset indicates the starting read location of the object.
|
|
|
|
// length indicates the total length of the object.
|
2017-05-15 03:52:33 -04:00
|
|
|
func (a *azureObjects) GetObject(bucket, object string, startOffset int64, length int64, writer io.Writer) error {
|
2017-09-28 18:23:46 -04:00
|
|
|
blobRange := &storage.BlobRange{Start: uint64(startOffset)}
|
2017-03-16 15:21:58 -04:00
|
|
|
if length > 0 && startOffset > 0 {
|
2017-09-28 18:23:46 -04:00
|
|
|
blobRange.End = uint64(startOffset + length - 1)
|
2017-03-16 15:21:58 -04:00
|
|
|
}
|
|
|
|
|
2017-09-28 18:23:46 -04:00
|
|
|
blob := a.client.GetContainerReference(bucket).GetBlobReference(object)
|
2017-03-16 15:21:58 -04:00
|
|
|
var rc io.ReadCloser
|
|
|
|
var err error
|
|
|
|
if startOffset == 0 && length == 0 {
|
2017-09-28 18:23:46 -04:00
|
|
|
rc, err = blob.Get(nil)
|
2017-03-16 15:21:58 -04:00
|
|
|
} else {
|
2017-09-28 18:23:46 -04:00
|
|
|
rc, err = blob.GetRange(&storage.GetBlobRangeOptions{
|
|
|
|
Range: blobRange,
|
|
|
|
})
|
2017-03-16 15:21:58 -04:00
|
|
|
}
|
|
|
|
if err != nil {
|
|
|
|
return azureToObjectError(traceError(err), bucket, object)
|
|
|
|
}
|
|
|
|
_, err = io.Copy(writer, rc)
|
|
|
|
rc.Close()
|
|
|
|
return traceError(err)
|
|
|
|
}
|
|
|
|
|
|
|
|
// GetObjectInfo - reads blob metadata properties and replies back ObjectInfo,
|
|
|
|
// uses zure equivalent GetBlobProperties.
|
2017-05-15 03:52:33 -04:00
|
|
|
func (a *azureObjects) GetObjectInfo(bucket, object string) (objInfo ObjectInfo, err error) {
|
2017-09-28 18:23:46 -04:00
|
|
|
blob := a.client.GetContainerReference(bucket).GetBlobReference(object)
|
|
|
|
err = blob.GetProperties(nil)
|
2017-05-30 23:05:41 -04:00
|
|
|
if err != nil {
|
|
|
|
return objInfo, azureToObjectError(traceError(err), bucket, object)
|
|
|
|
}
|
|
|
|
|
2017-09-28 18:23:46 -04:00
|
|
|
meta := azurePropertiesToS3Meta(blob.Metadata, blob.Properties)
|
2017-03-16 15:21:58 -04:00
|
|
|
objInfo = ObjectInfo{
|
2017-09-28 18:23:46 -04:00
|
|
|
Bucket: bucket,
|
|
|
|
UserDefined: meta,
|
|
|
|
ETag: azureToS3ETag(blob.Properties.Etag),
|
|
|
|
ModTime: time.Time(blob.Properties.LastModified),
|
|
|
|
Name: object,
|
|
|
|
Size: blob.Properties.ContentLength,
|
|
|
|
ContentType: blob.Properties.ContentType,
|
|
|
|
ContentEncoding: blob.Properties.ContentEncoding,
|
2017-03-16 15:21:58 -04:00
|
|
|
}
|
2017-05-30 23:05:41 -04:00
|
|
|
return objInfo, nil
|
2017-03-16 15:21:58 -04:00
|
|
|
}
|
|
|
|
|
|
|
|
// PutObject - Create a new blob with the incoming data,
|
|
|
|
// uses Azure equivalent CreateBlockBlobFromReader.
|
2017-09-19 15:40:27 -04:00
|
|
|
func (a *azureObjects) PutObject(bucket, object string, data *HashReader, metadata map[string]string) (objInfo ObjectInfo, err error) {
|
2017-05-22 18:42:00 -04:00
|
|
|
delete(metadata, "etag")
|
2017-09-28 18:23:46 -04:00
|
|
|
blob := a.client.GetContainerReference(bucket).GetBlobReference(object)
|
|
|
|
blob.Metadata, blob.Properties = s3MetaToAzureProperties(metadata)
|
|
|
|
err = blob.CreateBlockBlobFromReader(data, nil)
|
2017-03-16 15:21:58 -04:00
|
|
|
if err != nil {
|
|
|
|
return objInfo, azureToObjectError(traceError(err), bucket, object)
|
|
|
|
}
|
2017-09-19 15:40:27 -04:00
|
|
|
if err = data.Verify(); err != nil {
|
2017-09-28 18:23:46 -04:00
|
|
|
errorIf(err, "Verification of uploaded object data failed against client provided checksums.")
|
|
|
|
derr := blob.Delete(nil)
|
|
|
|
errorIf(derr, "Failed to delete blob when cleaning up a bad blob upload.")
|
2017-09-19 15:40:27 -04:00
|
|
|
return ObjectInfo{}, azureToObjectError(traceError(err))
|
2017-03-16 15:21:58 -04:00
|
|
|
}
|
|
|
|
return a.GetObjectInfo(bucket, object)
|
|
|
|
}
|
|
|
|
|
|
|
|
// CopyObject - Copies a blob from source container to destination container.
|
|
|
|
// Uses Azure equivalent CopyBlob API.
|
2017-05-15 03:52:33 -04:00
|
|
|
func (a *azureObjects) CopyObject(srcBucket, srcObject, destBucket, destObject string, metadata map[string]string) (objInfo ObjectInfo, err error) {
|
2017-09-28 18:23:46 -04:00
|
|
|
srcBlobURL := a.client.GetContainerReference(srcBucket).GetBlobReference(srcObject).GetURL()
|
|
|
|
destBlob := a.client.GetContainerReference(destBucket).GetBlobReference(destObject)
|
2017-09-29 13:58:40 -04:00
|
|
|
azureMeta, props := s3MetaToAzureProperties(metadata)
|
|
|
|
destBlob.Metadata = azureMeta
|
2017-09-28 18:23:46 -04:00
|
|
|
err = destBlob.Copy(srcBlobURL, nil)
|
2017-03-16 15:21:58 -04:00
|
|
|
if err != nil {
|
|
|
|
return objInfo, azureToObjectError(traceError(err), srcBucket, srcObject)
|
|
|
|
}
|
2017-09-29 13:58:40 -04:00
|
|
|
destBlob.Properties = props
|
|
|
|
err = destBlob.SetProperties(nil)
|
|
|
|
if err != nil {
|
|
|
|
return objInfo, azureToObjectError(traceError(err), srcBucket, srcObject)
|
|
|
|
}
|
2017-03-16 15:21:58 -04:00
|
|
|
return a.GetObjectInfo(destBucket, destObject)
|
|
|
|
}
|
|
|
|
|
|
|
|
// DeleteObject - Deletes a blob on azure container, uses Azure
|
|
|
|
// equivalent DeleteBlob API.
|
2017-05-15 03:52:33 -04:00
|
|
|
func (a *azureObjects) DeleteObject(bucket, object string) error {
|
2017-09-28 18:23:46 -04:00
|
|
|
blob := a.client.GetContainerReference(bucket).GetBlobReference(object)
|
|
|
|
err := blob.Delete(nil)
|
2017-03-16 15:21:58 -04:00
|
|
|
if err != nil {
|
|
|
|
return azureToObjectError(traceError(err), bucket, object)
|
|
|
|
}
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
2017-09-19 19:08:08 -04:00
|
|
|
// ListMultipartUploads - It's decided not to support List Multipart Uploads, hence returning empty result.
func (a *azureObjects) ListMultipartUploads(bucket, prefix, keyMarker, uploadIDMarker, delimiter string, maxUploads int) (result ListMultipartsInfo, err error) {
	// It's decided not to support List Multipart Uploads, hence returning empty result.
	// An empty (non-error) result keeps S3 clients functional.
	return result, nil
}
|
|
|
|
|
2017-09-19 19:08:08 -04:00
|
|
|
// azureMultipartMetadata is the JSON document persisted in the hidden
// metadata object (see getAzureMetadataObjectName) for each in-progress
// multipart upload. It records the object name and the S3 user
// metadata to apply when the upload completes.
type azureMultipartMetadata struct {
	Name     string            `json:"name"`
	Metadata map[string]string `json:"metadata"`
}
|
|
|
|
|
|
|
|
func getAzureMetadataObjectName(objectName, uploadID string) string {
|
|
|
|
return fmt.Sprintf(metadataObjectNameTemplate, uploadID, sha256.Sum256([]byte(objectName)))
|
|
|
|
}
|
|
|
|
|
|
|
|
// checkUploadIDExists verifies that the hidden metadata object for the
// given upload ID exists. A missing metadata object is reported as
// InvalidUploadID; other Azure errors pass through translated.
func (a *azureObjects) checkUploadIDExists(bucketName, objectName, uploadID string) (err error) {
	blob := a.client.GetContainerReference(bucketName).GetBlobReference(
		getAzureMetadataObjectName(objectName, uploadID))
	err = blob.GetMetadata(nil)
	err = azureToObjectError(traceError(err), bucketName, objectName)
	// "Object not found" here means the upload ID itself is invalid.
	oerr := ObjectNotFound{bucketName, objectName}
	if errorCause(err) == oerr {
		err = traceError(InvalidUploadID{})
	}
	return err
}
|
|
|
|
|
2017-03-16 15:21:58 -04:00
|
|
|
// NewMultipartUpload - Use Azure equivalent CreateBlockBlob.
|
2017-05-15 03:52:33 -04:00
|
|
|
func (a *azureObjects) NewMultipartUpload(bucket, object string, metadata map[string]string) (uploadID string, err error) {
|
2017-03-16 15:21:58 -04:00
|
|
|
if metadata == nil {
|
|
|
|
metadata = make(map[string]string)
|
|
|
|
}
|
|
|
|
|
2017-09-19 19:08:08 -04:00
|
|
|
uploadID = mustGetAzureUploadID()
|
|
|
|
if err = a.checkUploadIDExists(bucket, object, uploadID); err == nil {
|
|
|
|
return "", traceError(errors.New("Upload ID name collision"))
|
|
|
|
}
|
|
|
|
metadataObject := getAzureMetadataObjectName(object, uploadID)
|
2017-03-16 15:21:58 -04:00
|
|
|
|
2017-09-19 19:08:08 -04:00
|
|
|
var jsonData []byte
|
|
|
|
if jsonData, err = json.Marshal(azureMultipartMetadata{Name: object, Metadata: metadata}); err != nil {
|
|
|
|
return "", traceError(err)
|
2017-03-16 15:21:58 -04:00
|
|
|
}
|
2017-09-05 19:56:23 -04:00
|
|
|
|
2017-09-28 18:23:46 -04:00
|
|
|
blob := a.client.GetContainerReference(bucket).GetBlobReference(metadataObject)
|
|
|
|
err = blob.CreateBlockBlobFromReader(bytes.NewBuffer(jsonData), nil)
|
2017-09-19 19:08:08 -04:00
|
|
|
if err != nil {
|
|
|
|
return "", azureToObjectError(traceError(err), bucket, metadataObject)
|
2017-03-16 15:21:58 -04:00
|
|
|
}
|
2017-09-05 19:56:23 -04:00
|
|
|
|
2017-09-19 19:08:08 -04:00
|
|
|
return uploadID, nil
|
|
|
|
}
|
|
|
|
|
2017-03-16 15:21:58 -04:00
|
|
|
// PutObjectPart - Use Azure equivalent PutBlockWithLength.
func (a *azureObjects) PutObjectPart(bucket, object, uploadID string, partID int, data *HashReader) (info PartInfo, err error) {
	if err = a.checkUploadIDExists(bucket, object, uploadID); err != nil {
		return info, err
	}

	if err = checkAzureUploadID(uploadID); err != nil {
		return info, err
	}

	// The client-supplied MD5 doubles as the part's ETag; without one,
	// a random ETag is generated so block IDs remain unique.
	etag := data.md5Sum
	if etag == "" {
		// Generate random ETag.
		etag = getMD5Hash([]byte(mustGetUUID()))
	}

	// A part larger than azureBlockSize is split into multiple
	// sequentially numbered sub-part blocks, each encoded into its
	// own block ID via azureGetBlockID.
	subPartSize, subPartNumber := int64(azureBlockSize), 1
	for remainingSize := data.Size(); remainingSize >= 0; remainingSize -= subPartSize {
		// Allow to create zero sized part.
		if remainingSize == 0 && subPartNumber > 1 {
			break
		}

		// Last sub-part carries whatever is left.
		if remainingSize < subPartSize {
			subPartSize = remainingSize
		}

		id := azureGetBlockID(partID, subPartNumber, uploadID, etag)
		blob := a.client.GetContainerReference(bucket).GetBlobReference(object)
		err = blob.PutBlockWithLength(id, uint64(subPartSize), io.LimitReader(data, subPartSize), nil)
		if err != nil {
			return info, azureToObjectError(traceError(err), bucket, object)
		}
		subPartNumber++
	}
	// Validate client-supplied checksums once all blocks are staged;
	// clean up the blob if they do not match.
	if err = data.Verify(); err != nil {
		errorIf(err, "Verification of uploaded object data failed against client provided checksums.")
		blob := a.client.GetContainerReference(bucket).GetBlobReference(object)
		derr := blob.Delete(nil)
		errorIf(derr, "Failed to delete blob when cleaning up a bad blob upload.")
		return info, azureToObjectError(traceError(err), bucket, object)
	}

	info.PartNumber = partID
	info.ETag = etag
	info.LastModified = UTCNow()
	info.Size = data.Size()
	return info, nil
}
|
|
|
|
|
|
|
|
// ListObjectParts - Use Azure equivalent GetBlockList.
|
2017-05-15 03:52:33 -04:00
|
|
|
func (a *azureObjects) ListObjectParts(bucket, object, uploadID string, partNumberMarker int, maxParts int) (result ListPartsInfo, err error) {
|
2017-09-19 19:08:08 -04:00
|
|
|
if err = a.checkUploadIDExists(bucket, object, uploadID); err != nil {
|
|
|
|
return result, err
|
2017-03-16 15:21:58 -04:00
|
|
|
}
|
|
|
|
|
2017-09-19 19:08:08 -04:00
|
|
|
// It's decided not to support List Object Parts, hence returning empty result.
|
2017-03-16 15:21:58 -04:00
|
|
|
return result, nil
|
|
|
|
}
|
|
|
|
|
|
|
|
// AbortMultipartUpload - Not Implemented.
|
|
|
|
// There is no corresponding API in azure to abort an incomplete upload. The uncommmitted blocks
|
|
|
|
// gets deleted after one week.
|
2017-09-19 19:08:08 -04:00
|
|
|
func (a *azureObjects) AbortMultipartUpload(bucket, object, uploadID string) (err error) {
|
|
|
|
if err = a.checkUploadIDExists(bucket, object, uploadID); err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
|
2017-09-28 18:23:46 -04:00
|
|
|
blob := a.client.GetContainerReference(bucket).GetBlobReference(
|
|
|
|
getAzureMetadataObjectName(object, uploadID))
|
|
|
|
return blob.Delete(nil)
|
2017-03-16 15:21:58 -04:00
|
|
|
}
|
|
|
|
|
|
|
|
// CompleteMultipartUpload - Use Azure equivalent PutBlockList.
|
2017-05-15 03:52:33 -04:00
|
|
|
func (a *azureObjects) CompleteMultipartUpload(bucket, object, uploadID string, uploadedParts []completePart) (objInfo ObjectInfo, err error) {
|
2017-09-19 19:08:08 -04:00
|
|
|
metadataObject := getAzureMetadataObjectName(object, uploadID)
|
|
|
|
if err = a.checkUploadIDExists(bucket, object, uploadID); err != nil {
|
|
|
|
return objInfo, err
|
|
|
|
}
|
|
|
|
|
|
|
|
if err = checkAzureUploadID(uploadID); err != nil {
|
|
|
|
return objInfo, err
|
|
|
|
}
|
|
|
|
|
|
|
|
var metadataReader io.Reader
|
2017-09-28 18:23:46 -04:00
|
|
|
blob := a.client.GetContainerReference(bucket).GetBlobReference(metadataObject)
|
|
|
|
if metadataReader, err = blob.Get(nil); err != nil {
|
2017-09-19 19:08:08 -04:00
|
|
|
return objInfo, azureToObjectError(traceError(err), bucket, metadataObject)
|
|
|
|
}
|
|
|
|
|
|
|
|
var metadata azureMultipartMetadata
|
|
|
|
if err = json.NewDecoder(metadataReader).Decode(&metadata); err != nil {
|
|
|
|
return objInfo, azureToObjectError(traceError(err), bucket, metadataObject)
|
2017-03-16 15:21:58 -04:00
|
|
|
}
|
2017-09-05 19:56:23 -04:00
|
|
|
|
2017-09-19 19:08:08 -04:00
|
|
|
defer func() {
|
|
|
|
if err != nil {
|
|
|
|
return
|
|
|
|
}
|
|
|
|
|
2017-09-28 18:23:46 -04:00
|
|
|
blob := a.client.GetContainerReference(bucket).GetBlobReference(metadataObject)
|
|
|
|
derr := blob.Delete(nil)
|
2017-09-19 19:08:08 -04:00
|
|
|
errorIf(derr, "unable to remove meta data object for upload ID %s", uploadID)
|
|
|
|
}()
|
|
|
|
|
2017-09-28 18:23:46 -04:00
|
|
|
objBlob := a.client.GetContainerReference(bucket).GetBlobReference(object)
|
|
|
|
resp, err := objBlob.GetBlockList(storage.BlockListTypeUncommitted, nil)
|
2017-09-05 19:56:23 -04:00
|
|
|
if err != nil {
|
|
|
|
return objInfo, azureToObjectError(traceError(err), bucket, object)
|
2017-03-16 15:21:58 -04:00
|
|
|
}
|
2017-09-05 19:56:23 -04:00
|
|
|
|
|
|
|
getBlocks := func(partNumber int, etag string) (blocks []storage.Block, size int64, err error) {
|
|
|
|
for _, part := range resp.UncommittedBlocks {
|
|
|
|
var partID int
|
2017-09-19 19:08:08 -04:00
|
|
|
var readUploadID string
|
2017-09-05 19:56:23 -04:00
|
|
|
var md5Hex string
|
2017-09-19 19:08:08 -04:00
|
|
|
if partID, _, readUploadID, md5Hex, err = azureParseBlockID(part.Name); err != nil {
|
2017-09-05 19:56:23 -04:00
|
|
|
return nil, 0, err
|
|
|
|
}
|
|
|
|
|
2017-09-19 19:08:08 -04:00
|
|
|
if partNumber == partID && uploadID == readUploadID && etag == md5Hex {
|
2017-09-05 19:56:23 -04:00
|
|
|
blocks = append(blocks, storage.Block{
|
|
|
|
ID: part.Name,
|
|
|
|
Status: storage.BlockStatusUncommitted,
|
|
|
|
})
|
|
|
|
|
|
|
|
size += part.Size
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
if len(blocks) == 0 {
|
|
|
|
return nil, 0, InvalidPart{}
|
|
|
|
}
|
|
|
|
|
|
|
|
return blocks, size, nil
|
|
|
|
}
|
|
|
|
|
|
|
|
var allBlocks []storage.Block
|
|
|
|
partSizes := make([]int64, len(uploadedParts))
|
|
|
|
for i, part := range uploadedParts {
|
|
|
|
var blocks []storage.Block
|
|
|
|
var size int64
|
|
|
|
blocks, size, err = getBlocks(part.PartNumber, part.ETag)
|
|
|
|
if err != nil {
|
|
|
|
return objInfo, traceError(err)
|
|
|
|
}
|
|
|
|
|
|
|
|
allBlocks = append(allBlocks, blocks...)
|
|
|
|
partSizes[i] = size
|
|
|
|
}
|
|
|
|
|
|
|
|
// Error out if parts except last part sizing < 5MiB.
|
|
|
|
for i, size := range partSizes[:len(partSizes)-1] {
|
|
|
|
if size < globalMinPartSize {
|
|
|
|
return objInfo, traceError(PartTooSmall{
|
|
|
|
PartNumber: uploadedParts[i].PartNumber,
|
|
|
|
PartSize: size,
|
|
|
|
PartETag: uploadedParts[i].ETag,
|
|
|
|
})
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2017-09-28 18:23:46 -04:00
|
|
|
err = objBlob.PutBlockList(allBlocks, nil)
|
2017-03-16 15:21:58 -04:00
|
|
|
if err != nil {
|
|
|
|
return objInfo, azureToObjectError(traceError(err), bucket, object)
|
|
|
|
}
|
2017-09-28 18:23:46 -04:00
|
|
|
if len(metadata.Metadata) > 0 {
|
|
|
|
objBlob.Metadata, objBlob.Properties = s3MetaToAzureProperties(metadata.Metadata)
|
|
|
|
err = objBlob.SetProperties(nil)
|
2017-03-16 15:21:58 -04:00
|
|
|
if err != nil {
|
|
|
|
return objInfo, azureToObjectError(traceError(err), bucket, object)
|
|
|
|
}
|
2017-09-28 18:23:46 -04:00
|
|
|
err = objBlob.SetMetadata(nil)
|
2017-05-30 23:05:41 -04:00
|
|
|
if err != nil {
|
|
|
|
return objInfo, azureToObjectError(traceError(err), bucket, object)
|
|
|
|
}
|
2017-03-16 15:21:58 -04:00
|
|
|
}
|
|
|
|
return a.GetObjectInfo(bucket, object)
|
|
|
|
}
|
|
|
|
|
|
|
|
// SetBucketPolicies - Azure supports three types of container policies:
|
|
|
|
// storage.ContainerAccessTypeContainer - readonly in minio terminology
|
|
|
|
// storage.ContainerAccessTypeBlob - readonly without listing in minio terminology
|
|
|
|
// storage.ContainerAccessTypePrivate - none in minio terminology
|
|
|
|
// As the common denominator for minio and azure is readonly and none, we support
|
|
|
|
// these two policies at the bucket level.
|
2017-05-15 03:52:33 -04:00
|
|
|
func (a *azureObjects) SetBucketPolicies(bucket string, policyInfo policy.BucketAccessPolicy) error {
|
2017-05-02 15:27:25 -04:00
|
|
|
var policies []BucketAccessPolicy
|
|
|
|
|
|
|
|
for prefix, policy := range policy.GetPolicies(policyInfo.Statements, bucket) {
|
|
|
|
policies = append(policies, BucketAccessPolicy{
|
|
|
|
Prefix: prefix,
|
|
|
|
Policy: policy,
|
|
|
|
})
|
|
|
|
}
|
|
|
|
prefix := bucket + "/*" // For all objects inside the bucket.
|
|
|
|
if len(policies) != 1 {
|
|
|
|
return traceError(NotImplemented{})
|
|
|
|
}
|
|
|
|
if policies[0].Prefix != prefix {
|
|
|
|
return traceError(NotImplemented{})
|
|
|
|
}
|
|
|
|
if policies[0].Policy != policy.BucketPolicyReadOnly {
|
|
|
|
return traceError(NotImplemented{})
|
|
|
|
}
|
|
|
|
perm := storage.ContainerPermissions{
|
|
|
|
AccessType: storage.ContainerAccessTypeContainer,
|
|
|
|
AccessPolicies: nil,
|
|
|
|
}
|
2017-09-28 18:23:46 -04:00
|
|
|
container := a.client.GetContainerReference(bucket)
|
|
|
|
err := container.SetPermissions(perm, nil)
|
2017-05-02 15:27:25 -04:00
|
|
|
return azureToObjectError(traceError(err), bucket)
|
2017-03-16 15:21:58 -04:00
|
|
|
}
|
|
|
|
|
|
|
|
// GetBucketPolicies - Get the container ACL and convert it to canonical []bucketAccessPolicy
|
2017-05-15 03:52:33 -04:00
|
|
|
func (a *azureObjects) GetBucketPolicies(bucket string) (policy.BucketAccessPolicy, error) {
|
2017-05-02 15:27:25 -04:00
|
|
|
policyInfo := policy.BucketAccessPolicy{Version: "2012-10-17"}
|
2017-09-28 18:23:46 -04:00
|
|
|
container := a.client.GetContainerReference(bucket)
|
|
|
|
perm, err := container.GetPermissions(nil)
|
2017-05-02 15:27:25 -04:00
|
|
|
if err != nil {
|
|
|
|
return policy.BucketAccessPolicy{}, azureToObjectError(traceError(err), bucket)
|
|
|
|
}
|
|
|
|
switch perm.AccessType {
|
|
|
|
case storage.ContainerAccessTypePrivate:
|
2017-08-07 01:24:40 -04:00
|
|
|
return policy.BucketAccessPolicy{}, traceError(PolicyNotFound{Bucket: bucket})
|
2017-05-02 15:27:25 -04:00
|
|
|
case storage.ContainerAccessTypeContainer:
|
|
|
|
policyInfo.Statements = policy.SetPolicy(policyInfo.Statements, policy.BucketPolicyReadOnly, bucket, "")
|
|
|
|
default:
|
|
|
|
return policy.BucketAccessPolicy{}, azureToObjectError(traceError(NotImplemented{}))
|
|
|
|
}
|
|
|
|
return policyInfo, nil
|
2017-03-16 15:21:58 -04:00
|
|
|
}
|
|
|
|
|
|
|
|
// DeleteBucketPolicies - Set the container ACL to "private"
|
2017-05-15 03:52:33 -04:00
|
|
|
func (a *azureObjects) DeleteBucketPolicies(bucket string) error {
|
2017-05-02 15:27:25 -04:00
|
|
|
perm := storage.ContainerPermissions{
|
|
|
|
AccessType: storage.ContainerAccessTypePrivate,
|
|
|
|
AccessPolicies: nil,
|
|
|
|
}
|
2017-09-28 18:23:46 -04:00
|
|
|
container := a.client.GetContainerReference(bucket)
|
|
|
|
err := container.SetPermissions(perm, nil)
|
2017-05-02 15:27:25 -04:00
|
|
|
return azureToObjectError(traceError(err))
|
2017-03-16 15:21:58 -04:00
|
|
|
}
|