Implement gateway S3 support (#3940)

Remco Verhoef 2017-04-27 20:26:00 +02:00 committed by Harshavardhana
parent 57c5c75611
commit 3a539ce660
61 changed files with 9607 additions and 199 deletions


@ -114,6 +114,7 @@ const (
ErrInvalidQueryParams
ErrBucketAlreadyOwnedByYou
ErrInvalidDuration
ErrNotSupported
// Add new error codes here.
// Bucket notification related errors.
@ -666,6 +667,8 @@ func toAPIErrorCode(err error) (apiErr APIErrorCode) {
apiErr = ErrInvalidBucketName
case BucketNotFound:
apiErr = ErrNoSuchBucket
case BucketAlreadyOwnedByYou:
apiErr = ErrBucketAlreadyOwnedByYou
case BucketNotEmpty:
apiErr = ErrBucketNotEmpty
case BucketExists:
@ -698,8 +701,12 @@ func toAPIErrorCode(err error) (apiErr APIErrorCode) {
apiErr = ErrEntityTooLarge
case ObjectTooSmall:
apiErr = ErrEntityTooSmall
case NotSupported:
apiErr = ErrNotSupported
case NotImplemented:
apiErr = ErrNotImplemented
case PolicyNotFound:
apiErr = ErrNoSuchBucketPolicy
default:
apiErr = ErrInternalError
}
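For context, toAPIErrorCode picks the S3 API error code by type-switching on the concrete error value. A minimal, self-contained sketch of that pattern, using simplified stand-ins for the internal cmd types and eliding the traceError/errorCause wrapping:

```go
package main

import "fmt"

// NotSupported mirrors the new gateway error type added above.
type NotSupported struct{}

func (NotSupported) Error() string { return "Not Supported" }

// APIErrorCode stands in for the cmd package's error code enum.
type APIErrorCode int

const (
	ErrNone APIErrorCode = iota
	ErrNotSupported
	ErrInternalError
)

// toAPIErrorCode maps typed errors onto API error codes and falls
// back to an internal error, like the switch in the hunk above.
func toAPIErrorCode(err error) APIErrorCode {
	switch err.(type) {
	case NotSupported:
		return ErrNotSupported
	default:
		return ErrInternalError
	}
}

func main() {
	fmt.Println(toAPIErrorCode(NotSupported{}) == ErrNotSupported) // true
}
```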


@ -103,6 +103,10 @@ func TestAPIErrCode(t *testing.T) {
StorageFull{},
ErrStorageFull,
},
{
NotSupported{},
ErrNotSupported,
},
{
NotImplemented{},
ErrNotImplemented,


@ -154,6 +154,12 @@ func (a AzureObjects) StorageInfo() StorageInfo {
// MakeBucket - Create a new container on azure backend.
func (a AzureObjects) MakeBucket(bucket string) error {
// Never called in gateway mode; exists only to satisfy the ObjectLayer interface.
return traceError(NotImplemented{})
}
// MakeBucketWithLocation - Create a new container on azure backend.
func (a AzureObjects) MakeBucketWithLocation(bucket, location string) error {
err := a.client.CreateContainer(bucket, storage.ContainerAccessTypePrivate)
return azureToObjectError(traceError(err), bucket)
}
@ -592,46 +598,16 @@ func azureListBlobsGetParameters(p storage.ListBlobsParameters) url.Values {
// storage.ContainerAccessTypePrivate - none in minio terminology
// As the common denominator for minio and azure is readonly and none, we support
// these two policies at the bucket level.
func (a AzureObjects) SetBucketPolicies(bucket string, policies []BucketAccessPolicy) error {
prefix := bucket + "/*" // For all objects inside the bucket.
if len(policies) != 1 {
return traceError(NotImplemented{})
}
if policies[0].Prefix != prefix {
return traceError(NotImplemented{})
}
if policies[0].Policy != policy.BucketPolicyReadOnly {
return traceError(NotImplemented{})
}
perm := storage.ContainerPermissions{
AccessType: storage.ContainerAccessTypeContainer,
AccessPolicies: nil,
}
err := a.client.SetContainerPermissions(bucket, perm, 0, "")
return azureToObjectError(traceError(err), bucket)
func (a AzureObjects) SetBucketPolicies(bucket string, policyInfo policy.BucketAccessPolicy) error {
return traceError(NotSupported{})
}
// GetBucketPolicies - Not supported on the Azure gateway.
func (a AzureObjects) GetBucketPolicies(bucket string) ([]BucketAccessPolicy, error) {
perm, err := a.client.GetContainerPermissions(bucket, 0, "")
if err != nil {
return nil, azureToObjectError(traceError(err), bucket)
}
switch perm.AccessType {
case storage.ContainerAccessTypePrivate:
return nil, nil
case storage.ContainerAccessTypeContainer:
return []BucketAccessPolicy{{"", policy.BucketPolicyReadOnly}}, nil
}
return nil, azureToObjectError(traceError(NotImplemented{}))
func (a AzureObjects) GetBucketPolicies(bucket string) (policy.BucketAccessPolicy, error) {
return policy.BucketAccessPolicy{}, traceError(NotSupported{})
}
// DeleteBucketPolicies - Not supported on the Azure gateway.
func (a AzureObjects) DeleteBucketPolicies(bucket string) error {
perm := storage.ContainerPermissions{
AccessType: storage.ContainerAccessTypePrivate,
AccessPolicies: nil,
}
err := a.client.SetContainerPermissions(bucket, perm, 0, "")
return azureToObjectError(traceError(err))
return traceError(NotSupported{})
}


@ -17,7 +17,6 @@
package cmd
import (
"bytes"
"io"
"io/ioutil"
"net/http"
@ -354,37 +353,13 @@ func (api gatewayAPIHandlers) PutBucketPolicyHandler(w http.ResponseWriter, r *h
return
}
{
// FIXME: consolidate bucketPolicy and policy.BucketAccessPolicy so that
// the verification below is done on the same type.
// Parse bucket policy.
policyInfo := &bucketPolicy{}
err = parseBucketPolicy(bytes.NewReader(policyBytes), policyInfo)
if err != nil {
errorIf(err, "Unable to parse bucket policy.")
writeErrorResponse(w, ErrInvalidPolicyDocument, r.URL)
return
}
// Parse check bucket policy.
if s3Error := checkBucketPolicyResources(bucket, policyInfo); s3Error != ErrNone {
writeErrorResponse(w, toAPIErrorCode(err), r.URL)
return
}
}
policyInfo := &policy.BucketAccessPolicy{}
if err = json.Unmarshal(policyBytes, policyInfo); err != nil {
policyInfo := policy.BucketAccessPolicy{}
if err = json.Unmarshal(policyBytes, &policyInfo); err != nil {
writeErrorResponse(w, toAPIErrorCode(err), r.URL)
return
}
var policies []BucketAccessPolicy
for prefix, policy := range policy.GetPolicies(policyInfo.Statements, bucket) {
policies = append(policies, BucketAccessPolicy{
Prefix: prefix,
Policy: policy,
})
}
if err = objAPI.SetBucketPolicies(bucket, policies); err != nil {
if err = objAPI.SetBucketPolicies(bucket, policyInfo); err != nil {
writeErrorResponse(w, toAPIErrorCode(err), r.URL)
return
}
@ -453,17 +428,14 @@ func (api gatewayAPIHandlers) GetBucketPolicyHandler(w http.ResponseWriter, r *h
return
}
policies, err := objAPI.GetBucketPolicies(bucket)
bp, err := objAPI.GetBucketPolicies(bucket)
if err != nil {
errorIf(err, "Unable to read bucket policy.")
writeErrorResponse(w, toAPIErrorCode(err), r.URL)
return
}
policyInfo := policy.BucketAccessPolicy{Version: "2012-10-17"}
for _, p := range policies {
policyInfo.Statements = policy.SetPolicy(policyInfo.Statements, p.Policy, bucket, p.Prefix)
}
policyBytes, err := json.Marshal(&policyInfo)
policyBytes, err := json.Marshal(bp)
if err != nil {
errorIf(err, "Unable to read bucket policy.")
writeErrorResponse(w, toAPIErrorCode(err), r.URL)
@ -499,6 +471,65 @@ func (api gatewayAPIHandlers) ListenBucketNotificationHandler(w http.ResponseWri
writeErrorResponse(w, ErrNotImplemented, r.URL)
}
// PutBucketHandler - PUT Bucket
// ----------
// This implementation of the PUT operation creates a new bucket for an authenticated request.
func (api gatewayAPIHandlers) PutBucketHandler(w http.ResponseWriter, r *http.Request) {
objectAPI := api.ObjectAPI()
if objectAPI == nil {
writeErrorResponse(w, ErrServerNotInitialized, r.URL)
return
}
// PutBucket does not have any bucket action.
s3Error := checkRequestAuthType(r, "", "", globalMinioDefaultRegion)
if s3Error == ErrInvalidRegion {
// Clients like boto3 sign the putBucket() call with their configured region.
s3Error = checkRequestAuthType(r, "", "", serverConfig.GetRegion())
}
if s3Error != ErrNone {
writeErrorResponse(w, s3Error, r.URL)
return
}
vars := router.Vars(r)
bucket := vars["bucket"]
// Validate the incoming location constraint and reject requests
// that do not meet the region requirements.
location, s3Error := parseLocationConstraint(r)
if s3Error != ErrNone {
writeErrorResponse(w, s3Error, r.URL)
return
}
// Validate only the region here: parseLocationConstraint has already
// consumed the request body, so isValidLocationConstraint (which reads
// the body) cannot be called again.
serverRegion := serverConfig.GetRegion()
if serverRegion != location {
writeErrorResponse(w, ErrInvalidRegion, r.URL)
return
}
bucketLock := globalNSMutex.NewNSLock(bucket, "")
bucketLock.Lock()
defer bucketLock.Unlock()
// Proceed to creating a bucket.
err := objectAPI.MakeBucketWithLocation(bucket, location)
if err != nil {
errorIf(err, "Unable to create a bucket.")
writeErrorResponse(w, toAPIErrorCode(err), r.URL)
return
}
// Make sure to add Location information here only for bucket
w.Header().Set("Location", getLocation(r))
writeSuccessResponseHeadersOnly(w)
}
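The handler's externally visible behavior is a Location header plus an empty 200 response. A hedged sketch with net/http/httptest, using a stripped-down stand-in handler (auth, region validation, and locking elided):

```go
package main

import (
	"fmt"
	"net/http"
	"net/http/httptest"
)

// putBucket is a toy stand-in for PutBucketHandler: it sets the
// Location header and replies with headers only.
func putBucket(w http.ResponseWriter, r *http.Request) {
	w.Header().Set("Location", r.URL.Path)
	w.WriteHeader(http.StatusOK)
}

func main() {
	req := httptest.NewRequest("PUT", "/mybucket", nil)
	rec := httptest.NewRecorder()
	putBucket(rec, req)
	fmt.Println(rec.Code, rec.Header().Get("Location")) // 200 /mybucket
}
```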
// DeleteBucketHandler - Delete bucket
func (api gatewayAPIHandlers) DeleteBucketHandler(w http.ResponseWriter, r *http.Request) {
objectAPI := api.ObjectAPI()


@ -61,7 +61,13 @@ var gatewayCmd = cli.Command{
Flags: append(serverFlags, cli.BoolFlag{
Name: "quiet",
Usage: "Disable startup banner.",
}),
},
cli.StringFlag{
Name: "endpoint",
Usage: "The endpoint.",
Value: "https://s3.amazonaws.com/",
},
),
HideHelpCommand: true,
}
@ -70,6 +76,7 @@ type gatewayBackend string
const (
azureBackend gatewayBackend = "azure"
s3Backend gatewayBackend = "s3"
// Add more backends here.
)
@ -89,11 +96,15 @@ func mustGetGatewayCredsFromEnv() (accessKey, secretKey string) {
//
// - Azure Blob Storage.
// - Add your favorite backend here.
func newGatewayLayer(backendType, endPoint, accessKey, secretKey string, secure bool) (GatewayLayer, error) {
if gatewayBackend(backendType) != azureBackend {
return nil, fmt.Errorf("Unrecognized backend type %s", backendType)
func newGatewayLayer(backendType, endpoint, accessKey, secretKey string, secure bool) (GatewayLayer, error) {
switch gatewayBackend(backendType) {
case azureBackend:
return newAzureLayer(endpoint, accessKey, secretKey, secure)
case s3Backend:
return newS3Gateway(endpoint, accessKey, secretKey, secure)
}
return newAzureLayer(endPoint, accessKey, secretKey, secure)
return nil, fmt.Errorf("Unrecognized backend type %s", backendType)
}
// Initialize a new gateway config.
@ -130,6 +141,7 @@ func parseGatewayEndpoint(arg string) (endPoint string, secure bool, err error)
// Default connection will be "secure".
arg = "https://" + arg
}
u, err := url.Parse(arg)
if err != nil {
return "", false, err
@ -230,6 +242,8 @@ func gatewayMain(ctx *cli.Context) {
mode := ""
if gatewayBackend(backendType) == azureBackend {
mode = globalMinioModeGatewayAzure
} else if gatewayBackend(backendType) == s3Backend {
mode = globalMinioModeGatewayS3
}
checkUpdate(mode)
apiEndpoints := getAPIEndpoints(apiServer.Addr)
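The parseGatewayEndpoint hunk above defaults to HTTPS when the argument carries no scheme. A self-contained sketch of that behavior (a simplified parseEndpoint, not the real function, which performs further validation):

```go
package main

import (
	"fmt"
	"net/url"
	"strings"
)

// parseEndpoint prepends "https://" when no scheme is supplied, then
// reports the host and whether the connection is secure.
func parseEndpoint(arg string) (endPoint string, secure bool, err error) {
	if !strings.Contains(arg, "://") {
		// Default connection will be "secure".
		arg = "https://" + arg
	}
	u, err := url.Parse(arg)
	if err != nil {
		return "", false, err
	}
	return u.Host, u.Scheme == "https", nil
}

func main() {
	fmt.Println(parseEndpoint("s3.amazonaws.com")) // s3.amazonaws.com true <nil>
}
```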


@ -20,15 +20,19 @@ import (
"io"
router "github.com/gorilla/mux"
"github.com/minio/minio-go/pkg/policy"
)
// GatewayLayer - Interface to implement gateway mode.
type GatewayLayer interface {
ObjectLayer
MakeBucketWithLocation(bucket, location string) error
AnonGetObject(bucket, object string, startOffset int64, length int64, writer io.Writer) (err error)
AnonGetObjectInfo(bucket, object string) (objInfo ObjectInfo, err error)
SetBucketPolicies(string, []BucketAccessPolicy) error
GetBucketPolicies(string) ([]BucketAccessPolicy, error)
SetBucketPolicies(string, policy.BucketAccessPolicy) error
GetBucketPolicies(string) (policy.BucketAccessPolicy, error)
DeleteBucketPolicies(string) error
AnonListObjects(bucket, prefix, marker, delimiter string, maxKeys int) (result ListObjectsInfo, err error)
AnonGetBucketInfo(bucket string) (bucketInfo BucketInfo, err error)
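Both the Azure layer and the new S3 layer plug in behind this interface. A toy, self-contained sketch of the idea, with an illustrative two-method interface standing in for the real GatewayLayer (which also embeds ObjectLayer):

```go
package main

import "fmt"

// gateway is an illustrative, cut-down analog of GatewayLayer.
type gateway interface {
	MakeBucketWithLocation(bucket, location string) error
	DeleteBucketPolicies(bucket string) error
}

// fakeBackend satisfies the interface the way s3Gateway and
// AzureObjects satisfy GatewayLayer.
type fakeBackend struct{}

func (fakeBackend) MakeBucketWithLocation(bucket, location string) error {
	fmt.Printf("create %q in %s\n", bucket, location)
	return nil
}

func (fakeBackend) DeleteBucketPolicies(bucket string) error { return nil }

func main() {
	var g gateway = fakeBackend{}
	_ = g.MakeBucketWithLocation("mybucket", "us-east-1")
}
```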


@ -39,6 +39,7 @@ const (
globalMinioModeXL = "mode-server-xl"
globalMinioModeDistXL = "mode-server-distributed-xl"
globalMinioModeGatewayAzure = "mode-gateway-azure"
globalMinioModeGatewayS3 = "mode-gateway-s3"
// Add new global values here.
)


@ -221,6 +221,30 @@ type ListObjectsInfo struct {
Prefixes []string
}
// ListObjectsV2Info - container for list objects version 2.
type ListObjectsV2Info struct {
// Indicates whether the returned list objects response is truncated. A
// value of true indicates that the list was truncated. The list can be truncated
// if the number of objects exceeds the limit allowed or specified
// by max keys.
IsTruncated bool
// When response is truncated (the IsTruncated element value in the response
// is true), you can use the key name in this field as marker in the subsequent
// request to get next set of objects.
//
// NOTE: This element is returned only if you have delimiter request parameter
// specified.
ContinuationToken string
NextContinuationToken string
// List of objects info for this request.
Objects []ObjectInfo
// List of prefixes for this request.
Prefixes []string
}
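The token fields drive V2-style pagination: a caller feeds NextContinuationToken back into the next request until IsTruncated is false. A self-contained illustration with a stubbed listing function standing in for the backend call:

```go
package main

import "fmt"

// page carries just the ListObjectsV2Info fields needed for paging.
type page struct {
	Objects               []string
	IsTruncated           bool
	NextContinuationToken string
}

// list is a stand-in for a backend ListObjectsV2 call.
func list(token string) page {
	pages := map[string]page{
		"":   {Objects: []string{"a", "b"}, IsTruncated: true, NextContinuationToken: "t1"},
		"t1": {Objects: []string{"c"}},
	}
	return pages[token]
}

func main() {
	for token := ""; ; {
		p := list(token)
		fmt.Println(p.Objects)
		if !p.IsTruncated {
			break
		}
		token = p.NextContinuationToken
	}
}
```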
// PartInfo - represents individual part metadata.
type PartInfo struct {
// Part number that identifies the part. This is a positive integer between


@ -144,6 +144,13 @@ func (e BucketNotFound) Error() string {
return "Bucket not found: " + e.Bucket
}
// BucketAlreadyOwnedByYou already owned by you.
type BucketAlreadyOwnedByYou GenericError
func (e BucketAlreadyOwnedByYou) Error() string {
return "Bucket already owned by you: " + e.Bucket
}
// BucketNotEmpty bucket is not empty.
type BucketNotEmpty GenericError
@ -321,6 +328,13 @@ func (e NotImplemented) Error() string {
return "Not Implemented"
}
// NotSupported If a feature is not supported
type NotSupported struct{}
func (e NotSupported) Error() string {
return "Not Supported"
}
// PolicyNesting - policy nesting conflict.
type PolicyNesting struct{}
@ -328,6 +342,13 @@ func (e PolicyNesting) Error() string {
return "New bucket policy conflicts with an existing policy. Please try again with new prefix."
}
// PolicyNotFound - policy not found
type PolicyNotFound GenericError
func (e PolicyNotFound) Error() string {
return "Policy not found"
}
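These definitions follow one pattern: a shared GenericError struct carries bucket/object context, and each named error is declared as a conversion of it so that it gets its own identity and Error() method. A self-contained illustration:

```go
package main

import "fmt"

// GenericError mirrors the shared context struct used above.
type GenericError struct {
	Bucket string
	Object string
}

// BucketAlreadyOwnedByYou reuses GenericError's fields but carries
// its own message, exactly as in the definitions above.
type BucketAlreadyOwnedByYou GenericError

func (e BucketAlreadyOwnedByYou) Error() string {
	return "Bucket already owned by you: " + e.Bucket
}

func main() {
	var err error = BucketAlreadyOwnedByYou{Bucket: "mybucket"}
	fmt.Println(err) // Bucket already owned by you: mybucket
}
```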
// Check if error type is IncompleteBody.
func isErrIncompleteBody(err error) bool {
err = errorCause(err)

cmd/s3-layer-anonymous.go (new file, 83 lines)

@ -0,0 +1,83 @@
/*
* Minio Cloud Storage, (C) 2017 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package cmd
import "io"
// AnonGetObject - Get object anonymously
func (l *s3Gateway) AnonGetObject(bucket string, key string, startOffset int64, length int64, writer io.Writer) error {
object, err := l.anonClient.GetObject(bucket, key)
if err != nil {
return s3ToObjectError(traceError(err), bucket, key)
}
defer object.Close()
if _, err := object.Seek(startOffset, io.SeekStart); err != nil {
return s3ToObjectError(traceError(err), bucket, key)
}
if _, err := io.CopyN(writer, object, length); err != nil {
return s3ToObjectError(traceError(err), bucket, key)
}
return nil
}
// AnonGetObjectInfo - Get object info anonymously
func (l *s3Gateway) AnonGetObjectInfo(bucket string, object string) (ObjectInfo, error) {
oi, err := l.anonClient.StatObject(bucket, object)
if err != nil {
return ObjectInfo{}, s3ToObjectError(traceError(err), bucket, object)
}
return fromMinioClientObjectInfo(bucket, oi), nil
}
// AnonListObjects - List objects anonymously
func (l *s3Gateway) AnonListObjects(bucket string, prefix string, marker string, delimiter string, maxKeys int) (ListObjectsInfo, error) {
result, err := l.anonClient.ListObjects(bucket, prefix, marker, delimiter, maxKeys)
if err != nil {
return ListObjectsInfo{}, s3ToObjectError(traceError(err), bucket)
}
return fromMinioClientListBucketResult(bucket, result), nil
}
// AnonGetBucketInfo - Get bucket metadata anonymously.
func (l *s3Gateway) AnonGetBucketInfo(bucket string) (BucketInfo, error) {
if exists, err := l.anonClient.BucketExists(bucket); err != nil {
return BucketInfo{}, s3ToObjectError(traceError(err), bucket)
} else if !exists {
return BucketInfo{}, traceError(BucketNotFound{Bucket: bucket})
}
buckets, err := l.anonClient.ListBuckets()
if err != nil {
return BucketInfo{}, s3ToObjectError(traceError(err), bucket)
}
for _, bi := range buckets {
if bi.Name != bucket {
continue
}
return BucketInfo{
Name: bi.Name,
Created: bi.CreationDate,
}, nil
}
return BucketInfo{}, traceError(BucketNotFound{Bucket: bucket})
}


@ -0,0 +1,42 @@
/*
* Minio Cloud Storage, (C) 2017 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package cmd
// HealBucket - Not relevant.
func (l *s3Gateway) HealBucket(bucket string) error {
return traceError(NotImplemented{})
}
// ListBucketsHeal - Not relevant.
func (l *s3Gateway) ListBucketsHeal() (buckets []BucketInfo, err error) {
return []BucketInfo{}, traceError(NotImplemented{})
}
// HealObject - Not relevant.
func (l *s3Gateway) HealObject(bucket string, object string) (int, int, error) {
return 0, 0, traceError(NotImplemented{})
}
// ListObjectsHeal - Not relevant.
func (l *s3Gateway) ListObjectsHeal(bucket string, prefix string, marker string, delimiter string, maxKeys int) (ListObjectsInfo, error) {
return ListObjectsInfo{}, traceError(NotImplemented{})
}
// ListUploadsHeal - Not relevant.
func (l *s3Gateway) ListUploadsHeal(bucket string, prefix string, marker string, uploadIDMarker string, delimiter string, maxUploads int) (ListMultipartsInfo, error) {
return ListMultipartsInfo{}, traceError(NotImplemented{})
}

cmd/s3-layer.go (new file, 571 lines)

@ -0,0 +1,571 @@
/*
* Minio Cloud Storage, (C) 2017 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package cmd
import (
"crypto/sha256"
"hash"
"io"
"net/http"
"path"
"encoding/hex"
minio "github.com/minio/minio-go"
"github.com/minio/minio-go/pkg/policy"
)
// Convert Minio errors to minio object layer errors.
func s3ToObjectError(err error, params ...string) error {
if err == nil {
return nil
}
e, ok := err.(*Error)
if !ok {
// Code should be fixed if this function is called without doing traceError()
// Else handling different situations in this function makes this function complicated.
errorIf(err, "Expected type *Error")
return err
}
err = e.e
bucket := ""
object := ""
if len(params) >= 1 {
bucket = params[0]
}
if len(params) == 2 {
object = params[1]
}
minioErr, ok := err.(minio.ErrorResponse)
if !ok {
// We don't interpret non-Minio errors. Minio errors carry a
// StatusCode that helps convert them to object errors.
return e
}
switch minioErr.Code {
case "BucketAlreadyOwnedByYou":
err = BucketAlreadyOwnedByYou{}
case "BucketNotEmpty":
err = BucketNotEmpty{}
case "NoSuchBucketPolicy":
err = PolicyNotFound{}
case "InvalidBucketName":
err = BucketNameInvalid{Bucket: bucket}
case "NoSuchBucket":
err = BucketNotFound{Bucket: bucket}
case "NoSuchKey":
if object != "" {
err = ObjectNotFound{Bucket: bucket, Object: object}
} else {
err = BucketNotFound{Bucket: bucket}
}
case "XMinioInvalidObjectName":
err = ObjectNameInvalid{}
case "AccessDenied":
err = PrefixAccessDenied{
Bucket: bucket,
Object: object,
}
case "XAmzContentSHA256Mismatch":
err = SHA256Mismatch{}
}
e.e = err
return e
}
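The same code-string dispatch can be reproduced with minio-go's exported ToErrorResponse helper (defined in the vendored api-error-response.go further below). A hedged, self-contained sketch with a plain error value standing in for the gateway's typed errors:

```go
package main

import (
	"errors"
	"fmt"

	minio "github.com/minio/minio-go"
)

// Stand-in for the gateway's BucketNotFound error type.
var errBucketNotFound = errors.New("Bucket not found")

// mapError performs the same kind of code-based translation as
// s3ToObjectError, without the traceError wrapping.
func mapError(err error) error {
	switch minio.ToErrorResponse(err).Code {
	case "NoSuchBucket":
		return errBucketNotFound
	}
	return err
}

func main() {
	err := minio.ErrorResponse{Code: "NoSuchBucket"}
	fmt.Println(mapError(err)) // Bucket not found
}
```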
// s3Gateway - Implements gateway for Minio and S3 compatible object storage servers.
type s3Gateway struct {
Client *minio.Core
anonClient *minio.Core
}
// newS3Gateway returns an S3 GatewayLayer.
func newS3Gateway(endpoint string, accessKey, secretKey string, secure bool) (GatewayLayer, error) {
// Initialize minio client object.
client, err := minio.NewCore(endpoint, accessKey, secretKey, secure)
if err != nil {
return nil, err
}
anonClient, err := minio.NewCore(endpoint, "", "", secure)
if err != nil {
return nil, err
}
return &s3Gateway{
Client: client,
anonClient: anonClient,
}, nil
}
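Standalone, the dual-client setup looks like the sketch below: one core client with credentials and a second one with empty credentials for the Anon* operations (endpoint and keys are placeholders):

```go
package main

import (
	"log"

	minio "github.com/minio/minio-go"
)

func main() {
	// Authenticated client for regular operations.
	client, err := minio.NewCore("s3.amazonaws.com", "ACCESSKEY", "SECRETKEY", true)
	if err != nil {
		log.Fatalln(err)
	}
	// Anonymous client (empty credentials) for unauthenticated requests.
	anonClient, err := minio.NewCore("s3.amazonaws.com", "", "", true)
	if err != nil {
		log.Fatalln(err)
	}
	_, _ = client, anonClient
}
```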
// Shutdown - save any gateway metadata to disk
// if necessary and reload upon next restart.
func (l *s3Gateway) Shutdown() error {
// TODO
return nil
}
// StorageInfo - Not relevant to S3 backend.
func (l *s3Gateway) StorageInfo() StorageInfo {
return StorageInfo{}
}
// MakeBucket - Create a new bucket on the S3 backend.
func (l *s3Gateway) MakeBucket(bucket string) error {
// Never called in gateway mode; exists only to satisfy the ObjectLayer interface.
return traceError(NotImplemented{})
}
// MakeBucketWithLocation - Create a new bucket on the S3 backend.
func (l *s3Gateway) MakeBucketWithLocation(bucket, location string) error {
err := l.Client.MakeBucket(bucket, location)
if err != nil {
return s3ToObjectError(traceError(err), bucket)
}
return err
}
// GetBucketInfo - Get bucket metadata.
func (l *s3Gateway) GetBucketInfo(bucket string) (BucketInfo, error) {
buckets, err := l.Client.ListBuckets()
if err != nil {
return BucketInfo{}, s3ToObjectError(traceError(err), bucket)
}
for _, bi := range buckets {
if bi.Name != bucket {
continue
}
return BucketInfo{
Name: bi.Name,
Created: bi.CreationDate,
}, nil
}
return BucketInfo{}, traceError(BucketNotFound{Bucket: bucket})
}
// ListBuckets - Lists all S3 buckets
func (l *s3Gateway) ListBuckets() ([]BucketInfo, error) {
buckets, err := l.Client.ListBuckets()
if err != nil {
return nil, err
}
b := make([]BucketInfo, len(buckets))
for i, bi := range buckets {
b[i] = BucketInfo{
Name: bi.Name,
Created: bi.CreationDate,
}
}
return b, err
}
// DeleteBucket - delete a bucket on S3
func (l *s3Gateway) DeleteBucket(bucket string) error {
err := l.Client.RemoveBucket(bucket)
if err != nil {
return s3ToObjectError(traceError(err), bucket)
}
return nil
}
// ListObjects - lists all blobs in S3 bucket filtered by prefix
func (l *s3Gateway) ListObjects(bucket string, prefix string, marker string, delimiter string, maxKeys int) (ListObjectsInfo, error) {
result, err := l.Client.ListObjects(bucket, prefix, marker, delimiter, maxKeys)
if err != nil {
return ListObjectsInfo{}, s3ToObjectError(traceError(err), bucket)
}
return fromMinioClientListBucketResult(bucket, result), nil
}
// ListObjectsV2 - lists all blobs in S3 bucket filtered by prefix
func (l *s3Gateway) ListObjectsV2(bucket, prefix, continuationToken string, fetchOwner bool, delimiter string, maxKeys int) (ListObjectsV2Info, error) {
result, err := l.Client.ListObjectsV2(bucket, prefix, continuationToken, fetchOwner, delimiter, maxKeys)
if err != nil {
return ListObjectsV2Info{}, s3ToObjectError(traceError(err), bucket)
}
return fromMinioClientListBucketV2Result(bucket, result), nil
}
// fromMinioClientListBucketV2Result - convert minio ListBucketV2Result to ListObjectsV2Info
func fromMinioClientListBucketV2Result(bucket string, result minio.ListBucketV2Result) ListObjectsV2Info {
objects := make([]ObjectInfo, len(result.Contents))
for i, oi := range result.Contents {
objects[i] = fromMinioClientObjectInfo(bucket, oi)
}
prefixes := make([]string, len(result.CommonPrefixes))
for i, p := range result.CommonPrefixes {
prefixes[i] = p.Prefix
}
return ListObjectsV2Info{
IsTruncated: result.IsTruncated,
Prefixes: prefixes,
Objects: objects,
ContinuationToken: result.ContinuationToken,
NextContinuationToken: result.NextContinuationToken,
}
}
// fromMinioClientListBucketResult - convert minio ListBucketResult to ListObjectsInfo
func fromMinioClientListBucketResult(bucket string, result minio.ListBucketResult) ListObjectsInfo {
objects := make([]ObjectInfo, len(result.Contents))
for i, oi := range result.Contents {
objects[i] = fromMinioClientObjectInfo(bucket, oi)
}
prefixes := make([]string, len(result.CommonPrefixes))
for i, p := range result.CommonPrefixes {
prefixes[i] = p.Prefix
}
return ListObjectsInfo{
IsTruncated: result.IsTruncated,
NextMarker: result.NextMarker,
Prefixes: prefixes,
Objects: objects,
}
}
// GetObject - reads an object from S3. Supports additional
// parameters like offset and length which are synonymous with
// HTTP Range requests.
//
// startOffset indicates the starting read location of the object.
// length indicates the total length of the object.
func (l *s3Gateway) GetObject(bucket string, key string, startOffset int64, length int64, writer io.Writer) error {
object, err := l.Client.GetObject(bucket, key)
if err != nil {
return s3ToObjectError(traceError(err), bucket, key)
}
defer object.Close()
if _, err := object.Seek(startOffset, io.SeekStart); err != nil {
return s3ToObjectError(traceError(err), bucket, key)
}
if _, err := io.CopyN(writer, object, length); err != nil {
return s3ToObjectError(traceError(err), bucket, key)
}
return nil
}
// fromMinioClientObjectInfo - converts minio ObjectInfo to gateway ObjectInfo
func fromMinioClientObjectInfo(bucket string, oi minio.ObjectInfo) ObjectInfo {
userDefined := fromMinioClientMetadata(oi.Metadata)
userDefined["Content-Type"] = oi.ContentType
return ObjectInfo{
Bucket: bucket,
Name: oi.Key,
ModTime: oi.LastModified,
Size: oi.Size,
MD5Sum: oi.ETag,
UserDefined: userDefined,
ContentType: oi.ContentType,
ContentEncoding: oi.Metadata.Get("Content-Encoding"),
}
}
// GetObjectInfo - reads object info and replies back ObjectInfo
func (l *s3Gateway) GetObjectInfo(bucket string, object string) (objInfo ObjectInfo, err error) {
oi, err := l.Client.StatObject(bucket, object)
if err != nil {
return ObjectInfo{}, s3ToObjectError(traceError(err), bucket, object)
}
return fromMinioClientObjectInfo(bucket, oi), nil
}
// PutObject - Create a new object with the incoming data.
func (l *s3Gateway) PutObject(bucket string, object string, size int64, data io.Reader, metadata map[string]string, sha256sum string) (ObjectInfo, error) {
var sha256Writer hash.Hash
sha256sumBytes := []byte{}
teeReader := data
if sha256sum != "" {
b, err := hex.DecodeString(sha256sum)
if err != nil {
return ObjectInfo{}, s3ToObjectError(traceError(err), bucket, object)
}
sha256sumBytes = b
sha256Writer = sha256.New()
teeReader = io.TeeReader(data, sha256Writer)
}
delete(metadata, "md5Sum")
oi, err := l.Client.PutObject(bucket, object, size, teeReader, nil, sha256sumBytes, toMinioClientMetadata(metadata))
if err != nil {
return ObjectInfo{}, s3ToObjectError(traceError(err), bucket, object)
}
if sha256sum != "" {
newSHA256sum := hex.EncodeToString(sha256Writer.Sum(nil))
if newSHA256sum != sha256sum {
l.Client.RemoveObject(bucket, object)
return ObjectInfo{}, traceError(SHA256Mismatch{})
}
}
return fromMinioClientObjectInfo(bucket, oi), nil
}
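The TeeReader above hashes the stream while the backend consumes it, and the resulting digest is compared against the client-supplied one afterwards. A self-contained illustration of just that verification step:

```go
package main

import (
	"bytes"
	"crypto/sha256"
	"encoding/hex"
	"fmt"
	"io"
	"io/ioutil"
)

func main() {
	payload := []byte("hello, gateway")
	sum := sha256.Sum256(payload)
	sha256sum := hex.EncodeToString(sum[:]) // digest claimed by the client

	sha256Writer := sha256.New()
	teeReader := io.TeeReader(bytes.NewReader(payload), sha256Writer)

	// Stand-in for the backend upload consuming the reader.
	if _, err := ioutil.ReadAll(teeReader); err != nil {
		panic(err)
	}

	newSHA256sum := hex.EncodeToString(sha256Writer.Sum(nil))
	fmt.Println(newSHA256sum == sha256sum) // true: the object is kept
}
```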
// CopyObject - Copies a blob from source container to destination container.
func (l *s3Gateway) CopyObject(srcBucket string, srcObject string, destBucket string, destObject string, metadata map[string]string) (ObjectInfo, error) {
err := l.Client.CopyObject(destBucket, destObject, path.Join(srcBucket, srcObject), minio.CopyConditions{})
if err != nil {
return ObjectInfo{}, s3ToObjectError(traceError(err), srcBucket, srcObject)
}
oi, err := l.GetObjectInfo(destBucket, destObject)
if err != nil {
return ObjectInfo{}, s3ToObjectError(traceError(err), destBucket, destObject)
}
return oi, nil
}
// DeleteObject - Deletes a blob in bucket
func (l *s3Gateway) DeleteObject(bucket string, object string) error {
err := l.Client.RemoveObject(bucket, object)
if err != nil {
return s3ToObjectError(traceError(err), bucket, object)
}
return nil
}
// fromMinioClientUploadMetadata converts ObjectMultipartInfo to uploadMetadata
func fromMinioClientUploadMetadata(omi minio.ObjectMultipartInfo) uploadMetadata {
return uploadMetadata{
Object: omi.Key,
UploadID: omi.UploadID,
Initiated: omi.Initiated,
}
}
// fromMinioClientListMultipartsInfo converts minio ListMultipartUploadsResult to ListMultipartsInfo
func fromMinioClientListMultipartsInfo(lmur minio.ListMultipartUploadsResult) ListMultipartsInfo {
uploads := make([]uploadMetadata, len(lmur.Uploads))
for i, um := range lmur.Uploads {
uploads[i] = fromMinioClientUploadMetadata(um)
}
commonPrefixes := make([]string, len(lmur.CommonPrefixes))
for i, cp := range lmur.CommonPrefixes {
commonPrefixes[i] = cp.Prefix
}
return ListMultipartsInfo{
KeyMarker: lmur.KeyMarker,
UploadIDMarker: lmur.UploadIDMarker,
NextKeyMarker: lmur.NextKeyMarker,
NextUploadIDMarker: lmur.NextUploadIDMarker,
MaxUploads: int(lmur.MaxUploads),
IsTruncated: lmur.IsTruncated,
Uploads: uploads,
Prefix: lmur.Prefix,
Delimiter: lmur.Delimiter,
CommonPrefixes: commonPrefixes,
EncodingType: lmur.EncodingType,
}
}
// ListMultipartUploads - lists all multipart uploads.
func (l *s3Gateway) ListMultipartUploads(bucket string, prefix string, keyMarker string, uploadIDMarker string, delimiter string, maxUploads int) (ListMultipartsInfo, error) {
result, err := l.Client.ListMultipartUploads(bucket, prefix, keyMarker, uploadIDMarker, delimiter, maxUploads)
if err != nil {
return ListMultipartsInfo{}, err
}
return fromMinioClientListMultipartsInfo(result), nil
}
// fromMinioClientMetadata converts minio metadata to map[string]string
func fromMinioClientMetadata(metadata map[string][]string) map[string]string {
mm := map[string]string{}
for k, v := range metadata {
mm[http.CanonicalHeaderKey(k)] = v[0]
}
return mm
}
// toMinioClientMetadata converts metadata to map[string][]string
func toMinioClientMetadata(metadata map[string]string) map[string][]string {
mm := map[string][]string{}
for k, v := range metadata {
mm[http.CanonicalHeaderKey(k)] = []string{v}
}
return mm
}
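Both converters canonicalize header keys, so metadata keys survive a round trip in normalized form. For example:

```go
package main

import (
	"fmt"
	"net/http"
)

func main() {
	fmt.Println(http.CanonicalHeaderKey("x-amz-meta-mykey")) // X-Amz-Meta-Mykey
}
```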
// NewMultipartUpload - initiates a new multipart upload of an object
func (l *s3Gateway) NewMultipartUpload(bucket string, object string, metadata map[string]string) (uploadID string, err error) {
return l.Client.NewMultipartUpload(bucket, object, toMinioClientMetadata(metadata))
}
// CopyObjectPart - copy part of object to other bucket and object
func (l *s3Gateway) CopyObjectPart(srcBucket string, srcObject string, destBucket string, destObject string, uploadID string, partID int, startOffset int64, length int64) (info PartInfo, err error) {
// FIXME: implement CopyObjectPart
return PartInfo{}, traceError(NotImplemented{})
}
// fromMinioClientObjectPart - converts minio ObjectPart to PartInfo
func fromMinioClientObjectPart(op minio.ObjectPart) PartInfo {
return PartInfo{
Size: op.Size,
ETag: op.ETag,
LastModified: op.LastModified,
PartNumber: op.PartNumber,
}
}
// PutObjectPart puts a part of object in bucket
func (l *s3Gateway) PutObjectPart(bucket string, object string, uploadID string, partID int, size int64, data io.Reader, md5Hex string, sha256sum string) (PartInfo, error) {
md5HexBytes, err := hex.DecodeString(md5Hex)
if err != nil {
return PartInfo{}, err
}
sha256sumBytes, err := hex.DecodeString(sha256sum)
if err != nil {
return PartInfo{}, err
}
info, err := l.Client.PutObjectPart(bucket, object, uploadID, partID, size, data, md5HexBytes, sha256sumBytes)
if err != nil {
return PartInfo{}, err
}
return fromMinioClientObjectPart(info), nil
}
// fromMinioClientObjectParts - converts a slice of minio ObjectPart to a slice of PartInfo
func fromMinioClientObjectParts(parts []minio.ObjectPart) []PartInfo {
toParts := make([]PartInfo, len(parts))
for i, part := range parts {
toParts[i] = fromMinioClientObjectPart(part)
}
return toParts
}
// fromMinioClientListPartsInfo converts minio ListObjectPartsResult to ListPartsInfo
func fromMinioClientListPartsInfo(lopr minio.ListObjectPartsResult) ListPartsInfo {
return ListPartsInfo{
UploadID: lopr.UploadID,
Bucket: lopr.Bucket,
Object: lopr.Key,
StorageClass: "",
PartNumberMarker: lopr.PartNumberMarker,
NextPartNumberMarker: lopr.NextPartNumberMarker,
MaxParts: lopr.MaxParts,
IsTruncated: lopr.IsTruncated,
EncodingType: lopr.EncodingType,
Parts: fromMinioClientObjectParts(lopr.ObjectParts),
}
}
// ListObjectParts returns all object parts for specified object in specified bucket
func (l *s3Gateway) ListObjectParts(bucket string, object string, uploadID string, partNumberMarker int, maxParts int) (ListPartsInfo, error) {
result, err := l.Client.ListObjectParts(bucket, object, uploadID, partNumberMarker, maxParts)
if err != nil {
return ListPartsInfo{}, err
}
return fromMinioClientListPartsInfo(result), nil
}
// AbortMultipartUpload aborts an ongoing multipart upload
func (l *s3Gateway) AbortMultipartUpload(bucket string, object string, uploadID string) error {
return l.Client.AbortMultipartUpload(bucket, object, uploadID)
}
// toMinioClientCompletePart converts completePart to minio CompletePart
func toMinioClientCompletePart(part completePart) minio.CompletePart {
return minio.CompletePart{
ETag: part.ETag,
PartNumber: part.PartNumber,
}
}
// toMinioClientCompleteParts converts []completePart to minio []CompletePart
func toMinioClientCompleteParts(parts []completePart) []minio.CompletePart {
mparts := make([]minio.CompletePart, len(parts))
for i, part := range parts {
mparts[i] = toMinioClientCompletePart(part)
}
return mparts
}
// CompleteMultipartUpload completes ongoing multipart upload and finalizes object
func (l *s3Gateway) CompleteMultipartUpload(bucket string, object string, uploadID string, uploadedParts []completePart) (ObjectInfo, error) {
err := l.Client.CompleteMultipartUpload(bucket, object, uploadID, toMinioClientCompleteParts(uploadedParts))
if err != nil {
return ObjectInfo{}, s3ToObjectError(traceError(err), bucket, object)
}
return l.GetObjectInfo(bucket, object)
}
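Taken together, the multipart methods relay the usual sequence: NewMultipartUpload, then PutObjectPart for each part, then CompleteMultipartUpload. A hedged end-to-end sketch against the minio-go Core API used in this file (endpoint and credentials are the public play server from the vendored README; bucket and object names are placeholders):

```go
package main

import (
	"bytes"
	"log"

	minio "github.com/minio/minio-go"
)

func main() {
	core, err := minio.NewCore("play.minio.io:9000", "Q3AM3UQ867SPQQA43P2F",
		"zuf+tfteSlswRu7BJ86wekitnifILbZam1KYY3TG", true)
	if err != nil {
		log.Fatalln(err)
	}
	uploadID, err := core.NewMultipartUpload("mybucket", "myobject", nil)
	if err != nil {
		log.Fatalln(err)
	}
	// A single (and therefore final) part may be smaller than 5MiB.
	data := []byte("hello, multipart")
	part, err := core.PutObjectPart("mybucket", "myobject", uploadID, 1,
		int64(len(data)), bytes.NewReader(data), nil, nil)
	if err != nil {
		log.Fatalln(err)
	}
	err = core.CompleteMultipartUpload("mybucket", "myobject", uploadID,
		[]minio.CompletePart{{PartNumber: 1, ETag: part.ETag}})
	if err != nil {
		log.Fatalln(err)
	}
}
```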
// SetBucketPolicies - Set policy on bucket
func (l *s3Gateway) SetBucketPolicies(bucket string, policyInfo policy.BucketAccessPolicy) error {
if err := l.Client.PutBucketPolicy(bucket, policyInfo); err != nil {
return s3ToObjectError(traceError(err), bucket, "")
}
return nil
}
// GetBucketPolicies - Get policy on bucket
func (l *s3Gateway) GetBucketPolicies(bucket string) (policy.BucketAccessPolicy, error) {
policyInfo, err := l.Client.GetBucketPolicy(bucket)
if err != nil {
return policy.BucketAccessPolicy{}, s3ToObjectError(traceError(err), bucket, "")
}
return policyInfo, nil
}
// DeleteBucketPolicies - Delete all policies on bucket
func (l *s3Gateway) DeleteBucketPolicies(bucket string) error {
if err := l.Client.PutBucketPolicy(bucket, policy.BucketAccessPolicy{}); err != nil {
return s3ToObjectError(traceError(err), bucket, "")
}
return nil
}
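For reference, the policy.BucketAccessPolicy documents exchanged here can be built with the minio-go policy helpers, as the gateway handlers do. A self-contained sketch that constructs a read-only policy and prints the JSON document the handler would relay:

```go
package main

import (
	"encoding/json"
	"fmt"

	"github.com/minio/minio-go/pkg/policy"
)

func main() {
	policyInfo := policy.BucketAccessPolicy{Version: "2012-10-17"}
	policyInfo.Statements = policy.SetPolicy(policyInfo.Statements,
		policy.BucketPolicyReadOnly, "mybucket", "")
	policyBytes, err := json.Marshal(&policyInfo)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(policyBytes))
}
```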

cmd/s3-layer_test.go (new file, 17 lines)

@ -0,0 +1,17 @@
/*
* Minio Cloud Storage, (C) 2017 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package cmd

vendor/github.com/minio/minio-go/CONTRIBUTING.md (generated, vendored, new file, 23 lines)

@ -0,0 +1,23 @@
### Developer Guidelines
``minio-go`` welcomes your contribution. To make the process as seamless as possible, we ask for the following:
* Go ahead and fork the project and make your changes. We encourage pull requests to discuss code changes.
- Fork it
- Create your feature branch (git checkout -b my-new-feature)
- Commit your changes (git commit -am 'Add some feature')
- Push to the branch (git push origin my-new-feature)
- Create new Pull Request
* When you're ready to create a pull request, be sure to:
- Have test cases for the new code. If you have questions about how to do it, please ask in your pull request.
- Run `go fmt`
- Squash your commits into a single commit. `git rebase -i`. It's okay to force update your pull request.
- Make sure `go test -race ./...` and `go build` completes.
NOTE: go test runs functional tests and requires you to have an AWS S3 account. Set the environment variables
``ACCESS_KEY`` and ``SECRET_KEY``. To run a shorter version of the tests, use ``go test -short -race ./...``.
* Read the [Go Code Review Comments](https://github.com/golang/go/wiki/CodeReviewComments) wiki from the Golang project
- `minio-go` project is strictly conformant with Golang style
- if you happen to observe offending code, please feel free to send a pull request

vendor/github.com/minio/minio-go/MAINTAINERS.md (generated, vendored, new file, 19 lines)

@ -0,0 +1,19 @@
# For maintainers only
## Responsibilities
Please go through this link [Maintainer Responsibility](https://gist.github.com/abperiasamy/f4d9b31d3186bbd26522)
### Making new releases
Edit `libraryVersion` constant in `api.go`.
```
$ grep libraryVersion api.go
libraryVersion = "0.3.0"
```
```
$ git tag 0.3.0
$ git push --tags
```

vendor/github.com/minio/minio-go/README.md (generated, vendored, new file, 250 lines)

@ -0,0 +1,250 @@
# Minio Go Client SDK for Amazon S3 Compatible Cloud Storage [![Slack](https://slack.minio.io/slack?type=svg)](https://slack.minio.io) [![Sourcegraph](https://sourcegraph.com/github.com/minio/minio-go/-/badge.svg)](https://sourcegraph.com/github.com/minio/minio-go?badge)
The Minio Go Client SDK provides simple APIs to access any Amazon S3 compatible object storage.
**Supported cloud storage providers:**
- AWS Signature Version 4
- Amazon S3
- Minio
- AWS Signature Version 2
- Google Cloud Storage (Compatibility Mode)
- Openstack Swift + Swift3 middleware
- Ceph Object Gateway
- Riak CS
This quickstart guide will show you how to install the Minio client SDK, connect to Minio, and provide a walkthrough for a simple file uploader. For a complete list of APIs and examples, please take a look at the [Go Client API Reference](https://docs.minio.io/docs/golang-client-api-reference).
This document assumes that you have a working [Go development environment](https://docs.minio.io/docs/how-to-install-golang).
## Download from Github
```sh
go get -u github.com/minio/minio-go
```
## Initialize Minio Client
The Minio client requires the following four parameters to connect to an Amazon S3 compatible object storage service.
| Parameter | Description|
| :--- | :--- |
| endpoint | URL to object storage service. |
| accessKeyID | Access key is the user ID that uniquely identifies your account. |
| secretAccessKey | Secret key is the password to your account. |
| secure | Set this value to 'true' to enable secure (HTTPS) access. |
```go
package main
import (
"github.com/minio/minio-go"
"log"
)
func main() {
endpoint := "play.minio.io:9000"
accessKeyID := "Q3AM3UQ867SPQQA43P2F"
secretAccessKey := "zuf+tfteSlswRu7BJ86wekitnifILbZam1KYY3TG"
useSSL := true
// Initialize minio client object.
minioClient, err := minio.New(endpoint, accessKeyID, secretAccessKey, useSSL)
if err != nil {
log.Fatalln(err)
}
log.Println("%v", minioClient) // minioClient is now setup
```
## Quick Start Example - File Uploader
This example program connects to an object storage server, creates a bucket and uploads a file to the bucket.
We will use the Minio server running at [https://play.minio.io:9000](https://play.minio.io:9000) in this example. Feel free to use this service for testing and development. Access credentials shown in this example are open to the public.
### FileUploader.go
```go
package main
import (
"github.com/minio/minio-go"
"log"
)
func main() {
endpoint := "play.minio.io:9000"
accessKeyID := "Q3AM3UQ867SPQQA43P2F"
secretAccessKey := "zuf+tfteSlswRu7BJ86wekitnifILbZam1KYY3TG"
useSSL := true
// Initialize minio client object.
minioClient, err := minio.New(endpoint, accessKeyID, secretAccessKey, useSSL)
if err != nil {
log.Fatalln(err)
}
// Make a new bucket called mymusic.
bucketName := "mymusic"
location := "us-east-1"
err = minioClient.MakeBucket(bucketName, location)
if err != nil {
// Check to see if we already own this bucket (which happens if you run this twice)
exists, err := minioClient.BucketExists(bucketName)
if err == nil && exists {
log.Printf("We already own %s\n", bucketName)
} else {
log.Fatalln(err)
}
}
log.Printf("Successfully created %s\n", bucketName)
// Upload the zip file
objectName := "golden-oldies.zip"
filePath := "/tmp/golden-oldies.zip"
contentType := "application/zip"
// Upload the zip file with FPutObject
n, err := minioClient.FPutObject(bucketName, objectName, filePath, contentType)
if err != nil {
log.Fatalln(err)
}
log.Printf("Successfully uploaded %s of size %d\n", objectName, n)
}
```
### Run FileUploader
```sh
go run file-uploader.go
2016/08/13 17:03:28 Successfully created mymusic
2016/08/13 17:03:40 Successfully uploaded golden-oldies.zip of size 16253413
mc ls play/mymusic/
[2016-05-27 16:02:16 PDT] 17MiB golden-oldies.zip
```
## API Reference
The full API Reference is available here.
* [Complete API Reference](https://docs.minio.io/docs/golang-client-api-reference)
### API Reference : Bucket Operations
* [`MakeBucket`](https://docs.minio.io/docs/golang-client-api-reference#MakeBucket)
* [`ListBuckets`](https://docs.minio.io/docs/golang-client-api-reference#ListBuckets)
* [`BucketExists`](https://docs.minio.io/docs/golang-client-api-reference#BucketExists)
* [`RemoveBucket`](https://docs.minio.io/docs/golang-client-api-reference#RemoveBucket)
* [`ListObjects`](https://docs.minio.io/docs/golang-client-api-reference#ListObjects)
* [`ListObjectsV2`](https://docs.minio.io/docs/golang-client-api-reference#ListObjectsV2)
* [`ListIncompleteUploads`](https://docs.minio.io/docs/golang-client-api-reference#ListIncompleteUploads)
### API Reference : Bucket policy Operations
* [`SetBucketPolicy`](https://docs.minio.io/docs/golang-client-api-reference#SetBucketPolicy)
* [`GetBucketPolicy`](https://docs.minio.io/docs/golang-client-api-reference#GetBucketPolicy)
* [`ListBucketPolicies`](https://docs.minio.io/docs/golang-client-api-reference#ListBucketPolicies)
### API Reference : Bucket notification Operations
* [`SetBucketNotification`](https://docs.minio.io/docs/golang-client-api-reference#SetBucketNotification)
* [`GetBucketNotification`](https://docs.minio.io/docs/golang-client-api-reference#GetBucketNotification)
* [`RemoveAllBucketNotification`](https://docs.minio.io/docs/golang-client-api-reference#RemoveAllBucketNotification)
* [`ListenBucketNotification`](https://docs.minio.io/docs/golang-client-api-reference#ListenBucketNotification) (Minio Extension)
### API Reference : File Object Operations
* [`FPutObject`](https://docs.minio.io/docs/golang-client-api-reference#FPutObject)
* [`FGetObject`](https://docs.minio.io/docs/golang-client-api-reference#FGetObject)
### API Reference : Object Operations
* [`GetObject`](https://docs.minio.io/docs/golang-client-api-reference#GetObject)
* [`PutObject`](https://docs.minio.io/docs/golang-client-api-reference#PutObject)
* [`PutObjectStreaming`](https://docs.minio.io/docs/golang-client-api-reference#PutObjectStreaming)
* [`StatObject`](https://docs.minio.io/docs/golang-client-api-reference#StatObject)
* [`CopyObject`](https://docs.minio.io/docs/golang-client-api-reference#CopyObject)
* [`RemoveObject`](https://docs.minio.io/docs/golang-client-api-reference#RemoveObject)
* [`RemoveObjects`](https://docs.minio.io/docs/golang-client-api-reference#RemoveObjects)
* [`RemoveIncompleteUpload`](https://docs.minio.io/docs/golang-client-api-reference#RemoveIncompleteUpload)
### API Reference: Encrypted Object Operations
* [`GetEncryptedObject`](https://docs.minio.io/docs/golang-client-api-reference#GetEncryptedObject)
* [`PutEncryptedObject`](https://docs.minio.io/docs/golang-client-api-reference#PutEncryptedObject)
### API Reference : Presigned Operations
* [`PresignedGetObject`](https://docs.minio.io/docs/golang-client-api-reference#PresignedGetObject)
* [`PresignedPutObject`](https://docs.minio.io/docs/golang-client-api-reference#PresignedPutObject)
* [`PresignedPostPolicy`](https://docs.minio.io/docs/golang-client-api-reference#PresignedPostPolicy)
### API Reference : Client custom settings
* [`SetAppInfo`](http://docs.minio.io/docs/golang-client-api-reference#SetAppInfo)
* [`SetCustomTransport`](http://docs.minio.io/docs/golang-client-api-reference#SetCustomTransport)
* [`TraceOn`](http://docs.minio.io/docs/golang-client-api-reference#TraceOn)
* [`TraceOff`](http://docs.minio.io/docs/golang-client-api-reference#TraceOff)
## Full Examples
#### Full Examples : Bucket Operations
* [makebucket.go](https://github.com/minio/minio-go/blob/master/examples/s3/makebucket.go)
* [listbuckets.go](https://github.com/minio/minio-go/blob/master/examples/s3/listbuckets.go)
* [bucketexists.go](https://github.com/minio/minio-go/blob/master/examples/s3/bucketexists.go)
* [removebucket.go](https://github.com/minio/minio-go/blob/master/examples/s3/removebucket.go)
* [listobjects.go](https://github.com/minio/minio-go/blob/master/examples/s3/listobjects.go)
* [listobjectsV2.go](https://github.com/minio/minio-go/blob/master/examples/s3/listobjectsV2.go)
* [listincompleteuploads.go](https://github.com/minio/minio-go/blob/master/examples/s3/listincompleteuploads.go)
#### Full Examples : Bucket policy Operations
* [setbucketpolicy.go](https://github.com/minio/minio-go/blob/master/examples/s3/setbucketpolicy.go)
* [getbucketpolicy.go](https://github.com/minio/minio-go/blob/master/examples/s3/getbucketpolicy.go)
* [listbucketpolicies.go](https://github.com/minio/minio-go/blob/master/examples/s3/listbucketpolicies.go)
#### Full Examples : Bucket notification Operations
* [setbucketnotification.go](https://github.com/minio/minio-go/blob/master/examples/s3/setbucketnotification.go)
* [getbucketnotification.go](https://github.com/minio/minio-go/blob/master/examples/s3/getbucketnotification.go)
* [removeallbucketnotification.go](https://github.com/minio/minio-go/blob/master/examples/s3/removeallbucketnotification.go)
* [listenbucketnotification.go](https://github.com/minio/minio-go/blob/master/examples/minio/listenbucketnotification.go) (Minio Extension)
#### Full Examples : File Object Operations
* [fputobject.go](https://github.com/minio/minio-go/blob/master/examples/s3/fputobject.go)
* [fgetobject.go](https://github.com/minio/minio-go/blob/master/examples/s3/fgetobject.go)
#### Full Examples : Object Operations
* [putobject.go](https://github.com/minio/minio-go/blob/master/examples/s3/putobject.go)
* [getobject.go](https://github.com/minio/minio-go/blob/master/examples/s3/getobject.go)
* [statobject.go](https://github.com/minio/minio-go/blob/master/examples/s3/statobject.go)
* [copyobject.go](https://github.com/minio/minio-go/blob/master/examples/s3/copyobject.go)
* [removeobject.go](https://github.com/minio/minio-go/blob/master/examples/s3/removeobject.go)
* [removeincompleteupload.go](https://github.com/minio/minio-go/blob/master/examples/s3/removeincompleteupload.go)
* [removeobjects.go](https://github.com/minio/minio-go/blob/master/examples/s3/removeobjects.go)
#### Full Examples : Encrypted Object Operations
* [put-encrypted-object.go](https://github.com/minio/minio-go/blob/master/examples/s3/put-encrypted-object.go)
* [get-encrypted-object.go](https://github.com/minio/minio-go/blob/master/examples/s3/get-encrypted-object.go)
#### Full Examples : Presigned Operations
* [presignedgetobject.go](https://github.com/minio/minio-go/blob/master/examples/s3/presignedgetobject.go)
* [presignedputobject.go](https://github.com/minio/minio-go/blob/master/examples/s3/presignedputobject.go)
* [presignedpostpolicy.go](https://github.com/minio/minio-go/blob/master/examples/s3/presignedpostpolicy.go)
## Explore Further
* [Complete Documentation](https://docs.minio.io)
* [Minio Go Client SDK API Reference](https://docs.minio.io/docs/golang-client-api-reference)
* [Go Music Player App Full Application Example](https://docs.minio.io/docs/go-music-player-app)
## Contribute
[Contributors Guide](https://github.com/minio/minio-go/blob/master/CONTRIBUTING.md)
[![Build Status](https://travis-ci.org/minio/minio-go.svg)](https://travis-ci.org/minio/minio-go)
[![Build status](https://ci.appveyor.com/api/projects/status/1d05e6nvxcelmrak?svg=true)](https://ci.appveyor.com/project/harshavardhana/minio-go)

vendor/github.com/minio/minio-go/api-datatypes.go (generated, vendored, new file, 83 lines)

@ -0,0 +1,83 @@
/*
* Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package minio
import (
"net/http"
"time"
)
// BucketInfo container for bucket metadata.
type BucketInfo struct {
// The name of the bucket.
Name string `json:"name"`
// Date the bucket was created.
CreationDate time.Time `json:"creationDate"`
}
// ObjectInfo container for object metadata.
type ObjectInfo struct {
// An ETag is optionally set to md5sum of an object. In case of multipart objects,
// ETag is of the form MD5SUM-N where MD5SUM is md5sum of all individual md5sums of
// each parts concatenated into one string.
ETag string `json:"etag"`
Key string `json:"name"` // Name of the object
LastModified time.Time `json:"lastModified"` // Date and time the object was last modified.
Size int64 `json:"size"` // Size in bytes of the object.
ContentType string `json:"contentType"` // A standard MIME type describing the format of the object data.
// Collection of additional metadata on the object.
// eg: x-amz-meta-*, content-encoding etc.
Metadata http.Header `json:"metadata"`
// Owner name.
Owner struct {
DisplayName string `json:"name"`
ID string `json:"id"`
} `json:"owner"`
// The class of storage used to store the object.
StorageClass string `json:"storageClass"`
// Error
Err error `json:"-"`
}
// ObjectMultipartInfo container for multipart object metadata.
type ObjectMultipartInfo struct {
// Date and time at which the multipart upload was initiated.
Initiated time.Time `type:"timestamp" timestampFormat:"iso8601"`
Initiator initiator
Owner owner
// The type of storage to use for the object. Defaults to 'STANDARD'.
StorageClass string
// Key of the object for which the multipart upload was initiated.
Key string
// Size in bytes of the object.
Size int64
// Upload ID that identifies the multipart upload.
UploadID string `xml:"UploadId"`
// Error
Err error
}

vendor/github.com/minio/minio-go/api-error-response.go (generated, vendored, new file, 257 lines)

@ -0,0 +1,257 @@
/*
* Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015, 2016, 2017 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package minio
import (
"encoding/xml"
"fmt"
"net/http"
"strconv"
)
/* **** SAMPLE ERROR RESPONSE ****
<?xml version="1.0" encoding="UTF-8"?>
<Error>
<Code>AccessDenied</Code>
<Message>Access Denied</Message>
<BucketName>bucketName</BucketName>
<Key>objectName</Key>
<RequestId>F19772218238A85A</RequestId>
<HostId>GuWkjyviSiGHizehqpmsD1ndz5NClSP19DOT+s2mv7gXGQ8/X1lhbDGiIJEXpGFD</HostId>
</Error>
*/
// ErrorResponse - Is the typed error returned by all API operations.
type ErrorResponse struct {
XMLName xml.Name `xml:"Error" json:"-"`
Code string
Message string
BucketName string
Key string
RequestID string `xml:"RequestId"`
HostID string `xml:"HostId"`
// Region where the bucket is located. This header is returned
// only in HEAD bucket and ListObjects response.
Region string
// Headers of the returned S3 XML error
Headers http.Header `xml:"-" json:"-"`
}
// ToErrorResponse - Returns parsed ErrorResponse struct from body and
// http headers.
//
// For example:
//
// import s3 "github.com/minio/minio-go"
// ...
// ...
// reader, stat, err := s3.GetObject(...)
// if err != nil {
// resp := s3.ToErrorResponse(err)
// }
// ...
func ToErrorResponse(err error) ErrorResponse {
switch err := err.(type) {
case ErrorResponse:
return err
default:
return ErrorResponse{}
}
}
// Error - Returns S3 error string.
func (e ErrorResponse) Error() string {
if e.Message == "" {
msg, ok := s3ErrorResponseMap[e.Code]
if !ok {
msg = fmt.Sprintf("Error response code %s.", e.Code)
}
return msg
}
return e.Message
}
// Common string for errors to report issue location in unexpected
// cases.
const (
reportIssue = "Please report this issue at https://github.com/minio/minio-go/issues."
)
// httpRespToErrorResponse returns a new encoded ErrorResponse
// structure as error.
func httpRespToErrorResponse(resp *http.Response, bucketName, objectName string) error {
if resp == nil {
msg := "Response is empty. " + reportIssue
return ErrInvalidArgument(msg)
}
var errResp ErrorResponse
err := xmlDecoder(resp.Body, &errResp)
// XML decoding failed with no body; fall back to HTTP headers.
if err != nil {
switch resp.StatusCode {
case http.StatusNotFound:
if objectName == "" {
errResp = ErrorResponse{
Code: "NoSuchBucket",
Message: "The specified bucket does not exist.",
BucketName: bucketName,
}
} else {
errResp = ErrorResponse{
Code: "NoSuchKey",
Message: "The specified key does not exist.",
BucketName: bucketName,
Key: objectName,
}
}
case http.StatusForbidden:
errResp = ErrorResponse{
Code: "AccessDenied",
Message: "Access Denied.",
BucketName: bucketName,
Key: objectName,
}
case http.StatusConflict:
errResp = ErrorResponse{
Code: "Conflict",
Message: "Bucket not empty.",
BucketName: bucketName,
}
default:
errResp = ErrorResponse{
Code: resp.Status,
Message: resp.Status,
BucketName: bucketName,
}
}
}
// Save hostID, requestID and region information
// from headers if not available through error XML.
if errResp.RequestID == "" {
errResp.RequestID = resp.Header.Get("x-amz-request-id")
}
if errResp.HostID == "" {
errResp.HostID = resp.Header.Get("x-amz-id-2")
}
if errResp.Region == "" {
errResp.Region = resp.Header.Get("x-amz-bucket-region")
}
// Save headers returned in the API XML error
errResp.Headers = resp.Header
return errResp
}
// ErrTransferAccelerationBucket - bucket name is invalid to be used with transfer acceleration.
func ErrTransferAccelerationBucket(bucketName string) error {
msg := fmt.Sprintf("The name of the bucket used for Transfer Acceleration must be DNS-compliant and must not contain periods (\".\").")
return ErrorResponse{
Code: "InvalidArgument",
Message: msg,
BucketName: bucketName,
}
}
// ErrEntityTooLarge - Input size is larger than supported maximum.
func ErrEntityTooLarge(totalSize, maxObjectSize int64, bucketName, objectName string) error {
msg := fmt.Sprintf("Your proposed upload size %d exceeds the maximum allowed object size %d for single PUT operation.", totalSize, maxObjectSize)
return ErrorResponse{
Code: "EntityTooLarge",
Message: msg,
BucketName: bucketName,
Key: objectName,
}
}
// ErrEntityTooSmall - Input size is smaller than supported minimum.
func ErrEntityTooSmall(totalSize int64, bucketName, objectName string) error {
msg := fmt.Sprintf("Your proposed upload size %d is below the minimum allowed object size '0B' for single PUT operation.", totalSize)
return ErrorResponse{
Code: "EntityTooLarge",
Message: msg,
BucketName: bucketName,
Key: objectName,
}
}
// ErrUnexpectedEOF - Unexpected end of file reached.
func ErrUnexpectedEOF(totalRead, totalSize int64, bucketName, objectName string) error {
msg := fmt.Sprintf("Data read %s is not equal to the size %s of the input Reader.",
strconv.FormatInt(totalRead, 10), strconv.FormatInt(totalSize, 10))
return ErrorResponse{
Code: "UnexpectedEOF",
Message: msg,
BucketName: bucketName,
Key: objectName,
}
}
// ErrInvalidBucketName - Invalid bucket name response.
func ErrInvalidBucketName(message string) error {
return ErrorResponse{
Code: "InvalidBucketName",
Message: message,
RequestID: "minio",
}
}
// ErrInvalidObjectName - Invalid object name response.
func ErrInvalidObjectName(message string) error {
return ErrorResponse{
Code: "NoSuchKey",
Message: message,
RequestID: "minio",
}
}
// ErrInvalidObjectPrefix - Invalid object prefix response is
// similar to object name response.
var ErrInvalidObjectPrefix = ErrInvalidObjectName
// ErrInvalidArgument - Invalid argument response.
func ErrInvalidArgument(message string) error {
return ErrorResponse{
Code: "InvalidArgument",
Message: message,
RequestID: "minio",
}
}
// ErrNoSuchBucketPolicy - No Such Bucket Policy response
// The specified bucket does not have a bucket policy.
func ErrNoSuchBucketPolicy(message string) error {
return ErrorResponse{
Code: "NoSuchBucketPolicy",
Message: message,
RequestID: "minio",
}
}
// ErrAPINotSupported - API not supported response
// The specified API call is not supported
func ErrAPINotSupported(message string) error {
return ErrorResponse{
Code: "APINotSupported",
Message: message,
RequestID: "minio",
}
}
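For illustration, a minimal sketch of how a caller can inspect these structured errors; it assumes a Client `c` from minio.New, the package's ToErrorResponse helper, and the FGetObject helper vendored later in this commit. Bucket, object, and path names are placeholders:
err := c.FGetObject("mybucket", "myobject", "/tmp/myobject")
if err != nil {
    // Recover the typed ErrorResponse to branch on the S3 error code.
    errResp := ToErrorResponse(err)
    switch errResp.Code {
    case "NoSuchBucket", "NoSuchKey":
        // Not found; create the bucket or skip the object.
    case "AccessDenied":
        // The supplied credentials lack permission.
    default:
        log.Println(errResp.Message, errResp.RequestID)
    }
}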

104
vendor/github.com/minio/minio-go/api-get-object-file.go generated vendored Normal file

@@ -0,0 +1,104 @@
/*
* Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package minio
import (
"io"
"os"
"path/filepath"
)
// FGetObject - download contents of an object to a local file.
func (c Client) FGetObject(bucketName, objectName, filePath string) error {
// Input validation.
if err := isValidBucketName(bucketName); err != nil {
return err
}
if err := isValidObjectName(objectName); err != nil {
return err
}
// Verify if destination already exists.
st, err := os.Stat(filePath)
if err == nil {
// If the destination exists and is a directory.
if st.IsDir() {
return ErrInvalidArgument("fileName is a directory.")
}
}
// Proceed if the file does not exist; return for all other errors.
if err != nil {
if !os.IsNotExist(err) {
return err
}
}
// Extract top level directory.
objectDir, _ := filepath.Split(filePath)
if objectDir != "" {
// Create any missing top level directories.
if err := os.MkdirAll(objectDir, 0700); err != nil {
return err
}
}
// Stat the object to get its ETag, used below to name the part file.
objectStat, err := c.StatObject(bucketName, objectName)
if err != nil {
return err
}
// Write to a temporary file "<filePath>.<ETag>.part.minio" before saving.
filePartPath := filePath + objectStat.ETag + ".part.minio"
// If exists, open in append mode. If not create it as a part file.
filePart, err := os.OpenFile(filePartPath, os.O_CREATE|os.O_APPEND|os.O_WRONLY, 0600)
if err != nil {
return err
}
// Issue Stat to get the current offset.
st, err = filePart.Stat()
if err != nil {
return err
}
// Seek to current position for incoming reader.
objectReader, objectStat, err := c.getObject(bucketName, objectName, st.Size(), 0)
if err != nil {
return err
}
// Write to the part file.
if _, err = io.CopyN(filePart, objectReader, objectStat.Size); err != nil {
return err
}
// Close the file before rename, this is specifically needed for Windows users.
if err = filePart.Close(); err != nil {
return err
}
// Safely completed. Now commit by renaming to actual filename.
if err = os.Rename(filePartPath, filePath); err != nil {
return err
}
// Return.
return nil
}
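A minimal usage sketch for the download path above; the endpoint, credentials, and names are placeholders, and the constructor is this library's minio.New:
package main

import (
    "log"

    "github.com/minio/minio-go"
)

func main() {
    c, err := minio.New("play.minio.io:9000", "ACCESS-KEY", "SECRET-KEY", true)
    if err != nil {
        log.Fatalln(err)
    }
    // A retried download resumes from the "<filePath>.<ETag>.part.minio" file.
    if err := c.FGetObject("mybucket", "myobject", "/tmp/myobject"); err != nil {
        log.Fatalln(err)
    }
    log.Println("downloaded /tmp/myobject")
}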

662
vendor/github.com/minio/minio-go/api-get-object.go generated vendored Normal file

@@ -0,0 +1,662 @@
/*
* Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015, 2016 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package minio
import (
"errors"
"fmt"
"io"
"net/http"
"strings"
"sync"
"time"
"github.com/minio/minio-go/pkg/encrypt"
)
// GetEncryptedObject deciphers and streams data stored in the server after applying the specified encryption materials
func (c Client) GetEncryptedObject(bucketName, objectName string, encryptMaterials encrypt.Materials) (io.Reader, error) {
if encryptMaterials == nil {
return nil, ErrInvalidArgument("Unable to recognize empty encryption properties")
}
// Fetch encrypted object
encReader, err := c.GetObject(bucketName, objectName)
if err != nil {
return nil, err
}
// Stat object to get its encryption metadata
st, err := encReader.Stat()
if err != nil {
return nil, err
}
// Setup object for decryption, object is transparently
// decrypted as the consumer starts reading.
encryptMaterials.SetupDecryptMode(encReader, st.Metadata.Get(amzHeaderIV), st.Metadata.Get(amzHeaderKey))
// Success.
return encryptMaterials, nil
}
// GetObject - returns a seekable, readable object.
func (c Client) GetObject(bucketName, objectName string) (*Object, error) {
// Input validation.
if err := isValidBucketName(bucketName); err != nil {
return nil, err
}
if err := isValidObjectName(objectName); err != nil {
return nil, err
}
var httpReader io.ReadCloser
var objectInfo ObjectInfo
var err error
// Create request channel.
reqCh := make(chan getRequest)
// Create response channel.
resCh := make(chan getResponse)
// Create done channel.
doneCh := make(chan struct{})
// This routine feeds partial object data as and when the caller reads.
go func() {
defer close(reqCh)
defer close(resCh)
// Loop through the incoming control messages and read data.
for {
select {
// When the done channel is closed exit our routine.
case <-doneCh:
// Close the http response body before returning.
// This ends the connection with the server.
if httpReader != nil {
httpReader.Close()
}
return
// Gather incoming request.
case req := <-reqCh:
// If this is the first request we may not need to do a getObject request yet.
if req.isFirstReq {
// First request is a Read/ReadAt.
if req.isReadOp {
// Differentiate between wanting the whole object and just a range.
if req.isReadAt {
// If this is a ReadAt request only get the specified range.
// Range is set with respect to the offset and length of the buffer requested.
// Do not set objectInfo from the first readAt request because it will not get
// the whole object.
httpReader, _, err = c.getObject(bucketName, objectName, req.Offset, int64(len(req.Buffer)))
} else {
// First request is a Read request.
httpReader, objectInfo, err = c.getObject(bucketName, objectName, req.Offset, 0)
}
if err != nil {
resCh <- getResponse{
Error: err,
}
return
}
// Read at least firstReq.Buffer bytes, if not we have
// reached our EOF.
size, err := io.ReadFull(httpReader, req.Buffer)
if err == io.ErrUnexpectedEOF {
// If an EOF happens after reading some but not
// all the bytes ReadFull returns ErrUnexpectedEOF
err = io.EOF
}
// Send back the first response.
resCh <- getResponse{
objectInfo: objectInfo,
Size: int(size),
Error: err,
didRead: true,
}
} else {
// First request is a Stat or Seek call.
// Only need to run a StatObject until an actual Read or ReadAt request comes through.
objectInfo, err = c.StatObject(bucketName, objectName)
if err != nil {
resCh <- getResponse{
Error: err,
}
// Exit the go-routine.
return
}
// Send back the first response.
resCh <- getResponse{
objectInfo: objectInfo,
}
}
} else if req.settingObjectInfo { // Request is just to get objectInfo.
objectInfo, err := c.StatObject(bucketName, objectName)
if err != nil {
resCh <- getResponse{
Error: err,
}
// Exit the goroutine.
return
}
// Send back the objectInfo.
resCh <- getResponse{
objectInfo: objectInfo,
}
} else {
// Offset changes fetch the new object at an Offset.
// Because the httpReader may not be set by the first
// request if it was a stat or seek it must be checked
// if the object has been read or not to only initialize
// new ones when they haven't been already.
// All readAt requests are new requests.
if req.DidOffsetChange || !req.beenRead {
if httpReader != nil {
// Close previously opened http reader.
httpReader.Close()
}
// If this request is a readAt only get the specified range.
if req.isReadAt {
// Range is set with respect to the offset and length of the buffer requested.
httpReader, _, err = c.getObject(bucketName, objectName, req.Offset, int64(len(req.Buffer)))
} else {
httpReader, objectInfo, err = c.getObject(bucketName, objectName, req.Offset, 0)
}
if err != nil {
resCh <- getResponse{
Error: err,
}
return
}
}
// Read at least req.Buffer bytes, if not we have
// reached our EOF.
size, err := io.ReadFull(httpReader, req.Buffer)
if err == io.ErrUnexpectedEOF {
// If an EOF happens after reading some but not
// all the bytes ReadFull returns ErrUnexpectedEOF
err = io.EOF
}
// Reply back how much was read.
resCh <- getResponse{
Size: int(size),
Error: err,
didRead: true,
objectInfo: objectInfo,
}
}
}
}
}()
// Create a newObject through the information sent back by reqCh.
return newObject(reqCh, resCh, doneCh), nil
}
// get request message container to communicate with internal
// go-routine.
type getRequest struct {
Buffer []byte
Offset int64 // readAt offset.
DidOffsetChange bool // Tracks the offset changes for Seek requests.
beenRead bool // Determines if this is the first time an object is being read.
isReadAt bool // Determines if this request is a request to a specific range
isReadOp bool // Determines if this request is a Read or Read/At request.
isFirstReq bool // Determines if this request is the first time an object is being accessed.
settingObjectInfo bool // Determines if this request is to set the objectInfo of an object.
}
// get response message container to reply back for the request.
type getResponse struct {
Size int
Error error
didRead bool // Lets subsequent calls know whether or not httpReader has been initiated.
objectInfo ObjectInfo // Used for the first request.
}
// Object represents an open object. It implements Read, ReadAt,
// Seeker, Close for an HTTP stream.
type Object struct {
// Mutex.
mutex *sync.Mutex
// User allocated and defined.
reqCh chan<- getRequest
resCh <-chan getResponse
doneCh chan<- struct{}
currOffset int64
objectInfo ObjectInfo
// Ask lower level to initiate data fetching based on currOffset
seekData bool
// Keeps track of closed call.
isClosed bool
// Keeps track of if this is the first call.
isStarted bool
// Previous error saved for future calls.
prevErr error
// Keeps track of if this object has been read yet.
beenRead bool
// Keeps track of if objectInfo has been set yet.
objectInfoSet bool
}
// doGetRequest - sends and blocks on the firstReqCh and reqCh of an object.
// Returns back the size of the buffer read, if anything was read, as well
// as any error encountered. For all first requests sent on the object
// it is also responsible for sending back the objectInfo.
func (o *Object) doGetRequest(request getRequest) (getResponse, error) {
o.reqCh <- request
response := <-o.resCh
// This was the first request.
if !o.isStarted {
// The object has been operated on.
o.isStarted = true
}
// Set the objectInfo if the request was not readAt
// and it hasn't been set before.
if !o.objectInfoSet && !request.isReadAt {
o.objectInfo = response.objectInfo
o.objectInfoSet = true
}
// Set beenRead only if it has not been set before.
if !o.beenRead {
o.beenRead = response.didRead
}
// Return any error to the top level.
if response.Error != nil {
return response, response.Error
}
// Data is ready on the wire, no need to reinitiate connection at lower level
o.seekData = false
return response, nil
}
// setOffset - handles the setting of offsets for
// Read/ReadAt/Seek requests.
func (o *Object) setOffset(bytesRead int64) error {
// Update the currentOffset.
o.currOffset += bytesRead
if o.currOffset >= o.objectInfo.Size {
return io.EOF
}
return nil
}
// Read reads up to len(b) bytes into b. It returns the number of
// bytes read (0 <= n <= len(p)) and any error encountered. Returns
// io.EOF upon end of file.
func (o *Object) Read(b []byte) (n int, err error) {
if o == nil {
return 0, ErrInvalidArgument("Object is nil")
}
// Locking.
o.mutex.Lock()
defer o.mutex.Unlock()
// prevErr is previous error saved from previous operation.
if o.prevErr != nil || o.isClosed {
return 0, o.prevErr
}
// Create a new request.
readReq := getRequest{
isReadOp: true,
beenRead: o.beenRead,
Buffer: b,
}
// Alert that this is the first request.
if !o.isStarted {
readReq.isFirstReq = true
}
// Ask to establish a new data fetch routine based on seekData flag
readReq.DidOffsetChange = o.seekData
readReq.Offset = o.currOffset
// Send and receive from the first request.
response, err := o.doGetRequest(readReq)
if err != nil && err != io.EOF {
// Save the error for future calls.
o.prevErr = err
return response.Size, err
}
// Bytes read.
bytesRead := int64(response.Size)
// Set the new offset.
oerr := o.setOffset(bytesRead)
if oerr != nil {
// Save the error for future calls.
o.prevErr = oerr
return response.Size, oerr
}
// Return the response.
return response.Size, err
}
// Stat returns the ObjectInfo structure describing Object.
func (o *Object) Stat() (ObjectInfo, error) {
if o == nil {
return ObjectInfo{}, ErrInvalidArgument("Object is nil")
}
// Locking.
o.mutex.Lock()
defer o.mutex.Unlock()
if o.prevErr != nil && o.prevErr != io.EOF || o.isClosed {
return ObjectInfo{}, o.prevErr
}
// This is the first request.
if !o.isStarted || !o.objectInfoSet {
statReq := getRequest{
isFirstReq: !o.isStarted,
settingObjectInfo: !o.objectInfoSet,
}
// Send the request and get the response.
_, err := o.doGetRequest(statReq)
if err != nil {
o.prevErr = err
return ObjectInfo{}, err
}
}
return o.objectInfo, nil
}
// ReadAt reads len(b) bytes from the File starting at byte offset
// off. It returns the number of bytes read and the error, if any.
// ReadAt always returns a non-nil error when n < len(b). At end of
// file, that error is io.EOF.
func (o *Object) ReadAt(b []byte, offset int64) (n int, err error) {
if o == nil {
return 0, ErrInvalidArgument("Object is nil")
}
// Locking.
o.mutex.Lock()
defer o.mutex.Unlock()
// prevErr is error which was saved in previous operation.
if o.prevErr != nil || o.isClosed {
return 0, o.prevErr
}
// Can only compare offsets to size when size has been set.
if o.objectInfoSet {
// If offset is negative then we return io.EOF.
// If offset is greater than or equal to object size we return io.EOF.
if offset >= o.objectInfo.Size || offset < 0 {
return 0, io.EOF
}
}
// Create the new readAt request.
readAtReq := getRequest{
isReadOp: true,
isReadAt: true,
DidOffsetChange: true, // Offset always changes.
beenRead: o.beenRead, // Set if this is the first request to try and read.
Offset: offset, // Set the offset.
Buffer: b,
}
// Alert that this is the first request.
if !o.isStarted {
readAtReq.isFirstReq = true
}
// Send and receive from the first request.
response, err := o.doGetRequest(readAtReq)
if err != nil && err != io.EOF {
// Save the error.
o.prevErr = err
return response.Size, err
}
// Bytes read.
bytesRead := int64(response.Size)
// There is no valid objectInfo yet
// to compare against for EOF.
if !o.objectInfoSet {
// Update the currentOffset.
o.currOffset += bytesRead
} else {
// If this was not the first request update
// the offsets and compare against objectInfo
// for EOF.
oerr := o.setOffset(bytesRead)
if oerr != nil {
o.prevErr = oerr
return response.Size, oerr
}
}
return response.Size, err
}
// Seek sets the offset for the next Read or Write to offset,
// interpreted according to whence: 0 means relative to the
// origin of the file, 1 means relative to the current offset,
// and 2 means relative to the end.
// Seek returns the new offset and an error, if any.
//
// Seeking to a negative offset is an error. Seeking to any positive
// offset is legal; subsequent io operations succeed as long as the
// underlying object is not closed.
func (o *Object) Seek(offset int64, whence int) (n int64, err error) {
if o == nil {
return 0, ErrInvalidArgument("Object is nil")
}
// Locking.
o.mutex.Lock()
defer o.mutex.Unlock()
if o.prevErr != nil {
// Seeking at EOF is legal; allow only io.EOF, return for any other error.
if o.prevErr != io.EOF {
return 0, o.prevErr
}
}
// Negative offset is valid for whence of '2'.
if offset < 0 && whence != 2 {
return 0, ErrInvalidArgument(fmt.Sprintf("Negative position not allowed for %d.", whence))
}
// This is the first request. So before anything else
// get the ObjectInfo.
if !o.isStarted || !o.objectInfoSet {
// Create the new Seek request.
seekReq := getRequest{
isReadOp: false,
Offset: offset,
isFirstReq: true,
}
// Send and receive from the seek request.
_, err := o.doGetRequest(seekReq)
if err != nil {
// Save the error.
o.prevErr = err
return 0, err
}
}
// Switch through whence.
switch whence {
default:
return 0, ErrInvalidArgument(fmt.Sprintf("Invalid whence %d", whence))
case 0:
if offset > o.objectInfo.Size {
return 0, io.EOF
}
o.currOffset = offset
case 1:
if o.currOffset+offset > o.objectInfo.Size {
return 0, io.EOF
}
o.currOffset += offset
case 2:
// Seeking to positive offset is valid for whence '2', but
// since we are backing a Reader we have reached 'EOF' if
// offset is positive.
if offset > 0 {
return 0, io.EOF
}
// Seeking to negative position not allowed for whence.
if o.objectInfo.Size+offset < 0 {
return 0, ErrInvalidArgument(fmt.Sprintf("Seeking at negative offset not allowed for %d", whence))
}
o.currOffset = o.objectInfo.Size + offset
}
// Reset the saved error since we successfully seeked, let the Read
// and ReadAt decide.
if o.prevErr == io.EOF {
o.prevErr = nil
}
// Ask lower level to fetch again from source
o.seekData = true
// Return the effective offset.
return o.currOffset, nil
}
// Close - The first Close() call closes the stream; any subsequent
// Close() calls return an error.
func (o *Object) Close() (err error) {
if o == nil {
return ErrInvalidArgument("Object is nil")
}
// Locking.
o.mutex.Lock()
defer o.mutex.Unlock()
// if already closed return an error.
if o.isClosed {
return o.prevErr
}
// Close successfully.
close(o.doneCh)
// Save for future operations.
errMsg := "Object is already closed. Bad file descriptor."
o.prevErr = errors.New(errMsg)
// Save here that we closed done channel successfully.
o.isClosed = true
return nil
}
// newObject instantiates a new *minio.Object*
// ObjectInfo will be set by setObjectInfo
func newObject(reqCh chan<- getRequest, resCh <-chan getResponse, doneCh chan<- struct{}) *Object {
return &Object{
mutex: &sync.Mutex{},
reqCh: reqCh,
resCh: resCh,
doneCh: doneCh,
}
}
// getObject - retrieve object from Object Storage.
//
// Additionally this function also takes range arguments to download the specified
// range bytes of an object. Setting offset and length = 0 will download the full object.
//
// For more information about the HTTP Range header.
// go to http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.35.
func (c Client) getObject(bucketName, objectName string, offset, length int64) (io.ReadCloser, ObjectInfo, error) {
// Validate input arguments.
if err := isValidBucketName(bucketName); err != nil {
return nil, ObjectInfo{}, err
}
if err := isValidObjectName(objectName); err != nil {
return nil, ObjectInfo{}, err
}
customHeader := make(http.Header)
// Set ranges if length and offset are valid.
// See https://tools.ietf.org/html/rfc7233#section-3.1 for reference.
if length > 0 && offset >= 0 {
customHeader.Set("Range", fmt.Sprintf("bytes=%d-%d", offset, offset+length-1))
} else if offset > 0 && length == 0 {
customHeader.Set("Range", fmt.Sprintf("bytes=%d-", offset))
} else if length < 0 && offset == 0 {
customHeader.Set("Range", fmt.Sprintf("bytes=%d", length))
}
// Execute GET on objectName.
resp, err := c.executeMethod("GET", requestMetadata{
bucketName: bucketName,
objectName: objectName,
customHeader: customHeader,
})
if err != nil {
return nil, ObjectInfo{}, err
}
if resp != nil {
if resp.StatusCode != http.StatusOK && resp.StatusCode != http.StatusPartialContent {
return nil, ObjectInfo{}, httpRespToErrorResponse(resp, bucketName, objectName)
}
}
// Trim off the odd double quotes from ETag in the beginning and end.
md5sum := strings.TrimPrefix(resp.Header.Get("ETag"), "\"")
md5sum = strings.TrimSuffix(md5sum, "\"")
// Parse the date.
date, err := time.Parse(http.TimeFormat, resp.Header.Get("Last-Modified"))
if err != nil {
msg := "Last-Modified time format not recognized. " + reportIssue
return nil, ObjectInfo{}, ErrorResponse{
Code: "InternalError",
Message: msg,
RequestID: resp.Header.Get("x-amz-request-id"),
HostID: resp.Header.Get("x-amz-id-2"),
Region: resp.Header.Get("x-amz-bucket-region"),
}
}
// Get content-type.
contentType := strings.TrimSpace(resp.Header.Get("Content-Type"))
if contentType == "" {
contentType = "application/octet-stream"
}
var objectStat ObjectInfo
objectStat.ETag = md5sum
objectStat.Key = objectName
objectStat.Size = resp.ContentLength
objectStat.LastModified = date
objectStat.ContentType = contentType
// do not close body here, caller will close
return resp.Body, objectStat, nil
}
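A fragment exercising the Object stream above: Stat, Seek, Read, and ReadAt all multiplex over the same internal request channel. It assumes a Client `c` (see the minio.New sketch earlier), standard io/log/fmt imports, an object of at least 16 bytes, and placeholder names:
obj, err := c.GetObject("mybucket", "myobject")
if err != nil {
    log.Fatalln(err)
}
defer obj.Close()
st, err := obj.Stat() // served by StatObject until a real read arrives
if err != nil {
    log.Fatalln(err)
}
// Seek relative to the end, then read the last 16 bytes sequentially.
if _, err = obj.Seek(-16, 2); err != nil {
    log.Fatalln(err)
}
tail := make([]byte, 16)
if _, err = io.ReadFull(obj, tail); err != nil && err != io.EOF {
    log.Fatalln(err)
}
// ReadAt issues an independent ranged GET at the given offset.
head := make([]byte, 16)
if _, err = obj.ReadAt(head, 0); err != nil && err != io.EOF {
    log.Fatalln(err)
}
fmt.Println(st.Size, head, tail)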

105
vendor/github.com/minio/minio-go/api-get-policy.go generated vendored Normal file

@@ -0,0 +1,105 @@
/*
* Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015, 2016 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package minio
import (
"encoding/json"
"io/ioutil"
"net/http"
"net/url"
"github.com/minio/minio-go/pkg/policy"
)
// GetBucketPolicy - get bucket policy at a given path.
func (c Client) GetBucketPolicy(bucketName, objectPrefix string) (bucketPolicy policy.BucketPolicy, err error) {
// Input validation.
if err := isValidBucketName(bucketName); err != nil {
return policy.BucketPolicyNone, err
}
if err := isValidObjectPrefix(objectPrefix); err != nil {
return policy.BucketPolicyNone, err
}
policyInfo, err := c.getBucketPolicy(bucketName)
if err != nil {
errResponse := ToErrorResponse(err)
if errResponse.Code == "NoSuchBucketPolicy" {
return policy.BucketPolicyNone, nil
}
return policy.BucketPolicyNone, err
}
return policy.GetPolicy(policyInfo.Statements, bucketName, objectPrefix), nil
}
// ListBucketPolicies - list all policies for a given prefix and all its children.
func (c Client) ListBucketPolicies(bucketName, objectPrefix string) (bucketPolicies map[string]policy.BucketPolicy, err error) {
// Input validation.
if err := isValidBucketName(bucketName); err != nil {
return map[string]policy.BucketPolicy{}, err
}
if err := isValidObjectPrefix(objectPrefix); err != nil {
return map[string]policy.BucketPolicy{}, err
}
policyInfo, err := c.getBucketPolicy(bucketName)
if err != nil {
errResponse := ToErrorResponse(err)
if errResponse.Code == "NoSuchBucketPolicy" {
return map[string]policy.BucketPolicy{}, nil
}
return map[string]policy.BucketPolicy{}, err
}
return policy.GetPolicies(policyInfo.Statements, bucketName), nil
}
// Default empty bucket access policy.
var emptyBucketAccessPolicy = policy.BucketAccessPolicy{
Version: "2012-10-17",
}
// Request server for current bucket policy.
func (c Client) getBucketPolicy(bucketName string) (policy.BucketAccessPolicy, error) {
// Get resources properly escaped and lined up before
// using them in http request.
urlValues := make(url.Values)
urlValues.Set("policy", "")
// Execute GET on bucket to fetch its policy.
resp, err := c.executeMethod("GET", requestMetadata{
bucketName: bucketName,
queryValues: urlValues,
})
defer closeResponse(resp)
if err != nil {
return emptyBucketAccessPolicy, err
}
if resp != nil {
if resp.StatusCode != http.StatusOK {
return emptyBucketAccessPolicy, httpRespToErrorResponse(resp, bucketName, "")
}
}
bucketPolicyBuf, err := ioutil.ReadAll(resp.Body)
if err != nil {
return emptyBucketAccessPolicy, err
}
bucketPolicy := policy.BucketAccessPolicy{}
err = json.Unmarshal(bucketPolicyBuf, &bucketPolicy)
return bucketPolicy, err
}
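A short fragment reading back a policy through the helpers above; bucket and prefix are placeholders and `c` is an assumed Client:
bucketPolicy, err := c.GetBucketPolicy("mybucket", "downloads")
if err != nil {
    log.Fatalln(err)
}
// One of policy.BucketPolicyNone, BucketPolicyReadOnly,
// BucketPolicyWriteOnly or BucketPolicyReadWrite.
fmt.Println("policy on mybucket/downloads*:", bucketPolicy)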

699
vendor/github.com/minio/minio-go/api-list.go generated vendored Normal file

@@ -0,0 +1,699 @@
/*
* Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015, 2016 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package minio
import (
"fmt"
"net/http"
"net/url"
"strings"
)
// ListBuckets list all buckets owned by this authenticated user.
//
// This call requires explicit authentication, no anonymous requests are
// allowed for listing buckets.
//
// api := client.New(....)
// for message := range api.ListBuckets() {
// fmt.Println(message)
// }
//
func (c Client) ListBuckets() ([]BucketInfo, error) {
// Execute GET on service.
resp, err := c.executeMethod("GET", requestMetadata{})
defer closeResponse(resp)
if err != nil {
return nil, err
}
if resp != nil {
if resp.StatusCode != http.StatusOK {
return nil, httpRespToErrorResponse(resp, "", "")
}
}
listAllMyBucketsResult := listAllMyBucketsResult{}
err = xmlDecoder(resp.Body, &listAllMyBucketsResult)
if err != nil {
return nil, err
}
return listAllMyBucketsResult.Buckets.Bucket, nil
}
/// Bucket Read Operations.
// ListObjectsV2 lists all objects matching the objectPrefix from
// the specified bucket. If recursion is enabled it would list
// all subdirectories and all their contents.
//
// Your input parameters are just bucketName, objectPrefix, recursive
// and a done channel for pro-actively closing the internal go
// routine. If you enable recursive as 'true' this function will
// return back all the objects in a given bucket name and object
// prefix.
//
// api := client.New(....)
// // Create a done channel.
// doneCh := make(chan struct{})
// defer close(doneCh)
// // Recursively list all objects in 'mytestbucket'
// recursive := true
// for message := range api.ListObjectsV2("mytestbucket", "starthere", recursive, doneCh) {
// fmt.Println(message)
// }
//
func (c Client) ListObjectsV2(bucketName, objectPrefix string, recursive bool, doneCh <-chan struct{}) <-chan ObjectInfo {
// Allocate new list objects channel.
objectStatCh := make(chan ObjectInfo, 1)
// Default listing is delimited at "/"
delimiter := "/"
if recursive {
// If recursive we do not delimit.
delimiter = ""
}
// Return object owner information by default
fetchOwner := true
// Validate bucket name.
if err := isValidBucketName(bucketName); err != nil {
defer close(objectStatCh)
objectStatCh <- ObjectInfo{
Err: err,
}
return objectStatCh
}
// Validate incoming object prefix.
if err := isValidObjectPrefix(objectPrefix); err != nil {
defer close(objectStatCh)
objectStatCh <- ObjectInfo{
Err: err,
}
return objectStatCh
}
// Initiate list objects goroutine here.
go func(objectStatCh chan<- ObjectInfo) {
defer close(objectStatCh)
// Save continuationToken for next request.
var continuationToken string
for {
// Get list of objects a maximum of 1000 per request.
result, err := c.listObjectsV2Query(bucketName, objectPrefix, continuationToken, fetchOwner, delimiter, 1000)
if err != nil {
objectStatCh <- ObjectInfo{
Err: err,
}
return
}
// If contents are available loop through and send over channel.
for _, object := range result.Contents {
select {
// Send object content.
case objectStatCh <- object:
// If receives done from the caller, return here.
case <-doneCh:
return
}
}
// Send all common prefixes if any.
// NOTE: prefixes are only present if the request is delimited.
for _, obj := range result.CommonPrefixes {
object := ObjectInfo{}
object.Key = obj.Prefix
object.Size = 0
select {
// Send object prefixes.
case objectStatCh <- object:
// If receives done from the caller, return here.
case <-doneCh:
return
}
}
// If continuation token present, save it for next request.
if result.NextContinuationToken != "" {
continuationToken = result.NextContinuationToken
}
// Listing ends when result is not truncated, return right here.
if !result.IsTruncated {
return
}
}
}(objectStatCh)
return objectStatCh
}
// listObjectsV2Query - (List Objects V2) - List some or all (up to 1000) of the objects in a bucket.
//
// You can use the request parameters as selection criteria to return a subset of the objects in a bucket.
// request parameters :-
// ---------
// ?continuation-token - Specifies the key to start with when listing objects in a bucket.
// ?delimiter - A delimiter is a character you use to group keys.
// ?prefix - Limits the response to keys that begin with the specified prefix.
// ?max-keys - Sets the maximum number of keys returned in the response body.
func (c Client) listObjectsV2Query(bucketName, objectPrefix, continuationToken string, fetchOwner bool, delimiter string, maxkeys int) (ListBucketV2Result, error) {
// Validate bucket name.
if err := isValidBucketName(bucketName); err != nil {
return ListBucketV2Result{}, err
}
// Validate object prefix.
if err := isValidObjectPrefix(objectPrefix); err != nil {
return ListBucketV2Result{}, err
}
// Get resources properly escaped and lined up before
// using them in http request.
urlValues := make(url.Values)
// Always set list-type in ListObjects V2
urlValues.Set("list-type", "2")
// Set object prefix.
if objectPrefix != "" {
urlValues.Set("prefix", objectPrefix)
}
// Set continuation token
if continuationToken != "" {
urlValues.Set("continuation-token", continuationToken)
}
// Set delimiter.
if delimiter != "" {
urlValues.Set("delimiter", delimiter)
}
// Fetch owner when listing
if fetchOwner {
urlValues.Set("fetch-owner", "true")
}
// maxkeys should default to 1000 or less.
if maxkeys == 0 || maxkeys > 1000 {
maxkeys = 1000
}
// Set max keys.
urlValues.Set("max-keys", fmt.Sprintf("%d", maxkeys))
// Execute GET on bucket to list objects.
resp, err := c.executeMethod("GET", requestMetadata{
bucketName: bucketName,
queryValues: urlValues,
})
defer closeResponse(resp)
if err != nil {
return ListBucketV2Result{}, err
}
if resp != nil {
if resp.StatusCode != http.StatusOK {
return ListBucketV2Result{}, httpRespToErrorResponse(resp, bucketName, "")
}
}
// Decode ListBucketV2Result XML.
listBucketResult := ListBucketV2Result{}
err = xmlDecoder(resp.Body, &listBucketResult)
if err != nil {
return listBucketResult, err
}
return listBucketResult, nil
}
// ListObjects - (List Objects) - List some objects or all recursively.
//
// ListObjects lists all objects matching the objectPrefix from
// the specified bucket. If recursion is enabled it would list
// all subdirectories and all their contents.
//
// Your input parameters are just bucketName, objectPrefix, recursive
// and a done channel for pro-actively closing the internal go
// routine. If you enable recursive as 'true' this function will
// return back all the objects in a given bucket name and object
// prefix.
//
// api := client.New(....)
// // Create a done channel.
// doneCh := make(chan struct{})
// defer close(doneCh)
// // Recursively list all objects in 'mytestbucket'
// recursive := true
// for message := range api.ListObjects("mytestbucket", "starthere", recursive, doneCh) {
// fmt.Println(message)
// }
//
func (c Client) ListObjects(bucketName, objectPrefix string, recursive bool, doneCh <-chan struct{}) <-chan ObjectInfo {
// Allocate new list objects channel.
objectStatCh := make(chan ObjectInfo, 1)
// Default listing is delimited at "/"
delimiter := "/"
if recursive {
// If recursive we do not delimit.
delimiter = ""
}
// Validate bucket name.
if err := isValidBucketName(bucketName); err != nil {
defer close(objectStatCh)
objectStatCh <- ObjectInfo{
Err: err,
}
return objectStatCh
}
// Validate incoming object prefix.
if err := isValidObjectPrefix(objectPrefix); err != nil {
defer close(objectStatCh)
objectStatCh <- ObjectInfo{
Err: err,
}
return objectStatCh
}
// Initiate list objects goroutine here.
go func(objectStatCh chan<- ObjectInfo) {
defer close(objectStatCh)
// Save marker for next request.
var marker string
for {
// Get list of objects a maximum of 1000 per request.
result, err := c.listObjectsQuery(bucketName, objectPrefix, marker, delimiter, 1000)
if err != nil {
objectStatCh <- ObjectInfo{
Err: err,
}
return
}
// If contents are available loop through and send over channel.
for _, object := range result.Contents {
// Save the marker.
marker = object.Key
select {
// Send object content.
case objectStatCh <- object:
// If receives done from the caller, return here.
case <-doneCh:
return
}
}
// Send all common prefixes if any.
// NOTE: prefixes are only present if the request is delimited.
for _, obj := range result.CommonPrefixes {
object := ObjectInfo{}
object.Key = obj.Prefix
object.Size = 0
select {
// Send object prefixes.
case objectStatCh <- object:
// If receives done from the caller, return here.
case <-doneCh:
return
}
}
// If next marker present, save it for next request.
if result.NextMarker != "" {
marker = result.NextMarker
}
// Listing ends when result is not truncated, return right here.
if !result.IsTruncated {
return
}
}
}(objectStatCh)
return objectStatCh
}
// listObjectsQuery - (List Objects) - List some or all (up to 1000) of the objects in a bucket.
//
// You can use the request parameters as selection criteria to return a subset of the objects in a bucket.
// request parameters :-
// ---------
// ?marker - Specifies the key to start with when listing objects in a bucket.
// ?delimiter - A delimiter is a character you use to group keys.
// ?prefix - Limits the response to keys that begin with the specified prefix.
// ?max-keys - Sets the maximum number of keys returned in the response body.
func (c Client) listObjectsQuery(bucketName, objectPrefix, objectMarker, delimiter string, maxkeys int) (ListBucketResult, error) {
// Validate bucket name.
if err := isValidBucketName(bucketName); err != nil {
return ListBucketResult{}, err
}
// Validate object prefix.
if err := isValidObjectPrefix(objectPrefix); err != nil {
return ListBucketResult{}, err
}
// Get resources properly escaped and lined up before
// using them in http request.
urlValues := make(url.Values)
// Set object prefix.
if objectPrefix != "" {
urlValues.Set("prefix", objectPrefix)
}
// Set object marker.
if objectMarker != "" {
urlValues.Set("marker", objectMarker)
}
// Set delimiter.
if delimiter != "" {
urlValues.Set("delimiter", delimiter)
}
// maxkeys should default to 1000 or less.
if maxkeys == 0 || maxkeys > 1000 {
maxkeys = 1000
}
// Set max keys.
urlValues.Set("max-keys", fmt.Sprintf("%d", maxkeys))
// Execute GET on bucket to list objects.
resp, err := c.executeMethod("GET", requestMetadata{
bucketName: bucketName,
queryValues: urlValues,
})
defer closeResponse(resp)
if err != nil {
return ListBucketResult{}, err
}
if resp != nil {
if resp.StatusCode != http.StatusOK {
return ListBucketResult{}, httpRespToErrorResponse(resp, bucketName, "")
}
}
// Decode ListBucketResult XML.
listBucketResult := ListBucketResult{}
err = xmlDecoder(resp.Body, &listBucketResult)
if err != nil {
return listBucketResult, err
}
return listBucketResult, nil
}
// ListIncompleteUploads - List incompletely uploaded multipart objects.
//
// ListIncompleteUploads lists all incomplete multipart uploads matching the
// objectPrefix from the specified bucket. If recursion is enabled
// it would list all subdirectories and all their contents.
//
// Your input parameters are just bucketName, objectPrefix, recursive
// and a done channel to pro-actively close the internal go routine.
// If you enable recursive as 'true' this function will return back all
// the multipart objects in a given bucket name.
//
// api := client.New(....)
// // Create a done channel.
// doneCh := make(chan struct{})
// defer close(doneCh)
// // Recursively list all incomplete uploads in 'mytestbucket'
// recursive := true
// for message := range api.ListIncompleteUploads("mytestbucket", "starthere", recursive, doneCh) {
// fmt.Println(message)
// }
//
func (c Client) ListIncompleteUploads(bucketName, objectPrefix string, recursive bool, doneCh <-chan struct{}) <-chan ObjectMultipartInfo {
// Turn on size aggregation of individual parts.
isAggregateSize := true
return c.listIncompleteUploads(bucketName, objectPrefix, recursive, isAggregateSize, doneCh)
}
// listIncompleteUploads lists all incomplete uploads.
func (c Client) listIncompleteUploads(bucketName, objectPrefix string, recursive, aggregateSize bool, doneCh <-chan struct{}) <-chan ObjectMultipartInfo {
// Allocate channel for multipart uploads.
objectMultipartStatCh := make(chan ObjectMultipartInfo, 1)
// Delimiter is set to "/" by default.
delimiter := "/"
if recursive {
// If recursive do not delimit.
delimiter = ""
}
// Validate bucket name.
if err := isValidBucketName(bucketName); err != nil {
defer close(objectMultipartStatCh)
objectMultipartStatCh <- ObjectMultipartInfo{
Err: err,
}
return objectMultipartStatCh
}
// Validate incoming object prefix.
if err := isValidObjectPrefix(objectPrefix); err != nil {
defer close(objectMultipartStatCh)
objectMultipartStatCh <- ObjectMultipartInfo{
Err: err,
}
return objectMultipartStatCh
}
go func(objectMultipartStatCh chan<- ObjectMultipartInfo) {
defer close(objectMultipartStatCh)
// object and upload ID marker for future requests.
var objectMarker string
var uploadIDMarker string
for {
// list all multipart uploads.
result, err := c.listMultipartUploadsQuery(bucketName, objectMarker, uploadIDMarker, objectPrefix, delimiter, 1000)
if err != nil {
objectMultipartStatCh <- ObjectMultipartInfo{
Err: err,
}
return
}
// Save objectMarker and uploadIDMarker for next request.
objectMarker = result.NextKeyMarker
uploadIDMarker = result.NextUploadIDMarker
// Send all multipart uploads.
for _, obj := range result.Uploads {
// Calculate total size of the uploaded parts if 'aggregateSize' is enabled.
if aggregateSize {
// Get total multipart size.
obj.Size, err = c.getTotalMultipartSize(bucketName, obj.Key, obj.UploadID)
if err != nil {
objectMultipartStatCh <- ObjectMultipartInfo{
Err: err,
}
continue
}
}
select {
// Send individual uploads here.
case objectMultipartStatCh <- obj:
// If done channel return here.
case <-doneCh:
return
}
}
// Send all common prefixes if any.
// NOTE: prefixes are only present if the request is delimited.
for _, obj := range result.CommonPrefixes {
object := ObjectMultipartInfo{}
object.Key = obj.Prefix
object.Size = 0
select {
// Send delimited prefixes here.
case objectMultipartStatCh <- object:
// If done channel return here.
case <-doneCh:
return
}
}
// Listing ends if result is not truncated, return right here.
if !result.IsTruncated {
return
}
}
}(objectMultipartStatCh)
// return.
return objectMultipartStatCh
}
// listMultipartUploadsQuery - (List Multipart Uploads).
// - Lists some or all (up to 1000) in-progress multipart uploads in a bucket.
//
// You can use the request parameters as selection criteria to return a subset of the uploads in a bucket.
// request parameters. :-
// ---------
// ?key-marker - Specifies the multipart upload after which listing should begin.
// ?upload-id-marker - Together with key-marker specifies the multipart upload after which listing should begin.
// ?delimiter - A delimiter is a character you use to group keys.
// ?prefix - Limits the response to keys that begin with the specified prefix.
// ?max-uploads - Sets the maximum number of multipart uploads returned in the response body.
func (c Client) listMultipartUploadsQuery(bucketName, keyMarker, uploadIDMarker, prefix, delimiter string, maxUploads int) (ListMultipartUploadsResult, error) {
// Get resources properly escaped and lined up before using them in http request.
urlValues := make(url.Values)
// Set uploads.
urlValues.Set("uploads", "")
// Set object key marker.
if keyMarker != "" {
urlValues.Set("key-marker", keyMarker)
}
// Set upload id marker.
if uploadIDMarker != "" {
urlValues.Set("upload-id-marker", uploadIDMarker)
}
// Set prefix marker.
if prefix != "" {
urlValues.Set("prefix", prefix)
}
// Set delimiter.
if delimiter != "" {
urlValues.Set("delimiter", delimiter)
}
// maxUploads should be 1000 or less.
if maxUploads == 0 || maxUploads > 1000 {
maxUploads = 1000
}
// Set max-uploads.
urlValues.Set("max-uploads", fmt.Sprintf("%d", maxUploads))
// Execute GET on bucketName to list multipart uploads.
resp, err := c.executeMethod("GET", requestMetadata{
bucketName: bucketName,
queryValues: urlValues,
})
defer closeResponse(resp)
if err != nil {
return ListMultipartUploadsResult{}, err
}
if resp != nil {
if resp.StatusCode != http.StatusOK {
return ListMultipartUploadsResult{}, httpRespToErrorResponse(resp, bucketName, "")
}
}
// Decode response body.
listMultipartUploadsResult := ListMultipartUploadsResult{}
err = xmlDecoder(resp.Body, &listMultipartUploadsResult)
if err != nil {
return listMultipartUploadsResult, err
}
return listMultipartUploadsResult, nil
}
// listObjectParts lists all object parts recursively.
func (c Client) listObjectParts(bucketName, objectName, uploadID string) (partsInfo map[int]ObjectPart, err error) {
// Part number marker for the next batch of request.
var nextPartNumberMarker int
partsInfo = make(map[int]ObjectPart)
for {
// Get list of uploaded parts a maximum of 1000 per request.
listObjPartsResult, err := c.listObjectPartsQuery(bucketName, objectName, uploadID, nextPartNumberMarker, 1000)
if err != nil {
return nil, err
}
// Append to parts info.
for _, part := range listObjPartsResult.ObjectParts {
// Trim off the odd double quotes from ETag in the beginning and end.
part.ETag = strings.TrimPrefix(part.ETag, "\"")
part.ETag = strings.TrimSuffix(part.ETag, "\"")
partsInfo[part.PartNumber] = part
}
// Keep part number marker, for the next iteration.
nextPartNumberMarker = listObjPartsResult.NextPartNumberMarker
// Listing ends when result is not truncated, return right here.
if !listObjPartsResult.IsTruncated {
break
}
}
// Return all the parts.
return partsInfo, nil
}
// findUploadID lists all incomplete uploads and finds the uploadID of the matching object name.
func (c Client) findUploadID(bucketName, objectName string) (uploadID string, err error) {
// Make list incomplete uploads recursive.
isRecursive := true
// Turn off size aggregation of individual parts, in this request.
isAggregateSize := false
// latestUpload to track the latest multipart info for objectName.
var latestUpload ObjectMultipartInfo
// Create done channel to cleanup the routine.
doneCh := make(chan struct{})
defer close(doneCh)
// List all incomplete uploads.
for mpUpload := range c.listIncompleteUploads(bucketName, objectName, isRecursive, isAggregateSize, doneCh) {
if mpUpload.Err != nil {
return "", mpUpload.Err
}
if objectName == mpUpload.Key {
if mpUpload.Initiated.Sub(latestUpload.Initiated) > 0 {
latestUpload = mpUpload
}
}
}
// Return the latest upload id.
return latestUpload.UploadID, nil
}
// getTotalMultipartSize - calculate total uploaded size for a given multipart object.
func (c Client) getTotalMultipartSize(bucketName, objectName, uploadID string) (size int64, err error) {
// Iterate over all parts and aggregate the size.
partsInfo, err := c.listObjectParts(bucketName, objectName, uploadID)
if err != nil {
return 0, err
}
for _, partInfo := range partsInfo {
size += partInfo.Size
}
return size, nil
}
// listObjectPartsQuery (List Parts query)
// - lists some or all (up to 1000) parts that have been uploaded
// for a specific multipart upload
//
// You can use the request parameters as selection criteria to return
// a subset of the uploads in a bucket, request parameters :-
// ---------
// ?part-number-marker - Specifies the part after which listing should
// begin.
// ?max-parts - Maximum parts to be listed per request.
func (c Client) listObjectPartsQuery(bucketName, objectName, uploadID string, partNumberMarker, maxParts int) (ListObjectPartsResult, error) {
// Get resources properly escaped and lined up before using them in http request.
urlValues := make(url.Values)
// Set part number marker.
urlValues.Set("part-number-marker", fmt.Sprintf("%d", partNumberMarker))
// Set upload id.
urlValues.Set("uploadId", uploadID)
// maxParts should be 1000 or less.
if maxParts == 0 || maxParts > 1000 {
maxParts = 1000
}
// Set max parts.
urlValues.Set("max-parts", fmt.Sprintf("%d", maxParts))
// Execute GET on objectName to get list of parts.
resp, err := c.executeMethod("GET", requestMetadata{
bucketName: bucketName,
objectName: objectName,
queryValues: urlValues,
})
defer closeResponse(resp)
if err != nil {
return ListObjectPartsResult{}, err
}
if resp != nil {
if resp.StatusCode != http.StatusOK {
return ListObjectPartsResult{}, httpRespToErrorResponse(resp, bucketName, objectName)
}
}
// Decode list object parts XML.
listObjectPartsResult := ListObjectPartsResult{}
err = xmlDecoder(resp.Body, &listObjectPartsResult)
if err != nil {
return listObjectPartsResult, err
}
return listObjectPartsResult, nil
}
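A consuming fragment for the listing channels above; closing doneCh releases the internal goroutine even when the caller stops early. Names are placeholders and `c` is an assumed Client:
doneCh := make(chan struct{})
defer close(doneCh)
// Recursive listing under the "photos/" prefix via ListObjects V2.
for object := range c.ListObjectsV2("mybucket", "photos/", true, doneCh) {
    if object.Err != nil {
        log.Fatalln(object.Err)
    }
    fmt.Println(object.Key, object.Size)
}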

223
vendor/github.com/minio/minio-go/api-notification.go generated vendored Normal file

@@ -0,0 +1,223 @@
/*
* Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2016 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package minio
import (
"bufio"
"encoding/json"
"io"
"net/http"
"net/url"
"time"
"github.com/minio/minio-go/pkg/s3utils"
)
// GetBucketNotification - get notification configuration for a given bucket.
func (c Client) GetBucketNotification(bucketName string) (bucketNotification BucketNotification, err error) {
// Input validation.
if err := isValidBucketName(bucketName); err != nil {
return BucketNotification{}, err
}
notification, err := c.getBucketNotification(bucketName)
if err != nil {
return BucketNotification{}, err
}
return notification, nil
}
// Request server for notification rules.
func (c Client) getBucketNotification(bucketName string) (BucketNotification, error) {
urlValues := make(url.Values)
urlValues.Set("notification", "")
// Execute GET on bucket to fetch its notification configuration.
resp, err := c.executeMethod("GET", requestMetadata{
bucketName: bucketName,
queryValues: urlValues,
})
defer closeResponse(resp)
if err != nil {
return BucketNotification{}, err
}
return processBucketNotificationResponse(bucketName, resp)
}
// processBucketNotificationResponse - processes the GetBucketNotification http response from the server.
func processBucketNotificationResponse(bucketName string, resp *http.Response) (BucketNotification, error) {
if resp.StatusCode != http.StatusOK {
errResponse := httpRespToErrorResponse(resp, bucketName, "")
return BucketNotification{}, errResponse
}
var bucketNotification BucketNotification
err := xmlDecoder(resp.Body, &bucketNotification)
if err != nil {
return BucketNotification{}, err
}
return bucketNotification, nil
}
// identity represents the user ID, this is a compliance field.
type identity struct {
PrincipalID string `json:"principalId"`
}
// Notification event bucket metadata.
type bucketMeta struct {
Name string `json:"name"`
OwnerIdentity identity `json:"ownerIdentity"`
ARN string `json:"arn"`
}
// Notification event object metadata.
type objectMeta struct {
Key string `json:"key"`
Size int64 `json:"size,omitempty"`
ETag string `json:"eTag,omitempty"`
VersionID string `json:"versionId,omitempty"`
Sequencer string `json:"sequencer"`
}
// Notification event server specific metadata.
type eventMeta struct {
SchemaVersion string `json:"s3SchemaVersion"`
ConfigurationID string `json:"configurationId"`
Bucket bucketMeta `json:"bucket"`
Object objectMeta `json:"object"`
}
// sourceInfo represents information on the client that
// triggered the event notification.
type sourceInfo struct {
Host string `json:"host"`
Port string `json:"port"`
UserAgent string `json:"userAgent"`
}
// NotificationEvent represents an Amazon S3 bucket notification event.
type NotificationEvent struct {
EventVersion string `json:"eventVersion"`
EventSource string `json:"eventSource"`
AwsRegion string `json:"awsRegion"`
EventTime string `json:"eventTime"`
EventName string `json:"eventName"`
UserIdentity identity `json:"userIdentity"`
RequestParameters map[string]string `json:"requestParameters"`
ResponseElements map[string]string `json:"responseElements"`
S3 eventMeta `json:"s3"`
Source sourceInfo `json:"source"`
}
// NotificationInfo - represents the collection of notification events, additionally
// also reports errors if any while listening on bucket notifications.
type NotificationInfo struct {
Records []NotificationEvent
Err error
}
// ListenBucketNotification - listen on bucket notifications.
func (c Client) ListenBucketNotification(bucketName, prefix, suffix string, events []string, doneCh <-chan struct{}) <-chan NotificationInfo {
notificationInfoCh := make(chan NotificationInfo, 1)
// Start a routine to read the notification stream line by line.
go func(notificationInfoCh chan<- NotificationInfo) {
defer close(notificationInfoCh)
// Validate the bucket name.
if err := isValidBucketName(bucketName); err != nil {
notificationInfoCh <- NotificationInfo{
Err: err,
}
return
}
// Check ARN partition to verify if listening bucket is supported
if s3utils.IsAmazonEndpoint(c.endpointURL) || s3utils.IsGoogleEndpoint(c.endpointURL) {
notificationInfoCh <- NotificationInfo{
Err: ErrAPINotSupported("Listening bucket notification is specific only to `minio` partitions"),
}
return
}
// Continuously run and listen on bucket notification.
// Create a done channel to control the retry go routine.
retryDoneCh := make(chan struct{}, 1)
// Indicate to our routine to exit cleanly upon return.
defer close(retryDoneCh)
// Wait on the jitter retry loop.
for range c.newRetryTimerContinous(time.Second, time.Second*30, MaxJitter, retryDoneCh) {
urlValues := make(url.Values)
urlValues.Set("prefix", prefix)
urlValues.Set("suffix", suffix)
urlValues["events"] = events
// Execute GET on bucket to list objects.
resp, err := c.executeMethod("GET", requestMetadata{
bucketName: bucketName,
queryValues: urlValues,
})
if err != nil {
continue
}
// Validate http response, upon error return quickly.
if resp.StatusCode != http.StatusOK {
errResponse := httpRespToErrorResponse(resp, bucketName, "")
notificationInfoCh <- NotificationInfo{
Err: errResponse,
}
return
}
// Initialize a new bufio scanner, to read line by line.
bio := bufio.NewScanner(resp.Body)
// Close the response body.
defer resp.Body.Close()
// Unmarshal each line, returns marshalled values.
for bio.Scan() {
var notificationInfo NotificationInfo
if err = json.Unmarshal(bio.Bytes(), &notificationInfo); err != nil {
continue
}
// Send notifications on channel only if there are events received.
if len(notificationInfo.Records) > 0 {
select {
case notificationInfoCh <- notificationInfo:
case <-doneCh:
return
}
}
}
// Look for any underlying errors.
if err = bio.Err(); err != nil {
// For an unexpected connection drop from server, we close the body
// and re-connect.
if err == io.ErrUnexpectedEOF {
resp.Body.Close()
}
}
}
}(notificationInfoCh)
// Returns the notification info channel, for caller to start reading from.
return notificationInfoCh
}
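A listener fragment for the notification stream above; the event names follow the S3 convention, the bucket/prefix/suffix values are placeholders, and `c` is an assumed Client:
doneCh := make(chan struct{})
defer close(doneCh)
events := []string{"s3:ObjectCreated:*", "s3:ObjectRemoved:*"}
// Receive create/remove events for ".jpg" keys under "photos/".
for info := range c.ListenBucketNotification("mybucket", "photos/", ".jpg", events, doneCh) {
    if info.Err != nil {
        log.Fatalln(info.Err)
    }
    for _, record := range info.Records {
        fmt.Println("event:", record.EventName, "key:", record.S3.Object.Key)
    }
}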

180
vendor/github.com/minio/minio-go/api-presigned.go generated vendored Normal file

@@ -0,0 +1,180 @@
/*
* Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015, 2016 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package minio
import (
"errors"
"net/url"
"time"
"github.com/minio/minio-go/pkg/s3signer"
"github.com/minio/minio-go/pkg/s3utils"
)
// supportedGetReqParams - supported request parameters for GET presigned request.
var supportedGetReqParams = map[string]struct{}{
"response-expires": {},
"response-content-type": {},
"response-cache-control": {},
"response-content-language": {},
"response-content-encoding": {},
"response-content-disposition": {},
}
// presignURL - Returns a presigned URL for an input 'method'.
// Expires maximum is 7 days - i.e. 604800 seconds - and minimum is 1 second.
func (c Client) presignURL(method string, bucketName string, objectName string, expires time.Duration, reqParams url.Values) (u *url.URL, err error) {
// Input validation.
if method == "" {
return nil, ErrInvalidArgument("method cannot be empty.")
}
if err := isValidBucketName(bucketName); err != nil {
return nil, err
}
if err := isValidObjectName(objectName); err != nil {
return nil, err
}
if err := isValidExpiry(expires); err != nil {
return nil, err
}
// Convert expires into seconds.
expireSeconds := int64(expires / time.Second)
reqMetadata := requestMetadata{
presignURL: true,
bucketName: bucketName,
objectName: objectName,
expires: expireSeconds,
}
// For "GET" we are handling additional request parameters to
// override its response headers.
if method == "GET" {
// Verify if input map has unsupported params, if yes exit.
for k := range reqParams {
if _, ok := supportedGetReqParams[k]; !ok {
return nil, ErrInvalidArgument(k + " unsupported request parameter for presigned GET.")
}
}
// Save the request parameters to be used in presigning for GET request.
reqMetadata.queryValues = reqParams
}
// Instantiate a new request.
// Since expires is set newRequest will presign the request.
req, err := c.newRequest(method, reqMetadata)
if err != nil {
return nil, err
}
return req.URL, nil
}
// PresignedGetObject - Returns a presigned URL to access an object
// without credentials. Expires maximum is 7 days - i.e. 604800 seconds -
// and minimum is 1 second. Additionally you can override a set of response
// headers using the query parameters.
func (c Client) PresignedGetObject(bucketName string, objectName string, expires time.Duration, reqParams url.Values) (u *url.URL, err error) {
return c.presignURL("GET", bucketName, objectName, expires, reqParams)
}
// PresignedPutObject - Returns a presigned URL to upload an object without credentials.
// Expires maximum is 7 days - i.e. 604800 seconds - and minimum is 1 second.
func (c Client) PresignedPutObject(bucketName string, objectName string, expires time.Duration) (u *url.URL, err error) {
return c.presignURL("PUT", bucketName, objectName, expires, nil)
}
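A presigning fragment using the two helpers above; the resulting URLs work with any HTTP client and no credentials. Names and expiries are placeholders, `c` is an assumed Client, and net/url and time imports are assumed:
// Override the download filename on a presigned GET, valid for 24 hours.
reqParams := make(url.Values)
reqParams.Set("response-content-disposition", `attachment; filename="myobject.zip"`)
getURL, err := c.PresignedGetObject("mybucket", "myobject", 24*time.Hour, reqParams)
if err != nil {
    log.Fatalln(err)
}
// A presigned PUT, valid for 15 minutes.
putURL, err := c.PresignedPutObject("mybucket", "myobject", 15*time.Minute)
if err != nil {
    log.Fatalln(err)
}
fmt.Println(getURL, putURL)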
// PresignedPostPolicy - Returns POST URL and form data to upload an object.
func (c Client) PresignedPostPolicy(p *PostPolicy) (u *url.URL, formData map[string]string, err error) {
// Validate input arguments.
if p.expiration.IsZero() {
return nil, nil, errors.New("Expiration time must be specified")
}
if _, ok := p.formData["key"]; !ok {
return nil, nil, errors.New("object key must be specified")
}
if _, ok := p.formData["bucket"]; !ok {
return nil, nil, errors.New("bucket name must be specified")
}
bucketName := p.formData["bucket"]
// Fetch the bucket location.
location, err := c.getBucketLocation(bucketName)
if err != nil {
return nil, nil, err
}
u, err = c.makeTargetURL(bucketName, "", location, nil)
if err != nil {
return nil, nil, err
}
// Keep time.
t := time.Now().UTC()
// For signature version '2' handle here.
if c.signature.isV2() {
policyBase64 := p.base64()
p.formData["policy"] = policyBase64
// For Google endpoint set this value to be 'GoogleAccessId'.
if s3utils.IsGoogleEndpoint(c.endpointURL) {
p.formData["GoogleAccessId"] = c.accessKeyID
} else {
// For all other endpoints set this value to be 'AWSAccessKeyId'.
p.formData["AWSAccessKeyId"] = c.accessKeyID
}
// Sign the policy.
p.formData["signature"] = s3signer.PostPresignSignatureV2(policyBase64, c.secretAccessKey)
return u, p.formData, nil
}
// Add date policy.
if err = p.addNewPolicy(policyCondition{
matchType: "eq",
condition: "$x-amz-date",
value: t.Format(iso8601DateFormat),
}); err != nil {
return nil, nil, err
}
// Add algorithm policy.
if err = p.addNewPolicy(policyCondition{
matchType: "eq",
condition: "$x-amz-algorithm",
value: signV4Algorithm,
}); err != nil {
return nil, nil, err
}
// Add a credential policy.
credential := s3signer.GetCredential(c.accessKeyID, location, t)
if err = p.addNewPolicy(policyCondition{
matchType: "eq",
condition: "$x-amz-credential",
value: credential,
}); err != nil {
return nil, nil, err
}
// Get base64 encoded policy.
policyBase64 := p.base64()
// Fill in the form data.
p.formData["policy"] = policyBase64
p.formData["x-amz-algorithm"] = signV4Algorithm
p.formData["x-amz-credential"] = credential
p.formData["x-amz-date"] = t.Format(iso8601DateFormat)
p.formData["x-amz-signature"] = s3signer.PostPresignSignatureV4(policyBase64, t, c.secretAccessKey, location)
return u, p.formData, nil
}
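// A minimal sketch of building and presigning a POST policy with the function
// above; bucket, key and expiry are illustrative assumptions, and the policy
// setters come from the package's PostPolicy type:
package main

import (
	"log"
	"time"

	"github.com/minio/minio-go"
)

func main() {
	c, err := minio.New("play.minio.io:9000", "ACCESS-KEY", "SECRET-KEY", true)
	if err != nil {
		log.Fatalln(err)
	}
	p := minio.NewPostPolicy()
	if err = p.SetBucket("mybucket"); err != nil {
		log.Fatalln(err)
	}
	if err = p.SetKey("photo.jpg"); err != nil {
		log.Fatalln(err)
	}
	if err = p.SetExpires(time.Now().UTC().Add(24 * time.Hour)); err != nil {
		log.Fatalln(err)
	}
	u, formData, err := c.PresignedPostPolicy(p)
	if err != nil {
		log.Fatalln(err)
	}
	// The returned form fields are submitted alongside the file in a
	// multipart/form-data POST to u.
	log.Println("POST to:", u)
	for k, v := range formData {
		log.Println("form field:", k, "=", v)
	}
}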

329
vendor/github.com/minio/minio-go/api-put-bucket.go generated vendored Normal file
View File

@ -0,0 +1,329 @@
/*
* Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015, 2016 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package minio
import (
"bytes"
"encoding/base64"
"encoding/hex"
"encoding/json"
"encoding/xml"
"fmt"
"io/ioutil"
"net/http"
"net/url"
"path"
"github.com/minio/minio-go/pkg/policy"
"github.com/minio/minio-go/pkg/s3signer"
)
/// Bucket operations
// MakeBucket creates a new bucket with bucketName.
//
// Location is an optional argument, by default all buckets are
// created in US Standard Region.
//
// For Amazon S3, the list of supported regions - http://docs.aws.amazon.com/general/latest/gr/rande.html
// For Google Cloud Storage, the list of supported regions - https://cloud.google.com/storage/docs/bucket-locations
func (c Client) MakeBucket(bucketName string, location string) (err error) {
defer func() {
// Save the location into cache on a successful makeBucket response.
if err == nil {
c.bucketLocCache.Set(bucketName, location)
}
}()
// Validate the input arguments.
if err := isValidBucketName(bucketName); err != nil {
return err
}
// If location is empty, treat it as the default region 'us-east-1'.
if location == "" {
location = "us-east-1"
}
// Try creating the bucket with the provided region. In case of an
// invalid region error, guess the appropriate region from the
// S3 API error response headers.
// Create a done channel to control 'newRetryTimer' go routine.
doneCh := make(chan struct{}, 1)
// Indicate to our routine to exit cleanly upon return.
defer close(doneCh)
// Blank identifier is kept here on purpose since 'range' without a
// blank identifier is only supported since go1.4
// https://golang.org/doc/go1.4#forrange.
for _ = range c.newRetryTimer(MaxRetry, DefaultRetryUnit, DefaultRetryCap, MaxJitter, doneCh) {
// Initialize the makeBucket request.
req, err := c.makeBucketRequest(bucketName, location)
if err != nil {
return err
}
// Execute make bucket request.
resp, err := c.do(req)
defer closeResponse(resp)
if err != nil {
return err
}
if resp.StatusCode != http.StatusOK {
err := httpRespToErrorResponse(resp, bucketName, "")
errResp := ToErrorResponse(err)
if errResp.Code == "InvalidRegion" && errResp.Region != "" {
// Fetch bucket region found in headers
// of S3 error response, attempt bucket
// create again.
location = errResp.Region
continue
}
// Nothing to retry, fail.
return err
}
// Control reaches here when bucket create was successful,
// break out.
break
}
// Success.
return nil
}
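// A minimal usage sketch for MakeBucket; the endpoint, credentials and bucket
// name are illustrative assumptions:
package main

import (
	"log"

	"github.com/minio/minio-go"
)

func main() {
	c, err := minio.New("play.minio.io:9000", "ACCESS-KEY", "SECRET-KEY", true)
	if err != nil {
		log.Fatalln(err)
	}
	if err = c.MakeBucket("mybucket", "us-east-1"); err != nil {
		// Treat a bucket we already own as success; any other error is fatal.
		if minio.ToErrorResponse(err).Code != "BucketAlreadyOwnedByYou" {
			log.Fatalln(err)
		}
	}
	log.Println("bucket ready")
}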
// makeBucketRequest - Low level API to construct and sign a make bucket request.
func (c Client) makeBucketRequest(bucketName string, location string) (*http.Request, error) {
// Validate input arguments.
if err := isValidBucketName(bucketName); err != nil {
return nil, err
}
// In case of Amazon S3, a make bucket request issued on an
// already existing bucket would fail with an
// 'AuthorizationMalformed' error if virtual style is
// used. So we default to 'path style' as that is the
// preferred method here. The final location of the
// 'bucket' is provided through the XML LocationConstraint
// data with the request.
targetURL := c.endpointURL
targetURL.Path = path.Join(bucketName, "") + "/"
// get a new HTTP request for the method.
req, err := http.NewRequest("PUT", targetURL.String(), nil)
if err != nil {
return nil, err
}
// set UserAgent for the request.
c.setUserAgent(req)
// set sha256 sum for signature calculation only with
// signature version '4'.
if c.signature.isV4() {
req.Header.Set("X-Amz-Content-Sha256", hex.EncodeToString(sum256([]byte{})))
}
// If location is not 'us-east-1' create bucket location config.
if location != "us-east-1" && location != "" {
createBucketConfig := createBucketConfiguration{}
createBucketConfig.Location = location
var createBucketConfigBytes []byte
createBucketConfigBytes, err = xml.Marshal(createBucketConfig)
if err != nil {
return nil, err
}
createBucketConfigBuffer := bytes.NewBuffer(createBucketConfigBytes)
req.Body = ioutil.NopCloser(createBucketConfigBuffer)
req.ContentLength = int64(len(createBucketConfigBytes))
// Set content-md5.
req.Header.Set("Content-Md5", base64.StdEncoding.EncodeToString(sumMD5(createBucketConfigBytes)))
if c.signature.isV4() {
// Set sha256.
req.Header.Set("X-Amz-Content-Sha256", hex.EncodeToString(sum256(createBucketConfigBytes)))
}
}
// Sign the request.
if c.signature.isV4() {
// Signature calculated for MakeBucket request should be for 'us-east-1',
// regardless of the bucket's location constraint.
req = s3signer.SignV4(*req, c.accessKeyID, c.secretAccessKey, "us-east-1")
} else if c.signature.isV2() {
req = s3signer.SignV2(*req, c.accessKeyID, c.secretAccessKey)
}
// Return signed request.
return req, nil
}
// SetBucketPolicy set the access permissions on an existing bucket.
//
// For example
//
// none - owner gets full access [default].
// readonly - anonymous get access for everyone at a given object prefix.
// readwrite - anonymous list/put/delete access to a given object prefix.
// writeonly - anonymous put/delete access to a given object prefix.
func (c Client) SetBucketPolicy(bucketName string, objectPrefix string, bucketPolicy policy.BucketPolicy) error {
// Input validation.
if err := isValidBucketName(bucketName); err != nil {
return err
}
if err := isValidObjectPrefix(objectPrefix); err != nil {
return err
}
if !bucketPolicy.IsValidBucketPolicy() {
return ErrInvalidArgument(fmt.Sprintf("Invalid bucket policy provided. %s", bucketPolicy))
}
policyInfo, err := c.getBucketPolicy(bucketName)
errResponse := ToErrorResponse(err)
if err != nil && errResponse.Code != "NoSuchBucketPolicy" {
return err
}
if bucketPolicy == policy.BucketPolicyNone && policyInfo.Statements == nil {
// As the request is for removing policy and the bucket
// has empty policy statements, just return success.
return nil
}
policyInfo.Statements = policy.SetPolicy(policyInfo.Statements, bucketPolicy, bucketName, objectPrefix)
// Save the updated policies.
return c.putBucketPolicy(bucketName, policyInfo)
}
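// A minimal sketch of setting a read-only policy on a prefix using the
// function above; the bucket and prefix names are illustrative assumptions:
package main

import (
	"log"

	"github.com/minio/minio-go"
	"github.com/minio/minio-go/pkg/policy"
)

func main() {
	c, err := minio.New("play.minio.io:9000", "ACCESS-KEY", "SECRET-KEY", true)
	if err != nil {
		log.Fatalln(err)
	}
	// Grant anonymous read access to objects under "downloads/".
	if err = c.SetBucketPolicy("mybucket", "downloads", policy.BucketPolicyReadOnly); err != nil {
		log.Fatalln(err)
	}
	// Passing policy.BucketPolicyNone for the same prefix would remove it again.
}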
// Saves a new bucket policy.
func (c Client) putBucketPolicy(bucketName string, policyInfo policy.BucketAccessPolicy) error {
// Input validation.
if err := isValidBucketName(bucketName); err != nil {
return err
}
// If there are no policy statements, we should remove the entire policy.
if len(policyInfo.Statements) == 0 {
return c.removeBucketPolicy(bucketName)
}
// Get resources properly escaped and lined up before
// using them in http request.
urlValues := make(url.Values)
urlValues.Set("policy", "")
policyBytes, err := json.Marshal(&policyInfo)
if err != nil {
return err
}
policyBuffer := bytes.NewReader(policyBytes)
reqMetadata := requestMetadata{
bucketName: bucketName,
queryValues: urlValues,
contentBody: policyBuffer,
contentLength: int64(len(policyBytes)),
contentMD5Bytes: sumMD5(policyBytes),
contentSHA256Bytes: sum256(policyBytes),
}
// Execute PUT to upload a new bucket policy.
resp, err := c.executeMethod("PUT", reqMetadata)
defer closeResponse(resp)
if err != nil {
return err
}
if resp != nil {
if resp.StatusCode != http.StatusNoContent {
return httpRespToErrorResponse(resp, bucketName, "")
}
}
return nil
}
// Removes all policies on a bucket.
func (c Client) removeBucketPolicy(bucketName string) error {
// Input validation.
if err := isValidBucketName(bucketName); err != nil {
return err
}
// Get resources properly escaped and lined up before
// using them in http request.
urlValues := make(url.Values)
urlValues.Set("policy", "")
// Execute DELETE on objectName.
resp, err := c.executeMethod("DELETE", requestMetadata{
bucketName: bucketName,
queryValues: urlValues,
})
defer closeResponse(resp)
if err != nil {
return err
}
return nil
}
// SetBucketNotification saves a new bucket notification.
func (c Client) SetBucketNotification(bucketName string, bucketNotification BucketNotification) error {
// Input validation.
if err := isValidBucketName(bucketName); err != nil {
return err
}
// Get resources properly escaped and lined up before
// using them in http request.
urlValues := make(url.Values)
urlValues.Set("notification", "")
notifBytes, err := xml.Marshal(bucketNotification)
if err != nil {
return err
}
notifBuffer := bytes.NewReader(notifBytes)
reqMetadata := requestMetadata{
bucketName: bucketName,
queryValues: urlValues,
contentBody: notifBuffer,
contentLength: int64(len(notifBytes)),
contentMD5Bytes: sumMD5(notifBytes),
contentSHA256Bytes: sum256(notifBytes),
}
// Execute PUT to upload a new bucket notification.
resp, err := c.executeMethod("PUT", reqMetadata)
defer closeResponse(resp)
if err != nil {
return err
}
if resp != nil {
if resp.StatusCode != http.StatusOK {
return httpRespToErrorResponse(resp, bucketName, "")
}
}
return nil
}
// RemoveAllBucketNotification - Removes the bucket notification; clears all previously specified config
func (c Client) RemoveAllBucketNotification(bucketName string) error {
return c.SetBucketNotification(bucketName, BucketNotification{})
}
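// A minimal sketch of configuring an SQS notification with
// SetBucketNotification; the ARN components and bucket name are illustrative
// assumptions, and the NewArn/NewNotificationConfig helpers and event
// constants come from the package's notification support:
package main

import (
	"log"

	"github.com/minio/minio-go"
)

func main() {
	c, err := minio.New("play.minio.io:9000", "ACCESS-KEY", "SECRET-KEY", true)
	if err != nil {
		log.Fatalln(err)
	}
	queueArn := minio.NewArn("aws", "sqs", "us-east-1", "1", "myqueue")
	queueConfig := minio.NewNotificationConfig(queueArn)
	queueConfig.AddEvents(minio.ObjectCreatedAll, minio.ObjectRemovedAll)
	var bn minio.BucketNotification
	bn.AddQueue(queueConfig)
	if err = c.SetBucketNotification("mybucket", bn); err != nil {
		log.Fatalln(err)
	}
	// c.RemoveAllBucketNotification("mybucket") would clear the config again.
}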

View File

@ -0,0 +1,251 @@
/*
* Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package minio
import (
"fmt"
"hash"
"io"
"io/ioutil"
"math"
"os"
)
// Verify if reader is *os.File
func isFile(reader io.Reader) (ok bool) {
_, ok = reader.(*os.File)
return
}
// Verify if reader is *minio.Object
func isObject(reader io.Reader) (ok bool) {
_, ok = reader.(*Object)
return
}
// Verify if reader is a generic ReaderAt
func isReadAt(reader io.Reader) (ok bool) {
_, ok = reader.(io.ReaderAt)
return
}
// shouldUploadPart - verify if part should be uploaded.
func shouldUploadPart(objPart ObjectPart, uploadReq uploadPartReq) bool {
// If part is not found, it should be uploaded.
if uploadReq.Part == nil {
return true
}
// If size mismatches, the part should be uploaded.
if objPart.Size != uploadReq.Part.Size {
return true
}
// If md5sum mismatches, the part should be uploaded.
if objPart.ETag != uploadReq.Part.ETag {
return true
}
return false
}
// optimalPartInfo - calculate the optimal part info for a given
// object size.
//
// NOTE: Assumption here is that for any object to be uploaded to any S3 compatible
// object storage it will have the following parameters as constants.
//
// maxPartsCount - 10000
// minPartSize - 64MiB
// maxMultipartPutObjectSize - 5TiB
//
func optimalPartInfo(objectSize int64) (totalPartsCount int, partSize int64, lastPartSize int64, err error) {
// If object size is '-1', set it to 5TiB.
if objectSize == -1 {
objectSize = maxMultipartPutObjectSize
}
// Fail if object size is larger than the supported maximum.
if objectSize > maxMultipartPutObjectSize {
err = ErrEntityTooLarge(objectSize, maxMultipartPutObjectSize, "", "")
return
}
// Use floats for part size for all calculations to avoid
// overflows during float64 to int64 conversions.
partSizeFlt := math.Ceil(float64(objectSize) / float64(maxPartsCount))
partSizeFlt = math.Ceil(partSizeFlt/minPartSize) * minPartSize
// Total parts count.
totalPartsCount = int(math.Ceil(float64(objectSize) / partSizeFlt))
// Part size.
partSize = int64(partSizeFlt)
// Last part size.
lastPartSize = objectSize - int64(totalPartsCount-1)*partSize
return totalPartsCount, partSize, lastPartSize, nil
}
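// A sanity-check sketch for optimalPartInfo; it would live inside package
// minio since the helper is unexported, and the 11GiB figure is an
// illustrative assumption. With minPartSize at 64MiB, an 11GiB object rounds
// to 176 parts of 64MiB each, the last part included:
package minio

import "testing"

func TestOptimalPartInfoSketch(t *testing.T) {
	totalPartsCount, partSize, lastPartSize, err := optimalPartInfo(11 * 1024 * 1024 * 1024)
	if err != nil {
		t.Fatal(err)
	}
	if totalPartsCount != 176 || partSize != 64*1024*1024 || lastPartSize != 64*1024*1024 {
		t.Fatalf("unexpected part info: %d parts of %d bytes, last part %d bytes",
			totalPartsCount, partSize, lastPartSize)
	}
}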
// hashCopyBuffer is identical to hashCopyN except that it doesn't take
// a size argument; it takes a buffer argument instead, and the reader
// must implement the io.ReaderAt interface.
//
// Stages reads from offsets into the buffer; if the buffer is nil it is
// initialized to optimalReadBufferSize.
func hashCopyBuffer(hashAlgorithms map[string]hash.Hash, hashSums map[string][]byte, writer io.Writer, reader io.ReaderAt, buf []byte) (size int64, err error) {
hashWriter := writer
for _, v := range hashAlgorithms {
hashWriter = io.MultiWriter(hashWriter, v)
}
// Buffer is nil, initialize.
if buf == nil {
buf = make([]byte, optimalReadBufferSize)
}
// Offset to start reading from.
var readAtOffset int64
// Following block reads data at an offset from the input
// reader and copies it to the writer.
for {
readAtSize, rerr := reader.ReadAt(buf, readAtOffset)
if rerr != nil {
if rerr != io.EOF {
return 0, rerr
}
}
writeSize, werr := hashWriter.Write(buf[:readAtSize])
if werr != nil {
return 0, werr
}
if readAtSize != writeSize {
return 0, fmt.Errorf("Read size was not completely written to writer. wanted %d, got %d - %s", readAtSize, writeSize, reportIssue)
}
readAtOffset += int64(writeSize)
size += int64(writeSize)
if rerr == io.EOF {
break
}
}
for k, v := range hashAlgorithms {
hashSums[k] = v.Sum(nil)
}
return size, err
}
// hashCopyN - Calculates chosen hashes up to partSize amount of bytes.
func hashCopyN(hashAlgorithms map[string]hash.Hash, hashSums map[string][]byte, writer io.Writer, reader io.Reader, partSize int64) (size int64, err error) {
hashWriter := writer
for _, v := range hashAlgorithms {
hashWriter = io.MultiWriter(hashWriter, v)
}
// Copies partSize bytes of the input to the writer.
size, err = io.CopyN(hashWriter, reader, partSize)
if err != nil {
// If not EOF return error right here.
if err != io.EOF {
return 0, err
}
}
for k, v := range hashAlgorithms {
hashSums[k] = v.Sum(nil)
}
return size, err
}
// newUploadID - initiate a new multipart upload request and
// fetch a new upload id.
func (c Client) newUploadID(bucketName, objectName string, metaData map[string][]string) (uploadID string, err error) {
// Input validation.
if err := isValidBucketName(bucketName); err != nil {
return "", err
}
if err := isValidObjectName(objectName); err != nil {
return "", err
}
// Initiate multipart upload for an object.
initMultipartUploadResult, err := c.initiateMultipartUpload(bucketName, objectName, metaData)
if err != nil {
return "", err
}
return initMultipartUploadResult.UploadID, nil
}
// getMpartUploadSession returns the upload id and the uploaded parts to continue a previous upload session
// or initiates a new multipart session if no current one is found
func (c Client) getMpartUploadSession(bucketName, objectName string, metaData map[string][]string) (string, map[int]ObjectPart, error) {
// A map of all uploaded parts.
var partsInfo map[int]ObjectPart
var err error
uploadID, err := c.findUploadID(bucketName, objectName)
if err != nil {
return "", nil, err
}
if uploadID == "" {
// Initiates a new multipart request
uploadID, err = c.newUploadID(bucketName, objectName, metaData)
if err != nil {
return "", nil, err
}
} else {
// Fetch previously uploaded parts.
partsInfo, err = c.listObjectParts(bucketName, objectName, uploadID)
if err != nil {
// When the server returns NoSuchUpload even though it previously acknowledged the existence of the upload id,
// initiate a new multipart upload
if respErr, ok := err.(ErrorResponse); ok && respErr.Code == "NoSuchUpload" {
uploadID, err = c.newUploadID(bucketName, objectName, metaData)
if err != nil {
return "", nil, err
}
} else {
return "", nil, err
}
}
}
// Allocate partsInfo if not done yet
if partsInfo == nil {
partsInfo = make(map[int]ObjectPart)
}
return uploadID, partsInfo, nil
}
// computeHash - Calculates hashes for an input io.ReadSeeker.
func computeHash(hashAlgorithms map[string]hash.Hash, hashSums map[string][]byte, reader io.ReadSeeker) (size int64, err error) {
hashWriter := ioutil.Discard
for _, v := range hashAlgorithms {
hashWriter = io.MultiWriter(hashWriter, v)
}
// Since no staging buffer is needed here, just use io.Copy.
size, err = io.Copy(hashWriter, reader)
if err != nil {
return 0, err
}
// Seek back reader to the beginning location.
if _, err := reader.Seek(0, 0); err != nil {
return 0, err
}
for k, v := range hashAlgorithms {
hashSums[k] = v.Sum(nil)
}
return size, nil
}

View File

@ -0,0 +1,72 @@
/*
* Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2016 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package minio
import (
"net/http"
"github.com/minio/minio-go/pkg/s3utils"
)
// CopyObject - copy a source object into a new object with the provided name in the provided bucket
func (c Client) CopyObject(bucketName string, objectName string, objectSource string, cpCond CopyConditions) error {
// Input validation.
if err := isValidBucketName(bucketName); err != nil {
return err
}
if err := isValidObjectName(objectName); err != nil {
return err
}
if objectSource == "" {
return ErrInvalidArgument("Object source cannot be empty.")
}
// Build custom headers from the given copy conditions.
customHeaders := make(http.Header)
for _, cond := range cpCond.conditions {
customHeaders.Set(cond.key, cond.value)
}
// Set copy source.
customHeaders.Set("x-amz-copy-source", s3utils.EncodePath(objectSource))
// Execute PUT on objectName.
resp, err := c.executeMethod("PUT", requestMetadata{
bucketName: bucketName,
objectName: objectName,
customHeader: customHeaders,
})
defer closeResponse(resp)
if err != nil {
return err
}
if resp != nil {
if resp.StatusCode != http.StatusOK {
return httpRespToErrorResponse(resp, bucketName, objectName)
}
}
// Decode copy response on success.
cpObjRes := copyObjectResult{}
err = xmlDecoder(resp.Body, &cpObjRes)
if err != nil {
return err
}
// Return nil on success.
return nil
}
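// A minimal server-side copy sketch using CopyObject above; note the source is
// given as "sourceBucket/sourceObject". The names are illustrative, and the
// empty conditions come from the package's NewCopyConditions helper:
package main

import (
	"log"

	"github.com/minio/minio-go"
)

func main() {
	c, err := minio.New("play.minio.io:9000", "ACCESS-KEY", "SECRET-KEY", true)
	if err != nil {
		log.Fatalln(err)
	}
	// No copy conditions here; setters such as SetMatchETag can be added.
	cpCond := minio.NewCopyConditions()
	if err = c.CopyObject("mybucket", "photo-copy.jpg", "mybucket/photo.jpg", cpCond); err != nil {
		log.Fatalln(err)
	}
}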

294
vendor/github.com/minio/minio-go/api-put-object-file.go generated vendored Normal file
View File

@ -0,0 +1,294 @@
/*
* Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015, 2016 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package minio
import (
"crypto/md5"
"crypto/sha256"
"encoding/hex"
"fmt"
"hash"
"io"
"io/ioutil"
"mime"
"os"
"path/filepath"
"sort"
"github.com/minio/minio-go/pkg/s3utils"
)
// FPutObject - Create an object in a bucket, with contents from file at filePath.
func (c Client) FPutObject(bucketName, objectName, filePath, contentType string) (n int64, err error) {
// Input validation.
if err := isValidBucketName(bucketName); err != nil {
return 0, err
}
if err := isValidObjectName(objectName); err != nil {
return 0, err
}
// Open the referenced file.
fileReader, err := os.Open(filePath)
// On any error, fail quickly here.
if err != nil {
return 0, err
}
defer fileReader.Close()
// Save the file stat.
fileStat, err := fileReader.Stat()
if err != nil {
return 0, err
}
// Save the file size.
fileSize := fileStat.Size()
// Check for largest object size allowed.
if fileSize > int64(maxMultipartPutObjectSize) {
return 0, ErrEntityTooLarge(fileSize, maxMultipartPutObjectSize, bucketName, objectName)
}
objMetadata := make(map[string][]string)
// Set contentType based on filepath extension if not given, or the default
// value of "application/octet-stream" if the extension has no associated type.
if contentType == "" {
if contentType = mime.TypeByExtension(filepath.Ext(filePath)); contentType == "" {
contentType = "application/octet-stream"
}
}
objMetadata["Content-Type"] = []string{contentType}
// NOTE: Google Cloud Storage multipart Put is not compatible with Amazon S3 APIs.
// Current implementation will only upload a maximum of 5GiB to Google Cloud Storage servers.
if s3utils.IsGoogleEndpoint(c.endpointURL) {
if fileSize > int64(maxSinglePutObjectSize) {
return 0, ErrorResponse{
Code: "NotImplemented",
Message: fmt.Sprintf("Invalid Content-Length %d for file uploads to Google Cloud Storage.", fileSize),
Key: objectName,
BucketName: bucketName,
}
}
// Do not compute MD5 for Google Cloud Storage. Uploads up to 5GiB in size.
return c.putObjectNoChecksum(bucketName, objectName, fileReader, fileSize, objMetadata, nil)
}
// Small object upload is initiated for input data smaller than 64MiB.
if fileSize < minPartSize && fileSize >= 0 {
return c.putObjectSingle(bucketName, objectName, fileReader, fileSize, objMetadata, nil)
}
// Upload all large objects as multipart.
n, err = c.putObjectMultipartFromFile(bucketName, objectName, fileReader, fileSize, objMetadata, nil)
if err != nil {
errResp := ToErrorResponse(err)
// Check whether multipart functionality is unavailable; if so,
// fall back to a single PutObject operation.
if errResp.Code == "NotImplemented" {
// If size of file is greater than '5GiB' fail.
if fileSize > maxSinglePutObjectSize {
return 0, ErrEntityTooLarge(fileSize, maxSinglePutObjectSize, bucketName, objectName)
}
// Fall back to uploading as single PutObject operation.
return c.putObjectSingle(bucketName, objectName, fileReader, fileSize, objMetadata, nil)
}
return n, err
}
return n, nil
}
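// A minimal FPutObject sketch; the file path, content type and names are
// illustrative assumptions:
package main

import (
	"log"

	"github.com/minio/minio-go"
)

func main() {
	c, err := minio.New("play.minio.io:9000", "ACCESS-KEY", "SECRET-KEY", true)
	if err != nil {
		log.Fatalln(err)
	}
	// Small files go through a single PUT; larger ones use multipart upload.
	n, err := c.FPutObject("mybucket", "backup.tar.gz", "/tmp/backup.tar.gz", "application/gzip")
	if err != nil {
		log.Fatalln(err)
	}
	log.Println("uploaded", n, "bytes")
}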
// putObjectMultipartFromFile - Creates object from contents of *os.File
//
// NOTE: This function is meant to be used for readers backed by a local
// file, as in *os.File. This function resumes by skipping all the
// parts which were already uploaded, verifying them
// against the MD5SUM of each individual part. It also
// effectively utilizes the file system's capability of reading from
// specific sections and not having to create temporary files.
func (c Client) putObjectMultipartFromFile(bucketName, objectName string, fileReader io.ReaderAt, fileSize int64, metaData map[string][]string, progress io.Reader) (int64, error) {
// Input validation.
if err := isValidBucketName(bucketName); err != nil {
return 0, err
}
if err := isValidObjectName(objectName); err != nil {
return 0, err
}
// Get the upload id of a previously partially uploaded object or initiate a new multipart upload
uploadID, partsInfo, err := c.getMpartUploadSession(bucketName, objectName, metaData)
if err != nil {
return 0, err
}
// Total data read and written to server. Should be equal to 'size' at the end of the call.
var totalUploadedSize int64
// Complete multipart upload.
var complMultipartUpload completeMultipartUpload
// Calculate the optimal parts info for a given size.
totalPartsCount, partSize, lastPartSize, err := optimalPartInfo(fileSize)
if err != nil {
return 0, err
}
// Create a channel to communicate a part was uploaded.
// Buffer this to 10000, the maximum number of parts allowed by S3.
uploadedPartsCh := make(chan uploadedPartRes, 10000)
// Create a channel to communicate which part to upload.
// Buffer this to 10000, the maximum number of parts allowed by S3.
uploadPartsCh := make(chan uploadPartReq, 10000)
// Just for readability.
lastPartNumber := totalPartsCount
// Send each part through uploadPartsCh to be uploaded.
for p := 1; p <= totalPartsCount; p++ {
part, ok := partsInfo[p]
if ok {
uploadPartsCh <- uploadPartReq{PartNum: p, Part: &part}
} else {
uploadPartsCh <- uploadPartReq{PartNum: p, Part: nil}
}
}
close(uploadPartsCh)
// Use three 'workers' to upload parts in parallel.
for w := 1; w <= totalWorkers; w++ {
go func() {
// Deal with each part as it comes through the channel.
for uploadReq := range uploadPartsCh {
// Add hash algorithms that need to be calculated by computeHash()
// In case of a non-v4 signature or https connection, sha256 is not needed.
hashAlgos := make(map[string]hash.Hash)
hashSums := make(map[string][]byte)
hashAlgos["md5"] = md5.New()
if c.signature.isV4() && !c.secure {
hashAlgos["sha256"] = sha256.New()
}
// If partNumber was not uploaded we calculate the missing
// part offset and size. For all other part numbers we
// calculate offset based on multiples of partSize.
readOffset := int64(uploadReq.PartNum-1) * partSize
missingPartSize := partSize
// As a special case if partNumber is lastPartNumber, we
// calculate the offset based on the last part size.
if uploadReq.PartNum == lastPartNumber {
readOffset = (fileSize - lastPartSize)
missingPartSize = lastPartSize
}
// Get a section reader on a particular offset.
sectionReader := io.NewSectionReader(fileReader, readOffset, missingPartSize)
var prtSize int64
var err error
prtSize, err = computeHash(hashAlgos, hashSums, sectionReader)
if err != nil {
uploadedPartsCh <- uploadedPartRes{
Error: err,
}
// Exit the goroutine.
return
}
// Create the part to be uploaded.
verifyObjPart := ObjectPart{
ETag: hex.EncodeToString(hashSums["md5"]),
PartNumber: uploadReq.PartNum,
Size: partSize,
}
// If this is the last part do not give it the full part size.
if uploadReq.PartNum == lastPartNumber {
verifyObjPart.Size = lastPartSize
}
// Verify if part should be uploaded.
if shouldUploadPart(verifyObjPart, uploadReq) {
// Proceed to upload the part.
var objPart ObjectPart
objPart, err = c.uploadPart(bucketName, objectName, uploadID, sectionReader, uploadReq.PartNum, hashSums["md5"], hashSums["sha256"], prtSize)
if err != nil {
uploadedPartsCh <- uploadedPartRes{
Error: err,
}
// Exit the goroutine.
return
}
// Save successfully uploaded part metadata.
uploadReq.Part = &objPart
}
// Return through the channel the part size.
uploadedPartsCh <- uploadedPartRes{
Size: verifyObjPart.Size,
PartNum: uploadReq.PartNum,
Part: uploadReq.Part,
Error: nil,
}
}
}()
}
// Retrieve each uploaded part once it is done.
for u := 1; u <= totalPartsCount; u++ {
uploadRes := <-uploadedPartsCh
if uploadRes.Error != nil {
return totalUploadedSize, uploadRes.Error
}
// Retrieve each uploaded part and store it to be completed.
part := uploadRes.Part
if part == nil {
return totalUploadedSize, ErrInvalidArgument(fmt.Sprintf("Missing part number %d", uploadRes.PartNum))
}
// Update the total uploaded size.
totalUploadedSize += uploadRes.Size
// Update the progress bar if there is one.
if progress != nil {
if _, err = io.CopyN(ioutil.Discard, progress, uploadRes.Size); err != nil {
return totalUploadedSize, err
}
}
// Store the part to be completed.
complMultipartUpload.Parts = append(complMultipartUpload.Parts, CompletePart{
ETag: part.ETag,
PartNumber: part.PartNumber,
})
}
// Verify if we uploaded all data.
if totalUploadedSize != fileSize {
return totalUploadedSize, ErrUnexpectedEOF(totalUploadedSize, fileSize, bucketName, objectName)
}
// Sort all completed parts.
sort.Sort(completedParts(complMultipartUpload.Parts))
_, err = c.completeMultipartUpload(bucketName, objectName, uploadID, complMultipartUpload)
if err != nil {
return totalUploadedSize, err
}
// Return final size.
return totalUploadedSize, nil
}

View File

@ -0,0 +1,489 @@
/*
* Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015, 2016 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package minio
import (
"bytes"
"crypto/md5"
"crypto/sha256"
"encoding/hex"
"encoding/xml"
"fmt"
"hash"
"io"
"io/ioutil"
"net/http"
"net/url"
"os"
"sort"
"strconv"
"strings"
)
// Comprehensive put object operation involving multipart resumable uploads.
//
// Following code handles these types of readers.
//
// - *os.File
// - *minio.Object
// - Any reader which has a method 'ReadAt()'
//
// If we exhaust all the known types, the code proceeds to use the stream
// as is, where each part is staged in memory, checksummed and verified
// before upload.
func (c Client) putObjectMultipart(bucketName, objectName string, reader io.Reader, size int64, metaData map[string][]string, progress io.Reader) (n int64, err error) {
if size > 0 && size > minPartSize {
// Verify if reader is *os.File, then use file system functionalities.
if isFile(reader) {
return c.putObjectMultipartFromFile(bucketName, objectName, reader.(*os.File), size, metaData, progress)
}
// Verify if reader is *minio.Object or io.ReaderAt.
// NOTE: Verification of object is kept for a specific purpose
// while it is going to be duck typed similar to io.ReaderAt.
// It is to indicate that *minio.Object implements io.ReaderAt.
// and such a functionality is used in the subsequent code
// path.
if isObject(reader) || isReadAt(reader) {
return c.putObjectMultipartFromReadAt(bucketName, objectName, reader.(io.ReaderAt), size, metaData, progress)
}
}
// For any other data size and reader type we do generic multipart
// approach by staging data in temporary files and uploading them.
return c.putObjectMultipartStream(bucketName, objectName, reader, size, metaData, progress)
}
// putObjectMultipartStreamNoChecksum - upload a large object using
// multipart upload and streaming signature for signing the payload.
// NOTE: We don't resume an incomplete multipart upload; we overwrite
// the existing parts of an incomplete upload.
func (c Client) putObjectMultipartStreamNoChecksum(bucketName, objectName string,
reader io.Reader, size int64, metadata map[string][]string, progress io.Reader) (int64, error) {
// Input validation.
if err := isValidBucketName(bucketName); err != nil {
return 0, err
}
if err := isValidObjectName(objectName); err != nil {
return 0, err
}
// Get the upload id of a previously partially uploaded object or initiate a new multipart upload
uploadID, err := c.findUploadID(bucketName, objectName)
if err != nil {
return 0, err
}
if uploadID == "" {
// Initiates a new multipart request
uploadID, err = c.newUploadID(bucketName, objectName, metadata)
if err != nil {
return 0, err
}
}
// Calculate the optimal parts info for a given size.
totalPartsCount, partSize, lastPartSize, err := optimalPartInfo(size)
if err != nil {
return 0, err
}
// Total data read and written to server. Should be equal to 'size' at the end of the call.
var totalUploadedSize int64
// Initialize parts uploaded map.
partsInfo := make(map[int]ObjectPart)
// Part number always starts with '1'.
var partNumber int
for partNumber = 1; partNumber <= totalPartsCount; partNumber++ {
// Update progress reader appropriately to the latest offset
// as we read from the source.
hookReader := newHook(reader, progress)
// Proceed to upload the part.
if partNumber == totalPartsCount {
partSize = lastPartSize
}
var objPart ObjectPart
objPart, err = c.uploadPart(bucketName, objectName, uploadID,
io.LimitReader(hookReader, partSize), partNumber, nil, nil, partSize)
// For unknown size, break away on read EOF.
// We do not have to upload till totalPartsCount.
if err == io.EOF && size < 0 {
break
}
if err != nil {
return totalUploadedSize, err
}
// Save successfully uploaded part metadata.
partsInfo[partNumber] = objPart
// Save successfully uploaded size.
totalUploadedSize += partSize
}
// Verify if we uploaded all the data.
if size > 0 {
if totalUploadedSize != size {
return totalUploadedSize, ErrUnexpectedEOF(totalUploadedSize, size, bucketName, objectName)
}
}
// Complete multipart upload.
var complMultipartUpload completeMultipartUpload
// Loop over total uploaded parts to save them in
// Parts array before completing the multipart request.
for i := 1; i < partNumber; i++ {
part, ok := partsInfo[i]
if !ok {
return 0, ErrInvalidArgument(fmt.Sprintf("Missing part number %d", i))
}
complMultipartUpload.Parts = append(complMultipartUpload.Parts, CompletePart{
ETag: part.ETag,
PartNumber: part.PartNumber,
})
}
// Sort all completed parts.
sort.Sort(completedParts(complMultipartUpload.Parts))
_, err = c.completeMultipartUpload(bucketName, objectName, uploadID, complMultipartUpload)
if err != nil {
return totalUploadedSize, err
}
// Return final size.
return totalUploadedSize, nil
}
// putObjectMultipartStream uploads files bigger than 64MiB, and also supports
// the special case where size is unknown, i.e. '-1'.
func (c Client) putObjectMultipartStream(bucketName, objectName string, reader io.Reader, size int64, metaData map[string][]string, progress io.Reader) (n int64, err error) {
// Input validation.
if err := isValidBucketName(bucketName); err != nil {
return 0, err
}
if err := isValidObjectName(objectName); err != nil {
return 0, err
}
// Total data read and written to server. Should be equal to 'size' at the end of the call.
var totalUploadedSize int64
// Complete multipart upload.
var complMultipartUpload completeMultipartUpload
// Get the upload id of a previously partially uploaded object or initiate a new multipart upload
uploadID, partsInfo, err := c.getMpartUploadSession(bucketName, objectName, metaData)
if err != nil {
return 0, err
}
// Calculate the optimal parts info for a given size.
totalPartsCount, partSize, _, err := optimalPartInfo(size)
if err != nil {
return 0, err
}
// Part number always starts with '1'.
partNumber := 1
// Initialize a temporary buffer.
tmpBuffer := new(bytes.Buffer)
for partNumber <= totalPartsCount {
// Choose hash algorithms to be calculated by hashCopyN; avoid sha256
// with non-v4 signature requests or HTTPS connections.
hashSums := make(map[string][]byte)
hashAlgos := make(map[string]hash.Hash)
hashAlgos["md5"] = md5.New()
if c.signature.isV4() && !c.secure {
hashAlgos["sha256"] = sha256.New()
}
// Calculates hash sums while copying partSize bytes into tmpBuffer.
prtSize, rErr := hashCopyN(hashAlgos, hashSums, tmpBuffer, reader, partSize)
if rErr != nil && rErr != io.EOF {
return 0, rErr
}
var reader io.Reader
// Update progress reader appropriately to the latest offset
// as we read from the source.
reader = newHook(tmpBuffer, progress)
part, ok := partsInfo[partNumber]
// Verify if part should be uploaded.
if !ok || shouldUploadPart(ObjectPart{
ETag: hex.EncodeToString(hashSums["md5"]),
PartNumber: partNumber,
Size: prtSize,
}, uploadPartReq{PartNum: partNumber, Part: &part}) {
// Proceed to upload the part.
var objPart ObjectPart
objPart, err = c.uploadPart(bucketName, objectName, uploadID, reader, partNumber, hashSums["md5"], hashSums["sha256"], prtSize)
if err != nil {
// Reset the temporary buffer upon any error.
tmpBuffer.Reset()
return totalUploadedSize, err
}
// Save successfully uploaded part metadata.
partsInfo[partNumber] = objPart
} else {
// Update the progress reader for the skipped part.
if progress != nil {
if _, err = io.CopyN(ioutil.Discard, progress, prtSize); err != nil {
return totalUploadedSize, err
}
}
}
// Reset the temporary buffer.
tmpBuffer.Reset()
// Save successfully uploaded size.
totalUploadedSize += prtSize
// Increment part number.
partNumber++
// For unknown size, break away on read EOF.
// We do not have to upload till totalPartsCount.
if size < 0 && rErr == io.EOF {
break
}
}
// Verify if we uploaded all the data.
if size > 0 {
if totalUploadedSize != size {
return totalUploadedSize, ErrUnexpectedEOF(totalUploadedSize, size, bucketName, objectName)
}
}
// Loop over total uploaded parts to save them in
// Parts array before completing the multipart request.
for i := 1; i < partNumber; i++ {
part, ok := partsInfo[i]
if !ok {
return 0, ErrInvalidArgument(fmt.Sprintf("Missing part number %d", i))
}
complMultipartUpload.Parts = append(complMultipartUpload.Parts, CompletePart{
ETag: part.ETag,
PartNumber: part.PartNumber,
})
}
// Sort all completed parts.
sort.Sort(completedParts(complMultipartUpload.Parts))
_, err = c.completeMultipartUpload(bucketName, objectName, uploadID, complMultipartUpload)
if err != nil {
return totalUploadedSize, err
}
// Return final size.
return totalUploadedSize, nil
}
// initiateMultipartUpload - Initiates a multipart upload and returns an upload ID.
func (c Client) initiateMultipartUpload(bucketName, objectName string, metaData map[string][]string) (initiateMultipartUploadResult, error) {
// Input validation.
if err := isValidBucketName(bucketName); err != nil {
return initiateMultipartUploadResult{}, err
}
if err := isValidObjectName(objectName); err != nil {
return initiateMultipartUploadResult{}, err
}
// Initialize url queries.
urlValues := make(url.Values)
urlValues.Set("uploads", "")
// Set ContentType header.
customHeader := make(http.Header)
for k, v := range metaData {
if len(v) > 0 {
customHeader.Set(k, v[0])
}
}
// Set a default content-type header if the latter is not provided
if v, ok := metaData["Content-Type"]; !ok || len(v) == 0 {
customHeader.Set("Content-Type", "application/octet-stream")
}
reqMetadata := requestMetadata{
bucketName: bucketName,
objectName: objectName,
queryValues: urlValues,
customHeader: customHeader,
}
// Execute POST on an objectName to initiate multipart upload.
resp, err := c.executeMethod("POST", reqMetadata)
defer closeResponse(resp)
if err != nil {
return initiateMultipartUploadResult{}, err
}
if resp != nil {
if resp.StatusCode != http.StatusOK {
return initiateMultipartUploadResult{}, httpRespToErrorResponse(resp, bucketName, objectName)
}
}
// Decode xml for new multipart upload.
initiateMultipartUploadResult := initiateMultipartUploadResult{}
err = xmlDecoder(resp.Body, &initiateMultipartUploadResult)
if err != nil {
return initiateMultipartUploadResult, err
}
return initiateMultipartUploadResult, nil
}
// uploadPart - Uploads a part in a multipart upload.
func (c Client) uploadPart(bucketName, objectName, uploadID string, reader io.Reader, partNumber int, md5Sum, sha256Sum []byte, size int64) (ObjectPart, error) {
// Input validation.
if err := isValidBucketName(bucketName); err != nil {
return ObjectPart{}, err
}
if err := isValidObjectName(objectName); err != nil {
return ObjectPart{}, err
}
if size > maxPartSize {
return ObjectPart{}, ErrEntityTooLarge(size, maxPartSize, bucketName, objectName)
}
if size <= -1 {
return ObjectPart{}, ErrEntityTooSmall(size, bucketName, objectName)
}
if partNumber <= 0 {
return ObjectPart{}, ErrInvalidArgument("Part number cannot be negative or equal to zero.")
}
if uploadID == "" {
return ObjectPart{}, ErrInvalidArgument("UploadID cannot be empty.")
}
// Get resources properly escaped and lined up before using them in http request.
urlValues := make(url.Values)
// Set part number.
urlValues.Set("partNumber", strconv.Itoa(partNumber))
// Set upload id.
urlValues.Set("uploadId", uploadID)
reqMetadata := requestMetadata{
bucketName: bucketName,
objectName: objectName,
queryValues: urlValues,
contentBody: reader,
contentLength: size,
contentMD5Bytes: md5Sum,
contentSHA256Bytes: sha256Sum,
}
// Execute PUT on each part.
resp, err := c.executeMethod("PUT", reqMetadata)
defer closeResponse(resp)
if err != nil {
return ObjectPart{}, err
}
if resp != nil {
if resp.StatusCode != http.StatusOK {
return ObjectPart{}, httpRespToErrorResponse(resp, bucketName, objectName)
}
}
// Once successfully uploaded, return completed part.
objPart := ObjectPart{}
objPart.Size = size
objPart.PartNumber = partNumber
// Trim off the odd double quotes from ETag in the beginning and end.
objPart.ETag = strings.TrimPrefix(resp.Header.Get("ETag"), "\"")
objPart.ETag = strings.TrimSuffix(objPart.ETag, "\"")
return objPart, nil
}
// completeMultipartUpload - Completes a multipart upload by assembling previously uploaded parts.
func (c Client) completeMultipartUpload(bucketName, objectName, uploadID string, complete completeMultipartUpload) (completeMultipartUploadResult, error) {
// Input validation.
if err := isValidBucketName(bucketName); err != nil {
return completeMultipartUploadResult{}, err
}
if err := isValidObjectName(objectName); err != nil {
return completeMultipartUploadResult{}, err
}
// Initialize url queries.
urlValues := make(url.Values)
urlValues.Set("uploadId", uploadID)
// Marshal complete multipart body.
completeMultipartUploadBytes, err := xml.Marshal(complete)
if err != nil {
return completeMultipartUploadResult{}, err
}
// Instantiate the complete multipart upload buffer.
completeMultipartUploadBuffer := bytes.NewReader(completeMultipartUploadBytes)
reqMetadata := requestMetadata{
bucketName: bucketName,
objectName: objectName,
queryValues: urlValues,
contentBody: completeMultipartUploadBuffer,
contentLength: int64(len(completeMultipartUploadBytes)),
contentSHA256Bytes: sum256(completeMultipartUploadBytes),
}
// Execute POST to complete multipart upload for an objectName.
resp, err := c.executeMethod("POST", reqMetadata)
defer closeResponse(resp)
if err != nil {
return completeMultipartUploadResult{}, err
}
if resp != nil {
if resp.StatusCode != http.StatusOK {
return completeMultipartUploadResult{}, httpRespToErrorResponse(resp, bucketName, objectName)
}
}
// Read resp.Body into a []byte to parse for an Error response inside the body
var b []byte
b, err = ioutil.ReadAll(resp.Body)
if err != nil {
return completeMultipartUploadResult{}, err
}
// Decode completed multipart upload response on success.
completeMultipartUploadResult := completeMultipartUploadResult{}
err = xmlDecoder(bytes.NewReader(b), &completeMultipartUploadResult)
if err != nil {
// xml parsing failure due to the presence of an ill-formed xml fragment
return completeMultipartUploadResult, err
} else if completeMultipartUploadResult.Bucket == "" {
// xml's Decode method ignores well-formed xml that doesn't apply to the type of value supplied.
// In this case, it would leave completeMultipartUploadResult with the corresponding zero-values
// of its members.
// Decode completed multipart upload response on failure
completeMultipartUploadErr := ErrorResponse{}
err = xmlDecoder(bytes.NewReader(b), &completeMultipartUploadErr)
if err != nil {
// xml parsing failure due to the presence of an ill-formed xml fragment
return completeMultipartUploadResult, err
}
return completeMultipartUploadResult, completeMultipartUploadErr
}
return completeMultipartUploadResult, nil
}

View File

@ -0,0 +1,201 @@
/*
* Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package minio
import (
"io"
"strings"
"github.com/minio/minio-go/pkg/encrypt"
"github.com/minio/minio-go/pkg/s3utils"
)
// PutObjectWithProgress - uploads an object while updating a progress reader.
func (c Client) PutObjectWithProgress(bucketName, objectName string, reader io.Reader, contentType string, progress io.Reader) (n int64, err error) {
metaData := make(map[string][]string)
metaData["Content-Type"] = []string{contentType}
return c.PutObjectWithMetadata(bucketName, objectName, reader, metaData, progress)
}
// PutEncryptedObject - Encrypt and store object.
func (c Client) PutEncryptedObject(bucketName, objectName string, reader io.Reader, encryptMaterials encrypt.Materials, metaData map[string][]string, progress io.Reader) (n int64, err error) {
if encryptMaterials == nil {
return 0, ErrInvalidArgument("Unable to recognize empty encryption properties")
}
if err := encryptMaterials.SetupEncryptMode(reader); err != nil {
return 0, err
}
if metaData == nil {
metaData = make(map[string][]string)
}
// Set the necessary encryption headers, for future decryption.
metaData[amzHeaderIV] = []string{encryptMaterials.GetIV()}
metaData[amzHeaderKey] = []string{encryptMaterials.GetKey()}
metaData[amzHeaderMatDesc] = []string{encryptMaterials.GetDesc()}
return c.PutObjectWithMetadata(bucketName, objectName, encryptMaterials, metaData, progress)
}
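// A minimal client-side encryption sketch for PutEncryptedObject; the key
// material, file path and names are illustrative assumptions, and the key
// helpers come from the pkg/encrypt package (the symmetric key must be a
// valid AES key length, 32 bytes here):
package main

import (
	"log"
	"os"

	"github.com/minio/minio-go"
	"github.com/minio/minio-go/pkg/encrypt"
)

func main() {
	c, err := minio.New("play.minio.io:9000", "ACCESS-KEY", "SECRET-KEY", true)
	if err != nil {
		log.Fatalln(err)
	}
	key := encrypt.NewSymmetricKey([]byte("32byteslongsecretkeymustbegiven1"))
	materials, err := encrypt.NewCBCSecureMaterials(key)
	if err != nil {
		log.Fatalln(err)
	}
	f, err := os.Open("/tmp/photo.jpg")
	if err != nil {
		log.Fatalln(err)
	}
	defer f.Close()
	n, err := c.PutEncryptedObject("mybucket", "photo-encrypted.jpg", f, materials, nil, nil)
	if err != nil {
		log.Fatalln(err)
	}
	log.Println("uploaded", n, "encrypted bytes")
}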
// PutObjectWithMetadata - uploads an object with the provided metadata.
func (c Client) PutObjectWithMetadata(bucketName, objectName string, reader io.Reader, metaData map[string][]string, progress io.Reader) (n int64, err error) {
// Input validation.
if err := isValidBucketName(bucketName); err != nil {
return 0, err
}
if err := isValidObjectName(objectName); err != nil {
return 0, err
}
if reader == nil {
return 0, ErrInvalidArgument("Input reader is invalid, cannot be nil.")
}
// Size of the object.
var size int64
// Get reader size.
size, err = getReaderSize(reader)
if err != nil {
return 0, err
}
// Check for largest object size allowed.
if size > int64(maxMultipartPutObjectSize) {
return 0, ErrEntityTooLarge(size, maxMultipartPutObjectSize, bucketName, objectName)
}
// NOTE: Google Cloud Storage does not implement Amazon S3 Compatible multipart PUT.
// So we fall back to single PUT operation with the maximum limit of 5GiB.
if s3utils.IsGoogleEndpoint(c.endpointURL) {
if size <= -1 {
return 0, ErrorResponse{
Code: "NotImplemented",
Message: "Content-Length cannot be negative for file uploads to Google Cloud Storage.",
Key: objectName,
BucketName: bucketName,
}
}
if size > maxSinglePutObjectSize {
return 0, ErrEntityTooLarge(size, maxSinglePutObjectSize, bucketName, objectName)
}
// Do not compute MD5 for Google Cloud Storage. Uploads up to 5GiB in size.
return c.putObjectNoChecksum(bucketName, objectName, reader, size, metaData, progress)
}
// Put small objects with a single PUT operation.
if size < minPartSize && size >= 0 {
return c.putObjectSingle(bucketName, objectName, reader, size, metaData, progress)
}
// For all sizes greater than 64MiB do multipart.
n, err = c.putObjectMultipart(bucketName, objectName, reader, size, metaData, progress)
if err != nil {
errResp := ToErrorResponse(err)
// Check whether multipart functionality is unavailable; if so,
// fall back to a single PutObject operation.
if errResp.Code == "AccessDenied" && strings.Contains(errResp.Message, "Access Denied") {
// Verify if size of reader is greater than '5GiB'.
if size > maxSinglePutObjectSize {
return 0, ErrEntityTooLarge(size, maxSinglePutObjectSize, bucketName, objectName)
}
// Fall back to uploading as single PutObject operation.
return c.putObjectSingle(bucketName, objectName, reader, size, metaData, progress)
}
return n, err
}
return n, nil
}
// PutObjectStreaming using AWS streaming signature V4
func (c Client) PutObjectStreaming(bucketName, objectName string, reader io.Reader) (n int64, err error) {
return c.PutObjectStreamingWithProgress(bucketName, objectName, reader, nil, nil)
}
// PutObjectStreamingWithMetadata using AWS streaming signature V4
func (c Client) PutObjectStreamingWithMetadata(bucketName, objectName string, reader io.Reader, metadata map[string][]string) (n int64, err error) {
return c.PutObjectStreamingWithProgress(bucketName, objectName, reader, metadata, nil)
}
// PutObjectStreamingWithProgress using AWS streaming signature V4
func (c Client) PutObjectStreamingWithProgress(bucketName, objectName string, reader io.Reader, metadata map[string][]string, progress io.Reader) (n int64, err error) {
// NOTE: Streaming signature is not supported by GCS.
if s3utils.IsGoogleEndpoint(c.endpointURL) {
return 0, ErrorResponse{
Code: "NotImplemented",
Message: "AWS streaming signature v4 is not supported with Google Cloud Storage",
Key: objectName,
BucketName: bucketName,
}
}
// This method should return an error when the minio client is initialized with signature v2.
if c.signature.isV2() {
return 0, ErrorResponse{
Code: "NotImplemented",
Message: "AWS streaming signature v4 is not supported with minio client initialized for AWS signature v2",
Key: objectName,
BucketName: bucketName,
}
}
// Size of the object.
var size int64
// Get reader size.
size, err = getReaderSize(reader)
if err != nil {
return 0, err
}
// Check for largest object size allowed.
if size > int64(maxMultipartPutObjectSize) {
return 0, ErrEntityTooLarge(size, maxMultipartPutObjectSize, bucketName, objectName)
}
// If size cannot be found on a stream, it is not possible
// to upload using streaming signature; fall back to multipart.
if size < 0 {
return c.putObjectMultipartStream(bucketName, objectName, reader, size, metadata, progress)
}
// Set signature type to streaming signature v4.
c.signature = SignatureV4Streaming
if size < minPartSize && size >= 0 {
return c.putObjectNoChecksum(bucketName, objectName, reader, size, metadata, progress)
}
// For all sizes greater than 64MiB do multipart.
n, err = c.putObjectMultipartStreamNoChecksum(bucketName, objectName, reader, size, metadata, progress)
if err != nil {
errResp := ToErrorResponse(err)
// Check whether multipart functionality is unavailable; if so,
// fall back to a single PutObject operation.
if errResp.Code == "AccessDenied" && strings.Contains(errResp.Message, "Access Denied") {
// Verify if size of reader is greater than '5GiB'.
if size > maxSinglePutObjectSize {
return 0, ErrEntityTooLarge(size, maxSinglePutObjectSize, bucketName, objectName)
}
// Fall back to uploading as single PutObject operation.
return c.putObjectNoChecksum(bucketName, objectName, reader, size, metadata, progress)
}
return n, err
}
return n, nil
}
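// A minimal streaming-signature upload sketch using PutObjectStreaming above;
// it requires a v4-signature client and a non-GCS endpoint, and the file path
// and names are illustrative assumptions:
package main

import (
	"log"
	"os"

	"github.com/minio/minio-go"
)

func main() {
	c, err := minio.New("play.minio.io:9000", "ACCESS-KEY", "SECRET-KEY", true)
	if err != nil {
		log.Fatalln(err)
	}
	f, err := os.Open("/tmp/large.bin")
	if err != nil {
		log.Fatalln(err)
	}
	defer f.Close()
	n, err := c.PutObjectStreaming("mybucket", "large.bin", f)
	if err != nil {
		log.Fatalln(err)
	}
	log.Println("uploaded", n, "bytes with streaming signature v4")
}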

View File

@ -0,0 +1,247 @@
/*
* Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015, 2016 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package minio
import (
"bytes"
"crypto/md5"
"crypto/sha256"
"fmt"
"hash"
"io"
"io/ioutil"
"sort"
)
// uploadedPartRes - the response received from a part upload.
type uploadedPartRes struct {
Error error // Any error encountered while uploading the part.
PartNum int // Number of the part uploaded.
Size int64 // Size of the part uploaded.
Part *ObjectPart
}
type uploadPartReq struct {
PartNum int // Number of the part to upload.
Part *ObjectPart // Previously uploaded part, if any (nil otherwise).
}
// shouldUploadPartReadAt - verify if part should be uploaded.
func shouldUploadPartReadAt(objPart ObjectPart, uploadReq uploadPartReq) bool {
// If part is not found, it should be uploaded.
if uploadReq.Part == nil {
return true
}
// If size mismatches, the part should be uploaded.
if uploadReq.Part.Size != objPart.Size {
return true
}
return false
}
// putObjectMultipartFromReadAt - Uploads files bigger than 64MiB. Supports readers
// of a type which implements the io.ReaderAt interface (ReadAt method).
//
// NOTE: This function is meant to be used for all readers which
// implement io.ReaderAt, which allows us to resume multipart
// uploads by reading at an offset, avoiding re-reads of
// data which was already uploaded. Internally this function stages
// each part in an in-memory buffer before uploading it, and parts
// are uploaded in parallel by a fixed pool of workers.
func (c Client) putObjectMultipartFromReadAt(bucketName, objectName string, reader io.ReaderAt, size int64, metaData map[string][]string, progress io.Reader) (n int64, err error) {
// Input validation.
if err := isValidBucketName(bucketName); err != nil {
return 0, err
}
if err := isValidObjectName(objectName); err != nil {
return 0, err
}
// Get the upload id of a previously partially uploaded object or initiate a new multipart upload
uploadID, partsInfo, err := c.getMpartUploadSession(bucketName, objectName, metaData)
if err != nil {
return 0, err
}
// Total data read and written to server. Should be equal to 'size' at the end of the call.
var totalUploadedSize int64
// Complete multipart upload.
var complMultipartUpload completeMultipartUpload
// Calculate the optimal parts info for a given size.
totalPartsCount, partSize, lastPartSize, err := optimalPartInfo(size)
if err != nil {
return 0, err
}
// Used for readability, lastPartNumber is always totalPartsCount.
lastPartNumber := totalPartsCount
// Declare a channel that sends the next part number to be uploaded.
// Buffered to 10000 because that's the maximum number of parts allowed
// by S3.
uploadPartsCh := make(chan uploadPartReq, 10000)
// Declare a channel that sends back the response of a part upload.
// Buffered to 10000 because that's the maximum number of parts allowed
// by S3.
uploadedPartsCh := make(chan uploadedPartRes, 10000)
// Send each part number to the channel to be processed.
for p := 1; p <= totalPartsCount; p++ {
part, ok := partsInfo[p]
if ok {
uploadPartsCh <- uploadPartReq{PartNum: p, Part: &part}
} else {
uploadPartsCh <- uploadPartReq{PartNum: p, Part: nil}
}
}
close(uploadPartsCh)
// Receive each part number from the channel allowing three parallel uploads.
for w := 1; w <= totalWorkers; w++ {
go func() {
// Reads default to a 5MiB buffer.
readAtBuffer := make([]byte, optimalReadBufferSize)
// Each worker will draw from the part channel and upload in parallel.
for uploadReq := range uploadPartsCh {
// Declare a new tmpBuffer.
tmpBuffer := new(bytes.Buffer)
// If partNumber was not uploaded we calculate the missing
// part offset and size. For all other part numbers we
// calculate offset based on multiples of partSize.
readOffset := int64(uploadReq.PartNum-1) * partSize
missingPartSize := partSize
// As a special case if partNumber is lastPartNumber, we
// calculate the offset based on the last part size.
if uploadReq.PartNum == lastPartNumber {
readOffset = (size - lastPartSize)
missingPartSize = lastPartSize
}
// Get a section reader on a particular offset.
sectionReader := io.NewSectionReader(reader, readOffset, missingPartSize)
// Choose the needed hash algorithms to be calculated by hashCopyBuffer.
// Sha256 is avoided in non-v4 signature requests or HTTPS connections
hashSums := make(map[string][]byte)
hashAlgos := make(map[string]hash.Hash)
hashAlgos["md5"] = md5.New()
if c.signature.isV4() && !c.secure {
hashAlgos["sha256"] = sha256.New()
}
var prtSize int64
var err error
prtSize, err = hashCopyBuffer(hashAlgos, hashSums, tmpBuffer, sectionReader, readAtBuffer)
if err != nil {
// Send the error back through the channel.
uploadedPartsCh <- uploadedPartRes{
Size: 0,
Error: err,
}
// Exit the goroutine.
return
}
// Verify if the part was already uploaded.
verifyObjPart := ObjectPart{
PartNumber: uploadReq.PartNum,
Size: partSize,
}
// As a special case, if we see the last part number, save the
// last part size as the proper part size.
if uploadReq.PartNum == lastPartNumber {
verifyObjPart.Size = lastPartSize
}
// Only upload the necessary parts. Otherwise return size through channel
// to update any progress bar.
if shouldUploadPartReadAt(verifyObjPart, uploadReq) {
// Proceed to upload the part.
var objPart ObjectPart
objPart, err = c.uploadPart(bucketName, objectName, uploadID, tmpBuffer, uploadReq.PartNum, hashSums["md5"], hashSums["sha256"], prtSize)
if err != nil {
uploadedPartsCh <- uploadedPartRes{
Size: 0,
Error: err,
}
// Exit the goroutine.
return
}
// Save successfully uploaded part metadata.
uploadReq.Part = &objPart
}
// Send successful part info through the channel.
uploadedPartsCh <- uploadedPartRes{
Size: verifyObjPart.Size,
PartNum: uploadReq.PartNum,
Part: uploadReq.Part,
Error: nil,
}
}
}()
}
// Gather the responses as they occur and update any
// progress bar.
for u := 1; u <= totalPartsCount; u++ {
uploadRes := <-uploadedPartsCh
if uploadRes.Error != nil {
return totalUploadedSize, uploadRes.Error
}
// Retrieve each uploaded part and store it to be completed.
part := uploadRes.Part
if part == nil {
return 0, ErrInvalidArgument(fmt.Sprintf("Missing part number %d", uploadRes.PartNum))
}
// Update the totalUploadedSize.
totalUploadedSize += uploadRes.Size
// Update the progress bar if there is one.
if progress != nil {
if _, err = io.CopyN(ioutil.Discard, progress, uploadRes.Size); err != nil {
return totalUploadedSize, err
}
}
// Store the parts to be completed in order.
complMultipartUpload.Parts = append(complMultipartUpload.Parts, CompletePart{
ETag: part.ETag,
PartNumber: part.PartNumber,
})
}
// Verify if we uploaded all the data.
if totalUploadedSize != size {
return totalUploadedSize, ErrUnexpectedEOF(totalUploadedSize, size, bucketName, objectName)
}
// Sort all completed parts.
sort.Sort(completedParts(complMultipartUpload.Parts))
_, err = c.completeMultipartUpload(bucketName, objectName, uploadID, complMultipartUpload)
if err != nil {
return totalUploadedSize, err
}
// Return final size.
return totalUploadedSize, nil
}
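// Illustrative sketch (not part of the upstream file): the fan-out/fan-in
// pattern used above, reduced to its core. A buffered channel carries one
// job per part, a fixed pool of workers drains it, and results are gathered
// back on a second channel. All names below are hypothetical.
func exampleUploadWorkerPool(totalParts, workers int) []int {
jobs := make(chan int, totalParts)
results := make(chan int, totalParts)
// Queue every part number, then close so workers can range over the channel.
for p := 1; p <= totalParts; p++ {
jobs <- p
}
close(jobs)
// A fixed pool of workers drains the job channel concurrently.
for w := 0; w < workers; w++ {
go func() {
for p := range jobs {
// The real code uploads the part here before reporting back.
results <- p
}
}()
}
// Gather exactly one result per part.
gathered := make([]int, 0, totalParts)
for i := 0; i < totalParts; i++ {
gathered = append(gathered, <-results)
}
return gathered
}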

318
vendor/github.com/minio/minio-go/api-put-object.go generated vendored Normal file
View File

@ -0,0 +1,318 @@
/*
* Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015, 2016 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package minio
import (
"bytes"
"crypto/md5"
"crypto/sha256"
"hash"
"io"
"io/ioutil"
"net/http"
"os"
"reflect"
"runtime"
"strings"
)
// toInt - converts go value to its integer representation based
// on the value kind if it is an integer.
func toInt(value reflect.Value) (size int64) {
size = -1
if value.IsValid() {
switch value.Kind() {
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
size = value.Int()
}
}
return size
}
// getReaderSize - Determine the size of Reader if available.
func getReaderSize(reader io.Reader) (size int64, err error) {
size = -1
if reader == nil {
return -1, nil
}
// Verify if there is a method by name 'Size'.
sizeFn := reflect.ValueOf(reader).MethodByName("Size")
// Verify if there is a method by name 'Len'.
lenFn := reflect.ValueOf(reader).MethodByName("Len")
if sizeFn.IsValid() {
if sizeFn.Kind() == reflect.Func {
// Call the 'Size' function and save its return value.
result := sizeFn.Call([]reflect.Value{})
if len(result) == 1 {
size = toInt(result[0])
}
}
} else if lenFn.IsValid() {
if lenFn.Kind() == reflect.Func {
// Call the 'Len' function and save its return value.
result := lenFn.Call([]reflect.Value{})
if len(result) == 1 {
size = toInt(result[0])
}
}
} else {
// Fallback to Stat() method, two possible Stat() structs exist.
switch v := reader.(type) {
case *os.File:
var st os.FileInfo
st, err = v.Stat()
if err != nil {
// Handle this case specially for "windows": for certain files
// such as 'Stdin', 'Stdout' and 'Stderr' fetching file
// information is not allowed.
if runtime.GOOS == "windows" {
if strings.Contains(err.Error(), "GetFileInformationByHandle") {
return -1, nil
}
}
return
}
// If the input is a directory, return an error.
if st.Mode().IsDir() {
return -1, ErrInvalidArgument("Input file cannot be a directory.")
}
// Ignore 'Stdin', 'Stdout' and 'Stderr', since they
// represent *os.File type but internally do not
// implement Seekable calls. Ignore them and treat
// them like a stream with unknown length.
switch st.Name() {
case "stdin", "stdout", "stderr":
return
// Ignore read/write stream of os.Pipe() which have unknown length too.
case "|0", "|1":
return
}
size = st.Size()
case *Object:
var st ObjectInfo
st, err = v.Stat()
if err != nil {
return
}
size = st.Size
}
}
// Returns the size here.
return size, err
}
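// Illustrative sketch (not part of the upstream file): bytes.Reader exposes
// a Size() method, so getReaderSize can discover its length via reflection
// without consuming the stream.
func exampleGetReaderSize() {
r := bytes.NewReader([]byte("hello"))
size, _ := getReaderSize(r) // size == 5, discovered through Size()
_ = size
}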
// completedParts is a collection of parts sortable by their part numbers.
// used for sorting the uploaded parts before completing the multipart request.
type completedParts []CompletePart
func (a completedParts) Len() int { return len(a) }
func (a completedParts) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
func (a completedParts) Less(i, j int) bool { return a[i].PartNumber < a[j].PartNumber }
// PutObject creates an object in a bucket.
//
// You must have WRITE permissions on a bucket to create an object.
//
// - For size smaller than 5MiB PutObject automatically does a single atomic Put operation.
// - For size larger than 5MiB PutObject automatically does a resumable multipart Put operation.
// - For size input as -1 PutObject does a multipart Put operation until input stream reaches EOF.
// Maximum object size that can be uploaded through this operation will be 5TiB.
//
// NOTE: Google Cloud Storage does not implement Amazon S3 Compatible multipart PUT.
// So we fall back to single PUT operation with the maximum limit of 5GiB.
//
func (c Client) PutObject(bucketName, objectName string, reader io.Reader, contentType string) (n int64, err error) {
return c.PutObjectWithProgress(bucketName, objectName, reader, contentType, nil)
}
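// Illustrative usage sketch (not part of the upstream file); the bucket,
// object and file names are assumptions for illustration.
func examplePutObject(clnt Client) error {
f, err := os.Open("my-file")
if err != nil {
return err
}
defer f.Close()
// The reader's size (discovered via getReaderSize) drives the choice
// between a single atomic PUT and a resumable multipart upload.
_, err = clnt.PutObject("my-bucket", "my-object", f, "application/octet-stream")
return err
}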
// putObjectNoChecksum is a special function used for Google Cloud Storage,
// since Google's multipart API is not S3 compatible.
func (c Client) putObjectNoChecksum(bucketName, objectName string, reader io.Reader, size int64, metaData map[string][]string, progress io.Reader) (n int64, err error) {
// Input validation.
if err := isValidBucketName(bucketName); err != nil {
return 0, err
}
if err := isValidObjectName(objectName); err != nil {
return 0, err
}
if size > maxSinglePutObjectSize {
return 0, ErrEntityTooLarge(size, maxSinglePutObjectSize, bucketName, objectName)
}
// Update progress reader appropriately to the latest offset as we
// read from the source.
readSeeker := newHook(reader, progress)
// This function does not calculate sha256 and md5sum for payload.
// Execute put object.
st, err := c.putObjectDo(bucketName, objectName, readSeeker, nil, nil, size, metaData)
if err != nil {
return 0, err
}
if st.Size != size {
return 0, ErrUnexpectedEOF(st.Size, size, bucketName, objectName)
}
return size, nil
}
// putObjectSingle is a special function for uploading a single put object request.
// This special function is used as a fallback when multipart upload fails.
func (c Client) putObjectSingle(bucketName, objectName string, reader io.Reader, size int64, metaData map[string][]string, progress io.Reader) (n int64, err error) {
// Input validation.
if err := isValidBucketName(bucketName); err != nil {
return 0, err
}
if err := isValidObjectName(objectName); err != nil {
return 0, err
}
if size > maxSinglePutObjectSize {
return 0, ErrEntityTooLarge(size, maxSinglePutObjectSize, bucketName, objectName)
}
// If the size is unknown (streaming input), upload up to the 5GiB single-PUT limit.
if size <= -1 {
size = maxSinglePutObjectSize
}
// Add the appropriate hash algorithms that need to be calculated by hashCopyN
// In case of non-v4 signature request or HTTPS connection, sha256 is not needed.
hashAlgos := make(map[string]hash.Hash)
hashSums := make(map[string][]byte)
hashAlgos["md5"] = md5.New()
if c.signature.isV4() && !c.secure {
hashAlgos["sha256"] = sha256.New()
}
if size <= minPartSize {
// Initialize a new temporary buffer.
tmpBuffer := new(bytes.Buffer)
size, err = hashCopyN(hashAlgos, hashSums, tmpBuffer, reader, size)
reader = bytes.NewReader(tmpBuffer.Bytes())
tmpBuffer.Reset()
} else {
// Initialize a new temporary file.
var tmpFile *tempFile
tmpFile, err = newTempFile("single$-putobject-single")
if err != nil {
return 0, err
}
defer tmpFile.Close()
size, err = hashCopyN(hashAlgos, hashSums, tmpFile, reader, size)
if err != nil {
return 0, err
}
// Seek back to beginning of the temporary file.
if _, err = tmpFile.Seek(0, 0); err != nil {
return 0, err
}
reader = tmpFile
}
// Return error if it's not io.EOF.
if err != nil && err != io.EOF {
return 0, err
}
// Execute put object.
st, err := c.putObjectDo(bucketName, objectName, reader, hashSums["md5"], hashSums["sha256"], size, metaData)
if err != nil {
return 0, err
}
if st.Size != size {
return 0, ErrUnexpectedEOF(st.Size, size, bucketName, objectName)
}
// Progress the reader to the size if putObjectDo is successful.
if progress != nil {
if _, err = io.CopyN(ioutil.Discard, progress, size); err != nil {
return size, err
}
}
return size, nil
}
// putObjectDo - executes the put object http operation.
// NOTE: You must have WRITE permissions on a bucket to add an object to it.
func (c Client) putObjectDo(bucketName, objectName string, reader io.Reader, md5Sum []byte, sha256Sum []byte, size int64, metaData map[string][]string) (ObjectInfo, error) {
// Input validation.
if err := isValidBucketName(bucketName); err != nil {
return ObjectInfo{}, err
}
if err := isValidObjectName(objectName); err != nil {
return ObjectInfo{}, err
}
if size <= -1 {
return ObjectInfo{}, ErrEntityTooSmall(size, bucketName, objectName)
}
if size > maxSinglePutObjectSize {
return ObjectInfo{}, ErrEntityTooLarge(size, maxSinglePutObjectSize, bucketName, objectName)
}
// Set headers.
customHeader := make(http.Header)
// Set metadata to headers
for k, v := range metaData {
if len(v) > 0 {
customHeader.Set(k, v[0])
}
}
// If Content-Type is not provided, set the default application/octet-stream one
if v, ok := metaData["Content-Type"]; !ok || len(v) == 0 {
customHeader.Set("Content-Type", "application/octet-stream")
}
// Populate request metadata.
reqMetadata := requestMetadata{
bucketName: bucketName,
objectName: objectName,
customHeader: customHeader,
contentBody: reader,
contentLength: size,
contentMD5Bytes: md5Sum,
contentSHA256Bytes: sha256Sum,
}
// Execute PUT an objectName.
resp, err := c.executeMethod("PUT", reqMetadata)
defer closeResponse(resp)
if err != nil {
return ObjectInfo{}, err
}
if resp != nil {
if resp.StatusCode != http.StatusOK {
return ObjectInfo{}, httpRespToErrorResponse(resp, bucketName, objectName)
}
}
var objInfo ObjectInfo
// Trim off the odd double quotes from ETag in the beginning and end.
objInfo.ETag = strings.TrimPrefix(resp.Header.Get("ETag"), "\"")
objInfo.ETag = strings.TrimSuffix(objInfo.ETag, "\"")
// A success here means data was written to server successfully.
objInfo.Size = size
// Return here.
return objInfo, nil
}

283
vendor/github.com/minio/minio-go/api-remove.go generated vendored Normal file
View File

@ -0,0 +1,283 @@
/*
* Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015, 2016 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package minio
import (
"bytes"
"encoding/xml"
"io"
"net/http"
"net/url"
)
// RemoveBucket deletes the bucket.
//
// All objects (including all object versions and delete markers)
// in the bucket must be deleted before successfully attempting this request.
func (c Client) RemoveBucket(bucketName string) error {
// Input validation.
if err := isValidBucketName(bucketName); err != nil {
return err
}
// Execute DELETE on bucket.
resp, err := c.executeMethod("DELETE", requestMetadata{
bucketName: bucketName,
})
defer closeResponse(resp)
if err != nil {
return err
}
if resp != nil {
if resp.StatusCode != http.StatusNoContent {
return httpRespToErrorResponse(resp, bucketName, "")
}
}
// Remove the location from cache on a successful delete.
c.bucketLocCache.Delete(bucketName)
return nil
}
// RemoveObject removes an object from a bucket.
func (c Client) RemoveObject(bucketName, objectName string) error {
// Input validation.
if err := isValidBucketName(bucketName); err != nil {
return err
}
if err := isValidObjectName(objectName); err != nil {
return err
}
// Execute DELETE on objectName.
resp, err := c.executeMethod("DELETE", requestMetadata{
bucketName: bucketName,
objectName: objectName,
})
defer closeResponse(resp)
if err != nil {
return err
}
if resp != nil {
// if some unexpected error happened and max retry is reached, we want to let client know
if resp.StatusCode != http.StatusNoContent {
return httpRespToErrorResponse(resp, bucketName, objectName)
}
}
// DeleteObject always responds with http '204' even for
// objects which do not exist. So no need to handle them
// specifically.
return nil
}
// RemoveObjectError - container of Multi Delete S3 API error
type RemoveObjectError struct {
ObjectName string
Err error
}
// generateRemoveMultiObjectsRequest - generate the XML request body for a Multi-Object Delete request
func generateRemoveMultiObjectsRequest(objects []string) []byte {
rmObjects := []deleteObject{}
for _, obj := range objects {
rmObjects = append(rmObjects, deleteObject{Key: obj})
}
xmlBytes, _ := xml.Marshal(deleteMultiObjects{Objects: rmObjects, Quiet: true})
return xmlBytes
}
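// Illustrative sketch (not part of the upstream file): for two keys the
// generated body is roughly
// <Delete><Quiet>true</Quiet><Object><Key>a.txt</Key></Object>
// <Object><Key>b.txt</Key></Object></Delete>.
func exampleRemoveMultiObjectsRequest() []byte {
return generateRemoveMultiObjectsRequest([]string{"a.txt", "b.txt"})
}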
// processRemoveMultiObjectsResponse - parse the Multi-Object Delete response
// and send an error on errorCh for each object that failed to delete
func processRemoveMultiObjectsResponse(body io.Reader, objects []string, errorCh chan<- RemoveObjectError) {
// Parse multi delete XML response
rmResult := &deleteMultiObjectsResult{}
err := xmlDecoder(body, rmResult)
if err != nil {
errorCh <- RemoveObjectError{ObjectName: "", Err: err}
return
}
// Report each deletion that returned an error.
for _, obj := range rmResult.UnDeletedObjects {
errorCh <- RemoveObjectError{
ObjectName: obj.Key,
Err: ErrorResponse{
Code: obj.Code,
Message: obj.Message,
},
}
}
}
// RemoveObjects removes multiple objects from a bucket.
// The list of objects to remove is received from objectsCh.
// Removal failures are sent back via the returned error channel.
func (c Client) RemoveObjects(bucketName string, objectsCh <-chan string) <-chan RemoveObjectError {
errorCh := make(chan RemoveObjectError, 1)
// Validate if bucket name is valid.
if err := isValidBucketName(bucketName); err != nil {
defer close(errorCh)
errorCh <- RemoveObjectError{
Err: err,
}
return errorCh
}
// Validate objects channel to be properly allocated.
if objectsCh == nil {
defer close(errorCh)
errorCh <- RemoveObjectError{
Err: ErrInvalidArgument("Objects channel cannot be nil"),
}
return errorCh
}
// Generate and call MultiDelete S3 requests based on entries received from objectsCh
go func(errorCh chan<- RemoveObjectError) {
maxEntries := 1000
finish := false
urlValues := make(url.Values)
urlValues.Set("delete", "")
// Close error channel when Multi delete finishes.
defer close(errorCh)
// Loop over entries by 1000 and call MultiDelete requests
for {
if finish {
break
}
count := 0
var batch []string
// Try to gather 1000 entries
for object := range objectsCh {
batch = append(batch, object)
if count++; count >= maxEntries {
break
}
}
if count == 0 {
// Multi Objects Delete API doesn't accept empty object list, quit immediately
break
}
if count < maxEntries {
// We didn't have 1000 entries, so this is the last batch
finish = true
}
// Generate remove multi objects XML request
removeBytes := generateRemoveMultiObjectsRequest(batch)
// Execute POST on bucket to delete objects.
resp, err := c.executeMethod("POST", requestMetadata{
bucketName: bucketName,
queryValues: urlValues,
contentBody: bytes.NewReader(removeBytes),
contentLength: int64(len(removeBytes)),
contentMD5Bytes: sumMD5(removeBytes),
contentSHA256Bytes: sum256(removeBytes),
})
if err != nil {
for _, b := range batch {
errorCh <- RemoveObjectError{ObjectName: b, Err: err}
}
continue
}
// Process multiobjects remove xml response
processRemoveMultiObjectsResponse(resp.Body, batch, errorCh)
closeResponse(resp)
}
}(errorCh)
return errorCh
}
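// Illustrative usage sketch (not part of the upstream file; names are
// assumptions): a producer feeds object names into objectsCh, while the
// caller drains the returned error channel for any failed deletions.
func exampleRemoveObjects(clnt Client) {
objectsCh := make(chan string)
go func() {
defer close(objectsCh)
for _, name := range []string{"a.txt", "b.txt"} {
objectsCh <- name
}
}()
for rmErr := range clnt.RemoveObjects("my-bucket", objectsCh) {
// Each failed deletion is reported with its object name and error.
_ = rmErr
}
}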
// RemoveIncompleteUpload aborts a partially uploaded object.
func (c Client) RemoveIncompleteUpload(bucketName, objectName string) error {
// Input validation.
if err := isValidBucketName(bucketName); err != nil {
return err
}
if err := isValidObjectName(objectName); err != nil {
return err
}
// Find multipart upload id of the object to be aborted.
uploadID, err := c.findUploadID(bucketName, objectName)
if err != nil {
return err
}
if uploadID != "" {
// Upload id found, abort the incomplete multipart upload.
err := c.abortMultipartUpload(bucketName, objectName, uploadID)
if err != nil {
return err
}
}
return nil
}
// abortMultipartUpload aborts a multipart upload for the given
// uploadID; all previously uploaded parts are deleted.
func (c Client) abortMultipartUpload(bucketName, objectName, uploadID string) error {
// Input validation.
if err := isValidBucketName(bucketName); err != nil {
return err
}
if err := isValidObjectName(objectName); err != nil {
return err
}
// Initialize url queries.
urlValues := make(url.Values)
urlValues.Set("uploadId", uploadID)
// Execute DELETE on multipart upload.
resp, err := c.executeMethod("DELETE", requestMetadata{
bucketName: bucketName,
objectName: objectName,
queryValues: urlValues,
})
defer closeResponse(resp)
if err != nil {
return err
}
if resp != nil {
if resp.StatusCode != http.StatusNoContent {
// Abort has no response body, handle it for any errors.
var errorResponse ErrorResponse
switch resp.StatusCode {
case http.StatusNotFound:
// This is needed specifically for abort and it cannot
// be converged into default case.
errorResponse = ErrorResponse{
Code: "NoSuchUpload",
Message: "The specified multipart upload does not exist.",
BucketName: bucketName,
Key: objectName,
RequestID: resp.Header.Get("x-amz-request-id"),
HostID: resp.Header.Get("x-amz-id-2"),
Region: resp.Header.Get("x-amz-bucket-region"),
}
default:
return httpRespToErrorResponse(resp, bucketName, objectName)
}
return errorResponse
}
}
return nil
}

244
vendor/github.com/minio/minio-go/api-s3-datatypes.go generated vendored Normal file
View File

@ -0,0 +1,244 @@
/*
* Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package minio
import (
"encoding/xml"
"time"
)
// listAllMyBucketsResult container for listBuckets response.
type listAllMyBucketsResult struct {
// Container for one or more buckets.
Buckets struct {
Bucket []BucketInfo
}
Owner owner
}
// owner container for bucket owner information.
type owner struct {
DisplayName string
ID string
}
// commonPrefix container for prefix response.
type commonPrefix struct {
Prefix string
}
// ListBucketV2Result container for listObjects response version 2.
type ListBucketV2Result struct {
// A response can contain CommonPrefixes only if you have
// specified a delimiter.
CommonPrefixes []commonPrefix
// Metadata about each object returned.
Contents []ObjectInfo
Delimiter string
// Encoding type used to encode object keys in the response.
EncodingType string
// A flag that indicates whether or not ListObjects returned all of the results
// that satisfied the search criteria.
IsTruncated bool
MaxKeys int64
Name string
// Hold the token that will be sent in the next request to fetch the next group of keys
NextContinuationToken string
ContinuationToken string
Prefix string
// FetchOwner and StartAfter are currently not used
FetchOwner string
StartAfter string
}
// ListBucketResult container for listObjects response.
type ListBucketResult struct {
// A response can contain CommonPrefixes only if you have
// specified a delimiter.
CommonPrefixes []commonPrefix
// Metadata about each object returned.
Contents []ObjectInfo
Delimiter string
// Encoding type used to encode object keys in the response.
EncodingType string
// A flag that indicates whether or not ListObjects returned all of the results
// that satisfied the search criteria.
IsTruncated bool
Marker string
MaxKeys int64
Name string
// When response is truncated (the IsTruncated element value in
// the response is true), you can use the key name in this field
// as marker in the subsequent request to get next set of objects.
// Object storage lists objects in alphabetical order. Note: This
// element is returned only if you have the delimiter request
// parameter specified. If the response does not include NextMarker
// and it is truncated, you can use the value of the last Key in
// the response as the marker in the subsequent request to get the
// next set of object keys.
NextMarker string
Prefix string
}
// ListMultipartUploadsResult container for ListMultipartUploads response
type ListMultipartUploadsResult struct {
Bucket string
KeyMarker string
UploadIDMarker string `xml:"UploadIdMarker"`
NextKeyMarker string
NextUploadIDMarker string `xml:"NextUploadIdMarker"`
EncodingType string
MaxUploads int64
IsTruncated bool
Uploads []ObjectMultipartInfo `xml:"Upload"`
Prefix string
Delimiter string
// A response can contain CommonPrefixes only if you specify a delimiter.
CommonPrefixes []commonPrefix
}
// initiator container for who initiated multipart upload.
type initiator struct {
ID string
DisplayName string
}
// copyObjectResult container for copy object response.
type copyObjectResult struct {
ETag string
LastModified string // time string format "2006-01-02T15:04:05.000Z"
}
// ObjectPart container for particular part of an object.
type ObjectPart struct {
// Part number identifies the part.
PartNumber int
// Date and time the part was uploaded.
LastModified time.Time
// Entity tag returned when the part was uploaded, usually md5sum
// of the part.
ETag string
// Size of the uploaded part data.
Size int64
}
// ListObjectPartsResult container for ListObjectParts response.
type ListObjectPartsResult struct {
Bucket string
Key string
UploadID string `xml:"UploadId"`
Initiator initiator
Owner owner
StorageClass string
PartNumberMarker int
NextPartNumberMarker int
MaxParts int
// Indicates whether the returned list of parts is truncated.
IsTruncated bool
ObjectParts []ObjectPart `xml:"Part"`
EncodingType string
}
// initiateMultipartUploadResult container for InitiateMultiPartUpload
// response.
type initiateMultipartUploadResult struct {
Bucket string
Key string
UploadID string `xml:"UploadId"`
}
// completeMultipartUploadResult container for completed multipart
// upload response.
type completeMultipartUploadResult struct {
Location string
Bucket string
Key string
ETag string
}
// CompletePart sub container lists individual part numbers and their
// md5sum, part of completeMultipartUpload.
type CompletePart struct {
XMLName xml.Name `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Part" json:"-"`
// Part number identifies the part.
PartNumber int
ETag string
}
// completeMultipartUpload container for completing multipart upload.
type completeMultipartUpload struct {
XMLName xml.Name `xml:"http://s3.amazonaws.com/doc/2006-03-01/ CompleteMultipartUpload" json:"-"`
Parts []CompletePart `xml:"Part"`
}
// createBucketConfiguration container for bucket configuration.
type createBucketConfiguration struct {
XMLName xml.Name `xml:"http://s3.amazonaws.com/doc/2006-03-01/ CreateBucketConfiguration" json:"-"`
Location string `xml:"LocationConstraint"`
}
// deleteObject container for Delete element in MultiObjects Delete XML request
type deleteObject struct {
Key string
VersionID string `xml:"VersionId,omitempty"`
}
// deletedObject container for Deleted element in MultiObjects Delete XML response
type deletedObject struct {
Key string
VersionID string `xml:"VersionId,omitempty"`
// These fields are ignored.
DeleteMarker bool
DeleteMarkerVersionID string
}
// nonDeletedObject container for Error element (failed deletion) in MultiObjects Delete XML response
type nonDeletedObject struct {
Key string
Code string
Message string
}
// deleteMultiObjects container for MultiObjects Delete XML request
type deleteMultiObjects struct {
XMLName xml.Name `xml:"Delete"`
Quiet bool
Objects []deleteObject `xml:"Object"`
}
// deleteMultiObjectsResult container for MultiObjects Delete XML response
type deleteMultiObjectsResult struct {
XMLName xml.Name `xml:"DeleteResult"`
DeletedObjects []deletedObject `xml:"Deleted"`
UnDeletedObjects []nonDeletedObject `xml:"Error"`
}

158
vendor/github.com/minio/minio-go/api-stat.go generated vendored Normal file
View File

@ -0,0 +1,158 @@
/*
* Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015, 2016 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package minio
import (
"net/http"
"strconv"
"strings"
"time"
"github.com/minio/minio-go/pkg/s3utils"
)
// BucketExists verifies whether the bucket exists and you have permission to access it.
func (c Client) BucketExists(bucketName string) (bool, error) {
// Input validation.
if err := isValidBucketName(bucketName); err != nil {
return false, err
}
// Execute HEAD on bucketName.
resp, err := c.executeMethod("HEAD", requestMetadata{
bucketName: bucketName,
})
defer closeResponse(resp)
if err != nil {
if ToErrorResponse(err).Code == "NoSuchBucket" {
return false, nil
}
return false, err
}
if resp != nil {
if resp.StatusCode != http.StatusOK {
return false, httpRespToErrorResponse(resp, bucketName, "")
}
}
return true, nil
}
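// Illustrative usage sketch (bucket name is an assumption): a missing
// bucket is reported as (false, nil) rather than as an error.
func exampleBucketExists(clnt Client) (bool, error) {
return clnt.BucketExists("my-bucket")
}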
// List of header keys to be filtered, usually
// from all S3 API http responses.
var defaultFilterKeys = []string{
"Transfer-Encoding",
"Accept-Ranges",
"Date",
"Server",
"Vary",
"x-amz-request-id",
"x-amz-id-2",
// Add new headers to be ignored.
}
// Extract only necessary metadata header key/values by
// filtering them out with a list of custom header keys.
func extractObjMetadata(header http.Header) http.Header {
filterKeys := append([]string{
"ETag",
"Content-Length",
"Last-Modified",
"Content-Type",
}, defaultFilterKeys...)
return filterHeader(header, filterKeys)
}
// StatObject verifies whether the object exists and you have permission to access it.
func (c Client) StatObject(bucketName, objectName string) (ObjectInfo, error) {
// Input validation.
if err := isValidBucketName(bucketName); err != nil {
return ObjectInfo{}, err
}
if err := isValidObjectName(objectName); err != nil {
return ObjectInfo{}, err
}
// Execute HEAD on objectName.
resp, err := c.executeMethod("HEAD", requestMetadata{
bucketName: bucketName,
objectName: objectName,
})
defer closeResponse(resp)
if err != nil {
return ObjectInfo{}, err
}
if resp != nil {
if resp.StatusCode != http.StatusOK {
return ObjectInfo{}, httpRespToErrorResponse(resp, bucketName, objectName)
}
}
// Trim off the odd double quotes from ETag in the beginning and end.
md5sum := strings.TrimPrefix(resp.Header.Get("ETag"), "\"")
md5sum = strings.TrimSuffix(md5sum, "\"")
// Content-Length is not valid for Google Cloud Storage, do not verify.
var size int64 = -1
if !s3utils.IsGoogleEndpoint(c.endpointURL) {
// Parse content length.
size, err = strconv.ParseInt(resp.Header.Get("Content-Length"), 10, 64)
if err != nil {
return ObjectInfo{}, ErrorResponse{
Code: "InternalError",
Message: "Content-Length is invalid. " + reportIssue,
BucketName: bucketName,
Key: objectName,
RequestID: resp.Header.Get("x-amz-request-id"),
HostID: resp.Header.Get("x-amz-id-2"),
Region: resp.Header.Get("x-amz-bucket-region"),
}
}
}
// Parse Last-Modified, which has the HTTP time format.
date, err := time.Parse(http.TimeFormat, resp.Header.Get("Last-Modified"))
if err != nil {
return ObjectInfo{}, ErrorResponse{
Code: "InternalError",
Message: "Last-Modified time format is invalid. " + reportIssue,
BucketName: bucketName,
Key: objectName,
RequestID: resp.Header.Get("x-amz-request-id"),
HostID: resp.Header.Get("x-amz-id-2"),
Region: resp.Header.Get("x-amz-bucket-region"),
}
}
// Fetch content type if any present.
contentType := strings.TrimSpace(resp.Header.Get("Content-Type"))
if contentType == "" {
contentType = "application/octet-stream"
}
// Extract only the relevant header keys describing the object.
// The following function filters out a standard set of keys
// which are not part of object metadata.
metadata := extractObjMetadata(resp.Header)
// Save object metadata info.
return ObjectInfo{
ETag: md5sum,
Key: objectName,
Size: size,
LastModified: date,
ContentType: contentType,
Metadata: metadata,
}, nil
}
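// Illustrative usage sketch (names are assumptions): StatObject issues a
// HEAD request and maps the response headers into ObjectInfo.
func exampleStatObject(clnt Client) {
info, err := clnt.StatObject("my-bucket", "my-object")
if err != nil {
return
}
_ = info.Size // content length; info.ETag and info.ContentType are also populated
}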

748
vendor/github.com/minio/minio-go/api.go generated vendored Normal file
View File

@ -0,0 +1,748 @@
/*
* Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015, 2016 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package minio
import (
"bytes"
"encoding/base64"
"encoding/hex"
"errors"
"fmt"
"io"
"io/ioutil"
"math/rand"
"net/http"
"net/http/httputil"
"net/url"
"os"
"regexp"
"runtime"
"strings"
"sync"
"time"
"github.com/minio/minio-go/pkg/s3signer"
"github.com/minio/minio-go/pkg/s3utils"
)
// Client implements Amazon S3 compatible methods.
type Client struct {
/// Standard options.
// Parsed endpoint url provided by the user.
endpointURL url.URL
// AccessKeyID required for authorized requests.
accessKeyID string
// SecretAccessKey required for authorized requests.
secretAccessKey string
// Choose a signature type if necessary.
signature SignatureType
// Set to 'true' if Client has no access and secret keys.
anonymous bool
// User supplied.
appInfo struct {
appName string
appVersion string
}
// Indicate whether we are using https or not
secure bool
// Needs allocation.
httpClient *http.Client
bucketLocCache *bucketLocationCache
// Advanced functionality.
isTraceEnabled bool
traceOutput io.Writer
// S3 specific accelerated endpoint.
s3AccelerateEndpoint string
// Region endpoint
region string
// Random seed.
random *rand.Rand
}
// Global constants.
const (
libraryName = "minio-go"
libraryVersion = "2.0.4"
)
// User Agent should always follow the below style.
// Please open an issue to discuss any new changes here.
//
// Minio (OS; ARCH) LIB/VER APP/VER
const (
libraryUserAgentPrefix = "Minio (" + runtime.GOOS + "; " + runtime.GOARCH + ") "
libraryUserAgent = libraryUserAgentPrefix + libraryName + "/" + libraryVersion
)
// NewV2 - instantiate minio client with Amazon S3 signature version
// '2' compatibility.
func NewV2(endpoint string, accessKeyID, secretAccessKey string, secure bool) (*Client, error) {
clnt, err := privateNew(endpoint, accessKeyID, secretAccessKey, secure)
if err != nil {
return nil, err
}
// Set to use signature version '2'.
clnt.signature = SignatureV2
return clnt, nil
}
// NewV4 - instantiate minio client with Amazon S3 signature version
// '4' compatibility.
func NewV4(endpoint string, accessKeyID, secretAccessKey string, secure bool) (*Client, error) {
clnt, err := privateNew(endpoint, accessKeyID, secretAccessKey, secure)
if err != nil {
return nil, err
}
// Set to use signature version '4'.
clnt.signature = SignatureV4
return clnt, nil
}
// New - instantiate minio client Client, adds automatic verification of signature.
func New(endpoint, accessKeyID, secretAccessKey string, secure bool) (*Client, error) {
return NewWithRegion(endpoint, accessKeyID, secretAccessKey, secure, "")
}
// NewWithRegion - instantiate minio client, with region configured. Unlike New(),
// NewWithRegion avoids bucket-location lookup operations and it is slightly faster.
// Use this function when your application deals with a single region.
func NewWithRegion(endpoint, accessKeyID, secretAccessKey string, secure bool, region string) (*Client, error) {
clnt, err := privateNew(endpoint, accessKeyID, secretAccessKey, secure)
if err != nil {
return nil, err
}
// Google cloud storage should be set to signature V2, force it if not.
if s3utils.IsGoogleEndpoint(clnt.endpointURL) {
clnt.signature = SignatureV2
}
// If Amazon S3, set to signature v4.
if s3utils.IsAmazonEndpoint(clnt.endpointURL) {
clnt.signature = SignatureV4
}
// Sets custom region, if region is empty bucket location cache is used automatically.
clnt.region = region
// Success..
return clnt, nil
}
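// Illustrative usage sketch (endpoint and credentials are placeholders):
// New picks the signature version automatically for known endpoints such
// as Amazon S3 and Google Cloud Storage.
func exampleNewClient() (*Client, error) {
return New("play.minio.io:9000", "ACCESS-KEY", "SECRET-KEY", true)
}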
// lockedRandSource provides protected rand source, implements rand.Source interface.
type lockedRandSource struct {
lk sync.Mutex
src rand.Source
}
// Int63 returns a non-negative pseudo-random 63-bit integer as an int64.
func (r *lockedRandSource) Int63() (n int64) {
r.lk.Lock()
n = r.src.Int63()
r.lk.Unlock()
return
}
// Seed uses the provided seed value to initialize the generator to a
// deterministic state.
func (r *lockedRandSource) Seed(seed int64) {
r.lk.Lock()
r.src.Seed(seed)
r.lk.Unlock()
}
// redirectHeaders copies all headers when following a redirect URL.
// This won't be needed anymore from go 1.8 (https://github.com/golang/go/issues/4800)
func redirectHeaders(req *http.Request, via []*http.Request) error {
if len(via) == 0 {
return nil
}
for key, val := range via[0].Header {
req.Header[key] = val
}
return nil
}
func privateNew(endpoint, accessKeyID, secretAccessKey string, secure bool) (*Client, error) {
// construct endpoint.
endpointURL, err := getEndpointURL(endpoint, secure)
if err != nil {
return nil, err
}
// instantiate new Client.
clnt := new(Client)
clnt.accessKeyID = accessKeyID
clnt.secretAccessKey = secretAccessKey
// Remember whether we are using https or not
clnt.secure = secure
// Save endpoint URL, user agent for future uses.
clnt.endpointURL = *endpointURL
// Instantiate http client and bucket location cache.
clnt.httpClient = &http.Client{
Transport: http.DefaultTransport,
CheckRedirect: redirectHeaders,
}
// Instantiate bucket location cache.
clnt.bucketLocCache = newBucketLocationCache()
// Introduce a new locked random seed.
clnt.random = rand.New(&lockedRandSource{src: rand.NewSource(time.Now().UTC().UnixNano())})
// Return.
return clnt, nil
}
// SetAppInfo - add application details to user agent.
func (c *Client) SetAppInfo(appName string, appVersion string) {
// If app name and version are not set, we do not set a new user agent.
if appName != "" && appVersion != "" {
c.appInfo = struct {
appName string
appVersion string
}{}
c.appInfo.appName = appName
c.appInfo.appVersion = appVersion
}
}
// SetCustomTransport - set new custom transport.
func (c *Client) SetCustomTransport(customHTTPTransport http.RoundTripper) {
// Set this to override default transport
// ``http.DefaultTransport``.
//
// This transport is usually needed for debugging OR to add your
// own custom TLS certificates on the client transport, for custom
// CA's and certs which are not part of standard certificate
// authority follow this example :-
//
// tr := &http.Transport{
// TLSClientConfig: &tls.Config{RootCAs: pool},
// DisableCompression: true,
// }
// api.SetTransport(tr)
//
if c.httpClient != nil {
c.httpClient.Transport = customHTTPTransport
}
}
// TraceOn - enable HTTP tracing.
func (c *Client) TraceOn(outputStream io.Writer) {
// if outputStream is nil then default to os.Stdout.
if outputStream == nil {
outputStream = os.Stdout
}
// Sets a new output stream.
c.traceOutput = outputStream
// Enable tracing.
c.isTraceEnabled = true
}
// TraceOff - disable HTTP tracing.
func (c *Client) TraceOff() {
// Disable tracing.
c.isTraceEnabled = false
}
// SetS3TransferAccelerate - turns the s3 accelerated endpoint on or off for all your
// requests. This feature is specific to S3; for all other endpoints this
// function does nothing. To read further details on s3 transfer acceleration
// please visit -
// http://docs.aws.amazon.com/AmazonS3/latest/dev/transfer-acceleration.html
func (c *Client) SetS3TransferAccelerate(accelerateEndpoint string) {
if s3utils.IsAmazonEndpoint(c.endpointURL) {
c.s3AccelerateEndpoint = accelerateEndpoint
}
}
// requestMetadata - is container for all the values to make a request.
type requestMetadata struct {
// If set newRequest presigns the URL.
presignURL bool
// User supplied.
bucketName string
objectName string
queryValues url.Values
customHeader http.Header
expires int64
// Generated by our internal code.
bucketLocation string
contentBody io.Reader
contentLength int64
contentSHA256Bytes []byte
contentMD5Bytes []byte
}
// regCred matches credential string in HTTP header
var regCred = regexp.MustCompile("Credential=([A-Z0-9]+)/")
// regSign matches the signature string in an HTTP header
var regSign = regexp.MustCompile("Signature=([0-9a-f]+)")
// Filter out signature value from Authorization header.
func (c Client) filterSignature(req *http.Request) {
if _, ok := req.Header["Authorization"]; !ok {
return
}
// Handle if Signature V2.
if c.signature.isV2() {
// Set a temporary redacted auth
req.Header.Set("Authorization", "AWS **REDACTED**:**REDACTED**")
return
}
/// Signature V4 authorization header.
// Save the original auth.
origAuth := req.Header.Get("Authorization")
// Strip out accessKeyID from:
// Credential=<access-key-id>/<date>/<aws-region>/<aws-service>/aws4_request
newAuth := regCred.ReplaceAllString(origAuth, "Credential=**REDACTED**/")
// Strip out 256-bit signature from: Signature=<256-bit signature>
newAuth = regSign.ReplaceAllString(newAuth, "Signature=**REDACTED**")
// Set a temporary redacted auth
req.Header.Set("Authorization", newAuth)
return
}
// dumpHTTP - dump HTTP request and response.
func (c Client) dumpHTTP(req *http.Request, resp *http.Response) error {
// Starts http dump.
_, err := fmt.Fprintln(c.traceOutput, "---------START-HTTP---------")
if err != nil {
return err
}
// Filter out Signature field from Authorization header.
c.filterSignature(req)
// Only display request header.
reqTrace, err := httputil.DumpRequestOut(req, false)
if err != nil {
return err
}
// Write request to trace output.
_, err = fmt.Fprint(c.traceOutput, string(reqTrace))
if err != nil {
return err
}
// Only display response header.
var respTrace []byte
// For errors we make sure to dump response body as well.
if resp.StatusCode != http.StatusOK &&
resp.StatusCode != http.StatusPartialContent &&
resp.StatusCode != http.StatusNoContent {
respTrace, err = httputil.DumpResponse(resp, true)
if err != nil {
return err
}
} else {
// WORKAROUND for https://github.com/golang/go/issues/13942.
// httputil.DumpResponse does not print response headers for
// all successful calls which have response ContentLength set
// to zero. Keep this workaround until the above bug is fixed.
if resp.ContentLength == 0 {
var buffer bytes.Buffer
if err = resp.Header.Write(&buffer); err != nil {
return err
}
respTrace = buffer.Bytes()
respTrace = append(respTrace, []byte("\r\n")...)
} else {
respTrace, err = httputil.DumpResponse(resp, false)
if err != nil {
return err
}
}
}
// Write response to trace output.
_, err = fmt.Fprint(c.traceOutput, strings.TrimSuffix(string(respTrace), "\r\n"))
if err != nil {
return err
}
// Ends the http dump.
_, err = fmt.Fprintln(c.traceOutput, "---------END-HTTP---------")
if err != nil {
return err
}
// Returns success.
return nil
}
// do - execute http request.
func (c Client) do(req *http.Request) (*http.Response, error) {
var resp *http.Response
var err error
// Do the request in a loop to handle HTTP 307 redirects, since golang
// still doesn't handle this situation properly (https://github.com/golang/go/issues/7912)
for {
resp, err = c.httpClient.Do(req)
if err != nil {
// Handle this specifically for now until future Golang
// versions fix this issue properly.
urlErr, ok := err.(*url.Error)
if ok && strings.Contains(urlErr.Err.Error(), "EOF") {
return nil, &url.Error{
Op: urlErr.Op,
URL: urlErr.URL,
Err: errors.New("Connection closed by foreign host " + urlErr.URL + ". Retry again."),
}
}
return nil, err
}
// Redo the request with the new redirect url if http 307 is returned, quit the loop otherwise
if resp != nil && resp.StatusCode == http.StatusTemporaryRedirect {
newURL, err := url.Parse(resp.Header.Get("Location"))
if err != nil {
break
}
req.URL = newURL
} else {
break
}
}
// Response should never be nil here; report if that is the case.
if resp == nil {
msg := "Response is empty. " + reportIssue
return nil, ErrInvalidArgument(msg)
}
// If trace is enabled, dump http request and response.
if c.isTraceEnabled {
err = c.dumpHTTP(req, resp)
if err != nil {
return nil, err
}
}
return resp, nil
}
// List of success status.
var successStatus = []int{
http.StatusOK,
http.StatusNoContent,
http.StatusPartialContent,
}
// executeMethod - instantiates a given method, and retries the
// request upon any error up to maxRetries attempts in a binomially
// delayed manner using a standard back off algorithm.
func (c Client) executeMethod(method string, metadata requestMetadata) (res *http.Response, err error) {
var isRetryable bool // Indicates if request can be retried.
var bodySeeker io.Seeker // Extracted seeker from io.Reader.
if metadata.contentBody != nil {
// Check if body is seekable then it is retryable.
bodySeeker, isRetryable = metadata.contentBody.(io.Seeker)
switch bodySeeker {
case os.Stdin, os.Stdout, os.Stderr:
isRetryable = false
}
}
// Create a done channel to control 'newRetryTimer' go routine.
doneCh := make(chan struct{}, 1)
// Indicate to our routine to exit cleanly upon return.
defer close(doneCh)
// Blank identifier is kept here on purpose since 'range' without
// blank identifiers is only supported since go1.4
// https://golang.org/doc/go1.4#forrange.
for _ = range c.newRetryTimer(MaxRetry, DefaultRetryUnit, DefaultRetryCap, MaxJitter, doneCh) {
// Retry executes the following function body if request has an
// error until maxRetries have been exhausted, retry attempts are
// performed after waiting for a given period of time in a
// binomial fashion.
if isRetryable {
// Seek back to beginning for each attempt.
if _, err = bodySeeker.Seek(0, 0); err != nil {
// If seek failed, no need to retry.
return nil, err
}
}
// Instantiate a new request.
var req *http.Request
req, err = c.newRequest(method, metadata)
if err != nil {
errResponse := ToErrorResponse(err)
if isS3CodeRetryable(errResponse.Code) {
continue // Retry.
}
return nil, err
}
// Initiate the request.
res, err = c.do(req)
if err != nil {
// For supported network errors verify.
if isNetErrorRetryable(err) {
continue // Retry.
}
// For other errors, return here no need to retry.
return nil, err
}
// For any known successful http status, return quickly.
for _, httpStatus := range successStatus {
if httpStatus == res.StatusCode {
return res, nil
}
}
// Read the body to be saved later.
errBodyBytes, err := ioutil.ReadAll(res.Body)
// res.Body should be closed
closeResponse(res)
if err != nil {
return nil, err
}
// Save the body.
errBodySeeker := bytes.NewReader(errBodyBytes)
res.Body = ioutil.NopCloser(errBodySeeker)
// For errors verify if it's retryable, otherwise fail quickly.
errResponse := ToErrorResponse(httpRespToErrorResponse(res, metadata.bucketName, metadata.objectName))
// Save the body back again.
errBodySeeker.Seek(0, 0) // Seek back to starting point.
res.Body = ioutil.NopCloser(errBodySeeker)
// If the bucket region is set in the error response and the error
// code dictates an invalid region, we can retry the request
// with the new region.
if errResponse.Code == "InvalidRegion" && errResponse.Region != "" {
c.bucketLocCache.Set(metadata.bucketName, errResponse.Region)
continue // Retry.
}
// Verify if error response code is retryable.
if isS3CodeRetryable(errResponse.Code) {
continue // Retry.
}
// Verify if http status code is retryable.
if isHTTPStatusRetryable(res.StatusCode) {
continue // Retry.
}
// For all other cases break out of the retry loop.
break
}
return res, err
}
// newRequest - instantiate a new HTTP request for a given method.
func (c Client) newRequest(method string, metadata requestMetadata) (req *http.Request, err error) {
// If no method is supplied default to 'POST'.
if method == "" {
method = "POST"
}
// Default all requests to "us-east-1" or "cn-north-1" (china region)
location := "us-east-1"
if s3utils.IsAmazonChinaEndpoint(c.endpointURL) {
// For china specifically we need to set everything to
// cn-north-1 for now, there is no easier way until AWS S3
// provides a cleaner compatible API across "us-east-1" and
// China region.
location = "cn-north-1"
}
// Gather location only if bucketName is present.
if metadata.bucketName != "" {
location, err = c.getBucketLocation(metadata.bucketName)
if err != nil {
return nil, err
}
}
// Save location.
metadata.bucketLocation = location
// Construct a new target URL.
targetURL, err := c.makeTargetURL(metadata.bucketName, metadata.objectName, metadata.bucketLocation, metadata.queryValues)
if err != nil {
return nil, err
}
// Initialize a new HTTP request for the method.
req, err = http.NewRequest(method, targetURL.String(), metadata.contentBody)
if err != nil {
return nil, err
}
// Anonymous request.
anonymous := c.accessKeyID == "" || c.secretAccessKey == ""
// Generate presign url if needed, return right here.
if metadata.expires != 0 && metadata.presignURL {
if anonymous {
return nil, ErrInvalidArgument("Requests cannot be presigned with anonymous credentials.")
}
if c.signature.isV2() {
// Presign URL with signature v2.
req = s3signer.PreSignV2(*req, c.accessKeyID, c.secretAccessKey, metadata.expires)
} else if c.signature.isV4() {
// Presign URL with signature v4.
req = s3signer.PreSignV4(*req, c.accessKeyID, c.secretAccessKey, location, metadata.expires)
}
return req, nil
}
// Set 'User-Agent' header for the request.
c.setUserAgent(req)
// Set all headers.
for k, v := range metadata.customHeader {
req.Header.Set(k, v[0])
}
// set incoming content-length.
if metadata.contentLength > 0 {
req.ContentLength = metadata.contentLength
}
// set md5Sum for content protection.
if metadata.contentMD5Bytes != nil {
req.Header.Set("Content-Md5", base64.StdEncoding.EncodeToString(metadata.contentMD5Bytes))
}
if anonymous {
return req, nil
}
// Sign the request for all authenticated requests.
if c.signature.isV2() {
// Add signature version '2' authorization header.
req = s3signer.SignV2(*req, c.accessKeyID, c.secretAccessKey)
} else if c.signature.isV4() || c.signature.isStreamingV4() &&
method != "PUT" {
// Set sha256 sum for signature calculation only with signature version '4'.
shaHeader := unsignedPayload
if !c.secure {
if metadata.contentSHA256Bytes == nil {
shaHeader = hex.EncodeToString(sum256([]byte{}))
} else {
shaHeader = hex.EncodeToString(metadata.contentSHA256Bytes)
}
}
req.Header.Set("X-Amz-Content-Sha256", shaHeader)
// Add signature version '4' authorization header.
req = s3signer.SignV4(*req, c.accessKeyID, c.secretAccessKey, location)
} else if c.signature.isStreamingV4() {
req = s3signer.StreamingSignV4(req, c.accessKeyID,
c.secretAccessKey, location, metadata.contentLength, time.Now().UTC())
}
// Return request.
return req, nil
}
// set User agent.
func (c Client) setUserAgent(req *http.Request) {
req.Header.Set("User-Agent", libraryUserAgent)
if c.appInfo.appName != "" && c.appInfo.appVersion != "" {
req.Header.Set("User-Agent", libraryUserAgent+" "+c.appInfo.appName+"/"+c.appInfo.appVersion)
}
}
// makeTargetURL make a new target url.
func (c Client) makeTargetURL(bucketName, objectName, bucketLocation string, queryValues url.Values) (*url.URL, error) {
host := c.endpointURL.Host
// For Amazon S3 endpoint, try to fetch location based endpoint.
if s3utils.IsAmazonEndpoint(c.endpointURL) {
if c.s3AccelerateEndpoint != "" && bucketName != "" {
// http://docs.aws.amazon.com/AmazonS3/latest/dev/transfer-acceleration.html
// Disable transfer acceleration for non-compliant bucket names.
if strings.Contains(bucketName, ".") {
return nil, ErrTransferAccelerationBucket(bucketName)
}
// If transfer acceleration is requested set new host.
// For more details about enabling transfer acceleration read here.
// http://docs.aws.amazon.com/AmazonS3/latest/dev/transfer-acceleration.html
host = c.s3AccelerateEndpoint
} else {
// Fetch new host based on the bucket location.
host = getS3Endpoint(bucketLocation)
}
}
// Save scheme.
scheme := c.endpointURL.Scheme
urlStr := scheme + "://" + host + "/"
// Make URL only if bucketName is available, otherwise use the
// endpoint URL.
if bucketName != "" {
// Save if target url will have buckets which support virtual host.
isVirtualHostStyle := s3utils.IsVirtualHostSupported(c.endpointURL, bucketName)
// If endpoint supports virtual host style use that always.
// Currently only S3 and Google Cloud Storage would support
// virtual host style.
if isVirtualHostStyle {
urlStr = scheme + "://" + bucketName + "." + host + "/"
if objectName != "" {
urlStr = urlStr + s3utils.EncodePath(objectName)
}
} else {
// If not fall back to using path style.
urlStr = urlStr + bucketName + "/"
if objectName != "" {
urlStr = urlStr + s3utils.EncodePath(objectName)
}
}
}
// If there are any query values, add them to the end.
if len(queryValues) > 0 {
urlStr = urlStr + "?" + s3utils.QueryEncode(queryValues)
}
u, err := url.Parse(urlStr)
if err != nil {
return nil, err
}
return u, nil
}
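// Illustrative sketch (names are assumptions): for a virtual-host capable
// endpoint the result is https://my-bucket.s3.amazonaws.com/my-object;
// otherwise the path style https://<endpoint>/my-bucket/my-object is used.
func exampleMakeTargetURL(clnt Client) (*url.URL, error) {
return clnt.makeTargetURL("my-bucket", "my-object", "us-east-1", nil)
}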

37
vendor/github.com/minio/minio-go/appveyor.yml generated vendored Normal file
View File

@ -0,0 +1,37 @@
# version format
version: "{build}"
# Operating system (build VM template)
os: Windows Server 2012 R2
clone_folder: c:\gopath\src\github.com\minio\minio-go
# environment variables
environment:
GOPATH: c:\gopath
GO15VENDOREXPERIMENT: 1
# scripts that run after cloning repository
install:
- set PATH=%GOPATH%\bin;c:\go\bin;%PATH%
- go version
- go env
- go get -u github.com/golang/lint/golint
- go get -u github.com/remyoudompheng/go-misc/deadcode
- go get -u github.com/gordonklaus/ineffassign
# to run your custom scripts instead of automatic MSBuild
build_script:
- go vet ./...
- gofmt -s -l .
- golint -set_exit_status github.com/minio/minio-go...
- deadcode
- ineffassign .
- go test -short -v
- go test -short -race -v
# to disable automatic tests
test: off
# to disable deployment
deploy: off

202
vendor/github.com/minio/minio-go/bucket-cache.go generated vendored Normal file
View File

@ -0,0 +1,202 @@
/*
* Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package minio
import (
"encoding/hex"
"net/http"
"net/url"
"path"
"sync"
"github.com/minio/minio-go/pkg/s3signer"
"github.com/minio/minio-go/pkg/s3utils"
)
// bucketLocationCache - Provides simple mechanism to hold bucket
// locations in memory.
type bucketLocationCache struct {
// mutex is used for handling the concurrent
// read/write requests for cache.
sync.RWMutex
// items holds the cached bucket locations.
items map[string]string
}
// newBucketLocationCache - Provides a new bucket location cache to be
// used internally with the client object.
func newBucketLocationCache() *bucketLocationCache {
return &bucketLocationCache{
items: make(map[string]string),
}
}
// Get - Returns a value of a given key if it exists.
func (r *bucketLocationCache) Get(bucketName string) (location string, ok bool) {
r.RLock()
defer r.RUnlock()
location, ok = r.items[bucketName]
return
}
// Set - Will persist a value into cache.
func (r *bucketLocationCache) Set(bucketName string, location string) {
r.Lock()
defer r.Unlock()
r.items[bucketName] = location
}
// Delete - Deletes a bucket name from cache.
func (r *bucketLocationCache) Delete(bucketName string) {
r.Lock()
defer r.Unlock()
delete(r.items, bucketName)
}
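// Illustrative sketch: the cache is a plain RWMutex-guarded map keyed by
// bucket name; the bucket and region names below are placeholders.
func exampleBucketLocationCache() {
cache := newBucketLocationCache()
cache.Set("my-bucket", "us-west-2")
if location, ok := cache.Get("my-bucket"); ok {
_ = location // "us-west-2"
}
cache.Delete("my-bucket")
}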
// GetBucketLocation - get location for the bucket name from location cache, if not
// fetch freshly by making a new request.
func (c Client) GetBucketLocation(bucketName string) (string, error) {
if err := isValidBucketName(bucketName); err != nil {
return "", err
}
return c.getBucketLocation(bucketName)
}
// getBucketLocation - Get location for the bucketName from location map cache, if not
// fetch freshly by making a new request.
func (c Client) getBucketLocation(bucketName string) (string, error) {
if err := isValidBucketName(bucketName); err != nil {
return "", err
}
if s3utils.IsAmazonChinaEndpoint(c.endpointURL) {
// For china specifically we need to set everything to
// cn-north-1 for now, there is no easier way until AWS S3
// provides a cleaner compatible API across "us-east-1" and
// China region.
return "cn-north-1", nil
}
// Region set then no need to fetch bucket location.
if c.region != "" {
return c.region, nil
}
if location, ok := c.bucketLocCache.Get(bucketName); ok {
return location, nil
}
// Initialize a new request.
req, err := c.getBucketLocationRequest(bucketName)
if err != nil {
return "", err
}
// Initiate the request.
resp, err := c.do(req)
defer closeResponse(resp)
if err != nil {
return "", err
}
location, err := processBucketLocationResponse(resp, bucketName)
if err != nil {
return "", err
}
c.bucketLocCache.Set(bucketName, location)
return location, nil
}
// processes the getBucketLocation http response from the server.
func processBucketLocationResponse(resp *http.Response, bucketName string) (bucketLocation string, err error) {
if resp != nil {
if resp.StatusCode != http.StatusOK {
err = httpRespToErrorResponse(resp, bucketName, "")
errResp := ToErrorResponse(err)
// For access denied error, it could be an anonymous
// request. Move forward and let the top level callers
// succeed if possible based on their policy.
if errResp.Code == "AccessDenied" {
return "us-east-1", nil
}
return "", err
}
}
// Extract location.
var locationConstraint string
err = xmlDecoder(resp.Body, &locationConstraint)
if err != nil {
return "", err
}
location := locationConstraint
// If location is empty it defaults to 'us-east-1'.
if location == "" {
location = "us-east-1"
}
// Location can be 'EU' convert it to meaningful 'eu-west-1'.
if location == "EU" {
location = "eu-west-1"
}
// Return the location.
return location, nil
}
// getBucketLocationRequest - Wrapper creates a new getBucketLocation request.
func (c Client) getBucketLocationRequest(bucketName string) (*http.Request, error) {
// Set location query.
urlValues := make(url.Values)
urlValues.Set("location", "")
// Set get bucket location always as path style.
targetURL := c.endpointURL
targetURL.Path = path.Join(bucketName, "") + "/"
targetURL.RawQuery = urlValues.Encode()
// Get a new HTTP request for the method.
req, err := http.NewRequest("GET", targetURL.String(), nil)
if err != nil {
return nil, err
}
// Set UserAgent for the request.
c.setUserAgent(req)
// Set sha256 sum for signature calculation only with signature version '4'.
if c.signature.isV4() {
var contentSha256 string
if c.secure {
contentSha256 = unsignedPayload
} else {
contentSha256 = hex.EncodeToString(sum256([]byte{}))
}
req.Header.Set("X-Amz-Content-Sha256", contentSha256)
}
// Sign the request.
if c.signature.isV4() {
req = s3signer.SignV4(*req, c.accessKeyID, c.secretAccessKey, "us-east-1")
} else if c.signature.isV2() {
req = s3signer.SignV2(*req, c.accessKeyID, c.secretAccessKey)
}
return req, nil
}
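As an illustration of the flow above, here is a minimal sketch of a caller exercising the location cache end to end. The endpoint, credentials and bucket name are placeholders, and minio.New is the usual client constructor, not part of this file:

package main

import (
    "fmt"
    "log"

    minio "github.com/minio/minio-go"
)

func main() {
    // Placeholder endpoint and credentials.
    client, err := minio.New("play.minio.io:9000", "ACCESS-KEY", "SECRET-KEY", true)
    if err != nil {
        log.Fatal(err)
    }
    // The first call issues the GET "?location" request built above and
    // caches the result; later calls for the same bucket hit bucketLocCache.
    location, err := client.GetBucketLocation("my-bucket")
    if err != nil {
        log.Fatal(err)
    }
    fmt.Println("bucket location:", location)
}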

vendor/github.com/minio/minio-go/bucket-notification.go generated vendored Normal file

@ -0,0 +1,231 @@
/*
* Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2016 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package minio
import (
"encoding/xml"
"reflect"
)
// NotificationEventType is an S3 notification event associated to the bucket notification configuration
type NotificationEventType string
// The role of all event types is described in:
// http://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html#notification-how-to-event-types-and-destinations
const (
ObjectCreatedAll NotificationEventType = "s3:ObjectCreated:*"
ObjectCreatedPut = "s3:ObjectCreated:Put"
ObjectCreatedPost = "s3:ObjectCreated:Post"
ObjectCreatedCopy = "s3:ObjectCreated:Copy"
ObjectCreatedCompleteMultipartUpload = "s3:ObjectCreated:CompleteMultipartUpload"
ObjectAccessedGet = "s3:ObjectAccessed:Get"
ObjectAccessedHead = "s3:ObjectAccessed:Head"
ObjectAccessedAll = "s3:ObjectAccessed:*"
ObjectRemovedAll = "s3:ObjectRemoved:*"
ObjectRemovedDelete = "s3:ObjectRemoved:Delete"
ObjectRemovedDeleteMarkerCreated = "s3:ObjectRemoved:DeleteMarkerCreated"
ObjectReducedRedundancyLostObject = "s3:ReducedRedundancyLostObject"
)
// FilterRule - child of S3Key, a tag in the notification xml which
// carries suffix/prefix filters
type FilterRule struct {
Name string `xml:"Name"`
Value string `xml:"Value"`
}
// S3Key - child of Filter, a tag in the notification xml which
// carries suffix/prefix filters
type S3Key struct {
FilterRules []FilterRule `xml:"FilterRule,omitempty"`
}
// Filter - a tag in the notification xml structure which carries
// suffix/prefix filters
type Filter struct {
S3Key S3Key `xml:"S3Key,omitempty"`
}
// Arn - holds ARN information that will be sent to the web service,
// ARN description can be found in http://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html
type Arn struct {
Partition string
Service string
Region string
AccountID string
Resource string
}
// NewArn creates new ARN based on the given partition, service, region, account id and resource
func NewArn(partition, service, region, accountID, resource string) Arn {
return Arn{Partition: partition,
Service: service,
Region: region,
AccountID: accountID,
Resource: resource}
}
// String returns the string format of the ARN
func (arn Arn) String() string {
return "arn:" + arn.Partition + ":" + arn.Service + ":" + arn.Region + ":" + arn.AccountID + ":" + arn.Resource
}
// NotificationConfig - represents one single notification configuration
// such as topic, queue or lambda configuration.
type NotificationConfig struct {
ID string `xml:"Id,omitempty"`
Arn Arn `xml:"-"`
Events []NotificationEventType `xml:"Event"`
Filter *Filter `xml:"Filter,omitempty"`
}
// NewNotificationConfig creates one notification config and sets the given ARN
func NewNotificationConfig(arn Arn) NotificationConfig {
return NotificationConfig{Arn: arn}
}
// AddEvents adds one or more events to the current notification config
func (t *NotificationConfig) AddEvents(events ...NotificationEventType) {
t.Events = append(t.Events, events...)
}
// AddFilterSuffix sets the suffix configuration to the current notification config
func (t *NotificationConfig) AddFilterSuffix(suffix string) {
if t.Filter == nil {
t.Filter = &Filter{}
}
newFilterRule := FilterRule{Name: "suffix", Value: suffix}
// Replace any suffix rule if existing and add to the list otherwise
for index := range t.Filter.S3Key.FilterRules {
if t.Filter.S3Key.FilterRules[index].Name == "suffix" {
t.Filter.S3Key.FilterRules[index] = newFilterRule
return
}
}
t.Filter.S3Key.FilterRules = append(t.Filter.S3Key.FilterRules, newFilterRule)
}
// AddFilterPrefix sets the prefix configuration to the current notification config
func (t *NotificationConfig) AddFilterPrefix(prefix string) {
if t.Filter == nil {
t.Filter = &Filter{}
}
newFilterRule := FilterRule{Name: "prefix", Value: prefix}
// Replace any prefix rule if existing and add to the list otherwise
for index := range t.Filter.S3Key.FilterRules {
if t.Filter.S3Key.FilterRules[index].Name == "prefix" {
t.Filter.S3Key.FilterRules[index] = newFilterRule
return
}
}
t.Filter.S3Key.FilterRules = append(t.Filter.S3Key.FilterRules, newFilterRule)
}
// TopicConfig carries one single topic notification configuration
type TopicConfig struct {
NotificationConfig
Topic string `xml:"Topic"`
}
// QueueConfig carries one single queue notification configuration
type QueueConfig struct {
NotificationConfig
Queue string `xml:"Queue"`
}
// LambdaConfig carries one single cloudfunction notification configuration
type LambdaConfig struct {
NotificationConfig
Lambda string `xml:"CloudFunction"`
}
// BucketNotification - the struct that represents the whole XML to be sent to the web service
type BucketNotification struct {
XMLName xml.Name `xml:"NotificationConfiguration"`
LambdaConfigs []LambdaConfig `xml:"CloudFunctionConfiguration"`
TopicConfigs []TopicConfig `xml:"TopicConfiguration"`
QueueConfigs []QueueConfig `xml:"QueueConfiguration"`
}
// AddTopic adds a given topic config to the general bucket notification config
func (b *BucketNotification) AddTopic(topicConfig NotificationConfig) {
newTopicConfig := TopicConfig{NotificationConfig: topicConfig, Topic: topicConfig.Arn.String()}
for _, n := range b.TopicConfigs {
if reflect.DeepEqual(n, newTopicConfig) {
// Avoid adding duplicated entry
return
}
}
b.TopicConfigs = append(b.TopicConfigs, newTopicConfig)
}
// AddQueue adds a given queue config to the general bucket notification config
func (b *BucketNotification) AddQueue(queueConfig NotificationConfig) {
newQueueConfig := QueueConfig{NotificationConfig: queueConfig, Queue: queueConfig.Arn.String()}
for _, n := range b.QueueConfigs {
if reflect.DeepEqual(n, newQueueConfig) {
// Avoid adding duplicated entry
return
}
}
b.QueueConfigs = append(b.QueueConfigs, newQueueConfig)
}
// AddLambda adds a given lambda config to the general bucket notification config
func (b *BucketNotification) AddLambda(lambdaConfig NotificationConfig) {
newLambdaConfig := LambdaConfig{NotificationConfig: lambdaConfig, Lambda: lambdaConfig.Arn.String()}
for _, n := range b.LambdaConfigs {
if reflect.DeepEqual(n, newLambdaConfig) {
// Avoid adding duplicated entry
return
}
}
b.LambdaConfigs = append(b.LambdaConfigs, newLambdaConfig)
}
// RemoveTopicByArn removes all topic configurations that match the exact specified ARN
func (b *BucketNotification) RemoveTopicByArn(arn Arn) {
var topics []TopicConfig
for _, topic := range b.TopicConfigs {
if topic.Topic != arn.String() {
topics = append(topics, topic)
}
}
b.TopicConfigs = topics
}
// RemoveQueueByArn removes all queue configurations that match the exact specified ARN
func (b *BucketNotification) RemoveQueueByArn(arn Arn) {
var queues []QueueConfig
for _, queue := range b.QueueConfigs {
if queue.Queue != arn.String() {
queues = append(queues, queue)
}
}
b.QueueConfigs = queues
}
// RemoveLambdaByArn removes all lambda configurations that match the exact specified ARN
func (b *BucketNotification) RemoveLambdaByArn(arn Arn) {
var lambdas []LambdaConfig
for _, lambda := range b.LambdaConfigs {
if lambda.Lambda != arn.String() {
lambdas = append(lambdas, lambda)
}
}
b.LambdaConfigs = lambdas
}
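A short sketch of how these types compose into a full bucket notification configuration; the ARN fields, bucket and filter are made-up values:

package main

import (
    "fmt"

    minio "github.com/minio/minio-go"
)

func main() {
    // Hypothetical SQS queue ARN: arn:aws:sqs:us-east-1:123456789012:my-queue.
    queueArn := minio.NewArn("aws", "sqs", "us-east-1", "123456789012", "my-queue")

    // One notification config: fire on all object creations, .jpg keys only.
    cfg := minio.NewNotificationConfig(queueArn)
    cfg.AddEvents(minio.ObjectCreatedAll)
    cfg.AddFilterSuffix(".jpg")

    // Attach it to the bucket-level structure; AddQueue also deduplicates.
    var bn minio.BucketNotification
    bn.AddQueue(cfg)
    fmt.Printf("%+v\n", bn)
}

The assembled BucketNotification would then typically be handed to the client's set-bucket-notification call.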

vendor/github.com/minio/minio-go/constants.go generated vendored Normal file

@ -0,0 +1,62 @@
/*
* Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package minio
/// Multipart upload defaults.
// minPartSize - minimum part size 64MiB per object after which
// putObject behaves internally as multipart.
const minPartSize = 1024 * 1024 * 64
// maxPartsCount - maximum number of parts for a single multipart session.
const maxPartsCount = 10000
// maxPartSize - maximum part size 5GiB for a single multipart upload
// operation.
const maxPartSize = 1024 * 1024 * 1024 * 5
// maxSinglePutObjectSize - maximum size 5GiB of object per PUT
// operation.
const maxSinglePutObjectSize = 1024 * 1024 * 1024 * 5
// maxMultipartPutObjectSize - maximum size 5TiB of object for
// Multipart operation.
const maxMultipartPutObjectSize = 1024 * 1024 * 1024 * 1024 * 5
// optimalReadBufferSize - optimal buffer 5MiB used for reading
// through Read operation.
const optimalReadBufferSize = 1024 * 1024 * 5
// unsignedPayload - value to be set to X-Amz-Content-Sha256 header when
// we don't want to sign the request payload
const unsignedPayload = "UNSIGNED-PAYLOAD"
// Total number of parallel workers used for multipart operation.
var totalWorkers = 3
// Signature related constants.
const (
signV4Algorithm = "AWS4-HMAC-SHA256"
iso8601DateFormat = "20060102T150405Z"
)
// Encryption headers stored along with the object.
const (
amzHeaderIV = "X-Amz-Meta-X-Amz-Iv"
amzHeaderKey = "X-Amz-Meta-X-Amz-Key"
amzHeaderMatDesc = "X-Amz-Meta-X-Amz-Matdesc"
)

vendor/github.com/minio/minio-go/copy-conditions.go generated vendored Normal file

@ -0,0 +1,99 @@
/*
* Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2016 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package minio
import (
"net/http"
"time"
)
// copyCondition explanation:
// http://docs.aws.amazon.com/AmazonS3/latest/API/RESTObjectCOPY.html
//
// Example:
//
// copyCondition {
// key: "x-amz-copy-if-modified-since",
// value: "Tue, 15 Nov 1994 12:45:26 GMT",
// }
//
type copyCondition struct {
key string
value string
}
// CopyConditions - copy conditions.
type CopyConditions struct {
conditions []copyCondition
}
// NewCopyConditions - Instantiate new list of conditions. This
// function is left behind for backward compatibility. The idiomatic
// way to set an empty set of copy conditions is,
// ``copyConditions := CopyConditions{}``.
//
func NewCopyConditions() CopyConditions {
return CopyConditions{}
}
// SetMatchETag - set match etag.
func (c *CopyConditions) SetMatchETag(etag string) error {
if etag == "" {
return ErrInvalidArgument("ETag cannot be empty.")
}
c.conditions = append(c.conditions, copyCondition{
key: "x-amz-copy-source-if-match",
value: etag,
})
return nil
}
// SetMatchETagExcept - set match etag except.
func (c *CopyConditions) SetMatchETagExcept(etag string) error {
if etag == "" {
return ErrInvalidArgument("ETag cannot be empty.")
}
c.conditions = append(c.conditions, copyCondition{
key: "x-amz-copy-source-if-none-match",
value: etag,
})
return nil
}
// SetUnmodified - set unmodified time since.
func (c *CopyConditions) SetUnmodified(modTime time.Time) error {
if modTime.IsZero() {
return ErrInvalidArgument("Modified since cannot be empty.")
}
c.conditions = append(c.conditions, copyCondition{
key: "x-amz-copy-source-if-unmodified-since",
value: modTime.Format(http.TimeFormat),
})
return nil
}
// SetModified - set modified time since.
func (c *CopyConditions) SetModified(modTime time.Time) error {
if modTime.IsZero() {
return ErrInvalidArgument("Modified since cannot be empty.")
}
c.conditions = append(c.conditions, copyCondition{
key: "x-amz-copy-source-if-modified-since",
value: modTime.Format(http.TimeFormat),
})
return nil
}
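For reference, a sketch of assembling copy conditions; the ETag and timestamp are made-up values:

package main

import (
    "fmt"
    "log"
    "time"

    minio "github.com/minio/minio-go"
)

func main() {
    // Copy only if the source still matches this ETag and was not
    // modified after the given time.
    conds := minio.CopyConditions{}
    if err := conds.SetMatchETag("d41d8cd98f00b204e9800998ecf8427e"); err != nil {
        log.Fatal(err)
    }
    if err := conds.SetUnmodified(time.Date(2017, 1, 1, 0, 0, 0, 0, time.UTC)); err != nil {
        log.Fatal(err)
    }
    // conds would then be passed along to the client's CopyObject call.
    fmt.Printf("%+v\n", conds)
}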

vendor/github.com/minio/minio-go/core.go generated vendored Normal file

@ -0,0 +1,100 @@
/*
* Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2017 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package minio
import (
"io"
"github.com/minio/minio-go/pkg/policy"
)
// Core - Inherits Client and adds new methods to expose the low level S3 APIs.
type Core struct {
*Client
}
// NewCore - Returns a new initialized Core client. This client should
// only be used under special conditions, such as needing access to
// lower-level primitives to write your own wrappers.
func NewCore(endpoint string, accessKeyID, secretAccessKey string, secure bool) (*Core, error) {
var s3Client Core
client, err := NewV4(endpoint, accessKeyID, secretAccessKey, secure)
if err != nil {
return nil, err
}
s3Client.Client = client
return &s3Client, nil
}
// ListObjects - List all the objects at a prefix; optionally use marker
// and delimiter to further filter the results.
func (c Core) ListObjects(bucket, prefix, marker, delimiter string, maxKeys int) (result ListBucketResult, err error) {
return c.listObjectsQuery(bucket, prefix, marker, delimiter, maxKeys)
}
// ListObjectsV2 - Lists all the objects at a prefix, similar to ListObjects() but uses
// continuationToken instead of marker to further filter the results.
func (c Core) ListObjectsV2(bucketName, objectPrefix, continuationToken string, fetchOwner bool, delimiter string, maxkeys int) (ListBucketV2Result, error) {
return c.listObjectsV2Query(bucketName, objectPrefix, continuationToken, fetchOwner, delimiter, maxkeys)
}
// PutObject - Upload object. Uploads using single PUT call.
func (c Core) PutObject(bucket, object string, size int64, data io.Reader, md5Sum, sha256Sum []byte, metadata map[string][]string) (ObjectInfo, error) {
return c.putObjectDo(bucket, object, data, md5Sum, sha256Sum, size, metadata)
}
// NewMultipartUpload - Initiates new multipart upload and returns the new uploadID.
func (c Core) NewMultipartUpload(bucket, object string, metadata map[string][]string) (uploadID string, err error) {
result, err := c.initiateMultipartUpload(bucket, object, metadata)
return result.UploadID, err
}
// ListMultipartUploads - List incomplete uploads.
func (c Core) ListMultipartUploads(bucket, prefix, keyMarker, uploadIDMarker, delimiter string, maxUploads int) (result ListMultipartUploadsResult, err error) {
return c.listMultipartUploadsQuery(bucket, keyMarker, uploadIDMarker, prefix, delimiter, maxUploads)
}
// PutObjectPart - Upload an object part.
func (c Core) PutObjectPart(bucket, object, uploadID string, partID int, size int64, data io.Reader, md5Sum, sha256Sum []byte) (ObjectPart, error) {
return c.uploadPart(bucket, object, uploadID, data, partID, md5Sum, sha256Sum, size)
}
// ListObjectParts - List uploaded parts of an incomplete upload.
func (c Core) ListObjectParts(bucket, object, uploadID string, partNumberMarker int, maxParts int) (result ListObjectPartsResult, err error) {
return c.listObjectPartsQuery(bucket, object, uploadID, partNumberMarker, maxParts)
}
// CompleteMultipartUpload - Concatenate uploaded parts and commit to an object.
func (c Core) CompleteMultipartUpload(bucket, object, uploadID string, parts []CompletePart) error {
_, err := c.completeMultipartUpload(bucket, object, uploadID, completeMultipartUpload{Parts: parts})
return err
}
// AbortMultipartUpload - Abort an incomplete upload.
func (c Core) AbortMultipartUpload(bucket, object, uploadID string) error {
return c.abortMultipartUpload(bucket, object, uploadID)
}
// GetBucketPolicy - fetches bucket access policy for a given bucket.
func (c Core) GetBucketPolicy(bucket string) (policy.BucketAccessPolicy, error) {
return c.getBucketPolicy(bucket)
}
// PutBucketPolicy - applies a new bucket access policy for a given bucket.
func (c Core) PutBucketPolicy(bucket string, bucketPolicy policy.BucketAccessPolicy) error {
return c.putBucketPolicy(bucket, bucketPolicy)
}
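A minimal usage sketch of the Core client above; endpoint, credentials and bucket are placeholders, and the loop assumes the usual ListBucketResult fields (Contents entries with Key and Size):

package main

import (
    "fmt"
    "log"

    minio "github.com/minio/minio-go"
)

func main() {
    core, err := minio.NewCore("play.minio.io:9000", "ACCESS-KEY", "SECRET-KEY", true)
    if err != nil {
        log.Fatal(err)
    }
    // Low-level listing: up to 10 keys under the "photos/" prefix.
    result, err := core.ListObjects("my-bucket", "photos/", "", "", 10)
    if err != nil {
        log.Fatal(err)
    }
    for _, obj := range result.Contents {
        fmt.Println(obj.Key, obj.Size)
    }
}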

vendor/github.com/minio/minio-go/hook-reader.go generated vendored Normal file

@ -0,0 +1,70 @@
/*
* Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015, 2016 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package minio
import "io"
// hookReader hooks an additional reader into the source stream. It is
// useful for making progress bars. Second reader is appropriately
// notified about the exact number of bytes read from the primary
// source on each Read operation.
type hookReader struct {
source io.Reader
hook io.Reader
}
// Seek implements io.Seeker. Seeks the source if it implements
// io.Seeker; otherwise seeks the hook if it does.
func (hr *hookReader) Seek(offset int64, whence int) (n int64, err error) {
// If the source implements io.Seeker, use it.
sourceSeeker, ok := hr.source.(io.Seeker)
if ok {
return sourceSeeker.Seek(offset, whence)
}
// Otherwise, if the hook implements io.Seeker, use it.
hookSeeker, ok := hr.hook.(io.Seeker)
if ok {
return hookSeeker.Seek(offset, whence)
}
return n, nil
}
// Read implements io.Reader. Always reads from the source, the return
// value 'n' number of bytes are reported through the hook. Returns
// error for all non io.EOF conditions.
func (hr *hookReader) Read(b []byte) (n int, err error) {
n, err = hr.source.Read(b)
if err != nil && err != io.EOF {
return n, err
}
// Progress the hook with the total read bytes from the source.
if _, herr := hr.hook.Read(b[:n]); herr != nil {
if herr != io.EOF {
return n, herr
}
}
return n, err
}
// newHook returns an io.Reader which wraps the source in a hookReader
// that reports the data read from the source to the hook.
func newHook(source, hook io.Reader) io.Reader {
if hook == nil {
return source
}
return &hookReader{source, hook}
}
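Since hookReader and newHook are unexported, here is a standalone sketch of the same pattern: a progress hook that observes exactly the bytes read from the source on each Read.

package main

import (
    "fmt"
    "io"
    "os"
    "strings"
)

// progressHook plays the role of the hook: its Read is called with
// exactly the bytes just read from the source, so it can count them.
type progressHook struct{ total int64 }

func (p *progressHook) Read(b []byte) (int, error) {
    p.total += int64(len(b))
    return len(b), nil
}

// hooked mirrors hookReader: read from source, replay into hook.
type hooked struct {
    source io.Reader
    hook   io.Reader
}

func (h *hooked) Read(b []byte) (int, error) {
    n, err := h.source.Read(b)
    if err != nil && err != io.EOF {
        return n, err
    }
    if _, herr := h.hook.Read(b[:n]); herr != nil && herr != io.EOF {
        return n, herr
    }
    return n, err
}

func main() {
    hook := &progressHook{}
    r := &hooked{source: strings.NewReader("hello, hooked world"), hook: hook}
    _, _ = io.Copy(os.Stdout, r)
    fmt.Println("\nbytes observed by hook:", hook.total)
}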

vendor/github.com/minio/minio-go/pkg/encrypt/cbc.go generated vendored Normal file

@ -0,0 +1,284 @@
/*
* Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2017 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package encrypt
import (
"bytes"
"crypto/aes"
"crypto/cipher"
"crypto/rand"
"encoding/base64"
"errors"
"io"
)
// Crypt mode - encryption or decryption
type cryptMode int
const (
encryptMode cryptMode = iota
decryptMode
)
// CBCSecureMaterials encrypts/decrypts data using the AES CBC algorithm
type CBCSecureMaterials struct {
// Data stream to encrypt/decrypt
stream io.Reader
// Last internal error
err error
// End of file reached
eof bool
// Holds initial data
srcBuf *bytes.Buffer
// Holds transformed data (encrypted or decrypted)
dstBuf *bytes.Buffer
// Master key used to encrypt/decrypt the content key
encryptionKey Key
// Content key that encrypts/decrypts the object data
contentKey []byte
// Encrypted form of contentKey
cryptedKey []byte
// Initialization vector
iv []byte
// matDesc - currently unused
matDesc []byte
// Indicate if we are going to encrypt or decrypt
cryptMode cryptMode
// Helper that encrypts/decrypts data
blockMode cipher.BlockMode
}
// NewCBCSecureMaterials builds a new CBC crypter module with
// the specified encryption key (symmetric or asymmetric)
func NewCBCSecureMaterials(key Key) (*CBCSecureMaterials, error) {
if key == nil {
return nil, errors.New("Unable to recognize empty encryption properties")
}
return &CBCSecureMaterials{
srcBuf: bytes.NewBuffer([]byte{}),
dstBuf: bytes.NewBuffer([]byte{}),
encryptionKey: key,
matDesc: []byte("{}"),
}, nil
}
// SetupEncryptMode - tells CBC that we are going to encrypt data
func (s *CBCSecureMaterials) SetupEncryptMode(stream io.Reader) error {
// Set mode to encrypt
s.cryptMode = encryptMode
// Set underlying reader
s.stream = stream
s.eof = false
s.srcBuf.Reset()
s.dstBuf.Reset()
var err error
// Generate random content key
s.contentKey = make([]byte, aes.BlockSize*2)
if _, err := rand.Read(s.contentKey); err != nil {
return err
}
// Encrypt content key
s.cryptedKey, err = s.encryptionKey.Encrypt(s.contentKey)
if err != nil {
return err
}
// Generate random IV
s.iv = make([]byte, aes.BlockSize)
if _, err = rand.Read(s.iv); err != nil {
return err
}
// New cipher
encryptContentBlock, err := aes.NewCipher(s.contentKey)
if err != nil {
return err
}
s.blockMode = cipher.NewCBCEncrypter(encryptContentBlock, s.iv)
return nil
}
// SetupDecryptMode - tells CBC that we are going to decrypt data
func (s *CBCSecureMaterials) SetupDecryptMode(stream io.Reader, iv string, key string) error {
// Set mode to decrypt
s.cryptMode = decryptMode
// Set underlying reader
s.stream = stream
// Reset
s.eof = false
s.srcBuf.Reset()
s.dstBuf.Reset()
var err error
// Get IV
s.iv, err = base64.StdEncoding.DecodeString(iv)
if err != nil {
return err
}
// Get encrypted content key
s.cryptedKey, err = base64.StdEncoding.DecodeString(key)
if err != nil {
return err
}
// Decrypt content key
s.contentKey, err = s.encryptionKey.Decrypt(s.cryptedKey)
if err != nil {
return err
}
// New cipher
decryptContentBlock, err := aes.NewCipher(s.contentKey)
if err != nil {
return err
}
s.blockMode = cipher.NewCBCDecrypter(decryptContentBlock, s.iv)
return nil
}
// GetIV - return randomly generated IV (per S3 object), base64 encoded.
func (s *CBCSecureMaterials) GetIV() string {
return base64.StdEncoding.EncodeToString(s.iv)
}
// GetKey - return content encrypting key (cek) in encrypted form, base64 encoded.
func (s *CBCSecureMaterials) GetKey() string {
return base64.StdEncoding.EncodeToString(s.cryptedKey)
}
// GetDesc - user provided encryption material description in JSON (UTF8) format.
func (s *CBCSecureMaterials) GetDesc() string {
return string(s.matDesc)
}
// Read - fills buf with encrypted/decrypted data.
func (s *CBCSecureMaterials) Read(buf []byte) (n int, err error) {
// Always fill buf from dstBuf at the end of this function
defer func() {
if s.err != nil {
n, err = 0, s.err
} else {
n, err = s.dstBuf.Read(buf)
}
}()
// Return early if EOF has already been reached.
if s.eof {
return
}
// Fill dest buffer if its length is less than buf
for !s.eof && s.dstBuf.Len() < len(buf) {
srcPart := make([]byte, aes.BlockSize)
dstPart := make([]byte, aes.BlockSize)
// Fill src buffer
for s.srcBuf.Len() < aes.BlockSize*2 {
_, err = io.CopyN(s.srcBuf, s.stream, aes.BlockSize)
if err != nil {
break
}
}
// Quit immediately for errors other than io.EOF
if err != nil && err != io.EOF {
s.err = err
return
}
// Mark current encrypting/decrypting as finished
s.eof = (err == io.EOF)
if s.eof && s.cryptMode == encryptMode {
if srcPart, err = pkcs5Pad(s.srcBuf.Bytes(), aes.BlockSize); err != nil {
s.err = err
return
}
} else {
_, _ = s.srcBuf.Read(srcPart)
}
// Crypt srcPart content
for len(srcPart) > 0 {
// Crypt current part
s.blockMode.CryptBlocks(dstPart, srcPart[:aes.BlockSize])
// Unpad when this is the last part and we are decrypting
if s.eof && s.cryptMode == decryptMode {
dstPart, err = pkcs5Unpad(dstPart, aes.BlockSize)
if err != nil {
s.err = err
return
}
}
// Send crypted data to dstBuf
if _, wErr := s.dstBuf.Write(dstPart); wErr != nil {
s.err = wErr
return
}
// Move to the next part
srcPart = srcPart[aes.BlockSize:]
}
}
return
}
// pkcs5Unpad - unpad a set of bytes following the PKCS#5 algorithm
func pkcs5Unpad(buf []byte, blockSize int) ([]byte, error) {
n := len(buf)
if n == 0 {
return nil, errors.New("buffer is empty")
}
pad := int(buf[n-1])
if pad > n || pad > blockSize {
return nil, errors.New("invalid padding size")
}
return buf[:n-pad], nil
}
// pkcs5Pad - pad a set of bytes following the PKCS#5 algorithm
func pkcs5Pad(buf []byte, blockSize int) ([]byte, error) {
pad := blockSize - (len(buf) % blockSize)
padText := bytes.Repeat([]byte{byte(pad)}, pad)
return append(buf, padText...), nil
}
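An end-to-end sketch of the CBC materials above: encrypt a small stream, then decrypt it back with the recorded IV and encrypted content key. The 32-byte master key is a made-up constant:

package main

import (
    "bytes"
    "fmt"
    "io/ioutil"
    "log"
    "strings"

    "github.com/minio/minio-go/pkg/encrypt"
)

func main() {
    // 32-byte master key, i.e. AES-256; placeholder value.
    key := encrypt.NewSymmetricKey([]byte("32-bytes-long-master-key-example"))

    cbc, err := encrypt.NewCBCSecureMaterials(key)
    if err != nil {
        log.Fatal(err)
    }
    // Encrypt a small plaintext stream.
    if err := cbc.SetupEncryptMode(strings.NewReader("attack at dawn")); err != nil {
        log.Fatal(err)
    }
    ciphertext, err := ioutil.ReadAll(cbc)
    if err != nil {
        log.Fatal(err)
    }
    // Decrypt it back using the IV and encrypted content key recorded
    // during encryption.
    if err := cbc.SetupDecryptMode(bytes.NewReader(ciphertext), cbc.GetIV(), cbc.GetKey()); err != nil {
        log.Fatal(err)
    }
    plaintext, err := ioutil.ReadAll(cbc)
    if err != nil {
        log.Fatal(err)
    }
    fmt.Printf("%s\n", plaintext) // attack at dawn
}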


@ -0,0 +1,50 @@
/*
* Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2017 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
// Package encrypt implements a generic interface to encrypt any stream of data.
// Currently this package implements two types of encryption:
// - Symmetric encryption using AES.
// - Asymmetric encryption using RSA.
package encrypt
import "io"
// Materials - provides generic interface to encrypt any stream of data.
type Materials interface {
// Returns encrypted/decrypted data, io.Reader compatible.
Read(b []byte) (int, error)
// Get randomly generated IV, base64 encoded.
GetIV() (iv string)
// Get content encrypting key (cek) in encrypted form, base64 encoded.
GetKey() (key string)
// Get user provided encryption material description in
// JSON (UTF8) format. This is not used, kept for future.
GetDesc() (desc string)
// Setup encrypt mode, further calls of Read() function
// will return the encrypted form of data streamed
// by the passed reader
SetupEncryptMode(stream io.Reader) error
// Setup decrypt mode, further calls of Read() function
// will return the decrypted form of data streamed
// by the passed reader
SetupDecryptMode(stream io.Reader, iv string, key string) error
}

vendor/github.com/minio/minio-go/pkg/encrypt/keys.go generated vendored Normal file

@ -0,0 +1,165 @@
/*
* Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2017 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package encrypt
import (
"crypto/aes"
"crypto/rand"
"crypto/rsa"
"crypto/x509"
"errors"
)
// Key - generic interface to encrypt/decrypt a key.
// We use it to encrypt/decrypt the content key, which is the key
// that encrypts/decrypts the object data.
type Key interface {
// Encrypt data using the set encryption key
Encrypt([]byte) ([]byte, error)
// Decrypt data using the set encryption key
Decrypt([]byte) ([]byte, error)
}
// SymmetricKey - encrypts data with a symmetric master key
type SymmetricKey struct {
masterKey []byte
}
// Encrypt passed bytes
func (s *SymmetricKey) Encrypt(plain []byte) ([]byte, error) {
// Initialize an AES encryptor using a master key
keyBlock, err := aes.NewCipher(s.masterKey)
if err != nil {
return []byte{}, err
}
// Pad the key before encryption
plain, _ = pkcs5Pad(plain, aes.BlockSize)
encKey := []byte{}
encPart := make([]byte, aes.BlockSize)
// Encrypt the passed key by block
for {
if len(plain) < aes.BlockSize {
break
}
// Encrypt the passed key
keyBlock.Encrypt(encPart, plain[:aes.BlockSize])
// Add the encrypted block to the total encrypted key
encKey = append(encKey, encPart...)
// Pass to the next plain block
plain = plain[aes.BlockSize:]
}
return encKey, nil
}
// Decrypt passed bytes
func (s *SymmetricKey) Decrypt(cipher []byte) ([]byte, error) {
// Initialize AES decrypter
keyBlock, err := aes.NewCipher(s.masterKey)
if err != nil {
return nil, err
}
var plain []byte
plainPart := make([]byte, aes.BlockSize)
// Decrypt the encrypted data block by block
for {
if len(cipher) < aes.BlockSize {
break
}
keyBlock.Decrypt(plainPart, cipher[:aes.BlockSize])
// Add the decrypted block to the total result
plain = append(plain, plainPart...)
// Pass to the next cipher block
cipher = cipher[aes.BlockSize:]
}
// Unpad the resulted plain data
plain, err = pkcs5Unpad(plain, aes.BlockSize)
if err != nil {
return nil, err
}
return plain, nil
}
// NewSymmetricKey generates a new encrypt/decrypt crypto using
// an AES master key
func NewSymmetricKey(b []byte) *SymmetricKey {
return &SymmetricKey{masterKey: b}
}
// AsymmetricKey - struct which encrypts/decrypts data
// using RSA public/private certificates
type AsymmetricKey struct {
publicKey *rsa.PublicKey
privateKey *rsa.PrivateKey
}
// Encrypt data using the public key
func (a *AsymmetricKey) Encrypt(plain []byte) ([]byte, error) {
cipher, err := rsa.EncryptPKCS1v15(rand.Reader, a.publicKey, plain)
if err != nil {
return nil, err
}
return cipher, nil
}
// Decrypt data using the private key
func (a *AsymmetricKey) Decrypt(cipher []byte) ([]byte, error) {
cipher, err := rsa.DecryptPKCS1v15(rand.Reader, a.privateKey, cipher)
if err != nil {
return nil, err
}
return cipher, nil
}
// NewAsymmetricKey - generates a crypto module able to encrypt/decrypt
// data using a pair of private and public keys
func NewAsymmetricKey(privData []byte, pubData []byte) (*AsymmetricKey, error) {
// Parse private key from passed data
priv, err := x509.ParsePKCS8PrivateKey(privData)
if err != nil {
return nil, err
}
privKey, ok := priv.(*rsa.PrivateKey)
if !ok {
return nil, errors.New("not a valid private key")
}
// Parse public key from passed data
pub, err := x509.ParsePKIXPublicKey(pubData)
if err != nil {
return nil, err
}
pubKey, ok := pub.(*rsa.PublicKey)
if !ok {
return nil, errors.New("not a valid public key")
}
// Associate the private key with the passed public key
privKey.PublicKey = *pubKey
return &AsymmetricKey{
publicKey: pubKey,
privateKey: privKey,
}, nil
}
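A small roundtrip sketch for SymmetricKey; the 16-byte (AES-128) master key is a placeholder:

package main

import (
    "fmt"
    "log"

    "github.com/minio/minio-go/pkg/encrypt"
)

func main() {
    key := encrypt.NewSymmetricKey([]byte("0123456789abcdef"))

    // Encrypt pads the input to the AES block size and encrypts it
    // block by block under the master key.
    ciphertext, err := key.Encrypt([]byte("content-key-material"))
    if err != nil {
        log.Fatal(err)
    }
    plaintext, err := key.Decrypt(ciphertext)
    if err != nil {
        log.Fatal(err)
    }
    fmt.Printf("%s\n", plaintext) // content-key-material
}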


@ -34,7 +34,7 @@ const (
BucketPolicyWriteOnly = "writeonly"
)
// isValidBucketPolicy - Is provided policy value supported.
// IsValidBucketPolicy - returns true if policy is valid and supported, false otherwise.
func (p BucketPolicy) IsValidBucketPolicy() bool {
switch p {
case BucketPolicyNone, BucketPolicyReadOnly, BucketPolicyReadWrite, BucketPolicyWriteOnly:
@ -508,7 +508,7 @@ func getObjectPolicy(statement Statement) (readOnly bool, writeOnly bool) {
return readOnly, writeOnly
}
// Returns policy of given bucket name, prefix in given statements.
// GetPolicy - Returns policy of given bucket name, prefix in given statements.
func GetPolicy(statements []Statement, bucketName string, prefix string) BucketPolicy {
bucketResource := awsResourcePrefix + bucketName
objectResource := awsResourcePrefix + bucketName + "/" + prefix + "*"
@ -563,7 +563,7 @@ func GetPolicy(statements []Statement, bucketName string, prefix string) BucketP
return policy
}
// GetPolicies returns a map of policies rules of given bucket name, prefix in given statements.
// GetPolicies - returns a map of policies rules of given bucket name, prefix in given statements.
func GetPolicies(statements []Statement, bucketName string) map[string]BucketPolicy {
policyRules := map[string]BucketPolicy{}
objResources := set.NewStringSet()
@ -590,8 +590,7 @@ func GetPolicies(statements []Statement, bucketName string) map[string]BucketPol
return policyRules
}
// Returns new statements containing policy of given bucket name and
// prefix are appended.
// SetPolicy - Returns new statements containing policy of given bucket name and prefix are appended.
func SetPolicy(statements []Statement, policy BucketPolicy, bucketName string, prefix string) []Statement {
out := removeStatements(statements, bucketName, prefix)
// fmt.Println("out = ")


@ -0,0 +1,285 @@
/*
* Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2017 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package s3signer
import (
"bytes"
"encoding/hex"
"fmt"
"io"
"net/http"
"strconv"
"strings"
"time"
)
// Reference for constants used below -
// http://docs.aws.amazon.com/AmazonS3/latest/API/sigv4-streaming.html#example-signature-calculations-streaming
const (
streamingSignAlgorithm = "STREAMING-AWS4-HMAC-SHA256-PAYLOAD"
streamingEncoding = "aws-chunked"
streamingPayloadHdr = "AWS4-HMAC-SHA256-PAYLOAD"
emptySHA256 = "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855"
payloadChunkSize = 64 * 1024
chunkSigConstLen = 17 // ";chunk-signature="
signatureStrLen = 64 // e.g. "f2ca1bb6c7e907d06dafe4687e579fce76b37e4e93b7605022da52e6ccc26fd2"
crlfLen = 2 // CRLF
)
// Request headers to be ignored while calculating seed signature for
// a request.
var ignoredStreamingHeaders = map[string]bool{
"Authorization": true,
"User-Agent": true,
"Content-Type": true,
}
// getSignedChunkLength - calculates the on-wire length of a signed chunk (header, signature, data and trailing CRLFs)
func getSignedChunkLength(chunkDataSize int64) int64 {
return int64(len(fmt.Sprintf("%x", chunkDataSize))) +
chunkSigConstLen +
signatureStrLen +
crlfLen +
chunkDataSize +
crlfLen
}
// getStreamLength - calculates the length of the overall stream (data + metadata)
func getStreamLength(dataLen, chunkSize int64) int64 {
if dataLen <= 0 {
return 0
}
chunksCount := int64(dataLen / chunkSize)
remainingBytes := int64(dataLen % chunkSize)
streamLen := int64(0)
streamLen += chunksCount * getSignedChunkLength(chunkSize)
if remainingBytes > 0 {
streamLen += getSignedChunkLength(remainingBytes)
}
streamLen += getSignedChunkLength(0)
return streamLen
}
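To make the arithmetic concrete, a standalone sketch mirroring the two helpers above for a hypothetical 65 KiB payload: one full 64 KiB chunk, a 1 KiB remainder and the terminating zero-length chunk.

package main

import "fmt"

// Mirrors getSignedChunkLength: hex length of the chunk size, plus
// ";chunk-signature=" (17), the signature (64) and two CRLFs (2+2),
// plus the chunk data itself.
func signedChunkLen(n int64) int64 {
    return int64(len(fmt.Sprintf("%x", n))) + 17 + 64 + 2 + n + 2
}

func main() {
    const chunk = 64 * 1024
    data := int64(65 * 1024)
    total := (data/chunk)*signedChunkLen(chunk) + // one full 64 KiB chunk: 65626
        signedChunkLen(data%chunk) + // 1 KiB remainder chunk: 1112
        signedChunkLen(0) // terminating zero-length chunk: 86
    fmt.Println(total) // 66824 bytes on the wire
}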
// buildChunkStringToSign - returns the string to sign given chunk data
// and previous signature.
func buildChunkStringToSign(t time.Time, region, previousSig string, chunkData []byte) string {
stringToSignParts := []string{
streamingPayloadHdr,
t.Format(iso8601DateFormat),
getScope(region, t),
previousSig,
emptySHA256,
hex.EncodeToString(sum256(chunkData)),
}
return strings.Join(stringToSignParts, "\n")
}
// prepareStreamingRequest - prepares a request with appropriate
// headers before computing the seed signature.
func prepareStreamingRequest(req *http.Request, dataLen int64, timestamp time.Time) {
// Set x-amz-content-sha256 header.
req.Header.Set("X-Amz-Content-Sha256", streamingSignAlgorithm)
req.Header.Set("Content-Encoding", streamingEncoding)
req.Header.Set("X-Amz-Date", timestamp.Format(iso8601DateFormat))
// Set content length with streaming signature for each chunk included.
req.ContentLength = getStreamLength(dataLen, int64(payloadChunkSize))
req.Header.Set("x-amz-decoded-content-length", strconv.FormatInt(dataLen, 10))
}
// buildChunkHeader - returns the chunk header.
// e.g. string(IntHexBase(chunk-size)) + ";chunk-signature=" + signature + \r\n + chunk-data + \r\n
func buildChunkHeader(chunkLen int64, signature string) []byte {
return []byte(strconv.FormatInt(chunkLen, 16) + ";chunk-signature=" + signature + "\r\n")
}
// buildChunkSignature - returns chunk signature for a given chunk and previous signature.
func buildChunkSignature(chunkData []byte, reqTime time.Time, region,
previousSignature, secretAccessKey string) string {
chunkStringToSign := buildChunkStringToSign(reqTime, region,
previousSignature, chunkData)
signingKey := getSigningKey(secretAccessKey, region, reqTime)
return getSignature(signingKey, chunkStringToSign)
}
// setSeedSignature - computes and sets the seed signature for a given request.
func (s *StreamingReader) setSeedSignature(req *http.Request) {
// Get canonical request
canonicalRequest := getCanonicalRequest(*req, ignoredStreamingHeaders)
// Get string to sign from canonical request.
stringToSign := getStringToSignV4(s.reqTime, s.region, canonicalRequest)
signingKey := getSigningKey(s.secretAccessKey, s.region, s.reqTime)
// Calculate signature.
s.seedSignature = getSignature(signingKey, stringToSign)
}
// StreamingReader implements chunked upload signing as a reader on
// top of req.Body's io.ReadCloser: chunk header;data;... repeated
type StreamingReader struct {
accessKeyID string
secretAccessKey string
region string
prevSignature string
seedSignature string
contentLen int64 // Content-Length from req header
baseReadCloser io.ReadCloser // underlying io.Reader
bytesRead int64 // bytes read from underlying io.Reader
buf bytes.Buffer // holds signed chunk
chunkBuf []byte // holds raw data read from req Body
chunkBufLen int // no. of bytes read so far into chunkBuf
done bool // done reading the underlying reader to EOF
reqTime time.Time
chunkNum int
totalChunks int
lastChunkSize int
}
// signChunk - signs a chunk of chunkLen size read from s.baseReadCloser.
func (s *StreamingReader) signChunk(chunkLen int) {
// Compute chunk signature for next header
signature := buildChunkSignature(s.chunkBuf[:chunkLen], s.reqTime,
s.region, s.prevSignature, s.secretAccessKey)
// For next chunk signature computation
s.prevSignature = signature
// Write chunk header into streaming buffer
chunkHdr := buildChunkHeader(int64(chunkLen), signature)
s.buf.Write(chunkHdr)
// Write chunk data into streaming buffer
s.buf.Write(s.chunkBuf[:chunkLen])
// Write the chunk trailer.
s.buf.Write([]byte("\r\n"))
// Reset chunkBufLen for next chunk read.
s.chunkBufLen = 0
s.chunkNum++
}
// setStreamingAuthHeader - builds and sets authorization header value
// for streaming signature.
func (s *StreamingReader) setStreamingAuthHeader(req *http.Request) {
credential := GetCredential(s.accessKeyID, s.region, s.reqTime)
authParts := []string{
signV4Algorithm + " Credential=" + credential,
"SignedHeaders=" + getSignedHeaders(*req, ignoredStreamingHeaders),
"Signature=" + s.seedSignature,
}
// Set authorization header.
auth := strings.Join(authParts, ",")
req.Header.Set("Authorization", auth)
}
// StreamingSignV4 - provides chunked upload signatureV4 support by
// implementing io.Reader.
func StreamingSignV4(req *http.Request, accessKeyID, secretAccessKey,
region string, dataLen int64, reqTime time.Time) *http.Request {
// Set headers needed for streaming signature.
prepareStreamingRequest(req, dataLen, reqTime)
stReader := &StreamingReader{
baseReadCloser: req.Body,
accessKeyID: accessKeyID,
secretAccessKey: secretAccessKey,
region: region,
reqTime: reqTime,
chunkBuf: make([]byte, payloadChunkSize),
contentLen: dataLen,
chunkNum: 1,
totalChunks: int((dataLen+payloadChunkSize-1)/payloadChunkSize) + 1,
lastChunkSize: int(dataLen % payloadChunkSize),
}
// Add the request headers required for chunk upload signing.
// Compute the seed signature.
stReader.setSeedSignature(req)
// Set the authorization header with the seed signature.
stReader.setStreamingAuthHeader(req)
// Set seed signature as prevSignature for subsequent
// streaming signing process.
stReader.prevSignature = stReader.seedSignature
req.Body = stReader
return req
}
// Read - performs chunked upload signing, providing an
// io.Reader interface.
func (s *StreamingReader) Read(buf []byte) (int, error) {
switch {
// After the last chunk is read from underlying reader, we
// never re-fill s.buf.
case s.done:
// s.buf will be (re-)filled with the next chunk when it has fewer
// bytes than asked for.
case s.buf.Len() < len(buf):
s.chunkBufLen = 0
for {
n1, err := s.baseReadCloser.Read(s.chunkBuf[s.chunkBufLen:])
if err == nil || err == io.ErrUnexpectedEOF {
s.chunkBufLen += n1
s.bytesRead += int64(n1)
if s.chunkBufLen == payloadChunkSize ||
(s.chunkNum == s.totalChunks-1 &&
s.chunkBufLen == s.lastChunkSize) {
// Sign the chunk and write it to s.buf.
s.signChunk(s.chunkBufLen)
break
}
} else if err == io.EOF {
// No more data left in baseReader - last chunk.
// Done reading the last chunk from baseReader.
s.done = true
// Bytes read from baseReader differ from the
// content length provided.
if s.bytesRead != s.contentLen {
return 0, io.ErrUnexpectedEOF
}
// Sign the chunk and write it to s.buf.
s.signChunk(0)
break
} else {
return 0, err
}
}
}
return s.buf.Read(buf)
}
// Close - this method makes underlying io.ReadCloser's Close method available.
func (s *StreamingReader) Close() error {
return s.baseReadCloser.Close()
}
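A usage sketch for the streaming signer; URL, credentials and region are placeholders:

package main

import (
    "fmt"
    "net/http"
    "strings"
    "time"

    "github.com/minio/minio-go/pkg/s3signer"
)

func main() {
    body := strings.NewReader("hello streaming world")
    req, err := http.NewRequest("PUT", "https://play.minio.io:9000/my-bucket/hello.txt", body)
    if err != nil {
        panic(err)
    }
    // Wraps req.Body in a StreamingReader and sets the seed signature
    // headers; each 64 KiB chunk is then signed as it is read.
    req = s3signer.StreamingSignV4(req, "ACCESS-KEY", "SECRET-KEY",
        "us-east-1", int64(body.Len()), time.Now().UTC())
    fmt.Println(req.Header.Get("Authorization"))
}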


@ -29,6 +29,8 @@ import (
"strconv"
"strings"
"time"
"github.com/minio/minio-go/pkg/s3utils"
)
// Signature and API related constants.
@ -45,16 +47,16 @@ func encodeURL2Path(u *url.URL) (path string) {
bucketName := hostSplits[0]
path = "/" + bucketName
path += u.Path
path = urlEncodePath(path)
path = s3utils.EncodePath(path)
return
}
if strings.HasSuffix(u.Host, ".storage.googleapis.com") {
path = "/" + strings.TrimSuffix(u.Host, ".storage.googleapis.com")
path += u.Path
path = urlEncodePath(path)
path = s3utils.EncodePath(path)
return
}
path = urlEncodePath(u.Path)
path = s3utils.EncodePath(u.Path)
return
}
@ -95,10 +97,10 @@ func PreSignV2(req http.Request, accessKeyID, secretAccessKey string, expires in
query.Set("Expires", strconv.FormatInt(epochExpires, 10))
// Encode query and save.
req.URL.RawQuery = queryEncode(query)
req.URL.RawQuery = s3utils.QueryEncode(query)
// Save signature finally.
req.URL.RawQuery += "&Signature=" + urlEncodePath(signature)
req.URL.RawQuery += "&Signature=" + s3utils.EncodePath(signature)
// Return.
return &req
@ -287,7 +289,7 @@ func writeCanonicalizedResource(buf *bytes.Buffer, req http.Request, isPreSign b
// Get encoded URL path.
if len(requestURL.Query()) > 0 {
// Keep the usual queries unescaped for string to sign.
query, _ := url.QueryUnescape(queryEncode(requestURL.Query()))
query, _ := url.QueryUnescape(s3utils.QueryEncode(requestURL.Query()))
path = path + "?" + query
}
buf.WriteString(path)
@ -314,7 +316,7 @@ func writeCanonicalizedResource(buf *bytes.Buffer, req http.Request, isPreSign b
// Request parameters
if len(vv[0]) > 0 {
buf.WriteByte('=')
buf.WriteString(strings.Replace(url.QueryEscape(vv[0]), "+", "%20", -1))
buf.WriteString(vv[0])
}
}
}


@ -24,6 +24,8 @@ import (
"strconv"
"strings"
"time"
"github.com/minio/minio-go/pkg/s3utils"
)
// Signature and API related constants.
@ -68,7 +70,7 @@ const (
///
/// Is skipped for obvious reasons
///
var ignoredHeaders = map[string]bool{
var v4IgnoredHeaders = map[string]bool{
"Authorization": true,
"Content-Type": true,
"Content-Length": true,
@ -120,7 +122,7 @@ func getHashedPayload(req http.Request) string {
// getCanonicalHeaders generate a list of request headers for
// signature.
func getCanonicalHeaders(req http.Request) string {
func getCanonicalHeaders(req http.Request, ignoredHeaders map[string]bool) string {
var headers []string
vals := make(map[string][]string)
for k, vv := range req.Header {
@ -159,7 +161,7 @@ func getCanonicalHeaders(req http.Request) string {
// getSignedHeaders generate all signed request headers.
// i.e lexically sorted, semicolon-separated list of lowercase
// request header names.
func getSignedHeaders(req http.Request) string {
func getSignedHeaders(req http.Request, ignoredHeaders map[string]bool) string {
var headers []string
for k := range req.Header {
if _, ok := ignoredHeaders[http.CanonicalHeaderKey(k)]; ok {
@ -181,14 +183,14 @@ func getSignedHeaders(req http.Request) string {
// <CanonicalHeaders>\n
// <SignedHeaders>\n
// <HashedPayload>
func getCanonicalRequest(req http.Request) string {
func getCanonicalRequest(req http.Request, ignoredHeaders map[string]bool) string {
req.URL.RawQuery = strings.Replace(req.URL.Query().Encode(), "+", "%20", -1)
canonicalRequest := strings.Join([]string{
req.Method,
urlEncodePath(req.URL.Path),
s3utils.EncodePath(req.URL.Path),
req.URL.RawQuery,
getCanonicalHeaders(req),
getSignedHeaders(req),
getCanonicalHeaders(req, ignoredHeaders),
getSignedHeaders(req, ignoredHeaders),
getHashedPayload(req),
}, "\n")
return canonicalRequest
@ -217,7 +219,7 @@ func PreSignV4(req http.Request, accessKeyID, secretAccessKey, location string,
credential := GetCredential(accessKeyID, location, t)
// Get all signed headers.
signedHeaders := getSignedHeaders(req)
signedHeaders := getSignedHeaders(req, v4IgnoredHeaders)
// Set URL query.
query := req.URL.Query()
@ -229,7 +231,7 @@ func PreSignV4(req http.Request, accessKeyID, secretAccessKey, location string,
req.URL.RawQuery = query.Encode()
// Get canonical request.
canonicalRequest := getCanonicalRequest(req)
canonicalRequest := getCanonicalRequest(req, v4IgnoredHeaders)
// Get string to sign from canonical request.
stringToSign := getStringToSignV4(t, location, canonicalRequest)
@ -271,7 +273,7 @@ func SignV4(req http.Request, accessKeyID, secretAccessKey, location string) *ht
req.Header.Set("X-Amz-Date", t.Format(iso8601DateFormat))
// Get canonical request.
canonicalRequest := getCanonicalRequest(req)
canonicalRequest := getCanonicalRequest(req, v4IgnoredHeaders)
// Get string to sign from canonical request.
stringToSign := getStringToSignV4(t, location, canonicalRequest)
@ -283,7 +285,7 @@ func SignV4(req http.Request, accessKeyID, secretAccessKey, location string) *ht
credential := GetCredential(accessKeyID, location, t)
// Get all signed headers.
signedHeaders := getSignedHeaders(req)
signedHeaders := getSignedHeaders(req, v4IgnoredHeaders)
// Calculate signature.
signature := getSignature(signingKey, stringToSign)


@ -17,15 +17,8 @@
package s3signer
import (
"bytes"
"crypto/hmac"
"crypto/sha256"
"encoding/hex"
"net/url"
"regexp"
"sort"
"strings"
"unicode/utf8"
)
// unsignedPayload - value to be set to X-Amz-Content-Sha256 header when
@ -44,75 +37,3 @@ func sumHMAC(key []byte, data []byte) []byte {
hash.Write(data)
return hash.Sum(nil)
}
//expects ascii encoded strings - from output of urlEncodePath
func percentEncodeSlash(s string) string {
return strings.Replace(s, "/", "%2F", -1)
}
// queryEncode - encodes query values in their URL encoded form. In
// addition to the percent encoding performed by urlEncodePath() used
// here, it also percent encodes '/' (forward slash)
func queryEncode(v url.Values) string {
if v == nil {
return ""
}
var buf bytes.Buffer
keys := make([]string, 0, len(v))
for k := range v {
keys = append(keys, k)
}
sort.Strings(keys)
for _, k := range keys {
vs := v[k]
prefix := percentEncodeSlash(urlEncodePath(k)) + "="
for _, v := range vs {
if buf.Len() > 0 {
buf.WriteByte('&')
}
buf.WriteString(prefix)
buf.WriteString(percentEncodeSlash(urlEncodePath(v)))
}
}
return buf.String()
}
// urlEncodePath encode the strings from UTF-8 byte representations to HTML hex escape sequences
//
// This is necessary since regular url.Parse() and url.Encode() functions do not support UTF-8
// non english characters cannot be parsed due to the nature in which url.Encode() is written
//
// This function on the other hand is a direct replacement for url.Encode() technique to support
// pretty much every UTF-8 character.
func urlEncodePath(pathName string) string {
// if object matches reserved string, no need to encode them
reservedNames := regexp.MustCompile("^[a-zA-Z0-9-_.~/]+$")
if reservedNames.MatchString(pathName) {
return pathName
}
var encodedPathname string
for _, s := range pathName {
if 'A' <= s && s <= 'Z' || 'a' <= s && s <= 'z' || '0' <= s && s <= '9' { // §2.3 Unreserved characters (mark)
encodedPathname = encodedPathname + string(s)
continue
}
switch s {
case '-', '_', '.', '~', '/': // §2.3 Unreserved characters (mark)
encodedPathname = encodedPathname + string(s)
continue
default:
len := utf8.RuneLen(s)
if len < 0 {
// if utf8 cannot convert return the same string as is
return pathName
}
u := make([]byte, len)
utf8.EncodeRune(u, s)
for _, r := range u {
hex := hex.EncodeToString([]byte{r})
encodedPathname = encodedPathname + "%" + strings.ToUpper(hex)
}
}
}
return encodedPathname
}


@ -85,10 +85,6 @@ func IsAmazonEndpoint(endpointURL url.URL) bool {
return true
}
if IsAmazonS3AccelerateEndpoint(endpointURL) {
return true
}
return endpointURL.Host == "s3.amazonaws.com"
}
@ -105,11 +101,6 @@ func IsAmazonChinaEndpoint(endpointURL url.URL) bool {
return endpointURL.Host == "s3.cn-north-1.amazonaws.com.cn"
}
// IsAmazonS3AccelerateEndpoint - Match if it is an Amazon S3 Accelerate
func IsAmazonS3AccelerateEndpoint(endpointURL url.URL) bool {
return strings.HasSuffix(endpointURL.Host, ".s3-accelerate.amazonaws.com")
}
// IsGoogleEndpoint - Match if it is exactly Google cloud storage endpoint.
func IsGoogleEndpoint(endpointURL url.URL) bool {
if endpointURL == sentinelURL {

vendor/github.com/minio/minio-go/post-policy.go generated vendored Normal file

@ -0,0 +1,209 @@
package minio
import (
"encoding/base64"
"fmt"
"strings"
"time"
)
// expirationDateFormat date format for expiration key in json policy.
const expirationDateFormat = "2006-01-02T15:04:05.999Z"
// policyCondition explanation:
// http://docs.aws.amazon.com/AmazonS3/latest/API/sigv4-HTTPPOSTConstructPolicy.html
//
// Example:
//
// policyCondition {
// matchType: "$eq",
// key: "$Content-Type",
// value: "image/png",
// }
//
type policyCondition struct {
matchType string
condition string
value string
}
// PostPolicy - Provides strict static type conversion and validation
// for Amazon S3's POST policy JSON string.
type PostPolicy struct {
// Expiration date and time of the POST policy.
expiration time.Time
// Collection of different policy conditions.
conditions []policyCondition
// ContentLengthRange minimum and maximum allowable size for the
// uploaded content.
contentLengthRange struct {
min int64
max int64
}
// Post form data.
formData map[string]string
}
// NewPostPolicy - Instantiate new post policy.
func NewPostPolicy() *PostPolicy {
p := &PostPolicy{}
p.conditions = make([]policyCondition, 0)
p.formData = make(map[string]string)
return p
}
// SetExpires - Sets expiration time for the new policy.
func (p *PostPolicy) SetExpires(t time.Time) error {
if t.IsZero() {
return ErrInvalidArgument("No expiry time set.")
}
p.expiration = t
return nil
}
// SetKey - Sets an object name for the policy based upload.
func (p *PostPolicy) SetKey(key string) error {
if strings.TrimSpace(key) == "" {
return ErrInvalidArgument("Object name is empty.")
}
policyCond := policyCondition{
matchType: "eq",
condition: "$key",
value: key,
}
if err := p.addNewPolicy(policyCond); err != nil {
return err
}
p.formData["key"] = key
return nil
}
// SetKeyStartsWith - Sets an object name that a policy based upload
// can start with.
func (p *PostPolicy) SetKeyStartsWith(keyStartsWith string) error {
if strings.TrimSpace(keyStartsWith) == "" {
return ErrInvalidArgument("Object prefix is empty.")
}
policyCond := policyCondition{
matchType: "starts-with",
condition: "$key",
value: keyStartsWith,
}
if err := p.addNewPolicy(policyCond); err != nil {
return err
}
p.formData["key"] = keyStartsWith
return nil
}
// SetBucket - Sets the bucket to which objects will be uploaded.
func (p *PostPolicy) SetBucket(bucketName string) error {
if strings.TrimSpace(bucketName) == "" {
return ErrInvalidArgument("Bucket name is empty.")
}
policyCond := policyCondition{
matchType: "eq",
condition: "$bucket",
value: bucketName,
}
if err := p.addNewPolicy(policyCond); err != nil {
return err
}
p.formData["bucket"] = bucketName
return nil
}
// SetContentType - Sets content-type of the object for this policy
// based upload.
func (p *PostPolicy) SetContentType(contentType string) error {
if strings.TrimSpace(contentType) == "" {
return ErrInvalidArgument("No content type specified.")
}
policyCond := policyCondition{
matchType: "eq",
condition: "$Content-Type",
value: contentType,
}
if err := p.addNewPolicy(policyCond); err != nil {
return err
}
p.formData["Content-Type"] = contentType
return nil
}
// SetContentLengthRange - Set new min and max content length
// condition for all incoming uploads.
func (p *PostPolicy) SetContentLengthRange(min, max int64) error {
if min > max {
return ErrInvalidArgument("Minimum limit is larger than maximum limit.")
}
if min < 0 {
return ErrInvalidArgument("Minimum limit cannot be negative.")
}
if max < 0 {
return ErrInvalidArgument("Maximum limit cannot be negative.")
}
p.contentLengthRange.min = min
p.contentLengthRange.max = max
return nil
}
// SetSuccessStatusAction - Sets the status success code of the object for this policy
// based upload.
func (p *PostPolicy) SetSuccessStatusAction(status string) error {
if strings.TrimSpace(status) == "" {
return ErrInvalidArgument("Status is empty")
}
policyCond := policyCondition{
matchType: "eq",
condition: "$success_action_status",
value: status,
}
if err := p.addNewPolicy(policyCond); err != nil {
return err
}
p.formData["success_action_status"] = status
return nil
}
// addNewPolicy - internal helper to validate adding new policies.
func (p *PostPolicy) addNewPolicy(policyCond policyCondition) error {
if policyCond.matchType == "" || policyCond.condition == "" || policyCond.value == "" {
return ErrInvalidArgument("Policy fields are empty.")
}
p.conditions = append(p.conditions, policyCond)
return nil
}
// String - implements the Stringer interface, printing the policy as a JSON formatted string.
func (p PostPolicy) String() string {
return string(p.marshalJSON())
}
// marshalJSON - Provides Marshalled JSON in bytes.
func (p PostPolicy) marshalJSON() []byte {
expirationStr := `"expiration":"` + p.expiration.Format(expirationDateFormat) + `"`
var conditionsStr string
conditions := []string{}
for _, po := range p.conditions {
conditions = append(conditions, fmt.Sprintf("[\"%s\",\"%s\",\"%s\"]", po.matchType, po.condition, po.value))
}
if p.contentLengthRange.min != 0 || p.contentLengthRange.max != 0 {
conditions = append(conditions, fmt.Sprintf("[\"content-length-range\", %d, %d]",
p.contentLengthRange.min, p.contentLengthRange.max))
}
if len(conditions) > 0 {
conditionsStr = `"conditions":[` + strings.Join(conditions, ",") + "]"
}
retStr := "{"
retStr = retStr + expirationStr + ","
retStr = retStr + conditionsStr
retStr = retStr + "}"
return []byte(retStr)
}
// base64 - Produces base64 of PostPolicy's Marshalled json.
func (p PostPolicy) base64() string {
return base64.StdEncoding.EncodeToString(p.marshalJSON())
}
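A sketch of building a POST policy with the setters above; bucket, key and limits are placeholder values:

package main

import (
    "fmt"
    "time"

    minio "github.com/minio/minio-go"
)

func main() {
    p := minio.NewPostPolicy()
    // Each setter records a policy condition plus the matching form field.
    _ = p.SetBucket("my-bucket")
    _ = p.SetKey("uploads/photo.jpg")
    _ = p.SetContentType("image/jpeg")
    _ = p.SetExpires(time.Now().UTC().Add(10 * time.Minute))
    _ = p.SetContentLengthRange(1, 10*1024*1024) // 1 B to 10 MiB

    // This JSON is what gets base64-encoded and signed for the browser
    // POST form, typically via the client's presigned POST policy call.
    fmt.Println(p)
}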

vendor/github.com/minio/minio-go/retry-continous.go generated vendored Normal file

@ -0,0 +1,52 @@
package minio
import "time"
// newRetryTimerContinous creates a timer with exponentially increasing delays forever.
func (c Client) newRetryTimerContinous(unit time.Duration, cap time.Duration, jitter float64, doneCh chan struct{}) <-chan int {
attemptCh := make(chan int)
// normalize jitter to the range [0, 1.0]
if jitter < NoJitter {
jitter = NoJitter
}
if jitter > MaxJitter {
jitter = MaxJitter
}
// computes the exponential backoff duration according to
// https://www.awsarchitectureblog.com/2015/03/backoff.html
exponentialBackoffWait := func(attempt int) time.Duration {
// 1<<uint(attempt) below could overflow, so limit the value of attempt
maxAttempt := 30
if attempt > maxAttempt {
attempt = maxAttempt
}
//sleep = random_between(0, min(cap, base * 2 ** attempt))
sleep := unit * time.Duration(1<<uint(attempt))
if sleep > cap {
sleep = cap
}
if jitter != NoJitter {
sleep -= time.Duration(c.random.Float64() * float64(sleep) * jitter)
}
return sleep
}
go func() {
defer close(attemptCh)
var nextBackoff int
for {
select {
// Attempts start from 0.
case attemptCh <- nextBackoff:
nextBackoff++
case <-doneCh:
// Stop the routine.
return
}
time.Sleep(exponentialBackoffWait(nextBackoff))
}
}()
return attemptCh
}
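The attempt channel above is meant to drive an unbounded retry loop; closing doneCh is the only way to stop the producer goroutine. A standalone sketch of the same produce/consume pattern, re-implemented here without jitter since newRetryTimerContinous is unexported:

package main

import (
	"fmt"
	"time"
)

// retryTimer mirrors the producer above: it emits attempt numbers forever,
// sleeping with capped exponential backoff, until doneCh is closed.
func retryTimer(unit, maxWait time.Duration, doneCh chan struct{}) <-chan int {
	attemptCh := make(chan int)
	go func() {
		defer close(attemptCh)
		for attempt := 0; ; attempt++ {
			select {
			case attemptCh <- attempt:
			case <-doneCh:
				return
			}
			n := attempt
			if n > 30 { // cap the shift so 1<<n cannot overflow
				n = 30
			}
			sleep := unit * time.Duration(1<<uint(n))
			if sleep > maxWait {
				sleep = maxWait
			}
			time.Sleep(sleep)
		}
	}()
	return attemptCh
}

func main() {
	doneCh := make(chan struct{})
	defer close(doneCh) // stops the producer goroutine on exit
	for attempt := range retryTimer(time.Millisecond, 10*time.Millisecond, doneCh) {
		fmt.Println("attempt", attempt)
		if attempt == 3 { // pretend the operation succeeded on the 4th try
			break
		}
	}
}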

vendor/github.com/minio/minio-go/retry.go generated vendored Normal file

@ -0,0 +1,152 @@
/*
* Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015, 2016 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package minio
import (
"net"
"net/http"
"net/url"
"strings"
"time"
)
// MaxRetry is the maximum number of retries before stopping.
var MaxRetry = 5
// MaxJitter will randomize over the full exponential backoff time
const MaxJitter = 1.0
// NoJitter disables the use of jitter for randomizing the exponential backoff time
const NoJitter = 0.0
// DefaultRetryUnit - default unit multiplicative per retry.
// defaults to 1 second.
const DefaultRetryUnit = time.Second
// DefaultRetryCap - Each retry attempt never waits longer than
// this maximum time duration.
const DefaultRetryCap = time.Second * 30
// newRetryTimer creates a timer with exponentially increasing
// delays until the maximum retry attempts are reached.
func (c Client) newRetryTimer(maxRetry int, unit time.Duration, cap time.Duration, jitter float64, doneCh chan struct{}) <-chan int {
attemptCh := make(chan int)
// computes the exponential backoff duration according to
// https://www.awsarchitectureblog.com/2015/03/backoff.html
exponentialBackoffWait := func(attempt int) time.Duration {
// normalize jitter to the range [0, 1.0]
if jitter < NoJitter {
jitter = NoJitter
}
if jitter > MaxJitter {
jitter = MaxJitter
}
//sleep = random_between(0, min(cap, base * 2 ** attempt))
sleep := unit * time.Duration(1<<uint(attempt))
if sleep > cap {
sleep = cap
}
if jitter != NoJitter {
sleep -= time.Duration(c.random.Float64() * float64(sleep) * jitter)
}
return sleep
}
go func() {
defer close(attemptCh)
for i := 0; i < maxRetry; i++ {
select {
// Attempts start from 1.
case attemptCh <- i + 1:
case <-doneCh:
// Stop the routine.
return
}
time.Sleep(exponentialBackoffWait(i))
}
}()
return attemptCh
}
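A hedged sketch of how a caller inside this package might drive the bounded timer; doWithRetry is a hypothetical helper, not part of the vendored API:

package minio // sketch: same package, since newRetryTimer is unexported

// doWithRetry retries op up to MaxRetry times using the default
// backoff parameters defined above, returning the last error seen.
func (c Client) doWithRetry(op func() error) (err error) {
	doneCh := make(chan struct{})
	defer close(doneCh) // stop the timer goroutine on early return
	for range c.newRetryTimer(MaxRetry, DefaultRetryUnit, DefaultRetryCap, MaxJitter, doneCh) {
		if err = op(); err == nil {
			return nil
		}
	}
	return err
}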
// isNetErrorRetryable - is network error retryable.
func isNetErrorRetryable(err error) bool {
if err == nil {
return false
}
switch err.(type) {
case net.Error:
switch err.(type) {
case *net.DNSError, *net.OpError, net.UnknownNetworkError:
return true
case *url.Error:
// For a URL error where the remote host replied
// "connection closed", retry again.
if strings.Contains(err.Error(), "Connection closed by foreign host") {
return true
}
default:
if strings.Contains(err.Error(), "net/http: TLS handshake timeout") {
// If error is - tlsHandshakeTimeoutError, retry.
return true
} else if strings.Contains(err.Error(), "i/o timeout") {
// If error is - tcp timeoutError, retry.
return true
} else if strings.Contains(err.Error(), "connection timed out") {
// If err is a net.Dial timeout, retry.
return true
}
}
}
return false
}
// List of AWS S3 error codes which are retryable.
var retryableS3Codes = map[string]struct{}{
"RequestError": {},
"RequestTimeout": {},
"Throttling": {},
"ThrottlingException": {},
"RequestLimitExceeded": {},
"RequestThrottled": {},
"InternalError": {},
"ExpiredToken": {},
"ExpiredTokenException": {},
// Add more AWS S3 codes here.
}
// isS3CodeRetryable - is s3 error code retryable.
func isS3CodeRetryable(s3Code string) (ok bool) {
_, ok = retryableS3Codes[s3Code]
return ok
}
// List of HTTP status codes which are retryable.
var retryableHTTPStatusCodes = map[int]struct{}{
429: {}, // http.StatusTooManyRequests is not part of the Go 1.5 library, yet
http.StatusInternalServerError: {},
http.StatusBadGateway: {},
http.StatusServiceUnavailable: {},
// Add more HTTP status codes here.
}
// isHTTPStatusRetryable - is HTTP error code retryable.
func isHTTPStatusRetryable(httpStatusCode int) (ok bool) {
_, ok = retryableHTTPStatusCodes[httpStatusCode]
return ok
}
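The three predicates above gate re-entry into the retry loop. A hypothetical combined check, sketched in the same package since all three are unexported:

package minio // sketch: placed alongside retry.go

// shouldRetry combines the checks above in the order a caller
// would typically apply them: network error, then S3 error code,
// then HTTP status code.
func shouldRetry(err error, httpStatusCode int, s3Code string) bool {
	return isNetErrorRetryable(err) ||
		isS3CodeRetryable(s3Code) ||
		isHTTPStatusRetryable(httpStatusCode)
}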

vendor/github.com/minio/minio-go/s3-endpoints.go generated vendored Normal file

@ -0,0 +1,47 @@
/*
* Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package minio
// awsS3EndpointMap Amazon S3 endpoint map.
// "cn-north-1" adds support for AWS China.
var awsS3EndpointMap = map[string]string{
"us-east-1": "s3.amazonaws.com",
"us-east-2": "s3-us-east-2.amazonaws.com",
"us-west-2": "s3-us-west-2.amazonaws.com",
"us-west-1": "s3-us-west-1.amazonaws.com",
"ca-central-1": "s3.ca-central-1.amazonaws.com",
"eu-west-1": "s3-eu-west-1.amazonaws.com",
"eu-west-2": "s3-eu-west-2.amazonaws.com",
"eu-central-1": "s3-eu-central-1.amazonaws.com",
"ap-south-1": "s3-ap-south-1.amazonaws.com",
"ap-southeast-1": "s3-ap-southeast-1.amazonaws.com",
"ap-southeast-2": "s3-ap-southeast-2.amazonaws.com",
"ap-northeast-1": "s3-ap-northeast-1.amazonaws.com",
"ap-northeast-2": "s3-ap-northeast-2.amazonaws.com",
"sa-east-1": "s3-sa-east-1.amazonaws.com",
"cn-north-1": "s3.cn-north-1.amazonaws.com.cn",
}
// getS3Endpoint get Amazon S3 endpoint based on the bucket location.
func getS3Endpoint(bucketLocation string) (s3Endpoint string) {
s3Endpoint, ok := awsS3EndpointMap[bucketLocation]
if !ok {
// Default to 's3.amazonaws.com' endpoint.
s3Endpoint = "s3.amazonaws.com"
}
return s3Endpoint
}
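A quick sanity check of the lookup and its fallback, written as a hypothetical test in the same package (getS3Endpoint is unexported):

package minio

import "testing"

// TestGetS3Endpoint spot-checks a known region and the default fallback.
func TestGetS3Endpoint(t *testing.T) {
	if got := getS3Endpoint("eu-west-1"); got != "s3-eu-west-1.amazonaws.com" {
		t.Fatalf("unexpected endpoint: %s", got)
	}
	// Unknown locations fall back to the default endpoint.
	if got := getS3Endpoint("mars-east-1"); got != "s3.amazonaws.com" {
		t.Fatalf("unexpected fallback: %s", got)
	}
}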

vendor/github.com/minio/minio-go/s3-error.go generated vendored Normal file

@ -0,0 +1,60 @@
/*
* Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2017 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package minio
// Non-exhaustive list of AWS S3 standard error responses -
// http://docs.aws.amazon.com/AmazonS3/latest/API/ErrorResponses.html
var s3ErrorResponseMap = map[string]string{
"AccessDenied": "Access Denied.",
"BadDigest": "The Content-Md5 you specified did not match what we received.",
"EntityTooSmall": "Your proposed upload is smaller than the minimum allowed object size.",
"EntityTooLarge": "Your proposed upload exceeds the maximum allowed object size.",
"IncompleteBody": "You did not provide the number of bytes specified by the Content-Length HTTP header.",
"InternalError": "We encountered an internal error, please try again.",
"InvalidAccessKeyID": "The access key ID you provided does not exist in our records.",
"InvalidBucketName": "The specified bucket is not valid.",
"InvalidDigest": "The Content-Md5 you specified is not valid.",
"InvalidRange": "The requested range is not satisfiable",
"MalformedXML": "The XML you provided was not well-formed or did not validate against our published schema.",
"MissingContentLength": "You must provide the Content-Length HTTP header.",
"MissingContentMD5": "Missing required header for this request: Content-Md5.",
"MissingRequestBodyError": "Request body is empty.",
"NoSuchBucket": "The specified bucket does not exist",
"NoSuchBucketPolicy": "The bucket policy does not exist",
"NoSuchKey": "The specified key does not exist.",
"NoSuchUpload": "The specified multipart upload does not exist. The upload ID may be invalid, or the upload may have been aborted or completed.",
"NotImplemented": "A header you provided implies functionality that is not implemented",
"PreconditionFailed": "At least one of the pre-conditions you specified did not hold",
"RequestTimeTooSkewed": "The difference between the request time and the server's time is too large.",
"SignatureDoesNotMatch": "The request signature we calculated does not match the signature you provided. Check your key and signing method.",
"MethodNotAllowed": "The specified method is not allowed against this resource.",
"InvalidPart": "One or more of the specified parts could not be found.",
"InvalidPartOrder": "The list of parts was not in ascending order. The parts list must be specified in order by part number.",
"InvalidObjectState": "The operation is not valid for the current state of the object.",
"AuthorizationHeaderMalformed": "The authorization header is malformed; the region is wrong.",
"MalformedPOSTRequest": "The body of your POST request is not well-formed multipart/form-data.",
"BucketNotEmpty": "The bucket you tried to delete is not empty",
"AllAccessDisabled": "All access to this bucket has been disabled.",
"MalformedPolicy": "Policy has invalid resource.",
"MissingFields": "Missing fields in request.",
"AuthorizationQueryParametersError": "Error parsing the X-Amz-Credential parameter; the Credential is mal-formed; expecting \"<YOUR-AKID>/YYYYMMDD/REGION/SERVICE/aws4_request\".",
"MalformedDate": "Invalid date format header, expected to be in ISO8601, RFC1123 or RFC1123Z time format.",
"BucketAlreadyOwnedByYou": "Your previous request to create the named bucket succeeded and you already own it.",
"InvalidDuration": "Duration provided in the request is invalid.",
"XAmzContentSHA256Mismatch": "The provided 'x-amz-content-sha256' header does not match what was computed.",
// Add new API errors here.
}
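A sketch of how such a map is typically consumed - look up the code and fall back to whatever message the server sent; errDescription is a hypothetical helper, not part of this file:

package minio

// errDescription prefers the canned message for a known error code,
// otherwise keeps the server-provided one.
func errDescription(code, serverMsg string) string {
	if msg, ok := s3ErrorResponseMap[code]; ok {
		return msg
	}
	return serverMsg
}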

vendor/github.com/minio/minio-go/signature-type.go generated vendored Normal file

@ -0,0 +1,43 @@
/*
* Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package minio
// SignatureType is type of Authorization requested for a given HTTP request.
type SignatureType int
// Different types of supported signatures - default is Latest, i.e. SignatureV4.
const (
Latest SignatureType = iota
SignatureV4
SignatureV2
SignatureV4Streaming
)
// isV2 - is signature SignatureV2?
func (s SignatureType) isV2() bool {
return s == SignatureV2
}
// isV4 - is signature SignatureV4?
func (s SignatureType) isV4() bool {
return s == SignatureV4 || s == Latest
}
// isStreamingV4 - is signature SignatureV4Streaming?
func (s SignatureType) isStreamingV4() bool {
return s == SignatureV4Streaming
}
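The zero value Latest deliberately behaves as V4; a hypothetical in-package test makes that contract explicit:

package minio

import "testing"

// TestSignatureDefaults checks that the zero value (Latest) is treated
// as SignatureV4 and nothing else.
func TestSignatureDefaults(t *testing.T) {
	var s SignatureType // zero value is Latest
	if !s.isV4() || s.isV2() || s.isStreamingV4() {
		t.Fatal("Latest should behave as SignatureV4")
	}
}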

vendor/github.com/minio/minio-go/tempfile.go generated vendored Normal file

@ -0,0 +1,60 @@
/*
* Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package minio
import (
"io/ioutil"
"os"
"sync"
)
// tempFile - temporary file container.
type tempFile struct {
*os.File
mutex *sync.Mutex
}
// newTempFile returns a new temporary file; once closed, it automatically deletes itself.
func newTempFile(prefix string) (*tempFile, error) {
// use platform specific temp directory.
file, err := ioutil.TempFile(os.TempDir(), prefix)
if err != nil {
return nil, err
}
return &tempFile{
File: file,
mutex: &sync.Mutex{},
}, nil
}
// Close - closer wrapper to close and remove temporary file.
func (t *tempFile) Close() error {
t.mutex.Lock()
defer t.mutex.Unlock()
if t.File != nil {
// Close the file.
if err := t.File.Close(); err != nil {
return err
}
// Remove file.
if err := os.Remove(t.File.Name()); err != nil {
return err
}
t.File = nil
}
return nil
}
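Close both closes and unlinks the file, so callers need no separate cleanup. A hypothetical in-package test of that contract:

package minio

import (
	"os"
	"testing"
)

// TestTempFileCleanup verifies that Close removes the file from disk.
func TestTempFileCleanup(t *testing.T) {
	tmp, err := newTempFile("multiparts$")
	if err != nil {
		t.Fatal(err)
	}
	name := tmp.Name() // capture before Close nils the embedded *os.File
	if _, err = tmp.Write([]byte("part data")); err != nil {
		t.Fatal(err)
	}
	if err = tmp.Close(); err != nil {
		t.Fatal(err)
	}
	if _, err = os.Stat(name); !os.IsNotExist(err) {
		t.Fatalf("expected %s to be removed on Close", name)
	}
}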

vendor/github.com/minio/minio-go/utils.go generated vendored Normal file

@ -0,0 +1,227 @@
/*
* Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package minio
import (
"crypto/md5"
"crypto/sha256"
"encoding/xml"
"io"
"io/ioutil"
"net"
"net/http"
"net/url"
"regexp"
"strings"
"time"
"unicode/utf8"
"github.com/minio/minio-go/pkg/s3utils"
)
// xmlDecoder provides a decoded value from an XML body.
func xmlDecoder(body io.Reader, v interface{}) error {
d := xml.NewDecoder(body)
return d.Decode(v)
}
// sum256 calculates the SHA256 sum of an input byte array.
func sum256(data []byte) []byte {
hash := sha256.New()
hash.Write(data)
return hash.Sum(nil)
}
// sumMD5 calculates the MD5 sum of an input byte array.
func sumMD5(data []byte) []byte {
hash := md5.New()
hash.Write(data)
return hash.Sum(nil)
}
// getEndpointURL - construct a new endpoint.
func getEndpointURL(endpoint string, secure bool) (*url.URL, error) {
if strings.Contains(endpoint, ":") {
host, _, err := net.SplitHostPort(endpoint)
if err != nil {
return nil, err
}
if !s3utils.IsValidIP(host) && !s3utils.IsValidDomain(host) {
msg := "Endpoint: " + endpoint + " does not follow ip address or domain name standards."
return nil, ErrInvalidArgument(msg)
}
} else {
if !s3utils.IsValidIP(endpoint) && !s3utils.IsValidDomain(endpoint) {
msg := "Endpoint: " + endpoint + " does not follow ip address or domain name standards."
return nil, ErrInvalidArgument(msg)
}
}
// If secure is false, use 'http' scheme.
scheme := "https"
if !secure {
scheme = "http"
}
// Construct the endpoint URL with the chosen scheme.
endpointURLStr := scheme + "://" + endpoint
endpointURL, err := url.Parse(endpointURLStr)
if err != nil {
return nil, err
}
// Validate incoming endpoint URL.
if err := isValidEndpointURL(*endpointURL); err != nil {
return nil, err
}
return endpointURL, nil
}
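A hypothetical in-package test showing both the scheme selection and the rejection path:

package minio

import "testing"

// TestGetEndpointURL checks scheme selection and host validation.
func TestGetEndpointURL(t *testing.T) {
	u, err := getEndpointURL("play.minio.io:9000", false)
	if err != nil {
		t.Fatal(err)
	}
	if u.String() != "http://play.minio.io:9000" {
		t.Fatalf("unexpected URL: %s", u)
	}
	// Hosts that are neither valid IPs nor valid domains are rejected.
	if _, err = getEndpointURL("not a host", true); err == nil {
		t.Fatal("expected invalid endpoint to be rejected")
	}
}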
// closeResponse closes a non-nil response, draining any remaining data
// from its Body - a convenience wrapper.
//
// Subsequently this allows golang http RoundTripper
// to re-use the same connection for future requests.
func closeResponse(resp *http.Response) {
// Callers should close resp.Body when done reading from it.
// If resp.Body is not closed, the Client's underlying RoundTripper
// (typically Transport) may not be able to re-use a persistent TCP
// connection to the server for a subsequent "keep-alive" request.
if resp != nil && resp.Body != nil {
// Drain any remaining Body and then close the connection.
// Without draining the body first, the connection cannot be
// re-used for future requests.
// - http://stackoverflow.com/a/17961593/4465767
io.Copy(ioutil.Discard, resp.Body)
resp.Body.Close()
}
}
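A typical call-site sketch; fetch and its url parameter are hypothetical, the point is the deferred drain-and-close:

package minio

import "net/http"

// fetch always drains and closes the response so the transport can
// re-use the underlying connection.
func fetch(client *http.Client, url string) error {
	resp, err := client.Get(url)
	if err != nil {
		return err
	}
	defer closeResponse(resp)
	// ... read whatever portion of resp.Body is needed ...
	return nil
}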
// Sentinel URL is the default url value which is invalid.
var sentinelURL = url.URL{}
// Verify if input endpoint URL is valid.
func isValidEndpointURL(endpointURL url.URL) error {
if endpointURL == sentinelURL {
return ErrInvalidArgument("Endpoint url cannot be empty.")
}
if endpointURL.Path != "/" && endpointURL.Path != "" {
return ErrInvalidArgument("Endpoint url cannot have fully qualified paths.")
}
if strings.Contains(endpointURL.Host, ".amazonaws.com") {
if !s3utils.IsAmazonEndpoint(endpointURL) {
return ErrInvalidArgument("Amazon S3 endpoint should be 's3.amazonaws.com'.")
}
}
if strings.Contains(endpointURL.Host, ".googleapis.com") {
if !s3utils.IsGoogleEndpoint(endpointURL) {
return ErrInvalidArgument("Google Cloud Storage endpoint should be 'storage.googleapis.com'.")
}
}
return nil
}
// Verify if input expires value is valid.
func isValidExpiry(expires time.Duration) error {
expireSeconds := int64(expires / time.Second)
if expireSeconds < 1 {
return ErrInvalidArgument("Expires cannot be lesser than 1 second.")
}
if expireSeconds > 604800 {
return ErrInvalidArgument("Expires cannot be greater than 7 days.")
}
return nil
}
// We support '.' in bucket names, but we fall back to path-style
// requests for such buckets.
var validBucketName = regexp.MustCompile(`^[a-z0-9][a-z0-9\.\-]{1,61}[a-z0-9]$`)
// Invalid bucket name with double dot.
var invalidDotBucketName = regexp.MustCompile(`\.\.`)
// isValidBucketName - verify bucket name in accordance with
// - http://docs.aws.amazon.com/AmazonS3/latest/dev/UsingBucket.html
func isValidBucketName(bucketName string) error {
if strings.TrimSpace(bucketName) == "" {
return ErrInvalidBucketName("Bucket name cannot be empty.")
}
if len(bucketName) < 3 {
return ErrInvalidBucketName("Bucket name cannot be smaller than 3 characters.")
}
if len(bucketName) > 63 {
return ErrInvalidBucketName("Bucket name cannot be greater than 63 characters.")
}
if bucketName[0] == '.' || bucketName[len(bucketName)-1] == '.' {
return ErrInvalidBucketName("Bucket name cannot start or end with a '.' dot.")
}
if invalidDotBucketName.MatchString(bucketName) {
return ErrInvalidBucketName("Bucket name cannot have successive periods.")
}
if !validBucketName.MatchString(bucketName) {
return ErrInvalidBucketName("Bucket name contains invalid characters.")
}
return nil
}
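A hypothetical in-package spot check of the naming rules above - valid names pass, short, uppercase, double-dot, and leading-dot names are rejected:

package minio

import "testing"

// TestBucketNameRules spot-checks isValidBucketName on both sides.
func TestBucketNameRules(t *testing.T) {
	for _, ok := range []string{"my-bucket", "my.bucket.01"} {
		if err := isValidBucketName(ok); err != nil {
			t.Fatalf("%q should be valid: %v", ok, err)
		}
	}
	for _, bad := range []string{"ab", "My-Bucket", "my..bucket", ".bucket"} {
		if err := isValidBucketName(bad); err == nil {
			t.Fatalf("%q should be rejected", bad)
		}
	}
}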
// isValidObjectName - verify object name in accordance with
// - http://docs.aws.amazon.com/AmazonS3/latest/dev/UsingMetadata.html
func isValidObjectName(objectName string) error {
if strings.TrimSpace(objectName) == "" {
return ErrInvalidObjectName("Object name cannot be empty.")
}
if len(objectName) > 1024 {
return ErrInvalidObjectName("Object name cannot be greater than 1024 characters.")
}
if !utf8.ValidString(objectName) {
return ErrInvalidBucketName("Object name with non UTF-8 strings are not supported.")
}
return nil
}
// isValidObjectPrefix - verify if object prefix is valid.
func isValidObjectPrefix(objectPrefix string) error {
if len(objectPrefix) > 1024 {
return ErrInvalidObjectPrefix("Object prefix cannot be greater than 1024 characters.")
}
if !utf8.ValidString(objectPrefix) {
return ErrInvalidObjectPrefix("Object prefix with non UTF-8 strings are not supported.")
}
return nil
}
// cloneHeader makes a copy of an http.Header.
func cloneHeader(h http.Header) http.Header {
h2 := make(http.Header, len(h))
for k, vv := range h {
vv2 := make([]string, len(vv))
copy(vv2, vv)
h2[k] = vv2
}
return h2
}
// filterHeader filters out the given list of header keys from a
// HEAD or GET HTTP response and returns the remaining headers as a
// new http.Header.
func filterHeader(header http.Header, filterKeys []string) (filteredHeader http.Header) {
filteredHeader = cloneHeader(header)
for _, key := range filterKeys {
filteredHeader.Del(key)
}
return filteredHeader
}
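Because filterHeader operates on a clone, the caller's header is never mutated. A hypothetical in-package test of that behavior:

package minio

import (
	"net/http"
	"testing"
)

// TestFilterHeader checks that filtering copies the header and strips
// only the listed keys.
func TestFilterHeader(t *testing.T) {
	h := http.Header{}
	h.Set("Content-Type", "text/plain")
	h.Set("X-Amz-Meta-Color", "red")
	filtered := filterHeader(h, []string{"X-Amz-Meta-Color"})
	if filtered.Get("X-Amz-Meta-Color") != "" {
		t.Fatal("filtered key should be removed")
	}
	if h.Get("X-Amz-Meta-Color") != "red" {
		t.Fatal("original header must be left untouched")
	}
}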

vendor/vendor.json vendored

@ -170,6 +170,12 @@
"revision": "50761b0867bd1d9d069276790bcd4a3bccf2324a",
"revisionTime": "2016-08-31T22:25:20Z"
},
{
"checksumSHA1": "isRGvFzUCh72bNQFmw7WQaeT7xY=",
"path": "github.com/lox/httpcache",
"revision": "fff585cf25b80f4ccc4ac7be6d3f564aa9c117ff",
"revisionTime": "2017-01-09T03:46:36Z"
},
{
"path": "github.com/mattn/go-colorable",
"revision": "9056b7a9f2d1f2d96498d6d146acd1f9d5ed3d59",
@ -210,28 +216,40 @@
"revisionTime": "2016-02-29T08:42:30-08:00"
},
{
"checksumSHA1": "qTxOBp3GVxCC70ykb7Hxg6UgWwA=",
"checksumSHA1": "rtVzxCyARW7zRG1jUf3K7o9vyt0=",
"path": "github.com/minio/minio-go",
"revision": "5297a818b482fa329b3dc1a3926e3c4c6fb5d459",
"revisionTime": "2017-04-26T18:23:05Z"
},
{
"checksumSHA1": "lsxCcRcNUDxhQyO999SOdvKzzfM=",
"path": "github.com/minio/minio-go/pkg/encrypt",
"revision": "5297a818b482fa329b3dc1a3926e3c4c6fb5d459",
"revisionTime": "2017-04-26T18:23:05Z"
},
{
"checksumSHA1": "neH34/65OXeKHM/MlV8MbhcdFBc=",
"path": "github.com/minio/minio-go/pkg/policy",
"revision": "40505f5d08c721dfe5a6450fbdef3bcd6567aa97",
"revisionTime": "2016-09-04T08:12:15Z"
"revision": "5297a818b482fa329b3dc1a3926e3c4c6fb5d459",
"revisionTime": "2017-04-26T18:23:05Z"
},
{
"checksumSHA1": "l95EyvF0yFAItXsXYqsZ6g7yGy4=",
"checksumSHA1": "/5IXp1nGKqOfHn8Piiod3OCkG2U=",
"path": "github.com/minio/minio-go/pkg/s3signer",
"revision": "d02caa62b9e1034f93a82d41da806651a666c5a3",
"revisionTime": "2016-12-14T00:10:05Z"
"revision": "5297a818b482fa329b3dc1a3926e3c4c6fb5d459",
"revisionTime": "2017-04-26T18:23:05Z"
},
{
"checksumSHA1": "bPvxFS1qu6W9lOqdt8aEfS5Sids=",
"checksumSHA1": "gRnCFKb4x83GBLVUZXoOjujd+U0=",
"path": "github.com/minio/minio-go/pkg/s3utils",
"revision": "532b920ff28900244a2ef7d07468003df36fe7c5",
"revisionTime": "2016-12-20T20:43:13Z"
"revision": "5297a818b482fa329b3dc1a3926e3c4c6fb5d459",
"revisionTime": "2017-04-26T18:23:05Z"
},
{
"checksumSHA1": "maUy+dbN6VfTTnfErrAW2lLit1w=",
"path": "github.com/minio/minio-go/pkg/set",
"revision": "7a3619e41885dcbcfafee193c10eb80530c2be53",
"revisionTime": "2017-02-17T20:03:45Z"
"revision": "5297a818b482fa329b3dc1a3926e3c4c6fb5d459",
"revisionTime": "2017-04-26T18:23:05Z"
},
{
"checksumSHA1": "URVle4qtadmW9w9BulDRHY3kxnA=",
@ -292,6 +310,12 @@
"revision": "c78aac22bd43883fd2817833b982153dcac17b3b",
"revisionTime": "2016-05-18T16:56:57+10:00"
},
{
"checksumSHA1": "NLwKGa5B0STKAvQV+lz/ujnbzN4=",
"path": "github.com/rainycape/vfs",
"revision": "a62fd22bcf7010946a44f6a1250a82c03110a14b",
"revisionTime": "2015-06-11T13:38:00Z"
},
{
"path": "github.com/rs/cors",
"revision": "a62a804a8a009876ca59105f7899938a1349f4b3",