Move to latest release of minio-go (#4886)
- Region handling can now use region endpoints directly.
- All uploads are streaming; no more large buffers needed.
- Major API overhaul for CopyObject(dst, src):
  - fixes bugs present in the existing copy code
  - supports the metadata replace directive in CopyObject
  - PutObjectPart no longer requires md5Sum and sha256
This commit is contained in:
parent
72490bf8db
commit
cf479eb401
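For orientation, here is a minimal sketch (not part of the commit itself) of how the overhauled copy API is meant to be called after this change; the endpoint, credentials, and bucket/object names are hypothetical placeholders:

```go
package main

import (
	"log"

	minio "github.com/minio/minio-go"
)

func main() {
	// Hypothetical endpoint and credentials, for illustration only.
	client, err := minio.New("play.minio.io:9000", "ACCESS-KEY", "SECRET-KEY", true)
	if err != nil {
		log.Fatalln(err)
	}

	// The old call was client.CopyObject(dstBucket, dstObject, source, conditions).
	// The new API takes explicit source and destination descriptors instead.
	src := minio.NewSourceInfo("src-bucket", "src-object", nil)
	dst, err := minio.NewDestinationInfo("dst-bucket", "dst-object", nil, nil)
	if err != nil {
		log.Fatalln(err)
	}
	if err = client.CopyObject(dst, src); err != nil {
		log.Fatalln(err)
	}
}
```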
@@ -19,7 +19,7 @@ package cmd
import (
	"io"
	"net/http"
-	"path"
+	"strings"

	"encoding/hex"

@@ -330,29 +330,32 @@ func (l *s3Objects) GetObjectInfo(bucket string, object string) (objInfo ObjectI
	return fromMinioClientObjectInfo(bucket, oi), nil
}

+// Decodes hex encoded md5, sha256 into their raw byte representations.
+func getMD5AndSha256SumBytes(md5Hex, sha256Hex string) (md5Bytes, sha256Bytes []byte, err error) {
+	if md5Hex != "" {
+		md5Bytes, err = hex.DecodeString(md5Hex)
+		if err != nil {
+			return nil, nil, err
+		}
+	}
+	if sha256Hex != "" {
+		sha256Bytes, err = hex.DecodeString(sha256Hex)
+		if err != nil {
+			return nil, nil, err
+		}
+	}
+	return md5Bytes, sha256Bytes, nil
+}
+
// PutObject creates a new object with the incoming data,
func (l *s3Objects) PutObject(bucket string, object string, size int64, data io.Reader, metadata map[string]string, sha256sum string) (objInfo ObjectInfo, e error) {
-	var sha256sumBytes []byte
-
-	var err error
-	if sha256sum != "" {
-		sha256sumBytes, err = hex.DecodeString(sha256sum)
-		if err != nil {
-			return objInfo, s3ToObjectError(traceError(err), bucket, object)
-		}
-	}
-
-	var md5sumBytes []byte
-	md5sum := metadata["etag"]
-	if md5sum != "" {
-		md5sumBytes, err = hex.DecodeString(md5sum)
-		if err != nil {
-			return objInfo, s3ToObjectError(traceError(err), bucket, object)
-		}
-		delete(metadata, "etag")
-	}
-
-	oi, err := l.Client.PutObject(bucket, object, size, data, md5sumBytes, sha256sumBytes, toMinioClientMetadata(metadata))
+	md5Bytes, sha256Bytes, err := getMD5AndSha256SumBytes(metadata["etag"], sha256sum)
+	if err != nil {
+		return objInfo, s3ToObjectError(traceError(err), bucket, object)
+	}
+	delete(metadata, "etag")
+
+	oi, err := l.Client.PutObject(bucket, object, size, data, md5Bytes, sha256Bytes, toMinioClientMetadata(metadata))
	if err != nil {
		return objInfo, s3ToObjectError(traceError(err), bucket, object)
	}
@@ -361,15 +364,31 @@ func (l *s3Objects) PutObject(bucket string, object string, size int64, data io.
}

// CopyObject copies a blob from source container to destination container.
-func (l *s3Objects) CopyObject(srcBucket string, srcObject string, destBucket string, destObject string, metadata map[string]string) (objInfo ObjectInfo, e error) {
-	err := l.Client.CopyObject(destBucket, destObject, path.Join(srcBucket, srcObject), minio.CopyConditions{})
+func (l *s3Objects) CopyObject(srcBucket string, srcObject string, dstBucket string, dstObject string, metadata map[string]string) (objInfo ObjectInfo, err error) {
+	// Source object
+	src := minio.NewSourceInfo(srcBucket, srcObject, nil)
+
+	// Destination object
+	var xamzMeta = map[string]string{}
+	for key := range metadata {
+		for _, prefix := range userMetadataKeyPrefixes {
+			if strings.HasPrefix(key, prefix) {
+				xamzMeta[key] = metadata[key]
+			}
+		}
+	}
+	dst, err := minio.NewDestinationInfo(dstBucket, dstObject, nil, xamzMeta)
+	if err != nil {
+		return objInfo, s3ToObjectError(traceError(err), dstBucket, dstObject)
+	}
+
+	if err = l.Client.CopyObject(dst, src); err != nil {
		return objInfo, s3ToObjectError(traceError(err), srcBucket, srcObject)
	}

-	oi, err := l.GetObjectInfo(destBucket, destObject)
+	oi, err := l.GetObjectInfo(dstBucket, dstObject)
	if err != nil {
-		return objInfo, s3ToObjectError(traceError(err), destBucket, destObject)
+		return objInfo, s3ToObjectError(traceError(err), dstBucket, dstObject)
	}

	return oi, nil
@@ -474,17 +493,12 @@ func fromMinioClientObjectPart(op minio.ObjectPart) PartInfo {

// PutObjectPart puts a part of object in bucket
func (l *s3Objects) PutObjectPart(bucket string, object string, uploadID string, partID int, size int64, data io.Reader, md5Hex string, sha256sum string) (pi PartInfo, e error) {
-	md5HexBytes, err := hex.DecodeString(md5Hex)
+	md5Bytes, sha256Bytes, err := getMD5AndSha256SumBytes(md5Hex, sha256sum)
	if err != nil {
-		return pi, err
+		return pi, s3ToObjectError(traceError(err), bucket, object)
	}

-	sha256sumBytes, err := hex.DecodeString(sha256sum)
-	if err != nil {
-		return pi, err
-	}
-
-	info, err := l.Client.PutObjectPart(bucket, object, uploadID, partID, size, data, md5HexBytes, sha256sumBytes)
+	info, err := l.Client.PutObjectPart(bucket, object, uploadID, partID, size, data, md5Bytes, sha256Bytes)
	if err != nil {
		return pi, err
	}

@@ -15,3 +15,44 @@
 */

package cmd
+
+import (
+	"testing"
+)
+
+// Tests extracting md5/sha256 bytes.
+func TestGetMD5AndSha256Bytes(t *testing.T) {
+	testCases := []struct {
+		md5Hex    string
+		sha256Hex string
+		success   bool
+	}{
+		// Test 1: Hex encoding failure.
+		{
+			md5Hex:    "a",
+			sha256Hex: "b",
+			success:   false,
+		},
+		// Test 2: Hex encoding success.
+		{
+			md5Hex:    "91be0b892e47ede9de06aac14ca0369e",
+			sha256Hex: "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
+			success:   true,
+		},
+		// Test 3: Empty hex values should return success.
+		{
+			md5Hex:    "",
+			sha256Hex: "",
+			success:   true,
+		},
+	}
+	for i, testCase := range testCases {
+		_, _, err := getMD5AndSha256SumBytes(testCase.md5Hex, testCase.sha256Hex)
+		if err != nil && testCase.success {
+			t.Errorf("Test %d: Expected success, but got failure %s", i+1, err)
+		}
+		if err == nil && !testCase.success {
+			t.Errorf("Test %d: Expected failure, but got success", i+1)
+		}
+	}
+}
33 vendor/github.com/minio/minio-go/README.md (generated, vendored)
@@ -54,7 +54,7 @@ func main() {
		log.Fatalln(err)
	}

-	log.Println("%v", minioClient) // minioClient is now setup
+	log.Printf("%#v\n", minioClient) // minioClient is now setup
```

## Quick Start Example - File Uploader
@@ -130,7 +130,6 @@ The full API Reference is available here.
* [Complete API Reference](https://docs.minio.io/docs/golang-client-api-reference)

### API Reference : Bucket Operations

* [`MakeBucket`](https://docs.minio.io/docs/golang-client-api-reference#MakeBucket)
* [`ListBuckets`](https://docs.minio.io/docs/golang-client-api-reference#ListBuckets)
* [`BucketExists`](https://docs.minio.io/docs/golang-client-api-reference#BucketExists)
@@ -140,25 +139,21 @@ The full API Reference is available here.
* [`ListIncompleteUploads`](https://docs.minio.io/docs/golang-client-api-reference#ListIncompleteUploads)

### API Reference : Bucket policy Operations

* [`SetBucketPolicy`](https://docs.minio.io/docs/golang-client-api-reference#SetBucketPolicy)
* [`GetBucketPolicy`](https://docs.minio.io/docs/golang-client-api-reference#GetBucketPolicy)
* [`ListBucketPolicies`](https://docs.minio.io/docs/golang-client-api-reference#ListBucketPolicies)

### API Reference : Bucket notification Operations

* [`SetBucketNotification`](https://docs.minio.io/docs/golang-client-api-reference#SetBucketNotification)
* [`GetBucketNotification`](https://docs.minio.io/docs/golang-client-api-reference#GetBucketNotification)
* [`RemoveAllBucketNotification`](https://docs.minio.io/docs/golang-client-api-reference#RemoveAllBucketNotification)
* [`ListenBucketNotification`](https://docs.minio.io/docs/golang-client-api-reference#ListenBucketNotification) (Minio Extension)

### API Reference : File Object Operations

* [`FPutObject`](https://docs.minio.io/docs/golang-client-api-reference#FPutObject)
* [`FGetObject`](https://docs.minio.io/docs/golang-client-api-reference#FGetObject)

### API Reference : Object Operations

* [`GetObject`](https://docs.minio.io/docs/golang-client-api-reference#GetObject)
* [`PutObject`](https://docs.minio.io/docs/golang-client-api-reference#PutObject)
* [`PutObjectStreaming`](https://docs.minio.io/docs/golang-client-api-reference#PutObjectStreaming)
@@ -169,14 +164,13 @@ The full API Reference is available here.
* [`RemoveIncompleteUpload`](https://docs.minio.io/docs/golang-client-api-reference#RemoveIncompleteUpload)

### API Reference: Encrypted Object Operations

* [`GetEncryptedObject`](https://docs.minio.io/docs/golang-client-api-reference#GetEncryptedObject)
* [`PutEncryptedObject`](https://docs.minio.io/docs/golang-client-api-reference#PutEncryptedObject)

### API Reference : Presigned Operations

* [`PresignedGetObject`](https://docs.minio.io/docs/golang-client-api-reference#PresignedGetObject)
* [`PresignedPutObject`](https://docs.minio.io/docs/golang-client-api-reference#PresignedPutObject)
+* [`PresignedHeadObject`](https://docs.minio.io/docs/golang-client-api-reference#PresignedHeadObject)
* [`PresignedPostPolicy`](https://docs.minio.io/docs/golang-client-api-reference#PresignedPostPolicy)

### API Reference : Client custom settings
@@ -185,11 +179,9 @@ The full API Reference is available here.
* [`TraceOn`](http://docs.minio.io/docs/golang-client-api-reference#TraceOn)
* [`TraceOff`](http://docs.minio.io/docs/golang-client-api-reference#TraceOff)

## Full Examples

-#### Full Examples : Bucket Operations
+### Full Examples : Bucket Operations
* [makebucket.go](https://github.com/minio/minio-go/blob/master/examples/s3/makebucket.go)
* [listbuckets.go](https://github.com/minio/minio-go/blob/master/examples/s3/listbuckets.go)
* [bucketexists.go](https://github.com/minio/minio-go/blob/master/examples/s3/bucketexists.go)
@@ -198,26 +190,22 @@ The full API Reference is available here.
* [listobjectsV2.go](https://github.com/minio/minio-go/blob/master/examples/s3/listobjectsV2.go)
* [listincompleteuploads.go](https://github.com/minio/minio-go/blob/master/examples/s3/listincompleteuploads.go)

-#### Full Examples : Bucket policy Operations
+### Full Examples : Bucket policy Operations
* [setbucketpolicy.go](https://github.com/minio/minio-go/blob/master/examples/s3/setbucketpolicy.go)
* [getbucketpolicy.go](https://github.com/minio/minio-go/blob/master/examples/s3/getbucketpolicy.go)
* [listbucketpolicies.go](https://github.com/minio/minio-go/blob/master/examples/s3/listbucketpolicies.go)

-#### Full Examples : Bucket notification Operations
+### Full Examples : Bucket notification Operations
* [setbucketnotification.go](https://github.com/minio/minio-go/blob/master/examples/s3/setbucketnotification.go)
* [getbucketnotification.go](https://github.com/minio/minio-go/blob/master/examples/s3/getbucketnotification.go)
* [removeallbucketnotification.go](https://github.com/minio/minio-go/blob/master/examples/s3/removeallbucketnotification.go)
* [listenbucketnotification.go](https://github.com/minio/minio-go/blob/master/examples/minio/listenbucketnotification.go) (Minio Extension)

-#### Full Examples : File Object Operations
+### Full Examples : File Object Operations
* [fputobject.go](https://github.com/minio/minio-go/blob/master/examples/s3/fputobject.go)
* [fgetobject.go](https://github.com/minio/minio-go/blob/master/examples/s3/fgetobject.go)

-#### Full Examples : Object Operations
+### Full Examples : Object Operations
* [putobject.go](https://github.com/minio/minio-go/blob/master/examples/s3/putobject.go)
* [getobject.go](https://github.com/minio/minio-go/blob/master/examples/s3/getobject.go)
* [statobject.go](https://github.com/minio/minio-go/blob/master/examples/s3/statobject.go)
@@ -226,14 +214,14 @@ The full API Reference is available here.
* [removeincompleteupload.go](https://github.com/minio/minio-go/blob/master/examples/s3/removeincompleteupload.go)
* [removeobjects.go](https://github.com/minio/minio-go/blob/master/examples/s3/removeobjects.go)

-#### Full Examples : Encrypted Object Operations
+### Full Examples : Encrypted Object Operations
* [put-encrypted-object.go](https://github.com/minio/minio-go/blob/master/examples/s3/put-encrypted-object.go)
* [get-encrypted-object.go](https://github.com/minio/minio-go/blob/master/examples/s3/get-encrypted-object.go)

-#### Full Examples : Presigned Operations
+### Full Examples : Presigned Operations
* [presignedgetobject.go](https://github.com/minio/minio-go/blob/master/examples/s3/presignedgetobject.go)
* [presignedputobject.go](https://github.com/minio/minio-go/blob/master/examples/s3/presignedputobject.go)
+* [presignedheadobject.go](https://github.com/minio/minio-go/blob/master/examples/s3/presignedheadobject.go)
* [presignedpostpolicy.go](https://github.com/minio/minio-go/blob/master/examples/s3/presignedpostpolicy.go)

## Explore Further
@@ -242,7 +230,6 @@ The full API Reference is available here.
* [Go Music Player App Full Application Example](https://docs.minio.io/docs/go-music-player-app)

## Contribute

[Contributors Guide](https://github.com/minio/minio-go/blob/master/CONTRIBUTING.md)

[![Build Status](https://travis-ci.org/minio/minio-go.svg)](https://travis-ci.org/minio/minio-go)
532 vendor/github.com/minio/minio-go/api-compose-object.go (generated, vendored, new file)
@@ -0,0 +1,532 @@
/*
 * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2017 Minio, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package minio

import (
	"encoding/base64"
	"fmt"
	"net/http"
	"net/url"
	"strconv"
	"strings"
	"time"

	"github.com/minio/minio-go/pkg/s3utils"
)

// SSEInfo - represents Server-Side-Encryption parameters specified by
// a user.
type SSEInfo struct {
	key  []byte
	algo string
}

// NewSSEInfo - specifies (binary or un-encoded) encryption key and
// algorithm name. If algo is empty, it defaults to "AES256". Ref:
// https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html
func NewSSEInfo(key []byte, algo string) SSEInfo {
	if algo == "" {
		algo = "AES256"
	}
	return SSEInfo{key, algo}
}

// internal method that computes SSE-C headers
func (s *SSEInfo) getSSEHeaders(isCopySource bool) map[string]string {
	if s == nil {
		return nil
	}

	cs := ""
	if isCopySource {
		cs = "copy-source-"
	}
	return map[string]string{
		"x-amz-" + cs + "server-side-encryption-customer-algorithm": s.algo,
		"x-amz-" + cs + "server-side-encryption-customer-key":       base64.StdEncoding.EncodeToString(s.key),
		"x-amz-" + cs + "server-side-encryption-customer-key-MD5":   base64.StdEncoding.EncodeToString(sumMD5(s.key)),
	}
}

// GetSSEHeaders - computes and returns headers for SSE-C as key-value
// pairs. They can be set as metadata in PutObject* requests (for
// encryption) or be set as request headers in `Core.GetObject` (for
// decryption).
func (s *SSEInfo) GetSSEHeaders() map[string]string {
	return s.getSSEHeaders(false)
}
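// Illustrative sketch (added by the editor, not part of the original
// file): how the SSE-C headers produced above might be attached to a
// put request's metadata. The 32-byte key below is a hypothetical
// placeholder.
//
//	sse := NewSSEInfo([]byte("a-hypothetical-32-byte-long-key!"), "") // algo defaults to AES256
//	meta := make(map[string][]string)
//	for k, v := range sse.GetSSEHeaders() {
//		meta[k] = []string{v}
//	}
//	// meta can now be passed as the metadata argument of a PutObject*
//	// call for server-side encryption with a customer-provided key.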
// DestinationInfo - type with information about the object to be
// created via server-side copy requests, using the Compose API.
type DestinationInfo struct {
	bucket, object string

	// key for encrypting destination
	encryption *SSEInfo

	// if no user-metadata is provided, it is copied from source
	// (when there is only one source object in the compose
	// request)
	userMetadata map[string]string
}

// NewDestinationInfo - creates a compose-object/copy-source
// destination info object.
//
// `encryptSSEC` is the key info for server-side-encryption with
// customer provided key. If it is nil, no encryption is performed.
//
// `userMeta` is the user-metadata key-value pairs to be set on the
// destination. The keys are automatically prefixed with `x-amz-meta-`
// if needed. If nil is passed, and if only a single source (of any
// size) is provided in the ComposeObject call, then metadata from the
// source is copied to the destination.
func NewDestinationInfo(bucket, object string, encryptSSEC *SSEInfo,
	userMeta map[string]string) (d DestinationInfo, err error) {

	// Input validation.
	if err = s3utils.CheckValidBucketName(bucket); err != nil {
		return d, err
	}
	if err = s3utils.CheckValidObjectName(object); err != nil {
		return d, err
	}

	// Process custom-metadata to remove a `x-amz-meta-` prefix if
	// present and validate that keys are distinct (after this
	// prefix removal).
	m := make(map[string]string)
	for k, v := range userMeta {
		if strings.HasPrefix(strings.ToLower(k), "x-amz-meta-") {
			k = k[len("x-amz-meta-"):]
		}
		if _, ok := m[k]; ok {
			return d, fmt.Errorf("Cannot add both %s and x-amz-meta-%s keys as custom metadata", k, k)
		}
		m[k] = v
	}

	return DestinationInfo{
		bucket:       bucket,
		object:       object,
		encryption:   encryptSSEC,
		userMetadata: m,
	}, nil
}
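// Illustrative sketch (added by the editor, not part of the original
// file): NewDestinationInfo normalizes user-metadata keys, so these
// two calls are equivalent; the names are hypothetical.
//
//	d1, _ := NewDestinationInfo("bucket", "object", nil, map[string]string{"x-amz-meta-label": "v"})
//	d2, _ := NewDestinationInfo("bucket", "object", nil, map[string]string{"label": "v"})
//	// Both store userMetadata as {"label": "v"}; passing both key
//	// spellings at once would instead return an error.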
// getUserMetaHeadersMap - construct appropriate key-value pairs to send
// as headers from metadata map to pass into copy-object request. For
// single part copy-object (i.e. non-multipart object), enable the
// withCopyDirectiveHeader to set the `x-amz-metadata-directive` to
// `REPLACE`, so that metadata headers from the source are not copied
// over.
func (d *DestinationInfo) getUserMetaHeadersMap(withCopyDirectiveHeader bool) map[string]string {
	if len(d.userMetadata) == 0 {
		return nil
	}
	r := make(map[string]string)
	if withCopyDirectiveHeader {
		r["x-amz-metadata-directive"] = "REPLACE"
	}
	for k, v := range d.userMetadata {
		r["x-amz-meta-"+k] = v
	}
	return r
}

// SourceInfo - represents a source object to be copied, using
// server-side copying APIs.
type SourceInfo struct {
	bucket, object string

	start, end int64

	decryptKey *SSEInfo
	// Headers to send with the upload-part-copy request involving
	// this source object.
	Headers http.Header
}

// NewSourceInfo - create a compose-object/copy-object source info
// object.
//
// `decryptSSEC` is the decryption key using server-side-encryption
// with customer provided key. It may be nil if the source is not
// encrypted.
func NewSourceInfo(bucket, object string, decryptSSEC *SSEInfo) SourceInfo {
	r := SourceInfo{
		bucket:     bucket,
		object:     object,
		start:      -1, // range is unspecified by default
		decryptKey: decryptSSEC,
		Headers:    make(http.Header),
	}

	// Set the source header
	r.Headers.Set("x-amz-copy-source", s3utils.EncodePath(bucket+"/"+object))

	// Assemble decryption headers for upload-part-copy request
	for k, v := range decryptSSEC.getSSEHeaders(true) {
		r.Headers.Set(k, v)
	}

	return r
}

// SetRange - Set the start and end offset of the source object to be
// copied. If this method is not called, the whole source object is
// copied.
func (s *SourceInfo) SetRange(start, end int64) error {
	if start > end || start < 0 {
		return ErrInvalidArgument("start must be non-negative, and start must be at most end.")
	}
	// Note that 0 <= start <= end
	s.start, s.end = start, end
	return nil
}

// SetMatchETagCond - Set ETag match condition. The object is copied
// only if the etag of the source matches the value given here.
func (s *SourceInfo) SetMatchETagCond(etag string) error {
	if etag == "" {
		return ErrInvalidArgument("ETag cannot be empty.")
	}
	s.Headers.Set("x-amz-copy-source-if-match", etag)
	return nil
}

// SetMatchETagExceptCond - Set the ETag match exception
// condition. The object is copied only if the etag of the source is
// not the value given here.
func (s *SourceInfo) SetMatchETagExceptCond(etag string) error {
	if etag == "" {
		return ErrInvalidArgument("ETag cannot be empty.")
	}
	s.Headers.Set("x-amz-copy-source-if-none-match", etag)
	return nil
}

// SetModifiedSinceCond - Set the modified since condition.
func (s *SourceInfo) SetModifiedSinceCond(modTime time.Time) error {
	if modTime.IsZero() {
		return ErrInvalidArgument("Input time cannot be 0.")
	}
	s.Headers.Set("x-amz-copy-source-if-modified-since", modTime.Format(http.TimeFormat))
	return nil
}

// SetUnmodifiedSinceCond - Set the unmodified since condition.
func (s *SourceInfo) SetUnmodifiedSinceCond(modTime time.Time) error {
	if modTime.IsZero() {
		return ErrInvalidArgument("Input time cannot be 0.")
	}
	s.Headers.Set("x-amz-copy-source-if-unmodified-since", modTime.Format(http.TimeFormat))
	return nil
}

// Helper to fetch the size, ETag and user-metadata of an object using
// a StatObject call.
func (s *SourceInfo) getProps(c Client) (size int64, etag string, userMeta map[string]string, err error) {
	// Get object info - need size and etag here. Also, decryption
	// headers are added to the stat request if given.
	var objInfo ObjectInfo
	rh := NewGetReqHeaders()
	for k, v := range s.decryptKey.getSSEHeaders(false) {
		rh.Set(k, v)
	}
	objInfo, err = c.statObject(s.bucket, s.object, rh)
	if err != nil {
		err = fmt.Errorf("Could not stat object - %s/%s: %v", s.bucket, s.object, err)
	} else {
		size = objInfo.Size
		etag = objInfo.ETag
		userMeta = make(map[string]string)
		for k, v := range objInfo.Metadata {
			if strings.HasPrefix(k, "x-amz-meta-") {
				if len(v) > 0 {
					userMeta[k] = v[0]
				}
			}
		}
	}
	return
}

// uploadPartCopy - helper function to create a part in a multipart
// upload via an upload-part-copy request
// https://docs.aws.amazon.com/AmazonS3/latest/API/mpUploadUploadPartCopy.html
func (c Client) uploadPartCopy(bucket, object, uploadID string, partNumber int,
	headers http.Header) (p CompletePart, err error) {

	// Build query parameters
	urlValues := make(url.Values)
	urlValues.Set("partNumber", strconv.Itoa(partNumber))
	urlValues.Set("uploadId", uploadID)

	// Send upload-part-copy request
	resp, err := c.executeMethod("PUT", requestMetadata{
		bucketName:   bucket,
		objectName:   object,
		customHeader: headers,
		queryValues:  urlValues,
	})
	defer closeResponse(resp)
	if err != nil {
		return p, err
	}

	// Check if we got an error response.
	if resp.StatusCode != http.StatusOK {
		return p, httpRespToErrorResponse(resp, bucket, object)
	}

	// Decode copy-part response on success.
	cpObjRes := copyObjectResult{}
	err = xmlDecoder(resp.Body, &cpObjRes)
	if err != nil {
		return p, err
	}
	p.PartNumber, p.ETag = partNumber, cpObjRes.ETag
	return p, nil
}

// ComposeObject - creates an object using server-side copying of
// existing objects. It takes a list of source objects (with optional
// offsets) and concatenates them into a new object using only
// server-side copying operations.
func (c Client) ComposeObject(dst DestinationInfo, srcs []SourceInfo) error {
	if len(srcs) < 1 || len(srcs) > maxPartsCount {
		return ErrInvalidArgument("There must be at least one and up to 10000 source objects.")
	}

	srcSizes := make([]int64, len(srcs))
	var totalSize, size, totalParts int64
	var srcUserMeta map[string]string
	var etag string
	var err error
	for i, src := range srcs {
		size, etag, srcUserMeta, err = src.getProps(c)
		if err != nil {
			return fmt.Errorf("Could not get source props for %s/%s: %v", src.bucket, src.object, err)
		}

		// Error out if client side encryption is used in this source
		// object when more than one source object is given.
		if len(srcs) > 1 && src.Headers.Get("x-amz-meta-x-amz-key") != "" {
			return ErrInvalidArgument(
				fmt.Sprintf("Client side encryption is used in source object %s/%s", src.bucket, src.object))
		}

		// Since we did a HEAD to get size, we use the ETag
		// value to make sure the object has not changed by
		// the time we perform the copy. This is done, only if
		// the user has not set their own ETag match
		// condition.
		if src.Headers.Get("x-amz-copy-source-if-match") == "" {
			src.SetMatchETagCond(etag)
		}

		// Check if a segment is specified, and if so, is the
		// segment within object bounds?
		if src.start != -1 {
			// Since range is specified,
			//    0 <= src.start <= src.end
			// so only invalid case to check is:
			if src.end >= size {
				return ErrInvalidArgument(
					fmt.Sprintf("SourceInfo %d has invalid segment-to-copy [%d, %d] (size is %d)",
						i, src.start, src.end, size))
			}
			size = src.end - src.start + 1
		}

		// Only the last source may be less than `absMinPartSize`
		if size < absMinPartSize && i < len(srcs)-1 {
			return ErrInvalidArgument(
				fmt.Sprintf("SourceInfo %d is too small (%d) and it is not the last part", i, size))
		}

		// Is data to copy too large?
		totalSize += size
		if totalSize > maxMultipartPutObjectSize {
			return ErrInvalidArgument(fmt.Sprintf("Cannot compose an object of size %d (> 5TiB)", totalSize))
		}

		// record source size
		srcSizes[i] = size

		// calculate parts needed for current source
		totalParts += partsRequired(size)
		// Do we need more parts than we are allowed?
		if totalParts > maxPartsCount {
			return ErrInvalidArgument(fmt.Sprintf(
				"Your proposed compose object requires more than %d parts", maxPartsCount))
		}
	}

	// Single source object case (i.e. when only one source is
	// involved, it is being copied wholly and at most 5GiB in
	// size).
	if totalParts == 1 && srcs[0].start == -1 && totalSize <= maxPartSize {
		h := srcs[0].Headers
		// Add destination encryption headers
		for k, v := range dst.encryption.getSSEHeaders(false) {
			h.Set(k, v)
		}

		// If no user metadata is specified (and so, the
		// for-loop below is not entered), metadata from the
		// source is copied to the destination (due to
		// single-part copy-object PUT request behaviour).
		for k, v := range dst.getUserMetaHeadersMap(true) {
			h.Set(k, v)
		}

		// Send copy request
		resp, err := c.executeMethod("PUT", requestMetadata{
			bucketName:   dst.bucket,
			objectName:   dst.object,
			customHeader: h,
		})
		defer closeResponse(resp)
		if err != nil {
			return err
		}
		// Check if we got an error response.
		if resp.StatusCode != http.StatusOK {
			return httpRespToErrorResponse(resp, dst.bucket, dst.object)
		}

		// Return nil on success.
		return nil
	}

	// Now, handle multipart-copy cases.

	// 1. Initiate a new multipart upload.

	// Set user-metadata on the destination object. If no
	// user-metadata is specified, and there is only one source,
	// (only) then metadata from source is copied.
	userMeta := dst.getUserMetaHeadersMap(false)
	metaMap := userMeta
	if len(userMeta) == 0 && len(srcs) == 1 {
		metaMap = srcUserMeta
	}
	metaHeaders := make(map[string][]string)
	for k, v := range metaMap {
		metaHeaders[k] = append(metaHeaders[k], v)
	}
	uploadID, err := c.newUploadID(dst.bucket, dst.object, metaHeaders)
	if err != nil {
		return fmt.Errorf("Error creating new upload: %v", err)
	}

	// 2. Perform copy part uploads
	objParts := []CompletePart{}
	partIndex := 1
	for i, src := range srcs {
		h := src.Headers
		// Add destination encryption headers
		for k, v := range dst.encryption.getSSEHeaders(false) {
			h.Set(k, v)
		}

		// calculate start/end indices of parts after
		// splitting.
		startIdx, endIdx := calculateEvenSplits(srcSizes[i], src)
		for j, start := range startIdx {
			end := endIdx[j]

			// Add (or reset) source range header for
			// upload part copy request.
			h.Set("x-amz-copy-source-range",
				fmt.Sprintf("bytes=%d-%d", start, end))

			// make upload-part-copy request
			complPart, err := c.uploadPartCopy(dst.bucket,
				dst.object, uploadID, partIndex, h)
			if err != nil {
				return fmt.Errorf("Error in upload-part-copy - %v", err)
			}
			objParts = append(objParts, complPart)
			partIndex++
		}
	}

	// 3. Make final complete-multipart request.
	_, err = c.completeMultipartUpload(dst.bucket, dst.object, uploadID,
		completeMultipartUpload{Parts: objParts})
	if err != nil {
		err = fmt.Errorf("Error in complete-multipart request - %v", err)
	}
	return err
}
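// Illustrative sketch (added by the editor, not part of the original
// file): concatenating two existing objects server-side, copying only
// the first 1 MiB of the second one; all names are hypothetical. Note
// that only the last source may be smaller than the minimum part size.
//
//	src1 := NewSourceInfo("bucket", "part-one", nil)
//	src2 := NewSourceInfo("bucket", "part-two", nil)
//	if err := src2.SetRange(0, 1024*1024-1); err != nil {
//		log.Fatalln(err)
//	}
//	dst, err := NewDestinationInfo("bucket", "combined", nil, nil)
//	if err != nil {
//		log.Fatalln(err)
//	}
//	// Server-side concatenation; no object data flows through the client.
//	err = c.ComposeObject(dst, []SourceInfo{src1, src2})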

// partsRequired is ceiling(size / copyPartSize)
func partsRequired(size int64) int64 {
	r := size / copyPartSize
	if size%copyPartSize > 0 {
		r++
	}
	return r
}

// calculateEvenSplits - computes splits for a source and returns
// start and end index slices. Splits happen evenly to be sure that no
// part is less than 5MiB, as that could fail the multipart request if
// it is not the last part.
func calculateEvenSplits(size int64, src SourceInfo) (startIndex, endIndex []int64) {
	if size == 0 {
		return
	}

	reqParts := partsRequired(size)
	startIndex = make([]int64, reqParts)
	endIndex = make([]int64, reqParts)
	// Compute number of required parts `k`, as:
	//
	// k = ceiling(size / copyPartSize)
	//
	// Now, distribute the `size` bytes in the source into
	// k parts as evenly as possible:
	//
	// r parts sized (q+1) bytes, and
	// (k - r) parts sized q bytes, where
	//
	// size = q * k + r (by simple division of size by k,
	// so that 0 <= r < k)
	//
	start := src.start
	if start == -1 {
		start = 0
	}
	quot, rem := size/reqParts, size%reqParts
	nextStart := start
	for j := int64(0); j < reqParts; j++ {
		curPartSize := quot
		if j < rem {
			curPartSize++
		}

		cStart := nextStart
		cEnd := cStart + curPartSize - 1
		nextStart = cEnd + 1

		startIndex[j], endIndex[j] = cStart, cEnd
	}
	return
}
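// Worked example (added by the editor for clarity, not in the original
// file): if copyPartSize were 100 bytes (a hypothetical value) and
// size = 250, then reqParts = ceil(250/100) = 3, quot = 83 and
// rem = 1, giving one 84-byte part and two 83-byte parts: ranges
// [0, 83], [84, 166] and [167, 249]. These cover all 250 bytes with no
// part more than one byte larger than another.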
18 vendor/github.com/minio/minio-go/api-get-object.go (generated, vendored)
@@ -679,12 +679,18 @@ func (c Client) getObject(bucketName, objectName string, reqHeaders RequestHeade
	if contentType == "" {
		contentType = "application/octet-stream"
	}
-	var objectStat ObjectInfo
-	objectStat.ETag = md5sum
-	objectStat.Key = objectName
-	objectStat.Size = resp.ContentLength
-	objectStat.LastModified = date
-	objectStat.ContentType = contentType
+
+	objectStat := ObjectInfo{
+		ETag:         md5sum,
+		Key:          objectName,
+		Size:         resp.ContentLength,
+		LastModified: date,
+		ContentType:  contentType,
+		// Extract only the relevant header keys describing the object.
+		// The following function filters out a standard set of keys
+		// which are not part of object metadata.
+		Metadata: extractObjMetadata(resp.Header),
+	}

	// do not close body here, caller will close
	return resp.Body, objectStat, nil
26 vendor/github.com/minio/minio-go/api-presigned.go (generated, vendored)
@@ -84,19 +84,35 @@ func (c Client) presignURL(method string, bucketName string, objectName string,
}

// PresignedGetObject - Returns a presigned URL to access an object
-// without credentials. Expires maximum is 7days - ie. 604800 and
-// minimum is 1. Additionally you can override a set of response
-// headers using the query parameters.
+// data without credentials. URL can have a maximum expiry of
+// up to 7 days or a minimum of 1 sec. Additionally you can override
+// a set of response headers using the query parameters.
func (c Client) PresignedGetObject(bucketName string, objectName string, expires time.Duration, reqParams url.Values) (u *url.URL, err error) {
	return c.presignURL("GET", bucketName, objectName, expires, reqParams)
}

-// PresignedPutObject - Returns a presigned URL to upload an object without credentials.
-// Expires maximum is 7days - ie. 604800 and minimum is 1.
+// PresignedHeadObject - Returns a presigned URL to access object
+// metadata without credentials. URL can have a maximum expiry of
+// up to 7 days or a minimum of 1 sec. Additionally you can override
+// a set of response headers using the query parameters.
+func (c Client) PresignedHeadObject(bucketName string, objectName string, expires time.Duration, reqParams url.Values) (u *url.URL, err error) {
+	return c.presignURL("HEAD", bucketName, objectName, expires, reqParams)
+}
+
+// PresignedPutObject - Returns a presigned URL to upload an object
+// without credentials. URL can have a maximum expiry of up to 7 days
+// or a minimum of 1 sec.
func (c Client) PresignedPutObject(bucketName string, objectName string, expires time.Duration) (u *url.URL, err error) {
	return c.presignURL("PUT", bucketName, objectName, expires, nil)
}

+// Presign - returns a presigned URL for any http method of your choice
+// along with custom request params. URL can have a maximum expiry of
+// up to 7 days or a minimum of 1 sec.
+func (c Client) Presign(method string, bucketName string, objectName string, expires time.Duration, reqParams url.Values) (u *url.URL, err error) {
+	return c.presignURL(method, bucketName, objectName, expires, reqParams)
+}
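// Illustrative sketch (added by the editor, not part of the original
// file): generating a presigned GET URL that stays valid for 24 hours
// and overrides the response content-disposition; the bucket and
// object names are hypothetical.
//
//	reqParams := make(url.Values)
//	reqParams.Set("response-content-disposition", `attachment; filename="photo.jpg"`)
//	presignedURL, err := c.PresignedGetObject("mybucket", "photo.jpg", 24*time.Hour, reqParams)
//	if err != nil {
//		log.Fatalln(err)
//	}
//	fmt.Println(presignedURL)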

// PresignedPostPolicy - Returns POST urlString, form data to upload an object.
func (c Client) PresignedPostPolicy(p *PostPolicy) (u *url.URL, formData map[string]string, err error) {
	// Validate input arguments.
98 vendor/github.com/minio/minio-go/api-put-object-common.go (generated, vendored)
@@ -17,10 +17,7 @@
package minio

import (
-	"fmt"
	"hash"
	"io"
	"io/ioutil"
-	"math"
-	"os"

@@ -78,77 +75,6 @@ func optimalPartInfo(objectSize int64) (totalPartsCount int, partSize int64, las
	return totalPartsCount, partSize, lastPartSize, nil
}

-// hashCopyBuffer is identical to hashCopyN except that it doesn't take
-// any size argument but takes a buffer argument and reader should be
-// of io.ReaderAt interface.
-//
-// Stages reads from offsets into the buffer, if buffer is nil it is
-// initialized to optimalBufferSize.
-func hashCopyBuffer(hashAlgorithms map[string]hash.Hash, hashSums map[string][]byte, writer io.Writer, reader io.ReaderAt, buf []byte) (size int64, err error) {
-	hashWriter := writer
-	for _, v := range hashAlgorithms {
-		hashWriter = io.MultiWriter(hashWriter, v)
-	}
-
-	// Buffer is nil, initialize.
-	if buf == nil {
-		buf = make([]byte, optimalReadBufferSize)
-	}
-
-	// Offset to start reading from.
-	var readAtOffset int64
-
-	// Following block reads data at an offset from the input
-	// reader and copies data to into local temporary file.
-	for {
-		readAtSize, rerr := reader.ReadAt(buf, readAtOffset)
-		if rerr != nil {
-			if rerr != io.EOF {
-				return 0, rerr
-			}
-		}
-		writeSize, werr := hashWriter.Write(buf[:readAtSize])
-		if werr != nil {
-			return 0, werr
-		}
-		if readAtSize != writeSize {
-			return 0, fmt.Errorf("Read size was not completely written to writer. wanted %d, got %d - %s", readAtSize, writeSize, reportIssue)
-		}
-		readAtOffset += int64(writeSize)
-		size += int64(writeSize)
-		if rerr == io.EOF {
-			break
-		}
-	}
-
-	for k, v := range hashAlgorithms {
-		hashSums[k] = v.Sum(nil)
-	}
-	return size, err
-}
-
-// hashCopyN - Calculates chosen hashes up to partSize amount of bytes.
-func hashCopyN(hashAlgorithms map[string]hash.Hash, hashSums map[string][]byte, writer io.Writer, reader io.Reader, partSize int64) (size int64, err error) {
-	hashWriter := writer
-	for _, v := range hashAlgorithms {
-		hashWriter = io.MultiWriter(hashWriter, v)
-	}
-
-	// Copies to input at writer.
-	size, err = io.CopyN(hashWriter, reader, partSize)
-	if err != nil {
-		// If not EOF return error right here.
-		if err != io.EOF {
-			return 0, err
-		}
-	}
-
-	for k, v := range hashAlgorithms {
-		hashSums[k] = v.Sum(nil)
-	}
-	return size, err
-}
-
// getUploadID - fetch upload id if already present for an object name
// or initiate a new request to fetch a new upload id.
func (c Client) newUploadID(bucketName, objectName string, metaData map[string][]string) (uploadID string, err error) {
@@ -167,27 +93,3 @@ func (c Client) newUploadID(bucketName, objectName string, metaData map[string][
	}
	return initMultipartUploadResult.UploadID, nil
}
-
-// computeHash - Calculates hashes for an input read Seeker.
-func computeHash(hashAlgorithms map[string]hash.Hash, hashSums map[string][]byte, reader io.ReadSeeker) (size int64, err error) {
-	hashWriter := ioutil.Discard
-	for _, v := range hashAlgorithms {
-		hashWriter = io.MultiWriter(hashWriter, v)
-	}
-
-	// If no buffer is provided, no need to allocate just use io.Copy.
-	size, err = io.Copy(hashWriter, reader)
-	if err != nil {
-		return 0, err
-	}
-
-	// Seek back reader to the beginning location.
-	if _, err := reader.Seek(0, 0); err != nil {
-		return 0, err
-	}
-
-	for k, v := range hashAlgorithms {
-		hashSums[k] = v.Sum(nil)
-	}
-	return size, nil
-}
56 vendor/github.com/minio/minio-go/api-put-object-copy.go (generated, vendored)
@@ -16,57 +16,7 @@

package minio

-import (
-	"net/http"
-
-	"github.com/minio/minio-go/pkg/s3utils"
-)
-
-// CopyObject - copy a source object into a new object with the provided name in the provided bucket
-func (c Client) CopyObject(bucketName string, objectName string, objectSource string, cpCond CopyConditions) error {
-	// Input validation.
-	if err := s3utils.CheckValidBucketName(bucketName); err != nil {
-		return err
-	}
-	if err := s3utils.CheckValidObjectName(objectName); err != nil {
-		return err
-	}
-	if objectSource == "" {
-		return ErrInvalidArgument("Object source cannot be empty.")
-	}
-
-	// customHeaders apply headers.
-	customHeaders := make(http.Header)
-	for _, cond := range cpCond.conditions {
-		customHeaders.Set(cond.key, cond.value)
-	}
-
-	// Set copy source.
-	customHeaders.Set("x-amz-copy-source", s3utils.EncodePath(objectSource))
-
-	// Execute PUT on objectName.
-	resp, err := c.executeMethod("PUT", requestMetadata{
-		bucketName:   bucketName,
-		objectName:   objectName,
-		customHeader: customHeaders,
-	})
-	defer closeResponse(resp)
-	if err != nil {
-		return err
-	}
-	if resp != nil {
-		if resp.StatusCode != http.StatusOK {
-			return httpRespToErrorResponse(resp, bucketName, objectName)
-		}
-	}
-
-	// Decode copy response on success.
-	cpObjRes := copyObjectResult{}
-	err = xmlDecoder(resp.Body, &cpObjRes)
-	if err != nil {
-		return err
-	}
-
-	// Return nil on success.
-	return nil
+// CopyObject - copy a source object into a new object
+func (c Client) CopyObject(dst DestinationInfo, src SourceInfo) error {
+	return c.ComposeObject(dst, []SourceInfo{src})
}
46 vendor/github.com/minio/minio-go/api-put-object-encrypted.go (generated, vendored, new file)
@@ -0,0 +1,46 @@
/*
 * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package minio

import (
	"io"

	"github.com/minio/minio-go/pkg/encrypt"
)

// PutEncryptedObject - Encrypt and store object.
func (c Client) PutEncryptedObject(bucketName, objectName string, reader io.Reader, encryptMaterials encrypt.Materials, metadata map[string][]string, progress io.Reader) (n int64, err error) {

	if encryptMaterials == nil {
		return 0, ErrInvalidArgument("Unable to recognize empty encryption properties")
	}

	if err := encryptMaterials.SetupEncryptMode(reader); err != nil {
		return 0, err
	}

	if metadata == nil {
		metadata = make(map[string][]string)
	}

	// Set the necessary encryption headers, for future decryption.
	metadata[amzHeaderIV] = []string{encryptMaterials.GetIV()}
	metadata[amzHeaderKey] = []string{encryptMaterials.GetKey()}
	metadata[amzHeaderMatDesc] = []string{encryptMaterials.GetDesc()}

	return c.putObjectMultipartStreamNoLength(bucketName, objectName, encryptMaterials, metadata, progress)
}
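// Illustrative sketch (added by the editor, not part of the original
// file): a client-side encrypted upload using the symmetric CBC
// materials from the encrypt package; the key bytes and names below
// are hypothetical placeholders, and the exact constructor names are
// an assumption about the minio-go encrypt package of this era.
//
//	key := encrypt.NewSymmetricKey([]byte("a-hypothetical-32-byte-long-key!"))
//	materials, err := encrypt.NewCBCSecureMaterials(key)
//	if err != nil {
//		log.Fatalln(err)
//	}
//	n, err := c.PutEncryptedObject("mybucket", "myobject", file, materials, nil, nil)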
201 vendor/github.com/minio/minio-go/api-put-object-file.go (generated, vendored)
@@ -17,13 +17,9 @@
package minio

import (
-	"fmt"
-	"io"
-	"io/ioutil"
	"mime"
	"os"
	"path/filepath"
-	"sort"

	"github.com/minio/minio-go/pkg/s3utils"
)
@@ -55,11 +51,6 @@ func (c Client) FPutObject(bucketName, objectName, filePath, contentType string)
	// Save the file size.
	fileSize := fileStat.Size()

-	// Check for largest object size allowed.
-	if fileSize > int64(maxMultipartPutObjectSize) {
-		return 0, ErrEntityTooLarge(fileSize, maxMultipartPutObjectSize, bucketName, objectName)
-	}
-
	objMetadata := make(map[string][]string)

	// Set contentType based on filepath extension if not given or default
@@ -71,195 +62,5 @@ func (c Client) FPutObject(bucketName, objectName, filePath, contentType string)
	}

	objMetadata["Content-Type"] = []string{contentType}
-
-	// NOTE: Google Cloud Storage multipart Put is not compatible with Amazon S3 APIs.
-	if s3utils.IsGoogleEndpoint(c.endpointURL) {
-		// Do not compute MD5 for Google Cloud Storage.
-		return c.putObjectNoChecksum(bucketName, objectName, fileReader, fileSize, objMetadata, nil)
-	}
-
-	// Small object upload is initiated for uploads for input data size smaller than 5MiB.
-	if fileSize < minPartSize && fileSize >= 0 {
-		return c.putObjectSingle(bucketName, objectName, fileReader, fileSize, objMetadata, nil)
-	}
-
-	// Upload all large objects as multipart.
-	n, err = c.putObjectMultipartFromFile(bucketName, objectName, fileReader, fileSize, objMetadata, nil)
-	if err != nil {
-		errResp := ToErrorResponse(err)
-		// Verify if multipart functionality is not available, if not
-		// fall back to single PutObject operation.
-		if errResp.Code == "NotImplemented" {
-			// If size of file is greater than '5GiB' fail.
-			if fileSize > maxSinglePutObjectSize {
-				return 0, ErrEntityTooLarge(fileSize, maxSinglePutObjectSize, bucketName, objectName)
-			}
-			// Fall back to uploading as single PutObject operation.
-			return c.putObjectSingle(bucketName, objectName, fileReader, fileSize, objMetadata, nil)
-		}
-		return n, err
-	}
-	return n, nil
-}
-
-// putObjectMultipartFromFile - Creates object from contents of *os.File
-//
-// NOTE: This function is meant to be used for readers with local
-// file as in *os.File. This function effectively utilizes file
-// system capabilities of reading from specific sections and not
-// having to create temporary files.
-func (c Client) putObjectMultipartFromFile(bucketName, objectName string, fileReader io.ReaderAt, fileSize int64, metaData map[string][]string, progress io.Reader) (int64, error) {
-	// Input validation.
-	if err := s3utils.CheckValidBucketName(bucketName); err != nil {
-		return 0, err
-	}
-	if err := s3utils.CheckValidObjectName(objectName); err != nil {
-		return 0, err
-	}
-
-	// Initiate a new multipart upload.
-	uploadID, err := c.newUploadID(bucketName, objectName, metaData)
-	if err != nil {
-		return 0, err
-	}
-
-	// Total data read and written to server. should be equal to 'size' at the end of the call.
-	var totalUploadedSize int64
-
-	// Complete multipart upload.
-	var complMultipartUpload completeMultipartUpload
-
-	// Calculate the optimal parts info for a given size.
-	totalPartsCount, partSize, lastPartSize, err := optimalPartInfo(fileSize)
-	if err != nil {
-		return 0, err
-	}
-
-	// Create a channel to communicate a part was uploaded.
-	// Buffer this to 10000, the maximum number of parts allowed by S3.
-	uploadedPartsCh := make(chan uploadedPartRes, 10000)
-
-	// Create a channel to communicate which part to upload.
-	// Buffer this to 10000, the maximum number of parts allowed by S3.
-	uploadPartsCh := make(chan uploadPartReq, 10000)
-
-	// Just for readability.
-	lastPartNumber := totalPartsCount
-
-	// Initialize parts uploaded map.
-	partsInfo := make(map[int]ObjectPart)
-
-	// Send each part through the partUploadCh to be uploaded.
-	for p := 1; p <= totalPartsCount; p++ {
-		part, ok := partsInfo[p]
-		if ok {
-			uploadPartsCh <- uploadPartReq{PartNum: p, Part: &part}
-		} else {
-			uploadPartsCh <- uploadPartReq{PartNum: p, Part: nil}
-		}
-	}
-	close(uploadPartsCh)
-
-	// Use three 'workers' to upload parts in parallel.
-	for w := 1; w <= totalWorkers; w++ {
-		go func() {
-			// Deal with each part as it comes through the channel.
-			for uploadReq := range uploadPartsCh {
-				// Add hash algorithms that need to be calculated by computeHash()
-				// In case of a non-v4 signature or https connection, sha256 is not needed.
-				hashAlgos, hashSums := c.hashMaterials()
-
-				// If partNumber was not uploaded we calculate the missing
-				// part offset and size. For all other part numbers we
-				// calculate offset based on multiples of partSize.
-				readOffset := int64(uploadReq.PartNum-1) * partSize
-				missingPartSize := partSize
-
-				// As a special case if partNumber is lastPartNumber, we
-				// calculate the offset based on the last part size.
-				if uploadReq.PartNum == lastPartNumber {
-					readOffset = (fileSize - lastPartSize)
-					missingPartSize = lastPartSize
-				}
-
-				// Get a section reader on a particular offset.
-				sectionReader := io.NewSectionReader(fileReader, readOffset, missingPartSize)
-				var prtSize int64
-				var err error
-
-				prtSize, err = computeHash(hashAlgos, hashSums, sectionReader)
-				if err != nil {
-					uploadedPartsCh <- uploadedPartRes{
-						Error: err,
-					}
-					// Exit the goroutine.
-					return
-				}
-
-				// Proceed to upload the part.
-				var objPart ObjectPart
-				objPart, err = c.uploadPart(bucketName, objectName, uploadID, sectionReader, uploadReq.PartNum,
-					hashSums["md5"], hashSums["sha256"], prtSize)
-				if err != nil {
-					uploadedPartsCh <- uploadedPartRes{
-						Error: err,
-					}
-					// Exit the goroutine.
-					return
-				}
-
-				// Save successfully uploaded part metadata.
-				uploadReq.Part = &objPart
-
-				// Return through the channel the part size.
-				uploadedPartsCh <- uploadedPartRes{
-					Size:    missingPartSize,
-					PartNum: uploadReq.PartNum,
-					Part:    uploadReq.Part,
-					Error:   nil,
-				}
-			}
-		}()
-	}
-
-	// Retrieve each uploaded part once it is done.
-	for u := 1; u <= totalPartsCount; u++ {
-		uploadRes := <-uploadedPartsCh
-		if uploadRes.Error != nil {
-			return totalUploadedSize, uploadRes.Error
-		}
-		// Retrieve each uploaded part and store it to be completed.
-		part := uploadRes.Part
-		if part == nil {
-			return totalUploadedSize, ErrInvalidArgument(fmt.Sprintf("Missing part number %d", uploadRes.PartNum))
-		}
-		// Update the total uploaded size.
-		totalUploadedSize += uploadRes.Size
-		// Update the progress bar if there is one.
-		if progress != nil {
-			if _, err = io.CopyN(ioutil.Discard, progress, uploadRes.Size); err != nil {
-				return totalUploadedSize, err
-			}
-		}
-		// Store the part to be completed.
-		complMultipartUpload.Parts = append(complMultipartUpload.Parts, CompletePart{
-			ETag:       part.ETag,
-			PartNumber: part.PartNumber,
-		})
-	}
-
-	// Verify if we uploaded all data.
-	if totalUploadedSize != fileSize {
-		return totalUploadedSize, ErrUnexpectedEOF(totalUploadedSize, fileSize, bucketName, objectName)
-	}
-
-	// Sort all completed parts.
-	sort.Sort(completedParts(complMultipartUpload.Parts))
-	_, err = c.completeMultipartUpload(bucketName, objectName, uploadID, complMultipartUpload)
-	if err != nil {
-		return totalUploadedSize, err
-	}
-
-	// Return final size.
-	return totalUploadedSize, nil
+
+	return c.putObjectCommon(bucketName, objectName, fileReader, fileSize, objMetadata, nil)
}
257 vendor/github.com/minio/minio-go/api-put-object-multipart.go (generated, vendored)
@ -24,7 +24,7 @@ import (
"io/ioutil"
"net/http"
"net/url"
"os"
"runtime/debug"
"sort"
"strconv"
"strings"
@ -32,228 +32,117 @@ import (
"github.com/minio/minio-go/pkg/s3utils"
)

// Comprehensive put object operation involving multipart uploads.
//
// Following code handles these types of readers.
//
// - *os.File
// - *minio.Object
// - Any reader which has a method 'ReadAt()'
//
func (c Client) putObjectMultipart(bucketName, objectName string, reader io.Reader, size int64, metaData map[string][]string, progress io.Reader) (n int64, err error) {
if size > 0 && size > minPartSize {
// Verify if reader is *os.File, then use file system functionalities.
if isFile(reader) {
return c.putObjectMultipartFromFile(bucketName, objectName, reader.(*os.File), size, metaData, progress)
}
// Verify if reader is *minio.Object or io.ReaderAt.
// NOTE: Verification of object is kept for a specific purpose
// while it is going to be duck typed similar to io.ReaderAt.
// It is to indicate that *minio.Object implements io.ReaderAt.
// and such a functionality is used in the subsequent code
// path.
if isObject(reader) || isReadAt(reader) {
return c.putObjectMultipartFromReadAt(bucketName, objectName, reader.(io.ReaderAt), size, metaData, progress)
func (c Client) putObjectMultipart(bucketName, objectName string, reader io.Reader, size int64,
metadata map[string][]string, progress io.Reader) (n int64, err error) {
n, err = c.putObjectMultipartNoStream(bucketName, objectName, reader, metadata, progress)
if err != nil {
errResp := ToErrorResponse(err)
// Verify if multipart functionality is not available, if not
// fall back to single PutObject operation.
if errResp.Code == "AccessDenied" && strings.Contains(errResp.Message, "Access Denied") {
// Verify if size of reader is greater than '5GiB'.
if size > maxSinglePutObjectSize {
return 0, ErrEntityTooLarge(size, maxSinglePutObjectSize, bucketName, objectName)
}
// Fall back to uploading as single PutObject operation.
return c.putObjectNoChecksum(bucketName, objectName, reader, size, metadata, progress)
}
}
// For any other data size and reader type we do generic multipart
// approach by staging data in temporary files and uploading them.
return c.putObjectMultipartStream(bucketName, objectName, reader, size, metaData, progress)
return n, err
}
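The fallback in putObjectMultipart keys off the error code surfaced by ToErrorResponse. A hedged caller-side sketch of the same check; canFallBackToSinglePut and the 5GiB constant are assumptions for illustration, only minio.ToErrorResponse is the library's real API:

package main

import (
	"log"

	minio "github.com/minio/minio-go"
)

// canFallBackToSinglePut (hypothetical helper) reports whether a failed
// multipart upload could be retried as one atomic PUT: the server denied
// multipart calls and the payload fits the single-PUT ceiling.
func canFallBackToSinglePut(err error, size int64) bool {
	const maxSinglePut = 5 * 1024 * 1024 * 1024 // 5GiB, assumed S3 limit
	errResp := minio.ToErrorResponse(err)
	return errResp.Code == "AccessDenied" && size <= maxSinglePut
}

func main() {
	// A nil error carries no code, so no fallback is suggested.
	log.Println(canFallBackToSinglePut(nil, 1024)) // false
}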
// putObjectMultipartStreamNoChecksum - upload a large object using
// multipart upload and streaming signature for signing payload.
func (c Client) putObjectMultipartStreamNoChecksum(bucketName, objectName string,
reader io.Reader, size int64, metadata map[string][]string, progress io.Reader) (int64, error) {

func (c Client) putObjectMultipartNoStream(bucketName, objectName string, reader io.Reader, metadata map[string][]string, progress io.Reader) (n int64, err error) {
// Input validation.
if err := s3utils.CheckValidBucketName(bucketName); err != nil {
if err = s3utils.CheckValidBucketName(bucketName); err != nil {
return 0, err
}
if err := s3utils.CheckValidObjectName(objectName); err != nil {
if err = s3utils.CheckValidObjectName(objectName); err != nil {
return 0, err
}

// Initiates a new multipart request
// Total data read and written to server. should be equal to
// 'size' at the end of the call.
var totalUploadedSize int64

// Complete multipart upload.
var complMultipartUpload completeMultipartUpload

// Calculate the optimal parts info for a given size.
totalPartsCount, partSize, _, err := optimalPartInfo(-1)
if err != nil {
return 0, err
}

// Initiate a new multipart upload.
uploadID, err := c.newUploadID(bucketName, objectName, metadata)
if err != nil {
return 0, err
}

// Calculate the optimal parts info for a given size.
totalPartsCount, partSize, lastPartSize, err := optimalPartInfo(size)
if err != nil {
return 0, err
}

// Total data read and written to server. should be equal to 'size' at the end of the call.
var totalUploadedSize int64

// Initialize parts uploaded map.
partsInfo := make(map[int]ObjectPart)

// Part number always starts with '1'.
var partNumber int
for partNumber = 1; partNumber <= totalPartsCount; partNumber++ {
// Update progress reader appropriately to the latest offset
// as we read from the source.
hookReader := newHook(reader, progress)

// Proceed to upload the part.
if partNumber == totalPartsCount {
partSize = lastPartSize
}

var objPart ObjectPart
objPart, err = c.uploadPart(bucketName, objectName, uploadID,
io.LimitReader(hookReader, partSize), partNumber, nil, nil, partSize)
// For unknown size, Read EOF we break away.
// We do not have to upload till totalPartsCount.
if err == io.EOF && size < 0 {
break
}

defer func() {
if err != nil {
return totalUploadedSize, err
c.abortMultipartUpload(bucketName, objectName, uploadID)
}

// Save successfully uploaded part metadata.
partsInfo[partNumber] = objPart

// Save successfully uploaded size.
totalUploadedSize += partSize
}

// Verify if we uploaded all the data.
if size > 0 {
if totalUploadedSize != size {
return totalUploadedSize, ErrUnexpectedEOF(totalUploadedSize, size, bucketName, objectName)
}
}

// Complete multipart upload.
var complMultipartUpload completeMultipartUpload

// Loop over total uploaded parts to save them in
// Parts array before completing the multipart request.
for i := 1; i < partNumber; i++ {
part, ok := partsInfo[i]
if !ok {
return 0, ErrInvalidArgument(fmt.Sprintf("Missing part number %d", i))
}
complMultipartUpload.Parts = append(complMultipartUpload.Parts, CompletePart{
ETag: part.ETag,
PartNumber: part.PartNumber,
})
}

// Sort all completed parts.
sort.Sort(completedParts(complMultipartUpload.Parts))
_, err = c.completeMultipartUpload(bucketName, objectName, uploadID, complMultipartUpload)
if err != nil {
return totalUploadedSize, err
}

// Return final size.
return totalUploadedSize, nil
}
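putObjectMultipartNoStream slices an unbounded stream into parts by re-wrapping the source in io.LimitReader on every iteration. A self-contained sketch of that slicing idea, with toy sizes:

package main

import (
	"fmt"
	"io"
	"io/ioutil"
	"strings"
)

func main() {
	src := strings.NewReader("abcdefghij") // stands in for the upload stream
	const partSize = 4

	for part := 1; ; part++ {
		// Each LimitReader hands at most partSize bytes to the "uploader".
		data, err := ioutil.ReadAll(io.LimitReader(src, partSize))
		if err != nil {
			panic(err)
		}
		if len(data) == 0 {
			break // source drained, mirroring the io.EOF break above
		}
		fmt.Printf("part %d: %q\n", part, data)
	}
}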
// putObjectStream uploads files bigger than 64MiB, and also supports
// special case where size is unknown i.e '-1'.
func (c Client) putObjectMultipartStream(bucketName, objectName string, reader io.Reader, size int64, metaData map[string][]string, progress io.Reader) (n int64, err error) {
// Input validation.
if err := s3utils.CheckValidBucketName(bucketName); err != nil {
return 0, err
}
if err := s3utils.CheckValidObjectName(objectName); err != nil {
return 0, err
}

// Total data read and written to server. should be equal to 'size' at the end of the call.
var totalUploadedSize int64

// Complete multipart upload.
var complMultipartUpload completeMultipartUpload

// Initiate a new multipart upload.
uploadID, err := c.newUploadID(bucketName, objectName, metaData)
if err != nil {
return 0, err
}

// Calculate the optimal parts info for a given size.
totalPartsCount, partSize, _, err := optimalPartInfo(size)
if err != nil {
return 0, err
}
}()

// Part number always starts with '1'.
partNumber := 1

// Initialize a temporary buffer.
tmpBuffer := new(bytes.Buffer)

// Initialize parts uploaded map.
partsInfo := make(map[int]ObjectPart)

// Create a buffer.
buf := make([]byte, partSize)
defer debug.FreeOSMemory()

for partNumber <= totalPartsCount {
// Choose hash algorithms to be calculated by hashCopyN, avoid sha256
// with non-v4 signature request or HTTPS connection
// Choose hash algorithms to be calculated by hashCopyN,
// avoid sha256 with non-v4 signature request or
// HTTPS connection.
hashAlgos, hashSums := c.hashMaterials()

// Calculates hash sums while copying partSize bytes into tmpBuffer.
prtSize, rErr := hashCopyN(hashAlgos, hashSums, tmpBuffer, reader, partSize)
if rErr != nil && rErr != io.EOF {
length, rErr := io.ReadFull(reader, buf)
if rErr == io.EOF {
break
}
if rErr != nil && rErr != io.ErrUnexpectedEOF {
return 0, rErr
}

var reader io.Reader
// Calculates hash sums while copying partSize bytes into cw.
for k, v := range hashAlgos {
v.Write(buf[:length])
hashSums[k] = v.Sum(nil)
}

// Update progress reader appropriately to the latest offset
// as we read from the source.
reader = newHook(tmpBuffer, progress)
rd := newHook(bytes.NewReader(buf[:length]), progress)

// Proceed to upload the part.
var objPart ObjectPart
objPart, err = c.uploadPart(bucketName, objectName, uploadID, reader, partNumber, hashSums["md5"], hashSums["sha256"], prtSize)
objPart, err = c.uploadPart(bucketName, objectName, uploadID, rd, partNumber,
hashSums["md5"], hashSums["sha256"], int64(length), metadata)
if err != nil {
// Reset the temporary buffer upon any error.
tmpBuffer.Reset()
return totalUploadedSize, err
}

// Save successfully uploaded part metadata.
partsInfo[partNumber] = objPart

// Update the progress reader for the skipped part.
if progress != nil {
if _, err = io.CopyN(ioutil.Discard, progress, prtSize); err != nil {
return totalUploadedSize, err
}
}

// Reset the temporary buffer.
tmpBuffer.Reset()

// Save successfully uploaded size.
totalUploadedSize += prtSize
totalUploadedSize += int64(length)

// Increment part number.
partNumber++

// For unknown size, Read EOF we break away.
// We do not have to upload till totalPartsCount.
if size < 0 && rErr == io.EOF {
if rErr == io.EOF {
break
}
}

// Verify if we uploaded all the data.
if size > 0 {
if totalUploadedSize != size {
return totalUploadedSize, ErrUnexpectedEOF(totalUploadedSize, size, bucketName, objectName)
}
}

// Loop over total uploaded parts to save them in
// Parts array before completing the multipart request.
for i := 1; i < partNumber; i++ {
@ -269,8 +158,7 @@ func (c Client) putObjectMultipartStream(bucketName, objectName string, reader i

// Sort all completed parts.
sort.Sort(completedParts(complMultipartUpload.Parts))
_, err = c.completeMultipartUpload(bucketName, objectName, uploadID, complMultipartUpload)
if err != nil {
if _, err = c.completeMultipartUpload(bucketName, objectName, uploadID, complMultipartUpload); err != nil {
return totalUploadedSize, err
}
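The replacement loop above reads each part into one reusable buffer with io.ReadFull instead of staging it in a temporary buffer, which is what lets uploads stream without large allocations. The error contract is the subtle part; a small runnable sketch:

package main

import (
	"fmt"
	"io"
	"strings"
)

func main() {
	src := strings.NewReader("hello world")
	buf := make([]byte, 4) // one reusable part-sized buffer

	for {
		n, err := io.ReadFull(src, buf)
		if err == io.EOF {
			break // zero bytes left: nothing more to upload
		}
		// io.ErrUnexpectedEOF only means a short final part; keep it.
		if err != nil && err != io.ErrUnexpectedEOF {
			panic(err)
		}
		fmt.Printf("upload %q\n", buf[:n])
		if err == io.ErrUnexpectedEOF {
			break
		}
	}
}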
@ -279,7 +167,7 @@ func (c Client) putObjectMultipartStream(bucketName, objectName string, reader i
}

// initiateMultipartUpload - Initiates a multipart upload and returns an upload ID.
func (c Client) initiateMultipartUpload(bucketName, objectName string, metaData map[string][]string) (initiateMultipartUploadResult, error) {
func (c Client) initiateMultipartUpload(bucketName, objectName string, metadata map[string][]string) (initiateMultipartUploadResult, error) {
// Input validation.
if err := s3utils.CheckValidBucketName(bucketName); err != nil {
return initiateMultipartUploadResult{}, err
@ -294,14 +182,14 @@ func (c Client) initiateMultipartUpload(bucketName, objectName string, metaData

// Set ContentType header.
customHeader := make(http.Header)
for k, v := range metaData {
for k, v := range metadata {
if len(v) > 0 {
customHeader.Set(k, v[0])
}
}

// Set a default content-type header if the latter is not provided
if v, ok := metaData["Content-Type"]; !ok || len(v) == 0 {
if v, ok := metadata["Content-Type"]; !ok || len(v) == 0 {
customHeader.Set("Content-Type", "application/octet-stream")
}

@ -332,8 +220,11 @@ func (c Client) initiateMultipartUpload(bucketName, objectName string, metaData
return initiateMultipartUploadResult, nil
}

const serverEncryptionKeyPrefix = "x-amz-server-side-encryption"

// uploadPart - Uploads a part in a multipart upload.
func (c Client) uploadPart(bucketName, objectName, uploadID string, reader io.Reader, partNumber int, md5Sum, sha256Sum []byte, size int64) (ObjectPart, error) {
func (c Client) uploadPart(bucketName, objectName, uploadID string, reader io.Reader,
partNumber int, md5Sum, sha256Sum []byte, size int64, metadata map[string][]string) (ObjectPart, error) {
// Input validation.
if err := s3utils.CheckValidBucketName(bucketName); err != nil {
return ObjectPart{}, err
@ -361,10 +252,21 @@ func (c Client) uploadPart(bucketName, objectName, uploadID string, reader io.Re
// Set upload id.
urlValues.Set("uploadId", uploadID)

// Set encryption headers, if any.
customHeader := make(http.Header)
for k, v := range metadata {
if len(v) > 0 {
if strings.HasPrefix(strings.ToLower(k), serverEncryptionKeyPrefix) {
customHeader.Set(k, v[0])
}
}
}

reqMetadata := requestMetadata{
bucketName: bucketName,
objectName: objectName,
queryValues: urlValues,
customHeader: customHeader,
contentBody: reader,
contentLength: size,
contentMD5Bytes: md5Sum,
@ -393,7 +295,8 @@ func (c Client) uploadPart(bucketName, objectName, uploadID string, reader io.Re
}

// completeMultipartUpload - Completes a multipart upload by assembling previously uploaded parts.
func (c Client) completeMultipartUpload(bucketName, objectName, uploadID string, complete completeMultipartUpload) (completeMultipartUploadResult, error) {
func (c Client) completeMultipartUpload(bucketName, objectName, uploadID string,
complete completeMultipartUpload) (completeMultipartUploadResult, error) {
// Input validation.
if err := s3utils.CheckValidBucketName(bucketName); err != nil {
return completeMultipartUploadResult{}, err
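Note how uploadPart now forwards only metadata keys under the x-amz-server-side-encryption prefix: per-part requests may carry encryption headers, while ordinary metadata stays fixed at initiate time. The same case-insensitive filter, isolated as a runnable sketch:

package main

import (
	"fmt"
	"net/http"
	"strings"
)

const ssePrefix = "x-amz-server-side-encryption" // prefix the diff introduces

// sseHeaders copies only server-side-encryption keys into request headers.
func sseHeaders(metadata map[string][]string) http.Header {
	h := make(http.Header)
	for k, v := range metadata {
		if len(v) > 0 && strings.HasPrefix(strings.ToLower(k), ssePrefix) {
			h.Set(k, v[0])
		}
	}
	return h
}

func main() {
	md := map[string][]string{
		"Content-Type": {"text/plain"},
		"X-Amz-Server-Side-Encryption-Customer-Algorithm": {"AES256"},
	}
	fmt.Println(sseHeaders(md)) // only the SSE header survives
}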
191 vendor/github.com/minio/minio-go/api-put-object-progress.go generated vendored
@ -1,191 +0,0 @@
/*
* Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/

package minio

import (
"io"
"strings"

"github.com/minio/minio-go/pkg/credentials"
"github.com/minio/minio-go/pkg/encrypt"
"github.com/minio/minio-go/pkg/s3utils"
)

// PutObjectWithProgress - with progress.
func (c Client) PutObjectWithProgress(bucketName, objectName string, reader io.Reader, contentType string, progress io.Reader) (n int64, err error) {
metaData := make(map[string][]string)
metaData["Content-Type"] = []string{contentType}
return c.PutObjectWithMetadata(bucketName, objectName, reader, metaData, progress)
}

// PutEncryptedObject - Encrypt and store object.
func (c Client) PutEncryptedObject(bucketName, objectName string, reader io.Reader, encryptMaterials encrypt.Materials, metaData map[string][]string, progress io.Reader) (n int64, err error) {

if encryptMaterials == nil {
return 0, ErrInvalidArgument("Unable to recognize empty encryption properties")
}

if err := encryptMaterials.SetupEncryptMode(reader); err != nil {
return 0, err
}

if metaData == nil {
metaData = make(map[string][]string)
}

// Set the necessary encryption headers, for future decryption.
metaData[amzHeaderIV] = []string{encryptMaterials.GetIV()}
metaData[amzHeaderKey] = []string{encryptMaterials.GetKey()}
metaData[amzHeaderMatDesc] = []string{encryptMaterials.GetDesc()}

return c.PutObjectWithMetadata(bucketName, objectName, encryptMaterials, metaData, progress)
}

// PutObjectWithMetadata - with metadata.
func (c Client) PutObjectWithMetadata(bucketName, objectName string, reader io.Reader, metaData map[string][]string, progress io.Reader) (n int64, err error) {
// Input validation.
if err := s3utils.CheckValidBucketName(bucketName); err != nil {
return 0, err
}
if err := s3utils.CheckValidObjectName(objectName); err != nil {
return 0, err
}
if reader == nil {
return 0, ErrInvalidArgument("Input reader is invalid, cannot be nil.")
}

// Size of the object.
var size int64

// Get reader size.
size, err = getReaderSize(reader)
if err != nil {
return 0, err
}

// Check for largest object size allowed.
if size > int64(maxMultipartPutObjectSize) {
return 0, ErrEntityTooLarge(size, maxMultipartPutObjectSize, bucketName, objectName)
}

// NOTE: Google Cloud Storage does not implement Amazon S3 Compatible multipart PUT.
if s3utils.IsGoogleEndpoint(c.endpointURL) {
// Do not compute MD5 for Google Cloud Storage.
return c.putObjectNoChecksum(bucketName, objectName, reader, size, metaData, progress)
}

// putSmall object.
if size < minPartSize && size >= 0 {
return c.putObjectSingle(bucketName, objectName, reader, size, metaData, progress)
}

// For all sizes greater than 5MiB do multipart.
n, err = c.putObjectMultipart(bucketName, objectName, reader, size, metaData, progress)
if err != nil {
errResp := ToErrorResponse(err)
// Verify if multipart functionality is not available, if not
// fall back to single PutObject operation.
if errResp.Code == "AccessDenied" && strings.Contains(errResp.Message, "Access Denied") {
// Verify if size of reader is greater than '5GiB'.
if size > maxSinglePutObjectSize {
return 0, ErrEntityTooLarge(size, maxSinglePutObjectSize, bucketName, objectName)
}
// Fall back to uploading as single PutObject operation.
return c.putObjectSingle(bucketName, objectName, reader, size, metaData, progress)
}
return n, err
}
return n, nil
}

// PutObjectStreaming using AWS streaming signature V4
func (c Client) PutObjectStreaming(bucketName, objectName string, reader io.Reader) (n int64, err error) {
return c.PutObjectStreamingWithProgress(bucketName, objectName, reader, nil, nil)
}

// PutObjectStreamingWithMetadata using AWS streaming signature V4
func (c Client) PutObjectStreamingWithMetadata(bucketName, objectName string, reader io.Reader, metadata map[string][]string) (n int64, err error) {
return c.PutObjectStreamingWithProgress(bucketName, objectName, reader, metadata, nil)
}

// PutObjectStreamingWithProgress using AWS streaming signature V4
func (c Client) PutObjectStreamingWithProgress(bucketName, objectName string, reader io.Reader, metadata map[string][]string, progress io.Reader) (n int64, err error) {
// NOTE: Streaming signature is not supported by GCS.
if s3utils.IsGoogleEndpoint(c.endpointURL) {
return 0, ErrorResponse{
Code: "NotImplemented",
Message: "AWS streaming signature v4 is not supported with Google Cloud Storage",
Key: objectName,
BucketName: bucketName,
}
}

if c.overrideSignerType.IsV2() {
return 0, ErrorResponse{
Code: "NotImplemented",
Message: "AWS streaming signature v4 is not supported with minio client initialized for AWS signature v2",
Key: objectName,
BucketName: bucketName,
}
}

// Size of the object.
var size int64

// Get reader size.
size, err = getReaderSize(reader)
if err != nil {
return 0, err
}

// Check for largest object size allowed.
if size > int64(maxMultipartPutObjectSize) {
return 0, ErrEntityTooLarge(size, maxMultipartPutObjectSize, bucketName, objectName)
}

// If size cannot be found on a stream, it is not possible
// to upload using streaming signature, fall back to multipart.
if size < 0 {
return c.putObjectMultipartStream(bucketName, objectName, reader, size, metadata, progress)
}

// Set streaming signature.
c.overrideSignerType = credentials.SignatureV4Streaming

if size < minPartSize && size >= 0 {
return c.putObjectNoChecksum(bucketName, objectName, reader, size, metadata, progress)
}

// For all sizes greater than 64MiB do multipart.
n, err = c.putObjectMultipartStreamNoChecksum(bucketName, objectName, reader, size, metadata, progress)
if err != nil {
errResp := ToErrorResponse(err)
// Verify if multipart functionality is not available, if not
// fall back to single PutObject operation.
if errResp.Code == "AccessDenied" && strings.Contains(errResp.Message, "Access Denied") {
// Verify if size of reader is greater than '5GiB'.
if size > maxSinglePutObjectSize {
return 0, ErrEntityTooLarge(size, maxSinglePutObjectSize, bucketName, objectName)
}
// Fall back to uploading as single PutObject operation.
return c.putObjectNoChecksum(bucketName, objectName, reader, size, metadata, progress)
}
return n, err
}

return n, nil
}
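The progress io.Reader contract used throughout this deleted file (and kept by its replacements) is easy to miss: after n bytes reach the server, the library reads n bytes from the progress reader via io.CopyN. A sketch of a counter that satisfies it; byteCounter is a hypothetical name:

package main

import (
	"fmt"
	"sync/atomic"
)

// byteCounter satisfies the progress contract: each Read call accounts
// len(p) freshly uploaded bytes and must report them as consumed.
type byteCounter struct{ total int64 }

func (b *byteCounter) Read(p []byte) (int, error) {
	atomic.AddInt64(&b.total, int64(len(p)))
	return len(p), nil
}

func main() {
	p := &byteCounter{} // pass as the progress argument of the Put* calls
	buf := make([]byte, 1024)
	n, _ := p.Read(buf) // simulate the library accounting 1KiB
	fmt.Println(n, atomic.LoadInt64(&p.total)) // 1024 1024
}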
219 vendor/github.com/minio/minio-go/api-put-object-readat.go generated vendored
@ -1,219 +0,0 @@
/*
* Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015, 2016 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/

package minio

import (
"bytes"
"fmt"
"io"
"io/ioutil"
"sort"

"github.com/minio/minio-go/pkg/s3utils"
)

// uploadedPartRes - the response received from a part upload.
type uploadedPartRes struct {
Error error // Any error encountered while uploading the part.
PartNum int // Number of the part uploaded.
Size int64 // Size of the part uploaded.
Part *ObjectPart
}

type uploadPartReq struct {
PartNum int // Number of the part uploaded.
Part *ObjectPart // Size of the part uploaded.
}

// putObjectMultipartFromReadAt - Uploads files bigger than 5MiB. Supports reader
// of type which implements io.ReaderAt interface (ReadAt method).
//
// NOTE: This function is meant to be used for all readers which
// implement io.ReaderAt which allows us for resuming multipart
// uploads but reading at an offset, which would avoid re-read the
// data which was already uploaded. Internally this function uses
// temporary files for staging all the data, these temporary files are
// cleaned automatically when the caller i.e http client closes the
// stream after uploading all the contents successfully.
func (c Client) putObjectMultipartFromReadAt(bucketName, objectName string, reader io.ReaderAt, size int64, metaData map[string][]string, progress io.Reader) (n int64, err error) {
// Input validation.
if err := s3utils.CheckValidBucketName(bucketName); err != nil {
return 0, err
}
if err := s3utils.CheckValidObjectName(objectName); err != nil {
return 0, err
}

// Initiate a new multipart upload.
uploadID, err := c.newUploadID(bucketName, objectName, metaData)
if err != nil {
return 0, err
}

// Total data read and written to server. should be equal to 'size' at the end of the call.
var totalUploadedSize int64

// Complete multipart upload.
var complMultipartUpload completeMultipartUpload

// Calculate the optimal parts info for a given size.
totalPartsCount, partSize, lastPartSize, err := optimalPartInfo(size)
if err != nil {
return 0, err
}

// Declare a channel that sends the next part number to be uploaded.
// Buffered to 10000 because that's the maximum number of parts allowed
// by S3.
uploadPartsCh := make(chan uploadPartReq, 10000)

// Declare a channel that sends back the response of a part upload.
// Buffered to 10000 because that's the maximum number of parts allowed
// by S3.
uploadedPartsCh := make(chan uploadedPartRes, 10000)

// Used for readability, lastPartNumber is always totalPartsCount.
lastPartNumber := totalPartsCount

// Initialize parts uploaded map.
partsInfo := make(map[int]ObjectPart)

// Send each part number to the channel to be processed.
for p := 1; p <= totalPartsCount; p++ {
part, ok := partsInfo[p]
if ok {
uploadPartsCh <- uploadPartReq{PartNum: p, Part: &part}
} else {
uploadPartsCh <- uploadPartReq{PartNum: p, Part: nil}
}
}
close(uploadPartsCh)

// Receive each part number from the channel allowing three parallel uploads.
for w := 1; w <= totalWorkers; w++ {
go func() {
// Read defaults to reading at 5MiB buffer.
readAtBuffer := make([]byte, optimalReadBufferSize)

// Each worker will draw from the part channel and upload in parallel.
for uploadReq := range uploadPartsCh {
// Declare a new tmpBuffer.
tmpBuffer := new(bytes.Buffer)

// If partNumber was not uploaded we calculate the missing
// part offset and size. For all other part numbers we
// calculate offset based on multiples of partSize.
readOffset := int64(uploadReq.PartNum-1) * partSize
missingPartSize := partSize

// As a special case if partNumber is lastPartNumber, we
// calculate the offset based on the last part size.
if uploadReq.PartNum == lastPartNumber {
readOffset = (size - lastPartSize)
missingPartSize = lastPartSize
}

// Get a section reader on a particular offset.
sectionReader := io.NewSectionReader(reader, readOffset, missingPartSize)

// Choose the needed hash algorithms to be calculated by hashCopyBuffer.
// Sha256 is avoided in non-v4 signature requests or HTTPS connections
hashAlgos, hashSums := c.hashMaterials()

var prtSize int64
var err error
prtSize, err = hashCopyBuffer(hashAlgos, hashSums, tmpBuffer, sectionReader, readAtBuffer)
if err != nil {
// Send the error back through the channel.
uploadedPartsCh <- uploadedPartRes{
Size: 0,
Error: err,
}
// Exit the goroutine.
return
}

// Proceed to upload the part.
var objPart ObjectPart
objPart, err = c.uploadPart(bucketName, objectName, uploadID, tmpBuffer,
uploadReq.PartNum, hashSums["md5"], hashSums["sha256"], prtSize)
if err != nil {
uploadedPartsCh <- uploadedPartRes{
Size: 0,
Error: err,
}
// Exit the goroutine.
return
}

// Save successfully uploaded part metadata.
uploadReq.Part = &objPart

// Send successful part info through the channel.
uploadedPartsCh <- uploadedPartRes{
Size: missingPartSize,
PartNum: uploadReq.PartNum,
Part: uploadReq.Part,
Error: nil,
}
}
}()
}

// Gather the responses as they occur and update any
// progress bar.
for u := 1; u <= totalPartsCount; u++ {
uploadRes := <-uploadedPartsCh
if uploadRes.Error != nil {
return totalUploadedSize, uploadRes.Error
}
// Retrieve each uploaded part and store it to be completed.
// part, ok := partsInfo[uploadRes.PartNum]
part := uploadRes.Part
if part == nil {
return 0, ErrInvalidArgument(fmt.Sprintf("Missing part number %d", uploadRes.PartNum))
}
// Update the totalUploadedSize.
totalUploadedSize += uploadRes.Size
// Update the progress bar if there is one.
if progress != nil {
if _, err = io.CopyN(ioutil.Discard, progress, uploadRes.Size); err != nil {
return totalUploadedSize, err
}
}
// Store the parts to be completed in order.
complMultipartUpload.Parts = append(complMultipartUpload.Parts, CompletePart{
ETag: part.ETag,
PartNumber: part.PartNumber,
})
}

// Verify if we uploaded all the data.
if totalUploadedSize != size {
return totalUploadedSize, ErrUnexpectedEOF(totalUploadedSize, size, bucketName, objectName)
}

// Sort all completed parts.
sort.Sort(completedParts(complMultipartUpload.Parts))
_, err = c.completeMultipartUpload(bucketName, objectName, uploadID, complMultipartUpload)
if err != nil {
return totalUploadedSize, err
}

// Return final size.
return totalUploadedSize, nil
}
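The deleted function above is the template for the channel fan-out that survives in the new streaming file: part numbers in one buffered channel, a fixed worker pool draining it, results on a second channel. A stripped-down, runnable version of the pattern (the library pins the pool at three workers via totalWorkers; the sizes here are arbitrary):

package main

import (
	"fmt"
	"sync"
)

func main() {
	const totalParts, totalWorkers = 7, 3
	jobs := make(chan int, totalParts)
	results := make(chan string, totalParts)

	// Queue every part number up front, then close the channel so the
	// workers' range loops terminate once the queue drains.
	for p := 1; p <= totalParts; p++ {
		jobs <- p
	}
	close(jobs)

	var wg sync.WaitGroup
	for w := 0; w < totalWorkers; w++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			for part := range jobs {
				results <- fmt.Sprintf("uploaded part %d", part)
			}
		}()
	}
	wg.Wait()
	close(results)

	for r := range results {
		fmt.Println(r)
	}
}

The library itself skips the WaitGroup and instead receives exactly totalPartsCount results, which doubles as its error propagation path.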
436 vendor/github.com/minio/minio-go/api-put-object-streaming.go generated vendored Normal file
@ -0,0 +1,436 @@
/*
* Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2017 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/

package minio

import (
"fmt"
"io"
"net/http"
"sort"
"strings"

"github.com/minio/minio-go/pkg/s3utils"
)

// PutObjectStreaming using AWS streaming signature V4
func (c Client) PutObjectStreaming(bucketName, objectName string, reader io.Reader) (n int64, err error) {
return c.PutObjectWithProgress(bucketName, objectName, reader, nil, nil)
}

// putObjectMultipartStream - upload a large object using
// multipart upload and streaming signature for signing payload.
// Comprehensive put object operation involving multipart uploads.
//
// Following code handles these types of readers.
//
// - *os.File
// - *minio.Object
// - Any reader which has a method 'ReadAt()'
//
func (c Client) putObjectMultipartStream(bucketName, objectName string,
reader io.Reader, size int64, metadata map[string][]string, progress io.Reader) (n int64, err error) {

// Verify if reader is *minio.Object, *os.File or io.ReaderAt.
// NOTE: Verification of object is kept for a specific purpose
// while it is going to be duck typed similar to io.ReaderAt.
// It is to indicate that *minio.Object implements io.ReaderAt.
// and such a functionality is used in the subsequent code path.
if isFile(reader) || !isObject(reader) && isReadAt(reader) {
n, err = c.putObjectMultipartStreamFromReadAt(bucketName, objectName, reader.(io.ReaderAt), size, metadata, progress)
} else {
n, err = c.putObjectMultipartStreamNoChecksum(bucketName, objectName, reader, size, metadata, progress)
}
if err != nil {
errResp := ToErrorResponse(err)
// Verify if multipart functionality is not available, if not
// fall back to single PutObject operation.
if errResp.Code == "AccessDenied" && strings.Contains(errResp.Message, "Access Denied") {
// Verify if size of reader is greater than '5GiB'.
if size > maxSinglePutObjectSize {
return 0, ErrEntityTooLarge(size, maxSinglePutObjectSize, bucketName, objectName)
}
// Fall back to uploading as single PutObject operation.
return c.putObjectNoChecksum(bucketName, objectName, reader, size, metadata, progress)
}
}
return n, err
}
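The dispatch above duck-types the source: files and plain io.ReaderAt implementations take the parallel ReadAt path, everything else streams sequentially (the real code additionally excludes *minio.Object even though it implements ReadAt). A sketch of the same decision with bare type assertions:

package main

import (
	"fmt"
	"io"
	"os"
	"strings"
)

// pickPath mirrors the branch above: anything exposing ReadAt can be
// read at independent offsets by concurrent workers.
func pickPath(r io.Reader) string {
	if _, ok := r.(*os.File); ok {
		return "parallel ReadAt path"
	}
	if _, ok := r.(io.ReaderAt); ok {
		return "parallel ReadAt path"
	}
	return "sequential streaming path"
}

func main() {
	f, _ := os.Open(os.DevNull)
	defer f.Close()
	fmt.Println(pickPath(f))                      // parallel ReadAt path
	fmt.Println(pickPath(strings.NewReader("x"))) // parallel ReadAt path
	fmt.Println(pickPath(io.LimitReader(f, 1)))   // sequential streaming path
}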
// uploadedPartRes - the response received from a part upload.
type uploadedPartRes struct {
Error error // Any error encountered while uploading the part.
PartNum int // Number of the part uploaded.
Size int64 // Size of the part uploaded.
Part *ObjectPart
}

type uploadPartReq struct {
PartNum int // Number of the part uploaded.
Part *ObjectPart // Size of the part uploaded.
}

// putObjectMultipartFromReadAt - Uploads files bigger than 64MiB.
// Supports all readers which implements io.ReaderAt interface
// (ReadAt method).
//
// NOTE: This function is meant to be used for all readers which
// implement io.ReaderAt which allows us for resuming multipart
// uploads but reading at an offset, which would avoid re-read the
// data which was already uploaded. Internally this function uses
// temporary files for staging all the data, these temporary files are
// cleaned automatically when the caller i.e http client closes the
// stream after uploading all the contents successfully.
func (c Client) putObjectMultipartStreamFromReadAt(bucketName, objectName string,
reader io.ReaderAt, size int64, metadata map[string][]string, progress io.Reader) (n int64, err error) {
// Input validation.
if err = s3utils.CheckValidBucketName(bucketName); err != nil {
return 0, err
}
if err = s3utils.CheckValidObjectName(objectName); err != nil {
return 0, err
}

// Calculate the optimal parts info for a given size.
totalPartsCount, partSize, lastPartSize, err := optimalPartInfo(size)
if err != nil {
return 0, err
}

// Initiate a new multipart upload.
uploadID, err := c.newUploadID(bucketName, objectName, metadata)
if err != nil {
return 0, err
}

// Aborts the multipart upload in progress, if the
// function returns any error, since we do not resume
// we should purge the parts which have been uploaded
// to relinquish storage space.
defer func() {
if err != nil {
c.abortMultipartUpload(bucketName, objectName, uploadID)
}
}()

// Total data read and written to server. should be equal to 'size' at the end of the call.
var totalUploadedSize int64

// Complete multipart upload.
var complMultipartUpload completeMultipartUpload

// Declare a channel that sends the next part number to be uploaded.
// Buffered to 10000 because that's the maximum number of parts allowed
// by S3.
uploadPartsCh := make(chan uploadPartReq, 10000)

// Declare a channel that sends back the response of a part upload.
// Buffered to 10000 because that's the maximum number of parts allowed
// by S3.
uploadedPartsCh := make(chan uploadedPartRes, 10000)

// Used for readability, lastPartNumber is always totalPartsCount.
lastPartNumber := totalPartsCount

// Send each part number to the channel to be processed.
for p := 1; p <= totalPartsCount; p++ {
uploadPartsCh <- uploadPartReq{PartNum: p, Part: nil}
}
close(uploadPartsCh)

// Receive each part number from the channel allowing three parallel uploads.
for w := 1; w <= totalWorkers; w++ {
go func(partSize int64) {
// Each worker will draw from the part channel and upload in parallel.
for uploadReq := range uploadPartsCh {

// If partNumber was not uploaded we calculate the missing
// part offset and size. For all other part numbers we
// calculate offset based on multiples of partSize.
readOffset := int64(uploadReq.PartNum-1) * partSize

// As a special case if partNumber is lastPartNumber, we
// calculate the offset based on the last part size.
if uploadReq.PartNum == lastPartNumber {
readOffset = (size - lastPartSize)
partSize = lastPartSize
}

// Get a section reader on a particular offset.
sectionReader := newHook(io.NewSectionReader(reader, readOffset, partSize), progress)

// Proceed to upload the part.
var objPart ObjectPart
objPart, err = c.uploadPart(bucketName, objectName, uploadID,
sectionReader, uploadReq.PartNum,
nil, nil, partSize, metadata)
if err != nil {
uploadedPartsCh <- uploadedPartRes{
Size: 0,
Error: err,
}
// Exit the goroutine.
return
}

// Save successfully uploaded part metadata.
uploadReq.Part = &objPart

// Send successful part info through the channel.
uploadedPartsCh <- uploadedPartRes{
Size: objPart.Size,
PartNum: uploadReq.PartNum,
Part: uploadReq.Part,
Error: nil,
}
}
}(partSize)
}

// Gather the responses as they occur and update any
// progress bar.
for u := 1; u <= totalPartsCount; u++ {
uploadRes := <-uploadedPartsCh
if uploadRes.Error != nil {
return totalUploadedSize, uploadRes.Error
}
// Retrieve each uploaded part and store it to be completed.
// part, ok := partsInfo[uploadRes.PartNum]
part := uploadRes.Part
if part == nil {
return 0, ErrInvalidArgument(fmt.Sprintf("Missing part number %d", uploadRes.PartNum))
}
// Update the totalUploadedSize.
totalUploadedSize += uploadRes.Size
// Store the parts to be completed in order.
complMultipartUpload.Parts = append(complMultipartUpload.Parts, CompletePart{
ETag: part.ETag,
PartNumber: part.PartNumber,
})
}

// Verify if we uploaded all the data.
if totalUploadedSize != size {
return totalUploadedSize, ErrUnexpectedEOF(totalUploadedSize, size, bucketName, objectName)
}

// Sort all completed parts.
sort.Sort(completedParts(complMultipartUpload.Parts))
_, err = c.completeMultipartUpload(bucketName, objectName, uploadID, complMultipartUpload)
if err != nil {
return totalUploadedSize, err
}

// Return final size.
return totalUploadedSize, nil
}
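Each worker above derives its slice of the source with io.NewSectionReader at a part-number-dependent offset, so there is no shared read cursor to serialize on. The offset arithmetic, including the short final part, in isolation:

package main

import (
	"fmt"
	"io"
	"io/ioutil"
	"strings"
)

func main() {
	src := strings.NewReader("abcdefghij") // any io.ReaderAt stands in for the source
	size, partSize := int64(10), int64(4)
	lastPart := int((size + partSize - 1) / partSize) // 3 parts: 4 + 4 + 2 bytes
	lastPartSize := size - int64(lastPart-1)*partSize

	for p := 1; p <= lastPart; p++ {
		off, n := int64(p-1)*partSize, partSize
		if p == lastPart {
			off, n = size-lastPartSize, lastPartSize
		}
		// Each section reader is an independent cursor; workers can read
		// them concurrently without coordinating a shared offset.
		data, _ := ioutil.ReadAll(io.NewSectionReader(src, off, n))
		fmt.Printf("part %d @ offset %d: %q\n", p, off, data)
	}
}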
||||
func (c Client) putObjectMultipartStreamNoChecksum(bucketName, objectName string,
|
||||
reader io.Reader, size int64, metadata map[string][]string, progress io.Reader) (n int64, err error) {
|
||||
// Input validation.
|
||||
if err = s3utils.CheckValidBucketName(bucketName); err != nil {
|
||||
return 0, err
|
||||
}
|
||||
if err = s3utils.CheckValidObjectName(objectName); err != nil {
|
||||
return 0, err
|
||||
}
|
||||
|
||||
// Calculate the optimal parts info for a given size.
|
||||
totalPartsCount, partSize, lastPartSize, err := optimalPartInfo(size)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
|
||||
// Initiates a new multipart request
|
||||
uploadID, err := c.newUploadID(bucketName, objectName, metadata)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
|
||||
// Aborts the multipart upload if the function returns
|
||||
// any error, since we do not resume we should purge
|
||||
// the parts which have been uploaded to relinquish
|
||||
// storage space.
|
||||
defer func() {
|
||||
if err != nil {
|
||||
c.abortMultipartUpload(bucketName, objectName, uploadID)
|
||||
}
|
||||
}()
|
||||
|
||||
// Total data read and written to server. should be equal to 'size' at the end of the call.
|
||||
var totalUploadedSize int64
|
||||
|
||||
// Initialize parts uploaded map.
|
||||
partsInfo := make(map[int]ObjectPart)
|
||||
|
||||
// Part number always starts with '1'.
|
||||
var partNumber int
|
||||
for partNumber = 1; partNumber <= totalPartsCount; partNumber++ {
|
||||
// Update progress reader appropriately to the latest offset
|
||||
// as we read from the source.
|
||||
hookReader := newHook(reader, progress)
|
||||
|
||||
// Proceed to upload the part.
|
||||
if partNumber == totalPartsCount {
|
||||
partSize = lastPartSize
|
||||
}
|
||||
|
||||
var objPart ObjectPart
|
||||
objPart, err = c.uploadPart(bucketName, objectName, uploadID,
|
||||
io.LimitReader(hookReader, partSize),
|
||||
partNumber, nil, nil, partSize, metadata)
|
||||
if err != nil {
|
||||
return totalUploadedSize, err
|
||||
}
|
||||
|
||||
// Save successfully uploaded part metadata.
|
||||
partsInfo[partNumber] = objPart
|
||||
|
||||
// Save successfully uploaded size.
|
||||
totalUploadedSize += partSize
|
||||
}
|
||||
|
||||
// Verify if we uploaded all the data.
|
||||
if size > 0 {
|
||||
if totalUploadedSize != size {
|
||||
return totalUploadedSize, ErrUnexpectedEOF(totalUploadedSize, size, bucketName, objectName)
|
||||
}
|
||||
}
|
||||
|
||||
// Complete multipart upload.
|
||||
var complMultipartUpload completeMultipartUpload
|
||||
|
||||
// Loop over total uploaded parts to save them in
|
||||
// Parts array before completing the multipart request.
|
||||
for i := 1; i < partNumber; i++ {
|
||||
part, ok := partsInfo[i]
|
||||
if !ok {
|
||||
return 0, ErrInvalidArgument(fmt.Sprintf("Missing part number %d", i))
|
||||
}
|
||||
complMultipartUpload.Parts = append(complMultipartUpload.Parts, CompletePart{
|
||||
ETag: part.ETag,
|
||||
PartNumber: part.PartNumber,
|
||||
})
|
||||
}
|
||||
|
||||
// Sort all completed parts.
|
||||
sort.Sort(completedParts(complMultipartUpload.Parts))
|
||||
_, err = c.completeMultipartUpload(bucketName, objectName, uploadID, complMultipartUpload)
|
||||
if err != nil {
|
||||
return totalUploadedSize, err
|
||||
}
|
||||
|
||||
// Return final size.
|
||||
return totalUploadedSize, nil
|
||||
}
|
||||
|
||||
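Both new streaming paths register the abort in a defer that inspects the named error return, so any failure after initiation releases the server-side parts. The shape of that idiom, isolated (doUpload is a hypothetical stand-in):

package main

import (
	"errors"
	"fmt"
)

// doUpload demonstrates the named-return + defer cleanup idiom: the
// cleanup fires only when the function exits with a non-nil err.
func doUpload(fail bool) (err error) {
	fmt.Println("initiate upload")
	defer func() {
		if err != nil {
			fmt.Println("abort upload, relinquish storage")
		}
	}()

	if fail {
		return errors.New("part upload failed")
	}
	fmt.Println("complete upload")
	return nil
}

func main() {
	_ = doUpload(false) // initiate, complete
	_ = doUpload(true)  // initiate, abort
}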
// putObjectNoChecksum special function used Google Cloud Storage. This special function
// is used for Google Cloud Storage since Google's multipart API is not S3 compatible.
func (c Client) putObjectNoChecksum(bucketName, objectName string, reader io.Reader, size int64, metaData map[string][]string, progress io.Reader) (n int64, err error) {
// Input validation.
if err := s3utils.CheckValidBucketName(bucketName); err != nil {
return 0, err
}
if err := s3utils.CheckValidObjectName(objectName); err != nil {
return 0, err
}

// Size -1 is only supported on Google Cloud Storage, we error
// out in all other situations.
if size < 0 && !s3utils.IsGoogleEndpoint(c.endpointURL) {
return 0, ErrEntityTooSmall(size, bucketName, objectName)
}
if size > 0 {
if isReadAt(reader) && !isObject(reader) {
reader = io.NewSectionReader(reader.(io.ReaderAt), 0, size)
}
}

// Update progress reader appropriately to the latest offset as we
// read from the source.
readSeeker := newHook(reader, progress)

// This function does not calculate sha256 and md5sum for payload.
// Execute put object.
st, err := c.putObjectDo(bucketName, objectName, readSeeker, nil, nil, size, metaData)
if err != nil {
return 0, err
}
if st.Size != size {
return 0, ErrUnexpectedEOF(st.Size, size, bucketName, objectName)
}
return size, nil
}

// putObjectDo - executes the put object http operation.
// NOTE: You must have WRITE permissions on a bucket to add an object to it.
func (c Client) putObjectDo(bucketName, objectName string, reader io.Reader, md5Sum []byte, sha256Sum []byte, size int64, metaData map[string][]string) (ObjectInfo, error) {
// Input validation.
if err := s3utils.CheckValidBucketName(bucketName); err != nil {
return ObjectInfo{}, err
}
if err := s3utils.CheckValidObjectName(objectName); err != nil {
return ObjectInfo{}, err
}

// Set headers.
customHeader := make(http.Header)

// Set metadata to headers
for k, v := range metaData {
if len(v) > 0 {
customHeader.Set(k, v[0])
}
}

// If Content-Type is not provided, set the default application/octet-stream one
if v, ok := metaData["Content-Type"]; !ok || len(v) == 0 {
customHeader.Set("Content-Type", "application/octet-stream")
}

// Populate request metadata.
reqMetadata := requestMetadata{
bucketName: bucketName,
objectName: objectName,
customHeader: customHeader,
contentBody: reader,
contentLength: size,
contentMD5Bytes: md5Sum,
contentSHA256Bytes: sha256Sum,
}

// Execute PUT an objectName.
resp, err := c.executeMethod("PUT", reqMetadata)
defer closeResponse(resp)
if err != nil {
return ObjectInfo{}, err
}
if resp != nil {
if resp.StatusCode != http.StatusOK {
return ObjectInfo{}, httpRespToErrorResponse(resp, bucketName, objectName)
}
}

var objInfo ObjectInfo
// Trim off the odd double quotes from ETag in the beginning and end.
objInfo.ETag = strings.TrimPrefix(resp.Header.Get("ETag"), "\"")
objInfo.ETag = strings.TrimSuffix(objInfo.ETag, "\"")
// A success here means data was written to server successfully.
objInfo.Size = size

// Return here.
return objInfo, nil
}
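putObjectDo accepts the payload's MD5 and SHA-256 as raw bytes rather than hex strings. For reference, producing those raw digests with only the standard library:

package main

import (
	"crypto/md5"
	"crypto/sha256"
	"encoding/hex"
	"fmt"
)

func main() {
	payload := []byte("example object body")

	md5Sum := md5.Sum(payload)          // [16]byte array
	sha256Sum := sha256.Sum256(payload) // [32]byte array

	// requestMetadata wants []byte, so slice the arrays.
	fmt.Println(hex.EncodeToString(md5Sum[:]))
	fmt.Println(hex.EncodeToString(sha256Sum[:]))
}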
312 vendor/github.com/minio/minio-go/api-put-object.go generated vendored
@ -17,12 +17,14 @@
package minio

import (
"bytes"
"fmt"
"io"
"io/ioutil"
"net/http"
"os"
"reflect"
"runtime"
"runtime/debug"
"sort"
"strings"

"github.com/minio/minio-go/pkg/s3utils"
@ -143,164 +145,178 @@ func (a completedParts) Less(i, j int) bool { return a[i].PartNumber < a[j].Part
//
// You must have WRITE permissions on a bucket to create an object.
//
// - For size smaller than 64MiB PutObject automatically does a single atomic Put operation.
// - For size larger than 64MiB PutObject automatically does a multipart Put operation.
// - For size input as -1 PutObject does a multipart Put operation until input stream reaches EOF.
// Maximum object size that can be uploaded through this operation will be 5TiB.
//
// NOTE: Google Cloud Storage does not implement Amazon S3 Compatible multipart PUT.
// So we fall back to single PUT operation with the maximum limit of 5GiB.
// - For size smaller than 64MiB PutObject automatically does a
// single atomic Put operation.
// - For size larger than 64MiB PutObject automatically does a
// multipart Put operation.
// - For size input as -1 PutObject does a multipart Put operation
// until input stream reaches EOF. Maximum object size that can
// be uploaded through this operation will be 5TiB.
func (c Client) PutObject(bucketName, objectName string, reader io.Reader, contentType string) (n int64, err error) {
return c.PutObjectWithProgress(bucketName, objectName, reader, contentType, nil)
return c.PutObjectWithMetadata(bucketName, objectName, reader, map[string][]string{
"Content-Type": []string{contentType},
}, nil)
}

// putObjectNoChecksum special function used Google Cloud Storage. This special function
// is used for Google Cloud Storage since Google's multipart API is not S3 compatible.
func (c Client) putObjectNoChecksum(bucketName, objectName string, reader io.Reader, size int64, metaData map[string][]string, progress io.Reader) (n int64, err error) {
// PutObjectWithSize - is a helper PutObject similar in behavior to PutObject()
// but takes the size argument explicitly, this function avoids doing reflection
// internally to figure out the size of input stream. Also if the input size is
// lesser than 0 this function returns an error.
func (c Client) PutObjectWithSize(bucketName, objectName string, reader io.Reader, readerSize int64, metadata map[string][]string, progress io.Reader) (n int64, err error) {
return c.putObjectCommon(bucketName, objectName, reader, readerSize, metadata, progress)
}

// PutObjectWithMetadata using AWS streaming signature V4
func (c Client) PutObjectWithMetadata(bucketName, objectName string, reader io.Reader, metadata map[string][]string, progress io.Reader) (n int64, err error) {
return c.PutObjectWithProgress(bucketName, objectName, reader, metadata, progress)
}

// PutObjectWithProgress using AWS streaming signature V4
func (c Client) PutObjectWithProgress(bucketName, objectName string, reader io.Reader, metadata map[string][]string, progress io.Reader) (n int64, err error) {
// Size of the object.
var size int64

// Get reader size.
size, err = getReaderSize(reader)
if err != nil {
return 0, err
}

return c.putObjectCommon(bucketName, objectName, reader, size, metadata, progress)
}

func (c Client) putObjectCommon(bucketName, objectName string, reader io.Reader, size int64, metadata map[string][]string, progress io.Reader) (n int64, err error) {
// Check for largest object size allowed.
if size > int64(maxMultipartPutObjectSize) {
return 0, ErrEntityTooLarge(size, maxMultipartPutObjectSize, bucketName, objectName)
}

// NOTE: Streaming signature is not supported by GCS.
if s3utils.IsGoogleEndpoint(c.endpointURL) {
// Do not compute MD5 for Google Cloud Storage.
return c.putObjectNoChecksum(bucketName, objectName, reader, size, metadata, progress)
}

if c.overrideSignerType.IsV2() {
if size >= 0 && size < minPartSize {
return c.putObjectNoChecksum(bucketName, objectName, reader, size, metadata, progress)
}
return c.putObjectMultipart(bucketName, objectName, reader, size, metadata, progress)
}

if size < 0 {
return c.putObjectMultipartStreamNoLength(bucketName, objectName, reader, metadata, progress)
}

if size < minPartSize {
return c.putObjectNoChecksum(bucketName, objectName, reader, size, metadata, progress)
}

// For all sizes greater than 64MiB do multipart.
return c.putObjectMultipartStream(bucketName, objectName, reader, size, metadata, progress)
}
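putObjectCommon's routing is driven purely by size and client signature state. A condensed decision table of the branches above as a runnable sketch; minPartSize is assumed to be the library's 64MiB constant, and the boolean flags stand in for the endpoint and signer checks:

package main

import "fmt"

// route condenses putObjectCommon's branching, in the same order the
// method checks them.
func route(size int64, isGoogle, isV2 bool) string {
	const minPartSize = 64 * 1024 * 1024 // assumed 64MiB
	switch {
	case isGoogle:
		return "putObjectNoChecksum (no S3-compatible multipart on GCS)"
	case isV2 && size >= 0 && size < minPartSize:
		return "putObjectNoChecksum"
	case isV2:
		return "putObjectMultipart"
	case size < 0:
		return "putObjectMultipartStreamNoLength"
	case size < minPartSize:
		return "putObjectNoChecksum"
	default:
		return "putObjectMultipartStream"
	}
}

func main() {
	fmt.Println(route(-1, false, false))    // unknown length
	fmt.Println(route(1<<20, false, false)) // 1MiB: single PUT
	fmt.Println(route(1<<30, false, false)) // 1GiB: streaming multipart
}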
func (c Client) putObjectMultipartStreamNoLength(bucketName, objectName string, reader io.Reader, metadata map[string][]string,
progress io.Reader) (n int64, err error) {
// Input validation.
if err := s3utils.CheckValidBucketName(bucketName); err != nil {
if err = s3utils.CheckValidBucketName(bucketName); err != nil {
return 0, err
}
if err := s3utils.CheckValidObjectName(objectName); err != nil {
if err = s3utils.CheckValidObjectName(objectName); err != nil {
return 0, err
}
if size > 0 {
readerAt, ok := reader.(io.ReaderAt)
if ok {
reader = io.NewSectionReader(readerAt, 0, size)
}
}

// Update progress reader appropriately to the latest offset as we
// read from the source.
readSeeker := newHook(reader, progress)
// Total data read and written to server. should be equal to
// 'size' at the end of the call.
var totalUploadedSize int64

// This function does not calculate sha256 and md5sum for payload.
// Execute put object.
st, err := c.putObjectDo(bucketName, objectName, readSeeker, nil, nil, size, metaData)
// Complete multipart upload.
var complMultipartUpload completeMultipartUpload

// Calculate the optimal parts info for a given size.
totalPartsCount, partSize, _, err := optimalPartInfo(-1)
if err != nil {
return 0, err
}
if st.Size != size {
return 0, ErrUnexpectedEOF(st.Size, size, bucketName, objectName)

// Initiate a new multipart upload.
uploadID, err := c.newUploadID(bucketName, objectName, metadata)
if err != nil {
return 0, err
}
return size, nil
}

// putObjectSingle is a special function for uploading single put object request.
// This special function is used as a fallback when multipart upload fails.
func (c Client) putObjectSingle(bucketName, objectName string, reader io.Reader, size int64, metaData map[string][]string, progress io.Reader) (n int64, err error) {
// Input validation.
if err := s3utils.CheckValidBucketName(bucketName); err != nil {
return 0, err
}
if err := s3utils.CheckValidObjectName(objectName); err != nil {
return 0, err
}
if size > maxSinglePutObjectSize {
return 0, ErrEntityTooLarge(size, maxSinglePutObjectSize, bucketName, objectName)
}
// If size is a stream, upload up to 5GiB.
if size <= -1 {
size = maxSinglePutObjectSize
}

// Add the appropriate hash algorithms that need to be calculated by hashCopyN
// In case of non-v4 signature request or HTTPS connection, sha256 is not needed.
hashAlgos, hashSums := c.hashMaterials()

// Initialize a new temporary file.
tmpFile, err := newTempFile("single$-putobject-single")
if err != nil {
return 0, err
}
defer tmpFile.Close()

size, err = hashCopyN(hashAlgos, hashSums, tmpFile, reader, size)
// Return error if it's not io.EOF.
if err != nil && err != io.EOF {
return 0, err
}

// Seek back to beginning of the temporary file.
if _, err = tmpFile.Seek(0, 0); err != nil {
return 0, err
}
reader = tmpFile

// Execute put object.
st, err := c.putObjectDo(bucketName, objectName, reader, hashSums["md5"], hashSums["sha256"], size, metaData)
if err != nil {
return 0, err
}
if st.Size != size {
return 0, ErrUnexpectedEOF(st.Size, size, bucketName, objectName)
}
// Progress the reader to the size if putObjectDo is successful.
if progress != nil {
if _, err = io.CopyN(ioutil.Discard, progress, size); err != nil {
return size, err
}
}
return size, nil
}

// putObjectDo - executes the put object http operation.
// NOTE: You must have WRITE permissions on a bucket to add an object to it.
func (c Client) putObjectDo(bucketName, objectName string, reader io.Reader, md5Sum []byte, sha256Sum []byte, size int64, metaData map[string][]string) (ObjectInfo, error) {
// Input validation.
if err := s3utils.CheckValidBucketName(bucketName); err != nil {
return ObjectInfo{}, err
}
if err := s3utils.CheckValidObjectName(objectName); err != nil {
return ObjectInfo{}, err
}

// Set headers.
customHeader := make(http.Header)

// Set metadata to headers
for k, v := range metaData {
if len(v) > 0 {
customHeader.Set(k, v[0])
}
}

// If Content-Type is not provided, set the default application/octet-stream one
if v, ok := metaData["Content-Type"]; !ok || len(v) == 0 {
customHeader.Set("Content-Type", "application/octet-stream")
}

// Populate request metadata.
reqMetadata := requestMetadata{
bucketName: bucketName,
objectName: objectName,
customHeader: customHeader,
contentBody: reader,
contentLength: size,
contentMD5Bytes: md5Sum,
contentSHA256Bytes: sha256Sum,
}

// Execute PUT an objectName.
resp, err := c.executeMethod("PUT", reqMetadata)
defer closeResponse(resp)
if err != nil {
return ObjectInfo{}, err
}
if resp != nil {
if resp.StatusCode != http.StatusOK {
return ObjectInfo{}, httpRespToErrorResponse(resp, bucketName, objectName)
}
}

var objInfo ObjectInfo
// Trim off the odd double quotes from ETag in the beginning and end.
objInfo.ETag = strings.TrimPrefix(resp.Header.Get("ETag"), "\"")
objInfo.ETag = strings.TrimSuffix(objInfo.ETag, "\"")
// A success here means data was written to server successfully.
objInfo.Size = size

// Return here.
return objInfo, nil

defer func() {
if err != nil {
c.abortMultipartUpload(bucketName, objectName, uploadID)
}
}()

// Part number always starts with '1'.
partNumber := 1

// Initialize parts uploaded map.
partsInfo := make(map[int]ObjectPart)

// Create a buffer.
buf := make([]byte, partSize)
defer debug.FreeOSMemory()

for partNumber <= totalPartsCount {
length, rErr := io.ReadFull(reader, buf)
if rErr == io.EOF {
break
}
if rErr != nil && rErr != io.ErrUnexpectedEOF {
return 0, rErr
}

// Update progress reader appropriately to the latest offset
// as we read from the source.
rd := newHook(bytes.NewReader(buf[:length]), progress)

// Proceed to upload the part.
var objPart ObjectPart
objPart, err = c.uploadPart(bucketName, objectName, uploadID, rd, partNumber,
nil, nil, int64(length), metadata)
if err != nil {
return totalUploadedSize, err
}

// Save successfully uploaded part metadata.
partsInfo[partNumber] = objPart

// Save successfully uploaded size.
totalUploadedSize += int64(length)

// Increment part number.
partNumber++

// For unknown size, Read EOF we break away.
// We do not have to upload till totalPartsCount.
if rErr == io.EOF {
break
}
}

// Loop over total uploaded parts to save them in
// Parts array before completing the multipart request.
for i := 1; i < partNumber; i++ {
part, ok := partsInfo[i]
if !ok {
return 0, ErrInvalidArgument(fmt.Sprintf("Missing part number %d", i))
}
complMultipartUpload.Parts = append(complMultipartUpload.Parts, CompletePart{
|
||||
ETag: part.ETag,
|
||||
PartNumber: part.PartNumber,
|
||||
})
|
||||
}
|
||||
|
||||
// Sort all completed parts.
|
||||
sort.Sort(completedParts(complMultipartUpload.Parts))
|
||||
if _, err = c.completeMultipartUpload(bucketName, objectName, uploadID, complMultipartUpload); err != nil {
|
||||
return totalUploadedSize, err
|
||||
}
|
||||
|
||||
// Return final size.
|
||||
return totalUploadedSize, nil
|
||||
}
|
||||
|
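For orientation, the part-size arithmetic behind the optimalPartInfo(-1) call above can be sketched as a standalone approximation. This is illustrative only: the constants mirror the vendored constants.go further down, but partInfo is a hypothetical stand-in, not the vendored function.

package main

import (
	"errors"
	"fmt"
	"math"
)

const (
	maxPartsCount             = 10000
	minPartSize               = 1024 * 1024 * 64
	maxMultipartPutObjectSize = 1024 * 1024 * 1024 * 1024 * 5
)

// partInfo - hypothetical sketch: split the object into at most
// maxPartsCount parts, rounding the part size up to a multiple of
// minPartSize. A size of -1 (unknown stream length) is treated as
// the maximum allowed object size.
func partInfo(objectSize int64) (totalParts int, partSize int64, err error) {
	if objectSize == -1 {
		objectSize = maxMultipartPutObjectSize
	}
	if objectSize > maxMultipartPutObjectSize {
		return 0, 0, errors.New("object size exceeds maximum multipart size")
	}
	// Smallest part size that keeps the part count under the limit.
	ps := math.Ceil(float64(objectSize) / maxPartsCount)
	ps = math.Ceil(ps/minPartSize) * minPartSize
	partSize = int64(ps)
	totalParts = int(math.Ceil(float64(objectSize) / float64(partSize)))
	return totalParts, partSize, nil
}

func main() {
	n, ps, _ := partInfo(-1)
	fmt.Println(n, ps) // For an unknown-length stream: 9103 parts of 603979776 bytes (576MiB).
}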
12 vendor/github.com/minio/minio-go/api-stat.go generated vendored

@@ -55,11 +55,13 @@ func (c Client) BucketExists(bucketName string) (bool, error) {

// List of header keys to be filtered, usually
// from all S3 API http responses.
var defaultFilterKeys = []string{
	"Connection",
	"Transfer-Encoding",
	"Accept-Ranges",
	"Date",
	"Server",
	"Vary",
	"x-amz-bucket-region",
	"x-amz-request-id",
	"x-amz-id-2",
	// Add new headers to be ignored.
@@ -165,11 +167,6 @@ func (c Client) statObject(bucketName, objectName string, reqHeaders RequestHead
		contentType = "application/octet-stream"
	}

	// Extract only the relevant header keys describing the object.
	// following function filters out a list of standard set of keys
	// which are not part of object metadata.
	metadata := extractObjMetadata(resp.Header)

	// Save object metadata info.
	return ObjectInfo{
		ETag: md5sum,
@@ -177,6 +174,9 @@ func (c Client) statObject(bucketName, objectName string, reqHeaders RequestHead
		Size:         size,
		LastModified: date,
		ContentType:  contentType,
		Metadata:     metadata,
		// Extract only the relevant header keys describing the object.
		// following function filters out a list of standard set of keys
		// which are not part of object metadata.
		Metadata: extractObjMetadata(resp.Header),
	}, nil
}
71 vendor/github.com/minio/minio-go/api.go generated vendored

@@ -87,7 +87,7 @@ type Client struct {

// Global constants.
const (
	libraryName    = "minio-go"
	libraryVersion = "2.1.0"
	libraryVersion = "3.0.2"
)

// User Agent should always following the below style.
@@ -190,6 +190,31 @@ func redirectHeaders(req *http.Request, via []*http.Request) error {
	return nil
}

// getRegionFromURL - parse region from URL if present.
func getRegionFromURL(u url.URL) (region string) {
	region = ""
	if s3utils.IsGoogleEndpoint(u) {
		return
	} else if s3utils.IsAmazonChinaEndpoint(u) {
		// For china specifically we need to set everything to
		// cn-north-1 for now, there is no easier way until AWS S3
		// provides a cleaner compatible API across "us-east-1" and
		// China region.
		return "cn-north-1"
	} else if s3utils.IsAmazonGovCloudEndpoint(u) {
		// For us-gov specifically we need to set everything to
		// us-gov-west-1 for now, there is no easier way until AWS S3
		// provides a cleaner compatible API across "us-east-1" and
		// Gov cloud region.
		return "us-gov-west-1"
	}
	parts := s3utils.AmazonS3Host.FindStringSubmatch(u.Host)
	if len(parts) > 1 {
		region = parts[1]
	}
	return region
}

func privateNew(endpoint string, creds *credentials.Credentials, secure bool, region string) (*Client, error) {
	// construct endpoint.
	endpointURL, err := getEndpointURL(endpoint, secure)
@@ -211,11 +236,14 @@ func privateNew(endpoint string, creds *credentials.Credentials, secure bool, re

	// Instantiate http client and bucket location cache.
	clnt.httpClient = &http.Client{
		Transport:     http.DefaultTransport,
		Transport:     defaultMinioTransport,
		CheckRedirect: redirectHeaders,
	}

	// Sets custom region, if region is empty bucket location cache is used automatically.
	if region == "" {
		region = getRegionFromURL(clnt.endpointURL)
	}
	clnt.region = region

	// Instantiate bucket location cache.
@@ -494,7 +522,7 @@ func (c Client) executeMethod(method string, metadata requestMetadata) (res *htt
	// Blank indentifier is kept here on purpose since 'range' without
	// blank identifiers is only supported since go1.4
	// https://golang.org/doc/go1.4#forrange.
	for _ = range c.newRetryTimer(MaxRetry, DefaultRetryUnit, DefaultRetryCap, MaxJitter, doneCh) {
	for range c.newRetryTimer(MaxRetry, DefaultRetryUnit, DefaultRetryCap, MaxJitter, doneCh) {
		// Retry executes the following function body if request has an
		// error until maxRetries have been exhausted, retry attempts are
		// performed after waiting for a given period of time in a
@@ -562,9 +590,14 @@ func (c Client) executeMethod(method string, metadata requestMetadata) (res *htt
		// Additionally we should only retry if bucketLocation and custom
		// region is empty.
		if metadata.bucketLocation == "" && c.region == "" {
			if res.StatusCode == http.StatusBadRequest && errResponse.Region != "" {
				c.bucketLocCache.Set(metadata.bucketName, errResponse.Region)
				continue // Retry.
			if errResponse.Code == "AuthorizationHeaderMalformed" || errResponse.Code == "InvalidRegion" {
				if metadata.bucketName != "" && errResponse.Region != "" {
					// Gather Cached location only if bucketName is present.
					if _, cachedLocationError := c.bucketLocCache.Get(metadata.bucketName); cachedLocationError != false {
						c.bucketLocCache.Set(metadata.bucketName, errResponse.Region)
						continue // Retry.
					}
				}
			}
		}

@@ -616,17 +649,8 @@ func (c Client) newRequest(method string, metadata requestMetadata) (req *http.R
		return nil, err
	}

	// Go net/http notoriously closes the request body.
	// - The request Body, if non-nil, will be closed by the underlying Transport, even on errors.
	// This can cause underlying *os.File seekers to fail, avoid that
	// by making sure to wrap the closer as a nop.
	var body io.ReadCloser
	if metadata.contentBody != nil {
		body = ioutil.NopCloser(metadata.contentBody)
	}

	// Initialize a new HTTP request for the method.
	req, err = http.NewRequest(method, targetURL.String(), body)
	req, err = http.NewRequest(method, targetURL.String(), nil)
	if err != nil {
		return nil, err
	}
@@ -678,6 +702,16 @@ func (c Client) newRequest(method string, metadata requestMetadata) (req *http.R
		req.Header.Set(k, v[0])
	}

	// Go net/http notoriously closes the request body.
	// - The request Body, if non-nil, will be closed by the underlying Transport, even on errors.
	// This can cause underlying *os.File seekers to fail, avoid that
	// by making sure to wrap the closer as a nop.
	if metadata.contentLength == 0 {
		req.Body = nil
	} else {
		req.Body = ioutil.NopCloser(metadata.contentBody)
	}

	// Set incoming content-length.
	req.ContentLength = metadata.contentLength
	if req.ContentLength <= -1 {
@@ -699,7 +733,10 @@ func (c Client) newRequest(method string, metadata requestMetadata) (req *http.R
	case signerType.IsV2():
		// Add signature version '2' authorization header.
		req = s3signer.SignV2(*req, accessKeyID, secretAccessKey)
	case signerType.IsStreamingV4() && method == "PUT":
	case metadata.objectName != "" && method == "PUT" && metadata.customHeader.Get("X-Amz-Copy-Source") == "" && !c.secure:
		// Streaming signature is used by default for a PUT object request. Additionally we also
		// look if the initialized client is secure, if yes then we don't need to perform
		// streaming signature.
		req = s3signer.StreamingSignV4(req, accessKeyID,
			secretAccessKey, sessionToken, location, metadata.contentLength, time.Now().UTC())
	default:
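The NopCloser handling above exists because net/http's Transport closes a non-nil request Body even on errors, which would break an underlying *os.File that must be re-read on retry. A minimal standalone illustration of the same pattern (the URL and the newPutRequest helper are placeholders, not the vendored code):

package main

import (
	"io/ioutil"
	"net/http"
	"os"
)

// newPutRequest mirrors the pattern above: the body is attached after
// http.NewRequest, wrapped so the transport's Close() is a no-op and
// the file stays usable (and seekable) for retries.
func newPutRequest(url string, f *os.File, size int64) (*http.Request, error) {
	req, err := http.NewRequest("PUT", url, nil)
	if err != nil {
		return nil, err
	}
	if size == 0 {
		req.Body = nil // Zero-length payload: send no body at all.
	} else {
		req.Body = ioutil.NopCloser(f) // Transport cannot close the file.
	}
	req.ContentLength = size
	return req, nil
}

func main() {
	f, err := os.Open(os.DevNull)
	if err != nil {
		return
	}
	defer f.Close()
	req, _ := newPutRequest("http://localhost:9000/bucket/object", f, 0)
	_ = req // f.Seek(0, 0) would still succeed after a failed attempt.
}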
44 vendor/github.com/minio/minio-go/bucket-cache.go generated vendored

@@ -91,20 +91,6 @@ func (c Client) getBucketLocation(bucketName string) (string, error) {
		return c.region, nil
	}

	if s3utils.IsAmazonChinaEndpoint(c.endpointURL) {
		// For china specifically we need to set everything to
		// cn-north-1 for now, there is no easier way until AWS S3
		// provides a cleaner compatible API across "us-east-1" and
		// China region.
		return "cn-north-1", nil
	} else if s3utils.IsAmazonGovCloudEndpoint(c.endpointURL) {
		// For us-gov specifically we need to set everything to
		// us-gov-west-1 for now, there is no easier way until AWS S3
		// provides a cleaner compatible API across "us-east-1" and
		// Gov cloud region.
		return "us-gov-west-1", nil
	}

	if location, ok := c.bucketLocCache.Get(bucketName); ok {
		return location, nil
	}
@@ -213,20 +199,24 @@ func (c Client) getBucketLocationRequest(bucketName string) (*http.Request, erro
		signerType = credentials.SignatureAnonymous
	}

	// Set sha256 sum for signature calculation only with signature version '4'.
	switch {
	case signerType.IsV4():
		var contentSha256 string
		if c.secure {
			contentSha256 = unsignedPayload
		} else {
			contentSha256 = hex.EncodeToString(sum256([]byte{}))
		}
		req.Header.Set("X-Amz-Content-Sha256", contentSha256)
		req = s3signer.SignV4(*req, accessKeyID, secretAccessKey, sessionToken, "us-east-1")
	case signerType.IsV2():
		req = s3signer.SignV2(*req, accessKeyID, secretAccessKey)
	if signerType.IsAnonymous() {
		return req, nil
	}

	if signerType.IsV2() {
		req = s3signer.SignV2(*req, accessKeyID, secretAccessKey)
		return req, nil
	}

	// Set sha256 sum for signature calculation only with signature version '4'.
	var contentSha256 string
	if c.secure {
		contentSha256 = unsignedPayload
	} else {
		contentSha256 = hex.EncodeToString(sum256([]byte{}))
	}

	req.Header.Set("X-Amz-Content-Sha256", contentSha256)
	req = s3signer.SignV4(*req, accessKeyID, secretAccessKey, sessionToken, "us-east-1")
	return req, nil
}
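A quick sanity check on the V4 branch above: over plain HTTP, X-Amz-Content-Sha256 carries hex(sha256("")), the well-known empty-payload digest, while over HTTPS the UNSIGNED-PAYLOAD sentinel is used instead. The constant can be reproduced standalone:

package main

import (
	"crypto/sha256"
	"encoding/hex"
	"fmt"
)

func main() {
	// SHA-256 of a zero-byte payload, as sent in X-Amz-Content-Sha256
	// for the bodyless bucket-location request over plain HTTP.
	sum := sha256.Sum256([]byte{})
	fmt.Println(hex.EncodeToString(sum[:]))
	// e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855
}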
14 vendor/github.com/minio/minio-go/constants.go generated vendored

@@ -18,10 +18,18 @@ package minio

/// Multipart upload defaults.

// miniPartSize - minimum part size 64MiB per object after which
// absMinPartSize - absolute minimum part size (5 MiB) below which
// a part in a multipart upload may not be uploaded.
const absMinPartSize = 1024 * 1024 * 5

// minPartSize - minimum part size 64MiB per object after which
// putObject behaves internally as multipart.
const minPartSize = 1024 * 1024 * 64

// copyPartSize - default (and maximum) part size to copy in a
// copy-object request (5GiB)
const copyPartSize = 1024 * 1024 * 1024 * 5

// maxPartsCount - maximum number of parts for a single multipart session.
const maxPartsCount = 10000

@@ -37,10 +45,6 @@ const maxSinglePutObjectSize = 1024 * 1024 * 1024 * 5
// Multipart operation.
const maxMultipartPutObjectSize = 1024 * 1024 * 1024 * 1024 * 5

// optimalReadBufferSize - optimal buffer 5MiB used for reading
// through Read operation.
const optimalReadBufferSize = 1024 * 1024 * 5

// unsignedPayload - value to be set to X-Amz-Content-Sha256 header when
// we don't want to sign the request payload
const unsignedPayload = "UNSIGNED-PAYLOAD"
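These limits interact: 10000 parts of up to 5GiB each would in principle allow roughly 48.8TiB, but maxMultipartPutObjectSize caps multipart objects well below that, at 5TiB. A small standalone check of the arithmetic, reusing the values above:

package main

import "fmt"

const (
	gib                       = 1024 * 1024 * 1024
	maxPartsCount             = 10000
	maxSinglePutObjectSize    = 5 * gib
	maxMultipartPutObjectSize = 5 * 1024 * gib
)

func main() {
	// Theoretical ceiling if every part were the maximum 5GiB...
	fmt.Println(int64(maxPartsCount) * maxSinglePutObjectSize / gib) // 50000 (GiB)
	// ...but the enforced multipart object cap is far lower, at 5TiB.
	fmt.Println(int64(maxMultipartPutObjectSize) / gib) // 5120 (GiB)
}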
99 vendor/github.com/minio/minio-go/copy-conditions.go generated vendored

@@ -1,99 +0,0 @@
/*
 * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2016 Minio, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package minio

import (
	"net/http"
	"time"
)

// copyCondition explanation:
// http://docs.aws.amazon.com/AmazonS3/latest/API/RESTObjectCOPY.html
//
// Example:
//
// copyCondition {
//	key: "x-amz-copy-if-modified-since",
//	value: "Tue, 15 Nov 1994 12:45:26 GMT",
// }
//
type copyCondition struct {
	key   string
	value string
}

// CopyConditions - copy conditions.
type CopyConditions struct {
	conditions []copyCondition
}

// NewCopyConditions - Instantiate new list of conditions. This
// function is left behind for backward compatibility. The idiomatic
// way to set an empty set of copy conditions is,
// ``copyConditions := CopyConditions{}``.
//
func NewCopyConditions() CopyConditions {
	return CopyConditions{}
}

// SetMatchETag - set match etag.
func (c *CopyConditions) SetMatchETag(etag string) error {
	if etag == "" {
		return ErrInvalidArgument("ETag cannot be empty.")
	}
	c.conditions = append(c.conditions, copyCondition{
		key:   "x-amz-copy-source-if-match",
		value: etag,
	})
	return nil
}

// SetMatchETagExcept - set match etag except.
func (c *CopyConditions) SetMatchETagExcept(etag string) error {
	if etag == "" {
		return ErrInvalidArgument("ETag cannot be empty.")
	}
	c.conditions = append(c.conditions, copyCondition{
		key:   "x-amz-copy-source-if-none-match",
		value: etag,
	})
	return nil
}

// SetUnmodified - set unmodified time since.
func (c *CopyConditions) SetUnmodified(modTime time.Time) error {
	if modTime.IsZero() {
		return ErrInvalidArgument("Modified since cannot be empty.")
	}
	c.conditions = append(c.conditions, copyCondition{
		key:   "x-amz-copy-source-if-unmodified-since",
		value: modTime.Format(http.TimeFormat),
	})
	return nil
}

// SetModified - set modified time since.
func (c *CopyConditions) SetModified(modTime time.Time) error {
	if modTime.IsZero() {
		return ErrInvalidArgument("Modified since cannot be empty.")
	}
	c.conditions = append(c.conditions, copyCondition{
		key:   "x-amz-copy-source-if-modified-since",
		value: modTime.Format(http.TimeFormat),
	})
	return nil
}
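CopyConditions is deleted because the reworked CopyObject(dst, src) API hangs the same preconditions off the source descriptor instead. A hedged sketch of the replacement call pattern, assuming the minio-go 3.x surface this commit vendors (endpoint, keys, bucket and object names are placeholders):

package main

import (
	"log"

	minio "github.com/minio/minio-go"
)

func main() {
	client, err := minio.New("play.minio.io:9000", "ACCESS-KEY", "SECRET-KEY", true)
	if err != nil {
		log.Fatal(err)
	}

	// Conditions now hang off the source instead of a CopyConditions value.
	src := minio.NewSourceInfo("src-bucket", "src-object", nil)
	src.SetMatchETagCond("31624deb84149d2f8ef9c385918b653a") // copy only if ETag matches

	dst, err := minio.NewDestinationInfo("dst-bucket", "dst-object", nil, nil)
	if err != nil {
		log.Fatal(err)
	}

	if err = client.CopyObject(dst, src); err != nil {
		log.Fatal(err)
	}
}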
12 vendor/github.com/minio/minio-go/core.go generated vendored

@@ -70,7 +70,13 @@ func (c Core) ListMultipartUploads(bucket, prefix, keyMarker, uploadIDMarker, de

// PutObjectPart - Upload an object part.
func (c Core) PutObjectPart(bucket, object, uploadID string, partID int, size int64, data io.Reader, md5Sum, sha256Sum []byte) (ObjectPart, error) {
	return c.uploadPart(bucket, object, uploadID, data, partID, md5Sum, sha256Sum, size)
	return c.PutObjectPartWithMetadata(bucket, object, uploadID, partID, size, data, md5Sum, sha256Sum, nil)
}

// PutObjectPartWithMetadata - upload an object part with additional request metadata.
func (c Core) PutObjectPartWithMetadata(bucket, object, uploadID string, partID int,
	size int64, data io.Reader, md5Sum, sha256Sum []byte, metadata map[string][]string) (ObjectPart, error) {
	return c.uploadPart(bucket, object, uploadID, data, partID, md5Sum, sha256Sum, size, metadata)
}

// ListObjectParts - List uploaded parts of an incomplete upload.
@@ -80,7 +86,9 @@ func (c Core) ListObjectParts(bucket, object, uploadID string, partNumberMarker

// CompleteMultipartUpload - Concatenate uploaded parts and commit to an object.
func (c Core) CompleteMultipartUpload(bucket, object, uploadID string, parts []CompletePart) error {
	_, err := c.completeMultipartUpload(bucket, object, uploadID, completeMultipartUpload{Parts: parts})
	_, err := c.completeMultipartUpload(bucket, object, uploadID, completeMultipartUpload{
		Parts: parts,
	})
	return err
}
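Driven from the outside, the new Core surface composes into a caller-managed multipart upload roughly as follows. This is a sketch under the assumption that the NewCore constructor and the NewMultipartUpload helper are exported alongside the methods shown above; note the nil md5/sha256 arguments, which this commit makes legal for PutObjectPart. Endpoint, keys, and names are placeholders.

package main

import (
	"bytes"
	"log"

	minio "github.com/minio/minio-go"
)

func main() {
	core, err := minio.NewCore("play.minio.io:9000", "ACCESS-KEY", "SECRET-KEY", true)
	if err != nil {
		log.Fatal(err)
	}

	// Start a multipart session; request metadata may be nil.
	uploadID, err := core.NewMultipartUpload("bucket", "object", nil)
	if err != nil {
		log.Fatal(err)
	}

	// Upload one part; md5Sum and sha256Sum are optional (nil) per this commit.
	payload := bytes.Repeat([]byte("a"), 5*1024*1024)
	part, err := core.PutObjectPart("bucket", "object", uploadID, 1,
		int64(len(payload)), bytes.NewReader(payload), nil, nil)
	if err != nil {
		log.Fatal(err)
	}

	// Commit the object from its recorded parts.
	err = core.CompleteMultipartUpload("bucket", "object", uploadID, []minio.CompletePart{
		{PartNumber: part.PartNumber, ETag: part.ETag},
	})
	if err != nil {
		log.Fatal(err)
	}
}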
85 vendor/github.com/minio/minio-go/pkg/credentials/iam_aws.go generated vendored

@@ -42,7 +42,7 @@ type IAM struct {
	// Required http Client to use when connecting to IAM metadata service.
	Client *http.Client

	// Custom endpoint in place of
	// Custom endpoint to fetch IAM role credentials.
	endpoint string
}

@@ -58,14 +58,19 @@ func redirectHeaders(req *http.Request, via []*http.Request) error {
	return nil
}

// IAM Roles for Amazon EC2
// http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/iam-roles-for-amazon-ec2.html
const (
	defaultIAMRoleEndpoint      = "http://169.254.169.254"
	defaultIAMSecurityCredsPath = "/latest/meta-data/iam/security-credentials"
)

// NewIAM returns a pointer to a new Credentials object wrapping
// the IAM. Takes a ConfigProvider to create a EC2Metadata client.
// The ConfigProvider is satisfied by the session.Session type.
func NewIAM(endpoint string) *Credentials {
	if endpoint == "" {
		// IAM Roles for Amazon EC2
		// http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/iam-roles-for-amazon-ec2.html
		endpoint = "http://169.254.169.254"
		endpoint = defaultIAMRoleEndpoint
	}
	p := &IAM{
		Client: &http.Client{
@@ -81,17 +86,7 @@ func NewIAM(endpoint string) *Credentials {
// Error will be returned if the request fails, or unable to extract
// the desired
func (m *IAM) Retrieve() (Value, error) {
	credsList, err := requestCredList(m.Client, m.endpoint)
	if err != nil {
		return Value{}, err
	}

	if len(credsList) == 0 {
		return Value{}, errors.New("empty EC2 Role list")
	}
	credsName := credsList[0]

	roleCreds, err := requestCred(m.Client, m.endpoint, credsName)
	roleCreds, err := getCredentials(m.Client, m.endpoint)
	if err != nil {
		return Value{}, err
	}
@@ -119,18 +114,32 @@ type ec2RoleCredRespBody struct {
	// Error state
	Code    string
	Message string

	// Unused params.
	LastUpdated time.Time
	Type        string
}

const iamSecurityCredsPath = "/latest/meta-data/iam/security-credentials"

// requestCredList requests a list of credentials from the EC2 service.
// If there are no credentials, or there is an error making or receiving the request
func requestCredList(client *http.Client, endpoint string) ([]string, error) {
// Get the final IAM role URL where the request will
// be sent to fetch the rolling access credentials.
// http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/iam-roles-for-amazon-ec2.html
func getIAMRoleURL(endpoint string) (*url.URL, error) {
	if endpoint == "" {
		endpoint = defaultIAMRoleEndpoint
	}
	u, err := url.Parse(endpoint)
	if err != nil {
		return nil, err
	}
	u.Path = iamSecurityCredsPath
	u.Path = defaultIAMSecurityCredsPath
	return u, nil
}

// listRoleNames lists of credential role names associated
// with the current EC2 service. If there are no credentials,
// or there is an error making or receiving the request.
// http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/iam-roles-for-amazon-ec2.html
func listRoleNames(client *http.Client, u *url.URL) ([]string, error) {
	req, err := http.NewRequest("GET", u.String(), nil)
	if err != nil {
		return nil, err
@@ -157,17 +166,39 @@ func requestCredList(client *http.Client, endpoint string) ([]string, error) {
	return credsList, nil
}

// requestCred requests the credentials for a specific credentials from the EC2 service.
// getCredentials - obtains the credentials from the IAM role name associated with
// the current EC2 service.
//
// If the credentials cannot be found, or there is an error reading the response
// and error will be returned.
func requestCred(client *http.Client, endpoint string, credsName string) (ec2RoleCredRespBody, error) {
	u, err := url.Parse(endpoint)
// If the credentials cannot be found, or there is an error
// reading the response an error will be returned.
func getCredentials(client *http.Client, endpoint string) (ec2RoleCredRespBody, error) {
	// http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/iam-roles-for-amazon-ec2.html
	u, err := getIAMRoleURL(endpoint)
	if err != nil {
		return ec2RoleCredRespBody{}, err
	}

	u.Path = path.Join(iamSecurityCredsPath, credsName)
	// http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/iam-roles-for-amazon-ec2.html
	roleNames, err := listRoleNames(client, u)
	if err != nil {
		return ec2RoleCredRespBody{}, err
	}

	if len(roleNames) == 0 {
		return ec2RoleCredRespBody{}, errors.New("No IAM roles attached to this EC2 service")
	}

	// http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/iam-roles-for-amazon-ec2.html
	// - An instance profile can contain only one IAM role. This limit cannot be increased.
	roleName := roleNames[0]

	// http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/iam-roles-for-amazon-ec2.html
	// The following command retrieves the security credentials for an
	// IAM role named `s3access`.
	//
	// $ curl http://169.254.169.254/latest/meta-data/iam/security-credentials/s3access
	//
	u.Path = path.Join(u.Path, roleName)
	req, err := http.NewRequest("GET", u.String(), nil)
	if err != nil {
		return ec2RoleCredRespBody{}, err
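The two-step flow above (list the attached role names, then fetch that role's rotating keys) stays hidden behind the provider interface; callers only see Get(). A hedged usage sketch, assuming the code runs on an EC2 instance with an instance profile attached:

package main

import (
	"fmt"
	"log"

	"github.com/minio/minio-go/pkg/credentials"
)

func main() {
	// Empty endpoint falls back to the instance metadata service,
	// http://169.254.169.254, as in NewIAM above.
	creds := credentials.NewIAM("")

	// Get() drives Retrieve() under the hood: list the attached role
	// names, pick the first, then fetch its rotating keys.
	v, err := creds.Get()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(v.AccessKeyID, v.SignerType)
}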
49 vendor/github.com/minio/minio-go/pkg/s3signer/request-signature-streaming.go generated vendored

@@ -21,6 +21,7 @@ import (
	"encoding/hex"
	"fmt"
	"io"
	"io/ioutil"
	"net/http"
	"strconv"
	"strings"
@@ -98,7 +99,7 @@ func prepareStreamingRequest(req *http.Request, sessionToken string, dataLen int
	if sessionToken != "" {
		req.Header.Set("X-Amz-Security-Token", sessionToken)
	}
	req.Header.Set("Content-Encoding", streamingEncoding)
	req.Header.Add("Content-Encoding", streamingEncoding)
	req.Header.Set("X-Amz-Date", timestamp.Format(iso8601DateFormat))

	// Set content length with streaming signature for each chunk included.
@@ -205,6 +206,10 @@ func StreamingSignV4(req *http.Request, accessKeyID, secretAccessKey, sessionTok
	// Set headers needed for streaming signature.
	prepareStreamingRequest(req, sessionToken, dataLen, reqTime)

	if req.Body == nil {
		req.Body = ioutil.NopCloser(bytes.NewReader([]byte("")))
	}

	stReader := &StreamingReader{
		baseReadCloser: req.Body,
		accessKeyID:    accessKeyID,
@@ -249,7 +254,18 @@ func (s *StreamingReader) Read(buf []byte) (int, error) {
		s.chunkBufLen = 0
		for {
			n1, err := s.baseReadCloser.Read(s.chunkBuf[s.chunkBufLen:])
			if err == nil || err == io.ErrUnexpectedEOF {
			// Usually we validate `err` first, but in this case
			// we are validating n > 0 for the following reasons.
			//
			// 1. n > 0, err is one of io.EOF, nil (near end of stream)
			// A Reader returning a non-zero number of bytes at the end
			// of the input stream may return either err == EOF or err == nil
			//
			// 2. n == 0, err is io.EOF (actual end of stream)
			//
			// Callers should always process the n > 0 bytes returned
			// before considering the error err.
			if n1 > 0 {
				s.chunkBufLen += n1
				s.bytesRead += int64(n1)

@@ -260,25 +276,26 @@ func (s *StreamingReader) Read(buf []byte) (int, error) {
					s.signChunk(s.chunkBufLen)
					break
				}
			}
			if err != nil {
				if err == io.EOF {
					// No more data left in baseReader - last chunk.
					// Done reading the last chunk from baseReader.
					s.done = true

			} else if err == io.EOF {
				// No more data left in baseReader - last chunk.
				// Done reading the last chunk from baseReader.
				s.done = true
					// bytes read from baseReader different than
					// content length provided.
					if s.bytesRead != s.contentLen {
						return 0, io.ErrUnexpectedEOF
					}

				// bytes read from baseReader different than
				// content length provided.
				if s.bytesRead != s.contentLen {
					return 0, io.ErrUnexpectedEOF
					// Sign the chunk and write it to s.buf.
					s.signChunk(0)
					break
				}

				// Sign the chunk and write it to s.buf.
				s.signChunk(0)
				break

			} else {
				return 0, err
			}

		}
	}
	return s.buf.Read(buf)
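Each chunk this reader emits is framed in the aws-chunked format: hex payload length, the chunk's V4 signature, CRLF, payload, CRLF, with a zero-length chunk terminating the stream (the signChunk(0) above). A standalone sketch of the framing only, with a fixed placeholder where the real rolling HMAC signature would go:

package main

import "fmt"

// frameChunk renders one aws-chunked frame. The signature here is a
// placeholder; in the vendored signer each chunk is signed with a
// rolling signature that chains off the previous one.
func frameChunk(payload []byte, signature string) string {
	return fmt.Sprintf("%x;chunk-signature=%s\r\n%s\r\n", len(payload), signature, payload)
}

func main() {
	fmt.Print(frameChunk([]byte("hello"), "deadbeef")) // "5;chunk-signature=deadbeef\r\nhello\r\n"
	fmt.Print(frameChunk(nil, "deadbeef"))             // final frame: "0;chunk-signature=deadbeef\r\n\r\n"
}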
4 vendor/github.com/minio/minio-go/pkg/s3signer/request-signature-v2.go generated vendored

@@ -42,9 +42,7 @@ const (
func encodeURL2Path(u *url.URL) (path string) {
	// Encode URL path.
	if isS3, _ := filepath.Match("*.s3*.amazonaws.com", u.Host); isS3 {
		hostSplits := strings.SplitN(u.Host, ".", 4)
		// First element is the bucket name.
		bucketName := hostSplits[0]
		bucketName := u.Host[:strings.LastIndex(u.Host, ".s3")]
		path = "/" + bucketName
		path += u.Path
		path = s3utils.EncodePath(path)
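The switch from SplitN to LastIndex matters for dotted bucket names: splitting on '.' keeps only the first label, while slicing up to the last ".s3" preserves the whole bucket. A quick standalone comparison:

package main

import (
	"fmt"
	"strings"
)

func main() {
	host := "my.dotted.bucket.s3-us-west-2.amazonaws.com"

	// Old approach: the first element of a SplitN is only the first label.
	fmt.Println(strings.SplitN(host, ".", 4)[0]) // "my"

	// New approach: everything before the last ".s3" is the bucket.
	fmt.Println(host[:strings.LastIndex(host, ".s3")]) // "my.dotted.bucket"
}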
10 vendor/github.com/minio/minio-go/pkg/s3utils/utils.go generated vendored

@@ -80,6 +80,9 @@ func IsVirtualHostSupported(endpointURL url.URL, bucketName string) bool {
	return IsAmazonEndpoint(endpointURL) || IsGoogleEndpoint(endpointURL)
}

// AmazonS3Host - regular expression used to determine if an arg is s3 host.
var AmazonS3Host = regexp.MustCompile("^s3[.-]?(.*?)\\.amazonaws\\.com$")

// IsAmazonEndpoint - Match if it is exactly Amazon S3 endpoint.
func IsAmazonEndpoint(endpointURL url.URL) bool {
	if IsAmazonChinaEndpoint(endpointURL) {
@@ -88,7 +91,7 @@ func IsAmazonEndpoint(endpointURL url.URL) bool {
	if IsAmazonGovCloudEndpoint(endpointURL) {
		return true
	}
	return endpointURL.Host == "s3.amazonaws.com"
	return AmazonS3Host.MatchString(endpointURL.Host)
}

// IsAmazonGovCloudEndpoint - Match if it is exactly Amazon S3 GovCloud endpoint.
@@ -205,7 +208,7 @@ func EncodePath(pathName string) string {
// We support '.' with bucket names but we fallback to using path
// style requests instead for such buckets.
var (
	validBucketName       = regexp.MustCompile(`^[A-Za-z0-9][A-Za-z0-9\.\-]{1,61}[A-Za-z0-9]$`)
	validBucketName       = regexp.MustCompile(`^[A-Za-z0-9][A-Za-z0-9\.\-\_\:]{1,61}[A-Za-z0-9]$`)
	validBucketNameStrict = regexp.MustCompile(`^[a-z0-9][a-z0-9\.\-]{1,61}[a-z0-9]$`)
	ipAddress             = regexp.MustCompile(`^(\d+\.){3}\d+$`)
)
@@ -240,14 +243,13 @@ func checkBucketNameCommon(bucketName string, strict bool) (err error) {
}

// CheckValidBucketName - checks if we have a valid input bucket name.
// This is a non stricter version.
// - http://docs.aws.amazon.com/AmazonS3/latest/dev/UsingBucket.html
func CheckValidBucketName(bucketName string) (err error) {
	return checkBucketNameCommon(bucketName, false)
}

// CheckValidBucketNameStrict - checks if we have a valid input bucket name.
// This is a stricter version.
// - http://docs.aws.amazon.com/AmazonS3/latest/dev/UsingBucket.html
func CheckValidBucketNameStrict(bucketName string) (err error) {
	return checkBucketNameCommon(bucketName, true)
}
vendor/github.com/minio/minio-go/tempfile.go
generated
vendored
60
vendor/github.com/minio/minio-go/tempfile.go
generated
vendored
@ -1,60 +0,0 @@
|
||||
/*
|
||||
* Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package minio
|
||||
|
||||
import (
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"sync"
|
||||
)
|
||||
|
||||
// tempFile - temporary file container.
|
||||
type tempFile struct {
|
||||
*os.File
|
||||
mutex *sync.Mutex
|
||||
}
|
||||
|
||||
// newTempFile returns a new temporary file, once closed it automatically deletes itself.
|
||||
func newTempFile(prefix string) (*tempFile, error) {
|
||||
// use platform specific temp directory.
|
||||
file, err := ioutil.TempFile(os.TempDir(), prefix)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &tempFile{
|
||||
File: file,
|
||||
mutex: &sync.Mutex{},
|
||||
}, nil
|
||||
}
|
||||
|
||||
// Close - closer wrapper to close and remove temporary file.
|
||||
func (t *tempFile) Close() error {
|
||||
t.mutex.Lock()
|
||||
defer t.mutex.Unlock()
|
||||
if t.File != nil {
|
||||
// Close the file.
|
||||
if err := t.File.Close(); err != nil {
|
||||
return err
|
||||
}
|
||||
// Remove file.
|
||||
if err := os.Remove(t.File.Name()); err != nil {
|
||||
return err
|
||||
}
|
||||
t.File = nil
|
||||
}
|
||||
return nil
|
||||
}
|
48 vendor/github.com/minio/minio-go/transport.go generated vendored Normal file

@@ -0,0 +1,48 @@
// +build go1.7 go1.8

/*
 * Minio Go Library for Amazon S3 Compatible Cloud Storage
 * (C) 2017 Minio, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package minio

import (
	"net"
	"net/http"
	"time"
)

// This default transport is similar to http.DefaultTransport
// but with additional DisableCompression:
var defaultMinioTransport http.RoundTripper = &http.Transport{
	Proxy: http.ProxyFromEnvironment,
	DialContext: (&net.Dialer{
		Timeout:   30 * time.Second,
		KeepAlive: 30 * time.Second,
		DualStack: true,
	}).DialContext,
	MaxIdleConns:          100,
	IdleConnTimeout:       90 * time.Second,
	TLSHandshakeTimeout:   10 * time.Second,
	ExpectContinueTimeout: 1 * time.Second,
	// Set this value so that the underlying transport round-tripper
	// doesn't try to auto decode the body of objects with
	// content-encoding set to `gzip`.
	//
	// Refer:
	// https://golang.org/src/net/http/transport.go?h=roundTrip#L1843
	DisableCompression: true,
}
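DisableCompression is the load-bearing field here: if the transport transparently gunzipped responses, the bytes handed back for an object stored with Content-Encoding: gzip would no longer match the stored object's size or checksum. A minimal sketch of wiring such a transport into a plain client:

package main

import (
	"net/http"
	"time"
)

func main() {
	// With DisableCompression set, a GET of an object stored with
	// Content-Encoding: gzip returns the stored bytes untouched, so
	// Content-Length and ETag still line up with what was uploaded.
	transport := &http.Transport{
		TLSHandshakeTimeout: 10 * time.Second,
		DisableCompression:  true,
	}
	client := &http.Client{Transport: transport}
	_ = client
}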
39 vendor/github.com/minio/minio-go/transport_1_5.go generated vendored Normal file

@@ -0,0 +1,39 @@
// +build go1.5,!go1.6,!go1.7,!go1.8

/*
 * Minio Go Library for Amazon S3 Compatible Cloud Storage
 * (C) 2017 Minio, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package minio

import (
	"net/http"
	"time"
)

// This default transport is similar to http.DefaultTransport
// but with additional DisableCompression:
var defaultMinioTransport http.RoundTripper = &http.Transport{
	Proxy:               http.ProxyFromEnvironment,
	TLSHandshakeTimeout: 10 * time.Second,
	// Set this value so that the underlying transport round-tripper
	// doesn't try to auto decode the body of objects with
	// content-encoding set to `gzip`.
	//
	// Refer:
	// https://golang.org/src/net/http/transport.go?h=roundTrip#L1843
	DisableCompression: true,
}
40 vendor/github.com/minio/minio-go/transport_1_6.go generated vendored Normal file

@@ -0,0 +1,40 @@
// +build go1.6,!go1.7,!go1.8

/*
 * Minio Go Library for Amazon S3 Compatible Cloud Storage
 * (C) 2017 Minio, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package minio

import (
	"net/http"
	"time"
)

// This default transport is similar to http.DefaultTransport
// but with additional DisableCompression:
var defaultMinioTransport http.RoundTripper = &http.Transport{
	Proxy:                 http.ProxyFromEnvironment,
	TLSHandshakeTimeout:   10 * time.Second,
	ExpectContinueTimeout: 1 * time.Second,
	// Set this value so that the underlying transport round-tripper
	// doesn't try to auto decode the body of objects with
	// content-encoding set to `gzip`.
	//
	// Refer:
	// https://golang.org/src/net/http/transport.go?h=roundTrip#L1843
	DisableCompression: true,
}
2 vendor/github.com/minio/minio-go/utils.go generated vendored

@@ -122,7 +122,7 @@ func isValidEndpointURL(endpointURL url.URL) error {
	if endpointURL.Path != "/" && endpointURL.Path != "" {
		return ErrInvalidArgument("Endpoint url cannot have fully qualified paths.")
	}
	if strings.Contains(endpointURL.Host, ".amazonaws.com") {
	if strings.Contains(endpointURL.Host, ".s3.amazonaws.com") {
		if !s3utils.IsAmazonEndpoint(endpointURL) {
			return ErrInvalidArgument("Amazon S3 endpoint should be 's3.amazonaws.com'.")
		}
36 vendor/vendor.json vendored

@@ -312,46 +312,46 @@
			"revisionTime": "2016-02-29T08:42:30-08:00"
		},
		{
			"checksumSHA1": "vRZLSG4FsBdanI19VMtKf17jzZA=",
			"checksumSHA1": "RoElkV9hrX7Zd8YivXD+JOJOumA=",
			"path": "github.com/minio/minio-go",
			"revision": "2cca719d0760cc8906b0843a3e1e93fe9dbd8bb4",
			"revisionTime": "2017-06-23T21:21:08Z"
			"revision": "84539d76271caeffb7a1d5f058bd83c6449f8145",
			"revisionTime": "2017-09-01T08:51:27Z"
		},
		{
			"checksumSHA1": "wDNvEYgDy1gOkzJ81WuuYore3dw=",
			"checksumSHA1": "5juljGXPkBWENR2Os7dlnPQER48=",
			"path": "github.com/minio/minio-go/pkg/credentials",
			"revision": "79aa9c39f7be2cdf802aa550a00850dbc1e37835",
			"revisionTime": "2017-06-19T22:00:32Z"
			"revision": "84539d76271caeffb7a1d5f058bd83c6449f8145",
			"revisionTime": "2017-09-01T08:51:27Z"
		},
		{
			"checksumSHA1": "pggIpSePizRBQ7ybhB0CuaSQydw=",
			"path": "github.com/minio/minio-go/pkg/encrypt",
			"revision": "79aa9c39f7be2cdf802aa550a00850dbc1e37835",
			"revisionTime": "2017-06-19T22:00:32Z"
			"revision": "84539d76271caeffb7a1d5f058bd83c6449f8145",
			"revisionTime": "2017-09-01T08:51:27Z"
		},
		{
			"checksumSHA1": "3tl2ehmod/EzXE9o9WJ5HM2AQPE=",
			"path": "github.com/minio/minio-go/pkg/policy",
			"revision": "79aa9c39f7be2cdf802aa550a00850dbc1e37835",
			"revisionTime": "2017-06-19T22:00:32Z"
			"revision": "84539d76271caeffb7a1d5f058bd83c6449f8145",
			"revisionTime": "2017-09-01T08:51:27Z"
		},
		{
			"checksumSHA1": "uvEv7QS9WamqQHyru27ugQGzyLU=",
			"checksumSHA1": "ENjhnv4qjgfc3/v6nJhLNR4COOQ=",
			"path": "github.com/minio/minio-go/pkg/s3signer",
			"revision": "79aa9c39f7be2cdf802aa550a00850dbc1e37835",
			"revisionTime": "2017-06-19T22:00:32Z"
			"revision": "84539d76271caeffb7a1d5f058bd83c6449f8145",
			"revisionTime": "2017-09-01T08:51:27Z"
		},
		{
			"checksumSHA1": "XTEUN/pAWAusSXT3yn6UznCl3iA=",
			"checksumSHA1": "jWv8ONT9vgsX6MAMfCWHsyJtmHU=",
			"path": "github.com/minio/minio-go/pkg/s3utils",
			"revision": "79aa9c39f7be2cdf802aa550a00850dbc1e37835",
			"revisionTime": "2017-06-19T22:00:32Z"
			"revision": "84539d76271caeffb7a1d5f058bd83c6449f8145",
			"revisionTime": "2017-09-01T08:51:27Z"
		},
		{
			"checksumSHA1": "maUy+dbN6VfTTnfErrAW2lLit1w=",
			"path": "github.com/minio/minio-go/pkg/set",
			"revision": "79aa9c39f7be2cdf802aa550a00850dbc1e37835",
			"revisionTime": "2017-06-19T22:00:32Z"
			"revision": "84539d76271caeffb7a1d5f058bd83c6449f8145",
			"revisionTime": "2017-09-01T08:51:27Z"
		},
		{
			"checksumSHA1": "cYuXpiVBMypgkEr0Wqd79jPPyBg=",